From 07f759e4e6c5c21d0c694b3eb85742ca0c635151 Mon Sep 17 00:00:00 2001 From: Robert Lin Date: Wed, 27 Apr 2022 07:55:45 -0700 Subject: [PATCH] observation: integrate Logger into all levels of Observation (#34456) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Creating and tagging operations now create loggers embedded into the various Observation types that can be used directly for ✨ structured logs ✨. See internal/observation package docs for more details. --- cmd/frontend/internal/cli/serve_cmd.go | 24 +-- cmd/frontend/internal/highlight/highlight.go | 13 -- cmd/frontend/internal/highlight/html.go | 3 +- cmd/frontend/internal/highlight/language.go | 1 + cmd/gitserver/main.go | 32 +-- cmd/gitserver/server/server.go | 4 +- cmd/migrator/main.go | 3 +- cmd/symbols/fetcher/repository_fetcher.go | 2 +- cmd/symbols/gitserver/client.go | 4 +- cmd/symbols/internal/api/search_sqlite.go | 2 +- cmd/symbols/parser/parser.go | 4 +- cmd/symbols/shared/main.go | 11 +- cmd/worker/internal/migrations/init.go | 3 +- .../cmd/executor/internal/apiclient/client.go | 14 +- .../cmd/executor/internal/command/run.go | 2 +- enterprise/cmd/executor/main.go | 4 +- .../codeintel/httpapi/auth_middleware.go | 2 +- .../codeintel/httpapi/upload_handler.go | 2 +- .../httpapi/upload_handler_multipart.go | 6 +- .../httpapi/upload_handler_single.go | 2 +- .../documentation_query_definitions.go | 2 +- .../documentation_query_references.go | 2 +- .../internal/codeintel/resolvers/exists.go | 2 +- .../codeintel/resolvers/graphql/resolver.go | 20 +- .../codeintel/resolvers/graphql/support.go | 4 +- .../codeintel/resolvers/observability.go | 22 +- .../codeintel/resolvers/query_definitions.go | 2 +- .../codeintel/resolvers/query_diagnostics.go | 2 +- .../resolvers/query_documentation.go | 8 +- .../codeintel/resolvers/query_hover.go | 2 +- .../resolvers/query_implementations.go | 2 +- .../codeintel/resolvers/query_ranges.go | 2 +- 
.../codeintel/resolvers/query_references.go | 2 +- .../resolvers/query_references_test.go | 3 +- .../codeintel/resolvers/query_stencil.go | 2 +- .../internal/codeintel/resolvers/resolver.go | 2 +- .../frontend/internal/codeintel/services.go | 20 +- enterprise/cmd/frontend/main.go | 21 +- .../internal/worker/handler.go | 2 +- .../internal/worker/handler_test.go | 7 +- .../cmd/precise-code-intel-worker/main.go | 3 +- .../batches/bulk_operation_processor_job.go | 3 +- .../cmd/worker/internal/batches/dbstore.go | 12 +- .../worker/internal/batches/janitor_job.go | 3 +- .../internal/batches/migrations/migrations.go | 4 +- .../worker/internal/batches/reconciler_job.go | 3 +- .../batches/workspace_resolver_job.go | 3 +- .../cmd/worker/internal/codeintel/clients.go | 6 +- .../internal/codeintel/commitgraph/updater.go | 2 +- .../internal/codeintel/commitgraph_job.go | 2 +- .../cmd/worker/internal/codeintel/dbstore.go | 8 +- .../indexing/dependency_sync_scheduler.go | 2 +- .../worker/internal/codeintel/indexing_job.go | 3 +- .../worker/internal/codeintel/janitor_job.go | 3 +- .../worker/internal/codeintel/lsifstore.go | 4 +- enterprise/internal/batches/background.go | 4 +- .../internal/batches/service/service.go | 40 ++-- .../service/service_apply_batch_change.go | 4 +- .../internal/batches/store/batch_changes.go | 16 +- .../store/batch_spec_execution_cache_entry.go | 8 +- .../store/batch_spec_resolution_jobs.go | 6 +- .../batch_spec_workspace_execution_jobs.go | 14 +- .../batches/store/batch_spec_workspaces.go | 10 +- .../internal/batches/store/batch_specs.go | 16 +- .../internal/batches/store/bulk_operations.go | 8 +- .../batches/store/changeset_events.go | 8 +- .../internal/batches/store/changeset_jobs.go | 4 +- .../internal/batches/store/changeset_specs.go | 20 +- .../internal/batches/store/changesets.go | 36 ++-- enterprise/internal/batches/store/codehost.go | 4 +- .../batches/store/site_credentials.go | 10 +- .../codeintel/autoindex/enqueuer/enqueuer.go | 6 +- 
.../internal/codeintel/gitserver/client.go | 26 +-- .../internal/codeintel/repoupdater/client.go | 4 +- .../codeintel/stores/dbstore/commits.go | 24 +-- .../codeintel/stores/dbstore/configuration.go | 4 +- .../stores/dbstore/configuration_policies.go | 12 +- .../stores/dbstore/dependency_index.go | 6 +- .../codeintel/stores/dbstore/dumps.go | 8 +- .../codeintel/stores/dbstore/indexes.go | 18 +- .../codeintel/stores/dbstore/janitor.go | 8 +- .../codeintel/stores/dbstore/packages.go | 2 +- .../codeintel/stores/dbstore/references.go | 2 +- .../internal/codeintel/stores/dbstore/repo.go | 8 +- .../codeintel/stores/dbstore/support.go | 4 +- .../codeintel/stores/dbstore/uploads.go | 40 ++-- .../codeintel/stores/dbstore/xrepo.go | 6 +- .../codeintel/stores/lsifstore/clear.go | 2 +- .../codeintel/stores/lsifstore/data_write.go | 12 +- .../lsifstore/data_write_documentation.go | 10 +- .../codeintel/stores/lsifstore/diagnostics.go | 2 +- .../stores/lsifstore/documentation.go | 16 +- .../stores/lsifstore/documentation_janitor.go | 2 +- .../codeintel/stores/lsifstore/exists.go | 2 +- .../codeintel/stores/lsifstore/hover.go | 2 +- .../codeintel/stores/lsifstore/locations.go | 4 +- .../codeintel/stores/lsifstore/monikers.go | 4 +- .../codeintel/stores/lsifstore/packages.go | 2 +- .../codeintel/stores/lsifstore/ranges.go | 6 +- .../codeintel/stores/lsifstore/stencil.go | 2 +- .../codemonitors/background/metrics.go | 6 +- .../insights/background/background.go | 5 +- .../internal/insights/compression/worker.go | 2 +- go.mod | 4 +- go.sum | 4 +- internal/codeintel/autoindexing/init.go | 4 +- .../autoindexing/internal/inference/init.go | 4 +- .../internal/inference/service.go | 14 +- .../autoindexing/internal/store/init.go | 4 +- .../autoindexing/internal/store/store.go | 2 +- internal/codeintel/autoindexing/service.go | 14 +- internal/codeintel/dependencies/init.go | 4 +- .../dependencies/internal/lockfiles/init.go | 4 +- .../internal/lockfiles/service.go | 4 +- 
.../dependencies/internal/store/init.go | 4 +- .../dependencies/internal/store/store.go | 6 +- internal/codeintel/dependencies/service.go | 2 +- internal/codeintel/documents/init.go | 4 +- .../documents/internal/store/init.go | 4 +- .../documents/internal/store/store.go | 2 +- internal/codeintel/documents/service.go | 2 +- internal/codeintel/policies/init.go | 4 +- .../codeintel/policies/internal/store/init.go | 4 +- .../policies/internal/store/store.go | 2 +- internal/codeintel/policies/service.go | 14 +- internal/codeintel/symbols/init.go | 4 +- .../codeintel/symbols/internal/store/init.go | 4 +- .../codeintel/symbols/internal/store/store.go | 2 +- internal/codeintel/symbols/service.go | 2 +- internal/codeintel/uploads/init.go | 4 +- .../codeintel/uploads/internal/store/init.go | 4 +- .../codeintel/uploads/internal/store/store.go | 2 +- internal/codeintel/uploads/service.go | 14 +- .../codeintel/uploads/transport/http/init.go | 4 +- .../uploads/transport/http/resolver.go | 2 +- internal/database/batch/batch.go | 2 +- internal/database/batch/observability.go | 4 +- internal/database/migration/store/describe.go | 2 +- internal/database/migration/store/store.go | 14 +- internal/diskcache/cache.go | 6 +- .../extsvc/jvmpackages/coursier/coursier.go | 37 ++-- .../jvmpackages/coursier/observability.go | 35 ++- internal/extsvc/npm/npm.go | 19 +- internal/extsvc/npm/observability.go | 31 ++- internal/gitserver/client.go | 4 +- internal/gitserver/observability.go | 4 +- internal/goroutine/periodic.go | 2 +- internal/luasandbox/init.go | 4 +- internal/luasandbox/sandbox.go | 10 +- internal/luasandbox/service.go | 2 +- internal/observation/fields.go | 17 ++ internal/observation/observation.go | 203 ++++++++++-------- internal/observation/util.go | 1 - internal/oobmigration/runner.go | 4 +- internal/repos/sync_worker.go | 3 +- internal/trace/traceutil.go | 38 +++- internal/uploadstore/gcs_client.go | 8 +- internal/uploadstore/s3_client.go | 8 +- 
internal/workerutil/dbworker/store/store.go | 22 +- internal/workerutil/worker.go | 6 +- 160 files changed, 761 insertions(+), 670 deletions(-) create mode 100644 internal/observation/fields.go diff --git a/cmd/frontend/internal/cli/serve_cmd.go b/cmd/frontend/internal/cli/serve_cmd.go index 8d5db8556e3..094f270ddc0 100644 --- a/cmd/frontend/internal/cli/serve_cmd.go +++ b/cmd/frontend/internal/cli/serve_cmd.go @@ -127,12 +127,14 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable) }) defer syncLogs() + logger := sglog.Scoped("server", "the frontend server program") + ready := make(chan struct{}) go debugserver.NewServerRoutine(ready).Start() sqlDB, err := InitDB() if err != nil { - log.Fatalf("ERROR: %v", err) + return err } db := database.NewDB(sqlDB) @@ -140,20 +142,20 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable) log15.Warn("Skipping out-of-band migrations check") } else { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger, Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } outOfBandMigrationRunner := oobmigration.NewRunnerWithDB(db, oobmigration.RefreshInterval, observationContext) if err := oobmigration.ValidateOutOfBandMigrationRunner(ctx, db, outOfBandMigrationRunner); err != nil { - log.Fatalf("failed to validate out of band migrations: %v", err) + return errors.Wrap(err, "failed to validate out of band migrations") } } // override site config first if err := overrideSiteConfig(ctx, db); err != nil { - log.Fatalf("failed to apply site config overrides: %v", err) + return errors.Wrap(err, "failed to apply site config overrides") } globals.ConfigurationServerFrontendOnly = conf.InitConfigurationServerFrontendOnly(&configurationSource{db: db}) conf.Init() @@ -161,17 +163,17 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable) // now we can init the keyring, as it depends on 
site config if err := keyring.Init(ctx); err != nil { - log.Fatalf("failed to initialize encryption keyring: %v", err) + return errors.Wrap(err, "failed to initialize encryption keyring") } if err := overrideGlobalSettings(ctx, db); err != nil { - log.Fatalf("failed to override global settings: %v", err) + return errors.Wrap(err, "failed to override global settings") } // now the keyring is configured it's safe to override the rest of the config // and that config can access the keyring if err := overrideExtSvcConfig(ctx, db); err != nil { - log.Fatalf("failed to override external service config: %v", err) + return errors.Wrap(err, "failed to override external service config") } // Filter trace logs @@ -187,7 +189,7 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable) authz.DefaultSubRepoPermsChecker, err = authz.NewSubRepoPermsClient(database.SubRepoPerms(db)) if err != nil { - log.Fatalf("Failed to create sub-repo client: %v", err) + return errors.Wrap(err, "Failed to create sub-repo client") } ui.InitRouter(db, enterprise.CodeIntelResolver) @@ -287,11 +289,9 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable) } if printLogo { - fmt.Println(" ") - fmt.Println(logoColor) - fmt.Println(" ") + logger.Info(fmt.Sprintf("\n\n%s\n\n", logoColor)) } - fmt.Printf("✱ Sourcegraph is ready at: %s\n", globals.ExternalURL()) + logger.Info(fmt.Sprintf("✱ Sourcegraph is ready at: %s\n", globals.ExternalURL())) close(ready) goroutine.MonitorBackgroundRoutines(context.Background(), routines...) 
diff --git a/cmd/frontend/internal/highlight/highlight.go b/cmd/frontend/internal/highlight/highlight.go index 692a17d2509..a849eec3e6c 100644 --- a/cmd/frontend/internal/highlight/highlight.go +++ b/cmd/frontend/internal/highlight/highlight.go @@ -23,7 +23,6 @@ import ( "github.com/sourcegraph/sourcegraph/internal/env" "github.com/sourcegraph/sourcegraph/internal/gosyntect" - "github.com/sourcegraph/sourcegraph/internal/honey" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace/ot" "github.com/sourcegraph/sourcegraph/lib/codeintel/lsiftyped" @@ -39,18 +38,6 @@ var ( func init() { client = gosyntect.New(syntectServer) - - obsvCtx := observation.Context{ - HoneyDataset: &honey.Dataset{ - Name: "codeintel-syntax-highlighting", - SampleRate: 10, // 1 in 10 - }, - } - highlightOp = obsvCtx.Operation(observation.Op{ - Name: "codeintel.syntax-highlight.Code", - LogFields: []otlog.Field{}, - ErrorFilter: func(err error) observation.ErrorFilterBehaviour { return observation.EmitForHoney }, - }) } // IsBinary is a helper to tell if the content of a file is binary or not. diff --git a/cmd/frontend/internal/highlight/html.go b/cmd/frontend/internal/highlight/html.go index aebf12067bf..91a077e8cfc 100644 --- a/cmd/frontend/internal/highlight/html.go +++ b/cmd/frontend/internal/highlight/html.go @@ -6,9 +6,10 @@ import ( "html/template" "strings" - "github.com/sourcegraph/sourcegraph/lib/codeintel/lsiftyped" "golang.org/x/net/html" "golang.org/x/net/html/atom" + + "github.com/sourcegraph/sourcegraph/lib/codeintel/lsiftyped" ) // DocumentToSplitHTML returns a list of each line of HTML. 
diff --git a/cmd/frontend/internal/highlight/language.go b/cmd/frontend/internal/highlight/language.go index 0666c6aabe3..d3f345b383a 100644 --- a/cmd/frontend/internal/highlight/language.go +++ b/cmd/frontend/internal/highlight/language.go @@ -7,6 +7,7 @@ import ( "github.com/go-enry/go-enry/v2" "github.com/grafana/regexp" + "github.com/sourcegraph/sourcegraph/internal/conf" "github.com/sourcegraph/sourcegraph/internal/conf/conftypes" ) diff --git a/cmd/gitserver/main.go b/cmd/gitserver/main.go index a0c97b5d0e8..cad90458d5f 100644 --- a/cmd/gitserver/main.go +++ b/cmd/gitserver/main.go @@ -6,7 +6,6 @@ import ( "context" "database/sql" "encoding/base64" - "log" "net" "net/http" "net/url" @@ -20,6 +19,7 @@ import ( "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/tidwall/gjson" + "go.uber.org/zap" "golang.org/x/sync/semaphore" "golang.org/x/time/rate" @@ -57,7 +57,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/types" "github.com/sourcegraph/sourcegraph/internal/version" "github.com/sourcegraph/sourcegraph/lib/errors" - sglog "github.com/sourcegraph/sourcegraph/lib/log" + "github.com/sourcegraph/sourcegraph/lib/log" "github.com/sourcegraph/sourcegraph/schema" ) @@ -82,7 +82,7 @@ func main() { conf.Init() logging.Init() - syncLogs := sglog.Init(sglog.Resource{ + syncLogs := log.Init(log.Resource{ Name: env.MyName, Version: version.Version(), InstanceID: hostname.Get(), @@ -93,21 +93,23 @@ func main() { trace.Init() profiler.Init() + logger := log.Scoped("server", "the gitserver service") + if reposDir == "" { - log.Fatal("git-server: SRC_REPOS_DIR is required") + logger.Fatal("SRC_REPOS_DIR is required") } if err := os.MkdirAll(reposDir, os.ModePerm); err != nil { - log.Fatalf("failed to create SRC_REPOS_DIR: %s", err) + logger.Fatal("failed to create SRC_REPOS_DIR", zap.Error(err)) } wantPctFree2, err := getPercent(wantPctFree) if err != nil { - log.Fatalf("SRC_REPOS_DESIRED_PERCENT_FREE is out of 
range: %v", err) + logger.Fatal("SRC_REPOS_DESIRED_PERCENT_FREE is out of range", zap.Error(err)) } sqlDB, err := getDB() if err != nil { - log.Fatalf("failed to initialize database stores: %v", err) + logger.Fatal("failed to initialize database stores", zap.Error(err)) } db := database.NewDB(sqlDB) @@ -117,7 +119,7 @@ func main() { err = keyring.Init(ctx) if err != nil { - log.Fatalf("failed to initialise keyring: %s", err) + logger.Fatal("failed to initialise keyring", zap.Error(err)) } gitserver := server.Server{ @@ -136,18 +138,18 @@ func main() { } observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger, Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } gitserver.RegisterMetrics(db, observationContext) if tmpDir, err := gitserver.SetupAndClearTmp(); err != nil { - log.Fatalf("failed to setup temporary directory: %s", err) + logger.Fatal("failed to setup temporary directory", log.Error(err)) } else if err := os.Setenv("TMP_DIR", tmpDir); err != nil { // Additionally, set TMP_DIR so other temporary files we may accidentally // create are on the faster RepoDir mount. - log.Fatalf("Setting TMP_DIR: %s", err) + logger.Fatal("Setting TMP_DIR", log.Error(err)) } // Create Handler now since it also initializes state @@ -166,7 +168,7 @@ func main() { // Best effort attempt to sync rate limiters for site level external services // early on. If it fails, we'll try again in the background sync below. 
if err := syncSiteLevelExternalServiceRateLimiters(ctx, externalServiceStore); err != nil { - log15.Warn("error performing initial site level rate limit sync", "error", err) + logger.Warn("error performing initial site level rate limit sync", log.Error(err)) } go syncRateLimiters(ctx, externalServiceStore, rateLimitSyncerLimitPerSecond) @@ -189,12 +191,12 @@ func main() { Addr: addr, Handler: handler, } - log15.Info("git-server: listening", "addr", srv.Addr) + logger.Info("git-server: listening", log.String("addr", srv.Addr)) go func() { err := srv.ListenAndServe() if err != http.ErrServerClosed { - log.Fatal(err) + logger.Fatal(err.Error()) } }() @@ -217,7 +219,7 @@ func main() { defer cancel() // Stop accepting requests. if err := srv.Shutdown(ctx); err != nil { - log15.Error("shutting down http server", "error", err) + logger.Error("shutting down http server", log.Error(err)) } // The most important thing this does is kill all our clones. If we just diff --git a/cmd/gitserver/server/server.go b/cmd/gitserver/server/server.go index 906041b0767..3e9d249ff66 100644 --- a/cmd/gitserver/server/server.go +++ b/cmd/gitserver/server/server.go @@ -1173,7 +1173,7 @@ func (s *Server) handleBatchLog(w http.ResponseWriter, r *http.Request) { // Run git log for a single repository. // Invoked multiple times from the handler defined below. 
performGitLogCommand := func(ctx context.Context, repoCommit api.RepoCommit, format string) (output string, isRepoCloned bool, err error) { - ctx, endObservation := operations.batchLogSingle.With(ctx, &err, observation.Args{ + ctx, _, endObservation := operations.batchLogSingle.With(ctx, &err, observation.Args{ LogFields: append( []log.Field{ log.String("format", format), @@ -1206,7 +1206,7 @@ func (s *Server) handleBatchLog(w http.ResponseWriter, r *http.Request) { // Handles the /batch-log route instrumentedHandler := func(ctx context.Context) (statusCodeOnError int, err error) { - ctx, logger, endObservation := operations.batchLog.WithAndLogger(ctx, &err, observation.Args{}) + ctx, logger, endObservation := operations.batchLog.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("statusCodeOnError", statusCodeOnError), diff --git a/cmd/migrator/main.go b/cmd/migrator/main.go index 7c84a2cafaf..8a15cd2cf0d 100644 --- a/cmd/migrator/main.go +++ b/cmd/migrator/main.go @@ -6,7 +6,6 @@ import ( "fmt" "os" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/urfave/cli/v2" @@ -70,7 +69,7 @@ func mainErr(ctx context.Context, args []string) error { func newRunnerFactory() func(ctx context.Context, schemaNames []string) (cliutil.Runner, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: sglog.Scoped("runner", ""), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/cmd/symbols/fetcher/repository_fetcher.go b/cmd/symbols/fetcher/repository_fetcher.go index 17b2d1728b9..307f9949500 100644 --- a/cmd/symbols/fetcher/repository_fetcher.go +++ b/cmd/symbols/fetcher/repository_fetcher.go @@ -59,7 +59,7 @@ func (f *repositoryFetcher) FetchRepositoryArchive(ctx context.Context, args typ } func (f *repositoryFetcher) 
fetchRepositoryArchive(ctx context.Context, args types.SearchArgs, paths []string, callback func(request ParseRequest)) (err error) { - ctx, trace, endObservation := f.operations.fetchRepositoryArchive.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := f.operations.fetchRepositoryArchive.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", string(args.Repo)), log.String("commitID", string(args.CommitID)), log.Int("paths", len(paths)), diff --git a/cmd/symbols/gitserver/client.go b/cmd/symbols/gitserver/client.go index 943edfb4409..ce0c6995556 100644 --- a/cmd/symbols/gitserver/client.go +++ b/cmd/symbols/gitserver/client.go @@ -44,7 +44,7 @@ func NewClient(observationContext *observation.Context) GitserverClient { } func (c *gitserverClient) FetchTar(ctx context.Context, repo api.RepoName, commit api.CommitID, paths []string) (_ io.ReadCloser, err error) { - ctx, endObservation := c.operations.fetchTar.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.fetchTar.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", string(repo)), log.String("commit", string(commit)), log.Int("paths", len(paths)), @@ -67,7 +67,7 @@ func (c *gitserverClient) FetchTar(ctx context.Context, repo api.RepoName, commi } func (c *gitserverClient) GitDiff(ctx context.Context, repo api.RepoName, commitA, commitB api.CommitID) (_ Changes, err error) { - ctx, endObservation := c.operations.gitDiff.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.gitDiff.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", string(repo)), log.String("commitA", string(commitA)), log.String("commitB", string(commitB)), diff --git a/cmd/symbols/internal/api/search_sqlite.go b/cmd/symbols/internal/api/search_sqlite.go index a481f2d03e7..68bf090d8ff 100644 --- a/cmd/symbols/internal/api/search_sqlite.go +++ 
b/cmd/symbols/internal/api/search_sqlite.go @@ -22,7 +22,7 @@ const searchTimeout = 60 * time.Second func MakeSqliteSearchFunc(operations *sharedobservability.Operations, cachedDatabaseWriter writer.CachedDatabaseWriter) types.SearchFunc { return func(ctx context.Context, args types.SearchArgs) (results []result.Symbol, err error) { - ctx, trace, endObservation := operations.Search.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := operations.Search.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", string(args.Repo)), log.String("commitID", string(args.CommitID)), log.String("query", args.Query), diff --git a/cmd/symbols/parser/parser.go b/cmd/symbols/parser/parser.go index ddd6d05ac73..8691a25594e 100644 --- a/cmd/symbols/parser/parser.go +++ b/cmd/symbols/parser/parser.go @@ -51,7 +51,7 @@ func NewParser( } func (p *parser) Parse(ctx context.Context, args types.SearchArgs, paths []string) (_ <-chan SymbolOrError, err error) { - ctx, endObservation := p.operations.parse.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := p.operations.parse.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", string(args.Repo)), log.String("commitID", string(args.CommitID)), log.Int("paths", len(paths)), @@ -131,7 +131,7 @@ func min(a, b int) int { } func (p *parser) handleParseRequest(ctx context.Context, symbolOrErrors chan<- SymbolOrError, parseRequest fetcher.ParseRequest, totalSymbols *uint32) (err error) { - ctx, trace, endObservation := p.operations.handleParseRequest.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := p.operations.handleParseRequest.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("path", parseRequest.Path), log.Int("fileSize", len(parseRequest.Data)), }}) diff --git a/cmd/symbols/shared/main.go b/cmd/symbols/shared/main.go index 91a3f1d659f..8d75966bd7f 100644 --- 
a/cmd/symbols/shared/main.go +++ b/cmd/symbols/shared/main.go @@ -2,11 +2,9 @@ package shared import ( "context" - "log" "net/http" "time" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -30,7 +28,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/trace/ot" "github.com/sourcegraph/sourcegraph/internal/tracer" "github.com/sourcegraph/sourcegraph/internal/version" - sglog "github.com/sourcegraph/sourcegraph/lib/log" + "github.com/sourcegraph/sourcegraph/lib/log" ) const addr = ":3184" @@ -42,7 +40,7 @@ func Main(setup SetupFunc) { env.HandleHelpFlag() conf.Init() logging.Init() - syncLogs := sglog.Init(sglog.Resource{ + syncLogs := log.Init(log.Resource{ Name: env.MyName, Version: version.Version(), InstanceID: hostname.Get(), @@ -56,8 +54,9 @@ func Main(setup SetupFunc) { routines := []goroutine.BackgroundRoutine{} // Initialize tracing/metrics + logger := log.Scoped("service", "the symbols service") observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger, Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, HoneyDataset: &honey.Dataset{ @@ -71,7 +70,7 @@ func Main(setup SetupFunc) { repositoryFetcher := fetcher.NewRepositoryFetcher(gitserverClient, types.LoadRepositoryFetcherConfig(env.BaseConfig{}).MaxTotalPathsLength, observationContext) searchFunc, handleStatus, newRoutines, ctagsBinary, err := setup(observationContext, gitserverClient, repositoryFetcher) if err != nil { - log.Fatalf("Failed to setup: %v", err) + logger.Fatal("Failed to set up", log.Error(err)) } routines = append(routines, newRoutines...) 
diff --git a/cmd/worker/internal/migrations/init.go b/cmd/worker/internal/migrations/init.go index 4eedf4e3a0b..6d2f47fdf56 100644 --- a/cmd/worker/internal/migrations/init.go +++ b/cmd/worker/internal/migrations/init.go @@ -4,7 +4,6 @@ import ( "context" "os" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -48,7 +47,7 @@ func (m *migrator) Routines(ctx context.Context, logger log.Logger) ([]goroutine db := database.NewDB(sqlDB) observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "migrator routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/executor/internal/apiclient/client.go b/enterprise/cmd/executor/internal/apiclient/client.go index 1b0628c4714..5a0c64cabe3 100644 --- a/enterprise/cmd/executor/internal/apiclient/client.go +++ b/enterprise/cmd/executor/internal/apiclient/client.go @@ -57,7 +57,7 @@ func New(options Options, observationContext *observation.Context) *Client { } func (c *Client) Dequeue(ctx context.Context, queueName string, job *executor.Job) (_ bool, err error) { - ctx, endObservation := c.operations.dequeue.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.dequeue.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("queueName", queueName), }}) defer endObservation(1, observation.Args{}) @@ -73,7 +73,7 @@ func (c *Client) Dequeue(ctx context.Context, queueName string, job *executor.Jo } func (c *Client) AddExecutionLogEntry(ctx context.Context, queueName string, jobID int, entry workerutil.ExecutionLogEntry) (entryID int, err error) { - ctx, endObservation := c.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: 
[]log.Field{ log.String("queueName", queueName), log.Int("jobID", jobID), }}) @@ -93,7 +93,7 @@ func (c *Client) AddExecutionLogEntry(ctx context.Context, queueName string, job } func (c *Client) UpdateExecutionLogEntry(ctx context.Context, queueName string, jobID, entryID int, entry workerutil.ExecutionLogEntry) (err error) { - ctx, endObservation := c.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("queueName", queueName), log.Int("jobID", jobID), log.Int("entryID", entryID), @@ -114,7 +114,7 @@ func (c *Client) UpdateExecutionLogEntry(ctx context.Context, queueName string, } func (c *Client) MarkComplete(ctx context.Context, queueName string, jobID int) (err error) { - ctx, endObservation := c.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("queueName", queueName), log.Int("jobID", jobID), }}) @@ -132,7 +132,7 @@ func (c *Client) MarkComplete(ctx context.Context, queueName string, jobID int) } func (c *Client) MarkErrored(ctx context.Context, queueName string, jobID int, errorMessage string) (err error) { - ctx, endObservation := c.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("queueName", queueName), log.Int("jobID", jobID), }}) @@ -151,7 +151,7 @@ func (c *Client) MarkErrored(ctx context.Context, queueName string, jobID int, e } func (c *Client) MarkFailed(ctx context.Context, queueName string, jobID int, errorMessage string) (err error) { - ctx, endObservation := c.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := 
c.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("queueName", queueName), log.Int("jobID", jobID), }}) @@ -196,7 +196,7 @@ func (c *Client) Ping(ctx context.Context, queueName string, jobIDs []int) (err } func (c *Client) Heartbeat(ctx context.Context, queueName string, jobIDs []int) (knownIDs []int, err error) { - ctx, endObservation := c.operations.heartbeat.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.heartbeat.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("queueName", queueName), log.String("jobIDs", intsToString(jobIDs)), }}) diff --git a/enterprise/cmd/executor/internal/command/run.go b/enterprise/cmd/executor/internal/command/run.go index 35c8754246f..0994e30f373 100644 --- a/enterprise/cmd/executor/internal/command/run.go +++ b/enterprise/cmd/executor/internal/command/run.go @@ -34,7 +34,7 @@ func runCommand(ctx context.Context, command command, logger *Logger) (err error ctx, cancel := context.WithCancel(ctx) defer cancel() - ctx, endObservation := command.Operation.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := command.Operation.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) log15.Info(fmt.Sprintf("Running command: %s", strings.Join(command.Command, " "))) diff --git a/enterprise/cmd/executor/main.go b/enterprise/cmd/executor/main.go index ae43ad68644..80d1a06527e 100644 --- a/enterprise/cmd/executor/main.go +++ b/enterprise/cmd/executor/main.go @@ -48,7 +48,7 @@ func main() { // Initialize tracing/metrics observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: sglog.Scoped("service", "executor service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -105,7 +105,7 @@ func main() { func makeWorkerMetrics(queueName string) workerutil.WorkerMetrics { observationContext := &observation.Context{ - Logger: 
log15.Root(), + Logger: sglog.Scoped("executor_processor", "executor worker processor"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/frontend/internal/codeintel/httpapi/auth_middleware.go b/enterprise/cmd/frontend/internal/codeintel/httpapi/auth_middleware.go index f9c44bfda3b..eff26ccc1f0 100644 --- a/enterprise/cmd/frontend/internal/codeintel/httpapi/auth_middleware.go +++ b/enterprise/cmd/frontend/internal/codeintel/httpapi/auth_middleware.go @@ -41,7 +41,7 @@ var errVerificationNotSupported = errors.New(strings.Join([]string{ func authMiddleware(next http.Handler, db database.DB, authValidators AuthValidatorMap, operation *observation.Operation) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { statusCode, err := func() (_ int, err error) { - ctx, trace, endObservation := operation.WithAndLogger(r.Context(), &err, observation.Args{}) + ctx, trace, endObservation := operation.With(r.Context(), &err, observation.Args{}) defer endObservation(1, observation.Args{}) // Skip auth check if it's not enabled in the instance's site configuration, if this diff --git a/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler.go b/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler.go index 6c2c9979f56..528a638eb87 100644 --- a/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler.go +++ b/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler.go @@ -79,7 +79,7 @@ func (h *UploadHandler) handleEnqueue(w http.ResponseWriter, r *http.Request) { // easily. The remainder of the function simply serializes the result to the // HTTP response writer. 
payload, statusCode, err := func() (_ interface{}, statusCode int, err error) { - ctx, trace, endObservation := h.operations.handleEnqueue.WithAndLogger(r.Context(), &err, observation.Args{}) + ctx, trace, endObservation := h.operations.handleEnqueue.With(r.Context(), &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("statusCode", statusCode), diff --git a/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_multipart.go b/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_multipart.go index f91409f6989..00e349af63c 100644 --- a/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_multipart.go +++ b/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_multipart.go @@ -21,7 +21,7 @@ import ( // new upload record with state 'uploading' and returns the generated ID to be used in subsequent // requests for the same upload. func (h *UploadHandler) handleEnqueueMultipartSetup(ctx context.Context, uploadState uploadState, _ io.Reader) (_ interface{}, statusCode int, err error) { - ctx, trace, endObservation := h.operations.handleEnqueueMultipartSetup.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := h.operations.handleEnqueueMultipartSetup.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("statusCode", statusCode), @@ -64,7 +64,7 @@ func (h *UploadHandler) handleEnqueueMultipartSetup(ctx context.Context, uploadS // handleEnqueueMultipartUpload handles a partial upload in a multipart upload. This proxies the // data to the bundle manager and marks the part index in the upload record. 
func (h *UploadHandler) handleEnqueueMultipartUpload(ctx context.Context, uploadState uploadState, body io.Reader) (_ interface{}, statusCode int, err error) { - ctx, trace, endObservation := h.operations.handleEnqueueMultipartUpload.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := h.operations.handleEnqueueMultipartUpload.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("statusCode", statusCode), @@ -93,7 +93,7 @@ func (h *UploadHandler) handleEnqueueMultipartUpload(ctx context.Context, upload // upload from 'uploading' to 'queued', then instructs the bundle manager to concatenate all of the part // files together. func (h *UploadHandler) handleEnqueueMultipartFinalize(ctx context.Context, uploadState uploadState, _ io.Reader) (_ interface{}, statusCode int, err error) { - ctx, trace, endObservation := h.operations.handleEnqueueMultipartFinalize.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := h.operations.handleEnqueueMultipartFinalize.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("statusCode", statusCode), diff --git a/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_single.go b/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_single.go index 8408125ba9a..824368dd084 100644 --- a/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_single.go +++ b/enterprise/cmd/frontend/internal/codeintel/httpapi/upload_handler_single.go @@ -17,7 +17,7 @@ import ( // handleEnqueueSinglePayload handles a non-multipart upload. This creates an upload record // with state 'queued', proxies the data to the bundle manager, and returns the generated ID. 
func (h *UploadHandler) handleEnqueueSinglePayload(ctx context.Context, uploadState uploadState, body io.Reader) (_ interface{}, statusCode int, err error) { - ctx, trace, endObservation := h.operations.handleEnqueueSinglePayload.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := h.operations.handleEnqueueSinglePayload.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("statusCode", statusCode), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_definitions.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_definitions.go index d751997467f..7209650d7db 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_definitions.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_definitions.go @@ -12,7 +12,7 @@ import ( // DocumentationDefinitions returns the list of source locations that define the symbol found at // the given documentation path ID, if any. 
func (r *queryResolver) DocumentationDefinitions(ctx context.Context, pathID string) (_ []AdjustedLocation, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationDefinitions", r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_references.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_references.go index f0dd3a5481e..2fa5a7e1cf9 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_references.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/documentation_query_references.go @@ -18,7 +18,7 @@ const defaultReferencesPageSize = 100 // DocumentationReferences returns the list of source locations that reference the symbol found at // the given documentation path ID, if any. 
func (r *queryResolver) DocumentationReferences(ctx context.Context, pathID string, limit int, rawCursor string) (_ []AdjustedLocation, _ string, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationReferences", r.operations.documentationReferences, slowReferencesRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.documentationReferences, slowReferencesRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/exists.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/exists.go index e981d6295dc..a2371e8378f 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/exists.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/exists.go @@ -26,7 +26,7 @@ const numAncestors = 100 // path is a prefix are returned. These dump IDs should be subsequently passed to invocations of // Definitions, References, and Hover. 
func (r *resolver) findClosestDumps(ctx context.Context, cachedCommitChecker *cachedCommitChecker, repositoryID int, commit, path string, exactPath bool, indexer string) (_ []store.Dump, err error) { - ctx, trace, endObservation := r.operations.findClosestDumps.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := r.operations.findClosestDumps.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/resolver.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/resolver.go index d9ed1ef62d0..72d3e94c9f5 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/resolver.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/resolver.go @@ -100,7 +100,7 @@ func (r *Resolver) LSIFUploadByID(ctx context.Context, id graphql.ID) (_ gql.LSI // 🚨 SECURITY: dbstore layer handles authz for GetUploads func (r *Resolver) LSIFUploads(ctx context.Context, args *gql.LSIFUploadsQueryArgs) (_ gql.LSIFUploadConnectionResolver, err error) { - // ctx, endObservation := r.observationContext.lsifUploads.With(ctx, &err, observation.Args{}) + // ctx, _, endObservation := r.observationContext.lsifUploads.With(ctx, &err, observation.Args{}) // endObservation.EndOnCancel(ctx, 1, observation.Args{}) // Delegate behavior to LSIFUploadsByRepo with no specified repository identifier @@ -128,7 +128,7 @@ func (r *Resolver) LSIFUploadsByRepo(ctx context.Context, args *gql.LSIFReposito // 🚨 SECURITY: Only site admins may modify code intelligence upload data func (r *Resolver) DeleteLSIFUpload(ctx context.Context, args *struct{ ID graphql.ID }) (_ *gql.EmptyResponse, err error) { - ctx, endObservation := r.observationContext.deleteLsifUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := r.observationContext.deleteLsifUpload.With(ctx, &err, 
observation.Args{LogFields: []log.Field{ log.String("uploadID", string(args.ID)), }}) endObservation.OnCancel(ctx, 1, observation.Args{}) @@ -185,7 +185,7 @@ func (r *Resolver) LSIFIndexes(ctx context.Context, args *gql.LSIFIndexesQueryAr return nil, errAutoIndexingNotEnabled } - ctx, endObservation := r.observationContext.lsifIndexes.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := r.observationContext.lsifIndexes.With(ctx, &err, observation.Args{}) endObservation.OnCancel(ctx, 1, observation.Args{}) // Delegate behavior to LSIFIndexesByRepo with no specified repository identifier @@ -217,7 +217,7 @@ func (r *Resolver) LSIFIndexesByRepo(ctx context.Context, args *gql.LSIFReposito // 🚨 SECURITY: Only site admins may modify code intelligence index data func (r *Resolver) DeleteLSIFIndex(ctx context.Context, args *struct{ ID graphql.ID }) (_ *gql.EmptyResponse, err error) { - ctx, endObservation := r.observationContext.deleteLsifIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := r.observationContext.deleteLsifIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("indexID", string(args.ID)), }}) defer endObservation(1, observation.Args{}) @@ -243,7 +243,7 @@ func (r *Resolver) DeleteLSIFIndex(ctx context.Context, args *struct{ ID graphql // 🚨 SECURITY: Only entrypoint is within the repository resolver so the user is already authenticated func (r *Resolver) CommitGraph(ctx context.Context, id graphql.ID) (_ gql.CodeIntelligenceCommitGraphResolver, err error) { - ctx, endObservation := r.observationContext.commitGraph.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := r.observationContext.commitGraph.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repoID", string(id)), }}) endObservation.OnCancel(ctx, 1, observation.Args{}) @@ -456,7 +456,7 @@ func (r *Resolver) CreateCodeIntelligenceConfigurationPolicy(ctx context.Context // 🚨 SECURITY: 
Only site admins may modify code intelligence configuration policies func (r *Resolver) UpdateCodeIntelligenceConfigurationPolicy(ctx context.Context, args *gql.UpdateCodeIntelligenceConfigurationPolicyArgs) (_ *gql.EmptyResponse, err error) { - ctx, endObservation := r.observationContext.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := r.observationContext.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("configPolicyID", string(args.ID)), }}) defer endObservation(1, observation.Args{}) @@ -495,7 +495,7 @@ func (r *Resolver) UpdateCodeIntelligenceConfigurationPolicy(ctx context.Context // 🚨 SECURITY: Only site admins may modify code intelligence configuration policies func (r *Resolver) DeleteCodeIntelligenceConfigurationPolicy(ctx context.Context, args *gql.DeleteCodeIntelligenceConfigurationPolicyArgs) (_ *gql.EmptyResponse, err error) { - ctx, endObservation := r.observationContext.deleteConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := r.observationContext.deleteConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("configPolicyID", string(args.Policy)), }}) endObservation.OnCancel(ctx, 1, observation.Args{}) @@ -568,7 +568,7 @@ func (r *Resolver) IndexConfiguration(ctx context.Context, id graphql.ID) (_ gql // 🚨 SECURITY: Only site admins may modify code intelligence indexing configuration func (r *Resolver) UpdateRepositoryIndexConfiguration(ctx context.Context, args *gql.UpdateRepositoryIndexConfigurationArgs) (_ *gql.EmptyResponse, err error) { - ctx, endObservation := r.observationContext.updateIndexConfiguration.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := r.observationContext.updateIndexConfiguration.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repoID", string(args.Repository)), }}) defer 
endObservation(1, observation.Args{}) @@ -593,7 +593,7 @@ func (r *Resolver) UpdateRepositoryIndexConfiguration(ctx context.Context, args } func (r *Resolver) PreviewRepositoryFilter(ctx context.Context, args *gql.PreviewRepositoryFilterArgs) (_ gql.RepositoryFilterPreviewResolver, err error) { - ctx, endObservation := r.observationContext.previewRepoFilter.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := r.observationContext.previewRepoFilter.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) offset, err := graphqlutil.DecodeIntCursor(args.After) @@ -636,7 +636,7 @@ func (r *Resolver) PreviewRepositoryFilter(ctx context.Context, args *gql.Previe } func (r *Resolver) PreviewGitObjectFilter(ctx context.Context, id graphql.ID, args *gql.PreviewGitObjectFilterArgs) (_ []gql.GitObjectFilterPreviewResolver, err error) { - ctx, endObservation := r.observationContext.previewGitObjectFilter.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := r.observationContext.previewGitObjectFilter.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) repositoryID, err := unmarshalLSIFIndexGQLID(id) diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/support.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/support.go index ab121976d7c..6323f1d55bd 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/support.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/graphql/support.go @@ -83,7 +83,7 @@ func (r *preciseCodeIntelSupportResolver) Indexers() *[]gql.CodeIntelIndexerReso } func (r *Resolver) RequestLanguageSupport(ctx context.Context, args *gql.RequestLanguageSupportArgs) (_ *gql.EmptyResponse, err error) { - ctx, endObservation := r.observationContext.requestLanguageSupport.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := r.observationContext.requestLanguageSupport.With(ctx, &err, observation.Args{}) defer 
endObservation(1, observation.Args{}) userID := int(actor.FromContext(ctx).UID) @@ -99,7 +99,7 @@ func (r *Resolver) RequestLanguageSupport(ctx context.Context, args *gql.Request } func (r *Resolver) RequestedLanguageSupport(ctx context.Context) (_ []string, err error) { - ctx, endObservation := r.observationContext.requestedLanguageSupport.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := r.observationContext.requestedLanguageSupport.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) userID := int(actor.FromContext(ctx).UID) diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/observability.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/observability.go index f4cf716f9f9..adb7d984021 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/observability.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/observability.go @@ -5,10 +5,11 @@ import ( "fmt" "time" - "github.com/inconshreveable/log15" + "go.uber.org/zap" "github.com/sourcegraph/sourcegraph/internal/metrics" "github.com/sourcegraph/sourcegraph/internal/observation" + "github.com/sourcegraph/sourcegraph/lib/log" ) type operations struct { @@ -81,33 +82,28 @@ func newOperations(observationContext *observation.Context) *operations { func observeResolver( ctx context.Context, err *error, - name string, operation *observation.Operation, threshold time.Duration, observationArgs observation.Args, ) (context.Context, observation.TraceLogger, func()) { start := time.Now() - ctx, trace, endObservation := operation.WithAndLogger(ctx, err, observationArgs) + ctx, trace, endObservation := operation.With(ctx, err, observationArgs) return ctx, trace, func() { duration := time.Since(start) endObservation(1, observation.Args{}) if duration >= threshold { - lowSlowRequest(name, duration, err, observationArgs) + // use trace logger which includes all relevant fields + lowSlowRequest(trace, duration, err) } } } -func lowSlowRequest(name 
string, duration time.Duration, err *error, observationArgs observation.Args) { - pairs := append( - observationArgs.LogFieldPairs(), - "type", name, - "duration_ms", duration.Milliseconds(), - ) +func lowSlowRequest(logger log.Logger, duration time.Duration, err *error) { + fields := []log.Field{zap.Duration("duration", duration)} if err != nil && *err != nil { - pairs = append(pairs, "error", (*err).Error()) + fields = append(fields, log.Error(*err)) } - - log15.Warn("Slow codeintel request", pairs...) + logger.Warn("Slow codeintel request", fields...) } diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_definitions.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_definitions.go index 0bf2b63799e..7c66ba1b5d5 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_definitions.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_definitions.go @@ -17,7 +17,7 @@ const DefinitionsLimit = 100 // Definitions returns the list of source locations that define the symbol at the given position. 
func (r *queryResolver) Definitions(ctx context.Context, line, character int) (_ []AdjustedLocation, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "Definitions", r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_diagnostics.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_diagnostics.go index bcf1073d90b..535e63020b1 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_diagnostics.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_diagnostics.go @@ -19,7 +19,7 @@ const slowDiagnosticsRequestThreshold = time.Second // Diagnostics returns the diagnostics for documents with the given path prefix. 
func (r *queryResolver) Diagnostics(ctx context.Context, limit int) (adjustedDiagnostics []AdjustedDiagnostic, _ int, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "Diagnostics", r.operations.diagnostics, slowDiagnosticsRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.diagnostics, slowDiagnosticsRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_documentation.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_documentation.go index 3a20408b076..43f2259f062 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_documentation.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_documentation.go @@ -20,7 +20,7 @@ const slowDocumentationPageRequestThreshold = time.Second // // nil, nil is returned if the page does not exist. func (r *queryResolver) DocumentationPage(ctx context.Context, pathID string) (_ *precise.DocumentationPageData, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationPage", r.operations.documentationPage, slowDocumentationPageRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.documentationPage, slowDocumentationPageRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), @@ -55,7 +55,7 @@ const slowDocumentationPathInfoRequestThreshold = time.Second // // nil, nil is returned if the page does not exist. 
func (r *queryResolver) DocumentationPathInfo(ctx context.Context, pathID string) (_ *precise.DocumentationPathInfoData, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationPathInfo", r.operations.documentationPathInfo, slowDocumentationPathInfoRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.documentationPathInfo, slowDocumentationPathInfoRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), @@ -88,7 +88,7 @@ const slowDocumentationRequestThreshold = time.Second // Documentation returns documentation for the symbol at the given position. func (r *queryResolver) Documentation(ctx context.Context, line, character int) (_ []*Documentation, err error) { - ctx, _, endObservation := observeResolver(ctx, &err, "Documentation", r.operations.documentation, slowDocumentationRequestThreshold, observation.Args{ + ctx, _, endObservation := observeResolver(ctx, &err, r.operations.documentation, slowDocumentationRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), @@ -142,7 +142,7 @@ const slowDocumentationSearchRequestThreshold = 3 * time.Second // DocumentationSearch searches for documentation, limiting the results to the specified set of repos (or all if empty). 
func (r *resolver) DocumentationSearch(ctx context.Context, query string, repos []string) (_ []precise.DocumentationSearchResult, err error) { - ctx, _, endObservation := observeResolver(ctx, &err, "DocumentationSearch", r.operations.documentationSearch, slowDocumentationSearchRequestThreshold, observation.Args{ + ctx, _, endObservation := observeResolver(ctx, &err, r.operations.documentationSearch, slowDocumentationSearchRequestThreshold, observation.Args{ LogFields: []log.Field{ log.String("query", query), log.String("repos", fmt.Sprint(repos)), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_hover.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_hover.go index 3002d9e573b..07714989603 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_hover.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_hover.go @@ -15,7 +15,7 @@ const slowHoverRequestThreshold = time.Second // Hover returns the hover text and range for the symbol at the given position. 
func (r *queryResolver) Hover(ctx context.Context, line, character int) (_ string, _ lsifstore.Range, _ bool, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "Hover", r.operations.hover, slowHoverRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.hover, slowHoverRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_implementations.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_implementations.go index 3b3dc349b1d..9cc5b9da191 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_implementations.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_implementations.go @@ -19,7 +19,7 @@ const ImplementationsLimit = 100 // Implementations returns the list of source locations that define the symbol at the given position. 
func (r *queryResolver) Implementations(ctx context.Context, line, character int, limit int, rawCursor string) (_ []AdjustedLocation, _ string, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "Implementations", r.operations.implementations, slowImplementationsRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.implementations, slowImplementationsRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_ranges.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_ranges.go index 6bd161d877b..4cc834e83bc 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_ranges.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_ranges.go @@ -17,7 +17,7 @@ const slowRangesRequestThreshold = time.Second // results are partial and do not include references outside the current file, or any location that // requires cross-linking of bundles (cross-repo or cross-root). 
func (r *queryResolver) Ranges(ctx context.Context, startLine, endLine int) (adjustedRanges []AdjustedCodeIntelligenceRange, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "Ranges", r.operations.ranges, slowRangesRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.ranges, slowRangesRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references.go index 52e1f063330..4d8723ebb4b 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references.go @@ -21,7 +21,7 @@ const slowReferencesRequestThreshold = time.Second // References returns the list of source locations that reference the symbol at the given position. 
func (r *queryResolver) References(ctx context.Context, line, character, limit int, rawCursor string) (_ []AdjustedLocation, _ string, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "References", r.operations.references, slowReferencesRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.references, slowReferencesRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references_test.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references_test.go index ff1dd258fbd..44ad995f597 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references_test.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_references_test.go @@ -16,6 +16,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/lib/codeintel/bloomfilter" "github.com/sourcegraph/sourcegraph/lib/codeintel/precise" + "github.com/sourcegraph/sourcegraph/lib/log/logtest" ) func TestReferences(t *testing.T) { @@ -538,7 +539,7 @@ func TestIgnoredIDs(t *testing.T) { ignoreIDs, 10, 0, - observation.TestTraceLogger, + observation.TestTraceLogger(logtest.Scoped(t)), ) if err != nil { t.Fatalf("uploadIDsWithReferences: %s", err) diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_stencil.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_stencil.go index bcace10bacb..dca21d8854f 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/query_stencil.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/query_stencil.go @@ -16,7 +16,7 @@ const slowStencilRequestThreshold = time.Second // Stencil return all ranges within a single document. 
func (r *queryResolver) Stencil(ctx context.Context) (adjustedRanges []lsifstore.Range, err error) { - ctx, trace, endObservation := observeResolver(ctx, &err, "Stencil", r.operations.stencil, slowStencilRequestThreshold, observation.Args{ + ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.stencil, slowStencilRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", r.repositoryID), log.String("commit", r.commit), diff --git a/enterprise/cmd/frontend/internal/codeintel/resolvers/resolver.go b/enterprise/cmd/frontend/internal/codeintel/resolvers/resolver.go index ddd4ef1c955..8f52b7d94d6 100644 --- a/enterprise/cmd/frontend/internal/codeintel/resolvers/resolver.go +++ b/enterprise/cmd/frontend/internal/codeintel/resolvers/resolver.go @@ -190,7 +190,7 @@ const slowQueryResolverRequestThreshold = time.Second // given repository, commit, and path, then constructs a new query resolver instance which // can be used to answer subsequent queries. func (r *resolver) QueryResolver(ctx context.Context, args *gql.GitBlobLSIFDataArgs) (_ QueryResolver, err error) { - ctx, _, endObservation := observeResolver(ctx, &err, "QueryResolver", r.operations.queryResolver, slowQueryResolverRequestThreshold, observation.Args{ + ctx, _, endObservation := observeResolver(ctx, &err, r.operations.queryResolver, slowQueryResolverRequestThreshold, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", int(args.Repo.ID)), log.String("commit", string(args.Commit)), diff --git a/enterprise/cmd/frontend/internal/codeintel/services.go b/enterprise/cmd/frontend/internal/codeintel/services.go index c7a9cd587b7..81aa3fef896 100644 --- a/enterprise/cmd/frontend/internal/codeintel/services.go +++ b/enterprise/cmd/frontend/internal/codeintel/services.go @@ -3,10 +3,8 @@ package codeintel import ( "context" "database/sql" - "log" "net/http" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" 
"github.com/prometheus/client_golang/prometheus" @@ -28,6 +26,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/sentry" "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/uploadstore" + "github.com/sourcegraph/sourcegraph/lib/log" ) type Services struct { @@ -48,17 +47,18 @@ type Services struct { func NewServices(ctx context.Context, config *Config, siteConfig conftypes.WatchableSiteConfig, db database.DB) (*Services, error) { // Initialize tracing/metrics + logger := log.Scoped("codeintel", "codeintel services") observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger, Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } // Initialize sentry hub - hub := mustInitializeSentryHub(siteConfig) + hub := mustInitializeSentryHub(logger, siteConfig) // Connect to database - codeIntelDB := mustInitializeCodeIntelDB() + codeIntelDB := mustInitializeCodeIntelDB(logger) // Initialize stores dbStore := store.NewWithDB(db, observationContext) @@ -66,7 +66,7 @@ func NewServices(ctx context.Context, config *Config, siteConfig conftypes.Watch lsifStore := lsifstore.NewStore(codeIntelDB, siteConfig, observationContext) uploadStore, err := lsifuploadstore.New(context.Background(), config.LSIFUploadStoreConfig, observationContext) if err != nil { - log.Fatalf("Failed to initialize upload store: %s", err) + logger.Fatal("Failed to initialize upload store", log.Error(err)) } // Initialize http endpoints @@ -117,18 +117,18 @@ func NewServices(ctx context.Context, config *Config, siteConfig conftypes.Watch }, nil } -func mustInitializeCodeIntelDB() *sql.DB { +func mustInitializeCodeIntelDB(logger log.Logger) *sql.DB { dsn := conf.GetServiceConnectionValueAndRestartOnChange(func(serviceConnections conftypes.ServiceConnections) string { return serviceConnections.CodeIntelPostgresDSN }) db, err := connections.EnsureNewCodeIntelDB(dsn, "frontend", 
&observation.TestContext) if err != nil { - log.Fatalf("Failed to connect to codeintel database: %s", err) + logger.Fatal("Failed to connect to codeintel database", log.Error(err)) } return db } -func mustInitializeSentryHub(c conftypes.WatchableSiteConfig) *sentry.Hub { +func mustInitializeSentryHub(logger log.Logger, c conftypes.WatchableSiteConfig) *sentry.Hub { getDsn := func(c conftypes.SiteConfigQuerier) string { if c.SiteConfig().Log != nil && c.SiteConfig().Log.Sentry != nil { return c.SiteConfig().Log.Sentry.CodeIntelDSN @@ -138,7 +138,7 @@ func mustInitializeSentryHub(c conftypes.WatchableSiteConfig) *sentry.Hub { hub, err := sentry.NewWithDsn(getDsn(c), c, getDsn) if err != nil { - log.Fatalf("Failed to initialize sentry hub: %s", err) + logger.Fatal("Failed to initialize sentry hub", log.Error(err)) } return hub } diff --git a/enterprise/cmd/frontend/main.go b/enterprise/cmd/frontend/main.go index 65877e92cb5..4f5bf16e82d 100644 --- a/enterprise/cmd/frontend/main.go +++ b/enterprise/cmd/frontend/main.go @@ -7,11 +7,9 @@ package main import ( "context" - "log" "os" "strconv" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -37,6 +35,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/oobmigration" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) func main() { @@ -69,9 +68,10 @@ func init() { } func enterpriseSetupHook(db database.DB, conf conftypes.UnifiedWatchable) enterprise.Services { + logger := log.Scoped("enterprise", "frontend enterprise edition") debug, _ := strconv.ParseBool(os.Getenv("DEBUG")) if debug { - log.Println("enterprise edition") + logger.Debug("enterprise edition") } auth.Init(db) @@ -80,37 +80,38 @@ func enterpriseSetupHook(db database.DB, conf conftypes.UnifiedWatchable) enterp enterpriseServices := enterprise.DefaultServices() 
observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger, Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } if err := codeIntelConfig.Validate(); err != nil { - log.Fatalf("failed to load codeintel config: %s", err) + logger.Fatal("failed to load codeintel config", log.Error(err)) } services, err := codeintel.NewServices(ctx, codeIntelConfig, conf, db) if err != nil { - log.Fatal(err) + logger.Fatal(err.Error()) } if err := codeintel.Init(ctx, db, codeIntelConfig, &enterpriseServices, observationContext, services); err != nil { - log.Fatalf("failed to initialize codeintel: %s", err) + logger.Fatal("failed to initialize codeintel", log.Error(err)) } // Initialize executor-specific services with the code-intel services. if err := executor.Init(ctx, db, conf, &enterpriseServices, observationContext, services.InternalUploadHandler); err != nil { - log.Fatalf("failed to initialize executor: %s", err) + logger.Fatal("failed to initialize executor", log.Error(err)) } if err := app.Init(db, conf, &enterpriseServices); err != nil { - log.Fatalf("failed to initialize app: %s", err) + logger.Fatal("failed to initialize app", log.Error(err)) } // Initialize all the enterprise-specific services that do not need the codeintel-specific services. 
for name, fn := range initFunctions { + initLogger := logger.Scoped(name, "") if err := fn(ctx, db, conf, &enterpriseServices, observationContext); err != nil { - log.Fatalf("failed to initialize %s: %s", name, err) + initLogger.Fatal("failed to initialize", log.Error(err)) } } diff --git a/enterprise/cmd/precise-code-intel-worker/internal/worker/handler.go b/enterprise/cmd/precise-code-intel-worker/internal/worker/handler.go index 828808e0c85..81839a070ca 100644 --- a/enterprise/cmd/precise-code-intel-worker/internal/worker/handler.go +++ b/enterprise/cmd/precise-code-intel-worker/internal/worker/handler.go @@ -53,7 +53,7 @@ func (h *handler) Handle(ctx context.Context, record workerutil.Record) (err err var requeued bool - ctx, logger, endObservation := h.handleOp.WithAndLogger(ctx, &err, observation.Args{}) + ctx, logger, endObservation := h.handleOp.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{ LogFields: append( diff --git a/enterprise/cmd/precise-code-intel-worker/internal/worker/handler_test.go b/enterprise/cmd/precise-code-intel-worker/internal/worker/handler_test.go index 64fd96cb6ae..67464eac7d2 100644 --- a/enterprise/cmd/precise-code-intel-worker/internal/worker/handler_test.go +++ b/enterprise/cmd/precise-code-intel-worker/internal/worker/handler_test.go @@ -20,6 +20,7 @@ import ( "github.com/sourcegraph/sourcegraph/lib/codeintel/bloomfilter" "github.com/sourcegraph/sourcegraph/lib/codeintel/precise" "github.com/sourcegraph/sourcegraph/lib/errors" + "github.com/sourcegraph/sourcegraph/lib/log/logtest" ) func TestHandle(t *testing.T) { @@ -66,7 +67,7 @@ func TestHandle(t *testing.T) { gitserverClient: gitserverClient, } - requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger) + requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger(logtest.Scoped(t))) if err != nil { t.Fatalf("unexpected error handling upload: %s", err) } else if requeued { @@ 
-200,7 +201,7 @@ func TestHandleError(t *testing.T) { gitserverClient: gitserverClient, } - requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger) + requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger(logtest.Scoped(t))) if err == nil { t.Fatalf("unexpected nil error handling upload") } else if !strings.Contains(err.Error(), "uh-oh!") { @@ -255,7 +256,7 @@ func TestHandleCloneInProgress(t *testing.T) { gitserverClient: gitserverClient, } - requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger) + requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger(logtest.Scoped(t))) if err != nil { t.Fatalf("unexpected error handling upload: %s", err) } else if !requeued { diff --git a/enterprise/cmd/precise-code-intel-worker/main.go b/enterprise/cmd/precise-code-intel-worker/main.go index c456e3bbd30..0ee0a828ed8 100644 --- a/enterprise/cmd/precise-code-intel-worker/main.go +++ b/enterprise/cmd/precise-code-intel-worker/main.go @@ -8,7 +8,6 @@ import ( "time" smithyhttp "github.com/aws/smithy-go/transport/http" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -71,7 +70,7 @@ func main() { // Initialize tracing/metrics observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: sglog.Scoped("worker", "the precise codeintel worker"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, HoneyDataset: &honey.Dataset{ diff --git a/enterprise/cmd/worker/internal/batches/bulk_operation_processor_job.go b/enterprise/cmd/worker/internal/batches/bulk_operation_processor_job.go index f02ca24267d..a842543773d 100644 --- a/enterprise/cmd/worker/internal/batches/bulk_operation_processor_job.go +++ b/enterprise/cmd/worker/internal/batches/bulk_operation_processor_job.go @@ -3,7 +3,6 @@ package batches import 
( "context" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -35,7 +34,7 @@ func (j *bulkOperationProcessorJob) Config() []env.Config { func (j *bulkOperationProcessorJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "bulk operation processor job routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/batches/dbstore.go b/enterprise/cmd/worker/internal/batches/dbstore.go index e9773526d14..b66a3be8416 100644 --- a/enterprise/cmd/worker/internal/batches/dbstore.go +++ b/enterprise/cmd/worker/internal/batches/dbstore.go @@ -3,7 +3,6 @@ package batches import ( "database/sql" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -15,6 +14,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" + "github.com/sourcegraph/sourcegraph/lib/log" ) // InitStore initializes and returns a *store.Store instance. 
@@ -29,7 +29,7 @@ func InitStore() (*store.Store, error) { var initStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.batches", "batches store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -54,7 +54,7 @@ func InitReconcilerWorkerStore() (dbworkerstore.Store, error) { var initReconcilerWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.reconciler", "reconciler worker store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -79,7 +79,7 @@ func InitBulkOperationWorkerStore() (dbworkerstore.Store, error) { var initBulkOperationWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.bulk_ops", "bulk operation worker store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -104,7 +104,7 @@ func InitBatchSpecWorkspaceExecutionWorkerStore() (store.BatchSpecWorkspaceExecu var initBatchSpecWorkspaceExecutionWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.execution", "the batch spec workspace execution worker store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -129,7 +129,7 @@ func InitBatchSpecResolutionWorkerStore() (dbworkerstore.Store, error) { var initBatchSpecResolutionWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.batch_spec_resolution", "the batch spec 
resolution worker store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/batches/janitor_job.go b/enterprise/cmd/worker/internal/batches/janitor_job.go index 09a98b482d2..39ec34a09ed 100644 --- a/enterprise/cmd/worker/internal/batches/janitor_job.go +++ b/enterprise/cmd/worker/internal/batches/janitor_job.go @@ -3,7 +3,6 @@ package batches import ( "context" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -34,7 +33,7 @@ func (j *janitorJob) Config() []env.Config { func (j *janitorJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "janitor job routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/batches/migrations/migrations.go b/enterprise/cmd/worker/internal/batches/migrations/migrations.go index b6debc1093d..38ec598a8c7 100644 --- a/enterprise/cmd/worker/internal/batches/migrations/migrations.go +++ b/enterprise/cmd/worker/internal/batches/migrations/migrations.go @@ -4,7 +4,6 @@ import ( "os" "time" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -14,6 +13,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/oobmigration" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) const ( @@ -34,7 +34,7 @@ const ( func RegisterMigrations(db database.DB, outOfBandMigrationRunner *oobmigration.Runner) error { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("migrations", ""), Tracer: &trace.Tracer{Tracer: 
opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/batches/reconciler_job.go b/enterprise/cmd/worker/internal/batches/reconciler_job.go index 8a86026c9b3..77404ceac4b 100644 --- a/enterprise/cmd/worker/internal/batches/reconciler_job.go +++ b/enterprise/cmd/worker/internal/batches/reconciler_job.go @@ -3,7 +3,6 @@ package batches import ( "context" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -36,7 +35,7 @@ func (j *reconcilerJob) Config() []env.Config { func (j *reconcilerJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "reconciler job routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/batches/workspace_resolver_job.go b/enterprise/cmd/worker/internal/batches/workspace_resolver_job.go index de8c9012c58..29ee157fdaa 100644 --- a/enterprise/cmd/worker/internal/batches/workspace_resolver_job.go +++ b/enterprise/cmd/worker/internal/batches/workspace_resolver_job.go @@ -3,7 +3,6 @@ package batches import ( "context" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -33,7 +32,7 @@ func (j *workspaceResolverJob) Config() []env.Config { func (j *workspaceResolverJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "workspace resolver job routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/codeintel/clients.go 
b/enterprise/cmd/worker/internal/codeintel/clients.go index b2e867b1960..782984a8336 100644 --- a/enterprise/cmd/worker/internal/codeintel/clients.go +++ b/enterprise/cmd/worker/internal/codeintel/clients.go @@ -1,7 +1,6 @@ package codeintel import ( - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) // InitGitserverClient initializes and returns a gitserver client. @@ -25,7 +25,7 @@ func InitGitserverClient() (*gitserver.Client, error) { var initGitserverClient = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("client.gitserver", "gitserver client"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -45,7 +45,7 @@ func InitRepoUpdaterClient() *repoupdater.Client { var initRepoUpdaterClient = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("client.repo-updater", "repo-updater client"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/codeintel/commitgraph/updater.go b/enterprise/cmd/worker/internal/codeintel/commitgraph/updater.go index cfd8036a722..9a3ed8361b4 100644 --- a/enterprise/cmd/worker/internal/codeintel/commitgraph/updater.go +++ b/enterprise/cmd/worker/internal/codeintel/commitgraph/updater.go @@ -102,7 +102,7 @@ func (u *Updater) tryUpdate(ctx context.Context, repositoryID, dirtyToken int) ( // the repository can be unmarked as long as the repository is not marked as dirty again before // the 
update completes. func (u *Updater) update(ctx context.Context, repositoryID, dirtyToken int) (err error) { - ctx, trace, endObservation := u.operations.commitUpdate.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := u.operations.commitUpdate.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.Int("dirtyToken", dirtyToken), diff --git a/enterprise/cmd/worker/internal/codeintel/commitgraph_job.go b/enterprise/cmd/worker/internal/codeintel/commitgraph_job.go index 11ef1e611b9..7f82f2eddb7 100644 --- a/enterprise/cmd/worker/internal/codeintel/commitgraph_job.go +++ b/enterprise/cmd/worker/internal/codeintel/commitgraph_job.go @@ -35,7 +35,7 @@ func (j *commitGraphJob) Config() []env.Config { func (j *commitGraphJob) Routines(ctx context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "commit graph job routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/codeintel/dbstore.go b/enterprise/cmd/worker/internal/codeintel/dbstore.go index b371ffaf53e..1196a488374 100644 --- a/enterprise/cmd/worker/internal/codeintel/dbstore.go +++ b/enterprise/cmd/worker/internal/codeintel/dbstore.go @@ -1,7 +1,6 @@ package codeintel import ( - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" + "github.com/sourcegraph/sourcegraph/lib/log" ) // InitDBStore initializes and returns a db store instance. 
@@ -25,7 +25,7 @@ func InitDBStore() (*dbstore.Store, error) { var initDBStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store", "codeintel db store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -50,7 +50,7 @@ func InitDependencySyncingStore() (dbworkerstore.Store, error) { var initDependencySyncStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.dependency_sync", "dependency sync store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -74,7 +74,7 @@ func InitDependencyIndexingStore() (dbworkerstore.Store, error) { var initDependenyIndexStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.dependency_index", "dependency index store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/codeintel/indexing/dependency_sync_scheduler.go b/enterprise/cmd/worker/internal/codeintel/indexing/dependency_sync_scheduler.go index 079f39d9ee2..80a34cd31a0 100644 --- a/enterprise/cmd/worker/internal/codeintel/indexing/dependency_sync_scheduler.go +++ b/enterprise/cmd/worker/internal/codeintel/indexing/dependency_sync_scheduler.go @@ -169,7 +169,7 @@ func (h *dependencySyncSchedulerHandler) Handle(ctx context.Context, record work } func (h *dependencySyncSchedulerHandler) insertDependencyRepo(ctx context.Context, pkg precise.Package) (new bool, err error) { - ctx, endObservation := dependencyReposOps.InsertCloneableDependencyRepo.With(ctx, &err, observation.Args{ + ctx, _, endObservation := 
dependencyReposOps.InsertCloneableDependencyRepo.With(ctx, &err, observation.Args{ MetricLabelValues: []string{pkg.Scheme}, }) defer func() { diff --git a/enterprise/cmd/worker/internal/codeintel/indexing_job.go b/enterprise/cmd/worker/internal/codeintel/indexing_job.go index dbc7affba36..f3a7d9b10c9 100644 --- a/enterprise/cmd/worker/internal/codeintel/indexing_job.go +++ b/enterprise/cmd/worker/internal/codeintel/indexing_job.go @@ -3,7 +3,6 @@ package codeintel import ( "context" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -38,7 +37,7 @@ func (j *indexingJob) Config() []env.Config { func (j *indexingJob) Routines(ctx context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "indexing job routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/codeintel/janitor_job.go b/enterprise/cmd/worker/internal/codeintel/janitor_job.go index 27fd252d8a1..e740f3a5960 100644 --- a/enterprise/cmd/worker/internal/codeintel/janitor_job.go +++ b/enterprise/cmd/worker/internal/codeintel/janitor_job.go @@ -3,7 +3,6 @@ package codeintel import ( "context" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -35,7 +34,7 @@ func (j *janitorJob) Config() []env.Config { func (j *janitorJob) Routines(ctx context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: logger.Scoped("routines", "janitor job routines"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/cmd/worker/internal/codeintel/lsifstore.go 
b/enterprise/cmd/worker/internal/codeintel/lsifstore.go index 7b93bb17b53..9d8e9dcd571 100644 --- a/enterprise/cmd/worker/internal/codeintel/lsifstore.go +++ b/enterprise/cmd/worker/internal/codeintel/lsifstore.go @@ -1,7 +1,6 @@ package codeintel import ( - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -10,6 +9,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/conf" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) // InitLSIFStore initializes and returns an LSIF store instance. @@ -24,7 +24,7 @@ func InitLSIFStore() (*lsifstore.Store, error) { var initLSFIStore = memo.NewMemoizedConstructor(func() (interface{}, error) { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.lsif", "lsif store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/internal/batches/background.go b/enterprise/internal/batches/background.go index 5925dcaa304..2c3a420bb2e 100644 --- a/enterprise/internal/batches/background.go +++ b/enterprise/internal/batches/background.go @@ -3,7 +3,6 @@ package batches import ( "context" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -16,6 +15,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/httpcli" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) // InitBackgroundJobs starts all jobs required to run batches. 
Currently, it is called from @@ -37,7 +37,7 @@ func InitBackgroundJobs( ctx = actor.WithInternalActor(ctx) observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("background", "batches background jobs"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/internal/batches/service/service.go b/enterprise/internal/batches/service/service.go index f16296e7faf..e20d5b46117 100644 --- a/enterprise/internal/batches/service/service.go +++ b/enterprise/internal/batches/service/service.go @@ -234,7 +234,7 @@ type CreateBatchSpecOpts struct { // CreateBatchSpec creates the BatchSpec. func (s *Service) CreateBatchSpec(ctx context.Context, opts CreateBatchSpecOpts) (spec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("changesetSpecs", len(opts.ChangesetSpecRandIDs)), }}) defer endObservation(1, observation.Args{}) @@ -322,7 +322,7 @@ type CreateBatchSpecFromRawOpts struct { // CreateBatchSpecFromRaw creates the BatchSpec. func (s *Service) CreateBatchSpecFromRaw(ctx context.Context, opts CreateBatchSpecFromRawOpts) (spec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.createBatchSpecFromRaw.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.createBatchSpecFromRaw.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Bool("allowIgnored", opts.AllowIgnored), log.Bool("allowUnsupported", opts.AllowUnsupported), }}) @@ -409,7 +409,7 @@ type ExecuteBatchSpecOpts struct { // It returns an error if the batchSpecWorkspaceResolutionJob didn't finish // successfully. 
func (s *Service) ExecuteBatchSpec(ctx context.Context, opts ExecuteBatchSpecOpts) (batchSpec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.executeBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.executeBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("BatchSpecRandID", opts.BatchSpecRandID), }}) defer endObservation(1, observation.Args{}) @@ -471,7 +471,7 @@ type CancelBatchSpecOpts struct { // CancelBatchSpec cancels all BatchSpecWorkspaceExecutionJobs associated with // the BatchSpec. func (s *Service) CancelBatchSpec(ctx context.Context, opts CancelBatchSpecOpts) (batchSpec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.cancelBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.cancelBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("BatchSpecRandID", opts.BatchSpecRandID), }}) defer endObservation(1, observation.Args{}) @@ -521,7 +521,7 @@ type ReplaceBatchSpecInputOpts struct { // It returns an error if the batchSpecWorkspaceResolutionJob didn't finish // successfully. func (s *Service) ReplaceBatchSpecInput(ctx context.Context, opts ReplaceBatchSpecInputOpts) (batchSpec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.replaceBatchSpecInput.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.replaceBatchSpecInput.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // Before we hit the database, validate the new spec. 
@@ -564,7 +564,7 @@ func (s *Service) ReplaceBatchSpecInput(ctx context.Context, opts ReplaceBatchSp type UpsertBatchSpecInputOpts = CreateBatchSpecFromRawOpts func (s *Service) UpsertBatchSpecInput(ctx context.Context, opts UpsertBatchSpecInputOpts) (spec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.upsertBatchSpecInput.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.upsertBatchSpecInput.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Bool("allowIgnored", opts.AllowIgnored), log.Bool("allowUnsupported", opts.AllowUnsupported), }}) @@ -646,7 +646,7 @@ func replaceBatchSpec(ctx context.Context, tx *store.Store, oldSpec, newSpec *bt // CreateChangesetSpec validates the given raw spec input and creates the ChangesetSpec. func (s *Service) CreateChangesetSpec(ctx context.Context, rawSpec string, userID int32) (spec *btypes.ChangesetSpec, err error) { - ctx, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) spec, err = btypes.NewChangesetSpecFromRaw(rawSpec) @@ -689,7 +689,7 @@ func (e *changesetSpecNotFoundErr) NotFound() bool { return true } // If it doesn't exist yet, both return values are nil. // It accepts a *store.Store so that it can be used inside a transaction. func (s *Service) GetBatchChangeMatchingBatchSpec(ctx context.Context, spec *btypes.BatchSpec) (_ *btypes.BatchChange, err error) { - ctx, endObservation := s.operations.getBatchChangeMatchingBatchSpec.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.getBatchChangeMatchingBatchSpec.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // TODO: Should name be case-insensitive? i.e. are "foo" and "Foo" the same? 
@@ -712,7 +712,7 @@ func (s *Service) GetBatchChangeMatchingBatchSpec(ctx context.Context, spec *bty // GetNewestBatchSpec returns the newest batch spec that matches the given // spec's namespace and name and is owned by the given user, or nil if none is found. func (s *Service) GetNewestBatchSpec(ctx context.Context, tx *store.Store, spec *btypes.BatchSpec, userID int32) (_ *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) opts := store.GetNewestBatchSpecOpts{ @@ -755,7 +755,7 @@ func (o MoveBatchChangeOpts) String() string { // MoveBatchChange moves the batch change from one namespace to another and/or renames // the batch change. func (s *Service) MoveBatchChange(ctx context.Context, opts MoveBatchChangeOpts) (batchChange *btypes.BatchChange, err error) { - ctx, endObservation := s.operations.moveBatchChange.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.moveBatchChange.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) tx, err := s.store.Transact(ctx) @@ -798,7 +798,7 @@ func (s *Service) MoveBatchChange(ctx context.Context, opts MoveBatchChangeOpts) // CloseBatchChange closes the BatchChange with the given ID if it has not been closed yet. 
func (s *Service) CloseBatchChange(ctx context.Context, id int64, closeChangesets bool) (batchChange *btypes.BatchChange, err error) { - ctx, endObservation := s.operations.closeBatchChange.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.closeBatchChange.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) batchChange, err = s.store.GetBatchChange(ctx, store.GetBatchChangeOpts{ID: id}) @@ -844,7 +844,7 @@ func (s *Service) CloseBatchChange(ctx context.Context, id int64, closeChangeset // DeleteBatchChange deletes the BatchChange with the given ID if it hasn't been // deleted yet. func (s *Service) DeleteBatchChange(ctx context.Context, id int64) (err error) { - ctx, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) batchChange, err := s.store.GetBatchChange(ctx, store.GetBatchChangeOpts{ID: id}) @@ -863,7 +863,7 @@ func (s *Service) DeleteBatchChange(ctx context.Context, id int64) (err error) { // whether the actor in the context has permission to enqueue a sync and then // enqueues a sync by calling the repoupdater client. func (s *Service) EnqueueChangesetSync(ctx context.Context, id int64) (err error) { - ctx, endObservation := s.operations.enqueueChangesetSync.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.enqueueChangesetSync.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // Check for existence of changeset so we don't swallow that error. @@ -914,7 +914,7 @@ func (s *Service) EnqueueChangesetSync(ctx context.Context, id int64) (err error // whether the actor in the context has permission to enqueue a reconciler run and then // enqueues it by calling ResetReconcilerState. 
func (s *Service) ReenqueueChangeset(ctx context.Context, id int64) (changeset *btypes.Changeset, repo *types.Repo, err error) { - ctx, endObservation := s.operations.reenqueueChangeset.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.reenqueueChangeset.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) changeset, err = s.store.GetChangeset(ctx, store.GetChangesetOpts{ID: id}) @@ -999,7 +999,7 @@ var ErrNoNamespace = errors.New("no namespace given") // Since Bitbucket sends the username as a header in REST responses, we can // take it from there and complete the UserCredential. func (s *Service) FetchUsernameForBitbucketServerToken(ctx context.Context, externalServiceID, externalServiceType, token string) (_ string, err error) { - ctx, endObservation := s.operations.fetchUsernameForBitbucketServerToken.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.fetchUsernameForBitbucketServerToken.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) css, err := s.sourcer.ForExternalService(ctx, s.store, store.GetExternalServiceIDsOpts{ @@ -1037,7 +1037,7 @@ var _ usernameSource = &sources.BitbucketServerSource{} // ValidateAuthenticator creates a ChangesetSource, configures it with the given // authenticator and validates it can correctly access the remote server. 
func (s *Service) ValidateAuthenticator(ctx context.Context, externalServiceID, externalServiceType string, a auth.Authenticator) (err error) { - ctx, endObservation := s.operations.validateAuthenticator.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.validateAuthenticator.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) if Mocks.ValidateAuthenticator != nil { @@ -1072,7 +1072,7 @@ var ErrChangesetsForJobNotFound = errors.New("some changesets could not be found // given BatchChange, checking whether the actor in the context has permission to // trigger a job, and enqueues it. func (s *Service) CreateChangesetJobs(ctx context.Context, batchChangeID int64, ids []int64, jobType btypes.ChangesetJobType, payload interface{}, listOpts store.ListChangesetsOpts) (bulkGroupID string, err error) { - ctx, endObservation := s.operations.createChangesetJobs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createChangesetJobs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // Load the BatchChange to check for write permissions. @@ -1142,7 +1142,7 @@ func (s *Service) ValidateChangesetSpecs(ctx context.Context, batchSpecID int64) // as such and the validation errors that we want to return without logging // them as errors. var nonValidationErr error - ctx, endObservation := s.operations.validateChangesetSpecs.With(ctx, &nonValidationErr, observation.Args{}) + ctx, _, endObservation := s.operations.validateChangesetSpecs.With(ctx, &nonValidationErr, observation.Args{}) defer endObservation(1, observation.Args{}) conflicts, nonValidationErr := s.store.ListChangesetSpecsWithConflictingHeadRef(ctx, batchSpecID) @@ -1242,7 +1242,7 @@ func computeBatchSpecState(ctx context.Context, s *store.Store, spec *btypes.Bat // It only deletes changeset_specs created by workspaces. The imported changeset_specs // will not be altered. 
func (s *Service) RetryBatchSpecWorkspaces(ctx context.Context, workspaceIDs []int64) (err error) { - ctx, endObservation := s.operations.retryBatchSpecWorkspaces.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.retryBatchSpecWorkspaces.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) if len(workspaceIDs) == 0 { @@ -1352,7 +1352,7 @@ type RetryBatchSpecExecutionOpts struct { // It only deletes changeset_specs created by workspaces. The imported changeset_specs // will not be altered. func (s *Service) RetryBatchSpecExecution(ctx context.Context, opts RetryBatchSpecExecutionOpts) (err error) { - ctx, endObservation := s.operations.retryBatchSpecExecution.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.retryBatchSpecExecution.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) tx, err := s.store.Transact(ctx) diff --git a/enterprise/internal/batches/service/service_apply_batch_change.go b/enterprise/internal/batches/service/service_apply_batch_change.go index a8d43cf1317..30442b438c8 100644 --- a/enterprise/internal/batches/service/service_apply_batch_change.go +++ b/enterprise/internal/batches/service/service_apply_batch_change.go @@ -51,7 +51,7 @@ func (s *Service) ApplyBatchChange( ctx context.Context, opts ApplyBatchChangeOpts, ) (batchChange *btypes.BatchChange, err error) { - ctx, endObservation := s.operations.applyBatchChange.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.applyBatchChange.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) batchSpec, err := s.store.GetBatchSpec(ctx, store.GetBatchSpecOpts{ @@ -172,7 +172,7 @@ func (s *Service) ReconcileBatchChange( ctx context.Context, batchSpec *btypes.BatchSpec, ) (batchChange *btypes.BatchChange, previousSpecID int64, err error) { - ctx, endObservation := s.operations.reconcileBatchChange.With(ctx, &err, observation.Args{}) + ctx, _, 
endObservation := s.operations.reconcileBatchChange.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) batchChange, err = s.GetBatchChangeMatchingBatchSpec(ctx, batchSpec) diff --git a/enterprise/internal/batches/store/batch_changes.go b/enterprise/internal/batches/store/batch_changes.go index eaf0ae38972..dd1ac2b54e2 100644 --- a/enterprise/internal/batches/store/batch_changes.go +++ b/enterprise/internal/batches/store/batch_changes.go @@ -52,7 +52,7 @@ var batchChangeInsertColumns = []*sqlf.Query{ // CreateBatchChange creates the given batch change. func (s *Store) CreateBatchChange(ctx context.Context, c *btypes.BatchChange) (err error) { - ctx, endObservation := s.operations.createBatchChange.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createBatchChange.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := s.createBatchChangeQuery(c) @@ -98,7 +98,7 @@ func (s *Store) createBatchChangeQuery(c *btypes.BatchChange) *sqlf.Query { // UpdateBatchChange updates the given bach change. func (s *Store) UpdateBatchChange(ctx context.Context, c *btypes.BatchChange) (err error) { - ctx, endObservation := s.operations.updateBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(c.ID)), }}) defer endObservation(1, observation.Args{}) @@ -140,7 +140,7 @@ func (s *Store) updateBatchChangeQuery(c *btypes.BatchChange) *sqlf.Query { // DeleteBatchChange deletes the batch change with the given ID. 
func (s *Store) DeleteBatchChange(ctx context.Context, id int64) (err error) { - ctx, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(id)), }}) defer endObservation(1, observation.Args{}) @@ -170,7 +170,7 @@ type CountBatchChangesOpts struct { // CountBatchChanges returns the number of batch changes in the database. func (s *Store) CountBatchChanges(ctx context.Context, opts CountBatchChangesOpts) (count int, err error) { - ctx, endObservation := s.operations.countBatchChanges.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.countBatchChanges.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) repoAuthzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB())) @@ -281,7 +281,7 @@ type GetBatchChangeOpts struct { // GetBatchChange gets a batch change matching the given options. 
func (s *Store) GetBatchChange(ctx context.Context, opts GetBatchChangeOpts) (bc *btypes.BatchChange, err error) { - ctx, endObservation := s.operations.getBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), }}) defer endObservation(1, observation.Args{}) @@ -353,7 +353,7 @@ type GetBatchChangeDiffStatOpts struct { } func (s *Store) GetBatchChangeDiffStat(ctx context.Context, opts GetBatchChangeDiffStatOpts) (stat *diff.Stat, err error) { - ctx, endObservation := s.operations.getBatchChangeDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getBatchChangeDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchChangeID", int(opts.BatchChangeID)), }}) defer endObservation(1, observation.Args{}) @@ -396,7 +396,7 @@ func getBatchChangeDiffStatQuery(opts GetBatchChangeDiffStatOpts, authzConds *sq } func (s *Store) GetRepoDiffStat(ctx context.Context, repoID api.RepoID) (stat *diff.Stat, err error) { - ctx, endObservation := s.operations.getRepoDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getRepoDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repoID", int(repoID)), }}) defer endObservation(1, observation.Args{}) @@ -457,7 +457,7 @@ type ListBatchChangesOpts struct { // ListBatchChanges lists batch changes with the given filters. 
func (s *Store) ListBatchChanges(ctx context.Context, opts ListBatchChangesOpts) (cs []*btypes.BatchChange, next int64, err error) { - ctx, endObservation := s.operations.listBatchChanges.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listBatchChanges.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) repoAuthzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB())) diff --git a/enterprise/internal/batches/store/batch_spec_execution_cache_entry.go b/enterprise/internal/batches/store/batch_spec_execution_cache_entry.go index 582002161a0..661a980d033 100644 --- a/enterprise/internal/batches/store/batch_spec_execution_cache_entry.go +++ b/enterprise/internal/batches/store/batch_spec_execution_cache_entry.go @@ -39,7 +39,7 @@ var BatchSpecExecutionCacheEntryColums = SQLColumns{ // CreateBatchSpecExecutionCacheEntry creates the given batch spec workspace jobs. func (s *Store) CreateBatchSpecExecutionCacheEntry(ctx context.Context, ce *btypes.BatchSpecExecutionCacheEntry) (err error) { - ctx, endObservation := s.operations.createBatchSpecExecutionCacheEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.createBatchSpecExecutionCacheEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("Key", ce.Key), }}) defer endObservation(1, observation.Args{}) @@ -100,7 +100,7 @@ type ListBatchSpecExecutionCacheEntriesOpts struct { // ListBatchSpecExecutionCacheEntries gets the BatchSpecExecutionCacheEntries matching the given options. 
func (s *Store) ListBatchSpecExecutionCacheEntries(ctx context.Context, opts ListBatchSpecExecutionCacheEntriesOpts) (cs []*btypes.BatchSpecExecutionCacheEntry, err error) { - ctx, endObservation := s.operations.listBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.listBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("Count", len(opts.Keys)), }}) defer endObservation(1, observation.Args{}) @@ -160,7 +160,7 @@ WHERE // MarkUsedBatchSpecExecutionCacheEntries updates the LastUsedAt of the given cache entries. func (s *Store) MarkUsedBatchSpecExecutionCacheEntries(ctx context.Context, ids []int64) (err error) { - ctx, endObservation := s.operations.markUsedBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markUsedBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("count", len(ids)), }}) defer endObservation(1, observation.Args{}) @@ -212,7 +212,7 @@ DELETE FROM batch_spec_execution_cache_entries WHERE id IN (SELECT id FROM ids) ` func (s *Store) CleanBatchSpecExecutionCacheEntries(ctx context.Context, maxCacheSize int64) (err error) { - ctx, endObservation := s.operations.cleanBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.cleanBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("MaxTableSize", int(maxCacheSize)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/batches/store/batch_spec_resolution_jobs.go b/enterprise/internal/batches/store/batch_spec_resolution_jobs.go index d92deb1aa8e..14564d110a8 100644 --- a/enterprise/internal/batches/store/batch_spec_resolution_jobs.go +++ b/enterprise/internal/batches/store/batch_spec_resolution_jobs.go @@ -65,7 +65,7 @@ func 
(e ErrResolutionJobAlreadyExists) Error() string { // CreateBatchSpecResolutionJob creates the given batch spec resolutionjob jobs. func (s *Store) CreateBatchSpecResolutionJob(ctx context.Context, wj *btypes.BatchSpecResolutionJob) (err error) { - ctx, endObservation := s.operations.createBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) + ctx, _, endObservation := s.operations.createBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) defer endObservation(1, observation.Args{}) q := s.createBatchSpecResolutionJobQuery(wj) @@ -120,7 +120,7 @@ type GetBatchSpecResolutionJobOpts struct { // GetBatchSpecResolutionJob gets a BatchSpecResolutionJob matching the given options. func (s *Store) GetBatchSpecResolutionJob(ctx context.Context, opts GetBatchSpecResolutionJobOpts) (job *btypes.BatchSpecResolutionJob, err error) { - ctx, endObservation := s.operations.getBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), log.Int("BatchSpecID", int(opts.BatchSpecID)), }}) @@ -176,7 +176,7 @@ type ListBatchSpecResolutionJobsOpts struct { // ListBatchSpecResolutionJobs lists batch changes with the given filters. 
func (s *Store) ListBatchSpecResolutionJobs(ctx context.Context, opts ListBatchSpecResolutionJobsOpts) (cs []*btypes.BatchSpecResolutionJob, err error) { - ctx, endObservation := s.operations.listBatchSpecResolutionJobs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listBatchSpecResolutionJobs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listBatchSpecResolutionJobsQuery(opts) diff --git a/enterprise/internal/batches/store/batch_spec_workspace_execution_jobs.go b/enterprise/internal/batches/store/batch_spec_workspace_execution_jobs.go index 47563781999..83b9fc52db0 100644 --- a/enterprise/internal/batches/store/batch_spec_workspace_execution_jobs.go +++ b/enterprise/internal/batches/store/batch_spec_workspace_execution_jobs.go @@ -100,7 +100,7 @@ const executableWorkspaceJobsConditionFmtstr = ` // CreateBatchSpecWorkspaceExecutionJobs creates the given batch spec workspace jobs. func (s *Store) CreateBatchSpecWorkspaceExecutionJobs(ctx context.Context, batchSpecID int64) (err error) { - ctx, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchSpecID", int(batchSpecID)), }}) defer endObservation(1, observation.Args{}) @@ -124,7 +124,7 @@ WHERE // CreateBatchSpecWorkspaceExecutionJobsForWorkspaces creates the batch spec workspace jobs for the given workspaces. 
func (s *Store) CreateBatchSpecWorkspaceExecutionJobsForWorkspaces(ctx context.Context, workspaceIDs []int64) (err error) { - ctx, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobsForWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) + ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobsForWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) defer endObservation(1, observation.Args{}) q := sqlf.Sprintf(createBatchSpecWorkspaceExecutionJobsForWorkspacesQueryFmtstr, pq.Array(workspaceIDs)) @@ -142,7 +142,7 @@ RETURNING id // DeleteBatchSpecWorkspaceExecutionJobs func (s *Store) DeleteBatchSpecWorkspaceExecutionJobs(ctx context.Context, ids []int64) (err error) { - ctx, endObservation := s.operations.deleteBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) + ctx, _, endObservation := s.operations.deleteBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) defer endObservation(1, observation.Args{}) q := sqlf.Sprintf(deleteBatchSpecWorkspaceExecutionJobsQueryFmtstr, pq.Array(ids)) @@ -164,7 +164,7 @@ type GetBatchSpecWorkspaceExecutionJobOpts struct { // GetBatchSpecWorkspaceExecutionJob gets a BatchSpecWorkspaceExecutionJob matching the given options. func (s *Store) GetBatchSpecWorkspaceExecutionJob(ctx context.Context, opts GetBatchSpecWorkspaceExecutionJobOpts) (job *btypes.BatchSpecWorkspaceExecutionJob, err error) { - ctx, endObservation := s.operations.getBatchSpecWorkspaceExecutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getBatchSpecWorkspaceExecutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), }}) defer endObservation(1, observation.Args{}) @@ -232,7 +232,7 @@ type ListBatchSpecWorkspaceExecutionJobsOpts struct { // ListBatchSpecWorkspaceExecutionJobs lists batch changes with the given filters. 
func (s *Store) ListBatchSpecWorkspaceExecutionJobs(ctx context.Context, opts ListBatchSpecWorkspaceExecutionJobsOpts) (cs []*btypes.BatchSpecWorkspaceExecutionJob, err error) { - ctx, endObservation := s.operations.listBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listBatchSpecWorkspaceExecutionJobsQuery(opts) @@ -321,7 +321,7 @@ type CancelBatchSpecWorkspaceExecutionJobsOpts struct { // The returned list of records may not match the list of the given IDs, if // some of the records were already canceled, completed, failed, errored, etc. func (s *Store) CancelBatchSpecWorkspaceExecutionJobs(ctx context.Context, opts CancelBatchSpecWorkspaceExecutionJobsOpts) (jobs []*btypes.BatchSpecWorkspaceExecutionJob, err error) { - ctx, endObservation := s.operations.cancelBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) + ctx, _, endObservation := s.operations.cancelBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) defer endObservation(1, observation.Args{}) if opts.BatchSpecID == 0 && len(opts.IDs) == 0 { @@ -412,7 +412,7 @@ func (s *Store) cancelBatchSpecWorkspaceExecutionJobQuery(opts CancelBatchSpecWo // SetBatchSpecWorkspaceExecutionJobAccessToken sets the access_token_id column to the given ID. 
func (s *Store) SetBatchSpecWorkspaceExecutionJobAccessToken(ctx context.Context, jobID, tokenID int64) (err error) { - ctx, endObservation := s.operations.setBatchSpecWorkspaceExecutionJobAccessToken.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.setBatchSpecWorkspaceExecutionJobAccessToken.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(jobID)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/batches/store/batch_spec_workspaces.go b/enterprise/internal/batches/store/batch_spec_workspaces.go index 647f96230bf..11c79fa1c68 100644 --- a/enterprise/internal/batches/store/batch_spec_workspaces.go +++ b/enterprise/internal/batches/store/batch_spec_workspaces.go @@ -68,7 +68,7 @@ var BatchSpecWorkspaceColums = SQLColumns{ // CreateBatchSpecWorkspace creates the given batch spec workspace jobs. func (s *Store) CreateBatchSpecWorkspace(ctx context.Context, ws ...*btypes.BatchSpecWorkspace) (err error) { - ctx, endObservation := s.operations.createBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.createBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("count", len(ws)), }}) defer endObservation(1, observation.Args{}) @@ -150,7 +150,7 @@ type GetBatchSpecWorkspaceOpts struct { // GetBatchSpecWorkspace gets a BatchSpecWorkspace matching the given options. 
func (s *Store) GetBatchSpecWorkspace(ctx context.Context, opts GetBatchSpecWorkspaceOpts) (job *btypes.BatchSpecWorkspace, err error) { - ctx, endObservation := s.operations.getBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), }}) defer endObservation(1, observation.Args{}) @@ -273,7 +273,7 @@ func (opts ListBatchSpecWorkspacesOpts) SQLConds(ctx context.Context, db databas // ListBatchSpecWorkspaces lists batch spec workspaces with the given filters. func (s *Store) ListBatchSpecWorkspaces(ctx context.Context, opts ListBatchSpecWorkspacesOpts) (cs []*btypes.BatchSpecWorkspace, next int64, err error) { - ctx, endObservation := s.operations.listBatchSpecWorkspaces.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listBatchSpecWorkspaces.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q, err := listBatchSpecWorkspacesQuery(ctx, s.DatabaseDB(), opts) @@ -323,7 +323,7 @@ func listBatchSpecWorkspacesQuery(ctx context.Context, db database.DB, opts List // CountBatchSpecWorkspaces counts batch spec workspaces with the given filters. func (s *Store) CountBatchSpecWorkspaces(ctx context.Context, opts ListBatchSpecWorkspacesOpts) (count int64, err error) { - ctx, endObservation := s.operations.countBatchSpecWorkspaces.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.countBatchSpecWorkspaces.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q, err := countBatchSpecWorkspacesQuery(ctx, s.DatabaseDB(), opts) @@ -375,7 +375,7 @@ AND NOT %s // MarkSkippedBatchSpecWorkspaces marks the workspace that were skipped in // CreateBatchSpecWorkspaceExecutionJobs as skipped. 
func (s *Store) MarkSkippedBatchSpecWorkspaces(ctx context.Context, batchSpecID int64) (err error) { - ctx, endObservation := s.operations.markSkippedBatchSpecWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markSkippedBatchSpecWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchSpecID", int(batchSpecID)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/batches/store/batch_specs.go b/enterprise/internal/batches/store/batch_specs.go index 9b8c0a5d0f8..1b6a7fe961a 100644 --- a/enterprise/internal/batches/store/batch_specs.go +++ b/enterprise/internal/batches/store/batch_specs.go @@ -54,7 +54,7 @@ const batchSpecInsertColsFmt = `(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s) // CreateBatchSpec creates the given BatchSpec. func (s *Store) CreateBatchSpec(ctx context.Context, c *btypes.BatchSpec) (err error) { - ctx, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q, err := s.createBatchSpecQuery(c) @@ -111,7 +111,7 @@ func (s *Store) createBatchSpecQuery(c *btypes.BatchSpec) (*sqlf.Query, error) { // UpdateBatchSpec updates the given BatchSpec. func (s *Store) UpdateBatchSpec(ctx context.Context, c *btypes.BatchSpec) (err error) { - ctx, endObservation := s.operations.updateBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(c.ID)), }}) defer endObservation(1, observation.Args{}) @@ -163,7 +163,7 @@ func (s *Store) updateBatchSpecQuery(c *btypes.BatchSpec) (*sqlf.Query, error) { // DeleteBatchSpec deletes the BatchSpec with the given ID. 
func (s *Store) DeleteBatchSpec(ctx context.Context, id int64) (err error) { - ctx, endObservation := s.operations.deleteBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(id)), }}) defer endObservation(1, observation.Args{}) @@ -186,7 +186,7 @@ type CountBatchSpecsOpts struct { // CountBatchSpecs returns the number of code mods in the database. func (s *Store) CountBatchSpecs(ctx context.Context, opts CountBatchSpecsOpts) (count int, err error) { - ctx, endObservation := s.operations.countBatchSpecs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.countBatchSpecs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := countBatchSpecsQuery(opts) @@ -243,7 +243,7 @@ type GetBatchSpecOpts struct { // GetBatchSpec gets a BatchSpec matching the given options. func (s *Store) GetBatchSpec(ctx context.Context, opts GetBatchSpecOpts) (spec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.getBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), log.String("randID", opts.RandID), }}) @@ -311,7 +311,7 @@ type GetNewestBatchSpecOpts struct { // GetNewestBatchSpec returns the newest batch spec that matches the given // options. 
func (s *Store) GetNewestBatchSpec(ctx context.Context, opts GetNewestBatchSpecOpts) (spec *btypes.BatchSpec, err error) { - ctx, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := getNewestBatchSpecQuery(&opts) @@ -379,7 +379,7 @@ type ListBatchSpecsOpts struct { // ListBatchSpecs lists BatchSpecs with the given filters. func (s *Store) ListBatchSpecs(ctx context.Context, opts ListBatchSpecsOpts) (cs []*btypes.BatchSpec, next int64, err error) { - ctx, endObservation := s.operations.listBatchSpecs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listBatchSpecs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listBatchSpecsQuery(&opts) @@ -459,7 +459,7 @@ ON // - We could: Add execution_started_at to the batch_specs table and delete // all that are older than TIME_PERIOD and never started executing. func (s *Store) DeleteExpiredBatchSpecs(ctx context.Context) (err error) { - ctx, endObservation := s.operations.deleteExpiredBatchSpecs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.deleteExpiredBatchSpecs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) expirationTime := s.now().Add(-btypes.BatchSpecTTL) diff --git a/enterprise/internal/batches/store/bulk_operations.go b/enterprise/internal/batches/store/bulk_operations.go index bb10fbfc232..560ef953153 100644 --- a/enterprise/internal/batches/store/bulk_operations.go +++ b/enterprise/internal/batches/store/bulk_operations.go @@ -52,7 +52,7 @@ type GetBulkOperationOpts struct { // GetBulkOperation gets a BulkOperation matching the given options. 
func (s *Store) GetBulkOperation(ctx context.Context, opts GetBulkOperationOpts) (op *btypes.BulkOperation, err error) { - ctx, endObservation := s.operations.getBulkOperation.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getBulkOperation.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("ID", opts.ID), }}) defer endObservation(1, observation.Args{}) @@ -112,7 +112,7 @@ type ListBulkOperationsOpts struct { // ListBulkOperations gets a list of BulkOperations matching the given options. func (s *Store) ListBulkOperations(ctx context.Context, opts ListBulkOperationsOpts) (bs []*btypes.BulkOperation, next int64, err error) { - ctx, endObservation := s.operations.listBulkOperations.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listBulkOperations.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listBulkOperationsQuery(&opts) @@ -181,7 +181,7 @@ type CountBulkOperationsOpts struct { // CountBulkOperations gets the count of BulkOperations in the given batch change. func (s *Store) CountBulkOperations(ctx context.Context, opts CountBulkOperationsOpts) (count int, err error) { - ctx, endObservation := s.operations.countBulkOperations.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.countBulkOperations.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchChangeID", int(opts.BatchChangeID)), }}) defer endObservation(1, observation.Args{}) @@ -224,7 +224,7 @@ type ListBulkOperationErrorsOpts struct { // ListBulkOperationErrors gets a list of BulkOperationErrors in a given BulkOperation. 
func (s *Store) ListBulkOperationErrors(ctx context.Context, opts ListBulkOperationErrorsOpts) (es []*btypes.BulkOperationError, err error) { - ctx, endObservation := s.operations.listBulkOperationErrors.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.listBulkOperationErrors.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("bulkOperationID", opts.BulkOperationID), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/batches/store/changeset_events.go b/enterprise/internal/batches/store/changeset_events.go index 5731f96e7e7..4938dfc9c24 100644 --- a/enterprise/internal/batches/store/changeset_events.go +++ b/enterprise/internal/batches/store/changeset_events.go @@ -25,7 +25,7 @@ type GetChangesetEventOpts struct { // GetChangesetEvent gets a changeset matching the given options. func (s *Store) GetChangesetEvent(ctx context.Context, opts GetChangesetEventOpts) (ev *btypes.ChangesetEvent, err error) { - ctx, endObservation := s.operations.getChangesetEvent.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getChangesetEvent.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), log.Int("changesetID", int(opts.ChangesetID)), }}) @@ -95,7 +95,7 @@ type ListChangesetEventsOpts struct { // ListChangesetEvents lists ChangesetEvents with the given filters. func (s *Store) ListChangesetEvents(ctx context.Context, opts ListChangesetEventsOpts) (cs []*btypes.ChangesetEvent, next int64, err error) { - ctx, endObservation := s.operations.listChangesetEvents.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listChangesetEvents.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listChangesetEventsQuery(&opts) @@ -161,7 +161,7 @@ type CountChangesetEventsOpts struct { // CountChangesetEvents returns the number of changeset events in the database. 
func (s *Store) CountChangesetEvents(ctx context.Context, opts CountChangesetEventsOpts) (count int, err error) { - ctx, endObservation := s.operations.countChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.countChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("changesetID", int(opts.ChangesetID)), }}) defer endObservation(1, observation.Args{}) @@ -191,7 +191,7 @@ func countChangesetEventsQuery(opts *CountChangesetEventsOpts) *sqlf.Query { // UpsertChangesetEvents creates or updates the given ChangesetEvents. func (s *Store) UpsertChangesetEvents(ctx context.Context, cs ...*btypes.ChangesetEvent) (err error) { - ctx, endObservation := s.operations.upsertChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.upsertChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("count", len(cs)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/batches/store/changeset_jobs.go b/enterprise/internal/batches/store/changeset_jobs.go index 28ce5871779..292087fa09a 100644 --- a/enterprise/internal/batches/store/changeset_jobs.go +++ b/enterprise/internal/batches/store/changeset_jobs.go @@ -58,7 +58,7 @@ var changesetJobColumns = SQLColumns{ // CreateChangesetJob creates the given changeset jobs. func (s *Store) CreateChangesetJob(ctx context.Context, cs ...*btypes.ChangesetJob) (err error) { - ctx, endObservation := s.operations.createChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.createChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("count", len(cs)), }}) defer endObservation(1, observation.Args{}) @@ -126,7 +126,7 @@ type GetChangesetJobOpts struct { // GetChangesetJob gets a ChangesetJob matching the given options. 
func (s *Store) GetChangesetJob(ctx context.Context, opts GetChangesetJobOpts) (job *btypes.ChangesetJob, err error) { - ctx, endObservation := s.operations.getChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/batches/store/changeset_specs.go b/enterprise/internal/batches/store/changeset_specs.go index 62c4bd946fb..f37c584b95e 100644 --- a/enterprise/internal/batches/store/changeset_specs.go +++ b/enterprise/internal/batches/store/changeset_specs.go @@ -62,7 +62,7 @@ var changesetSpecColumns = SQLColumns{ // CreateChangesetSpec creates the given ChangesetSpecs. func (s *Store) CreateChangesetSpec(ctx context.Context, cs ...*btypes.ChangesetSpec) (err error) { - ctx, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("Count", len(cs)), }}) defer endObservation(1, observation.Args{}) @@ -143,7 +143,7 @@ func (s *Store) CreateChangesetSpec(ctx context.Context, cs ...*btypes.Changeset // UpdateChangesetSpecBatchSpecID updates the given ChangesetSpecs to be owned by the given batch spec. 
func (s *Store) UpdateChangesetSpecBatchSpecID(ctx context.Context, cs []int64, batchSpec int64) (err error) { - ctx, endObservation := s.operations.updateChangesetSpecBatchSpecID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateChangesetSpecBatchSpecID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("Count", len(cs)), }}) defer endObservation(1, observation.Args{}) @@ -170,7 +170,7 @@ func (s *Store) updateChangesetSpecQuery(cs []int64, batchSpec int64) *sqlf.Quer // DeleteChangesetSpec deletes the ChangesetSpec with the given ID. func (s *Store) DeleteChangesetSpec(ctx context.Context, id int64) (err error) { - ctx, endObservation := s.operations.deleteChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(id)), }}) defer endObservation(1, observation.Args{}) @@ -192,7 +192,7 @@ type CountChangesetSpecsOpts struct { // CountChangesetSpecs returns the number of changeset specs in the database. func (s *Store) CountChangesetSpecs(ctx context.Context, opts CountChangesetSpecsOpts) (count int, err error) { - ctx, endObservation := s.operations.countChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.countChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchSpecID", int(opts.BatchSpecID)), }}) defer endObservation(1, observation.Args{}) @@ -243,7 +243,7 @@ type GetChangesetSpecOpts struct { // GetChangesetSpec gets a changeset spec matching the given options. 
func (s *Store) GetChangesetSpec(ctx context.Context, opts GetChangesetSpecOpts) (spec *btypes.ChangesetSpec, err error) { - ctx, endObservation := s.operations.getChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), log.String("randID", opts.RandID), }}) @@ -317,7 +317,7 @@ type ListChangesetSpecsOpts struct { // ListChangesetSpecs lists ChangesetSpecs with the given filters. func (s *Store) ListChangesetSpecs(ctx context.Context, opts ListChangesetSpecsOpts) (cs btypes.ChangesetSpecs, next int64, err error) { - ctx, endObservation := s.operations.listChangesetSpecs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listChangesetSpecs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listChangesetSpecsQuery(&opts) @@ -408,7 +408,7 @@ ORDER BY repo_id ASC, head_ref ASC ` func (s *Store) ListChangesetSpecsWithConflictingHeadRef(ctx context.Context, batchSpecID int64) (conflicts []ChangesetSpecHeadRefConflict, err error) { - ctx, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := sqlf.Sprintf(listChangesetSpecsWithConflictingHeadQueryFmtstr, batchSpecID) @@ -431,7 +431,7 @@ func (s *Store) ListChangesetSpecsWithConflictingHeadRef(ctx context.Context, ba // within BatchSpecTTL. // TODO: Fix comment. 
func (s *Store) DeleteExpiredChangesetSpecs(ctx context.Context) (err error) { - ctx, endObservation := s.operations.deleteExpiredChangesetSpecs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.deleteExpiredChangesetSpecs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) changesetSpecTTLExpiration := s.now().Add(-btypes.ChangesetSpecTTL) @@ -474,7 +474,7 @@ type DeleteChangesetSpecsOpts struct { // DeleteChangesetSpecs deletes the ChangesetSpecs matching the given options. func (s *Store) DeleteChangesetSpecs(ctx context.Context, opts DeleteChangesetSpecsOpts) (err error) { - ctx, endObservation := s.operations.deleteChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchSpecID", int(opts.BatchSpecID)), }}) defer endObservation(1, observation.Args{}) @@ -598,7 +598,7 @@ type GetRewirerMappingsOpts struct { // Spec 4 should be attached to Changeset 4, since it tracks PR #333 in Repo C. (ChangesetSpec = 4, Changeset = 4) // Changeset 3 doesn't have a matching spec and should be detached from the batch change (and closed) (ChangesetSpec == 0, Changeset = 3). 
func (s *Store) GetRewirerMappings(ctx context.Context, opts GetRewirerMappingsOpts) (mappings btypes.RewirerMappings, err error) { - ctx, endObservation := s.operations.getRewirerMappings.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getRewirerMappings.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchSpecID", int(opts.BatchSpecID)), log.Int("batchChangeID", int(opts.BatchChangeID)), }}) diff --git a/enterprise/internal/batches/store/changesets.go b/enterprise/internal/batches/store/changesets.go index 288f0cd5114..c6aeedd24cd 100644 --- a/enterprise/internal/batches/store/changesets.go +++ b/enterprise/internal/batches/store/changesets.go @@ -208,7 +208,7 @@ func (s *Store) UpsertChangeset(ctx context.Context, c *btypes.Changeset) error // CreateChangeset creates the given Changeset. func (s *Store) CreateChangeset(ctx context.Context, c *btypes.Changeset) (err error) { - ctx, endObservation := s.operations.createChangeset.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createChangeset.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) if c.CreatedAt.IsZero() { @@ -236,7 +236,7 @@ RETURNING %s // DeleteChangeset deletes the Changeset with the given ID. func (s *Store) DeleteChangeset(ctx context.Context, id int64) (err error) { - ctx, endObservation := s.operations.deleteChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(id)), }}) defer endObservation(1, observation.Args{}) @@ -267,7 +267,7 @@ type CountChangesetsOpts struct { // CountChangesets returns the number of changesets in the database. 
func (s *Store) CountChangesets(ctx context.Context, opts CountChangesetsOpts) (count int, err error) { - ctx, endObservation := s.operations.countChangesets.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.countChangesets.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) authzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB())) @@ -368,7 +368,7 @@ type GetChangesetOpts struct { // GetChangeset gets a changeset matching the given options. func (s *Store) GetChangeset(ctx context.Context, opts GetChangesetOpts) (ch *btypes.Changeset, err error) { - ctx, endObservation := s.operations.getChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), }}) defer endObservation(1, observation.Args{}) @@ -441,7 +441,7 @@ type ListChangesetSyncDataOpts struct { // ListChangesetSyncData returns sync data on all non-externally-deleted changesets // that are part of at least one open batch change. func (s *Store) ListChangesetSyncData(ctx context.Context, opts ListChangesetSyncDataOpts) (sd []*btypes.ChangesetSyncData, err error) { - ctx, endObservation := s.operations.listChangesetSyncData.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listChangesetSyncData.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listChangesetSyncDataQuery(opts) @@ -528,7 +528,7 @@ type ListChangesetsOpts struct { // ListChangesets lists Changesets with the given filters. 
func (s *Store) ListChangesets(ctx context.Context, opts ListChangesetsOpts) (cs btypes.Changesets, next int64, err error) { - ctx, endObservation := s.operations.listChangesets.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listChangesets.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) authzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB())) @@ -644,7 +644,7 @@ func listChangesetsQuery(opts *ListChangesetsOpts, authzConds *sqlf.Query) *sqlf // `resetState` argument but *only if* the `currentState` matches its current // `reconciler_state`. func (s *Store) EnqueueChangeset(ctx context.Context, cs *btypes.Changeset, resetState, currentState btypes.ReconcilerState) (err error) { - ctx, endObservation := s.operations.enqueueChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.enqueueChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(cs.ID)), }}) defer endObservation(1, observation.Args{}) @@ -698,7 +698,7 @@ func (s *Store) enqueueChangesetQuery(cs *btypes.Changeset, resetState, currentS // UpdateChangeset updates the given Changeset. func (s *Store) UpdateChangeset(ctx context.Context, cs *btypes.Changeset) (err error) { - ctx, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(cs.ID)), }}) defer endObservation(1, observation.Args{}) @@ -727,7 +727,7 @@ RETURNING // UpdateChangesetBatchChanges updates only the `batch_changes` & `updated_at` // columns of the given Changeset. 
func (s *Store) UpdateChangesetBatchChanges(ctx context.Context, cs *btypes.Changeset) (err error) { - ctx, endObservation := s.operations.updateChangesetBatchChanges.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateChangesetBatchChanges.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(cs.ID)), }}) defer endObservation(1, observation.Args{}) @@ -743,7 +743,7 @@ func (s *Store) UpdateChangesetBatchChanges(ctx context.Context, cs *btypes.Chan // UpdateChangesetUiPublicationState updates only the `ui_publication_state` & // `updated_at` columns of the given Changeset. func (s *Store) UpdateChangesetUiPublicationState(ctx context.Context, cs *btypes.Changeset) (err error) { - ctx, endObservation := s.operations.updateChangesetUIPublicationState.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateChangesetUIPublicationState.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(cs.ID)), }}) defer endObservation(1, observation.Args{}) @@ -785,7 +785,7 @@ RETURNING // that relate to the state of the changeset on the code host, e.g. // external_branch, external_state, etc. func (s *Store) UpdateChangesetCodeHostState(ctx context.Context, cs *btypes.Changeset) (err error) { - ctx, endObservation := s.operations.updateChangesetCodeHostState.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateChangesetCodeHostState.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(cs.ID)), }}) defer endObservation(1, observation.Args{}) @@ -853,7 +853,7 @@ RETURNING // a slice of head refs. 
We need this in order to match incoming webhooks to pull requests as // the only information they provide is the remote branch func (s *Store) GetChangesetExternalIDs(ctx context.Context, spec api.ExternalRepoSpec, refs []string) (externalIDs []string, err error) { - ctx, endObservation := s.operations.getChangesetExternalIDs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.getChangesetExternalIDs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) queryFmtString := ` @@ -891,7 +891,7 @@ var CanceledChangesetFailureMessage = "Canceled" // currently processing changesets have finished executing. func (s *Store) CancelQueuedBatchChangeChangesets(ctx context.Context, batchChangeID int64) (err error) { var iterations int - ctx, endObservation := s.operations.cancelQueuedBatchChangeChangesets.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.cancelQueuedBatchChangeChangesets.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchChangeID", int(batchChangeID)), }}) defer endObservation(1, observation.Args{LogFields: []log.Field{log.Int("iterations", iterations)}}) @@ -968,7 +968,7 @@ WHERE // passed. func (s *Store) EnqueueChangesetsToClose(ctx context.Context, batchChangeID int64) (err error) { var iterations int - ctx, endObservation := s.operations.enqueueChangesetsToClose.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.enqueueChangesetsToClose.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchChangeID", int(batchChangeID)), }}) defer func() { @@ -1198,7 +1198,7 @@ func scanChangeset(t *btypes.Changeset, s dbutil.Scanner) error { // GetChangesetsStats returns statistics on all the changesets associated to the given batch change, // or all changesets across the instance. 
func (s *Store) GetChangesetsStats(ctx context.Context, batchChangeID int64) (stats btypes.ChangesetsStats, err error) { - ctx, endObservation := s.operations.getChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("batchChangeID", int(batchChangeID)), }}) defer endObservation(1, observation.Args{}) @@ -1252,7 +1252,7 @@ WHERE // GetRepoChangesetsStats returns statistics on all the changesets associated to the given repo. func (s *Store) GetRepoChangesetsStats(ctx context.Context, repoID api.RepoID) (stats *btypes.RepoChangesetsStats, err error) { - ctx, endObservation := s.operations.getRepoChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getRepoChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repoID", int(repoID)), }}) defer endObservation(1, observation.Args{}) @@ -1284,7 +1284,7 @@ func (s *Store) GetRepoChangesetsStats(ctx context.Context, repoID api.RepoID) ( } func (s *Store) EnqueueNextScheduledChangeset(ctx context.Context) (ch *btypes.Changeset, err error) { - ctx, endObservation := s.operations.enqueueNextScheduledChangeset.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.enqueueNextScheduledChangeset.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := sqlf.Sprintf( @@ -1326,7 +1326,7 @@ RETURNING %s ` func (s *Store) GetChangesetPlaceInSchedulerQueue(ctx context.Context, id int64) (place int, err error) { - ctx, endObservation := s.operations.getChangesetPlaceInSchedulerQueue.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getChangesetPlaceInSchedulerQueue.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(id)), }}) defer endObservation(1, observation.Args{}) diff --git 
a/enterprise/internal/batches/store/codehost.go b/enterprise/internal/batches/store/codehost.go index 1e60b30db71..21b77c243ca 100644 --- a/enterprise/internal/batches/store/codehost.go +++ b/enterprise/internal/batches/store/codehost.go @@ -18,7 +18,7 @@ type ListCodeHostsOpts struct { } func (s *Store) ListCodeHosts(ctx context.Context, opts ListCodeHostsOpts) (cs []*btypes.CodeHost, err error) { - ctx, endObservation := s.operations.listCodeHosts.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listCodeHosts.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listCodeHostsQuery(opts) @@ -156,7 +156,7 @@ type GetExternalServiceIDsOpts struct { } func (s *Store) GetExternalServiceIDs(ctx context.Context, opts GetExternalServiceIDsOpts) (ids []int64, err error) { - ctx, endObservation := s.operations.getExternalServiceIDs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.getExternalServiceIDs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := getExternalServiceIDsQuery(opts) diff --git a/enterprise/internal/batches/store/site_credentials.go b/enterprise/internal/batches/store/site_credentials.go index bfc0b18a4c2..9fe2b561472 100644 --- a/enterprise/internal/batches/store/site_credentials.go +++ b/enterprise/internal/batches/store/site_credentials.go @@ -13,7 +13,7 @@ import ( ) func (s *Store) CreateSiteCredential(ctx context.Context, c *btypes.SiteCredential, credential auth.Authenticator) (err error) { - ctx, endObservation := s.operations.createSiteCredential.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createSiteCredential.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) if c.CreatedAt.IsZero() { @@ -65,7 +65,7 @@ func createSiteCredentialQuery(c *btypes.SiteCredential) *sqlf.Query { } func (s *Store) DeleteSiteCredential(ctx context.Context, id int64) (err error) { - ctx, 
endObservation := s.operations.deleteSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(id)), }}) defer endObservation(1, observation.Args{}) @@ -106,7 +106,7 @@ type GetSiteCredentialOpts struct { } func (s *Store) GetSiteCredential(ctx context.Context, opts GetSiteCredentialOpts) (sc *btypes.SiteCredential, err error) { - ctx, endObservation := s.operations.getSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(opts.ID)), }}) defer endObservation(1, observation.Args{}) @@ -168,7 +168,7 @@ type ListSiteCredentialsOpts struct { } func (s *Store) ListSiteCredentials(ctx context.Context, opts ListSiteCredentialsOpts) (cs []*btypes.SiteCredential, next int64, err error) { - ctx, endObservation := s.operations.listSiteCredentials.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.listSiteCredentials.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) q := listSiteCredentialsQuery(opts) @@ -231,7 +231,7 @@ func listSiteCredentialsQuery(opts ListSiteCredentialsOpts) *sqlf.Query { } func (s *Store) UpdateSiteCredential(ctx context.Context, c *btypes.SiteCredential) (err error) { - ctx, endObservation := s.operations.updateSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("ID", int(c.ID)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/autoindex/enqueuer/enqueuer.go b/enterprise/internal/codeintel/autoindex/enqueuer/enqueuer.go index ca4eaced13d..69c58777bd3 100644 --- 
a/enterprise/internal/codeintel/autoindex/enqueuer/enqueuer.go +++ b/enterprise/internal/codeintel/autoindex/enqueuer/enqueuer.go @@ -47,7 +47,7 @@ func NewIndexEnqueuer( // InferIndexConfiguration looks at the repository contents at the lastest commit on the default branch of the given // repository and determines an index configuration that is likely to succeed. func (s *IndexEnqueuer) InferIndexConfiguration(ctx context.Context, repositoryID int, commit string) (_ *config.IndexConfiguration, hints []config.IndexJobHint, err error) { - ctx, trace, endObservation := s.operations.InferIndexConfiguration.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := s.operations.InferIndexConfiguration.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }, @@ -102,7 +102,7 @@ func (s *IndexEnqueuer) InferIndexConfiguration(ctx context.Context, repositoryI // will cause this method to no-op. Note that this is NOT a guarantee that there will never be any duplicate records // when the flag is false. func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force bool) (_ []store.Index, err error) { - ctx, trace, endObservation := s.operations.QueueIndex.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := s.operations.QueueIndex.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }, @@ -122,7 +122,7 @@ func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev, // QueueIndexesForPackage enqueues index jobs for a dependency of a recently-processed precise code // intelligence index. 
func (s *IndexEnqueuer) QueueIndexesForPackage(ctx context.Context, pkg precise.Package) (err error) { - ctx, trace, endObservation := s.operations.QueueIndexForPackage.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := s.operations.QueueIndexForPackage.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.String("scheme", pkg.Scheme), log.String("name", pkg.Name), diff --git a/enterprise/internal/codeintel/gitserver/client.go b/enterprise/internal/codeintel/gitserver/client.go index 9910fff813a..7a02a8dd30c 100644 --- a/enterprise/internal/codeintel/gitserver/client.go +++ b/enterprise/internal/codeintel/gitserver/client.go @@ -35,7 +35,7 @@ func New(db database.DB, dbStore DBStore, observationContext *observation.Contex // CommitExists determines if the given commit exists in the given repository. func (c *Client) CommitExists(ctx context.Context, repositoryID int, commit string) (_ bool, err error) { - ctx, endObservation := c.operations.commitExists.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.commitExists.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), }}) @@ -56,7 +56,7 @@ type RepositoryCommit struct { // CommitsExist determines if the given commits exists in the given repositories. This method returns a // slice of the same size as the input slice, true indicating that the commit at the symmetric index exists. 
func (c *Client) CommitsExist(ctx context.Context, commits []RepositoryCommit) (_ []bool, err error) { - ctx, endObservation := c.operations.commitsExist.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.commitsExist.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numCommits", len(commits)), }}) defer endObservation(1, observation.Args{}) @@ -126,7 +126,7 @@ func (c *Client) CommitsExist(ctx context.Context, commits []RepositoryCommit) ( // for the given repository (which occurs with empty repositories), a false-valued flag is returned along with // a nil error and empty revision. func (c *Client) Head(ctx context.Context, repositoryID int) (_ string, revisionExists bool, err error) { - ctx, endObservation := c.operations.head.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.head.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -142,7 +142,7 @@ func (c *Client) Head(ctx context.Context, repositoryID int) (_ string, revision // CommitDate returns the time that the given commit was committed. If the given revision does not exist, // a false-valued flag is returned along with a nil error and zero-valued time. 
func (c *Client) CommitDate(ctx context.Context, repositoryID int, commit string) (_ string, _ time.Time, revisionExists bool, err error) { - ctx, endObservation := c.operations.commitDate.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.commitDate.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), }}) @@ -176,7 +176,7 @@ func (c *Client) CommitDate(ctx context.Context, repositoryID int, commit string } func (c *Client) RepoInfo(ctx context.Context, repos ...api.RepoName) (_ map[api.RepoName]*protocol.RepoInfo, err error) { - ctx, endObservation := c.operations.repoInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.repoInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numRepos", len(repos)), }}) defer endObservation(1, observation.Args{}) @@ -193,7 +193,7 @@ func (c *Client) RepoInfo(ctx context.Context, repos ...api.RepoName) (_ map[api // to its parents. If a commit is supplied, the returned graph will be rooted at the given // commit. If a non-zero limit is supplied, at most that many commits will be returned. func (c *Client) CommitGraph(ctx context.Context, repositoryID int, opts gitserver.CommitGraphOptions) (_ *gitdomain.CommitGraph, err error) { - ctx, endObservation := c.operations.commitGraph.With(ctx, &err, observation.Args{ + ctx, _, endObservation := c.operations.commitGraph.With(ctx, &err, observation.Args{ LogFields: append([]log.Field{log.Int("repositoryID", repositoryID)}, opts.LogFields()...), }) defer endObservation(1, observation.Args{}) @@ -228,7 +228,7 @@ func (c *Client) CommitGraph(ctx context.Context, repositoryID int, opts gitserv // branch and tag of the given repository. If any git objects are provided, it will // only populate entries for descriptions pointing at the given git objects. 
func (c *Client) RefDescriptions(ctx context.Context, repositoryID int, pointedAt ...string) (_ map[string][]gitdomain.RefDescription, err error) { - ctx, endObservation := c.operations.refDescriptions.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.refDescriptions.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -246,7 +246,7 @@ func (c *Client) RefDescriptions(ctx context.Context, repositoryID int, pointedA // as: all commits on {branchName} not also on the tip of the default branch. If the supplied branch name is the // default branch, then this method instead returns all commits reachable from HEAD. func (c *Client) CommitsUniqueToBranch(ctx context.Context, repositoryID int, branchName string, isDefaultBranch bool, maxAge *time.Time) (_ map[string]time.Time, err error) { - ctx, endObservation := c.operations.commitsUniqueToBranch.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.commitsUniqueToBranch.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("branchName", branchName), log.Bool("isDefaultBranch", isDefaultBranch), @@ -306,7 +306,7 @@ func (c *Client) DefaultBranchContains(ctx context.Context, repositoryID int, co // RawContents returns the contents of a file in a particular commit of a repository. 
func (c *Client) RawContents(ctx context.Context, repositoryID int, commit, file string) (_ []byte, err error) { - ctx, endObservation := c.operations.rawContents.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.rawContents.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), log.String("file", file), @@ -344,7 +344,7 @@ func (c *Client) RawContents(ctx context.Context, repositoryID int, commit, file // of git ls-tree. The keys of the resulting map are the input (unsanitized) dirnames, and the value of // that key are the files nested under that directory. func (c *Client) DirectoryChildren(ctx context.Context, repositoryID int, commit string, dirnames []string) (_ map[string][]string, err error) { - ctx, endObservation := c.operations.directoryChildren.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.directoryChildren.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), }}) @@ -378,7 +378,7 @@ func (c *Client) DirectoryChildren(ctx context.Context, repositoryID int, commit // FileExists determines whether a file exists in a particular commit of a repository. func (c *Client) FileExists(ctx context.Context, repositoryID int, commit, file string) (_ bool, err error) { - ctx, endObservation := c.operations.fileExists.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.fileExists.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), log.String("file", file), @@ -409,7 +409,7 @@ func (c *Client) FileExists(ctx context.Context, repositoryID int, commit, file // ListFiles returns a list of root-relative file paths matching the given pattern in a particular // commit of a repository. 
func (c *Client) ListFiles(ctx context.Context, repositoryID int, commit string, pattern *regexp.Regexp) (_ []string, err error) { - ctx, endObservation := c.operations.listFiles.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.listFiles.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), log.String("pattern", pattern.String()), @@ -444,7 +444,7 @@ func (c *Client) ListFiles(ctx context.Context, repositoryID int, commit string, // ResolveRevision returns the absolute commit for a commit-ish spec. func (c *Client) ResolveRevision(ctx context.Context, repositoryID int, versionString string) (commitID api.CommitID, err error) { - ctx, endObservation := c.operations.resolveRevision.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := c.operations.resolveRevision.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("versionString", versionString), }}) diff --git a/enterprise/internal/codeintel/repoupdater/client.go b/enterprise/internal/codeintel/repoupdater/client.go index 39f8c9b22ed..1155df40a6b 100644 --- a/enterprise/internal/codeintel/repoupdater/client.go +++ b/enterprise/internal/codeintel/repoupdater/client.go @@ -22,7 +22,7 @@ func New(observationContext *observation.Context) *Client { } func (c *Client) RepoLookup(ctx context.Context, name api.RepoName) (repo *protocol.RepoInfo, err error) { - ctx, endObservation := c.operations.repoLookup.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) + ctx, _, endObservation := c.operations.repoLookup.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) defer func() { var logFields []log.Field if repo != nil { @@ -40,7 +40,7 @@ func (c *Client) RepoLookup(ctx context.Context, name api.RepoName) (repo *proto } func (c *Client) EnqueueRepoUpdate(ctx context.Context, name api.RepoName) (resp 
*protocol.RepoUpdateResponse, err error) { - ctx, endObservation := c.operations.enqueueRepoUpdate.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) + ctx, _, endObservation := c.operations.enqueueRepoUpdate.With(ctx, &err, observation.Args{LogFields: []log.Field{}}) defer func() { var logFields []log.Field if resp != nil { diff --git a/enterprise/internal/codeintel/stores/dbstore/commits.go b/enterprise/internal/codeintel/stores/dbstore/commits.go index 0bea25041a0..73729a58086 100644 --- a/enterprise/internal/codeintel/stores/dbstore/commits.go +++ b/enterprise/internal/codeintel/stores/dbstore/commits.go @@ -46,7 +46,7 @@ func scanCommitGraphView(rows *sql.Rows, queryErr error) (_ *commitgraph.CommitG // HasRepository determines if there is LSIF data for the given repository. func (s *Store) HasRepository(ctx context.Context, repositoryID int) (_ bool, err error) { - ctx, endObservation := s.operations.hasRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.hasRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -62,7 +62,7 @@ SELECT 1 FROM lsif_uploads WHERE state NOT IN ('deleted', 'deleting') AND reposi // HasCommit determines if the given commit is known for the given repository. func (s *Store) HasCommit(ctx context.Context, repositoryID int, commit string) (_ bool, err error) { - ctx, endObservation := s.operations.hasCommit.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.hasCommit.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), }}) @@ -89,7 +89,7 @@ SELECT // MarkRepositoryAsDirty marks the given repository's commit graph as out of date. 
func (s *Store) MarkRepositoryAsDirty(ctx context.Context, repositoryID int) (err error) { - ctx, endObservation := s.operations.markRepositoryAsDirty.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markRepositoryAsDirty.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -127,7 +127,7 @@ func scanIntPairs(rows *sql.Rows, queryErr error) (_ map[int]int, err error) { // DirtyRepositories returns a map from repository identifiers to a dirty token for each repository whose commit // graph is out of date. This token should be passed to CalculateVisibleUploads in order to unmark the repository. func (s *Store) DirtyRepositories(ctx context.Context) (_ map[int]int, err error) { - ctx, trace, endObservation := s.operations.dirtyRepositories.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.dirtyRepositories.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) repositories, err := scanIntPairs(s.Store.Query(ctx, sqlf.Sprintf(dirtyRepositoriesQuery))) @@ -152,7 +152,7 @@ SELECT ldr.repository_id, ldr.dirty_token // only repositories that would be returned by DirtyRepositories. This method returns a duration of zero if there // are no stale repositories. func (s *Store) MaxStaleAge(ctx context.Context) (_ time.Duration, err error) { - ctx, endObservation := s.operations.maxStaleAge.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.maxStaleAge.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) ageSeconds, ok, err := basestore.ScanFirstInt(s.Store.Query(ctx, sqlf.Sprintf(maxStaleAgeQuery))) @@ -180,7 +180,7 @@ SELECT EXTRACT(EPOCH FROM NOW() - ldr.updated_at)::integer AS age // CommitsVisibleToUpload returns the set of commits for which the given upload can answer code intelligence queries. 
// To paginate, supply the token returned from this method to the invocation for the next page. func (s *Store) CommitsVisibleToUpload(ctx context.Context, uploadID, limit int, token *string) (_ []string, nextToken *string, err error) { - ctx, endObservation := s.operations.commitsVisibleToUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.commitsVisibleToUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("uploadID", uploadID), log.Int("limit", limit), }}) @@ -235,7 +235,7 @@ LIMIT %s // CommitGraphMetadata returns whether or not the commit graph for the given repository is stale, along with the date of // the most recent commit graph refresh for the given repository. func (s *Store) CommitGraphMetadata(ctx context.Context, repositoryID int) (stale bool, updatedAt *time.Time, err error) { - ctx, endObservation := s.operations.commitGraphMetadata.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.commitGraphMetadata.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -335,7 +335,7 @@ func (s *Store) calculateVisibleUploadsInternal( dirtyToken int, now *sqlf.Query, ) (err error) { - ctx, trace, endObservation := s.operations.calculateVisibleUploads.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := s.operations.calculateVisibleUploads.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.Int("numCommitGraphKeys", len(commitGraph.Order())), @@ -492,7 +492,7 @@ WHERE repository_id = %s // caused massive table bloat on some instances. Storing into a temporary table and then inserting/updating/deleting // records into the persisted table minimizes the number of tuples we need to touch and drastically reduces table bloat. 
func (s *Store) writeVisibleUploads(ctx context.Context, sanitizedInput *sanitizedCommitInput) (err error) { - ctx, trace, endObservation := s.operations.writeVisibleUploads.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.writeVisibleUploads.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) if err := s.createTemporaryNearestUploadsTables(ctx); err != nil { @@ -601,7 +601,7 @@ CREATE TEMPORARY TABLE t_lsif_uploads_visible_at_tip ( // persistNearestUploads modifies the lsif_nearest_uploads table so that it has same data // as t_lsif_nearest_uploads for the given repository. func (s *Store) persistNearestUploads(ctx context.Context, repositoryID int) (err error) { - ctx, trace, endObservation := s.operations.persistNearestUploads.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.persistNearestUploads.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) rowsInserted, rowsUpdated, rowsDeleted, err := s.bulkTransfer( @@ -652,7 +652,7 @@ WHERE // persistNearestUploadsLinks modifies the lsif_nearest_uploads_links table so that it has same // data as t_lsif_nearest_uploads_links for the given repository. func (s *Store) persistNearestUploadsLinks(ctx context.Context, repositoryID int) (err error) { - ctx, trace, endObservation := s.operations.persistNearestUploadsLinks.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.persistNearestUploadsLinks.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) rowsInserted, rowsUpdated, rowsDeleted, err := s.bulkTransfer( @@ -704,7 +704,7 @@ WHERE // persistUploadsVisibleAtTip modifies the lsif_uploads_visible_at_tip table so that it has same // data as t_lsif_uploads_visible_at_tip for the given repository. 
func (s *Store) persistUploadsVisibleAtTip(ctx context.Context, repositoryID int) (err error) { - ctx, trace, endObservation := s.operations.persistUploadsVisibleAtTip.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.persistUploadsVisibleAtTip.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) rowsInserted, rowsUpdated, rowsDeleted, err := s.bulkTransfer( diff --git a/enterprise/internal/codeintel/stores/dbstore/configuration.go b/enterprise/internal/codeintel/stores/dbstore/configuration.go index 5d8bc74e60b..f1ce17ca820 100644 --- a/enterprise/internal/codeintel/stores/dbstore/configuration.go +++ b/enterprise/internal/codeintel/stores/dbstore/configuration.go @@ -54,7 +54,7 @@ func scanFirstIndexConfiguration(rows *sql.Rows, err error) (IndexConfiguration, // GetIndexConfigurationByRepositoryID returns the index configuration for a repository. func (s *Store) GetIndexConfigurationByRepositoryID(ctx context.Context, repositoryID int) (_ IndexConfiguration, _ bool, err error) { - ctx, endObservation := s.operations.getIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -73,7 +73,7 @@ FROM lsif_index_configuration c WHERE c.repository_id = %s // UpdateIndexConfigurationByRepositoryID updates the index configuration for a repository. 
func (s *Store) UpdateIndexConfigurationByRepositoryID(ctx context.Context, repositoryID int, data []byte) (err error) { - ctx, endObservation := s.operations.updateIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/stores/dbstore/configuration_policies.go b/enterprise/internal/codeintel/stores/dbstore/configuration_policies.go index dc2b05736ae..68cdda39a97 100644 --- a/enterprise/internal/codeintel/stores/dbstore/configuration_policies.go +++ b/enterprise/internal/codeintel/stores/dbstore/configuration_policies.go @@ -127,7 +127,7 @@ type GetConfigurationPoliciesOptions struct { // If a repository identifier is supplied (is non-zero), then only the configuration policies that apply // to repository are returned. If repository is not supplied, then all policies may be returned. func (s *Store) GetConfigurationPolicies(ctx context.Context, opts GetConfigurationPoliciesOptions) (_ []ConfigurationPolicy, totalCount int, err error) { - ctx, trace, endObservation := s.operations.getConfigurationPolicies.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.getConfigurationPolicies.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", opts.RepositoryID), log.String("term", opts.Term), log.Bool("forDataRetention", opts.ForDataRetention), @@ -242,7 +242,7 @@ func makeConfigurationPolicySearchCondition(term string) *sqlf.Query { // GetConfigurationPolicyByID retrieves the configuration policy with the given identifier. 
func (s *Store) GetConfigurationPolicyByID(ctx context.Context, id int) (_ ConfigurationPolicy, _ bool, err error) { - ctx, endObservation := s.operations.getConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -281,7 +281,7 @@ WHERE p.id = %s AND (p.repository_id IS NULL OR (%s)) // CreateConfigurationPolicy creates a configuration policy with the given fields (ignoring ID). The hydrated // configuration policy record is returned. func (s *Store) CreateConfigurationPolicy(ctx context.Context, configurationPolicy ConfigurationPolicy) (_ ConfigurationPolicy, err error) { - ctx, endObservation := s.operations.createConfigurationPolicy.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createConfigurationPolicy.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) var retentionDurationHours *int @@ -361,7 +361,7 @@ var ( // UpdateConfigurationPolicy updates the fields of the configuration policy record with the given identifier. func (s *Store) UpdateConfigurationPolicy(ctx context.Context, policy ConfigurationPolicy) (err error) { - ctx, endObservation := s.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", policy.ID), }}) defer endObservation(1, observation.Args{}) @@ -460,7 +460,7 @@ WHERE id = %s // DeleteConfigurationPolicyByID deletes the configuration policy with the given identifier. 
func (s *Store) DeleteConfigurationPolicyByID(ctx context.Context, id int) (err error) { - ctx, endObservation := s.operations.deleteConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -497,7 +497,7 @@ SELECT protected FROM candidate // SelectPoliciesForRepositoryMembershipUpdate returns a slice of configuration policies that should be considered // for repository membership updates. Configuration policies are returned in the order of least recently updated. func (s *Store) SelectPoliciesForRepositoryMembershipUpdate(ctx context.Context, batchSize int) (configurationPolicies []ConfigurationPolicy, err error) { - ctx, trace, endObservation := s.operations.selectPoliciesForRepositoryMembershipUpdate.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.selectPoliciesForRepositoryMembershipUpdate.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) configurationPolicies, err = scanConfigurationPolicies(s.Store.Query(ctx, sqlf.Sprintf(selectPoliciesForRepositoryMembershipUpdate, batchSize, timeutil.Now()))) diff --git a/enterprise/internal/codeintel/stores/dbstore/dependency_index.go b/enterprise/internal/codeintel/stores/dbstore/dependency_index.go index 68abb44050e..f373f874fbe 100644 --- a/enterprise/internal/codeintel/stores/dbstore/dependency_index.go +++ b/enterprise/internal/codeintel/stores/dbstore/dependency_index.go @@ -177,7 +177,7 @@ func scanFirstDependencyIndexingJobRecord(rows *sql.Rows, err error) (workerutil // InsertDependencySyncingJob inserts a new dependency syncing job and returns its identifier. 
func (s *Store) InsertDependencySyncingJob(ctx context.Context, uploadID int) (id int, err error) { - ctx, endObservation := s.operations.insertDependencySyncingJob.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.insertDependencySyncingJob.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("id", id), @@ -195,7 +195,7 @@ RETURNING id ` func (s *Store) InsertCloneableDependencyRepo(ctx context.Context, dependency precise.Package) (new bool, err error) { - ctx, endObservation := s.operations.insertCloneableDependencyRepo.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.insertCloneableDependencyRepo.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Bool("new", new), @@ -216,7 +216,7 @@ RETURNING 1 ` func (s *Store) InsertDependencyIndexingJob(ctx context.Context, uploadID int, externalServiceKind string, syncTime time.Time) (id int, err error) { - ctx, endObservation := s.operations.insertDependencyIndexingJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.insertDependencyIndexingJob.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("uploadId", uploadID), log.String("extSvcKind", externalServiceKind), }}) diff --git a/enterprise/internal/codeintel/stores/dbstore/dumps.go b/enterprise/internal/codeintel/stores/dbstore/dumps.go index 94f7ad3156e..bdaa330a8ff 100644 --- a/enterprise/internal/codeintel/stores/dbstore/dumps.go +++ b/enterprise/internal/codeintel/stores/dbstore/dumps.go @@ -77,7 +77,7 @@ func scanDumps(rows *sql.Rows, queryErr error) (_ []Dump, err error) { // GetDumpsByIDs returns a set of dumps by identifiers. 
func (s *Store) GetDumpsByIDs(ctx context.Context, ids []int) (_ []Dump, err error) { - ctx, trace, endObservation := s.operations.getDumpsByIDs.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.getDumpsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numIDs", len(ids)), log.String("ids", intsToString(ids)), }}) @@ -146,7 +146,7 @@ FROM lsif_dumps_with_repository_name u WHERE u.id IN (%s) // splits the repository into multiple dumps. For this reason, the returned dumps are always sorted in most-recently-finished order to // prevent returning data from stale dumps. func (s *Store) FindClosestDumps(ctx context.Context, repositoryID int, commit, path string, rootMustEnclosePath bool, indexer string) (_ []Dump, err error) { - ctx, trace, endObservation := s.operations.findClosestDumps.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := s.operations.findClosestDumps.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), @@ -200,7 +200,7 @@ ORDER BY u.finished_at DESC // FindClosestDumpsFromGraphFragment returns the set of dumps that can most accurately answer queries for the given repository, commit, // path, and optional indexer by only considering the given fragment of the full git graph. See FindClosestDumps for additional details. 
func (s *Store) FindClosestDumpsFromGraphFragment(ctx context.Context, repositoryID int, commit, path string, rootMustEnclosePath bool, indexer string, commitGraph *gitdomain.CommitGraph) (_ []Dump, err error) { - ctx, trace, endObservation := s.operations.findClosestDumpsFromGraphFragment.WithAndLogger(ctx, &err, observation.Args{ + ctx, trace, endObservation := s.operations.findClosestDumpsFromGraphFragment.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), @@ -372,7 +372,7 @@ func makeFindClosestDumpConditions(path string, rootMustEnclosePath bool, indexe // commit, root, and indexer. This is necessary to perform during conversions before changing // the state of a processing upload to completed as there is a unique index on these four columns. func (s *Store) DeleteOverlappingDumps(ctx context.Context, repositoryID int, commit, root, indexer string) (err error) { - ctx, trace, endObservation := s.operations.deleteOverlappingDumps.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.deleteOverlappingDumps.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), log.String("root", root), diff --git a/enterprise/internal/codeintel/stores/dbstore/indexes.go b/enterprise/internal/codeintel/stores/dbstore/indexes.go index 1813bdd1801..410275204c5 100644 --- a/enterprise/internal/codeintel/stores/dbstore/indexes.go +++ b/enterprise/internal/codeintel/stores/dbstore/indexes.go @@ -112,7 +112,7 @@ var ScanFirstIndexRecord = scanFirstIndexRecord // GetIndexByID returns an index by its identifier and boolean flag indicating its existence. 
func (s *Store) GetIndexByID(ctx context.Context, id int) (_ Index, _ bool, err error) { - ctx, endObservation := s.operations.getIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -173,7 +173,7 @@ WHERE u.id = %s AND %s // GetIndexesByIDs returns an index for each of the given identifiers. Not all given ids will necessarily // have a corresponding element in the returned list. func (s *Store) GetIndexesByIDs(ctx context.Context, ids ...int) (_ []Index, err error) { - ctx, endObservation := s.operations.getIndexesByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getIndexesByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("ids", intsToString(ids)), }}) defer endObservation(1, observation.Args{}) @@ -237,7 +237,7 @@ type GetIndexesOptions struct { // GetIndexes returns a list of indexes and the total count of records matching the given conditions. func (s *Store) GetIndexes(ctx context.Context, opts GetIndexesOptions) (_ []Index, _ int, err error) { - ctx, trace, endObservation := s.operations.getIndexes.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.getIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", opts.RepositoryID), log.String("state", opts.State), log.String("term", opts.Term), @@ -347,7 +347,7 @@ func makeIndexSearchCondition(term string) *sqlf.Query { // IsQueued returns true if there is an index or an upload for the repository and commit. 
func (s *Store) IsQueued(ctx context.Context, repositoryID int, commit string) (_ bool, err error) { - ctx, endObservation := s.operations.isQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.isQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), }}) @@ -368,7 +368,7 @@ SELECT COUNT(*) WHERE EXISTS ( // InsertIndexes inserts a new index and returns the hydrated index models. func (s *Store) InsertIndexes(ctx context.Context, indexes []Index) (_ []Index, err error) { - ctx, endObservation := s.operations.insertIndex.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.insertIndex.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("numIndexes", len(indexes)), @@ -465,7 +465,7 @@ var IndexColumnsWithNullRank = indexColumnsWithNullRank // DeleteIndexByID deletes an index by its identifier. func (s *Store) DeleteIndexByID(ctx context.Context, id int) (_ bool, err error) { - ctx, endObservation := s.operations.deleteIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -489,7 +489,7 @@ DELETE FROM lsif_indexes WHERE id = %s RETURNING repository_id // DeletedRepositoryGracePeriod ago. This returns the repository identifier mapped to the number of indexes // that were removed for that repository. 
func (s *Store) DeleteIndexesWithoutRepository(ctx context.Context, now time.Time) (_ map[int]int, err error) { - ctx, trace, endObservation := s.operations.deleteIndexesWithoutRepository.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.deleteIndexesWithoutRepository.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // TODO(efritz) - this would benefit from an index on repository_id. We currently have @@ -536,7 +536,7 @@ SELECT d.repository_id, COUNT(*) FROM deleted d GROUP BY d.repository_id // LastIndexScanForRepository returns the last timestamp, if any, that the repository with the given // identifier was considered for auto-indexing scheduling. func (s *Store) LastIndexScanForRepository(ctx context.Context, repositoryID int) (_ *time.Time, err error) { - ctx, endObservation := s.operations.lastIndexScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.lastIndexScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -565,7 +565,7 @@ type IndexesWithRepositoryNamespace struct { // include the set of unprocessed records as well as the latest finished record. These values allow users to // quickly determine if a particular root/indexer pair os up-to-date or having issues processing. 
func (s *Store) RecentIndexesSummary(ctx context.Context, repositoryID int) (summaries []IndexesWithRepositoryNamespace, err error) { - ctx, logger, endObservation := s.operations.recentIndexesSummary.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, logger, endObservation := s.operations.recentIndexesSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/stores/dbstore/janitor.go b/enterprise/internal/codeintel/stores/dbstore/janitor.go index 54c898aa2fa..4dd83fcb7dd 100644 --- a/enterprise/internal/codeintel/stores/dbstore/janitor.go +++ b/enterprise/internal/codeintel/stores/dbstore/janitor.go @@ -62,7 +62,7 @@ func ScanSourcedCommits(rows *sql.Rows, queryErr error) (_ []SourcedCommits, err // paths and clean up that occupied (but useless) space. The output is of this method is // ordered by repository ID then by commit. func (s *Store) StaleSourcedCommits(ctx context.Context, minimumTimeSinceLastCheck time.Duration, limit int, now time.Time) (_ []SourcedCommits, err error) { - ctx, trace, endObservation := s.operations.staleSourcedCommits.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.staleSourcedCommits.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) now = now.UTC() @@ -123,7 +123,7 @@ GROUP BY repository_id, commit // to the given repository identifier and commit. This method returns the count of upload and index records // modified, respectively. 
func (s *Store) UpdateSourcedCommits(ctx context.Context, repositoryID int, commit string, now time.Time) (uploadsUpdated int, indexesUpdated int, err error) { - ctx, trace, endObservation := s.operations.updateSourcedCommits.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.updateSourcedCommits.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), }}) @@ -207,7 +207,7 @@ func (s *Store) DeleteSourcedCommits(ctx context.Context, repositoryID int, comm indexesDeleted int, err error, ) { - ctx, trace, endObservation := s.operations.deleteSourcedCommits.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.deleteSourcedCommits.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), }}) @@ -301,7 +301,7 @@ func scanTripleOfCounts(rows *sql.Rows, queryErr error) (value1, value2, value3 // DeleteOldAuditLogs removes lsif_upload audit log records older than the given max age. 
func (s *Store) DeleteOldAuditLogs(ctx context.Context, maxAge time.Duration, now time.Time) (_ int, err error) { - ctx, endObservation := s.operations.deleteOldAuditLogs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.deleteOldAuditLogs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) count, _, err := basestore.ScanFirstInt(s.Store.Query(ctx, sqlf.Sprintf(deleteOldAuditLogsQuery, now, int(maxAge/time.Second)))) diff --git a/enterprise/internal/codeintel/stores/dbstore/packages.go b/enterprise/internal/codeintel/stores/dbstore/packages.go index 135df724154..b31b8bf60e5 100644 --- a/enterprise/internal/codeintel/stores/dbstore/packages.go +++ b/enterprise/internal/codeintel/stores/dbstore/packages.go @@ -13,7 +13,7 @@ import ( // UpdatePackages upserts package data tied to the given upload. func (s *Store) UpdatePackages(ctx context.Context, dumpID int, packages []precise.Package) (err error) { - ctx, endObservation := s.operations.updatePackages.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updatePackages.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numPackages", len(packages)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/stores/dbstore/references.go b/enterprise/internal/codeintel/stores/dbstore/references.go index b281bf7d90f..cbb4f3f5d8e 100644 --- a/enterprise/internal/codeintel/stores/dbstore/references.go +++ b/enterprise/internal/codeintel/stores/dbstore/references.go @@ -13,7 +13,7 @@ import ( // UpdatePackageReferences inserts reference data tied to the given upload. 
func (s *Store) UpdatePackageReferences(ctx context.Context, dumpID int, references []precise.PackageReference) (err error) { - ctx, endObservation := s.operations.updatePackageReferences.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updatePackageReferences.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numReferences", len(references)), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/stores/dbstore/repo.go b/enterprise/internal/codeintel/stores/dbstore/repo.go index 690e705b276..1c75a2590b7 100644 --- a/enterprise/internal/codeintel/stores/dbstore/repo.go +++ b/enterprise/internal/codeintel/stores/dbstore/repo.go @@ -20,7 +20,7 @@ var ErrUnknownRepository = errors.New("unknown repository") // RepoName returns the name for the repo with the given identifier. func (s *Store) RepoName(ctx context.Context, repositoryID int) (_ string, err error) { - ctx, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -65,7 +65,7 @@ func scanRepoNames(rows *sql.Rows, queryErr error) (_ map[int]string, err error) // RepoNames returns a map from repository id to names. 
func (s *Store) RepoNames(ctx context.Context, repositoryIDs ...int) (_ map[int]string, err error) { - ctx, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numRepositories", len(repositoryIDs)), }}) defer endObservation(1, observation.Args{}) @@ -81,7 +81,7 @@ SELECT id, name FROM repo WHERE id = ANY(%s) // RepoIDsByGlobPatterns returns a page of repository identifiers and a total count of repositories matching // one of the given patterns. func (s *Store) RepoIDsByGlobPatterns(ctx context.Context, patterns []string, limit, offset int) (_ []int, _ int, err error) { - ctx, endObservation := s.operations.repoIDsByGlobPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.repoIDsByGlobPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("patterns", strings.Join(patterns, ", ")), log.Int("limit", limit), log.Int("offset", offset), @@ -152,7 +152,7 @@ LIMIT %s OFFSET %s // matches exceeds the given limit (if supplied), then only top ranked repositories by star count // will be associated to the policy in the database and the remainder will be dropped. 
func (s *Store) UpdateReposMatchingPatterns(ctx context.Context, patterns []string, policyID int, repositoryMatchLimit *int) (err error) { - ctx, endObservation := s.operations.updateReposMatchingPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateReposMatchingPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("pattern", strings.Join(patterns, ",")), log.Int("policyID", policyID), }}) diff --git a/enterprise/internal/codeintel/stores/dbstore/support.go b/enterprise/internal/codeintel/stores/dbstore/support.go index 546e3cf8ef6..d5d15b32a5b 100644 --- a/enterprise/internal/codeintel/stores/dbstore/support.go +++ b/enterprise/internal/codeintel/stores/dbstore/support.go @@ -10,7 +10,7 @@ import ( ) func (s *Store) RequestLanguageSupport(ctx context.Context, userID int, language string) (err error) { - ctx, endObservation := s.operations.requestLanguageSupport.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.requestLanguageSupport.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return s.Exec(ctx, sqlf.Sprintf(requestLanguageSupportQuery, userID, language)) @@ -24,7 +24,7 @@ ON CONFLICT DO NOTHING ` func (s *Store) LanguagesRequestedBy(ctx context.Context, userID int) (_ []string, err error) { - ctx, endObservation := s.operations.languagesRequestedBy.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.languagesRequestedBy.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return basestore.ScanStrings(s.Query(ctx, sqlf.Sprintf(languagesRequestedByQuery, userID))) diff --git a/enterprise/internal/codeintel/stores/dbstore/uploads.go b/enterprise/internal/codeintel/stores/dbstore/uploads.go index 0a6cdfea970..258e729cc6f 100644 --- a/enterprise/internal/codeintel/stores/dbstore/uploads.go +++ b/enterprise/internal/codeintel/stores/dbstore/uploads.go @@ -138,7 +138,7 @@ func 
scanCounts(rows *sql.Rows, queryErr error) (_ map[int]int, err error) { // GetUploadByID returns an upload by its identifier and boolean flag indicating its existence. func (s *Store) GetUploadByID(ctx context.Context, id int) (_ Upload, _ bool, err error) { - ctx, endObservation := s.operations.getUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -195,7 +195,7 @@ const visibleAtTipSubselectQuery = `SELECT 1 FROM lsif_uploads_visible_at_tip uv // GetUploadsByIDs returns an upload for each of the given identifiers. Not all given ids will necessarily // have a corresponding element in the returned list. func (s *Store) GetUploadsByIDs(ctx context.Context, ids ...int) (_ []Upload, err error) { - ctx, endObservation := s.operations.getUploadsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getUploadsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("ids", intsToString(ids)), }}) defer endObservation(1, observation.Args{}) @@ -250,7 +250,7 @@ WHERE u.state != 'deleted' AND u.id IN (%s) AND %s // DeleteUploadsStuckUploading soft deletes any upload record that has been uploading since the given time. 
func (s *Store) DeleteUploadsStuckUploading(ctx context.Context, uploadedBefore time.Time) (_ int, err error) { - ctx, trace, endObservation := s.operations.deleteUploadsStuckUploading.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.deleteUploadsStuckUploading.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("uploadedBefore", uploadedBefore.Format(time.RFC3339)), // TODO - should be a duration }}) defer endObservation(1, observation.Args{}) @@ -309,7 +309,7 @@ type GetUploadsOptions struct { // GetUploads returns a list of uploads and the total count of records matching the given conditions. func (s *Store) GetUploads(ctx context.Context, opts GetUploadsOptions) (_ []Upload, _ int, err error) { - ctx, trace, endObservation := s.operations.getUploads.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.getUploads.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", opts.RepositoryID), log.String("state", opts.State), log.String("term", opts.Term), @@ -587,7 +587,7 @@ func makeStateCondition(state string) *sqlf.Query { // InsertUpload inserts a new upload and returns its identifier. func (s *Store) InsertUpload(ctx context.Context, upload Upload) (id int, err error) { - ctx, endObservation := s.operations.insertUpload.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.insertUpload.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("id", id), @@ -638,7 +638,7 @@ RETURNING id // AddUploadPart adds the part index to the given upload's uploaded parts array. This method is idempotent // (the resulting array is deduplicated on update). 
func (s *Store) AddUploadPart(ctx context.Context, uploadID, partIndex int) (err error) { - ctx, endObservation := s.operations.addUploadPart.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.addUploadPart.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("uploadID", uploadID), log.Int("partIndex", partIndex), }}) @@ -654,7 +654,7 @@ UPDATE lsif_uploads SET uploaded_parts = array(SELECT DISTINCT * FROM unnest(arr // MarkQueued updates the state of the upload to queued and updates the upload size. func (s *Store) MarkQueued(ctx context.Context, id int, uploadSize *int64) (err error) { - ctx, endObservation := s.operations.markQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -674,7 +674,7 @@ WHERE id = %s // MarkFailed updates the state of the upload to failed, increments the num_failures column and sets the finished_at time func (s *Store) MarkFailed(ctx context.Context, id int, reason string) (err error) { - ctx, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -723,7 +723,7 @@ var uploadColumnsWithNullRank = []*sqlf.Query{ // was deleted. The associated repository will be marked as dirty so that its commit graph will be updated in // the background. 
func (s *Store) DeleteUploadByID(ctx context.Context, id int) (_ bool, err error) { - ctx, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -762,7 +762,7 @@ const DeletedRepositoryGracePeriod = time.Minute * 30 // DeletedRepositoryGracePeriod ago. This returns the repository identifier mapped to the number of uploads // that were removed for that repository. func (s *Store) DeleteUploadsWithoutRepository(ctx context.Context, now time.Time) (_ map[int]int, err error) { - ctx, trace, endObservation := s.operations.deleteUploadsWithoutRepository.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.deleteUploadsWithoutRepository.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) repositories, err := scanCounts(s.Store.Query(ctx, sqlf.Sprintf(deleteUploadsWithoutRepositoryQuery, now.UTC(), DeletedRepositoryGracePeriod/time.Second))) @@ -810,7 +810,7 @@ SELECT d.repository_id, COUNT(*) FROM deleted d GROUP BY d.repository_id // HardDeleteUploadByID deletes the upload record with the given identifier. 
func (s *Store) HardDeleteUploadByID(ctx context.Context, ids ...int) (err error) { - ctx, endObservation := s.operations.hardDeleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.hardDeleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numIDs", len(ids)), log.String("ids", intsToString(ids)), }}) @@ -868,7 +868,7 @@ func (s *Store) SelectRepositoriesForIndexScan(ctx context.Context, processDelay } func (s *Store) selectRepositoriesForIndexScan(ctx context.Context, processDelay time.Duration, allowGlobalPolicies bool, repositoryMatchLimit *int, limit int, now time.Time) (_ []int, err error) { - ctx, endObservation := s.operations.selectRepositoriesForIndexScan.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.selectRepositoriesForIndexScan.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Bool("allowGlobalPolicies", allowGlobalPolicies), log.Int("limit", limit), }}) @@ -960,7 +960,7 @@ func (s *Store) SelectRepositoriesForRetentionScan(ctx context.Context, processD } func (s *Store) selectRepositoriesForRetentionScan(ctx context.Context, processDelay time.Duration, limit int, now time.Time) (_ []int, err error) { - ctx, endObservation := s.operations.selectRepositoriesForRetentionScan.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.selectRepositoriesForRetentionScan.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return basestore.ScanInts(s.Query(ctx, sqlf.Sprintf( @@ -1010,7 +1010,7 @@ func (s *Store) UpdateUploadRetention(ctx context.Context, protectedIDs, expired } func (s *Store) updateUploadRetention(ctx context.Context, protectedIDs, expiredIDs []int, now time.Time) (err error) { - ctx, endObservation := s.operations.updateUploadRetention.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := 
s.operations.updateUploadRetention.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numProtectedIDs", len(protectedIDs)), log.String("protectedIDs", intsToString(protectedIDs)), log.Int("numExpiredIDs", len(expiredIDs)), @@ -1088,7 +1088,7 @@ var deltaMap = map[DependencyReferenceCountUpdateType]int{ // To keep reference counts consistent, this method should be called directly after insertion and directly // before deletion of each upload record. func (s *Store) UpdateReferenceCounts(ctx context.Context, ids []int, dependencyUpdateType DependencyReferenceCountUpdateType) (updated int, err error) { - ctx, endObservation := s.operations.updateReferenceCounts.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateReferenceCounts.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numIDs", len(ids)), log.String("ids", intsToString(ids)), log.Int("dependencyUpdateType", int(dependencyUpdateType)), @@ -1284,7 +1284,7 @@ FROM locked_uploads lu WHERE lu.id = u.id // as deleted. The associated repositories will be marked as dirty so that their commit graphs // are updated in the near future. func (s *Store) SoftDeleteExpiredUploads(ctx context.Context) (count int, err error) { - ctx, trace, endObservation := s.operations.softDeleteExpiredUploads.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.softDeleteExpiredUploads.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) tx, err := s.transact(ctx) @@ -1343,7 +1343,7 @@ SELECT u.repository_id, count(*) FROM updated u GROUP BY u.repository_id // GetOldestCommitDate returns the oldest commit date for all uploads for the given repository. If there are no // non-nil values, a false-valued flag is returned. 
func (s *Store) GetOldestCommitDate(ctx context.Context, repositoryID int) (_ time.Time, _ bool, err error) { - ctx, _, endObservation := s.operations.getOldestCommitDate.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.getOldestCommitDate.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -1361,7 +1361,7 @@ SELECT committed_at FROM lsif_uploads WHERE repository_id = %s AND state = 'comp // UpdateCommitedAt updates the commit date for the given repository. func (s *Store) UpdateCommitedAt(ctx context.Context, uploadID int, committedAt time.Time) (err error) { - ctx, _, endObservation := s.operations.updateCommitedAt.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateCommitedAt.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("uploadID", uploadID), }}) defer endObservation(1, observation.Args{}) @@ -1394,7 +1394,7 @@ func nilTimeToString(t *time.Time) string { // LastUploadRetentionScanForRepository returns the last timestamp, if any, that the repository with the // given identifier was considered for upload expiration checks. func (s *Store) LastUploadRetentionScanForRepository(ctx context.Context, repositoryID int) (_ *time.Time, err error) { - ctx, endObservation := s.operations.lastUploadRetentionScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.lastUploadRetentionScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) @@ -1423,7 +1423,7 @@ type UploadsWithRepositoryNamespace struct { // include the set of unprocessed records as well as the latest finished record. 
These values allow users to // quickly determine if a particular root/indexer pair is up-to-date or having issues processing. func (s *Store) RecentUploadsSummary(ctx context.Context, repositoryID int) (upload []UploadsWithRepositoryNamespace, err error) { - ctx, logger, endObservation := s.operations.recentUploadsSummary.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, logger, endObservation := s.operations.recentUploadsSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/stores/dbstore/xrepo.go b/enterprise/internal/codeintel/stores/dbstore/xrepo.go index 121fc5f4245..302834089da 100644 --- a/enterprise/internal/codeintel/stores/dbstore/xrepo.go +++ b/enterprise/internal/codeintel/stores/dbstore/xrepo.go @@ -22,7 +22,7 @@ var DefinitionDumpsLimit, _ = strconv.ParseInt(env.Get("PRECISE_CODE_INTEL_DEFIN // DefinitionDumps returns the set of dumps that define at least one of the given monikers. func (s *Store) DefinitionDumps(ctx context.Context, monikers []precise.QualifiedMonikerData) (_ []Dump, err error) { - ctx, trace, endObservation := s.operations.definitionDumps.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.definitionDumps.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numMonikers", len(monikers)), log.String("monikers", monikersToString(monikers)), }}) @@ -127,7 +127,7 @@ rank() OVER ( // it can be seen from the given index; otherwise, an index is visible if it can be seen from the tip of // the default branch of its own repository. 
func (s *Store) ReferenceIDsAndFilters(ctx context.Context, repositoryID int, commit string, monikers []precise.QualifiedMonikerData, limit, offset int) (_ PackageReferenceScanner, _ int, err error) { - ctx, trace, endObservation := s.operations.referenceIDsAndFilters.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.referenceIDsAndFilters.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("repositoryID", repositoryID), log.String("commit", commit), log.Int("numMonikers", len(monikers)), @@ -238,7 +238,7 @@ func monikersToString(vs []precise.QualifiedMonikerData) string { // scanner will return nulls for the Filter field as it's expected to be unused (and rather heavy) by // callers. func (s *Store) ReferencesForUpload(ctx context.Context, uploadID int) (_ PackageReferenceScanner, err error) { - ctx, endObservation := s.operations.referencesForUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.referencesForUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("uploadID", uploadID), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/stores/lsifstore/clear.go b/enterprise/internal/codeintel/stores/lsifstore/clear.go index 765992b70f5..f0763d6295b 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/clear.go +++ b/enterprise/internal/codeintel/stores/lsifstore/clear.go @@ -26,7 +26,7 @@ var tableNames = []string{ } func (s *Store) Clear(ctx context.Context, bundleIDs ...int) (err error) { - ctx, trace, endObservation := s.operations.clear.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.clear.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numBundleIDs", len(bundleIDs)), log.String("bundleIDs", intsToString(bundleIDs)), }}) diff --git a/enterprise/internal/codeintel/stores/lsifstore/data_write.go 
b/enterprise/internal/codeintel/stores/lsifstore/data_write.go index 18299d95a6c..f559a6afde2 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/data_write.go +++ b/enterprise/internal/codeintel/stores/lsifstore/data_write.go @@ -28,7 +28,7 @@ const CurrentImplementationsSchemaVersion = 2 // WriteMeta is called (transactionally) from the precise-code-intel-worker. func (s *Store) WriteMeta(ctx context.Context, bundleID int, meta precise.MetaData) (err error) { - ctx, endObservation := s.operations.writeMeta.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.writeMeta.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) @@ -38,7 +38,7 @@ func (s *Store) WriteMeta(ctx context.Context, bundleID int, meta precise.MetaDa // WriteDocuments is called (transactionally) from the precise-code-intel-worker. func (s *Store) WriteDocuments(ctx context.Context, bundleID int, documents chan precise.KeyedDocumentData) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeDocuments.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeDocuments.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) @@ -128,7 +128,7 @@ FROM t_lsif_data_documents source // WriteResultChunks is called (transactionally) from the precise-code-intel-worker. 
func (s *Store) WriteResultChunks(ctx context.Context, bundleID int, resultChunks chan precise.IndexedResultChunkData) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeResultChunks.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeResultChunks.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) @@ -195,7 +195,7 @@ FROM t_lsif_data_result_chunks source // WriteDefinitions is called (transactionally) from the precise-code-intel-worker. func (s *Store) WriteDefinitions(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeDefinitions.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeDefinitions.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) @@ -205,7 +205,7 @@ func (s *Store) WriteDefinitions(ctx context.Context, bundleID int, monikerLocat // WriteReferences is called (transactionally) from the precise-code-intel-worker. func (s *Store) WriteReferences(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeReferences.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeReferences.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) @@ -215,7 +215,7 @@ func (s *Store) WriteReferences(ctx context.Context, bundleID int, monikerLocati // WriteImplementations is called (transactionally) from the precise-code-intel-worker. 
func (s *Store) WriteImplementations(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeImplementations.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeImplementations.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) diff --git a/enterprise/internal/codeintel/stores/lsifstore/data_write_documentation.go b/enterprise/internal/codeintel/stores/lsifstore/data_write_documentation.go index f01cee634c4..3091bd6b5b0 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/data_write_documentation.go +++ b/enterprise/internal/codeintel/stores/lsifstore/data_write_documentation.go @@ -41,7 +41,7 @@ func (s *Store) WriteDocumentationPages( repositoryNameID int, languageNameID int, ) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeDocumentationPages.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeDocumentationPages.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", upload.ID), log.String("repo", upload.RepositoryName), log.String("commit", upload.Commit), @@ -124,7 +124,7 @@ FROM t_lsif_data_documentation_pages source // WriteDocumentationPathInfo is called (transactionally) from the precise-code-intel-worker. 
func (s *Store) WriteDocumentationPathInfo(ctx context.Context, bundleID int, documentationPathInfo chan *precise.DocumentationPathInfoData) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeDocumentationPathInfo.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeDocumentationPathInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) @@ -190,7 +190,7 @@ FROM t_lsif_data_documentation_path_info source // WriteDocumentationMappings is called (transactionally) from the precise-code-intel-worker. func (s *Store) WriteDocumentationMappings(ctx context.Context, bundleID int, mappings chan precise.DocumentationMapping) (count uint32, err error) { - ctx, trace, endObservation := s.operations.writeDocumentationMappings.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.writeDocumentationMappings.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), }}) defer endObservation(1, observation.Args{}) @@ -256,7 +256,7 @@ FROM t_lsif_data_documentation_mappings source // outside of a long-running transaction to reduce lock contention between shared rows being held longer // than necessary. 
func (s *Store) WriteDocumentationSearchPrework(ctx context.Context, upload dbstore.Upload, repo *types.Repo, isDefaultBranch bool) (_ int, _ int, err error) { - ctx, endObservation := s.operations.writeDocumentationSearchPrework.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.writeDocumentationSearchPrework.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", upload.RepositoryName), log.Int("bundleID", upload.ID), }}) @@ -304,7 +304,7 @@ func (s *Store) WriteDocumentationSearch( repositoryNameID int, languageNameID int, ) (err error) { - ctx, endObservation := s.operations.writeDocumentationSearch.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.writeDocumentationSearch.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", upload.RepositoryName), log.Int("bundleID", upload.ID), log.Int("pages", len(pages)), diff --git a/enterprise/internal/codeintel/stores/lsifstore/diagnostics.go b/enterprise/internal/codeintel/stores/lsifstore/diagnostics.go index 83f12f9d210..bbc78799f90 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/diagnostics.go +++ b/enterprise/internal/codeintel/stores/lsifstore/diagnostics.go @@ -12,7 +12,7 @@ import ( // Diagnostics returns the diagnostics for the documents that have the given path prefix. This method // also returns the size of the complete result set to aid in pagination. 
func (s *Store) Diagnostics(ctx context.Context, bundleID int, prefix string, limit, offset int) (_ []Diagnostic, _ int, err error) { - ctx, trace, endObservation := s.operations.diagnostics.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.diagnostics.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("prefix", prefix), log.Int("limit", limit), diff --git a/enterprise/internal/codeintel/stores/lsifstore/documentation.go b/enterprise/internal/codeintel/stores/lsifstore/documentation.go index 0c685b9b1ec..659ed5bd992 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/documentation.go +++ b/enterprise/internal/codeintel/stores/lsifstore/documentation.go @@ -22,7 +22,7 @@ import ( // DocumentationPage returns the documentation page with the given PathID. func (s *Store) DocumentationPage(ctx context.Context, bundleID int, pathID string) (_ *precise.DocumentationPageData, err error) { - ctx, _, endObservation := s.operations.documentationPage.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.documentationPage.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("pathID", pathID), }}) @@ -81,7 +81,7 @@ func (s *Store) scanFirstDocumentationPageData(rows *sql.Rows, queryErr error) ( // DocumentationPathInfo returns info describing what is at the given pathID. 
func (s *Store) DocumentationPathInfo(ctx context.Context, bundleID int, pathID string) (_ *precise.DocumentationPathInfoData, err error) { - ctx, _, endObservation := s.operations.documentationPathInfo.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.documentationPathInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("pathID", pathID), }}) @@ -141,7 +141,7 @@ func (s *Store) scanFirstDocumentationPathInfoData(rows *sql.Rows, queryErr erro // documentationIDsToPathIDs returns a mapping of the given documentationResult IDs to their // associative path IDs. Empty result IDs ("") are ignored. func (s *Store) documentationIDsToPathIDs(ctx context.Context, bundleID int, ids []precise.ID) (_ map[precise.ID]string, err error) { - ctx, _, endObservation := s.operations.documentationIDsToPathIDs.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.documentationIDsToPathIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("ids", fmt.Sprint(ids)), }}) @@ -202,7 +202,7 @@ WHERE ` func (s *Store) documentationPathIDToID(ctx context.Context, bundleID int, pathID string) (_ precise.ID, err error) { - ctx, _, endObservation := s.operations.documentationPathIDToID.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.documentationPathIDToID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("pathID", pathID), }}) @@ -252,7 +252,7 @@ func (s *Store) scanFirstDocumentationResultID(rows *sql.Rows, queryErr error) ( // e.g. the file where the documented symbol is located - if the path ID is describing such a // symbol, or nil otherwise. 
func (s *Store) documentationPathIDToFilePath(ctx context.Context, bundleID int, pathID string) (_ *string, err error) { - ctx, _, endObservation := s.operations.documentationPathIDToFilePath.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.documentationPathIDToFilePath.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("pathID", pathID), }}) @@ -312,7 +312,7 @@ func (s *Store) documentationDefinitions( limit, offset int, ) (_ []Location, _ int, err error) { - ctx, trace, endObservation := operation.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := operation.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("resultID", string(resultID)), }}) @@ -371,7 +371,7 @@ func (s *Store) documentationDefinitions( // enforce that the user only has the ability to view results that are from repositories they have // access to. func (s *Store) documentationSearchRepoNameIDs(ctx context.Context, tableSuffix string, possibleRepos []string) (_ []int64, err error) { - ctx, _, endObservation := s.operations.documentationSearchRepoNameIDs.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.documentationSearchRepoNameIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("table", tableSuffix), log.String("possibleRepos", fmt.Sprint(possibleRepos)), }}) @@ -408,7 +408,7 @@ var debugAPIDocsSearchCandidates, _ = strconv.ParseInt(env.Get("DEBUG_API_DOCS_S // enforce that the user only has the ability to view results that are from repositories they have // access to. 
func (s *Store) DocumentationSearch(ctx context.Context, tableSuffix, query string, repos []string) (_ []precise.DocumentationSearchResult, err error) { - ctx, _, endObservation := s.operations.documentationSearch.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.documentationSearch.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("table", tableSuffix), log.String("query", query), log.String("repos", fmt.Sprint(repos)), diff --git a/enterprise/internal/codeintel/stores/lsifstore/documentation_janitor.go b/enterprise/internal/codeintel/stores/lsifstore/documentation_janitor.go index 28b381da805..ccd122cfc99 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/documentation_janitor.go +++ b/enterprise/internal/codeintel/stores/lsifstore/documentation_janitor.go @@ -21,7 +21,7 @@ func (s *Store) DeleteOldPrivateSearchRecords(ctx context.Context, minimumTimeSi } func (s *Store) deleteOldSearchRecords(ctx context.Context, minimumTimeSinceLastCheck time.Duration, limit int, tableSuffix string, now time.Time) (_ int, err error) { - ctx, endObservation := s.operations.deleteOldSearchRecords.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.deleteOldSearchRecords.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) now = now.UTC() diff --git a/enterprise/internal/codeintel/stores/lsifstore/exists.go b/enterprise/internal/codeintel/stores/lsifstore/exists.go index 6d10c8db862..41b4f782b43 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/exists.go +++ b/enterprise/internal/codeintel/stores/lsifstore/exists.go @@ -12,7 +12,7 @@ import ( // Exists determines if the path exists in the database. 
func (s *Store) Exists(ctx context.Context, bundleID int, path string) (_ bool, err error) { - ctx, endObservation := s.operations.exists.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.exists.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), }}) diff --git a/enterprise/internal/codeintel/stores/lsifstore/hover.go b/enterprise/internal/codeintel/stores/lsifstore/hover.go index 0e99e7afa06..f5bf1cc59ef 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/hover.go +++ b/enterprise/internal/codeintel/stores/lsifstore/hover.go @@ -12,7 +12,7 @@ import ( // Hover returns the hover text of the symbol at the given position. func (s *Store) Hover(ctx context.Context, bundleID int, path string, line, character int) (_ string, _ Range, _ bool, err error) { - ctx, trace, endObservation := s.operations.hover.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.hover.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), log.Int("line", line), diff --git a/enterprise/internal/codeintel/stores/lsifstore/locations.go b/enterprise/internal/codeintel/stores/lsifstore/locations.go index a6076170f38..851abdcf527 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/locations.go +++ b/enterprise/internal/codeintel/stores/lsifstore/locations.go @@ -36,7 +36,7 @@ func (s *Store) Implementations(ctx context.Context, bundleID int, path string, } func (s *Store) definitionsReferences(ctx context.Context, extractor func(r precise.RangeData) precise.ID, operation *observation.Operation, bundleID int, path string, line, character, limit, offset int) (_ []Location, _ int, err error) { - ctx, trace, endObservation := operation.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := operation.With(ctx, &err, 
observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), log.Int("line", line), @@ -72,7 +72,7 @@ func (s *Store) definitionsReferences(ctx context.Context, extractor func(r prec // method returns a map from result set identifiers to another map from document paths to locations // within that document, as well as a total count of locations within the map. func (s *Store) locations(ctx context.Context, bundleID int, ids []precise.ID, limit, offset int) (_ map[precise.ID][]Location, _ int, err error) { - ctx, trace, endObservation := s.operations.locations.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.locations.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.Int("numIDs", len(ids)), log.String("ids", idsToString(ids)), diff --git a/enterprise/internal/codeintel/stores/lsifstore/monikers.go b/enterprise/internal/codeintel/stores/lsifstore/monikers.go index da2019b6ddb..9eafb7d68a1 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/monikers.go +++ b/enterprise/internal/codeintel/stores/lsifstore/monikers.go @@ -17,7 +17,7 @@ import ( // of monikers are attached to a single range. The order of the output slice is "outside-in", so that // the range attached to earlier monikers enclose the range attached to later monikers. func (s *Store) MonikersByPosition(ctx context.Context, bundleID int, path string, line, character int) (_ [][]precise.MonikerData, err error) { - ctx, trace, endObservation := s.operations.monikersByPosition.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.monikersByPosition.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), log.Int("line", line), @@ -74,7 +74,7 @@ LIMIT 1 // whose scheme+identifier matches one of the given monikers. 
This method also returns the size of the // complete result set to aid in pagination. func (s *Store) BulkMonikerResults(ctx context.Context, tableName string, uploadIDs []int, monikers []precise.MonikerData, limit, offset int) (_ []Location, _ int, err error) { - ctx, trace, endObservation := s.operations.bulkMonikerResults.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.bulkMonikerResults.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("tableName", tableName), log.Int("numUploadIDs", len(uploadIDs)), log.String("uploadIDs", intsToString(uploadIDs)), diff --git a/enterprise/internal/codeintel/stores/lsifstore/packages.go b/enterprise/internal/codeintel/stores/lsifstore/packages.go index a885c5bf259..045e012578f 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/packages.go +++ b/enterprise/internal/codeintel/stores/lsifstore/packages.go @@ -12,7 +12,7 @@ import ( // PackageInformation looks up package information data by identifier. 
func (s *Store) PackageInformation(ctx context.Context, bundleID int, path, packageInformationID string) (_ precise.PackageInformationData, _ bool, err error) { - ctx, endObservation := s.operations.packageInformation.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.packageInformation.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), log.String("packageInformationID", packageInformationID), diff --git a/enterprise/internal/codeintel/stores/lsifstore/ranges.go b/enterprise/internal/codeintel/stores/lsifstore/ranges.go index bd7c95ba592..9c4ed5ccb0f 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/ranges.go +++ b/enterprise/internal/codeintel/stores/lsifstore/ranges.go @@ -17,7 +17,7 @@ const MaximumRangesDefinitionLocations = 10000 // Ranges returns definition, reference, implementation, hover, and documentation data for each range within the given span of lines. func (s *Store) Ranges(ctx context.Context, bundleID int, path string, startLine, endLine int) (_ []CodeIntelligenceRange, err error) { - ctx, trace, endObservation := s.operations.ranges.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.ranges.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), log.Int("startLine", startLine), @@ -78,7 +78,7 @@ func (s *Store) Ranges(ctx context.Context, bundleID int, path string, startLine // DocumentationAtPosition returns documentation path IDs found at the given position. 
func (s *Store) DocumentationAtPosition(ctx context.Context, bundleID int, path string, line, character int) (_ []string, err error) { - ctx, trace, endObservation := s.operations.documentationAtPosition.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.documentationAtPosition.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), log.Int("line", line), @@ -131,7 +131,7 @@ LIMIT 1 // identifiers. Like locations, this method returns a map from result set identifiers to another map from // document paths to locations within that document. func (s *Store) locationsWithinFile(ctx context.Context, bundleID int, ids []precise.ID, path string, documentData precise.DocumentData) (_ map[precise.ID][]Location, err error) { - ctx, trace, endObservation := s.operations.locationsWithinFile.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.locationsWithinFile.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.Int("numIDs", len(ids)), log.String("ids", idsToString(ids)), diff --git a/enterprise/internal/codeintel/stores/lsifstore/stencil.go b/enterprise/internal/codeintel/stores/lsifstore/stencil.go index 5ef8b747c02..ee6f0d25479 100644 --- a/enterprise/internal/codeintel/stores/lsifstore/stencil.go +++ b/enterprise/internal/codeintel/stores/lsifstore/stencil.go @@ -11,7 +11,7 @@ import ( // Stencil return all ranges within a single document. 
func (s *Store) Stencil(ctx context.Context, bundleID int, path string) (_ []Range, err error) { - ctx, trace, endObservation := s.operations.stencil.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, trace, endObservation := s.operations.stencil.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("bundleID", bundleID), log.String("path", path), }}) diff --git a/enterprise/internal/codemonitors/background/metrics.go b/enterprise/internal/codemonitors/background/metrics.go index 7628bb05df0..b84845a810c 100644 --- a/enterprise/internal/codemonitors/background/metrics.go +++ b/enterprise/internal/codemonitors/background/metrics.go @@ -1,13 +1,13 @@ package background import ( - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/workerutil" + "github.com/sourcegraph/sourcegraph/lib/log" ) type codeMonitorsMetrics struct { @@ -19,7 +19,7 @@ type codeMonitorsMetrics struct { func newMetricsForTriggerQueries() codeMonitorsMetrics { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("triggers", "code monitor triggers"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -52,7 +52,7 @@ func newMetricsForTriggerQueries() codeMonitorsMetrics { func newActionMetrics() codeMonitorsMetrics { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("actions", "code monitors actions"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/internal/insights/background/background.go b/enterprise/internal/insights/background/background.go index 9bd14766080..9d0ab4d52bf 100644 --- 
a/enterprise/internal/insights/background/background.go +++ b/enterprise/internal/insights/background/background.go @@ -9,6 +9,7 @@ import ( "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/background/pings" "github.com/sourcegraph/sourcegraph/internal/database" + "github.com/sourcegraph/sourcegraph/lib/log" "github.com/sourcegraph/sourcegraph/enterprise/internal/insights/discovery" @@ -40,7 +41,7 @@ func GetBackgroundJobs(ctx context.Context, mainAppDB *sql.DB, insightsDB *sql.D // Create basic metrics for recording information about background jobs. observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("background", "insights background jobs"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } @@ -95,7 +96,7 @@ func GetBackgroundQueryRunnerJob(ctx context.Context, mainAppDB *sql.DB, insight // Create basic metrics for recording information about background jobs. observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("background.query", "background query runner job"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/enterprise/internal/insights/compression/worker.go b/enterprise/internal/insights/compression/worker.go index 689bbcc5ec4..0146ea7d7dc 100644 --- a/enterprise/internal/insights/compression/worker.go +++ b/enterprise/internal/insights/compression/worker.go @@ -198,7 +198,7 @@ func (i *CommitIndexer) indexNextWindow(name string, id api.RepoID, windowDurati // getCommits fetches the commits from the remote gitserver for a repository after a certain time. 
func getCommits(ctx context.Context, db database.DB, name api.RepoName, after time.Time, until *time.Time, operation *observation.Operation) (_ []*gitdomain.Commit, err error) { - ctx, endObservation := operation.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := operation.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) before := "" diff --git a/go.mod b/go.mod index febd0ff77fc..5a1013aa885 100644 --- a/go.mod +++ b/go.mod @@ -236,7 +236,7 @@ require ( github.com/spf13/cobra v1.2.1 // indirect github.com/twitchtv/twirp v8.1.1+incompatible // indirect go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.21.0 // indirect + go.uber.org/zap v1.21.0 ) require ( @@ -387,7 +387,7 @@ require ( github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/go-errors/errors v1.4.2 // indirect github.com/go-kit/log v0.2.0 // indirect - github.com/go-logr/logr v1.2.2 // indirect + github.com/go-logr/logr v1.2.3 // indirect github.com/go-toast/toast v0.0.0-20190211030409-01e6764cf0a4 // indirect github.com/godbus/dbus/v5 v5.0.6 // indirect github.com/golang/glog v1.0.0 // indirect diff --git a/go.sum b/go.sum index bdb2de1b558..0b2c3e238e1 100644 --- a/go.sum +++ b/go.sum @@ -846,8 +846,8 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.2 h1:ahHml/yUpnlb96Rp8HCvtYVPY8ZYpxq3g7UYchIYwbs= -github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0= +github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= github.com/go-martini/martini 
v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.1/go.mod h1:7FAglXiTm7HKlQRDeOQ6ZNUHidzCWXuZWq/1dTyBNF8= github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= diff --git a/internal/codeintel/autoindexing/init.go b/internal/codeintel/autoindexing/init.go index d7a322b9a50..99d2c164e8d 100644 --- a/internal/codeintel/autoindexing/init.go +++ b/internal/codeintel/autoindexing/init.go @@ -3,7 +3,6 @@ package autoindexing import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -24,7 +24,7 @@ var ( func GetService(db database.DB) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("autoindexing.service", "autoindexing service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/autoindexing/internal/inference/init.go b/internal/codeintel/autoindexing/internal/inference/init.go index ea7b37d10a1..0ca709263be 100644 --- a/internal/codeintel/autoindexing/internal/inference/init.go +++ b/internal/codeintel/autoindexing/internal/inference/init.go @@ -3,7 +3,6 @@ package inference import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/luasandbox" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) 
var ( @@ -21,7 +21,7 @@ var ( func GetService(db database.DB) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("inference.service", "inference service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/autoindexing/internal/inference/service.go b/internal/codeintel/autoindexing/internal/inference/service.go index bb25a2ff052..d913bbc5e09 100644 --- a/internal/codeintel/autoindexing/internal/inference/service.go +++ b/internal/codeintel/autoindexing/internal/inference/service.go @@ -46,7 +46,7 @@ func newService( // is assumed to be a table of recognizer instances. Keys conflicting with the default recognizers // will overwrite them (to disable or change default behavior). func (s *Service) InferIndexJobs(ctx context.Context, repo api.RepoName, commit, overrideScript string) (_ []config.IndexJob, err error) { - ctx, endObservation := s.operations.inferIndexJobs.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.inferIndexJobs.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) sandbox, err := s.createSandbox(ctx) @@ -71,7 +71,7 @@ func (s *Service) InferIndexJobs(ctx context.Context, repo api.RepoName, commit, // createSandbox creates a Lua sandbox wih the modules loaded for use with auto indexing inference. 
func (s *Service) createSandbox(ctx context.Context) (_ *luasandbox.Sandbox, err error) { - ctx, endObservation := s.operations.createSandbox.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.createSandbox.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) opts := luasandbox.CreateOptions{ @@ -88,7 +88,7 @@ func (s *Service) createSandbox(ctx context.Context) (_ *luasandbox.Sandbox, err // setupRecognizers runs the given default and override scripts in the given sandbox and converts the // script return values to a list of recognizer instances. func (s *Service) setupRecognizers(ctx context.Context, sandbox *luasandbox.Sandbox, overrideScript string) (_ []*luatypes.Recognizer, err error) { - ctx, endObservation := s.operations.setupRecognizers.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.setupRecognizers.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) opts := luasandbox.RunOptions{} @@ -120,7 +120,7 @@ func (s *Service) invokeRecognizers( invocationContext *invocationContext, recognizers []*luatypes.Recognizer, ) (_ []config.IndexJob, err error) { - ctx, endObservation := s.operations.invokeRecognizers.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.invokeRecognizers.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) patternsForPaths, patternsForContent := partitionPatterns(recognizers) @@ -155,7 +155,7 @@ func (s *Service) resolvePaths( invocationContext *invocationContext, patternsForPaths []*luatypes.PathPattern, ) (_ []string, err error) { - ctx, endObservation := s.operations.resolvePaths.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.resolvePaths.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) pathPattern, err := flattenPatterns(patternsForPaths, false) @@ -179,7 +179,7 @@ func (s *Service) resolveFileContents( paths 
[]string, patternsForContent []*luatypes.PathPattern, ) (_ map[string]string, err error) { - ctx, endObservation := s.operations.resolveFileContents.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.resolveFileContents.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) relevantPaths, err := filterPathsByPatterns(paths, patternsForContent) @@ -312,7 +312,7 @@ func (s *Service) invokeLinearizedRecognizer( paths []string, contentsByPath map[string]string, ) (_ []config.IndexJob, err error) { - ctx, endObservation := s.operations.invokeLinearizedRecognizer.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.invokeLinearizedRecognizer.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) callPaths, callContentsByPath, err := s.filterPathsForRecognizer(recognizer, paths, contentsByPath) diff --git a/internal/codeintel/autoindexing/internal/store/init.go b/internal/codeintel/autoindexing/internal/store/init.go index 489a41d8b0f..2d4fe1d75e8 100644 --- a/internal/codeintel/autoindexing/internal/store/init.go +++ b/internal/codeintel/autoindexing/internal/store/init.go @@ -3,13 +3,13 @@ package store import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -20,7 +20,7 @@ var ( func GetStore(db database.DB) *Store { storeOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("store.autoindexing", "autoindexing store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/autoindexing/internal/store/store.go 
b/internal/codeintel/autoindexing/internal/store/store.go index 0cb29d74d5f..cb6d0e6847b 100644 --- a/internal/codeintel/autoindexing/internal/store/store.go +++ b/internal/codeintel/autoindexing/internal/store/store.go @@ -50,7 +50,7 @@ type ListOpts struct { } func (s *Store) List(ctx context.Context, opts ListOpts) (indexJobs []shared.IndexJob, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("numIndexJobs", len(indexJobs)), diff --git a/internal/codeintel/autoindexing/service.go b/internal/codeintel/autoindexing/service.go index 9da83ce4afd..bb9bab59942 100644 --- a/internal/codeintel/autoindexing/service.go +++ b/internal/codeintel/autoindexing/service.go @@ -28,14 +28,14 @@ type ListOpts struct { } func (s *Service) List(ctx context.Context, opts ListOpts) (jobs []IndexJob, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return s.autoindexingStore.List(ctx, store.ListOpts(opts)) } func (s *Service) Get(ctx context.Context, id int) (job IndexJob, ok bool, err error) { - ctx, endObservation := s.operations.get.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.get.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33377 @@ -44,7 +44,7 @@ func (s *Service) Get(ctx context.Context, id int) (job IndexJob, ok bool, err e } func (s *Service) GetBatch(ctx context.Context, ids ...int) (jobs []IndexJob, err error) { - ctx, endObservation := s.operations.getBatch.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.getBatch.With(ctx, &err, 
observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33377 @@ -53,7 +53,7 @@ func (s *Service) GetBatch(ctx context.Context, ids ...int) (jobs []IndexJob, er } func (s *Service) Delete(ctx context.Context, id int) (err error) { - ctx, endObservation := s.operations.delete.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.delete.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33377 @@ -62,7 +62,7 @@ func (s *Service) Delete(ctx context.Context, id int) (err error) { } func (s *Service) Enqueue(ctx context.Context, jobs []IndexJob) (err error) { - ctx, endObservation := s.operations.enqueue.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.enqueue.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33377 @@ -71,7 +71,7 @@ func (s *Service) Enqueue(ctx context.Context, jobs []IndexJob) (err error) { } func (s *Service) Infer(ctx context.Context, repoID int) (jobs []IndexJob, err error) { - ctx, endObservation := s.operations.infer.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.infer.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33377 @@ -80,7 +80,7 @@ func (s *Service) Infer(ctx context.Context, repoID int) (jobs []IndexJob, err e } func (s *Service) UpdateIndexingConfiguration(ctx context.Context, repoID int) (jobs []IndexJob, err error) { - ctx, endObservation := s.operations.updateIndexingConfiguration.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.updateIndexingConfiguration.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To 
be implemented in https://github.com/sourcegraph/sourcegraph/issues/33377 diff --git a/internal/codeintel/dependencies/init.go b/internal/codeintel/dependencies/init.go index 560e9c49003..6a151c986dc 100644 --- a/internal/codeintel/dependencies/init.go +++ b/internal/codeintel/dependencies/init.go @@ -3,7 +3,6 @@ package dependencies import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "golang.org/x/sync/semaphore" @@ -14,6 +13,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/env" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -31,7 +31,7 @@ var ( func GetService(db database.DB, gitService GitService, syncer Syncer) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("dependencies.service", "codeintel dependencies service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/dependencies/internal/lockfiles/init.go b/internal/codeintel/dependencies/internal/lockfiles/init.go index 9110f64f919..fd748489363 100644 --- a/internal/codeintel/dependencies/internal/lockfiles/init.go +++ b/internal/codeintel/dependencies/internal/lockfiles/init.go @@ -3,12 +3,12 @@ package lockfiles import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -19,7 +19,7 @@ var ( func GetService(gitSvc GitService) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("lockfiles.service", "codeintel lockfiles 
service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/dependencies/internal/lockfiles/service.go b/internal/codeintel/dependencies/internal/lockfiles/service.go index d095b67799a..6c27591a4ff 100644 --- a/internal/codeintel/dependencies/internal/lockfiles/service.go +++ b/internal/codeintel/dependencies/internal/lockfiles/service.go @@ -31,7 +31,7 @@ func newService(gitSvc GitService, observationContext *observation.Context) *Ser } func (s *Service) ListDependencies(ctx context.Context, repo api.RepoName, rev string) (deps []reposource.PackageDependency, err error) { - ctx, endObservation := s.operations.listDependencies.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.listDependencies.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", string(repo)), log.String("rev", rev), }}) @@ -46,7 +46,7 @@ func (s *Service) ListDependencies(ctx context.Context, repo api.RepoName, rev s } func (s *Service) StreamDependencies(ctx context.Context, repo api.RepoName, rev string, cb func(reposource.PackageDependency) error) (err error) { - ctx, endObservation := s.operations.streamDependencies.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.streamDependencies.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("repo", string(repo)), log.String("rev", rev), }}) diff --git a/internal/codeintel/dependencies/internal/store/init.go b/internal/codeintel/dependencies/internal/store/init.go index 06e568fbe1b..09a85bf9525 100644 --- a/internal/codeintel/dependencies/internal/store/init.go +++ b/internal/codeintel/dependencies/internal/store/init.go @@ -3,13 +3,13 @@ package store import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" 
"github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -23,7 +23,7 @@ func GetStore(db database.DB) *Store { // it with this package level sync.Once. opsOnce.Do(func() { ops = newOperations(&observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("dependencies.store", "dependencies store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, }) diff --git a/internal/codeintel/dependencies/internal/store/store.go b/internal/codeintel/dependencies/internal/store/store.go index bab5fbe17cb..fa403816f43 100644 --- a/internal/codeintel/dependencies/internal/store/store.go +++ b/internal/codeintel/dependencies/internal/store/store.go @@ -55,7 +55,7 @@ type ListDependencyReposOpts struct { } func (s *Store) ListDependencyRepos(ctx context.Context, opts ListDependencyReposOpts) (dependencyRepos []shared.Repo, err error) { - ctx, endObservation := s.operations.listDependencyRepos.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.listDependencyRepos.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("scheme", opts.Scheme), }}) defer func() { @@ -115,7 +115,7 @@ func makeLimit(limit int) *sqlf.Query { // UpsertDependencyRepos creates the given dependency repos if they doesn't yet exist. The values that // did not exist previously are returned. 
func (s *Store) UpsertDependencyRepos(ctx context.Context, deps []shared.Repo) (newDeps []shared.Repo, err error) { - ctx, endObservation := s.operations.upsertDependencyRepos.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.upsertDependencyRepos.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numDeps", len(deps)), }}) defer func() { @@ -165,7 +165,7 @@ func (s *Store) UpsertDependencyRepos(ctx context.Context, deps []shared.Repo) ( // DeleteDependencyReposByID removes the dependency repos with the given ids, if they exist. func (s *Store) DeleteDependencyReposByID(ctx context.Context, ids ...int) (err error) { - ctx, endObservation := s.operations.deleteDependencyReposByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.deleteDependencyReposByID.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("numIDs", len(ids)), }}) defer endObservation(1, observation.Args{}) diff --git a/internal/codeintel/dependencies/service.go b/internal/codeintel/dependencies/service.go index 8b431bad238..bae96f99726 100644 --- a/internal/codeintel/dependencies/service.go +++ b/internal/codeintel/dependencies/service.go @@ -49,7 +49,7 @@ func newService( // Dependencies resolves the (transitive) dependencies for a set of repository and revisions. // Both the input repoRevs and the output dependencyRevs are a map from repository names to revspecs. 
func (s *Service) Dependencies(ctx context.Context, repoRevs map[api.RepoName]types.RevSpecSet) (dependencyRevs map[api.RepoName]types.RevSpecSet, err error) { - ctx, endObservation := s.operations.dependencies.With(ctx, &err, observation.Args{LogFields: constructLogFields(repoRevs)}) + ctx, _, endObservation := s.operations.dependencies.With(ctx, &err, observation.Args{LogFields: constructLogFields(repoRevs)}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("numDependencyRevs", len(dependencyRevs)), diff --git a/internal/codeintel/documents/init.go b/internal/codeintel/documents/init.go index 178566d1d48..05854902a95 100644 --- a/internal/codeintel/documents/init.go +++ b/internal/codeintel/documents/init.go @@ -3,7 +3,6 @@ package documents import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -23,7 +23,7 @@ var ( func GetService(db database.DB) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("documents.service", "codeintel documents service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/documents/internal/store/init.go b/internal/codeintel/documents/internal/store/init.go index 489a41d8b0f..5129dba74a1 100644 --- a/internal/codeintel/documents/internal/store/init.go +++ b/internal/codeintel/documents/internal/store/init.go @@ -3,13 +3,13 @@ package store import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" 
"github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -20,7 +20,7 @@ var ( func GetStore(db database.DB) *Store { storeOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("documents.store", "codeintel documents store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/documents/internal/store/store.go b/internal/codeintel/documents/internal/store/store.go index 4bca6a586fb..2401197d7a3 100644 --- a/internal/codeintel/documents/internal/store/store.go +++ b/internal/codeintel/documents/internal/store/store.go @@ -50,7 +50,7 @@ type ListOpts struct { } func (s *Store) List(ctx context.Context, opts ListOpts) (documents []shared.Document, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("numDocuments", len(documents)), diff --git a/internal/codeintel/documents/service.go b/internal/codeintel/documents/service.go index b3c54cef817..ff25b070d5a 100644 --- a/internal/codeintel/documents/service.go +++ b/internal/codeintel/documents/service.go @@ -27,7 +27,7 @@ type DocumentOpts struct { } func (s *Service) Document(ctx context.Context, opts DocumentOpts) (documents []Document, err error) { - ctx, endObservation := s.operations.document.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.document.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33373 diff --git a/internal/codeintel/policies/init.go b/internal/codeintel/policies/init.go 
index 63d132e1a9b..58699c88c69 100644 --- a/internal/codeintel/policies/init.go +++ b/internal/codeintel/policies/init.go @@ -3,7 +3,6 @@ package policies import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -23,7 +23,7 @@ var ( func GetService(db database.DB) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("policies.service", "codeintel policies service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/policies/internal/store/init.go b/internal/codeintel/policies/internal/store/init.go index 489a41d8b0f..f0880fc7e81 100644 --- a/internal/codeintel/policies/internal/store/init.go +++ b/internal/codeintel/policies/internal/store/init.go @@ -3,13 +3,13 @@ package store import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -20,7 +20,7 @@ var ( func GetStore(db database.DB) *Store { storeOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("policies.store", "codeintel policies store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/policies/internal/store/store.go b/internal/codeintel/policies/internal/store/store.go index 
d551ac39dc6..9745d3dd689 100644 --- a/internal/codeintel/policies/internal/store/store.go +++ b/internal/codeintel/policies/internal/store/store.go @@ -50,7 +50,7 @@ type ListOpts struct { } func (s *Store) List(ctx context.Context, opts ListOpts) (policies []shared.Policy, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("numPolicies", len(policies)), diff --git a/internal/codeintel/policies/service.go b/internal/codeintel/policies/service.go index 408345373fc..93fbe4fe30c 100644 --- a/internal/codeintel/policies/service.go +++ b/internal/codeintel/policies/service.go @@ -29,14 +29,14 @@ type ListOpts struct { } func (s *Service) List(ctx context.Context, opts ListOpts) (policies []Policy, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return s.policiesStore.List(ctx, store.ListOpts(opts)) } func (s *Service) Get(ctx context.Context, id int) (policy Policy, ok bool, err error) { - ctx, endObservation := s.operations.get.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.get.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33376 @@ -45,7 +45,7 @@ func (s *Service) Get(ctx context.Context, id int) (policy Policy, ok bool, err } func (s *Service) Create(ctx context.Context, policy Policy) (hydratedPolicy Policy, err error) { - ctx, endObservation := s.operations.create.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.create.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in 
https://github.com/sourcegraph/sourcegraph/issues/33376 @@ -54,7 +54,7 @@ func (s *Service) Create(ctx context.Context, policy Policy) (hydratedPolicy Pol } func (s *Service) Update(ctx context.Context, policy Policy) (hydratedPolicy Policy, err error) { - ctx, endObservation := s.operations.update.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.update.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33376 @@ -63,7 +63,7 @@ func (s *Service) Update(ctx context.Context, policy Policy) (hydratedPolicy Pol } func (s *Service) Delete(ctx context.Context, id int) (err error) { - ctx, endObservation := s.operations.delete.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.delete.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33376 @@ -72,7 +72,7 @@ func (s *Service) Delete(ctx context.Context, id int) (err error) { } func (s *Service) CommitsMatchingRetentionPolicies(ctx context.Context, repoID int, policies []Policy, instant time.Time, commitSubset ...string) (commitsToPolicies map[string][]Policy, err error) { - ctx, endObservation := s.operations.commitsMatchingRetentionPolicies.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.commitsMatchingRetentionPolicies.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33376 @@ -81,7 +81,7 @@ func (s *Service) CommitsMatchingRetentionPolicies(ctx context.Context, repoID i } func (s *Service) CommitsMatchingIndexingPolicies(ctx context.Context, repoID int, policies []Policy, instant time.Time) (commitsToPolicies map[string][]Policy, err error) { - ctx, endObservation := s.operations.commitsMatchingIndexingPolicies.With(ctx, &err, 
observation.Args{}) + ctx, _, endObservation := s.operations.commitsMatchingIndexingPolicies.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33376 diff --git a/internal/codeintel/symbols/init.go b/internal/codeintel/symbols/init.go index 2fa4922b63c..b3615c2f647 100644 --- a/internal/codeintel/symbols/init.go +++ b/internal/codeintel/symbols/init.go @@ -3,7 +3,6 @@ package symbols import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -23,7 +23,7 @@ var ( func GetService(db database.DB) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("symbols.service", "codeintel symbols service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/symbols/internal/store/init.go b/internal/codeintel/symbols/internal/store/init.go index 489a41d8b0f..4f591f0494a 100644 --- a/internal/codeintel/symbols/internal/store/init.go +++ b/internal/codeintel/symbols/internal/store/init.go @@ -3,13 +3,13 @@ package store import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -20,7 +20,7 @@ var ( func GetStore(db database.DB) *Store { storeOnce.Do(func() { observationContext := &observation.Context{ - 
Logger: log15.Root(), + Logger: log.Scoped("symbols.store", "codeintel symbols store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/symbols/internal/store/store.go b/internal/codeintel/symbols/internal/store/store.go index c249ff7b9e2..f778ff107f0 100644 --- a/internal/codeintel/symbols/internal/store/store.go +++ b/internal/codeintel/symbols/internal/store/store.go @@ -50,7 +50,7 @@ type ListOpts struct { } func (s *Store) List(ctx context.Context, opts ListOpts) (symbols []shared.Symbol, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("numSymbols", len(symbols)), diff --git a/internal/codeintel/symbols/service.go b/internal/codeintel/symbols/service.go index 66430b67a5a..7af0d112764 100644 --- a/internal/codeintel/symbols/service.go +++ b/internal/codeintel/symbols/service.go @@ -27,7 +27,7 @@ type SymbolOpts struct { } func (s *Service) Symbol(ctx context.Context, opts SymbolOpts) (symbols []Symbol, err error) { - ctx, endObservation := s.operations.symbol.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.symbol.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33374 diff --git a/internal/codeintel/uploads/init.go b/internal/codeintel/uploads/init.go index ced57ad9350..431b432cbf8 100644 --- a/internal/codeintel/uploads/init.go +++ b/internal/codeintel/uploads/init.go @@ -3,7 +3,6 @@ package uploads import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -11,6 +10,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/database" 
"github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -23,7 +23,7 @@ var ( func GetService(db database.DB) *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("uploads.service", "codeintel uploads service"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/uploads/internal/store/init.go b/internal/codeintel/uploads/internal/store/init.go index 489a41d8b0f..5ba7e77eeeb 100644 --- a/internal/codeintel/uploads/internal/store/init.go +++ b/internal/codeintel/uploads/internal/store/init.go @@ -3,13 +3,13 @@ package store import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/database" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -20,7 +20,7 @@ var ( func GetStore(db database.DB) *Store { storeOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("uploads.store", "codeintel uploads store"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/uploads/internal/store/store.go b/internal/codeintel/uploads/internal/store/store.go index 2c438a851db..0ee9ab50336 100644 --- a/internal/codeintel/uploads/internal/store/store.go +++ b/internal/codeintel/uploads/internal/store/store.go @@ -50,7 +50,7 @@ type ListOpts struct { } func (s *Store) List(ctx context.Context, opts ListOpts) (uploads []shared.Upload, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := 
s.operations.list.With(ctx, &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("numUploads", len(uploads)), diff --git a/internal/codeintel/uploads/service.go b/internal/codeintel/uploads/service.go index 9e1781c8c44..aa02349d6e4 100644 --- a/internal/codeintel/uploads/service.go +++ b/internal/codeintel/uploads/service.go @@ -29,14 +29,14 @@ type ListOpts struct { } func (s *Service) List(ctx context.Context, opts ListOpts) (uploads []Upload, err error) { - ctx, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.list.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return s.uploadsStore.List(ctx, store.ListOpts(opts)) } func (s *Service) Get(ctx context.Context, id int) (upload Upload, ok bool, err error) { - ctx, endObservation := s.operations.get.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.get.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33375 @@ -45,7 +45,7 @@ func (s *Service) Get(ctx context.Context, id int) (upload Upload, ok bool, err } func (s *Service) GetBatch(ctx context.Context, ids ...int) (uploads []Upload, err error) { - ctx, endObservation := s.operations.getBatch.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.getBatch.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33375 @@ -57,7 +57,7 @@ type UploadState struct { } func (s *Service) Enqueue(ctx context.Context, state UploadState, reader io.Reader) (err error) { - ctx, endObservation := s.operations.enqueue.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.enqueue.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be 
implemented in https://github.com/sourcegraph/sourcegraph/issues/33375 @@ -66,7 +66,7 @@ func (s *Service) Enqueue(ctx context.Context, state UploadState, reader io.Read } func (s *Service) Delete(ctx context.Context, id int) (err error) { - ctx, endObservation := s.operations.delete.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.delete.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33375 @@ -75,7 +75,7 @@ func (s *Service) Delete(ctx context.Context, id int) (err error) { } func (s *Service) CommitsVisibleToUpload(ctx context.Context, id int) (commits []string, err error) { - ctx, endObservation := s.operations.commitsVisibleTo.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.commitsVisibleTo.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33375 @@ -84,7 +84,7 @@ func (s *Service) CommitsVisibleToUpload(ctx context.Context, id int) (commits [ } func (s *Service) UploadsVisibleToCommit(ctx context.Context, commit string) (uploads []Upload, err error) { - ctx, endObservation := s.operations.uploadsVisibleTo.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.uploadsVisibleTo.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) // To be implemented in https://github.com/sourcegraph/sourcegraph/issues/33375 diff --git a/internal/codeintel/uploads/transport/http/init.go b/internal/codeintel/uploads/transport/http/init.go index f3d8c11f272..d5948135386 100644 --- a/internal/codeintel/uploads/transport/http/init.go +++ b/internal/codeintel/uploads/transport/http/init.go @@ -3,13 +3,13 @@ package http import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" uploads 
"github.com/sourcegraph/sourcegraph/internal/codeintel/uploads" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -20,7 +20,7 @@ var ( func GetHandler(svc *uploads.Service) *Handler { handlerOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("uploads.handler", "codeintel uploads http handler"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/codeintel/uploads/transport/http/resolver.go b/internal/codeintel/uploads/transport/http/resolver.go index 12ca6c8544d..9e607aaa07d 100644 --- a/internal/codeintel/uploads/transport/http/resolver.go +++ b/internal/codeintel/uploads/transport/http/resolver.go @@ -56,7 +56,7 @@ func (h *Handler) serveJSON(w http.ResponseWriter, r *http.Request) { } func (h *Handler) handleRequest(r *http.Request) (payload interface{}, statusCode int, err error) { - ctx, trace, endObservation := h.operations.todo.WithAndLogger(r.Context(), &err, observation.Args{}) + ctx, trace, endObservation := h.operations.todo.With(r.Context(), &err, observation.Args{}) defer func() { endObservation(1, observation.Args{LogFields: []log.Field{ log.Int("statusCode", statusCode), diff --git a/internal/database/batch/batch.go b/internal/database/batch/batch.go index 7d30aa95f32..021c10a99fe 100644 --- a/internal/database/batch/batch.go +++ b/internal/database/batch/batch.go @@ -210,7 +210,7 @@ func (i *Inserter) Flush(ctx context.Context) (err error) { log.Int("payloadSize", payloadSize), } combinedLogFields := append(operationlogFields, i.commonLogFields...) 
- ctx, endObservation := i.operations.flush.With(ctx, &err, observation.Args{LogFields: combinedLogFields}) + ctx, _, endObservation := i.operations.flush.With(ctx, &err, observation.Args{LogFields: combinedLogFields}) defer endObservation(1, observation.Args{}) // Create a query with enough placeholders to match the current batch size. This should diff --git a/internal/database/batch/observability.go b/internal/database/batch/observability.go index 952870628ba..ee4ed90bf70 100644 --- a/internal/database/batch/observability.go +++ b/internal/database/batch/observability.go @@ -4,7 +4,6 @@ import ( "fmt" "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" @@ -12,6 +11,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/metrics" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) type operations struct { @@ -47,7 +47,7 @@ var ( func getOperations() *operations { opsOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("database.batch", ""), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, HoneyDataset: &honey.Dataset{ diff --git a/internal/database/migration/store/describe.go b/internal/database/migration/store/describe.go index 7af18812112..237951e850e 100644 --- a/internal/database/migration/store/describe.go +++ b/internal/database/migration/store/describe.go @@ -13,7 +13,7 @@ import ( ) func (s *Store) Describe(ctx context.Context) (_ map[string]schemas.SchemaDescription, err error) { - ctx, endObservation := s.operations.describe.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.describe.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) descriptions := map[string]schemas.SchemaDescription{} diff --git 
a/internal/database/migration/store/store.go b/internal/database/migration/store/store.go index cb234d71906..b5411e4a3d8 100644 --- a/internal/database/migration/store/store.go +++ b/internal/database/migration/store/store.go @@ -58,7 +58,7 @@ const currentMigrationLogSchemaVersion = 2 // if they do not already exist. If old versions of the tables exist, this method // will attempt to update them in a backward-compatible manner. func (s *Store) EnsureSchemaTable(ctx context.Context) (err error) { - ctx, endObservation := s.operations.ensureSchemaTable.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.ensureSchemaTable.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) queries := []*sqlf.Query{ @@ -95,7 +95,7 @@ func (s *Store) EnsureSchemaTable(ctx context.Context) (err error) { // A failed migration requires administrator attention. A pending migration may currently be // in-progress, or may indicate that a migration was attempted but failed part way through. func (s *Store) Versions(ctx context.Context) (appliedVersions, pendingVersions, failedVersions []int, err error) { - ctx, endObservation := s.operations.versions.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.versions.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) migrationLogs, err := scanMigrationLogs(s.Query(ctx, sqlf.Sprintf(versionsQuery, s.schemaName))) @@ -150,7 +150,7 @@ ORDER BY version func (s *Store) TryLock(ctx context.Context) (_ bool, _ func(err error) error, err error) { key := s.lockKey() - ctx, endObservation := s.operations.tryLock.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.tryLock.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int32("key", key), }}) defer endObservation(1, observation.Args{}) @@ -182,7 +182,7 @@ func (s *Store) lockKey() int32 { // Up runs the given definition's up query. 
func (s *Store) Up(ctx context.Context, definition definition.Definition) (err error) { - ctx, endObservation := s.operations.up.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.up.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return s.Exec(ctx, definition.UpQuery) @@ -190,7 +190,7 @@ func (s *Store) Up(ctx context.Context, definition definition.Definition) (err e // Down runs the given definition's down query. func (s *Store) Down(ctx context.Context, definition definition.Definition) (err error) { - ctx, endObservation := s.operations.down.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.down.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return s.Exec(ctx, definition.DownQuery) @@ -199,7 +199,7 @@ func (s *Store) Down(ctx context.Context, definition definition.Definition) (err // IndexStatus returns an object describing the current validity status and creation progress of the // index with the given name. If the index does not exist, a false-valued flag is returned. func (s *Store) IndexStatus(ctx context.Context, tableName, indexName string) (_ storetypes.IndexStatus, _ bool, err error) { - ctx, endObservation := s.operations.indexStatus.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.indexStatus.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return scanFirstIndexStatus(s.Query(ctx, sqlf.Sprintf(indexStatusQuery, tableName, indexName))) @@ -230,7 +230,7 @@ WHERE // with the given definition. All users are assumed to run either `s.Up` or `s.Down` as part of the // given function, among any other behaviors that are necessary to perform in the _critical section_. 
func (s *Store) WithMigrationLog(ctx context.Context, definition definition.Definition, up bool, f func() error) (err error) { - ctx, endObservation := s.operations.withMigrationLog.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.withMigrationLog.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) logID, err := s.createMigrationLog(ctx, definition.ID, up) diff --git a/internal/diskcache/cache.go b/internal/diskcache/cache.go index b249839209c..a5b24445479 100644 --- a/internal/diskcache/cache.go +++ b/internal/diskcache/cache.go @@ -128,7 +128,7 @@ func (s *store) Open(ctx context.Context, key []string, fetcher Fetcher) (file * } func (s *store) OpenWithPath(ctx context.Context, key []string, fetcher FetcherWithPath) (file *File, err error) { - ctx, trace, endObservation := s.observe.cachedFetch.WithAndLogger(ctx, &err, observation.Args{LogFields: []otelog.Field{ + ctx, trace, endObservation := s.observe.cachedFetch.With(ctx, &err, observation.Args{LogFields: []otelog.Field{ otelog.String(string(ext.Component), s.component), }}) defer endObservation(1, observation.Args{}) @@ -172,7 +172,7 @@ func (s *store) OpenWithPath(ctx context.Context, key []string, fetcher FetcherW go func(ctx context.Context) { var err error var f *File - ctx, trace, endObservation := s.observe.backgroundFetch.WithAndLogger(ctx, &err, observation.Args{LogFields: []otelog.Field{ + ctx, trace, endObservation := s.observe.backgroundFetch.With(ctx, &err, observation.Args{LogFields: []otelog.Field{ otelog.Bool("withBackgroundTimeout", s.backgroundTimeout != 0), }}) defer endObservation(1, observation.Args{}) @@ -296,7 +296,7 @@ type EvictStats struct { } func (s *store) Evict(maxCacheSizeBytes int64) (stats EvictStats, err error) { - _, trace, endObservation := s.observe.evict.WithAndLogger(context.Background(), &err, observation.Args{LogFields: []otelog.Field{ + _, trace, endObservation := s.observe.evict.With(context.Background(), &err, 
observation.Args{LogFields: []otelog.Field{ otelog.Int64("maxCacheSizeBytes", maxCacheSizeBytes), }}) endObservation(1, observation.Args{}) diff --git a/internal/extsvc/jvmpackages/coursier/coursier.go b/internal/extsvc/jvmpackages/coursier/coursier.go index faa8e1728eb..a724ddaee9b 100644 --- a/internal/extsvc/jvmpackages/coursier/coursier.go +++ b/internal/extsvc/jvmpackages/coursier/coursier.go @@ -4,7 +4,6 @@ import ( "bytes" "context" "fmt" - "log" "os" "os/exec" "path" @@ -12,15 +11,11 @@ import ( "strings" "time" - "github.com/inconshreveable/log15" - "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" - "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/conf/reposource" "github.com/sourcegraph/sourcegraph/internal/env" "github.com/sourcegraph/sourcegraph/internal/observation" - "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/lib/errors" "github.com/sourcegraph/sourcegraph/schema" ) @@ -28,33 +23,27 @@ import ( var CoursierBinary = "coursier" var ( - coursierCacheDir string - observationContext *observation.Context - operations *Operations - invocTimeout, _ = time.ParseDuration(env.Get("SRC_COURSIER_TIMEOUT", "2m", "Time limit per Coursier invocation, which is used to resolve JVM/Java dependencies.")) + coursierCacheDir string + invocTimeout, _ = time.ParseDuration(env.Get("SRC_COURSIER_TIMEOUT", "2m", "Time limit per Coursier invocation, which is used to resolve JVM/Java dependencies.")) ) func init() { - observationContext = &observation.Context{ - Logger: log15.Root(), - Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, - Registerer: prometheus.DefaultRegisterer, - } - operations = NewOperations(observationContext) - // Should only be set for gitserver for persistence, repo-updater will use ephemeral storage. 
// repo-updater only performs existence checks which doesnt involve downloading any JARs (except for JDK), // only POM files which are much lighter. if reposDir := os.Getenv("SRC_REPOS_DIR"); reposDir != "" { coursierCacheDir = filepath.Join(reposDir, "coursier") if err := os.MkdirAll(coursierCacheDir, os.ModePerm); err != nil { - log.Fatalf("failed to create coursier cache dir in %s: %s", coursierCacheDir, err) + println(fmt.Sprintf("failed to create coursier cache dir in %s: %s", coursierCacheDir, err)) + os.Exit(1) } } } func FetchSources(ctx context.Context, config *schema.JVMPackagesConnection, dependency *reposource.MavenDependency) (sourceCodeJarPath string, err error) { - ctx, endObservation := operations.fetchSources.With(ctx, &err, observation.Args{LogFields: []otlog.Field{ + operations := getOperations() + + ctx, _, endObservation := operations.fetchSources.With(ctx, &err, observation.Args{LogFields: []otlog.Field{ otlog.String("dependency", dependency.PackageManagerSyntax()), }}) defer endObservation(1, observation.Args{}) @@ -107,7 +96,9 @@ func FetchSources(ctx context.Context, config *schema.JVMPackagesConnection, dep } func FetchByteCode(ctx context.Context, config *schema.JVMPackagesConnection, dependency *reposource.MavenDependency) (byteCodeJarPath string, err error) { - ctx, endObservation := operations.fetchByteCode.With(ctx, &err, observation.Args{}) + operations := getOperations() + + ctx, _, endObservation := operations.fetchByteCode.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) paths, err := runCoursierCommand( @@ -134,7 +125,9 @@ func FetchByteCode(ctx context.Context, config *schema.JVMPackagesConnection, de } func Exists(ctx context.Context, config *schema.JVMPackagesConnection, dependency *reposource.MavenDependency) (err error) { - ctx, endObservation := operations.exists.With(ctx, &err, observation.Args{LogFields: []otlog.Field{ + operations := getOperations() + + ctx, _, endObservation := 
operations.exists.With(ctx, &err, observation.Args{LogFields: []otlog.Field{ otlog.String("dependency", dependency.PackageManagerSyntax()), }}) defer endObservation(1, observation.Args{}) @@ -163,10 +156,12 @@ func (e coursierError) NotFound() bool { } func runCoursierCommand(ctx context.Context, config *schema.JVMPackagesConnection, args ...string) (stdoutLines []string, err error) { + operations := getOperations() + ctx, cancel := context.WithTimeout(ctx, invocTimeout) defer cancel() - ctx, trace, endObservation := operations.runCommand.WithAndLogger(ctx, &err, observation.Args{LogFields: []otlog.Field{ + ctx, trace, endObservation := operations.runCommand.With(ctx, &err, observation.Args{LogFields: []otlog.Field{ otlog.String("repositories", strings.Join(config.Maven.Repositories, "|")), otlog.String("args", strings.Join(args, ", ")), }}) diff --git a/internal/extsvc/jvmpackages/coursier/observability.go b/internal/extsvc/jvmpackages/coursier/observability.go index 96b80b898d9..5b17316394f 100644 --- a/internal/extsvc/jvmpackages/coursier/observability.go +++ b/internal/extsvc/jvmpackages/coursier/observability.go @@ -3,19 +3,27 @@ package coursier import ( "fmt" "strings" + "sync" + + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/metrics" "github.com/sourcegraph/sourcegraph/internal/observation" + "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) -type Operations struct { +type operations struct { + log.Logger + fetchSources *observation.Operation exists *observation.Operation fetchByteCode *observation.Operation runCommand *observation.Operation } -func NewOperations(observationContext *observation.Context) *Operations { +func newOperations(observationContext *observation.Context) *operations { metrics := metrics.NewREDMetrics( observationContext.Registerer, "codeintel_coursier", @@ -37,10 +45,31 @@ func 
NewOperations(observationContext *observation.Context) *Operations { }) } - return &Operations{ + return &operations{ fetchSources: op("FetchSources"), exists: op("Exists"), fetchByteCode: op("FetchByteCode"), runCommand: op("RunCommand"), + + Logger: observationContext.Logger, } } + +var ( + ops *operations + opsOnce sync.Once +) + +func getOperations() *operations { + opsOnce.Do(func() { + observationContext := &observation.Context{ + Logger: log.Scoped("jvmpackages.coursier", ""), + Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, + Registerer: prometheus.DefaultRegisterer, + } + + ops = newOperations(observationContext) + }) + + return ops +} diff --git a/internal/extsvc/npm/npm.go b/internal/extsvc/npm/npm.go index 65f67416311..ece60f2dfad 100644 --- a/internal/extsvc/npm/npm.go +++ b/internal/extsvc/npm/npm.go @@ -13,16 +13,13 @@ import ( "github.com/inconshreveable/log15" "github.com/opentracing-contrib/go-stdlib/nethttp" - "github.com/opentracing/opentracing-go" otlog "github.com/opentracing/opentracing-go/log" - "github.com/prometheus/client_golang/prometheus" "golang.org/x/time/rate" "github.com/sourcegraph/sourcegraph/internal/conf/reposource" "github.com/sourcegraph/sourcegraph/internal/httpcli" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/ratelimit" - "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/trace/ot" "github.com/sourcegraph/sourcegraph/lib/errors" ) @@ -43,25 +40,15 @@ type Client interface { FetchTarball(ctx context.Context, dep *reposource.NpmDependency) (io.ReadCloser, error) } -var ( - observationContext *observation.Context - operations *Operations -) - func init() { - observationContext = &observation.Context{ - Logger: log15.Root(), - Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, - Registerer: prometheus.DefaultRegisterer, - } - operations = NewOperations(observationContext) - // The HTTP client will 
transparently handle caching, // so we don't need to set up any on-disk caching here. } func FetchSources(ctx context.Context, client Client, dependency *reposource.NpmDependency) (tarball io.ReadCloser, err error) { - ctx, endObservation := operations.fetchSources.With(ctx, &err, observation.Args{LogFields: []otlog.Field{ + operations := getOperations() + + ctx, _, endObservation := operations.fetchSources.With(ctx, &err, observation.Args{LogFields: []otlog.Field{ otlog.String("dependency", dependency.PackageManagerSyntax()), }}) defer endObservation(1, observation.Args{}) diff --git a/internal/extsvc/npm/observability.go b/internal/extsvc/npm/observability.go index 72153812adf..03fa2f854f3 100644 --- a/internal/extsvc/npm/observability.go +++ b/internal/extsvc/npm/observability.go @@ -3,18 +3,24 @@ package npm import ( "fmt" "strings" + "sync" + + "github.com/opentracing/opentracing-go" + "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/metrics" "github.com/sourcegraph/sourcegraph/internal/observation" + "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) -type Operations struct { +type operations struct { fetchSources *observation.Operation exists *observation.Operation runCommand *observation.Operation } -func NewOperations(observationContext *observation.Context) *Operations { +func newOperations(observationContext *observation.Context) *operations { redMetrics := metrics.NewREDMetrics( observationContext.Registerer, "codeintel_npm", @@ -36,9 +42,28 @@ func NewOperations(observationContext *observation.Context) *Operations { }) } - return &Operations{ + return &operations{ fetchSources: op("FetchSources"), exists: op("Exists"), runCommand: op("RunCommand"), } } + +var ( + ops *operations + opsOnce sync.Once +) + +func getOperations() *operations { + opsOnce.Do(func() { + observationContext := &observation.Context{ + Logger: log.Scoped("npm", ""), + Tracer: 
&trace.Tracer{Tracer: opentracing.GlobalTracer()}, + Registerer: prometheus.DefaultRegisterer, + } + + ops = newOperations(observationContext) + }) + + return ops +} diff --git a/internal/gitserver/client.go b/internal/gitserver/client.go index 6f6df60a2fa..d77028047fc 100644 --- a/internal/gitserver/client.go +++ b/internal/gitserver/client.go @@ -718,7 +718,7 @@ var deadlineExceededCounter = promauto.NewCounter(prometheus.CounterOpts{ // and commit pairs. If the invoked callback returns a non-nil error, the operation will begin // to abort processing further results. func (c *ClientImplementor) BatchLog(ctx context.Context, opts BatchLogOptions, callback BatchLogCallback) (err error) { - ctx, endObservation := c.operations.batchLog.With(ctx, &err, observation.Args{LogFields: opts.LogFields()}) + ctx, _, endObservation := c.operations.batchLog.With(ctx, &err, observation.Args{LogFields: opts.LogFields()}) defer endObservation(1, observation.Args{}) // Make a request to a single gitserver shard and feed the results to the user-supplied @@ -728,7 +728,7 @@ func (c *ClientImplementor) BatchLog(ctx context.Context, opts BatchLogOptions, var numProcessed int repoNames := repoNamesFromRepoCommits(repoCommits) - ctx, logger, endObservation := c.operations.batchLogSingle.WithAndLogger(ctx, &err, observation.Args{ + ctx, logger, endObservation := c.operations.batchLogSingle.With(ctx, &err, observation.Args{ LogFields: []log.Field{ log.String("addr", addr), log.Int("numRepos", len(repoNames)), diff --git a/internal/gitserver/observability.go b/internal/gitserver/observability.go index db97c990209..3fc68bcf000 100644 --- a/internal/gitserver/observability.go +++ b/internal/gitserver/observability.go @@ -4,13 +4,13 @@ import ( "fmt" "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/metrics" "github.com/sourcegraph/sourcegraph/internal/observation" 
"github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) type operations struct { @@ -57,7 +57,7 @@ var ( func getOperations() *operations { operationsInstOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("gitserver.client", "gitserver client"), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/goroutine/periodic.go b/internal/goroutine/periodic.go index 720ee02d58e..d291130fc1f 100644 --- a/internal/goroutine/periodic.go +++ b/internal/goroutine/periodic.go @@ -136,7 +136,7 @@ func (r *PeriodicGoroutine) Stop() { func runPeriodicHandler(ctx context.Context, handler Handler, operation *observation.Operation) (_ bool, err error) { if operation != nil { - tmpCtx, endObservation := operation.With(ctx, &err, observation.Args{}) + tmpCtx, _, endObservation := operation.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) ctx = tmpCtx } diff --git a/internal/luasandbox/init.go b/internal/luasandbox/init.go index 818d349df3a..b573d464c11 100644 --- a/internal/luasandbox/init.go +++ b/internal/luasandbox/init.go @@ -3,12 +3,12 @@ package luasandbox import ( "sync" - "github.com/inconshreveable/log15" "github.com/opentracing/opentracing-go" "github.com/prometheus/client_golang/prometheus" "github.com/sourcegraph/sourcegraph/internal/observation" "github.com/sourcegraph/sourcegraph/internal/trace" + "github.com/sourcegraph/sourcegraph/lib/log" ) var ( @@ -19,7 +19,7 @@ var ( func GetService() *Service { svcOnce.Do(func() { observationContext := &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("luasandbox", ""), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: prometheus.DefaultRegisterer, } diff --git a/internal/luasandbox/sandbox.go b/internal/luasandbox/sandbox.go index 93f3a568e05..1864c992f61 100644 --- 
a/internal/luasandbox/sandbox.go +++ b/internal/luasandbox/sandbox.go @@ -32,7 +32,7 @@ func (s *Sandbox) Close() { // RunScript runs the given Lua script text in the sandbox. func (s *Sandbox) RunScript(ctx context.Context, opts RunOptions, script string) (retValue lua.LValue, err error) { - ctx, endObservation := s.operations.runScript.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.runScript.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) return s.RunScriptNamed(ctx, opts, singleScriptFS{script}, "main.lua") @@ -58,7 +58,7 @@ func (fs singleScriptFS) ReadFile(name string) ([]byte, error) { // This method will set the global `loadfile` function so that Lua scripts relative // to the given filesystem can be imported modularly. func (s *Sandbox) RunScriptNamed(ctx context.Context, opts RunOptions, fs FS, name string) (retValue lua.LValue, err error) { - ctx, endObservation := s.operations.runScriptNamed.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.runScriptNamed.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) contents, err := fs.ReadFile(name) @@ -105,7 +105,7 @@ func makeScopedLoadfile(state *lua.LState, fs FS) *lua.LFunction { // Call invokes the given function bound to this sandbox within the sandbox. func (s *Sandbox) Call(ctx context.Context, opts RunOptions, luaFunction *lua.LFunction, args ...interface{}) (retValue lua.LValue, err error) { - ctx, endObservation := s.operations.call.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.call.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) f := func(ctx context.Context, state *lua.LState) error { @@ -130,7 +130,7 @@ func (s *Sandbox) Call(ctx context.Context, opts RunOptions, luaFunction *lua.LF // the caller. This method does not pass values back into the coroutine when resuming // execution. 
func (s *Sandbox) CallGenerator(ctx context.Context, opts RunOptions, luaFunction *lua.LFunction, args ...interface{}) (retValues []lua.LValue, err error) { - ctx, endObservation := s.operations.callGenerator.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.callGenerator.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) f := func(ctx context.Context, state *lua.LState) error { @@ -173,7 +173,7 @@ const DefaultTimeout = time.Millisecond * 200 // RunGoCallback invokes the given Go callback with exclusive access to the state of the // sandbox. func (s *Sandbox) RunGoCallback(ctx context.Context, opts RunOptions, f func(ctx context.Context, state *lua.LState) error) (err error) { - ctx, endObservation := s.operations.runGoCallback.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.runGoCallback.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) s.m.Lock() diff --git a/internal/luasandbox/service.go b/internal/luasandbox/service.go index 3bfa037673e..70aeeee845e 100644 --- a/internal/luasandbox/service.go +++ b/internal/luasandbox/service.go @@ -25,7 +25,7 @@ type CreateOptions struct { } func (s *Service) CreateSandbox(ctx context.Context, opts CreateOptions) (_ *Sandbox, err error) { - _, endObservation := s.operations.createSandbox.With(ctx, &err, observation.Args{}) + _, _, endObservation := s.operations.createSandbox.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) state := lua.NewState(lua.Options{ diff --git a/internal/observation/fields.go b/internal/observation/fields.go new file mode 100644 index 00000000000..cecfbb35c54 --- /dev/null +++ b/internal/observation/fields.go @@ -0,0 +1,17 @@ +package observation + +import ( + otlog "github.com/opentracing/opentracing-go/log" + "go.uber.org/zap" + + "github.com/sourcegraph/sourcegraph/lib/log" +) + +func toLogFields(otFields []otlog.Field) []log.Field { + fields := 
make([]log.Field, len(otFields)) + for i, field := range otFields { + // Allow usage of zap.Any here for ease of interop. + fields[i] = zap.Any(field.Key(), field.Value()) + } + return fields +} diff --git a/internal/observation/observation.go b/internal/observation/observation.go index e42193acf28..50c68fb9a18 100644 --- a/internal/observation/observation.go +++ b/internal/observation/observation.go @@ -16,11 +16,11 @@ // // Sample usage: // -// observationContext := observation.NewContex( -// log15.Root(), -// &trace.Tracer{Tracer: opentracing.GlobalTracer()}, -// prometheus.DefaultRegisterer, -// ) +// observationContext := observation.Context{ +// Logger: log.Scoped("my-scope", "a simple description"), +// Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, +// Registerer: prometheus.DefaultRegisterer, +// } // // metrics := metrics.NewREDMetrics( // observationContext.Registerer, @@ -34,15 +34,25 @@ // Metrics: metrics, // }) // +// // You can log some logs directly using operation - these logs will be structured +// // with context about your operation. +// operation.Info("something happened!", log.String("additional", "context")) +// // function SomeOperation(ctx context.Context) (err error) { // // logs and metrics may be available before or after the operation, so they // // can be supplied either at the start of the operation, or after in the // // defer of endObservation. // -// ctx, endObservation := operation.With(ctx, &err, observation.Args{ /* logs and metrics */ }) +// ctx, trace, endObservation := operation.With(ctx, &err, observation.Args{ /* logs and metrics */ }) // defer func() { endObservation(1, observation.Args{ /* additional logs and metrics */ }) }() // // // ... +// +// // You can log some logs directly from the returned trace - these logs will be +// // structured with the trace ID, trace fields, and observation context. +// trace.Info("I did the thing!", log.Int("things", 3)) +// +// // ... 
// } // // Log fields and metric labels can be supplied at construction of an Operation, at invocation @@ -56,24 +66,25 @@ import ( "fmt" "time" - "github.com/opentracing/opentracing-go/log" + otlog "github.com/opentracing/opentracing-go/log" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/zap" "github.com/sourcegraph/sourcegraph/internal/honey" "github.com/sourcegraph/sourcegraph/internal/hostname" - "github.com/sourcegraph/sourcegraph/internal/logging" "github.com/sourcegraph/sourcegraph/internal/metrics" "github.com/sourcegraph/sourcegraph/internal/sentry" "github.com/sourcegraph/sourcegraph/internal/trace" "github.com/sourcegraph/sourcegraph/internal/version" "github.com/sourcegraph/sourcegraph/lib/errors" + "github.com/sourcegraph/sourcegraph/lib/log" ) // Context carries context about where to send logs, trace spans, and register // metrics. It should be created once on service startup, and passed around to // any location that wants to use it for observing operations. type Context struct { - Logger logging.ErrorLogger + Logger log.Logger Tracer *trace.Tracer Registerer prometheus.Registerer HoneyDataset *honey.Dataset @@ -83,8 +94,6 @@ type Context struct { // TestContext is a behaviorless Context usable for unit tests. var TestContext = Context{Registerer: metrics.TestRegisterer} -var TestOperation *Operation - type ErrorFilterBehaviour uint8 const ( @@ -107,10 +116,12 @@ type Op struct { // format {GroupName}.{OperationName}, where both sections are title cased // (e.g. Store.GetRepoByID). Name string + // Description is a simple description for this Op. + Description string // MetricLabelValues that apply for every invocation of this operation. MetricLabelValues []string // LogFields that apply for for every invocation of this operation. - LogFields []log.Field + LogFields []otlog.Field // ErrorFilter returns true for any error that should be converted to nil // for the purposes of metrics and tracing. 
If this field is not set then // error values are unaltered. @@ -125,6 +136,14 @@ type Op struct { // Operation combines the state of the parent context to create a new operation. This value // should be owned and used by the code that performs the operation it represents. func (c *Context) Operation(args Op) *Operation { + var logger log.Logger + if c.Logger != nil { + // Create a child logger, if a parent is provided. + logger = c.Logger.Scoped(args.Name, args.Description) + } else { + // Create a new logger. + logger = log.Scoped(args.Name, args.Description) + } return &Operation{ context: c, metrics: args.Metrics, @@ -133,10 +152,13 @@ func (c *Context) Operation(args Op) *Operation { metricLabels: args.MetricLabelValues, logFields: args.LogFields, errorFilter: args.ErrorFilter, + + Logger: logger.With(toLogFields(args.LogFields)...), } } -// Operation represents an interesting section of code that can be invoked. +// Operation represents an interesting section of code that can be invoked. It has an +// embedded Logger that can be used directly. type Operation struct { context *Context metrics *metrics.REDMetrics @@ -144,36 +166,47 @@ type Operation struct { name string kebabName string metricLabels []string - logFields []log.Field + logFields []otlog.Field + + // Logger is a logger scoped to this operation. Must not be nil. + log.Logger } -// TraceLogger is returned from WithAndLogger and can be used to add timestamped key and -// value pairs into a related opentracing span. +// TraceLogger is returned from With and can be used to add timestamped key and +// value pairs into a related opentracing span. It has an embedded Logger that can be used +// directly to log messages in the context of a trace. 
type TraceLogger interface { - Log(fields ...log.Field) - Tag(fields ...log.Field) + // Log logs and event with fields to the opentracing.Span as well as the nettrace.Trace, + // and also logs an 'trace.event' log entry at INFO level with the fields, including + // any existing tags and parent observation context. + Log(fields ...otlog.Field) + + // Tag adds fields to the opentracing.Span as tags as well as as logs to the nettrace.Trace. + // + // Tag will add fields to the underlying logger. + Tag(fields ...otlog.Field) + + // Logger is a logger scoped to this trace. + log.Logger } -var TestTraceLogger = &traceLogger{} +// TestTraceLogger creates an empty TraceLogger that can be used for testing. The logger +// should be 'logtest.Scoped(t)'. +func TestTraceLogger(logger log.Logger) TraceLogger { + return &traceLogger{Logger: logger} +} type traceLogger struct { opName string event honey.Event trace *trace.Trace + + log.Logger } -func (t traceLogger) Log(fields ...log.Field) { - if honey.Enabled() { - for _, field := range fields { - t.event.AddField(t.opName+"."+toSnakeCase(field.Key()), field.Value()) - } - } - if t.trace != nil { - t.trace.LogFields(fields...) - } -} - -func (t traceLogger) Tag(fields ...log.Field) { +// initWithTags adds tags to everything except the underlying Logger, which should have +// already have init fields due to being spawned from a parent Logger. +func (t *traceLogger) initWithTags(fields ...otlog.Field) { if honey.Enabled() { for _, field := range fields { t.event.AddField(t.opName+"."+toSnakeCase(field.Key()), field.Value()) @@ -184,6 +217,30 @@ func (t traceLogger) Tag(fields ...log.Field) { } } +func (t *traceLogger) Log(fields ...otlog.Field) { + if honey.Enabled() { + for _, field := range fields { + t.event.AddField(t.opName+"."+toSnakeCase(field.Key()), field.Value()) + } + } + if t.trace != nil { + t.trace.LogFields(fields...) + } + t.Logger.Info("trace.event", toLogFields(fields)...) 
+} + +func (t *traceLogger) Tag(fields ...otlog.Field) { + if honey.Enabled() { + for _, field := range fields { + t.event.AddField(t.opName+"."+toSnakeCase(field.Key()), field.Value()) + } + } + if t.trace != nil { + t.trace.TagFields(fields...) + } + t.Logger = t.Logger.With(toLogFields(fields)...) +} + // FinishFunc is the shape of the function returned by With and should be invoked within // a defer directly before the observed function returns or when a context is cancelled // with OnCancel. @@ -203,12 +260,12 @@ func (f FinishFunc) OnCancel(ctx context.Context, count float64, args Args) { // ErrCollector represents multiple errors and additional log fields that arose from those errors. type ErrCollector struct { errs error - extraFields []log.Field + extraFields []otlog.Field } func NewErrorCollector() *ErrCollector { return &ErrCollector{errs: nil} } -func (e *ErrCollector) Collect(err *error, fields ...log.Field) { +func (e *ErrCollector) Collect(err *error, fields ...otlog.Field) { if err != nil && *err != nil { e.errs = errors.Append(e.errs, *err) e.extraFields = append(e.extraFields, fields...) @@ -227,29 +284,7 @@ type Args struct { // MetricLabelValues that apply only to this invocation of the operation. MetricLabelValues []string // LogFields that apply only to this invocation of the operation. - LogFields []log.Field -} - -// LogFieldMap returns a string-to-interface map containing the contents of this Arg value's -// log fields. -func (args Args) LogFieldMap() map[string]interface{} { - fields := make(map[string]interface{}, len(args.LogFields)) - for _, field := range args.LogFields { - fields[field.Key()] = field.Value() - } - - return fields -} - -// LogFieldPairs returns a slice of key, value, key, value, ... pairs containing the contents -// of this Arg value's log fields. 
-func (args Args) LogFieldPairs() []interface{} { - pairs := make([]interface{}, 0, len(args.LogFields)*2) - for _, field := range args.LogFields { - pairs = append(pairs, field.Key(), field.Value()) - } - - return pairs + LogFields []otlog.Field } // WithErrors prepares the necessary timers, loggers, and metrics to observe the invocation of an @@ -268,7 +303,7 @@ func (op *Operation) WithErrorsAndLogger(ctx context.Context, root *error, args errTracer := NewErrorCollector() err := error(errTracer) - ctx, traceLogger, endObservation := op.WithAndLogger(ctx, &err, args) + ctx, traceLogger, endObservation := op.With(ctx, &err, args) // to avoid recursion stack overflow, we need a new binding endFunc := endObservation @@ -284,18 +319,10 @@ func (op *Operation) WithErrorsAndLogger(ctx context.Context, root *error, args return ctx, errTracer, traceLogger, endFunc } -// With prepares the necessary timers, loggers, and metrics to observe the invocation of an -// operation. This method returns a modified context and a function to be deferred until the -// end of the operation. -func (op *Operation) With(ctx context.Context, err *error, args Args) (context.Context, FinishFunc) { - ctx, _, endObservation := op.WithAndLogger(ctx, err, args) - return ctx, endObservation -} - -// WithAndLogger prepares the necessary timers, loggers, and metrics to observe the invocation +// With prepares the necessary timers, loggers, and metrics to observe the invocation // of an operation. This method returns a modified context, a function that will add a log field // to the active trace, and a function to be deferred until the end of the operation. 
-func (op *Operation) WithAndLogger(ctx context.Context, err *error, args Args) (context.Context, TraceLogger, FinishFunc) { +func (op *Operation) With(ctx context.Context, err *error, args Args) (context.Context, TraceLogger, FinishFunc) { start := time.Now() tr, ctx := op.startTrace(ctx) @@ -309,25 +336,29 @@ func (op *Operation) WithAndLogger(ctx context.Context, err *error, args Args) ( }) } - logFields := traceLogger{ + logger := op.Logger.With(toLogFields(args.LogFields)...) + + if traceContext := trace.Context(ctx); traceContext != nil { + event.AddField("traceID", traceContext.TraceID) + logger = logger.WithTrace(*traceContext) + } + + trLogger := traceLogger{ opName: snakecaseOpName, event: event, trace: tr, + Logger: logger, } if mergedFields := mergeLogFields(op.logFields, args.LogFields); len(mergedFields) > 0 { - logFields.Tag(mergedFields...) + trLogger.initWithTags(mergedFields...) } - if traceID := trace.ID(ctx); traceID != "" { - event.AddField("traceID", traceID) - } - - return ctx, logFields, func(count float64, finishArgs Args) { + return ctx, &trLogger, func(count float64, finishArgs Args) { since := time.Since(start) elapsed := since.Seconds() elapsedMs := since.Milliseconds() - defaultFinishFields := []log.Field{log.Float64("count", count), log.Float64("elapsed", elapsed)} + defaultFinishFields := []otlog.Field{otlog.Float64("count", count), otlog.Float64("elapsed", elapsed)} logFields := mergeLogFields(defaultFinishFields, finishArgs.LogFields, args.LogFields) metricLabels := mergeLabels(op.metricLabels, args.MetricLabelValues, finishArgs.MetricLabelValues) @@ -368,20 +399,16 @@ func (op *Operation) startTrace(ctx context.Context) (*trace.Trace, context.Cont // as well as all of the log fields attached ot the operation, the args to With, and the args // to the finish function. This does nothing if the no logger was supplied on the observation // context. 
-func (op *Operation) emitErrorLogs(err *error, logFields []log.Field) { - if op.context.Logger == nil { +func (op *Operation) emitErrorLogs(err *error, logFields []otlog.Field) { + if op.Logger == nil || err == nil || *err == nil { return } - var kvs []interface{} - for _, field := range logFields { - kvs = append(kvs, field.Key(), field.Value()) - } - - logging.Log(op.context.Logger, op.name, err, kvs...) + fields := append(toLogFields(logFields), zap.Error(*err)) + op.Logger.Error(op.name, fields...) } -func (op *Operation) emitHoneyEvent(err *error, opName string, event honey.Event, logFields []log.Field, duration int64) { +func (op *Operation) emitHoneyEvent(err *error, opName string, event honey.Event, logFields []otlog.Field, duration int64) { if err != nil && *err != nil { event.AddField("error", (*err).Error()) } @@ -396,7 +423,7 @@ func (op *Operation) emitHoneyEvent(err *error, opName string, event honey.Event } // emitSentryError will send errors to Sentry. -func (op *Operation) emitSentryError(err *error, logFields []log.Field) { +func (op *Operation) emitSentryError(err *error, logFields []otlog.Field) { if err == nil || *err == nil { return } @@ -428,7 +455,7 @@ func (op *Operation) emitMetrics(err *error, count, elapsed float64, labels []st // finishTrace will set the error value, log additional fields supplied after the operation's // execution, and finalize the trace span. This does nothing if no trace was constructed at // the start of the operation. -func (op *Operation) finishTrace(err *error, tr *trace.Trace, logFields []log.Field) { +func (op *Operation) finishTrace(err *error, tr *trace.Trace, logFields []otlog.Field) { if tr == nil { return } @@ -467,13 +494,13 @@ func mergeLabels(groups ...[]string) []string { } // mergeLogFields flattens slices of slices of log fields. 
-func mergeLogFields(groups ...[]log.Field) []log.Field { +func mergeLogFields(groups ...[]otlog.Field) []otlog.Field { size := 0 for _, group := range groups { size += len(group) } - logFields := make([]log.Field, 0, size) + logFields := make([]otlog.Field, 0, size) for _, group := range groups { logFields = append(logFields, group...) } diff --git a/internal/observation/util.go b/internal/observation/util.go index 4f940996205..ec13e33ecef 100644 --- a/internal/observation/util.go +++ b/internal/observation/util.go @@ -27,7 +27,6 @@ func init() { } acronymsReplacer = strings.NewReplacer(pairs...) - TestOperation = TestContext.Operation(Op{Name: "test.context"}) } // kebab transforms a string into lower-kebab-case. diff --git a/internal/oobmigration/runner.go b/internal/oobmigration/runner.go index cb4b06341a4..097dd2bd2a6 100644 --- a/internal/oobmigration/runner.go +++ b/internal/oobmigration/runner.go @@ -370,7 +370,7 @@ func updateProgress(ctx context.Context, store storeIface, migration *Migration, } func runMigrationUp(ctx context.Context, migration *Migration, migrator Migrator, operations *operations) (err error) { - ctx, endObservation := operations.upForMigration(migration.ID).With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := operations.upForMigration(migration.ID).With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("migrationID", migration.ID), }}) defer endObservation(1, observation.Args{}) @@ -380,7 +380,7 @@ func runMigrationUp(ctx context.Context, migration *Migration, migrator Migrator } func runMigrationDown(ctx context.Context, migration *Migration, migrator Migrator, operations *operations) (err error) { - ctx, endObservation := operations.downForMigration(migration.ID).With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := operations.downForMigration(migration.ID).With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("migrationID", migration.ID), }}) defer 
endObservation(1, observation.Args{}) diff --git a/internal/repos/sync_worker.go b/internal/repos/sync_worker.go index 6432ae943c5..a7dcb9c6033 100644 --- a/internal/repos/sync_worker.go +++ b/internal/repos/sync_worker.go @@ -19,6 +19,7 @@ import ( "github.com/sourcegraph/sourcegraph/internal/workerutil" "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker" "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store" + "github.com/sourcegraph/sourcegraph/lib/log" ) type SyncWorkerOptions struct { @@ -101,7 +102,7 @@ func newWorkerMetrics(r prometheus.Registerer) workerutil.WorkerMetrics { observationContext = &observation.TestContext } else { observationContext = &observation.Context{ - Logger: log15.Root(), + Logger: log.Scoped("sync_worker", ""), Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()}, Registerer: r, } diff --git a/internal/trace/traceutil.go b/internal/trace/traceutil.go index 58b64c1ee7f..bc6a63a4da4 100644 --- a/internal/trace/traceutil.go +++ b/internal/trace/traceutil.go @@ -19,9 +19,11 @@ import ( "github.com/sourcegraph/sourcegraph/internal/trace/ot" "github.com/sourcegraph/sourcegraph/internal/tracer" "github.com/sourcegraph/sourcegraph/lib/errors" + "github.com/sourcegraph/sourcegraph/lib/log/otfields" ) -// ID returns a trace ID, if any, found in the given context. +// ID returns a trace ID, if any, found in the given context. If you need both trace and +// span ID, use trace.Context. func ID(ctx context.Context) string { span := opentracing.SpanFromContext(ctx) if span == nil { @@ -32,15 +34,43 @@ func ID(ctx context.Context) string { // IDFromSpan returns a trace ID, if any, found in the given span. func IDFromSpan(span opentracing.Span) string { + traceCtx := ContextFromSpan(span) + if traceCtx == nil { + return "" + } + return traceCtx.TraceID +} + +// Context retrieves the full trace context, if any, from context - this includes +// both TraceID and SpanID. 
+func Context(ctx context.Context) *otfields.TraceContext { + span := opentracing.SpanFromContext(ctx) + if span == nil { + return nil + } + return ContextFromSpan(span) +} + +// ContextFromSpan retrieves the full trace context, if any, from the span - this includes +// both TraceID and SpanID. +func ContextFromSpan(span opentracing.Span) *otfields.TraceContext { ddctx, ok := span.Context().(ddtrace.SpanContext) if ok { - return strconv.FormatUint(ddctx.TraceID(), 10) + return &otfields.TraceContext{ + TraceID: strconv.FormatUint(ddctx.TraceID(), 10), + SpanID: strconv.FormatUint(ddctx.SpanID(), 10), + } } + spanCtx, ok := span.Context().(jaeger.SpanContext) if ok { - return spanCtx.TraceID().String() + return &otfields.TraceContext{ + TraceID: spanCtx.TraceID().String(), + SpanID: spanCtx.SpanID().String(), + } } - return "" + + return nil } // URL returns a trace URL for the given trace ID at the given external URL. diff --git a/internal/uploadstore/gcs_client.go b/internal/uploadstore/gcs_client.go index 50ec6e11955..622ff058ca4 100644 --- a/internal/uploadstore/gcs_client.go +++ b/internal/uploadstore/gcs_client.go @@ -81,7 +81,7 @@ func (s *gcsStore) Init(ctx context.Context) error { } func (s *gcsStore) Get(ctx context.Context, key string) (_ io.ReadCloser, err error) { - ctx, endObservation := s.operations.Get.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.Get.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("key", key), }}) defer endObservation(1, observation.Args{}) @@ -95,7 +95,7 @@ func (s *gcsStore) Get(ctx context.Context, key string) (_ io.ReadCloser, err er } func (s *gcsStore) Upload(ctx context.Context, key string, r io.Reader) (_ int64, err error) { - ctx, endObservation := s.operations.Upload.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.Upload.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("key", key), }}) defer
endObservation(1, observation.Args{}) @@ -121,7 +121,7 @@ func (s *gcsStore) Upload(ctx context.Context, key string, r io.Reader) (_ int64 } func (s *gcsStore) Compose(ctx context.Context, destination string, sources ...string) (_ int64, err error) { - ctx, endObservation := s.operations.Compose.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.Compose.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("destination", destination), log.String("sources", strings.Join(sources, ", ")), }}) @@ -152,7 +152,7 @@ func (s *gcsStore) Compose(ctx context.Context, destination string, sources ...s } func (s *gcsStore) Delete(ctx context.Context, key string) (err error) { - ctx, endObservation := s.operations.Delete.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.Delete.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("key", key), }}) defer endObservation(1, observation.Args{}) diff --git a/internal/uploadstore/s3_client.go b/internal/uploadstore/s3_client.go index d17f2dbf166..02ef5ac64f5 100644 --- a/internal/uploadstore/s3_client.go +++ b/internal/uploadstore/s3_client.go @@ -91,7 +91,7 @@ const maxZeroReads = 3 var errNoDownloadProgress = errors.New("no download progress") func (s *s3Store) Get(ctx context.Context, key string) (_ io.ReadCloser, err error) { - ctx, endObservation := s.operations.Get.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.Get.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("key", key), }}) defer endObservation(1, observation.Args{}) @@ -150,7 +150,7 @@ func (s *s3Store) readObjectInto(ctx context.Context, w io.Writer, key string, b } func (s *s3Store) Upload(ctx context.Context, key string, r io.Reader) (_ int64, err error) { - ctx, endObservation := s.operations.Upload.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation 
:= s.operations.Upload.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("key", key), }}) defer endObservation(1, observation.Args{}) @@ -169,7 +169,7 @@ func (s *s3Store) Upload(ctx context.Context, key string, r io.Reader) (_ int64, } func (s *s3Store) Compose(ctx context.Context, destination string, sources ...string) (_ int64, err error) { - ctx, endObservation := s.operations.Compose.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.Compose.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("destination", destination), log.String("sources", strings.Join(sources, ", ")), }}) @@ -258,7 +258,7 @@ func (s *s3Store) Compose(ctx context.Context, destination string, sources ...st } func (s *s3Store) Delete(ctx context.Context, key string) (err error) { - ctx, endObservation := s.operations.Delete.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.Delete.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.String("key", key), }}) defer endObservation(1, observation.Args{}) diff --git a/internal/workerutil/dbworker/store/store.go b/internal/workerutil/dbworker/store/store.go index ca2a7c459d6..59fd8ee9fab 100644 --- a/internal/workerutil/dbworker/store/store.go +++ b/internal/workerutil/dbworker/store/store.go @@ -362,7 +362,7 @@ var columnNames = []string{ // QueuedCount returns the number of queued records matching the given conditions. func (s *store) QueuedCount(ctx context.Context, includeProcessing bool, conditions []*sqlf.Query) (_ int, err error) { - ctx, endObservation := s.operations.queuedCount.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.queuedCount.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) stateQueries := make([]*sqlf.Query, 0, 2) @@ -400,7 +400,7 @@ SELECT COUNT(*) FROM %s WHERE ( // // See https://github.com/sourcegraph/sourcegraph/issues/32624. 
func (s *store) MaxDurationInQueue(ctx context.Context) (_ time.Duration, err error) { - ctx, endObservation := s.operations.maxDurationInQueue.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.maxDurationInQueue.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) now := s.now() @@ -488,7 +488,7 @@ var columnsUpdatedByDequeue = []string{ // // The supplied conditions may use the alias provided in `ViewName`, if one was supplied. func (s *store) Dequeue(ctx context.Context, workerHostname string, conditions []*sqlf.Query) (_ workerutil.Record, _ bool, err error) { - ctx, trace, endObservation := s.operations.dequeue.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.dequeue.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) if s.InTransaction() { @@ -616,7 +616,7 @@ func (s *store) makeDequeueUpdateStatements(updatedColumns map[string]*sqlf.Quer } func (s *store) Heartbeat(ctx context.Context, ids []int, options HeartbeatOptions) (knownIDs []int, err error) { - ctx, endObservation := s.operations.heartbeat.With(ctx, &err, observation.Args{}) + ctx, _, endObservation := s.operations.heartbeat.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) if len(ids) == 0 { @@ -693,7 +693,7 @@ RETURNING {id} // Requeue updates the state of the record with the given identifier to queued and adds a processing delay before // the next dequeue of this record can be performed. func (s *store) Requeue(ctx context.Context, id int, after time.Time) (err error) { - ctx, endObservation := s.operations.requeue.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.requeue.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), log.String("after", after.String()), }}) @@ -722,7 +722,7 @@ WHERE {id} = %s // used with UpdateExecutionLogEntry) and a possible error. 
When the record is not found (due to options not matching // or the record being deleted), ErrExecutionLogEntryNotUpdated is returned. func (s *store) AddExecutionLogEntry(ctx context.Context, id int, entry workerutil.ExecutionLogEntry, options ExecutionLogEntryOptions) (entryID int, err error) { - ctx, endObservation := s.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -773,7 +773,7 @@ RETURNING array_length({execution_logs}, 1) // UpdateExecutionLogEntry updates the executor log entry with the given ID on the given record. When the record is not // found (due to options not matching or the record being deleted), ErrExecutionLogEntryNotUpdated is returned. func (s *store) UpdateExecutionLogEntry(ctx context.Context, recordID, entryID int, entry workerutil.ExecutionLogEntry, options ExecutionLogEntryOptions) (err error) { - ctx, endObservation := s.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("recordID", recordID), log.Int("entryID", entryID), }}) @@ -831,7 +831,7 @@ RETURNING // the processing state to a terminal state, this method will have no effect. This method returns a boolean flag // indicating if the record was updated. 
func (s *store) MarkComplete(ctx context.Context, id int, options MarkFinalOptions) (_ bool, err error) { - ctx, endObservation := s.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -858,7 +858,7 @@ RETURNING {id} // if the current state of the record is processing. A requeued record or a record already marked with an // error will not be updated. This method returns a boolean flag indicating if the record was updated. func (s *store) MarkErrored(ctx context.Context, id int, failureMessage string, options MarkFinalOptions) (_ bool, err error) { - ctx, endObservation := s.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -889,7 +889,7 @@ RETURNING {id} // if the current state of the record is processing. A requeued record or a record already marked with an // error will not be updated. This method returns a boolean flag indicating if the record was updated. func (s *store) MarkFailed(ctx context.Context, id int, failureMessage string, options MarkFinalOptions) (_ bool, err error) { - ctx, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{ + ctx, _, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{ log.Int("id", id), }}) defer endObservation(1, observation.Args{}) @@ -924,7 +924,7 @@ const defaultResetFailureMessage = "job processor died while handling this messa // identifiers the age of the record's last heartbeat timestamp for each record reset to queued and failed states, // respectively. 
func (s *store) ResetStalled(ctx context.Context) (resetLastHeartbeatsByIDs, failedLastHeartbeatsByIDs map[int]time.Duration, err error) { - ctx, trace, endObservation := s.operations.resetStalled.WithAndLogger(ctx, &err, observation.Args{}) + ctx, trace, endObservation := s.operations.resetStalled.With(ctx, &err, observation.Args{}) defer endObservation(1, observation.Args{}) now := s.now() diff --git a/internal/workerutil/worker.go b/internal/workerutil/worker.go index 079fae2b27b..5ca42e7ed1c 100644 --- a/internal/workerutil/worker.go +++ b/internal/workerutil/worker.go @@ -277,7 +277,7 @@ func (w *Worker) dequeueAndHandle() (dequeued bool, err error) { logger.Info("Dequeued record for processing", "name", w.options.Name, "id", record.RecordID(), "traceID", trace.IDFromSpan(workerSpan)) if hook, ok := w.handler.(WithHooks); ok { - preCtx, endObservation := w.options.Metrics.operations.preHandle.With(handleCtx, nil, observation.Args{}) + preCtx, _, endObservation := w.options.Metrics.operations.preHandle.With(handleCtx, nil, observation.Args{}) hook.PreHandle(preCtx, record) endObservation(1, observation.Args{}) } @@ -291,7 +291,7 @@ func (w *Worker) dequeueAndHandle() (dequeued bool, err error) { // this worker anymore at this point. Tracing hierarchy is still correct, // as handleCtx used in preHandle/handle is at the same level as // workerCtxWithSpan - postCtx, endObservation := w.options.Metrics.operations.postHandle.With(workerCtxWithSpan, nil, observation.Args{}) + postCtx, _, endObservation := w.options.Metrics.operations.postHandle.With(workerCtxWithSpan, nil, observation.Args{}) defer endObservation(1, observation.Args{}) hook.PostHandle(postCtx, record) } @@ -316,7 +316,7 @@ func (w *Worker) dequeueAndHandle() (dequeued bool, err error) { // handle processes the given record. This method returns an error only if there is an issue updating // the record to a terminal state - no handler errors will bubble up. 
func (w *Worker) handle(ctx, workerContext context.Context, record Record) (err error) { - ctx, endOperation := w.options.Metrics.operations.handle.With(ctx, &err, observation.Args{}) + ctx, _, endOperation := w.options.Metrics.operations.handle.With(ctx, &err, observation.Args{}) defer endOperation(1, observation.Args{}) // If a maximum runtime is configured, set a deadline on the handle context.