logger: update log lib and remove use of description (#57690)

* log: remove use of description parameter in Scoped

* temporarily point to sglog branch

* bazel configure + gazelle

* remove additional use of description param

* use latest versions of zoekt,log,mountinfo

* go.mod
This commit is contained in:
William Bezuidenhout 2023-10-18 16:29:08 +01:00 committed by GitHub
parent 8f48c6cd83
commit 1ae6cc6bfd
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
352 changed files with 607 additions and 595 deletions

View File

@ -189,7 +189,7 @@ func (a *Actor) Limiter(
}
return &concurrencyLimiter{
logger: logger.Scoped("concurrency", "concurrency limiter"),
logger: logger.Scoped("concurrency"),
actor: a,
feature: feature,
redis: limiter.NewPrefixRedisStore(fmt.Sprintf("concurrent:%s", featurePrefix), redis),

View File

@ -45,7 +45,7 @@ var _ actor.Source = &Source{}
func NewSource(logger log.Logger, cache httpcache.Cache, dotComClient graphql.Client, concurrencyConfig codygateway.ActorConcurrencyLimitConfig) *Source {
return &Source{
log: logger.Scoped("dotcomuser", "dotcom user actor source"),
log: logger.Scoped("dotcomuser"),
cache: cache,
dotcom: dotComClient,
concurrencyConfig: concurrencyConfig,

View File

@ -56,7 +56,7 @@ var _ actor.SourceSyncer = &Source{}
func NewSource(logger log.Logger, cache httpcache.Cache, dotcomClient graphql.Client, internalMode bool, concurrencyConfig codygateway.ActorConcurrencyLimitConfig) *Source {
return &Source{
log: logger.Scoped("productsubscriptions", "product subscription actor source"),
log: logger.Scoped("productsubscriptions"),
cache: cache,
dotcom: dotcomClient,

View File

@ -150,16 +150,16 @@ func (s *Sources) SyncAll(ctx context.Context, logger log.Logger) error {
// at a regular interval. It uses a redsync.Mutex to ensure only one worker is running
// at a time.
func (s *Sources) Worker(obCtx *observation.Context, rmux *redsync.Mutex, rootInterval time.Duration) goroutine.BackgroundRoutine {
logger := obCtx.Logger.Scoped("sources.worker", "sources background routie")
logger := obCtx.Logger.Scoped("sources.worker")
return &redisLockedBackgroundRoutine{
logger: logger.Scoped("redisLock", "distributed lock layer for sources sync"),
logger: logger.Scoped("redisLock"),
rmux: rmux,
routine: goroutine.NewPeriodicGoroutine(
context.Background(),
&sourcesSyncHandler{
logger: logger.Scoped("handler", "handler for actor sources sync"),
logger: logger.Scoped("handler"),
rmux: rmux,
sources: s,
syncInterval: rootInterval,

View File

@ -65,7 +65,7 @@ func defaultWorkers(bufferSize, workerCount int) int {
// goroutine.BackgroundRoutine that must be started.
func NewBufferedLogger(logger log.Logger, handler Logger, bufferSize, workerCount int) *BufferedLogger {
return &BufferedLogger{
log: logger.Scoped("bufferedLogger", "buffered events logger"),
log: logger.Scoped("bufferedLogger"),
handler: handler,

View File

@ -138,7 +138,7 @@ func NewStdoutLogger(logger log.Logger) Logger {
// demo tracing in dev.
return &instrumentedLogger{
Scope: "stdoutLogger",
Logger: &stdoutLogger{logger: logger.Scoped("events", "event logger")},
Logger: &stdoutLogger{logger: logger.Scoped("events")},
}
}

View File

@ -98,7 +98,7 @@ func makeUpstreamHandler[ReqT UpstreamRequest](
// response.
defaultRetryAfterSeconds int,
) http.Handler {
baseLogger = baseLogger.Scoped(upstreamName, fmt.Sprintf("%s upstream handler", upstreamName)).
baseLogger = baseLogger.Scoped(upstreamName).
With(log.String("upstream.url", upstreamAPIURL))
// Convert allowedModels to the Cody Gateway configuration format with the

View File

@ -27,7 +27,7 @@ import (
// we do a simple auth on a static secret instead that is uniquely generated per
// deployment.
func NewDiagnosticsHandler(baseLogger log.Logger, next http.Handler, secret string, sources *actor.Sources) http.Handler {
baseLogger = baseLogger.Scoped("diagnostics", "healthz checks")
baseLogger = baseLogger.Scoped("diagnostics")
hasValidSecret := func(l log.Logger, w http.ResponseWriter, r *http.Request) (yes bool) {
token, err := authbearer.ExtractBearer(r.Header)

View File

@ -34,7 +34,7 @@ func NewHandler(
mf ModelFactory,
allowedModels []string,
) http.Handler {
baseLogger = baseLogger.Scoped("embeddingshandler", "The HTTP API handler for the embeddings endpoint.")
baseLogger = baseLogger.Scoped("embeddingshandler")
return featurelimiter.HandleFeature(
baseLogger,

View File

@ -53,7 +53,7 @@ func NewSlackRateLimitNotifier(
slackWebhookURL string,
slackSender func(ctx context.Context, url string, msg *slack.WebhookMessage) error,
) RateLimitNotifier {
baseLogger = baseLogger.Scoped("slackRateLimitNotifier", "notifications for usage rate limit approaching thresholds")
baseLogger = baseLogger.Scoped("slackRateLimitNotifier")
return func(ctx context.Context, actor codygateway.Actor, feature codygateway.Feature, usageRatio float32, ttl time.Duration) {
thresholds := actorSourceThresholds.Get(actor.GetSource())

View File

@ -112,7 +112,7 @@ func Main(ctx context.Context, obctx *observation.Context, ready service.ReadyFu
}
authr := &auth.Authenticator{
Logger: obctx.Logger.Scoped("auth", "authentication middleware"),
Logger: obctx.Logger.Scoped("auth"),
EventLogger: eventLogger,
Sources: sources,
}
@ -257,14 +257,14 @@ func initOpenTelemetry(ctx context.Context, logger log.Logger, config OpenTeleme
// Enable tracing, at this point tracing wouldn't have been enabled yet because
// we run Cody Gateway without conf which means Sourcegraph tracing is not enabled.
shutdownTracing, err := maybeEnableTracing(ctx,
logger.Scoped("tracing", "OpenTelemetry tracing"),
logger.Scoped("tracing"),
config, res)
if err != nil {
return nil, errors.Wrap(err, "maybeEnableTracing")
}
shutdownMetrics, err := maybeEnableMetrics(ctx,
logger.Scoped("metrics", "OpenTelemetry metrics"),
logger.Scoped("metrics"),
config, res)
if err != nil {
return nil, errors.Wrap(err, "maybeEnableMetrics")

View File

@ -28,7 +28,7 @@ var _ files.Store = &Client{}
// New creates a new Client based on the provided Options.
func New(observationCtx *observation.Context, options apiclient.BaseClientOptions) (*Client, error) {
logger := log.Scoped("executor-api-files-client", "The API client adapter for executors to interact with the Files over HTTP")
logger := log.Scoped("executor-api-files-client")
client, err := apiclient.NewBaseClient(logger, options)
if err != nil {
return nil, err

View File

@ -42,7 +42,7 @@ var _ workerutil.Store[types.Job] = &Client{}
var _ cmdlogger.ExecutionLogEntryStore = &Client{}
func New(observationCtx *observation.Context, options Options, metricsGatherer prometheus.Gatherer) (*Client, error) {
logger := log.Scoped("executor-api-queue-client", "The API client adapter for executors to use dbworkers over HTTP")
logger := log.Scoped("executor-api-queue-client")
client, err := apiclient.NewBaseClient(logger, options.BaseClientOptions)
if err != nil {
return nil, err

View File

@ -30,7 +30,7 @@ func StandaloneRun(ctx context.Context, runner util.CmdRunner, logger log.Logger
return err
}
logger = log.Scoped("service", "executor service")
logger = log.Scoped("service")
// Initialize tracing/metrics
observationCtx := observation.NewContext(logger)
@ -103,7 +103,7 @@ func StandaloneRun(ctx context.Context, runner util.CmdRunner, logger log.Logger
if cfg.UseFirecracker {
routines = append(routines, janitor.NewOrphanedVMJanitor(
log.Scoped("orphaned-vm-janitor", "deletes VMs from a previous executor instance"),
log.Scoped("orphaned-vm-janitor"),
cfg.VMPrefix,
nameSet,
cfg.CleanupTaskInterval,

View File

@ -71,7 +71,7 @@ func createVM(ctx context.Context, cmdRunner util.CmdRunner, config *config.Conf
cmd := &command.RealCommand{
CmdRunner: cmdRunner,
Logger: log.Scoped("executor-test-vm", ""),
Logger: log.Scoped("executor-test-vm"),
}
firecrackerWorkspace, err := workspace.NewFirecrackerWorkspace(
ctx,

View File

@ -268,7 +268,7 @@ func kubernetesOptions(c *config.Config) runner.KubernetesOptions {
}
func makeWorkerMetrics(queueName string) workerutil.WorkerObservability {
observationCtx := observation.NewContext(log.Scoped("executor_processor", "executor worker processor"))
observationCtx := observation.NewContext(log.Scoped("executor_processor"))
return workerutil.NewMetrics(observationCtx, "executor_processor", workerutil.WithSampler(func(job workerutil.Record) bool { return true }),
// derived from historic data, ideally we will use spare high-res histograms once they're a reality

View File

@ -43,7 +43,7 @@ func NewDockerRunner(
return &dockerRunner{
cmd: cmd,
dir: dir,
internalLogger: log.Scoped("docker-runner", ""),
internalLogger: log.Scoped("docker-runner"),
commandLogger: logger,
options: options,
dockerAuthConfig: actualDockerAuthConfig,

View File

@ -81,7 +81,7 @@ func NewFirecrackerRunner(
cmd: cmd,
vmName: vmName,
workspaceDevice: workspaceDevice,
internalLogger: log.Scoped("firecracker-runner", ""),
internalLogger: log.Scoped("firecracker-runner"),
cmdLogger: logger,
options: options,
dockerAuthConfig: actualDockerAuthConfig,

View File

@ -47,7 +47,7 @@ func NewKubernetesRunner(
options command.KubernetesContainerOptions,
) Runner {
return &kubernetesRunner{
internalLogger: log.Scoped("kubernetes-runner", ""),
internalLogger: log.Scoped("kubernetes-runner"),
commandLogger: commandLogger,
cmd: cmd,
dir: dir,

View File

@ -33,7 +33,7 @@ func NewShellRunner(
return &shellRunner{
cmd: cmd,
dir: dir,
internalLogger: log.Scoped("shell-runner", ""),
internalLogger: log.Scoped("shell-runner"),
commandLogger: logger,
options: options,
}

View File

@ -77,9 +77,9 @@ type Options struct {
// NewWorker creates a worker that polls a remote job queue API for work.
func NewWorker(observationCtx *observation.Context, nameSet *janitor.NameSet, options Options) (goroutine.WaitableBackgroundRoutine, error) {
observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("worker", "background worker task periodically fetching jobs"), observationCtx)
observationCtx = observation.ContextWithLogger(observationCtx.Logger.Scoped("worker"), observationCtx)
gatherer := metrics.MakeExecutorMetricsGatherer(log.Scoped("executor-worker.metrics-gatherer", ""), prometheus.DefaultGatherer, options.NodeExporterEndpoint, options.DockerRegistryNodeExporterEndpoint)
gatherer := metrics.MakeExecutorMetricsGatherer(log.Scoped("executor-worker.metrics-gatherer"), prometheus.DefaultGatherer, options.NodeExporterEndpoint, options.DockerRegistryNodeExporterEndpoint)
queueClient, err := queue.New(observationCtx, options.QueueOptions, gatherer)
if err != nil {
return nil, errors.Wrap(err, "building queue worker client")
@ -105,7 +105,7 @@ func NewWorker(observationCtx *observation.Context, nameSet *janitor.NameSet, op
cmdRunner := &util.RealCmdRunner{}
cmd := &command.RealCommand{
CmdRunner: cmdRunner,
Logger: log.Scoped("executor-worker.command", "command execution"),
Logger: log.Scoped("executor-worker.command"),
}
// Configure the supported runtimes

View File

@ -33,7 +33,7 @@ func main() {
})
defer liblog.Sync()
logger := log.Scoped("executor", "the executor service polls the public Sourcegraph frontend API for work to perform")
logger := log.Scoped("executor")
runner := &util.RealCmdRunner{}

View File

@ -67,7 +67,7 @@ func GetAndSaveUser(ctx context.Context, db database.DB, op GetAndSaveUserOp) (n
externalAccountsStore := db.UserExternalAccounts()
users := db.Users()
logger := sglog.Scoped("authGetAndSaveUser", "get and save user authenticated by external providers")
logger := sglog.Scoped("authGetAndSaveUser")
acct := &extsvc.Account{
AccountSpec: op.ExternalAccount,

View File

@ -44,14 +44,14 @@ type externalServices struct {
func NewExternalServices(logger log.Logger, db database.DB) ExternalServicesService {
return &externalServices{
logger: logger.Scoped("ExternalServices", "service related to external service functionality"),
logger: logger.Scoped("ExternalServices"),
db: db,
}
}
func NewMockExternalServices(logger log.Logger, db database.DB, mockSourcer internalrepos.Sourcer) ExternalServicesService {
return &externalServices{
logger: logger.Scoped("ExternalServices", "service related to external service functionality"),
logger: logger.Scoped("ExternalServices"),
db: db,
mockSourcer: mockSourcer,
}
@ -63,7 +63,7 @@ func (e *externalServices) ValidateConnection(ctx context.Context, svc *types.Ex
ctx, cancel := context.WithTimeout(ctx, validateConnectionTimeout)
defer cancel()
genericSourcer := newGenericSourcer(log.Scoped("externalservice.validateconnection", ""), e.db)
genericSourcer := newGenericSourcer(log.Scoped("externalservice.validateconnection"), e.db)
genericSrc, err := genericSourcer(ctx, svc)
if err != nil {
if ctx.Err() != nil && ctx.Err() == context.DeadlineExceeded {
@ -130,7 +130,7 @@ func (e *externalServices) ListNamespaces(ctx context.Context, externalServiceID
return nil, err
}
} else {
genericSourcer := newGenericSourcer(log.Scoped("externalservice.namespacediscovery", ""), e.db)
genericSourcer := newGenericSourcer(log.Scoped("externalservice.namespacediscovery"), e.db)
genericSrc, err = genericSourcer(ctx, externalSvc)
if err != nil {
return nil, err
@ -199,7 +199,7 @@ func (e *externalServices) DiscoverRepos(ctx context.Context, externalServiceID
return nil, err
}
} else {
genericSourcer := newGenericSourcer(log.Scoped("externalservice.repodiscovery", ""), e.db)
genericSourcer := newGenericSourcer(log.Scoped("externalservice.repodiscovery"), e.db)
genericSrc, err = genericSourcer(ctx, externalSvc)
if err != nil {
return nil, err
@ -260,7 +260,7 @@ func (e *externalServices) ExcludeRepoFromExternalServices(ctx context.Context,
return err
}
logger := e.logger.Scoped("ExcludeRepoFromExternalServices", "excluding a repo from external service config").With(log.Int32("repoID", int32(repoID)))
logger := e.logger.Scoped("ExcludeRepoFromExternalServices").With(log.Int32("repoID", int32(repoID)))
for _, extSvcID := range externalServiceIDs {
logger = logger.With(log.Int64("externalServiceID", extSvcID))
}
@ -436,8 +436,8 @@ func schemaContainsExclusion[T comparable](exclusions []*T, newExclusion *T) boo
func newGenericSourcer(logger log.Logger, db database.DB) internalrepos.Sourcer {
// We use the generic sourcer that doesn't have observability attached to it here because the way externalServiceValidate is set up,
// using the regular sourcer will cause a large dump of errors to be logged when it exits ListRepos prematurely.
sourcerLogger := logger.Scoped("repos.Sourcer", "repositories source")
db = database.NewDBWith(sourcerLogger.Scoped("db", "sourcer database"), db)
sourcerLogger := logger.Scoped("repos.Sourcer")
db = database.NewDBWith(sourcerLogger.Scoped("db"), db)
dependenciesService := dependencies.NewService(observation.NewContext(logger), db)
cf := httpcli.NewExternalClientFactory(httpcli.NewLoggingMiddleware(sourcerLogger))
return internalrepos.NewSourcer(sourcerLogger, db, cf, internalrepos.WithDependenciesService(dependenciesService))

View File

@ -40,7 +40,7 @@ func InventoryContext(logger log.Logger, repo api.RepoName, gsClient gitserver.C
return info.OID().String()
}
logger = logger.Scoped("InventoryContext", "returns the inventory context for computing the inventory for the repository at the given commit").
logger = logger.Scoped("InventoryContext").
With(log.String("repo", string(repo)), log.String("commitID", string(commitID)))
invCtx := inventory.Context{
ReadTree: func(ctx context.Context, path string) ([]fs.FileInfo, error) {

View File

@ -48,7 +48,7 @@ type ReposService interface {
// more idiomatic solution.
func NewRepos(logger log.Logger, db database.DB, client gitserver.Client) ReposService {
repoStore := db.Repos()
logger = logger.Scoped("repos", "provides a repos store for the backend")
logger = logger.Scoped("repos")
return &repos{
logger: logger,
db: db,

View File

@ -44,7 +44,7 @@ type UserEmailsService interface {
func NewUserEmailsService(db database.DB, logger log.Logger) UserEmailsService {
return &userEmails{
db: db,
logger: logger.Scoped("UserEmails", "user emails handling service"),
logger: logger.Scoped("UserEmails"),
}
}
@ -56,7 +56,7 @@ type userEmails struct {
// Add adds an email address to a user. If email verification is required, it sends an email
// verification email.
func (e *userEmails) Add(ctx context.Context, userID int32, email string) error {
logger := e.logger.Scoped("Add", "handles addition of user emails")
logger := e.logger.Scoped("Add")
// 🚨 SECURITY: Only the user and site admins can add an email address to a user.
if err := auth.CheckSiteAdminOrSameUser(ctx, e.db, userID); err != nil {
return err
@ -119,7 +119,7 @@ func (e *userEmails) Add(ctx context.Context, userID int32, email string) error
// Remove removes the e-mail from the specified user. Perforce external accounts
// using the e-mail will also be removed.
func (e *userEmails) Remove(ctx context.Context, userID int32, email string) error {
logger := e.logger.Scoped("Remove", "handles removal of user emails").
logger := e.logger.Scoped("Remove").
With(log.Int32("userID", userID))
// 🚨 SECURITY: Only the authenticated user and site admins can remove email
@ -166,7 +166,7 @@ func (e *userEmails) Remove(ctx context.Context, userID int32, email string) err
// SetPrimaryEmail sets the supplied e-mail address as the primary address for
// the given user.
func (e *userEmails) SetPrimaryEmail(ctx context.Context, userID int32, email string) error {
logger := e.logger.Scoped("SetPrimaryEmail", "handles setting primary e-mail for user").
logger := e.logger.Scoped("SetPrimaryEmail").
With(log.Int32("userID", userID))
// 🚨 SECURITY: Only the authenticated user and site admins can set the primary
@ -192,7 +192,7 @@ func (e *userEmails) SetPrimaryEmail(ctx context.Context, userID int32, email st
// If verified is false, Perforce external accounts using the e-mail will be
// removed.
func (e *userEmails) SetVerified(ctx context.Context, userID int32, email string, verified bool) error {
logger := e.logger.Scoped("SetVerified", "handles setting e-mail as verified")
logger := e.logger.Scoped("SetVerified")
// 🚨 SECURITY: Only site admins (NOT users themselves) can manually set email
// verification status. Users themselves must go through the normal email

View File

@ -50,7 +50,7 @@ type AppNoopUpdateChecker struct{}
func NewAppUpdateChecker(logger log.Logger, resolver UpdateManifestResolver) *AppUpdateChecker {
return &AppUpdateChecker{
logger: logger.Scoped("app.update.checker", "Handler that handles sourcegraph app requests that check for updates"),
logger: logger.Scoped("app.update.checker"),
manifestResolver: resolver,
}
}

View File

@ -89,7 +89,7 @@ func (r *schemaResolver) CreateAccessToken(ctx context.Context, args *createAcce
uid := actor.FromContext(ctx).UID
id, token, err := r.db.AccessTokens().Create(ctx, userID, args.Scopes, args.Note, uid)
logger := r.logger.Scoped("CreateAccessToken", "access token creation").
logger := r.logger.Scoped("CreateAccessToken").
With(log.Int32("userID", uid))
if conf.CanSendEmail() {
@ -178,7 +178,7 @@ func (r *schemaResolver) DeleteAccessToken(ctx context.Context, args *deleteAcce
}
logger := r.logger.Scoped("DeleteAccessToken", "access token deletion").
logger := r.logger.Scoped("DeleteAccessToken").
With(log.Int32("userID", token.SubjectUserID))
if conf.CanSendEmail() {

View File

@ -83,7 +83,7 @@ func externalServiceByID(ctx context.Context, db database.DB, gqlID graphql.ID)
return nil, err
}
return &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: db, externalService: es}, nil
return &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: db, externalService: es}, nil
}
func MarshalExternalServiceID(id int64) graphql.ID {
@ -263,7 +263,7 @@ func (r *externalServiceResolver) CheckConnection(ctx context.Context) (*externa
source, err := repos.NewSource(
ctx,
log.Scoped("externalServiceResolver.CheckConnection", ""),
log.Scoped("externalServiceResolver.CheckConnection"),
r.db,
r.externalService,
httpcli.ExternalClientFactory,
@ -485,7 +485,7 @@ func (r *externalServiceNamespaceConnectionResolver) compute(ctx context.Context
return
}
e := newExternalServices(log.Scoped("graphql.externalservicenamespaces", ""), r.db)
e := newExternalServices(log.Scoped("graphql.externalservicenamespaces"), r.db)
r.nodes, r.err = e.ListNamespaces(ctx, externalServiceID, r.args.Kind, config)
r.totalCount = int32(len(r.nodes))
})
@ -549,7 +549,7 @@ func (r *externalServiceRepositoryConnectionResolver) compute(ctx context.Contex
return
}
e := newExternalServices(log.Scoped("graphql.externalservicerepositories", ""), r.db)
e := newExternalServices(log.Scoped("graphql.externalservicerepositories"), r.db)
r.nodes, r.err = e.DiscoverRepos(ctx, externalServiceID, r.args.Kind, config, first, r.args.Query, r.args.ExcludeRepos)
})

View File

@ -86,7 +86,7 @@ func (r *schemaResolver) AddExternalService(ctx context.Context, args *addExtern
// Verify if the connection is functional, to render a warning message in the
// editor if not.
res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver", ""), db: r.db, externalService: externalService}
res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver"), db: r.db, externalService: externalService}
if err = newExternalServices(r.logger, r.db).ValidateConnection(ctx, externalService); err != nil {
res.warning = fmt.Sprintf("External service created, but we encountered a problem while validating the external service: %s", err)
}
@ -166,7 +166,7 @@ func (r *schemaResolver) UpdateExternalService(ctx context.Context, args *update
r.logger.Warn("Failed to trigger external service sync")
}
res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver", ""), db: r.db, externalService: es}
res := &externalServiceResolver{logger: r.logger.Scoped("externalServiceResolver"), db: r.db, externalService: es}
if oldConfig != newConfig {
// Verify if the connection is functional, to render a warning message in the
@ -327,7 +327,7 @@ func (r *externalServiceConnectionResolver) Nodes(ctx context.Context) ([]*exter
}
resolvers := make([]*externalServiceResolver, 0, len(externalServices))
for _, externalService := range externalServices {
resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: r.db, externalService: externalService})
resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: r.db, externalService: externalService})
}
return resolvers, nil
}
@ -392,7 +392,7 @@ func (r *ComputedExternalServiceConnectionResolver) Nodes(_ context.Context) []*
}
resolvers := make([]*externalServiceResolver, 0, len(svcs))
for _, svc := range svcs {
resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: r.db, externalService: svc})
resolvers = append(resolvers, &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: r.db, externalService: svc})
}
return resolvers
}

View File

@ -71,7 +71,7 @@ type GitCommitResolver struct {
func NewGitCommitResolver(db database.DB, gsClient gitserver.Client, repo *RepositoryResolver, id api.CommitID, commit *gitdomain.Commit) *GitCommitResolver {
repoName := repo.RepoName()
return &GitCommitResolver{
logger: log.Scoped("gitCommitResolver", "resolve a specific commit").With(
logger: log.Scoped("gitCommitResolver").With(
log.String("repo", string(repoName)),
log.String("commitID", string(id)),
),

View File

@ -629,7 +629,7 @@ func NewSchema(
}
}
logger := log.Scoped("GraphQL", "general GraphQL logging")
logger := log.Scoped("GraphQL")
opts := []graphql.SchemaOpt{
graphql.Tracer(&requestTracer{
DB: db,
@ -694,7 +694,7 @@ type OptionalResolver struct {
// defaults. It does not implement any sub-resolvers.
func newSchemaResolver(db database.DB, gitserverClient gitserver.Client) *schemaResolver {
r := &schemaResolver{
logger: log.Scoped("schemaResolver", "GraphQL schema resolver"),
logger: log.Scoped("schemaResolver"),
db: db,
gitserverClient: gitserverClient,
repoupdaterClient: repoupdater.DefaultClient,

View File

@ -46,7 +46,7 @@ func newPerforceChangelistResolver(r *RepositoryResolver, changelistID, commitSH
canonicalURL := filepath.Join(repoURL.Path, "-", "changelist", changelistID)
return &PerforceChangelistResolver{
logger: r.logger.Scoped("PerforceChangelistResolver", "resolve a specific changelist"),
logger: r.logger.Scoped("PerforceChangelistResolver"),
repositoryResolver: r,
cid: changelistID,
commitSHA: commitSHA,

View File

@ -112,7 +112,7 @@ func (r *schemaResolver) Repositories(ctx context.Context, args *repositoryArgs)
connectionStore := &repositoriesConnectionStore{
ctx: ctx,
db: r.db,
logger: r.logger.Scoped("repositoryConnectionResolver", "resolves connections to a repository"),
logger: r.logger.Scoped("repositoryConnectionResolver"),
opt: opt,
}

View File

@ -75,7 +75,7 @@ func NewRepositoryResolver(db database.DB, client gitserver.Client, repo *types.
Name: name,
ID: id,
},
logger: log.Scoped("repositoryResolver", "resolve a specific repository").
logger: log.Scoped("repositoryResolver").
With(log.Object("repo",
log.String("name", string(name)),
log.Int32("id", int32(id)))),

View File

@ -43,7 +43,7 @@ func NewBatchSearchImplementer(ctx context.Context, logger log.Logger, db databa
}
return &searchResolver{
logger: logger.Scoped("BatchSearchSearchImplementer", "provides search results and suggestions"),
logger: logger.Scoped("BatchSearchSearchImplementer"),
client: cli,
db: db,
SearchInputs: inputs,

View File

@ -443,7 +443,7 @@ func (r *searchResolver) Stats(ctx context.Context) (stats *searchResultsStats,
if err := json.Unmarshal(jsonRes, &stats); err != nil {
return nil, err
}
stats.logger = r.logger.Scoped("searchResultsStats", "provides status on search results")
stats.logger = r.logger.Scoped("searchResultsStats")
stats.sr = r
return stats, nil
}
@ -504,7 +504,7 @@ func (r *searchResolver) Stats(ctx context.Context) (stats *searchResultsStats,
return nil, err // sparkline generation failed, so don't cache.
}
stats = &searchResultsStats{
logger: r.logger.Scoped("searchResultsStats", "provides status on search results"),
logger: r.logger.Scoped("searchResultsStats"),
JApproximateResultCount: v.ApproximateResultCount(),
JSparkline: sparkline,
sr: r,

View File

@ -27,7 +27,7 @@ func (srs *searchResultsStats) Languages(ctx context.Context) ([]*languageStatis
return nil, err
}
logger := srs.logger.Scoped("languages", "provide stats on langauges from the search results")
logger := srs.logger.Scoped("languages")
langs, err := searchResultsStatsLanguages(ctx, logger, srs.sr.db, gitserver.NewClient(), matches)
if err != nil {
return nil, err

View File

@ -18,7 +18,7 @@ func (r *schemaResolver) SendTestEmail(ctx context.Context, args struct{ To stri
return "", err
}
logger := r.logger.Scoped("SendTestEmail", "email send test")
logger := r.logger.Scoped("SendTestEmail")
// Generate a simple identifier to make each email unique (don't need the full ID)
var testID string

View File

@ -30,7 +30,7 @@ func (r *settingsCascade) Subjects(ctx context.Context) ([]*settingsSubjectResol
return nil, err
}
return resolversForSubjects(ctx, log.Scoped("settings", "subjects"), r.db, subjects)
return resolversForSubjects(ctx, log.Scoped("settings"), r.db, subjects)
}
func (r *settingsCascade) Final(ctx context.Context) (string, error) {
@ -62,7 +62,7 @@ func (r *schemaResolver) ViewerSettings(ctx context.Context) (*settingsCascade,
return nil, err
}
if user == nil {
return &settingsCascade{db: r.db, subject: &settingsSubjectResolver{site: NewSiteResolver(log.Scoped("settings", "ViewerSettings"), r.db)}}, nil
return &settingsCascade{db: r.db, subject: &settingsSubjectResolver{site: NewSiteResolver(log.Scoped("settings"), r.db)}}, nil
}
return &settingsCascade{db: r.db, subject: &settingsSubjectResolver{user: user}}, nil
}

View File

@ -377,7 +377,7 @@ func (r *siteResolver) UpgradeReadiness(ctx context.Context) (*upgradeReadinessR
}
return &upgradeReadinessResolver{
logger: r.logger.Scoped("upgradeReadiness", ""),
logger: r.logger.Scoped("upgradeReadiness"),
db: r.db,
}, nil
}

View File

@ -102,7 +102,7 @@ func (r *schemaResolver) DeleteUsers(ctx context.Context, args *struct {
ids[index] = id
}
logger := r.logger.Scoped("DeleteUsers", "delete users mutation").
logger := r.logger.Scoped("DeleteUsers").
With(log.Int32s("users", ids))
// Collect username, verified email addresses, and external accounts to be used

View File

@ -95,7 +95,7 @@ func (r *statusMessageResolver) ExternalService(ctx context.Context) (*externalS
return nil, err
}
return &externalServiceResolver{logger: log.Scoped("externalServiceResolver", ""), db: r.db, externalService: externalService}, nil
return &externalServiceResolver{logger: log.Scoped("externalServiceResolver"), db: r.db, externalService: externalService}, nil
}
type indexingProgressMessageResolver struct {

View File

@ -76,7 +76,7 @@ func NewUserResolver(ctx context.Context, db database.DB, user *types.User) *Use
return &UserResolver{
db: db,
user: user,
logger: log.Scoped("userResolver", "resolves a specific user").With(log.String("user", user.Username)),
logger: log.Scoped("userResolver").With(log.String("user", user.Username)),
actor: actor.FromContext(ctx),
}
}
@ -86,7 +86,7 @@ func newUserResolverFromActor(a *actor.Actor, db database.DB, user *types.User)
return &UserResolver{
db: db,
user: user,
logger: log.Scoped("userResolver", "resolves a specific user").With(log.String("user", user.Username)),
logger: log.Scoped("userResolver").With(log.String("user", user.Username)),
actor: a,
}
}
@ -399,7 +399,7 @@ func (r *schemaResolver) UpdatePassword(ctx context.Context, args *struct {
return nil, err
}
logger := r.logger.Scoped("UpdatePassword", "password update").
logger := r.logger.Scoped("UpdatePassword").
With(log.Int32("userID", user.ID))
if conf.CanSendEmail() {
@ -427,7 +427,7 @@ func (r *schemaResolver) CreatePassword(ctx context.Context, args *struct {
return nil, err
}
logger := r.logger.Scoped("CreatePassword", "password creation").
logger := r.logger.Scoped("CreatePassword").
With(log.Int32("userID", user.ID))
if conf.CanSendEmail() {

View File

@ -198,7 +198,7 @@ func (r *schemaResolver) AddUserEmail(ctx context.Context, args *addUserEmailArg
return nil, err
}
logger := r.logger.Scoped("AddUserEmail", "adding email to user").
logger := r.logger.Scoped("AddUserEmail").
With(log.Int32("userID", userID))
userEmails := backend.NewUserEmailsService(r.db, logger)

View File

@ -46,7 +46,7 @@ func (r *schemaResolver) CreateUser(ctx context.Context, args *struct {
needsEmailVerification = false
}
logger := r.logger.Scoped("createUser", "create user handler").With(
logger := r.logger.Scoped("createUser").With(
log.Bool("needsEmailVerification", needsEmailVerification))
var emailVerificationCode string

View File

@ -81,7 +81,7 @@ func (r *schemaResolver) RandomizeUserPassword(ctx context.Context, args *struct
return nil, errors.Wrap(err, "cannot parse user ID")
}
logger := r.logger.Scoped("randomizeUserPassword", "endpoint for resetting user passwords").
logger := r.logger.Scoped("randomizeUserPassword").
With(log.Int32("userID", userID))
logger.Info("resetting user password")

View File

@ -143,7 +143,7 @@ func NewWebhookLogConnectionResolver(
}
return &WebhookLogConnectionResolver{
logger: log.Scoped("webhookLogConnectionResolver", ""),
logger: log.Scoped("webhookLogConnectionResolver"),
args: args,
externalServiceID: externalServiceID,
store: db.WebhookLogs(keyring.Default().WebhookLogKey),

View File

@ -36,7 +36,7 @@ func NewHandler(db database.DB, logger log.Logger, githubAppSetupHandler http.Ha
return globals.ExternalURL().Scheme == "https"
}))
logger = logger.Scoped("appHandler", "handles routes for all app related requests")
logger = logger.Scoped("appHandler")
r := router.Router()

View File

@ -145,7 +145,7 @@ func addGrafana(r *mux.Router, db database.DB) {
// The route only forwards known project ids, so a DSN must be defined in siteconfig.Log.Sentry.Dsn
// to allow events to be forwarded. Sentry responses are ignored.
func addSentry(r *mux.Router) {
logger := sglog.Scoped("sentryTunnel", "A Sentry.io specific HTTP route that allows to forward client-side reports, https://docs.sentry.io/platforms/javascript/troubleshooting/#dealing-with-ad-blockers")
logger := sglog.Scoped("sentryTunnel")
// Helper to fetch Sentry configuration from siteConfig.
getConfig := func() (string, string, error) {
@ -285,7 +285,7 @@ func addOpenTelemetryProtocolAdapter(r *mux.Router) {
ctx = context.Background()
endpoint = otlpenv.GetEndpoint()
protocol = otlpenv.GetProtocol()
logger = sglog.Scoped("otlpAdapter", "OpenTelemetry protocol adapter and forwarder").
logger = sglog.Scoped("otlpAdapter").
With(sglog.String("endpoint", endpoint), sglog.String("protocol", string(protocol)))
)

View File

@ -210,7 +210,7 @@ func parseEditorRequest(db database.DB, q url.Values) (*editorRequest, error) {
v := &editorRequest{
db: db,
logger: log.Scoped("editor", "requests from editors."),
logger: log.Scoped("editor"),
}
if search := q.Get("search"); search != "" {

View File

@ -305,7 +305,7 @@ func NewJSContextFromRequest(req *http.Request, db database.DB) JSContext {
}
}
siteResolver := graphqlbackend.NewSiteResolver(logger.Scoped("jscontext", "constructing jscontext"), db)
siteResolver := graphqlbackend.NewSiteResolver(logger.Scoped("jscontext"), db)
needsRepositoryConfiguration, err := siteResolver.NeedsRepositoryConfiguration(ctx)
if err != nil {
needsRepositoryConfiguration = false

View File

@ -52,7 +52,7 @@ type adaptedSignal struct {
// Register attaches a route to the router that adapts requests on the `/otlp` path.
func (sig *adaptedSignal) Register(ctx context.Context, logger log.Logger, r *mux.Router, receiverURL *url.URL) {
adapterLogger := logger.Scoped(path.Base(sig.PathPrefix), "OpenTelemetry signal-specific tunnel")
adapterLogger := logger.Scoped(path.Base(sig.PathPrefix))
// Set up an http/json -> ${configured_protocol} adapter
adapter, err := sig.CreateAdapter()

View File

@ -316,7 +316,7 @@ func (r localExternalServiceResolver) Repositories(ctx context.Context) ([]graph
})
}
case *schema.LocalGitExternalService:
src, err := repos.NewLocalGitSource(ctx, log.Scoped("localExternalServiceResolver.Repositories", ""), r.service)
src, err := repos.NewLocalGitSource(ctx, log.Scoped("localExternalServiceResolver.Repositories"), r.service)
if err != nil {
return nil, err
}

View File

@ -36,7 +36,7 @@ func RegisterSSOSignOutHandler(f func(w http.ResponseWriter, r *http.Request)) {
}
func serveSignOutHandler(logger log.Logger, db database.DB) http.HandlerFunc {
logger = logger.Scoped("signOut", "signout handler")
logger = logger.Scoped("signOut")
recorder := telemetryrecorder.NewBestEffort(logger, db)
return func(w http.ResponseWriter, r *http.Request) {

View File

@ -138,7 +138,7 @@ var mockNewCommon func(w http.ResponseWriter, r *http.Request, title string, ser
// In the case of a repository that is cloning, a Common data structure is
// returned but it has a nil Repo.
func newCommon(w http.ResponseWriter, r *http.Request, db database.DB, title string, indexed bool, serveError serveErrorHandler) (*Common, error) {
logger := log.Scoped("commonHandler", "")
logger := log.Scoped("commonHandler")
if mockNewCommon != nil {
return mockNewCommon(w, r, title, serveError)
}

View File

@ -24,7 +24,7 @@ func serveHelp(w http.ResponseWriter, r *http.Request) {
page := strings.TrimPrefix(r.URL.Path, "/help")
versionStr := version.Version()
logger := sglog.Scoped("serveHelp", "")
logger := sglog.Scoped("serveHelp")
logger.Info("redirecting to docs", sglog.String("page", page), sglog.String("versionStr", versionStr))
// For Cody App, help links are handled in the frontend. We should never get here.

View File

@ -23,7 +23,7 @@ var goSymbolReg = lazyregexp.New("/info/GoPackage/(.+)$")
// serveRepoLanding simply redirects the old (sourcegraph.com/<repo>/-/info) repo landing page
// URLs directly to the repo itself (sourcegraph.com/<repo>).
func serveRepoLanding(db database.DB) func(http.ResponseWriter, *http.Request) error {
logger := log.Scoped("serveRepoLanding", "redirects the old (sourcegraph.com/<repo>/-/info) repo landing page")
logger := log.Scoped("serveRepoLanding")
return func(w http.ResponseWriter, r *http.Request) error {
legacyRepoLandingCounter.Inc()

View File

@ -245,7 +245,7 @@ func brandNameSubtitle(titles ...string) string {
}
func initRouter(db database.DB, router *mux.Router) {
logger := log.Scoped("router", "")
logger := log.Scoped("router")
uirouter.Router = router // make accessible to other packages
@ -505,7 +505,7 @@ func serveErrorNoDebug(w http.ResponseWriter, r *http.Request, db database.DB, e
w.WriteHeader(statusCode)
errorID := randstring.NewLen(6)
logger := log.Scoped("ui", "logger for serveErrorNoDebug")
logger := log.Scoped("ui")
// Determine trace URL and log the error.
var traceURL string

View File

@ -18,7 +18,7 @@ import (
func serveVerifyEmail(db database.DB) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
logger := log.Scoped("verify-email", "")
logger := log.Scoped("verify-email")
email := r.URL.Query().Get("email")
verifyCode := r.URL.Query().Get("code")
actr := actor.FromContext(ctx)

View File

@ -31,7 +31,7 @@ const (
func Init(logger log.Logger, db database.DB) {
const pkgName = "azureoauth"
logger = logger.Scoped(pkgName, "Azure DevOps OAuth config watch")
logger = logger.Scoped(pkgName)
conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems {
_, problems := parseConfig(logger, cfg, db)
return problems

View File

@ -17,7 +17,7 @@ import (
func Init(logger log.Logger, db database.DB) {
const pkgName = "bitbucketcloudoauth"
logger = logger.Scoped(pkgName, "Bitbucket Cloud OAuth config watch")
logger = logger.Scoped(pkgName)
conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems {
_, problems := parseConfig(logger, cfg, db)
return problems

View File

@ -20,6 +20,6 @@ func Init(
_ conftypes.UnifiedWatchable,
enterpriseServices *enterprise.Services,
) error {
enterpriseServices.GitHubAppsResolver = NewResolver(log.Scoped("GitHubAppsResolver", ""), db)
enterpriseServices.GitHubAppsResolver = NewResolver(log.Scoped("GitHubAppsResolver"), db)
return nil
}

View File

@ -17,7 +17,7 @@ import (
func Init(logger log.Logger, db database.DB) {
const pkgName = "githuboauth"
logger = logger.Scoped(pkgName, "GitHub OAuth config watch")
logger = logger.Scoped(pkgName)
conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems {
_, problems := parseConfig(logger, cfg, db)
return problems

View File

@ -194,7 +194,7 @@ func derefInt64(i *int64) int64 {
func (s *sessionIssuerHelper) newClient(token string) *githubsvc.V3Client {
apiURL, _ := githubsvc.APIRoot(s.BaseURL)
return githubsvc.NewV3Client(log.Scoped("session.github.v3", "github v3 client for session issuer"),
return githubsvc.NewV3Client(log.Scoped("session.github.v3"),
extsvc.URNGitHubOAuth, apiURL, &esauth.OAuthBearerToken{Token: token}, nil)
}

View File

@ -17,7 +17,7 @@ import (
func Init(logger log.Logger, db database.DB) {
const pkgName = "gitlaboauth"
logger = log.Scoped(pkgName, "GitLab OAuth config watch")
logger = log.Scoped(pkgName)
conf.ContributeValidator(func(cfg conftypes.SiteConfigQuerier) conf.Problems {
_, problems := parseConfig(logger, cfg, db)

View File

@ -69,7 +69,7 @@ func CallbackHandler(config *oauth2.Config, success, failure http.Handler) http.
}
func gitlabHandler(config *oauth2.Config, success, failure http.Handler) http.Handler {
logger := log.Scoped("GitlabOAuthHandler", "Gitlab OAuth Handler")
logger := log.Scoped("GitlabOAuthHandler")
if failure == nil {
failure = gologin.DefaultFailureHandler

View File

@ -30,7 +30,7 @@ const pkgName = "httpheader"
func Init() {
conf.ContributeValidator(validateConfig)
logger := log.Scoped(pkgName, "HTTP header authentication config watch")
logger := log.Scoped(pkgName)
go func() {
conf.Watch(func() {
newPC, _ := getProviderConfig()

View File

@ -33,7 +33,7 @@ import (
// Init must be called by the frontend to initialize the auth middlewares.
func Init(logger log.Logger, db database.DB) {
logger = logger.Scoped("auth", "provides enterprise authentication middleware")
logger = logger.Scoped("auth")
azureoauth.Init(logger, db)
bitbucketcloudoauth.Init(logger, db)
gerrit.Init()
@ -118,7 +118,7 @@ func Init(logger log.Logger, db database.DB) {
}
func ssoSignOutHandler(w http.ResponseWriter, r *http.Request) {
logger := log.Scoped("ssoSignOutHandler", "Signing out from SSO providers")
logger := log.Scoped("ssoSignOutHandler")
for _, p := range conf.Get().AuthProviders {
var err error
switch {

View File

@ -116,7 +116,7 @@ func withOAuthExternalClient(r *http.Request) *http.Request {
if traceLogEnabled {
loggingClient := *client
loggingClient.Transport = &loggingRoundTripper{
log: log.Scoped("oauth_external.transport", "transport logger for withOAuthExternalClient"),
log: log.Scoped("oauth_external.transport"),
underlying: client.Transport,
}
client = &loggingClient

View File

@ -66,7 +66,7 @@ func Init() {
conf.ContributeValidator(validateConfig)
const pkgName = "openidconnect"
logger := log.Scoped(pkgName, "OpenID Connect config watch")
logger := log.Scoped(pkgName)
go func() {
conf.Watch(func() {
ps := getProviders()

View File

@ -68,7 +68,7 @@ func Init() {
conf.ContributeValidator(validateConfig)
const pkgName = "saml"
logger := log.Scoped(pkgName, "SAML config watch")
logger := log.Scoped(pkgName)
go func() {
conf.Watch(func() {
ps := getProviders()

View File

@ -40,7 +40,7 @@ func Init() {
conf.ContributeValidator(validateConfig)
p := NewProvider(*cloudSiteConfig.AuthProviders.SourcegraphOperator)
logger := log.Scoped(auth.SourcegraphOperatorProviderType, "Sourcegraph Operator config watch")
logger := log.Scoped(auth.SourcegraphOperatorProviderType)
go func() {
if err := p.Refresh(context.Background()); err != nil {
logger.Error("failed to fetch Sourcegraph Operator service provider metadata", log.Error(err))

View File

@ -59,7 +59,7 @@ const (
)
func authHandler(db database.DB) func(w http.ResponseWriter, r *http.Request) {
logger := log.Scoped(internalauth.SourcegraphOperatorProviderType+".authHandler", "Sourcegraph Operator authentication handler")
logger := log.Scoped(internalauth.SourcegraphOperatorProviderType + ".authHandler")
return func(w http.ResponseWriter, r *http.Request) {
switch strings.TrimPrefix(r.URL.Path, authPrefix) {
case "/login": // Endpoint that starts the Authentication Request Code Flow.

View File

@ -67,7 +67,7 @@ func Init(
return problems
})
enterpriseServices.PermissionsGitHubWebhook = webhooks.NewGitHubWebhook(log.Scoped("PermissionsGitHubWebhook", "permissions sync webhook handler for GitHub webhooks"))
enterpriseServices.PermissionsGitHubWebhook = webhooks.NewGitHubWebhook(log.Scoped("PermissionsGitHubWebhook"))
authz.DefaultSubRepoPermsChecker = srp.NewSubRepoPermsClient(db.SubRepoPerms())

View File

@ -54,7 +54,7 @@ func (r *Resolver) checkLicense(feature licensing.Feature) error {
func NewResolver(observationCtx *observation.Context, db database.DB) graphqlbackend.AuthzResolver {
return &Resolver{
logger: observationCtx.Logger.Scoped("authz.Resolver", ""),
logger: observationCtx.Logger.Scoped("authz.Resolver"),
db: db,
}
}

View File

@ -42,7 +42,7 @@ type BatchesStore interface {
// NewFileHandler creates a new FileHandler.
func NewFileHandler(db database.DB, store BatchesStore, operations *Operations) *FileHandler {
return &FileHandler{
logger: sglog.Scoped("FileHandler", "Batch Changes mounted file REST API handler"),
logger: sglog.Scoped("FileHandler"),
db: db,
store: store,
operations: operations,

View File

@ -45,7 +45,7 @@ func Init(
// Register enterprise services.
gitserverClient := gitserver.NewClient()
logger := sglog.Scoped("Batches", "batch changes webhooks")
logger := sglog.Scoped("Batches")
enterpriseServices.BatchChangesResolver = resolvers.New(db, bstore, gitserverClient, logger)
enterpriseServices.BatchesGitHubWebhook = webhooks.NewGitHubWebhook(bstore, gitserverClient, logger)
enterpriseServices.BatchesBitbucketServerWebhook = webhooks.NewBitbucketServerWebhook(bstore, gitserverClient, logger)

View File

@ -11,7 +11,7 @@ import (
)
func DeleteOldEventLogsInPostgres(ctx context.Context, logger log.Logger, db database.DB) {
logger = logger.Scoped("deleteOldEventLogs", "background job to prune old event logs in database")
logger = logger.Scoped("deleteOldEventLogs")
for {
// We choose 93 days as the interval to ensure that we have at least the last three months
@ -28,7 +28,7 @@ func DeleteOldEventLogsInPostgres(ctx context.Context, logger log.Logger, db dat
}
func DeleteOldSecurityEventLogsInPostgres(ctx context.Context, logger log.Logger, db database.DB) {
logger = logger.Scoped("deleteOldSecurityEventLogs", "background job to prune old security event logs in database")
logger = logger.Scoped("deleteOldSecurityEventLogs")
for {
time.Sleep(time.Hour)

View File

@ -19,7 +19,7 @@ import (
// UpdatePermissions is called as part of the background process by the `frontend` service.
func UpdatePermissions(ctx context.Context, logger log.Logger, db database.DB) {
scopedLog := logger.Scoped("permission_update", "Updates the permission in the database based on the rbac schema configuration.")
scopedLog := logger.Scoped("permission_update")
err := db.WithTransact(ctx, func(tx database.DB) error {
permissionStore := tx.Permissions()
rolePermissionStore := tx.RolePermissions()

View File

@ -39,7 +39,7 @@ import (
)
func printConfigValidation(logger log.Logger) {
logger = logger.Scoped("configValidation", "")
logger = logger.Scoped("configValidation")
messages, err := conf.Validate(conf.Raw())
if err != nil {
logger.Warn("unable to validate Sourcegraph site configuration", log.Error(err))
@ -110,7 +110,7 @@ func readSiteConfigFile(paths []string) ([]byte, error) {
}
func overrideSiteConfig(ctx context.Context, logger log.Logger, db database.DB) error {
logger = logger.Scoped("overrideSiteConfig", "")
logger = logger.Scoped("overrideSiteConfig")
paths := filepath.SplitList(os.Getenv("SITE_CONFIG_FILE"))
if len(paths) == 0 {
return nil
@ -163,7 +163,7 @@ func overrideSiteConfig(ctx context.Context, logger log.Logger, db database.DB)
}
func overrideGlobalSettings(ctx context.Context, logger log.Logger, db database.DB) error {
logger = logger.Scoped("overrideGlobalSettings", "")
logger = logger.Scoped("overrideGlobalSettings")
path := os.Getenv("GLOBAL_SETTINGS_FILE")
if path == "" {
return nil
@ -208,7 +208,7 @@ func overrideGlobalSettings(ctx context.Context, logger log.Logger, db database.
}
func overrideExtSvcConfig(ctx context.Context, logger log.Logger, db database.DB) error {
logger = logger.Scoped("overrideExtSvcConfig", "")
logger = logger.Scoped("overrideExtSvcConfig")
path := os.Getenv("EXTSVC_CONFIG_FILE")
if path == "" {
return nil
@ -385,7 +385,7 @@ func overrideExtSvcConfig(ctx context.Context, logger log.Logger, db database.DB
}
func watchUpdate(ctx context.Context, logger log.Logger, update func(context.Context) (bool, error), paths ...string) {
logger = logger.Scoped("watch", "").With(log.Strings("files", paths))
logger = logger.Scoped("watch").With(log.Strings("files", paths))
events, err := watchPaths(ctx, paths...)
if err != nil {
logger.Error("failed to watch config override files", log.Error(err))
@ -463,7 +463,7 @@ func watchPaths(ctx context.Context, paths ...string) (<-chan error, error) {
func newConfigurationSource(logger log.Logger, db database.DB) *configurationSource {
return &configurationSource{
logger: logger.Scoped("configurationSource", ""),
logger: logger.Scoped("configurationSource"),
db: db,
}
}

View File

@ -47,7 +47,7 @@ func newExternalHTTPHandler(
newExecutorProxyHandler enterprise.NewExecutorProxyHandler,
newGitHubAppSetupHandler enterprise.NewGitHubAppSetupHandler,
) (http.Handler, error) {
logger := log.Scoped("external", "external http handlers")
logger := log.Scoped("external")
// Each auth middleware determines on a per-request basis whether it should be enabled (if not, it
// immediately delegates the request to the next middleware in the chain).
@ -148,7 +148,7 @@ func newInternalHTTPHandler(
rateLimitWatcher graphqlbackend.LimitWatcher,
) http.Handler {
internalMux := http.NewServeMux()
logger := log.Scoped("internal", "internal http handlers")
logger := log.Scoped("internal")
internalRouter := router.NewInternal(mux.NewRouter().PathPrefix("/.internal/").Subrouter())
internalhttpapi.RegisterInternalServices(

View File

@ -395,7 +395,7 @@ func makeRateLimitWatcher() (*graphqlbackend.BasicLimitWatcher, error) {
return nil, err
}
return graphqlbackend.NewBasicLimitWatcher(sglog.Scoped("BasicLimitWatcher", "basic rate-limiter"), store), nil
return graphqlbackend.NewBasicLimitWatcher(sglog.Scoped("BasicLimitWatcher"), store), nil
}
// redispoolRegisterDB registers our postgres backed redis. These package

View File

@ -138,5 +138,5 @@ func Init(
}
func scopedContext(name string) *observation.Context {
return observation.NewContext(log.Scoped(name+".transport.graphql", "codeintel "+name+" graphql transport"))
return observation.NewContext(log.Scoped(name + ".transport.graphql"))
}

View File

@ -21,6 +21,6 @@ func Init(
_ conftypes.UnifiedWatchable,
enterpriseServices *enterprise.Services,
) error {
enterpriseServices.CodeMonitorsResolver = resolvers.NewResolver(log.Scoped("codeMonitorResolver", ""), db)
enterpriseServices.CodeMonitorsResolver = resolvers.NewResolver(log.Scoped("codeMonitorResolver"), db)
return nil
}

View File

@ -24,7 +24,7 @@ func Init(
_ conftypes.UnifiedWatchable,
enterpriseServices *enterprise.Services,
) error {
logger := log.Scoped("completions", "Cody completions")
logger := log.Scoped("completions")
enterpriseServices.NewChatCompletionsStreamHandler = func() http.Handler {
completionsHandler := httpapi.NewChatCompletionsStreamHandler(logger, db)

View File

@ -23,7 +23,7 @@ func Init(
_ conftypes.UnifiedWatchable,
enterpriseServices *enterprise.Services,
) error {
logger := log.Scoped("compute", "")
logger := log.Scoped("compute")
enterpriseServices.ComputeResolver = resolvers.NewResolver(logger, db)
enterpriseServices.NewComputeStreamHandler = func() http.Handler {
return streaming.NewComputeStreamHandler(logger, db)

View File

@ -22,7 +22,7 @@ func Init(
_ conftypes.UnifiedWatchable,
enterpriseServices *enterprise.Services,
) error {
logger := logger.Scoped("contentlibrary", "sourcegraph content library")
logger := logger.Scoped("contentlibrary")
enterpriseServices.ContentLibraryResolver = graphqlbackend.NewContentLibraryResolver(db, logger)
return nil
}

View File

@ -51,11 +51,11 @@ func Init(
if envvar.SourcegraphDotComMode() {
enterpriseServices.DotcomRootResolver = dotcomRootResolver{
ProductSubscriptionLicensingResolver: productsubscription.ProductSubscriptionLicensingResolver{
Logger: observationCtx.Logger.Scoped("productsubscriptions", "resolvers for dotcom product subscriptions"),
Logger: observationCtx.Logger.Scoped("productsubscriptions"),
DB: db,
},
CodyGatewayDotcomUserResolver: productsubscription.CodyGatewayDotcomUserResolver{
Logger: observationCtx.Logger.Scoped("codygatewayuser", "resolvers for dotcom cody gateway users"),
Logger: observationCtx.Logger.Scoped("codygatewayuser"),
DB: db,
},
}

View File

@ -39,7 +39,7 @@ func StartCheckForAnomalousLicenseUsage(logger log.Logger, db database.DB) {
client := slack.New(dotcom.SlackLicenseAnomallyWebhook)
t := time.NewTicker(1 * time.Hour)
logger = logger.Scoped("StartCheckForAnomalousLicenseUsage", "starts the checks for anomalous license usage")
logger = logger.Scoped("StartCheckForAnomalousLicenseUsage")
for range t.C {
maybeCheckAnomalies(logger, db, client, glock.NewRealClock(), redispool.Store)

View File

@ -35,7 +35,7 @@ var (
)
func logEvent(ctx context.Context, db database.DB, name string, siteID string) {
logger := log.Scoped("LicenseCheckHandler logEvent", "Event logging for LicenseCheckHandler")
logger := log.Scoped("LicenseCheckHandler logEvent")
eArg, err := json.Marshal(struct {
SiteID string `json:"site_id,omitempty"`
}{
@ -88,7 +88,7 @@ func sendSlackMessage(logger log.Logger, license *dbLicense, siteID string) {
}
func NewLicenseCheckHandler(db database.DB) http.Handler {
baseLogger := log.Scoped("LicenseCheckHandler", "Handles license validity checks")
baseLogger := log.Scoped("LicenseCheckHandler")
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()

View File

@ -34,7 +34,7 @@ func StartCheckForUpcomingLicenseExpirations(logger log.Logger, db database.DB)
client := slack.New(dotcom.SlackLicenseExpirationWebhook)
t := time.NewTicker(1 * time.Hour)
logger = logger.Scoped("StartCheckForUpcomingLicenseExpirations", "starts the various checks for upcoming license expiry")
logger = logger.Scoped("StartCheckForUpcomingLicenseExpirations")
for range t.C {
checkLicensesIfNeeded(logger, db, client)
}

View File

@ -83,7 +83,6 @@ func NewHandler[T workerutil.Record](
metricsStore: metricsStore,
logger: log.Scoped(
fmt.Sprintf("executor-queue-handler-%s", queueHandler.Name),
fmt.Sprintf("The route handler for all executor %s dbworker API tunnel endpoints", queueHandler.Name),
),
queueHandler: queueHandler,
}
@ -140,7 +139,7 @@ func (h *handler[T]) dequeue(ctx context.Context, queueName string, metadata exe
return executortypes.Job{}, false, nil
}
logger := log.Scoped("dequeue", "Select a job record from the database.")
logger := log.Scoped("dequeue")
job, err := h.queueHandler.RecordTransformer(ctx, metadata.version, record, metadata.resources)
if err != nil {
if _, err := h.queueHandler.Store.MarkFailed(ctx, record.RecordID(), fmt.Sprintf("failed to transform record: %s", err), store.MarkFinalOptions{}); err != nil {
@ -384,7 +383,7 @@ func (h *handler[T]) heartbeat(ctx context.Context, executor types.Executor, ids
return nil, nil, err
}
logger := log.Scoped("heartbeat", "Write this heartbeat to the database")
logger := log.Scoped("heartbeat")
// Write this heartbeat to the database so that we can populate the UI with recent executor activity.
if err := h.executorStore.UpsertHeartbeat(ctx, executor); err != nil {

View File

@ -62,7 +62,7 @@ func NewMultiHandler(
BatchesQueueHandler: batchesQueueHandler,
DequeueCache: dequeueCache,
dequeueCacheConfig: dequeueCacheConfig,
logger: log.Scoped("executor-multi-queue-handler", "The route handler for all executor queues"),
logger: log.Scoped("executor-multi-queue-handler"),
}
return multiHandler
}
@ -143,7 +143,7 @@ func (m *MultiHandler) dequeue(ctx context.Context, req executortypes.DequeueReq
DiskSpace: req.DiskSpace,
}
logger := m.logger.Scoped("dequeue", "Pick a job record from the database.")
logger := m.logger.Scoped("dequeue")
var job executortypes.Job
switch selectedQueue {
case m.BatchesQueueHandler.Name:
@ -195,7 +195,7 @@ func (m *MultiHandler) dequeue(ctx context.Context, req executortypes.DequeueReq
job.Version = 2
}
logger = m.logger.Scoped("token", "Create or regenerate a job token.")
logger = m.logger.Scoped("token")
token, err := m.jobTokenStore.Create(ctx, job.ID, job.Queue, job.RepositoryName)
if err != nil {
if errors.Is(err, executorstore.ErrJobTokenAlreadyCreated) {
@ -361,7 +361,7 @@ func (m *MultiHandler) heartbeat(ctx context.Context, executor types.Executor, i
)
}
logger := log.Scoped("multiqueue.heartbeat", "Write the heartbeat of multiple queues to the database")
logger := log.Scoped("multiqueue.heartbeat")
// Write this heartbeat to the database so that we can populate the UI with recent executor activity.
if err = m.executorStore.UpsertHeartbeat(ctx, executor); err != nil {

View File

@ -30,7 +30,7 @@ func Init(
return conf.SiteConfig().ExecutorsAccessToken
}
logger := log.Scoped("executorqueue", "")
logger := log.Scoped("executorqueue")
queueHandler := newExecutorQueuesHandler(
observationCtx,

View File

@ -14,7 +14,7 @@ import (
)
func QueueHandler(observationCtx *observation.Context, db database.DB, _ func() string) handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob] {
logger := log.Scoped("executor-queue.batches", "The executor queue handlers for the batches queue")
logger := log.Scoped("executor-queue.batches")
recordTransformer := func(ctx context.Context, version string, record *btypes.BatchSpecWorkspaceExecutionJob, _ handler.ResourceMetadata) (apiclient.Job, error) {
batchesStore := bstore.New(db, observationCtx, nil)
return transformRecord(ctx, logger, batchesStore, record, version)

View File

@ -29,7 +29,7 @@ const authAuditEntity = "httpapi/auth"
// AccessTokenAuthMiddleware authenticates the user based on the
// token query parameter or the "Authorization" header.
func AccessTokenAuthMiddleware(db database.DB, baseLogger log.Logger, next http.Handler) http.Handler {
baseLogger = baseLogger.Scoped("accessTokenAuth", "Access token authentication middleware")
baseLogger = baseLogger.Scoped("accessTokenAuth")
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// SCIM uses an auth token which is checked separately in the SCIM package.
if strings.HasPrefix(r.URL.Path, "/.api/scim/v2") {

View File

@ -99,7 +99,7 @@ func NewHandler(
rateLimiter graphqlbackend.LimitWatcher,
handlers *Handlers,
) (http.Handler, error) {
logger := sglog.Scoped("Handler", "frontend HTTP API handler")
logger := sglog.Scoped("Handler")
if m == nil {
m = apirouter.New(nil)
@ -123,7 +123,7 @@ func NewHandler(
)
wh := webhooks.Router{
Logger: logger.Scoped("webhooks.Router", "handling webhook requests and dispatching them to handlers"),
Logger: logger.Scoped("webhooks.Router"),
DB: db,
}
webhookhandlers.Init(&wh)
@ -222,7 +222,7 @@ func RegisterInternalServices(
newComputeStreamHandler enterprise.NewComputeStreamHandler,
rateLimitWatcher graphqlbackend.LimitWatcher,
) {
logger := sglog.Scoped("InternalHandler", "frontend internal HTTP API handler")
logger := sglog.Scoped("InternalHandler")
m.StrictSlash(true)
handler := JsonMiddleware(&ErrorHandler{
@ -235,7 +235,7 @@ func RegisterInternalServices(
gsClient := gitserver.NewClient()
indexer := &searchIndexerServer{
db: db,
logger: logger.Scoped("searchIndexerServer", "zoekt-indexserver endpoints"),
logger: logger.Scoped("searchIndexerServer"),
gitserverClient: gsClient,
ListIndexable: backend.NewRepos(logger, db, gsClient).ListIndexable,
RepoStore: db.Repos(),

View File

@ -38,7 +38,7 @@ type releaseCache struct {
func newReleaseCache(logger log.Logger, client *github.V4Client, owner, name string) ReleaseCache {
return &releaseCache{
client: client,
logger: logger.Scoped("ReleaseCache", "release cache"),
logger: logger.Scoped("ReleaseCache"),
branches: map[string]string{},
owner: owner,
name: name,

Some files were not shown because too many files have changed in this diff Show More