Mirror of https://github.com/sourcegraph/sourcegraph.git (synced 2026-02-06 15:12:02 +00:00)
observation: integrate Logger into all levels of Observation (#34456)
Creating and tagging operations now create loggers embedded into the various Observation types that can be used directly for ✨ structured logs ✨. See internal/observation package docs for more details.
parent b071fb8103
commit 07f759e4e6
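In outline, the change looks roughly like this — a minimal sketch pieced together from the hunks below. The `example` package, operation name, and `handle` function are hypothetical; the `observation.Context` fields, `log.Scoped`, and the three-value return of `With` are taken from the diff itself:

package example

import (
	"context"

	"github.com/opentracing/opentracing-go"
	"github.com/prometheus/client_golang/prometheus"

	"github.com/sourcegraph/sourcegraph/internal/observation"
	"github.com/sourcegraph/sourcegraph/internal/trace"
	"github.com/sourcegraph/sourcegraph/lib/log"
)

// The observation.Context now carries a lib/log Logger next to the tracer
// and metrics registerer; operations created from it inherit that logger.
var op = func() *observation.Operation {
	obsvCtx := observation.Context{
		Logger:     log.Scoped("example", "a hypothetical service"), // assumption: scope name is illustrative
		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
		Registerer: prometheus.DefaultRegisterer,
	}
	return obsvCtx.Operation(observation.Op{Name: "example.Handle"})
}()

func handle(ctx context.Context) (err error) {
	// With (formerly WithAndLogger) now always returns a trace logger as its
	// second value; callers that do not need it discard it with `_`.
	ctx, logger, endObservation := op.With(ctx, &err, observation.Args{})
	defer endObservation(1, observation.Args{})

	logger.Info("doing work", log.String("step", "start")) // structured log
	_ = ctx
	return nil
}

Hence the mechanical changes in the diff: `WithAndLogger` call sites become `With`, old two-value `With` call sites gain a `_` for the new logger return, and `log15.Root()` / stdlib `log.Fatalf` give way to scoped `lib/log` loggers.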
@@ -127,12 +127,14 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable)
 	})
 	defer syncLogs()
 
+	logger := sglog.Scoped("server", "the frontend server program")
+
 	ready := make(chan struct{})
 	go debugserver.NewServerRoutine(ready).Start()
 
 	sqlDB, err := InitDB()
 	if err != nil {
-		log.Fatalf("ERROR: %v", err)
+		return err
 	}
 	db := database.NewDB(sqlDB)
 
@@ -140,20 +142,20 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable)
 		log15.Warn("Skipping out-of-band migrations check")
 	} else {
 		observationContext := &observation.Context{
-			Logger:     log15.Root(),
+			Logger:     logger,
 			Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 			Registerer: prometheus.DefaultRegisterer,
 		}
 		outOfBandMigrationRunner := oobmigration.NewRunnerWithDB(db, oobmigration.RefreshInterval, observationContext)
 
 		if err := oobmigration.ValidateOutOfBandMigrationRunner(ctx, db, outOfBandMigrationRunner); err != nil {
-			log.Fatalf("failed to validate out of band migrations: %v", err)
+			return errors.Wrap(err, "failed to validate out of band migrations")
 		}
 	}
 
 	// override site config first
 	if err := overrideSiteConfig(ctx, db); err != nil {
-		log.Fatalf("failed to apply site config overrides: %v", err)
+		return errors.Wrap(err, "failed to apply site config overrides")
 	}
 	globals.ConfigurationServerFrontendOnly = conf.InitConfigurationServerFrontendOnly(&configurationSource{db: db})
 	conf.Init()
@@ -161,17 +163,17 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable)
 
 	// now we can init the keyring, as it depends on site config
 	if err := keyring.Init(ctx); err != nil {
-		log.Fatalf("failed to initialize encryption keyring: %v", err)
+		return errors.Wrap(err, "failed to initialize encryption keyring")
 	}
 
 	if err := overrideGlobalSettings(ctx, db); err != nil {
-		log.Fatalf("failed to override global settings: %v", err)
+		return errors.Wrap(err, "failed to override global settings")
 	}
 
 	// now the keyring is configured it's safe to override the rest of the config
 	// and that config can access the keyring
 	if err := overrideExtSvcConfig(ctx, db); err != nil {
-		log.Fatalf("failed to override external service config: %v", err)
+		return errors.Wrap(err, "failed to override external service config")
 	}
 
 	// Filter trace logs
@@ -187,7 +189,7 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable)
 
 	authz.DefaultSubRepoPermsChecker, err = authz.NewSubRepoPermsClient(database.SubRepoPerms(db))
 	if err != nil {
-		log.Fatalf("Failed to create sub-repo client: %v", err)
+		return errors.Wrap(err, "Failed to create sub-repo client")
 	}
 	ui.InitRouter(db, enterprise.CodeIntelResolver)
 
@@ -287,11 +289,9 @@ func Main(enterpriseSetupHook func(db database.DB, c conftypes.UnifiedWatchable)
 	}
 
 	if printLogo {
-		fmt.Println(" ")
-		fmt.Println(logoColor)
-		fmt.Println(" ")
+		logger.Info(fmt.Sprintf("\n\n%s\n\n", logoColor))
 	}
-	fmt.Printf("✱ Sourcegraph is ready at: %s\n", globals.ExternalURL())
+	logger.Info(fmt.Sprintf("✱ Sourcegraph is ready at: %s\n", globals.ExternalURL()))
 	close(ready)
 
 	goroutine.MonitorBackgroundRoutines(context.Background(), routines...)
@@ -23,7 +23,6 @@ import (
 
 	"github.com/sourcegraph/sourcegraph/internal/env"
 	"github.com/sourcegraph/sourcegraph/internal/gosyntect"
-	"github.com/sourcegraph/sourcegraph/internal/honey"
 	"github.com/sourcegraph/sourcegraph/internal/observation"
 	"github.com/sourcegraph/sourcegraph/internal/trace/ot"
 	"github.com/sourcegraph/sourcegraph/lib/codeintel/lsiftyped"
@@ -39,18 +38,6 @@ var (
 
 func init() {
 	client = gosyntect.New(syntectServer)
-
-	obsvCtx := observation.Context{
-		HoneyDataset: &honey.Dataset{
-			Name:       "codeintel-syntax-highlighting",
-			SampleRate: 10, // 1 in 10
-		},
-	}
-	highlightOp = obsvCtx.Operation(observation.Op{
-		Name:        "codeintel.syntax-highlight.Code",
-		LogFields:   []otlog.Field{},
-		ErrorFilter: func(err error) observation.ErrorFilterBehaviour { return observation.EmitForHoney },
-	})
 }
 
 // IsBinary is a helper to tell if the content of a file is binary or not.
@@ -6,9 +6,10 @@ import (
 	"html/template"
 	"strings"
 
-	"github.com/sourcegraph/sourcegraph/lib/codeintel/lsiftyped"
 	"golang.org/x/net/html"
 	"golang.org/x/net/html/atom"
+
+	"github.com/sourcegraph/sourcegraph/lib/codeintel/lsiftyped"
 )
 
 // DocumentToSplitHTML returns a list of each line of HTML.
@@ -7,6 +7,7 @@ import (
 
 	"github.com/go-enry/go-enry/v2"
 	"github.com/grafana/regexp"
+
 	"github.com/sourcegraph/sourcegraph/internal/conf"
 	"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
 )
@@ -6,7 +6,6 @@ import (
 	"context"
 	"database/sql"
 	"encoding/base64"
-	"log"
 	"net"
 	"net/http"
 	"net/url"
@@ -20,6 +19,7 @@ import (
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/tidwall/gjson"
+	"go.uber.org/zap"
 	"golang.org/x/sync/semaphore"
 	"golang.org/x/time/rate"
 
@@ -57,7 +57,7 @@ import (
 	"github.com/sourcegraph/sourcegraph/internal/types"
 	"github.com/sourcegraph/sourcegraph/internal/version"
 	"github.com/sourcegraph/sourcegraph/lib/errors"
-	sglog "github.com/sourcegraph/sourcegraph/lib/log"
+	"github.com/sourcegraph/sourcegraph/lib/log"
 	"github.com/sourcegraph/sourcegraph/schema"
 )
 
@@ -82,7 +82,7 @@ func main() {
 
 	conf.Init()
 	logging.Init()
-	syncLogs := sglog.Init(sglog.Resource{
+	syncLogs := log.Init(log.Resource{
 		Name:       env.MyName,
 		Version:    version.Version(),
 		InstanceID: hostname.Get(),
@@ -93,21 +93,23 @@ func main() {
 	trace.Init()
 	profiler.Init()
 
+	logger := log.Scoped("server", "the gitserver service")
+
 	if reposDir == "" {
-		log.Fatal("git-server: SRC_REPOS_DIR is required")
+		logger.Fatal("SRC_REPOS_DIR is required")
 	}
 	if err := os.MkdirAll(reposDir, os.ModePerm); err != nil {
-		log.Fatalf("failed to create SRC_REPOS_DIR: %s", err)
+		logger.Fatal("failed to create SRC_REPOS_DIR", zap.Error(err))
 	}
 
 	wantPctFree2, err := getPercent(wantPctFree)
 	if err != nil {
-		log.Fatalf("SRC_REPOS_DESIRED_PERCENT_FREE is out of range: %v", err)
+		logger.Fatal("SRC_REPOS_DESIRED_PERCENT_FREE is out of range", zap.Error(err))
 	}
 
 	sqlDB, err := getDB()
 	if err != nil {
-		log.Fatalf("failed to initialize database stores: %v", err)
+		logger.Fatal("failed to initialize database stores", zap.Error(err))
 	}
 	db := database.NewDB(sqlDB)
 
@@ -117,7 +119,7 @@ func main() {
 
 	err = keyring.Init(ctx)
 	if err != nil {
-		log.Fatalf("failed to initialise keyring: %s", err)
+		logger.Fatal("failed to initialise keyring", zap.Error(err))
 	}
 
 	gitserver := server.Server{
@@ -136,18 +138,18 @@ func main() {
 	}
 
 	observationContext := &observation.Context{
-		Logger:     log15.Root(),
+		Logger:     logger,
 		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 		Registerer: prometheus.DefaultRegisterer,
 	}
 	gitserver.RegisterMetrics(db, observationContext)
 
 	if tmpDir, err := gitserver.SetupAndClearTmp(); err != nil {
-		log.Fatalf("failed to setup temporary directory: %s", err)
+		logger.Fatal("failed to setup temporary directory", log.Error(err))
 	} else if err := os.Setenv("TMP_DIR", tmpDir); err != nil {
 		// Additionally, set TMP_DIR so other temporary files we may accidentally
 		// create are on the faster RepoDir mount.
-		log.Fatalf("Setting TMP_DIR: %s", err)
+		logger.Fatal("Setting TMP_DIR", log.Error(err))
 	}
 
 	// Create Handler now since it also initializes state
@@ -166,7 +168,7 @@ func main() {
 	// Best effort attempt to sync rate limiters for site level external services
 	// early on. If it fails, we'll try again in the background sync below.
 	if err := syncSiteLevelExternalServiceRateLimiters(ctx, externalServiceStore); err != nil {
-		log15.Warn("error performing initial site level rate limit sync", "error", err)
+		logger.Warn("error performing initial site level rate limit sync", log.Error(err))
 	}
 
 	go syncRateLimiters(ctx, externalServiceStore, rateLimitSyncerLimitPerSecond)
@@ -189,12 +191,12 @@ func main() {
 		Addr:    addr,
 		Handler: handler,
 	}
-	log15.Info("git-server: listening", "addr", srv.Addr)
+	logger.Info("git-server: listening", log.String("addr", srv.Addr))
 
 	go func() {
 		err := srv.ListenAndServe()
 		if err != http.ErrServerClosed {
-			log.Fatal(err)
+			logger.Fatal(err.Error())
 		}
 	}()
 
@@ -217,7 +219,7 @@ func main() {
 	defer cancel()
 	// Stop accepting requests.
 	if err := srv.Shutdown(ctx); err != nil {
-		log15.Error("shutting down http server", "error", err)
+		logger.Error("shutting down http server", log.Error(err))
 	}
 
 	// The most important thing this does is kill all our clones. If we just
@@ -1173,7 +1173,7 @@ func (s *Server) handleBatchLog(w http.ResponseWriter, r *http.Request) {
 	// Run git log for a single repository.
 	// Invoked multiple times from the handler defined below.
 	performGitLogCommand := func(ctx context.Context, repoCommit api.RepoCommit, format string) (output string, isRepoCloned bool, err error) {
-		ctx, endObservation := operations.batchLogSingle.With(ctx, &err, observation.Args{
+		ctx, _, endObservation := operations.batchLogSingle.With(ctx, &err, observation.Args{
 			LogFields: append(
 				[]log.Field{
 					log.String("format", format),
@@ -1206,7 +1206,7 @@ func (s *Server) handleBatchLog(w http.ResponseWriter, r *http.Request) {
 
 	// Handles the /batch-log route
 	instrumentedHandler := func(ctx context.Context) (statusCodeOnError int, err error) {
-		ctx, logger, endObservation := operations.batchLog.WithAndLogger(ctx, &err, observation.Args{})
+		ctx, logger, endObservation := operations.batchLog.With(ctx, &err, observation.Args{})
 		defer func() {
 			endObservation(1, observation.Args{LogFields: []log.Field{
 				log.Int("statusCodeOnError", statusCodeOnError),
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/inconshreveable/log15"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/urfave/cli/v2"
@@ -70,7 +69,7 @@ func mainErr(ctx context.Context, args []string) error {
 
 func newRunnerFactory() func(ctx context.Context, schemaNames []string) (cliutil.Runner, error) {
 	observationContext := &observation.Context{
-		Logger:     log15.Root(),
+		Logger:     sglog.Scoped("runner", ""),
 		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 		Registerer: prometheus.DefaultRegisterer,
 	}
@@ -59,7 +59,7 @@ func (f *repositoryFetcher) FetchRepositoryArchive(ctx context.Context, args typ
 }
 
 func (f *repositoryFetcher) fetchRepositoryArchive(ctx context.Context, args types.SearchArgs, paths []string, callback func(request ParseRequest)) (err error) {
-	ctx, trace, endObservation := f.operations.fetchRepositoryArchive.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, trace, endObservation := f.operations.fetchRepositoryArchive.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("repo", string(args.Repo)),
 		log.String("commitID", string(args.CommitID)),
 		log.Int("paths", len(paths)),
@@ -44,7 +44,7 @@ func NewClient(observationContext *observation.Context) GitserverClient {
 }
 
 func (c *gitserverClient) FetchTar(ctx context.Context, repo api.RepoName, commit api.CommitID, paths []string) (_ io.ReadCloser, err error) {
-	ctx, endObservation := c.operations.fetchTar.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.fetchTar.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("repo", string(repo)),
 		log.String("commit", string(commit)),
 		log.Int("paths", len(paths)),
@@ -67,7 +67,7 @@ func (c *gitserverClient) FetchTar(ctx context.Context, repo api.RepoName, commi
 }
 
 func (c *gitserverClient) GitDiff(ctx context.Context, repo api.RepoName, commitA, commitB api.CommitID) (_ Changes, err error) {
-	ctx, endObservation := c.operations.gitDiff.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.gitDiff.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("repo", string(repo)),
 		log.String("commitA", string(commitA)),
 		log.String("commitB", string(commitB)),
@@ -22,7 +22,7 @@ const searchTimeout = 60 * time.Second
 
 func MakeSqliteSearchFunc(operations *sharedobservability.Operations, cachedDatabaseWriter writer.CachedDatabaseWriter) types.SearchFunc {
 	return func(ctx context.Context, args types.SearchArgs) (results []result.Symbol, err error) {
-		ctx, trace, endObservation := operations.Search.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
+		ctx, trace, endObservation := operations.Search.With(ctx, &err, observation.Args{LogFields: []log.Field{
 			log.String("repo", string(args.Repo)),
 			log.String("commitID", string(args.CommitID)),
 			log.String("query", args.Query),
@@ -51,7 +51,7 @@ func NewParser(
 }
 
 func (p *parser) Parse(ctx context.Context, args types.SearchArgs, paths []string) (_ <-chan SymbolOrError, err error) {
-	ctx, endObservation := p.operations.parse.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := p.operations.parse.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("repo", string(args.Repo)),
 		log.String("commitID", string(args.CommitID)),
 		log.Int("paths", len(paths)),
@@ -131,7 +131,7 @@ func min(a, b int) int {
 }
 
 func (p *parser) handleParseRequest(ctx context.Context, symbolOrErrors chan<- SymbolOrError, parseRequest fetcher.ParseRequest, totalSymbols *uint32) (err error) {
-	ctx, trace, endObservation := p.operations.handleParseRequest.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, trace, endObservation := p.operations.handleParseRequest.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("path", parseRequest.Path),
 		log.Int("fileSize", len(parseRequest.Data)),
 	}})
@@ -2,11 +2,9 @@ package shared
 
 import (
 	"context"
-	"log"
 	"net/http"
 	"time"
 
-	"github.com/inconshreveable/log15"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 
@@ -30,7 +28,7 @@ import (
 	"github.com/sourcegraph/sourcegraph/internal/trace/ot"
 	"github.com/sourcegraph/sourcegraph/internal/tracer"
 	"github.com/sourcegraph/sourcegraph/internal/version"
-	sglog "github.com/sourcegraph/sourcegraph/lib/log"
+	"github.com/sourcegraph/sourcegraph/lib/log"
 )
 
 const addr = ":3184"
@@ -42,7 +40,7 @@ func Main(setup SetupFunc) {
 	env.HandleHelpFlag()
 	conf.Init()
 	logging.Init()
-	syncLogs := sglog.Init(sglog.Resource{
+	syncLogs := log.Init(log.Resource{
 		Name:       env.MyName,
 		Version:    version.Version(),
 		InstanceID: hostname.Get(),
@@ -56,8 +54,9 @@ func Main(setup SetupFunc) {
 	routines := []goroutine.BackgroundRoutine{}
 
 	// Initialize tracing/metrics
+	logger := log.Scoped("service", "the symbols service")
 	observationContext := &observation.Context{
-		Logger:     log15.Root(),
+		Logger:     logger,
 		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 		Registerer: prometheus.DefaultRegisterer,
 		HoneyDataset: &honey.Dataset{
@@ -71,7 +70,7 @@ func Main(setup SetupFunc) {
 	repositoryFetcher := fetcher.NewRepositoryFetcher(gitserverClient, types.LoadRepositoryFetcherConfig(env.BaseConfig{}).MaxTotalPathsLength, observationContext)
 	searchFunc, handleStatus, newRoutines, ctagsBinary, err := setup(observationContext, gitserverClient, repositoryFetcher)
 	if err != nil {
-		log.Fatalf("Failed to setup: %v", err)
+		logger.Fatal("Failed to set up", log.Error(err))
 	}
 	routines = append(routines, newRoutines...)
 
@@ -4,7 +4,6 @@ import (
 	"context"
 	"os"
 
-	"github.com/inconshreveable/log15"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 
@@ -48,7 +47,7 @@ func (m *migrator) Routines(ctx context.Context, logger log.Logger) ([]goroutine
 	db := database.NewDB(sqlDB)
 
 	observationContext := &observation.Context{
-		Logger:     log15.Root(),
+		Logger:     logger.Scoped("routines", "migrator routines"),
 		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 		Registerer: prometheus.DefaultRegisterer,
 	}
@@ -57,7 +57,7 @@ func New(options Options, observationContext *observation.Context) *Client {
 }
 
 func (c *Client) Dequeue(ctx context.Context, queueName string, job *executor.Job) (_ bool, err error) {
-	ctx, endObservation := c.operations.dequeue.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.dequeue.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("queueName", queueName),
 	}})
 	defer endObservation(1, observation.Args{})
@@ -73,7 +73,7 @@ func (c *Client) Dequeue(ctx context.Context, queueName string, job *executor.Jo
 }
 
 func (c *Client) AddExecutionLogEntry(ctx context.Context, queueName string, jobID int, entry workerutil.ExecutionLogEntry) (entryID int, err error) {
-	ctx, endObservation := c.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("queueName", queueName),
 		log.Int("jobID", jobID),
 	}})
@@ -93,7 +93,7 @@ func (c *Client) AddExecutionLogEntry(ctx context.Context, queueName string, job
 }
 
 func (c *Client) UpdateExecutionLogEntry(ctx context.Context, queueName string, jobID, entryID int, entry workerutil.ExecutionLogEntry) (err error) {
-	ctx, endObservation := c.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("queueName", queueName),
 		log.Int("jobID", jobID),
 		log.Int("entryID", entryID),
@@ -114,7 +114,7 @@ func (c *Client) UpdateExecutionLogEntry(ctx context.Context, queueName string,
 }
 
 func (c *Client) MarkComplete(ctx context.Context, queueName string, jobID int) (err error) {
-	ctx, endObservation := c.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("queueName", queueName),
 		log.Int("jobID", jobID),
 	}})
@@ -132,7 +132,7 @@ func (c *Client) MarkComplete(ctx context.Context, queueName string, jobID int)
 }
 
 func (c *Client) MarkErrored(ctx context.Context, queueName string, jobID int, errorMessage string) (err error) {
-	ctx, endObservation := c.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("queueName", queueName),
 		log.Int("jobID", jobID),
 	}})
@@ -151,7 +151,7 @@ func (c *Client) MarkErrored(ctx context.Context, queueName string, jobID int, e
 }
 
 func (c *Client) MarkFailed(ctx context.Context, queueName string, jobID int, errorMessage string) (err error) {
-	ctx, endObservation := c.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("queueName", queueName),
 		log.Int("jobID", jobID),
 	}})
@@ -196,7 +196,7 @@ func (c *Client) Ping(ctx context.Context, queueName string, jobIDs []int) (err
 }
 
 func (c *Client) Heartbeat(ctx context.Context, queueName string, jobIDs []int) (knownIDs []int, err error) {
-	ctx, endObservation := c.operations.heartbeat.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := c.operations.heartbeat.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("queueName", queueName),
 		log.String("jobIDs", intsToString(jobIDs)),
 	}})
@@ -34,7 +34,7 @@ func runCommand(ctx context.Context, command command, logger *Logger) (err error
 	ctx, cancel := context.WithCancel(ctx)
 	defer cancel()
 
-	ctx, endObservation := command.Operation.With(ctx, &err, observation.Args{})
+	ctx, _, endObservation := command.Operation.With(ctx, &err, observation.Args{})
 	defer endObservation(1, observation.Args{})
 
 	log15.Info(fmt.Sprintf("Running command: %s", strings.Join(command.Command, " ")))
@@ -48,7 +48,7 @@ func main() {
 
 	// Initialize tracing/metrics
 	observationContext := &observation.Context{
-		Logger:     log15.Root(),
+		Logger:     sglog.Scoped("service", "executor service"),
 		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 		Registerer: prometheus.DefaultRegisterer,
 	}
@@ -105,7 +105,7 @@ func main() {
 
 func makeWorkerMetrics(queueName string) workerutil.WorkerMetrics {
 	observationContext := &observation.Context{
-		Logger:     log15.Root(),
+		Logger:     sglog.Scoped("executor_processor", "executor worker processor"),
 		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 		Registerer: prometheus.DefaultRegisterer,
 	}
@@ -41,7 +41,7 @@ var errVerificationNotSupported = errors.New(strings.Join([]string{
 func authMiddleware(next http.Handler, db database.DB, authValidators AuthValidatorMap, operation *observation.Operation) http.Handler {
 	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		statusCode, err := func() (_ int, err error) {
-			ctx, trace, endObservation := operation.WithAndLogger(r.Context(), &err, observation.Args{})
+			ctx, trace, endObservation := operation.With(r.Context(), &err, observation.Args{})
 			defer endObservation(1, observation.Args{})
 
 			// Skip auth check if it's not enabled in the instance's site configuration, if this
@@ -79,7 +79,7 @@ func (h *UploadHandler) handleEnqueue(w http.ResponseWriter, r *http.Request) {
 	// easily. The remainder of the function simply serializes the result to the
 	// HTTP response writer.
 	payload, statusCode, err := func() (_ interface{}, statusCode int, err error) {
-		ctx, trace, endObservation := h.operations.handleEnqueue.WithAndLogger(r.Context(), &err, observation.Args{})
+		ctx, trace, endObservation := h.operations.handleEnqueue.With(r.Context(), &err, observation.Args{})
 		defer func() {
 			endObservation(1, observation.Args{LogFields: []log.Field{
 				log.Int("statusCode", statusCode),
@@ -21,7 +21,7 @@ import (
 // new upload record with state 'uploading' and returns the generated ID to be used in subsequent
 // requests for the same upload.
 func (h *UploadHandler) handleEnqueueMultipartSetup(ctx context.Context, uploadState uploadState, _ io.Reader) (_ interface{}, statusCode int, err error) {
-	ctx, trace, endObservation := h.operations.handleEnqueueMultipartSetup.WithAndLogger(ctx, &err, observation.Args{})
+	ctx, trace, endObservation := h.operations.handleEnqueueMultipartSetup.With(ctx, &err, observation.Args{})
 	defer func() {
 		endObservation(1, observation.Args{LogFields: []log.Field{
 			log.Int("statusCode", statusCode),
@@ -64,7 +64,7 @@ func (h *UploadHandler) handleEnqueueMultipartSetup(ctx context.Context, uploadS
 // handleEnqueueMultipartUpload handles a partial upload in a multipart upload. This proxies the
 // data to the bundle manager and marks the part index in the upload record.
 func (h *UploadHandler) handleEnqueueMultipartUpload(ctx context.Context, uploadState uploadState, body io.Reader) (_ interface{}, statusCode int, err error) {
-	ctx, trace, endObservation := h.operations.handleEnqueueMultipartUpload.WithAndLogger(ctx, &err, observation.Args{})
+	ctx, trace, endObservation := h.operations.handleEnqueueMultipartUpload.With(ctx, &err, observation.Args{})
 	defer func() {
 		endObservation(1, observation.Args{LogFields: []log.Field{
 			log.Int("statusCode", statusCode),
@@ -93,7 +93,7 @@ func (h *UploadHandler) handleEnqueueMultipartUpload(ctx context.Context, upload
 // upload from 'uploading' to 'queued', then instructs the bundle manager to concatenate all of the part
 // files together.
 func (h *UploadHandler) handleEnqueueMultipartFinalize(ctx context.Context, uploadState uploadState, _ io.Reader) (_ interface{}, statusCode int, err error) {
-	ctx, trace, endObservation := h.operations.handleEnqueueMultipartFinalize.WithAndLogger(ctx, &err, observation.Args{})
+	ctx, trace, endObservation := h.operations.handleEnqueueMultipartFinalize.With(ctx, &err, observation.Args{})
 	defer func() {
 		endObservation(1, observation.Args{LogFields: []log.Field{
 			log.Int("statusCode", statusCode),
@@ -17,7 +17,7 @@ import (
 // handleEnqueueSinglePayload handles a non-multipart upload. This creates an upload record
 // with state 'queued', proxies the data to the bundle manager, and returns the generated ID.
 func (h *UploadHandler) handleEnqueueSinglePayload(ctx context.Context, uploadState uploadState, body io.Reader) (_ interface{}, statusCode int, err error) {
-	ctx, trace, endObservation := h.operations.handleEnqueueSinglePayload.WithAndLogger(ctx, &err, observation.Args{})
+	ctx, trace, endObservation := h.operations.handleEnqueueSinglePayload.With(ctx, &err, observation.Args{})
 	defer func() {
 		endObservation(1, observation.Args{LogFields: []log.Field{
 			log.Int("statusCode", statusCode),
@@ -12,7 +12,7 @@ import (
 // DocumentationDefinitions returns the list of source locations that define the symbol found at
 // the given documentation path ID, if any.
 func (r *queryResolver) DocumentationDefinitions(ctx context.Context, pathID string) (_ []AdjustedLocation, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationDefinitions", r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -18,7 +18,7 @@ const defaultReferencesPageSize = 100
 // DocumentationReferences returns the list of source locations that reference the symbol found at
 // the given documentation path ID, if any.
 func (r *queryResolver) DocumentationReferences(ctx context.Context, pathID string, limit int, rawCursor string) (_ []AdjustedLocation, _ string, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationReferences", r.operations.documentationReferences, slowReferencesRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.documentationReferences, slowReferencesRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -26,7 +26,7 @@ const numAncestors = 100
 // path is a prefix are returned. These dump IDs should be subsequently passed to invocations of
 // Definitions, References, and Hover.
 func (r *resolver) findClosestDumps(ctx context.Context, cachedCommitChecker *cachedCommitChecker, repositoryID int, commit, path string, exactPath bool, indexer string) (_ []store.Dump, err error) {
-	ctx, trace, endObservation := r.operations.findClosestDumps.WithAndLogger(ctx, &err, observation.Args{
+	ctx, trace, endObservation := r.operations.findClosestDumps.With(ctx, &err, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", repositoryID),
 			log.String("commit", commit),
@@ -100,7 +100,7 @@ func (r *Resolver) LSIFUploadByID(ctx context.Context, id graphql.ID) (_ gql.LSI
 
 // 🚨 SECURITY: dbstore layer handles authz for GetUploads
 func (r *Resolver) LSIFUploads(ctx context.Context, args *gql.LSIFUploadsQueryArgs) (_ gql.LSIFUploadConnectionResolver, err error) {
-	// ctx, endObservation := r.observationContext.lsifUploads.With(ctx, &err, observation.Args{})
+	// ctx, _, endObservation := r.observationContext.lsifUploads.With(ctx, &err, observation.Args{})
 	// endObservation.EndOnCancel(ctx, 1, observation.Args{})
 
 	// Delegate behavior to LSIFUploadsByRepo with no specified repository identifier
@@ -128,7 +128,7 @@ func (r *Resolver) LSIFUploadsByRepo(ctx context.Context, args *gql.LSIFReposito
 
 // 🚨 SECURITY: Only site admins may modify code intelligence upload data
 func (r *Resolver) DeleteLSIFUpload(ctx context.Context, args *struct{ ID graphql.ID }) (_ *gql.EmptyResponse, err error) {
-	ctx, endObservation := r.observationContext.deleteLsifUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := r.observationContext.deleteLsifUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("uploadID", string(args.ID)),
 	}})
 	endObservation.OnCancel(ctx, 1, observation.Args{})
@@ -185,7 +185,7 @@ func (r *Resolver) LSIFIndexes(ctx context.Context, args *gql.LSIFIndexesQueryAr
 		return nil, errAutoIndexingNotEnabled
 	}
 
-	ctx, endObservation := r.observationContext.lsifIndexes.With(ctx, &err, observation.Args{})
+	ctx, _, endObservation := r.observationContext.lsifIndexes.With(ctx, &err, observation.Args{})
 	endObservation.OnCancel(ctx, 1, observation.Args{})
 
 	// Delegate behavior to LSIFIndexesByRepo with no specified repository identifier
@@ -217,7 +217,7 @@ func (r *Resolver) LSIFIndexesByRepo(ctx context.Context, args *gql.LSIFReposito
 
 // 🚨 SECURITY: Only site admins may modify code intelligence index data
 func (r *Resolver) DeleteLSIFIndex(ctx context.Context, args *struct{ ID graphql.ID }) (_ *gql.EmptyResponse, err error) {
-	ctx, endObservation := r.observationContext.deleteLsifIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := r.observationContext.deleteLsifIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("indexID", string(args.ID)),
 	}})
 	defer endObservation(1, observation.Args{})
@@ -243,7 +243,7 @@ func (r *Resolver) DeleteLSIFIndex(ctx context.Context, args *struct{ ID graphql
 
 // 🚨 SECURITY: Only entrypoint is within the repository resolver so the user is already authenticated
 func (r *Resolver) CommitGraph(ctx context.Context, id graphql.ID) (_ gql.CodeIntelligenceCommitGraphResolver, err error) {
-	ctx, endObservation := r.observationContext.commitGraph.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := r.observationContext.commitGraph.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("repoID", string(id)),
 	}})
 	endObservation.OnCancel(ctx, 1, observation.Args{})
@@ -456,7 +456,7 @@ func (r *Resolver) CreateCodeIntelligenceConfigurationPolicy(ctx context.Context
 
 // 🚨 SECURITY: Only site admins may modify code intelligence configuration policies
 func (r *Resolver) UpdateCodeIntelligenceConfigurationPolicy(ctx context.Context, args *gql.UpdateCodeIntelligenceConfigurationPolicyArgs) (_ *gql.EmptyResponse, err error) {
-	ctx, endObservation := r.observationContext.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := r.observationContext.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("configPolicyID", string(args.ID)),
 	}})
 	defer endObservation(1, observation.Args{})
@@ -495,7 +495,7 @@ func (r *Resolver) UpdateCodeIntelligenceConfigurationPolicy(ctx context.Context
 
 // 🚨 SECURITY: Only site admins may modify code intelligence configuration policies
 func (r *Resolver) DeleteCodeIntelligenceConfigurationPolicy(ctx context.Context, args *gql.DeleteCodeIntelligenceConfigurationPolicyArgs) (_ *gql.EmptyResponse, err error) {
-	ctx, endObservation := r.observationContext.deleteConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := r.observationContext.deleteConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("configPolicyID", string(args.Policy)),
 	}})
 	endObservation.OnCancel(ctx, 1, observation.Args{})
@@ -568,7 +568,7 @@ func (r *Resolver) IndexConfiguration(ctx context.Context, id graphql.ID) (_ gql
 
 // 🚨 SECURITY: Only site admins may modify code intelligence indexing configuration
 func (r *Resolver) UpdateRepositoryIndexConfiguration(ctx context.Context, args *gql.UpdateRepositoryIndexConfigurationArgs) (_ *gql.EmptyResponse, err error) {
-	ctx, endObservation := r.observationContext.updateIndexConfiguration.With(ctx, &err, observation.Args{LogFields: []log.Field{
+	ctx, _, endObservation := r.observationContext.updateIndexConfiguration.With(ctx, &err, observation.Args{LogFields: []log.Field{
 		log.String("repoID", string(args.Repository)),
 	}})
 	defer endObservation(1, observation.Args{})
@@ -593,7 +593,7 @@ func (r *Resolver) UpdateRepositoryIndexConfiguration(ctx context.Context, args
 }
 
 func (r *Resolver) PreviewRepositoryFilter(ctx context.Context, args *gql.PreviewRepositoryFilterArgs) (_ gql.RepositoryFilterPreviewResolver, err error) {
-	ctx, endObservation := r.observationContext.previewRepoFilter.With(ctx, &err, observation.Args{})
+	ctx, _, endObservation := r.observationContext.previewRepoFilter.With(ctx, &err, observation.Args{})
 	defer endObservation(1, observation.Args{})
 
 	offset, err := graphqlutil.DecodeIntCursor(args.After)
@@ -636,7 +636,7 @@ func (r *Resolver) PreviewRepositoryFilter(ctx context.Context, args *gql.Previe
 }
 
 func (r *Resolver) PreviewGitObjectFilter(ctx context.Context, id graphql.ID, args *gql.PreviewGitObjectFilterArgs) (_ []gql.GitObjectFilterPreviewResolver, err error) {
-	ctx, endObservation := r.observationContext.previewGitObjectFilter.With(ctx, &err, observation.Args{})
+	ctx, _, endObservation := r.observationContext.previewGitObjectFilter.With(ctx, &err, observation.Args{})
 	defer endObservation(1, observation.Args{})
 
 	repositoryID, err := unmarshalLSIFIndexGQLID(id)
@@ -83,7 +83,7 @@ func (r *preciseCodeIntelSupportResolver) Indexers() *[]gql.CodeIntelIndexerReso
 }
 
 func (r *Resolver) RequestLanguageSupport(ctx context.Context, args *gql.RequestLanguageSupportArgs) (_ *gql.EmptyResponse, err error) {
-	ctx, endObservation := r.observationContext.requestLanguageSupport.With(ctx, &err, observation.Args{})
+	ctx, _, endObservation := r.observationContext.requestLanguageSupport.With(ctx, &err, observation.Args{})
 	defer endObservation(1, observation.Args{})
 
 	userID := int(actor.FromContext(ctx).UID)
@@ -99,7 +99,7 @@ func (r *Resolver) RequestLanguageSupport(ctx context.Context, args *gql.Request
 }
 
 func (r *Resolver) RequestedLanguageSupport(ctx context.Context) (_ []string, err error) {
-	ctx, endObservation := r.observationContext.requestedLanguageSupport.With(ctx, &err, observation.Args{})
+	ctx, _, endObservation := r.observationContext.requestedLanguageSupport.With(ctx, &err, observation.Args{})
 	defer endObservation(1, observation.Args{})
 
 	userID := int(actor.FromContext(ctx).UID)
@@ -5,10 +5,11 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/inconshreveable/log15"
+	"go.uber.org/zap"
 
 	"github.com/sourcegraph/sourcegraph/internal/metrics"
 	"github.com/sourcegraph/sourcegraph/internal/observation"
+	"github.com/sourcegraph/sourcegraph/lib/log"
 )
 
 type operations struct {
@@ -81,33 +82,28 @@ func newOperations(observationContext *observation.Context) *operations {
 func observeResolver(
 	ctx context.Context,
 	err *error,
-	name string,
 	operation *observation.Operation,
 	threshold time.Duration,
 	observationArgs observation.Args,
 ) (context.Context, observation.TraceLogger, func()) {
 	start := time.Now()
-	ctx, trace, endObservation := operation.WithAndLogger(ctx, err, observationArgs)
+	ctx, trace, endObservation := operation.With(ctx, err, observationArgs)
 
 	return ctx, trace, func() {
 		duration := time.Since(start)
 		endObservation(1, observation.Args{})
 
 		if duration >= threshold {
-			lowSlowRequest(name, duration, err, observationArgs)
+			// use trace logger which includes all relevant fields
+			lowSlowRequest(trace, duration, err)
 		}
 	}
 }
 
-func lowSlowRequest(name string, duration time.Duration, err *error, observationArgs observation.Args) {
-	pairs := append(
-		observationArgs.LogFieldPairs(),
-		"type", name,
-		"duration_ms", duration.Milliseconds(),
-	)
+func lowSlowRequest(logger log.Logger, duration time.Duration, err *error) {
+	fields := []log.Field{zap.Duration("duration", duration)}
 	if err != nil && *err != nil {
-		pairs = append(pairs, "error", (*err).Error())
+		fields = append(fields, log.Error(*err))
 	}
 
-	log15.Warn("Slow codeintel request", pairs...)
+	logger.Warn("Slow codeintel request", fields...)
 }
@@ -17,7 +17,7 @@ const DefinitionsLimit = 100
 
 // Definitions returns the list of source locations that define the symbol at the given position.
 func (r *queryResolver) Definitions(ctx context.Context, line, character int) (_ []AdjustedLocation, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "Definitions", r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.definitions, slowDefinitionsRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -19,7 +19,7 @@ const slowDiagnosticsRequestThreshold = time.Second
 
 // Diagnostics returns the diagnostics for documents with the given path prefix.
 func (r *queryResolver) Diagnostics(ctx context.Context, limit int) (adjustedDiagnostics []AdjustedDiagnostic, _ int, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "Diagnostics", r.operations.diagnostics, slowDiagnosticsRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.diagnostics, slowDiagnosticsRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -20,7 +20,7 @@ const slowDocumentationPageRequestThreshold = time.Second
 //
 // nil, nil is returned if the page does not exist.
 func (r *queryResolver) DocumentationPage(ctx context.Context, pathID string) (_ *precise.DocumentationPageData, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationPage", r.operations.documentationPage, slowDocumentationPageRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.documentationPage, slowDocumentationPageRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -55,7 +55,7 @@ const slowDocumentationPathInfoRequestThreshold = time.Second
 //
 // nil, nil is returned if the page does not exist.
 func (r *queryResolver) DocumentationPathInfo(ctx context.Context, pathID string) (_ *precise.DocumentationPathInfoData, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "DocumentationPathInfo", r.operations.documentationPathInfo, slowDocumentationPathInfoRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.documentationPathInfo, slowDocumentationPathInfoRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -88,7 +88,7 @@ const slowDocumentationRequestThreshold = time.Second
 
 // Documentation returns documentation for the symbol at the given position.
 func (r *queryResolver) Documentation(ctx context.Context, line, character int) (_ []*Documentation, err error) {
-	ctx, _, endObservation := observeResolver(ctx, &err, "Documentation", r.operations.documentation, slowDocumentationRequestThreshold, observation.Args{
+	ctx, _, endObservation := observeResolver(ctx, &err, r.operations.documentation, slowDocumentationRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -142,7 +142,7 @@ const slowDocumentationSearchRequestThreshold = 3 * time.Second
 
 // DocumentationSearch searches for documentation, limiting the results to the specified set of repos (or all if empty).
 func (r *resolver) DocumentationSearch(ctx context.Context, query string, repos []string) (_ []precise.DocumentationSearchResult, err error) {
-	ctx, _, endObservation := observeResolver(ctx, &err, "DocumentationSearch", r.operations.documentationSearch, slowDocumentationSearchRequestThreshold, observation.Args{
+	ctx, _, endObservation := observeResolver(ctx, &err, r.operations.documentationSearch, slowDocumentationSearchRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.String("query", query),
 			log.String("repos", fmt.Sprint(repos)),
@@ -15,7 +15,7 @@ const slowHoverRequestThreshold = time.Second
 
 // Hover returns the hover text and range for the symbol at the given position.
 func (r *queryResolver) Hover(ctx context.Context, line, character int) (_ string, _ lsifstore.Range, _ bool, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "Hover", r.operations.hover, slowHoverRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.hover, slowHoverRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -19,7 +19,7 @@ const ImplementationsLimit = 100
 
 // Implementations returns the list of source locations that define the symbol at the given position.
 func (r *queryResolver) Implementations(ctx context.Context, line, character int, limit int, rawCursor string) (_ []AdjustedLocation, _ string, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "Implementations", r.operations.implementations, slowImplementationsRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.implementations, slowImplementationsRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -17,7 +17,7 @@ const slowRangesRequestThreshold = time.Second
 // results are partial and do not include references outside the current file, or any location that
 // requires cross-linking of bundles (cross-repo or cross-root).
 func (r *queryResolver) Ranges(ctx context.Context, startLine, endLine int) (adjustedRanges []AdjustedCodeIntelligenceRange, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "Ranges", r.operations.ranges, slowRangesRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.ranges, slowRangesRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -21,7 +21,7 @@ const slowReferencesRequestThreshold = time.Second
 
 // References returns the list of source locations that reference the symbol at the given position.
 func (r *queryResolver) References(ctx context.Context, line, character, limit int, rawCursor string) (_ []AdjustedLocation, _ string, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "References", r.operations.references, slowReferencesRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.references, slowReferencesRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -16,6 +16,7 @@ import (
 	"github.com/sourcegraph/sourcegraph/internal/observation"
 	"github.com/sourcegraph/sourcegraph/lib/codeintel/bloomfilter"
 	"github.com/sourcegraph/sourcegraph/lib/codeintel/precise"
+	"github.com/sourcegraph/sourcegraph/lib/log/logtest"
 )
 
 func TestReferences(t *testing.T) {
@@ -538,7 +539,7 @@ func TestIgnoredIDs(t *testing.T) {
 		ignoreIDs,
 		10,
 		0,
-		observation.TestTraceLogger,
+		observation.TestTraceLogger(logtest.Scoped(t)),
 	)
 	if err != nil {
 		t.Fatalf("uploadIDsWithReferences: %s", err)
@@ -16,7 +16,7 @@ const slowStencilRequestThreshold = time.Second
 
 // Stencil return all ranges within a single document.
 func (r *queryResolver) Stencil(ctx context.Context) (adjustedRanges []lsifstore.Range, err error) {
-	ctx, trace, endObservation := observeResolver(ctx, &err, "Stencil", r.operations.stencil, slowStencilRequestThreshold, observation.Args{
+	ctx, trace, endObservation := observeResolver(ctx, &err, r.operations.stencil, slowStencilRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", r.repositoryID),
 			log.String("commit", r.commit),
@@ -190,7 +190,7 @@ const slowQueryResolverRequestThreshold = time.Second
 // given repository, commit, and path, then constructs a new query resolver instance which
 // can be used to answer subsequent queries.
 func (r *resolver) QueryResolver(ctx context.Context, args *gql.GitBlobLSIFDataArgs) (_ QueryResolver, err error) {
-	ctx, _, endObservation := observeResolver(ctx, &err, "QueryResolver", r.operations.queryResolver, slowQueryResolverRequestThreshold, observation.Args{
+	ctx, _, endObservation := observeResolver(ctx, &err, r.operations.queryResolver, slowQueryResolverRequestThreshold, observation.Args{
 		LogFields: []log.Field{
 			log.Int("repositoryID", int(args.Repo.ID)),
 			log.String("commit", string(args.Commit)),
@@ -3,10 +3,8 @@ package codeintel
 import (
 	"context"
 	"database/sql"
-	"log"
 	"net/http"
 
-	"github.com/inconshreveable/log15"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 
@@ -28,6 +26,7 @@ import (
 	"github.com/sourcegraph/sourcegraph/internal/sentry"
 	"github.com/sourcegraph/sourcegraph/internal/trace"
 	"github.com/sourcegraph/sourcegraph/internal/uploadstore"
+	"github.com/sourcegraph/sourcegraph/lib/log"
 )
 
 type Services struct {
@@ -48,17 +47,18 @@ type Services struct {
 
 func NewServices(ctx context.Context, config *Config, siteConfig conftypes.WatchableSiteConfig, db database.DB) (*Services, error) {
 	// Initialize tracing/metrics
+	logger := log.Scoped("codeintel", "codeintel services")
 	observationContext := &observation.Context{
-		Logger:     log15.Root(),
+		Logger:     logger,
 		Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
 		Registerer: prometheus.DefaultRegisterer,
 	}
 
 	// Initialize sentry hub
-	hub := mustInitializeSentryHub(siteConfig)
+	hub := mustInitializeSentryHub(logger, siteConfig)
 
 	// Connect to database
-	codeIntelDB := mustInitializeCodeIntelDB()
+	codeIntelDB := mustInitializeCodeIntelDB(logger)
 
 	// Initialize stores
 	dbStore := store.NewWithDB(db, observationContext)
@@ -66,7 +66,7 @@ func NewServices(ctx context.Context, config *Config, siteConfig conftypes.Watch
 	lsifStore := lsifstore.NewStore(codeIntelDB, siteConfig, observationContext)
 	uploadStore, err := lsifuploadstore.New(context.Background(), config.LSIFUploadStoreConfig, observationContext)
 	if err != nil {
-		log.Fatalf("Failed to initialize upload store: %s", err)
+		logger.Fatal("Failed to initialize upload store", log.Error(err))
 	}
 
 	// Initialize http endpoints
@@ -117,18 +117,18 @@ func NewServices(ctx context.Context, config *Config, siteConfig conftypes.Watch
 	}, nil
 }
 
-func mustInitializeCodeIntelDB() *sql.DB {
+func mustInitializeCodeIntelDB(logger log.Logger) *sql.DB {
 	dsn := conf.GetServiceConnectionValueAndRestartOnChange(func(serviceConnections conftypes.ServiceConnections) string {
 		return serviceConnections.CodeIntelPostgresDSN
 	})
 	db, err := connections.EnsureNewCodeIntelDB(dsn, "frontend", &observation.TestContext)
 	if err != nil {
-		log.Fatalf("Failed to connect to codeintel database: %s", err)
+		logger.Fatal("Failed to connect to codeintel database", log.Error(err))
 	}
 	return db
 }
 
-func mustInitializeSentryHub(c conftypes.WatchableSiteConfig) *sentry.Hub {
+func mustInitializeSentryHub(logger log.Logger, c conftypes.WatchableSiteConfig) *sentry.Hub {
 	getDsn := func(c conftypes.SiteConfigQuerier) string {
 		if c.SiteConfig().Log != nil && c.SiteConfig().Log.Sentry != nil {
 			return c.SiteConfig().Log.Sentry.CodeIntelDSN
@@ -138,7 +138,7 @@ func mustInitializeSentryHub(c conftypes.WatchableSiteConfig) *sentry.Hub {
 
 	hub, err := sentry.NewWithDsn(getDsn(c), c, getDsn)
 	if err != nil {
-		log.Fatalf("Failed to initialize sentry hub: %s", err)
+		logger.Fatal("Failed to initialize sentry hub", log.Error(err))
 	}
 	return hub
 }
@@ -7,11 +7,9 @@ package main
 
 import (
 	"context"
-	"log"
 	"os"
 	"strconv"
 
-	"github.com/inconshreveable/log15"
 	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 
@@ -37,6 +35,7 @@ import (
 	"github.com/sourcegraph/sourcegraph/internal/observation"
 	"github.com/sourcegraph/sourcegraph/internal/oobmigration"
 	"github.com/sourcegraph/sourcegraph/internal/trace"
+	"github.com/sourcegraph/sourcegraph/lib/log"
 )
 
 func main() {
@@ -69,9 +68,10 @@ func init() {
 }
 
 func enterpriseSetupHook(db database.DB, conf conftypes.UnifiedWatchable) enterprise.Services {
+	logger := log.Scoped("enterprise", "frontend enterprise edition")
 	debug, _ := strconv.ParseBool(os.Getenv("DEBUG"))
 	if debug {
-		log.Println("enterprise edition")
+		logger.Debug("enterprise edition")
 	}
 
 	auth.Init(db)
@ -80,37 +80,38 @@ func enterpriseSetupHook(db database.DB, conf conftypes.UnifiedWatchable) enterp
|
||||
enterpriseServices := enterprise.DefaultServices()
|
||||
|
||||
observationContext := &observation.Context{
|
||||
Logger: log15.Root(),
|
||||
Logger: logger,
|
||||
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
|
||||
Registerer: prometheus.DefaultRegisterer,
|
||||
}
|
||||
|
||||
if err := codeIntelConfig.Validate(); err != nil {
|
||||
log.Fatalf("failed to load codeintel config: %s", err)
|
||||
logger.Fatal("failed to load codeintel config", log.Error(err))
|
||||
}
|
||||
|
||||
services, err := codeintel.NewServices(ctx, codeIntelConfig, conf, db)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
logger.Fatal(err.Error())
|
||||
}
|
||||
|
||||
if err := codeintel.Init(ctx, db, codeIntelConfig, &enterpriseServices, observationContext, services); err != nil {
|
||||
log.Fatalf("failed to initialize codeintel: %s", err)
|
||||
logger.Fatal("failed to initialize codeintel", log.Error(err))
|
||||
}
|
||||
|
||||
// Initialize executor-specific services with the code-intel services.
|
||||
if err := executor.Init(ctx, db, conf, &enterpriseServices, observationContext, services.InternalUploadHandler); err != nil {
|
||||
log.Fatalf("failed to initialize executor: %s", err)
|
||||
logger.Fatal("failed to initialize executor", log.Error(err))
|
||||
}
|
||||
|
||||
if err := app.Init(db, conf, &enterpriseServices); err != nil {
|
||||
log.Fatalf("failed to initialize app: %s", err)
|
||||
logger.Fatal("failed to initialize app", log.Error(err))
|
||||
}
|
||||
|
||||
// Initialize all the enterprise-specific services that do not need the codeintel-specific services.
|
||||
for name, fn := range initFunctions {
|
||||
initLogger := logger.Scoped(name, "")
|
||||
if err := fn(ctx, db, conf, &enterpriseServices, observationContext); err != nil {
|
||||
log.Fatalf("failed to initialize %s: %s", name, err)
|
||||
initLogger.Fatal("failed to initialize", log.Error(err))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
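
Every observation.Context rebuilt in this commit follows the same shape: the global log15.Root() is replaced by a logger scoped to the owning component. A representative sketch (the scope name and description are placeholders, not taken from the diff):

    observationContext := &observation.Context{
        // The scoped Logger is embedded into operations created from this
        // context, so they can emit structured logs directly.
        Logger:     log.Scoped("example", "an example component"),
        Tracer:     &trace.Tracer{Tracer: opentracing.GlobalTracer()},
        Registerer: prometheus.DefaultRegisterer,
    }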
@ -53,7 +53,7 @@ func (h *handler) Handle(ctx context.Context, record workerutil.Record) (err err

var requeued bool

ctx, logger, endObservation := h.handleOp.WithAndLogger(ctx, &err, observation.Args{})
ctx, logger, endObservation := h.handleOp.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{
LogFields: append(
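
WithAndLogger is folded into With here: the method now returns the derived context, a trace logger for structured logs tied to the operation, and the endObservation finisher. A sketch of a migrated caller, assuming (per the commit description) that the returned trace logger exposes the usual structured methods; op stands in for any observation.Operation:

    ctx, logger, endObservation := op.With(ctx, &err, observation.Args{})
    defer endObservation(1, observation.Args{})

    // Structured logs emitted through this logger are associated with the
    // operation's trace; callers with no use for it discard it instead.
    logger.Info("handling record")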
@ -20,6 +20,7 @@ import (
"github.com/sourcegraph/sourcegraph/lib/codeintel/bloomfilter"
"github.com/sourcegraph/sourcegraph/lib/codeintel/precise"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/lib/log/logtest"
)

func TestHandle(t *testing.T) {
@ -66,7 +67,7 @@ func TestHandle(t *testing.T) {
gitserverClient: gitserverClient,
}

requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger)
requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger(logtest.Scoped(t)))
if err != nil {
t.Fatalf("unexpected error handling upload: %s", err)
} else if requeued {
@ -200,7 +201,7 @@ func TestHandleError(t *testing.T) {
gitserverClient: gitserverClient,
}

requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger)
requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger(logtest.Scoped(t)))
if err == nil {
t.Fatalf("unexpected nil error handling upload")
} else if !strings.Contains(err.Error(), "uh-oh!") {
@ -255,7 +256,7 @@ func TestHandleCloneInProgress(t *testing.T) {
gitserverClient: gitserverClient,
}

requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger)
requeued, err := handler.handle(context.Background(), upload, observation.TestTraceLogger(logtest.Scoped(t)))
if err != nil {
t.Fatalf("unexpected error handling upload: %s", err)
} else if !requeued {
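
observation.TestTraceLogger is now a constructor rather than a package-level value: tests hand it a per-test logger from logtest so output is attributed to the running test. A sketch of the new test shape, where handler and upload stand in for the fixtures these tests build:

    func TestHandleExample(t *testing.T) {
        // logtest.Scoped(t) yields a structured logger bound to this test.
        traceLogger := observation.TestTraceLogger(logtest.Scoped(t))

        requeued, err := handler.handle(context.Background(), upload, traceLogger)
        if err != nil {
            t.Fatalf("unexpected error handling upload: %s", err)
        } else if requeued {
            t.Fatal("unexpected requeue")
        }
    }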
@ -8,7 +8,6 @@ import (
"time"

smithyhttp "github.com/aws/smithy-go/transport/http"
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -71,7 +70,7 @@ func main() {

// Initialize tracing/metrics
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: sglog.Scoped("worker", "the precise codeintel worker"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
HoneyDataset: &honey.Dataset{

@ -3,7 +3,6 @@ package batches
import (
"context"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -35,7 +34,7 @@ func (j *bulkOperationProcessorJob) Config() []env.Config {

func (j *bulkOperationProcessorJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: logger.Scoped("routines", "bulk operation processor job routines"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -3,7 +3,6 @@ package batches
import (
"database/sql"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -15,6 +14,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/trace"
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
"github.com/sourcegraph/sourcegraph/lib/log"
)

// InitStore initializes and returns a *store.Store instance.
@ -29,7 +29,7 @@ func InitStore() (*store.Store, error) {

var initStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.batches", "batches store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
@ -54,7 +54,7 @@ func InitReconcilerWorkerStore() (dbworkerstore.Store, error) {

var initReconcilerWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.reconciler", "reconciler worker store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
@ -79,7 +79,7 @@ func InitBulkOperationWorkerStore() (dbworkerstore.Store, error) {

var initBulkOperationWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.bulk_ops", "bulk operation worker store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
@ -104,7 +104,7 @@ func InitBatchSpecWorkspaceExecutionWorkerStore() (store.BatchSpecWorkspaceExecu

var initBatchSpecWorkspaceExecutionWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.execution", "the batch spec workspace execution worker store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
@ -129,7 +129,7 @@ func InitBatchSpecResolutionWorkerStore() (dbworkerstore.Store, error) {

var initBatchSpecResolutionWorkerStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.batch_spec_resolution", "the batch spec resolution worker store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
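
The memoized store constructors all adopt dotted scope names (store.batches, store.reconciler, store.bulk_ops, and so on), so sibling stores group together when logs are filtered by scope prefix. A one-line sketch of the convention, with a hypothetical component name:

    // "store.<component>" keeps related stores adjacent in log output.
    logger := log.Scoped("store.example", "example worker store")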
@ -3,7 +3,6 @@ package batches
import (
"context"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -34,7 +33,7 @@ func (j *janitorJob) Config() []env.Config {

func (j *janitorJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: logger.Scoped("routines", "janitor job routines"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -4,7 +4,6 @@ import (
"os"
"time"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -14,6 +13,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/oobmigration"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/log"
)

const (
@ -34,7 +34,7 @@ const (

func RegisterMigrations(db database.DB, outOfBandMigrationRunner *oobmigration.Runner) error {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("migrations", ""),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -3,7 +3,6 @@ package batches
import (
"context"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -36,7 +35,7 @@ func (j *reconcilerJob) Config() []env.Config {

func (j *reconcilerJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: logger.Scoped("routines", "reconciler job routines"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -3,7 +3,6 @@ package batches
import (
"context"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -33,7 +32,7 @@ func (j *workspaceResolverJob) Config() []env.Config {

func (j *workspaceResolverJob) Routines(_ context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: logger.Scoped("routines", "workspace resolver job routines"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -1,7 +1,6 @@
package codeintel

import (
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -11,6 +10,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/log"
)

// InitGitserverClient initializes and returns a gitserver client.
@ -25,7 +25,7 @@ func InitGitserverClient() (*gitserver.Client, error) {

var initGitserverClient = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("client.gitserver", "gitserver client"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
@ -45,7 +45,7 @@ func InitRepoUpdaterClient() *repoupdater.Client {

var initRepoUpdaterClient = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("client.repo-updater", "repo-updater client"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -102,7 +102,7 @@ func (u *Updater) tryUpdate(ctx context.Context, repositoryID, dirtyToken int) (
// the repository can be unmarked as long as the repository is not marked as dirty again before
// the update completes.
func (u *Updater) update(ctx context.Context, repositoryID, dirtyToken int) (err error) {
ctx, trace, endObservation := u.operations.commitUpdate.WithAndLogger(ctx, &err, observation.Args{
ctx, trace, endObservation := u.operations.commitUpdate.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.Int("dirtyToken", dirtyToken),

@ -35,7 +35,7 @@ func (j *commitGraphJob) Config() []env.Config {

func (j *commitGraphJob) Routines(ctx context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: logger.Scoped("routines", "commit graph job routines"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -1,7 +1,6 @@
package codeintel

import (
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -11,6 +10,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/trace"
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
"github.com/sourcegraph/sourcegraph/lib/log"
)

// InitDBStore initializes and returns a db store instance.
@ -25,7 +25,7 @@ func InitDBStore() (*dbstore.Store, error) {

var initDBStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store", "codeintel db store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
@ -50,7 +50,7 @@ func InitDependencySyncingStore() (dbworkerstore.Store, error) {

var initDependencySyncStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.dependency_sync", "dependency sync store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}
@ -74,7 +74,7 @@ func InitDependencyIndexingStore() (dbworkerstore.Store, error) {

var initDependenyIndexStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.dependency_index", "dependency index store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -169,7 +169,7 @@ func (h *dependencySyncSchedulerHandler) Handle(ctx context.Context, record work
}

func (h *dependencySyncSchedulerHandler) insertDependencyRepo(ctx context.Context, pkg precise.Package) (new bool, err error) {
ctx, endObservation := dependencyReposOps.InsertCloneableDependencyRepo.With(ctx, &err, observation.Args{
ctx, _, endObservation := dependencyReposOps.InsertCloneableDependencyRepo.With(ctx, &err, observation.Args{
MetricLabelValues: []string{pkg.Scheme},
})
defer func() {
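
The insertDependencyRepo hunk above shows the other half of the With migration: callers that never used a logger now discard the new second return value explicitly. A minimal sketch of that call shape, with op standing in for any observation.Operation:

    // The blank identifier drops the trace logger when it is not needed;
    // the context and endObservation finisher are used exactly as before.
    ctx, _, endObservation := op.With(ctx, &err, observation.Args{})
    defer endObservation(1, observation.Args{})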
@ -3,7 +3,6 @@ package codeintel
import (
"context"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -38,7 +37,7 @@ func (j *indexingJob) Config() []env.Config {

func (j *indexingJob) Routines(ctx context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: logger.Scoped("routines", "indexing job routines"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -3,7 +3,6 @@ package codeintel
import (
"context"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -35,7 +34,7 @@ func (j *janitorJob) Config() []env.Config {

func (j *janitorJob) Routines(ctx context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: logger.Scoped("routines", "janitor job routines"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -1,7 +1,6 @@
package codeintel

import (
"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -10,6 +9,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/log"
)

// InitLSIFStore initializes and returns an LSIF store instance.
@ -24,7 +24,7 @@ func InitLSIFStore() (*lsifstore.Store, error) {

var initLSFIStore = memo.NewMemoizedConstructor(func() (interface{}, error) {
observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("store.lsif", "lsif store"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -3,7 +3,6 @@ package batches
import (
"context"

"github.com/inconshreveable/log15"
"github.com/opentracing/opentracing-go"
"github.com/prometheus/client_golang/prometheus"

@ -16,6 +15,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/log"
)

// InitBackgroundJobs starts all jobs required to run batches. Currently, it is called from
@ -37,7 +37,7 @@ func InitBackgroundJobs(
ctx = actor.WithInternalActor(ctx)

observationContext := &observation.Context{
Logger: log15.Root(),
Logger: log.Scoped("background", "batches background jobs"),
Tracer: &trace.Tracer{Tracer: opentracing.GlobalTracer()},
Registerer: prometheus.DefaultRegisterer,
}

@ -234,7 +234,7 @@ type CreateBatchSpecOpts struct {

// CreateBatchSpec creates the BatchSpec.
func (s *Service) CreateBatchSpec(ctx context.Context, opts CreateBatchSpecOpts) (spec *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("changesetSpecs", len(opts.ChangesetSpecRandIDs)),
}})
defer endObservation(1, observation.Args{})
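
From here the batches Service and Store hunks repeat that one-line change for every instrumented method; only the operation name and log fields differ. For orientation, a sketch of the shape with fields attached (my reading of observation.Args, not text from this diff: LogFields annotate the operation's trace regardless of whether the returned logger is kept):

    ctx, _, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{
        // Fields recorded here ride along with the operation's trace span.
        LogFields: []log.Field{log.Int("changesetSpecs", len(opts.ChangesetSpecRandIDs))},
    })
    defer endObservation(1, observation.Args{})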
@ -322,7 +322,7 @@ type CreateBatchSpecFromRawOpts struct {

// CreateBatchSpecFromRaw creates the BatchSpec.
func (s *Service) CreateBatchSpecFromRaw(ctx context.Context, opts CreateBatchSpecFromRawOpts) (spec *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.createBatchSpecFromRaw.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.createBatchSpecFromRaw.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Bool("allowIgnored", opts.AllowIgnored),
log.Bool("allowUnsupported", opts.AllowUnsupported),
}})
@ -409,7 +409,7 @@ type ExecuteBatchSpecOpts struct {
// It returns an error if the batchSpecWorkspaceResolutionJob didn't finish
// successfully.
func (s *Service) ExecuteBatchSpec(ctx context.Context, opts ExecuteBatchSpecOpts) (batchSpec *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.executeBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.executeBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("BatchSpecRandID", opts.BatchSpecRandID),
}})
defer endObservation(1, observation.Args{})
@ -471,7 +471,7 @@ type CancelBatchSpecOpts struct {
// CancelBatchSpec cancels all BatchSpecWorkspaceExecutionJobs associated with
// the BatchSpec.
func (s *Service) CancelBatchSpec(ctx context.Context, opts CancelBatchSpecOpts) (batchSpec *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.cancelBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.cancelBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("BatchSpecRandID", opts.BatchSpecRandID),
}})
defer endObservation(1, observation.Args{})
@ -521,7 +521,7 @@ type ReplaceBatchSpecInputOpts struct {
// It returns an error if the batchSpecWorkspaceResolutionJob didn't finish
// successfully.
func (s *Service) ReplaceBatchSpecInput(ctx context.Context, opts ReplaceBatchSpecInputOpts) (batchSpec *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.replaceBatchSpecInput.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.replaceBatchSpecInput.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

// Before we hit the database, validate the new spec.
@ -564,7 +564,7 @@ func (s *Service) ReplaceBatchSpecInput(ctx context.Context, opts ReplaceBatchSp
type UpsertBatchSpecInputOpts = CreateBatchSpecFromRawOpts

func (s *Service) UpsertBatchSpecInput(ctx context.Context, opts UpsertBatchSpecInputOpts) (spec *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.upsertBatchSpecInput.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.upsertBatchSpecInput.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Bool("allowIgnored", opts.AllowIgnored),
log.Bool("allowUnsupported", opts.AllowUnsupported),
}})
@ -646,7 +646,7 @@ func replaceBatchSpec(ctx context.Context, tx *store.Store, oldSpec, newSpec *bt

// CreateChangesetSpec validates the given raw spec input and creates the ChangesetSpec.
func (s *Service) CreateChangesetSpec(ctx context.Context, rawSpec string, userID int32) (spec *btypes.ChangesetSpec, err error) {
ctx, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

spec, err = btypes.NewChangesetSpecFromRaw(rawSpec)
@ -689,7 +689,7 @@ func (e *changesetSpecNotFoundErr) NotFound() bool { return true }
// If it doesn't exist yet, both return values are nil.
// It accepts a *store.Store so that it can be used inside a transaction.
func (s *Service) GetBatchChangeMatchingBatchSpec(ctx context.Context, spec *btypes.BatchSpec) (_ *btypes.BatchChange, err error) {
ctx, endObservation := s.operations.getBatchChangeMatchingBatchSpec.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.getBatchChangeMatchingBatchSpec.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

// TODO: Should name be case-insensitive? i.e. are "foo" and "Foo" the same?
@ -712,7 +712,7 @@ func (s *Service) GetBatchChangeMatchingBatchSpec(ctx context.Context, spec *bty
// GetNewestBatchSpec returns the newest batch spec that matches the given
// spec's namespace and name and is owned by the given user, or nil if none is found.
func (s *Service) GetNewestBatchSpec(ctx context.Context, tx *store.Store, spec *btypes.BatchSpec, userID int32) (_ *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

opts := store.GetNewestBatchSpecOpts{
@ -755,7 +755,7 @@ func (o MoveBatchChangeOpts) String() string {
// MoveBatchChange moves the batch change from one namespace to another and/or renames
// the batch change.
func (s *Service) MoveBatchChange(ctx context.Context, opts MoveBatchChangeOpts) (batchChange *btypes.BatchChange, err error) {
ctx, endObservation := s.operations.moveBatchChange.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.moveBatchChange.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

tx, err := s.store.Transact(ctx)
@ -798,7 +798,7 @@ func (s *Service) MoveBatchChange(ctx context.Context, opts MoveBatchChangeOpts)

// CloseBatchChange closes the BatchChange with the given ID if it has not been closed yet.
func (s *Service) CloseBatchChange(ctx context.Context, id int64, closeChangesets bool) (batchChange *btypes.BatchChange, err error) {
ctx, endObservation := s.operations.closeBatchChange.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.closeBatchChange.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

batchChange, err = s.store.GetBatchChange(ctx, store.GetBatchChangeOpts{ID: id})
@ -844,7 +844,7 @@ func (s *Service) CloseBatchChange(ctx context.Context, id int64, closeChangeset
// DeleteBatchChange deletes the BatchChange with the given ID if it hasn't been
// deleted yet.
func (s *Service) DeleteBatchChange(ctx context.Context, id int64) (err error) {
ctx, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

batchChange, err := s.store.GetBatchChange(ctx, store.GetBatchChangeOpts{ID: id})
@ -863,7 +863,7 @@ func (s *Service) DeleteBatchChange(ctx context.Context, id int64) (err error) {
// whether the actor in the context has permission to enqueue a sync and then
// enqueues a sync by calling the repoupdater client.
func (s *Service) EnqueueChangesetSync(ctx context.Context, id int64) (err error) {
ctx, endObservation := s.operations.enqueueChangesetSync.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.enqueueChangesetSync.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

// Check for existence of changeset so we don't swallow that error.
@ -914,7 +914,7 @@ func (s *Service) EnqueueChangesetSync(ctx context.Context, id int64) (err error
// whether the actor in the context has permission to enqueue a reconciler run and then
// enqueues it by calling ResetReconcilerState.
func (s *Service) ReenqueueChangeset(ctx context.Context, id int64) (changeset *btypes.Changeset, repo *types.Repo, err error) {
ctx, endObservation := s.operations.reenqueueChangeset.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.reenqueueChangeset.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

changeset, err = s.store.GetChangeset(ctx, store.GetChangesetOpts{ID: id})
@ -999,7 +999,7 @@ var ErrNoNamespace = errors.New("no namespace given")
// Since Bitbucket sends the username as a header in REST responses, we can
// take it from there and complete the UserCredential.
func (s *Service) FetchUsernameForBitbucketServerToken(ctx context.Context, externalServiceID, externalServiceType, token string) (_ string, err error) {
ctx, endObservation := s.operations.fetchUsernameForBitbucketServerToken.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.fetchUsernameForBitbucketServerToken.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

css, err := s.sourcer.ForExternalService(ctx, s.store, store.GetExternalServiceIDsOpts{
@ -1037,7 +1037,7 @@ var _ usernameSource = &sources.BitbucketServerSource{}
// ValidateAuthenticator creates a ChangesetSource, configures it with the given
// authenticator and validates it can correctly access the remote server.
func (s *Service) ValidateAuthenticator(ctx context.Context, externalServiceID, externalServiceType string, a auth.Authenticator) (err error) {
ctx, endObservation := s.operations.validateAuthenticator.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.validateAuthenticator.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

if Mocks.ValidateAuthenticator != nil {
@ -1072,7 +1072,7 @@ var ErrChangesetsForJobNotFound = errors.New("some changesets could not be found
// given BatchChange, checking whether the actor in the context has permission to
// trigger a job, and enqueues it.
func (s *Service) CreateChangesetJobs(ctx context.Context, batchChangeID int64, ids []int64, jobType btypes.ChangesetJobType, payload interface{}, listOpts store.ListChangesetsOpts) (bulkGroupID string, err error) {
ctx, endObservation := s.operations.createChangesetJobs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.createChangesetJobs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

// Load the BatchChange to check for write permissions.
@ -1142,7 +1142,7 @@ func (s *Service) ValidateChangesetSpecs(ctx context.Context, batchSpecID int64)
// as such and the validation errors that we want to return without logging
// them as errors.
var nonValidationErr error
ctx, endObservation := s.operations.validateChangesetSpecs.With(ctx, &nonValidationErr, observation.Args{})
ctx, _, endObservation := s.operations.validateChangesetSpecs.With(ctx, &nonValidationErr, observation.Args{})
defer endObservation(1, observation.Args{})

conflicts, nonValidationErr := s.store.ListChangesetSpecsWithConflictingHeadRef(ctx, batchSpecID)
@ -1242,7 +1242,7 @@ func computeBatchSpecState(ctx context.Context, s *store.Store, spec *btypes.Bat
// It only deletes changeset_specs created by workspaces. The imported changeset_specs
// will not be altered.
func (s *Service) RetryBatchSpecWorkspaces(ctx context.Context, workspaceIDs []int64) (err error) {
ctx, endObservation := s.operations.retryBatchSpecWorkspaces.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.retryBatchSpecWorkspaces.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

if len(workspaceIDs) == 0 {
@ -1352,7 +1352,7 @@ type RetryBatchSpecExecutionOpts struct {
// It only deletes changeset_specs created by workspaces. The imported changeset_specs
// will not be altered.
func (s *Service) RetryBatchSpecExecution(ctx context.Context, opts RetryBatchSpecExecutionOpts) (err error) {
ctx, endObservation := s.operations.retryBatchSpecExecution.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.retryBatchSpecExecution.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

tx, err := s.store.Transact(ctx)

@ -51,7 +51,7 @@ func (s *Service) ApplyBatchChange(
ctx context.Context,
opts ApplyBatchChangeOpts,
) (batchChange *btypes.BatchChange, err error) {
ctx, endObservation := s.operations.applyBatchChange.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.applyBatchChange.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

batchSpec, err := s.store.GetBatchSpec(ctx, store.GetBatchSpecOpts{
@ -172,7 +172,7 @@ func (s *Service) ReconcileBatchChange(
ctx context.Context,
batchSpec *btypes.BatchSpec,
) (batchChange *btypes.BatchChange, previousSpecID int64, err error) {
ctx, endObservation := s.operations.reconcileBatchChange.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.reconcileBatchChange.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

batchChange, err = s.GetBatchChangeMatchingBatchSpec(ctx, batchSpec)

@ -52,7 +52,7 @@ var batchChangeInsertColumns = []*sqlf.Query{

// CreateBatchChange creates the given batch change.
func (s *Store) CreateBatchChange(ctx context.Context, c *btypes.BatchChange) (err error) {
ctx, endObservation := s.operations.createBatchChange.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.createBatchChange.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := s.createBatchChangeQuery(c)
@ -98,7 +98,7 @@ func (s *Store) createBatchChangeQuery(c *btypes.BatchChange) *sqlf.Query {
// UpdateBatchChange updates the given batch change.
func (s *Store) UpdateBatchChange(ctx context.Context, c *btypes.BatchChange) (err error) {
ctx, endObservation := s.operations.updateBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(c.ID)),
}})
defer endObservation(1, observation.Args{})
@ -140,7 +140,7 @@ func (s *Store) updateBatchChangeQuery(c *btypes.BatchChange) *sqlf.Query {

// DeleteBatchChange deletes the batch change with the given ID.
func (s *Store) DeleteBatchChange(ctx context.Context, id int64) (err error) {
ctx, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -170,7 +170,7 @@ type CountBatchChangesOpts struct {

// CountBatchChanges returns the number of batch changes in the database.
func (s *Store) CountBatchChanges(ctx context.Context, opts CountBatchChangesOpts) (count int, err error) {
ctx, endObservation := s.operations.countBatchChanges.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.countBatchChanges.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

repoAuthzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB()))
@ -281,7 +281,7 @@ type GetBatchChangeOpts struct {

// GetBatchChange gets a batch change matching the given options.
func (s *Store) GetBatchChange(ctx context.Context, opts GetBatchChangeOpts) (bc *btypes.BatchChange, err error) {
ctx, endObservation := s.operations.getBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -353,7 +353,7 @@ type GetBatchChangeDiffStatOpts struct {
}

func (s *Store) GetBatchChangeDiffStat(ctx context.Context, opts GetBatchChangeDiffStatOpts) (stat *diff.Stat, err error) {
ctx, endObservation := s.operations.getBatchChangeDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getBatchChangeDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(opts.BatchChangeID)),
}})
defer endObservation(1, observation.Args{})
@ -396,7 +396,7 @@ func getBatchChangeDiffStatQuery(opts GetBatchChangeDiffStatOpts, authzConds *sq
}

func (s *Store) GetRepoDiffStat(ctx context.Context, repoID api.RepoID) (stat *diff.Stat, err error) {
ctx, endObservation := s.operations.getRepoDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getRepoDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repoID", int(repoID)),
}})
defer endObservation(1, observation.Args{})
@ -457,7 +457,7 @@ type ListBatchChangesOpts struct {

// ListBatchChanges lists batch changes with the given filters.
func (s *Store) ListBatchChanges(ctx context.Context, opts ListBatchChangesOpts) (cs []*btypes.BatchChange, next int64, err error) {
ctx, endObservation := s.operations.listBatchChanges.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listBatchChanges.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

repoAuthzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB()))

@ -39,7 +39,7 @@ var BatchSpecExecutionCacheEntryColums = SQLColumns{

// CreateBatchSpecExecutionCacheEntry creates the given batch spec workspace jobs.
func (s *Store) CreateBatchSpecExecutionCacheEntry(ctx context.Context, ce *btypes.BatchSpecExecutionCacheEntry) (err error) {
ctx, endObservation := s.operations.createBatchSpecExecutionCacheEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.createBatchSpecExecutionCacheEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("Key", ce.Key),
}})
defer endObservation(1, observation.Args{})
@ -100,7 +100,7 @@ type ListBatchSpecExecutionCacheEntriesOpts struct {

// ListBatchSpecExecutionCacheEntries gets the BatchSpecExecutionCacheEntries matching the given options.
func (s *Store) ListBatchSpecExecutionCacheEntries(ctx context.Context, opts ListBatchSpecExecutionCacheEntriesOpts) (cs []*btypes.BatchSpecExecutionCacheEntry, err error) {
ctx, endObservation := s.operations.listBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.listBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("Count", len(opts.Keys)),
}})
defer endObservation(1, observation.Args{})
@ -160,7 +160,7 @@ WHERE

// MarkUsedBatchSpecExecutionCacheEntries updates the LastUsedAt of the given cache entries.
func (s *Store) MarkUsedBatchSpecExecutionCacheEntries(ctx context.Context, ids []int64) (err error) {
ctx, endObservation := s.operations.markUsedBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.markUsedBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(ids)),
}})
defer endObservation(1, observation.Args{})
@ -212,7 +212,7 @@ DELETE FROM batch_spec_execution_cache_entries WHERE id IN (SELECT id FROM ids)
`

func (s *Store) CleanBatchSpecExecutionCacheEntries(ctx context.Context, maxCacheSize int64) (err error) {
ctx, endObservation := s.operations.cleanBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.cleanBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("MaxTableSize", int(maxCacheSize)),
}})
defer endObservation(1, observation.Args{})

@ -65,7 +65,7 @@ func (e ErrResolutionJobAlreadyExists) Error() string {

// CreateBatchSpecResolutionJob creates the given batch spec resolution job.
func (s *Store) CreateBatchSpecResolutionJob(ctx context.Context, wj *btypes.BatchSpecResolutionJob) (err error) {
ctx, endObservation := s.operations.createBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.createBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
defer endObservation(1, observation.Args{})

q := s.createBatchSpecResolutionJobQuery(wj)
@ -120,7 +120,7 @@ type GetBatchSpecResolutionJobOpts struct {

// GetBatchSpecResolutionJob gets a BatchSpecResolutionJob matching the given options.
func (s *Store) GetBatchSpecResolutionJob(ctx context.Context, opts GetBatchSpecResolutionJobOpts) (job *btypes.BatchSpecResolutionJob, err error) {
ctx, endObservation := s.operations.getBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.Int("BatchSpecID", int(opts.BatchSpecID)),
}})
@ -176,7 +176,7 @@ type ListBatchSpecResolutionJobsOpts struct {

// ListBatchSpecResolutionJobs lists batch changes with the given filters.
func (s *Store) ListBatchSpecResolutionJobs(ctx context.Context, opts ListBatchSpecResolutionJobsOpts) (cs []*btypes.BatchSpecResolutionJob, err error) {
ctx, endObservation := s.operations.listBatchSpecResolutionJobs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listBatchSpecResolutionJobs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listBatchSpecResolutionJobsQuery(opts)

@ -100,7 +100,7 @@ const executableWorkspaceJobsConditionFmtstr = `

// CreateBatchSpecWorkspaceExecutionJobs creates the given batch spec workspace jobs.
func (s *Store) CreateBatchSpecWorkspaceExecutionJobs(ctx context.Context, batchSpecID int64) (err error) {
ctx, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(batchSpecID)),
}})
defer endObservation(1, observation.Args{})
@ -124,7 +124,7 @@ WHERE

// CreateBatchSpecWorkspaceExecutionJobsForWorkspaces creates the batch spec workspace jobs for the given workspaces.
func (s *Store) CreateBatchSpecWorkspaceExecutionJobsForWorkspaces(ctx context.Context, workspaceIDs []int64) (err error) {
ctx, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobsForWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobsForWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
defer endObservation(1, observation.Args{})

q := sqlf.Sprintf(createBatchSpecWorkspaceExecutionJobsForWorkspacesQueryFmtstr, pq.Array(workspaceIDs))
@ -142,7 +142,7 @@ RETURNING id

// DeleteBatchSpecWorkspaceExecutionJobs
func (s *Store) DeleteBatchSpecWorkspaceExecutionJobs(ctx context.Context, ids []int64) (err error) {
ctx, endObservation := s.operations.deleteBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.deleteBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
defer endObservation(1, observation.Args{})

q := sqlf.Sprintf(deleteBatchSpecWorkspaceExecutionJobsQueryFmtstr, pq.Array(ids))
@ -164,7 +164,7 @@ type GetBatchSpecWorkspaceExecutionJobOpts struct {

// GetBatchSpecWorkspaceExecutionJob gets a BatchSpecWorkspaceExecutionJob matching the given options.
func (s *Store) GetBatchSpecWorkspaceExecutionJob(ctx context.Context, opts GetBatchSpecWorkspaceExecutionJobOpts) (job *btypes.BatchSpecWorkspaceExecutionJob, err error) {
ctx, endObservation := s.operations.getBatchSpecWorkspaceExecutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getBatchSpecWorkspaceExecutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -232,7 +232,7 @@ type ListBatchSpecWorkspaceExecutionJobsOpts struct {

// ListBatchSpecWorkspaceExecutionJobs lists batch changes with the given filters.
func (s *Store) ListBatchSpecWorkspaceExecutionJobs(ctx context.Context, opts ListBatchSpecWorkspaceExecutionJobsOpts) (cs []*btypes.BatchSpecWorkspaceExecutionJob, err error) {
ctx, endObservation := s.operations.listBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listBatchSpecWorkspaceExecutionJobsQuery(opts)
@ -321,7 +321,7 @@ type CancelBatchSpecWorkspaceExecutionJobsOpts struct {
// The returned list of records may not match the list of the given IDs, if
// some of the records were already canceled, completed, failed, errored, etc.
func (s *Store) CancelBatchSpecWorkspaceExecutionJobs(ctx context.Context, opts CancelBatchSpecWorkspaceExecutionJobsOpts) (jobs []*btypes.BatchSpecWorkspaceExecutionJob, err error) {
ctx, endObservation := s.operations.cancelBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.cancelBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
defer endObservation(1, observation.Args{})

if opts.BatchSpecID == 0 && len(opts.IDs) == 0 {
@ -412,7 +412,7 @@ func (s *Store) cancelBatchSpecWorkspaceExecutionJobQuery(opts CancelBatchSpecWo

// SetBatchSpecWorkspaceExecutionJobAccessToken sets the access_token_id column to the given ID.
func (s *Store) SetBatchSpecWorkspaceExecutionJobAccessToken(ctx context.Context, jobID, tokenID int64) (err error) {
ctx, endObservation := s.operations.setBatchSpecWorkspaceExecutionJobAccessToken.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.setBatchSpecWorkspaceExecutionJobAccessToken.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(jobID)),
}})
defer endObservation(1, observation.Args{})

@ -68,7 +68,7 @@ var BatchSpecWorkspaceColums = SQLColumns{

// CreateBatchSpecWorkspace creates the given batch spec workspace jobs.
func (s *Store) CreateBatchSpecWorkspace(ctx context.Context, ws ...*btypes.BatchSpecWorkspace) (err error) {
ctx, endObservation := s.operations.createBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.createBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(ws)),
}})
defer endObservation(1, observation.Args{})
@ -150,7 +150,7 @@ type GetBatchSpecWorkspaceOpts struct {

// GetBatchSpecWorkspace gets a BatchSpecWorkspace matching the given options.
func (s *Store) GetBatchSpecWorkspace(ctx context.Context, opts GetBatchSpecWorkspaceOpts) (job *btypes.BatchSpecWorkspace, err error) {
ctx, endObservation := s.operations.getBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -273,7 +273,7 @@ func (opts ListBatchSpecWorkspacesOpts) SQLConds(ctx context.Context, db databas

// ListBatchSpecWorkspaces lists batch spec workspaces with the given filters.
func (s *Store) ListBatchSpecWorkspaces(ctx context.Context, opts ListBatchSpecWorkspacesOpts) (cs []*btypes.BatchSpecWorkspace, next int64, err error) {
ctx, endObservation := s.operations.listBatchSpecWorkspaces.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listBatchSpecWorkspaces.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q, err := listBatchSpecWorkspacesQuery(ctx, s.DatabaseDB(), opts)
@ -323,7 +323,7 @@ func listBatchSpecWorkspacesQuery(ctx context.Context, db database.DB, opts List

// CountBatchSpecWorkspaces counts batch spec workspaces with the given filters.
func (s *Store) CountBatchSpecWorkspaces(ctx context.Context, opts ListBatchSpecWorkspacesOpts) (count int64, err error) {
ctx, endObservation := s.operations.countBatchSpecWorkspaces.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.countBatchSpecWorkspaces.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q, err := countBatchSpecWorkspacesQuery(ctx, s.DatabaseDB(), opts)
@ -375,7 +375,7 @@ AND NOT %s
// MarkSkippedBatchSpecWorkspaces marks the workspaces that were skipped in
|
||||
// CreateBatchSpecWorkspaceExecutionJobs as skipped.
|
||||
func (s *Store) MarkSkippedBatchSpecWorkspaces(ctx context.Context, batchSpecID int64) (err error) {
|
||||
ctx, endObservation := s.operations.markSkippedBatchSpecWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
ctx, _, endObservation := s.operations.markSkippedBatchSpecWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
log.Int("batchSpecID", int(batchSpecID)),
|
||||
}})
|
||||
defer endObservation(1, observation.Args{})
|
||||
|
||||
@ -54,7 +54,7 @@ const batchSpecInsertColsFmt = `(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
|
||||
|
||||
// CreateBatchSpec creates the given BatchSpec.
|
||||
func (s *Store) CreateBatchSpec(ctx context.Context, c *btypes.BatchSpec) (err error) {
|
||||
ctx, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{})
|
||||
ctx, _, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{})
|
||||
defer endObservation(1, observation.Args{})
|
||||
|
||||
q, err := s.createBatchSpecQuery(c)
|
||||
@ -111,7 +111,7 @@ func (s *Store) createBatchSpecQuery(c *btypes.BatchSpec) (*sqlf.Query, error) {
|
||||
|
||||
// UpdateBatchSpec updates the given BatchSpec.
|
||||
func (s *Store) UpdateBatchSpec(ctx context.Context, c *btypes.BatchSpec) (err error) {
|
||||
ctx, endObservation := s.operations.updateBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
ctx, _, endObservation := s.operations.updateBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
log.Int("ID", int(c.ID)),
|
||||
}})
|
||||
defer endObservation(1, observation.Args{})
|
||||
@ -163,7 +163,7 @@ func (s *Store) updateBatchSpecQuery(c *btypes.BatchSpec) (*sqlf.Query, error) {
|
||||
|
||||
// DeleteBatchSpec deletes the BatchSpec with the given ID.
|
||||
func (s *Store) DeleteBatchSpec(ctx context.Context, id int64) (err error) {
|
||||
ctx, endObservation := s.operations.deleteBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
ctx, _, endObservation := s.operations.deleteBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
log.Int("ID", int(id)),
|
||||
}})
|
||||
defer endObservation(1, observation.Args{})
|
||||
@ -186,7 +186,7 @@ type CountBatchSpecsOpts struct {
|
||||
|
||||
// CountBatchSpecs returns the number of code mods in the database.
|
||||
func (s *Store) CountBatchSpecs(ctx context.Context, opts CountBatchSpecsOpts) (count int, err error) {
|
||||
ctx, endObservation := s.operations.countBatchSpecs.With(ctx, &err, observation.Args{})
|
||||
ctx, _, endObservation := s.operations.countBatchSpecs.With(ctx, &err, observation.Args{})
|
||||
defer endObservation(1, observation.Args{})
|
||||
|
||||
q := countBatchSpecsQuery(opts)
|
||||
@ -243,7 +243,7 @@ type GetBatchSpecOpts struct {
|
||||
|
||||
// GetBatchSpec gets a BatchSpec matching the given options.
|
||||
func (s *Store) GetBatchSpec(ctx context.Context, opts GetBatchSpecOpts) (spec *btypes.BatchSpec, err error) {
|
||||
ctx, endObservation := s.operations.getBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
ctx, _, endObservation := s.operations.getBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
log.Int("ID", int(opts.ID)),
|
||||
log.String("randID", opts.RandID),
|
||||
}})
|
||||
@ -311,7 +311,7 @@ type GetNewestBatchSpecOpts struct {
// GetNewestBatchSpec returns the newest batch spec that matches the given
// options.
func (s *Store) GetNewestBatchSpec(ctx context.Context, opts GetNewestBatchSpecOpts) (spec *btypes.BatchSpec, err error) {
ctx, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.getNewestBatchSpec.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := getNewestBatchSpecQuery(&opts)
@ -379,7 +379,7 @@ type ListBatchSpecsOpts struct {

// ListBatchSpecs lists BatchSpecs with the given filters.
func (s *Store) ListBatchSpecs(ctx context.Context, opts ListBatchSpecsOpts) (cs []*btypes.BatchSpec, next int64, err error) {
ctx, endObservation := s.operations.listBatchSpecs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listBatchSpecs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listBatchSpecsQuery(&opts)
@ -459,7 +459,7 @@ ON
// - We could: Add execution_started_at to the batch_specs table and delete
// all that are older than TIME_PERIOD and never started executing.
func (s *Store) DeleteExpiredBatchSpecs(ctx context.Context) (err error) {
ctx, endObservation := s.operations.deleteExpiredBatchSpecs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.deleteExpiredBatchSpecs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

expirationTime := s.now().Add(-btypes.BatchSpecTTL)

@ -52,7 +52,7 @@ type GetBulkOperationOpts struct {

// GetBulkOperation gets a BulkOperation matching the given options.
func (s *Store) GetBulkOperation(ctx context.Context, opts GetBulkOperationOpts) (op *btypes.BulkOperation, err error) {
ctx, endObservation := s.operations.getBulkOperation.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getBulkOperation.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("ID", opts.ID),
}})
defer endObservation(1, observation.Args{})
@ -112,7 +112,7 @@ type ListBulkOperationsOpts struct {

// ListBulkOperations gets a list of BulkOperations matching the given options.
func (s *Store) ListBulkOperations(ctx context.Context, opts ListBulkOperationsOpts) (bs []*btypes.BulkOperation, next int64, err error) {
ctx, endObservation := s.operations.listBulkOperations.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listBulkOperations.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listBulkOperationsQuery(&opts)
@ -181,7 +181,7 @@ type CountBulkOperationsOpts struct {

// CountBulkOperations gets the count of BulkOperations in the given batch change.
func (s *Store) CountBulkOperations(ctx context.Context, opts CountBulkOperationsOpts) (count int, err error) {
ctx, endObservation := s.operations.countBulkOperations.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.countBulkOperations.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(opts.BatchChangeID)),
}})
defer endObservation(1, observation.Args{})
@ -224,7 +224,7 @@ type ListBulkOperationErrorsOpts struct {

// ListBulkOperationErrors gets a list of BulkOperationErrors in a given BulkOperation.
func (s *Store) ListBulkOperationErrors(ctx context.Context, opts ListBulkOperationErrorsOpts) (es []*btypes.BulkOperationError, err error) {
ctx, endObservation := s.operations.listBulkOperationErrors.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.listBulkOperationErrors.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("bulkOperationID", opts.BulkOperationID),
}})
defer endObservation(1, observation.Args{})

@ -25,7 +25,7 @@ type GetChangesetEventOpts struct {

// GetChangesetEvent gets a changeset matching the given options.
func (s *Store) GetChangesetEvent(ctx context.Context, opts GetChangesetEventOpts) (ev *btypes.ChangesetEvent, err error) {
ctx, endObservation := s.operations.getChangesetEvent.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getChangesetEvent.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.Int("changesetID", int(opts.ChangesetID)),
}})
@ -95,7 +95,7 @@ type ListChangesetEventsOpts struct {

// ListChangesetEvents lists ChangesetEvents with the given filters.
func (s *Store) ListChangesetEvents(ctx context.Context, opts ListChangesetEventsOpts) (cs []*btypes.ChangesetEvent, next int64, err error) {
ctx, endObservation := s.operations.listChangesetEvents.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listChangesetEvents.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listChangesetEventsQuery(&opts)
@ -161,7 +161,7 @@ type CountChangesetEventsOpts struct {

// CountChangesetEvents returns the number of changeset events in the database.
func (s *Store) CountChangesetEvents(ctx context.Context, opts CountChangesetEventsOpts) (count int, err error) {
ctx, endObservation := s.operations.countChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.countChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("changesetID", int(opts.ChangesetID)),
}})
defer endObservation(1, observation.Args{})
@ -191,7 +191,7 @@ func countChangesetEventsQuery(opts *CountChangesetEventsOpts) *sqlf.Query {

// UpsertChangesetEvents creates or updates the given ChangesetEvents.
func (s *Store) UpsertChangesetEvents(ctx context.Context, cs ...*btypes.ChangesetEvent) (err error) {
ctx, endObservation := s.operations.upsertChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.upsertChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(cs)),
}})
defer endObservation(1, observation.Args{})

@ -58,7 +58,7 @@ var changesetJobColumns = SQLColumns{

// CreateChangesetJob creates the given changeset jobs.
func (s *Store) CreateChangesetJob(ctx context.Context, cs ...*btypes.ChangesetJob) (err error) {
ctx, endObservation := s.operations.createChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.createChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -126,7 +126,7 @@ type GetChangesetJobOpts struct {

// GetChangesetJob gets a ChangesetJob matching the given options.
func (s *Store) GetChangesetJob(ctx context.Context, opts GetChangesetJobOpts) (job *btypes.ChangesetJob, err error) {
ctx, endObservation := s.operations.getChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})

@ -62,7 +62,7 @@ var changesetSpecColumns = SQLColumns{

// CreateChangesetSpec creates the given ChangesetSpecs.
func (s *Store) CreateChangesetSpec(ctx context.Context, cs ...*btypes.ChangesetSpec) (err error) {
ctx, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("Count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -143,7 +143,7 @@ func (s *Store) CreateChangesetSpec(ctx context.Context, cs ...*btypes.Changeset

// UpdateChangesetSpecBatchSpecID updates the given ChangesetSpecs to be owned by the given batch spec.
func (s *Store) UpdateChangesetSpecBatchSpecID(ctx context.Context, cs []int64, batchSpec int64) (err error) {
ctx, endObservation := s.operations.updateChangesetSpecBatchSpecID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateChangesetSpecBatchSpecID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("Count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -170,7 +170,7 @@ func (s *Store) updateChangesetSpecQuery(cs []int64, batchSpec int64) *sqlf.Quer

// DeleteChangesetSpec deletes the ChangesetSpec with the given ID.
func (s *Store) DeleteChangesetSpec(ctx context.Context, id int64) (err error) {
ctx, endObservation := s.operations.deleteChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.deleteChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -192,7 +192,7 @@ type CountChangesetSpecsOpts struct {

// CountChangesetSpecs returns the number of changeset specs in the database.
func (s *Store) CountChangesetSpecs(ctx context.Context, opts CountChangesetSpecsOpts) (count int, err error) {
ctx, endObservation := s.operations.countChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.countChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(opts.BatchSpecID)),
}})
defer endObservation(1, observation.Args{})
@ -243,7 +243,7 @@ type GetChangesetSpecOpts struct {

// GetChangesetSpec gets a changeset spec matching the given options.
func (s *Store) GetChangesetSpec(ctx context.Context, opts GetChangesetSpecOpts) (spec *btypes.ChangesetSpec, err error) {
ctx, endObservation := s.operations.getChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.String("randID", opts.RandID),
}})
@ -317,7 +317,7 @@ type ListChangesetSpecsOpts struct {

// ListChangesetSpecs lists ChangesetSpecs with the given filters.
func (s *Store) ListChangesetSpecs(ctx context.Context, opts ListChangesetSpecsOpts) (cs btypes.ChangesetSpecs, next int64, err error) {
ctx, endObservation := s.operations.listChangesetSpecs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listChangesetSpecs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listChangesetSpecsQuery(&opts)
@ -408,7 +408,7 @@ ORDER BY repo_id ASC, head_ref ASC
`

func (s *Store) ListChangesetSpecsWithConflictingHeadRef(ctx context.Context, batchSpecID int64) (conflicts []ChangesetSpecHeadRefConflict, err error) {
ctx, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := sqlf.Sprintf(listChangesetSpecsWithConflictingHeadQueryFmtstr, batchSpecID)
@ -431,7 +431,7 @@ func (s *Store) ListChangesetSpecsWithConflictingHeadRef(ctx context.Context, ba
// within BatchSpecTTL.
// TODO: Fix comment.
func (s *Store) DeleteExpiredChangesetSpecs(ctx context.Context) (err error) {
ctx, endObservation := s.operations.deleteExpiredChangesetSpecs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.deleteExpiredChangesetSpecs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

changesetSpecTTLExpiration := s.now().Add(-btypes.ChangesetSpecTTL)
@ -474,7 +474,7 @@ type DeleteChangesetSpecsOpts struct {

// DeleteChangesetSpecs deletes the ChangesetSpecs matching the given options.
func (s *Store) DeleteChangesetSpecs(ctx context.Context, opts DeleteChangesetSpecsOpts) (err error) {
ctx, endObservation := s.operations.deleteChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.deleteChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(opts.BatchSpecID)),
}})
defer endObservation(1, observation.Args{})
@ -598,7 +598,7 @@ type GetRewirerMappingsOpts struct {
// Spec 4 should be attached to Changeset 4, since it tracks PR #333 in Repo C. (ChangesetSpec = 4, Changeset = 4)
// Changeset 3 doesn't have a matching spec and should be detached from the batch change (and closed) (ChangesetSpec == 0, Changeset = 3).
func (s *Store) GetRewirerMappings(ctx context.Context, opts GetRewirerMappingsOpts) (mappings btypes.RewirerMappings, err error) {
ctx, endObservation := s.operations.getRewirerMappings.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getRewirerMappings.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(opts.BatchSpecID)),
log.Int("batchChangeID", int(opts.BatchChangeID)),
}})

@ -208,7 +208,7 @@ func (s *Store) UpsertChangeset(ctx context.Context, c *btypes.Changeset) error

// CreateChangeset creates the given Changeset.
func (s *Store) CreateChangeset(ctx context.Context, c *btypes.Changeset) (err error) {
ctx, endObservation := s.operations.createChangeset.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.createChangeset.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

if c.CreatedAt.IsZero() {
@ -236,7 +236,7 @@ RETURNING %s

// DeleteChangeset deletes the Changeset with the given ID.
func (s *Store) DeleteChangeset(ctx context.Context, id int64) (err error) {
ctx, endObservation := s.operations.deleteChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.deleteChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -267,7 +267,7 @@ type CountChangesetsOpts struct {

// CountChangesets returns the number of changesets in the database.
func (s *Store) CountChangesets(ctx context.Context, opts CountChangesetsOpts) (count int, err error) {
ctx, endObservation := s.operations.countChangesets.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.countChangesets.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

authzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB()))
@ -368,7 +368,7 @@ type GetChangesetOpts struct {

// GetChangeset gets a changeset matching the given options.
func (s *Store) GetChangeset(ctx context.Context, opts GetChangesetOpts) (ch *btypes.Changeset, err error) {
ctx, endObservation := s.operations.getChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -441,7 +441,7 @@ type ListChangesetSyncDataOpts struct {
// ListChangesetSyncData returns sync data on all non-externally-deleted changesets
// that are part of at least one open batch change.
func (s *Store) ListChangesetSyncData(ctx context.Context, opts ListChangesetSyncDataOpts) (sd []*btypes.ChangesetSyncData, err error) {
ctx, endObservation := s.operations.listChangesetSyncData.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listChangesetSyncData.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listChangesetSyncDataQuery(opts)
@ -528,7 +528,7 @@ type ListChangesetsOpts struct {

// ListChangesets lists Changesets with the given filters.
func (s *Store) ListChangesets(ctx context.Context, opts ListChangesetsOpts) (cs btypes.Changesets, next int64, err error) {
ctx, endObservation := s.operations.listChangesets.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listChangesets.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

authzConds, err := database.AuthzQueryConds(ctx, database.NewDB(s.Handle().DB()))
@ -644,7 +644,7 @@ func listChangesetsQuery(opts *ListChangesetsOpts, authzConds *sqlf.Query) *sqlf
// `resetState` argument but *only if* the `currentState` matches its current
// `reconciler_state`.
func (s *Store) EnqueueChangeset(ctx context.Context, cs *btypes.Changeset, resetState, currentState btypes.ReconcilerState) (err error) {
ctx, endObservation := s.operations.enqueueChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.enqueueChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -698,7 +698,7 @@ func (s *Store) enqueueChangesetQuery(cs *btypes.Changeset, resetState, currentS

// UpdateChangeset updates the given Changeset.
func (s *Store) UpdateChangeset(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -727,7 +727,7 @@ RETURNING
// UpdateChangesetBatchChanges updates only the `batch_changes` & `updated_at`
// columns of the given Changeset.
func (s *Store) UpdateChangesetBatchChanges(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, endObservation := s.operations.updateChangesetBatchChanges.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateChangesetBatchChanges.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -743,7 +743,7 @@ func (s *Store) UpdateChangesetBatchChanges(ctx context.Context, cs *btypes.Chan
// UpdateChangesetUiPublicationState updates only the `ui_publication_state` &
// `updated_at` columns of the given Changeset.
func (s *Store) UpdateChangesetUiPublicationState(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, endObservation := s.operations.updateChangesetUIPublicationState.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateChangesetUIPublicationState.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -785,7 +785,7 @@ RETURNING
// that relate to the state of the changeset on the code host, e.g.
// external_branch, external_state, etc.
func (s *Store) UpdateChangesetCodeHostState(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, endObservation := s.operations.updateChangesetCodeHostState.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateChangesetCodeHostState.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -853,7 +853,7 @@ RETURNING
// a slice of head refs. We need this in order to match incoming webhooks to pull requests as
// the only information they provide is the remote branch
func (s *Store) GetChangesetExternalIDs(ctx context.Context, spec api.ExternalRepoSpec, refs []string) (externalIDs []string, err error) {
ctx, endObservation := s.operations.getChangesetExternalIDs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.getChangesetExternalIDs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

queryFmtString := `
@ -891,7 +891,7 @@ var CanceledChangesetFailureMessage = "Canceled"
// currently processing changesets have finished executing.
func (s *Store) CancelQueuedBatchChangeChangesets(ctx context.Context, batchChangeID int64) (err error) {
var iterations int
ctx, endObservation := s.operations.cancelQueuedBatchChangeChangesets.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.cancelQueuedBatchChangeChangesets.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(batchChangeID)),
}})
defer endObservation(1, observation.Args{LogFields: []log.Field{log.Int("iterations", iterations)}})
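Note how the iterations counter above is reported: fields whose values are only known once the operation completes are handed to endObservation rather than to With. A hedged sketch of the deferred-closure variant used in nearby hunks, assuming an analogous processAll operation on the store from the earlier sketch; wrapping the call in func() means the counter is read when the method returns, not when the defer statement executes:

func (s *Store) ProcessAll(ctx context.Context) (err error) {
	var iterations int
	ctx, _, endObservation := s.operations.processAll.With(ctx, &err, observation.Args{})
	defer func() {
		// Runs at return time, so iterations carries its final value
		// into the finished observation.
		endObservation(1, observation.Args{LogFields: []log.Field{log.Int("iterations", iterations)}})
	}()

	for iterations < 3 { // stand-in for the real work loop
		iterations++
	}
	_ = ctx
	return nil
}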
@ -968,7 +968,7 @@ WHERE
// passed.
func (s *Store) EnqueueChangesetsToClose(ctx context.Context, batchChangeID int64) (err error) {
var iterations int
ctx, endObservation := s.operations.enqueueChangesetsToClose.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.enqueueChangesetsToClose.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(batchChangeID)),
}})
defer func() {
@ -1198,7 +1198,7 @@ func scanChangeset(t *btypes.Changeset, s dbutil.Scanner) error {
// GetChangesetsStats returns statistics on all the changesets associated to the given batch change,
// or all changesets across the instance.
func (s *Store) GetChangesetsStats(ctx context.Context, batchChangeID int64) (stats btypes.ChangesetsStats, err error) {
ctx, endObservation := s.operations.getChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(batchChangeID)),
}})
defer endObservation(1, observation.Args{})
@ -1252,7 +1252,7 @@ WHERE

// GetRepoChangesetsStats returns statistics on all the changesets associated to the given repo.
func (s *Store) GetRepoChangesetsStats(ctx context.Context, repoID api.RepoID) (stats *btypes.RepoChangesetsStats, err error) {
ctx, endObservation := s.operations.getRepoChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getRepoChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repoID", int(repoID)),
}})
defer endObservation(1, observation.Args{})
@ -1284,7 +1284,7 @@ func (s *Store) GetRepoChangesetsStats(ctx context.Context, repoID api.RepoID) (
}

func (s *Store) EnqueueNextScheduledChangeset(ctx context.Context) (ch *btypes.Changeset, err error) {
ctx, endObservation := s.operations.enqueueNextScheduledChangeset.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.enqueueNextScheduledChangeset.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := sqlf.Sprintf(
@ -1326,7 +1326,7 @@ RETURNING %s
`

func (s *Store) GetChangesetPlaceInSchedulerQueue(ctx context.Context, id int64) (place int, err error) {
ctx, endObservation := s.operations.getChangesetPlaceInSchedulerQueue.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getChangesetPlaceInSchedulerQueue.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})

@ -18,7 +18,7 @@ type ListCodeHostsOpts struct {
}

func (s *Store) ListCodeHosts(ctx context.Context, opts ListCodeHostsOpts) (cs []*btypes.CodeHost, err error) {
ctx, endObservation := s.operations.listCodeHosts.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listCodeHosts.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listCodeHostsQuery(opts)
@ -156,7 +156,7 @@ type GetExternalServiceIDsOpts struct {
}

func (s *Store) GetExternalServiceIDs(ctx context.Context, opts GetExternalServiceIDsOpts) (ids []int64, err error) {
ctx, endObservation := s.operations.getExternalServiceIDs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.getExternalServiceIDs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := getExternalServiceIDsQuery(opts)

@ -13,7 +13,7 @@ import (
)

func (s *Store) CreateSiteCredential(ctx context.Context, c *btypes.SiteCredential, credential auth.Authenticator) (err error) {
ctx, endObservation := s.operations.createSiteCredential.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.createSiteCredential.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

if c.CreatedAt.IsZero() {
@ -65,7 +65,7 @@ func createSiteCredentialQuery(c *btypes.SiteCredential) *sqlf.Query {
}

func (s *Store) DeleteSiteCredential(ctx context.Context, id int64) (err error) {
ctx, endObservation := s.operations.deleteSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.deleteSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -106,7 +106,7 @@ type GetSiteCredentialOpts struct {
}

func (s *Store) GetSiteCredential(ctx context.Context, opts GetSiteCredentialOpts) (sc *btypes.SiteCredential, err error) {
ctx, endObservation := s.operations.getSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -168,7 +168,7 @@ type ListSiteCredentialsOpts struct {
}

func (s *Store) ListSiteCredentials(ctx context.Context, opts ListSiteCredentialsOpts) (cs []*btypes.SiteCredential, next int64, err error) {
ctx, endObservation := s.operations.listSiteCredentials.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.listSiteCredentials.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

q := listSiteCredentialsQuery(opts)
@ -231,7 +231,7 @@ func listSiteCredentialsQuery(opts ListSiteCredentialsOpts) *sqlf.Query {
}

func (s *Store) UpdateSiteCredential(ctx context.Context, c *btypes.SiteCredential) (err error) {
ctx, endObservation := s.operations.updateSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(c.ID)),
}})
defer endObservation(1, observation.Args{})

@ -47,7 +47,7 @@ func NewIndexEnqueuer(
// InferIndexConfiguration looks at the repository contents at the lastest commit on the default branch of the given
// repository and determines an index configuration that is likely to succeed.
func (s *IndexEnqueuer) InferIndexConfiguration(ctx context.Context, repositoryID int, commit string) (_ *config.IndexConfiguration, hints []config.IndexJobHint, err error) {
ctx, trace, endObservation := s.operations.InferIndexConfiguration.WithAndLogger(ctx, &err, observation.Args{
ctx, trace, endObservation := s.operations.InferIndexConfiguration.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
},
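From this hunk onward the diff also retires the separate WithAndLogger entry point: call sites that do use the middle return value simply call the three-value With, and the bound trace value is used as before. A sketch under that assumption (the inferThing operation is illustrative, with imports as in the first sketch; treat the exact trace.Log shape as an assumption rather than something this hunk shows):

func (s *Store) InferThing(ctx context.Context, repositoryID int) (err error) {
	// Formerly WithAndLogger; the consolidated With returns the same trace logger.
	ctx, trace, endObservation := s.operations.inferThing.With(ctx, &err, observation.Args{
		LogFields: []log.Field{log.Int("repositoryID", repositoryID)},
	})
	defer endObservation(1, observation.Args{})

	// Mid-operation fields can still be attached through the trace value.
	trace.Log(log.Int("candidates", 0))
	_ = ctx
	return nil
}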
@ -102,7 +102,7 @@ func (s *IndexEnqueuer) InferIndexConfiguration(ctx context.Context, repositoryI
// will cause this method to no-op. Note that this is NOT a guarantee that there will never be any duplicate records
// when the flag is false.
func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force bool) (_ []store.Index, err error) {
ctx, trace, endObservation := s.operations.QueueIndex.WithAndLogger(ctx, &err, observation.Args{
ctx, trace, endObservation := s.operations.QueueIndex.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
},
@ -122,7 +122,7 @@ func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev,
// QueueIndexesForPackage enqueues index jobs for a dependency of a recently-processed precise code
// intelligence index.
func (s *IndexEnqueuer) QueueIndexesForPackage(ctx context.Context, pkg precise.Package) (err error) {
ctx, trace, endObservation := s.operations.QueueIndexForPackage.WithAndLogger(ctx, &err, observation.Args{
ctx, trace, endObservation := s.operations.QueueIndexForPackage.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.String("scheme", pkg.Scheme),
log.String("name", pkg.Name),

@ -35,7 +35,7 @@ func New(db database.DB, dbStore DBStore, observationContext *observation.Contex

// CommitExists determines if the given commit exists in the given repository.
func (c *Client) CommitExists(ctx context.Context, repositoryID int, commit string) (_ bool, err error) {
ctx, endObservation := c.operations.commitExists.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.commitExists.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
}})
@ -56,7 +56,7 @@ type RepositoryCommit struct {
// CommitsExist determines if the given commits exists in the given repositories. This method returns a
// slice of the same size as the input slice, true indicating that the commit at the symmetric index exists.
func (c *Client) CommitsExist(ctx context.Context, commits []RepositoryCommit) (_ []bool, err error) {
ctx, endObservation := c.operations.commitsExist.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.commitsExist.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numCommits", len(commits)),
}})
defer endObservation(1, observation.Args{})
@ -126,7 +126,7 @@ func (c *Client) CommitsExist(ctx context.Context, commits []RepositoryCommit) (
// for the given repository (which occurs with empty repositories), a false-valued flag is returned along with
// a nil error and empty revision.
func (c *Client) Head(ctx context.Context, repositoryID int) (_ string, revisionExists bool, err error) {
ctx, endObservation := c.operations.head.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.head.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -142,7 +142,7 @@ func (c *Client) Head(ctx context.Context, repositoryID int) (_ string, revision
// CommitDate returns the time that the given commit was committed. If the given revision does not exist,
// a false-valued flag is returned along with a nil error and zero-valued time.
func (c *Client) CommitDate(ctx context.Context, repositoryID int, commit string) (_ string, _ time.Time, revisionExists bool, err error) {
ctx, endObservation := c.operations.commitDate.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.commitDate.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
}})
@ -176,7 +176,7 @@ func (c *Client) CommitDate(ctx context.Context, repositoryID int, commit string
}

func (c *Client) RepoInfo(ctx context.Context, repos ...api.RepoName) (_ map[api.RepoName]*protocol.RepoInfo, err error) {
ctx, endObservation := c.operations.repoInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.repoInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numRepos", len(repos)),
}})
defer endObservation(1, observation.Args{})
@ -193,7 +193,7 @@ func (c *Client) RepoInfo(ctx context.Context, repos ...api.RepoName) (_ map[api
// to its parents. If a commit is supplied, the returned graph will be rooted at the given
// commit. If a non-zero limit is supplied, at most that many commits will be returned.
func (c *Client) CommitGraph(ctx context.Context, repositoryID int, opts gitserver.CommitGraphOptions) (_ *gitdomain.CommitGraph, err error) {
ctx, endObservation := c.operations.commitGraph.With(ctx, &err, observation.Args{
ctx, _, endObservation := c.operations.commitGraph.With(ctx, &err, observation.Args{
LogFields: append([]log.Field{log.Int("repositoryID", repositoryID)}, opts.LogFields()...),
})
defer endObservation(1, observation.Args{})
@ -228,7 +228,7 @@ func (c *Client) CommitGraph(ctx context.Context, repositoryID int, opts gitserv
// branch and tag of the given repository. If any git objects are provided, it will
// only populate entries for descriptions pointing at the given git objects.
func (c *Client) RefDescriptions(ctx context.Context, repositoryID int, pointedAt ...string) (_ map[string][]gitdomain.RefDescription, err error) {
ctx, endObservation := c.operations.refDescriptions.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.refDescriptions.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -246,7 +246,7 @@ func (c *Client) RefDescriptions(ctx context.Context, repositoryID int, pointedA
// as: all commits on {branchName} not also on the tip of the default branch. If the supplied branch name is the
// default branch, then this method instead returns all commits reachable from HEAD.
func (c *Client) CommitsUniqueToBranch(ctx context.Context, repositoryID int, branchName string, isDefaultBranch bool, maxAge *time.Time) (_ map[string]time.Time, err error) {
ctx, endObservation := c.operations.commitsUniqueToBranch.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.commitsUniqueToBranch.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("branchName", branchName),
log.Bool("isDefaultBranch", isDefaultBranch),
@ -306,7 +306,7 @@ func (c *Client) DefaultBranchContains(ctx context.Context, repositoryID int, co

// RawContents returns the contents of a file in a particular commit of a repository.
func (c *Client) RawContents(ctx context.Context, repositoryID int, commit, file string) (_ []byte, err error) {
ctx, endObservation := c.operations.rawContents.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.rawContents.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("file", file),
@ -344,7 +344,7 @@ func (c *Client) RawContents(ctx context.Context, repositoryID int, commit, file
// of git ls-tree. The keys of the resulting map are the input (unsanitized) dirnames, and the value of
// that key are the files nested under that directory.
func (c *Client) DirectoryChildren(ctx context.Context, repositoryID int, commit string, dirnames []string) (_ map[string][]string, err error) {
ctx, endObservation := c.operations.directoryChildren.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.directoryChildren.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
}})
@ -378,7 +378,7 @@ func (c *Client) DirectoryChildren(ctx context.Context, repositoryID int, commit

// FileExists determines whether a file exists in a particular commit of a repository.
func (c *Client) FileExists(ctx context.Context, repositoryID int, commit, file string) (_ bool, err error) {
ctx, endObservation := c.operations.fileExists.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.fileExists.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("file", file),
@ -409,7 +409,7 @@ func (c *Client) FileExists(ctx context.Context, repositoryID int, commit, file
// ListFiles returns a list of root-relative file paths matching the given pattern in a particular
// commit of a repository.
func (c *Client) ListFiles(ctx context.Context, repositoryID int, commit string, pattern *regexp.Regexp) (_ []string, err error) {
ctx, endObservation := c.operations.listFiles.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.listFiles.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("pattern", pattern.String()),
@ -444,7 +444,7 @@ func (c *Client) ListFiles(ctx context.Context, repositoryID int, commit string,

// ResolveRevision returns the absolute commit for a commit-ish spec.
func (c *Client) ResolveRevision(ctx context.Context, repositoryID int, versionString string) (commitID api.CommitID, err error) {
ctx, endObservation := c.operations.resolveRevision.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := c.operations.resolveRevision.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("versionString", versionString),
}})

@ -22,7 +22,7 @@ func New(observationContext *observation.Context) *Client {
}

func (c *Client) RepoLookup(ctx context.Context, name api.RepoName) (repo *protocol.RepoInfo, err error) {
ctx, endObservation := c.operations.repoLookup.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := c.operations.repoLookup.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
defer func() {
var logFields []log.Field
if repo != nil {
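The RepoLookup hunk above (and EnqueueRepoUpdate just below) shows a third recurring pattern: when the fields worth logging depend on the result, the deferred closure builds the slice conditionally before finishing the observation. A hedged sketch of that shape (result type and field names are placeholders, imports as in the first sketch):

func (s *Store) Lookup(ctx context.Context, name string) (found *struct{ Name string }, err error) {
	ctx, _, endObservation := s.operations.lookup.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
	defer func() {
		var logFields []log.Field
		if found != nil {
			// Result-derived fields are only attached when there is a result.
			logFields = append(logFields, log.String("name", found.Name))
		}
		endObservation(1, observation.Args{LogFields: logFields})
	}()

	_ = ctx // the real method would perform the lookup here and assign found
	return nil, nil
}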
@ -40,7 +40,7 @@ func (c *Client) RepoLookup(ctx context.Context, name api.RepoName) (repo *proto
}

func (c *Client) EnqueueRepoUpdate(ctx context.Context, name api.RepoName) (resp *protocol.RepoUpdateResponse, err error) {
ctx, endObservation := c.operations.enqueueRepoUpdate.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := c.operations.enqueueRepoUpdate.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
defer func() {
var logFields []log.Field
if resp != nil {

@ -46,7 +46,7 @@ func scanCommitGraphView(rows *sql.Rows, queryErr error) (_ *commitgraph.CommitG

// HasRepository determines if there is LSIF data for the given repository.
func (s *Store) HasRepository(ctx context.Context, repositoryID int) (_ bool, err error) {
ctx, endObservation := s.operations.hasRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.hasRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -62,7 +62,7 @@ SELECT 1 FROM lsif_uploads WHERE state NOT IN ('deleted', 'deleting') AND reposi

// HasCommit determines if the given commit is known for the given repository.
func (s *Store) HasCommit(ctx context.Context, repositoryID int, commit string) (_ bool, err error) {
ctx, endObservation := s.operations.hasCommit.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.hasCommit.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
}})
@ -89,7 +89,7 @@ SELECT

// MarkRepositoryAsDirty marks the given repository's commit graph as out of date.
func (s *Store) MarkRepositoryAsDirty(ctx context.Context, repositoryID int) (err error) {
ctx, endObservation := s.operations.markRepositoryAsDirty.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.markRepositoryAsDirty.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -127,7 +127,7 @@ func scanIntPairs(rows *sql.Rows, queryErr error) (_ map[int]int, err error) {
// DirtyRepositories returns a map from repository identifiers to a dirty token for each repository whose commit
// graph is out of date. This token should be passed to CalculateVisibleUploads in order to unmark the repository.
func (s *Store) DirtyRepositories(ctx context.Context) (_ map[int]int, err error) {
ctx, trace, endObservation := s.operations.dirtyRepositories.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.dirtyRepositories.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

repositories, err := scanIntPairs(s.Store.Query(ctx, sqlf.Sprintf(dirtyRepositoriesQuery)))
@ -152,7 +152,7 @@ SELECT ldr.repository_id, ldr.dirty_token
// only repositories that would be returned by DirtyRepositories. This method returns a duration of zero if there
// are no stale repositories.
func (s *Store) MaxStaleAge(ctx context.Context) (_ time.Duration, err error) {
ctx, endObservation := s.operations.maxStaleAge.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.maxStaleAge.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

ageSeconds, ok, err := basestore.ScanFirstInt(s.Store.Query(ctx, sqlf.Sprintf(maxStaleAgeQuery)))
@ -180,7 +180,7 @@ SELECT EXTRACT(EPOCH FROM NOW() - ldr.updated_at)::integer AS age
// CommitsVisibleToUpload returns the set of commits for which the given upload can answer code intelligence queries.
// To paginate, supply the token returned from this method to the invocation for the next page.
func (s *Store) CommitsVisibleToUpload(ctx context.Context, uploadID, limit int, token *string) (_ []string, nextToken *string, err error) {
ctx, endObservation := s.operations.commitsVisibleToUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.commitsVisibleToUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
log.Int("limit", limit),
}})
@ -235,7 +235,7 @@ LIMIT %s
// CommitGraphMetadata returns whether or not the commit graph for the given repository is stale, along with the date of
// the most recent commit graph refresh for the given repository.
func (s *Store) CommitGraphMetadata(ctx context.Context, repositoryID int) (stale bool, updatedAt *time.Time, err error) {
ctx, endObservation := s.operations.commitGraphMetadata.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.commitGraphMetadata.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -335,7 +335,7 @@ func (s *Store) calculateVisibleUploadsInternal(
dirtyToken int,
now *sqlf.Query,
) (err error) {
ctx, trace, endObservation := s.operations.calculateVisibleUploads.WithAndLogger(ctx, &err, observation.Args{
ctx, trace, endObservation := s.operations.calculateVisibleUploads.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.Int("numCommitGraphKeys", len(commitGraph.Order())),
@ -492,7 +492,7 @@ WHERE repository_id = %s
// caused massive table bloat on some instances. Storing into a temporary table and then inserting/updating/deleting
// records into the persisted table minimizes the number of tuples we need to touch and drastically reduces table bloat.
func (s *Store) writeVisibleUploads(ctx context.Context, sanitizedInput *sanitizedCommitInput) (err error) {
ctx, trace, endObservation := s.operations.writeVisibleUploads.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.writeVisibleUploads.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

if err := s.createTemporaryNearestUploadsTables(ctx); err != nil {
@ -601,7 +601,7 @@ CREATE TEMPORARY TABLE t_lsif_uploads_visible_at_tip (
// persistNearestUploads modifies the lsif_nearest_uploads table so that it has same data
// as t_lsif_nearest_uploads for the given repository.
func (s *Store) persistNearestUploads(ctx context.Context, repositoryID int) (err error) {
ctx, trace, endObservation := s.operations.persistNearestUploads.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.persistNearestUploads.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

rowsInserted, rowsUpdated, rowsDeleted, err := s.bulkTransfer(
@ -652,7 +652,7 @@ WHERE
// persistNearestUploadsLinks modifies the lsif_nearest_uploads_links table so that it has same
// data as t_lsif_nearest_uploads_links for the given repository.
func (s *Store) persistNearestUploadsLinks(ctx context.Context, repositoryID int) (err error) {
ctx, trace, endObservation := s.operations.persistNearestUploadsLinks.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.persistNearestUploadsLinks.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

rowsInserted, rowsUpdated, rowsDeleted, err := s.bulkTransfer(
@ -704,7 +704,7 @@ WHERE
// persistUploadsVisibleAtTip modifies the lsif_uploads_visible_at_tip table so that it has same
// data as t_lsif_uploads_visible_at_tip for the given repository.
func (s *Store) persistUploadsVisibleAtTip(ctx context.Context, repositoryID int) (err error) {
ctx, trace, endObservation := s.operations.persistUploadsVisibleAtTip.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.persistUploadsVisibleAtTip.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

rowsInserted, rowsUpdated, rowsDeleted, err := s.bulkTransfer(

@ -54,7 +54,7 @@ func scanFirstIndexConfiguration(rows *sql.Rows, err error) (IndexConfiguration,

// GetIndexConfigurationByRepositoryID returns the index configuration for a repository.
func (s *Store) GetIndexConfigurationByRepositoryID(ctx context.Context, repositoryID int) (_ IndexConfiguration, _ bool, err error) {
ctx, endObservation := s.operations.getIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -73,7 +73,7 @@ FROM lsif_index_configuration c WHERE c.repository_id = %s

// UpdateIndexConfigurationByRepositoryID updates the index configuration for a repository.
func (s *Store) UpdateIndexConfigurationByRepositoryID(ctx context.Context, repositoryID int, data []byte) (err error) {
ctx, endObservation := s.operations.updateIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})

@ -127,7 +127,7 @@ type GetConfigurationPoliciesOptions struct {
// If a repository identifier is supplied (is non-zero), then only the configuration policies that apply
// to repository are returned. If repository is not supplied, then all policies may be returned.
func (s *Store) GetConfigurationPolicies(ctx context.Context, opts GetConfigurationPoliciesOptions) (_ []ConfigurationPolicy, totalCount int, err error) {
ctx, trace, endObservation := s.operations.getConfigurationPolicies.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.getConfigurationPolicies.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("term", opts.Term),
log.Bool("forDataRetention", opts.ForDataRetention),
@ -242,7 +242,7 @@ func makeConfigurationPolicySearchCondition(term string) *sqlf.Query {

// GetConfigurationPolicyByID retrieves the configuration policy with the given identifier.
func (s *Store) GetConfigurationPolicyByID(ctx context.Context, id int) (_ ConfigurationPolicy, _ bool, err error) {
ctx, endObservation := s.operations.getConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -281,7 +281,7 @@ WHERE p.id = %s AND (p.repository_id IS NULL OR (%s))
// CreateConfigurationPolicy creates a configuration policy with the given fields (ignoring ID). The hydrated
// configuration policy record is returned.
func (s *Store) CreateConfigurationPolicy(ctx context.Context, configurationPolicy ConfigurationPolicy) (_ ConfigurationPolicy, err error) {
ctx, endObservation := s.operations.createConfigurationPolicy.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.createConfigurationPolicy.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

var retentionDurationHours *int
@ -361,7 +361,7 @@ var (

// UpdateConfigurationPolicy updates the fields of the configuration policy record with the given identifier.
func (s *Store) UpdateConfigurationPolicy(ctx context.Context, policy ConfigurationPolicy) (err error) {
ctx, endObservation := s.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", policy.ID),
}})
defer endObservation(1, observation.Args{})
@ -460,7 +460,7 @@ WHERE id = %s

// DeleteConfigurationPolicyByID deletes the configuration policy with the given identifier.
func (s *Store) DeleteConfigurationPolicyByID(ctx context.Context, id int) (err error) {
ctx, endObservation := s.operations.deleteConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.deleteConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -497,7 +497,7 @@ SELECT protected FROM candidate
// SelectPoliciesForRepositoryMembershipUpdate returns a slice of configuration policies that should be considered
// for repository membership updates. Configuration policies are returned in the order of least recently updated.
func (s *Store) SelectPoliciesForRepositoryMembershipUpdate(ctx context.Context, batchSize int) (configurationPolicies []ConfigurationPolicy, err error) {
ctx, trace, endObservation := s.operations.selectPoliciesForRepositoryMembershipUpdate.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.selectPoliciesForRepositoryMembershipUpdate.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

configurationPolicies, err = scanConfigurationPolicies(s.Store.Query(ctx, sqlf.Sprintf(selectPoliciesForRepositoryMembershipUpdate, batchSize, timeutil.Now())))

@ -177,7 +177,7 @@ func scanFirstDependencyIndexingJobRecord(rows *sql.Rows, err error) (workerutil

// InsertDependencySyncingJob inserts a new dependency syncing job and returns its identifier.
func (s *Store) InsertDependencySyncingJob(ctx context.Context, uploadID int) (id int, err error) {
ctx, endObservation := s.operations.insertDependencySyncingJob.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.insertDependencySyncingJob.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("id", id),
@ -195,7 +195,7 @@ RETURNING id
`

func (s *Store) InsertCloneableDependencyRepo(ctx context.Context, dependency precise.Package) (new bool, err error) {
ctx, endObservation := s.operations.insertCloneableDependencyRepo.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.insertCloneableDependencyRepo.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Bool("new", new),
@ -216,7 +216,7 @@ RETURNING 1
`

func (s *Store) InsertDependencyIndexingJob(ctx context.Context, uploadID int, externalServiceKind string, syncTime time.Time) (id int, err error) {
ctx, endObservation := s.operations.insertDependencyIndexingJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.insertDependencyIndexingJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadId", uploadID),
log.String("extSvcKind", externalServiceKind),
}})

@ -77,7 +77,7 @@ func scanDumps(rows *sql.Rows, queryErr error) (_ []Dump, err error) {

// GetDumpsByIDs returns a set of dumps by identifiers.
func (s *Store) GetDumpsByIDs(ctx context.Context, ids []int) (_ []Dump, err error) {
ctx, trace, endObservation := s.operations.getDumpsByIDs.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.getDumpsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numIDs", len(ids)),
log.String("ids", intsToString(ids)),
}})
@ -146,7 +146,7 @@ FROM lsif_dumps_with_repository_name u WHERE u.id IN (%s)
// splits the repository into multiple dumps. For this reason, the returned dumps are always sorted in most-recently-finished order to
// prevent returning data from stale dumps.
func (s *Store) FindClosestDumps(ctx context.Context, repositoryID int, commit, path string, rootMustEnclosePath bool, indexer string) (_ []Dump, err error) {
ctx, trace, endObservation := s.operations.findClosestDumps.WithAndLogger(ctx, &err, observation.Args{
ctx, trace, endObservation := s.operations.findClosestDumps.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
@ -200,7 +200,7 @@ ORDER BY u.finished_at DESC
// FindClosestDumpsFromGraphFragment returns the set of dumps that can most accurately answer queries for the given repository, commit,
// path, and optional indexer by only considering the given fragment of the full git graph. See FindClosestDumps for additional details.
func (s *Store) FindClosestDumpsFromGraphFragment(ctx context.Context, repositoryID int, commit, path string, rootMustEnclosePath bool, indexer string, commitGraph *gitdomain.CommitGraph) (_ []Dump, err error) {
ctx, trace, endObservation := s.operations.findClosestDumpsFromGraphFragment.WithAndLogger(ctx, &err, observation.Args{
ctx, trace, endObservation := s.operations.findClosestDumpsFromGraphFragment.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
@ -372,7 +372,7 @@ func makeFindClosestDumpConditions(path string, rootMustEnclosePath bool, indexe
// commit, root, and indexer. This is necessary to perform during conversions before changing
// the state of a processing upload to completed as there is a unique index on these four columns.
func (s *Store) DeleteOverlappingDumps(ctx context.Context, repositoryID int, commit, root, indexer string) (err error) {
ctx, trace, endObservation := s.operations.deleteOverlappingDumps.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.deleteOverlappingDumps.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("root", root),

@ -112,7 +112,7 @@ var ScanFirstIndexRecord = scanFirstIndexRecord

// GetIndexByID returns an index by its identifier and boolean flag indicating its existence.
func (s *Store) GetIndexByID(ctx context.Context, id int) (_ Index, _ bool, err error) {
ctx, endObservation := s.operations.getIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -173,7 +173,7 @@ WHERE u.id = %s AND %s
// GetIndexesByIDs returns an index for each of the given identifiers. Not all given ids will necessarily
// have a corresponding element in the returned list.
func (s *Store) GetIndexesByIDs(ctx context.Context, ids ...int) (_ []Index, err error) {
ctx, endObservation := s.operations.getIndexesByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getIndexesByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("ids", intsToString(ids)),
}})
defer endObservation(1, observation.Args{})
@ -237,7 +237,7 @@ type GetIndexesOptions struct {

// GetIndexes returns a list of indexes and the total count of records matching the given conditions.
func (s *Store) GetIndexes(ctx context.Context, opts GetIndexesOptions) (_ []Index, _ int, err error) {
ctx, trace, endObservation := s.operations.getIndexes.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.getIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("state", opts.State),
log.String("term", opts.Term),
@ -347,7 +347,7 @@ func makeIndexSearchCondition(term string) *sqlf.Query {

// IsQueued returns true if there is an index or an upload for the repository and commit.
func (s *Store) IsQueued(ctx context.Context, repositoryID int, commit string) (_ bool, err error) {
ctx, endObservation := s.operations.isQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.isQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
}})
@ -368,7 +368,7 @@ SELECT COUNT(*) WHERE EXISTS (

// InsertIndexes inserts a new index and returns the hydrated index models.
func (s *Store) InsertIndexes(ctx context.Context, indexes []Index) (_ []Index, err error) {
ctx, endObservation := s.operations.insertIndex.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.insertIndex.With(ctx, &err, observation.Args{})
|
||||
defer func() {
|
||||
endObservation(1, observation.Args{LogFields: []log.Field{
|
||||
log.Int("numIndexes", len(indexes)),
|
||||
@ -465,7 +465,7 @@ var IndexColumnsWithNullRank = indexColumnsWithNullRank
|
||||
|
||||
// DeleteIndexByID deletes an index by its identifier.
|
||||
func (s *Store) DeleteIndexByID(ctx context.Context, id int) (_ bool, err error) {
|
||||
ctx, endObservation := s.operations.deleteIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
ctx, _, endObservation := s.operations.deleteIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
log.Int("id", id),
|
||||
}})
|
||||
defer endObservation(1, observation.Args{})
|
||||
@ -489,7 +489,7 @@ DELETE FROM lsif_indexes WHERE id = %s RETURNING repository_id
|
||||
// DeletedRepositoryGracePeriod ago. This returns the repository identifier mapped to the number of indexes
|
||||
// that were removed for that repository.
|
||||
func (s *Store) DeleteIndexesWithoutRepository(ctx context.Context, now time.Time) (_ map[int]int, err error) {
|
||||
ctx, trace, endObservation := s.operations.deleteIndexesWithoutRepository.WithAndLogger(ctx, &err, observation.Args{})
|
||||
ctx, trace, endObservation := s.operations.deleteIndexesWithoutRepository.With(ctx, &err, observation.Args{})
|
||||
defer endObservation(1, observation.Args{})
|
||||
|
||||
// TODO(efritz) - this would benefit from an index on repository_id. We currently have
|
||||
@ -536,7 +536,7 @@ SELECT d.repository_id, COUNT(*) FROM deleted d GROUP BY d.repository_id
|
||||
// LastIndexScanForRepository returns the last timestamp, if any, that the repository with the given
|
||||
// identifier was considered for auto-indexing scheduling.
|
||||
func (s *Store) LastIndexScanForRepository(ctx context.Context, repositoryID int) (_ *time.Time, err error) {
|
||||
ctx, endObservation := s.operations.lastIndexScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
ctx, _, endObservation := s.operations.lastIndexScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
log.Int("repositoryID", repositoryID),
|
||||
}})
|
||||
defer endObservation(1, observation.Args{})
|
||||
@ -565,7 +565,7 @@ type IndexesWithRepositoryNamespace struct {
|
||||
// include the set of unprocessed records as well as the latest finished record. These values allow users to
|
||||
// quickly determine if a particular root/indexer pair os up-to-date or having issues processing.
|
||||
func (s *Store) RecentIndexesSummary(ctx context.Context, repositoryID int) (summaries []IndexesWithRepositoryNamespace, err error) {
|
||||
ctx, logger, endObservation := s.operations.recentIndexesSummary.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
ctx, logger, endObservation := s.operations.recentIndexesSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{
|
||||
log.Int("repositoryID", repositoryID),
|
||||
}})
|
||||
defer endObservation(1, observation.Args{})
|
||||
|
||||
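Where the call site actually uses the second return value (as RecentIndexesSummary above does, binding it as logger), only the method name changes: WithAndLogger becomes With. Continuing the sketch above with a hypothetical recentSummary operation, and assuming the returned trace logger keeps its pre-existing Log method for attaching fields mid-operation:

func (s *Store) recentSummary(ctx context.Context, repositoryID int) (err error) {
	ctx, trace, endObservation := s.operations.doSomething.With(ctx, &err, observation.Args{LogFields: []log.Field{
		log.Int("repositoryID", repositoryID),
	}})
	defer endObservation(1, observation.Args{})

	// Intermediate values can be attached to the observation as work proceeds.
	trace.Log(log.Int("numRecords", 42)) // hypothetical intermediate value
	_ = ctx
	return nil
}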
@ -62,7 +62,7 @@ func ScanSourcedCommits(rows *sql.Rows, queryErr error) (_ []SourcedCommits, err
// paths and clean up that occupied (but useless) space. The output of this method is
// ordered by repository ID then by commit.
func (s *Store) StaleSourcedCommits(ctx context.Context, minimumTimeSinceLastCheck time.Duration, limit int, now time.Time) (_ []SourcedCommits, err error) {
ctx, trace, endObservation := s.operations.staleSourcedCommits.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.staleSourcedCommits.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

now = now.UTC()
@ -123,7 +123,7 @@ GROUP BY repository_id, commit
// to the given repository identifier and commit. This method returns the count of upload and index records
// modified, respectively.
func (s *Store) UpdateSourcedCommits(ctx context.Context, repositoryID int, commit string, now time.Time) (uploadsUpdated int, indexesUpdated int, err error) {
ctx, trace, endObservation := s.operations.updateSourcedCommits.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.updateSourcedCommits.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
}})
@ -207,7 +207,7 @@ func (s *Store) DeleteSourcedCommits(ctx context.Context, repositoryID int, comm
indexesDeleted int,
err error,
) {
ctx, trace, endObservation := s.operations.deleteSourcedCommits.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.deleteSourcedCommits.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
}})
@ -301,7 +301,7 @@ func scanTripleOfCounts(rows *sql.Rows, queryErr error) (value1, value2, value3

// DeleteOldAuditLogs removes lsif_upload audit log records older than the given max age.
func (s *Store) DeleteOldAuditLogs(ctx context.Context, maxAge time.Duration, now time.Time) (_ int, err error) {
ctx, endObservation := s.operations.deleteOldAuditLogs.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.deleteOldAuditLogs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

count, _, err := basestore.ScanFirstInt(s.Store.Query(ctx, sqlf.Sprintf(deleteOldAuditLogsQuery, now, int(maxAge/time.Second))))

@ -13,7 +13,7 @@ import (

// UpdatePackages upserts package data tied to the given upload.
func (s *Store) UpdatePackages(ctx context.Context, dumpID int, packages []precise.Package) (err error) {
ctx, endObservation := s.operations.updatePackages.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updatePackages.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numPackages", len(packages)),
}})
defer endObservation(1, observation.Args{})

@ -13,7 +13,7 @@ import (

// UpdatePackageReferences inserts reference data tied to the given upload.
func (s *Store) UpdatePackageReferences(ctx context.Context, dumpID int, references []precise.PackageReference) (err error) {
ctx, endObservation := s.operations.updatePackageReferences.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updatePackageReferences.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numReferences", len(references)),
}})
defer endObservation(1, observation.Args{})

@ -20,7 +20,7 @@ var ErrUnknownRepository = errors.New("unknown repository")

// RepoName returns the name for the repo with the given identifier.
func (s *Store) RepoName(ctx context.Context, repositoryID int) (_ string, err error) {
ctx, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -65,7 +65,7 @@ func scanRepoNames(rows *sql.Rows, queryErr error) (_ map[int]string, err error)

// RepoNames returns a map from repository id to names.
func (s *Store) RepoNames(ctx context.Context, repositoryIDs ...int) (_ map[int]string, err error) {
ctx, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.repoName.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numRepositories", len(repositoryIDs)),
}})
defer endObservation(1, observation.Args{})
@ -81,7 +81,7 @@ SELECT id, name FROM repo WHERE id = ANY(%s)
// RepoIDsByGlobPatterns returns a page of repository identifiers and a total count of repositories matching
// one of the given patterns.
func (s *Store) RepoIDsByGlobPatterns(ctx context.Context, patterns []string, limit, offset int) (_ []int, _ int, err error) {
ctx, endObservation := s.operations.repoIDsByGlobPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.repoIDsByGlobPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("patterns", strings.Join(patterns, ", ")),
log.Int("limit", limit),
log.Int("offset", offset),
@ -152,7 +152,7 @@ LIMIT %s OFFSET %s
// matches exceeds the given limit (if supplied), then only top ranked repositories by star count
// will be associated to the policy in the database and the remainder will be dropped.
func (s *Store) UpdateReposMatchingPatterns(ctx context.Context, patterns []string, policyID int, repositoryMatchLimit *int) (err error) {
ctx, endObservation := s.operations.updateReposMatchingPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateReposMatchingPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("pattern", strings.Join(patterns, ",")),
log.Int("policyID", policyID),
}})

@ -10,7 +10,7 @@ import (
)

func (s *Store) RequestLanguageSupport(ctx context.Context, userID int, language string) (err error) {
ctx, endObservation := s.operations.requestLanguageSupport.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.requestLanguageSupport.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

return s.Exec(ctx, sqlf.Sprintf(requestLanguageSupportQuery, userID, language))
@ -24,7 +24,7 @@ ON CONFLICT DO NOTHING
`

func (s *Store) LanguagesRequestedBy(ctx context.Context, userID int) (_ []string, err error) {
ctx, endObservation := s.operations.languagesRequestedBy.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.languagesRequestedBy.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

return basestore.ScanStrings(s.Query(ctx, sqlf.Sprintf(languagesRequestedByQuery, userID)))

@ -138,7 +138,7 @@ func scanCounts(rows *sql.Rows, queryErr error) (_ map[int]int, err error) {

// GetUploadByID returns an upload by its identifier and boolean flag indicating its existence.
func (s *Store) GetUploadByID(ctx context.Context, id int) (_ Upload, _ bool, err error) {
ctx, endObservation := s.operations.getUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -195,7 +195,7 @@ const visibleAtTipSubselectQuery = `SELECT 1 FROM lsif_uploads_visible_at_tip uv
// GetUploadsByIDs returns an upload for each of the given identifiers. Not all given ids will necessarily
// have a corresponding element in the returned list.
func (s *Store) GetUploadsByIDs(ctx context.Context, ids ...int) (_ []Upload, err error) {
ctx, endObservation := s.operations.getUploadsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getUploadsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("ids", intsToString(ids)),
}})
defer endObservation(1, observation.Args{})
@ -250,7 +250,7 @@ WHERE u.state != 'deleted' AND u.id IN (%s) AND %s

// DeleteUploadsStuckUploading soft deletes any upload record that has been uploading since the given time.
func (s *Store) DeleteUploadsStuckUploading(ctx context.Context, uploadedBefore time.Time) (_ int, err error) {
ctx, trace, endObservation := s.operations.deleteUploadsStuckUploading.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.deleteUploadsStuckUploading.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("uploadedBefore", uploadedBefore.Format(time.RFC3339)), // TODO - should be a duration
}})
defer endObservation(1, observation.Args{})
@ -309,7 +309,7 @@ type GetUploadsOptions struct {

// GetUploads returns a list of uploads and the total count of records matching the given conditions.
func (s *Store) GetUploads(ctx context.Context, opts GetUploadsOptions) (_ []Upload, _ int, err error) {
ctx, trace, endObservation := s.operations.getUploads.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.getUploads.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("state", opts.State),
log.String("term", opts.Term),
@ -587,7 +587,7 @@ func makeStateCondition(state string) *sqlf.Query {

// InsertUpload inserts a new upload and returns its identifier.
func (s *Store) InsertUpload(ctx context.Context, upload Upload) (id int, err error) {
ctx, endObservation := s.operations.insertUpload.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.insertUpload.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("id", id),
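Hunks such as InsertCloneableDependencyRepo and InsertUpload above wrap endObservation in a deferred closure rather than deferring the call directly: fields that are only known at return time (the new flag, the inserted id) are then evaluated when the observation ends, not when it starts. In the same illustrative style as the earlier sketch:

func (s *Store) insertThing(ctx context.Context) (id int, err error) {
	ctx, _, endObservation := s.operations.doSomething.With(ctx, &err, observation.Args{})
	defer func() {
		// id is captured by the closure, so the value logged is whatever has
		// been assigned by the time insertThing returns.
		endObservation(1, observation.Args{LogFields: []log.Field{
			log.Int("id", id),
		}})
	}()

	id = 42 // stand-in for the real INSERT ... RETURNING id
	_ = ctx
	return id, nil
}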
@ -638,7 +638,7 @@ RETURNING id
// AddUploadPart adds the part index to the given upload's uploaded parts array. This method is idempotent
// (the resulting array is deduplicated on update).
func (s *Store) AddUploadPart(ctx context.Context, uploadID, partIndex int) (err error) {
ctx, endObservation := s.operations.addUploadPart.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.addUploadPart.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
log.Int("partIndex", partIndex),
}})
@ -654,7 +654,7 @@ UPDATE lsif_uploads SET uploaded_parts = array(SELECT DISTINCT * FROM unnest(arr

// MarkQueued updates the state of the upload to queued and updates the upload size.
func (s *Store) MarkQueued(ctx context.Context, id int, uploadSize *int64) (err error) {
ctx, endObservation := s.operations.markQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.markQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -674,7 +674,7 @@ WHERE id = %s

// MarkFailed updates the state of the upload to failed, increments the num_failures column and sets the finished_at time
func (s *Store) MarkFailed(ctx context.Context, id int, reason string) (err error) {
ctx, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -723,7 +723,7 @@ var uploadColumnsWithNullRank = []*sqlf.Query{
// was deleted. The associated repository will be marked as dirty so that its commit graph will be updated in
// the background.
func (s *Store) DeleteUploadByID(ctx context.Context, id int) (_ bool, err error) {
ctx, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -762,7 +762,7 @@ const DeletedRepositoryGracePeriod = time.Minute * 30
// DeletedRepositoryGracePeriod ago. This returns the repository identifier mapped to the number of uploads
// that were removed for that repository.
func (s *Store) DeleteUploadsWithoutRepository(ctx context.Context, now time.Time) (_ map[int]int, err error) {
ctx, trace, endObservation := s.operations.deleteUploadsWithoutRepository.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.deleteUploadsWithoutRepository.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

repositories, err := scanCounts(s.Store.Query(ctx, sqlf.Sprintf(deleteUploadsWithoutRepositoryQuery, now.UTC(), DeletedRepositoryGracePeriod/time.Second)))
@ -810,7 +810,7 @@ SELECT d.repository_id, COUNT(*) FROM deleted d GROUP BY d.repository_id

// HardDeleteUploadByID deletes the upload record with the given identifier.
func (s *Store) HardDeleteUploadByID(ctx context.Context, ids ...int) (err error) {
ctx, endObservation := s.operations.hardDeleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.hardDeleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numIDs", len(ids)),
log.String("ids", intsToString(ids)),
}})
@ -868,7 +868,7 @@ func (s *Store) SelectRepositoriesForIndexScan(ctx context.Context, processDelay
}

func (s *Store) selectRepositoriesForIndexScan(ctx context.Context, processDelay time.Duration, allowGlobalPolicies bool, repositoryMatchLimit *int, limit int, now time.Time) (_ []int, err error) {
ctx, endObservation := s.operations.selectRepositoriesForIndexScan.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.selectRepositoriesForIndexScan.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Bool("allowGlobalPolicies", allowGlobalPolicies),
log.Int("limit", limit),
}})
@ -960,7 +960,7 @@ func (s *Store) SelectRepositoriesForRetentionScan(ctx context.Context, processD
}

func (s *Store) selectRepositoriesForRetentionScan(ctx context.Context, processDelay time.Duration, limit int, now time.Time) (_ []int, err error) {
ctx, endObservation := s.operations.selectRepositoriesForRetentionScan.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.selectRepositoriesForRetentionScan.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

return basestore.ScanInts(s.Query(ctx, sqlf.Sprintf(
@ -1010,7 +1010,7 @@ func (s *Store) UpdateUploadRetention(ctx context.Context, protectedIDs, expired
}

func (s *Store) updateUploadRetention(ctx context.Context, protectedIDs, expiredIDs []int, now time.Time) (err error) {
ctx, endObservation := s.operations.updateUploadRetention.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateUploadRetention.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numProtectedIDs", len(protectedIDs)),
log.String("protectedIDs", intsToString(protectedIDs)),
log.Int("numExpiredIDs", len(expiredIDs)),
@ -1088,7 +1088,7 @@ var deltaMap = map[DependencyReferenceCountUpdateType]int{
// To keep reference counts consistent, this method should be called directly after insertion and directly
// before deletion of each upload record.
func (s *Store) UpdateReferenceCounts(ctx context.Context, ids []int, dependencyUpdateType DependencyReferenceCountUpdateType) (updated int, err error) {
ctx, endObservation := s.operations.updateReferenceCounts.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateReferenceCounts.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numIDs", len(ids)),
log.String("ids", intsToString(ids)),
log.Int("dependencyUpdateType", int(dependencyUpdateType)),
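The comment above UpdateReferenceCounts prescribes a calling protocol rather than an implementation detail: bump counts directly after inserting records and directly before deleting them, so the counts never drift. A hedged sketch of a caller; the two update-type constant names below are assumptions, since only the DependencyReferenceCountUpdateType type and its deltaMap appear in this diff:

func swapUploads(ctx context.Context, store *Store, insertedIDs, doomedIDs []int) error {
	// Directly after insertion: increment counts for the new records.
	if _, err := store.UpdateReferenceCounts(ctx, insertedIDs, DependencyReferenceCountUpdateTypeAdd); err != nil { // hypothetical constant name
		return err
	}
	// Directly before deletion: decrement counts for the doomed records.
	if _, err := store.UpdateReferenceCounts(ctx, doomedIDs, DependencyReferenceCountUpdateTypeRemove); err != nil { // hypothetical constant name
		return err
	}
	// ... the actual deletion of doomedIDs would follow here ...
	return nil
}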
@ -1284,7 +1284,7 @@ FROM locked_uploads lu WHERE lu.id = u.id
// as deleted. The associated repositories will be marked as dirty so that their commit graphs
// are updated in the near future.
func (s *Store) SoftDeleteExpiredUploads(ctx context.Context) (count int, err error) {
ctx, trace, endObservation := s.operations.softDeleteExpiredUploads.WithAndLogger(ctx, &err, observation.Args{})
ctx, trace, endObservation := s.operations.softDeleteExpiredUploads.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

tx, err := s.transact(ctx)
@ -1343,7 +1343,7 @@ SELECT u.repository_id, count(*) FROM updated u GROUP BY u.repository_id
// GetOldestCommitDate returns the oldest commit date for all uploads for the given repository. If there are no
// non-nil values, a false-valued flag is returned.
func (s *Store) GetOldestCommitDate(ctx context.Context, repositoryID int) (_ time.Time, _ bool, err error) {
ctx, _, endObservation := s.operations.getOldestCommitDate.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.getOldestCommitDate.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -1361,7 +1361,7 @@ SELECT committed_at FROM lsif_uploads WHERE repository_id = %s AND state = 'comp

// UpdateCommitedAt updates the commit date for the given repository.
func (s *Store) UpdateCommitedAt(ctx context.Context, uploadID int, committedAt time.Time) (err error) {
ctx, _, endObservation := s.operations.updateCommitedAt.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.updateCommitedAt.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
}})
defer endObservation(1, observation.Args{})
@ -1394,7 +1394,7 @@ func nilTimeToString(t *time.Time) string {
// LastUploadRetentionScanForRepository returns the last timestamp, if any, that the repository with the
// given identifier was considered for upload expiration checks.
func (s *Store) LastUploadRetentionScanForRepository(ctx context.Context, repositoryID int) (_ *time.Time, err error) {
ctx, endObservation := s.operations.lastUploadRetentionScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.lastUploadRetentionScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -1423,7 +1423,7 @@ type UploadsWithRepositoryNamespace struct {
// include the set of unprocessed records as well as the latest finished record. These values allow users to
// quickly determine if a particular root/indexer pair is up-to-date or having issues processing.
func (s *Store) RecentUploadsSummary(ctx context.Context, repositoryID int) (upload []UploadsWithRepositoryNamespace, err error) {
ctx, logger, endObservation := s.operations.recentUploadsSummary.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, logger, endObservation := s.operations.recentUploadsSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -22,7 +22,7 @@ var DefinitionDumpsLimit, _ = strconv.ParseInt(env.Get("PRECISE_CODE_INTEL_DEFIN

// DefinitionDumps returns the set of dumps that define at least one of the given monikers.
func (s *Store) DefinitionDumps(ctx context.Context, monikers []precise.QualifiedMonikerData) (_ []Dump, err error) {
ctx, trace, endObservation := s.operations.definitionDumps.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.definitionDumps.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numMonikers", len(monikers)),
log.String("monikers", monikersToString(monikers)),
}})
@ -127,7 +127,7 @@ rank() OVER (
// it can be seen from the given index; otherwise, an index is visible if it can be seen from the tip of
// the default branch of its own repository.
func (s *Store) ReferenceIDsAndFilters(ctx context.Context, repositoryID int, commit string, monikers []precise.QualifiedMonikerData, limit, offset int) (_ PackageReferenceScanner, _ int, err error) {
ctx, trace, endObservation := s.operations.referenceIDsAndFilters.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.referenceIDsAndFilters.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.Int("numMonikers", len(monikers)),
@ -238,7 +238,7 @@ func monikersToString(vs []precise.QualifiedMonikerData) string {
// scanner will return nulls for the Filter field as it's expected to be unused (and rather heavy) by
// callers.
func (s *Store) ReferencesForUpload(ctx context.Context, uploadID int) (_ PackageReferenceScanner, err error) {
ctx, endObservation := s.operations.referencesForUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.referencesForUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
}})
defer endObservation(1, observation.Args{})

@ -26,7 +26,7 @@ var tableNames = []string{
}

func (s *Store) Clear(ctx context.Context, bundleIDs ...int) (err error) {
ctx, trace, endObservation := s.operations.clear.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.clear.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numBundleIDs", len(bundleIDs)),
log.String("bundleIDs", intsToString(bundleIDs)),
}})

@ -28,7 +28,7 @@ const CurrentImplementationsSchemaVersion = 2

// WriteMeta is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteMeta(ctx context.Context, bundleID int, meta precise.MetaData) (err error) {
ctx, endObservation := s.operations.writeMeta.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.writeMeta.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})
@ -38,7 +38,7 @@ func (s *Store) WriteMeta(ctx context.Context, bundleID int, meta precise.MetaDa

// WriteDocuments is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteDocuments(ctx context.Context, bundleID int, documents chan precise.KeyedDocumentData) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeDocuments.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeDocuments.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})
@ -128,7 +128,7 @@ FROM t_lsif_data_documents source

// WriteResultChunks is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteResultChunks(ctx context.Context, bundleID int, resultChunks chan precise.IndexedResultChunkData) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeResultChunks.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeResultChunks.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})
@ -195,7 +195,7 @@ FROM t_lsif_data_result_chunks source

// WriteDefinitions is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteDefinitions(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeDefinitions.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeDefinitions.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})
@ -205,7 +205,7 @@ func (s *Store) WriteDefinitions(ctx context.Context, bundleID int, monikerLocat

// WriteReferences is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteReferences(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeReferences.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeReferences.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})
@ -215,7 +215,7 @@ func (s *Store) WriteReferences(ctx context.Context, bundleID int, monikerLocati

// WriteImplementations is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteImplementations(ctx context.Context, bundleID int, monikerLocations chan precise.MonikerLocations) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeImplementations.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeImplementations.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})

@ -41,7 +41,7 @@ func (s *Store) WriteDocumentationPages(
repositoryNameID int,
languageNameID int,
) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeDocumentationPages.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeDocumentationPages.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", upload.ID),
log.String("repo", upload.RepositoryName),
log.String("commit", upload.Commit),
@ -124,7 +124,7 @@ FROM t_lsif_data_documentation_pages source

// WriteDocumentationPathInfo is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteDocumentationPathInfo(ctx context.Context, bundleID int, documentationPathInfo chan *precise.DocumentationPathInfoData) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeDocumentationPathInfo.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeDocumentationPathInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})
@ -190,7 +190,7 @@ FROM t_lsif_data_documentation_path_info source

// WriteDocumentationMappings is called (transactionally) from the precise-code-intel-worker.
func (s *Store) WriteDocumentationMappings(ctx context.Context, bundleID int, mappings chan precise.DocumentationMapping) (count uint32, err error) {
ctx, trace, endObservation := s.operations.writeDocumentationMappings.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.writeDocumentationMappings.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
}})
defer endObservation(1, observation.Args{})
@ -256,7 +256,7 @@ FROM t_lsif_data_documentation_mappings source
// outside of a long-running transaction to reduce lock contention from shared rows being held longer
// than necessary.
func (s *Store) WriteDocumentationSearchPrework(ctx context.Context, upload dbstore.Upload, repo *types.Repo, isDefaultBranch bool) (_ int, _ int, err error) {
ctx, endObservation := s.operations.writeDocumentationSearchPrework.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.writeDocumentationSearchPrework.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repo", upload.RepositoryName),
log.Int("bundleID", upload.ID),
}})
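The comment above WriteDocumentationSearchPrework records a deliberate ordering: the shared repository-name and language-name rows are resolved up front, outside the long-running transaction, so those hot rows are not locked for the whole write. A sketch of the implied calling order, using only the signatures visible in these hunks (imports and surrounding setup are elided, and WriteDocumentationSearch's full parameter list is truncated in this diff, so that call is left in outline):

func writeSearch(ctx context.Context, store *Store, upload dbstore.Upload, repo *types.Repo, isDefaultBranch bool) error {
	// Outside the transaction: resolve the shared name rows once.
	repositoryNameID, languageNameID, err := store.WriteDocumentationSearchPrework(ctx, upload, repo, isDefaultBranch)
	if err != nil {
		return err
	}
	// Inside the transaction: pass the pre-resolved IDs through to
	// WriteDocumentationSearch so the shared rows stay untouched.
	_ = repositoryNameID
	_ = languageNameID
	return nil
}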
@ -304,7 +304,7 @@ func (s *Store) WriteDocumentationSearch(
repositoryNameID int,
languageNameID int,
) (err error) {
ctx, endObservation := s.operations.writeDocumentationSearch.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.writeDocumentationSearch.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repo", upload.RepositoryName),
log.Int("bundleID", upload.ID),
log.Int("pages", len(pages)),
@ -12,7 +12,7 @@ import (
// Diagnostics returns the diagnostics for the documents that have the given path prefix. This method
// also returns the size of the complete result set to aid in pagination.
func (s *Store) Diagnostics(ctx context.Context, bundleID int, prefix string, limit, offset int) (_ []Diagnostic, _ int, err error) {
ctx, trace, endObservation := s.operations.diagnostics.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.diagnostics.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("prefix", prefix),
log.Int("limit", limit),

@ -22,7 +22,7 @@ import (

// DocumentationPage returns the documentation page with the given PathID.
func (s *Store) DocumentationPage(ctx context.Context, bundleID int, pathID string) (_ *precise.DocumentationPageData, err error) {
ctx, _, endObservation := s.operations.documentationPage.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.documentationPage.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("pathID", pathID),
}})
@ -81,7 +81,7 @@ func (s *Store) scanFirstDocumentationPageData(rows *sql.Rows, queryErr error) (

// DocumentationPathInfo returns info describing what is at the given pathID.
func (s *Store) DocumentationPathInfo(ctx context.Context, bundleID int, pathID string) (_ *precise.DocumentationPathInfoData, err error) {
ctx, _, endObservation := s.operations.documentationPathInfo.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.documentationPathInfo.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("pathID", pathID),
}})
@ -141,7 +141,7 @@ func (s *Store) scanFirstDocumentationPathInfoData(rows *sql.Rows, queryErr erro
// documentationIDsToPathIDs returns a mapping of the given documentationResult IDs to their
// associated path IDs. Empty result IDs ("") are ignored.
func (s *Store) documentationIDsToPathIDs(ctx context.Context, bundleID int, ids []precise.ID) (_ map[precise.ID]string, err error) {
ctx, _, endObservation := s.operations.documentationIDsToPathIDs.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.documentationIDsToPathIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("ids", fmt.Sprint(ids)),
}})
@ -202,7 +202,7 @@ WHERE
`

func (s *Store) documentationPathIDToID(ctx context.Context, bundleID int, pathID string) (_ precise.ID, err error) {
ctx, _, endObservation := s.operations.documentationPathIDToID.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.documentationPathIDToID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("pathID", pathID),
}})
@ -252,7 +252,7 @@ func (s *Store) scanFirstDocumentationResultID(rows *sql.Rows, queryErr error) (
// e.g. the file where the documented symbol is located - if the path ID is describing such a
// symbol, or nil otherwise.
func (s *Store) documentationPathIDToFilePath(ctx context.Context, bundleID int, pathID string) (_ *string, err error) {
ctx, _, endObservation := s.operations.documentationPathIDToFilePath.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.documentationPathIDToFilePath.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("pathID", pathID),
}})
@ -312,7 +312,7 @@ func (s *Store) documentationDefinitions(
limit,
offset int,
) (_ []Location, _ int, err error) {
ctx, trace, endObservation := operation.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := operation.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("resultID", string(resultID)),
}})
@ -371,7 +371,7 @@ func (s *Store) documentationDefinitions(
// enforce that the user only has the ability to view results that are from repositories they have
// access to.
func (s *Store) documentationSearchRepoNameIDs(ctx context.Context, tableSuffix string, possibleRepos []string) (_ []int64, err error) {
ctx, _, endObservation := s.operations.documentationSearchRepoNameIDs.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.documentationSearchRepoNameIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("table", tableSuffix),
log.String("possibleRepos", fmt.Sprint(possibleRepos)),
}})
@ -408,7 +408,7 @@ var debugAPIDocsSearchCandidates, _ = strconv.ParseInt(env.Get("DEBUG_API_DOCS_S
// enforce that the user only has the ability to view results that are from repositories they have
// access to.
func (s *Store) DocumentationSearch(ctx context.Context, tableSuffix, query string, repos []string) (_ []precise.DocumentationSearchResult, err error) {
ctx, _, endObservation := s.operations.documentationSearch.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.documentationSearch.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("table", tableSuffix),
log.String("query", query),
log.String("repos", fmt.Sprint(repos)),

@ -21,7 +21,7 @@ func (s *Store) DeleteOldPrivateSearchRecords(ctx context.Context, minimumTimeSi
}

func (s *Store) deleteOldSearchRecords(ctx context.Context, minimumTimeSinceLastCheck time.Duration, limit int, tableSuffix string, now time.Time) (_ int, err error) {
ctx, endObservation := s.operations.deleteOldSearchRecords.With(ctx, &err, observation.Args{})
ctx, _, endObservation := s.operations.deleteOldSearchRecords.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

now = now.UTC()

@ -12,7 +12,7 @@ import (

// Exists determines if the path exists in the database.
func (s *Store) Exists(ctx context.Context, bundleID int, path string) (_ bool, err error) {
ctx, endObservation := s.operations.exists.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.exists.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
}})

@ -12,7 +12,7 @@ import (

// Hover returns the hover text of the symbol at the given position.
func (s *Store) Hover(ctx context.Context, bundleID int, path string, line, character int) (_ string, _ Range, _ bool, err error) {
ctx, trace, endObservation := s.operations.hover.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.hover.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("line", line),

@ -36,7 +36,7 @@ func (s *Store) Implementations(ctx context.Context, bundleID int, path string,
}

func (s *Store) definitionsReferences(ctx context.Context, extractor func(r precise.RangeData) precise.ID, operation *observation.Operation, bundleID int, path string, line, character, limit, offset int) (_ []Location, _ int, err error) {
ctx, trace, endObservation := operation.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := operation.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("line", line),
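definitionsReferences here (and documentationDefinitions earlier) take the *observation.Operation as a parameter: several public methods share one implementation while each reports under its own operation name. Sketched under the same assumptions as the earlier examples:

func (s *Store) sharedHelper(ctx context.Context, op *observation.Operation, bundleID int) (err error) {
	// The caller decides which operation this invocation is attributed to.
	ctx, trace, endObservation := op.With(ctx, &err, observation.Args{LogFields: []log.Field{
		log.Int("bundleID", bundleID),
	}})
	defer endObservation(1, observation.Args{})

	trace.Log(log.String("step", "query")) // hypothetical trace point
	_ = ctx
	return nil
}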
@ -72,7 +72,7 @@ func (s *Store) definitionsReferences(ctx context.Context, extractor func(r prec
// method returns a map from result set identifiers to another map from document paths to locations
// within that document, as well as a total count of locations within the map.
func (s *Store) locations(ctx context.Context, bundleID int, ids []precise.ID, limit, offset int) (_ map[precise.ID][]Location, _ int, err error) {
ctx, trace, endObservation := s.operations.locations.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.locations.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.Int("numIDs", len(ids)),
log.String("ids", idsToString(ids)),

@ -17,7 +17,7 @@ import (
// of monikers are attached to a single range. The order of the output slice is "outside-in", so that
// the ranges attached to earlier monikers enclose the ranges attached to later monikers.
func (s *Store) MonikersByPosition(ctx context.Context, bundleID int, path string, line, character int) (_ [][]precise.MonikerData, err error) {
ctx, trace, endObservation := s.operations.monikersByPosition.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.monikersByPosition.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("line", line),
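MonikersByPosition's comment defines an ordering contract: the outer slice runs outside-in, widest enclosing range first. A small consumer sketch (fmt import elided), assuming precise.MonikerData carries Scheme and Identifier fields as LSIF monikers conventionally do:

func printMonikers(ctx context.Context, store *Store, bundleID int, path string, line, character int) error {
	monikerSets, err := store.MonikersByPosition(ctx, bundleID, path, line, character)
	if err != nil {
		return err
	}
	for i, monikers := range monikerSets {
		// Index 0 is the widest enclosing range; later entries are nested inside it.
		for _, m := range monikers {
			fmt.Printf("range %d: %s %s\n", i, m.Scheme, m.Identifier)
		}
	}
	return nil
}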
@ -74,7 +74,7 @@ LIMIT 1
// whose scheme+identifier matches one of the given monikers. This method also returns the size of the
// complete result set to aid in pagination.
func (s *Store) BulkMonikerResults(ctx context.Context, tableName string, uploadIDs []int, monikers []precise.MonikerData, limit, offset int) (_ []Location, _ int, err error) {
ctx, trace, endObservation := s.operations.bulkMonikerResults.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.bulkMonikerResults.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("tableName", tableName),
log.Int("numUploadIDs", len(uploadIDs)),
log.String("uploadIDs", intsToString(uploadIDs)),

@ -12,7 +12,7 @@ import (

// PackageInformation looks up package information data by identifier.
func (s *Store) PackageInformation(ctx context.Context, bundleID int, path, packageInformationID string) (_ precise.PackageInformationData, _ bool, err error) {
ctx, endObservation := s.operations.packageInformation.With(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, _, endObservation := s.operations.packageInformation.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.String("packageInformationID", packageInformationID),

@ -17,7 +17,7 @@ const MaximumRangesDefinitionLocations = 10000

// Ranges returns definition, reference, implementation, hover, and documentation data for each range within the given span of lines.
func (s *Store) Ranges(ctx context.Context, bundleID int, path string, startLine, endLine int) (_ []CodeIntelligenceRange, err error) {
ctx, trace, endObservation := s.operations.ranges.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.ranges.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("startLine", startLine),
@ -78,7 +78,7 @@ func (s *Store) Ranges(ctx context.Context, bundleID int, path string, startLine

// DocumentationAtPosition returns documentation path IDs found at the given position.
func (s *Store) DocumentationAtPosition(ctx context.Context, bundleID int, path string, line, character int) (_ []string, err error) {
ctx, trace, endObservation := s.operations.documentationAtPosition.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.documentationAtPosition.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("line", line),
@ -131,7 +131,7 @@ LIMIT 1
// identifiers. Like locations, this method returns a map from result set identifiers to another map from
// document paths to locations within that document.
func (s *Store) locationsWithinFile(ctx context.Context, bundleID int, ids []precise.ID, path string, documentData precise.DocumentData) (_ map[precise.ID][]Location, err error) {
ctx, trace, endObservation := s.operations.locationsWithinFile.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.locationsWithinFile.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.Int("numIDs", len(ids)),
log.String("ids", idsToString(ids)),

@ -11,7 +11,7 @@ import (

// Stencil returns all ranges within a single document.
func (s *Store) Stencil(ctx context.Context, bundleID int, path string) (_ []Range, err error) {
ctx, trace, endObservation := s.operations.stencil.WithAndLogger(ctx, &err, observation.Args{LogFields: []log.Field{
ctx, trace, endObservation := s.operations.stencil.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
}})