Observation: use OpenTelemetry types rather than OpenTracing types (#51958)

This builds on #51954 to convert all uses of `LogFields` to use
OpenTelemetry attributes instead.
This commit is contained in:
Camden Cheek 2023-05-16 09:19:27 -06:00 committed by GitHub
parent 8414f1313e
commit f900515497
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
138 changed files with 1025 additions and 1174 deletions

View File

@ -1491,16 +1491,16 @@ func (s *Server) handleBatchLog(w http.ResponseWriter, r *http.Request) {
// Invoked multiple times from the handler defined below.
performGitLogCommand := func(ctx context.Context, repoCommit api.RepoCommit, format string) (output string, isRepoCloned bool, err error) {
ctx, _, endObservation := operations.batchLogSingle.With(ctx, &err, observation.Args{
LogFields: append(
[]otlog.Field{
otlog.String("format", format),
Attrs: append(
[]attribute.KeyValue{
attribute.String("format", format),
},
repoCommit.LogFields()...,
repoCommit.Attrs()...,
),
})
defer func() {
endObservation(1, observation.Args{LogFields: []otlog.Field{
otlog.Bool("isRepoCloned", isRepoCloned),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Bool("isRepoCloned", isRepoCloned),
}})
}()
@ -1532,8 +1532,8 @@ func (s *Server) handleBatchLog(w http.ResponseWriter, r *http.Request) {
instrumentedHandler := func(ctx context.Context) (statusCodeOnError int, err error) {
ctx, logger, endObservation := operations.batchLog.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []otlog.Field{
otlog.Int("statusCodeOnError", statusCodeOnError),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("statusCodeOnError", statusCodeOnError),
}})
}()

View File

@ -14,7 +14,6 @@ go_library(
"//internal/metrics",
"//internal/observation",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@com_github_prometheus_client_golang//prometheus",
"@io_opentelemetry_go_otel//attribute",
],

View File

@ -5,7 +5,6 @@ import (
"context"
"io"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/cmd/symbols/gitserver"
@ -61,10 +60,10 @@ func (f *repositoryFetcher) FetchRepositoryArchive(ctx context.Context, repo api
}
func (f *repositoryFetcher) fetchRepositoryArchive(ctx context.Context, repo api.RepoName, commit api.CommitID, paths []string, callback func(request ParseRequest)) (err error) {
ctx, trace, endObservation := f.operations.fetchRepositoryArchive.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repo", string(repo)),
log.String("commitID", string(commit)),
log.Int("paths", len(paths)),
ctx, trace, endObservation := f.operations.fetchRepositoryArchive.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repo", string(repo)),
attribute.String("commitID", string(commit)),
attribute.Int("paths", len(paths)),
}})
defer endObservation(1, observation.Args{})

View File

@ -17,7 +17,7 @@ go_library(
"//internal/observation",
"//internal/types",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -5,7 +5,7 @@ import (
"context"
"io"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
@ -55,10 +55,10 @@ func NewClient(observationCtx *observation.Context) GitserverClient {
}
func (c *gitserverClient) FetchTar(ctx context.Context, repo api.RepoName, commit api.CommitID, paths []string) (_ io.ReadCloser, err error) {
ctx, _, endObservation := c.operations.fetchTar.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repo", string(repo)),
log.String("commit", string(commit)),
log.Int("paths", len(paths)),
ctx, _, endObservation := c.operations.fetchTar.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repo", string(repo)),
attribute.String("commit", string(commit)),
attribute.Int("paths", len(paths)),
}})
defer endObservation(1, observation.Args{})
@ -78,10 +78,10 @@ func (c *gitserverClient) FetchTar(ctx context.Context, repo api.RepoName, commi
}
func (c *gitserverClient) GitDiff(ctx context.Context, repo api.RepoName, commitA, commitB api.CommitID) (_ Changes, err error) {
ctx, _, endObservation := c.operations.gitDiff.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repo", string(repo)),
log.String("commitA", string(commitA)),
log.String("commitB", string(commitB)),
ctx, _, endObservation := c.operations.gitDiff.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repo", string(repo)),
attribute.String("commitA", string(commitA)),
attribute.String("commitB", string(commitB)),
}})
defer endObservation(1, observation.Args{})

View File

@ -29,7 +29,6 @@ go_library(
"//internal/types",
"//lib/errors",
"@com_github_dustin_go_humanize//:go-humanize",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_go_ctags//:go-ctags",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",

View File

@ -5,13 +5,11 @@ import (
"strings"
"time"
"github.com/opentracing/opentracing-go/log"
"github.com/dustin/go-humanize"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/dustin/go-humanize"
"github.com/sourcegraph/sourcegraph/cmd/symbols/internal/api/observability"
"github.com/sourcegraph/sourcegraph/cmd/symbols/internal/database/store"
"github.com/sourcegraph/sourcegraph/cmd/symbols/internal/database/writer"
@ -29,22 +27,22 @@ func MakeSqliteSearchFunc(observationCtx *observation.Context, cachedDatabaseWri
operations := sharedobservability.NewOperations(observationCtx)
return func(ctx context.Context, args search.SymbolsParameters) (results []result.Symbol, err error) {
ctx, trace, endObservation := operations.Search.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repo", string(args.Repo)),
log.String("commitID", string(args.CommitID)),
log.String("query", args.Query),
log.Bool("isRegExp", args.IsRegExp),
log.Bool("isCaseSensitive", args.IsCaseSensitive),
log.Int("numIncludePatterns", len(args.IncludePatterns)),
log.String("includePatterns", strings.Join(args.IncludePatterns, ":")),
log.String("excludePattern", args.ExcludePattern),
log.Int("first", args.First),
log.Float64("timeoutSeconds", args.Timeout.Seconds()),
ctx, trace, endObservation := operations.Search.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repo", string(args.Repo)),
attribute.String("commitID", string(args.CommitID)),
attribute.String("query", args.Query),
attribute.Bool("isRegExp", args.IsRegExp),
attribute.Bool("isCaseSensitive", args.IsCaseSensitive),
attribute.Int("numIncludePatterns", len(args.IncludePatterns)),
attribute.String("includePatterns", strings.Join(args.IncludePatterns, ":")),
attribute.String("excludePattern", args.ExcludePattern),
attribute.Int("first", args.First),
attribute.Float64("timeoutSeconds", args.Timeout.Seconds()),
}})
defer func() {
endObservation(1, observation.Args{
MetricLabelValues: []string{observability.GetParseAmount(ctx)},
LogFields: []log.Field{log.String("parseAmount", observability.GetParseAmount(ctx))},
Attrs: []attribute.KeyValue{attribute.String("parseAmount", observability.GetParseAmount(ctx))},
})
}()
ctx = observability.SeedParseAmount(ctx)

View File

@ -19,7 +19,6 @@ go_library(
"//internal/search/result",
"//lib/errors",
"@com_github_inconshreveable_log15//:log15",
"@com_github_opentracing_opentracing_go//log",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_sourcegraph_go_ctags//:go-ctags",
"@com_github_sourcegraph_log//:log",

View File

@ -7,7 +7,6 @@ import (
"sync/atomic"
"github.com/inconshreveable/log15"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/go-ctags"
"go.opentelemetry.io/otel/attribute"
@ -56,11 +55,11 @@ func NewParser(
}
func (p *parser) Parse(ctx context.Context, args search.SymbolsParameters, paths []string) (_ <-chan SymbolOrError, err error) {
ctx, _, endObservation := p.operations.parse.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("repo", string(args.Repo)),
otlog.String("commitID", string(args.CommitID)),
otlog.Int("paths", len(paths)),
otlog.String("paths", strings.Join(paths, ":")),
ctx, _, endObservation := p.operations.parse.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repo", string(args.Repo)),
attribute.String("commitID", string(args.CommitID)),
attribute.Int("paths", len(paths)),
attribute.StringSlice("paths", paths),
}})
// NOTE: We call endObservation synchronously within this function when we
// return an error. Once we get on the success-only path, we install it to
@ -94,9 +93,9 @@ func (p *parser) Parse(ctx context.Context, args search.SymbolsParameters, paths
go func() {
defer func() {
endObservation(1, observation.Args{LogFields: []otlog.Field{
otlog.Int("numRequests", int(totalRequests)),
otlog.Int("numSymbols", int(totalSymbols)),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numRequests", int(totalRequests)),
attribute.Int("numSymbols", int(totalSymbols)),
}})
}()
@ -136,9 +135,9 @@ func min(a, b int) int {
}
func (p *parser) handleParseRequest(ctx context.Context, symbolOrErrors chan<- SymbolOrError, parseRequest fetcher.ParseRequest, totalSymbols *uint32) (err error) {
ctx, trace, endObservation := p.operations.handleParseRequest.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("path", parseRequest.Path),
otlog.Int("fileSize", len(parseRequest.Data)),
ctx, trace, endObservation := p.operations.handleParseRequest.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("path", parseRequest.Path),
attribute.Int("fileSize", len(parseRequest.Data)),
}})
defer endObservation(1, observation.Args{})

View File

@ -15,8 +15,8 @@ go_library(
"//internal/metrics",
"//internal/observation",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -6,8 +6,8 @@ import (
"io"
"net/http"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
@ -41,9 +41,9 @@ func New(observationCtx *observation.Context, options apiclient.BaseClientOption
}
func (c *Client) Exists(ctx context.Context, job types.Job, bucket string, key string) (exists bool, err error) {
ctx, _, endObservation := c.operations.exists.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("bucket", bucket),
otlog.String("key", key),
ctx, _, endObservation := c.operations.exists.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("bucket", bucket),
attribute.String("key", key),
}})
defer endObservation(1, observation.Args{})
@ -65,9 +65,9 @@ func (c *Client) Exists(ctx context.Context, job types.Job, bucket string, key s
}
func (c *Client) Get(ctx context.Context, job types.Job, bucket string, key string) (content io.ReadCloser, err error) {
ctx, _, endObservation := c.operations.get.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("bucket", bucket),
otlog.String("key", key),
ctx, _, endObservation := c.operations.get.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("bucket", bucket),
attribute.String("key", key),
}})
defer endObservation(1, observation.Args{})

View File

@ -19,10 +19,10 @@ go_library(
"//internal/version",
"//internal/workerutil",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_prometheus_common//expfmt",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -7,13 +7,11 @@ import (
"fmt"
"io"
"net/http"
"strconv"
"strings"
"time"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/common/expfmt"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/log"
@ -60,8 +58,8 @@ func (c *Client) QueuedCount(ctx context.Context) (int, error) {
}
func (c *Client) Dequeue(ctx context.Context, workerHostname string, extraArguments any) (job types.Job, _ bool, err error) {
ctx, _, endObservation := c.operations.dequeue.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
ctx, _, endObservation := c.operations.dequeue.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("queueName", c.options.QueueName),
}})
defer endObservation(1, observation.Args{})
@ -81,9 +79,9 @@ func (c *Client) Dequeue(ctx context.Context, workerHostname string, extraArgume
}
func (c *Client) MarkComplete(ctx context.Context, job types.Job) (_ bool, err error) {
ctx, _, endObservation := c.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", job.ID),
ctx, _, endObservation := c.operations.markComplete.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("queueName", c.options.QueueName),
attribute.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
@ -104,9 +102,9 @@ func (c *Client) MarkComplete(ctx context.Context, job types.Job) (_ bool, err e
}
func (c *Client) MarkErrored(ctx context.Context, job types.Job, failureMessage string) (_ bool, err error) {
ctx, _, endObservation := c.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", job.ID),
ctx, _, endObservation := c.operations.markErrored.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("queueName", c.options.QueueName),
attribute.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
@ -128,9 +126,9 @@ func (c *Client) MarkErrored(ctx context.Context, job types.Job, failureMessage
}
func (c *Client) MarkFailed(ctx context.Context, job types.Job, failureMessage string) (_ bool, err error) {
ctx, _, endObservation := c.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", job.ID),
ctx, _, endObservation := c.operations.markFailed.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("queueName", c.options.QueueName),
attribute.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
@ -152,9 +150,9 @@ func (c *Client) MarkFailed(ctx context.Context, job types.Job, failureMessage s
}
func (c *Client) Heartbeat(ctx context.Context, jobIDs []int) (knownIDs, cancelIDs []int, err error) {
ctx, _, endObservation := c.operations.heartbeat.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.String("jobIDs", intsToString(jobIDs)),
ctx, _, endObservation := c.operations.heartbeat.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("queueName", c.options.QueueName),
attribute.IntSlice("jobIDs", jobIDs),
}})
defer endObservation(1, observation.Args{})
@ -224,15 +222,6 @@ func (c *Client) Heartbeat(ctx context.Context, jobIDs []int) (knownIDs, cancelI
return respV1, cancelIDs, nil
}
func intsToString(ints []int) string {
segments := make([]string, 0, len(ints))
for _, id := range ints {
segments = append(segments, strconv.Itoa(id))
}
return strings.Join(segments, ", ")
}
func gatherMetrics(logger log.Logger, gatherer prometheus.Gatherer) (string, error) {
maxDuration := 3 * time.Second
ctx, cancel := context.WithTimeout(context.Background(), maxDuration)
@ -286,9 +275,9 @@ func (c *Client) Ping(ctx context.Context) (err error) {
}
func (c *Client) AddExecutionLogEntry(ctx context.Context, job types.Job, entry internalexecutor.ExecutionLogEntry) (entryID int, err error) {
ctx, _, endObservation := c.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", job.ID),
ctx, _, endObservation := c.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("queueName", c.options.QueueName),
attribute.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
@ -308,10 +297,10 @@ func (c *Client) AddExecutionLogEntry(ctx context.Context, job types.Job, entry
}
func (c *Client) UpdateExecutionLogEntry(ctx context.Context, job types.Job, entryID int, entry internalexecutor.ExecutionLogEntry) (err error) {
ctx, _, endObservation := c.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", job.ID),
otlog.Int("entryID", entryID),
ctx, _, endObservation := c.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("queueName", c.options.QueueName),
attribute.Int("jobID", job.ID),
attribute.Int("entryID", entryID),
}})
defer endObservation(1, observation.Args{})

View File

@ -20,8 +20,8 @@ go_library(
"@com_github_gorilla_mux//:mux",
"@com_github_graph_gophers_graphql_go//:graphql-go",
"@com_github_graph_gophers_graphql_go//relay",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -13,8 +13,8 @@ import (
"github.com/gorilla/mux"
"github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
"github.com/opentracing/opentracing-go/log"
sglog "github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/store"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
@ -74,8 +74,8 @@ func (h *FileHandler) Get() http.Handler {
func (h *FileHandler) get(r *http.Request) (_ io.Reader, statusCode int, err error) {
ctx, _, endObservation := h.operations.get.With(r.Context(), &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("statusCode", statusCode),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("statusCode", statusCode),
}})
}()
@ -113,8 +113,8 @@ func (h *FileHandler) Exists() http.Handler {
func (h *FileHandler) exists(r *http.Request) (statusCode int, err error) {
ctx, _, endObservation := h.operations.exists.With(r.Context(), &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("statusCode", statusCode),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("statusCode", statusCode),
}})
}()
@ -185,8 +185,8 @@ const maxMemory = 1 << 20 // 1MB
func (h *FileHandler) upload(r *http.Request) (resp uploadResponse, statusCode int, err error) {
ctx, _, endObservation := h.operations.upload.With(r.Context(), &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("statusCode", statusCode),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("statusCode", statusCode),
}})
}()

View File

@ -46,9 +46,9 @@ go_library(
"@com_github_gobwas_glob//:glob",
"@com_github_grafana_regexp//:regexp",
"@com_github_graph_gophers_graphql_go//:graphql-go",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@in_gopkg_yaml_v2//:yaml_v2",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -8,7 +8,7 @@ import (
"time"
"github.com/graph-gophers/graphql-go"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"gopkg.in/yaml.v2"
sglog "github.com/sourcegraph/log"
@ -321,8 +321,8 @@ type CreateBatchSpecOpts struct {
// CreateBatchSpec creates the BatchSpec.
func (s *Service) CreateBatchSpec(ctx context.Context, opts CreateBatchSpecOpts) (spec *btypes.BatchSpec, err error) {
ctx, _, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("changesetSpecs", len(opts.ChangesetSpecRandIDs)),
ctx, _, endObservation := s.operations.createBatchSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("changesetSpecs", len(opts.ChangesetSpecRandIDs)),
}})
defer endObservation(1, observation.Args{})
@ -413,9 +413,9 @@ type CreateBatchSpecFromRawOpts struct {
// CreateBatchSpecFromRaw creates the BatchSpec.
func (s *Service) CreateBatchSpecFromRaw(ctx context.Context, opts CreateBatchSpecFromRawOpts) (spec *btypes.BatchSpec, err error) {
ctx, _, endObservation := s.operations.createBatchSpecFromRaw.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Bool("allowIgnored", opts.AllowIgnored),
log.Bool("allowUnsupported", opts.AllowUnsupported),
ctx, _, endObservation := s.operations.createBatchSpecFromRaw.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Bool("allowIgnored", opts.AllowIgnored),
attribute.Bool("allowUnsupported", opts.AllowUnsupported),
}})
defer endObservation(1, observation.Args{})
@ -521,8 +521,8 @@ type ExecuteBatchSpecOpts struct {
// It returns an error if the batchSpecWorkspaceResolutionJob didn't finish
// successfully.
func (s *Service) ExecuteBatchSpec(ctx context.Context, opts ExecuteBatchSpecOpts) (batchSpec *btypes.BatchSpec, err error) {
ctx, _, endObservation := s.operations.executeBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("BatchSpecRandID", opts.BatchSpecRandID),
ctx, _, endObservation := s.operations.executeBatchSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("BatchSpecRandID", opts.BatchSpecRandID),
}})
defer endObservation(1, observation.Args{})
@ -603,8 +603,8 @@ type CancelBatchSpecOpts struct {
// CancelBatchSpec cancels all BatchSpecWorkspaceExecutionJobs associated with
// the BatchSpec.
func (s *Service) CancelBatchSpec(ctx context.Context, opts CancelBatchSpecOpts) (batchSpec *btypes.BatchSpec, err error) {
ctx, _, endObservation := s.operations.cancelBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("BatchSpecRandID", opts.BatchSpecRandID),
ctx, _, endObservation := s.operations.cancelBatchSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("BatchSpecRandID", opts.BatchSpecRandID),
}})
defer endObservation(1, observation.Args{})
@ -704,9 +704,9 @@ func (s *Service) ReplaceBatchSpecInput(ctx context.Context, opts ReplaceBatchSp
type UpsertBatchSpecInputOpts = CreateBatchSpecFromRawOpts
func (s *Service) UpsertBatchSpecInput(ctx context.Context, opts UpsertBatchSpecInputOpts) (spec *btypes.BatchSpec, err error) {
ctx, _, endObservation := s.operations.upsertBatchSpecInput.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Bool("allowIgnored", opts.AllowIgnored),
log.Bool("allowUnsupported", opts.AllowUnsupported),
ctx, _, endObservation := s.operations.upsertBatchSpecInput.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Bool("allowIgnored", opts.AllowIgnored),
attribute.Bool("allowUnsupported", opts.AllowUnsupported),
}})
defer endObservation(1, observation.Args{})

View File

@ -61,9 +61,9 @@ go_library(
"@com_github_jackc_pgconn//:pgconn",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_go_diff//diff",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -7,8 +7,8 @@ import (
"github.com/jackc/pgconn"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/go-diff/diff"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/api"
@ -191,8 +191,8 @@ func (s *Store) createBatchChangeQuery(c *btypes.BatchChange) *sqlf.Query {
// UpdateBatchChange updates the given bach change.
func (s *Store) UpdateBatchChange(ctx context.Context, c *btypes.BatchChange) (err error) {
ctx, _, endObservation := s.operations.updateBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(c.ID)),
ctx, _, endObservation := s.operations.updateBatchChange.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(c.ID)),
}})
defer endObservation(1, observation.Args{})
@ -239,8 +239,8 @@ func (s *Store) updateBatchChangeQuery(c *btypes.BatchChange) *sqlf.Query {
// DeleteBatchChange deletes the batch change with the given ID.
func (s *Store) DeleteBatchChange(ctx context.Context, id int64) (err error) {
ctx, _, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
ctx, _, endObservation := s.operations.deleteBatchChange.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -378,8 +378,8 @@ type GetBatchChangeOpts struct {
// GetBatchChange gets a batch change matching the given options.
func (s *Store) GetBatchChange(ctx context.Context, opts GetBatchChangeOpts) (bc *btypes.BatchChange, err error) {
ctx, _, endObservation := s.operations.getBatchChange.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
ctx, _, endObservation := s.operations.getBatchChange.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -475,8 +475,8 @@ type GetBatchChangeDiffStatOpts struct {
}
func (s *Store) GetBatchChangeDiffStat(ctx context.Context, opts GetBatchChangeDiffStatOpts) (stat *diff.Stat, err error) {
ctx, _, endObservation := s.operations.getBatchChangeDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(opts.BatchChangeID)),
ctx, _, endObservation := s.operations.getBatchChangeDiffStat.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchChangeID", int(opts.BatchChangeID)),
}})
defer endObservation(1, observation.Args{})
@ -516,8 +516,8 @@ func getBatchChangeDiffStatQuery(opts GetBatchChangeDiffStatOpts, authzConds *sq
}
func (s *Store) GetRepoDiffStat(ctx context.Context, repoID api.RepoID) (stat *diff.Stat, err error) {
ctx, _, endObservation := s.operations.getRepoDiffStat.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repoID", int(repoID)),
ctx, _, endObservation := s.operations.getRepoDiffStat.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repoID", int(repoID)),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,7 +5,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -39,8 +39,8 @@ var BatchSpecExecutionCacheEntryColums = SQLColumns{
// CreateBatchSpecExecutionCacheEntry creates the given batch spec workspace jobs.
func (s *Store) CreateBatchSpecExecutionCacheEntry(ctx context.Context, ce *btypes.BatchSpecExecutionCacheEntry) (err error) {
ctx, _, endObservation := s.operations.createBatchSpecExecutionCacheEntry.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("Key", ce.Key),
ctx, _, endObservation := s.operations.createBatchSpecExecutionCacheEntry.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("Key", ce.Key),
}})
defer endObservation(1, observation.Args{})
@ -101,8 +101,8 @@ type ListBatchSpecExecutionCacheEntriesOpts struct {
// ListBatchSpecExecutionCacheEntries gets the BatchSpecExecutionCacheEntries matching the given options.
func (s *Store) ListBatchSpecExecutionCacheEntries(ctx context.Context, opts ListBatchSpecExecutionCacheEntriesOpts) (cs []*btypes.BatchSpecExecutionCacheEntry, err error) {
ctx, _, endObservation := s.operations.listBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("Count", len(opts.Keys)),
ctx, _, endObservation := s.operations.listBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("Count", len(opts.Keys)),
}})
defer endObservation(1, observation.Args{})
@ -163,8 +163,8 @@ WHERE
// MarkUsedBatchSpecExecutionCacheEntries updates the LastUsedAt of the given cache entries.
func (s *Store) MarkUsedBatchSpecExecutionCacheEntries(ctx context.Context, ids []int64) (err error) {
ctx, _, endObservation := s.operations.markUsedBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(ids)),
ctx, _, endObservation := s.operations.markUsedBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("count", len(ids)),
}})
defer endObservation(1, observation.Args{})
@ -214,8 +214,8 @@ DELETE FROM batch_spec_execution_cache_entries WHERE id IN (SELECT id FROM ids)
`
func (s *Store) CleanBatchSpecExecutionCacheEntries(ctx context.Context, maxCacheSize int64) (err error) {
ctx, _, endObservation := s.operations.cleanBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("MaxTableSize", int(maxCacheSize)),
ctx, _, endObservation := s.operations.cleanBatchSpecExecutionCacheEntries.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("MaxTableSize", int(maxCacheSize)),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -63,7 +63,7 @@ func (e ErrResolutionJobAlreadyExists) Error() string {
// CreateBatchSpecResolutionJob creates the given batch spec resolutionjob jobs.
func (s *Store) CreateBatchSpecResolutionJob(ctx context.Context, wj *btypes.BatchSpecResolutionJob) (err error) {
ctx, _, endObservation := s.operations.createBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.createBatchSpecResolutionJob.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
q := s.createBatchSpecResolutionJobQuery(wj)
@ -117,9 +117,9 @@ type GetBatchSpecResolutionJobOpts struct {
// GetBatchSpecResolutionJob gets a BatchSpecResolutionJob matching the given options.
func (s *Store) GetBatchSpecResolutionJob(ctx context.Context, opts GetBatchSpecResolutionJobOpts) (job *btypes.BatchSpecResolutionJob, err error) {
ctx, _, endObservation := s.operations.getBatchSpecResolutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.Int("BatchSpecID", int(opts.BatchSpecID)),
ctx, _, endObservation := s.operations.getBatchSpecResolutionJob.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
attribute.Int("BatchSpecID", int(opts.BatchSpecID)),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,7 +5,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -104,8 +104,8 @@ const executableWorkspaceJobsConditionFmtstr = `
// CreateBatchSpecWorkspaceExecutionJobs creates the given batch spec workspace jobs.
func (s *Store) CreateBatchSpecWorkspaceExecutionJobs(ctx context.Context, batchSpecID int64) (err error) {
ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(batchSpecID)),
ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSpecID", int(batchSpecID)),
}})
defer endObservation(1, observation.Args{})
@ -131,7 +131,7 @@ WHERE
// CreateBatchSpecWorkspaceExecutionJobsForWorkspaces creates the batch spec workspace jobs for the given workspaces.
func (s *Store) CreateBatchSpecWorkspaceExecutionJobsForWorkspaces(ctx context.Context, workspaceIDs []int64) (err error) {
ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobsForWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.createBatchSpecWorkspaceExecutionJobsForWorkspaces.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
q := sqlf.Sprintf(createBatchSpecWorkspaceExecutionJobsForWorkspacesQueryFmtstr, versionForExecution(ctx, s), pq.Array(workspaceIDs))
@ -154,7 +154,7 @@ RETURNING id
// DeleteBatchSpecWorkspaceExecutionJobs deletes jobs based on the provided options.
func (s *Store) DeleteBatchSpecWorkspaceExecutionJobs(ctx context.Context, opts DeleteBatchSpecWorkspaceExecutionJobsOpts) (err error) {
ctx, _, endObservation := s.operations.deleteBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.deleteBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
if len(opts.IDs) == 0 && len(opts.WorkspaceIDs) == 0 {
@ -205,8 +205,8 @@ type GetBatchSpecWorkspaceExecutionJobOpts struct {
// GetBatchSpecWorkspaceExecutionJob gets a BatchSpecWorkspaceExecutionJob matching the given options.
func (s *Store) GetBatchSpecWorkspaceExecutionJob(ctx context.Context, opts GetBatchSpecWorkspaceExecutionJobOpts) (job *btypes.BatchSpecWorkspaceExecutionJob, err error) {
ctx, _, endObservation := s.operations.getBatchSpecWorkspaceExecutionJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
ctx, _, endObservation := s.operations.getBatchSpecWorkspaceExecutionJob.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -384,7 +384,7 @@ type CancelBatchSpecWorkspaceExecutionJobsOpts struct {
// The returned list of records may not match the list of the given IDs, if
// some of the records were already canceled, completed, failed, errored, etc.
func (s *Store) CancelBatchSpecWorkspaceExecutionJobs(ctx context.Context, opts CancelBatchSpecWorkspaceExecutionJobsOpts) (jobs []*btypes.BatchSpecWorkspaceExecutionJob, err error) {
ctx, _, endObservation := s.operations.cancelBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := s.operations.cancelBatchSpecWorkspaceExecutionJobs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
if opts.BatchSpecID == 0 && len(opts.IDs) == 0 {

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -109,8 +109,8 @@ type DeleteBatchSpecWorkspaceFileOpts struct {
// DeleteBatchSpecWorkspaceFile deletes BatchSpecWorkspaceFiles that match the specified DeleteBatchSpecWorkspaceFileOpts.
func (s *Store) DeleteBatchSpecWorkspaceFile(ctx context.Context, opts DeleteBatchSpecWorkspaceFileOpts) (err error) {
ctx, _, endObservation := s.operations.deleteBatchSpecWorkspaceFile.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
ctx, _, endObservation := s.operations.deleteBatchSpecWorkspaceFile.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -151,9 +151,9 @@ type GetBatchSpecWorkspaceFileOpts struct {
// GetBatchSpecWorkspaceFile retrieves the matching BatchSpecWorkspaceFile based on the provided GetBatchSpecWorkspaceFileOpts.
func (s *Store) GetBatchSpecWorkspaceFile(ctx context.Context, opts GetBatchSpecWorkspaceFileOpts) (file *btypes.BatchSpecWorkspaceFile, err error) {
ctx, _, endObservation := s.operations.getBatchSpecWorkspaceFile.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.String("RandID", opts.RandID),
ctx, _, endObservation := s.operations.getBatchSpecWorkspaceFile.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
attribute.String("RandID", opts.RandID),
}})
defer endObservation(1, observation.Args{})

View File

@ -8,7 +8,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/search"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
@ -68,8 +68,8 @@ var BatchSpecWorkspaceColums = SQLColumns{
// CreateBatchSpecWorkspace creates the given batch spec workspace jobs.
func (s *Store) CreateBatchSpecWorkspace(ctx context.Context, ws ...*btypes.BatchSpecWorkspace) (err error) {
ctx, _, endObservation := s.operations.createBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(ws)),
ctx, _, endObservation := s.operations.createBatchSpecWorkspace.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("count", len(ws)),
}})
defer endObservation(1, observation.Args{})
@ -150,8 +150,8 @@ type GetBatchSpecWorkspaceOpts struct {
// GetBatchSpecWorkspace gets a BatchSpecWorkspace matching the given options.
func (s *Store) GetBatchSpecWorkspace(ctx context.Context, opts GetBatchSpecWorkspaceOpts) (job *btypes.BatchSpecWorkspace, err error) {
ctx, _, endObservation := s.operations.getBatchSpecWorkspace.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
ctx, _, endObservation := s.operations.getBatchSpecWorkspace.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -382,8 +382,8 @@ AND NOT %s
// MarkSkippedBatchSpecWorkspaces marks the workspace that were skipped in
// CreateBatchSpecWorkspaceExecutionJobs as skipped.
func (s *Store) MarkSkippedBatchSpecWorkspaces(ctx context.Context, batchSpecID int64) (err error) {
ctx, _, endObservation := s.operations.markSkippedBatchSpecWorkspaces.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(batchSpecID)),
ctx, _, endObservation := s.operations.markSkippedBatchSpecWorkspaces.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSpecID", int(batchSpecID)),
}})
defer endObservation(1, observation.Args{})
@ -494,8 +494,8 @@ WHERE
// DisableBatchSpecWorkspaceExecutionCache removes caching information from workspaces prior to execution.
func (s *Store) DisableBatchSpecWorkspaceExecutionCache(ctx context.Context, batchSpecID int64) (err error) {
ctx, _, endObservation := s.operations.disableBatchSpecWorkspaceExecutionCache.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(batchSpecID)),
ctx, _, endObservation := s.operations.disableBatchSpecWorkspaceExecutionCache.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSpecID", int(batchSpecID)),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/api"
@ -115,8 +115,8 @@ func (s *Store) createBatchSpecQuery(c *btypes.BatchSpec) (*sqlf.Query, error) {
// UpdateBatchSpec updates the given BatchSpec.
func (s *Store) UpdateBatchSpec(ctx context.Context, c *btypes.BatchSpec) (err error) {
ctx, _, endObservation := s.operations.updateBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(c.ID)),
ctx, _, endObservation := s.operations.updateBatchSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(c.ID)),
}})
defer endObservation(1, observation.Args{})
@ -167,8 +167,8 @@ func (s *Store) updateBatchSpecQuery(c *btypes.BatchSpec) (*sqlf.Query, error) {
// DeleteBatchSpec deletes the BatchSpec with the given ID.
func (s *Store) DeleteBatchSpec(ctx context.Context, id int64) (err error) {
ctx, _, endObservation := s.operations.deleteBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
ctx, _, endObservation := s.operations.deleteBatchSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -256,9 +256,9 @@ type GetBatchSpecOpts struct {
// GetBatchSpec gets a BatchSpec matching the given options.
func (s *Store) GetBatchSpec(ctx context.Context, opts GetBatchSpecOpts) (spec *btypes.BatchSpec, err error) {
ctx, _, endObservation := s.operations.getBatchSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.String("randID", opts.RandID),
ctx, _, endObservation := s.operations.getBatchSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
attribute.String("randID", opts.RandID),
}})
defer endObservation(1, observation.Args{})
@ -480,9 +480,9 @@ ON
// 🚨 SECURITY: Repos that the current user (based on the context) does not have
// access to will be filtered out.
func (s *Store) ListBatchSpecRepoIDs(ctx context.Context, id int64) (ids []api.RepoID, err error) {
ctx, _, endObservation := s.operations.listBatchSpecRepoIDs.With(ctx, &err, observation.Args{
LogFields: []log.Field{log.Int64("ID", id)},
})
ctx, _, endObservation := s.operations.listBatchSpecRepoIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int64("ID", id),
}})
defer endObservation(1, observation.Args{})
authzConds, err := database.AuthzQueryConds(ctx, database.NewDBWith(s.logger, s))

View File

@ -5,7 +5,7 @@ import (
"time"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -52,8 +52,8 @@ type GetBulkOperationOpts struct {
// GetBulkOperation gets a BulkOperation matching the given options.
func (s *Store) GetBulkOperation(ctx context.Context, opts GetBulkOperationOpts) (op *btypes.BulkOperation, err error) {
ctx, _, endObservation := s.operations.getBulkOperation.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("ID", opts.ID),
ctx, _, endObservation := s.operations.getBulkOperation.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("ID", opts.ID),
}})
defer endObservation(1, observation.Args{})
@ -179,8 +179,8 @@ type CountBulkOperationsOpts struct {
// CountBulkOperations gets the count of BulkOperations in the given batch change.
func (s *Store) CountBulkOperations(ctx context.Context, opts CountBulkOperationsOpts) (count int, err error) {
ctx, _, endObservation := s.operations.countBulkOperations.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(opts.BatchChangeID)),
ctx, _, endObservation := s.operations.countBulkOperations.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchChangeID", int(opts.BatchChangeID)),
}})
defer endObservation(1, observation.Args{})
@ -221,8 +221,8 @@ type ListBulkOperationErrorsOpts struct {
// ListBulkOperationErrors gets a list of BulkOperationErrors in a given BulkOperation.
func (s *Store) ListBulkOperationErrors(ctx context.Context, opts ListBulkOperationErrorsOpts) (es []*btypes.BulkOperationError, err error) {
ctx, _, endObservation := s.operations.listBulkOperationErrors.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("bulkOperationID", opts.BulkOperationID),
ctx, _, endObservation := s.operations.listBulkOperationErrors.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("bulkOperationID", opts.BulkOperationID),
}})
defer endObservation(1, observation.Args{})

View File

@ -7,7 +7,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -25,9 +25,9 @@ type GetChangesetEventOpts struct {
// GetChangesetEvent gets a changeset matching the given options.
func (s *Store) GetChangesetEvent(ctx context.Context, opts GetChangesetEventOpts) (ev *btypes.ChangesetEvent, err error) {
ctx, _, endObservation := s.operations.getChangesetEvent.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.Int("changesetID", int(opts.ChangesetID)),
ctx, _, endObservation := s.operations.getChangesetEvent.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
attribute.Int("changesetID", int(opts.ChangesetID)),
}})
defer endObservation(1, observation.Args{})
@ -159,8 +159,8 @@ type CountChangesetEventsOpts struct {
// CountChangesetEvents returns the number of changeset events in the database.
func (s *Store) CountChangesetEvents(ctx context.Context, opts CountChangesetEventsOpts) (count int, err error) {
ctx, _, endObservation := s.operations.countChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("changesetID", int(opts.ChangesetID)),
ctx, _, endObservation := s.operations.countChangesetEvents.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("changesetID", int(opts.ChangesetID)),
}})
defer endObservation(1, observation.Args{})
@ -188,8 +188,8 @@ func countChangesetEventsQuery(opts *CountChangesetEventsOpts) *sqlf.Query {
// UpsertChangesetEvents creates or updates the given ChangesetEvents.
func (s *Store) UpsertChangesetEvents(ctx context.Context, cs ...*btypes.ChangesetEvent) (err error) {
ctx, _, endObservation := s.operations.upsertChangesetEvents.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(cs)),
ctx, _, endObservation := s.operations.upsertChangesetEvents.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("count", len(cs)),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,7 +5,7 @@ import (
"encoding/json"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database/batch"
@ -57,8 +57,8 @@ var changesetJobColumns = SQLColumns{
// CreateChangesetJob creates the given changeset jobs.
func (s *Store) CreateChangesetJob(ctx context.Context, cs ...*btypes.ChangesetJob) (err error) {
ctx, _, endObservation := s.operations.createChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(cs)),
ctx, _, endObservation := s.operations.createChangesetJob.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -125,8 +125,8 @@ type GetChangesetJobOpts struct {
// GetChangesetJob gets a ChangesetJob matching the given options.
func (s *Store) GetChangesetJob(ctx context.Context, opts GetChangesetJobOpts) (job *btypes.ChangesetJob, err error) {
ctx, _, endObservation := s.operations.getChangesetJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
ctx, _, endObservation := s.operations.getChangesetJob.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})

View File

@ -7,7 +7,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/search"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
@ -78,8 +78,8 @@ var oneGigabyte = 1000000000
// CreateChangesetSpec creates the given ChangesetSpecs.
func (s *Store) CreateChangesetSpec(ctx context.Context, cs ...*btypes.ChangesetSpec) (err error) {
ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("Count", len(cs)),
ctx, _, endObservation := s.operations.createChangesetSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("Count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -163,8 +163,8 @@ func (s *Store) CreateChangesetSpec(ctx context.Context, cs ...*btypes.Changeset
// UpdateChangesetSpecBatchSpecID updates the given ChangesetSpecs to be owned by the given batch spec.
func (s *Store) UpdateChangesetSpecBatchSpecID(ctx context.Context, cs []int64, batchSpec int64) (err error) {
ctx, _, endObservation := s.operations.updateChangesetSpecBatchSpecID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("Count", len(cs)),
ctx, _, endObservation := s.operations.updateChangesetSpecBatchSpecID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("Count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -189,8 +189,8 @@ func (s *Store) updateChangesetSpecQuery(cs []int64, batchSpec int64) *sqlf.Quer
// DeleteChangesetSpec deletes the ChangesetSpec with the given ID.
func (s *Store) DeleteChangesetSpec(ctx context.Context, id int64) (err error) {
ctx, _, endObservation := s.operations.deleteChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
ctx, _, endObservation := s.operations.deleteChangesetSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -210,8 +210,8 @@ type CountChangesetSpecsOpts struct {
// CountChangesetSpecs returns the number of changeset specs in the database.
func (s *Store) CountChangesetSpecs(ctx context.Context, opts CountChangesetSpecsOpts) (count int, err error) {
ctx, _, endObservation := s.operations.countChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(opts.BatchSpecID)),
ctx, _, endObservation := s.operations.countChangesetSpecs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSpecID", int(opts.BatchSpecID)),
}})
defer endObservation(1, observation.Args{})
@ -260,9 +260,9 @@ type GetChangesetSpecOpts struct {
// GetChangesetSpec gets a changeset spec matching the given options.
func (s *Store) GetChangesetSpec(ctx context.Context, opts GetChangesetSpecOpts) (spec *btypes.ChangesetSpec, err error) {
ctx, _, endObservation := s.operations.getChangesetSpec.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
log.String("randID", opts.RandID),
ctx, _, endObservation := s.operations.getChangesetSpec.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
attribute.String("randID", opts.RandID),
}})
defer endObservation(1, observation.Args{})
@ -503,8 +503,8 @@ type DeleteChangesetSpecsOpts struct {
// DeleteChangesetSpecs deletes the ChangesetSpecs matching the given options.
func (s *Store) DeleteChangesetSpecs(ctx context.Context, opts DeleteChangesetSpecsOpts) (err error) {
ctx, _, endObservation := s.operations.deleteChangesetSpecs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(opts.BatchSpecID)),
ctx, _, endObservation := s.operations.deleteChangesetSpecs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSpecID", int(opts.BatchSpecID)),
}})
defer endObservation(1, observation.Args{})
@ -638,9 +638,9 @@ type GetRewirerMappingsOpts struct {
// Spec 4 should be attached to Changeset 4, since it tracks PR #333 in Repo C. (ChangesetSpec = 4, Changeset = 4)
// Changeset 3 doesn't have a matching spec and should be detached from the batch change (and closed) (ChangesetSpec == 0, Changeset = 3).
func (s *Store) GetRewirerMappings(ctx context.Context, opts GetRewirerMappingsOpts) (mappings btypes.RewirerMappings, err error) {
ctx, _, endObservation := s.operations.getRewirerMappings.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSpecID", int(opts.BatchSpecID)),
log.Int("batchChangeID", int(opts.BatchChangeID)),
ctx, _, endObservation := s.operations.getRewirerMappings.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSpecID", int(opts.BatchSpecID)),
attribute.Int("batchChangeID", int(opts.BatchChangeID)),
}})
defer endObservation(1, observation.Args{})

View File

@ -12,10 +12,10 @@ import (
adobatches "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/sources/azuredevops"
"github.com/sourcegraph/sourcegraph/internal/extsvc/azuredevops"
"go.opentelemetry.io/otel/attribute"
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/search"
bbcs "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/sources/bitbucketcloud"
@ -245,8 +245,8 @@ var temporaryChangesetInsertColumns = []string{
// CreateChangeset creates the given Changesets.
func (s *Store) CreateChangeset(ctx context.Context, cs ...*btypes.Changeset) (err error) {
ctx, _, endObservation := s.operations.createChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(cs)),
ctx, _, endObservation := s.operations.createChangeset.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -343,8 +343,8 @@ func (s *Store) CreateChangeset(ctx context.Context, cs ...*btypes.Changeset) (e
// DeleteChangeset deletes the Changeset with the given ID.
func (s *Store) DeleteChangeset(ctx context.Context, id int64) (err error) {
ctx, _, endObservation := s.operations.deleteChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
ctx, _, endObservation := s.operations.deleteChangeset.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -478,8 +478,8 @@ type GetChangesetOpts struct {
// GetChangeset gets a changeset matching the given options.
func (s *Store) GetChangeset(ctx context.Context, opts GetChangesetOpts) (ch *btypes.Changeset, err error) {
ctx, _, endObservation := s.operations.getChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
ctx, _, endObservation := s.operations.getChangeset.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -769,8 +769,8 @@ func listChangesetsQuery(opts *ListChangesetsOpts, authzConds *sqlf.Query) *sqlf
// `resetState` argument but *only if* the `currentState` matches its current
// `reconciler_state`.
func (s *Store) EnqueueChangeset(ctx context.Context, cs *btypes.Changeset, resetState, currentState btypes.ReconcilerState) (err error) {
ctx, _, endObservation := s.operations.enqueueChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
ctx, _, endObservation := s.operations.enqueueChangeset.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -824,8 +824,8 @@ func (s *Store) enqueueChangesetQuery(cs *btypes.Changeset, resetState, currentS
// UpdateChangeset updates the given Changeset.
func (s *Store) UpdateChangeset(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, _, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
ctx, _, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -927,8 +927,8 @@ RETURNING
// Once the changesets are in the temporary table, the values are then used to update their "previous" value in the actual
// changesets table.
func (s *Store) UpdateChangesetsForApply(ctx context.Context, cs []*btypes.Changeset) (err error) {
ctx, _, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("count", len(cs)),
ctx, _, endObservation := s.operations.updateChangeset.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("count", len(cs)),
}})
defer endObservation(1, observation.Args{})
@ -1028,8 +1028,8 @@ WHERE c.id = source.id
// UpdateChangesetBatchChanges updates only the `batch_changes` & `updated_at`
// columns of the given Changeset.
func (s *Store) UpdateChangesetBatchChanges(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, _, endObservation := s.operations.updateChangesetBatchChanges.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
ctx, _, endObservation := s.operations.updateChangesetBatchChanges.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -1044,8 +1044,8 @@ func (s *Store) UpdateChangesetBatchChanges(ctx context.Context, cs *btypes.Chan
// UpdateChangesetUiPublicationState updates only the `ui_publication_state` &
// `updated_at` columns of the given Changeset.
func (s *Store) UpdateChangesetUiPublicationState(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, _, endObservation := s.operations.updateChangesetUIPublicationState.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
ctx, _, endObservation := s.operations.updateChangesetUIPublicationState.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -1085,8 +1085,8 @@ RETURNING
// that relate to the state of the changeset on the code host, e.g.
// external_branch, external_state, etc.
func (s *Store) UpdateChangesetCodeHostState(ctx context.Context, cs *btypes.Changeset) (err error) {
ctx, _, endObservation := s.operations.updateChangesetCodeHostState.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(cs.ID)),
ctx, _, endObservation := s.operations.updateChangesetCodeHostState.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(cs.ID)),
}})
defer endObservation(1, observation.Args{})
@ -1190,10 +1190,10 @@ var CanceledChangesetFailureMessage = "Canceled"
// currently processing changesets have finished executing.
func (s *Store) CancelQueuedBatchChangeChangesets(ctx context.Context, batchChangeID int64) (err error) {
var iterations int
ctx, _, endObservation := s.operations.cancelQueuedBatchChangeChangesets.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(batchChangeID)),
ctx, _, endObservation := s.operations.cancelQueuedBatchChangeChangesets.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchChangeID", int(batchChangeID)),
}})
defer endObservation(1, observation.Args{LogFields: []log.Field{log.Int("iterations", iterations)}})
defer endObservation(1, observation.Args{Attrs: []attribute.KeyValue{attribute.Int("iterations", iterations)}})
// Just for safety, so we don't end up with stray cancel requests bombarding
// the DB with 10 requests a second forever:
@ -1266,11 +1266,11 @@ WHERE
// passed.
func (s *Store) EnqueueChangesetsToClose(ctx context.Context, batchChangeID int64) (err error) {
var iterations int
ctx, _, endObservation := s.operations.enqueueChangesetsToClose.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(batchChangeID)),
ctx, _, endObservation := s.operations.enqueueChangesetsToClose.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchChangeID", int(batchChangeID)),
}})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{log.Int("iterations", iterations)}})
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{attribute.Int("iterations", iterations)}})
}()
// Just for safety, so we don't end up with stray cancel requests bombarding
@ -1485,8 +1485,8 @@ func ScanChangeset(t *btypes.Changeset, s dbutil.Scanner) error {
// GetChangesetsStats returns statistics on all the changesets associated to the given batch change,
// or all changesets across the instance.
func (s *Store) GetChangesetsStats(ctx context.Context, batchChangeID int64) (stats btypes.ChangesetsStats, err error) {
ctx, _, endObservation := s.operations.getChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchChangeID", int(batchChangeID)),
ctx, _, endObservation := s.operations.getChangesetsStats.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchChangeID", int(batchChangeID)),
}})
defer endObservation(1, observation.Args{})
@ -1538,8 +1538,8 @@ WHERE
// GetRepoChangesetsStats returns statistics on all the changesets associated to the given repo.
func (s *Store) GetRepoChangesetsStats(ctx context.Context, repoID api.RepoID) (stats *btypes.RepoChangesetsStats, err error) {
ctx, _, endObservation := s.operations.getRepoChangesetsStats.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repoID", int(repoID)),
ctx, _, endObservation := s.operations.getRepoChangesetsStats.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repoID", int(repoID)),
}})
defer endObservation(1, observation.Args{})
@ -1635,8 +1635,8 @@ RETURNING %s
`
func (s *Store) GetChangesetPlaceInSchedulerQueue(ctx context.Context, id int64) (place int, err error) {
ctx, _, endObservation := s.operations.getChangesetPlaceInSchedulerQueue.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
ctx, _, endObservation := s.operations.getChangesetPlaceInSchedulerQueue.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -1779,8 +1779,8 @@ func uiPublicationStateColumn(c *btypes.Changeset) *string {
// CleanDetachedChangesets deletes changesets that have been detached after duration specified.
func (s *Store) CleanDetachedChangesets(ctx context.Context, retention time.Duration) (err error) {
ctx, _, endObservation := s.operations.cleanDetachedChangesets.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("Retention", retention.String()),
ctx, _, endObservation := s.operations.cleanDetachedChangesets.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Stringer("Retention", retention),
}})
defer endObservation(1, observation.Args{})

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -73,8 +73,8 @@ func createSiteCredentialQuery(ctx context.Context, c *btypes.SiteCredential, ke
}
func (s *Store) DeleteSiteCredential(ctx context.Context, id int64) (err error) {
ctx, _, endObservation := s.operations.deleteSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(id)),
ctx, _, endObservation := s.operations.deleteSiteCredential.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(id)),
}})
defer endObservation(1, observation.Args{})
@ -113,8 +113,8 @@ type GetSiteCredentialOpts struct {
}
func (s *Store) GetSiteCredential(ctx context.Context, opts GetSiteCredentialOpts) (sc *btypes.SiteCredential, err error) {
ctx, _, endObservation := s.operations.getSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(opts.ID)),
ctx, _, endObservation := s.operations.getSiteCredential.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(opts.ID)),
}})
defer endObservation(1, observation.Args{})
@ -214,8 +214,8 @@ func listSiteCredentialsQuery(opts ListSiteCredentialsOpts) *sqlf.Query {
}
func (s *Store) UpdateSiteCredential(ctx context.Context, c *btypes.SiteCredential) (err error) {
ctx, _, endObservation := s.operations.updateSiteCredential.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("ID", int(c.ID)),
ctx, _, endObservation := s.operations.updateSiteCredential.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("ID", int(c.ID)),
}})
defer endObservation(1, observation.Args{})

View File

@ -34,7 +34,6 @@ go_library(
"//internal/repoupdater",
"//lib/codeintel/autoindex/config",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],

View File

@ -23,7 +23,6 @@ go_library(
"//internal/observation",
"//internal/repoupdater/protocol",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -3,7 +3,6 @@ package enqueuer
import (
"context"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/autoindexing/internal/inference"
@ -57,12 +56,10 @@ func NewIndexEnqueuer(
// will cause this method to no-op. Note that this is NOT a guarantee that there will never be any duplicate records
// when the flag is false.
func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) (_ []uploadsshared.Index, err error) {
ctx, trace, endObservation := s.operations.queueIndex.With(ctx, &err, observation.Args{
LogFields: []otlog.Field{
otlog.Int("repositoryID", repositoryID),
otlog.String("rev", rev),
},
})
ctx, trace, endObservation := s.operations.queueIndex.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("rev", rev),
}})
defer endObservation(1, observation.Args{})
repo, err := s.repoStore.Get(ctx, api.RepoID(repositoryID))
@ -83,13 +80,11 @@ func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev,
// QueueIndexesForPackage enqueues index jobs for a dependency of a recently-processed precise code
// intelligence index.
func (s *IndexEnqueuer) QueueIndexesForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo, assumeSynced bool) (err error) {
ctx, trace, endObservation := s.operations.queueIndexForPackage.With(ctx, &err, observation.Args{
LogFields: []otlog.Field{
otlog.String("scheme", pkg.Scheme),
otlog.String("name", string(pkg.Name)),
otlog.String("version", pkg.Version),
},
})
ctx, trace, endObservation := s.operations.queueIndexForPackage.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("scheme", pkg.Scheme),
attribute.String("name", string(pkg.Name)),
attribute.String("version", pkg.Version),
}})
defer endObservation(1, observation.Args{})
repoName, revision, ok := inference.InferRepositoryAndRevision(pkg)

View File

@ -34,7 +34,6 @@ go_library(
"//internal/ratelimit",
"//lib/codeintel/autoindex/config",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@com_github_yuin_gopher_lua//:gopher-lua",
"@io_opentelemetry_go_otel//attribute",

View File

@ -9,7 +9,6 @@ import (
"strings"
"time"
otelog "github.com/opentracing/opentracing-go/log"
baselua "github.com/yuin/gopher-lua"
"go.opentelemetry.io/otel/attribute"
@ -84,9 +83,9 @@ func newService(
// will overwrite them (to disable or change default behavior). Each recognizer's generate function
// is invoked and the resulting index jobs are combined into a flattened list.
func (s *Service) InferIndexJobs(ctx context.Context, repo api.RepoName, commit, overrideScript string) (_ []config.IndexJob, err error) {
ctx, _, endObservation := s.operations.inferIndexJobs.With(ctx, &err, observation.Args{LogFields: []otelog.Field{
otelog.String("repo", string(repo)),
otelog.String("commit", commit),
ctx, _, endObservation := s.operations.inferIndexJobs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repo", string(repo)),
attribute.String("commit", commit),
}})
defer endObservation(1, observation.Args{})
@ -131,9 +130,9 @@ func (s *Service) InferIndexJobs(ctx context.Context, repo api.RepoName, commit,
// will overwrite them (to disable or change default behavior). Each recognizer's hints function is
// invoked and the resulting index job hints are combined into a flattened list.
func (s *Service) InferIndexJobHints(ctx context.Context, repo api.RepoName, commit, overrideScript string) (_ []config.IndexJobHint, err error) {
ctx, _, endObservation := s.operations.inferIndexJobHints.With(ctx, &err, observation.Args{LogFields: []otelog.Field{
otelog.String("repo", string(repo)),
otelog.String("commit", commit),
ctx, _, endObservation := s.operations.inferIndexJobHints.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repo", string(repo)),
attribute.String("commit", commit),
}})
defer endObservation(1, observation.Args{})

View File

@ -27,9 +27,9 @@ go_library(
"//internal/observation",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -5,7 +5,7 @@ import (
"strings"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/observation"
@ -34,8 +34,8 @@ LIMIT 1
`
func (s *store) SetInferenceScript(ctx context.Context, script string) (err error) {
ctx, _, endObservation := s.operations.setInferenceScript.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("scriptSize", len(script)),
ctx, _, endObservation := s.operations.setInferenceScript.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("scriptSize", len(script)),
}})
defer endObservation(1, observation.Args{})

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/autoindexing/shared"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -13,8 +13,8 @@ import (
)
func (s *store) RepositoryExceptions(ctx context.Context, repositoryID int) (canSchedule, canInfer bool, err error) {
ctx, _, endObservation := s.operations.repositoryExceptions.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.repositoryExceptions.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -43,8 +43,8 @@ WHERE cae.repository_id = %s
`
func (s *store) SetRepositoryExceptions(ctx context.Context, repositoryID int, canSchedule, canInfer bool) (err error) {
ctx, _, endObservation := s.operations.setRepositoryExceptions.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.setRepositoryExceptions.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -65,8 +65,8 @@ ON CONFLICT (repository_id) DO UPDATE SET
`
func (s *store) GetIndexConfigurationByRepositoryID(ctx context.Context, repositoryID int) (_ shared.IndexConfiguration, _ bool, err error) {
ctx, _, endObservation := s.operations.getIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.getIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -83,9 +83,9 @@ WHERE c.repository_id = %s
`
func (s *store) UpdateIndexConfigurationByRepositoryID(ctx context.Context, repositoryID int, data []byte) (err error) {
ctx, _, endObservation := s.operations.updateIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.Int("dataSize", len(data)),
ctx, _, endObservation := s.operations.updateIndexConfigurationByRepositoryID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.Int("dataSize", len(data)),
}})
defer endObservation(1, observation.Args{})

View File

@ -7,7 +7,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -16,8 +16,8 @@ import (
)
func (s *store) TopRepositoriesToConfigure(ctx context.Context, limit int) (_ []shared.RepositoryWithCount, err error) {
ctx, _, endObservation := s.operations.topRepositoriesToConfigure.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("limit", limit),
ctx, _, endObservation := s.operations.topRepositoriesToConfigure.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("limit", limit),
}})
defer endObservation(1, observation.Args{})
@ -57,9 +57,9 @@ LIMIT %s
`
func (s *store) RepositoryIDsWithConfiguration(ctx context.Context, offset, limit int) (_ []shared.RepositoryWithAvailableIndexers, totalCount int, err error) {
ctx, _, endObservation := s.operations.repositoryIDsWithConfiguration.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("offset", offset),
log.Int("limit", limit),
ctx, _, endObservation := s.operations.repositoryIDsWithConfiguration.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("offset", offset),
attribute.Int("limit", limit),
}})
defer endObservation(1, observation.Args{})
@ -83,8 +83,8 @@ OFFSET %s
`
func (s *store) GetLastIndexScanForRepository(ctx context.Context, repositoryID int) (_ *time.Time, err error) {
ctx, _, endObservation := s.operations.getLastIndexScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.getLastIndexScanForRepository.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -104,10 +104,10 @@ SELECT last_index_scan_at FROM lsif_last_index_scan WHERE repository_id = %s
`
func (s *store) SetConfigurationSummary(ctx context.Context, repositoryID int, numEvents int, availableIndexers map[string]shared.AvailableIndexer) (err error) {
ctx, _, endObservation := s.operations.setConfigurationSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.Int("numEvents", numEvents),
log.Int("numIndexers", len(availableIndexers)),
ctx, _, endObservation := s.operations.setConfigurationSummary.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.Int("numEvents", numEvents),
attribute.Int("numIndexers", len(availableIndexers)),
}})
defer endObservation(1, observation.Args{})
@ -129,8 +129,8 @@ SET
`
func (s *store) TruncateConfigurationSummary(ctx context.Context, numRecordsToRetain int) (err error) {
ctx, _, endObservation := s.operations.truncateConfigurationSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numRecordsToRetain", numRecordsToRetain),
ctx, _, endObservation := s.operations.truncateConfigurationSummary.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numRecordsToRetain", numRecordsToRetain),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,20 +5,20 @@ import (
"time"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
func (s *store) InsertDependencyIndexingJob(ctx context.Context, uploadID int, externalServiceKind string, syncTime time.Time) (id int, err error) {
ctx, _, endObservation := s.operations.insertDependencyIndexingJob.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadId", uploadID),
log.String("extSvcKind", externalServiceKind),
ctx, _, endObservation := s.operations.insertDependencyIndexingJob.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("uploadId", uploadID),
attribute.String("extSvcKind", externalServiceKind),
}})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("id", id),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
}()
@ -38,9 +38,9 @@ RETURNING id
`
func (s *store) QueueRepoRev(ctx context.Context, repositoryID int, rev string) (err error) {
ctx, _, endObservation := s.operations.queueRepoRev.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("rev", rev),
ctx, _, endObservation := s.operations.queueRepoRev.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("rev", rev),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,7 +5,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -16,9 +16,9 @@ import (
)
func (s *store) IsQueued(ctx context.Context, repositoryID int, commit string) (_ bool, err error) {
ctx, _, endObservation := s.operations.isQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
ctx, _, endObservation := s.operations.isQueued.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
}})
defer endObservation(1, observation.Args{})
@ -74,11 +74,11 @@ SELECT
`
func (s *store) IsQueuedRootIndexer(ctx context.Context, repositoryID int, commit string, root string, indexer string) (_ bool, err error) {
ctx, _, endObservation := s.operations.isQueuedRootIndexer.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("root", root),
log.String("indexer", indexer),
ctx, _, endObservation := s.operations.isQueuedRootIndexer.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
attribute.String("root", root),
attribute.String("indexer", indexer),
}})
defer endObservation(1, observation.Args{})
@ -110,8 +110,8 @@ LIMIT 1
// - share code with uploads store (should own this?)
func (s *store) InsertIndexes(ctx context.Context, indexes []shared.Index) (_ []shared.Index, err error) {
ctx, _, endObservation := s.operations.insertIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numIndexes", len(indexes)),
ctx, _, endObservation := s.operations.insertIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIndexes", len(indexes)),
}})
endObservation(1, observation.Args{})

View File

@ -6,7 +6,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -33,10 +33,10 @@ func (s *store) GetRepositoriesForIndexScan(
repositoryMatchLimitValue = *repositoryMatchLimit
}
ctx, _, endObservation := s.operations.getRepositoriesForIndexScan.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Bool("allowGlobalPolicies", allowGlobalPolicies),
log.Int("repositoryMatchLimit", repositoryMatchLimitValue),
log.Int("limit", limit),
ctx, _, endObservation := s.operations.getRepositoriesForIndexScan.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Bool("allowGlobalPolicies", allowGlobalPolicies),
attribute.Int("repositoryMatchLimit", repositoryMatchLimitValue),
attribute.Int("limit", limit),
}})
defer endObservation(1, observation.Args{})
@ -173,8 +173,8 @@ WHERE
//
func (s *store) GetQueuedRepoRev(ctx context.Context, batchSize int) (_ []RepoRev, err error) {
ctx, _, endObservation := s.operations.getQueuedRepoRev.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSize", batchSize),
ctx, _, endObservation := s.operations.getQueuedRepoRev.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSize", batchSize),
}})
defer endObservation(1, observation.Args{})
@ -194,8 +194,8 @@ LIMIT %s
`
func (s *store) MarkRepoRevsAsProcessed(ctx context.Context, ids []int) (err error) {
ctx, _, endObservation := s.operations.markRepoRevsAsProcessed.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numIDs", len(ids)),
ctx, _, endObservation := s.operations.markRepoRevsAsProcessed.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIDs", len(ids)),
}})
defer endObservation(1, observation.Args{})

View File

@ -4,7 +4,6 @@ import (
"context"
"time"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
@ -82,11 +81,9 @@ func (s *Service) GetIndexConfigurationByRepositoryID(ctx context.Context, repos
// InferIndexConfiguration looks at the repository contents at the latest commit on the default branch of the given
// repository and determines an index configuration that is likely to succeed.
func (s *Service) InferIndexConfiguration(ctx context.Context, repositoryID int, commit string, localOverrideScript string, bypassLimit bool) (_ *config.IndexConfiguration, _ []config.IndexJobHint, err error) {
ctx, trace, endObservation := s.operations.inferIndexConfiguration.With(ctx, &err, observation.Args{
LogFields: []otlog.Field{
otlog.Int("repositoryID", repositoryID),
},
})
ctx, trace, endObservation := s.operations.inferIndexConfiguration.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
repo, err := s.repoStore.Get(ctx, api.RepoID(repositoryID))

View File

@ -27,7 +27,6 @@ go_library(
"//lib/codeintel/autoindex/config",
"//lib/errors",
"@com_github_graph_gophers_graphql_go//:graphql-go",
"@com_github_opentracing_opentracing_go//log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -3,7 +3,7 @@ package graphql
import (
"context"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
resolverstubs "github.com/sourcegraph/sourcegraph/internal/codeintel/resolvers"
"github.com/sourcegraph/sourcegraph/internal/observation"
@ -17,8 +17,8 @@ func (r *rootResolver) CodeIntelligenceInferenceScript(ctx context.Context) (scr
}
func (r *rootResolver) UpdateCodeIntelligenceInferenceScript(ctx context.Context, args *resolverstubs.UpdateCodeIntelligenceInferenceScriptArgs) (_ *resolverstubs.EmptyResponse, err error) {
ctx, _, endObservation := r.operations.updateCodeIntelligenceInferenceScript.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("script", args.Script),
ctx, _, endObservation := r.operations.updateCodeIntelligenceInferenceScript.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("script", args.Script),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,6 @@ import (
"encoding/json"
"github.com/graph-gophers/graphql-go"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/autoindexing/internal/inference"
@ -19,8 +18,8 @@ import (
// 🚨 SECURITY: Only entrypoint is within the repository resolver so the user is already authenticated
func (r *rootResolver) IndexConfiguration(ctx context.Context, repoID graphql.ID) (_ resolverstubs.IndexConfigurationResolver, err error) {
_, traceErrs, endObservation := r.operations.indexConfiguration.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repoID", string(repoID)),
_, traceErrs, endObservation := r.operations.indexConfiguration.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repoID", string(repoID)),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -38,9 +37,9 @@ func (r *rootResolver) IndexConfiguration(ctx context.Context, repoID graphql.ID
// 🚨 SECURITY: Only site admins may modify code intelligence indexing configuration
func (r *rootResolver) UpdateRepositoryIndexConfiguration(ctx context.Context, args *resolverstubs.UpdateRepositoryIndexConfigurationArgs) (_ *resolverstubs.EmptyResponse, err error) {
ctx, _, endObservation := r.operations.updateRepositoryIndexConfiguration.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repository", string(args.Repository)),
log.String("configuration", args.Configuration),
ctx, _, endObservation := r.operations.updateRepositoryIndexConfiguration.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repository", string(args.Repository)),
attribute.String("configuration", args.Configuration),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,7 @@ import (
"encoding/base64"
"strings"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
sharedresolvers "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/shared/resolvers"
uploadsshared "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -19,10 +19,10 @@ import (
// 🚨 SECURITY: Only site admins may infer auto-index jobs
func (r *rootResolver) InferAutoIndexJobsForRepo(ctx context.Context, args *resolverstubs.InferAutoIndexJobsForRepoArgs) (_ []resolverstubs.AutoIndexJobDescriptionResolver, err error) {
ctx, _, endObservation := r.operations.inferAutoIndexJobsForRepo.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repository", string(args.Repository)),
log.String("rev", resolverstubs.Deref(args.Rev, "")),
log.String("script", resolverstubs.Deref(args.Script, "")),
ctx, _, endObservation := r.operations.inferAutoIndexJobsForRepo.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repository", string(args.Repository)),
attribute.String("rev", resolverstubs.Deref(args.Rev, "")),
attribute.String("script", resolverstubs.Deref(args.Script, "")),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -63,10 +63,10 @@ func (r *rootResolver) InferAutoIndexJobsForRepo(ctx context.Context, args *reso
// 🚨 SECURITY: Only site admins may queue auto-index jobs
func (r *rootResolver) QueueAutoIndexJobsForRepo(ctx context.Context, args *resolverstubs.QueueAutoIndexJobsForRepoArgs) (_ []resolverstubs.PreciseIndexResolver, err error) {
ctx, traceErrs, endObservation := r.operations.queueAutoIndexJobsForRepo.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repository", string(args.Repository)),
log.String("rev", resolverstubs.Deref(args.Rev, "")),
log.String("configuration", resolverstubs.Deref(args.Configuration, "")),
ctx, traceErrs, endObservation := r.operations.queueAutoIndexJobsForRepo.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repository", string(args.Repository)),
attribute.String("rev", resolverstubs.Deref(args.Rev, "")),
attribute.String("configuration", resolverstubs.Deref(args.Configuration, "")),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})

View File

@ -31,7 +31,6 @@ go_library(
"//lib/codeintel/precise",
"//lib/errors",
"@com_github_dgraph_io_ristretto//:ristretto",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_go_diff//diff",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_scip//bindings/go/scip",

View File

@ -28,7 +28,6 @@ go_library(
"//lib/errors",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_scip//bindings/go/scip",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_protobuf//proto",

View File

@ -5,7 +5,6 @@ import (
"strings"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
@ -16,9 +15,9 @@ import (
// GetPathExists determines if the path exists in the database.
func (s *store) GetPathExists(ctx context.Context, bundleID int, path string) (_ bool, err error) {
ctx, _, endObservation := s.operations.getPathExists.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
ctx, _, endObservation := s.operations.getPathExists.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("bundleID", bundleID),
attribute.String("path", path),
}})
defer endObservation(1, observation.Args{})
@ -42,9 +41,9 @@ SELECT EXISTS (
// Stencil returns all ranges within a single document.
func (s *store) GetStencil(ctx context.Context, bundleID int, path string) (_ []shared.Range, err error) {
ctx, trace, endObservation := s.operations.getStencil.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
ctx, trace, endObservation := s.operations.getStencil.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("bundleID", bundleID),
attribute.String("path", path),
}})
defer endObservation(1, observation.Args{})
@ -82,11 +81,11 @@ LIMIT 1
// GetRanges returns definition, reference, implementation, and hover data for each range within the given span of lines.
func (s *store) GetRanges(ctx context.Context, bundleID int, path string, startLine, endLine int) (_ []shared.CodeIntelligenceRange, err error) {
ctx, _, endObservation := s.operations.getRanges.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("startLine", startLine),
log.Int("endLine", endLine),
ctx, _, endObservation := s.operations.getRanges.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("bundleID", bundleID),
attribute.String("path", path),
attribute.Int("startLine", startLine),
attribute.Int("endLine", endLine),
}})
defer endObservation(1, observation.Args{})

View File

@ -3,12 +3,10 @@ package lsifstore
import (
"context"
"fmt"
"strconv"
"strings"
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
@ -41,14 +39,14 @@ func (s *store) GetPrototypeLocations(ctx context.Context, bundleID int, path st
// whose scheme+identifier matches one of the given monikers. This method also returns the size of the
// complete result set to aid in pagination.
func (s *store) GetBulkMonikerLocations(ctx context.Context, tableName string, uploadIDs []int, monikers []precise.MonikerData, limit, offset int) (_ []shared.Location, totalCount int, err error) {
ctx, trace, endObservation := s.operations.getBulkMonikerLocations.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("tableName", tableName),
log.Int("numUploadIDs", len(uploadIDs)),
log.String("uploadIDs", intsToString(uploadIDs)),
log.Int("numMonikers", len(monikers)),
log.String("monikers", monikersToString(monikers)),
log.Int("limit", limit),
log.Int("offset", offset),
ctx, trace, endObservation := s.operations.getBulkMonikerLocations.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("tableName", tableName),
attribute.Int("numUploadIDs", len(uploadIDs)),
attribute.IntSlice("uploadIDs", uploadIDs),
attribute.Int("numMonikers", len(monikers)),
attribute.String("monikers", monikersToString(monikers)),
attribute.Int("limit", limit),
attribute.Int("offset", offset),
}})
defer endObservation(1, observation.Args{})
@ -135,11 +133,11 @@ func (s *store) getLocations(
path string,
line, character, limit, offset int,
) (_ []shared.Location, _ int, err error) {
ctx, trace, endObservation := operation.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("line", line),
log.Int("character", character),
ctx, trace, endObservation := operation.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("bundleID", bundleID),
attribute.String("path", path),
attribute.Int("line", line),
attribute.Int("character", character),
}})
defer endObservation(1, observation.Args{})
@ -359,15 +357,6 @@ func extractOccurrenceData(document *scip.Document, occurrence *scip.Occurrence)
}
}
func intsToString(vs []int) string {
strs := make([]string, 0, len(vs))
for _, v := range vs {
strs = append(strs, strconv.Itoa(v))
}
return strings.Join(strs, ", ")
}
func monikersToString(vs []precise.MonikerData) string {
strs := make([]string, 0, len(vs))
for _, v := range vs {

View File

@ -5,8 +5,8 @@ import (
"context"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/protobuf/proto"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -16,9 +16,9 @@ import (
)
func (s *store) SCIPDocument(ctx context.Context, id int, path string) (_ *scip.Document, err error) {
ctx, _, endObservation := s.operations.scipDocument.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("path", path),
log.Int("uploadID", id),
ctx, _, endObservation := s.operations.scipDocument.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("path", path),
attribute.Int("uploadID", id),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,6 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
@ -17,11 +16,11 @@ import (
// GetHover returns the hover text of the symbol at the given position.
func (s *store) GetHover(ctx context.Context, bundleID int, path string, line, character int) (_ string, _ shared.Range, _ bool, err error) {
ctx, trace, endObservation := s.operations.getHover.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.Int("line", line),
log.Int("character", character),
ctx, trace, endObservation := s.operations.getHover.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("bundleID", bundleID),
attribute.String("path", path),
attribute.Int("line", line),
attribute.Int("character", character),
}})
defer endObservation(1, observation.Args{})
@ -194,11 +193,11 @@ WHERE EXISTS (
// GetDiagnostics returns the diagnostics for the documents that have the given path prefix. This method
// also returns the size of the complete result set to aid in pagination.
func (s *store) GetDiagnostics(ctx context.Context, bundleID int, prefix string, limit, offset int) (_ []shared.Diagnostic, _ int, err error) {
ctx, trace, endObservation := s.operations.getDiagnostics.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("prefix", prefix),
log.Int("limit", limit),
log.Int("offset", offset),
ctx, trace, endObservation := s.operations.getDiagnostics.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("bundleID", bundleID),
attribute.String("prefix", prefix),
attribute.Int("limit", limit),
attribute.Int("offset", offset),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,6 @@ import (
"strings"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
@ -20,11 +19,11 @@ import (
// of monikers are attached to a single range. The order of the output slice is "outside-in", so that
// the range attached to earlier monikers enclose the range attached to later monikers.
func (s *store) GetMonikersByPosition(ctx context.Context, uploadID int, path string, line, character int) (_ [][]precise.MonikerData, err error) {
ctx, trace, endObservation := s.operations.getMonikersByPosition.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
log.String("path", path),
log.Int("line", line),
log.Int("character", character),
ctx, trace, endObservation := s.operations.getMonikersByPosition.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("uploadID", uploadID),
attribute.String("path", path),
attribute.Int("line", line),
attribute.Int("character", character),
}})
defer endObservation(1, observation.Args{})
@ -109,10 +108,10 @@ LIMIT 1
// GetPackageInformation returns package information data by identifier.
func (s *store) GetPackageInformation(ctx context.Context, bundleID int, path, packageInformationID string) (_ precise.PackageInformationData, _ bool, err error) {
_, _, endObservation := s.operations.getPackageInformation.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("bundleID", bundleID),
log.String("path", path),
log.String("packageInformationID", packageInformationID),
_, _, endObservation := s.operations.getPackageInformation.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("bundleID", bundleID),
attribute.String("path", path),
attribute.String("packageInformationID", packageInformationID),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,6 @@ import (
"fmt"
"strings"
traceLog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
@ -52,17 +51,15 @@ func newService(
// GetHover returns the set of locations defining the symbol at the given position.
func (s *Service) GetHover(ctx context.Context, args RequestArgs, requestState RequestState) (_ string, _ shared.Range, _ bool, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getHover, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
traceLog.Int("line", args.Line),
traceLog.Int("character", args.Character),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getHover, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
attribute.Int("line", args.Line),
attribute.Int("character", args.Character),
}})
defer endObservation()
adjustedUploads, err := s.getVisibleUploads(ctx, args.Line, args.Character, requestState)
@ -178,17 +175,15 @@ func (s *Service) GetHover(ctx context.Context, args RequestArgs, requestState R
// GetReferences returns the list of source locations that reference the symbol at the given position.
func (s *Service) GetReferences(ctx context.Context, args RequestArgs, requestState RequestState, cursor ReferencesCursor) (_ []shared.UploadLocation, _ ReferencesCursor, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getReferences, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
traceLog.Int("line", args.Line),
traceLog.Int("character", args.Character),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getReferences, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
attribute.Int("line", args.Line),
attribute.Int("character", args.Character),
}})
defer endObservation()
// Adjust the path and position for each visible upload based on its git difference to
@ -655,17 +650,15 @@ func (s *Service) getBulkMonikerLocations(ctx context.Context, uploads []uploads
const DefinitionsLimit = 100
func (s *Service) GetImplementations(ctx context.Context, args RequestArgs, requestState RequestState, cursor ImplementationsCursor) (_ []shared.UploadLocation, _ ImplementationsCursor, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getImplementations, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
traceLog.Int("line", args.Line),
traceLog.Int("character", args.Character),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getImplementations, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
attribute.Int("line", args.Line),
attribute.Int("character", args.Character),
}})
defer endObservation()
// Adjust the path and position for each visible upload based on its git difference to
@ -752,17 +745,15 @@ func (s *Service) GetImplementations(ctx context.Context, args RequestArgs, requ
}
func (s *Service) GetPrototypes(ctx context.Context, args RequestArgs, requestState RequestState, cursor ImplementationsCursor) (_ []shared.UploadLocation, _ ImplementationsCursor, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getImplementations, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
traceLog.Int("line", args.Line),
traceLog.Int("character", args.Character),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getImplementations, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
attribute.Int("line", args.Line),
attribute.Int("character", args.Character),
}})
defer endObservation()
// Adjust the path and position for each visible upload based on its git difference to
@ -833,17 +824,15 @@ func (s *Service) GetPrototypes(ctx context.Context, args RequestArgs, requestSt
// GetDefinitions returns the set of locations defining the symbol at the given position.
func (s *Service) GetDefinitions(ctx context.Context, args RequestArgs, requestState RequestState) (_ []shared.UploadLocation, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getDefinitions, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
traceLog.Int("line", args.Line),
traceLog.Int("character", args.Character),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getDefinitions, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
attribute.Int("line", args.Line),
attribute.Int("character", args.Character),
}})
defer endObservation()
// Adjust the path and position for each visible upload based on its git difference to
@ -918,16 +907,14 @@ func (s *Service) GetDefinitions(ctx context.Context, args RequestArgs, requestS
}
func (s *Service) GetDiagnostics(ctx context.Context, args RequestArgs, requestState RequestState) (diagnosticsAtUploads []DiagnosticAtUpload, _ int, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getDiagnostics, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
traceLog.Int("limit", args.Limit),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getDiagnostics, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
attribute.Int("limit", args.Limit),
}})
defer endObservation()
visibleUploads, err := s.getUploadPaths(ctx, args.Path, requestState)
@ -1028,14 +1015,14 @@ func (s *Service) getRequestedCommitDiagnostic(ctx context.Context, args Request
}
func (s *Service) VisibleUploadsForPath(ctx context.Context, requestState RequestState) (dumps []uploadsshared.Dump, err error) {
ctx, _, endObservation := s.operations.visibleUploadsForPath.With(ctx, &err, observation.Args{LogFields: []traceLog.Field{
traceLog.String("path", requestState.Path),
traceLog.String("commit", requestState.Commit),
traceLog.Int("repositoryID", requestState.RepositoryID),
ctx, _, endObservation := s.operations.visibleUploadsForPath.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("path", requestState.Path),
attribute.String("commit", requestState.Commit),
attribute.Int("repositoryID", requestState.RepositoryID),
}})
defer func() {
endObservation(1, observation.Args{LogFields: []traceLog.Field{
traceLog.Int("numUploads", len(dumps)),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numUploads", len(dumps)),
}})
}()
@ -1076,17 +1063,15 @@ func (s *Service) getUploadPaths(ctx context.Context, path string, requestState
}
func (s *Service) GetRanges(ctx context.Context, args RequestArgs, requestState RequestState, startLine, endLine int) (adjustedRanges []AdjustedCodeIntelligenceRange, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getRanges, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
traceLog.Int("startLine", startLine),
traceLog.Int("endLine", endLine),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getRanges, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
attribute.Int("startLine", startLine),
attribute.Int("endLine", endLine),
}})
defer endObservation()
uploadsWithPath, err := s.getUploadPaths(ctx, args.Path, requestState)
@ -1160,15 +1145,13 @@ func (s *Service) getCodeIntelligenceRange(ctx context.Context, args RequestArgs
// GetStencil returns the set of locations defining the symbol at the given position.
func (s *Service) GetStencil(ctx context.Context, args RequestArgs, requestState RequestState) (adjustedRanges []shared.Range, err error) {
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getStencil, serviceObserverThreshold, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("numUploads", len(requestState.GetCacheUploads())),
traceLog.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
},
})
ctx, trace, endObservation := observeResolver(ctx, &err, s.operations.getStencil, serviceObserverThreshold, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("numUploads", len(requestState.GetCacheUploads())),
attribute.String("uploads", uploadIDsToString(requestState.GetCacheUploads())),
}})
defer endObservation()
adjustedUploads, err := s.getUploadPaths(ctx, args.Path, requestState)
@ -1212,15 +1195,13 @@ func (s *Service) GetDumpsByIDs(ctx context.Context, ids []int) ([]uploadsshared
}
func (s *Service) GetClosestDumpsForBlob(ctx context.Context, repositoryID int, commit, path string, exactPath bool, indexer string) (_ []uploadsshared.Dump, err error) {
ctx, trace, endObservation := s.operations.getClosestDumpsForBlob.With(ctx, &err, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", repositoryID),
traceLog.String("commit", commit),
traceLog.String("path", path),
traceLog.Bool("exactPath", exactPath),
traceLog.String("indexer", indexer),
},
})
ctx, trace, endObservation := s.operations.getClosestDumpsForBlob.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
attribute.String("path", path),
attribute.Bool("exactPath", exactPath),
attribute.String("indexer", indexer),
}})
defer endObservation(1, observation.Args{})
candidates, err := s.uploadSvc.InferClosestUploads(ctx, repositoryID, commit, path, exactPath, indexer)
@ -1391,15 +1372,15 @@ func (s *Service) getVisibleUpload(ctx context.Context, line, character int, upl
}
func (s *Service) SnapshotForDocument(ctx context.Context, repositoryID int, commit, path string, uploadID int) (data []shared.SnapshotData, err error) {
ctx, _, endObservation := s.operations.snapshotForDocument.With(ctx, &err, observation.Args{LogFields: []traceLog.Field{
traceLog.Int("repoID", repositoryID),
traceLog.String("commit", commit),
traceLog.String("path", path),
traceLog.Int("uploadID", uploadID),
ctx, _, endObservation := s.operations.snapshotForDocument.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repoID", repositoryID),
attribute.String("commit", commit),
attribute.String("path", path),
attribute.Int("uploadID", uploadID),
}})
defer func() {
endObservation(1, observation.Args{LogFields: []traceLog.Field{
traceLog.Int("snapshotSymbols", len(data)),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("snapshotSymbols", len(data)),
}})
}()

View File

@ -36,9 +36,9 @@ go_library(
"//internal/observation",
"//lib/errors",
"@com_github_graph_gophers_graphql_go//:graphql-go",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_go_lsp//:go-lsp",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -5,8 +5,8 @@ import (
"fmt"
"time"
traceLog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/codenav"
"github.com/sourcegraph/sourcegraph/internal/metrics"
@ -88,14 +88,12 @@ func lowSlowRequest(logger log.Logger, duration time.Duration, err *error) {
}
func getObservationArgs(args codenav.RequestArgs) observation.Args {
return observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", args.RepositoryID),
traceLog.String("commit", args.Commit),
traceLog.String("path", args.Path),
traceLog.Int("line", args.Line),
traceLog.Int("character", args.Character),
traceLog.Int("limit", args.Limit),
},
}
return observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", args.RepositoryID),
attribute.String("commit", args.Commit),
attribute.String("path", args.Path),
attribute.Int("line", args.Line),
attribute.Int("character", args.Character),
attribute.Int("limit", args.Limit),
}}
}

View File

@ -4,7 +4,7 @@ import (
"context"
"strings"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/cmd/frontend/envvar"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/codenav"
@ -71,12 +71,12 @@ func NewRootResolver(
// 🚨 SECURITY: dbstore layer handles authz for query resolution
func (r *rootResolver) GitBlobLSIFData(ctx context.Context, args *resolverstubs.GitBlobLSIFDataArgs) (_ resolverstubs.GitBlobLSIFDataResolver, err error) {
ctx, _, endObservation := r.operations.gitBlobLsifData.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repoID", int(args.Repo.ID)),
log.String("commit", string(args.Commit)),
log.String("path", args.Path),
log.Bool("exactPath", args.ExactPath),
log.String("toolName", args.ToolName),
ctx, _, endObservation := r.operations.gitBlobLsifData.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repoID", int(args.Repo.ID)),
attribute.String("commit", string(args.Commit)),
attribute.String("path", args.Path),
attribute.Bool("exactPath", args.ExactPath),
attribute.String("toolName", args.ToolName),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -163,10 +163,10 @@ func (r *gitBlobLSIFDataResolver) ToGitBlobLSIFData() (resolverstubs.GitBlobLSIF
}
func (r *gitBlobLSIFDataResolver) VisibleIndexes(ctx context.Context) (_ *[]resolverstubs.PreciseIndexResolver, err error) {
ctx, traceErrs, endObservation := r.operations.visibleIndexes.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repoID", r.requestState.RepositoryID),
log.String("commit", r.requestState.Commit),
log.String("path", r.requestState.Path),
ctx, traceErrs, endObservation := r.operations.visibleIndexes.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repoID", r.requestState.RepositoryID),
attribute.String("commit", r.requestState.Commit),
attribute.String("path", r.requestState.Path),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,7 +5,7 @@ import (
"strings"
"time"
traceLog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/codenav"
resolverstubs "github.com/sourcegraph/sourcegraph/internal/codeintel/resolvers"
@ -16,16 +16,14 @@ import (
// Definitions returns the list of source locations that define the symbol at the given position.
func (r *gitBlobLSIFDataResolver) Definitions(ctx context.Context, args *resolverstubs.LSIFQueryPositionArgs) (_ resolverstubs.LocationConnectionResolver, err error) {
requestArgs := codenav.RequestArgs{RepositoryID: r.requestState.RepositoryID, Commit: r.requestState.Commit, Path: r.requestState.Path, Line: int(args.Line), Character: int(args.Character)}
ctx, _, endObservation := observeResolver(ctx, &err, r.operations.definitions, time.Second, observation.Args{
LogFields: []traceLog.Field{
traceLog.Int("repositoryID", requestArgs.RepositoryID),
traceLog.String("commit", requestArgs.Commit),
traceLog.String("path", requestArgs.Path),
traceLog.Int("line", requestArgs.Line),
traceLog.Int("character", requestArgs.Character),
traceLog.Int("limit", requestArgs.Limit),
},
})
ctx, _, endObservation := observeResolver(ctx, &err, r.operations.definitions, time.Second, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", requestArgs.RepositoryID),
attribute.String("commit", requestArgs.Commit),
attribute.String("path", requestArgs.Path),
attribute.Int("line", requestArgs.Line),
attribute.Int("character", requestArgs.Character),
attribute.Int("limit", requestArgs.Limit),
}})
defer endObservation()
def, err := r.codeNavSvc.GetDefinitions(ctx, requestArgs, r.requestState)

View File

@ -4,7 +4,7 @@ import (
"context"
"time"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/codenav"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/shared/resolvers/gitresolvers"
@ -21,15 +21,13 @@ var ErrIllegalBounds = errors.New("illegal bounds")
// requires cross-linking of bundles (cross-repo or cross-root).
func (r *gitBlobLSIFDataResolver) Ranges(ctx context.Context, args *resolverstubs.LSIFRangesArgs) (_ resolverstubs.CodeIntelligenceRangeConnectionResolver, err error) {
requestArgs := codenav.RequestArgs{RepositoryID: r.requestState.RepositoryID, Commit: r.requestState.Commit, Path: r.requestState.Path}
ctx, _, endObservation := observeResolver(ctx, &err, r.operations.ranges, time.Second, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", requestArgs.RepositoryID),
log.String("commit", requestArgs.Commit),
log.String("path", requestArgs.Path),
log.Int("startLine", int(args.StartLine)),
log.Int("endLine", int(args.EndLine)),
},
})
ctx, _, endObservation := observeResolver(ctx, &err, r.operations.ranges, time.Second, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", requestArgs.RepositoryID),
attribute.String("commit", requestArgs.Commit),
attribute.String("path", requestArgs.Path),
attribute.Int("startLine", int(args.StartLine)),
attribute.Int("endLine", int(args.EndLine)),
}})
defer endObservation()
if args.StartLine < 0 || args.EndLine < args.StartLine {

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/graph-gophers/graphql-go"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/codenav/shared"
uploadgraphql "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/transport/graphql"
@ -18,8 +18,8 @@ func (r *gitBlobLSIFDataResolver) Snapshot(ctx context.Context, args *struct{ In
return nil, err
}
ctx, _, endObservation := r.operations.snapshot.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
ctx, _, endObservation := r.operations.snapshot.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("uploadID", uploadID),
}})
defer endObservation(1, observation.Args{})

View File

@ -23,7 +23,6 @@ go_library(
"//lib/errors",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],

View File

@ -5,7 +5,6 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/policies/shared"
@ -20,13 +19,13 @@ import (
// If a repository identifier is supplied (is non-zero), then only the configuration policies that apply
// to repository are returned. If repository is not supplied, then all policies may be returned.
func (s *store) GetConfigurationPolicies(ctx context.Context, opts shared.GetConfigurationPoliciesOptions) (_ []shared.ConfigurationPolicy, totalCount int, err error) {
ctx, trace, endObservation := s.operations.getConfigurationPolicies.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("repositoryID", opts.RepositoryID),
otlog.String("term", opts.Term),
otlog.Bool("forDataRetention", opts.ForDataRetention),
otlog.Bool("forIndexing", opts.ForIndexing),
otlog.Int("limit", opts.Limit),
otlog.Int("offset", opts.Offset),
ctx, trace, endObservation := s.operations.getConfigurationPolicies.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.String("term", opts.Term),
attribute.Bool("forDataRetention", opts.ForDataRetention),
attribute.Bool("forIndexing", opts.ForIndexing),
attribute.Int("limit", opts.Limit),
attribute.Int("offset", opts.Offset),
}})
defer endObservation(1, observation.Args{})
@ -136,8 +135,8 @@ OFFSET %s
`
func (s *store) GetConfigurationPolicyByID(ctx context.Context, id int) (_ shared.ConfigurationPolicy, _ bool, err error) {
ctx, _, endObservation := s.operations.getConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("id", id),
ctx, _, endObservation := s.operations.getConfigurationPolicyByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -243,8 +242,8 @@ var (
)
func (s *store) UpdateConfigurationPolicy(ctx context.Context, policy shared.ConfigurationPolicy) (err error) {
ctx, _, endObservation := s.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("id", policy.ID),
ctx, _, endObservation := s.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", policy.ID),
}})
defer endObservation(1, observation.Args{})
@ -322,8 +321,8 @@ WHERE id = %s
`
func (s *store) DeleteConfigurationPolicyByID(ctx context.Context, id int) (err error) {
ctx, _, endObservation := s.operations.deleteConfigurationPolicyByID.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("id", id),
ctx, _, endObservation := s.operations.deleteConfigurationPolicyByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})

View File

@ -2,10 +2,9 @@ package store
import (
"context"
"strings"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/policies/shared"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -15,10 +14,10 @@ import (
)
func (s *store) GetRepoIDsByGlobPatterns(ctx context.Context, patterns []string, limit, offset int) (_ []int, _ int, err error) {
ctx, _, endObservation := s.operations.getRepoIDsByGlobPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numPatterns", len(patterns)),
log.Int("limit", limit),
log.Int("offset", offset),
ctx, _, endObservation := s.operations.getRepoIDsByGlobPatterns.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numPatterns", len(patterns)),
attribute.Int("limit", limit),
attribute.Int("offset", offset),
}})
defer endObservation(1, observation.Args{})
@ -77,10 +76,10 @@ OFFSET %s
`
func (s *store) UpdateReposMatchingPatterns(ctx context.Context, patterns []string, policyID int, repositoryMatchLimit *int) (err error) {
ctx, _, endObservation := s.operations.updateReposMatchingPatterns.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numPatterns", len(patterns)),
log.String("pattern", strings.Join(patterns, ",")),
log.Int("policyID", policyID),
ctx, _, endObservation := s.operations.updateReposMatchingPatterns.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numPatterns", len(patterns)),
attribute.StringSlice("pattern", patterns),
attribute.Int("policyID", policyID),
}})
defer endObservation(1, observation.Args{})
@ -151,8 +150,8 @@ SELECT
`
func (s *store) SelectPoliciesForRepositoryMembershipUpdate(ctx context.Context, batchSize int) (_ []shared.ConfigurationPolicy, err error) {
ctx, _, endObservation := s.operations.selectPoliciesForRepositoryMembershipUpdate.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSize", batchSize),
ctx, _, endObservation := s.operations.selectPoliciesForRepositoryMembershipUpdate.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSize", batchSize),
}})
defer endObservation(1, observation.Args{})

View File

@ -25,7 +25,6 @@ go_library(
"//internal/observation",
"//lib/errors",
"@com_github_graph_gophers_graphql_go//:graphql-go",
"@com_github_opentracing_opentracing_go//log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -4,7 +4,7 @@ import (
"context"
"time"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/policies/shared"
resolverstubs "github.com/sourcegraph/sourcegraph/internal/codeintel/resolvers"
@ -14,8 +14,8 @@ import (
// 🚨 SECURITY: Only site admins may modify code intelligence configuration policies
func (r *rootResolver) CreateCodeIntelligenceConfigurationPolicy(ctx context.Context, args *resolverstubs.CreateCodeIntelligenceConfigurationPolicyArgs) (_ resolverstubs.CodeIntelligenceConfigurationPolicyResolver, err error) {
ctx, traceErrs, endObservation := r.operations.createConfigurationPolicy.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("repository", string(resolverstubs.Deref(args.Repository, ""))),
ctx, traceErrs, endObservation := r.operations.createConfigurationPolicy.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("repository", string(resolverstubs.Deref(args.Repository, ""))),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -61,8 +61,8 @@ func (r *rootResolver) CreateCodeIntelligenceConfigurationPolicy(ctx context.Con
// 🚨 SECURITY: Only site admins may modify code intelligence configuration policies
func (r *rootResolver) UpdateCodeIntelligenceConfigurationPolicy(ctx context.Context, args *resolverstubs.UpdateCodeIntelligenceConfigurationPolicyArgs) (_ *resolverstubs.EmptyResponse, err error) {
ctx, _, endObservation := r.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("policyID", string(args.ID)),
ctx, _, endObservation := r.operations.updateConfigurationPolicy.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("policyID", string(args.ID)),
}})
defer endObservation(1, observation.Args{})
@ -101,8 +101,8 @@ func (r *rootResolver) UpdateCodeIntelligenceConfigurationPolicy(ctx context.Con
// 🚨 SECURITY: Only site admins may modify code intelligence configuration policies
func (r *rootResolver) DeleteCodeIntelligenceConfigurationPolicy(ctx context.Context, args *resolverstubs.DeleteCodeIntelligenceConfigurationPolicyArgs) (_ *resolverstubs.EmptyResponse, err error) {
ctx, _, endObservation := r.operations.deleteConfigurationPolicy.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("policyID", string(args.Policy)),
ctx, _, endObservation := r.operations.deleteConfigurationPolicy.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("policyID", string(args.Policy)),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/graph-gophers/graphql-go"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
policiesshared "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/policies/shared"
resolverstubs "github.com/sourcegraph/sourcegraph/internal/codeintel/resolvers"
@ -15,14 +15,14 @@ const DefaultConfigurationPolicyPageSize = 50
// 🚨 SECURITY: dbstore layer handles authz for GetConfigurationPolicies
func (r *rootResolver) CodeIntelligenceConfigurationPolicies(ctx context.Context, args *resolverstubs.CodeIntelligenceConfigurationPoliciesArgs) (_ resolverstubs.CodeIntelligenceConfigurationPolicyConnectionResolver, err error) {
ctx, traceErrs, endObservation := r.operations.configurationPolicies.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int32("first", resolverstubs.Deref(args.First, 0)),
log.String("after", resolverstubs.Deref(args.After, "")),
log.String("repository", string(resolverstubs.Deref(args.Repository, ""))),
log.String("query", resolverstubs.Deref(args.Query, "")),
log.Bool("forDataRetention", resolverstubs.Deref(args.ForDataRetention, false)),
log.Bool("forIndexing", resolverstubs.Deref(args.ForIndexing, false)),
log.Bool("protected", resolverstubs.Deref(args.Protected, false)),
ctx, traceErrs, endObservation := r.operations.configurationPolicies.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("first", int(resolverstubs.Deref(args.First, 0))),
attribute.String("after", resolverstubs.Deref(args.After, "")),
attribute.String("repository", string(resolverstubs.Deref(args.Repository, ""))),
attribute.String("query", resolverstubs.Deref(args.Query, "")),
attribute.Bool("forDataRetention", resolverstubs.Deref(args.ForDataRetention, false)),
attribute.Bool("forIndexing", resolverstubs.Deref(args.ForIndexing, false)),
attribute.Bool("protected", resolverstubs.Deref(args.Protected, false)),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -69,8 +69,8 @@ func (r *rootResolver) CodeIntelligenceConfigurationPolicies(ctx context.Context
}
func (r *rootResolver) ConfigurationPolicyByID(ctx context.Context, policyID graphql.ID) (_ resolverstubs.CodeIntelligenceConfigurationPolicyResolver, err error) {
ctx, traceErrs, endObservation := r.operations.configurationPolicyByID.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("policyID", string(policyID)),
ctx, traceErrs, endObservation := r.operations.configurationPolicyByID.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("policyID", string(policyID)),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})

View File

@ -2,11 +2,10 @@ package graphql
import (
"context"
"strings"
"time"
"github.com/graph-gophers/graphql-go"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/policies/shared"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/shared/resolvers/gitresolvers"
@ -21,9 +20,9 @@ const (
)
func (r *rootResolver) PreviewRepositoryFilter(ctx context.Context, args *resolverstubs.PreviewRepositoryFilterArgs) (_ resolverstubs.RepositoryFilterPreviewResolver, err error) {
ctx, _, endObservation := r.operations.previewRepoFilter.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int32("first", resolverstubs.Deref(args.First, 0)),
log.String("patterns", strings.Join(args.Patterns, ", ")),
ctx, _, endObservation := r.operations.previewRepoFilter.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("first", int(resolverstubs.Deref(args.First, 0))),
attribute.StringSlice("patterns", args.Patterns),
}})
defer endObservation(1, observation.Args{})
@ -56,10 +55,10 @@ func (r *rootResolver) PreviewRepositoryFilter(ctx context.Context, args *resolv
}
func (r *rootResolver) PreviewGitObjectFilter(ctx context.Context, id graphql.ID, args *resolverstubs.PreviewGitObjectFilterArgs) (_ resolverstubs.GitObjectFilterPreviewResolver, err error) {
ctx, _, endObservation := r.operations.previewGitObjectFilter.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int32("first", resolverstubs.Deref(args.First, 0)),
log.String("type", string(args.Type)),
log.String("pattern", args.Pattern),
ctx, _, endObservation := r.operations.previewGitObjectFilter.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("first", int(resolverstubs.Deref(args.First, 0))),
attribute.String("type", string(args.Type)),
attribute.String("pattern", args.Pattern),
}})
defer endObservation(1, observation.Args{})

View File

@ -16,8 +16,8 @@ go_library(
"//internal/metrics",
"//internal/observation",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_scip//bindings/go/scip",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_protobuf//proto",
],
)

View File

@ -5,8 +5,8 @@ import (
"context"
"github.com/keegancsmith/sqlf"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/protobuf/proto"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -21,8 +21,8 @@ func (s *store) InsertDefinitionsAndReferencesForDocument(
rankingBatchNumber int,
setDefsAndRefs func(ctx context.Context, upload shared.ExportedUpload, rankingBatchNumber int, rankingGraphKey, path string, document *scip.Document) error,
) (err error) {
ctx, _, endObservation := s.operations.insertDefinitionsAndReferencesForDocument.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("id", upload.UploadID),
ctx, _, endObservation := s.operations.insertDefinitionsAndReferencesForDocument.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", upload.UploadID),
}})
defer endObservation(1, observation.Args{})

View File

@ -31,8 +31,8 @@ go_library(
"//lib/errors",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -4,7 +4,6 @@ import (
"context"
"github.com/keegancsmith/sqlf"
otlog "github.com/opentracing/opentracing-go/log"
rankingshared "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/ranking/internal/shared"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -345,7 +344,7 @@ SELECT
`
func (s *store) VacuumStaleGraphs(ctx context.Context, derivativeGraphKey string, batchSize int) (_ int, err error) {
ctx, _, endObservation := s.operations.vacuumStaleGraphs.With(ctx, &err, observation.Args{LogFields: []otlog.Field{}})
ctx, _, endObservation := s.operations.vacuumStaleGraphs.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
count, _, err := basestore.ScanFirstInt(s.db.Query(ctx, sqlf.Sprintf(vacuumStaleGraphsQuery, derivativeGraphKey, derivativeGraphKey, batchSize)))

View File

@ -5,15 +5,15 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/batch"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
func (s *store) InsertInitialPathRanks(ctx context.Context, exportedUploadID int, documentPaths chan string, batchSize int, graphKey string) (err error) {
ctx, _, endObservation := s.operations.insertInitialPathRanks.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("graphKey", graphKey),
ctx, _, endObservation := s.operations.insertInitialPathRanks.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("graphKey", graphKey),
}})
defer endObservation(1, observation.Args{})

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/keegancsmith/sqlf"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
rankingshared "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/ranking/internal/shared"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -17,8 +17,8 @@ func (s *store) InsertPathRanks(
derivativeGraphKey string,
batchSize int,
) (numInputsProcessed int, numPathRanksInserted int, err error) {
ctx, _, endObservation := s.operations.insertPathRanks.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("derivativeGraphKey", derivativeGraphKey),
ctx, _, endObservation := s.operations.insertPathRanks.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("derivativeGraphKey", derivativeGraphKey),
}})
defer endObservation(1, observation.Args{})
@ -133,7 +133,7 @@ SELECT
`
func (s *store) VacuumStaleRanks(ctx context.Context, derivativeGraphKey string) (rankRecordsDeleted, rankRecordsScanned int, err error) {
ctx, _, endObservation := s.operations.vacuumStaleRanks.With(ctx, &err, observation.Args{LogFields: []otlog.Field{}})
ctx, _, endObservation := s.operations.vacuumStaleRanks.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
if _, ok := rankingshared.GraphKeyFromDerivativeGraphKey(derivativeGraphKey); !ok {

View File

@ -5,7 +5,6 @@ import (
"time"
"github.com/keegancsmith/sqlf"
otlog "github.com/opentracing/opentracing-go/log"
rankingshared "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/ranking/internal/shared"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -109,7 +108,7 @@ func (s *store) SoftDeleteStaleExportedUploads(ctx context.Context, graphKey str
numStaleExportedUploadRecordsDeleted int,
err error,
) {
ctx, _, endObservation := s.operations.softDeleteStaleExportedUploads.With(ctx, &err, observation.Args{LogFields: []otlog.Field{}})
ctx, _, endObservation := s.operations.softDeleteStaleExportedUploads.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
rows, err := s.db.Query(ctx, sqlf.Sprintf(
@ -176,7 +175,7 @@ func (s *store) VacuumDeletedExportedUploads(ctx context.Context, derivativeGrap
numExportedUploadRecordsDeleted int,
err error,
) {
ctx, _, endObservation := s.operations.vacuumDeletedExportedUploads.With(ctx, &err, observation.Args{LogFields: []otlog.Field{}})
ctx, _, endObservation := s.operations.vacuumDeletedExportedUploads.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
graphKey, ok := rankingshared.GraphKeyFromDerivativeGraphKey(derivativeGraphKey)

View File

@ -21,8 +21,8 @@ go_library(
"@com_github_hashicorp_go_version//:go-version",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -8,7 +8,7 @@ import (
"github.com/hashicorp/go-version"
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/sentinel/shared"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -18,8 +18,8 @@ import (
)
func (s *store) VulnerabilityMatchByID(ctx context.Context, id int) (_ shared.VulnerabilityMatch, _ bool, err error) {
ctx, _, endObservation := s.operations.vulnerabilityMatchByID.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("id", id),
ctx, _, endObservation := s.operations.vulnerabilityMatchByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -54,12 +54,12 @@ WHERE m.id = %s
`
func (s *store) GetVulnerabilityMatches(ctx context.Context, args shared.GetVulnerabilityMatchesArgs) (_ []shared.VulnerabilityMatch, _ int, err error) {
ctx, _, endObservation := s.operations.getVulnerabilityMatches.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("limit", args.Limit),
otlog.Int("offset", args.Offset),
otlog.String("severity", args.Severity),
otlog.String("language", args.Language),
otlog.String("repositoryName", args.RepositoryName),
ctx, _, endObservation := s.operations.getVulnerabilityMatches.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("limit", args.Limit),
attribute.Int("offset", args.Offset),
attribute.String("severity", args.Severity),
attribute.String("language", args.Language),
attribute.String("repositoryName", args.RepositoryName),
}})
defer endObservation(1, observation.Args{})
@ -157,10 +157,10 @@ LEFT JOIN repo r ON r.id = lu.repository_id
`
func (s *store) GetVulnerabilityMatchesCountByRepository(ctx context.Context, args shared.GetVulnerabilityMatchesCountByRepositoryArgs) (_ []shared.VulnerabilityMatchesByRepository, _ int, err error) {
ctx, _, endObservation := s.operations.getVulnerabilityMatchesCountByRepository.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("limit", args.Limit),
otlog.Int("offset", args.Offset),
otlog.String("repositoryName", args.RepositoryName),
ctx, _, endObservation := s.operations.getVulnerabilityMatchesCountByRepository.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("limit", args.Limit),
attribute.Int("offset", args.Offset),
attribute.String("repositoryName", args.RepositoryName),
}})
defer endObservation(1, observation.Args{})
@ -211,8 +211,8 @@ limit %s offset %s
//
func (s *store) ScanMatches(ctx context.Context, batchSize int) (numReferencesScanned int, numVulnerabilityMatches int, err error) {
ctx, _, endObservation := s.operations.scanMatches.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("batchSize", batchSize),
ctx, _, endObservation := s.operations.scanMatches.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSize", batchSize),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/sentinel/shared"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -16,8 +16,8 @@ import (
)
func (s *store) VulnerabilityByID(ctx context.Context, id int) (_ shared.Vulnerability, _ bool, err error) {
ctx, _, endObservation := s.operations.vulnerabilityByID.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("id", id),
ctx, _, endObservation := s.operations.vulnerabilityByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -76,8 +76,8 @@ const vulnerabilityAffectedSymbolFields = `
`
func (s *store) GetVulnerabilitiesByIDs(ctx context.Context, ids ...int) (_ []shared.Vulnerability, err error) {
ctx, _, endObservation := s.operations.getVulnerabilitiesByIDs.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("numIDs", len(ids)),
ctx, _, endObservation := s.operations.getVulnerabilitiesByIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIDs", len(ids)),
}})
defer endObservation(1, observation.Args{})
@ -99,9 +99,9 @@ ORDER BY v.id, vap.id, vas.id
`
func (s *store) GetVulnerabilities(ctx context.Context, args shared.GetVulnerabilitiesArgs) (_ []shared.Vulnerability, _ int, err error) {
ctx, _, endObservation := s.operations.getVulnerabilities.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("limit", args.Limit),
otlog.Int("offset", args.Offset),
ctx, _, endObservation := s.operations.getVulnerabilities.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("limit", args.Limit),
attribute.Int("offset", args.Offset),
}})
defer endObservation(1, observation.Args{})
@ -130,8 +130,8 @@ ORDER BY v.id, vap.id, vas.id
`
func (s *store) InsertVulnerabilities(ctx context.Context, vulnerabilities []shared.Vulnerability) (_ int, err error) {
ctx, _, endObservation := s.operations.insertVulnerabilities.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("numVulnerabilities", len(vulnerabilities)),
ctx, _, endObservation := s.operations.insertVulnerabilities.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numVulnerabilities", len(vulnerabilities)),
}})
defer endObservation(1, observation.Args{})

View File

@ -20,6 +20,6 @@ go_library(
"//internal/metrics",
"//internal/observation",
"@com_github_graph_gophers_graphql_go//:graphql-go",
"@com_github_opentracing_opentracing_go//log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/graph-gophers/graphql-go"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/sentinel/shared"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/shared/resolvers/gitresolvers"
@ -44,9 +44,9 @@ func NewRootResolver(
}
func (r *rootResolver) Vulnerabilities(ctx context.Context, args resolverstubs.GetVulnerabilitiesArgs) (_ resolverstubs.VulnerabilityConnectionResolver, err error) {
ctx, _, endObservation := r.operations.getVulnerabilities.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int32("first", resolverstubs.Deref(args.First, 0)),
log.String("after", resolverstubs.Deref(args.After, "")),
ctx, _, endObservation := r.operations.getVulnerabilities.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("first", int(resolverstubs.Deref(args.First, 0))),
attribute.String("after", resolverstubs.Deref(args.After, "")),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -72,9 +72,9 @@ func (r *rootResolver) Vulnerabilities(ctx context.Context, args resolverstubs.G
}
func (r *rootResolver) VulnerabilityMatches(ctx context.Context, args resolverstubs.GetVulnerabilityMatchesArgs) (_ resolverstubs.VulnerabilityMatchConnectionResolver, err error) {
ctx, errTracer, endObservation := r.operations.getMatches.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int32("first", resolverstubs.Deref(args.First, 0)),
log.String("after", resolverstubs.Deref(args.After, "")),
ctx, errTracer, endObservation := r.operations.getMatches.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("first", int(resolverstubs.Deref(args.First, 0))),
attribute.String("after", resolverstubs.Deref(args.After, "")),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -134,7 +134,7 @@ func (r *rootResolver) VulnerabilityMatches(ctx context.Context, args resolverst
}
func (r *rootResolver) VulnerabilityMatchesCountByRepository(ctx context.Context, args resolverstubs.GetVulnerabilityMatchCountByRepositoryArgs) (_ resolverstubs.VulnerabilityMatchCountByRepositoryConnectionResolver, err error) {
ctx, _, endObservation := r.operations.vulnerabilityMatchesCountByRepository.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := r.operations.vulnerabilityMatchesCountByRepository.WithErrors(ctx, &err, observation.Args{})
endObservation.OnCancel(ctx, 1, observation.Args{})
limit, offset, err := args.ParseLimitOffset(50)
@ -165,8 +165,8 @@ func (r *rootResolver) VulnerabilityMatchesCountByRepository(ctx context.Context
}
func (r *rootResolver) VulnerabilityByID(ctx context.Context, vulnerabilityID graphql.ID) (_ resolverstubs.VulnerabilityResolver, err error) {
ctx, _, endObservation := r.operations.vulnerabilityByID.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("vulnerabilityID", string(vulnerabilityID)),
ctx, _, endObservation := r.operations.vulnerabilityByID.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("vulnerabilityID", string(vulnerabilityID)),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -184,8 +184,8 @@ func (r *rootResolver) VulnerabilityByID(ctx context.Context, vulnerabilityID gr
}
func (r *rootResolver) VulnerabilityMatchByID(ctx context.Context, vulnerabilityMatchID graphql.ID) (_ resolverstubs.VulnerabilityMatchResolver, err error) {
ctx, errTracer, endObservation := r.operations.vulnerabilityMatchByID.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("vulnerabilityMatchID", string(vulnerabilityMatchID)),
ctx, errTracer, endObservation := r.operations.vulnerabilityMatchByID.WithErrors(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("vulnerabilityMatchID", string(vulnerabilityMatchID)),
}})
endObservation.OnCancel(ctx, 1, observation.Args{})
@ -221,7 +221,7 @@ func (r *rootResolver) VulnerabilityMatchByID(ctx context.Context, vulnerability
}
func (r *rootResolver) VulnerabilityMatchesSummaryCounts(ctx context.Context) (_ resolverstubs.VulnerabilityMatchesSummaryCountResolver, err error) {
ctx, _, endObservation := r.operations.vulnerabilityMatchesSummaryCounts.WithErrors(ctx, &err, observation.Args{LogFields: []log.Field{}})
ctx, _, endObservation := r.operations.vulnerabilityMatchesSummaryCounts.WithErrors(ctx, &err, observation.Args{})
endObservation.OnCancel(ctx, 1, observation.Args{})
counts, err := r.sentinelSvc.GetVulnerabilityMatchesSummaryCounts(ctx)

View File

@ -34,9 +34,9 @@ go_library(
"//internal/uploadstore",
"//lib/codeintel/precise",
"//lib/errors",
"@com_github_opentracing_opentracing_go//log",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
)

View File

@ -35,7 +35,6 @@ go_library(
"//lib/errors",
"@com_github_jackc_pgconn//:pgconn",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_opentracing_opentracing_go//log",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_scip//bindings/go/scip",

View File

@ -10,7 +10,6 @@ import (
"github.com/jackc/pgconn"
"github.com/keegancsmith/sqlf"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/prometheus/client_golang/prometheus"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
@ -118,12 +117,10 @@ func (h *handler) Handle(ctx context.Context, logger log.Logger, upload uploadss
ctx, otLogger, endObservation := h.handleOp.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{
LogFields: append(
createLogFields(upload),
otlog.Bool("requeued", requeued),
),
})
endObservation(1, observation.Args{Attrs: append(
createLogFields(upload),
attribute.Bool("requeued", requeued),
)})
}()
requeued, err = h.HandleRawUpload(ctx, logger, upload, h.uploadStore, otLogger)
@ -168,21 +165,21 @@ func (h *handler) getUploadSize(field *int64) int64 {
return 0
}
func createLogFields(upload uploadsshared.Upload) []otlog.Field {
fields := []otlog.Field{
otlog.Int("uploadID", upload.ID),
otlog.Int("repositoryID", upload.RepositoryID),
otlog.String("commit", upload.Commit),
otlog.String("root", upload.Root),
otlog.String("indexer", upload.Indexer),
otlog.Int("queueDuration", int(time.Since(upload.UploadedAt))),
func createLogFields(upload uploadsshared.Upload) []attribute.KeyValue {
attrs := []attribute.KeyValue{
attribute.Int("uploadID", upload.ID),
attribute.Int("repositoryID", upload.RepositoryID),
attribute.String("commit", upload.Commit),
attribute.String("root", upload.Root),
attribute.String("indexer", upload.Indexer),
attribute.Stringer("queueDuration", time.Since(upload.UploadedAt)),
}
if upload.UploadSize != nil {
fields = append(fields, otlog.Int64("uploadSize", *upload.UploadSize))
attrs = append(attrs, attribute.Int64("uploadSize", *upload.UploadSize))
}
return fields
return attrs
}
// defaultBranchContains tells if the default branch contains the given commit ID.

View File

@ -25,8 +25,8 @@ go_library(
"//lib/errors",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_scip//bindings/go/scip",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_protobuf//proto",
],
)

View File

@ -2,22 +2,20 @@ package lsifstore
import (
"context"
"strconv"
"strings"
"time"
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
func (s *store) IDsWithMeta(ctx context.Context, ids []int) (_ []int, err error) {
ctx, _, endObservation := s.operations.idsWithMeta.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("numIDs", len(ids)),
otlog.String("ids", intsToString(ids)),
ctx, _, endObservation := s.operations.idsWithMeta.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIDs", len(ids)),
attribute.IntSlice("ids", ids),
}})
defer endObservation(1, observation.Args{})
@ -34,8 +32,8 @@ WHERE m.upload_id = ANY(%s)
`
func (s *store) ReconcileCandidates(ctx context.Context, batchSize int) (_ []int, err error) {
ctx, _, endObservation := s.operations.reconcileCandidates.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("batchSize", batchSize),
ctx, _, endObservation := s.operations.reconcileCandidates.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSize", batchSize),
}})
defer endObservation(1, observation.Args{})
@ -76,9 +74,9 @@ RETURNING dump_id
`
func (s *store) DeleteLsifDataByUploadIds(ctx context.Context, bundleIDs ...int) (err error) {
ctx, _, endObservation := s.operations.deleteLsifDataByUploadIds.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("numBundleIDs", len(bundleIDs)),
otlog.String("bundleIDs", intsToString(bundleIDs)),
ctx, _, endObservation := s.operations.deleteLsifDataByUploadIds.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numBundleIDs", len(bundleIDs)),
attribute.IntSlice("bundleIDs", bundleIDs),
}})
defer endObservation(1, observation.Args{})
@ -157,8 +155,8 @@ WHERE dump_id IN (SELECT dump_id FROM locked_rows)
`
func (s *store) DeleteUnreferencedDocuments(ctx context.Context, batchSize int, maxAge time.Duration, now time.Time) (_, _ int, err error) {
ctx, _, endObservation := s.operations.idsWithMeta.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("maxAge", maxAge.String()),
ctx, _, endObservation := s.operations.idsWithMeta.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Stringer("maxAge", maxAge),
}})
defer endObservation(1, observation.Args{})
@ -215,15 +213,3 @@ SELECT
(SELECT COUNT(*) FROM candidates),
(SELECT COUNT(*) FROM deleted_documents)
`
//
//
// intsToString renders vs as a comma-separated list using ", " as the
// separator, e.g. []int{1, 2, 3} -> "1, 2, 3". An empty slice yields "".
func intsToString(vs []int) string {
	var sb strings.Builder
	for i, v := range vs {
		if i > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(strconv.Itoa(v))
	}
	return sb.String()
}

View File

@ -10,7 +10,7 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
otlog "github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/protobuf/proto"
"github.com/sourcegraph/scip/bindings/go/scip"
@ -49,8 +49,8 @@ type ProcessedSCIPDocument struct {
}
func (s *store) InsertMetadata(ctx context.Context, uploadID int, meta ProcessedMetadata) (err error) {
ctx, _, endObservation := s.operations.insertMetadata.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("uploadID", uploadID),
ctx, _, endObservation := s.operations.insertMetadata.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("uploadID", uploadID),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,8 +5,8 @@ import (
"context"
"github.com/keegancsmith/sqlf"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/scip/bindings/go/scip"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/protobuf/proto"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -21,8 +21,8 @@ func (s *store) InsertDefinitionsAndReferencesForDocument(
rankingBatchNumber int,
setDefsAndRefs func(ctx context.Context, upload shared.ExportedUpload, rankingBatchNumber int, rankingGraphKey, path string, document *scip.Document) error,
) (err error) {
ctx, _, endObservation := s.operations.insertDefinitionsAndReferencesForDocument.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.Int("id", upload.UploadID),
ctx, _, endObservation := s.operations.insertDefinitionsAndReferencesForDocument.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", upload.UploadID),
}})
defer endObservation(1, observation.Args{})

View File

@ -38,7 +38,6 @@ go_library(
"@com_github_jackc_pgtype//:pgtype",
"@com_github_keegancsmith_sqlf//:sqlf",
"@com_github_lib_pq//:pq",
"@com_github_opentracing_opentracing_go//log",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],

View File

@ -7,7 +7,6 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -16,9 +15,9 @@ import (
// HardDeleteUploadsByIDs deletes the upload record with the given identifier.
func (s *store) HardDeleteUploadsByIDs(ctx context.Context, ids ...int) (err error) {
ctx, _, endObservation := s.operations.hardDeleteUploadsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numIDs", len(ids)),
log.String("ids", intsToString(ids)),
ctx, _, endObservation := s.operations.hardDeleteUploadsByIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIDs", len(ids)),
attribute.IntSlice("ids", ids),
}})
defer endObservation(1, observation.Args{})
@ -56,8 +55,8 @@ DELETE FROM lsif_indexes WHERE id IN (SELECT id FROM locked_indexes)
// DeleteUploadsStuckUploading soft deletes any upload record that has been uploading since the given time.
func (s *store) DeleteUploadsStuckUploading(ctx context.Context, uploadedBefore time.Time) (_, _ int, err error) {
ctx, trace, endObservation := s.operations.deleteUploadsStuckUploading.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("uploadedBefore", uploadedBefore.Format(time.RFC3339)), // TODO - should be a duration
ctx, trace, endObservation := s.operations.deleteUploadsStuckUploading.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("uploadedBefore", uploadedBefore.Format(time.RFC3339)), // TODO - should be a duration
}})
defer endObservation(1, observation.Args{})
@ -586,9 +585,9 @@ GROUP BY repository_id, commit
// UpdateSourcedCommits updates the commit_last_checked_at field of each upload records belonging to
// the given repository identifier and commit. This method returns the count of upload records modified
func (s *store) UpdateSourcedCommits(ctx context.Context, repositoryID int, commit string, now time.Time) (uploadsUpdated int, err error) {
ctx, trace, endObservation := s.operations.updateSourcedCommits.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
ctx, trace, endObservation := s.operations.updateSourcedCommits.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
}})
defer endObservation(1, observation.Args{})
@ -654,9 +653,9 @@ func (s *store) DeleteSourcedCommits(ctx context.Context, repositoryID int, comm
uploadsUpdated, uploadsDeleted int,
err error,
) {
ctx, trace, endObservation := s.operations.deleteSourcedCommits.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
ctx, trace, endObservation := s.operations.deleteSourcedCommits.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
}})
defer endObservation(1, observation.Args{})

View File

@ -6,7 +6,7 @@ import (
"time"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -18,8 +18,8 @@ import (
// has not yet completed and an error is returned to prevent downstream expiration errors being made due to
// outdated commit graph data.
func (s *store) GetOldestCommitDate(ctx context.Context, repositoryID int) (_ time.Time, _ bool, err error) {
ctx, _, endObservation := s.operations.getOldestCommitDate.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.getOldestCommitDate.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -52,9 +52,9 @@ LIMIT 1
// UpdateCommittedAt updates the committed_at column for upload matching the given repository and commit.
func (s *store) UpdateCommittedAt(ctx context.Context, repositoryID int, commit, commitDateString string) (err error) {
ctx, _, endObservation := s.operations.updateCommittedAt.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
ctx, _, endObservation := s.operations.updateCommittedAt.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
}})
defer func() { endObservation(1, observation.Args{}) }()
@ -68,8 +68,8 @@ INSERT INTO codeintel_commit_dates(repository_id, commit_bytea, committed_at) VA
// SourcedCommitsWithoutCommittedAt returns the repository and commits of uploads that do not have an
// associated commit date value.
func (s *store) SourcedCommitsWithoutCommittedAt(ctx context.Context, batchSize int) (_ []SourcedCommits, err error) {
ctx, _, endObservation := s.operations.sourcedCommitsWithoutCommittedAt.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("batchSize", batchSize),
ctx, _, endObservation := s.operations.sourcedCommitsWithoutCommittedAt.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("batchSize", batchSize),
}})
defer func() { endObservation(1, observation.Args{}) }()

View File

@ -10,7 +10,6 @@ import (
"time"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/internal/commitgraph"
@ -24,8 +23,8 @@ import (
// SetRepositoryAsDirty marks the given repository's commit graph as out of date.
func (s *store) SetRepositoryAsDirty(ctx context.Context, repositoryID int) (err error) {
ctx, _, endObservation := s.operations.setRepositoryAsDirty.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.setRepositoryAsDirty.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -79,14 +78,12 @@ func (s *store) UpdateUploadsVisibleToCommits(
dirtyToken int,
now time.Time,
) (err error) {
ctx, trace, endObservation := s.operations.updateUploadsVisibleToCommits.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.Int("numCommitGraphKeys", len(commitGraph.Order())),
log.Int("numRefDescriptions", len(refDescriptions)),
log.Int("dirtyToken", dirtyToken),
},
})
ctx, trace, endObservation := s.operations.updateUploadsVisibleToCommits.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.Int("numCommitGraphKeys", len(commitGraph.Order())),
attribute.Int("numRefDescriptions", len(refDescriptions)),
attribute.Int("dirtyToken", dirtyToken),
}})
defer endObservation(1, observation.Args{})
return s.withTransaction(ctx, func(tx *store) error {
@ -225,9 +222,9 @@ WHERE repository_id = %s
// GetCommitsVisibleToUpload returns the set of commits for which the given upload can answer code intelligence queries.
// To paginate, supply the token returned from this method to the invocation for the next page.
func (s *store) GetCommitsVisibleToUpload(ctx context.Context, uploadID, limit int, token *string) (_ []string, nextToken *string, err error) {
ctx, _, endObservation := s.operations.getCommitsVisibleToUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
log.Int("limit", limit),
ctx, _, endObservation := s.operations.getCommitsVisibleToUpload.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("uploadID", uploadID),
attribute.Int("limit", limit),
}})
defer endObservation(1, observation.Args{})
@ -298,15 +295,13 @@ LIMIT %s
// splits the repository into multiple dumps. For this reason, the returned dumps are always sorted in most-recently-finished order to
// prevent returning data from stale dumps.
func (s *store) FindClosestDumps(ctx context.Context, repositoryID int, commit, path string, rootMustEnclosePath bool, indexer string) (_ []shared.Dump, err error) {
ctx, trace, endObservation := s.operations.findClosestDumps.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("path", path),
log.Bool("rootMustEnclosePath", rootMustEnclosePath),
log.String("indexer", indexer),
},
})
ctx, trace, endObservation := s.operations.findClosestDumps.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
attribute.String("path", path),
attribute.Bool("rootMustEnclosePath", rootMustEnclosePath),
attribute.String("indexer", indexer),
}})
defer endObservation(1, observation.Args{})
conds := makeFindClosestDumpConditions(path, rootMustEnclosePath, indexer)
@ -351,16 +346,14 @@ ORDER BY u.finished_at DESC
// FindClosestDumpsFromGraphFragment returns the set of dumps that can most accurately answer queries for the given repository, commit,
// path, and optional indexer by only considering the given fragment of the full git graph. See FindClosestDumps for additional details.
func (s *store) FindClosestDumpsFromGraphFragment(ctx context.Context, repositoryID int, commit, path string, rootMustEnclosePath bool, indexer string, commitGraph *gitdomain.CommitGraph) (_ []shared.Dump, err error) {
ctx, trace, endObservation := s.operations.findClosestDumpsFromGraphFragment.With(ctx, &err, observation.Args{
LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("path", path),
log.Bool("rootMustEnclosePath", rootMustEnclosePath),
log.String("indexer", indexer),
log.Int("numCommitGraphKeys", len(commitGraph.Order())),
},
})
ctx, trace, endObservation := s.operations.findClosestDumpsFromGraphFragment.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
attribute.String("path", path),
attribute.Bool("rootMustEnclosePath", rootMustEnclosePath),
attribute.String("indexer", indexer),
attribute.Int("numCommitGraphKeys", len(commitGraph.Order())),
}})
defer endObservation(1, observation.Args{})
if len(commitGraph.Order()) == 0 {
@ -522,8 +515,8 @@ SELECT EXTRACT(EPOCH FROM NOW() - ldr.set_dirty_at)::integer AS age
// CommitGraphMetadata returns whether or not the commit graph for the given repository is stale, along with the date of
// the most recent commit graph refresh for the given repository.
func (s *store) GetCommitGraphMetadata(ctx context.Context, repositoryID int) (stale bool, updatedAt *time.Time, err error) {
ctx, _, endObservation := s.operations.getCommitGraphMetadata.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.getCommitGraphMetadata.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})

View File

@ -5,7 +5,7 @@ import (
"database/sql"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -16,8 +16,8 @@ import (
// ReferencesForUpload returns the set of import monikers attached to the given upload identifier.
func (s *store) ReferencesForUpload(ctx context.Context, uploadID int) (_ shared.PackageReferenceScanner, err error) {
ctx, _, endObservation := s.operations.referencesForUpload.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
ctx, _, endObservation := s.operations.referencesForUpload.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("uploadID", uploadID),
}})
defer endObservation(1, observation.Args{})
@ -38,8 +38,8 @@ ORDER BY r.scheme, r.manager, r.name, r.version
// UpdatePackages upserts package data tied to the given upload.
func (s *store) UpdatePackages(ctx context.Context, dumpID int, packages []precise.Package) (err error) {
ctx, _, endObservation := s.operations.updatePackages.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numPackages", len(packages)),
ctx, _, endObservation := s.operations.updatePackages.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numPackages", len(packages)),
}})
defer endObservation(1, observation.Args{})
@ -102,8 +102,8 @@ func loadPackagesChannel(packages []precise.Package) <-chan []any {
// UpdatePackageReferences inserts reference data tied to the given upload.
func (s *store) UpdatePackageReferences(ctx context.Context, dumpID int, references []precise.PackageReference) (err error) {
ctx, _, endObservation := s.operations.updatePackageReferences.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numReferences", len(references)),
ctx, _, endObservation := s.operations.updatePackageReferences.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numReferences", len(references)),
}})
defer endObservation(1, observation.Args{})

View File

@ -7,7 +7,6 @@ import (
"time"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
@ -18,8 +17,8 @@ import (
// GetLastUploadRetentionScanForRepository returns the last timestamp, if any, that the repository with the
// given identifier was considered for upload expiration checks.
func (s *store) GetLastUploadRetentionScanForRepository(ctx context.Context, repositoryID int) (_ *time.Time, err error) {
ctx, _, endObservation := s.operations.getLastUploadRetentionScanForRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.getLastUploadRetentionScanForRepository.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -86,11 +85,11 @@ RETURNING repository_id
// records with the given protected identifiers and sets the expired field on the upload
// records with the given expired identifiers.
func (s *store) UpdateUploadRetention(ctx context.Context, protectedIDs, expiredIDs []int) (err error) {
ctx, _, endObservation := s.operations.updateUploadRetention.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numProtectedIDs", len(protectedIDs)),
log.String("protectedIDs", intsToString(protectedIDs)),
log.Int("numExpiredIDs", len(expiredIDs)),
log.String("expiredIDs", intsToString(expiredIDs)),
ctx, _, endObservation := s.operations.updateUploadRetention.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numProtectedIDs", len(protectedIDs)),
attribute.IntSlice("protectedIDs", protectedIDs),
attribute.Int("numExpiredIDs", len(expiredIDs)),
attribute.IntSlice("expiredIDs", expiredIDs),
}})
defer endObservation(1, observation.Args{})
@ -479,8 +478,8 @@ SELECT (SELECT COUNT(*) FROM candidates), u.repository_id, COUNT(*) FROM updated
// SetRepositoryAsDirtyWithTx marks the given repository's commit graph as out of date.
func (s *store) setRepositoryAsDirtyWithTx(ctx context.Context, repositoryID int, tx *basestore.Store) (err error) {
ctx, _, endObservation := s.operations.setRepositoryAsDirty.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.setRepositoryAsDirty.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})

View File

@ -3,11 +3,9 @@ package store
import (
"context"
"sort"
"strings"
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -21,12 +19,12 @@ import (
// GetIndexes returns a list of indexes and the total count of records matching the given conditions.
func (s *store) GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) (_ []uploadsshared.Index, _ int, err error) {
ctx, trace, endObservation := s.operations.getIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("state", opts.State),
log.String("term", opts.Term),
log.Int("limit", opts.Limit),
log.Int("offset", opts.Offset),
ctx, trace, endObservation := s.operations.getIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.String("state", opts.State),
attribute.String("term", opts.Term),
attribute.Int("limit", opts.Limit),
attribute.Int("offset", opts.Offset),
}})
defer endObservation(1, observation.Args{})
@ -186,8 +184,8 @@ func scanIndex(s dbutil.Scanner) (index uploadsshared.Index, err error) {
// GetIndexByID returns an index by its identifier and boolean flag indicating its existence.
func (s *store) GetIndexByID(ctx context.Context, id int) (_ uploadsshared.Index, _ bool, err error) {
ctx, _, endObservation := s.operations.getIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
ctx, _, endObservation := s.operations.getIndexByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -234,8 +232,8 @@ WHERE repo.deleted_at IS NULL AND u.id = %s AND %s
// GetIndexesByIDs returns an index for each of the given identifiers. Not all given ids will necessarily
// have a corresponding element in the returned list.
func (s *store) GetIndexesByIDs(ctx context.Context, ids ...int) (_ []uploadsshared.Index, err error) {
ctx, _, endObservation := s.operations.getIndexesByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("ids", intsToString(ids)),
ctx, _, endObservation := s.operations.getIndexesByIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.IntSlice("ids", ids),
}})
defer endObservation(1, observation.Args{})
@ -291,8 +289,8 @@ ORDER BY u.id
// DeleteIndexByID deletes an index by its identifier.
func (s *store) DeleteIndexByID(ctx context.Context, id int) (_ bool, err error) {
ctx, _, endObservation := s.operations.deleteIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
ctx, _, endObservation := s.operations.deleteIndexByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -306,10 +304,10 @@ DELETE FROM lsif_indexes WHERE id = %s RETURNING repository_id
// DeleteIndexes deletes indexes matching the given filter criteria.
func (s *store) DeleteIndexes(ctx context.Context, opts shared.DeleteIndexesOptions) (err error) {
ctx, _, endObservation := s.operations.deleteIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("states", strings.Join(opts.States, ",")),
log.String("term", opts.Term),
ctx, _, endObservation := s.operations.deleteIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.StringSlice("states", opts.States),
attribute.String("term", opts.Term),
}})
defer endObservation(1, observation.Args{})
@ -358,8 +356,8 @@ WHERE u.repository_id = repo.id AND %s
// ReindexIndexByID reindexes an index by its identifier.
func (s *store) ReindexIndexByID(ctx context.Context, id int) (err error) {
ctx, _, endObservation := s.operations.reindexIndexByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
ctx, _, endObservation := s.operations.reindexIndexByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -374,10 +372,10 @@ WHERE id = %s
// ReindexIndexes reindexes indexes matching the given filter criteria.
func (s *store) ReindexIndexes(ctx context.Context, opts shared.ReindexIndexesOptions) (err error) {
ctx, _, endObservation := s.operations.reindexIndexes.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("states", strings.Join(opts.States, ",")),
log.String("term", opts.Term),
ctx, _, endObservation := s.operations.reindexIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.StringSlice("states", opts.States),
attribute.String("term", opts.Term),
}})
defer endObservation(1, observation.Args{})

View File

@ -4,7 +4,7 @@ import (
"context"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
@ -13,8 +13,8 @@ import (
// HasRepository determines if there is LSIF data for the given repository.
func (s *store) HasRepository(ctx context.Context, repositoryID int) (_ bool, err error) {
ctx, _, endObservation := s.operations.hasRepository.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, _, endObservation := s.operations.hasRepository.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -28,9 +28,9 @@ SELECT 1 FROM lsif_uploads WHERE state NOT IN ('deleted', 'deleting') AND reposi
// HasCommit determines if the given commit is known for the given repository.
func (s *store) HasCommit(ctx context.Context, repositoryID int, commit string) (_ bool, err error) {
ctx, _, endObservation := s.operations.hasCommit.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
ctx, _, endObservation := s.operations.hasCommit.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
}})
defer endObservation(1, observation.Args{})
@ -56,8 +56,8 @@ SELECT
func (s *store) InsertDependencySyncingJob(ctx context.Context, uploadID int) (id int, err error) {
ctx, _, endObservation := s.operations.insertDependencySyncingJob.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("id", id),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
}()

View File

@ -6,7 +6,6 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -20,8 +19,8 @@ import (
func (s *store) InsertUpload(ctx context.Context, upload shared.Upload) (id int, err error) {
ctx, _, endObservation := s.operations.insertUpload.With(ctx, &err, observation.Args{})
defer func() {
endObservation(1, observation.Args{LogFields: []log.Field{
log.Int("id", id),
endObservation(1, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
}()
@ -72,9 +71,9 @@ RETURNING id
// AddUploadPart adds the part index to the given upload's uploaded parts array. This method is idempotent
// (the resulting array is deduplicated on update).
func (s *store) AddUploadPart(ctx context.Context, uploadID, partIndex int) (err error) {
ctx, _, endObservation := s.operations.addUploadPart.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("uploadID", uploadID),
log.Int("partIndex", partIndex),
ctx, _, endObservation := s.operations.addUploadPart.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("uploadID", uploadID),
attribute.Int("partIndex", partIndex),
}})
defer endObservation(1, observation.Args{})
@ -87,8 +86,8 @@ UPDATE lsif_uploads SET uploaded_parts = array(SELECT DISTINCT * FROM unnest(arr
// MarkQueued updates the state of the upload to queued and updates the upload size.
func (s *store) MarkQueued(ctx context.Context, id int, uploadSize *int64) (err error) {
ctx, _, endObservation := s.operations.markQueued.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
ctx, _, endObservation := s.operations.markQueued.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -106,8 +105,8 @@ WHERE id = %s
// MarkFailed updates the state of the upload to failed, increments the num_failures column and sets the finished_at time
func (s *store) MarkFailed(ctx context.Context, id int, reason string) (err error) {
ctx, _, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
ctx, _, endObservation := s.operations.markFailed.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -130,11 +129,11 @@ WHERE
// commit, root, and indexer. This is necessary to perform during conversions before changing
// the state of a processing upload to completed as there is a unique index on these four columns.
func (s *store) DeleteOverlappingDumps(ctx context.Context, repositoryID int, commit, root, indexer string) (err error) {
ctx, trace, endObservation := s.operations.deleteOverlappingDumps.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.String("root", root),
log.String("indexer", indexer),
ctx, trace, endObservation := s.operations.deleteOverlappingDumps.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
attribute.String("root", root),
attribute.String("indexer", indexer),
}})
defer endObservation(1, observation.Args{})

View File

@ -4,7 +4,6 @@ import (
"context"
"github.com/keegancsmith/sqlf"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -16,8 +15,8 @@ import (
)
func (s *store) GetIndexers(ctx context.Context, opts shared.GetIndexersOptions) (_ []string, err error) {
ctx, _, endObservation := s.operations.getIndexers.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
ctx, _, endObservation := s.operations.getIndexers.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
}})
defer endObservation(1, observation.Args{})
@ -56,8 +55,8 @@ WHERE
// include the set of unprocessed records as well as the latest finished record. These values allow users to
// quickly determine if a particular root/indexer pair is up-to-date or having issues processing.
func (s *store) GetRecentUploadsSummary(ctx context.Context, repositoryID int) (upload []shared.UploadsWithRepositoryNamespace, err error) {
ctx, logger, endObservation := s.operations.getRecentUploadsSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, logger, endObservation := s.operations.getRecentUploadsSummary.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
@ -178,8 +177,8 @@ const sanitizedIndexerExpression = `
// include the set of unprocessed records as well as the latest finished record. These values allow users to
// quickly determine if a particular root/indexer pair os up-to-date or having issues processing.
func (s *store) GetRecentIndexesSummary(ctx context.Context, repositoryID int) (summaries []uploadsshared.IndexesWithRepositoryNamespace, err error) {
ctx, logger, endObservation := s.operations.getRecentIndexesSummary.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
ctx, logger, endObservation := s.operations.getRecentIndexesSummary.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})

View File

@ -12,7 +12,6 @@ import (
"github.com/jackc/pgtype"
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"github.com/opentracing/opentracing-go/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/uploads/shared"
@ -27,7 +26,7 @@ import (
// GetUploads returns a list of uploads and the total count of records matching the given conditions.
func (s *store) GetUploads(ctx context.Context, opts shared.GetUploadsOptions) (uploads []shared.Upload, totalCount int, err error) {
ctx, trace, endObservation := s.operations.getUploads.With(ctx, &err, observation.Args{LogFields: buildGetUploadsLogFields(opts)})
ctx, trace, endObservation := s.operations.getUploads.With(ctx, &err, observation.Args{Attrs: buildGetUploadsLogFields(opts)})
defer endObservation(1, observation.Args{})
tableExpr, conds, cte := buildGetConditionsAndCte(opts)
@ -175,7 +174,7 @@ var scanFirstUpload = basestore.NewFirstScanner(scanCompleteUpload)
// GetUploadByID returns an upload by its identifier and boolean flag indicating its existence.
func (s *store) GetUploadByID(ctx context.Context, id int) (_ shared.Upload, _ bool, err error) {
ctx, _, endObservation := s.operations.getUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{log.Int("id", id)}})
ctx, _, endObservation := s.operations.getUploadByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{attribute.Int("id", id)}})
defer endObservation(1, observation.Args{})
authzConds, err := database.AuthzQueryConds(ctx, database.NewDBWith(s.logger, s.db))
@ -221,9 +220,9 @@ WHERE repo.deleted_at IS NULL AND u.state != 'deleted' AND u.id = %s AND %s
// GetDumpsByIDs returns a set of dumps by identifiers.
func (s *store) GetDumpsByIDs(ctx context.Context, ids []int) (_ []shared.Dump, err error) {
ctx, trace, endObservation := s.operations.getDumpsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numIDs", len(ids)),
log.String("ids", intsToString(ids)),
ctx, trace, endObservation := s.operations.getDumpsByIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIDs", len(ids)),
attribute.IntSlice("ids", ids),
}})
defer endObservation(1, observation.Args{})
@ -268,8 +267,8 @@ FROM lsif_dumps_with_repository_name u WHERE u.id IN (%s)
`
func (s *store) getUploadsByIDs(ctx context.Context, allowDeleted bool, ids ...int) (_ []shared.Upload, err error) {
ctx, _, endObservation := s.operations.getUploadsByIDs.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.String("ids", intsToString(ids)),
ctx, _, endObservation := s.operations.getUploadsByIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.IntSlice("ids", ids),
}})
defer endObservation(1, observation.Args{})
@ -414,13 +413,13 @@ func (s *store) GetUploadIDsWithReferences(
// GetVisibleUploadsMatchingMonikers returns visible uploads that refer (via package information) to any of
// the given monikers' packages.
func (s *store) GetVisibleUploadsMatchingMonikers(ctx context.Context, repositoryID int, commit string, monikers []precise.QualifiedMonikerData, limit, offset int) (_ shared.PackageReferenceScanner, _ int, err error) {
ctx, trace, endObservation := s.operations.getVisibleUploadsMatchingMonikers.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", repositoryID),
log.String("commit", commit),
log.Int("numMonikers", len(monikers)),
log.String("monikers", monikersToString(monikers)),
log.Int("limit", limit),
log.Int("offset", offset),
ctx, trace, endObservation := s.operations.getVisibleUploadsMatchingMonikers.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("commit", commit),
attribute.Int("numMonikers", len(monikers)),
attribute.String("monikers", monikersToString(monikers)),
attribute.Int("limit", limit),
attribute.Int("offset", offset),
}})
defer endObservation(1, observation.Args{})
@ -547,9 +546,9 @@ var definitionDumpsLimit, _ = strconv.ParseInt(env.Get("PRECISE_CODE_INTEL_DEFIN
// GetDumpsWithDefinitionsForMonikers returns the set of dumps that define at least one of the given monikers.
func (s *store) GetDumpsWithDefinitionsForMonikers(ctx context.Context, monikers []precise.QualifiedMonikerData) (_ []shared.Dump, err error) {
ctx, trace, endObservation := s.operations.getDumpsWithDefinitionsForMonikers.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("numMonikers", len(monikers)),
log.String("monikers", monikersToString(monikers)),
ctx, trace, endObservation := s.operations.getDumpsWithDefinitionsForMonikers.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numMonikers", len(monikers)),
attribute.String("monikers", monikersToString(monikers)),
}})
defer endObservation(1, observation.Args{})
@ -718,7 +717,7 @@ var scanUploadAuditLogs = basestore.NewSliceScanner(scanUploadAuditLog)
// DeleteUploads deletes uploads by filter criteria. The associated repositories will be marked as dirty
// so that their commit graphs will be updated in the background.
func (s *store) DeleteUploads(ctx context.Context, opts shared.DeleteUploadsOptions) (err error) {
ctx, _, endObservation := s.operations.deleteUploads.With(ctx, &err, observation.Args{LogFields: buildDeleteUploadsLogFields(opts)})
ctx, _, endObservation := s.operations.deleteUploads.With(ctx, &err, observation.Args{Attrs: buildDeleteUploadsLogFields(opts)})
defer endObservation(1, observation.Args{})
conds := buildDeleteConditions(opts)
@ -767,7 +766,9 @@ RETURNING repository_id
// was deleted. The associated repository will be marked as dirty so that its commit graph will be updated in
// the background.
func (s *store) DeleteUploadByID(ctx context.Context, id int) (_ bool, err error) {
ctx, _, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{log.Int("id", id)}})
ctx, _, endObservation := s.operations.deleteUploadByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
var a bool
@ -805,11 +806,11 @@ RETURNING repository_id
// ReindexUploads reindexes uploads matching the given filter criteria.
func (s *store) ReindexUploads(ctx context.Context, opts shared.ReindexUploadsOptions) (err error) {
ctx, _, endObservation := s.operations.reindexUploads.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("states", strings.Join(opts.States, ",")),
log.String("term", opts.Term),
log.Bool("visibleAtTip", opts.VisibleAtTip),
ctx, _, endObservation := s.operations.reindexUploads.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.StringSlice("states", opts.States),
attribute.String("term", opts.Term),
attribute.Bool("visibleAtTip", opts.VisibleAtTip),
}})
defer endObservation(1, observation.Args{})
@ -879,8 +880,8 @@ WHERE u.id IN (SELECT id FROM index_candidates)
// ReindexUploadByID reindexes an upload by its identifier.
func (s *store) ReindexUploadByID(ctx context.Context, id int) (err error) {
ctx, _, endObservation := s.operations.reindexUploadByID.With(ctx, &err, observation.Args{LogFields: []log.Field{
log.Int("id", id),
ctx, _, endObservation := s.operations.reindexUploadByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -1185,30 +1186,30 @@ FROM (
JOIN lsif_uploads_audit_logs au ON au.upload_id = s.upload_id
`
func buildGetUploadsLogFields(opts shared.GetUploadsOptions) []log.Field {
return []log.Field{
log.Int("repositoryID", opts.RepositoryID),
log.String("state", opts.State),
log.String("term", opts.Term),
log.Bool("visibleAtTip", opts.VisibleAtTip),
log.Int("dependencyOf", opts.DependencyOf),
log.Int("dependentOf", opts.DependentOf),
log.String("uploadedBefore", nilTimeToString(opts.UploadedBefore)),
log.String("uploadedAfter", nilTimeToString(opts.UploadedAfter)),
log.String("lastRetentionScanBefore", nilTimeToString(opts.LastRetentionScanBefore)),
log.Bool("inCommitGraph", opts.InCommitGraph),
log.Bool("allowExpired", opts.AllowExpired),
log.Bool("oldestFirst", opts.OldestFirst),
log.Int("limit", opts.Limit),
log.Int("offset", opts.Offset),
// buildGetUploadsLogFields flattens the upload-query options into a slice of
// opentelemetry attributes so callers can attach them to an observation/trace.
// Note: nil time filters are rendered as strings via nilTimeToString — presumably
// an empty string for nil; verify against the helper's definition.
func buildGetUploadsLogFields(opts shared.GetUploadsOptions) []attribute.KeyValue {
	// Pre-size for the fixed number of attributes emitted below.
	attrs := make([]attribute.KeyValue, 0, 14)
	attrs = append(attrs,
		attribute.Int("repositoryID", opts.RepositoryID),
		attribute.String("state", opts.State),
		attribute.String("term", opts.Term),
		attribute.Bool("visibleAtTip", opts.VisibleAtTip),
		attribute.Int("dependencyOf", opts.DependencyOf),
		attribute.Int("dependentOf", opts.DependentOf),
	)
	attrs = append(attrs,
		attribute.String("uploadedBefore", nilTimeToString(opts.UploadedBefore)),
		attribute.String("uploadedAfter", nilTimeToString(opts.UploadedAfter)),
		attribute.String("lastRetentionScanBefore", nilTimeToString(opts.LastRetentionScanBefore)),
		attribute.Bool("inCommitGraph", opts.InCommitGraph),
		attribute.Bool("allowExpired", opts.AllowExpired),
		attribute.Bool("oldestFirst", opts.OldestFirst),
		attribute.Int("limit", opts.Limit),
		attribute.Int("offset", opts.Offset),
	)
	return attrs
}
func buildDeleteUploadsLogFields(opts shared.DeleteUploadsOptions) []log.Field {
return []log.Field{
log.String("states", strings.Join(opts.States, ",")),
log.String("term", opts.Term),
log.Bool("visibleAtTip", opts.VisibleAtTip),
// buildDeleteUploadsLogFields converts the delete-uploads filter options into
// opentelemetry attributes for observability. The states filter is recorded as
// a string slice attribute rather than a joined string.
func buildDeleteUploadsLogFields(opts shared.DeleteUploadsOptions) []attribute.KeyValue {
	attrs := make([]attribute.KeyValue, 0, 3)
	attrs = append(attrs, attribute.StringSlice("states", opts.States))
	attrs = append(attrs, attribute.String("term", opts.Term))
	attrs = append(attrs, attribute.Bool("visibleAtTip", opts.VisibleAtTip))
	return attrs
}

View File

@ -3,8 +3,6 @@ package store
import (
"database/sql"
"sort"
"strconv"
"strings"
"github.com/keegancsmith/sqlf"
@ -117,15 +115,6 @@ FROM (
WHERE t.r <= 1
`
// intsToString renders a slice of ints as a comma-plus-space separated list,
// e.g. []int{1, 2, 3} -> "1, 2, 3". An empty or nil slice yields "".
func intsToString(vs []int) string {
	var sb strings.Builder
	for i, v := range vs {
		if i > 0 {
			sb.WriteString(", ")
		}
		sb.WriteString(strconv.Itoa(v))
	}
	return sb.String()
}
func scanCountsWithTotalCount(rows *sql.Rows, queryErr error) (totalCount int, _ map[int]int, err error) {
if queryErr != nil {
return 0, nil, queryErr

Some files were not shown because too many files have changed in this diff Show More