Tracing: final cleanups (#54694)

This will be my last PR related to the backend tracing work I've been
doing. It is a set of small cleanups to the `trace` package that I've
collected as I worked on and used tracing. Following this PR, the
`trace` package is just a very lightweight wrapper around the standard
OpenTelemetry APIs. I think it's best to keep the package around rather
than using OpenTelemetry directly because it makes it easy to add
convenience methods (which I would be sad to lose).
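
To give a sense of what that wrapper looks like, here is a minimal sketch. The names (`Trace`, `New`, `FromContext`, `SetError`, `EndWithErr`) are the ones used throughout the diff below, but the bodies are illustrative only, not the actual `internal/trace` implementation:

```go
package trace

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/codes"
	oteltrace "go.opentelemetry.io/otel/trace"
)

// Trace is a thin value wrapper around an OpenTelemetry span.
type Trace struct {
	oteltrace.Span
}

// New starts a span with the given name and attributes and returns it
// alongside the derived context.
func New(ctx context.Context, name string, attrs ...attribute.KeyValue) (Trace, context.Context) {
	ctx, span := otel.Tracer("internal/trace").Start(ctx, name,
		oteltrace.WithAttributes(attrs...))
	return Trace{span}, ctx
}

// FromContext returns the span stored in ctx. OpenTelemetry hands back a
// no-op span when there is none, which is why call sites below no longer
// need a nil check.
func FromContext(ctx context.Context) Trace {
	return Trace{oteltrace.SpanFromContext(ctx)}
}

// SetError records err and marks the span as failed.
func (t Trace) SetError(err error) {
	if err != nil {
		t.RecordError(err)
		t.SetStatus(codes.Error, err.Error())
	}
}

// EndWithErr records *err (if any) and ends the span. It is meant to be
// deferred: defer tr.EndWithErr(&err)
func (t Trace) EndWithErr(err *error) {
	if err != nil {
		t.SetError(*err)
	}
	t.End()
}
```

Convenience helpers like `EndWithErr` are what make the wrapper worth keeping: the `defer tr.EndWithErr(&err)` pattern appears in nearly every hunk below.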

Each commit is self-contained and has a descriptive message.

If anyone wants to pick up where I'm leaving off, here are a few things
left undone:
- Convert Zoekt to use OpenTelemetry rather than OpenTracing
- Add OpenTelemetry support to other services like syntect-server
- Merge the `internal/trace` and `internal/tracer` packages
- Consider adding a type that conforms to the OpenTelemetry `Span`
interface but also writes to `x/net/trace`, which could be enabled when
tracing is not available.
- Remove unrelated code from the `trace` and `tracer` package (see
[here](https://sourcegraph.com/github.com/sourcegraph/sourcegraph@a6759b95dbd8e5e3a604f7fd452b0b85f37091d9/-/blob/internal/tracer/tracer.go?L75-83)
and
[here](https://sourcegraph.com/github.com/sourcegraph/sourcegraph@769fbbf5008e8decc63967dbff53f26333620265/-/blob/internal/trace/buckets.go?L3-7))
- Noodle on a `Traceable` interface (one that implements `Attr()` or
`Attrs()`) so types can be easily added with `SetAttributes()` (a rough
sketch follows this list)
- Experiment with sampling
- Experiment with replacing `policy.ShouldTrace` with native
OpenTelemetry tools
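
To illustrate the `Traceable` idea above: types like `api.RepoName` already expose an `Attr()` helper in this diff, so an interface would only formalize that pattern. The sketch below is hypothetical (the interface, method name, and helper do not exist today) and builds on the `Trace` sketch earlier:

```go
package trace // continuing the sketch above

import "go.opentelemetry.io/otel/attribute"

// Traceable is anything that can describe itself as span attributes.
// Hypothetical: not part of the trace package today.
type Traceable interface {
	Attrs() []attribute.KeyValue
}

// SetTraceableAttributes attaches each value's attributes to the span, so a
// call site could write tr.SetTraceableAttributes(repo, commit) instead of
// spelling out every attribute.KeyValue by hand.
func (t Trace) SetTraceableAttributes(vs ...Traceable) {
	for _, v := range vs {
		t.SetAttributes(v.Attrs()...)
	}
}
```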

## Test plan

Tested manually that tracing still looks good locally. Will test on
other instances when it rolls out.

93 changed files with 255 additions and 518 deletions

View File

@ -181,9 +181,9 @@ func (s *repos) List(ctx context.Context, opt database.ReposListOptions) (repos
ctx, done := startTrace(ctx, "List", opt, &err)
defer func() {
if err == nil {
if tr := trace.TraceFromContext(ctx); tr != nil {
tr.SetAttributes(attribute.Int("result.len", len(repos)))
}
trace.FromContext(ctx).SetAttributes(
attribute.Int("result.len", len(repos)),
)
}
done()
}()
@ -200,9 +200,9 @@ func (s *repos) ListIndexable(ctx context.Context) (repos []types.MinimalRepo, e
ctx, done := startTrace(ctx, "ListIndexable", nil, &err)
defer func() {
if err == nil {
if tr := trace.TraceFromContext(ctx); tr != nil {
tr.SetAttributes(attribute.Int("result.len", len(repos)))
}
trace.FromContext(ctx).SetAttributes(
attribute.Int("result.len", len(repos)),
)
}
done()
}()

View File

@ -45,7 +45,7 @@ func startTrace(ctx context.Context, method string, arg any, err *error) (contex
}
requestDuration.With(labels).Observe(elapsed.Seconds())
requestGauge.WithLabelValues(name).Dec()
tr.FinishWithErr(err)
tr.EndWithErr(err)
}
return ctx, done

View File

@ -103,7 +103,7 @@ func linksForRepository(
tr, ctx := trace.New(ctx, "linksForRepository",
repo.Name.Attr(),
attribute.Stringer("externalRepo", repo.ExternalRepo))
defer tr.Finish()
defer tr.End()
var err error
phabRepo, err = db.Phabricator().GetByName(ctx, repo.Name)

View File

@ -280,7 +280,7 @@ func (r *GitCommitResolver) Path(ctx context.Context, args *struct {
func (r *GitCommitResolver) path(ctx context.Context, path string, validate func(fs.FileInfo) error) (_ *GitTreeEntryResolver, err error) {
tr, ctx := trace.New(ctx, "GitCommitResolver.path", attribute.String("path", path))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
stat, err := r.gitserverClient.Stat(ctx, authz.DefaultSubRepoPermsChecker, r.gitRepo, api.CommitID(r.oid), path)
if err != nil {

View File

@ -43,7 +43,7 @@ func (r *GitTreeEntryResolver) Files(ctx context.Context, args *gitTreeEntryConn
func (r *GitTreeEntryResolver) entries(ctx context.Context, args *gitTreeEntryConnectionArgs, filter func(fi fs.FileInfo) bool) (_ []*GitTreeEntryResolver, err error) {
tr, ctx := trace.New(ctx, "GitTreeEntryResolver.entries")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
entries, err := r.gitserverClient.ReadDir(ctx, authz.DefaultSubRepoPermsChecker, r.commit.repoResolver.RepoName(), api.CommitID(r.commit.OID()), r.Path(), r.isRecursive || args.Recursive)
if err != nil {

View File

@ -198,7 +198,7 @@ func (r *GitTreeEntryResolver) URL(ctx context.Context) (string, error) {
func (r *GitTreeEntryResolver) url(ctx context.Context) *url.URL {
tr, ctx := trace.New(ctx, "GitTreeEntryResolver.url")
defer tr.Finish()
defer tr.End()
if submodule := r.Submodule(); submodule != nil {
tr.SetAttributes(attribute.Bool("submodule", true))
@ -295,7 +295,7 @@ func (r *GitTreeEntryResolver) Submodule() *gitSubmoduleResolver {
func cloneURLToRepoName(ctx context.Context, db database.DB, cloneURL string) (_ string, err error) {
tr, ctx := trace.New(ctx, "cloneURLToRepoName")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repoName, err := cloneurls.RepoSourceCloneURLToRepoName(ctx, db, cloneURL)
if err != nil {

View File

@ -244,7 +244,7 @@ type RepositoryCommitArgs struct {
func (r *RepositoryResolver) Commit(ctx context.Context, args *RepositoryCommitArgs) (_ *GitCommitResolver, err error) {
tr, ctx := trace.New(ctx, "RepositoryResolver.Commit",
attribute.String("commit", args.Rev))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repo, err := r.repo(ctx)
if err != nil {
@ -269,7 +269,7 @@ type RepositoryChangelistArgs struct {
func (r *RepositoryResolver) Changelist(ctx context.Context, args *RepositoryChangelistArgs) (_ *PerforceChangelistResolver, err error) {
tr, ctx := trace.New(ctx, "RepositoryResolver.Changelist",
attribute.String("changelist", args.CID))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
cid, err := strconv.ParseInt(args.CID, 10, 64)
if err != nil {
@ -315,7 +315,7 @@ func (r *RepositoryResolver) Changelist(ctx context.Context, args *RepositoryCha
func (r *RepositoryResolver) FirstEverCommit(ctx context.Context) (_ *GitCommitResolver, err error) {
tr, ctx := trace.New(ctx, "RepositoryResolver.FirstEverCommit")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repo, err := r.repo(ctx)
if err != nil {

View File

@ -193,7 +193,7 @@ func (sr *SearchResultsResolver) ElapsedMilliseconds() int32 {
func (sr *SearchResultsResolver) DynamicFilters(ctx context.Context) []*searchFilterResolver {
tr, _ := trace.New(ctx, "DynamicFilters", attribute.String("resolver", "SearchResultsResolver"))
defer tr.Finish()
defer tr.End()
var filters streaming.SearchFilters
filters.Update(streaming.SearchEvent{
@ -236,7 +236,7 @@ func (sf *searchFilterResolver) Kind() string {
// the first line match inside of it was authored.
func (sr *SearchResultsResolver) blameFileMatch(ctx context.Context, fm *result.FileMatch) (t time.Time, err error) {
tr, ctx := trace.New(ctx, "SearchResultsResolver.blameFileMatch")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
// Blame the first line match.
if len(fm.ChunkMatches) == 0 {

View File

@ -46,7 +46,7 @@ func (r *settingsCascade) Final(ctx context.Context) (string, error) {
// Deprecated: in the GraphQL API
func (r *settingsCascade) Merged(ctx context.Context) (_ *configurationResolver, err error) {
tr, ctx := trace.New(ctx, "SettingsCascade.Merged")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
var messages []string
s, err := r.Final(ctx)

View File

@ -22,7 +22,7 @@ func Handler(h func(http.ResponseWriter, *http.Request) error) http.Handler {
Error: func(w http.ResponseWriter, req *http.Request, status int, err error) {
if status < 200 || status >= 400 {
var traceURL, traceID string
if tr := trace.TraceFromContext(req.Context()); tr != nil {
if tr := trace.FromContext(req.Context()); tr.IsRecording() {
tr.SetError(err)
traceID = trace.ID(req.Context())
traceURL = trace.URL(traceID, conf.DefaultClient())

View File

@ -41,7 +41,7 @@ func serveRepoLanding(db database.DB) func(http.ResponseWriter, *http.Request) e
func serveDefLanding(w http.ResponseWriter, r *http.Request) (err error) {
tr, ctx := trace.New(r.Context(), "serveDefLanding")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
r = r.WithContext(ctx)
legacyDefLandingCounter.Inc()

View File

@ -505,7 +505,7 @@ func serveErrorNoDebug(w http.ResponseWriter, r *http.Request, db database.DB, e
// Determine trace URL and log the error.
var traceURL string
if tr := trace.TraceFromContext(r.Context()); tr != nil {
if tr := trace.FromContext(r.Context()); tr.IsRecording() {
tr.SetError(err)
tr.SetAttributes(attribute.String("error-id", errorID))
traceURL = trace.URL(trace.ID(r.Context()), conf.DefaultClient())

View File

@ -37,7 +37,7 @@ func handleStreamBlame(logger log.Logger, db database.DB, gitserverClient gitser
return
}
tr, ctx := trace.New(r.Context(), "blame.Stream")
defer tr.Finish()
defer tr.End()
r = r.WithContext(ctx)
if _, ok := mux.Vars(r)["Repo"]; !ok {

View File

@ -59,7 +59,7 @@ type streamHandler struct {
func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
tr, ctx := trace.New(r.Context(), "search.ServeStream")
defer tr.Finish()
defer tr.End()
r = r.WithContext(ctx)
streamWriter, err := streamhttp.NewWriter(w)
@ -82,7 +82,7 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
}
func (h *streamHandler) serveHTTP(r *http.Request, tr *trace.Trace, eventWriter *eventWriter) (err error) {
func (h *streamHandler) serveHTTP(r *http.Request, tr trace.Trace, eventWriter *eventWriter) (err error) {
ctx := r.Context()
start := time.Now()

View File

@ -41,7 +41,7 @@ func List(ctx context.Context, registry *url.URL, query string) (xs []*Extension
if xs != nil {
tr.SetAttributes(attribute.Int("results", len(xs)))
}
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
}()
var q url.Values

View File

@ -58,7 +58,7 @@ func runCommand(ctx context.Context, cmd wrexec.Cmder) (exitCode int, err error)
if err != nil {
tr.SetAttributes(attribute.Int("exitCode", exitCode))
}
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
}()
err = cmd.Run()

View File

@ -113,7 +113,7 @@ func runCommandGraceful(ctx context.Context, logger log.Logger, cmd wrexec.Cmder
if err != nil {
tr.SetAttributes(attribute.Int("exitCode", exitCode))
}
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
}()
exitCode = unsetExitStatus
@ -481,7 +481,7 @@ func (s *Server) Handler() http.Handler {
// able to create some simple wrappers
tr, ctx := trace.New(ctx, "GetObject",
attribute.String("objectName", objectName))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
return getObjectService.GetObject(ctx, repo, objectName)
})
@ -1141,7 +1141,7 @@ func (s *Server) handleArchive(w http.ResponseWriter, r *http.Request) {
func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
logger := s.Logger.Scoped("handleSearch", "http handler for search")
tr, ctx := trace.New(r.Context(), "handleSearch")
defer tr.Finish()
defer tr.End()
// Decode the request
protocol.RegisterGob()
@ -1218,7 +1218,7 @@ func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
matchesBuf.Flush()
}
func (s *Server) searchWithObservability(ctx context.Context, tr *trace.Trace, args *protocol.SearchRequest, onMatch func(*protocol.CommitMatch) error) (limitHit bool, err error) {
func (s *Server) searchWithObservability(ctx context.Context, tr trace.Trace, args *protocol.SearchRequest, onMatch func(*protocol.CommitMatch) error) (limitHit bool, err error) {
searchStart := time.Now()
searchRunning.Inc()
@ -1618,7 +1618,7 @@ func (s *Server) exec(ctx context.Context, logger log.Logger, req *protocol.Exec
}
args := strings.Join(req.Args, " ")
var tr *trace.Trace
var tr trace.Trace
tr, ctx = trace.New(ctx, "exec."+cmd, req.Repo.Attr())
tr.SetAttributes(
attribute.String("args", args),
@ -1636,7 +1636,7 @@ func (s *Server) exec(ctx context.Context, logger log.Logger, req *protocol.Exec
attribute.String("ensure_revision_status", ensureRevisionStatus),
)
tr.SetError(execErr)
tr.Finish()
tr.End()
duration := time.Since(start)
execRunning.WithLabelValues(cmd).Dec()
@ -1890,7 +1890,7 @@ func (s *Server) p4Exec(ctx context.Context, logger log.Logger, req *protocol.P4
}
args := strings.Join(req.Args, " ")
var tr *trace.Trace
var tr trace.Trace
tr, ctx = trace.New(ctx, "p4exec."+cmd, attribute.String("port", req.P4Port))
tr.SetAttributes(attribute.String("args", args))
logger = logger.WithTrace(trace.Context(ctx))
@ -1903,7 +1903,7 @@ func (s *Server) p4Exec(ctx context.Context, logger log.Logger, req *protocol.P4
attribute.Int64("stderr", stderrN),
)
tr.SetError(execErr)
tr.Finish()
tr.End()
duration := time.Since(start)
execRunning.WithLabelValues(cmd).Dec()
@ -2554,7 +2554,7 @@ var headBranchPattern = lazyregexp.New(`HEAD branch: (.+?)\n`)
func (s *Server) doRepoUpdate(ctx context.Context, repo api.RepoName, revspec string) (err error) {
tr, ctx := trace.New(ctx, "doRepoUpdate", repo.Attr())
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
s.repoUpdateLocksMu.Lock()
l, ok := s.repoUpdateLocks[repo]

View File

@ -316,7 +316,7 @@ func (gs *GRPCServer) Search(req *proto.SearchRequest, ss proto.GitserverService
}
tr, ctx := trace.New(ss.Context(), "search")
defer tr.Finish()
defer tr.End()
limitHit, err := gs.Server.searchWithObservability(ctx, tr, args, onMatch)
if err != nil {

View File

@ -128,7 +128,7 @@ func (s *Server) enqueueRepoUpdate(ctx context.Context, req *protocol.RepoUpdate
)
}
tr.SetError(err)
tr.Finish()
tr.End()
}()
rs, err := s.Store.RepoStore().List(ctx, database.ReposListOptions{Names: []string{string(req.Repo)}})
@ -305,7 +305,7 @@ func (s *Server) repoLookup(ctx context.Context, args protocol.RepoLookupArgs) (
defer func() {
s.Logger.Debug("repoLookup", log.String("result", fmt.Sprint(result)), log.Error(err))
tr.SetError(err)
tr.Finish()
tr.End()
}()
if args.Repo == "" {

View File

@ -141,7 +141,7 @@ func (s *Service) search(ctx context.Context, p *protocol.Request, sender matchS
metricRunning.Inc()
defer metricRunning.Dec()
var tr *trace.Trace
var tr trace.Trace
tr, ctx = trace.New(ctx, "search",
p.Repo.Attr(),
p.Commit.Attr(),
@ -156,7 +156,7 @@ func (s *Service) search(ctx context.Context, p *protocol.Request, sender matchS
attribute.Bool("patternMatchesContent", p.PatternMatchesContent),
attribute.Bool("patternMatchesPath", p.PatternMatchesPath),
attribute.String("select", p.Select))
defer tr.Finish()
defer tr.End()
defer func(start time.Time) {
code := "200"
// We often have canceled and timed out requests. We do not want to

View File

@ -262,7 +262,7 @@ func regexSearchBatch(ctx context.Context, rg *readerGrep, zf *zipFile, limit in
// regexSearch concurrently searches files in zr looking for matches using rg.
func regexSearch(ctx context.Context, rg *readerGrep, zf *zipFile, patternMatchesContent, patternMatchesPaths bool, isPatternNegated bool, sender matchSender) (err error) {
tr, ctx := trace.New(ctx, "regexSearch")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if rg.re != nil {
tr.SetAttributes(attribute.Stringer("re", rg.re))

View File

@ -397,7 +397,7 @@ func structuralSearch(ctx context.Context, inputType comby.Input, paths filePatt
}
tr, ctx := trace.New(ctx, "structuralSearch", repo.Attr())
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
// Cap the number of forked processes to limit the size of zip contents being mapped to memory. Resolving #7133 could help to lift this restriction.
numWorkers := 4

View File

@ -143,7 +143,7 @@ func (s *Store) PrepareZip(ctx context.Context, repo api.RepoName, commit api.Co
func (s *Store) PrepareZipPaths(ctx context.Context, repo api.RepoName, commit api.CommitID, paths []string) (path string, err error) {
tr, ctx := trace.New(ctx, "ArchiveStore.PrepareZipPaths")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
var cacheHit bool
start := time.Now()
@ -257,7 +257,7 @@ func (s *Store) fetch(ctx context.Context, repo api.RepoName, commit api.CommitI
metricFetchFailed.Inc()
}
metricFetching.Dec()
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
}
defer func() {
if rc == nil {

View File

@ -171,9 +171,7 @@ func (c *CachedEmbeddingIndexGetter) get(ctx context.Context, repoName api.RepoN
repoEmbeddingIndexName := embeddings.GetRepoEmbeddingIndexName(repoName)
cacheEntry, ok := c.cache.Get(repoEmbeddingIndexName)
if tr := trace.TraceFromContext(ctx); tr != nil {
tr.AddEvent("checked embedding index cache", attribute.Bool("hit", ok))
}
trace.FromContext(ctx).AddEvent("checked embedding index cache", attribute.Bool("hit", ok))
if !ok {
// We do not have the index in the cache. Download and cache it.
return c.getAndCacheIndex(ctx, repoEmbeddingIndexName, lastFinishedRepoEmbeddingJob.FinishedAt)

View File

@ -30,7 +30,7 @@ func searchRepoEmbeddingIndexes(
weaviate *weaviateClient,
) (_ *embeddings.EmbeddingCombinedSearchResults, err error) {
tr, ctx := trace.New(ctx, "searchRepoEmbeddingIndexes", params.Attrs()...)
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
floatQuery, queryModel, err := getQueryEmbedding(ctx, params.Query)
if err != nil {
@ -51,7 +51,7 @@ func searchRepoEmbeddingIndexes(
tr, ctx := trace.New(ctx, "searchRepo",
attribute.String("repoName", string(repoName)),
)
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if weaviate.Use(ctx) {
return weaviate.Search(ctx, repoName, repoID, floatQuery, params.CodeResultsCount, params.TextResultsCount)

View File

@ -59,7 +59,7 @@ func newMiddleware(db database.DB, authPrefix string, isAPIHandler bool, next ht
// redirecting.
span, _ := trace.New(r.Context(), "githubapp")
span.SetAttributes(attribute.Bool("isAPIHandler", isAPIHandler))
span.Finish()
span.End()
if strings.HasPrefix(r.URL.Path, authPrefix+"/") {
handler.ServeHTTP(w, r)
return

View File

@ -41,7 +41,7 @@ func NewMiddleware(db database.DB, serviceType, authPrefix string, isAPIHandler
if !isAPIHandler && strings.HasPrefix(r.URL.Path, authPrefix+"/") {
span.AddEvent("delegate to auth flow handler")
r = withOAuthExternalClient(r)
span.Finish()
span.End()
oauthFlowHandler.ServeHTTP(w, r)
return
}
@ -50,7 +50,7 @@ func NewMiddleware(db database.DB, serviceType, authPrefix string, isAPIHandler
// next.
if actor.FromContext(ctx).IsAuthenticated() {
span.AddEvent("authenticated, proceeding to next")
span.Finish()
span.End()
next.ServeHTTP(w, r)
return
}
@ -65,14 +65,14 @@ func NewMiddleware(db database.DB, serviceType, authPrefix string, isAPIHandler
v := make(url.Values)
v.Set("redirect", auth.SafeRedirectURL(r.URL.String()))
v.Set("pc", pc.ConfigID().ID)
span.Finish()
span.End()
http.Redirect(w, r, authPrefix+"/login?"+v.Encode(), http.StatusFound)
return
}
span.AddEvent("proceeding to next")
span.Finish()
span.End()
next.ServeHTTP(w, r)
})
}

View File

@ -40,7 +40,7 @@ type SessionIssuerHelper interface {
func SessionIssuer(logger log.Logger, db database.DB, s SessionIssuerHelper, sessionKey string) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
span, ctx := trace.New(r.Context(), "oauth.SessionIssuer")
defer span.Finish()
defer span.End()
// Scopes logger to family from trace.New
logger := trace.Logger(ctx, logger)

View File

@ -418,7 +418,7 @@ func (r *Resolver) batchSpecWorkspaceByID(ctx context.Context, gqlID graphql.ID)
func (r *Resolver) CreateBatchChange(ctx context.Context, args *graphqlbackend.CreateBatchChangeArgs) (_ graphqlbackend.BatchChangeResolver, err error) {
tr, _ := trace.New(ctx, "Resolver.CreateBatchChange", attribute.String("BatchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := rbac.CheckCurrentUserHasPermission(ctx, r.store.DatabaseDB(), rbac.BatchChangesWritePermission); err != nil {
return nil, err
@ -448,7 +448,7 @@ func (r *Resolver) CreateBatchChange(ctx context.Context, args *graphqlbackend.C
func (r *Resolver) ApplyBatchChange(ctx context.Context, args *graphqlbackend.ApplyBatchChangeArgs) (_ graphqlbackend.BatchChangeResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.ApplyBatchChange", attribute.String("BatchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := rbac.CheckCurrentUserHasPermission(ctx, r.store.DatabaseDB(), rbac.BatchChangesWritePermission); err != nil {
return nil, err
@ -551,7 +551,7 @@ func (r *Resolver) applyOrCreateBatchChange(ctx context.Context, args *graphqlba
func (r *Resolver) CreateBatchSpec(ctx context.Context, args *graphqlbackend.CreateBatchSpecArgs) (_ graphqlbackend.BatchSpecResolver, err error) {
tr, ctx := trace.New(ctx, "CreateBatchSpec", attribute.String("namespace", string(args.Namespace)), attribute.String("batchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := batchChangesCreateAccess(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -606,7 +606,7 @@ func (r *Resolver) CreateBatchSpec(ctx context.Context, args *graphqlbackend.Cre
func (r *Resolver) CreateChangesetSpec(ctx context.Context, args *graphqlbackend.CreateChangesetSpecArgs) (_ graphqlbackend.ChangesetSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.CreateChangesetSpec")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := batchChangesCreateAccess(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -635,7 +635,7 @@ func (r *Resolver) CreateChangesetSpec(ctx context.Context, args *graphqlbackend
func (r *Resolver) CreateChangesetSpecs(ctx context.Context, args *graphqlbackend.CreateChangesetSpecsArgs) (_ []graphqlbackend.ChangesetSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.CreateChangesetSpecs")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := batchChangesCreateAccess(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -673,7 +673,7 @@ func (r *Resolver) CreateChangesetSpecs(ctx context.Context, args *graphqlbacken
func (r *Resolver) MoveBatchChange(ctx context.Context, args *graphqlbackend.MoveBatchChangeArgs) (_ graphqlbackend.BatchChangeResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.MoveBatchChange", attribute.String("batchChange", string(args.BatchChange)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -719,7 +719,7 @@ func (r *Resolver) MoveBatchChange(ctx context.Context, args *graphqlbackend.Mov
func (r *Resolver) DeleteBatchChange(ctx context.Context, args *graphqlbackend.DeleteBatchChangeArgs) (_ *graphqlbackend.EmptyResponse, err error) {
tr, ctx := trace.New(ctx, "Resolver.DeleteBatchChange", attribute.String("batchChange", string(args.BatchChange)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -995,7 +995,7 @@ func listChangesetOptsFromArgs(args *graphqlbackend.ListChangesetsArgs, batchCha
func (r *Resolver) CloseBatchChange(ctx context.Context, args *graphqlbackend.CloseBatchChangeArgs) (_ graphqlbackend.BatchChangeResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.CloseBatchChange", attribute.String("batchChange", string(args.BatchChange)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -1031,7 +1031,7 @@ func (r *Resolver) CloseBatchChange(ctx context.Context, args *graphqlbackend.Cl
func (r *Resolver) SyncChangeset(ctx context.Context, args *graphqlbackend.SyncChangesetArgs) (_ *graphqlbackend.EmptyResponse, err error) {
tr, ctx := trace.New(ctx, "Resolver.SyncChangeset", attribute.String("changeset", string(args.Changeset)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1056,7 +1056,7 @@ func (r *Resolver) SyncChangeset(ctx context.Context, args *graphqlbackend.SyncC
func (r *Resolver) ReenqueueChangeset(ctx context.Context, args *graphqlbackend.ReenqueueChangesetArgs) (_ graphqlbackend.ChangesetResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.ReenqueueChangeset", attribute.String("changeset", string(args.Changeset)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1089,7 +1089,7 @@ func (r *Resolver) CreateBatchChangesCredential(ctx context.Context, args *graph
attribute.String("externalServiceKind", args.ExternalServiceKind),
attribute.String("externalServiceURL", args.ExternalServiceURL),
)
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1259,7 +1259,7 @@ func (r *Resolver) generateAuthenticatorForCredential(ctx context.Context, exter
func (r *Resolver) DeleteBatchChangesCredential(ctx context.Context, args *graphqlbackend.DeleteBatchChangesCredentialArgs) (_ *graphqlbackend.EmptyResponse, err error) {
tr, ctx := trace.New(ctx, "Resolver.DeleteBatchChangesCredential", attribute.String("credential", string(args.BatchChangesCredential)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1318,7 +1318,7 @@ func (r *Resolver) DetachChangesets(ctx context.Context, args *graphqlbackend.De
tr, ctx := trace.New(ctx, "Resolver.DetachChangesets",
attribute.String("batchChange", string(args.BatchChange)),
attribute.Int("changesets.len", len(args.Changesets)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1356,7 +1356,7 @@ func (r *Resolver) CreateChangesetComments(ctx context.Context, args *graphqlbac
tr, ctx := trace.New(ctx, "Resolver.CreateChangesetComments",
attribute.String("batchChange", string(args.BatchChange)),
attribute.Int("changesets.len", len(args.Changesets)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1403,7 +1403,7 @@ func (r *Resolver) ReenqueueChangesets(ctx context.Context, args *graphqlbackend
tr, ctx := trace.New(ctx, "Resolver.ReenqueueChangesets",
attribute.String("batchChange", string(args.BatchChange)),
attribute.Int("changesets.len", len(args.Changesets)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1441,7 +1441,7 @@ func (r *Resolver) MergeChangesets(ctx context.Context, args *graphqlbackend.Mer
tr, ctx := trace.New(ctx, "Resolver.MergeChangesets",
attribute.String("batchChange", string(args.BatchChange)),
attribute.Int("changesets.len", len(args.Changesets)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1482,7 +1482,7 @@ func (r *Resolver) CloseChangesets(ctx context.Context, args *graphqlbackend.Clo
tr, ctx := trace.New(ctx, "Resolver.CloseChangesets",
attribute.String("batchChange", string(args.BatchChange)),
attribute.Int("changesets.len", len(args.Changesets)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1522,7 +1522,7 @@ func (r *Resolver) PublishChangesets(ctx context.Context, args *graphqlbackend.P
tr, ctx := trace.New(ctx, "Resolver.PublishChangesets",
attribute.String("batchChange", string(args.BatchChange)),
attribute.Int("changesets.len", len(args.Changesets)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1557,7 +1557,7 @@ func (r *Resolver) BatchSpecs(ctx context.Context, args *graphqlbackend.ListBatc
tr, ctx := trace.New(ctx, "Resolver.BatchSpecs",
attribute.Int("first", int(args.First)),
attribute.String("after", fmt.Sprintf("%v", args.After)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -1603,7 +1603,7 @@ func (r *Resolver) BatchSpecs(ctx context.Context, args *graphqlbackend.ListBatc
func (r *Resolver) CreateEmptyBatchChange(ctx context.Context, args *graphqlbackend.CreateEmptyBatchChangeArgs) (_ graphqlbackend.BatchChangeResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.CreateEmptyBatchChange",
attribute.String("namespace", string(args.Namespace)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1639,7 +1639,7 @@ func (r *Resolver) CreateEmptyBatchChange(ctx context.Context, args *graphqlback
func (r *Resolver) UpsertEmptyBatchChange(ctx context.Context, args *graphqlbackend.UpsertEmptyBatchChangeArgs) (_ graphqlbackend.BatchChangeResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.UpsertEmptyBatchChange",
attribute.String("namespace", string(args.Namespace)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1671,7 +1671,7 @@ func (r *Resolver) UpsertEmptyBatchChange(ctx context.Context, args *graphqlback
func (r *Resolver) CreateBatchSpecFromRaw(ctx context.Context, args *graphqlbackend.CreateBatchSpecFromRawArgs) (_ graphqlbackend.BatchSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.CreateBatchSpecFromRaw",
attribute.String("namespace", string(args.Namespace)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := batchChangesCreateAccess(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -1712,7 +1712,7 @@ func (r *Resolver) CreateBatchSpecFromRaw(ctx context.Context, args *graphqlback
func (r *Resolver) ExecuteBatchSpec(ctx context.Context, args *graphqlbackend.ExecuteBatchSpecArgs) (_ graphqlbackend.BatchSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.ExecuteBatchSpec",
attribute.String("batchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
}
@ -1751,7 +1751,7 @@ func (r *Resolver) ExecuteBatchSpec(ctx context.Context, args *graphqlbackend.Ex
func (r *Resolver) CancelBatchSpecExecution(ctx context.Context, args *graphqlbackend.CancelBatchSpecExecutionArgs) (_ graphqlbackend.BatchSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.CancelBatchSpecExecution",
attribute.String("batchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -1780,7 +1780,7 @@ func (r *Resolver) CancelBatchSpecExecution(ctx context.Context, args *graphqlba
func (r *Resolver) RetryBatchSpecWorkspaceExecution(ctx context.Context, args *graphqlbackend.RetryBatchSpecWorkspaceExecutionArgs) (_ *graphqlbackend.EmptyResponse, err error) {
tr, ctx := trace.New(ctx, "Resolver.RetryBatchSpecWorkspaceExecution",
attribute.String("workspaces", fmt.Sprintf("%+v", args.BatchSpecWorkspaces)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -1821,7 +1821,7 @@ func (r *Resolver) RetryBatchSpecWorkspaceExecution(ctx context.Context, args *g
func (r *Resolver) ReplaceBatchSpecInput(ctx context.Context, args *graphqlbackend.ReplaceBatchSpecInputArgs) (_ graphqlbackend.BatchSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.ReplaceBatchSpecInput",
attribute.String("batchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -1862,7 +1862,7 @@ func (r *Resolver) ReplaceBatchSpecInput(ctx context.Context, args *graphqlbacke
func (r *Resolver) UpsertBatchSpecInput(ctx context.Context, args *graphqlbackend.UpsertBatchSpecInputArgs) (_ graphqlbackend.BatchSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.UpsertBatchSpecInput", attribute.String("batchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := batchChangesCreateAccess(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -1911,7 +1911,7 @@ func (r *Resolver) CancelBatchSpecWorkspaceExecution(ctx context.Context, args *
func (r *Resolver) RetryBatchSpecExecution(ctx context.Context, args *graphqlbackend.RetryBatchSpecExecutionArgs) (_ graphqlbackend.BatchSpecResolver, err error) {
tr, ctx := trace.New(ctx, "Resolver.RetryBatchSpecExecution", attribute.String("batchSpec", string(args.BatchSpec)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -2011,7 +2011,7 @@ func (r *Resolver) AvailableBulkOperations(ctx context.Context, args *graphqlbac
tr, ctx := trace.New(ctx, "Resolver.AvailableBulkOperations",
attribute.String("batchChange", string(args.BatchChange)),
attribute.Int("changesets.len", len(args.Changesets)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if err := enterprise.BatchChangesEnabledForUser(ctx, r.store.DatabaseDB()); err != nil {
return nil, err
@ -2052,7 +2052,7 @@ func (r *Resolver) AvailableBulkOperations(ctx context.Context, args *graphqlbac
func (r *Resolver) CheckBatchChangesCredential(ctx context.Context, args *graphqlbackend.CheckBatchChangesCredentialArgs) (_ *graphqlbackend.EmptyResponse, err error) {
tr, ctx := trace.New(ctx, "Resolver.CheckBatchChangesCredential",
attribute.String("credential", string(args.BatchChangesCredential)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
cred, err := r.batchChangesCredentialByID(ctx, args.BatchChangesCredential)
if err != nil {

View File

@ -53,7 +53,7 @@ func (h *streamHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
}
tr, ctx := trace.New(ctx, "compute.ServeStream", attribute.String("query", args.Query))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
eventWriter, err := streamhttp.NewWriter(w)
if err != nil {

View File

@ -62,7 +62,7 @@ func (r *Resolver) GetCodyContext(ctx context.Context, args graphqlbackend.GetCo
}
tr, ctx := trace.New(ctx, "resolveChunks")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
return iter.MapErr(fileChunks, func(fileChunk *codycontext.FileChunkContext) (graphqlbackend.ContextResultResolver, error) {
return r.fileChunkToResolver(ctx, fileChunk)

View File

@ -55,7 +55,7 @@ func (tc *tracedClient) MakeRequest(
span.SetError(err)
span.SetError(resp.Errors)
span.Finish()
span.End()
return err
}

View File

@ -713,7 +713,7 @@ func (s *PermsSyncer) observe(ctx context.Context, name string) (context.Context
tr, ctx := trace.New(ctx, name)
return ctx, func(typ requestType, id int32, err *error) {
defer tr.Finish()
defer tr.End()
tr.SetAttributes(attribute.Int64("id", int64(id)))
var typLabel string

View File

@ -93,7 +93,7 @@ func (p *Provider) FetchAccount(ctx context.Context, user *types.User, _ []*exts
tr.SetError(err)
}
tr.Finish()
tr.End()
}()
bitbucketUser, err := p.user(ctx, user.Username)

View File

@ -95,7 +95,7 @@ func (p *Provider) FetchAccount(ctx context.Context, user *types.User, _ []*exts
tr.SetError(err)
}
tr.Finish()
tr.End()
}()
emailSet := make(map[string]struct{}, len(verifiedEmails))

View File

@ -91,7 +91,7 @@ type workspaceResolver struct {
func (wr *workspaceResolver) ResolveWorkspacesForBatchSpec(ctx context.Context, batchSpec *batcheslib.BatchSpec) (workspaces []*RepoWorkspace, err error) {
tr, ctx := trace.New(ctx, "workspaceResolver.ResolveWorkspacesForBatchSpec")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
// First, find all repositories that match the batch spec `on` definitions.
// This list is filtered by permissions using database.Repos.List.
@ -232,7 +232,7 @@ var ErrMalformedOnQueryOrRepository = batcheslib.NewValidationError(errors.New("
// resolveRepositoriesOn resolves a single on: entry in a batch spec.
func (wr *workspaceResolver) resolveRepositoriesOn(ctx context.Context, on *batcheslib.OnQueryOrRepository) (_ []*RepoRevision, _ onlib.RepositoryRuleType, err error) {
tr, ctx := trace.New(ctx, "workspaceResolver.resolveRepositoriesOn")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if on.RepositoriesMatchingQuery != "" {
revs, err := wr.resolveRepositoriesMatchingQuery(ctx, on.RepositoriesMatchingQuery)
@ -272,7 +272,7 @@ func (wr *workspaceResolver) resolveRepositoriesOn(ctx context.Context, on *batc
func (wr *workspaceResolver) resolveRepositoryName(ctx context.Context, name string) (_ *RepoRevision, err error) {
tr, ctx := trace.New(ctx, "workspaceResolver.resolveRepositoryName")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repo, err := wr.store.Repos().GetByName(ctx, api.RepoName(name))
if err != nil {
@ -290,7 +290,7 @@ func (wr *workspaceResolver) resolveRepositoryName(ctx context.Context, name str
func (wr *workspaceResolver) resolveRepositoryNameAndBranch(ctx context.Context, name, branch string) (_ *RepoRevision, err error) {
tr, ctx := trace.New(ctx, "workspaceResolver.resolveRepositoryNameAndBranch")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repo, err := wr.store.Repos().GetByName(ctx, api.RepoName(name))
if err != nil {
@ -315,7 +315,7 @@ func (wr *workspaceResolver) resolveRepositoryNameAndBranch(ctx context.Context,
func (wr *workspaceResolver) resolveRepositoriesMatchingQuery(ctx context.Context, query string) (_ []*RepoRevision, err error) {
tr, ctx := trace.New(ctx, "workspaceResolver.resolveRepositorySearch")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
query = setDefaultQueryCount(query)
@ -428,7 +428,7 @@ func (wr *workspaceResolver) runSearch(ctx context.Context, query string, onMatc
func repoToRepoRevisionWithDefaultBranch(ctx context.Context, gitserverClient gitserver.Client, repo *types.Repo, fileMatches []string) (_ *RepoRevision, err error) {
tr, ctx := trace.New(ctx, "repoToRepoRevision")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
branch, commit, err := gitserverClient.GetDefaultBranch(ctx, repo.Name, false)
if err != nil {
@ -448,7 +448,7 @@ const batchIgnoreFilePath = ".batchignore"
func hasBatchIgnoreFile(ctx context.Context, gitserverClient gitserver.Client, r *RepoRevision) (_ bool, err error) {
tr, ctx := trace.New(ctx, "hasBatchIgnoreFile", attribute.Int("repoID", int(r.Repo.ID)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
stat, err := gitserverClient.Stat(ctx, authz.DefaultSubRepoPermsChecker, r.Repo.Name, r.Commit, batchIgnoreFilePath)
if err != nil {

View File

@ -25,7 +25,7 @@ import (
// host to see if the repository actually exists.
func RepoSourceCloneURLToRepoName(ctx context.Context, db database.DB, cloneURL string) (repoName api.RepoName, err error) {
tr, ctx := trace.New(ctx, "RepoSourceCloneURLToRepoName", attribute.String("cloneURL", cloneURL))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if repoName := reposource.CustomCloneURLToRepoName(cloneURL); repoName != "" {
return repoName, nil
@ -107,7 +107,7 @@ func getRepoNameFromService(ctx context.Context, cloneURL string, svc *types.Ext
tr, ctx := trace.New(ctx, "getRepoNameFromService",
attribute.Int64("externalService.ID", svc.ID),
attribute.String("externalService.Kind", svc.Kind))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
cfg, err := extsvc.ParseEncryptableConfig(ctx, svc.Kind, svc.Config)
if err != nil {

View File

@ -205,7 +205,7 @@ func SetupCmdWithPipes(ctx context.Context, args Args) (cmd *exec.Cmd, stdin io.
// Matches returns all matches in all files for which comby finds matches.
func Matches(ctx context.Context, args Args) (_ []*FileMatch, err error) {
tr, ctx := trace.New(ctx, "comby.Matches")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
args.ResultKind = MatchOnly
results, err := Run(ctx, args, ToFileMatch)
@ -222,7 +222,7 @@ func Matches(ctx context.Context, args Args) (_ []*FileMatch, err error) {
// Replacements performs in-place replacement for match and rewrite template.
func Replacements(ctx context.Context, args Args) (_ []*FileReplacement, err error) {
tr, ctx := trace.New(ctx, "comby.Replacements")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
results, err := Run(ctx, args, toFileReplacement)
if err != nil {
@ -239,7 +239,7 @@ func Replacements(ctx context.Context, args Args) (_ []*FileReplacement, err err
// pattern in a rewrite template and outputs the result, newline-separated.
func Outputs(ctx context.Context, args Args) (_ string, err error) {
tr, ctx := trace.New(ctx, "comby.Outputs")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
results, err := Run(ctx, args, toOutput)
if err != nil {

View File

@ -43,7 +43,7 @@ func Trace(ctx context.Context, family, model string) *traceBuilder {
type traceBuilder struct {
start time.Time
tr *trace.Trace
tr trace.Trace
err *error
event honey.Event
ctx context.Context
@ -75,7 +75,7 @@ func (t *traceBuilder) Build() (context.Context, func()) {
err = *(t.err)
}
t.tr.SetError(err)
t.tr.Finish()
t.tr.End()
ev := t.event
if ev == nil {

View File

@ -438,7 +438,7 @@ func (s *userExternalAccountsStore) List(ctx context.Context, opt ExternalAccoun
attribute.Int("accounts.count", len(acct)),
)
tr.Finish()
tr.End()
}()
conds := s.listSQL(opt)
@ -457,7 +457,7 @@ func (s *userExternalAccountsStore) ListForUsers(ctx context.Context, userIDs []
attribute.String("userIDs", fmt.Sprintf("%v", userIDs)),
attribute.Int("accounts.count", count),
)
tr.Finish()
tr.End()
}()
if len(userIDs) == 0 {
return

View File

@ -1348,7 +1348,7 @@ ORDER BY es.id, essj.finished_at DESC
func (e *externalServiceStore) List(ctx context.Context, opt ExternalServicesListOptions) (_ []*types.ExternalService, err error) {
tr, ctx := trace.New(ctx, "externalServiceStore.List")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if opt.OrderByDirection != "ASC" {
opt.OrderByDirection = "DESC"
@ -1443,7 +1443,7 @@ func (e *externalServiceStore) List(ctx context.Context, opt ExternalServicesLis
func (e *externalServiceStore) ListRepos(ctx context.Context, opt ExternalServiceReposListOptions) (_ []*types.ExternalServiceRepo, err error) {
tr, ctx := trace.New(ctx, "externalServiceStore.ListRepos")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
predicate := sqlf.Sprintf("TRUE")

View File

@ -1800,7 +1800,7 @@ func (s *permsStore) observe(ctx context.Context, family string) (context.Contex
tr.SetError(*err)
}
tr.Finish()
tr.End()
}
}

View File

@ -126,7 +126,7 @@ func (s *repoStore) Transact(ctx context.Context) (RepoStore, error) {
// When a repo isn't found or has been blocked, an error is returned.
func (s *repoStore) Get(ctx context.Context, id api.RepoID) (_ *types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.Get")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repos, err := s.listRepos(ctx, tr, ReposListOptions{
IDs: []api.RepoID{id},
@ -195,7 +195,7 @@ func logPrivateRepoAccessGranted(ctx context.Context, db DB, ids []api.RepoID) {
// When a repo isn't found or has been blocked, an error is returned.
func (s *repoStore) GetByName(ctx context.Context, nameOrURI api.RepoName) (_ *types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.GetByName")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repos, err := s.listRepos(ctx, tr, ReposListOptions{
Names: []string{string(nameOrURI)},
@ -234,7 +234,7 @@ func (s *repoStore) GetByName(ctx context.Context, nameOrURI api.RepoName) (_ *t
// When a repo isn't found or has been blocked, an error is returned.
func (s *repoStore) GetByHashedName(ctx context.Context, repoHashedName api.RepoHashedName) (_ *types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.GetByHashedName")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
repos, err := s.listRepos(ctx, tr, ReposListOptions{
HashedName: string(repoHashedName),
@ -256,7 +256,7 @@ func (s *repoStore) GetByHashedName(ctx context.Context, repoHashedName api.Repo
// than the candidate list due to no repository is associated with some IDs.
func (s *repoStore) GetByIDs(ctx context.Context, ids ...api.RepoID) (_ []*types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.GetByIDs")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
// listRepos will return a list of all repos if we pass in an empty ID list,
// so it is better to just return here rather than leak repo info.
@ -284,7 +284,7 @@ func (s *repoStore) GetReposSetByIDs(ctx context.Context, ids ...api.RepoID) (ma
func (s *repoStore) GetRepoDescriptionsByIDs(ctx context.Context, ids ...api.RepoID) (_ map[api.RepoID]string, err error) {
tr, ctx := trace.New(ctx, "repos.GetRepoDescriptionsByIDs")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
opts := ReposListOptions{
Select: []string{"repo.id", "repo.description"},
@ -311,7 +311,7 @@ func (s *repoStore) GetRepoDescriptionsByIDs(ctx context.Context, ids ...api.Rep
func (s *repoStore) Count(ctx context.Context, opt ReposListOptions) (ct int, err error) {
tr, ctx := trace.New(ctx, "repos.Count")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
opt.Select = []string{"COUNT(*)"}
opt.OrderBy = nil
@ -328,7 +328,7 @@ func (s *repoStore) Count(ctx context.Context, opt ReposListOptions) (ct int, er
// number of IDs given if a repo with the given ID does not exist.
func (s *repoStore) Metadata(ctx context.Context, ids ...api.RepoID) (_ []*types.SearchedRepo, err error) {
tr, ctx := trace.New(ctx, "repos.Metadata")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
opts := ReposListOptions{
IDs: ids,
@ -798,7 +798,7 @@ const (
// Matching is done with fuzzy matching, i.e. "query" will match any repo name that matches the regexp `q.*u.*e.*r.*y`
func (s *repoStore) List(ctx context.Context, opt ReposListOptions) (results []*types.Repo, err error) {
tr, ctx := trace.New(ctx, "repos.List")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if len(opt.OrderBy) == 0 {
opt.OrderBy = append(opt.OrderBy, RepoListSort{Field: RepoListID})
@ -810,7 +810,7 @@ func (s *repoStore) List(ctx context.Context, opt ReposListOptions) (results []*
// StreamMinimalRepos calls the given callback for each of the repositories names and ids that match the given options.
func (s *repoStore) StreamMinimalRepos(ctx context.Context, opt ReposListOptions, cb func(*types.MinimalRepo)) (err error) {
tr, ctx := trace.New(ctx, "repos.StreamMinimalRepos")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
opt.Select = minimalRepoColumns
if len(opt.OrderBy) == 0 {
@ -874,7 +874,7 @@ func (s *repoStore) ListMinimalRepos(ctx context.Context, opt ReposListOptions)
})
}
func (s *repoStore) listRepos(ctx context.Context, tr *trace.Trace, opt ReposListOptions) (rs []*types.Repo, err error) {
func (s *repoStore) listRepos(ctx context.Context, tr trace.Trace, opt ReposListOptions) (rs []*types.Repo, err error) {
var privateIDs []api.RepoID
err = s.list(ctx, tr, opt, func(rows *sql.Rows) error {
var r types.Repo
@ -898,7 +898,7 @@ func (s *repoStore) listRepos(ctx context.Context, tr *trace.Trace, opt ReposLis
return rs, err
}
func (s *repoStore) list(ctx context.Context, tr *trace.Trace, opt ReposListOptions, scanRepo func(rows *sql.Rows) error) error {
func (s *repoStore) list(ctx context.Context, tr trace.Trace, opt ReposListOptions, scanRepo func(rows *sql.Rows) error) error {
q, err := s.listSQL(ctx, tr, opt)
if err != nil {
return err
@ -922,7 +922,7 @@ func (s *repoStore) list(ctx context.Context, tr *trace.Trace, opt ReposListOpti
return rows.Err()
}
func (s *repoStore) listSQL(ctx context.Context, tr *trace.Trace, opt ReposListOptions) (*sqlf.Query, error) {
func (s *repoStore) listSQL(ctx context.Context, tr trace.Trace, opt ReposListOptions) (*sqlf.Query, error) {
var ctes, joins, where []*sqlf.Query
querySuffix := sqlf.Sprintf("%s %s", opt.OrderBy.SQL(), opt.LimitOffset.SQL())
@ -1290,7 +1290,7 @@ const listSourcegraphDotComIndexableReposMinStars = 5
func (s *repoStore) ListSourcegraphDotComIndexableRepos(ctx context.Context, opts ListSourcegraphDotComIndexableReposOptions) (results []types.MinimalRepo, err error) {
tr, ctx := trace.New(ctx, "repos.ListIndexable")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
var joins, where []*sqlf.Query
if opts.CloneStatus != types.CloneStatusUnknown {
@ -1368,7 +1368,7 @@ ORDER BY stars DESC NULLS LAST
// Associated external services must already exist.
func (s *repoStore) Create(ctx context.Context, repos ...*types.Repo) (err error) {
tr, ctx := trace.New(ctx, "repos.Create")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
records := make([]*repoRecord, 0, len(repos))
@ -1672,7 +1672,7 @@ func (s *repoStore) GetFirstRepoByCloneURL(ctx context.Context, cloneURL string)
return s.GetByName(ctx, repoName)
}
func parsePattern(tr *trace.Trace, p string, caseSensitive bool) ([]*sqlf.Query, error) {
func parsePattern(tr trace.Trace, p string, caseSensitive bool) ([]*sqlf.Query, error) {
exact, like, pattern, err := parseIncludePattern(p)
if err != nil {
return nil, err
@ -1710,7 +1710,7 @@ func parsePattern(tr *trace.Trace, p string, caseSensitive bool) ([]*sqlf.Query,
return []*sqlf.Query{sqlf.Sprintf("(%s)", sqlf.Join(conds, "OR"))}, nil
}
func parseDescriptionPattern(tr *trace.Trace, p string) ([]*sqlf.Query, error) {
func parseDescriptionPattern(tr trace.Trace, p string) ([]*sqlf.Query, error) {
exact, like, pattern, err := parseIncludePattern(p)
if err != nil {
return nil, err

View File

@ -2381,7 +2381,7 @@ func TestParseIncludePattern(t *testing.T) {
}
tr, _ := trace.New(context.Background(), "")
defer tr.Finish()
defer tr.End()
for pattern, want := range tests {
exact, like, regexp, err := parseIncludePattern(pattern)

View File

@ -74,7 +74,7 @@ func (s *savedSearchStore) ListAll(ctx context.Context) (savedSearches []api.Sav
tr, ctx := trace.New(ctx, "database.SavedSearches.ListAll",
attribute.Int("count", len(savedSearches)),
)
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
q := sqlf.Sprintf(`SELECT
id,
@ -320,7 +320,7 @@ func (s *savedSearchStore) Create(ctx context.Context, newSavedSearch *types.Sav
}
tr, ctx := trace.New(ctx, "database.SavedSearches.Create")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
savedQuery = &types.SavedSearch{
Description: newSavedSearch.Description,
@ -359,7 +359,7 @@ func (s *savedSearchStore) Create(ctx context.Context, newSavedSearch *types.Sav
// proper permissions to perform the update.
func (s *savedSearchStore) Update(ctx context.Context, savedSearch *types.SavedSearch) (savedQuery *types.SavedSearch, err error) {
tr, ctx := trace.New(ctx, "database.SavedSearches.Update")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
savedQuery = &types.SavedSearch{
Description: savedSearch.Description,
@ -396,7 +396,7 @@ func (s *savedSearchStore) Update(ctx context.Context, savedSearch *types.SavedS
// proper permissions to perform the delete.
func (s *savedSearchStore) Delete(ctx context.Context, id int32) (err error) {
tr, ctx := trace.New(ctx, "database.SavedSearches.Delete")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
_, err = s.Handle().ExecContext(ctx, `DELETE FROM saved_searches WHERE ID=$1`, id)
return err
}

View File

@ -150,7 +150,7 @@ func (o *settingsStore) GetLatestSchemaSettings(ctx context.Context, subject api
// responsible for ensuring this or that the response never makes it to a user.
func (o *settingsStore) ListAll(ctx context.Context, impreciseSubstring string) (_ []*api.Settings, err error) {
tr, ctx := trace.New(ctx, "database.Settings.ListAll")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
q := sqlf.Sprintf(`
WITH q AS (

View File

@ -1069,7 +1069,7 @@ type UsersListOptions struct {
func (u *userStore) List(ctx context.Context, opt *UsersListOptions) (_ []*types.User, err error) {
tr, ctx := trace.New(ctx, "database.Users.List", attribute.String("opt", fmt.Sprintf("%+v", opt)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if opt == nil {
opt = &UsersListOptions{}
@ -1083,7 +1083,7 @@ func (u *userStore) List(ctx context.Context, opt *UsersListOptions) (_ []*types
// ListForSCIM lists users along with their email addresses and SCIM ExternalID.
func (u *userStore) ListForSCIM(ctx context.Context, opt *UsersListOptions) (_ []*types.UserForSCIM, err error) {
tr, ctx := trace.New(ctx, "database.Users.ListForSCIM", attribute.String("opt", fmt.Sprintf("%+v", opt)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if opt == nil {
opt = &UsersListOptions{}

View File

@ -112,7 +112,7 @@ func UpdateRepoEmbeddingIndex(
func DownloadRepoEmbeddingIndex(ctx context.Context, uploadStore uploadstore.Store, key string) (_ *RepoEmbeddingIndex, err error) {
tr, ctx := trace.New(ctx, "DownloadRepoEmbeddingIndex", attribute.String("key", key))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
dec, err := newDecoder(ctx, uploadStore, key)
if err != nil {
@ -153,9 +153,9 @@ func newDecoder(ctx context.Context, uploadStore uploadstore.Store, key string)
if err := dec.Decode(&formatVersion); err != nil {
// If there's an error, assume this is an old index that doesn't encode the
// version. Open the file again to reset the reader.
if tr := trace.TraceFromContext(ctx); tr != nil {
tr.AddEvent("failed to decode IndexFormatVersion, assuming that this is an old index that doesn't start with a version", trace.Error(err))
}
trace.FromContext(ctx).AddEvent(
"failed to decode IndexFormatVersion, assuming that this is an old index that doesn't start with a version",
trace.Error(err))
if err := f.Close(); err != nil {
return nil, err

View File

@ -22,14 +22,14 @@ func MaybeEncrypt(ctx context.Context, key Key, data string) (_, keyIdent string
tr, trCtx := trace.New(ctx, "key.Encrypt")
encrypted, err := key.Encrypt(trCtx, []byte(data))
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
if err != nil {
return "", "", err
}
tr, trCtx = trace.New(ctx, "key.Version")
version, err := key.Version(trCtx)
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
if err != nil {
return "", "", errors.Wrap(err, "failed to get encryption key version")
}
@ -52,11 +52,11 @@ func MaybeDecrypt(ctx context.Context, key Key, data, keyIdent string) (string,
tr, innerCtx := trace.New(ctx, "key.Decrypt")
decrypted, err := key.Decrypt(innerCtx, []byte(data))
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
if err != nil {
tr, innerCtx = trace.New(ctx, "key.Version")
version, versionErr := key.Version(innerCtx)
tr.FinishWithErr(&versionErr)
tr.EndWithErr(&versionErr)
if versionErr == nil && keyIdent != version.JSON() {
return "", errors.New("key mismatch: value is encrypted with an encryption key distinct from the one available in site-config")
}

View File

@ -211,7 +211,7 @@ func (c *client) reqPage(ctx context.Context, url string, results any) (*PageTok
func (c *client) do(ctx context.Context, req *http.Request, result any) (code int, err error) {
tr, ctx := trace.New(ctx, "BitbucketCloud.do")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
req = req.WithContext(ctx)
req.URL = c.URL.ResolveReference(req.URL)

View File

@ -976,7 +976,7 @@ func (c *Client) send(ctx context.Context, method, path string, qry url.Values,
func (c *Client) do(ctx context.Context, req *http.Request, result any) (_ *http.Response, err error) {
tr, ctx := trace.New(ctx, "BitbucketServer.do")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
req = req.WithContext(ctx)
req.URL.Path, err = url.JoinPath(c.URL.Path, req.URL.Path) // First join paths so that base path is kept

View File

@ -1601,7 +1601,7 @@ func doRequest(ctx context.Context, logger log.Logger, apiURL *url.URL, auther a
if resp != nil {
tr.SetAttributes(attribute.String("status", resp.Status))
}
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
}()
req = req.WithContext(ctx)

View File

@ -258,7 +258,7 @@ func (c *Client) doWithBaseURL(ctx context.Context, req *http.Request, result an
if resp != nil {
tr.SetAttributes(attribute.String("status", resp.Status))
}
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
}()
req = req.WithContext(ctx)

View File

@ -143,7 +143,7 @@ func (client *HTTPClient) makeGetRequest(ctx context.Context, doer httpcli.Doer,
do := func() (_ *http.Response, err error) {
tr, ctx := trace.New(ctx, "npm")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
req = req.WithContext(ctx)
if err := client.limiter.Wait(ctx); err != nil {

View File

@ -41,7 +41,7 @@ func (p *ReverseProxy) ServeHTTP(repo api.RepoName, method, op string, director
repo.Attr(),
attribute.String("method", method),
attribute.String("op", op))
defer tr.Finish()
defer tr.End()
p.HTTPLimiter.Acquire()
defer p.HTTPLimiter.Release()

View File

@ -145,7 +145,7 @@ func NewPeriodicGoroutine(ctx context.Context, handler Handler, options ...Optio
if r.operation == nil {
r.operation = observation.NewContext(
log.Scoped("periodic", "periodic goroutine handler"),
observation.Tracer(oteltrace.NewNoopTracerProvider()),
observation.Tracer(oteltrace.NewNoopTracerProvider().Tracer("noop")),
observation.Metrics(metrics.NoOpRegisterer),
).Operation(observation.Op{
Name: r.name,

View File

@ -180,7 +180,7 @@ func (c *Client) Highlight(ctx context.Context, q *Query, format HighlightRespon
attribute.String("filepath", q.Filepath),
attribute.String("theme", q.Theme),
attribute.Bool("css", q.CSS))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if isTreesitterBased(q.Engine) && !IsTreesitterSupported(q.Filetype) {
return nil, errors.New("Not a valid treesitter filetype")

View File

@ -530,7 +530,7 @@ func NewRetryPolicy(max int, retryAfterMaxSleepDuration time.Duration) rehttp.Re
defer func() {
// Avoid trace log spam if we haven't invoked the retry policy.
shouldTraceLog := retry || a.Index > 0
if tr := trace.TraceFromContext(a.Request.Context()); tr != nil && shouldTraceLog {
if tr := trace.FromContext(a.Request.Context()); tr.IsRecording() && shouldTraceLog {
fields := []attribute.KeyValue{
attribute.Bool("retry", retry),
attribute.Int("attempt", a.Index),

View File

@ -84,7 +84,7 @@ func (t *externalTransport) RoundTrip(r *http.Request) (*http.Response, error) {
func (t *externalTransport) update(ctx context.Context, config *schema.TlsExternal) *http.Transport {
// No function calls here use the context further
tr, _ := trace.New(ctx, "externalTransport.update")
defer tr.Finish()
defer tr.End()
t.mu.Lock()
defer t.mu.Unlock()

View File

@ -23,7 +23,7 @@ import (
func GetSearchHandlers() map[types.GenerationMethod]InsightsHandler {
searchStream := func(ctx context.Context, query string) (*streaming.TabulationResult, error) {
tr, ctx := trace.New(ctx, "CodeInsightsSearch.searchStream")
defer tr.Finish()
defer tr.End()
decoder, streamResults := streaming.TabulationDecoder()
err := streaming.Search(ctx, query, nil, decoder)
@ -37,7 +37,7 @@ func GetSearchHandlers() map[types.GenerationMethod]InsightsHandler {
computeSearchStream := func(ctx context.Context, query string) (*streaming.ComputeTabulationResult, error) {
decoder, streamResults := streaming.MatchContextComputeDecoder()
tr, ctx := trace.New(ctx, "CodeInsightsSearch.computeMatchContextSearchStream")
defer tr.Finish()
defer tr.End()
err := streaming.ComputeMatchContextStream(ctx, query, decoder)
if err != nil {

View File

@ -26,7 +26,7 @@ type Opts struct {
func Search(ctx context.Context, query string, patternType *string, decoder streamhttp.FrontendStreamDecoder) (err error) {
tr, ctx := trace.New(ctx, "insights.StreamSearch",
attribute.String("query", query))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
req, err := streamhttp.NewRequest(internalapi.Client.URL+"/.internal", query)
if err != nil {
@ -60,7 +60,7 @@ func Search(ctx context.Context, query string, patternType *string, decoder stre
func genericComputeStream(ctx context.Context, handler func(io.Reader) error, query, operation string) (err error) {
tr, ctx := trace.New(ctx, operation)
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
req, err := client.NewComputeStreamRequest(internalapi.Client.URL+"/.internal", query)
if err != nil {

View File

@ -22,7 +22,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_log//logtest",
"@io_opentelemetry_go_otel//:otel",
"@io_opentelemetry_go_otel//attribute",
"@io_opentelemetry_go_otel_trace//:trace",
"@org_uber_go_zap//:zap",

View File

@ -7,7 +7,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/sourcegraph/log"
"github.com/sourcegraph/log/logtest"
"go.opentelemetry.io/otel"
oteltrace "go.opentelemetry.io/otel/trace"
"github.com/sourcegraph/sourcegraph/internal/honey"
@ -20,7 +19,7 @@ import (
// any location that wants to use it for observing operations.
type Context struct {
Logger log.Logger
Tracer *trace.Tracer
Tracer oteltrace.Tracer // may be nil
Registerer prometheus.Registerer
HoneyDataset *honey.Dataset
}
@ -49,6 +48,7 @@ func TestContextTB(t testing.TB) *Context {
return &Context{
Logger: logtest.Scoped(t),
Registerer: metrics.NoOpRegisterer,
Tracer: oteltrace.NewNoopTracerProvider().Tracer("noop"),
}
}
@ -97,7 +97,7 @@ func (c *Context) Operation(args Op) *Operation {
func NewContext(logger log.Logger, opts ...Opt) *Context {
ctx := &Context{
Logger: logger,
Tracer: &trace.Tracer{TracerProvider: otel.GetTracerProvider()},
Tracer: trace.GetTracer(),
Registerer: prometheus.DefaultRegisterer,
}
@ -110,9 +110,9 @@ func NewContext(logger log.Logger, opts ...Opt) *Context {
type Opt func(*Context)
func Tracer(provider oteltrace.TracerProvider) Opt {
func Tracer(tracer oteltrace.Tracer) Opt {
return func(ctx *Context) {
ctx.Tracer = &trace.Tracer{TracerProvider: provider}
ctx.Tracer = tracer
}
}
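
For orientation, a hedged sketch of how a caller wires the new `Tracer(oteltrace.Tracer)` option (the helper name and package paths below are assumed, not part of this diff); omitting the option makes `NewContext` fall back to `trace.GetTracer()`:

```go
// Illustrative sketch only: construct an observation.Context with an explicit
// OpenTelemetry tracer. newObservationContext is a hypothetical helper.
package example

import (
	"github.com/sourcegraph/log"

	"github.com/sourcegraph/sourcegraph/internal/observation"
	"github.com/sourcegraph/sourcegraph/internal/trace"
)

func newObservationContext(logger log.Logger) *observation.Context {
	return observation.NewContext(
		logger,
		// Pass the default Sourcegraph tracer explicitly; leaving this out
		// has the same effect via the fallback in NewContext.
		observation.Tracer(trace.GetTracer()),
	)
}
```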

View File

@ -101,13 +101,17 @@ type TraceLogger interface {
// TestTraceLogger creates an empty TraceLogger that can be used for testing. The logger
// should be 'logtest.Scoped(t)'.
func TestTraceLogger(logger log.Logger) TraceLogger {
return &traceLogger{Logger: logger}
tr, _ := trace.New(context.Background(), "test")
return &traceLogger{
Logger: logger,
trace: tr,
}
}
type traceLogger struct {
opName string
event honey.Event
trace *trace.Trace
trace trace.Trace
context *Context
log.Logger
@ -121,9 +125,7 @@ func (t *traceLogger) initWithTags(attrs ...attribute.KeyValue) {
t.event.AddField(t.opName+"."+toSnakeCase(string(field.Key)), field.Value.AsInterface())
}
}
if t.trace != nil {
t.trace.SetAttributes(attrs...)
}
t.trace.SetAttributes(attrs...)
}
func (t *traceLogger) AddEvent(name string, attributes ...attribute.KeyValue) {
@ -143,9 +145,7 @@ func (t *traceLogger) AddEvent(name string, attributes ...attribute.KeyValue) {
// won't be sent but the "parent" may be sent.
event.Send()
}
if t.trace != nil {
t.trace.AddEvent(name, attributes...)
}
t.trace.AddEvent(name, attributes...)
}
func (t *traceLogger) SetAttributes(attributes ...attribute.KeyValue) {
@ -154,9 +154,7 @@ func (t *traceLogger) SetAttributes(attributes ...attribute.KeyValue) {
t.event.AddField(t.opName+"."+toSnakeCase(string(attr.Key)), attr.Value)
}
}
if t.trace != nil {
t.trace.SetAttributes(attributes...)
}
t.trace.SetAttributes(attributes...)
t.Logger = t.Logger.With(attributesToLogFields(attributes)...)
}
@ -317,13 +315,12 @@ func (op *Operation) With(ctx context.Context, err *error, args Args) (context.C
// startTrace creates a new Trace object and returns the wrapped context. If no tracer was
// supplied on the observation context, it falls back to the package-level default tracer.
func (op *Operation) startTrace(ctx context.Context) (*trace.Trace, context.Context) {
if op.context.Tracer == nil {
return nil, ctx
func (op *Operation) startTrace(ctx context.Context) (trace.Trace, context.Context) {
tracer := op.context.Tracer
if tracer == nil {
tracer = trace.GetTracer()
}
tr, ctx := op.context.Tracer.New(ctx, op.kebabName)
return tr, ctx
return trace.NewInTracer(ctx, tracer, op.kebabName)
}
// emitErrorLogs will log as message if the operation has failed. This log contains the error
@ -373,17 +370,13 @@ func (op *Operation) emitMetrics(err *error, count, elapsed float64, labels []st
// finishTrace will set the error value, log additional fields supplied after the operation's
// execution, and finalize the trace span.
func (op *Operation) finishTrace(err *error, tr *trace.Trace, attrs []attribute.KeyValue) {
if tr == nil {
return
}
func (op *Operation) finishTrace(err *error, tr trace.Trace, attrs []attribute.KeyValue) {
if err != nil {
tr.SetError(*err)
}
tr.SetAttributes(attrs...)
tr.Finish()
tr.End()
}
// applyErrorFilter returns nil if the given error does not pass the registered error filter.

View File

@ -116,7 +116,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus",
"@com_github_prometheus_client_golang//prometheus/promauto",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//:otel",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_x_net//http2",
"@org_golang_x_sync//errgroup",
@ -197,7 +196,6 @@ go_test(
"//internal/rcache",
"//internal/testutil",
"//internal/timeutil",
"//internal/trace",
"//internal/types",
"//internal/types/typestest",
"//lib/errors",
@ -217,7 +215,6 @@ go_test(
"@com_github_sourcegraph_zoekt//:zoekt",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@io_opentelemetry_go_otel//:otel",
"@org_golang_x_time//rate",
],
)

View File

@ -13,7 +13,6 @@ import (
api "github.com/sourcegraph/sourcegraph/internal/api"
database "github.com/sourcegraph/sourcegraph/internal/database"
basestore "github.com/sourcegraph/sourcegraph/internal/database/basestore"
trace "github.com/sourcegraph/sourcegraph/internal/trace"
types "github.com/sourcegraph/sourcegraph/internal/types"
)
@ -60,9 +59,6 @@ type MockStore struct {
// SetMetricsFunc is an instance of a mock function object controlling
// the behavior of the method SetMetrics.
SetMetricsFunc *StoreSetMetricsFunc
// SetTracerFunc is an instance of a mock function object controlling
// the behavior of the method SetTracer.
SetTracerFunc *StoreSetTracerFunc
// TransactFunc is an instance of a mock function object controlling the
// behavior of the method Transact.
TransactFunc *StoreTransactFunc
@ -142,11 +138,6 @@ func NewMockStore() *MockStore {
return
},
},
SetTracerFunc: &StoreSetTracerFunc{
defaultHook: func(trace.Tracer) {
return
},
},
TransactFunc: &StoreTransactFunc{
defaultHook: func(context.Context) (r0 Store, r1 error) {
return
@ -234,11 +225,6 @@ func NewStrictMockStore() *MockStore {
panic("unexpected invocation of MockStore.SetMetrics")
},
},
SetTracerFunc: &StoreSetTracerFunc{
defaultHook: func(trace.Tracer) {
panic("unexpected invocation of MockStore.SetTracer")
},
},
TransactFunc: &StoreTransactFunc{
defaultHook: func(context.Context) (Store, error) {
panic("unexpected invocation of MockStore.Transact")
@ -302,9 +288,6 @@ func NewMockStoreFrom(i Store) *MockStore {
SetMetricsFunc: &StoreSetMetricsFunc{
defaultHook: i.SetMetrics,
},
SetTracerFunc: &StoreSetTracerFunc{
defaultHook: i.SetTracer,
},
TransactFunc: &StoreTransactFunc{
defaultHook: i.Transact,
},
@ -1562,104 +1545,6 @@ func (c StoreSetMetricsFuncCall) Results() []interface{} {
return []interface{}{}
}
// StoreSetTracerFunc describes the behavior when the SetTracer method of
// the parent MockStore instance is invoked.
type StoreSetTracerFunc struct {
defaultHook func(trace.Tracer)
hooks []func(trace.Tracer)
history []StoreSetTracerFuncCall
mutex sync.Mutex
}
// SetTracer delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockStore) SetTracer(v0 trace.Tracer) {
m.SetTracerFunc.nextHook()(v0)
m.SetTracerFunc.appendCall(StoreSetTracerFuncCall{v0})
return
}
// SetDefaultHook sets function that is called when the SetTracer method of
// the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreSetTracerFunc) SetDefaultHook(hook func(trace.Tracer)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// SetTracer method of the parent MockStore instance invokes the hook at the
// front of the queue and discards it. After the queue is empty, the default
// hook function is invoked for any future action.
func (f *StoreSetTracerFunc) PushHook(hook func(trace.Tracer)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *StoreSetTracerFunc) SetDefaultReturn() {
f.SetDefaultHook(func(trace.Tracer) {
return
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *StoreSetTracerFunc) PushReturn() {
f.PushHook(func(trace.Tracer) {
return
})
}
func (f *StoreSetTracerFunc) nextHook() func(trace.Tracer) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *StoreSetTracerFunc) appendCall(r0 StoreSetTracerFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of StoreSetTracerFuncCall objects describing
// the invocations of this function.
func (f *StoreSetTracerFunc) History() []StoreSetTracerFuncCall {
f.mutex.Lock()
history := make([]StoreSetTracerFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// StoreSetTracerFuncCall is an object that describes an invocation of
// method SetTracer on an instance of MockStore.
type StoreSetTracerFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 trace.Tracer
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c StoreSetTracerFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c StoreSetTracerFuncCall) Results() []interface{} {
return []interface{}{}
}
// StoreTransactFunc describes the behavior when the Transact method of the
// parent MockStore instance is invoked.
type StoreTransactFunc struct {

View File

@ -9,7 +9,6 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/lib/pq"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/log"
@ -35,8 +34,6 @@ type Store interface {
// SetMetrics updates metrics for the store in place.
SetMetrics(m StoreMetrics)
// SetTracer updates tracer for the store in place.
SetTracer(t trace.Tracer)
basestore.ShareableStore
With(other basestore.ShareableStore) Store
@ -99,8 +96,6 @@ type store struct {
Logger log.Logger
// Metrics are sent to Prometheus by default.
Metrics StoreMetrics
// Used for tracing calls to store methods. Uses otel.GetTracerProvider() by default.
Tracer trace.Tracer
txtrace *trace.Trace
txctx context.Context
@ -112,7 +107,6 @@ func NewStore(logger log.Logger, db database.DB) Store {
return &store{
Store: s,
Logger: logger,
Tracer: trace.Tracer{TracerProvider: otel.GetTracerProvider()},
}
}
@ -129,14 +123,12 @@ func (s *store) ExternalServiceStore() database.ExternalServiceStore {
}
func (s *store) SetMetrics(m StoreMetrics) { s.Metrics = m }
func (s *store) SetTracer(t trace.Tracer) { s.Tracer = t }
func (s *store) With(other basestore.ShareableStore) Store {
return &store{
Store: s.Store.With(other),
Logger: s.Logger,
Metrics: s.Metrics,
Tracer: s.Tracer,
}
}
@ -157,7 +149,7 @@ func (s *store) transact(ctx context.Context) (stx *store, err error) {
tr.SetError(err)
// End is called in Done in the non-error case
tr.Finish()
tr.End()
}
}(time.Now())
@ -169,7 +161,6 @@ func (s *store) transact(ctx context.Context) (stx *store, err error) {
Store: txBase,
Logger: s.Logger,
Metrics: s.Metrics,
Tracer: s.Tracer,
txtrace: tr,
txctx: ctx,
}, nil
@ -197,7 +188,7 @@ func (s *store) Done(err error) error {
s.Metrics.Done.Observe(secs, 1, nil)
}
tr.Finish()
tr.End()
}(time.Now())
return s.Store.Done(err)
@ -208,9 +199,9 @@ func (s *store) trace(ctx context.Context, family string) (*trace.Trace, context
if txctx == nil {
txctx = ctx
}
tr, txctx := s.Tracer.New(txctx, family)
tr, txctx := trace.New(txctx, family)
ctx = trace.CopyContext(ctx, txctx)
return tr, ctx
return &tr, ctx
}
func (s *store) DeleteExternalServiceReposNotIn(ctx context.Context, svc *types.ExternalService, ids map[api.RepoID]struct{}) (deleted []api.RepoID, err error) {
@ -231,7 +222,7 @@ func (s *store) DeleteExternalServiceReposNotIn(ctx context.Context, svc *types.
}
tr.SetError(err)
tr.Finish()
tr.End()
}(time.Now())
set := make(pq.Int64Array, 0, len(ids))
@ -282,7 +273,7 @@ func (s *store) DeleteExternalServiceRepo(ctx context.Context, svc *types.Extern
}
tr.SetError(err)
tr.Finish()
tr.End()
}(time.Now())
if !s.InTransaction() {
@ -347,7 +338,7 @@ func (s *store) CreateExternalServiceRepo(ctx context.Context, svc *types.Extern
}
tr.SetError(err)
tr.Finish()
tr.End()
}(time.Now())
metadata, err := json.Marshal(r.Metadata)
@ -448,7 +439,7 @@ func (s *store) UpdateRepo(ctx context.Context, r *types.Repo) (saved *types.Rep
}
tr.SetError(err)
tr.Finish()
tr.End()
}(time.Now())
if r.ID == 0 {
@ -507,7 +498,7 @@ func (s *store) UpdateExternalServiceRepo(ctx context.Context, svc *types.Extern
}
tr.SetError(err)
tr.Finish()
tr.End()
}(time.Now())
if r.ID == 0 {
@ -610,7 +601,7 @@ func (s *store) EnqueueSyncJobs(ctx context.Context, isCloud bool) (err error) {
secs := time.Since(began).Seconds()
s.Metrics.EnqueueSyncJobs.Observe(secs, 0, &err)
tr.SetError(err)
tr.Finish()
tr.End()
}(time.Now())
filter := "TRUE"

View File

@ -11,7 +11,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/keegancsmith/sqlf"
"go.opentelemetry.io/otel"
"github.com/sourcegraph/log"
"github.com/sourcegraph/log/logtest"
@ -24,7 +23,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/repos"
"github.com/sourcegraph/sourcegraph/internal/timeutil"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/schema"
@ -583,6 +581,5 @@ func getTestRepoStore(t *testing.T) repos.Store {
logger := logtest.Scoped(t)
store := repos.NewStore(logtest.Scoped(t), database.NewDB(logger, dbtest.NewDB(logger, t)))
store.SetMetrics(repos.NewStoreMetrics())
store.SetTracer(trace.Tracer{TracerProvider: otel.GetTracerProvider()})
return store
}

View File

@ -293,7 +293,7 @@ func (s *Syncer) SyncRepo(ctx context.Context, name api.RepoName, background boo
logger.Debug("SyncRepo started")
tr, ctx := trace.New(ctx, "Syncer.SyncRepo", name.Attr())
defer tr.Finish()
defer tr.End()
repo, err = s.Store.RepoStore().GetByName(ctx, name)
if err != nil && !errcode.IsNotFound(err) {
@ -912,7 +912,7 @@ func (s *Syncer) observeSync(
syncErrors.WithLabelValues(name, owner, syncErrorReason(err)).Inc()
}
tr.Finish()
tr.End()
}
}

View File

@ -133,7 +133,7 @@ func (c *Client) RepoLookup(
if result != nil {
tr.SetAttributes(attribute.Bool("found", result.Repo != nil))
}
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
}()
if internalgrpc.IsGRPCEnabled(ctx) {
@ -443,7 +443,7 @@ func (c *Client) httpPost(ctx context.Context, method string, payload any) (resp
func (c *Client) do(ctx context.Context, req *http.Request) (_ *http.Response, err error) {
tr, ctx := trace.New(ctx, "repoupdater.do")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
req.Header.Set("Content-Type", "application/json")

View File

@ -764,11 +764,9 @@ func isZoektRolloutError(ctx context.Context, err error) bool {
}
metricIgnoredError.WithLabelValues(reason).Inc()
if span := trace.TraceFromContext(ctx); span != nil {
span.AddEvent("rollout",
attribute.String("rollout.reason", reason),
attribute.String("rollout.error", err.Error()))
}
trace.FromContext(ctx).AddEvent("rollout",
attribute.String("rollout.reason", reason),
attribute.String("rollout.error", err.Error()))
return true
}

View File

@ -73,7 +73,7 @@ func (m *meteredSearcher) StreamSearch(ctx context.Context, q query.Q, opts *zoe
tr, ctx := trace.New(ctx, "zoekt."+cat, attrs...)
defer func() {
tr.SetErrorIfNotContext(err)
tr.Finish()
tr.End()
}()
if opts != nil {
fields := []attribute.KeyValue{
@ -232,7 +232,7 @@ func (m *meteredSearcher) List(ctx context.Context, q query.Q, opts *zoekt.ListO
attribute.Stringer("opts", opts),
attribute.String("query", qStr),
)
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
event := honey.NoopEvent()
if honey.Enabled() && cat == "ListAll" {

View File

@ -95,7 +95,7 @@ func (s *searchClient) Plan(
protocol search.Protocol,
) (_ *search.Inputs, err error) {
tr, ctx := trace.New(ctx, "NewSearchInputs", attribute.String("query", searchQuery))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
searchType, err := detectSearchType(version, patternType)
if err != nil {
@ -157,7 +157,7 @@ func (s *searchClient) Execute(
inputs *search.Inputs,
) (_ *search.Alert, err error) {
tr, ctx := trace.New(ctx, "Execute")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
planJob, err := jobutil.NewPlanJob(inputs, inputs.Plan)
if err != nil {

View File

@ -71,7 +71,7 @@ func (l *LogJob) MapChildren(fn job.MapFunc) job.Job {
// invariant that query and pattern error checking has already been performed.
func (l *LogJob) logEvent(ctx context.Context, clients job.RuntimeClients, duration time.Duration) {
tr, ctx := trace.New(ctx, "LogSearchDuration")
defer tr.Finish()
defer tr.End()
var types []string
resultTypes, _ := l.inputs.Query.StringValues(query.FieldType)

View File

@ -13,7 +13,7 @@ import (
type finishSpanFunc func(*search.Alert, error)
func StartSpan(ctx context.Context, stream streaming.Sender, job Job) (*trace.Trace, context.Context, streaming.Sender, finishSpanFunc) {
func StartSpan(ctx context.Context, stream streaming.Sender, job Job) (trace.Trace, context.Context, streaming.Sender, finishSpanFunc) {
tr, ctx := trace.New(ctx, job.Name())
tr.SetAttributes(job.Attributes(VerbosityMax)...)
@ -25,16 +25,16 @@ func StartSpan(ctx context.Context, stream streaming.Sender, job Job) (*trace.Tr
tr.SetAttributes(attribute.String("alert", alert.Title))
}
tr.SetAttributes(attribute.Int64("total_results", observingStream.totalEvents.Load()))
tr.Finish()
tr.End()
}
}
func newObservingStream(tr *trace.Trace, parent streaming.Sender) *observingStream {
func newObservingStream(tr trace.Trace, parent streaming.Sender) *observingStream {
return &observingStream{tr: tr, parent: parent}
}
type observingStream struct {
tr *trace.Trace
tr trace.Trace
parent streaming.Sender
totalEvents atomic.Int64
}

View File

@ -117,7 +117,7 @@ func (r *Resolver) Iterator(ctx context.Context, opts search.RepoOptions) *itera
func (r *Resolver) Resolve(ctx context.Context, op search.RepoOptions) (_ Resolved, errs error) {
tr, ctx := trace.New(ctx, "searchrepos.Resolve", attribute.Stringer("opts", &op))
defer tr.FinishWithErr(&errs)
defer tr.EndWithErr(&errs)
excludePatterns := op.MinusRepoFilters
includePatterns, includePatternRevs := findPatternRevs(op.RepoFilters)
@ -536,7 +536,7 @@ func (r *Resolver) filterRepoHasFileContent(
tr.SetAttributes(attribute.Int("inputRevCount", len(repoRevs)))
defer func() {
tr.SetError(err)
tr.Finish()
tr.End()
}()
// Early return if there are no filters
@ -761,7 +761,7 @@ func computeExcludedRepos(ctx context.Context, db database.DB, op search.RepoOpt
tr.SetAttributes(
attribute.Int("excludedForks", ex.Forks),
attribute.Int("excludedArchived", ex.Archived))
tr.FinishWithErr(&err)
tr.EndWithErr(&err)
}()
excludePatterns := op.MinusRepoFilters

View File

@ -61,7 +61,7 @@ func ResolveSearchContextSpec(ctx context.Context, db database.DB, searchContext
defer func() {
tr.AddEvent("resolved search context", attribute.String("searchContext", fmt.Sprintf("%+v", sc)))
tr.SetErrorIfNotContext(err)
tr.Finish()
tr.End()
}()
parsedSearchContextSpec := ParseSearchContextSpec(searchContextSpec)

View File

@ -45,7 +45,7 @@ func Search(
}
tr, ctx := trace.New(ctx, "searcher.client", repo.Attr(), commit.Attr())
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
r := protocol.Request{
Repo: repo,
@ -122,7 +122,7 @@ func Search(
func textSearchStream(ctx context.Context, url string, body []byte, cb func([]*protocol.FileMatch)) (_ bool, err error) {
tr, ctx := trace.New(ctx, "searcher.textSearchStream")
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
req, err := http.NewRequestWithContext(ctx, "GET", url, bytes.NewReader(body))
if err != nil {

View File

@ -91,7 +91,7 @@ func searchInRepo(ctx context.Context, gitserverClient gitserver.Client, repoRev
tr, ctx := trace.New(ctx, "symbols.searchInRepo",
repoRevs.Repo.Name.Attr(),
attribute.String("rev", inputRev))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
// Do not trigger a repo-updater lookup (e.g.,
// backend.{GitRepo,Repos.ResolveRev}) because that would slow this operation

View File

@ -225,7 +225,7 @@ func PartitionRepos(
}
tr, ctx := trace.New(ctx, "PartitionRepos", attribute.String("type", string(typ)))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
// Only include indexes with symbol information if a symbol request.
var filterFunc func(repo zoekt.MinimalRepoListEntry) bool
@ -756,7 +756,7 @@ func (t *GlobalTextSearchJob) MapChildren(job.MapFunc) job.Job { return t }
// access to on all connected code hosts / external services.
func privateReposForActor(ctx context.Context, logger log.Logger, db database.DB, repoOptions search.RepoOptions) []types.MinimalRepo {
tr, ctx := trace.New(ctx, "privateReposForActor")
defer tr.Finish()
defer tr.End()
userID := int32(0)
if envvar.SourcegraphDotComMode() {

View File

@ -732,7 +732,7 @@ func TestContextWithoutDeadline(t *testing.T) {
}
// We want to keep trace info
if tr2 := trace.TraceFromContext(ctxNoDeadline); tr != tr2 {
if tr2 := trace.FromContext(ctxNoDeadline); !tr.SpanContext().Equal(tr2.SpanContext()) {
t.Error("trace information not propagated")
}

View File

@ -372,7 +372,7 @@ func CookieMiddlewareWithCSRFSafety(
func authenticateByCookie(logger log.Logger, db database.DB, r *http.Request, w http.ResponseWriter) context.Context {
span, ctx := trace.New(r.Context(), "session.authenticateByCookie")
defer span.Finish()
defer span.End()
logger = trace.Logger(ctx, logger)
// If the request is already authenticated from a cookie (and not a token), then do not clobber the request's existing

View File

@ -75,7 +75,7 @@ func (s *service) ForSubject(ctx context.Context, subject api.SettingsSubject) (
tr, ctx := trace.New(ctx, "settings.ForSubject")
defer func() {
tr.SetError(err)
tr.Finish()
tr.End()
}()
subjects, err := s.RelevantSubjects(ctx, subject)

View File

@ -175,7 +175,7 @@ func (c *Client) Search(ctx context.Context, args search.SymbolsParameters) (sym
tr, ctx := trace.New(ctx, "symbols.Search",
args.Repo.Attr(),
args.CommitID.Attr())
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
var response search.SymbolsResponse
@ -275,7 +275,7 @@ func (c *Client) LocalCodeIntel(ctx context.Context, args types.RepoCommitPath)
tr, ctx := trace.New(ctx, "symbols.LocalCodeIntel",
attribute.String("repo", args.Repo),
attribute.String("commitID", args.Commit))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if internalgrpc.IsGRPCEnabled(ctx) {
return c.localCodeIntelGRPC(ctx, args)
@ -340,7 +340,7 @@ func (c *Client) SymbolInfo(ctx context.Context, args types.RepoCommitPathPoint)
tr, ctx := trace.New(ctx, "squirrel.SymbolInfo",
attribute.String("repo", args.Repo),
attribute.String("commitID", args.Commit))
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
if internalgrpc.IsGRPCEnabled(ctx) {
result, err = c.symbolInfoGRPC(ctx, args)
@ -445,7 +445,7 @@ func (c *Client) httpPost(
tr, ctx := trace.New(ctx, "symbols.httpPost",
attribute.String("method", method),
repo.Attr())
defer tr.FinishWithErr(&err)
defer tr.EndWithErr(&err)
symbolsURL, err := c.url(repo)
if err != nil {

View File

@ -10,7 +10,6 @@ go_library(
"httptrace.go",
"logger.go",
"trace.go",
"tracer.go",
"url.go",
],
importpath = "github.com/sourcegraph/sourcegraph/internal/trace",
@ -35,14 +34,10 @@ go_library(
go_test(
name = "trace_test",
srcs = [
"attributes_test.go",
"context_test.go",
],
srcs = ["attributes_test.go"],
embed = [":trace"],
deps = [
"//lib/errors",
"@com_github_stretchr_testify//require",
"@io_opentelemetry_go_otel_trace//:trace",
],
)

View File

@ -1,2 +1,3 @@
** @keegancsmith
**/* @sourcegraph/dev-experience
**/* @camdencheek

View File

@ -2,36 +2,11 @@ package trace
import (
"fmt"
"strings"
"unicode/utf8"
"go.opentelemetry.io/otel/attribute"
)
type attributesStringer []attribute.KeyValue
func (a attributesStringer) String() string {
var b strings.Builder
for i, attr := range a {
if i > 0 {
b.WriteString("\n")
}
var (
key = string(attr.Key)
value = attr.Value.Emit()
)
b.Grow(len(key) + 1 + len(value))
b.WriteString(key)
b.WriteString(":")
b.WriteString(value)
}
return b.String()
}
type stringerFunc func() string
func (s stringerFunc) String() string { return s() }
// Scoped wraps a set of opentelemetry attributes with a prefixed key.
func Scoped(scope string, kvs ...attribute.KeyValue) []attribute.KeyValue {
res := make([]attribute.KeyValue, len(kvs))

View File

@ -9,42 +9,16 @@ import (
"github.com/sourcegraph/sourcegraph/internal/trace/policy"
)
type traceContextKey string
const traceKey = traceContextKey("trace")
// contextWithTrace returns a new context.Context that holds a reference to trace's
// SpanContext. External callers should likely use CopyContext, as this properly propagates all
// tracing context from one context to another.
func contextWithTrace(ctx context.Context, tr *Trace) context.Context {
ctx = oteltrace.ContextWithSpan(ctx, tr.oteltraceSpan)
ctx = context.WithValue(ctx, traceKey, tr)
return ctx
}
// TraceFromContext returns the Trace previously associated with ctx, or
// nil if no such Trace could be found.
func TraceFromContext(ctx context.Context) *Trace {
tr, _ := ctx.Value(traceKey).(*Trace)
if tr == nil {
// There is no Trace in the context, so check for a raw OTel span we can use.
span := oteltrace.SpanFromContext(ctx)
if span.IsRecording() {
tr = &Trace{oteltraceSpan: span}
}
}
return tr
// FromContext returns the Trace previously associated with ctx.
func FromContext(ctx context.Context) Trace {
return Trace{oteltrace.SpanFromContext(ctx)}
}
// CopyContext copies the tracing-related context items from one context to another and returns that
// context.
func CopyContext(ctx context.Context, from context.Context) context.Context {
if tr := TraceFromContext(from); tr != nil {
ctx = contextWithTrace(ctx, tr)
}
if shouldTrace := policy.ShouldTrace(from); shouldTrace {
ctx = policy.WithShouldTrace(ctx, shouldTrace)
}
ctx = oteltrace.ContextWithSpan(ctx, oteltrace.SpanFromContext(from))
ctx = policy.WithShouldTrace(ctx, policy.ShouldTrace(from))
return ctx
}
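
For orientation, a hedged sketch of the slimmed-down context helpers (the helper functions and event/attribute names below are hypothetical): `FromContext` now always returns a usable `Trace`, wrapping OpenTelemetry's no-op span when the context carries none, so call sites need no nil checks, and `CopyContext` carries both the span and the `ShouldTrace` policy flag:

```go
// Illustrative sketch only: annotate and detach are hypothetical call sites
// for the new FromContext and CopyContext helpers.
package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"

	"github.com/sourcegraph/sourcegraph/internal/trace"
)

func annotate(ctx context.Context, reason string) {
	// Safe even when ctx carries no recording span: the wrapped no-op span
	// simply drops the event.
	trace.FromContext(ctx).AddEvent("retrying", attribute.String("reason", reason))
}

func detach(ctx context.Context) context.Context {
	// Carry the span and the ShouldTrace policy flag onto a fresh context,
	// e.g. one that must outlive the request's cancellation.
	return trace.CopyContext(context.Background(), ctx)
}
```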

View File

@ -1,33 +0,0 @@
package trace
import (
"context"
"testing"
oteltrace "go.opentelemetry.io/otel/trace"
"github.com/stretchr/testify/require"
)
func TestTraceFromContext(t *testing.T) {
t.Run("set in context", func(t *testing.T) {
ctx := contextWithTrace(context.Background(), &Trace{})
tr := TraceFromContext(ctx)
require.NotNil(t, tr)
})
t.Run("not set in context", func(t *testing.T) {
tr := TraceFromContext(context.Background())
require.Nil(t, tr)
})
t.Run("not set in context, but raw opentelemetry span is", func(t *testing.T) {
ctx := oteltrace.ContextWithSpan(context.Background(), recordingSpan{})
tr := TraceFromContext(ctx)
require.NotNil(t, tr)
})
}
type recordingSpan struct{ oteltrace.Span }
func (r recordingSpan) IsRecording() bool { return true }

View File

@ -8,7 +8,6 @@ import (
// Logger will set the TraceContext on l if ctx has one. This is an expanded
// convenience function around l.WithTrace for the common case.
//
func Logger(ctx context.Context, l log.Logger) log.Logger {
// Attach any trace (WithTrace no-ops if empty trace is provided)
return l.WithTrace(Context(ctx))
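
For orientation, a hedged sketch of the logging integration (the handler name and message are hypothetical): `Logger` decorates a `log.Logger` with the trace context from `ctx`, and `WithTrace` is a no-op when none is present:

```go
// Illustrative sketch only: correlate log output with the active span.
package example

import (
	"context"

	"github.com/sourcegraph/log"

	"github.com/sourcegraph/sourcegraph/internal/trace"
)

func handle(ctx context.Context, logger log.Logger) {
	// No-op if ctx has no trace; otherwise log lines carry the trace/span IDs.
	logger = trace.Logger(ctx, logger)
	logger.Info("handling request")
}
```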

View File

@ -11,37 +11,42 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// Trace is a combined version of opentelemetry.Span and (optionally)
// golang.org/x/net/trace.Trace, applying its various API functions to both
// underlying trace types. Use New to construct one.
type Trace struct {
// oteltraceSpan is always set.
oteltraceSpan oteltrace.Span
// tracerName is the name of the default tracer for the Sourcegraph backend.
const tracerName = "sourcegraph/internal/trace"
// GetTracer returns the default tracer for the Sourcegraph backend.
func GetTracer() oteltrace.Tracer {
return otel.GetTracerProvider().Tracer(tracerName)
}
// New returns a new Trace with the specified name.
// Trace is a light wrapper of opentelemetry.Span. Use New to construct one.
type Trace struct {
oteltrace.Span // never nil
}
// New returns a new Trace with the specified name in the default tracer.
// For tips on naming, see the OpenTelemetry Span documentation:
// https://opentelemetry.io/docs/specs/otel/trace/api/#span
func New(ctx context.Context, name string, attrs ...attribute.KeyValue) (*Trace, context.Context) {
tr := Tracer{TracerProvider: otel.GetTracerProvider()}
return tr.New(ctx, name, attrs...)
func New(ctx context.Context, name string, attrs ...attribute.KeyValue) (Trace, context.Context) {
return NewInTracer(ctx, GetTracer(), name, attrs...)
}
// SetAttributes sets kv as attributes of the Span.
func (t *Trace) SetAttributes(attributes ...attribute.KeyValue) {
t.oteltraceSpan.SetAttributes(attributes...)
// NewInTracer is the same as New, but uses the given tracer.
func NewInTracer(ctx context.Context, tracer oteltrace.Tracer, name string, attrs ...attribute.KeyValue) (Trace, context.Context) {
ctx, span := tracer.Start(ctx, name, oteltrace.WithAttributes(attrs...))
return Trace{span}, ctx
}
// AddEvent records an event on this span with the given name and attributes.
//
// Note that it differs from the underlying (oteltrace.Span).AddEvent slightly, and only
// accepts attributes for simplicity, and for ease of adapting to nettrace.
func (t *Trace) AddEvent(name string, attributes ...attribute.KeyValue) {
t.oteltraceSpan.AddEvent(name, oteltrace.WithAttributes(attributes...))
// accepts attributes for simplicity.
func (t Trace) AddEvent(name string, attributes ...attribute.KeyValue) {
t.Span.AddEvent(name, oteltrace.WithAttributes(attributes...))
}
// SetError declares that this trace and span resulted in an error.
func (t *Trace) SetError(err error) {
func (t Trace) SetError(err error) {
if err == nil {
return
}
@ -49,32 +54,26 @@ func (t *Trace) SetError(err error) {
// Truncate the error string to avoid tracing massive error messages.
err = truncateError(err, defaultErrorRuneLimit)
t.oteltraceSpan.RecordError(err)
t.oteltraceSpan.SetStatus(codes.Error, err.Error())
t.RecordError(err)
t.SetStatus(codes.Error, err.Error())
}
// SetErrorIfNotContext calls SetError unless err is context.Canceled or
// context.DeadlineExceeded.
func (t *Trace) SetErrorIfNotContext(err error) {
func (t Trace) SetErrorIfNotContext(err error) {
if errors.IsAny(err, context.Canceled, context.DeadlineExceeded) {
err = truncateError(err, defaultErrorRuneLimit)
t.oteltraceSpan.RecordError(err)
t.RecordError(err)
return
}
t.SetError(err)
}
// Finish declares that this trace and span is complete.
// The trace should not be used after calling this method.
func (t *Trace) Finish() {
t.oteltraceSpan.End()
}
// FinishWithErr finishes the span and sets its error value.
// EndWithErr finishes the span and sets its error value.
// It takes a pointer to an error so it can be used directly
// in a defer statement.
func (t *Trace) FinishWithErr(err *error) {
func (t Trace) EndWithErr(err *error) {
t.SetError(*err)
t.Finish()
t.End()
}
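
For orientation, a hedged sketch of the typical call pattern with the slimmed-down `Trace` (the function, span, and attribute names are hypothetical): `New` starts a span in the default tracer, the returned `Trace` embeds the OpenTelemetry span, and `EndWithErr` is the defer-friendly way to record an error and end it:

```go
// Illustrative sketch only: doWork and its span/attribute names are made up.
package example

import (
	"context"

	"go.opentelemetry.io/otel/attribute"

	"github.com/sourcegraph/sourcegraph/internal/trace"
)

func doWork(ctx context.Context, name string) (err error) {
	tr, ctx := trace.New(ctx, "example.doWork", attribute.String("name", name))
	defer tr.EndWithErr(&err) // records err (if non-nil) and ends the span

	tr.AddEvent("starting work")
	// Pass ctx to downstream calls so their spans nest under this one.
	_ = ctx
	return nil
}
```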

View File

@ -1,31 +0,0 @@
package trace
import (
"context"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
oteltrace "go.opentelemetry.io/otel/trace"
)
// A Tracer for trace creation, parameterised over an opentelemetry.TracerProvider. Set
// TracerProvider if you don't want to use the global tracer provider, otherwise the
// global TracerProvider is used.
type Tracer struct {
TracerProvider oteltrace.TracerProvider
}
// New returns a new Trace with the specified name. Must be closed with Finish().
func (t Tracer) New(ctx context.Context, name string, attrs ...attribute.KeyValue) (*Trace, context.Context) {
if t.TracerProvider == nil {
t.TracerProvider = otel.GetTracerProvider()
}
var otelSpan oteltrace.Span
ctx, otelSpan = t.TracerProvider.
Tracer("sourcegraph/internal/trace").
Start(ctx, name, oteltrace.WithAttributes(attrs...))
trace := &Trace{oteltraceSpan: otelSpan}
return trace, contextWithTrace(ctx, trace)
}

View File

@ -320,7 +320,7 @@ func (w *Worker[T]) dequeueAndHandle() (dequeued bool, err error) {
// Register the record as running so it is included in heartbeat updates.
if !w.runningIDSet.Add(record.RecordUID(), cancel) {
workerSpan.FinishWithErr(&ErrJobAlreadyExists)
workerSpan.EndWithErr(&ErrJobAlreadyExists)
return false, ErrJobAlreadyExists
}
@ -359,7 +359,7 @@ func (w *Worker[T]) dequeueAndHandle() (dequeued bool, err error) {
w.options.Metrics.numJobs.Dec()
w.handlerSemaphore <- struct{}{}
w.wg.Done()
workerSpan.Finish()
workerSpan.End()
}()
if err := w.handle(handleCtx, workerCtxWithSpan, record); err != nil {