Mirror of https://github.com/sourcegraph/sourcegraph.git (synced 2026-02-06 17:11:49 +00:00)
Tracing: remove opentracing (#52978)
This removes the remainder of opentracing-related code from sourcegraph/sourcegraph. We still use an opentracing propagator so that downstream services that use opentracing (cough cough Zoekt) will still work (but Zoekt is weird in a lot of ways and is kind of broken already in some cases).
This commit is contained in:
parent c253b05259
commit dfab9c5b4b
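
To make the propagation point in the commit message concrete, here is a minimal, hedged sketch (not code from this commit) of keeping the legacy OpenTracing wire format registered alongside the OpenTelemetry ones, using the contrib propagator packages that go.mod in this commit still pulls in; the actual composition lives in Sourcegraph's oteldefaults.Propagator(), which may differ:

    package main

    import (
        "go.opentelemetry.io/contrib/propagators/jaeger"
        "go.opentelemetry.io/contrib/propagators/ot"
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/propagation"
    )

    func main() {
        // Keep the OpenTracing ("ot-tracer-*") header format registered so downstream
        // services that still extract OpenTracing context (e.g. Zoekt) can join traces.
        otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
            propagation.TraceContext{}, // W3C traceparent/tracestate
            propagation.Baggage{},      // W3C baggage
            jaeger.Jaeger{},            // uber-trace-id
            ot.OT{},                    // legacy OpenTracing headers
        ))
    }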
cmd/frontend/backend/BUILD.bazel (generated, 2 changed lines)
@@ -64,8 +64,6 @@ go_library(
         "//lib/errors",
         "//schema",
         "@com_github_grafana_regexp//:regexp",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
-        "@com_github_opentracing_opentracing_go//log",
         "@com_github_prometheus_client_golang//prometheus",
         "@com_github_prometheus_client_golang//prometheus/promauto",
         "@com_github_sourcegraph_log//:log",
@@ -6,10 +6,9 @@ import (
 	"net/url"
 	"time"
 
-	"github.com/opentracing/opentracing-go"
-	otlog "github.com/opentracing/opentracing-go/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
+	"go.opentelemetry.io/otel/attribute"
 
 	"github.com/sourcegraph/log"
 
@@ -27,6 +26,7 @@ import (
 	"github.com/sourcegraph/sourcegraph/internal/inventory"
 	"github.com/sourcegraph/sourcegraph/internal/repoupdater"
 	"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
+	"github.com/sourcegraph/sourcegraph/internal/trace"
 	"github.com/sourcegraph/sourcegraph/internal/types"
 	"github.com/sourcegraph/sourcegraph/lib/errors"
 )
@@ -181,8 +181,8 @@ func (s *repos) List(ctx context.Context, opt database.ReposListOptions) (repos
 	ctx, done := startTrace(ctx, "Repos", "List", opt, &err)
 	defer func() {
 		if err == nil {
-			if span := opentracing.SpanFromContext(ctx); span != nil {
-				span.LogFields(otlog.Int("result.len", len(repos)))
+			if tr := trace.TraceFromContext(ctx); tr != nil {
+				tr.SetAttributes(attribute.Int("result.len", len(repos)))
 			}
 		}
 		done()
@@ -200,8 +200,8 @@ func (s *repos) ListIndexable(ctx context.Context) (repos []types.MinimalRepo, e
 	ctx, done := startTrace(ctx, "Repos", "ListIndexable", nil, &err)
 	defer func() {
 		if err == nil {
-			if span := opentracing.SpanFromContext(ctx); span != nil {
-				span.LogFields(otlog.Int("result.len", len(repos)))
+			if tr := trace.TraceFromContext(ctx); tr != nil {
+				tr.SetAttributes(attribute.Int("result.len", len(repos)))
 			}
 		}
 		done()
@@ -168,7 +168,7 @@ func (t *requestTracer) TraceQuery(ctx context.Context, queryString string, oper
 }
 
 func (requestTracer) TraceField(ctx context.Context, _, typeName, fieldName string, _ bool, _ map[string]any) (context.Context, func(*gqlerrors.QueryError)) {
-	// We don't call into t.OpenTracingTracer.TraceField since it generates too many spans which is really hard to read.
+	// We don't call into t.tracer.TraceField since it generates too many spans which is really hard to read.
 	start := time.Now()
 	return ctx, func(err *gqlerrors.QueryError) {
 		isErrStr := strconv.FormatBool(err != nil)
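
The repos.List/ListIndexable hunks above show the recurring pattern of this commit: span.LogFields(otlog.X(...)) becomes attribute-based calls on the trace handle. A hedged sketch of the same idea using only the public OpenTelemetry API (the diff itself goes through Sourcegraph's internal/trace wrapper, whose API is assumed from the hunks above):

    package main

    import (
        "context"

        "go.opentelemetry.io/otel/attribute"
        oteltrace "go.opentelemetry.io/otel/trace"
    )

    // recordResultLen attaches the result size to whatever span is in ctx,
    // the rough OTel equivalent of the old span.LogFields(otlog.Int(...)).
    func recordResultLen(ctx context.Context, n int) {
        span := oteltrace.SpanFromContext(ctx) // returns a no-op span when none is present
        span.SetAttributes(attribute.Int("result.len", n))
    }

    func main() {
        recordResultLen(context.Background(), 42)
    }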
cmd/frontend/internal/app/errorutil/BUILD.bazel (generated, 2 changed lines)
@@ -11,7 +11,5 @@ go_library(
         "//internal/env",
         "//internal/trace",
         "@com_github_inconshreveable_log15//:log15",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
-        "@com_github_opentracing_opentracing_go//ext",
     ],
 )
@@ -7,8 +7,6 @@ import (
 	"net/http"
 
 	"github.com/inconshreveable/log15"
-	"github.com/opentracing/opentracing-go"
-	"github.com/opentracing/opentracing-go/ext"
 
 	"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/handlerutil"
 	"github.com/sourcegraph/sourcegraph/internal/conf"
@@ -24,9 +22,8 @@ func Handler(h func(http.ResponseWriter, *http.Request) error) http.Handler {
 		Error: func(w http.ResponseWriter, req *http.Request, status int, err error) {
 			if status < 200 || status >= 400 {
 				var traceURL, traceID string
-				if span := opentracing.SpanFromContext(req.Context()); span != nil {
-					ext.Error.Set(span, true)
-					span.SetTag("err", err)
+				if tr := trace.TraceFromContext(req.Context()); tr != nil {
+					tr.SetError(err)
 					traceID = trace.ID(req.Context())
 					traceURL = trace.URL(traceID, conf.DefaultClient())
 				}
cmd/frontend/internal/app/ui/BUILD.bazel (generated, 3 changed lines)
@@ -63,11 +63,10 @@ go_library(
         "@com_github_grafana_regexp//:regexp",
         "@com_github_inconshreveable_log15//:log15",
         "@com_github_nytimes_gziphandler//:gziphandler",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
-        "@com_github_opentracing_opentracing_go//ext",
         "@com_github_prometheus_client_golang//prometheus",
         "@com_github_prometheus_client_golang//prometheus/promauto",
         "@com_github_sourcegraph_log//:log",
+        "@io_opentelemetry_go_otel//attribute",
     ],
 )
 
@@ -14,8 +14,7 @@ import (
 	"github.com/NYTimes/gziphandler"
 	"github.com/gorilla/mux"
 	"github.com/inconshreveable/log15"
-	"github.com/opentracing/opentracing-go"
-	"github.com/opentracing/opentracing-go/ext"
+	"go.opentelemetry.io/otel/attribute"
 
 	"github.com/sourcegraph/sourcegraph/cmd/frontend/envvar"
 	"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
@@ -498,10 +497,9 @@ func serveErrorNoDebug(w http.ResponseWriter, r *http.Request, db database.DB, e
 
 	// Determine trace URL and log the error.
 	var traceURL string
-	if span := opentracing.SpanFromContext(r.Context()); span != nil {
-		ext.Error.Set(span, true)
-		span.SetTag("err", err)
-		span.SetTag("error-id", errorID)
+	if tr := trace.TraceFromContext(r.Context()); tr != nil {
+		tr.SetError(err)
+		tr.SetAttributes(attribute.String("error-id", errorID))
 		traceURL = trace.URL(trace.ID(r.Context()), conf.DefaultClient())
 	}
 	log15.Error("ui HTTP handler error response", "method", r.Method, "request_uri", r.URL.RequestURI(), "status_code", statusCode, "error", err, "error_id", errorID, "trace", traceURL)
@@ -39,9 +39,9 @@ func newOperations(observationCtx *observation.Context) *operations {
 		})
 	}
 
-	// suboperations do not have their own metrics but do have their
-	// own opentracing spans. This allows us to more granularly track
-	// the latency for parts of a request without noising up Prometheus.
+	// suboperations do not have their own metrics but do have their own spans.
+	// This allows us to more granularly track the latency for parts of a
+	// request without noising up Prometheus.
 	subOp := func(name string) *observation.Operation {
 		return observationCtx.Operation(observation.Op{
 			Name: fmt.Sprintf("gitserver.api.%s", name),
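
The error-path hunks above replace ext.Error.Set plus span.SetTag with tr.SetError plus tr.SetAttributes. A hedged sketch of the analogous calls against the public OpenTelemetry span API (internal/trace presumably wraps something like this):

    package main

    import (
        "context"
        "errors"

        "go.opentelemetry.io/otel/attribute"
        "go.opentelemetry.io/otel/codes"
        oteltrace "go.opentelemetry.io/otel/trace"
    )

    // markSpanFailed is a plain-OTel analogue of the tr.SetError(err) +
    // tr.SetAttributes(...) calls introduced above.
    func markSpanFailed(ctx context.Context, err error, errorID string) {
        span := oteltrace.SpanFromContext(ctx)
        span.RecordError(err)                    // error recorded as a span event
        span.SetStatus(codes.Error, err.Error()) // replaces ext.Error.Set(span, true)
        span.SetAttributes(attribute.String("error-id", errorID))
    }

    func main() {
        markSpanFailed(context.Background(), errors.New("render failed"), "abc123")
    }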
cmd/searcher/internal/search/BUILD.bazel (generated, 1 changed line)
@@ -45,7 +45,6 @@ go_library(
         "//schema",
         "@com_github_bmatcuk_doublestar//:doublestar",
         "@com_github_grafana_regexp//:regexp",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
         "@com_github_prometheus_client_golang//prometheus",
         "@com_github_prometheus_client_golang//prometheus/promauto",
         "@com_github_roaringbitmap_roaring//:roaring",
@@ -13,7 +13,6 @@ import (
 	"sync"
 	"time"
 
-	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"go.opentelemetry.io/otel/attribute"
@@ -192,7 +191,7 @@ func (s *Store) PrepareZipPaths(ctx context.Context, repo api.RepoName, commit a
 	// TODO: consider adding a cache method that doesn't actually bother opening the file,
 	// since we're just going to close it again immediately.
 	cacheHit := true
-	bgctx := opentracing.ContextWithSpan(context.Background(), opentracing.SpanFromContext(ctx))
+	bgctx := trace.CopyContext(context.Background(), ctx)
 	f, err := s.cache.Open(bgctx, []string{key}, func(ctx context.Context) (io.ReadCloser, error) {
 		cacheHit = false
 		return s.fetch(ctx, repo, commit, filter, paths)
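
trace.CopyContext(context.Background(), ctx) above replaces the opentracing ContextWithSpan/SpanFromContext pair. A hedged guess at what such a helper boils down to in plain OpenTelemetry terms (the real internal/trace implementation may also carry over the should-trace policy flag):

    package main

    import (
        "context"

        oteltrace "go.opentelemetry.io/otel/trace"
    )

    // copyTraceContext returns a context rooted at dst (so deadlines and
    // cancellation from src are dropped) that still carries src's span, so
    // background work is recorded under the original request trace.
    func copyTraceContext(dst, src context.Context) context.Context {
        return oteltrace.ContextWithSpan(dst, oteltrace.SpanFromContext(src))
    }

    func main() {
        reqCtx := context.Background() // stand-in for a real request context
        bgctx := copyTraceContext(context.Background(), reqCtx)
        _ = bgctx
    }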
@@ -115,7 +115,7 @@ func NewUploadProcessorHandler(
 func (h *handler) Handle(ctx context.Context, logger log.Logger, upload uploadsshared.Upload) (err error) {
 	var requeued bool
 
-	ctx, otLogger, endObservation := h.handleOp.With(ctx, &err, observation.Args{})
+	ctx, tr, endObservation := h.handleOp.With(ctx, &err, observation.Args{})
 	defer func() {
 		endObservation(1, observation.Args{Attrs: append(
 			createLogFields(upload),
@@ -123,7 +123,7 @@ func (h *handler) Handle(ctx context.Context, logger log.Logger, upload uploadss
 		)})
 	}()
 
-	requeued, err = h.HandleRawUpload(ctx, logger, upload, h.uploadStore, otLogger)
+	requeued, err = h.HandleRawUpload(ctx, logger, upload, h.uploadStore, tr)
 
 	return err
 }
@@ -238,7 +238,7 @@ func (s *permsStore) LoadUserPermissions(ctx context.Context, userID int32) (p [
 	defer func() {
 		tracingFields := []attribute.KeyValue{}
 		for _, perm := range p {
-			tracingFields = append(tracingFields, perm.TracingFields()...)
+			tracingFields = append(tracingFields, perm.Attrs()...)
 		}
 		save(&err, tracingFields...)
 	}()
@@ -270,7 +270,7 @@ func (s *permsStore) LoadRepoPermissions(ctx context.Context, repoID int32) (p [
 	defer func() {
 		tracingFields := []attribute.KeyValue{}
 		for _, perm := range p {
-			tracingFields = append(tracingFields, perm.TracingFields()...)
+			tracingFields = append(tracingFields, perm.Attrs()...)
 		}
 		save(&err, tracingFields...)
 	}()
@@ -371,7 +371,7 @@ func (s *permsStore) setUserRepoPermissions(ctx context.Context, p []authz.Permi
 	defer func() {
 		f := []attribute.KeyValue{}
 		for _, permission := range p {
-			f = append(f, permission.TracingFields()...)
+			f = append(f, permission.Attrs()...)
 		}
 		save(&err, f...)
 	}()
@@ -702,7 +702,7 @@ DO UPDATE SET
 
 func (s *permsStore) LoadUserPendingPermissions(ctx context.Context, p *authz.UserPendingPermissions) (err error) {
 	ctx, save := s.observe(ctx, "LoadUserPendingPermissions", "")
-	defer func() { save(&err, p.TracingFields()...) }()
+	defer func() { save(&err, p.Attrs()...) }()
 
 	id, ids, updatedAt, err := s.loadUserPendingPermissions(ctx, p, "")
 	if err != nil {
@@ -717,7 +717,7 @@ func (s *permsStore) LoadUserPendingPermissions(ctx context.Context, p *authz.Us
 
 func (s *permsStore) SetRepoPendingPermissions(ctx context.Context, accounts *extsvc.Accounts, p *authz.RepoPermissions) (err error) {
 	ctx, save := s.observe(ctx, "SetRepoPendingPermissions", "")
-	defer func() { save(&err, append(p.TracingFields(), accounts.TracingFields()...)...) }()
+	defer func() { save(&err, append(p.Attrs(), accounts.TracingFields()...)...) }()
 
 	var txs *permsStore
 	if s.InTransaction() {
@@ -961,7 +961,7 @@ AND object_type = %s
 
 func (s *permsStore) GrantPendingPermissions(ctx context.Context, p *authz.UserGrantPermissions) (err error) {
 	ctx, save := s.observe(ctx, "GrantPendingPermissions", "")
-	defer func() { save(&err, p.TracingFields()...) }()
+	defer func() { save(&err, p.Attrs()...) }()
 
 	var txs *permsStore
 	if s.InTransaction() {
go.mod (5 changed lines)
@@ -168,8 +168,7 @@ require (
 	github.com/moby/buildkit v0.11.3
 	github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opentracing-contrib/go-stdlib v1.0.0
-	github.com/opentracing/opentracing-go v1.2.0
+	github.com/opentracing/opentracing-go v1.2.0 // indirect
 	github.com/peterbourgon/ff v1.7.1
 	github.com/peterbourgon/ff/v3 v3.3.0
 	github.com/peterhellberg/link v1.1.0
@@ -226,7 +225,7 @@ require (
 	go.opentelemetry.io/contrib/propagators/jaeger v1.14.0
 	go.opentelemetry.io/contrib/propagators/ot v1.14.0
 	go.opentelemetry.io/otel v1.13.0
-	go.opentelemetry.io/otel/bridge/opentracing v1.13.0
+	go.opentelemetry.io/otel/bridge/opentracing v1.13.0 // indirect
 	go.opentelemetry.io/otel/exporters/jaeger v1.13.0
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.13.0
 	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.13.0
go.sum (2 changed lines)
@@ -1818,8 +1818,6 @@ github.com/opencontainers/selinux v1.8.0/go.mod h1:RScLhm78qiWa2gbVCcGkC7tCGdgk3
 github.com/opencontainers/selinux v1.8.2/go.mod h1:MUIHuUEvKB1wtJjQdOyYRgOnLD2xAPP8dBsCoU0KuF8=
 github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
 github.com/opencontainers/selinux v1.10.1/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
-github.com/opentracing-contrib/go-stdlib v1.0.0 h1:TBS7YuVotp8myLon4Pv7BtCBzOTo1DeZCld0Z63mW2w=
-github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NHziSmy2W5GbdQZFUHADCBU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs=
 github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc=
@@ -97,8 +97,7 @@ const (
 	SourceAPI PermsSource = "api"
 )
 
-// TracingFields returns tracing fields for the opentracing log.
-func (p *Permission) TracingFields() []attribute.KeyValue {
+func (p *Permission) Attrs() []attribute.KeyValue {
 	return []attribute.KeyValue{
 		attribute.Int("SrcPermissions.UserID", int(p.UserID)),
 		attribute.Int("SrcPermissions.RepoID", int(p.RepoID)),
@@ -120,8 +119,7 @@ type RepoPermissions struct {
 	Unrestricted bool // Anyone can see the repo, overrides all other permissions
 }
 
-// TracingFields returns tracing fields for the opentracing log.
-func (p *RepoPermissions) TracingFields() []attribute.KeyValue {
+func (p *RepoPermissions) Attrs() []attribute.KeyValue {
 	attrs := []attribute.KeyValue{
 		attribute.Int("RepoPermissions.RepoID", int(p.RepoID)),
 		attribute.Stringer("RepoPermissions.Perm", p.Perm),
@@ -154,8 +152,7 @@ type UserGrantPermissions struct {
 	AccountID string
 }
 
-// TracingFields returns tracing fields for the opentracing log.
-func (p *UserGrantPermissions) TracingFields() []attribute.KeyValue {
+func (p *UserGrantPermissions) Attrs() []attribute.KeyValue {
 	attrs := []attribute.KeyValue{
 		attribute.Int("UserGrantPermissions.UserID", int(p.UserID)),
 		attribute.Int("UserGrantPermissions.UserExternalAccountID", int(p.UserExternalAccountID)),
@@ -202,8 +199,7 @@ func (p *UserPendingPermissions) GenerateSortedIDsSlice() []int32 {
 	return p.IDs.Sorted(collections.NaturalCompare[int32])
 }
 
-// TracingFields returns tracing fields for the opentracing log.
-func (p *UserPendingPermissions) TracingFields() []attribute.KeyValue {
+func (p *UserPendingPermissions) Attrs() []attribute.KeyValue {
 	attrs := []attribute.KeyValue{
 		attribute.Int64("UserPendingPermissions.ID", p.ID),
 		attribute.String("UserPendingPermissions.ServiceType", p.ServiceType),
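
The TracingFields() to Attrs() rename above keeps the same shape: a value type exposing its fields as []attribute.KeyValue for use with spans and observations. An illustrative, hypothetical type (names invented here, not taken from the Sourcegraph code):

    package main

    import (
        "fmt"

        "go.opentelemetry.io/otel/attribute"
    )

    type examplePermission struct {
        UserID int32
        RepoID int32
        Source string
    }

    // Attrs mirrors the convention used above: expose the interesting fields
    // as OpenTelemetry attributes so callers can attach them to a span.
    func (p examplePermission) Attrs() []attribute.KeyValue {
        return []attribute.KeyValue{
            attribute.Int("permission.userID", int(p.UserID)),
            attribute.Int("permission.repoID", int(p.RepoID)),
            attribute.String("permission.source", p.Source),
        }
    }

    func main() {
        fmt.Println(examplePermission{UserID: 1, RepoID: 2, Source: "api"}.Attrs())
    }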
internal/diskcache/BUILD.bazel (generated, 1 changed line)
@@ -16,7 +16,6 @@ go_library(
         "//internal/observation",
         "//internal/trace",
         "//lib/errors",
-        "@com_github_opentracing_opentracing_go//ext",
         "@io_opentelemetry_go_otel//attribute",
     ],
 )
@@ -14,7 +14,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opentracing/opentracing-go/ext"
 	"go.opentelemetry.io/otel/attribute"
 
 	"github.com/sourcegraph/sourcegraph/internal/observation"
@@ -130,7 +129,7 @@ func (s *store) Open(ctx context.Context, key []string, fetcher Fetcher) (file *
 
 func (s *store) OpenWithPath(ctx context.Context, key []string, fetcher FetcherWithPath) (file *File, err error) {
 	ctx, trace, endObservation := s.observe.cachedFetch.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
-		attribute.String(string(ext.Component), s.component),
+		attribute.String("component", s.component),
 	}})
 	defer endObservation(1, observation.Args{})
 
internal/gitserver/BUILD.bazel (generated, 1 changed line)
@@ -43,7 +43,6 @@ go_library(
         "//lib/errors",
         "@com_github_go_git_go_git_v5//plumbing/format/config",
         "@com_github_golang_groupcache//lru",
-        "@com_github_opentracing_opentracing_go//log",
         "@com_github_prometheus_client_golang//prometheus",
         "@com_github_prometheus_client_golang//prometheus/promauto",
         "@com_github_sourcegraph_conc//pool",
@@ -21,7 +21,6 @@ import (
 
 	"github.com/go-git/go-git/v5/plumbing/format/config"
 	"github.com/golang/groupcache/lru"
-	"github.com/opentracing/opentracing-go/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"go.opentelemetry.io/otel/attribute"
@@ -275,22 +274,6 @@ func stableTimeRepr(t time.Time) string {
 	return string(s)
 }
 
-func (opts *CommitGraphOptions) LogFields() []log.Field {
-	var since string
-	if opts.Since != nil {
-		since = stableTimeRepr(*opts.Since)
-	} else {
-		since = stableTimeRepr(time.Unix(0, 0))
-	}
-
-	return []log.Field{
-		log.String("commit", opts.Commit),
-		log.Int("limit", opts.Limit),
-		log.Bool("allrefs", opts.AllRefs),
-		log.String("since", since),
-	}
-}
-
 // CommitGraph returns the commit graph for the given repository as a mapping
 // from a commit to its parents. If a commit is supplied, the returned graph will
 // be rooted at the given commit. If a non-zero limit is supplied, at most that
@@ -58,9 +58,9 @@ func newOperations(observationCtx *observation.Context) *operations {
 		})
 	}
 
-	// suboperations do not have their own metrics but do have their
-	// own opentracing spans. This allows us to more granularly track
-	// the latency for parts of a request without noising up Prometheus.
+	// suboperations do not have their own metrics but do have their own spans.
+	// This allows us to more granularly track the latency for parts of a
+	// request without noising up Prometheus.
 	subOp := func(name string) *observation.Operation {
 		return observationCtx.Operation(observation.Op{
 			Name: fmt.Sprintf("gitserver.client.%s", name),
internal/gitserver/protocol/BUILD.bazel (generated, 1 changed line)
@@ -18,7 +18,6 @@ go_library(
         "//internal/gitserver/v1:gitserver",
         "//internal/search/result",
         "//lib/errors",
-        "@com_github_opentracing_opentracing_go//log",
         "@io_opentelemetry_go_otel//attribute",
         "@org_golang_google_protobuf//types/known/durationpb",
         "@org_golang_google_protobuf//types/known/timestamppb",
@@ -5,7 +5,6 @@ import (
 	"strings"
 	"time"
 
-	"github.com/opentracing/opentracing-go/log"
 	"go.opentelemetry.io/otel/attribute"
 	"google.golang.org/protobuf/types/known/durationpb"
 	"google.golang.org/protobuf/types/known/timestamppb"
@@ -299,13 +298,6 @@ func (bl *BatchLogRequest) FromProto(p *proto.BatchLogRequest) {
 	bl.Format = p.GetFormat()
 }
 
-func (req BatchLogRequest) LogFields() []log.Field {
-	return []log.Field{
-		log.Int("numRepoCommits", len(req.RepoCommits)),
-		log.String("format", req.Format),
-	}
-}
-
 func (req BatchLogRequest) SpanAttributes() []attribute.KeyValue {
 	return []attribute.KeyValue{
 		attribute.Int("numRepoCommits", len(req.RepoCommits)),
@@ -53,7 +53,7 @@ func DialOptions(logger log.Logger) []grpc.DialOption {
 			propagator.StreamClientPropagator(actor.ActorPropagator{}),
 			propagator.StreamClientPropagator(policy.ShouldTracePropagator{}),
 			propagator.StreamClientPropagator(requestclient.Propagator{}),
-			otelStreamInterceptor,
+			otelgrpc.StreamClientInterceptor(),
 			internalerrs.PrometheusStreamClientInterceptor,
 			internalerrs.LoggingStreamClientInterceptor(logger),
 		),
@@ -62,19 +62,13 @@ func DialOptions(logger log.Logger) []grpc.DialOption {
 			propagator.UnaryClientPropagator(actor.ActorPropagator{}),
 			propagator.UnaryClientPropagator(policy.ShouldTracePropagator{}),
 			propagator.UnaryClientPropagator(requestclient.Propagator{}),
-			otelUnaryInterceptor,
+			otelgrpc.UnaryClientInterceptor(),
 			internalerrs.PrometheusUnaryClientInterceptor,
 			internalerrs.LoggingUnaryClientInterceptor(logger),
 		),
 	}
 }
 
-var (
-	// Package-level variables because they are somewhat expensive to recreate every time
-	otelStreamInterceptor = otelgrpc.StreamClientInterceptor()
-	otelUnaryInterceptor  = otelgrpc.UnaryClientInterceptor()
-)
-
 // NewServer creates a new *grpc.Server with the default options
 func NewServer(logger log.Logger, additionalOpts ...grpc.ServerOption) *grpc.Server {
 	s := grpc.NewServer(append(ServerOptions(logger), additionalOpts...)...)
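
For context on the interceptor lines above, a hedged, standalone sketch of wiring the otelgrpc client interceptors into a dial (the address, credentials, and option set here are placeholders, not Sourcegraph's actual defaults):

    package main

    import (
        "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
        "google.golang.org/grpc"
        "google.golang.org/grpc/credentials/insecure"
    )

    func dial(addr string) (*grpc.ClientConn, error) {
        return grpc.Dial(addr,
            grpc.WithTransportCredentials(insecure.NewCredentials()),
            // Trace outgoing unary and streaming RPCs with OpenTelemetry.
            grpc.WithChainUnaryInterceptor(otelgrpc.UnaryClientInterceptor()),
            grpc.WithChainStreamInterceptor(otelgrpc.StreamClientInterceptor()),
        )
    }

    func main() {
        _, _ = dial("127.0.0.1:3178")
    }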
internal/httpcli/BUILD.bazel (generated, 3 changed lines)
@@ -28,9 +28,6 @@ go_library(
         "//lib/errors",
         "//schema",
         "@com_github_gregjones_httpcache//:httpcache",
-        "@com_github_opentracing_contrib_go_stdlib//nethttp",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
-        "@com_github_opentracing_opentracing_go//log",
         "@com_github_prometheus_client_golang//prometheus",
         "@com_github_prometheus_client_golang//prometheus/promauto",
         "@com_github_puerkitobio_rehttp//:rehttp",
@@ -17,12 +17,10 @@ import (
 
 	"github.com/PuerkitoBio/rehttp"
 	"github.com/gregjones/httpcache"
-	"github.com/opentracing-contrib/go-stdlib/nethttp"
-	"github.com/opentracing/opentracing-go"
-	otlog "github.com/opentracing/opentracing-go/log"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	"github.com/sourcegraph/log"
+	"go.opentelemetry.io/otel/attribute"
 
 	"github.com/sourcegraph/sourcegraph/internal/actor"
 	"github.com/sourcegraph/sourcegraph/internal/env"
@@ -461,11 +459,6 @@ func TracedTransportOpt(cli *http.Client) error {
 	// Propagate trace policy
 	cli.Transport = &policy.Transport{RoundTripper: cli.Transport}
 
-	// Keep the legacy nethttp transport for now that was used before - otelhttp
-	// should propagate traces in the same way, but we keep this just in case.
-	// This used to be in policy.Transport, but is clearer here.
-	cli.Transport = &nethttp.Transport{RoundTripper: cli.Transport}
-
 	// Collect and propagate OpenTelemetry trace (among other formats initialized
 	// in internal/tracer)
 	cli.Transport = instrumentation.NewHTTPTransport(cli.Transport)
@@ -537,20 +530,19 @@ func NewRetryPolicy(max int, retryAfterMaxSleepDuration time.Duration) rehttp.Re
 		defer func() {
 			// Avoid trace log spam if we haven't invoked the retry policy.
 			shouldTraceLog := retry || a.Index > 0
-			if span := opentracing.SpanFromContext(a.Request.Context()); span != nil && shouldTraceLog {
-				fields := []otlog.Field{
-					otlog.Event("request-retry-decision"),
-					otlog.Bool("retry", retry),
-					otlog.Int("attempt", a.Index),
-					otlog.String("method", a.Request.Method),
-					otlog.String("url", a.Request.URL.String()),
-					otlog.Int("status", status),
-					otlog.String("retry-after", retryAfterHeader),
+			if tr := trace.TraceFromContext(a.Request.Context()); tr != nil && shouldTraceLog {
+				fields := []attribute.KeyValue{
+					attribute.Bool("retry", retry),
+					attribute.Int("attempt", a.Index),
+					attribute.String("method", a.Request.Method),
+					attribute.Stringer("url", a.Request.URL),
+					attribute.Int("status", status),
+					attribute.String("retry-after", retryAfterHeader),
 				}
 				if a.Error != nil {
-					fields = append(fields, otlog.Error(a.Error))
+					fields = append(fields, trace.Error(a.Error))
 				}
-				span.LogFields(fields...)
+				tr.AddEvent("request-retry-decision", fields...)
 			}
 
 			// Update request context with latest retry for logging middleware
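
The retry-policy hunk above swaps otlog fields plus span.LogFields for attributes plus tr.AddEvent. A hedged sketch of the equivalent call against the public OpenTelemetry span API:

    package main

    import (
        "context"
        "net/http"

        "go.opentelemetry.io/otel/attribute"
        oteltrace "go.opentelemetry.io/otel/trace"
    )

    // logRetryDecision records the retry metadata as a timestamped event on the
    // current span, roughly what tr.AddEvent("request-retry-decision", ...) does.
    func logRetryDecision(ctx context.Context, retry bool, attempt, status int, req *http.Request) {
        span := oteltrace.SpanFromContext(ctx)
        span.AddEvent("request-retry-decision", oteltrace.WithAttributes(
            attribute.Bool("retry", retry),
            attribute.Int("attempt", attempt),
            attribute.String("method", req.Method),
            attribute.Stringer("url", req.URL),
            attribute.Int("status", status),
        ))
    }

    func main() {
        req, _ := http.NewRequest(http.MethodGet, "https://example.com", nil)
        logRetryDecision(context.Background(), true, 1, 429, req)
    }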
@@ -83,7 +83,7 @@ type Operation struct {
 }
 
 // TraceLogger is returned from With and can be used to add timestamped key and
-// value pairs into a related opentracing span. It has an embedded Logger that can be used
+// value pairs into a related span. It has an embedded Logger that can be used
 // directly to log messages in the context of a trace.
 type TraceLogger interface {
 	// AddEvent logs an event with name and fields on the trace.
@@ -99,7 +99,7 @@ type store struct {
 	Logger log.Logger
 	// Metrics are sent to Prometheus by default.
 	Metrics StoreMetrics
-	// Used for tracing calls to store methods. Uses opentracing.GlobalTracer() by default.
+	// Used for tracing calls to store methods. Uses otel.GetTracerProvider() by default.
 	Tracer trace.Tracer
 
 	txtrace *trace.Trace
internal/search/BUILD.bazel (generated, 2 changed lines)
@@ -27,13 +27,11 @@ go_library(
         "//internal/search/query",
         "//internal/search/result",
         "//internal/trace",
-        "//internal/trace/ot",
         "//internal/trace/policy",
         "//internal/types",
         "//lib/errors",
         "//schema",
         "@com_github_grafana_regexp//:regexp",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
         "@com_github_sourcegraph_log//:log",
         "@com_github_sourcegraph_zoekt//:zoekt",
         "@com_github_sourcegraph_zoekt//query",
internal/search/backend/BUILD.bazel (generated, 3 changed lines)
@@ -23,8 +23,6 @@ go_library(
         "//internal/honey",
         "//internal/httpcli",
         "//internal/trace",
-        "//internal/trace/ot",
-        "//internal/trace/policy",
         "//internal/types",
         "//lib/errors",
         "//schema",
@@ -32,7 +30,6 @@ go_library(
         "@com_github_inconshreveable_log15//:log15",
         "@com_github_keegancsmith_rpc//:rpc",
         "@com_github_mitchellh_hashstructure//:hashstructure",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
         "@com_github_prometheus_client_golang//prometheus",
         "@com_github_prometheus_client_golang//prometheus/promauto",
         "@com_github_sourcegraph_log//:log",
@@ -6,7 +6,6 @@ import (
 	"time"
 
 	"github.com/keegancsmith/rpc"
-	"github.com/opentracing/opentracing-go"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/prometheus/client_golang/prometheus/promauto"
 	sglog "github.com/sourcegraph/log"
@@ -17,8 +16,6 @@ import (
 	"github.com/sourcegraph/sourcegraph/internal/actor"
 	"github.com/sourcegraph/sourcegraph/internal/honey"
 	"github.com/sourcegraph/sourcegraph/internal/trace"
-	"github.com/sourcegraph/sourcegraph/internal/trace/ot"
-	"github.com/sourcegraph/sourcegraph/internal/trace/policy"
 )
 
 var requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
@@ -103,20 +100,6 @@ func (m *meteredSearcher) StreamSearch(ctx context.Context, q query.Q, opts *zoe
 		event.AddField("query.size", len(b))
 	}
 
-	if isLeaf && opts != nil && policy.ShouldTrace(ctx) {
-		// Replace any existing spanContext with a new one, given we've done additional tracing
-		spanContext := make(map[string]string)
-		if span := opentracing.SpanFromContext(ctx); span == nil {
-			m.log.Warn("ctx does not have a trace span associated with it")
-		} else if err := ot.GetTracer(ctx).Inject(span.Context(), opentracing.TextMap, opentracing.TextMapCarrier(spanContext)); err == nil { //nolint:staticcheck // Drop once we get rid of OpenTracing
-			newOpts := *opts
-			newOpts.SpanContext = spanContext
-			opts = &newOpts
-		} else {
-			m.log.Warn("error injecting new span context into map", sglog.Error(err))
-		}
-	}
-
 	// Instrument the RPC layer
 	var writeRequestStart, writeRequestDone time.Time
 	if isLeaf {
@@ -8,7 +8,6 @@ import (
 	"time"
 
 	"github.com/grafana/regexp"
-	"github.com/opentracing/opentracing-go" //nolint:staticcheck // Drop once we get rid of OpenTracing
 	"github.com/sourcegraph/log"
 	"github.com/sourcegraph/zoekt"
 	zoektquery "github.com/sourcegraph/zoekt/query"
@@ -16,7 +15,6 @@ import (
 
 	"github.com/sourcegraph/sourcegraph/internal/conf"
 	"github.com/sourcegraph/sourcegraph/internal/search/result"
-	"github.com/sourcegraph/sourcegraph/internal/trace/ot" //nolint:staticcheck // Drop once we get rid of OpenTracing
 	"github.com/sourcegraph/sourcegraph/internal/trace/policy"
 
 	"github.com/sourcegraph/sourcegraph/internal/api"
@@ -188,11 +186,9 @@ type ZoektParameters struct {
 
 // ToSearchOptions converts the parameters to options for the Zoekt search API.
 func (o *ZoektParameters) ToSearchOptions(ctx context.Context, logger log.Logger) *zoekt.SearchOptions {
-	shouldTrace, spanContext := getSpanContext(ctx, logger)
 	defaultTimeout := 20 * time.Second
 	searchOpts := &zoekt.SearchOptions{
-		Trace:       shouldTrace,
-		SpanContext: spanContext,
+		Trace:             policy.ShouldTrace(ctx),
 		MaxWallTime:       defaultTimeout,
 		ChunkMatches:      true,
 		UseKeywordScoring: o.KeywordScoring,
@@ -239,21 +235,6 @@ func (o *ZoektParameters) ToSearchOptions(ctx context.Context, logger log.Logger
 	return searchOpts
 }
 
-func getSpanContext(ctx context.Context, logger log.Logger) (shouldTrace bool, spanContext map[string]string) {
-	if !policy.ShouldTrace(ctx) {
-		return false, nil
-	}
-
-	spanContext = make(map[string]string)
-	if span := opentracing.SpanFromContext(ctx); span != nil {
-		if err := ot.GetTracer(ctx).Inject(span.Context(), opentracing.TextMap, opentracing.TextMapCarrier(spanContext)); err != nil { //nolint:staticcheck // Drop once we get rid of OpenTracing
-			logger.Warn("Error injecting span context into map", log.Error(err))
-			return true, nil
-		}
-	}
-	return true, spanContext
-}
-
 // SearcherParameters the inputs for a search fulfilled by the Searcher service
 // (cmd/searcher). Searcher fulfills (1) unindexed literal and regexp searches
 // and (2) structural search requests.
@@ -50,7 +50,6 @@ func ID(ctx context.Context) string {
 // Context retrieves the full trace context, if any, from context - this includes
 // both TraceID and SpanID.
 func Context(ctx context.Context) log.TraceContext {
-	// get the OpenTelemetry span, which is always present via the OpenTracing bridge
 	if otelSpan := oteltrace.SpanContextFromContext(ctx); otelSpan.IsValid() {
 		return log.TraceContext{
 			TraceID: otelSpan.TraceID().String(),
internal/trace/ot/BUILD.bazel (generated, 12 changed lines)
@@ -1,12 +0,0 @@
-load("@io_bazel_rules_go//go:def.bzl", "go_library")
-
-go_library(
-    name = "ot",
-    srcs = ["ot.go"],
-    importpath = "github.com/sourcegraph/sourcegraph/internal/trace/ot",
-    visibility = ["//:__subpackages__"],
-    deps = [
-        "//internal/trace/policy",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
-    ],
-)
@@ -1,34 +0,0 @@
-// Package ot wraps github.com/opentracing/opentracing-go and
-// github.com./opentracing-contrib/go-stdlib with selective tracing behavior that is toggled on and
-// off with the presence of a context item (uses context.Context). This context item is propagated
-// across API boundaries through a HTTP header (X-Sourcegraph-Should-Trace).
-package ot
-
-import (
-	"context"
-
-	"github.com/opentracing/opentracing-go"
-
-	"github.com/sourcegraph/sourcegraph/internal/trace/policy"
-)
-
-// Deprecated: Use otel.Tracer(...) from go.opentelemetry.io/otel instead.
-//
-// GetTracer returns the tracer to use for the given context. If ShouldTrace returns true, it
-// returns the global tracer. Otherwise, it returns the NoopTracer.
-func GetTracer(ctx context.Context) opentracing.Tracer {
-	return getTracer(ctx, opentracing.GlobalTracer())
-}
-
-// getTracer is like GetTracer, but accepts a tracer as an argument. If ShouldTrace returns false,
-// it returns the NoopTracer. If it returns true and the passed-in tracer is not nil, it returns the
-// passed-in tracer. Otherwise, it returns the global tracer.
-func getTracer(ctx context.Context, tracer opentracing.Tracer) opentracing.Tracer {
-	if !policy.ShouldTrace(ctx) {
-		return opentracing.NoopTracer{}
-	}
-	if tracer == nil {
-		return opentracing.GlobalTracer()
-	}
-	return tracer
-}
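
The deleted package's own deprecation note points at the replacement: ask the global OpenTelemetry provider for a tracer. A hedged sketch of that usage (the tracer name here is arbitrary):

    package main

    import (
        "context"

        "go.opentelemetry.io/otel"
    )

    func doWork(ctx context.Context) {
        // otel.Tracer uses the globally registered TracerProvider; spans started
        // from ctx nest under whatever span ctx already carries.
        ctx, span := otel.Tracer("example/instead-of-internal-trace-ot").Start(ctx, "doWork")
        defer span.End()

        _ = ctx // pass ctx down so child operations attach to this span
    }

    func main() {
        doWork(context.Background())
    }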
internal/tracer/BUILD.bazel (generated, 3 changed lines)
@@ -6,7 +6,6 @@ go_library(
     srcs = [
         "conf.go",
         "doc.go",
-        "logged_ot.go",
         "logged_otel.go",
         "otel.go",
         "otel_should_trace.go",
@@ -26,11 +25,9 @@ go_library(
         "//internal/version",
         "//lib/errors",
         "//schema",
-        "@com_github_opentracing_opentracing_go//:opentracing-go",
         "@com_github_sourcegraph_log//:log",
         "@io_opentelemetry_go_otel//:otel",
         "@io_opentelemetry_go_otel//semconv/v1.4.0:v1_4_0",
-        "@io_opentelemetry_go_otel_bridge_opentracing//:opentracing",
         "@io_opentelemetry_go_otel_sdk//resource",
         "@io_opentelemetry_go_otel_sdk//trace",
         "@io_opentelemetry_go_otel_sdk//trace/tracetest",
@@ -1,48 +0,0 @@
-package tracer
-
-import (
-	"sync/atomic"
-
-	"github.com/opentracing/opentracing-go"
-	"github.com/sourcegraph/log"
-)
-
-// loggedOTTracer wraps opentracing.Tracer.
-type loggedOTTracer struct {
-	tracer opentracing.Tracer
-
-	debug  *atomic.Bool
-	logger log.Logger
-}
-
-var _ opentracing.Tracer = &loggedOTTracer{}
-
-func newLoggedOTTracer(logger log.Logger, tracer opentracing.Tracer, debug *atomic.Bool) *loggedOTTracer {
-	return &loggedOTTracer{
-		tracer: tracer,
-		logger: logger.AddCallerSkip(1),
-		debug:  debug,
-	}
-}
-
-func (t *loggedOTTracer) StartSpan(operationName string, opts ...opentracing.StartSpanOption) opentracing.Span {
-	if t.debug.Load() {
-		t.logger.Info("StartSpan",
-			log.String("operationName", operationName))
-	}
-	return t.tracer.StartSpan(operationName, opts...)
-}
-
-func (t *loggedOTTracer) Inject(sm opentracing.SpanContext, format any, carrier any) error {
-	if t.debug.Load() {
-		t.logger.Info("Inject")
-	}
-	return t.tracer.Inject(sm, format, carrier)
-}
-
-func (t *loggedOTTracer) Extract(format any, carrier any) (opentracing.SpanContext, error) {
-	if t.debug.Load() {
-		t.logger.Info("Extract")
-	}
-	return t.tracer.Extract(format, carrier)
-}
@@ -5,10 +5,8 @@ import (
 	"sync/atomic"
 	"text/template"
 
-	"github.com/opentracing/opentracing-go"
 	"github.com/sourcegraph/log"
 	"go.opentelemetry.io/otel"
-	otelbridge "go.opentelemetry.io/otel/bridge/opentracing"
 	oteltracesdk "go.opentelemetry.io/otel/sdk/trace"
 	oteltrace "go.opentelemetry.io/otel/trace"
 	"go.uber.org/automaxprocs/maxprocs"
@@ -102,8 +100,7 @@ func Init(logger log.Logger, c WatchableConfigurationSource) {
 
 	// Create and set up global tracers from provider. We will be making updates to these
 	// tracers through the debugMode ref and underlying provider.
-	otTracer, otelTracerProvider := newBridgeTracers(logger, provider, debugMode)
-	opentracing.SetGlobalTracer(otTracer)
+	otelTracerProvider := newTracer(logger, provider, debugMode)
 	otel.SetTracerProvider(otelTracerProvider)
 
 	// Initially everything is disabled since we haven't read conf yet - start a goroutine
@@ -123,26 +120,10 @@ func Init(logger log.Logger, c WatchableConfigurationSource) {
 	})
 }
 
-// newBridgeTracers creates an opentracing.Tracer that exports all OpenTracing traces,
-// allowing us to continue leveraging the OpenTracing API (which is a predecessor to
-// OpenTelemetry tracing) without making changes to existing tracing code. The returned
-// opentracing.Tracer and oteltrace.TracerProvider should be set as global defaults for
-// their respective libraries.
-//
-// All configuration should be sourced directly from the environment using the specification
-// laid out in https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md
-func newBridgeTracers(logger log.Logger, provider *oteltracesdk.TracerProvider, debugMode *atomic.Bool) (opentracing.Tracer, oteltrace.TracerProvider) {
+func newTracer(logger log.Logger, provider *oteltracesdk.TracerProvider, debugMode *atomic.Bool) oteltrace.TracerProvider {
 	propagator := oteldefaults.Propagator()
 	otel.SetTextMapPropagator(propagator)
 
-	// Set up otBridgeTracer for converting OpenTracing API calls to OpenTelemetry, and
-	// otelTracerProvider for the inverse.
-	otBridgeTracer := otelbridge.NewBridgeTracer()
-	otBridgeTracer.SetTextMapPropagator(propagator)
-	otelTracerProvider := otelbridge.NewTracerProvider(otBridgeTracer, provider)
-	otBridgeTracer.SetOpenTelemetryTracer(
-		otelTracerProvider.Tracer("sourcegraph/internal/tracer.opentracing-bridge"))
-
 	// Set up logging
 	otelLogger := logger.AddCallerSkip(2).Scoped("otel", "OpenTelemetry library")
 	otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
@@ -152,16 +133,6 @@ func newBridgeTracers(logger log.Logger, provider *oteltracesdk.TracerProvider,
 			otelLogger.Debug("error encountered", log.Error(err))
 		}
 	}))
-	bridgeLogger := logger.AddCallerSkip(2).Scoped("ot.bridge", "OpenTracing to OpenTelemetry compatibility layer")
-	otBridgeTracer.SetWarningHandler(func(msg string) {
-		if debugMode.Load() {
-			bridgeLogger.Warn(msg)
-		} else {
-			bridgeLogger.Debug(msg)
-		}
-	})
-
 	// Wrap each tracer in additional logging
-	return newLoggedOTTracer(logger, otBridgeTracer, debugMode),
-		newLoggedOtelTracerProvider(logger, otelTracerProvider, debugMode)
+	return newLoggedOtelTracerProvider(logger, provider, debugMode)
 }
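
With the bridge gone, tracer setup reduces to configuring an SDK provider and installing it globally. A hedged, stripped-down sketch (exporters, sampling, and the logging wrapper used by newTracer above are omitted):

    package main

    import (
        "go.opentelemetry.io/otel"
        "go.opentelemetry.io/otel/propagation"
        sdktrace "go.opentelemetry.io/otel/sdk/trace"
    )

    func main() {
        provider := sdktrace.NewTracerProvider() // add span processors/exporters here
        otel.SetTracerProvider(provider)
        otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
            propagation.TraceContext{},
            propagation.Baggage{},
        ))
    }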
@@ -32,7 +32,7 @@ func TestConfigWatcher(t *testing.T) {
 		noopProcessor = oteltracesdk.NewBatchSpanProcessor(tracetest.NewNoopExporter())
 	)
 
-	otTracer, otelTracerProvider := newBridgeTracers(logger, provider, debugMode)
+	otelTracerProvider := newTracer(logger, provider, debugMode)
 	// otelTracer represents a tracer a caller might hold. All tracers should be updated
 	// by updating the underlying provider.
 	otelTracer := otelTracerProvider.Tracer(t.Name())
@@ -101,12 +101,6 @@ func TestConfigWatcher(t *testing.T) {
 
 	// span recorder must be registered, and spans from both tracers must go to it
 	var spanCount int
-	t.Run("ot bridge spans go to new processor", func(t *testing.T) {
-		span := otTracer.StartSpan("foo")
-		span.Finish()
-		spanCount++
-		assert.Len(t, spansRecorder.Ended(), spanCount)
-	})
 	t.Run("otel tracer spans go to new processor", func(t *testing.T) {
 		_, span := otelTracer.Start(policy.WithShouldTrace(ctx, true), "foo")
 		span.End()
@@ -133,11 +127,6 @@ func TestConfigWatcher(t *testing.T) {
 	doUpdate()
 
 	// no new spans should register
-	t.Run("ot bridge spans not go to processor", func(t *testing.T) {
-		span := otTracer.StartSpan("foo")
-		span.Finish()
-		assert.Len(t, spansRecorder.Ended(), spanCount)
-	})
 	t.Run("otel tracer spans not go to processor", func(t *testing.T) {
 		_, span := otelTracer.Start(policy.WithShouldTrace(ctx, true), "foo")
 		span.End()
@@ -180,12 +169,6 @@ func TestConfigWatcher(t *testing.T) {
 
 	// span recorder must be registered, and spans from both tracers must go to it
 	var spanCount1 int
-	{
-		span := otTracer.StartSpan("foo")
-		span.Finish()
-		spanCount1++
-		assert.Len(t, spansRecorder1.Ended(), spanCount1)
-	}
 	{
 		_, span := otelTracer.Start(ctx, "foo") // does not need ShouldTrace due to policy
 		span.End()
@@ -194,7 +177,6 @@ func TestConfigWatcher(t *testing.T) {
 	}
 
 	// should have debug set
-	assert.True(t, otTracer.(*loggedOTTracer).debug.Load())
 	assert.True(t, otelTracerProvider.(*loggedOtelTracerProvider).debug.Load())
 
 	// should set global policy
@@ -217,12 +199,6 @@ func TestConfigWatcher(t *testing.T) {
 
 	// span recorder must be registered, and spans from both tracers must go to it
 	var spanCount2 int
-	{
-		span := otTracer.StartSpan("foo")
-		span.Finish()
-		spanCount2++
-		assert.Len(t, spansRecorder2.Ended(), spanCount2)
-	}
 	{
 		_, span := otelTracer.Start(ctx, "foo")
 		span.End()