feat(sg): sqlite-backed local store for sg analytics (#63578)

Removes the existing `sg analytics` command and replaces it with a
SQLite-backed local store that records one event per invocation. Events
are held locally until a background publisher pushes them to BigQuery.
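
Below is a hedged sketch of the lifecycle these events go through. `runWithAnalytics` and the `"sg example"` command name are hypothetical; `NewInvocation`, `InvocationFailed`, and `InvocationSucceeded` are the functions added in `dev/sg/internal/analytics` (an internal package, so this only builds from within `dev/sg`), and the background publisher later ships completed rows to BigQuery and deletes them from the local store.

```go
package main

import (
	"context"

	"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
)

// runWithAnalytics is a hypothetical helper (not part of this change) showing
// the flow the new hooks implement: record an invocation row in the local
// SQLite store, run the command, then mark the row completed so the background
// publisher can ship it to BigQuery and delete it locally.
func runWithAnalytics(ctx context.Context, version, command string, run func(context.Context) error) error {
	cmdCtx, err := analytics.NewInvocation(ctx, version, map[string]any{
		"command": command,
		"args":    []string{},
		"nargs":   0,
	})
	if err != nil {
		// sg itself only warns and keeps going when the store is unavailable.
		cmdCtx = ctx
	}

	if runErr := run(cmdCtx); runErr != nil {
		_ = analytics.InvocationFailed(cmdCtx, runErr)
		return runErr
	}
	_ = analytics.InvocationSucceeded(cmdCtx)
	return nil
}

func main() {
	_ = runWithAnalytics(context.Background(), "dev", "sg example", func(ctx context.Context) error {
		return nil // the real command action would run here
	})
}
```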

## Test plan

```
sqlite> select * from analytics;
0190792e-af38-751a-b93e-8481290a18b6|1|{"args":[],"command":"sg help","flags":{"help":null,"sg":null},"nargs":0,"end_time":"2024-07-03T15:20:21.069837706Z","success":true}
0190792f-4e2b-7c35-98d6-ad73cab82391|1|{"args":["dotcom"],"command":"sg live","flags":{"live":null,"sg":null},"nargs":1,"end_time":"2024-07-03T15:21:04.563232429Z","success":true}
```
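
The same table can also be inspected from Go rather than the sqlite shell. A minimal sketch, assuming the database lives at `~/.sourcegraph/analytics.sqlite` (sg's home directory) and using the same `modernc.org/sqlite` driver the store itself registers:

```go
package main

import (
	"database/sql"
	"fmt"
	"os"
	"path"

	_ "modernc.org/sqlite" // same pure-Go SQLite driver the store uses
)

func main() {
	home, err := os.UserHomeDir()
	if err != nil {
		panic(err)
	}
	// Assumed location of the sg home directory; the store itself resolves it
	// via root.GetSGHomePath().
	dbPath := path.Join(home, ".sourcegraph", "analytics.sqlite")

	db, err := sql.Open("sqlite", "file://"+dbPath)
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Columns match the CREATE TABLE statement in this change.
	rows, err := db.Query(`SELECT event_uuid, schema_version, metadata_json FROM analytics`)
	if err != nil {
		panic(err)
	}
	defer rows.Close()

	for rows.Next() {
		var id, schemaVersion, metadata string
		if err := rows.Scan(&id, &schemaVersion, &metadata); err != nil {
			panic(err)
		}
		fmt.Printf("%s\t%s\t%s\n", id, schemaVersion, metadata)
	}
}
```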


---------

Co-authored-by: William Bezuidenhout <william.bezuidenhout@sourcegraph.com>
Noah S-C 2024-07-09 11:47:49 +01:00 committed by GitHub
parent d9dff1191a
commit e669330215
23 changed files with 783 additions and 758 deletions

View File

@ -2708,8 +2708,8 @@ def go_dependencies():
name = "com_github_google_pprof",
build_file_proto_mode = "disable_global",
importpath = "github.com/google/pprof",
sum = "h1:PxlBVtIFHR/mtWk2i0gTEdCz+jBnqiuHNSki0epDbVs=",
version = "v0.0.0-20231205033806-a5a03c77bf08",
sum = "h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=",
version = "v0.0.0-20240409012703-83162a5b38cd",
)
go_repository(
name = "com_github_google_renameio",
@ -3339,8 +3339,8 @@ def go_dependencies():
name = "com_github_ianlancetaylor_demangle",
build_file_proto_mode = "disable_global",
importpath = "github.com/ianlancetaylor/demangle",
sum = "h1:BA4a7pe6ZTd9F8kXETBoijjFJ/ntaa//1wiH9BZu4zU=",
version = "v0.0.0-20230524184225-eabc099b10ab",
sum = "h1:KwWnWVWCNtNq/ewIX7HIKnELmEx2nDP42yskD/pi7QE=",
version = "v0.0.0-20240312041847-bd984b5ce465",
)
go_repository(
name = "com_github_imdario_mergo",
@ -3864,8 +3864,8 @@ def go_dependencies():
name = "com_github_klauspost_cpuid_v2",
build_file_proto_mode = "disable_global",
importpath = "github.com/klauspost/cpuid/v2",
sum = "h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=",
version = "v2.2.5",
sum = "h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=",
version = "v2.2.7",
)
go_repository(
name = "com_github_klauspost_pgzip",
@ -8559,15 +8559,43 @@ def go_dependencies():
name = "org_modernc_cc_v3",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/cc/v3",
sum = "h1:P3g79IUS/93SYhtoeaHW+kRCIrYaxJ27MFPv+7kaTOw=",
version = "v3.40.0",
sum = "h1:QoR1Sn3YWlmA1T4vLaKZfawdVtSiGx8H+cEojbC7v1Q=",
version = "v3.41.0",
)
go_repository(
name = "org_modernc_cc_v4",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/cc/v4",
sum = "h1:dycHFB/jDc3IyacKipCNSDrjIC0Lm1hyoWOZTRR20Lk=",
version = "v4.21.2",
)
go_repository(
name = "org_modernc_ccgo_v3",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/ccgo/v3",
sum = "h1:Mkgdzl46i5F/CNR/Kj80Ri59hC8TKAhZrYSaqvkwzUw=",
version = "v3.16.13",
sum = "h1:o3OmOqx4/OFnl4Vm3G8Bgmqxnvxnh0nbxeT5p/dWChA=",
version = "v3.17.0",
)
go_repository(
name = "org_modernc_ccgo_v4",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/ccgo/v4",
sum = "h1:6wrtRozgrhCxieCeJh85QsxkX/2FFrT9hdaWPlbn4Zo=",
version = "v4.17.10",
)
go_repository(
name = "org_modernc_fileutil",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/fileutil",
sum = "h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=",
version = "v1.3.0",
)
go_repository(
name = "org_modernc_gc_v2",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/gc/v2",
sum = "h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=",
version = "v2.4.1",
)
go_repository(
name = "org_modernc_gc_v3",
@ -8580,8 +8608,8 @@ def go_dependencies():
name = "org_modernc_libc",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/libc",
sum = "h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk=",
version = "v1.41.0",
sum = "h1:uau0VoiT5hnR+SpoWekCKbLqm7v6dhRL3hI+NQhgN3M=",
version = "v1.52.1",
)
go_repository(
name = "org_modernc_mathutil",
@ -8594,8 +8622,8 @@ def go_dependencies():
name = "org_modernc_memory",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/memory",
sum = "h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=",
version = "v1.7.2",
sum = "h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=",
version = "v1.8.0",
)
go_repository(
name = "org_modernc_opt",
@ -8604,12 +8632,19 @@ def go_dependencies():
sum = "h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=",
version = "v0.1.3",
)
go_repository(
name = "org_modernc_sortutil",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/sortutil",
sum = "h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=",
version = "v1.2.0",
)
go_repository(
name = "org_modernc_sqlite",
build_file_proto_mode = "disable_global",
importpath = "modernc.org/sqlite",
sum = "h1:0lOXGrycJPptfHDuohfYgNqoe4hu+gYuN/pKgY5XjS4=",
version = "v1.29.6",
sum = "h1:YFhPVfu2iIgUf9kuA1CR7iiHdcEEsI2i+yjRYHscyxk=",
version = "v1.30.1",
)
go_repository(
name = "org_modernc_strutil",

View File

@ -10,7 +10,6 @@ go_library(
"live.go",
"main.go",
"os.go",
"sg_analytics.go",
"sg_audit.go",
"sg_backport.go",
"sg_bazel.go",
@ -84,6 +83,7 @@ go_library(
"//dev/sg/sams",
"//dev/team",
"//internal/accesstoken",
"//internal/collections",
"//internal/database",
"//internal/database/basestore",
"//internal/database/connections/live",
@ -123,8 +123,6 @@ go_library(
"@com_github_sourcegraph_run//:run",
"@com_github_urfave_cli_v2//:cli",
"@in_gopkg_yaml_v3//:yaml_v3",
"@io_opentelemetry_go_otel//attribute",
"@io_opentelemetry_go_otel_trace//:trace",
"@org_golang_x_exp//maps",
"@org_golang_x_mod//semver",
"@org_golang_x_oauth2//:oauth2",

View File

@ -6,8 +6,6 @@ import (
"strings"
"github.com/urfave/cli/v2"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
@ -36,30 +34,42 @@ func addAnalyticsHooks(commandPath []string, commands []*cli.Command) {
// Wrap action with analytics
wrappedAction := command.Action
command.Action = func(cmd *cli.Context) (actionErr error) {
var span *analytics.Span
cmd.Context, span = analytics.StartSpan(cmd.Context, fullCommand, "action",
trace.WithAttributes(
attribute.StringSlice("flags", cmd.FlagNames()),
attribute.Int("args", cmd.NArg()),
))
defer span.End()
cmdFlags := make(map[string][]string)
for _, parent := range cmd.Lineage() {
if parent.Command == nil {
continue
}
cmdFlags[parent.Command.Name] = parent.LocalFlagNames()
}
cmdCtx, err := analytics.NewInvocation(cmd.Context, cmd.App.Version, map[string]any{
"command": fullCommand,
"flags": cmdFlags,
"args": cmd.Args().Slice(),
"nargs": cmd.NArg(),
})
if err != nil {
std.Out.WriteWarningf("Failed to create analytics event: %s", err)
return
}
cmd.Context = cmdCtx
// Make sure analytics are persisted before exit (interrupts or panics)
defer func() {
if p := recover(); p != nil {
// Render a more elegant message
std.Out.WriteWarningf("Encountered panic - please open an issue with the command output:\n\t%s",
sgBugReportTemplate)
std.Out.WriteWarningf("Encountered panic - please open an issue with the command output:\n\t%s", sgBugReportTemplate)
message := fmt.Sprintf("%v:\n%s", p, getRelevantStack("addAnalyticsHooks"))
actionErr = cli.Exit(message, 1)
// Log event
span.RecordError("panic", actionErr)
err := analytics.InvocationPanicked(cmd.Context, p)
maybeLog("failed to persist analytics panic event: %s", err)
}
}()
interrupt.Register(func() {
span.Cancelled()
span.End()
err := analytics.InvocationCancelled(cmd.Context)
maybeLog("failed to persist analytics cancel event: %s", err)
})
// Call the underlying action
@ -67,9 +77,11 @@ func addAnalyticsHooks(commandPath []string, commands []*cli.Command) {
// Capture analytics post-run
if actionErr != nil {
span.RecordError("error", actionErr)
err := analytics.InvocationFailed(cmd.Context, actionErr)
maybeLog("failed to persist analytics cancel event: %s", err)
} else {
span.Succeeded()
err := analytics.InvocationSucceeded(cmd.Context)
maybeLog("failed to persist analytics success event: %s", err)
}
return actionErr
@ -77,6 +89,13 @@ func addAnalyticsHooks(commandPath []string, commands []*cli.Command) {
}
}
func maybeLog(fmt string, err error) { //nolint:unparam
if err == nil {
return
}
std.Out.WriteWarningf(fmt, err)
}
// getRelevantStack generates a stacktrace that encapsulates the relevant parts of a
// stacktrace for user-friendly reading.
func getRelevantStack(excludeFunctions ...string) string {

View File

@ -6,43 +6,35 @@ go_library(
name = "analytics",
srcs = [
"analytics.go",
"context.go",
"background.go",
"bigquery.go",
"oauth.go",
"spans.go",
"tracer.go",
"sqlite.go",
],
importpath = "github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics",
tags = [TAG_INFRA_DEVINFRA],
visibility = ["//dev/sg:__subpackages__"],
deps = [
"//dev/sg/internal/background",
"//dev/sg/internal/secrets",
"//dev/sg/internal/std",
"//dev/sg/root",
"//lib/errors",
"@com_github_google_uuid//:uuid",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_run//:run",
"@io_opentelemetry_go_otel//:otel",
"@io_opentelemetry_go_otel//attribute",
"@io_opentelemetry_go_otel//codes",
"@io_opentelemetry_go_otel//semconv/v1.4.0:v1_4_0",
"@io_opentelemetry_go_otel_exporters_otlp_otlptrace//:otlptrace",
"@io_opentelemetry_go_otel_exporters_otlp_otlptrace_otlptracegrpc//:otlptracegrpc",
"@io_opentelemetry_go_otel_sdk//resource",
"@io_opentelemetry_go_otel_sdk//trace",
"@io_opentelemetry_go_otel_trace//:trace",
"@io_opentelemetry_go_otel_trace//noop",
"@io_opentelemetry_go_proto_otlp//collector/trace/v1:trace",
"@io_opentelemetry_go_proto_otlp//trace/v1:trace",
"@com_google_cloud_go_bigquery//:bigquery",
"@org_golang_google_api//oauth2/v2:oauth2",
"@org_golang_google_protobuf//encoding/protojson",
"@org_golang_x_oauth2//:oauth2",
"@org_golang_x_oauth2//google",
"@org_modernc_sqlite//:sqlite",
"@org_modernc_sqlite//lib",
],
)
go_test(
name = "analytics_test",
srcs = [
"analytics_test.go",
"mocks_test.go",
"oauth_test.go",
],

View File

@ -1,116 +1,378 @@
package analytics
import (
"bufio"
"context"
"database/sql"
"encoding/json"
"fmt"
"os"
"path"
"sync"
"time"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
"google.golang.org/protobuf/encoding/protojson"
"github.com/google/uuid"
_ "modernc.org/sqlite" // pure Go SQLite implementation
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
const (
sgAnalyticsVersionResourceKey = "sg.analytics_version"
// Increment to make breaking changes to spans and discard old spans
sgAnalyticsVersion = "v1.1"
)
var ErrDBNotInitialized = errors.New("analytics database not initialized")
const schemaVersion = "1"
type key int
const (
honeycombEndpoint = "grpc://api.honeycomb.io:443"
otlpEndpointEnvKey = "OTEL_EXPORTER_OTLP_ENDPOINT"
invocationKey key = 0
)
// Submit pushes all persisted events to Honeycomb if OTEL_EXPORTER_OTLP_ENDPOINT is not
// set.
func Submit(ctx context.Context, honeycombToken string) error {
spans, err := Load()
type invocation struct {
uuid uuid.UUID
metadata map[string]any
}
func (i invocation) GetStartTime() *time.Time {
v, ok := i.metadata["start_time"]
if !ok {
return nil
}
raw := v.(string)
t, err := time.Parse(time.RFC3339, raw)
if err != nil {
return err
return nil
}
if len(spans) == 0 {
return errors.New("no spans to submit")
return &t
}
func (i invocation) GetEndTime() *time.Time {
v, ok := i.metadata["end_time"]
if !ok {
return nil
}
raw := v.(string)
t, err := time.Parse(time.RFC3339, raw)
if err != nil {
return nil
}
return &t
}
func (i invocation) GetDuration() time.Duration {
start := i.GetStartTime()
end := i.GetEndTime()
if start == nil || end == nil {
return 0
}
// if endpoint is not set, point to Honeycomb
var otlpOptions []otlptracegrpc.Option
if _, exists := os.LookupEnv(otlpEndpointEnvKey); !exists {
os.Setenv(otlpEndpointEnvKey, honeycombEndpoint)
otlpOptions = append(otlpOptions, otlptracegrpc.WithHeaders(map[string]string{
"x-honeycomb-team": honeycombToken,
}))
return end.Sub(*start)
}
func (i invocation) IsSuccess() bool {
v, ok := i.metadata["success"]
if !ok {
return false
}
return v.(bool)
}
func (i invocation) IsCancelled() bool {
v, ok := i.metadata["cancelled"]
if !ok {
return false
}
return v.(bool)
}
func (i invocation) IsFailed() bool {
v, ok := i.metadata["failed"]
if !ok {
return false
}
return v.(bool)
}
func (i invocation) IsPanicked() bool {
v, ok := i.metadata["panicked"]
if !ok {
return false
}
return v.(bool)
}
func (i invocation) GetCommand() string {
v, ok := i.metadata["command"]
if !ok {
return ""
}
return v.(string)
}
func (i invocation) GetVersion() string {
v, ok := i.metadata["version"]
if !ok {
return ""
}
return v.(string)
}
func (i invocation) GetError() string {
v, ok := i.metadata["error"]
if !ok {
return ""
}
return v.(string)
}
func (i invocation) GetUserID() string {
v, ok := i.metadata["user_id"]
if !ok {
return ""
}
return v.(string)
}
func (i invocation) GetFlags() map[string]any {
v, ok := i.metadata["flags"]
if !ok {
return nil
}
return v.(map[string]any)
}
func (i invocation) GetArgs() []any {
v, ok := i.metadata["args"]
if !ok {
return nil
}
return v.([]any)
}
var store = sync.OnceValue(func() analyticsStore {
db, err := newDiskStore()
if err != nil {
std.Out.WriteWarningf("Failed to create sg analytics store: %s", err)
}
return analyticsStore{db: db}
})
func newDiskStore() (Execer, error) {
sghome, err := root.GetSGHomePath()
if err != nil {
return nil, err
}
// Set up a trace exporter
client := otlptracegrpc.NewClient(otlpOptions...)
if err := client.Start(ctx); err != nil {
return errors.Wrap(err, "failed to initialize export client")
// this will create the file if it doesn't exist
db, err := sql.Open("sqlite", "file://"+path.Join(sghome, "analytics.sqlite"))
if err != nil {
return nil, err
}
db.SetMaxOpenConns(1)
rdb := retryableConn{db}
_, err = rdb.Exec(`CREATE TABLE IF NOT EXISTS analytics (
event_uuid TEXT PRIMARY KEY,
-- invocation_uuid TEXT,
schema_version TEXT NOT NULL,
metadata_json TEXT
)`)
if err != nil {
return nil, err
}
// send spans and shut down
if err := client.UploadTraces(ctx, spans); err != nil {
return errors.Wrap(err, "failed to export spans")
return &rdb, nil
}
type analyticsStore struct {
db Execer
}
func (s analyticsStore) NewInvocation(ctx context.Context, uuid uuid.UUID, version string, meta map[string]any) error {
if s.db == nil {
return ErrDBNotInitialized
}
if err := client.Stop(ctx); err != nil {
return errors.Wrap(err, "failed to flush span exporter")
meta["user_id"] = getEmail()
meta["version"] = version
meta["start_time"] = time.Now()
b, err := json.Marshal(meta)
if err != nil {
return errors.Wrapf(err, "failed to JSON marshal metadata %v")
}
_, err = s.db.Exec(`INSERT INTO analytics (event_uuid, schema_version, metadata_json) VALUES (?, ?, ?)`, uuid, schemaVersion, string(b))
if err != nil {
return errors.Wrapf(err, "failed to insert sg analytics event")
}
return nil
}
// Persist stores all events in context to disk.
func Persist(ctx context.Context) error {
store := getStore(ctx)
if store == nil {
return nil
func (s analyticsStore) AddMetadata(ctx context.Context, uuid uuid.UUID, meta map[string]any) error {
if s.db == nil {
return ErrDBNotInitialized
}
return store.Persist(ctx)
}
// Reset deletes all persisted events.
func Reset() error {
p, err := spansPath()
b, err := json.Marshal(meta)
if err != nil {
return err
return errors.Wrapf(err, "failed to JSON marshal metadata %v")
}
if _, err := os.Stat(p); os.IsNotExist(err) {
// don't have to remove something that doesn't exist
return nil
_, err = s.db.Exec(`UPDATE analytics SET metadata_json = json_patch(metadata_json, ?) WHERE event_uuid = ?`, string(b), uuid)
if err != nil {
return errors.Wrapf(err, "failed to update sg analytics event")
}
return os.Remove(p)
return nil
}
// Load retrieves all persisted events.
func Load() (spans []*tracepb.ResourceSpans, errs error) {
p, err := spansPath()
func (s analyticsStore) DeleteInvocation(ctx context.Context, uuid string) error {
if s.db == nil {
return ErrDBNotInitialized
}
_, err := s.db.Exec(`DELETE FROM analytics WHERE event_uuid = ?`, uuid)
if err != nil {
return errors.Wrapf(err, "failed to delete sg analytics event")
}
return nil
}
func (s analyticsStore) ListCompleted(ctx context.Context) ([]invocation, error) {
if s.db == nil {
return nil, nil
}
res, err := s.db.Query(`SELECT * FROM analytics WHERE json_extract(metadata_json, '$.end_time') IS NOT NULL LIMIT 10`)
if err != nil {
return nil, err
}
file, err := os.Open(p)
results := []invocation{}
for res.Next() {
invc := invocation{metadata: map[string]any{}}
var eventUUID string
var schemaVersion string
var metadata_json string
if err := res.Scan(&eventUUID, &schemaVersion, &metadata_json); err != nil {
return nil, err
}
invc.uuid, err = uuid.Parse(eventUUID)
if err != nil {
return nil, err
}
err := json.Unmarshal([]byte(metadata_json), &invc.metadata)
if err != nil {
return nil, err
}
results = append(results, invc)
}
return results, err
}
// Don't invoke this function directly. Use the `getEmail` function instead.
func emailfunc() string {
sgHome, err := root.GetSGHomePath()
if err != nil {
return "anonymous"
}
b, err := os.ReadFile(path.Join(sgHome, "whoami.json"))
if err != nil {
return "anonymous"
}
var whoami struct {
Email string
}
if err := json.Unmarshal(b, &whoami); err != nil {
return "anonymous"
}
return whoami.Email
}
var getEmail = sync.OnceValue[string](emailfunc)
func NewInvocation(ctx context.Context, version string, meta map[string]any) (context.Context, error) {
// v7 for sortable property (not vital as we also store timestamps, but no harm to have)
u, _ := uuid.NewV7()
invc := invocation{u, meta}
if err := store().NewInvocation(ctx, u, version, meta); err != nil && !errors.Is(err, ErrDBNotInitialized) {
return nil, err
}
defer file.Close()
scanner := bufio.NewScanner(file)
for scanner.Scan() {
var req coltracepb.ExportTraceServiceRequest
if err := protojson.Unmarshal(scanner.Bytes(), &req); err != nil {
errs = errors.Append(errs, err)
continue // drop malformed data
}
for _, s := range req.GetResourceSpans() {
if !isValidVersion(s) {
continue
}
spans = append(spans, s)
}
}
return
return context.WithValue(ctx, invocationKey, invc), nil
}
func AddMeta(ctx context.Context, meta map[string]any) error {
invc, ok := ctx.Value(invocationKey).(invocation)
if !ok {
return nil
}
return store().AddMetadata(ctx, invc.uuid, meta)
}
func InvocationSucceeded(ctx context.Context) error {
invc, ok := ctx.Value(invocationKey).(invocation)
if !ok {
return nil
}
return store().AddMetadata(ctx, invc.uuid, map[string]any{
"success": true,
"end_time": time.Now(),
})
}
func InvocationCancelled(ctx context.Context) error {
invc, ok := ctx.Value(invocationKey).(invocation)
if !ok {
return nil
}
return store().AddMetadata(ctx, invc.uuid, map[string]any{
"cancelled": true,
"end_time": time.Now(),
})
}
func InvocationFailed(ctx context.Context, err error) error {
invc, ok := ctx.Value(invocationKey).(invocation)
if !ok {
return nil
}
return store().AddMetadata(ctx, invc.uuid, map[string]any{
"failed": true,
"error": err.Error(),
"end_time": time.Now(),
})
}
func InvocationPanicked(ctx context.Context, err any) error {
invc, ok := ctx.Value(invocationKey).(invocation)
if !ok {
return nil
}
return store().AddMetadata(ctx, invc.uuid, map[string]any{
"panicked": true,
"error": fmt.Sprint(err),
"end_time": time.Now(),
})
}

View File

@ -0,0 +1,45 @@
package analytics
import (
"os"
"path"
"testing"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
)
func TestGetEmail(t *testing.T) {
os.Setenv("HOME", t.TempDir())
sgHome, err := root.GetSGHomePath()
if err != nil {
t.Fatal(err)
}
whoamiPath := path.Join(sgHome, "whoami.json")
t.Run("whoami doesnt exist", func(t *testing.T) {
if email := emailfunc(); email != "anonymous" {
t.Fatal("expected anonymous")
}
})
t.Run("misformed whoami", func(t *testing.T) {
err := os.WriteFile(whoamiPath, []byte("{"), 0o700)
if err != nil {
t.Fatal(err)
}
if email := emailfunc(); email != "anonymous" {
t.Fatal("expected anonymous")
}
})
t.Run("well formed", func(t *testing.T) {
err := os.WriteFile(whoamiPath, []byte(`{"email":"bananaphone@gmail.com"}`), 0o700)
if err != nil {
t.Fatal(err)
}
if email := emailfunc(); email != "bananaphone@gmail.com" {
t.Fatal("expected bananaphone@gmail.com")
}
})
}

View File

@ -0,0 +1,87 @@
package analytics
import (
"context"
"os"
"time"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/background"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
)
var (
bq *BigQueryClient
done chan struct{}
)
func BackgroundEventPublisher(ctx context.Context) {
done = make(chan struct{})
background.Run(ctx, func(ctx context.Context, bgOut *std.Output) {
var err error
bq, err = NewBigQueryClient(ctx, SGLocalDev, AnalyticsDatasetName, EventsTableName)
if err != nil {
bgOut.WriteWarningf("failed to create BigQuery client for analytics", err)
return
}
defer bq.Close()
processEvents(ctx, bgOut, store(), done)
})
}
func StopBackgroundEventPublisher() {
close(done)
}
func toEvents(items []invocation) []event {
results := make([]event, 0, len(items))
for _, i := range items {
ev := NewEvent(i)
results = append(results, *ev)
}
return results
}
func processEvents(ctx context.Context, bgOut *std.Output, store analyticsStore, done chan struct{}) {
for {
select {
case <-done:
return
default:
results, err := store.ListCompleted(ctx)
if err != nil {
bgOut.WriteWarningf("failed to list completed analytics events", err)
// TODO(burmudar): We sleep here for now, but we need to try about
// 3 times and stop and print out that we stopped because there is something big wrong
time.Sleep(time.Second)
continue
}
if len(results) == 0 {
// No events to process, so we quit.
//
// Upon the next start-up the publisher will run again and pick up new events.
return
}
events := toEvents(results)
for _, ev := range events {
err := bq.InsertEvent(ctx, ev)
if err != nil {
if os.Getenv("SG_ANALYTICS_DEBUG") == "1" {
panic(err)
}
bgOut.WriteWarningf("failed to insert analytics event into bigquery: %v", err)
continue
}
err = store.DeleteInvocation(ctx, ev.UUID)
if err != nil {
bgOut.WriteWarningf("failed to delete analytics event: %v", err)
}
}
}
}
}

View File

@ -0,0 +1,111 @@
package analytics
import (
"context"
"encoding/json"
"time"
"cloud.google.com/go/bigquery"
)
type BigQueryClient struct {
*bigquery.Client
ProjectID string
Dataset *bigquery.Dataset
Table *bigquery.Table
}
type event struct {
UUID string `json:"uuid"`
UserID string `json:"user_id"`
RecordedAt time.Time `json:"recorded_at"`
Command string `json:"command"`
Version string `json:"version"`
FlagsAndArgs json.RawMessage `json:"flags_and_args,omitempty"`
Duration time.Duration `json:"duration,omitempty"`
Error string `json:"error,omitempty"`
Data json.RawMessage `json:"data,omitempty"`
Metadata json.RawMessage `json:"metadata,omitempty"`
}
// Save implements the bigquery.ValueSaver interface, which allows events
// to be used with Table.Inserter().
func (e event) Save() (map[string]bigquery.Value, string, error) {
durationInterval := &bigquery.IntervalValue{
Seconds: int32(e.Duration.Seconds()),
}
m := map[string]bigquery.Value{
"uuid": e.UUID,
"user_id": e.UserID,
"recorded_at": e.RecordedAt,
"command": e.Command,
"version": e.Version,
"duration": durationInterval.String(),
"error": e.Error,
"flags_and_args": string(e.FlagsAndArgs),
"metadata": string(e.Metadata),
}
insertID := e.UUID
return m, insertID, nil
}
func NewEvent(i invocation) *event {
var e event
e.UUID = i.uuid.String()
e.UserID = i.GetUserID()
if t := i.GetEndTime(); t != nil {
e.RecordedAt = *t
}
e.Command = i.GetCommand()
e.Version = i.GetVersion()
e.Duration = i.GetDuration()
e.Error = i.GetError()
flagsAndArgs := struct {
Flags map[string]any `json:"flags"`
Args []any `json:"args"`
}{
Flags: i.GetFlags(),
Args: i.GetArgs(),
}
e.FlagsAndArgs, _ = json.Marshal(flagsAndArgs)
metadata := map[string]any{
"success": i.IsSuccess(),
"failed": i.IsFailed(),
"cancelled": i.IsCancelled(),
"panicked": i.IsPanicked(),
}
e.Metadata, _ = json.Marshal(metadata)
return &e
}
const (
SGLocalDev = "sourcegraph-local-dev"
AnalyticsDatasetName = "sg_analytics"
EventsTableName = "events"
)
func NewBigQueryClient(ctx context.Context, project, datasetName, tableName string) (*BigQueryClient, error) {
client, err := bigquery.NewClient(ctx, project)
if err != nil {
return nil, err
}
dataset := client.Dataset(datasetName)
return &BigQueryClient{
Client: client,
ProjectID: project,
Dataset: dataset,
Table: dataset.Table(tableName),
}, nil
}
func (bq *BigQueryClient) InsertEvent(ctx context.Context, ev event) error {
ins := bq.Table.Inserter()
return ins.Put(ctx, ev)
}

View File

@ -1,77 +0,0 @@
package analytics
import (
"context"
"github.com/sourcegraph/log"
"github.com/sourcegraph/run"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/sdk/resource"
oteltracesdk "go.opentelemetry.io/otel/sdk/trace"
semconv "go.opentelemetry.io/otel/semconv/v1.4.0"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
)
// WithContext enables analytics in this context.
func WithContext(ctx context.Context, sgVersion string) (context.Context, error) {
processor, err := newSpanToDiskProcessor(ctx)
if err != nil {
return ctx, errors.Wrap(err, "disk exporter")
}
// Loose attempt at getting identity - if we fail, just discard
identity, _ := run.Cmd(ctx, "git config user.email").StdOut().Run().String()
// Create a provider with configuration and resource specification
provider := oteltracesdk.NewTracerProvider(
oteltracesdk.WithResource(newResource(log.Resource{
Name: "sg",
Namespace: sgVersion,
Version: sgVersion,
InstanceID: identity,
})),
oteltracesdk.WithSampler(oteltracesdk.AlwaysSample()),
oteltracesdk.WithSpanProcessor(processor),
)
// Configure OpenTelemetry defaults
otel.SetTracerProvider(provider)
otel.SetErrorHandler(otel.ErrorHandlerFunc(func(err error) {
std.Out.WriteWarningf("opentelemetry: %s", err.Error())
}))
// Create a root span for an execution of sg for all spans to be grouped under
var rootSpan *Span
ctx, rootSpan = StartSpan(ctx, "sg", "root")
return context.WithValue(ctx, spansStoreKey{}, &spansStore{
rootSpan: rootSpan.Span,
provider: provider,
}), nil
}
// newResource adapts sourcegraph/log.Resource into the OpenTelemetry package's Resource
// type.
func newResource(r log.Resource) *resource.Resource {
return resource.NewWithAttributes(
semconv.SchemaURL,
semconv.ServiceNameKey.String(r.Name),
semconv.ServiceNamespaceKey.String(r.Namespace),
semconv.ServiceInstanceIDKey.String(r.InstanceID),
semconv.ServiceVersionKey.String(r.Version),
attribute.String(sgAnalyticsVersionResourceKey, sgAnalyticsVersion))
}
func isValidVersion(spans *tracepb.ResourceSpans) bool {
for _, attrib := range spans.GetResource().GetAttributes() {
if attrib.GetKey() == sgAnalyticsVersionResourceKey {
return attrib.Value.GetStringValue() == sgAnalyticsVersion
}
}
return false
}

View File

@ -1,126 +0,0 @@
package analytics
import (
"context"
"os"
"path/filepath"
"sync"
"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
oteltracesdk "go.opentelemetry.io/otel/sdk/trace"
tracesdk "go.opentelemetry.io/otel/sdk/trace"
"go.opentelemetry.io/otel/trace"
coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1"
tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
"google.golang.org/protobuf/encoding/protojson"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// newSpanToDiskProcessor creates an OpenTelemetry span processor that persists spans
// to disk in protojson format.
func newSpanToDiskProcessor(ctx context.Context) (tracesdk.SpanProcessor, error) {
exporter, err := otlptrace.New(ctx, &otlpDiskClient{})
if err != nil {
return nil, errors.Wrap(err, "create exporter")
}
return tracesdk.NewBatchSpanProcessor(exporter), nil
}
type spansStoreKey struct{}
// spansStore manages the OpenTelemetry tracer provider that manages all events associated
// with a run of sg.
type spansStore struct {
rootSpan trace.Span
provider *oteltracesdk.TracerProvider
persistOnce sync.Once
}
// getStore retrieves the events store from context if it exists. Callers should check
// that the store is non-nil before attempting to use it.
func getStore(ctx context.Context) *spansStore {
store, ok := ctx.Value(spansStoreKey{}).(*spansStore)
if !ok {
return nil
}
return store
}
// Persist is called once per sg run, at the end, to save events
func (s *spansStore) Persist(ctx context.Context) error {
var err error
s.persistOnce.Do(func() {
s.rootSpan.End()
err = s.provider.Shutdown(ctx)
})
return err
}
func spansPath() (string, error) {
home, err := root.GetSGHomePath()
if err != nil {
return "", err
}
return filepath.Join(home, "spans"), nil
}
// otlpDiskClient is an OpenTelemetry trace client that "sends" spans to disk, instead of
// to an external collector.
type otlpDiskClient struct {
f *os.File
uploadMux sync.Mutex
}
var _ otlptrace.Client = &otlpDiskClient{}
// Start should establish connection(s) to endpoint(s). It is
// called just once by the exporter, so the implementation
// does not need to worry about idempotence and locking.
func (c *otlpDiskClient) Start(ctx context.Context) error {
p, err := spansPath()
if err != nil {
return err
}
c.f, err = os.OpenFile(p, os.O_CREATE|os.O_APPEND|os.O_RDWR, os.ModePerm)
return err
}
// Stop should close the connections. The function is called
// only once by the exporter, so the implementation does not
// need to worry about idempotence, but it may be called
// concurrently with UploadTraces, so proper
// locking is required. The function serves as a
// synchronization point - after the function returns, the
// process of closing connections is assumed to be finished.
func (c *otlpDiskClient) Stop(ctx context.Context) error {
c.uploadMux.Lock()
defer c.uploadMux.Unlock()
if err := c.f.Sync(); err != nil {
return errors.Wrap(err, "file.Sync")
}
return c.f.Close()
}
// UploadTraces should transform the passed traces to the wire
// format and send it to the collector. May be called
// concurrently.
func (c *otlpDiskClient) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error {
c.uploadMux.Lock()
defer c.uploadMux.Unlock()
// Create a request we can marshal
req := coltracepb.ExportTraceServiceRequest{
ResourceSpans: protoSpans,
}
b, err := protojson.Marshal(&req)
if err != nil {
return errors.Wrap(err, "protojson.Marshal")
}
if _, err := c.f.Write(append(b, '\n')); err != nil {
return errors.Wrap(err, "Write")
}
return c.f.Sync()
}

View File

@ -0,0 +1,49 @@
package analytics
import (
"database/sql"
"github.com/sourcegraph/sourcegraph/lib/errors"
"modernc.org/sqlite"
sqlite3 "modernc.org/sqlite/lib"
)
type Execer interface {
Exec(query string, args ...interface{}) (sql.Result, error)
Query(stmt string, args ...interface{}) (*sql.Rows, error)
}
type retryableConn struct {
db *sql.DB
}
func (c *retryableConn) Exec(query string, args ...interface{}) (sql.Result, error) {
for i := 0; i < 2; i++ {
res, err := c.db.Exec(query, args...)
if err == nil {
return res, nil
}
var sqliteerr *sqlite.Error
if errors.As(err, &sqliteerr) && sqliteerr.Code() == sqlite3.SQLITE_BUSY {
continue
}
return nil, err
}
return nil, errors.New("sqlite insert failed after multiple attempts due to locking")
}
func (c *retryableConn) Query(stmt string, args ...interface{}) (*sql.Rows, error) {
for i := 0; i < 2; i++ {
res, err := c.db.Query(stmt, args...)
if err == nil {
return res, nil
}
var sqliteerr *sqlite.Error
if errors.As(err, &sqliteerr) && sqliteerr.Code() == sqlite3.SQLITE_BUSY {
continue
}
return nil, err
}
return nil, errors.New("sqlite query failed after multiple attempts due to locking")
}

View File

@ -1,78 +0,0 @@
package analytics
import (
"context"
"go.opentelemetry.io/otel"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/codes"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
)
// spanCategoryKey denotes the type of a span, e.g. "root" or "action"
const spanCategoryKey attribute.Key = "sg.span_category"
// StartSpan starts an OpenTelemetry span from context. Example:
//
// ctx, span := analytics.StartSpan(ctx, spanName,
// trace.WithAttributes(...)
// defer span.End()
// // ... do your things
//
// Span provides convenience functions for setting the status of the span.
func StartSpan(ctx context.Context, spanName string, category string, opts ...trace.SpanStartOption) (context.Context, *Span) {
opts = append(opts, trace.WithAttributes(spanCategoryKey.String(category)))
ctx, s := otel.GetTracerProvider().Tracer("dev/sg/analytics").Start(ctx, spanName, opts...)
return ctx, &Span{s}
}
// Span wraps an OpenTelemetry span with convenience functions.
type Span struct{ trace.Span }
// Error records and error in span.
func (s *Span) RecordError(kind string, err error, options ...trace.EventOption) {
s.Failed(kind)
s.Span.RecordError(err)
}
// Succeeded records a success in span.
func (s *Span) Succeeded() {
// description is only kept if error, so we add an event
s.Span.AddEvent("success")
s.Span.SetStatus(codes.Ok, "success")
}
// Failed records a failure.
func (s *Span) Failed(reason ...string) {
v := "failed"
if len(reason) > 0 {
v = reason[0]
}
s.Span.AddEvent(v)
s.Span.SetStatus(codes.Error, v)
}
// Cancelled records a cancellation.
func (s *Span) Cancelled() {
// description is only kept if error, so we add an event
s.Span.AddEvent("cancelled")
s.Span.SetStatus(codes.Ok, "cancelled")
}
// Skipped records a skipped task.
func (s *Span) Skipped(reason ...string) {
v := "skipped"
if len(reason) > 0 {
v = reason[0]
}
// description is only kept if error, so we add an event
s.Span.AddEvent(v)
s.Span.SetStatus(codes.Ok, v)
}
// NoOpSpan is a safe-to-use, no-op span.
func NoOpSpan() *Span {
_, s := noop.NewTracerProvider().Tracer("").Start(context.Background(), "")
return &Span{s}
}

View File

@ -7,11 +7,8 @@ go_library(
tags = [TAG_INFRA_DEVINFRA],
visibility = ["//dev/sg:__subpackages__"],
deps = [
"//dev/sg/internal/analytics",
"//dev/sg/internal/std",
"//lib/output",
"@io_opentelemetry_go_otel//attribute",
"@io_opentelemetry_go_otel_trace//:trace",
"@org_uber_go_atomic//:atomic",
],
)

View File

@ -7,19 +7,18 @@ import (
"sync"
"time"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
"github.com/sourcegraph/sourcegraph/lib/output"
)
type key int
var jobsKey key
var hasRun bool
var (
jobsKey key
hasRun bool
)
type backgroundJobs struct {
wg sync.WaitGroup
@ -81,10 +80,6 @@ func Wait(ctx context.Context, out *std.Output) {
jobs := loadFromContext(ctx)
pendingCount := int(jobs.stillRunningCount.Load())
_, span := analytics.StartSpan(ctx, "background_wait", "",
trace.WithAttributes(attribute.Int("jobs", pendingCount)))
defer span.End()
firstResultWithOutput := true
if jobs.verbose && pendingCount > 0 {
out.WriteLine(output.Styledf(output.StylePending, "Waiting for %d remaining background %s to complete...",
@ -110,7 +105,6 @@ func Wait(ctx context.Context, out *std.Output) {
if jobs.verbose && pendingCount > 0 {
out.WriteLine(output.Line(output.EmojiSuccess, output.StyleSuccess, "Background jobs done!"))
}
span.Succeeded()
}
func pluralize(single, plural string, count int) string {

View File

@ -17,7 +17,6 @@ go_library(
tags = [TAG_INFRA_DEVINFRA],
visibility = ["//dev/sg:__subpackages__"],
deps = [
"//dev/sg/internal/analytics",
"//dev/sg/internal/sgconf",
"//dev/sg/internal/std",
"//dev/sg/internal/usershell",
@ -34,9 +33,6 @@ go_library(
"@com_github_sourcegraph_conc//pool",
"@com_github_sourcegraph_conc//stream",
"@com_github_sourcegraph_run//:run",
"@io_opentelemetry_go_otel//attribute",
"@io_opentelemetry_go_otel_trace//:trace",
"@org_uber_go_atomic//:atomic",
],
)

View File

@ -6,15 +6,12 @@ import (
"io"
"strings"
"sync"
"sync/atomic"
"time"
"github.com/sourcegraph/conc/pool"
"github.com/sourcegraph/conc/stream"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"go.uber.org/atomic"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
"github.com/sourcegraph/sourcegraph/internal/limiter"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -89,14 +86,7 @@ func NewRunner[Args any](in io.Reader, out *std.Output, categories []Category[Ar
}
// Check executes all checks exactly once and exits.
func (r *Runner[Args]) Check(
ctx context.Context,
args Args,
) error {
var span *analytics.Span
ctx, span = r.startSpan(ctx, "Check")
defer span.End()
func (r *Runner[Args]) Check(ctx context.Context, args Args) error {
results := r.runAllCategoryChecks(ctx, args)
if len(results.failed) > 0 {
if len(results.skipped) > 0 {
@ -109,14 +99,7 @@ func (r *Runner[Args]) Check(
}
// Fix attempts to applies available fixes on checks that are not satisfied.
func (r *Runner[Args]) Fix(
ctx context.Context,
args Args,
) error {
var span *analytics.Span
ctx, span = r.startSpan(ctx, "Fix")
defer span.End()
func (r *Runner[Args]) Fix(ctx context.Context, args Args) error {
// Get state
results := r.runAllCategoryChecks(ctx, args)
if len(results.failed) == 0 {
@ -149,21 +132,14 @@ func (r *Runner[Args]) Fix(
// Interactive runs both checks and fixes in an interactive manner, prompting the user for
// decisions about which fixes to apply.
func (r *Runner[Args]) Interactive(
ctx context.Context,
args Args,
) error {
var span *analytics.Span
ctx, span = r.startSpan(ctx, "Interactive")
defer span.End()
func (r *Runner[Args]) Interactive(ctx context.Context, args Args) error {
// Keep interactive runner up until all issues are fixed or the user exits
results := &runAllCategoryChecksResult{
failed: []int{1}, // initialize, this gets reset immediately
}
buildChoices := func(failed []int) map[int]string {
var choices = make(map[int]string)
choices := make(map[int]string)
for _, idx := range failed {
category := r.categories[idx]
// categories are zero based indexes internally, but are displayed with 1-based indexes
@ -239,14 +215,10 @@ var errSkipped = errors.New("skipped")
// runAllCategoryChecks is the main entrypoint for running the checks in this runner.
func (r *Runner[Args]) runAllCategoryChecks(ctx context.Context, args Args) *runAllCategoryChecksResult {
var runAllSpan *analytics.Span
var cancelAll context.CancelFunc
ctx, cancelAll = context.WithCancel(ctx)
defer cancelAll()
ctx, runAllSpan = r.startSpan(ctx, "runAllCategoryChecks")
defer runAllSpan.End()
allCancelled := atomic.NewBool(false)
allCancelled := atomic.Bool{}
if r.RenderDescription != nil {
r.RenderDescription(r.Output)
@ -348,16 +320,9 @@ func (r *Runner[Args]) runAllCategoryChecks(ctx context.Context, args Args) *run
}
categoriesGroup.Go(func() stream.Callback {
categoryCtx, categorySpan := r.startSpan(ctx, "category "+category.Name,
trace.WithAttributes(
attribute.String("action", "check_category"),
))
defer categorySpan.End()
if err := category.CheckEnabled(categoryCtx, args); err != nil {
if err := category.CheckEnabled(ctx, args); err != nil {
// Mark as done
updateCategorySkipped(i, err)
categorySpan.Skipped()
return cb(errSkipped)
}
@ -372,17 +337,10 @@ func (r *Runner[Args]) runAllCategoryChecks(ctx context.Context, args Args) *run
checksLimiter.Acquire()
defer checksLimiter.Release()
ctx, span := r.startSpan(categoryCtx, "check "+check.Name,
trace.WithAttributes(
attribute.String("action", "check"),
attribute.String("category", category.Name),
))
defer span.End()
defer updateChecksProgress()
if err := check.IsEnabled(ctx, args); err != nil {
updateCheckSkipped(i, check.Name, err)
span.Skipped()
return nil
}
@ -396,14 +354,12 @@ func (r *Runner[Args]) runAllCategoryChecks(ctx context.Context, args Args) *run
err = errors.New("skipped because another check failed")
check.cachedCheckErr = err
updateCheckSkipped(i, check.Name, err)
span.Skipped()
return err
}
// mark check as failed
updateCheckFailed(i, check.Name, err)
check.cachedCheckOutput = updateOutput.String()
span.Failed()
// If we should fail fast, mark as failed
if r.FailFast {
@ -414,7 +370,6 @@ func (r *Runner[Args]) runAllCategoryChecks(ctx context.Context, args Args) *run
return err
}
span.Succeeded()
return nil
})
}
@ -502,7 +457,6 @@ func (r *Runner[Args]) runAllCategoryChecks(ctx context.Context, args Args) *run
}
if len(results.failed) == 0 {
runAllSpan.Succeeded()
if len(results.skipped) == 0 {
r.Output.Write("")
r.Output.WriteLine(output.Linef(output.EmojiOk, output.StyleBold, "Everything looks good! Happy hacking!"))
@ -516,13 +470,6 @@ func (r *Runner[Args]) runAllCategoryChecks(ctx context.Context, args Args) *run
}
func (r *Runner[Args]) presentFailedCategoryWithOptions(ctx context.Context, categoryIdx int, category *Category[Args], args Args, results *runAllCategoryChecksResult) error {
var span *analytics.Span
ctx, span = r.startSpan(ctx, "presentFailedCategoryWithOptions",
trace.WithAttributes(
attribute.String("category", category.Name),
))
defer span.End()
r.printCategoryHeaderAndDependencies(categoryIdx+1, category)
fixableCategory := category.HasFixable()
@ -557,7 +504,6 @@ func (r *Runner[Args]) presentFailedCategoryWithOptions(ctx context.Context, cat
return nil
}
if err != nil {
span.Failed("fix_failed")
return err
}
return nil
@ -583,13 +529,6 @@ func (r *Runner[Args]) printCategoryHeaderAndDependencies(categoryIdx int, categ
}
func (r *Runner[Args]) fixCategoryManually(ctx context.Context, categoryIdx int, category *Category[Args], args Args) error {
var span *analytics.Span
ctx, span = r.startSpan(ctx, "fixCategoryManually",
trace.WithAttributes(
attribute.String("category", category.Name),
))
defer span.End()
for {
toFix := []int{}
@ -677,13 +616,6 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
r.Output.WriteLine(output.Styledf(output.StylePending, "Trying my hardest to fix %q automatically...", category.Name))
var span *analytics.Span
ctx, span = r.startSpan(ctx, "fix category "+category.Name,
trace.WithAttributes(
attribute.String("action", "fix_category"),
))
defer span.End()
// Make sure to call this with a final message before returning!
complete := func(emoji string, style output.Style, fmtStr string, args ...any) {
r.Output.WriteLine(output.Linef(emoji, output.CombineStyles(style, output.StyleBold),
@ -691,14 +623,12 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
}
if err := category.CheckEnabled(ctx, args); err != nil {
span.Skipped("skipped")
complete(output.EmojiQuestionMark, output.StyleGrey, "Skipped: %s", err.Error())
return true
}
// If nothing in this category is fixable, we are done
if !category.HasFixable() {
span.Skipped("not_fixable")
complete(output.EmojiFailure, output.StyleFailure, "Cannot be fixed automatically.")
return false
}
@ -707,7 +637,6 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
var unmetDependencies []string
for _, d := range category.DependsOn {
if met, exists := results.categories[d]; !exists {
span.Failed("required_check_not_found")
complete(output.EmojiFailure, output.StyleFailure, "Required check category %q not found", d)
return false
} else if !met {
@ -715,30 +644,20 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
}
}
if len(unmetDependencies) > 0 {
span.Failed("unmet_dependencies")
complete(output.EmojiFailure, output.StyleFailure, "Required dependencies %s not met.", strings.Join(unmetDependencies, ", "))
return false
}
fixCheck := func(c *Check[Args]) {
checkCtx, span := r.startSpan(ctx, "fix "+c.Name,
trace.WithAttributes(
attribute.String("action", "fix"),
attribute.String("category", category.Name),
))
defer span.End()
// If category is fixed, we are good to go
if c.IsSatisfied() {
span.Succeeded()
return
}
// Skip
if err := c.IsEnabled(checkCtx, args); err != nil {
if err := c.IsEnabled(ctx, args); err != nil {
r.Output.WriteLine(output.Linef(output.EmojiQuestionMark, output.CombineStyles(output.StyleGrey, output.StyleBold),
"%q skipped: %s", c.Name, err.Error()))
span.Skipped()
return
}
@ -746,7 +665,6 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
if c.Fix == nil {
r.Output.WriteLine(output.Linef(output.EmojiShrug, output.CombineStyles(output.StyleWarning, output.StyleBold),
"%q cannot be fixed automatically.", c.Name))
span.Skipped("unfixable")
return
}
@ -761,7 +679,6 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
if err != nil {
r.Output.WriteLine(output.Linef(output.EmojiWarning, output.CombineStyles(output.StyleFailure, output.StyleBold),
"Failed to fix %q: %s", c.Name, err.Error()))
span.Failed()
return
}
@ -771,17 +688,15 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
c.cachedCheckErr = nil
c.cachedCheckOutput = ""
} else {
err = c.Update(checkCtx, r.Output, args)
err = c.Update(ctx, r.Output, args)
}
if err != nil {
r.Output.WriteLine(output.Styledf(output.CombineStyles(output.StyleWarning, output.StyleBold),
"Check %q still failing: %s", c.Name, err.Error()))
span.Failed("unfixed")
} else {
r.Output.WriteLine(output.Styledf(output.CombineStyles(output.StyleSuccess, output.StyleBold),
"Check %q is satisfied now!", c.Name))
span.Succeeded()
}
}
@ -799,10 +714,3 @@ func (r *Runner[Args]) fixCategoryAutomatically(ctx context.Context, categoryIdx
return
}
func (r *Runner[Args]) startSpan(ctx context.Context, spanName string, opts ...trace.SpanStartOption) (context.Context, *analytics.Span) {
if r.AnalyticsCategory == "" {
return ctx, analytics.NoOpSpan()
}
return analytics.StartSpan(ctx, spanName, r.AnalyticsCategory, opts...)
}

View File

@ -21,7 +21,6 @@ go_library(
tags = [TAG_INFRA_DEVINFRA],
visibility = ["//dev/sg:__subpackages__"],
deps = [
"//dev/sg/internal/analytics",
"//dev/sg/internal/secrets",
"//dev/sg/internal/std",
"//dev/sg/interrupt",

View File

@ -9,9 +9,7 @@ import (
"strings"
"time"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
"github.com/sourcegraph/sourcegraph/dev/sg/interrupt"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/internal/download"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -47,7 +45,7 @@ type InstallManager struct {
progress output.Progress
ticker *time.Ticker
tickInterval time.Duration
stats *installAnalytics
startTime time.Time
}
func Install(ctx context.Context, env map[string]string, verbose bool, cmds []Installer) error {
@ -56,7 +54,7 @@ func Install(ctx context.Context, env map[string]string, verbose bool, cmds []In
}
installer := newInstallManager(cmds, std.Out, env, verbose)
installer.start(ctx)
installer.start()
installer.install(ctx, cmds)
@ -85,7 +83,7 @@ func newInstallManager(cmds []Installer, out *std.Output, env map[string]string,
}
// starts all progress bars and counters but does not start installation
func (installer *InstallManager) start(ctx context.Context) {
func (installer *InstallManager) start() {
installer.Write("")
installer.WriteLine(output.Linef(output.EmojiLightbulb, output.StyleBold, "Installing %d commands...", installer.total))
installer.Write("")
@ -96,8 +94,7 @@ func (installer *InstallManager) start(ctx context.Context) {
// Every uninterrupted 15 seconds we will print out a waiting message
installer.startTicker(15 * time.Second)
installer.startAnalytics(ctx, installer.cmds)
installer.startTime = time.Now()
}
// Starts the installation process in a non-blocking process
@ -147,17 +144,13 @@ func (installer *InstallManager) wait(ctx context.Context) error {
}
}
}
func (installer *InstallManager) startTicker(interval time.Duration) {
installer.ticker = time.NewTicker(interval)
installer.tickInterval = interval
}
func (installer *InstallManager) startAnalytics(ctx context.Context, cmds map[string]Installer) {
installer.stats = startInstallAnalytics(ctx, cmds)
}
func (installer *InstallManager) handleInstalled(name string) {
installer.stats.handleInstalled(name)
installer.ticker.Reset(installer.tickInterval)
installer.done += installer.cmds[name].Count()
@ -173,7 +166,7 @@ func (installer *InstallManager) complete() {
installer.Write("")
if installer.verbose {
installer.WriteLine(output.Linef(output.EmojiSuccess, output.StyleSuccess, "Everything installed! Took %s. Booting up the system!", installer.stats.duration()))
installer.WriteLine(output.Linef(output.EmojiSuccess, output.StyleSuccess, "Everything installed! Took %s. Booting up the system!", time.Since(installer.startTime)))
} else {
installer.WriteLine(output.Linef(output.EmojiSuccess, output.StyleSuccess, "Everything installed! Booting up the system!"))
}
@ -192,7 +185,6 @@ func (installer *InstallManager) complete() {
func (installer *InstallManager) handleFailure(name string, err error) {
installer.progress.Destroy()
installer.stats.handleFailure(name, err)
printCmdError(installer.Output.Output, name, err)
}
@ -220,49 +212,6 @@ func (installer *InstallManager) isDone() bool {
return len(installer.cmds) == 0
}
type installAnalytics struct {
Start time.Time
Spans map[string]*analytics.Span
}
func startInstallAnalytics(ctx context.Context, cmds map[string]Installer) *installAnalytics {
installer := &installAnalytics{
Start: time.Now(),
Spans: make(map[string]*analytics.Span, len(cmds)),
}
for cmd := range cmds {
_, installer.Spans[cmd] = analytics.StartSpan(ctx, fmt.Sprintf("install %s", cmd), "install_command")
}
interrupt.Register(installer.handleInterrupt)
return installer
}
func (a *installAnalytics) handleInterrupt() {
for _, span := range a.Spans {
if span.IsRecording() {
span.Cancelled()
span.End()
}
}
}
func (a *installAnalytics) handleInstalled(name string) {
a.Spans[name].Succeeded()
a.Spans[name].End()
}
func (a *installAnalytics) handleFailure(name string, err error) {
a.Spans[name].RecordError("failed", err)
a.Spans[name].End()
}
func (a *installAnalytics) duration() time.Duration {
return time.Since(a.Start)
}
type installFunc func(context.Context, map[string]string) error
var installFuncs = map[string]installFunc{
@ -297,7 +246,7 @@ var installFuncs = map[string]installFunc{
// Make sure the data folder exists.
disk := env["JAEGER_DISK"]
if err := os.MkdirAll(disk, 0755); err != nil {
if err := os.MkdirAll(disk, 0o755); err != nil {
return err
}

View File

@ -28,6 +28,7 @@ import (
"github.com/sourcegraph/sourcegraph/dev/sg/msp"
"github.com/sourcegraph/sourcegraph/dev/sg/root"
"github.com/sourcegraph/sourcegraph/dev/sg/sams"
"github.com/sourcegraph/sourcegraph/internal/collections"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -200,14 +201,6 @@ var sg = &cli.App{
std.Out.WriteWarningf("Failed to persist identity for analytics, continuing: %s", err)
}
cmd.Context, err = analytics.WithContext(cmd.Context, cmd.App.Version)
if err != nil {
std.Out.WriteWarningf("Failed to initialize analytics: %s", err)
}
// Ensure analytics are persisted
interrupt.Register(func() { _ = analytics.Persist(cmd.Context) })
// Add analytics to each command
addAnalyticsHooks([]string{"sg"}, cmd.App.Commands)
}
@ -220,6 +213,10 @@ var sg = &cli.App{
cmd.Context = background.Context(cmd.Context, verbose)
interrupt.Register(func() { background.Wait(cmd.Context, std.Out) })
// start the analytics publisher
analytics.BackgroundEventPublisher(cmd.Context)
interrupt.Register(analytics.StopBackgroundEventPublisher)
// Configure logger, for commands that use components that use loggers
if _, set := os.LookupEnv(log.EnvDevelopment); !set {
os.Setenv(log.EnvDevelopment, "true")
@ -244,13 +241,9 @@ var sg = &cli.App{
}
// Check for updates, unless we are running update manually.
skipBackgroundTasks := map[string]struct{}{
"update": {},
"version": {},
"live": {},
"teammate": {},
}
if _, skipped := skipBackgroundTasks[cmd.Args().First()]; !skipped {
skipBackgroundTasks := collections.NewSet("update", "version", "live", "teammate")
if !skipBackgroundTasks.Has(cmd.Args().First()) {
background.Run(cmd.Context, func(ctx context.Context, out *std.Output) {
err := checkSgVersionAndUpdate(ctx, out, cmd.Bool("skip-auto-update"))
if err != nil {
@ -270,8 +263,6 @@ var sg = &cli.App{
if !bashCompletionsMode {
// Wait for background jobs to finish up, iff not in autocomplete mode
background.Wait(cmd.Context, std.Out)
// Persist analytics
_ = analytics.Persist(cmd.Context)
}
return nil
@ -316,7 +307,6 @@ var sg = &cli.App{
enterprise.Command,
// Util
analyticsCommand,
doctorCommand,
funkyLogoCommand,
helpCommand,

View File

@ -1,128 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"strings"
"time"
"github.com/urfave/cli/v2"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/category"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/secrets"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/lib/output"
)
var analyticsCommand = &cli.Command{
Name: "analytics",
Usage: "Manage analytics collected by sg",
Category: category.Util,
Subcommands: []*cli.Command{
{
Name: "submit",
ArgsUsage: " ",
Usage: "Make sg better by submitting all analytics stored locally!",
Description: "Requires HONEYCOMB_ENV_TOKEN or OTEL_EXPORTER_OTLP_ENDPOINT to be set.",
Action: func(cmd *cli.Context) error {
sec, err := secrets.FromContext(cmd.Context)
if err != nil {
return err
}
// we leave OTEL_EXPORTER_OTLP_ENDPOINT configuration a bit of a
// hidden thing, most users will want to just send to Honeycomb
//
honeyToken, err := sec.GetExternal(cmd.Context, secrets.ExternalSecret{
Project: secrets.LocalDevProject,
Name: "SG_ANALYTICS_HONEYCOMB_TOKEN",
})
if err != nil {
return errors.Wrap(err, "failed to get Honeycomb token from gcloud secrets")
}
pending := std.Out.Pending(output.Line(output.EmojiHourglass, output.StylePending, "Hang tight! We're submitting your analytics"))
if err := analytics.Submit(cmd.Context, honeyToken); err != nil {
pending.Destroy()
return err
}
pending.Complete(output.Line(output.EmojiSuccess, output.StyleSuccess, "Your analytics have been successfully submitted!"))
return analytics.Reset()
},
},
{
Name: "reset",
Usage: "Delete all analytics stored locally",
Action: func(cmd *cli.Context) error {
if err := analytics.Reset(); err != nil {
return err
}
std.Out.WriteSuccessf("Analytics reset!")
return nil
},
},
{
Name: "view",
Usage: "View all analytics stored locally",
Flags: []cli.Flag{
&cli.BoolFlag{
Name: "raw",
Usage: "view raw data",
},
},
Action: func(cmd *cli.Context) error {
spans, err := analytics.Load()
if err != nil {
std.Out.Writef("No analytics found: %s", err.Error())
return nil
}
if len(spans) == 0 {
std.Out.WriteSuccessf("No analytics events found")
return nil
}
var out strings.Builder
for _, span := range spans {
if cmd.Bool("raw") {
b, _ := json.MarshalIndent(span, "", " ")
out.WriteString(fmt.Sprintf("\n```json\n%s\n```", string(b)))
out.WriteString("\n")
} else {
for _, ss := range span.GetScopeSpans() {
for _, s := range ss.GetSpans() {
var events []string
for _, event := range s.GetEvents() {
events = append(events, event.Name)
}
var attributes []string
for _, attribute := range s.GetAttributes() {
attributes = append(attributes, fmt.Sprintf("%s: %s",
attribute.GetKey(), attribute.GetValue().String()))
}
ts := time.Unix(0, int64(s.GetEndTimeUnixNano())).Local().Format("2006-01-02 03:04:05PM")
entry := fmt.Sprintf("- [%s] `%s`", ts, s.GetName())
if len(events) > 0 {
entry += fmt.Sprintf(" %s", strings.Join(events, ", "))
}
if len(attributes) > 0 {
entry += fmt.Sprintf(" _(%s)_", strings.Join(attributes, ", "))
}
out.WriteString(entry)
out.WriteString("\n")
}
}
}
}
out.WriteString("\nTo submit these events, use `sg analytics submit`.\n")
return std.Out.WriteMarkdown(out.String())
},
},
},
}

View File

@ -9,10 +9,7 @@ import (
"strings"
"github.com/urfave/cli/v2"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/category"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/repo"
"github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
@ -120,15 +117,10 @@ func updateToPrebuiltSG(ctx context.Context, release string) (bool, error) {
}
func checkSgVersionAndUpdate(ctx context.Context, out *std.Output, skipUpdate bool) error {
ctx, span := analytics.StartSpan(ctx, "auto_update", "background",
trace.WithAttributes(attribute.Bool("skipUpdate", skipUpdate)))
defer span.End()
if BuildCommit == "dev" {
// If `sg` was built with a dirty `./dev/sg` directory it's a dev build
// and we don't need to display this message.
out.Verbose("Skipping update check on dev build")
span.Skipped()
return nil
}
@ -136,7 +128,6 @@ func checkSgVersionAndUpdate(ctx context.Context, out *std.Output, skipUpdate bo
if err != nil {
// Ignore the error, because we only want to check the version if we're
// in sourcegraph/sourcegraph
span.Skipped()
return nil
}
@ -147,7 +138,6 @@ func checkSgVersionAndUpdate(ctx context.Context, out *std.Output, skipUpdate bo
if !repo.HasCommit(ctx, rev) {
out.VerboseLine(output.Styledf(output.StyleWarning,
"current sg version %s not found locally - you may want to run 'git fetch origin main'.", rev))
span.Skipped()
return nil
}
@ -155,17 +145,13 @@ func checkSgVersionAndUpdate(ctx context.Context, out *std.Output, skipUpdate bo
revList, err := run.GitCmd("rev-list", fmt.Sprintf("%s..origin/main", rev), "--", "./dev/sg")
if err != nil {
		// Unexpected error occurred
span.RecordError("check_error", err)
return err
}
revList = strings.TrimSpace(revList)
if revList == "" {
// No newer commits found. sg is up to date.
span.AddEvent("already_up_to_date")
span.Skipped()
return nil
}
span.SetAttributes(attribute.String("rev-list", revList))
if skipUpdate {
out.WriteLine(output.Styled(output.StyleSearchMatch, "╭───────────────────────────────────────────────────────────────────────╮"))
@ -173,23 +159,19 @@ func checkSgVersionAndUpdate(ctx context.Context, out *std.Output, skipUpdate bo
out.WriteLine(output.Styled(output.StyleSearchMatch, "│ To see what's new, run 'sg version changelog -next'. │"))
out.WriteLine(output.Styled(output.StyleSearchMatch, "╰───────────────────────────────────────────────────────────────────────╯"))
span.Skipped()
return nil
}
out.WriteLine(output.Line(output.EmojiInfo, output.StyleSuggestion, "Auto updating sg ..."))
updated, err := updateToPrebuiltSG(ctx, "latest") // always install latest when auto-updating
if err != nil {
span.RecordError("failed", err)
return errors.Newf("failed to install update: %s", err)
}
if !updated {
span.Skipped("not_updated")
return nil
}
out.WriteSuccessf("sg has been updated!")
out.Write("To see what's new, run 'sg version changelog'.")
span.Succeeded()
return nil
}
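
Note the check that survives the span removal: `checkSgVersionAndUpdate` still decides whether `sg` is stale by rev-listing commits between the built revision and `origin/main` that touch `./dev/sg`. A standalone sketch of that check, using plain `os/exec` instead of the internal `run.GitCmd` helper (whose behavior is assumed from its call site above):

```go
// Sketch only: the out-of-date check used above, expressed without the repo's helpers.
package example

import (
	"fmt"
	"os/exec"
	"strings"
)

// sgIsOutdated reports whether origin/main has commits touching ./dev/sg
// that are not reachable from the locally built revision.
func sgIsOutdated(rev string) (bool, error) {
	out, err := exec.Command("git", "rev-list",
		fmt.Sprintf("%s..origin/main", rev), "--", "./dev/sg").Output()
	if err != nil {
		return false, err
	}
	return strings.TrimSpace(string(out)) != "", nil
}
```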

go.mod (15 changed lines)

@ -143,7 +143,7 @@ require (
github.com/keegancsmith/rpc v1.3.0
github.com/keegancsmith/sqlf v1.1.1
github.com/keegancsmith/tmpfriend v0.0.0-20180423180255-86e88902a513
github.com/klauspost/cpuid/v2 v2.2.5
github.com/klauspost/cpuid/v2 v2.2.7
github.com/kljensen/snowball v0.6.0
github.com/kr/text v0.2.0
github.com/lib/pq v1.10.7
@ -218,7 +218,7 @@ require (
go.opentelemetry.io/otel/sdk v1.25.0
go.opentelemetry.io/otel/sdk/metric v1.24.0
go.opentelemetry.io/otel/trace v1.26.0
go.opentelemetry.io/proto/otlp v1.2.0
go.opentelemetry.io/proto/otlp v1.2.0 // indirect
go.uber.org/atomic v1.11.0
go.uber.org/automaxprocs v1.5.2
go.uber.org/ratelimit v0.2.0
@ -326,6 +326,7 @@ require (
gorm.io/driver/postgres v1.5.9
gorm.io/gorm v1.25.10
gorm.io/plugin/opentelemetry v0.1.4
modernc.org/sqlite v1.30.1
oss.terrastruct.com/d2 v0.6.5
pgregory.net/rapid v1.1.0
sigs.k8s.io/controller-runtime v0.17.3
@ -421,6 +422,7 @@ require (
github.com/mpvl/unique v0.0.0-20150818121801-cbe035fff7de // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/natefinch/wrap v0.2.0 // indirect
github.com/ncruces/go-strftime v0.1.9 // indirect
github.com/oklog/ulid/v2 v2.1.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/openfga/api/proto v0.0.0-20240529184453-5b0b4941f3e0 // indirect
@ -433,6 +435,7 @@ require (
github.com/pressly/goose/v3 v3.20.0 // indirect
github.com/prometheus/prometheus v0.40.5 // indirect
github.com/redis/go-redis/extra/rediscmd/v9 v9.0.5 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
github.com/rickb777/date v1.14.3 // indirect
github.com/rickb777/plural v1.2.2 // indirect
github.com/sagikazarmark/locafero v0.4.0 // indirect
@ -477,6 +480,12 @@ require (
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
k8s.io/apiextensions-apiserver v0.29.2 // indirect
k8s.io/component-base v0.29.2 // indirect
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
modernc.org/libc v1.52.1 // indirect
modernc.org/mathutil v1.6.0 // indirect
modernc.org/memory v1.8.0 // indirect
modernc.org/strutil v1.2.0 // indirect
modernc.org/token v1.1.0 // indirect
oss.terrastruct.com/util-go v0.0.0-20231101220827-55b3812542c2 // indirect
)
@ -567,7 +576,7 @@ require (
github.com/golang/glog v1.2.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 // indirect
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd // indirect
github.com/googleapis/gax-go/v2 v2.12.4
github.com/gopherjs/gopherjs v1.17.2 // indirect
github.com/gopherjs/gopherwasm v1.1.0 // indirect
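
go.mod now requires `modernc.org/sqlite` (a cgo-free SQLite driver) directly, plus the `modernc.org` packages listed as indirect dependencies. A hedged sketch of how such a driver is typically opened through `database/sql`; the file path and table layout here are illustrative only, not necessarily what `dev/sg/internal/analytics` creates:

```go
// Sketch only: opening a local SQLite store with the cgo-free driver added above.
package example

import (
	"database/sql"

	_ "modernc.org/sqlite" // registers the "sqlite" database/sql driver
)

func openLocalStore(path string) (*sql.DB, error) {
	db, err := sql.Open("sqlite", path)
	if err != nil {
		return nil, err
	}
	// Illustrative schema; the real layout is defined by the analytics package.
	if _, err := db.Exec(`CREATE TABLE IF NOT EXISTS analytics (
		id      TEXT PRIMARY KEY,
		version INTEGER,
		event   TEXT
	)`); err != nil {
		_ = db.Close()
		return nil, err
	}
	return db, nil
}
```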

go.sum (32 changed lines)

@ -858,8 +858,8 @@ github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20211214055906-6f57359322fd/go.mod h1:KgnwoLYCZ8IQu3XUZ8Nc/bM9CCZFOyjUNOSygVozoDg=
github.com/google/pprof v0.0.0-20230207041349-798e818bf904/go.mod h1:uglQLonpP8qtYCYyzA+8c/9qtqgA3qsXGYqCPKARAFg=
github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08 h1:PxlBVtIFHR/mtWk2i0gTEdCz+jBnqiuHNSki0epDbVs=
github.com/google/pprof v0.0.0-20231205033806-a5a03c77bf08/go.mod h1:czg5+yv1E0ZGTi6S6vVK1mke0fV+FaUhNGcd6VRS9Ik=
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd h1:gbpYu9NMq8jhDVbvlGkMFWCjLFlqqEZjEmObmhUy6Vo=
github.com/google/pprof v0.0.0-20240409012703-83162a5b38cd/go.mod h1:kf6iHlnVGwgKolg33glAes7Yg/8iWP8ukqeldJSO7jw=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o=
github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw=
@ -1166,8 +1166,8 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.17.8 h1:YcnTYrq7MikUT7k0Yb5eceMmALQPYBW/Xltxn0NAMnU=
github.com/klauspost/compress v1.17.8/go.mod h1:Di0epgTjJY877eYKx5yC51cX2A2Vl2ibi7bDH9ttBbw=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/cpuid/v2 v2.2.7 h1:ZWSB3igEs+d0qvnxR/ZBzXVmxkgt8DdzP6m9pfuVLDM=
github.com/klauspost/cpuid/v2 v2.2.7/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/pgzip v1.2.6 h1:8RXeL5crjEUFnR2/Sn6GJNWtSQ3Dk8pq4CL3jvdDyjU=
github.com/klauspost/pgzip v1.2.6/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/kljensen/snowball v0.6.0 h1:6DZLCcZeL0cLfodx+Md4/OLC6b/bfurWUOUGs1ydfOU=
@ -2606,16 +2606,28 @@ k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSn
k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
layeh.com/gopher-luar v1.0.10 h1:55b0mpBhN9XSshEd2Nz6WsbYXctyBT35azk4POQNSXo=
layeh.com/gopher-luar v1.0.10/go.mod h1:TPnIVCZ2RJBndm7ohXyaqfhzjlZ+OA2SZR/YwL8tECk=
modernc.org/cc/v4 v4.21.2 h1:dycHFB/jDc3IyacKipCNSDrjIC0Lm1hyoWOZTRR20Lk=
modernc.org/cc/v4 v4.21.2/go.mod h1:HM7VJTZbUCR3rV8EYBi9wxnJ0ZBRiGE5OeGXNA0IsLQ=
modernc.org/ccgo/v4 v4.17.10 h1:6wrtRozgrhCxieCeJh85QsxkX/2FFrT9hdaWPlbn4Zo=
modernc.org/ccgo/v4 v4.17.10/go.mod h1:0NBHgsqTTpm9cA5z2ccErvGZmtntSM9qD2kFAs6pjXM=
modernc.org/fileutil v1.3.0 h1:gQ5SIzK3H9kdfai/5x41oQiKValumqNTDXMvKo62HvE=
modernc.org/fileutil v1.3.0/go.mod h1:XatxS8fZi3pS8/hKG2GH/ArUogfxjpEKs3Ku3aK4JyQ=
modernc.org/gc/v2 v2.4.1 h1:9cNzOqPyMJBvrUipmynX0ZohMhcxPtMccYgGOJdOiBw=
modernc.org/gc/v2 v2.4.1/go.mod h1:wzN5dK1AzVGoH6XOzc3YZ+ey/jPgYHLuVckd62P0GYU=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk=
modernc.org/libc v1.41.0/go.mod h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY=
modernc.org/libc v1.52.1 h1:uau0VoiT5hnR+SpoWekCKbLqm7v6dhRL3hI+NQhgN3M=
modernc.org/libc v1.52.1/go.mod h1:HR4nVzFDSDizP620zcMCgjb1/8xk2lg5p/8yjfGv1IQ=
modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
modernc.org/sqlite v1.29.6 h1:0lOXGrycJPptfHDuohfYgNqoe4hu+gYuN/pKgY5XjS4=
modernc.org/sqlite v1.29.6/go.mod h1:S02dvcmm7TnTRvGhv8IGYyLnIt7AS2KPaB1F/71p75U=
modernc.org/memory v1.8.0 h1:IqGTL6eFMaDZZhEWwcREgeMXYwmW83LYW8cROZYkg+E=
modernc.org/memory v1.8.0/go.mod h1:XPZ936zp5OMKGWPqbD3JShgd/ZoQ7899TUuQqxY+peU=
modernc.org/opt v0.1.3 h1:3XOZf2yznlhC+ibLltsDGzABUGVx8J6pnFMS3E4dcq4=
modernc.org/opt v0.1.3/go.mod h1:WdSiB5evDcignE70guQKxYUl14mgWtbClRi5wmkkTX0=
modernc.org/sortutil v1.2.0 h1:jQiD3PfS2REGJNzNCMMaLSp/wdMNieTbKX920Cqdgqc=
modernc.org/sortutil v1.2.0/go.mod h1:TKU2s7kJMf1AE84OoiGppNHJwvB753OYfNl2WRb++Ss=
modernc.org/sqlite v1.30.1 h1:YFhPVfu2iIgUf9kuA1CR7iiHdcEEsI2i+yjRYHscyxk=
modernc.org/sqlite v1.30.1/go.mod h1:DUmsiWQDaAvU4abhc/N+djlom/L2o8f7gZ95RCvyoLU=
modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=