Cleanup: use new go 1.21 features (#58617)

Now that we're running Go 1.21, we can take advantage of its new features. This cleans up a few utilities that have since been included in the standard library.
This commit is contained in:
Camden Cheek 2023-11-28 14:49:38 -06:00 committed by GitHub
parent 64a81cffdb
commit b9a7e3b809
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
74 changed files with 91 additions and 616 deletions

View File

@ -16,7 +16,6 @@ go_library(
"//internal/codygateway",
"//internal/goroutine",
"//internal/trace",
"//internal/xcontext",
"//lib/errors",
"@com_github_sourcegraph_log//:log",
"@com_google_cloud_go_bigquery//:bigquery",

View File

@ -13,7 +13,6 @@ import (
sgactor "github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/codygateway"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/xcontext"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -114,7 +113,7 @@ func (l *bigQueryLogger) LogEvent(spanCtx context.Context, event Event) (err err
if err := l.tableInserter.Put(
// Create a cancel-free context to avoid interrupting the log when
// the parent context is cancelled.
xcontext.Detach(spanCtx),
context.WithoutCancel(spanCtx),
bigQueryEvent{
Name: string(event.Name),
Source: event.Source,

View File

@ -12,7 +12,6 @@ go_library(
importpath = "github.com/sourcegraph/sourcegraph/cmd/cody-gateway/internal/limiter",
visibility = ["//cmd/cody-gateway:__subpackages__"],
deps = [
"//internal/xcontext",
"//lib/errors",
"@io_opentelemetry_go_otel//:otel",
"@io_opentelemetry_go_otel//attribute",

View File

@ -2,7 +2,6 @@ package limiter
import (
"context"
"github.com/sourcegraph/sourcegraph/internal/xcontext"
"time"
"go.opentelemetry.io/otel"
@ -104,7 +103,7 @@ func (l StaticLimiter) TryAcquire(ctx context.Context) (_ func(context.Context,
if l.RateLimitAlerter != nil {
// Call with usage 1 for 100% (rate limit exceeded)
go l.RateLimitAlerter(xcontext.Detach(ctx), 1, retryAfter.Sub(l.NowFunc()))
go l.RateLimitAlerter(context.WithoutCancel(ctx), 1, retryAfter.Sub(l.NowFunc()))
}
return nil, RateLimitExceededError{
@ -132,7 +131,7 @@ func (l StaticLimiter) TryAcquire(ctx context.Context) (_ func(context.Context,
// same time block since the TTL would have been set.
return func(ctx context.Context, usage int) (err error) {
// NOTE: This is to make sure we still commit usage even if the context was canceled.
ctx = xcontext.Detach(ctx)
ctx = context.WithoutCancel(ctx)
var incrementedTo, ttlSeconds int
// We need to start a new span because the previous one has ended

View File

@ -38,7 +38,6 @@ go_library(
"//internal/observation",
"//internal/service",
"//internal/trace",
"//internal/xcontext",
"//lib/errors",
"@com_github_hashicorp_golang_lru_v2//:golang-lru",
"@com_github_prometheus_client_golang//prometheus",

View File

@ -19,7 +19,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/embeddings"
"github.com/sourcegraph/sourcegraph/internal/embeddings/background/repo"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/xcontext"
)
type downloadRepoEmbeddingIndexFn func(ctx context.Context, repoID api.RepoID, repoName api.RepoName) (*embeddings.RepoEmbeddingIndex, error)
@ -159,7 +158,7 @@ func (c *CachedEmbeddingIndexGetter) Get(ctx context.Context, repoID api.RepoID,
// Run the fetch in the background, but outside the singleflight so context
// errors are not shared.
go func() {
detachedCtx := xcontext.Detach(ctx)
detachedCtx := context.WithoutCancel(ctx)
// Run the fetch request through a singleflight to keep from fetching the
// same index multiple times concurrently
v, err, _ = c.sf.Do(fmt.Sprintf("%d", repoID), func() (interface{}, error) {

View File

@ -326,7 +326,6 @@ go_library(
"//internal/src-prometheus",
"//internal/suspiciousnames",
"//internal/symbols",
"//internal/syncx",
"//internal/temporarysettings",
"//internal/trace",
"//internal/txemail",

View File

@ -3,6 +3,7 @@ package graphqlbackend
import (
"context"
"strconv"
"sync"
"github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
@ -10,7 +11,6 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend/graphqlutil"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/gqlutil"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/internal/types"
)
@ -74,14 +74,14 @@ func newOutboundWebhookLogConnectionResolver(
limit := opts.Limit
logStore := store.ToLogStore()
nodes := syncx.OnceValues(func() ([]*types.OutboundWebhookLog, error) {
nodes := sync.OnceValues(func() ([]*types.OutboundWebhookLog, error) {
opts.Limit += 1
return logStore.ListForOutboundWebhook(ctx, opts)
})
return &outboundWebhookLogConnectionResolver{
nodes: nodes,
resolvers: syncx.OnceValues(func() ([]OutboundWebhookLogResolver, error) {
resolvers: sync.OnceValues(func() ([]OutboundWebhookLogResolver, error) {
logs, err := nodes()
if err != nil {
return nil, err
@ -101,7 +101,7 @@ func newOutboundWebhookLogConnectionResolver(
return resolvers, nil
}),
totalCount: syncx.OnceValues(func() (int32, error) {
totalCount: sync.OnceValues(func() (int32, error) {
total, errored, err := logStore.CountsForOutboundWebhook(ctx, opts.OutboundWebhookID)
if opts.OnlyErrors {
return int32(errored), err
@ -199,7 +199,7 @@ func newOutboundWebhookJobResolver(
id int64,
) OutboundWebhookJobResolver {
return &outboundWebhookJobResolver{
job: syncx.OnceValues(func() (*types.OutboundWebhookJob, error) {
job: sync.OnceValues(func() (*types.OutboundWebhookJob, error) {
return store.GetByID(ctx, id)
}),
}

View File

@ -4,6 +4,7 @@ import (
"context"
"sort"
"strconv"
"sync"
"github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
@ -13,7 +14,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/encryption"
"github.com/sourcegraph/sourcegraph/internal/encryption/keyring"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/webhooks/outbound"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -252,14 +252,14 @@ func newOutboundWebhookConnectionResolver(
) OutboundWebhookConnectionResolver {
limit := opts.Limit
nodes := syncx.OnceValues(func() ([]*types.OutboundWebhook, error) {
nodes := sync.OnceValues(func() ([]*types.OutboundWebhook, error) {
opts.Limit += 1
return store.List(ctx, opts)
})
return &outboundWebhookConnectionResolver{
nodes: nodes,
resolvers: syncx.OnceValues(func() ([]OutboundWebhookResolver, error) {
resolvers: sync.OnceValues(func() ([]OutboundWebhookResolver, error) {
webhooks, err := nodes()
if err != nil {
return nil, err
@ -276,7 +276,7 @@ func newOutboundWebhookConnectionResolver(
return resolvers, nil
}),
totalCount: syncx.OnceValues(func() (int32, error) {
totalCount: sync.OnceValues(func() (int32, error) {
count, err := store.Count(ctx, opts.OutboundWebhookCountOpts)
return int32(count), err
}),
@ -327,7 +327,7 @@ func newOutboundWebhookResolverFromDatabase(ctx context.Context, store database.
return &outboundWebhookResolver{
store: store,
id: marshalOutboundWebhookID(id),
webhook: syncx.OnceValues(func() (*types.OutboundWebhook, error) {
webhook: sync.OnceValues(func() (*types.OutboundWebhook, error) {
return store.GetByID(ctx, id)
}),
}

View File

@ -2,6 +2,7 @@ package graphqlbackend
import (
"context"
"sync"
"github.com/graph-gophers/graphql-go"
"github.com/graph-gophers/graphql-go/relay"
@ -14,7 +15,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -87,7 +87,7 @@ type packageRepoReferenceConnectionResolver struct {
}
func (r *packageRepoReferenceConnectionResolver) Nodes(ctx context.Context) ([]*packageRepoReferenceResolver, error) {
once := syncx.OnceValues(func() (map[api.RepoName]*types.Repo, error) {
once := sync.OnceValues(func() (map[api.RepoName]*types.Repo, error) {
allNames := make([]string, 0, len(r.deps))
for _, dep := range r.deps {
name, err := dependencyRepoToRepoName(dep)

View File

@ -54,13 +54,6 @@ func (r *repositoryStatsResolver) Indexed(ctx context.Context) (int32, error) {
return min(indexedRepos, total), nil
}
func min(a, b int32) int32 {
if a < b {
return a
}
return b
}
func (r *repositoryStatsResolver) IndexedLinesCount(ctx context.Context) (BigInt, error) {
_, indexedLinesCount, err := r.computeIndexedStats(ctx)
if err != nil {

View File

@ -194,12 +194,6 @@ func getProactiveResultLimit() int {
}
func min(x, y int) int {
if x < y {
return x
}
return y
}
func getExtendedTimeout(ctx context.Context, db database.DB) int {
searchLimit := limits.SearchLimits(conf.Get()).MaxTimeoutSeconds

View File

@ -1594,14 +1594,7 @@ func sortSeriesResolvers(ctx context.Context, seriesOptions types.SeriesDisplayO
}
}
return resolvers[:minInt(int32(len(resolvers)), limit)], nil
}
func minInt(a, b int32) int32 {
if a < b {
return a
}
return b
return resolvers[:min(int32(len(resolvers)), limit)], nil
}
func lowercaseGroupBy(groupBy *string) *string {

View File

@ -65,7 +65,6 @@ go_library(
"//internal/ratelimit",
"//internal/search/streaming/http",
"//internal/security",
"//internal/syncx",
"//internal/trace",
"//internal/types",
"//internal/unpack",

View File

@ -11,7 +11,6 @@ go_library(
],
importpath = "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/cacert",
visibility = ["//cmd/gitserver:__subpackages__"],
deps = ["//internal/syncx"],
)
go_test(

View File

@ -3,13 +3,13 @@
package cacert
import (
"github.com/sourcegraph/sourcegraph/internal/syncx"
"sync"
)
// System returns PEM encoded system certificates. Note: This function only
// works on Linux. Other operating systems do not rely on PEM files at known
// locations, instead they rely on system calls.
var System = syncx.OnceValues(func() ([][]byte, error) {
var System = sync.OnceValues(func() ([][]byte, error) {
c, err := loadSystemRoots()
if err != nil {
return nil, err

View File

@ -20,7 +20,6 @@ go_library(
"//internal/conf",
"//internal/fileutil",
"//internal/gitserver/gitdomain",
"//internal/syncx",
"//internal/trace",
"//internal/wrexec",
"//lib/errors",

View File

@ -12,6 +12,7 @@ import (
"sort"
"strconv"
"strings"
"sync"
"syscall"
"time"
@ -20,7 +21,6 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/internal/fileutil"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -174,7 +174,7 @@ func RemoveBadRefs(ctx context.Context, dir common.GitDir) (errs error) {
// older versions of git do not remove tags case insensitively, so we generate
// every possible case of HEAD (2^4 = 16)
var badRefs = syncx.OnceValue(func() []string {
var badRefs = sync.OnceValue(func() []string {
refs := make([]string, 0, 1<<4)
for bits := uint8(0); bits < (1 << 4); bits++ {
s := []byte("HEAD")

View File

@ -4,6 +4,7 @@ import (
"context"
"math"
"strconv"
"sync"
"sync/atomic"
"time"
@ -17,7 +18,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
"github.com/sourcegraph/sourcegraph/internal/gitserver/search"
"github.com/sourcegraph/sourcegraph/internal/honey"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/internal/trace"
)
@ -69,7 +69,7 @@ func (s *Server) searchWithObservability(ctx context.Context, tr trace.Trace, ar
}
}()
observeLatency := syncx.OnceFunc(func() {
observeLatency := sync.OnceFunc(func() {
searchLatency.Observe(time.Since(searchStart).Seconds())
})

View File

@ -43,7 +43,6 @@ go_library(
"//internal/search/zoekt",
"//internal/searcher/v1:searcher",
"//internal/trace",
"//internal/xcontext",
"//lib/errors",
"//schema",
"@com_github_bmatcuk_doublestar//:doublestar",

View File

@ -29,7 +29,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/metrics"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/xcontext"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -195,7 +194,7 @@ func (s *Store) PrepareZipPaths(ctx context.Context, repo api.RepoName, commit a
// TODO: consider adding a cache method that doesn't actually bother opening the file,
// since we're just going to close it again immediately.
cacheHit := true
bgctx := xcontext.Detach(ctx)
bgctx := context.WithoutCancel(ctx)
f, err := s.cache.Open(bgctx, []string{key}, func(ctx context.Context) (io.ReadCloser, error) {
cacheHit = false
return s.fetch(ctx, repo, commit, filter, paths)

View File

@ -129,13 +129,6 @@ func (p *parser) Parse(ctx context.Context, args search.SymbolsParameters, paths
return symbolOrErrors, nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}
func (p *parser) handleParseRequest(
ctx context.Context,
symbolOrErrors chan<- SymbolOrError,

View File

@ -2,6 +2,7 @@ package squirrel
import (
"math"
"slices"
"strings"
)
@ -19,7 +20,7 @@ func findHover(node Node) string {
// Skip over Java annotations and the like.
for ; prev != nil; prev = prev.PrevNamedSibling() {
if !contains(style.skipNodeTypes, prev.Type()) {
if !slices.Contains(style.skipNodeTypes, prev.Type()) {
break
}
}
@ -27,7 +28,7 @@ func findHover(node Node) string {
// Collect comments backwards.
comments := []string{}
lastStartRow := -1
for ; prev != nil && contains(style.nodeTypes, prev.Type()); prev = prev.PrevNamedSibling() {
for ; prev != nil && slices.Contains(style.nodeTypes, prev.Type()); prev = prev.PrevNamedSibling() {
if lastStartRow == -1 {
lastStartRow = int(prev.StartPoint().Row)
} else if lastStartRow != int(prev.EndPoint().Row+1) {

View File

@ -4,6 +4,7 @@ import (
"context"
"os"
"path/filepath"
"slices"
"sort"
"testing"
@ -140,7 +141,7 @@ func TestNonLocalDefinition(t *testing.T) {
gotSymbolInfo, err := squirrel.SymbolInfo(context.Background(), ref.repoCommitPathPoint)
fatalIfErrorLabel(t, err, "symbolInfo")
if contains(ref.tags, "nodef") {
if slices.Contains(ref.tags, "nodef") {
if gotSymbolInfo != nil {
t.Fatalf("unexpected definition for %s", ref.symbol)
}

View File

@ -196,24 +196,6 @@ func nodeLength(node *sitter.Node) int {
return length
}
// Of course.
func min(a, b int) int {
if a < b {
return a
}
return b
}
// When generic?
func contains(slice []string, str string) bool {
for _, s := range slice {
if s == str {
return true
}
}
return false
}
// Node is a sitter.Node plus convenient info.
type Node struct {
RepoCommitPath types.RepoCommitPath

View File

@ -1,6 +1,8 @@
package executorqueue
import (
"slices"
"github.com/inconshreveable/log15"
executortypes "github.com/sourcegraph/sourcegraph/internal/executor/types"
@ -16,7 +18,7 @@ var validCloudProviderNames = []string{"aws", "gcp"}
func normalizeAllocations(m map[string]map[string]float64, awsConfigured, gcpConfigured bool) (map[string]QueueAllocation, error) {
for queueName := range m {
if !contains(executortypes.ValidQueueNames, queueName) {
if !slices.Contains(executortypes.ValidQueueNames, queueName) {
return nil, errors.Errorf("invalid queue '%s'", queueName)
}
}
@ -51,7 +53,7 @@ func normalizeQueueAllocation(queueName string, queueAllocation map[string]float
}
for cloudProvider, allocation := range queueAllocation {
if !contains(validCloudProviderNames, cloudProvider) {
if !slices.Contains(validCloudProviderNames, cloudProvider) {
return QueueAllocation{}, errors.Errorf("invalid cloud provider '%s', expected 'aws' or 'gcp'", cloudProvider)
}
@ -78,13 +80,3 @@ func normalizeQueueAllocation(queueName string, queueAllocation map[string]float
PercentageGCP: queueAllocation["gcp"],
}, nil
}
func contains(slice []string, value string) bool {
for _, v := range slice {
if value == v {
return true
}
}
return false
}

View File

@ -64,7 +64,6 @@ go_test(
"//internal/api",
"//internal/auth",
"//internal/authz",
"//internal/collections",
"//internal/conf",
"//internal/database",
"//internal/database/dbmocks",

View File

@ -1,6 +1,7 @@
package permissions
import (
"cmp"
"context"
"encoding/json"
"testing"
@ -12,7 +13,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/authz"
"github.com/sourcegraph/sourcegraph/internal/collections"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
@ -183,7 +183,7 @@ func TestSetPermissionsForUsers(t *testing.T) {
err := perms.LoadUserPendingPermissions(ctx, userPerms)
require.NoError(t, err)
require.Equal(t, []int32{1, 2}, userPerms.IDs.Sorted(collections.NaturalCompare[int32]))
require.Equal(t, []int32{1, 2}, userPerms.IDs.Sorted(cmp.Less[int32]))
}
}

View File

@ -130,10 +130,3 @@ func isBuildFailed(build buildkite.Build, timeout time.Duration) bool {
}
return false
}
func max(x, y int) int {
if x < y {
return y
}
return x
}

View File

@ -1,15 +1,9 @@
package changed
import "path/filepath"
func contains(s []string, str string) bool {
for _, v := range s {
if v == str {
return true
}
}
return false
}
import (
"path/filepath"
"slices"
)
// Changes in the root directory files should trigger client tests.
var clientRootFiles = []string{
@ -25,5 +19,5 @@ var clientRootFiles = []string{
}
func isRootClientFile(p string) bool {
return filepath.Dir(p) == "." && contains(clientRootFiles, p)
return filepath.Dir(p) == "." && slices.Contains(clientRootFiles, p)
}

View File

@ -6,6 +6,7 @@ import (
"bufio"
"fmt"
"os"
"slices"
"strconv"
"strings"
"time"
@ -208,7 +209,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
if err != nil {
panic(fmt.Sprintf("ExtractBranchArgument: %s", err))
}
if !contains(images.SourcegraphDockerImages, patchImage) {
if !slices.Contains(images.SourcegraphDockerImages, patchImage) {
panic(fmt.Sprintf("no image %q found", patchImage))
}
@ -230,7 +231,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
if err != nil {
panic(fmt.Sprintf("ExtractBranchArgument: %s", err))
}
if !contains(images.SourcegraphDockerImages, patchImage) {
if !slices.Contains(images.SourcegraphDockerImages, patchImage) {
panic(fmt.Sprintf("no image %q found", patchImage))
}
ops = operations.NewSet(

View File

@ -9,15 +9,6 @@ import (
// Code in this file is used to split web integration tests workloads.
func contains(s []string, str string) bool {
for _, v := range s {
if v == str {
return true
}
}
return false
}
func getWebIntegrationFileNames() []string {
var fileNames []string
@ -54,14 +45,6 @@ func chunkItems(items []string, size int) [][]string {
return chunks
}
func min(x int, y int) int {
if x < y {
return x
}
return y
}
// getChunkedWebIntegrationFileNames gets web integration test filenames and splits them in chunks for parallelizing client integration tests.
func getChunkedWebIntegrationFileNames(chunkSize int) []string {
testFiles := getWebIntegrationFileNames()

View File

@ -26,7 +26,6 @@ go_library(
"//dev/managedservicesplatform/internal/stack/options/googleprovider",
"//dev/managedservicesplatform/internal/stack/options/randomprovider",
"//dev/managedservicesplatform/spec",
"//internal/syncx",
"//lib/errors",
"//lib/pointers",
"@com_github_hashicorp_terraform_cdk_go_cdktf//:cdktf",

View File

@ -5,6 +5,7 @@ import (
"html/template"
"strconv"
"strings"
"sync"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
@ -31,7 +32,6 @@ import (
"github.com/sourcegraph/sourcegraph/dev/managedservicesplatform/internal/stack/options/googleprovider"
"github.com/sourcegraph/sourcegraph/dev/managedservicesplatform/internal/stack/options/randomprovider"
"github.com/sourcegraph/sourcegraph/dev/managedservicesplatform/spec"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/lib/pointers"
)
@ -159,7 +159,7 @@ func NewStack(stacks *stack.Set, vars Variables) (crossStackOutput *CrossStackOu
var privateNetworkEnabled bool
// privateNetwork is only instantiated if used, and is only instantiated
// once. If called, it always returns a non-nil value.
privateNetwork := syncx.OnceValue(func() *privatenetwork.Output {
privateNetwork := sync.OnceValue(func() *privatenetwork.Output {
privateNetworkEnabled = true
return privatenetwork.New(stack, privatenetwork.Config{
ProjectID: vars.ProjectID,

View File

@ -52,7 +52,7 @@ func (w *prefixSuffixSaver) Write(p []byte) (n int, err error) {
// grow larger than w.N. It returns the un-appended suffix of p.
func (w *prefixSuffixSaver) fill(dst *[]byte, p []byte) (pRemain []byte) {
if remain := w.N - len(*dst); remain > 0 {
add := minInt(len(p), remain)
add := min(len(p), remain)
*dst = append(*dst, p[:add]...)
p = p[add:]
}
@ -76,10 +76,3 @@ func (w *prefixSuffixSaver) Bytes() []byte {
buf.Write(w.suffix[:w.suffixOff])
return buf.Bytes()
}
func minInt(a, b int) int {
if a < b {
return a
}
return b
}

View File

@ -14,7 +14,6 @@ go_library(
"//internal/env",
"//internal/grpc/defaults",
"//internal/httpcli",
"//internal/syncx",
"//lib/errors",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_prometheus_client_golang//prometheus/promauto",

View File

@ -10,6 +10,7 @@ import (
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
@ -24,7 +25,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -53,7 +53,7 @@ type internalClient struct {
var Client = &internalClient{
URL: frontendInternal.String(),
getConfClient: syncx.OnceValues(func() (proto.ConfigServiceClient, error) {
getConfClient: sync.OnceValues(func() (proto.ConfigServiceClient, error) {
logger := log.Scoped("internalapi")
conn, err := defaults.Dial(frontendInternal.Host, logger)
if err != nil {

View File

@ -1,6 +1,7 @@
package authz
import (
"cmp"
"fmt"
"strings"
"time"
@ -199,7 +200,7 @@ type UserPendingPermissions struct {
// GenerateSortedIDsSlice returns a sorted slice of the IDs set.
func (p *UserPendingPermissions) GenerateSortedIDsSlice() []int32 {
return p.IDs.Sorted(collections.NaturalCompare[int32])
return p.IDs.Sorted(cmp.Less[int32])
}
func (p *UserPendingPermissions) Attrs() []attribute.KeyValue {

View File

@ -3,6 +3,7 @@ package main
import (
"context"
"fmt"
"slices"
"sort"
"strings"
@ -39,7 +40,7 @@ func LoadTrackingIssues(ctx context.Context, cli *graphql.Client, org string, tr
issuesMap := map[string]*Issue{}
for _, v := range issues {
if !contains(v.Labels, "tracking") {
if !slices.Contains(v.Labels, "tracking") {
issuesMap[v.ID] = v
}
}
@ -79,7 +80,7 @@ func makeQueries(org string, trackingIssues []*Issue) (queries []string) {
properSuperset := func(a, b []string) bool {
for _, term := range b {
if !contains(a, term) {
if !slices.Contains(a, term) {
return false
}
}

View File

@ -1,6 +1,9 @@
package main
import "strings"
import (
"slices"
"strings"
)
// IssueContext tracks a visible set of issues, tracking issues, and pull requests
// with respect to a given tracking issue. The visible set of issues and pull requests
@ -65,7 +68,7 @@ func matchingTrackingIssues(trackingIssue *Issue, issues []*Issue, pullRequests
}
for _, pullRequest := range matchingPullRequests(pullRequests, matcher) {
for _, issue := range pullRequest.TrackedBy {
if contains(issue.Labels, "tracking") {
if slices.Contains(issue.Labels, "tracking") {
stack = append(stack, issue)
} else {
stack = append(stack, issue.TrackedBy...)

View File

@ -1,6 +1,7 @@
package main
import (
"slices"
"sort"
"strings"
@ -45,7 +46,7 @@ var customerMatcher = regexp.MustCompile(`https://app\.hubspot\.com/contacts/276
// categorizeCustomerIssue adds a customer emoji if the repository matches sourcegraph/customer or if
// the issue contains a hubspot URL.
func categorizeCustomerIssue(labels []string, repository, body string, categories map[string]string) {
if repository == "sourcegraph/customer" || contains(labels, "customer") {
if repository == "sourcegraph/customer" || slices.Contains(labels, "customer") {
if customer := customerMatcher.FindString(body); customer != "" {
categories["customer"] = "[👩](" + customer + ")"
} else {

View File

@ -2,16 +2,6 @@ package main
import "strings"
func contains(haystack []string, needle string) bool {
for _, candidate := range haystack {
if candidate == needle {
return true
}
}
return false
}
func redactLabels(labels []string) (redacted []string) {
for _, label := range labels {
if strings.HasPrefix(label, "estimate/") || strings.HasPrefix(label, "planned/") {

View File

@ -1,6 +1,9 @@
package main
import "fmt"
import (
"fmt"
"slices"
)
type Matcher struct {
labels []string
@ -23,7 +26,7 @@ func NewMatcher(labels []string, milestone string, assignee string, noAssignee b
// with the tracking issue will never be matched.
func (m *Matcher) Issue(issue *Issue) bool {
return testAll(
!contains(issue.Labels, "tracking"),
!slices.Contains(issue.Labels, "tracking"),
m.testAssignee(issue.Assignees...),
m.testLabels(issue.Labels),
m.testMilestone(issue.Milestone, issue.Labels),
@ -50,14 +53,14 @@ func (m *Matcher) testAssignee(assignees ...string) bool {
return true
}
return contains(assignees, m.assignee)
return slices.Contains(assignees, m.assignee)
}
// testLabels returns true if every label that this matcher was configured with exists
// in the given label list.
func (m *Matcher) testLabels(labels []string) bool {
for _, label := range m.labels {
if !contains(labels, label) {
if !slices.Contains(labels, label) {
return false
}
}
@ -69,7 +72,7 @@ func (m *Matcher) testLabels(labels []string) bool {
// was configured with, if the given labels contains a planned/{milestone} label, or
// the milestone on the tracking issue is not restricted.
func (m *Matcher) testMilestone(milestone string, labels []string) bool {
return m.milestone == "" || milestone == m.milestone || contains(labels, fmt.Sprintf("planned/%s", m.milestone))
return m.milestone == "" || milestone == m.milestone || slices.Contains(labels, fmt.Sprintf("planned/%s", m.milestone))
}
// testAll returns true if all of the given values are true.

View File

@ -2,6 +2,7 @@ package main
import (
"fmt"
"slices"
"sort"
"strconv"
"strings"
@ -440,7 +441,7 @@ func (ar *AssigneeRenderer) resetDisplayFlags() {
// doRenderIssue returns the given issue rendered in markdown.
func (ar *AssigneeRenderer) doRenderIssue(issue *Issue, milestone string) string {
url := issue.URL
if issue.Milestone != milestone && contains(issue.Labels, fmt.Sprintf("planned/%s", milestone)) {
if issue.Milestone != milestone && slices.Contains(issue.Labels, fmt.Sprintf("planned/%s", milestone)) {
// deprioritized
url = fmt.Sprintf("~%s~", url)
}

View File

@ -3,7 +3,6 @@ package context
import (
"context"
"fmt"
"math"
"strconv"
"strings"
"sync"
@ -394,13 +393,3 @@ func fileMatchToContextMatches(fm *result.FileMatch) []FileChunkContext {
EndLine: endLine,
}}
}
func max(vals ...int) int {
res := math.MinInt32
for _, val := range vals {
if val > res {
res = val
}
}
return res
}

View File

@ -1,6 +1,7 @@
package collections
import (
"cmp"
"testing"
"github.com/grafana/regexp"
@ -11,7 +12,7 @@ func TestSet(t *testing.T) {
a := NewSet(1, 2, 3)
b := NewSet(2, 3, 4)
cmp := NaturalCompare[int]
cmp := cmp.Less[int]
t.Run("Set can be created from another Set", func(t *testing.T) {
c := NewSet(a.Values()...)

View File

@ -6,14 +6,6 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// Returns minimum of 2 numbers
func Min[T constraints.Ordered](a T, b T) T {
if a < b {
return a
}
return b
}
// NaturalCompare is a comparator function that will help sort numbers in natural order
// when used in sort.Slice.
// For example, 1, 2, 3, 10, 11, 12, 20, 21, 22, 100, 101, 102, 200, 201, 202, ...
@ -26,10 +18,10 @@ func SplitIntoChunks[T any](slice []T, size int) ([][]T, error) {
if size < 1 {
return nil, errors.Newf("size must be greater than 1")
}
numChunks := Min(1+(len(slice)-1)/size, len(slice))
numChunks := min(1+(len(slice)-1)/size, len(slice))
chunks := make([][]T, numChunks)
for i := 0; i < numChunks; i++ {
maxIndex := Min((i+1)*size, len(slice))
maxIndex := min((i+1)*size, len(slice))
chunks[i] = slice[i*size : maxIndex]
}
return chunks, nil

View File

@ -1,55 +1,12 @@
package collections
import (
"math"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
)
func Test_Min(t *testing.T) {
t.Run("Returns first int that is smaller", func(t *testing.T) {
got := Min(1, 2)
want := 1
if got != want {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("Returns second int that is smaller", func(t *testing.T) {
got := Min(2, 1)
want := 1
if got != want {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("Works with a float as well", func(t *testing.T) {
got := Min(1.5, 1.52)
want := 1.5
if got != want {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("Works with infinity", func(t *testing.T) {
got := Min(1.5, math.Inf(1))
want := 1.5
if got != want {
t.Errorf("got %v, want %v", got, want)
}
})
t.Run("Works with negative infinity", func(t *testing.T) {
got := Min(1.5, math.Inf(-1))
want := math.Inf(-1)
if got != want {
t.Errorf("got %v, want %v", got, want)
}
})
}
func Test_SplitIntoChunks(t *testing.T) {
t.Run("Splits a slice into chunks of size 3", func(t *testing.T) {
got, err := SplitIntoChunks([]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, 3)

View File

@ -132,13 +132,6 @@ func respondRateLimited(w http.ResponseWriter, err RateLimitExceededError) {
http.Error(w, err.Error(), http.StatusTooManyRequests)
}
func max(a, b int) int {
if a > b {
return a
}
return b
}
// newSwitchingResponseHandler handles requests to an LLM provider, and wraps the correct
// handler based on the requestParams.Stream flag.
func newSwitchingResponseHandler(logger log.Logger, feature types.CompletionsFeature) func(ctx context.Context, requestParams types.CompletionRequestParameters, cc types.CompletionsClient, w http.ResponseWriter) {

View File

@ -194,10 +194,3 @@ func getConfiguredLimit(ctx context.Context, db database.DB, scope types.Complet
return 0, nil
}
func min(a, b int) int {
if a < b {
return a
}
return b
}

View File

@ -150,7 +150,6 @@ go_library(
"//internal/trace",
"//internal/types",
"//internal/version",
"//internal/xcontext",
"//lib/errors",
"//lib/pointers",
"//schema",

View File

@ -7,7 +7,6 @@ go_library(
importpath = "github.com/sourcegraph/sourcegraph/internal/database/dbconn/rds",
visibility = ["//:__subpackages__"],
deps = [
"//internal/syncx",
"//lib/errors",
"@com_github_aws_aws_sdk_go//aws/session",
"@com_github_aws_aws_sdk_go//service/rds/rdsutils",

View File

@ -5,13 +5,13 @@ import (
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/rds/rdsutils"
"github.com/jackc/pgx/v4"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -40,7 +40,7 @@ func (u *Updater) Update(cfg *pgx.ConnConfig) (*pgx.ConnConfig, error) {
logger := log.Scoped("rds")
if cfg.Password != "" {
// only output the warning once, or it will emit a new entry on every connection
syncx.OnceFunc(func() {
sync.OnceFunc(func() {
logger.Warn("'PG_CONNECTION_UPDATER' is 'EC2_ROLE_CREDENTIALS', but 'PGPASSWORD' is also set. Ignoring 'PGPASSWORD'.")
})
}

View File

@ -26,7 +26,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/version"
"github.com/sourcegraph/sourcegraph/internal/xcontext"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -311,7 +310,7 @@ func (l *eventLogStore) BulkInsert(ctx context.Context, events []*Event) error {
// Create a cancel-free context to avoid interrupting the insert when
// the parent context is cancelled, and add our own timeout on the insert
// to make sure things don't get stuck in an unbounded manner.
insertCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Minute)
insertCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 5*time.Minute)
defer cancel()
return batch.InsertValues(

View File

@ -22,7 +22,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/telemetry/sensitivemetadataallowlist"
telemetrygatewayv1 "github.com/sourcegraph/sourcegraph/internal/telemetrygateway/v1"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/xcontext"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -155,7 +154,7 @@ func (s *telemetryEventsExportQueueStore) QueueForExport(ctx context.Context, ev
// Create a cancel-free context to avoid interrupting the insert when
// the parent context is cancelled, and add our own timeout on the insert
// to make sure things don't get stuck in an unbounded manner.
insertCtx, cancel := context.WithTimeout(xcontext.Detach(ctx), 5*time.Minute)
insertCtx, cancel := context.WithTimeout(context.WithoutCancel(ctx), 5*time.Minute)
defer cancel()
err = batch.InsertValues(

View File

@ -147,10 +147,3 @@ func IsValidTextFile(fileName string) bool {
basename := strings.ToLower(filepath.Base(fileName))
return strings.HasPrefix(basename, "license")
}
// min reports the smaller of a and b.
func min(a, b int) int {
	smallest := a
	if b < a {
		smallest = b
	}
	return smallest
}

View File

@ -202,20 +202,6 @@ func (index *EmbeddingIndex) score(query []int8, i int, opts SearchOptions) Sear
}
}
// min returns whichever of x and y is smaller.
func min(x, y int) int {
	if x <= y {
		return x
	}
	return y
}
// max returns whichever of x and y is larger.
func max(x, y int) int {
	if x >= y {
		return x
	}
	return y
}
type SearchOptions struct {
UseDocumentRanks bool
}

View File

@ -114,13 +114,6 @@ func sortAndLimitComputedGroups(timeSeries []GeneratedTimeSeries) []GeneratedTim
return timeSeries[i].Points[0].Count > timeSeries[j].Points[0].Count
}
sort.SliceStable(timeSeries, descValueSort)
limit := minInt(20, len(timeSeries))
limit := min(20, len(timeSeries))
return timeSeries[:limit]
}
// minInt returns the lesser of a and b.
func minInt(a, b int) int {
	if b < a {
		return b
	}
	return a
}

View File

@ -17,7 +17,6 @@ go_library(
"//internal/httpcli",
"//internal/repoupdater/protocol",
"//internal/repoupdater/v1:repoupdater",
"//internal/syncx",
"//internal/trace",
"//lib/errors",
"@com_github_sourcegraph_log//:log",

View File

@ -9,6 +9,7 @@ import (
"net/http"
"net/url"
"os"
"sync"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
@ -22,7 +23,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/repoupdater/v1"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -66,7 +66,7 @@ func NewClient(serverURL string) *Client {
return &Client{
URL: serverURL,
HTTPClient: defaultDoer,
grpcClient: syncx.OnceValues(func() (proto.RepoUpdaterServiceClient, error) {
grpcClient: sync.OnceValues(func() (proto.RepoUpdaterServiceClient, error) {
u, err := url.Parse(serverURL)
if err != nil {
return nil, err

View File

@ -8,7 +8,6 @@ go_library(
importpath = "github.com/sourcegraph/sourcegraph/internal/requestclient/geolocation",
visibility = ["//:__subpackages__"],
deps = [
"//internal/syncx",
"//lib/errors",
"@com_github_oschwald_maxminddb_golang//:maxminddb-golang",
],

View File

@ -7,10 +7,10 @@ package geolocation
import (
_ "embed"
"net"
"sync"
"github.com/oschwald/maxminddb-golang"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -20,7 +20,7 @@ var mmdbData []byte
// getLocationsDB holds the MMDB-format database embedded at mmdbData.
// It is only evaluated once - subsequent calls will return the first initialized
// *maxminddb.Reader instance.
var getLocationsDB = syncx.OnceValue(func() *maxminddb.Reader {
var getLocationsDB = sync.OnceValue(func() *maxminddb.Reader {
db, err := maxminddb.FromBytes(mmdbData)
if err != nil {
panic(errors.Wrap(err, "initialize IP database"))

View File

@ -14,7 +14,6 @@ go_library(
"//internal/search/result",
"//internal/search/zoekt",
"//internal/symbols",
"//internal/syncx",
"//internal/trace/policy",
"//internal/types",
"//lib/errors",

View File

@ -3,6 +3,7 @@ package symbol
import (
"context"
"regexp/syntax" //nolint:depguard // zoekt requires this pkg
"sync"
"time"
"github.com/RoaringBitmap/roaring"
@ -17,7 +18,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/search/result"
zoektutil "github.com/sourcegraph/sourcegraph/internal/search/zoekt"
"github.com/sourcegraph/sourcegraph/internal/symbols"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/internal/trace/policy"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -28,7 +28,7 @@ const DefaultSymbolLimit = 100
// NOTE: this lives inside a syncx.OnceValue because search.Indexed depends on
// conf.Get, and running conf.Get() at init time can cause a deadlock. So,
// we construct it lazily instead.
var DefaultZoektSymbolsClient = syncx.OnceValue(func() *ZoektSymbolsClient {
var DefaultZoektSymbolsClient = sync.OnceValue(func() *ZoektSymbolsClient {
return &ZoektSymbolsClient{
subRepoPermsChecker: authz.DefaultSubRepoPermsChecker,
zoektStreamer: search.Indexed(),

View File

@ -29,7 +29,6 @@ go_library(
"//internal/search/streaming",
"//internal/trace",
"//internal/types",
"//internal/xcontext",
"//lib/errors",
"@com_github_go_enry_go_enry_v2//:go-enry",
"@com_github_grafana_regexp//:regexp",

View File

@ -28,7 +28,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/search/streaming"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/xcontext"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -596,7 +595,7 @@ func zoektFileMatchToSymbolResults(repoName types.MinimalRepo, inputRev string,
// contextWithoutDeadline returns a context which will cancel if the cOld is
// canceled.
func contextWithoutDeadline(cOld context.Context) (context.Context, context.CancelFunc) {
cNew := xcontext.Detach(cOld)
cNew := context.WithoutCancel(cOld)
cNew, cancel := context.WithCancel(cNew)
go func() {

View File

@ -16,7 +16,6 @@ go_library(
"//internal/profiler",
"//internal/service",
"//internal/singleprogram",
"//internal/syncx",
"//internal/tracer",
"//internal/version",
"@com_github_getsentry_sentry_go//:sentry-go",

View File

@ -23,7 +23,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/profiler"
sgservice "github.com/sourcegraph/sourcegraph/internal/service"
"github.com/sourcegraph/sourcegraph/internal/singleprogram"
"github.com/sourcegraph/sourcegraph/internal/syncx"
"github.com/sourcegraph/sourcegraph/internal/tracer"
"github.com/sourcegraph/sourcegraph/internal/version"
)
@ -233,7 +232,7 @@ func run(
obctx := observation.ContextWithLogger(log.Scoped(service.Name()), obctx)
// ensure ready is only called once and always call it.
ready := syncx.OnceFunc(allReadyWG.Done)
ready := sync.OnceFunc(allReadyWG.Done)
defer ready()
// Don't run executors for Cody App

View File

@ -1,16 +0,0 @@
load("//dev:go_defs.bzl", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "syncx",
srcs = ["oncefunc.go"],
importpath = "github.com/sourcegraph/sourcegraph/internal/syncx",
visibility = ["//:__subpackages__"],
)
go_test(
name = "syncx_test",
timeout = "short",
srcs = ["oncefunc_test.go"],
deps = [":syncx"],
)

View File

@ -1,91 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// package syncx contains an accepted proposal for the sync package in go1.20.
// See https://github.com/golang/go/issues/56102 and https://go.dev/cl/451356
package syncx
import "sync"
// OnceFunc returns a function that invokes f only once. The returned function
// may be called concurrently.
//
// If f panics, the returned function will panic with the same value on every call.
// OnceFunc returns a function that invokes f only once. The returned
// function may be called concurrently.
//
// If f panics, the returned function will panic with the same value on
// every call.
func OnceFunc(f func()) func() {
	var (
		gate       sync.Once
		completed  bool
		panicValue any
	)
	// runOnce executes f exactly once, recording any panic so later
	// calls can re-raise it. On the very first call we re-panic from
	// inside gate.Do so the caller gets a complete stack trace into f.
	runOnce := func() {
		defer func() {
			panicValue = recover()
			if !completed {
				panic(panicValue)
			}
		}()
		f()
		completed = true // reached only if f returned normally
	}
	return func() {
		gate.Do(runOnce)
		if !completed {
			panic(panicValue)
		}
	}
}
// OnceValue returns a function that invokes f only once and returns the value
// returned by f. The returned function may be called concurrently.
//
// If f panics, the returned function will panic with the same value on every call.
// OnceValue returns a function that invokes f only once and returns the
// value returned by f. The returned function may be called concurrently.
//
// If f panics, the returned function will panic with the same value on
// every call.
func OnceValue[T any](f func() T) func() T {
	var (
		gate       sync.Once
		completed  bool
		panicValue any
		value      T
	)
	runOnce := func() {
		defer func() {
			panicValue = recover()
			if !completed {
				// Re-panic on the first call so the stack trace
				// reaches into f.
				panic(panicValue)
			}
		}()
		value = f()
		completed = true // set only if f did not panic
	}
	return func() T {
		gate.Do(runOnce)
		if !completed {
			panic(panicValue)
		}
		return value
	}
}
// OnceValues returns a function that invokes f only once and returns the values
// returned by f. The returned function may be called concurrently.
//
// If f panics, the returned function will panic with the same value on every call.
// OnceValues returns a function that invokes f only once and returns the
// values returned by f. The returned function may be called concurrently.
//
// If f panics, the returned function will panic with the same value on
// every call.
func OnceValues[T1, T2 any](f func() (T1, T2)) func() (T1, T2) {
	var (
		gate       sync.Once
		completed  bool
		panicValue any
		first      T1
		second     T2
	)
	runOnce := func() {
		defer func() {
			panicValue = recover()
			if !completed {
				// Re-panic on the first call so the stack trace
				// reaches into f.
				panic(panicValue)
			}
		}()
		first, second = f()
		completed = true // set only if f did not panic
	}
	return func() (T1, T2) {
		gate.Do(runOnce)
		if !completed {
			panic(panicValue)
		}
		return first, second
	}
}

View File

@ -1,160 +0,0 @@
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package syncx_test
import (
"bytes"
"runtime/debug"
"sync"
"testing"
"github.com/sourcegraph/sourcegraph/internal/syncx"
)
// We assume that the Once.Do tests have already covered parallelism.

// TestOnceFunc verifies that the function wrapped by syncx.OnceFunc runs
// exactly once across repeated calls, and that invoking the returned
// function is allocation-free.
func TestOnceFunc(t *testing.T) {
	calls := 0
	f := syncx.OnceFunc(func() { calls++ })
	// AllocsPerRun reports the average number of heap allocations per
	// call of f over repeated runs; the wrapper must not allocate.
	allocs := testing.AllocsPerRun(10, f)
	if calls != 1 {
		t.Errorf("want calls==1, got %d", calls)
	}
	if allocs != 0 {
		t.Errorf("want 0 allocations per call, got %v", allocs)
	}
}
// TestOnceValue verifies that the function wrapped by syncx.OnceValue runs
// exactly once, that its result is cached and returned on later calls,
// and that calling the wrapper is allocation-free.
func TestOnceValue(t *testing.T) {
	calls := 0
	f := syncx.OnceValue(func() int {
		calls++
		return calls
	})
	// AllocsPerRun averages heap allocations over repeated invocations;
	// the f() result is discarded here because only allocations matter.
	allocs := testing.AllocsPerRun(10, func() { f() })
	value := f()
	if calls != 1 {
		t.Errorf("want calls==1, got %d", calls)
	}
	if value != 1 {
		t.Errorf("want value==1, got %d", value)
	}
	if allocs != 0 {
		t.Errorf("want 0 allocations per call, got %v", allocs)
	}
}
// TestOnceValues verifies that the function wrapped by syncx.OnceValues
// runs exactly once, that both cached results are returned on later calls,
// and that calling the wrapper is allocation-free.
func TestOnceValues(t *testing.T) {
	calls := 0
	f := syncx.OnceValues(func() (int, int) {
		calls++
		return calls, calls + 1
	})
	// AllocsPerRun averages heap allocations over repeated invocations;
	// the results are discarded here because only allocations matter.
	allocs := testing.AllocsPerRun(10, func() { f() })
	v1, v2 := f()
	if calls != 1 {
		t.Errorf("want calls==1, got %d", calls)
	}
	if v1 != 1 || v2 != 2 {
		t.Errorf("want v1==1 and v2==2, got %d and %d", v1, v2)
	}
	if allocs != 0 {
		t.Errorf("want 0 allocations per call, got %v", allocs)
	}
}
// testOncePanic asserts that f panics with the value "x" on each of two
// calls while the underlying wrapped function (counted via *calls) ran
// only once. It is the shared driver for the OnceFunc/OnceValue/OnceValues
// panic tests.
func testOncePanic(t *testing.T, calls *int, f func()) {
	// Check that the each call to f panics with the same value, but the
	// underlying function is only called once.
	for _, label := range []string{"first time", "second time"} {
		var p any
		panicked := true
		func() {
			defer func() {
				p = recover()
			}()
			f()
			panicked = false // reached only if f returned without panicking
		}()
		if !panicked {
			t.Fatalf("%s: f did not panic", label)
		}
		if p != "x" {
			t.Fatalf("%s: want panic %v, got %v", label, "x", p)
		}
	}
	if *calls != 1 {
		t.Errorf("want calls==1, got %d", *calls)
	}
}
func TestOnceFuncPanic(t *testing.T) {
calls := 0
f := syncx.OnceFunc(func() {
calls++
panic("x")
})
testOncePanic(t, &calls, f)
}
// TestOnceValuePanic checks that a panicking function wrapped by
// syncx.OnceValue runs only once while every call re-panics with the
// same value.
func TestOnceValuePanic(t *testing.T) {
	invocations := 0
	wrapped := syncx.OnceValue(func() int {
		invocations++
		panic("x")
	})
	testOncePanic(t, &invocations, func() { wrapped() })
}
// TestOnceValuesPanic checks that a panicking function wrapped by
// syncx.OnceValues runs only once while every call re-panics with the
// same value.
func TestOnceValuesPanic(t *testing.T) {
	invocations := 0
	wrapped := syncx.OnceValues(func() (int, int) {
		invocations++
		panic("x")
	})
	testOncePanic(t, &invocations, func() { wrapped() })
}
// TestOnceFuncPanicTraceback checks that when a OnceFunc-wrapped function
// panics on its first invocation, the stack trace reaches the origin of
// the panic (onceFuncPanic) rather than stopping at the wrapper.
func TestOnceFuncPanicTraceback(t *testing.T) {
	// Test that on the first invocation of a OnceFunc, the stack trace goes all
	// the way to the origin of the panic.
	f := syncx.OnceFunc(onceFuncPanic)
	defer func() {
		if p := recover(); p != "x" {
			t.Fatalf("want panic %v, got %v", "x", p)
		}
		// Capture the stack while still inside the deferred handler and
		// search it for the panicking function's name.
		stack := debug.Stack()
		// Add second case for bazel binary names
		want := []string{"syncx_test.onceFuncPanic", "syncx_test_test.onceFuncPanic"}
		if !bytes.Contains(stack, []byte(want[0])) && !bytes.Contains(stack, []byte(want[1])) {
			t.Fatalf("want stack containing %v, got:\n%s", want, string(stack))
		}
	}()
	f()
}
// onceFuncPanic is the panicking function used by
// TestOnceFuncPanicTraceback; it is a named function so its name can be
// searched for in the captured stack trace.
func onceFuncPanic() {
	panic("x")
}
// BenchmarkOnceFunc compares the per-call cost of a syncx.OnceFunc wrapper
// against open-coded sync.Once.Do, reporting allocations for both.
func BenchmarkOnceFunc(b *testing.B) {
	b.Run("OnceFunc", func(b *testing.B) {
		b.ReportAllocs()
		f := syncx.OnceFunc(func() {})
		for i := 0; i < b.N; i++ {
			f()
		}
	})
	// Versus open-coding with Once.Do
	b.Run("Once", func(b *testing.B) {
		b.ReportAllocs()
		var once sync.Once
		f := func() {}
		for i := 0; i < b.N; i++ {
			once.Do(f)
		}
	})
}

View File

@ -1,8 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "xcontext",
srcs = ["detach.go"],
importpath = "github.com/sourcegraph/sourcegraph/internal/xcontext",
visibility = ["//:__subpackages__"],
)

View File

@ -1,26 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package xcontext is a package to offer the extra functionality we need
// from contexts that is not available from the standard context package.
//
// Copied from an internal golang package:
// https://github.com/golang/tools/blob/a01290f9844baeb2bacb81f21640f46b78680918/internal/xcontext/xcontext.go#L7
package xcontext
import (
"context"
"time"
)
// Detach returns a context that keeps all the values of its parent context
// but detaches from the cancellation and error handling.
func Detach(ctx context.Context) context.Context { return detachedContext{ctx} }
type detachedContext struct{ parent context.Context }
func (v detachedContext) Deadline() (time.Time, bool) { return time.Time{}, false }
func (v detachedContext) Done() <-chan struct{} { return nil }
func (v detachedContext) Err() error { return nil }
func (v detachedContext) Value(key interface{}) interface{} { return v.parent.Value(key) }

View File

@ -5,6 +5,7 @@ import (
"encoding/json"
"fmt"
"io"
"slices"
"strings"
"github.com/grafana/regexp"
@ -39,7 +40,7 @@ func (v *Visualizer) Visualize(indexFile io.Reader, fromID, subgraphDepth int, e
return true
}
if contains(lineContext.Element.Label, exclude) {
if slices.Contains(exclude, lineContext.Element.Label) {
return true
}
@ -67,14 +68,14 @@ func (v *Visualizer) Visualize(indexFile io.Reader, fromID, subgraphDepth int, e
}
vertex, _ := v.Context.Stasher.Vertex(edge.OutV)
if contains(vertex.Element.Label, exclude) {
if slices.Contains(exclude, vertex.Element.Label) {
return true
}
return forEachInV(edge, func(inV int) bool {
if _, ok := vertices[inV]; ok {
vertex, _ = v.Context.Stasher.Vertex(inV)
if contains(vertex.Element.Label, exclude) {
if slices.Contains(exclude, vertex.Element.Label) {
return true
}
fmt.Printf("\tv%d -> v%d [label=\"(%d) %s\"];\n", edge.OutV, inV, lineContext.Element.ID, lineContext.Element.Label)
@ -102,12 +103,3 @@ func getReachableVerticesAtDepth(from int, forwardEdges, backwardEdges map[int][
getReachableVerticesAtDepth(v, forwardEdges, backwardEdges, depth-1, vertices)
}
}
// contains reports whether s is an element of ss.
func contains(s string, ss []string) bool {
	for i := range ss {
		if ss[i] == s {
			return true
		}
	}
	return false
}