Housekeeping: Rename variables to avoid collisions with packages (#47179)

This commit is contained in:
David Veszelovszki 2023-01-31 16:28:43 +01:00 committed by GitHub
parent 4ce669e11a
commit f23537a669
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
203 changed files with 921 additions and 923 deletions

View File

@ -60,14 +60,14 @@ func Start(ctx context.Context, observationCtx *observation.Context, config *Con
// Ready immediately
ready()
service := &blobstore.Service{
bsService := &blobstore.Service{
DataDir: config.DataDir,
Log: logger,
ObservationCtx: observation.NewContext(logger),
}
// Set up handler middleware
handler := actor.HTTPMiddleware(logger, service)
handler := actor.HTTPMiddleware(logger, bsService)
handler = trace.HTTPMiddleware(logger, handler, conf.DefaultClient())
handler = instrumentation.HTTPMiddleware("", handler)

View File

@ -312,8 +312,8 @@ func validateExecutorSecret(secret *database.ExecutorSecret, value string) error
if dac.CredsStore != "" {
return errors.New("cannot use credential stores in docker auth config set via secrets")
}
for key, auth := range dac.Auths {
if !bytes.Contains(auth.Auth, []byte(":")) {
for key, dacAuth := range dac.Auths {
if !bytes.Contains(dacAuth.Auth, []byte(":")) {
return errors.Newf("invalid credential in auths section for %q format has to be base64(username:password)", key)
}
}

View File

@ -53,15 +53,15 @@ func FileOrDir(ctx context.Context, db database.DB, client gitserver.Client, rep
}
if link != nil {
var url string
var urlStr string
if isDir {
url = link.Tree
urlStr = link.Tree
} else {
url = link.Blob
urlStr = link.Blob
}
if url != "" {
url = strings.NewReplacer("{rev}", rev, "{path}", path).Replace(url)
links = append(links, NewResolver(url, serviceType))
if urlStr != "" {
urlStr = strings.NewReplacer("{rev}", rev, "{path}", path).Replace(urlStr)
links = append(links, NewResolver(urlStr, serviceType))
}
}

View File

@ -193,15 +193,15 @@ func (r *GitCommitResolver) Parents(ctx context.Context) ([]*GitCommitResolver,
}
func (r *GitCommitResolver) URL() string {
url := r.repoResolver.url()
url.Path += "/-/commit/" + r.inputRevOrImmutableRev()
return url.String()
repoUrl := r.repoResolver.url()
repoUrl.Path += "/-/commit/" + r.inputRevOrImmutableRev()
return repoUrl.String()
}
func (r *GitCommitResolver) CanonicalURL() string {
url := r.repoResolver.url()
url.Path += "/-/commit/" + string(r.oid)
return url.String()
repoUrl := r.repoResolver.url()
repoUrl.Path += "/-/commit/" + string(r.oid)
return repoUrl.String()
}
func (r *GitCommitResolver) ExternalURLs(ctx context.Context) ([]*externallink.Resolver, error) {
@ -398,7 +398,7 @@ func (r *GitCommitResolver) inputRevOrImmutableRev() string {
// "/REPO/-/commit/REVSPEC").
func (r *GitCommitResolver) repoRevURL() *url.URL {
// Dereference to copy to avoid mutation
url := *r.repoResolver.RepoMatch.URL()
repoUrl := *r.repoResolver.RepoMatch.URL()
var rev string
if r.inputRev != nil {
rev = *r.inputRev // use the original input rev from the user
@ -406,14 +406,14 @@ func (r *GitCommitResolver) repoRevURL() *url.URL {
rev = string(r.oid)
}
if rev != "" {
url.Path += "@" + rev
repoUrl.Path += "@" + rev
}
return &url
return &repoUrl
}
func (r *GitCommitResolver) canonicalRepoRevURL() *url.URL {
// Dereference to copy the URL to avoid mutation
url := *r.repoResolver.RepoMatch.URL()
url.Path += "@" + string(r.oid)
return &url
repoUrl := *r.repoResolver.RepoMatch.URL()
repoUrl.Path += "@" + string(r.oid)
return &repoUrl
}

View File

@ -14,8 +14,8 @@ import (
)
func (r *GitTreeEntryResolver) IsRoot() bool {
path := path.Clean(r.Path())
return path == "/" || path == "." || path == ""
cleanPath := path.Clean(r.Path())
return cleanPath == "/" || cleanPath == "." || cleanPath == ""
}
type gitTreeEntryConnectionArgs struct {

View File

@ -215,8 +215,8 @@ func (r *GitTreeEntryResolver) url(ctx context.Context) *url.URL {
}
func (r *GitTreeEntryResolver) CanonicalURL() string {
url := r.commit.canonicalRepoRevURL()
return r.urlPath(url).String()
canonicalUrl := r.commit.canonicalRepoRevURL()
return r.urlPath(canonicalUrl).String()
}
func (r *GitTreeEntryResolver) urlPath(prefix *url.URL) *url.URL {

View File

@ -83,7 +83,7 @@ func (h *HighlightedFileResolver) LineRanges(args *struct{ Ranges []highlight.Li
func highlightContent(ctx context.Context, args *HighlightArgs, content, path string, metadata highlight.Metadata) (*HighlightedFileResolver, error) {
var (
result = &HighlightedFileResolver{}
resolver = &HighlightedFileResolver{}
err error
simulateTimeout = metadata.RepoName == "github.com/sourcegraph/AlwaysHighlightTimeoutTest"
)
@ -98,12 +98,12 @@ func highlightContent(ctx context.Context, args *HighlightArgs, content, path st
Format: gosyntect.GetResponseFormat(args.Format),
})
result.aborted = aborted
result.response = response
resolver.aborted = aborted
resolver.response = response
if err != nil {
return nil, err
}
return result, nil
return resolver, nil
}

View File

@ -115,12 +115,12 @@ func (r *schemaResolver) OutboundWebhookEventTypes(ctx context.Context) ([]Outbo
return nil, err
}
types := outbound.GetRegisteredEventTypes()
sort.Slice(types, func(i, j int) bool {
return types[i].Key < types[j].Key
eventTypes := outbound.GetRegisteredEventTypes()
sort.Slice(eventTypes, func(i, j int) bool {
return eventTypes[i].Key < eventTypes[j].Key
})
resolvers := make([]OutboundWebhookEventTypeResolver, len(types))
for i, et := range types {
resolvers := make([]OutboundWebhookEventTypeResolver, len(eventTypes))
for i, et := range eventTypes {
resolvers[i] = &outboundWebhookEventTypeResolver{et}
}
@ -362,14 +362,14 @@ func (r *outboundWebhookResolver) EventTypes() ([]OutboundWebhookScopedEventType
return nil, err
}
types := make([]OutboundWebhookScopedEventTypeResolver, len(webhook.EventTypes))
eventTypes := make([]OutboundWebhookScopedEventTypeResolver, len(webhook.EventTypes))
for i, et := range webhook.EventTypes {
types[i] = &outboundWebhookScopedEventTypeResolver{
eventTypes[i] = &outboundWebhookScopedEventTypeResolver{
eventType: et.EventType,
scope: et.Scope,
}
}
return types, nil
return eventTypes, nil
}
func (r *outboundWebhookResolver) Stats(ctx context.Context) (OutboundWebhookLogStatsResolver, error) {

View File

@ -164,8 +164,8 @@ func TestRepositoryLabel(t *testing.T) {
ID: api.RepoID(0),
},
}
result, _ := r.Label()
html, err := result.HTML()
markdown, _ := r.Label()
html, err := markdown.HTML()
if err != nil {
t.Fatal(err)
}

View File

@ -117,22 +117,22 @@ func TestSearch(t *testing.T) {
gsClient.ResolveRevisionFunc.SetDefaultHook(tc.repoRevsMock)
sr := newSchemaResolver(db, gsClient)
schema, err := graphql.ParseSchema(mainSchema, sr, graphql.Tracer(&requestTracer{}))
gqlSchema, err := graphql.ParseSchema(mainSchema, sr, graphql.Tracer(&requestTracer{}))
if err != nil {
t.Fatal(err)
}
result := schema.Exec(context.Background(), testSearchGQLQuery, "", vars)
if len(result.Errors) > 0 {
t.Fatalf("graphQL query returned errors: %+v", result.Errors)
response := gqlSchema.Exec(context.Background(), testSearchGQLQuery, "", vars)
if len(response.Errors) > 0 {
t.Fatalf("graphQL query returned errors: %+v", response.Errors)
}
var search struct {
var searchStruct struct {
Results Results
}
if err := json.Unmarshal(result.Data, &search); err != nil {
if err := json.Unmarshal(response.Data, &searchStruct); err != nil {
t.Fatalf("parsing JSON response: %v", err)
}
gotResults := search.Results
gotResults := searchStruct.Results
if !reflect.DeepEqual(gotResults, tc.wantResults) {
t.Fatalf("results = %+v, want %+v", gotResults, tc.wantResults)
}

View File

@ -133,11 +133,11 @@ func (r *siteConfigurationResolver) ID(ctx context.Context) (int32, error) {
if err := auth.CheckCurrentUserIsSiteAdmin(ctx, r.db); err != nil {
return 0, err
}
conf, err := r.db.Conf().SiteGetLatest(ctx)
config, err := r.db.Conf().SiteGetLatest(ctx)
if err != nil {
return 0, err
}
return conf.ID, nil
return config.ID, nil
}
func (r *siteConfigurationResolver) EffectiveContents(ctx context.Context) (JSONCString, error) {

View File

@ -109,13 +109,13 @@ func TestSiteConfigConnection(t *testing.T) {
stubs := setupSiteConfigStubs(t)
// Create a context with an admin user as the actor.
context := actor.WithActor(context.Background(), &actor.Actor{UID: 1})
contextWithActor := actor.WithActor(context.Background(), &actor.Actor{UID: 1})
RunTests(t, []*Test{
{
Schema: mustParseGraphQLSchema(t, stubs.db),
Label: "Get first 2 site configuration history",
Context: context,
Context: contextWithActor,
Query: `
{
site {
@ -184,7 +184,7 @@ func TestSiteConfigConnection(t *testing.T) {
{
Schema: mustParseGraphQLSchema(t, stubs.db),
Label: "Get last 3 site configuration history",
Context: context,
Context: contextWithActor,
Query: `
{
site {
@ -253,7 +253,7 @@ func TestSiteConfigConnection(t *testing.T) {
{
Schema: mustParseGraphQLSchema(t, stubs.db),
Label: "Get first 2 site configuration history based on an offset",
Context: context,
Context: contextWithActor,
Query: fmt.Sprintf(`
{
site {
@ -322,7 +322,7 @@ func TestSiteConfigConnection(t *testing.T) {
{
Schema: mustParseGraphQLSchema(t, stubs.db),
Label: "Get last 2 site configuration history based on an offset",
Context: context,
Context: contextWithActor,
Query: fmt.Sprintf(`
{
site {

View File

@ -34,11 +34,11 @@ func (r *GitTreeEntryResolver) Symbol(ctx context.Context, args *struct {
Line int32
Character int32
}) (*symbolResolver, error) {
symbol, err := symbol.GetMatchAtLineCharacter(ctx, authz.DefaultSubRepoPermsChecker, r.commit.repoResolver.RepoMatch.RepoName(), api.CommitID(r.commit.oid), r.Path(), int(args.Line), int(args.Character))
if err != nil || symbol == nil {
symbolMatch, err := symbol.GetMatchAtLineCharacter(ctx, authz.DefaultSubRepoPermsChecker, r.commit.repoResolver.RepoMatch.RepoName(), api.CommitID(r.commit.oid), r.Path(), int(args.Line), int(args.Character))
if err != nil || symbolMatch == nil {
return nil, err
}
return &symbolResolver{r.db, r.commit, symbol}, nil
return &symbolResolver{r.db, r.commit, symbolMatch}, nil
}
func (r *GitCommitResolver) Symbols(ctx context.Context, args *symbolsArgs) (*symbolConnectionResolver, error) {
@ -52,10 +52,10 @@ func (r *GitCommitResolver) Symbols(ctx context.Context, args *symbolsArgs) (*sy
}, nil
}
func symbolResultsToResolvers(db database.DB, commit *GitCommitResolver, symbols []*result.SymbolMatch) []symbolResolver {
symbolResolvers := make([]symbolResolver, 0, len(symbols))
for _, symbol := range symbols {
symbolResolvers = append(symbolResolvers, toSymbolResolver(db, commit, symbol))
func symbolResultsToResolvers(db database.DB, commit *GitCommitResolver, symbolMatches []*result.SymbolMatch) []symbolResolver {
symbolResolvers := make([]symbolResolver, 0, len(symbolMatches))
for _, symbolMatch := range symbolMatches {
symbolResolvers = append(symbolResolvers, toSymbolResolver(db, commit, symbolMatch))
}
return symbolResolvers
}

View File

@ -158,10 +158,10 @@ func (r *WebhookLogConnectionResolver) Nodes(ctx context.Context) ([]*webhookLog
nodes := make([]*webhookLogResolver, len(logs))
db := database.NewDBWith(r.logger, r.store)
for i, log := range logs {
for i, l := range logs {
nodes[i] = &webhookLogResolver{
db: db,
log: log,
log: l,
}
}
@ -230,12 +230,12 @@ func webhookLogByID(ctx context.Context, db database.DB, gqlID graphql.ID) (*web
return nil, err
}
log, err := db.WebhookLogs(keyring.Default().WebhookLogKey).GetByID(ctx, id)
l, err := db.WebhookLogs(keyring.Default().WebhookLogKey).GetByID(ctx, id)
if err != nil {
return nil, err
}
return &webhookLogResolver{db: db, log: log}, nil
return &webhookLogResolver{db: db, log: l}, nil
}
func (r *webhookLogResolver) ID() graphql.ID {

View File

@ -224,12 +224,12 @@ func addSentry(r *mux.Router) {
// We want to keep this short, the default client settings are not strict enough.
Timeout: 3 * time.Second,
}
url := fmt.Sprintf("%s/api/%s/envelope/", sentryHost, pID)
apiUrl := fmt.Sprintf("%s/api/%s/envelope/", sentryHost, pID)
// Asynchronously forward to Sentry, there's no need to keep holding this connection
// opened any longer.
go func() {
resp, err := client.Post(url, "text/plain;charset=UTF-8", bytes.NewReader(b))
resp, err := client.Post(apiUrl, "text/plain;charset=UTF-8", bytes.NewReader(b))
if err != nil || resp.StatusCode >= 400 {
logger.Warn("failed to forward", sglog.Error(err), sglog.Int("statusCode", resp.StatusCode))
return

View File

@ -26,18 +26,18 @@ func newExporter(
switch protocol {
case otlpenv.ProtocolGRPC:
exporterFactory = otlpexporter.NewFactory()
config := exporterFactory.CreateDefaultConfig().(*otlpexporter.Config)
config.GRPCClientSettings.Endpoint = endpoint
config.GRPCClientSettings.TLSSetting = configtls.TLSClientSetting{
tempConfig := exporterFactory.CreateDefaultConfig().(*otlpexporter.Config)
tempConfig.GRPCClientSettings.Endpoint = endpoint
tempConfig.GRPCClientSettings.TLSSetting = configtls.TLSClientSetting{
Insecure: otlpenv.IsInsecure(endpoint),
}
signalExporterConfig = config
signalExporterConfig = tempConfig
case otlpenv.ProtocolHTTPJSON:
exporterFactory = otlphttpexporter.NewFactory()
config := exporterFactory.CreateDefaultConfig().(*otlphttpexporter.Config)
config.HTTPClientSettings.Endpoint = endpoint
signalExporterConfig = config
tempConfig := exporterFactory.CreateDefaultConfig().(*otlphttpexporter.Config)
tempConfig.HTTPClientSettings.Endpoint = endpoint
signalExporterConfig = tempConfig
default:
err = errors.Newf("unexpected protocol %q", protocol)

View File

@ -18,11 +18,11 @@ const revSuffixNoDots = `{Rev:(?:@(?:(?:[^@=/.-]|(?:[^=/@.]{2,}))/)*(?:[^@=/.-]|
func addOldTreeRedirectRoute(matchRouter *mux.Router) {
matchRouter.Path("/" + routevar.Repo + revSuffixNoDots + `/.tree{Path:.*}`).Methods("GET").Name(OldTreeRedirect).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
v := mux.Vars(r)
path := path.Clean(v["Path"])
if !strings.HasPrefix(path, "/") && path != "" {
path = "/" + path
cleanedPath := path.Clean(v["Path"])
if !strings.HasPrefix(cleanedPath, "/") && cleanedPath != "" {
cleanedPath = "/" + cleanedPath
}
http.Redirect(w, r, URLToRepoTreeEntry(api.RepoName(v["Repo"]), v["Rev"], path).String(), http.StatusMovedPermanently)
http.Redirect(w, r, URLToRepoTreeEntry(api.RepoName(v["Repo"]), v["Rev"], cleanedPath).String(), http.StatusMovedPermanently)
})
}

View File

@ -322,11 +322,11 @@ func getAndMarshalMigratedExtensionsUsageJSON(ctx context.Context, db database.D
func getAndMarshalCodeHostVersionsJSON(_ context.Context, _ database.DB) (_ json.RawMessage, err error) {
defer recordOperation("getAndMarshalCodeHostVersionsJSON")(&err)
versions, err := versions.GetVersions()
v, err := versions.GetVersions()
if err != nil {
return nil, err
}
return json.Marshal(versions)
return json.Marshal(v)
}
func getDependencyVersions(ctx context.Context, db database.DB, logger log.Logger) (json.RawMessage, error) {

View File

@ -19,13 +19,13 @@ func TestLatestDockerVersionPushed(t *testing.T) {
t.Skip("Skipping due to network request against dockerhub")
}
url := fmt.Sprintf("https://index.docker.io/v1/repositories/sourcegraph/server/tags/%s", latestReleaseDockerServerImageBuild.Version)
resp, err := http.Get(url)
urlStr := fmt.Sprintf("https://index.docker.io/v1/repositories/sourcegraph/server/tags/%s", latestReleaseDockerServerImageBuild.Version)
resp, err := http.Get(urlStr)
if err != nil {
t.Skip("Failed to contact dockerhub", err)
}
if resp.StatusCode == 404 {
t.Fatalf("sourcegraph/server:%s does not exist on dockerhub. %s", latestReleaseDockerServerImageBuild.Version, url)
t.Fatalf("sourcegraph/server:%s does not exist on dockerhub. %s", latestReleaseDockerServerImageBuild.Version, urlStr)
}
if resp.StatusCode != 200 {
t.Skip("unexpected response from dockerhub", resp.StatusCode)
@ -37,14 +37,14 @@ func TestLatestKubernetesVersionPushed(t *testing.T) {
t.Skip("Skipping due to network request")
}
url := fmt.Sprintf("https://github.com/sourcegraph/deploy-sourcegraph/releases/tag/v%v", latestReleaseKubernetesBuild.Version)
resp, err := http.Head(url)
urlStr := fmt.Sprintf("https://github.com/sourcegraph/deploy-sourcegraph/releases/tag/v%v", latestReleaseKubernetesBuild.Version)
resp, err := http.Head(urlStr)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != 200 {
t.Errorf("Could not find Kubernetes release %s on GitHub. Response code %s from %s, err: %v", latestReleaseKubernetesBuild.Version, resp.Status, url, err)
t.Errorf("Could not find Kubernetes release %s on GitHub. Response code %s from %s, err: %v", latestReleaseKubernetesBuild.Version, resp.Status, urlStr, err)
}
}
@ -53,14 +53,14 @@ func TestLatestDockerComposeOrPureDockerVersionPushed(t *testing.T) {
t.Skip("Skipping due to network request")
}
url := fmt.Sprintf("https://github.com/sourcegraph/deploy-sourcegraph-docker/releases/tag/v%v", latestReleaseDockerComposeOrPureDocker.Version)
resp, err := http.Head(url)
urlStr := fmt.Sprintf("https://github.com/sourcegraph/deploy-sourcegraph-docker/releases/tag/v%v", latestReleaseDockerComposeOrPureDocker.Version)
resp, err := http.Head(urlStr)
if err != nil {
t.Fatal(err)
}
if resp.StatusCode != 200 {
t.Errorf("Could not find Docker Compose or Pure Docker release %s on GitHub. Response code %s from %s, err: %v", latestReleaseDockerComposeOrPureDocker.Version, resp.Status, url, err)
t.Errorf("Could not find Docker Compose or Pure Docker release %s on GitHub. Response code %s from %s, err: %v", latestReleaseDockerComposeOrPureDocker.Version, resp.Status, urlStr, err)
}
}

View File

@ -732,9 +732,9 @@ func replicaAddrs(deployType, countStr, serviceName, port string) (string, error
func getEnv(environ []string, key string) (string, bool) {
key = key + "="
for _, env := range environ {
if strings.HasPrefix(env, key) {
return env[len(key):], true
for _, envVar := range environ {
if strings.HasPrefix(envVar, key) {
return envVar[len(key):], true
}
}
return "", false

View File

@ -138,7 +138,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
}
// Run enterprise setup hook
enterprise := enterpriseSetupHook(db, conf.DefaultClient())
enterpriseServices := enterpriseSetupHook(db, conf.DefaultClient())
if err != nil {
return errors.Wrap(err, "Failed to create sub-repo client")
@ -209,18 +209,18 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
schema, err := graphqlbackend.NewSchema(db,
gitserver.NewClient(),
enterprise.BatchChangesResolver,
enterprise.CodeIntelResolver,
enterprise.InsightsResolver,
enterprise.AuthzResolver,
enterprise.CodeMonitorsResolver,
enterprise.LicenseResolver,
enterprise.DotcomResolver,
enterprise.SearchContextsResolver,
enterprise.NotebooksResolver,
enterprise.ComputeResolver,
enterprise.InsightsAggregationResolver,
enterprise.WebhooksResolver,
enterpriseServices.BatchChangesResolver,
enterpriseServices.CodeIntelResolver,
enterpriseServices.InsightsResolver,
enterpriseServices.AuthzResolver,
enterpriseServices.CodeMonitorsResolver,
enterpriseServices.LicenseResolver,
enterpriseServices.DotcomResolver,
enterpriseServices.SearchContextsResolver,
enterpriseServices.NotebooksResolver,
enterpriseServices.ComputeResolver,
enterpriseServices.InsightsAggregationResolver,
enterpriseServices.WebhooksResolver,
)
if err != nil {
return err
@ -231,12 +231,12 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
return err
}
server, err := makeExternalAPI(db, logger, schema, enterprise, rateLimitWatcher)
server, err := makeExternalAPI(db, logger, schema, enterpriseServices, rateLimitWatcher)
if err != nil {
return err
}
internalAPI, err := makeInternalAPI(db, logger, schema, enterprise, rateLimitWatcher)
internalAPI, err := makeInternalAPI(db, logger, schema, enterpriseServices, rateLimitWatcher)
if err != nil {
return err
}

View File

@ -73,11 +73,11 @@ func parseSiteConfig(conf *conf.Unified) (*config, error) {
if c.Github.Uri != "" {
config.urn = c.Github.Uri
}
url, err := url.Parse(config.urn)
configUrl, err := url.Parse(config.urn)
if err != nil {
return nil, errors.Wrap(err, "parsing GitHub URL from configuration")
}
config.api, _ = github.APIRoot(url)
config.api, _ = github.APIRoot(configUrl)
if c.Github.Repository != nil {
if c.Github.Repository.Owner != "" {

View File

@ -30,8 +30,8 @@ func TestMain(m *testing.M) {
}
func newClientFactory(t testing.TB, name string) (*httpcli.Factory, func(testing.TB)) {
cassette := filepath.Join("testdata", strings.ReplaceAll(name, " ", "-"))
rec := newRecorder(t, cassette, update(name))
cassetteName := filepath.Join("testdata", strings.ReplaceAll(name, " ", "-"))
rec := newRecorder(t, cassetteName, update(name))
mw := httpcli.NewMiddleware(httpcli.GitHubProxyRedirectMiddleware)
return httpcli.NewFactory(mw, httptestutil.NewRecorderOpt(rec)),
func(t testing.TB) { save(t, rec) }

View File

@ -129,8 +129,8 @@ func (h *srcCliVersionHandler) updateCachedVersion() (string, error) {
return "", errors.New("parsing minimum version")
}
url := fmt.Sprintf("%s/%d.%d", srcCliVersionCache, minimumVersion.Major(), minimumVersion.Minor())
req, err := http.NewRequest(http.MethodGet, url, nil)
urlStr := fmt.Sprintf("%s/%d.%d", srcCliVersionCache, minimumVersion.Major(), minimumVersion.Minor())
req, err := http.NewRequest(http.MethodGet, urlStr, nil)
if err != nil {
return "", errors.Wrap(err, "building request")
}

View File

@ -115,13 +115,13 @@ func TestRepo(t *testing.T) {
t.Errorf("%q: got vars == %v, want %v", test.path, m.Vars, test.wantVars)
}
url, err := m.Route.URLPath(pairs(m.Vars)...)
urlPath, err := m.Route.URLPath(pairs(m.Vars)...)
if err != nil {
t.Errorf("%q: URLPath: %s", test.path, err)
continue
}
if url.Path != test.path {
t.Errorf("%q: got path == %q, want %q", test.path, url.Path, test.path)
if urlPath.Path != test.path {
t.Errorf("%q: got path == %q, want %q", test.path, urlPath.Path, test.path)
}
}
}
@ -153,13 +153,13 @@ func TestRev(t *testing.T) {
t.Errorf("%q: got vars == %v, want %v", test.path, m.Vars, test.wantVars)
}
url, err := m.Route.URLPath(pairs(m.Vars)...)
urlPath, err := m.Route.URLPath(pairs(m.Vars)...)
if err != nil {
t.Errorf("%q: URLPath: %s", test.path, err)
continue
}
if url.Path != test.path {
t.Errorf("%q: got path == %q, want %q", test.path, url.Path, test.path)
if urlPath.Path != test.path {
t.Errorf("%q: got path == %q, want %q", test.path, urlPath.Path, test.path)
}
}
}

View File

@ -566,8 +566,8 @@ func GuessSource(r *http.Request) trace.SourceType {
func repoIDs(results []result.Match) []api.RepoID {
ids := make(map[api.RepoID]struct{}, 5)
for _, result := range results {
ids[result.RepoName().ID] = struct{}{}
for _, r := range results {
ids[r.RepoName().ID] = struct{}{}
}
res := make([]api.RepoID, 0, len(ids))

View File

@ -342,7 +342,7 @@ func TestCleanupExpired(t *testing.T) {
repoPerforceGCOld := path.Join(root, "repo-perforce-gc-old", ".git")
repoRemoteURLScrub := path.Join(root, "repo-remote-url-scrub", ".git")
remote := path.Join(root, "remote", ".git")
for _, path := range []string{
for _, gitDirPath := range []string{
repoNew, repoOld,
repoGCNew, repoGCOld,
repoBoom, repoCorrupt,
@ -350,7 +350,7 @@ func TestCleanupExpired(t *testing.T) {
repoRemoteURLScrub,
remote,
} {
cmd := exec.Command("git", "--bare", "init", path)
cmd := exec.Command("git", "--bare", "init", gitDirPath)
if err := cmd.Run(); err != nil {
t.Fatal(err)
}
@ -387,7 +387,7 @@ func TestCleanupExpired(t *testing.T) {
writeFile(t, filepath.Join(repoGCNew, "gc.log"), []byte("warning: There are too many unreachable loose objects; run 'git prune' to remove them."))
writeFile(t, filepath.Join(repoGCOld, "gc.log"), []byte("warning: There are too many unreachable loose objects; run 'git prune' to remove them."))
for path, delta := range map[string]time.Duration{
for gitDirPath, delta := range map[string]time.Duration{
repoOld: 2 * repoTTL,
repoGCOld: 2 * repoTTLGC,
repoBoom: 2 * repoTTL,
@ -396,10 +396,10 @@ func TestCleanupExpired(t *testing.T) {
repoPerforceGCOld: 2 * repoTTLGC,
} {
ts := time.Now().Add(-delta)
if err := setRecloneTime(GitDir(path), ts); err != nil {
if err := setRecloneTime(GitDir(gitDirPath), ts); err != nil {
t.Fatal(err)
}
if err := os.Chtimes(filepath.Join(path, "HEAD"), ts, ts); err != nil {
if err := os.Chtimes(filepath.Join(gitDirPath, "HEAD"), ts, ts); err != nil {
t.Fatal(err)
}
}
@ -496,10 +496,10 @@ func TestCleanup_RemoveNonExistentRepos(t *testing.T) {
initRepos := func(root string) (repoExists string, repoNotExists string) {
repoExists = path.Join(root, "repo-exists", ".git")
repoNotExists = path.Join(root, "repo-not-exists", ".git")
for _, path := range []string{
for _, gitDirPath := range []string{
repoExists, repoNotExists,
} {
cmd := exec.Command("git", "--bare", "init", path)
cmd := exec.Command("git", "--bare", "init", gitDirPath)
if err := cmd.Run(); err != nil {
t.Fatal(err)
}

View File

@ -24,7 +24,7 @@ func newOperations(observationCtx *observation.Context) *operations {
})
observationCtx.Registerer.MustRegister(batchLogSemaphoreWait)
metrics := metrics.NewREDMetrics(
redMetrics := metrics.NewREDMetrics(
observationCtx.Registerer,
"gitserver_api",
metrics.WithLabels("op"),
@ -35,7 +35,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("gitserver.api.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -2787,12 +2787,12 @@ func setHEAD(ctx context.Context, logger log.Logger, rf *wrexec.RecordingCommand
// branch does not exist, pick first branch
cmd := exec.CommandContext(ctx, "git", "branch")
dir.Set(cmd)
list, err := cmd.Output()
output, err := cmd.Output()
if err != nil {
logger.Error("Failed to list branches", log.Error(err), log.String("output", string(output)))
return errors.Wrap(err, "failed to list branches")
}
lines := strings.Split(string(list), "\n")
lines := strings.Split(string(output), "\n")
branch := strings.TrimPrefix(strings.TrimPrefix(lines[0], "* "), " ")
if branch != "" {
headBranch = branch

View File

@ -54,8 +54,8 @@ func (dir GitDir) Set(cmd *exec.Cmd) {
}
func (s *Server) dir(name api.RepoName) GitDir {
path := string(protocol.NormalizeRepo(name))
return GitDir(filepath.Join(s.ReposDir, filepath.FromSlash(path), ".git"))
p := string(protocol.NormalizeRepo(name))
return GitDir(filepath.Join(s.ReposDir, filepath.FromSlash(p), ".git"))
}
func (s *Server) name(dir GitDir) api.RepoName {

View File

@ -166,8 +166,8 @@ func createMaliciousJar(t *testing.T, name string) {
writer := zip.NewWriter(f)
defer writer.Close()
for _, filepath := range maliciousPaths {
_, err = writer.Create(filepath)
for _, filePath := range maliciousPaths {
_, err = writer.Create(filePath)
assert.Nil(t, err)
}

View File

@ -259,8 +259,8 @@ func TestDecompressTgz(t *testing.T) {
dir := t.TempDir()
var fileInfos []fileInfo
for _, path := range testData.paths {
fileInfos = append(fileInfos, fileInfo{path: path, contents: []byte("x")})
for _, testDataPath := range testData.paths {
fileInfos = append(fileInfos, fileInfo{path: testDataPath, contents: []byte("x")})
}
tgz := bytes.NewReader(createTgz(t, fileInfos))

View File

@ -126,7 +126,7 @@ func Start(ctx context.Context, observationCtx *observation.Context, ready servi
git := gitserver.NewClient()
service := &search.Service{
sService := &search.Service{
Store: &search.Store{
FetchTar: func(ctx context.Context, repo api.RepoName, commit api.CommitID) (io.ReadCloser, error) {
// We pass in a nil sub-repo permissions checker and an internal actor here since
@ -169,10 +169,10 @@ func Start(ctx context.Context, observationCtx *observation.Context, ready servi
Log: logger,
}
service.Store.Start()
sService.Store.Start()
// Set up handler middleware
handler := actor.HTTPMiddleware(logger, service)
handler := actor.HTTPMiddleware(logger, sService)
handler = trace.HTTPMiddleware(logger, handler, conf.DefaultClient())
handler = instrumentation.HTTPMiddleware("", handler)
@ -182,7 +182,7 @@ func Start(ctx context.Context, observationCtx *observation.Context, ready servi
grpcServer := grpc.NewServer(grpcdefaults.ServerOptions(logger)...)
reflection.Register(grpcServer)
grpcServer.RegisterService(&proto.Searcher_ServiceDesc, &search.Server{
Service: service,
Service: sService,
})
host := ""

View File

@ -307,7 +307,7 @@ func (g *generator) generate(ctx context.Context) error {
return errors.Wrap(err, "failed to write sitemap.xml.gz")
}
}
for index, sitemap := range sitemaps {
for index, sm := range sitemaps {
fileName := fmt.Sprintf("sitemap_%03d.xml.gz", index)
outFile, err := os.Create(filepath.Join(g.outDir, fileName))
if err != nil {
@ -316,7 +316,7 @@ func (g *generator) generate(ctx context.Context) error {
defer outFile.Close()
writer := gzip.NewWriter(outFile)
defer writer.Close()
_, err = sitemap.WriteTo(writer)
_, err = sm.WriteTo(writer)
if err != nil {
return errors.Wrap(err, fmt.Sprintf("failed to write %s", fileName))
}

View File

@ -13,7 +13,7 @@ type operations struct {
}
func newOperations(observationCtx *observation.Context) *operations {
metrics := metrics.NewREDMetrics(
redMetrics := metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_symbols_gitserver",
metrics.WithLabels("op"),
@ -24,7 +24,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.symbols.gitserver.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -130,7 +130,7 @@ func handleSearchWith(l logger.Logger, searchFunc types.SearchFunc) http.Handler
return
}
result, err := searchFunc(r.Context(), args)
resultSymbols, err := searchFunc(r.Context(), args)
if err != nil {
// Ignore reporting errors where client disconnected
if r.Context().Err() == context.Canceled && errors.Is(err, context.Canceled) {
@ -150,7 +150,7 @@ func handleSearchWith(l logger.Logger, searchFunc types.SearchFunc) http.Handler
return
}
if err := json.NewEncoder(w).Encode(search.SymbolsResponse{Symbols: result}); err != nil {
if err := json.NewEncoder(w).Encode(search.SymbolsResponse{Symbols: resultSymbols}); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}

View File

@ -63,8 +63,8 @@ func TestHandler(t *testing.T) {
gitserverClient := NewMockGitserverClient()
gitserverClient.FetchTarFunc.SetDefaultHook(gitserver.CreateTestFetchTarFunc(files))
parser := parser.NewParser(&observation.TestContext, parserPool, fetcher.NewRepositoryFetcher(&observation.TestContext, gitserverClient, 1000, 1_000_000), 0, 10)
databaseWriter := writer.NewDatabaseWriter(observation.TestContextTB(t), tmpDir, gitserverClient, parser, semaphore.NewWeighted(1))
symbolParser := parser.NewParser(&observation.TestContext, parserPool, fetcher.NewRepositoryFetcher(&observation.TestContext, gitserverClient, 1000, 1_000_000), 0, 10)
databaseWriter := writer.NewDatabaseWriter(observation.TestContextTB(t), tmpDir, gitserverClient, symbolParser, semaphore.NewWeighted(1))
cachedDatabaseWriter := writer.NewCachedDatabaseWriter(databaseWriter, cache)
handler := NewHandler(MakeSqliteSearchFunc(observation.TestContextTB(t), cachedDatabaseWriter, database.NewMockDB()), gitserverClient.ReadFile, nil, "")
@ -127,17 +127,17 @@ func TestHandler(t *testing.T) {
for label, testCase := range testCases {
t.Run(label, func(t *testing.T) {
result, err := client.Search(context.Background(), testCase.args)
resultSymbols, err := client.Search(context.Background(), testCase.args)
if err != nil {
t.Fatalf("unexpected error performing search: %s", err)
}
if result == nil {
if resultSymbols == nil {
if testCase.expected != nil {
t.Errorf("unexpected search result. want=%+v, have=nil", testCase.expected)
}
} else if !reflect.DeepEqual(result, testCase.expected) {
t.Errorf("unexpected search result. want=%+v, have=%+v", testCase.expected, result)
} else if !reflect.DeepEqual(resultSymbols, testCase.expected) {
t.Errorf("unexpected search result. want=%+v, have=%+v", testCase.expected, resultSymbols)
}
})
}

View File

@ -178,17 +178,17 @@ func negate(query *sqlf.Query) *sqlf.Query {
}
// globEscape escapes the glob metacharacters `[`, `]`, `*` and `?` in str by
// wrapping each occurrence in a single-character class (e.g. `*` becomes
// `[*]`), so the result matches str literally when used as a glob pattern.
func globEscape(str string) string {
	const specials = `[]*?`

	var out strings.Builder
	out.Grow(len(str)) // at least len(str) bytes; escapes add two more each
	for _, c := range str {
		if strings.ContainsRune(specials, c) {
			// Wrap the metacharacter in a character class so it matches itself.
			out.WriteByte('[')
			out.WriteRune(c)
			out.WriteByte(']')
		} else {
			out.WriteRune(c)
		}
	}
	return out.String()
}

View File

@ -12,7 +12,7 @@ type Operations struct {
}
func NewOperations(observationCtx *observation.Context) *Operations {
metrics := metrics.NewREDMetrics(
redMetrics := metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_symbols_api",
metrics.WithLabels("op", "parseAmount"),
@ -24,7 +24,7 @@ func NewOperations(observationCtx *observation.Context) *Operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.symbols.api.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -53,17 +53,17 @@ func NewSlackNotification(id, channel string) *SlackNotification {
func NewNotificationClient(logger log.Logger, slackToken, githubToken, channel string) *NotificationClient {
debug := os.Getenv("BUILD_TRACKER_SLACK_DEBUG") == "1"
slack := slack.New(slackToken, slack.OptionDebug(debug))
slackClient := slack.New(slackToken, slack.OptionDebug(debug))
httpClient := http.Client{
Timeout: 5 * time.Second,
}
githubClient := github.NewClient(&httpClient)
teamResolver := team.NewTeammateResolver(githubClient, slack)
teamResolver := team.NewTeammateResolver(githubClient, slackClient)
return &NotificationClient{
logger: logger.Scoped("notificationClient", "client which interacts with Slack and Github to send notifications"),
slack: *slack,
slack: *slackClient,
team: teamResolver,
channel: channel,
}

View File

@ -20,17 +20,17 @@ func main() {
contents := map[string]string{}
for _, schema := range schemas.Schemas {
for _, definition := range schema.Definitions.All() {
metadata, err := renderMetadata(definition)
for _, def := range schema.Definitions.All() {
metadata, err := renderMetadata(def)
if err != nil {
panic(err.Error())
}
migrationDirectory := filepath.Join(tempDirectory, schema.Name, strconv.Itoa(definition.ID))
migrationDirectory := filepath.Join(tempDirectory, schema.Name, strconv.Itoa(def.ID))
contents[filepath.Join(migrationDirectory, "metadata.yaml")] = string(metadata)
contents[filepath.Join(migrationDirectory, "up.sql")] = definition.UpQuery.Query(sqlf.PostgresBindVar)
contents[filepath.Join(migrationDirectory, "down.sql")] = definition.DownQuery.Query(sqlf.PostgresBindVar)
contents[filepath.Join(migrationDirectory, "up.sql")] = def.UpQuery.Query(sqlf.PostgresBindVar)
contents[filepath.Join(migrationDirectory, "down.sql")] = def.DownQuery.Query(sqlf.PostgresBindVar)
}
}

View File

@ -244,7 +244,7 @@ func (c *Client) GetProjectByKey(ctx context.Context, key string) (*Project, err
// CreateRepo creates a repo within the given project with the given name.
func (c *Client) CreateRepo(ctx context.Context, p *Project, repoName string) (*Repo, error) {
url := c.url(fmt.Sprintf("/rest/api/latest/projects/%s/repos", p.Key))
endpointUrl := c.url(fmt.Sprintf("/rest/api/latest/projects/%s/repos", p.Key))
rawRepoData, err := json.Marshal(struct {
Name string `json:"name"`
@ -260,7 +260,7 @@ func (c *Client) CreateRepo(ctx context.Context, p *Project, repoName string) (*
return nil, err
}
respData, err := c.post(ctx, url, rawRepoData)
respData, err := c.post(ctx, endpointUrl, rawRepoData)
if err != nil {
return nil, err
}
@ -282,7 +282,7 @@ func (c *Client) CreateRepo(ctx context.Context, p *Project, repoName string) (*
// authenticated user. Therefore, it is strongly recommended, that if you want to create a project to use the
// BasicAuth client.
func (c *Client) CreateProject(ctx context.Context, p *Project) (*Project, error) {
url := c.url("/rest/api/latest/projects")
endpointUrl := c.url("/rest/api/latest/projects")
rawProjectData, err := json.Marshal(struct {
Key string `json:"key"`
@ -297,7 +297,7 @@ func (c *Client) CreateProject(ctx context.Context, p *Project) (*Project, error
return nil, err
}
respData, err := c.post(ctx, url, rawProjectData)
respData, err := c.post(ctx, endpointUrl, rawProjectData)
if err != nil {
return nil, err
}
@ -313,8 +313,8 @@ func (c *Client) CreateProject(ctx context.Context, p *Project) (*Project, error
func (c *Client) ListProjects(ctx context.Context) ([]*Project, error) {
var err error
url := c.url("/rest/api/latest/projects")
all, err := getAll[*Project](ctx, c, url)
endpointUrl := c.url("/rest/api/latest/projects")
all, err := getAll[*Project](ctx, c, endpointUrl)
if err != nil {
return nil, err
}
@ -343,8 +343,8 @@ func extractResults[T any](items []getResult[T]) ([]T, error) {
func (c *Client) ListReposForProject(ctx context.Context, project *Project, page int, perPage int) ([]*Repo, int, error) {
repos := make([]*Repo, 0)
url := c.url(fmt.Sprintf("/rest/api/latest/projects/%s/repos", project.Key))
resp, err := c.getPaged(ctx, url, page, perPage)
endpointUrl := c.url(fmt.Sprintf("/rest/api/latest/projects/%s/repos", project.Key))
resp, err := c.getPaged(ctx, endpointUrl, page, perPage)
if err != nil {
return nil, 0, err
}

View File

@ -47,8 +47,8 @@ var checks = map[string]check.CheckFunc{
func runChecksWithName(ctx context.Context, names []string) error {
funcs := make(map[string]check.CheckFunc, len(names))
for _, name := range names {
if check, ok := checks[name]; ok {
funcs[name] = check
if c, ok := checks[name]; ok {
funcs[name] = c
} else {
return errors.Newf("check %q not found", name)
}
@ -81,10 +81,10 @@ func runChecks(ctx context.Context, checks map[string]check.CheckFunc) error {
var failed []string
for name, check := range checks {
for name, c := range checks {
p := std.Out.Pending(output.Linef(output.EmojiLightbulb, output.StylePending, "Running check %q...", name))
if err := check(ctx); err != nil {
if err := c(ctx); err != nil {
p.Complete(output.Linef(output.EmojiFailure, output.StyleWarning, "Check %q failed with the following errors:", name))
std.Out.WriteLine(output.Styledf(output.StyleWarning, "%s", err))

View File

@ -68,9 +68,9 @@ func newResource(r log.Resource) *resource.Resource {
}
func isValidVersion(spans *tracepb.ResourceSpans) bool {
for _, attribute := range spans.GetResource().GetAttributes() {
if attribute.GetKey() == sgAnalyticsVersionResourceKey {
return attribute.Value.GetStringValue() == sgAnalyticsVersion
for _, attrib := range spans.GetResource().GetAttributes() {
if attrib.GetKey() == sgAnalyticsVersionResourceKey {
return attrib.Value.GetStringValue() == sgAnalyticsVersion
}
}
return false

View File

@ -113,13 +113,13 @@ func getStoreProvider(serverAddress string) (string, error) {
return config.CredentialsStore, nil
}
url, err := url.Parse(serverAddress)
serverUrl, err := url.Parse(serverAddress)
if err != nil {
return "", errors.Wrapf(err, "failed to parse server address %s", serverAddress)
}
if config.CredentialHelpers[url.Host] != "" {
return config.CredentialHelpers[url.Host], nil
if config.CredentialHelpers[serverUrl.Host] != "" {
return config.CredentialHelpers[serverUrl.Host], nil
}
return "", errors.Errorf("failed to find store provider or credential helper for %s", serverAddress)
@ -131,10 +131,10 @@ func GetCredentialsFromStore(serverAddress string) (*credentials.Credentials, er
return nil, err
}
program := client.NewShellProgramFunc(fmt.Sprintf("docker-credential-%s", provider))
credentials, err := client.Get(program, serverAddress)
creds, err := client.Get(program, serverAddress)
if err != nil {
return nil, err
}
return credentials, err
return creds, err
}

View File

@ -118,11 +118,11 @@ func UpdateHelm(path string, creds credentials.Credentials, pinTag string) error
return errors.Wrapf(err, "couldn't unmarshal %s", valuesFilePath)
}
var images []string
extraImages(values, &images)
var imgs []string
extraImages(values, &imgs)
valuesFileString := string(valuesFile)
for _, img := range images {
for _, img := range imgs {
var updatedImg string
updatedImg, err = getUpdatedImage(img, creds, pinTag)
if err != nil {

View File

@ -44,12 +44,12 @@ func LeavesForCommit(databases []db.Database, commit string) error {
func selectLeavesForCommit(database db.Database, ds *definition.Definitions, commit string) ([]definition.Definition, error) {
migrationsDir := filepath.Join("migrations", database.Name)
output, err := run.GitCmd("ls-tree", "-r", "--name-only", commit, migrationsDir)
gitCmdOutput, err := run.GitCmd("ls-tree", "-r", "--name-only", commit, migrationsDir)
if err != nil {
return nil, err
}
ds, err = ds.Filter(parseVersions(strings.Split(output, "\n"), migrationsDir))
ds, err = ds.Filter(parseVersions(strings.Split(gitCmdOutput, "\n"), migrationsDir))
if err != nil {
return nil, err
}

View File

@ -91,11 +91,11 @@ func Revert(databases []db.Database, commit string) error {
// selectMigrationsDefinedInCommit returns the migration versions of the given
// database whose files were touched by commit, determined by diffing the
// commit against its first parent (commit..commit~1).
func selectMigrationsDefinedInCommit(database db.Database, commit string) ([]int, error) {
	migrationsDir := filepath.Join("migrations", database.Name)

	// List only the migration files this commit added, removed, or modified.
	gitCmdOutput, err := run.GitCmd("diff", "--name-only", commit+".."+commit+"~1", migrationsDir)
	if err != nil {
		return nil, err
	}

	versions := parseVersions(strings.Split(gitCmdOutput, "\n"), migrationsDir)
	return versions, nil
}

View File

@ -30,13 +30,13 @@ func Rewrite(database db.Database, rev string) error {
_ = os.RemoveAll(migrationsDirTemp)
}()
root, err := http.FS(fs).Open("/")
rootDir, err := http.FS(fs).Open("/")
if err != nil {
return err
}
defer func() { _ = root.Close() }()
defer func() { _ = rootDir.Close() }()
migrations, err := root.Readdir(0)
migrations, err := rootDir.Readdir(0)
if err != nil {
return err
}

View File

@ -163,12 +163,12 @@ func Squash(database db.Database, commit string, inContainer, runInTimescaleDBCo
func selectNewRootMigration(database db.Database, ds *definition.Definitions, commit string) (definition.Definition, bool, error) {
migrationsDir := filepath.Join("migrations", database.Name)
output, err := run.GitCmd("ls-tree", "-r", "--name-only", commit, migrationsDir)
gitCmdOutput, err := run.GitCmd("ls-tree", "-r", "--name-only", commit, migrationsDir)
if err != nil {
return definition.Definition{}, false, err
}
versionsAtCommit := parseVersions(strings.Split(output, "\n"), migrationsDir)
versionsAtCommit := parseVersions(strings.Split(gitCmdOutput, "\n"), migrationsDir)
filteredDefinitions, err := ds.Filter(versionsAtCommit)
if err != nil {
@ -277,8 +277,8 @@ func runTargetedUpMigrations(database db.Database, targetVersions []int, postgre
var dbs []*sql.DB
defer func() {
for _, db := range dbs {
_ = db.Close()
for _, dbHandle := range dbs {
_ = dbHandle.Close()
}
}()
@ -530,8 +530,8 @@ func removeAncestorsOf(database db.Database, ds *definition.Definitions, targetV
allDefinitions := ds.All()
allIDs := make([]int, 0, len(allDefinitions))
for _, definition := range allDefinitions {
allIDs = append(allIDs, definition.ID)
for _, def := range allDefinitions {
allIDs = append(allIDs, def.ID)
}
properDescendants, err := ds.Down(allIDs, []int{targetVersion})
@ -540,16 +540,16 @@ func removeAncestorsOf(database db.Database, ds *definition.Definitions, targetV
}
keep := make(map[int]struct{}, len(properDescendants))
for _, definition := range properDescendants {
keep[definition.ID] = struct{}{}
for _, def := range properDescendants {
keep[def.ID] = struct{}{}
}
// Gather the set of filtered that are NOT a proper descendant of the given target version.
// This will leave us with the ancestors of the target version (including itself).
filteredIDs := make([]int, 0, len(allDefinitions))
for _, definition := range allDefinitions {
if _, ok := keep[definition.ID]; !ok {
filteredIDs = append(filteredIDs, definition.ID)
for _, def := range allDefinitions {
if _, ok := keep[def.ID]; !ok {
filteredIDs = append(filteredIDs, def.ID)
}
}

View File

@ -134,9 +134,9 @@ func parseVersions(lines []string, migrationsDir string) []int {
// rootRelative removes the repo root prefix from the given path.
func rootRelative(path string) string {
if root, _ := root.RepositoryRoot(); root != "" {
if repoRoot, _ := root.RepositoryRoot(); repoRoot != "" {
sep := string(os.PathSeparator)
rootWithTrailingSep := strings.TrimRight(root, sep) + sep
rootWithTrailingSep := strings.TrimRight(repoRoot, sep) + sep
return strings.TrimPrefix(path, rootWithTrailingSep)
}

View File

@ -41,7 +41,7 @@ func Commands(ctx context.Context, parentEnv map[string]string, verbose bool, cm
}
go monitor.run(pathChanges)
root, err := root.RepositoryRoot()
repoRoot, err := root.RepositoryRoot()
if err != nil {
return err
}
@ -49,7 +49,7 @@ func Commands(ctx context.Context, parentEnv map[string]string, verbose bool, cm
// binaries get installed to <repository-root>/.bin. If the binary is installed with go build, then go
// will create .bin directory. Some binaries (like docsite) get downloaded instead of built and therefore
// need the directory to exist before hand.
binDir := filepath.Join(root, ".bin")
binDir := filepath.Join(repoRoot, ".bin")
if err := os.Mkdir(binDir, 0755); err != nil && !os.IsExist(err) {
return err
}
@ -69,7 +69,7 @@ func Commands(ctx context.Context, parentEnv map[string]string, verbose bool, cm
failures: failures,
installed: installed,
okayToStart: okayToStart,
repositoryRoot: root,
repositoryRoot: repoRoot,
parentEnv: parentEnv,
}
@ -695,7 +695,7 @@ var watchIgnorePatterns = []*regexp.Regexp{
}
func watch() (<-chan string, error) {
root, err := root.RepositoryRoot()
repoRoot, err := root.RepositoryRoot()
if err != nil {
return nil, err
}
@ -703,7 +703,7 @@ func watch() (<-chan string, error) {
paths := make(chan string)
events := make(chan notify.EventInfo, 1)
if err := notify.Watch(root+"/...", events, notify.All); err != nil {
if err := notify.Watch(repoRoot+"/...", events, notify.All); err != nil {
return nil, err
}
@ -713,7 +713,7 @@ func watch() (<-chan string, error) {
outer:
for event := range events {
path := strings.TrimPrefix(strings.TrimPrefix(event.Path(), root), "/")
path := strings.TrimPrefix(strings.TrimPrefix(event.Path(), repoRoot), "/")
for _, pattern := range watchIgnorePatterns {
if pattern.MatchString(path) {
@ -729,7 +729,7 @@ func watch() (<-chan string, error) {
}
func Test(ctx context.Context, cmd Command, args []string, parentEnv map[string]string) error {
root, err := root.RepositoryRoot()
repoRoot, err := root.RepositoryRoot()
if err != nil {
return err
}
@ -759,12 +759,12 @@ func Test(ctx context.Context, cmd Command, args []string, parentEnv map[string]
}
c := exec.CommandContext(commandCtx, "bash", "-c", strings.Join(cmdArgs, " "))
c.Dir = root
c.Dir = repoRoot
c.Env = makeEnv(parentEnv, secretsEnv, cmd.Env)
c.Stdout = os.Stdout
c.Stderr = os.Stderr
std.Out.WriteLine(output.Styledf(output.StylePending, "Running %s in %q...", c, root))
std.Out.WriteLine(output.Styledf(output.StylePending, "Running %s in %q...", c, repoRoot))
return c.Run()
}

View File

@ -744,14 +744,14 @@ From there, you can start exploring logs with the Grafana explore panel.
}
// buildGrafanaURL returns a Grafana Cloud "explore" URL that searches the
// Buildkite log stream for text over the last 10 days. If stepName is
// non-empty, the query is additionally scoped to step keys matching
// `.*<stepName>.*`; otherwise all steps are searched.
func buildGrafanaURL(text string, stepName string) string {
	// The URL templates carry `_TEXT_` and `_STEP_` placeholders which are
	// substituted below; the rest is a pre-encoded Grafana explore query.
	var urlWithPlaceholder string
	if stepName == "" {
		urlWithPlaceholder = "https://sourcegraph.grafana.net/explore?orgId=1&left=%7B%22datasource%22:%22grafanacloud-sourcegraph-logs%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22editorMode%22:%22code%22,%22expr%22:%22%7Bapp%3D%5C%22buildkite%5C%22%7D%20%7C%3D%20%60_TEXT_%60%22,%22queryType%22:%22range%22%7D%5D,%22range%22:%7B%22from%22:%22now-10d%22,%22to%22:%22now%22%7D%7D"
	} else {
		urlWithPlaceholder = "https://sourcegraph.grafana.net/explore?orgId=1&left=%7B%22datasource%22:%22grafanacloud-sourcegraph-logs%22,%22queries%22:%5B%7B%22refId%22:%22A%22,%22editorMode%22:%22code%22,%22expr%22:%22%7Bapp%3D%5C%22buildkite%5C%22,%20step_key%3D~%5C%22_STEP_%5C%22%7D%20%7C%3D%20%60_TEXT_%60%22,%22queryType%22:%22range%22%7D%5D,%22range%22:%7B%22from%22:%22now-10d%22,%22to%22:%22now%22%7D%7D"
	}

	replaced := strings.ReplaceAll(urlWithPlaceholder, "_TEXT_", text)
	return strings.ReplaceAll(replaced, "_STEP_", fmt.Sprintf(".*%s.*", stepName))
}
func getAllowedBuildTypeArgs() []string {

View File

@ -108,9 +108,9 @@ func listSecretExec(ctx *cli.Context) error {
}
// bashCompleteSecrets returns the keys of all stored secrets, for use as shell
// completion options. Load errors are deliberately swallowed: completion must
// never fail, so on error we simply offer no suggestions.
func bashCompleteSecrets() (options []string) {
	allSecrets, err := loadSecrets()
	if err != nil {
		return nil
	}
	return allSecrets.Keys()
}

View File

@ -253,11 +253,11 @@ func shouldUpdateDevPrivate(ctx context.Context, path, branch string) (bool, err
return false, err
}
// Now we check if there are any changes. If the output is empty, we're not missing out on anything.
output, err := sgrun.Bash(ctx, fmt.Sprintf("git diff --shortstat origin/%s", branch)).Dir(path).Run().String()
outputStr, err := sgrun.Bash(ctx, fmt.Sprintf("git diff --shortstat origin/%s", branch)).Dir(path).Run().String()
if err != nil {
return false, err
}
return len(output) > 0, err
return len(outputStr) > 0, err
}

View File

@ -89,9 +89,9 @@ func mustParseConstraint(constraint string) *semver.Constraints {
}
// mustParseCIDR parses val as a CIDR notation IP address and prefix length
// (e.g. "10.0.0.0/8") and returns the resulting network. It panics on invalid
// input, so it should only be used with hard-coded values (package-level
// initialization or tests).
func mustParseCIDR(val string) *net.IPNet {
	// Note: the result variable must not be named `net`, which would shadow
	// the package for the remainder of the function.
	_, ipNetwork, err := net.ParseCIDR(val)
	if err != nil {
		panic(err)
	}
	return ipNetwork
}

View File

@ -86,15 +86,13 @@ func StandaloneRunRun(ctx context.Context, logger log.Logger, cfg *config.Config
nameSet := janitor.NewNameSet()
ctx, cancel := context.WithCancel(ctx)
worker, err := worker.NewWorker(observationCtx, nameSet, opts)
wrk, err := worker.NewWorker(observationCtx, nameSet, opts)
if err != nil {
cancel()
return err
}
routines := []goroutine.BackgroundRoutine{
worker,
}
routines := []goroutine.BackgroundRoutine{wrk}
if cfg.UseFirecracker {
routines = append(routines, janitor.NewOrphanedVMJanitor(
@ -112,7 +110,7 @@ func StandaloneRunRun(ctx context.Context, logger log.Logger, cfg *config.Config
// in that we want a maximum runtime and/or number of jobs to be
// executed by a single instance, after which the service should shut
// down without error.
worker.Wait()
wrk.Wait()
// Once the worker has finished its current set of jobs and stops
// the dequeue loop, we want to finish off the rest of the sibling

View File

@ -65,7 +65,7 @@ func createVM(ctx context.Context, config *config.Config, repositoryName, revisi
operations := command.NewOperations(&observation.TestContext)
hostRunner := command.NewRunner("", commandLogger, command.Options{}, operations)
workspace, err := workspace.NewFirecrackerWorkspace(
firecrackerWorkspace, err := workspace.NewFirecrackerWorkspace(
ctx,
// No need for files store in the test.
nil,
@ -94,7 +94,7 @@ func createVM(ctx context.Context, config *config.Config, repositoryName, revisi
fopts := firecrackerOptions(config)
fopts.Enabled = true
runner := command.NewRunner(workspace.Path(), commandLogger, command.Options{
runner := command.NewRunner(firecrackerWorkspace.Path(), commandLogger, command.Options{
ExecutorName: name,
ResourceOptions: resourceOptions(config),
DockerOptions: dockerOptions(config),

View File

@ -93,11 +93,11 @@ func (h *handler) Handle(ctx context.Context, logger log.Logger, job executor.Jo
logger.Info("Creating workspace")
hostRunner := h.runnerFactory("", commandLogger, command.Options{}, h.operations)
workspace, err := h.prepareWorkspace(ctx, hostRunner, job, commandLogger)
ws, err := h.prepareWorkspace(ctx, hostRunner, job, commandLogger)
if err != nil {
return errors.Wrap(err, "failed to prepare workspace")
}
defer workspace.Remove(ctx, h.options.KeepWorkspaces)
defer ws.Remove(ctx, h.options.KeepWorkspaces)
vmNameSuffix, err := uuid.NewRandom()
if err != nil {
@ -126,7 +126,7 @@ func (h *handler) Handle(ctx context.Context, logger log.Logger, job executor.Jo
if len(job.DockerAuthConfig.Auths) > 0 {
options.DockerOptions.DockerAuthConfig = job.DockerAuthConfig
}
runner := h.runnerFactory(workspace.Path(), commandLogger, options, h.operations)
runner := h.runnerFactory(ws.Path(), commandLogger, options, h.operations)
logger.Info("Setting up VM")
@ -154,7 +154,7 @@ func (h *handler) Handle(ctx context.Context, logger log.Logger, job executor.Jo
dockerStepCommand := command.CommandSpec{
Key: key,
Image: dockerStep.Image,
ScriptPath: workspace.ScriptFilenames()[i],
ScriptPath: ws.ScriptFilenames()[i],
Dir: dockerStep.Dir,
Env: dockerStep.Env,
Operation: h.operations.Exec,

View File

@ -20,20 +20,20 @@ type svc struct{}
func (svc) Name() string { return "executor" }
// Configure loads the executor configuration from the environment and returns
// it. The executor service exposes no additional debugserver endpoints.
func (svc) Configure() (env.Config, []debugserver.Endpoint) {
	// Named `conf` rather than `config` to avoid shadowing the config package.
	var conf config.Config
	conf.Load()
	return &conf, nil
}
func (svc) Start(ctx context.Context, observationCtx *observation.Context, ready service.ReadyFunc, cfg env.Config) error {
config := cfg.(*config.Config)
conf := cfg.(*config.Config)
// Always use the in-memory secret.
config.FrontendAuthorizationToken = confdefaults.SingleProgramInMemoryExecutorPassword
conf.FrontendAuthorizationToken = confdefaults.SingleProgramInMemoryExecutorPassword
// TODO(sqs) HACK(sqs): run executors for both queues
if deploy.IsDeployTypeSingleProgram(deploy.Type()) {
otherConfig := *config
if config.QueueName == "batches" {
otherConfig := *conf
if conf.QueueName == "batches" {
otherConfig.QueueName = "codeintel"
} else {
otherConfig.QueueName = "batches"
@ -45,7 +45,7 @@ func (svc) Start(ctx context.Context, observationCtx *observation.Context, ready
}()
}
return run.StandaloneRunRun(ctx, observationCtx.Logger, config, false)
return run.StandaloneRunRun(ctx, observationCtx.Logger, conf, false)
}
var Service service.Service = svc{}

View File

@ -344,7 +344,7 @@ func TestMiddleware(t *testing.T) {
if err := idpAuthnReq.Validate(); err != nil {
t.Fatal(err)
}
session := saml.Session{
samlSession := saml.Session{
ID: "session-id",
CreateTime: time.Now(),
ExpireTime: time.Now().Add(24 * time.Hour),
@ -354,7 +354,7 @@ func TestMiddleware(t *testing.T) {
UserName: "testuser_username",
UserEmail: "testuser@email.com",
}
if err := (saml.DefaultAssertionMaker{}).MakeAssertion(idpAuthnReq, &session); err != nil {
if err := (saml.DefaultAssertionMaker{}).MakeAssertion(idpAuthnReq, &samlSession); err != nil {
t.Fatal(err)
}
if err := idpAuthnReq.MakeResponse(); err != nil {

View File

@ -69,16 +69,16 @@ func (r *batchChangeResolver) Description() *string {
}
// State derives the GraphQL state of the batch change: closed takes precedence
// over draft, and anything else is considered open.
func (r *batchChangeResolver) State() string {
	var batchChangeState btypes.BatchChangeState
	switch {
	case r.batchChange.Closed():
		batchChangeState = btypes.BatchChangeStateClosed
	case r.batchChange.IsDraft():
		batchChangeState = btypes.BatchChangeStateDraft
	default:
		batchChangeState = btypes.BatchChangeStateOpen
	}
	return batchChangeState.ToGraphQL()
}
func (r *batchChangeResolver) Creator(ctx context.Context) (*graphqlbackend.UserResolver, error) {

View File

@ -52,12 +52,12 @@ func (r *bulkOperationResolver) Progress() float64 {
}
func (r *bulkOperationResolver) Errors(ctx context.Context) ([]graphqlbackend.ChangesetJobErrorResolver, error) {
errors, err := r.store.ListBulkOperationErrors(ctx, store.ListBulkOperationErrorsOpts{BulkOperationID: r.bulkOperation.ID})
boErrors, err := r.store.ListBulkOperationErrors(ctx, store.ListBulkOperationErrorsOpts{BulkOperationID: r.bulkOperation.ID})
if err != nil {
return nil, err
}
changesetIDs := uniqueChangesetIDsForBulkOperationErrors(errors)
changesetIDs := uniqueChangesetIDsForBulkOperationErrors(boErrors)
changesetsByID := map[int64]*btypes.Changeset{}
reposByID := map[api.RepoID]*types.Repo{}
@ -78,8 +78,8 @@ func (r *bulkOperationResolver) Errors(ctx context.Context) ([]graphqlbackend.Ch
}
}
res := make([]graphqlbackend.ChangesetJobErrorResolver, 0, len(errors))
for _, e := range errors {
res := make([]graphqlbackend.ChangesetJobErrorResolver, 0, len(boErrors))
for _, e := range boErrors {
ch := changesetsByID[e.ChangesetID]
repo, accessible := reposByID[ch.RepoID]
resolver := &changesetJobErrorResolver{store: r.store, gitserverClient: r.gitserverClient, changeset: ch, repo: repo}

View File

@ -148,15 +148,15 @@ func (r *changesetResolver) BatchChanges(ctx context.Context, args *graphqlbacke
ChangesetID: r.changeset.ID,
}
state, err := parseBatchChangeState(args.State)
bcState, err := parseBatchChangeState(args.State)
if err != nil {
return nil, err
}
if state != "" {
opts.States = []btypes.BatchChangeState{state}
if bcState != "" {
opts.States = []btypes.BatchChangeState{bcState}
}
// If multiple `states` are provided, prefer them over `state`.
// If multiple `states` are provided, prefer them over `bcState`.
if args.States != nil {
states, err := parseBatchChangeStates(args.States)
if err != nil {
@ -331,8 +331,8 @@ func (r *changesetResolver) ReviewState(ctx context.Context) *string {
if !r.changeset.Published() {
return nil
}
state := string(r.changeset.ExternalReviewState)
return &state
reviewState := string(r.changeset.ExternalReviewState)
return &reviewState
}
func (r *changesetResolver) CheckState() *string {
@ -340,12 +340,12 @@ func (r *changesetResolver) CheckState() *string {
return nil
}
state := string(r.changeset.ExternalCheckState)
if state == string(btypes.ChangesetCheckStateUnknown) {
checkState := string(r.changeset.ExternalCheckState)
if checkState == string(btypes.ChangesetCheckStateUnknown) {
return nil
}
return &state
return &checkState
}
func (r *changesetResolver) Error() *string { return r.changeset.FailureMessage }

View File

@ -1831,11 +1831,11 @@ func testBatchSpecWorkspacesResponse(t *testing.T, s *graphql.Schema, ctx contex
t.Fatalf("unexpected workspaces total count (-want +got):\n%s", diff)
}
types := map[string]int{}
typeCounts := map[string]int{}
for _, c := range response.Node.WorkspaceResolution.Workspaces.Nodes {
types[c.Typename]++
typeCounts[c.Typename]++
}
if diff := cmp.Diff(w.types, types); diff != "" {
if diff := cmp.Diff(w.types, typeCounts); diff != "" {
t.Fatalf("unexpected workspace types (-want +got):\n%s", diff)
}
}

View File

@ -130,7 +130,7 @@ func TestCreateBatchSpec(t *testing.T) {
if testing.Short() {
t.Skip()
}
license := func(tags ...string) *licensing.Info { return &licensing.Info{Info: license.Info{Tags: tags}} }
licensingInfo := func(tags ...string) *licensing.Info { return &licensing.Info{Info: license.Info{Tags: tags}} }
logger := logtest.Scoped(t)
ctx := context.Background()
@ -180,22 +180,22 @@ func TestCreateBatchSpec(t *testing.T) {
}{
"batch changes license, restricted, over the limit": {
changesetSpecs: changesetSpecs,
licenseInfo: license("starter"),
licenseInfo: licensingInfo("starter"),
wantErr: true,
},
"batch changes license, restricted, under the limit": {
changesetSpecs: changesetSpecs[0 : maxNumChangesets-1],
licenseInfo: license("starter"),
licenseInfo: licensingInfo("starter"),
wantErr: false,
},
"batch changes license, unrestricted, over the limit": {
changesetSpecs: changesetSpecs,
licenseInfo: license("starter", "batch-changes"),
licenseInfo: licensingInfo("starter", "batch-changes"),
wantErr: false,
},
"campaigns license, no limit": {
changesetSpecs: changesetSpecs,
licenseInfo: license("starter", "campaigns"),
licenseInfo: licensingInfo("starter", "campaigns"),
wantErr: false,
},
"no license": {
@ -782,12 +782,12 @@ func TestCreateEmptyBatchChange(t *testing.T) {
}
// Second time should fail because namespace + name are not unique
errors := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateEmptyBatchChange)
errs := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateEmptyBatchChange)
if len(errors) != 1 {
if len(errs) != 1 {
t.Fatalf("expected single errors, but got none")
}
if have, want := errors[0].Message, service.ErrNameNotUnique.Error(); have != want {
if have, want := errs[0].Message, service.ErrNameNotUnique.Error(); have != want {
t.Fatalf("wrong error. want=%q, have=%q", want, have)
}
@ -812,14 +812,14 @@ func TestCreateEmptyBatchChange(t *testing.T) {
"name": "not: valid:\nname",
}
errors = apitest.Exec(actorCtx, t, s, input3, &response, mutationCreateEmptyBatchChange)
errs = apitest.Exec(actorCtx, t, s, input3, &response, mutationCreateEmptyBatchChange)
if len(errors) != 1 {
if len(errs) != 1 {
t.Fatalf("expected single errors, but got none")
}
expError := "The batch change name can only contain word characters, dots and dashes."
if have, want := errors[0].Message, expError; !strings.Contains(have, "The batch change name can only contain word characters, dots and dashes.") {
if have, want := errs[0].Message, expError; !strings.Contains(have, "The batch change name can only contain word characters, dots and dashes.") {
t.Fatalf("wrong error. want to contain=%q, have=%q", want, have)
}
}
@ -879,15 +879,15 @@ func TestUpsertEmptyBatchChange(t *testing.T) {
"name": "my-batch-change",
}
errors := apitest.Exec(actorCtx, t, s, badInput, &response, mutationUpsertEmptyBatchChange)
errs := apitest.Exec(actorCtx, t, s, badInput, &response, mutationUpsertEmptyBatchChange)
if len(errors) != 1 {
if len(errs) != 1 {
t.Fatalf("expected single errors")
}
wantError := "invalid ID \"bad_namespace-id\" for namespace"
if have, want := errors[0].Message, wantError; have != want {
if have, want := errs[0].Message, wantError; have != want {
t.Fatalf("wrong error. want=%q, have=%q", want, have)
}
}
@ -947,12 +947,12 @@ func TestCreateBatchChange(t *testing.T) {
}
// Second time it should fail
errors := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateBatchChange)
errs := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateBatchChange)
if len(errors) != 1 {
if len(errs) != 1 {
t.Fatalf("expected single errors, but got none")
}
if have, want := errors[0].Message, service.ErrMatchingBatchChangeExists.Error(); have != want {
if have, want := errs[0].Message, service.ErrMatchingBatchChangeExists.Error(); have != want {
t.Fatalf("wrong error. want=%q, have=%q", want, have)
}
}
@ -1688,12 +1688,12 @@ func TestCreateBatchChangesCredential(t *testing.T) {
}
// Second time it should fail
errors := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateCredential)
errs := apitest.Exec(actorCtx, t, s, input, &response, mutationCreateCredential)
if len(errors) != 1 {
if len(errs) != 1 {
t.Fatalf("expected single errors, but got none")
}
if have, want := errors[0].Extensions["code"], "ErrDuplicateCredential"; have != want {
if have, want := errs[0].Extensions["code"], "ErrDuplicateCredential"; have != want {
t.Fatalf("wrong error code. want=%q, have=%q", want, have)
}
})
@ -1758,12 +1758,12 @@ func TestDeleteBatchChangesCredential(t *testing.T) {
apitest.MustExec(actorCtx, t, s, input, &response, mutationDeleteCredential)
// Second time it should fail
errors := apitest.Exec(actorCtx, t, s, input, &response, mutationDeleteCredential)
errs := apitest.Exec(actorCtx, t, s, input, &response, mutationDeleteCredential)
if len(errors) != 1 {
if len(errs) != 1 {
t.Fatalf("expected single errors, but got none")
}
if have, want := errors[0].Message, fmt.Sprintf("user credential not found: [%d]", userCred.ID); have != want {
if have, want := errs[0].Message, fmt.Sprintf("user credential not found: [%d]", userCred.ID); have != want {
t.Fatalf("wrong error code. want=%q, have=%q", want, have)
}
})
@ -1780,12 +1780,12 @@ func TestDeleteBatchChangesCredential(t *testing.T) {
apitest.MustExec(actorCtx, t, s, input, &response, mutationDeleteCredential)
// Second time it should fail
errors := apitest.Exec(actorCtx, t, s, input, &response, mutationDeleteCredential)
errs := apitest.Exec(actorCtx, t, s, input, &response, mutationDeleteCredential)
if len(errors) != 1 {
if len(errs) != 1 {
t.Fatalf("expected single errors, but got none")
}
if have, want := errors[0].Message, "no results"; have != want {
if have, want := errs[0].Message, "no results"; have != want {
t.Fatalf("wrong error code. want=%q, have=%q", want, have)
}
})

View File

@ -374,29 +374,29 @@ func TestQueryMonitor(t *testing.T) {
_, err = r.insertTestMonitorWithOpts(ctx, t, actionOpt, postHookOpt)
require.NoError(t, err)
schema, err := graphqlbackend.NewSchemaWithCodeMonitorsResolver(db, r)
gqlSchema, err := graphqlbackend.NewSchemaWithCodeMonitorsResolver(db, r)
require.NoError(t, err)
t.Run("query by user", func(t *testing.T) {
queryByUser(ctx, t, schema, r, user1, user2)
queryByUser(ctx, t, gqlSchema, r, user1, user2)
})
t.Run("query by ID", func(t *testing.T) {
queryByID(ctx, t, schema, r, m.(*monitor), user1, user2)
queryByID(ctx, t, gqlSchema, r, m.(*monitor), user1, user2)
})
t.Run("monitor paging", func(t *testing.T) {
monitorPaging(ctx, t, schema, user1)
monitorPaging(ctx, t, gqlSchema, user1)
})
t.Run("recipients paging", func(t *testing.T) {
recipientPaging(ctx, t, schema, user1, user2)
recipientPaging(ctx, t, gqlSchema, user1, user2)
})
t.Run("actions paging", func(t *testing.T) {
actionPaging(ctx, t, schema, user1)
actionPaging(ctx, t, gqlSchema, user1)
})
t.Run("trigger events paging", func(t *testing.T) {
triggerEventPaging(ctx, t, schema, user1)
triggerEventPaging(ctx, t, gqlSchema, user1)
})
t.Run("action events paging", func(t *testing.T) {
actionEventPaging(ctx, t, schema, user1)
actionEventPaging(ctx, t, gqlSchema, user1)
})
}
@ -701,7 +701,7 @@ func TestEditCodeMonitor(t *testing.T) {
// Update the code monitor.
// We update all fields, delete one action, and add a new action.
schema, err := graphqlbackend.NewSchemaWithCodeMonitorsResolver(db, r)
gqlSchema, err := graphqlbackend.NewSchemaWithCodeMonitorsResolver(db, r)
require.NoError(t, err)
updateInput := map[string]any{
"monitorID": string(relay.MarshalID(MonitorKind, 1)),
@ -712,7 +712,7 @@ func TestEditCodeMonitor(t *testing.T) {
"user2ID": ns2,
}
got := apitest.UpdateCodeMonitorResponse{}
batchesApitest.MustExec(ctx, t, schema, updateInput, &got, editMonitor)
batchesApitest.MustExec(ctx, t, gqlSchema, updateInput, &got, editMonitor)
want := apitest.UpdateCodeMonitorResponse{
UpdateCodeMonitor: apitest.Monitor{

View File

@ -64,11 +64,11 @@ func (r *computeMatchResolver) Range() gql.RangeResolver {
}
func (r *computeMatchResolver) Environment() []gql.ComputeEnvironmentEntryResolver {
var result []gql.ComputeEnvironmentEntryResolver
var resolvers []gql.ComputeEnvironmentEntryResolver
for variable, value := range r.m.Environment {
result = append(result, newEnvironmentEntryResolver(variable, value))
resolvers = append(resolvers, newEnvironmentEntryResolver(variable, value))
}
return result
return resolvers
}
func newEnvironmentEntryResolver(variable string, value compute.Data) *computeEnvironmentEntryResolver {
@ -214,8 +214,8 @@ func toResultResolverList(ctx context.Context, cmd compute.Command, matches []re
repoResolver := getRepoResolver(m.RepoName(), "")
path, commit := pathAndCommitFromResult(m)
result := toComputeResultResolver(computeResult, repoResolver, path, commit)
results = append(results, result)
resolver := toComputeResultResolver(computeResult, repoResolver, path, commit)
results = append(results, resolver)
}
return results, nil
}

View File

@ -19,18 +19,18 @@ import (
func toComputeResult(ctx context.Context, cmd compute.Command, match result.Match) (out []compute.Result, _ error) {
if v, ok := match.(*result.CommitMatch); ok && v.DiffPreview != nil {
for _, diffMatch := range v.CommitToDiffMatches() {
result, err := cmd.Run(ctx, diffMatch)
runResult, err := cmd.Run(ctx, diffMatch)
if err != nil {
return nil, err
}
out = append(out, result)
out = append(out, runResult)
}
} else {
result, err := cmd.Run(ctx, match)
runResult, err := cmd.Run(ctx, match)
if err != nil {
return nil, err
}
out = append(out, result)
out = append(out, runResult)
}
return out, nil
}

View File

@ -41,7 +41,7 @@ func (s dbLicenses) Create(ctx context.Context, subscriptionID, licenseKey strin
return mocks.licenses.Create(subscriptionID, licenseKey)
}
uuid, err := uuid.NewRandom()
newUUID, err := uuid.NewRandom()
if err != nil {
return "", errors.Wrap(err, "new UUID")
}
@ -54,7 +54,7 @@ func (s dbLicenses) Create(ctx context.Context, subscriptionID, licenseKey strin
INSERT INTO product_licenses(id, product_subscription_id, license_key, license_version, license_tags, license_user_count, license_expires_at)
VALUES($1, $2, $3, $4, $5, $6, $7) RETURNING id
`,
uuid, subscriptionID, licenseKey, dbutil.NewNullInt64(int64(version)), pq.Array(info.Tags), dbutil.NewNullInt64(int64(info.UserCount)), dbutil.NullTime{Time: expiresAt},
newUUID, subscriptionID, licenseKey, dbutil.NewNullInt64(int64(version)), pq.Array(info.Tags), dbutil.NewNullInt64(int64(info.UserCount)), dbutil.NullTime{Time: expiresAt},
).Scan(&id); err != nil {
return "", errors.Wrap(err, "insert")
}

View File

@ -53,14 +53,14 @@ func (s dbSubscriptions) Create(ctx context.Context, userID int32, username stri
accountNumber = username[i+1:]
}
uuid, err := uuid.NewRandom()
newUUID, err := uuid.NewRandom()
if err != nil {
return "", errors.Wrap(err, "new UUID")
}
if err = s.db.QueryRowContext(ctx, `
INSERT INTO product_subscriptions(id, user_id, account_number) VALUES($1, $2, $3) RETURNING id
`,
uuid, userID, accountNumber,
newUUID, userID, accountNumber,
).Scan(&id); err != nil {
return "", errors.Wrap(err, "insert")
}

View File

@ -18,13 +18,13 @@ func TestGitserverProxySimple(t *testing.T) {
}))
defer originServer.Close()
url, err := url.Parse(originServer.URL)
originServerURL, err := url.Parse(originServer.URL)
if err != nil {
t.Fatalf("unexpected error parsing url: %s", err)
}
gs := NewMockGitserverClient()
gs.AddrForRepoFunc.PushReturn(url.Host, nil)
gs.AddrForRepoFunc.PushReturn(originServerURL.Host, nil)
proxyServer := httptest.NewServer(gitserverProxy(logtest.Scoped(t), gs, "/info/refs"))
defer proxyServer.Close()
@ -57,13 +57,13 @@ func TestGitserverProxyTargetPath(t *testing.T) {
}))
defer originServer.Close()
url, err := url.Parse(originServer.URL)
originServerURL, err := url.Parse(originServer.URL)
if err != nil {
t.Fatalf("unexpected error parsing url: %s", err)
}
gs := NewMockGitserverClient()
gs.AddrForRepoFunc.PushReturn(url.Host, nil)
gs.AddrForRepoFunc.PushReturn(originServerURL.Host, nil)
proxyServer := httptest.NewServer(gitserverProxy(logtest.Scoped(t), gs, "/foo"))
defer proxyServer.Close()
@ -89,13 +89,13 @@ func TestGitserverProxyHeaders(t *testing.T) {
}))
defer originServer.Close()
url, err := url.Parse(originServer.URL)
originServerURL, err := url.Parse(originServer.URL)
if err != nil {
t.Fatalf("unexpected error parsing url: %s", err)
}
gs := NewMockGitserverClient()
gs.AddrForRepoFunc.PushReturn(url.Host, nil)
gs.AddrForRepoFunc.PushReturn(originServerURL.Host, nil)
proxyServer := httptest.NewServer(gitserverProxy(logtest.Scoped(t), gs, "/test"))
defer proxyServer.Close()
@ -137,13 +137,13 @@ func TestGitserverProxyRedirectWithPayload(t *testing.T) {
}))
defer originServer.Close()
url, err := url.Parse(originServer.URL)
originServerURL, err := url.Parse(originServer.URL)
if err != nil {
t.Fatalf("unexpected error parsing url: %s", err)
}
gs := NewMockGitserverClient()
gs.AddrForRepoFunc.PushReturn(url.Host, nil)
gs.AddrForRepoFunc.PushReturn(originServerURL.Host, nil)
proxyServer := httptest.NewServer(gitserverProxy(logtest.Scoped(t), gs, "/test"))
defer proxyServer.Close()

View File

@ -391,7 +391,7 @@ func TestHeartbeat(t *testing.T) {
executorStore := database.NewMockExecutorStore()
metricsStore := metricsstore.NewMockDistributedStore()
executor := types.Executor{
exec := types.Executor{
Hostname: "test-hostname",
QueueName: "test-queue-name",
OS: "test-os",
@ -405,7 +405,7 @@ func TestHeartbeat(t *testing.T) {
handler := NewHandler(executorStore, metricsStore, QueueOptions[testRecord]{Store: s, RecordTransformer: recordTransformer})
if knownIDs, canceled, err := handler.heartbeat(context.Background(), executor, []int{testKnownID, 10}); err != nil {
if knownIDs, canceled, err := handler.heartbeat(context.Background(), exec, []int{testKnownID, 10}); err != nil {
t.Fatalf("unexpected error performing heartbeat: %s", err)
} else if diff := cmp.Diff([]int{testKnownID}, knownIDs); diff != "" {
t.Errorf("unexpected unknown ids (-want +got):\n%s", diff)
@ -415,7 +415,7 @@ func TestHeartbeat(t *testing.T) {
if callCount := len(executorStore.UpsertHeartbeatFunc.History()); callCount != 1 {
t.Errorf("unexpected heartbeat upsert count. want=%d have=%d", 1, callCount)
} else if name := executorStore.UpsertHeartbeatFunc.History()[0].Arg1; name != executor {
} else if name := executorStore.UpsertHeartbeatFunc.History()[0].Arg1; name != exec {
t.Errorf("unexpected heartbeat name. want=%q have=%q", "deadbeef", name)
}
}

View File

@ -307,7 +307,7 @@ func (r *Resolver) Notebooks(ctx context.Context, args graphqlbackend.ListNotebo
}
store := notebooks.Notebooks(r.db)
notebooks, err := store.ListNotebooks(ctx, pageOpts, opts)
nbs, err := store.ListNotebooks(ctx, pageOpts, opts)
if err != nil {
return nil, err
}
@ -318,14 +318,14 @@ func (r *Resolver) Notebooks(ctx context.Context, args graphqlbackend.ListNotebo
}
hasNextPage := false
if len(notebooks) == int(args.First)+1 {
if len(nbs) == int(args.First)+1 {
hasNextPage = true
notebooks = notebooks[:len(notebooks)-1]
nbs = nbs[:len(nbs)-1]
}
return &notebookConnectionResolver{
afterCursor: afterCursor,
notebooks: r.notebooksToResolvers(notebooks),
notebooks: r.notebooksToResolvers(nbs),
totalCount: int32(count),
hasNextPage: hasNextPage,
}, nil

View File

@ -120,7 +120,7 @@ func (s *extensionStore) Create(ctx context.Context, publisherUserID, publisherO
return 0, errors.New("at most 1 of the publisher user/org may be set")
}
uuid, err := uuid.NewRandom()
newUUID, err := uuid.NewRandom()
if err != nil {
return 0, err
}
@ -137,7 +137,7 @@ VALUES(
%s
)
RETURNING id`,
uuid,
newUUID,
publisherUserID,
publisherOrgID,
name,

View File

@ -93,15 +93,15 @@ func TestGitHubHandler(t *testing.T) {
http.Error(w, err.Error(), http.StatusBadRequest)
}
repos, err := repoStore.List(ctx, database.ReposListOptions{Names: []string{string(req.Repo)}})
repositories, err := repoStore.List(ctx, database.ReposListOptions{Names: []string{string(req.Repo)}})
if err != nil {
http.Error(w, err.Error(), http.StatusNotFound)
}
if len(repos) != 1 {
http.Error(w, fmt.Sprintf("expected 1 repo, got %v", len(repos)), http.StatusNotFound)
if len(repositories) != 1 {
http.Error(w, fmt.Sprintf("expected 1 repo, got %v", len(repositories)), http.StatusNotFound)
}
repo := repos[0]
repo := repositories[0]
res := &protocol.RepoUpdateResponse{
ID: repo.ID,
Name: string(repo.Name),
@ -165,11 +165,11 @@ func TestGitLabHandler(t *testing.T) {
repoName := "gitlab.com/ryanslade/ryan-test-private"
db := database.NewMockDB()
repos := database.NewMockRepoStore()
repos.GetFirstRepoNameByCloneURLFunc.SetDefaultHook(func(ctx context.Context, s string) (api.RepoName, error) {
repositories := database.NewMockRepoStore()
repositories.GetFirstRepoNameByCloneURLFunc.SetDefaultHook(func(ctx context.Context, s string) (api.RepoName, error) {
return api.RepoName(repoName), nil
})
db.ReposFunc.SetDefaultReturn(repos)
db.ReposFunc.SetDefaultReturn(repositories)
handler := NewGitLabHandler()
data, err := os.ReadFile("testdata/gitlab-push.json")
@ -201,11 +201,11 @@ func TestBitbucketServerHandler(t *testing.T) {
repoName := "bitbucket.sgdev.org/private/test-2020-06-01"
db := database.NewMockDB()
repos := database.NewMockRepoStore()
repos.GetFirstRepoNameByCloneURLFunc.SetDefaultHook(func(ctx context.Context, s string) (api.RepoName, error) {
repositories := database.NewMockRepoStore()
repositories.GetFirstRepoNameByCloneURLFunc.SetDefaultHook(func(ctx context.Context, s string) (api.RepoName, error) {
return "bitbucket.sgdev.org/private/test-2020-06-01", nil
})
db.ReposFunc.SetDefaultReturn(repos)
db.ReposFunc.SetDefaultReturn(repositories)
handler := NewBitbucketServerHandler()
data, err := os.ReadFile("testdata/bitbucket-server-push.json")
@ -237,11 +237,11 @@ func TestBitbucketCloudHandler(t *testing.T) {
repoName := "bitbucket.org/sourcegraph-testing/sourcegraph"
db := database.NewMockDB()
repos := database.NewMockRepoStore()
repos.GetFirstRepoNameByCloneURLFunc.SetDefaultHook(func(ctx context.Context, s string) (api.RepoName, error) {
repositories := database.NewMockRepoStore()
repositories.GetFirstRepoNameByCloneURLFunc.SetDefaultHook(func(ctx context.Context, s string) (api.RepoName, error) {
return "bitbucket.org/sourcegraph-testing/sourcegraph", nil
})
db.ReposFunc.SetDefaultReturn(repos)
db.ReposFunc.SetDefaultReturn(repositories)
handler := NewBitbucketCloudHandler()
data, err := os.ReadFile("testdata/bitbucket-cloud-push.json")

View File

@ -221,7 +221,7 @@ func (r *batchSpecWorkspaceCreator) process(
usedCacheEntries := []int64{}
changesetsByWorkspace := make(map[*btypes.BatchSpecWorkspace][]*btypes.ChangesetSpec)
author, err := author.GetChangesetAuthorForUser(ctx, database.UsersWith(r.logger, r.store), spec.UserID)
changesetAuthor, err := author.GetChangesetAuthorForUser(ctx, database.UsersWith(r.logger, r.store), spec.UserID)
if err != nil {
return err
}
@ -281,7 +281,7 @@ func (r *batchSpecWorkspaceCreator) process(
workspace.dbWorkspace.CachedResultFound = true
rawSpecs, err := cache.ChangesetSpecsFromCache(spec.Spec, workspace.repo, *res.Value, workspace.dbWorkspace.Path, true, author)
rawSpecs, err := cache.ChangesetSpecsFromCache(spec.Spec, workspace.repo, *res.Value, workspace.dbWorkspace.Path, true, changesetAuthor)
if err != nil {
return err
}

View File

@ -429,16 +429,16 @@ func newMetricsForBitbucketProjectPermissionsQueries(logger log.Logger) bitbucke
})
observationCtx.Registerer.MustRegister(resets)
errors := prometheus.NewCounter(prometheus.CounterOpts{
errorCounter := prometheus.NewCounter(prometheus.CounterOpts{
Name: "src_explicit_permissions_bitbucket_project_query_errors_total",
Help: "The number of errors that occur during job.",
})
observationCtx.Registerer.MustRegister(errors)
observationCtx.Registerer.MustRegister(errorCounter)
return bitbucketProjectPermissionsMetrics{
workerMetrics: workerutil.NewMetrics(observationCtx, "explicit_permissions_bitbucket_project_queries"),
resets: resets,
resetFailures: resetFailures,
errors: errors,
errors: errorCounter,
}
}

View File

@ -896,18 +896,18 @@ func publishFinalDockerImage(c Config, app string) operations.Operation {
devImage := images.DevRegistryImage(app, "")
publishImage := images.PublishedRegistryImage(app, "")
var images []string
var imgs []string
for _, image := range []string{publishImage, devImage} {
if app != "server" || c.RunType.Is(runtype.TaggedRelease, runtype.ImagePatch, runtype.ImagePatchNoTest) {
images = append(images, fmt.Sprintf("%s:%s", image, c.Version))
imgs = append(imgs, fmt.Sprintf("%s:%s", image, c.Version))
}
if app == "server" && c.RunType.Is(runtype.ReleaseBranch) {
images = append(images, fmt.Sprintf("%s:%s-insiders", image, c.Branch))
imgs = append(imgs, fmt.Sprintf("%s:%s-insiders", image, c.Branch))
}
if c.RunType.Is(runtype.MainBranch) {
images = append(images, fmt.Sprintf("%s:insiders", image))
imgs = append(imgs, fmt.Sprintf("%s:insiders", image))
}
}
@ -923,11 +923,11 @@ func publishFinalDockerImage(c Config, app string) operations.Operation {
strconv.Itoa(c.BuildNumber),
} {
internalImage := fmt.Sprintf("%s:%s", devImage, tag)
images = append(images, internalImage)
imgs = append(imgs, internalImage)
}
candidateImage := fmt.Sprintf("%s:%s", devImage, c.candidateImageTag())
cmd := fmt.Sprintf("./dev/ci/docker-publish.sh %s %s", candidateImage, strings.Join(images, " "))
cmd := fmt.Sprintf("./dev/ci/docker-publish.sh %s %s", candidateImage, strings.Join(imgs, " "))
pipeline.AddStep(fmt.Sprintf(":docker: :truck: %s", app),
// This step just pulls a prebuild image and pushes it to some registries. The

View File

@ -29,14 +29,14 @@ func main() {
os.Exit(1)
}
jwt, err := genJwtToken(*appID, *keyPath)
jwtToken, err := genJwtToken(*appID, *keyPath)
if err != nil {
log.Fatal(err)
}
ctx := context.Background()
ts := oauth2.StaticTokenSource(
&oauth2.Token{AccessToken: jwt},
&oauth2.Token{AccessToken: jwtToken},
)
tc := oauth2.NewClient(ctx, ts)
ghc := github.NewClient(tc)

View File

@ -26,12 +26,12 @@ func TestNewAuthzProviders(t *testing.T) {
[]schema.AuthProviders{},
)
assert := assert.New(t)
assertion := assert.New(t)
assert.Len(initResults.Providers, 0, "unexpected a providers: %+v", initResults.Providers)
assert.Len(initResults.Problems, 0, "unexpected problems: %+v", initResults.Problems)
assert.Len(initResults.Warnings, 0, "unexpected warnings: %+v", initResults.Warnings)
assert.Len(initResults.InvalidConnections, 0, "unexpected invalidConnections: %+v", initResults.InvalidConnections)
assertion.Len(initResults.Providers, 0, "unexpected a providers: %+v", initResults.Providers)
assertion.Len(initResults.Problems, 0, "unexpected problems: %+v", initResults.Problems)
assertion.Len(initResults.Warnings, 0, "unexpected warnings: %+v", initResults.Warnings)
assertion.Len(initResults.InvalidConnections, 0, "unexpected invalidConnections: %+v", initResults.InvalidConnections)
})
t.Run("no matching auth provider", func(t *testing.T) {

View File

@ -37,12 +37,12 @@ func TestNewAuthzProviders(t *testing.T) {
false,
)
assert := assert.New(t)
assertion := assert.New(t)
assert.Len(initResults.Providers, 0, "unexpected a providers: %+v", initResults.Providers)
assert.Len(initResults.Problems, 0, "unexpected problems: %+v", initResults.Problems)
assert.Len(initResults.Warnings, 0, "unexpected warnings: %+v", initResults.Warnings)
assert.Len(initResults.InvalidConnections, 0, "unexpected invalidConnections: %+v", initResults.InvalidConnections)
assertion.Len(initResults.Providers, 0, "unexpected a providers: %+v", initResults.Providers)
assertion.Len(initResults.Problems, 0, "unexpected problems: %+v", initResults.Problems)
assertion.Len(initResults.Warnings, 0, "unexpected warnings: %+v", initResults.Warnings)
assertion.Len(initResults.InvalidConnections, 0, "unexpected invalidConnections: %+v", initResults.InvalidConnections)
})
t.Run("no matching auth provider", func(t *testing.T) {

View File

@ -1138,30 +1138,30 @@ func TestHandleArchivedRepo(t *testing.T) {
ch := &btypes.Changeset{ExternalState: btypes.ChangesetExternalStateDraft}
repo := &types.Repo{Archived: false}
store := repos.NewMockStore()
store.UpdateRepoFunc.SetDefaultReturn(repo, nil)
mockStore := repos.NewMockStore()
mockStore.UpdateRepoFunc.SetDefaultReturn(repo, nil)
err := handleArchivedRepo(ctx, store, repo, ch)
err := handleArchivedRepo(ctx, mockStore, repo, ch)
assert.NoError(t, err)
assert.True(t, repo.Archived)
assert.Equal(t, btypes.ChangesetExternalStateReadOnly, ch.ExternalState)
assert.NotEmpty(t, store.UpdateRepoFunc.History())
assert.NotEmpty(t, mockStore.UpdateRepoFunc.History())
})
t.Run("store error", func(t *testing.T) {
ch := &btypes.Changeset{ExternalState: btypes.ChangesetExternalStateDraft}
repo := &types.Repo{Archived: false}
store := repos.NewMockStore()
mockStore := repos.NewMockStore()
want := errors.New("")
store.UpdateRepoFunc.SetDefaultReturn(nil, want)
mockStore.UpdateRepoFunc.SetDefaultReturn(nil, want)
have := handleArchivedRepo(ctx, store, repo, ch)
have := handleArchivedRepo(ctx, mockStore, repo, ch)
assert.Error(t, have)
assert.ErrorIs(t, have, want)
assert.True(t, repo.Archived)
assert.Equal(t, btypes.ChangesetExternalStateDraft, ch.ExternalState)
assert.NotEmpty(t, store.UpdateRepoFunc.History())
assert.NotEmpty(t, mockStore.UpdateRepoFunc.History())
})
}

View File

@ -391,8 +391,8 @@ func getCloneURL(repo *types.Repo) (*vcs.URL, error) {
}
parsedURLs := make([]*vcs.URL, 0, len(cloneURLs))
for _, url := range cloneURLs {
parsedURL, err := vcs.ParseURL(url)
for _, cloneURL := range cloneURLs {
parsedURL, err := vcs.ParseURL(cloneURL)
if err != nil {
return nil, err
}

View File

@ -212,7 +212,7 @@ func (s *batchSpecWorkspaceExecutionWorkerStore) MarkComplete(ctx context.Contex
}
}
author, err := author.GetChangesetAuthorForUser(ctx, database.UsersWith(s.logger, s), batchSpec.UserID)
changesetAuthor, err := author.GetChangesetAuthorForUser(ctx, database.UsersWith(s.logger, s), batchSpec.UserID)
if err != nil {
return false, errors.Wrap(err, "creating changeset author")
}
@ -229,7 +229,7 @@ func (s *batchSpecWorkspaceExecutionWorkerStore) MarkComplete(ctx context.Contex
latestStepResult.Value,
workspace.Path,
true,
author,
changesetAuthor,
)
if err != nil {
return false, errors.Wrap(err, "failed to build changeset specs from cache")

View File

@ -287,7 +287,7 @@ type syncerMetrics struct {
}
func makeMetrics(observationCtx *observation.Context) *syncerMetrics {
metrics := &syncerMetrics{
m := &syncerMetrics{
syncs: prometheus.NewCounterVec(prometheus.CounterOpts{
Name: "src_repoupdater_changeset_syncer_syncs",
Help: "Total number of changeset syncs",
@ -315,14 +315,14 @@ func makeMetrics(observationCtx *observation.Context) *syncerMetrics {
Help: "The number of changesets behind schedule",
}, []string{"codehost"}),
}
observationCtx.Registerer.MustRegister(metrics.syncs)
observationCtx.Registerer.MustRegister(metrics.priorityQueued)
observationCtx.Registerer.MustRegister(metrics.syncDuration)
observationCtx.Registerer.MustRegister(metrics.computeScheduleDuration)
observationCtx.Registerer.MustRegister(metrics.scheduleSize)
observationCtx.Registerer.MustRegister(metrics.behindSchedule)
observationCtx.Registerer.MustRegister(m.syncs)
observationCtx.Registerer.MustRegister(m.priorityQueued)
observationCtx.Registerer.MustRegister(m.syncDuration)
observationCtx.Registerer.MustRegister(m.computeScheduleDuration)
observationCtx.Registerer.MustRegister(m.scheduleSize)
observationCtx.Registerer.MustRegister(m.behindSchedule)
return metrics
return m
}
// Run will start the process of changeset syncing. It is long running

View File

@ -51,7 +51,7 @@ func NewScheduler(
indexEnqueuer: indexEnqueuer,
}
metrics := m.Get(func() *metrics.REDMetrics {
redMetrics := m.Get(func() *metrics.REDMetrics {
return metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_autoindexing_background",
@ -70,7 +70,7 @@ func NewScheduler(
observationCtx.Operation(observation.Op{
Name: "codeintel.indexing.HandleIndexSchedule",
MetricLabelValues: []string{"HandleIndexSchedule"},
Metrics: metrics,
Metrics: redMetrics,
ErrorFilter: func(err error) observation.ErrorFilterBehaviour {
if errors.As(err, &inference.LimitError{}) {
return observation.EmitForDefault.Without(observation.EmitForMetrics)

View File

@ -21,7 +21,7 @@ type operations struct {
var m = new(metrics.SingletonREDMetrics)
func newOperations(observationCtx *observation.Context) *operations {
metrics := m.Get(func() *metrics.REDMetrics {
redMetrics := m.Get(func() *metrics.REDMetrics {
return metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_autoindexing_inference",
@ -34,7 +34,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.autoindexing.inference.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -30,9 +30,9 @@ func newTest(db database.DB) *store {
}
func TestProcessStaleSourcedCommits(t *testing.T) {
logger := logtest.Scoped(t)
sqlDB := dbtest.NewDB(logger, t)
db := database.NewDB(logger, sqlDB)
log := logtest.Scoped(t)
sqlDB := dbtest.NewDB(log, t)
db := database.NewDB(log, sqlDB)
store := newTest(db)
ctx := context.Background()

View File

@ -24,7 +24,7 @@ func init() {
}
func TestQueueIndexesExplicit(t *testing.T) {
config := `{
conf := `{
"shared_steps": [
{
"root": "/",
@ -74,7 +74,7 @@ func TestQueueIndexesExplicit(t *testing.T) {
mockGitserverClient,
nil, // symbolsClient
)
_, _ = service.QueueIndexes(context.Background(), 42, "HEAD", config, false, false)
_, _ = service.QueueIndexes(context.Background(), 42, "HEAD", conf, false, false)
if len(mockDBStore.IsQueuedFunc.History()) != 1 {
t.Errorf("unexpected number of calls to IsQueued. want=%d have=%d", 1, len(mockDBStore.IsQueuedFunc.History()))

View File

@ -27,7 +27,7 @@ type operations struct {
var m = new(metrics.SingletonREDMetrics)
func newOperations(observationCtx *observation.Context) *operations {
metrics := m.Get(func() *metrics.REDMetrics {
redMetrics := m.Get(func() *metrics.REDMetrics {
return metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_codenav_lsifstore",
@ -40,7 +40,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.codenav.lsifstore.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -15,7 +15,7 @@ type operations struct {
var m = new(metrics.SingletonREDMetrics)
func newOperations(observationCtx *observation.Context) *operations {
metrics := m.Get(func() *metrics.REDMetrics {
redMetrics := m.Get(func() *metrics.REDMetrics {
return metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_codenav_store",
@ -28,7 +28,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.codenav.store.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -26,7 +26,7 @@ type operations struct {
var m = new(metrics.SingletonREDMetrics)
func newOperations(observationCtx *observation.Context) *operations {
metrics := m.Get(func() *metrics.REDMetrics {
redMetrics := m.Get(func() *metrics.REDMetrics {
return metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_codenav",
@ -39,7 +39,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.codenav.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -67,7 +67,7 @@ func (m *Matcher) CommitsDescribedByPolicy(ctx context.Context, repositoryID int
}
// mutable context
context := matcherContext{
mContext := matcherContext{
repositoryID: repositoryID,
policies: policies,
patterns: patterns,
@ -85,26 +85,26 @@ func (m *Matcher) CommitsDescribedByPolicy(ctx context.Context, repositoryID int
switch refDescription.Type {
case gitdomain.RefTypeTag:
// Match tagged commits
m.matchTaggedCommits(context, commit, refDescription, now)
m.matchTaggedCommits(mContext, commit, refDescription, now)
case gitdomain.RefTypeBranch:
// Match tips of branches
m.matchBranchHeads(context, commit, refDescription, now)
m.matchBranchHeads(mContext, commit, refDescription, now)
}
}
}
// Match commits on branches but not at tip
if err := m.matchCommitsOnBranch(ctx, context, now); err != nil {
if err := m.matchCommitsOnBranch(ctx, mContext, now); err != nil {
return nil, err
}
// Match comments via rev-parse
if err := m.matchCommitPolicies(ctx, context, now); err != nil {
if err := m.matchCommitPolicies(ctx, mContext, now); err != nil {
return nil, err
}
return context.commitMap, nil
return mContext.commitMap, nil
}
type matcherContext struct {

View File

@ -28,7 +28,7 @@ type operations struct {
var m = new(metrics.SingletonREDMetrics)
func newOperations(observationCtx *observation.Context) *operations {
metrics := m.Get(func() *metrics.REDMetrics {
redMetrics := m.Get(func() *metrics.REDMetrics {
return metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_policies",
@ -41,7 +41,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.policies.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
})
}

View File

@ -93,12 +93,12 @@ func (r *rootResolver) CodeIntelligenceConfigurationPolicies(ctx context.Context
opts.ForIndexing = *args.ForIndexing
}
policies, totalCount, err := r.policySvc.GetConfigurationPolicies(ctx, opts)
configPolicies, totalCount, err := r.policySvc.GetConfigurationPolicies(ctx, opts)
if err != nil {
return nil, err
}
return NewCodeIntelligenceConfigurationPolicyConnectionResolver(r.policySvc, policies, totalCount, traceErrs), nil
return NewCodeIntelligenceConfigurationPolicyConnectionResolver(r.policySvc, configPolicies, totalCount, traceErrs), nil
}
// 🚨 SECURITY: Only site admins may modify code intelligence configuration policies

View File

@ -29,7 +29,7 @@ type operations struct {
var m = new(metrics.SingletonREDMetrics)
func newOperations(observationCtx *observation.Context) *operations {
metrics := m.Get(func() *metrics.REDMetrics {
redMetrics := m.Get(func() *metrics.REDMetrics {
return metrics.NewREDMetrics(
observationCtx.Registerer,
"codeintel_gitserver",
@ -42,7 +42,7 @@ func newOperations(observationCtx *observation.Context) *operations {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("codeintel.gitserver.%s", name),
MetricLabelValues: []string{name},
Metrics: metrics,
Metrics: redMetrics,
ErrorFilter: func(err error) observation.ErrorFilterBehaviour {
if errors.HasType(err, &gitdomain.RevisionNotFoundError{}) {
return observation.EmitForNone

View File

@ -43,9 +43,9 @@ func (r *GitCommitResolver) AbbreviatedOID() string {
}
func (r *GitCommitResolver) URL() string {
url := r.repoResolver.url()
url.Path += "/-/commit/" + r.inputRevOrImmutableRev()
return url.String()
u := r.repoResolver.url()
u.Path += "/-/commit/" + r.inputRevOrImmutableRev()
return u.String()
}
// inputRevOrImmutableRev returns the input revspec, if it is provided and nonempty. Otherwise it returns the
@ -59,9 +59,9 @@ func (r *GitCommitResolver) inputRevOrImmutableRev() string {
func (r *GitCommitResolver) canonicalRepoRevURL() *url.URL {
// Dereference to copy the URL to avoid mutation
url := *r.repoResolver.RepoMatch.URL()
url.Path += "@" + string(r.oid)
return &url
repoURL := *r.repoResolver.RepoMatch.URL()
repoURL.Path += "@" + string(r.oid)
return &repoURL
}
// repoRevURL returns the URL path prefix to use when constructing URLs to resources at this
@ -71,7 +71,7 @@ func (r *GitCommitResolver) canonicalRepoRevURL() *url.URL {
// "/REPO/-/commit/REVSPEC").
func (r *GitCommitResolver) repoRevURL() *url.URL {
// Dereference to copy to avoid mutation
url := *r.repoResolver.RepoMatch.URL()
repoURL := *r.repoResolver.RepoMatch.URL()
var rev string
if r.inputRev != nil {
rev = *r.inputRev // use the original input rev from the user
@ -79,7 +79,7 @@ func (r *GitCommitResolver) repoRevURL() *url.URL {
rev = string(r.oid)
}
if rev != "" {
url.Path += "@" + rev
repoURL.Path += "@" + rev
}
return &url
return &repoURL
}

View File

@ -90,13 +90,13 @@ func (r *GitTreeEntryResolver) Repository() resolverstubs.RepositoryResolver {
}
func (r *GitTreeEntryResolver) CanonicalURL() string {
url := r.commit.canonicalRepoRevURL()
return r.urlPath(url).String()
canonicalURL := r.commit.canonicalRepoRevURL()
return r.urlPath(canonicalURL).String()
}
func (r *GitTreeEntryResolver) IsRoot() bool {
path := path.Clean(r.Path())
return path == "/" || path == "." || path == ""
cleanedPath := path.Clean(r.Path())
return cleanedPath == "/" || cleanedPath == "." || cleanedPath == ""
}
func (r *GitTreeEntryResolver) IsDirectory() bool { return r.stat.Mode().IsDir() }

View File

@ -173,8 +173,8 @@ func (r *UploadResolver) AuditLogs(ctx context.Context) (*[]resolverstubs.LSIFUp
}
resolvers := make([]resolverstubs.LSIFUploadsAuditLogsResolver, 0, len(logs))
for _, log := range logs {
resolvers = append(resolvers, NewLSIFUploadsAuditLogsResolver(log))
for _, uploadLog := range logs {
resolvers = append(resolvers, NewLSIFUploadsAuditLogsResolver(uploadLog))
}
return &resolvers, nil

Some files were not shown because too many files have changed in this diff Show More