Repository: https://github.com/sourcegraph/sourcegraph.git

golangci-lint: enable unparam linter (#31856)

Team work makes the linter dream work.

Co-authored-by: Varun Gandhi <varun.gandhi@sourcegraph.com>

parent 4cf9945fb0
commit 8d3e231322
@@ -15,6 +15,7 @@ linters:
 - typecheck
 - unconvert
 - unused
+- unparam

 linters-settings:
 depguard:
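For context: unparam reports function parameters that receive the same value at every call site and results that never vary, such as an error that is always nil. The sketch below is a minimal, hypothetical example of code the linter would flag; the names are illustrative and not taken from this repository. The fix is either to drop the parameter or result, as the hunks below do, or to keep the signature and add a //nolint:unparam directive with a reason, as this commit does for a few functions.

package main

import "fmt"

// unparam would report greet twice: the prefix parameter is "Hello" at every
// call site below, and the error result is always nil.
func greet(prefix, name string) (string, error) {
	return fmt.Sprintf("%s, %s!", prefix, name), nil
}

func main() {
	a, _ := greet("Hello", "Alice")
	b, _ := greet("Hello", "Bob")
	fmt.Println(a, b)
}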
@@ -64,11 +64,11 @@ func TestGetAndSaveUser(t *testing.T) {
 }}
 getOneUserOp := GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u1"),
-UserProps: userProps("u1", "u1@example.com", true),
+UserProps: userProps("u1", "u1@example.com"),
 }
 getNonExistentUserCreateIfNotExistOp := GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "nonexistent"),
-UserProps: userProps("nonexistent", "nonexistent@example.com", true),
+UserProps: userProps("nonexistent", "nonexistent@example.com"),
 CreateIfNotExist: true,
 }

@@ -103,7 +103,7 @@ func TestGetAndSaveUser(t *testing.T) {
 description: "ext acct exists, user has same username and email",
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u1"),
-UserProps: userProps("u1", "u1@example.com", true),
+UserProps: userProps("u1", "u1@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -117,7 +117,7 @@ func TestGetAndSaveUser(t *testing.T) {
 // save this as a new verified user email
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u1"),
-UserProps: userProps("doesnotexist", "doesnotexist@example.com", true),
+UserProps: userProps("doesnotexist", "doesnotexist@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -131,7 +131,7 @@ func TestGetAndSaveUser(t *testing.T) {
 // inconsistency
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u1"),
-UserProps: userProps("u1", "u2@example.com", true),
+UserProps: userProps("u1", "u2@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -143,7 +143,7 @@ func TestGetAndSaveUser(t *testing.T) {
 description: "ext acct doesn't exist, user with username and email exists",
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s-new", "c1", "s-new/u1"),
-UserProps: userProps("u1", "u1@example.com", true),
+UserProps: userProps("u1", "u1@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -157,7 +157,7 @@ func TestGetAndSaveUser(t *testing.T) {
 // Note: if the email doesn't match, the user effectively doesn't exist from our POV
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s-new", "c1", "s-new/u1"),
-UserProps: userProps("u1", "doesnotmatch@example.com", true),
+UserProps: userProps("u1", "doesnotmatch@example.com"),
 CreateIfNotExist: true,
 },
 expSafeErr: "Username \"u1\" already exists, but no verified email matched \"doesnotmatch@example.com\"",
@@ -168,7 +168,7 @@ func TestGetAndSaveUser(t *testing.T) {
 // We treat this as a resolved user and ignore the non-matching username
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s-new", "c1", "s-new/u1"),
-UserProps: userProps("doesnotmatch", "u1@example.com", true),
+UserProps: userProps("doesnotmatch", "u1@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -181,7 +181,7 @@ func TestGetAndSaveUser(t *testing.T) {
 description: "ext acct doesn't exist, username and email don't exist, should create user",
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u-new"),
-UserProps: userProps("u-new", "u-new@example.com", true),
+UserProps: userProps("u-new", "u-new@example.com"),
 CreateIfNotExist: true,
 },
 expUserID: 10001,
@@ -189,7 +189,7 @@ func TestGetAndSaveUser(t *testing.T) {
 10001: {ext("st1", "s1", "c1", "s1/u-new")},
 },
 expCreatedUsers: map[int32]database.NewUser{
-10001: userProps("u-new", "u-new@example.com", true),
+10001: userProps("u-new", "u-new@example.com"),
 },
 expCalledGrantPendingPermissions: true,
 },
@@ -197,7 +197,7 @@ func TestGetAndSaveUser(t *testing.T) {
 description: "ext acct doesn't exist, username and email don't exist, should NOT create user",
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u-new"),
-UserProps: userProps("u-new", "u-new@example.com", true),
+UserProps: userProps("u-new", "u-new@example.com"),
 CreateIfNotExist: false,
 },
 expSafeErr: "User account with verified email \"u-new@example.com\" does not exist. Ask a site admin to create your account and then verify your email.",
@@ -207,7 +207,7 @@ func TestGetAndSaveUser(t *testing.T) {
 description: "ext acct exists, (ignore username and email), authenticated",
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u2"),
-UserProps: userProps("ignore", "ignore", true),
+UserProps: userProps("ignore", "ignore"),
 },
 createIfNotExistIrrelevant: true,
 actorUID: 2,
@@ -222,7 +222,7 @@ func TestGetAndSaveUser(t *testing.T) {
 actorUID: 1,
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u1"),
-UserProps: userProps("u1", "u1@example.com", true),
+UserProps: userProps("u1", "u1@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -237,7 +237,7 @@ func TestGetAndSaveUser(t *testing.T) {
 actorUID: 1,
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "s1/u1"),
-UserProps: userProps("doesnotmatch", "u1@example.com", true),
+UserProps: userProps("doesnotmatch", "u1@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -255,7 +255,7 @@ func TestGetAndSaveUser(t *testing.T) {
 actorUID: 1,
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s-new", "c1", "s-new/u1"),
-UserProps: userProps("u1", "doesnotmatch@example.com", true),
+UserProps: userProps("u1", "doesnotmatch@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expUserID: 1,
@@ -268,7 +268,7 @@ func TestGetAndSaveUser(t *testing.T) {
 description: "ext acct doesn't exist, user has same username, lookupByUsername=true",
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "doesnotexist"),
-UserProps: userProps("u1", "", true),
+UserProps: userProps("u1", ""),
 LookUpByUsername: true,
 },
 createIfNotExistIrrelevant: true,
@@ -306,7 +306,7 @@ func TestGetAndSaveUser(t *testing.T) {
 innerCases: []innerCase{{
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "nonexistent"),
-UserProps: userProps("u1", "u1@example.com", true),
+UserProps: userProps("u1", "u1@example.com"),
 },
 expSafeErr: "Unexpected error associating the external account with your Sourcegraph user. The most likely cause for this problem is that another Sourcegraph user is already linked with this external account. A site admin or the other user can unlink the account to fix this problem.",
 expErr: unexpectedErr,
@@ -318,7 +318,7 @@ func TestGetAndSaveUser(t *testing.T) {
 innerCases: []innerCase{{
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "nonexistent"),
-UserProps: userProps("u1", "u1@example.com", true),
+UserProps: userProps("u1", "u1@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expSafeErr: "Unexpected error looking up the Sourcegraph user by verified email. Ask a site admin for help.",
@@ -331,7 +331,7 @@ func TestGetAndSaveUser(t *testing.T) {
 innerCases: []innerCase{{
 op: GetAndSaveUserOp{
 ExternalAccount: ext("st1", "s1", "c1", "nonexistent"),
-UserProps: userProps("u1", "u1@example.com", true),
+UserProps: userProps("u1", "u1@example.com"),
 },
 createIfNotExistIrrelevant: true,
 expSafeErr: "Unexpected error getting the Sourcegraph user account. Ask a site admin for help.",
@@ -737,10 +737,10 @@ func ext(serviceType, serviceID, clientID, accountID string) extsvc.AccountSpec
 }
 }

-func userProps(username, email string, verifiedEmail bool) database.NewUser {
+func userProps(username, email string) database.NewUser {
 return database.NewUser{
 Username: username,
 Email: email,
-EmailIsVerified: verifiedEmail,
+EmailIsVerified: true,
 }
 }
@@ -29,7 +29,7 @@ var errRepoNotFound = &errcode.Mock{

 func (s *MockRepos) MockGet(t *testing.T, wantRepo api.RepoID) (called *bool) {
 called = new(bool)
-s.Get = func(ctx context.Context, repo api.RepoID) (*types.Repo, error) {
+s.Get = func(_ context.Context, repo api.RepoID) (*types.Repo, error) {
 *called = true
 if repo != wantRepo {
 t.Errorf("got repo %d, want %d", repo, wantRepo)
@@ -42,7 +42,7 @@ func (s *MockRepos) MockGet(t *testing.T, wantRepo api.RepoID) (called *bool) {

 func (s *MockRepos) MockGetByName(t *testing.T, wantName api.RepoName, repo api.RepoID) (called *bool) {
 called = new(bool)
-s.GetByName = func(ctx context.Context, name api.RepoName) (*types.Repo, error) {
+s.GetByName = func(_ context.Context, name api.RepoName) (*types.Repo, error) {
 *called = true
 if name != wantName {
 t.Errorf("got repo name %q, want %q", name, wantName)
@@ -55,7 +55,7 @@ func (s *MockRepos) MockGetByName(t *testing.T, wantName api.RepoName, repo api.

 func (s *MockRepos) MockGet_Return(t *testing.T, returns *types.Repo) (called *bool) {
 called = new(bool)
-s.Get = func(ctx context.Context, repo api.RepoID) (*types.Repo, error) {
+s.Get = func(_ context.Context, repo api.RepoID) (*types.Repo, error) {
 *called = true
 if repo != returns.ID {
 t.Errorf("got repo %d, want %d", repo, returns.ID)
@@ -68,7 +68,7 @@ func (s *MockRepos) MockGet_Return(t *testing.T, returns *types.Repo) (called *b

 func (s *MockRepos) MockList(t *testing.T, wantRepos ...api.RepoName) (called *bool) {
 called = new(bool)
-s.List = func(ctx context.Context, opt database.ReposListOptions) ([]*types.Repo, error) {
+s.List = func(_ context.Context, opt database.ReposListOptions) ([]*types.Repo, error) {
 *called = true
 repos := make([]*types.Repo, len(wantRepos))
 for i, repo := range wantRepos {
@@ -26,6 +26,7 @@ var requestGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
 Help: "Current number of requests running for a method.",
 }, []string{"method"})

+//nolint:unparam // unparam complains that `server` always has same value across call-sites, but that's OK
 func trace(ctx context.Context, server, method string, arg interface{}, err *error) (context.Context, func()) {
 requestGauge.WithLabelValues(server + "." + method).Inc()

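The //nolint:unparam comment added above trace is the escape hatch for findings that are intentional: a directive placed on the line immediately before a declaration suppresses the named linter for that declaration, and this commit follows the convention of stating a reason after the directive. A minimal, hypothetical sketch of the same pattern, using an illustrative function that is not part of this repository:

//nolint:unparam // prefix is intentionally kept so all call sites share one signature
func label(prefix, msg string) string {
	return prefix + ": " + msg
}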
@@ -23,7 +23,7 @@ import (

 const singletonSiteGQLID = "site"

-func (r *schemaResolver) siteByGQLID(ctx context.Context, id graphql.ID) (Node, error) {
+func (r *schemaResolver) siteByGQLID(_ context.Context, id graphql.ID) (Node, error) {
 siteGQLID, err := unmarshalSiteGQLID(id)
 if err != nil {
 return nil, err
@@ -19,7 +19,7 @@ import (
 // duplication kludge.

 // NOTE: Keep in sync with services/backend/httpapi/repo_shield.go
-func badgeValue(r *http.Request, db database.DB) (int, error) {
+func badgeValue(r *http.Request) (int, error) {
 totalRefs, err := backend.CountGoImporters(r.Context(), httpcli.InternalDoer, routevar.ToRepo(mux.Vars(r)))
 if err != nil {
 return 0, errors.Wrap(err, "Defs.TotalRefs")
@@ -43,7 +43,7 @@ func badgeValueFmt(totalRefs int) string {

 func serveRepoBadge(db database.DB) func(http.ResponseWriter, *http.Request) error {
 return func(w http.ResponseWriter, r *http.Request) error {
-value, err := badgeValue(r, db)
+value, err := badgeValue(r)
 if err != nil {
 return err
 }
@@ -19,12 +19,12 @@ import (
 "github.com/sourcegraph/sourcegraph/lib/errors"
 )

-func editorRev(ctx context.Context, db database.DB, repoName api.RepoName, rev string, beExplicit bool) (string, error) {
+func editorRev(ctx context.Context, db database.DB, repoName api.RepoName, rev string, beExplicit bool) string {
 if beExplicit {
-return "@" + rev, nil
+return "@" + rev
 }
 if rev == "HEAD" {
-return "", nil // Detached head state
+return ""
 }
 repos := backend.NewRepos(db.Repos())
 repo, err := repos.GetByName(ctx, repoName)
@@ -34,23 +34,23 @@ func editorRev(ctx context.Context, db database.DB, repoName api.RepoName, rev s
 // this case, the best user experience is to send them to the branch
 // they asked for. The front-end will inform them if the branch does
 // not exist.
-return "@" + rev, nil
+return "@" + rev
 }
 // If we are on the default branch we want to return a clean URL without a
 // branch. If we fail its best to return the full URL and allow the
 // front-end to inform them of anything that is wrong.
 defaultBranchCommitID, err := repos.ResolveRev(ctx, repo, "")
 if err != nil {
-return "@" + rev, nil
+return "@" + rev
 }
 branchCommitID, err := repos.ResolveRev(ctx, repo, rev)
 if err != nil {
-return "@" + rev, nil
+return "@" + rev
 }
 if defaultBranchCommitID == branchCommitID {
-return "", nil // default branch, so make a clean URL without a branch.
+return ""
 }
-return "@" + rev, nil
+return "@" + rev
 }

 // editorRequest represents the parameters to a Sourcegraph "open file", "search", etc. editor request.
@@ -189,10 +189,8 @@ func (r *editorRequest) openFileRedirect(ctx context.Context) (string, error) {
 if inputRev == "" {
 inputRev, beExplicit = of.branch, false
 }
-rev, err := editorRev(ctx, r.db, repoName, inputRev, beExplicit)
-if err != nil {
-return "", err
-}
+
+rev := editorRev(ctx, r.db, repoName, inputRev, beExplicit)

 u := &url.URL{Path: path.Join("/", string(repoName)+rev, "/-/blob/", of.file)}
 q := u.Query()
@@ -15,7 +15,7 @@ import (

 func TestEditorRev(t *testing.T) {
 repoName := api.RepoName("myRepo")
-backend.Mocks.Repos.ResolveRev = func(v0 context.Context, repo *types.Repo, rev string) (api.CommitID, error) {
+backend.Mocks.Repos.ResolveRev = func(_ context.Context, _ *types.Repo, rev string) (api.CommitID, error) {
 if rev == "branch" {
 return api.CommitID(strings.Repeat("b", 40)), nil
 }
@@ -48,10 +48,7 @@ func TestEditorRev(t *testing.T) {
 {strings.Repeat("d", 40), "@" + strings.Repeat("d", 40), true}, // default revision, explicit
 }
 for _, c := range cases {
-got, err := editorRev(ctx, database.NewMockDB(), repoName, c.inputRev, c.beExplicit)
-if err != nil {
-t.Fatal(err)
-}
+got := editorRev(ctx, database.NewMockDB(), repoName, c.inputRev, c.beExplicit)
 if got != c.expEditorRev {
 t.Errorf("On input rev %q: got %q, want %q", c.inputRev, got, c.expEditorRev)
 }
@@ -270,7 +270,7 @@ func getAndMarshalCodeHostIntegrationUsageJSON(ctx context.Context, db database.
 return json.Marshal(codeHostIntegrationUsage)
 }

-func getAndMarshalCodeHostVersionsJSON(ctx context.Context, db database.DB) (_ json.RawMessage, err error) {
+func getAndMarshalCodeHostVersionsJSON(_ context.Context, _ database.DB) (_ json.RawMessage, err error) {
 defer recordOperation("getAndMarshalCodeHostVersionsJSON")(&err)

 versions, err := versions.GetVersions()
@@ -44,7 +44,7 @@ func newExternalHTTPHandler(
 newGitHubAppCloudSetupHandler enterprise.NewGitHubAppCloudSetupHandler,
 newComputeStreamHandler enterprise.NewComputeStreamHandler,
 rateLimitWatcher graphqlbackend.LimitWatcher,
-) (http.Handler, error) {
+) http.Handler {
 // Each auth middleware determines on a per-request basis whether it should be enabled (if not, it
 // immediately delegates the request to the next middleware in the chain).
 authMiddlewares := auth.AuthMiddleware()
@@ -110,7 +110,7 @@ func newExternalHTTPHandler(
 h = tracepkg.HTTPMiddleware(h, conf.DefaultClient())
 h = ot.HTTPMiddleware(h)

-return h, nil
+return h
 }

 func healthCheckMiddleware(next http.Handler) http.Handler {
@@ -299,7 +299,7 @@ func makeExternalAPI(db database.DB, schema *graphql.Schema, enterprise enterpri
 }

 // Create the external HTTP handler.
-externalHandler, err := newExternalHTTPHandler(
+externalHandler := newExternalHTTPHandler(
 db,
 schema,
 enterprise.GitHubWebhook,
@@ -311,9 +311,6 @@ func makeExternalAPI(db database.DB, schema *graphql.Schema, enterprise enterpri
 enterprise.NewComputeStreamHandler,
 rateLimiter,
 )
-if err != nil {
-return nil, err
-}
 httpServer := &http.Server{
 Handler: externalHandler,
 ReadTimeout: 75 * time.Second,
@@ -14,7 +14,7 @@ import (
 )

 // NOTE: Keep in sync with services/backend/httpapi/repo_shield.go
-func badgeValue(r *http.Request, db database.DB) (int, error) {
+func badgeValue(r *http.Request) (int, error) {
 totalRefs, err := backend.CountGoImporters(r.Context(), httpcli.InternalDoer, routevar.ToRepo(mux.Vars(r)))
 if err != nil {
 return 0, errors.Wrap(err, "Defs.TotalRefs")
@@ -38,7 +38,7 @@ func badgeValueFmt(totalRefs int) string {

 func serveRepoShield(db database.DB) func(http.ResponseWriter, *http.Request) error {
 return func(w http.ResponseWriter, r *http.Request) error {
-value, err := badgeValue(r, db)
+value, err := badgeValue(r)
 if err != nil {
 return err
 }
@@ -391,6 +391,8 @@ func strPtr(s string) *string {

 // withDecoration hydrates event match with decorated hunks for a corresponding file match.
 func withDecoration(ctx context.Context, eventMatch streamhttp.EventMatch, internalResult result.Match, kind string, contextLines int) streamhttp.EventMatch {
+// FIXME: Use contextLines to constrain hunks.
+_ = contextLines
 if _, ok := internalResult.(*result.FileMatch); !ok {
 return eventMatch
 }
@@ -14,7 +14,7 @@ import (
 "github.com/sourcegraph/sourcegraph/lib/errors"
 )

-func TestInstrumentHandler(t *testing.T) {
+func TestInstrumentHandler(_ *testing.T) {
 h := http.Handler(nil)
 instrumentHandler(prometheus.DefaultRegisterer, h)
 }
@@ -14,7 +14,6 @@ import (
 "github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
 "github.com/sourcegraph/sourcegraph/internal/lazyregexp"
 "github.com/sourcegraph/sourcegraph/lib/errors"
-"github.com/sourcegraph/sourcegraph/schema"
 )

 // handleGetGitolitePhabricatorMetadata serves the Gitolite
@@ -42,7 +41,7 @@ func (s *Server) handleGetGitolitePhabricatorMetadata(w http.ResponseWriter, r *
 if gconf.Phabricator == nil {
 continue
 }
-callsign, err := getGitolitePhabCallsign(r.Context(), gconf, repoName, gconf.Phabricator.CallsignCommand)
+callsign, err := getGitolitePhabCallsign(r.Context(), repoName, gconf.Phabricator.CallsignCommand)
 if err != nil {
 log15.Warn("failed to get Phabricator callsign", "host", gconf.Host, "repo", repoName, "err", err)
 continue
@@ -71,7 +70,7 @@ func (s *Server) handleGetGitolitePhabricatorMetadata(w http.ResponseWriter, r *

 var callSignPattern = lazyregexp.New("^[A-Z]+$")

-func getGitolitePhabCallsign(ctx context.Context, gconf *schema.GitoliteConnection, repo, command string) (string, error) {
+func getGitolitePhabCallsign(ctx context.Context, repo, command string) (string, error) {
 cmd := exec.CommandContext(ctx, "sh", "-c", command)
 cmd.Env = append(os.Environ(), "REPO="+repo)
 stdout, err := cmd.Output()
@@ -63,7 +63,7 @@ func (s *Server) repoInfo(ctx context.Context, repo api.RepoName) (*protocol.Rep
 return &resp, nil
 }

-func (s *Server) repoCloneProgress(repo api.RepoName) (*protocol.RepoCloneProgress, error) {
+func (s *Server) repoCloneProgress(repo api.RepoName) *protocol.RepoCloneProgress {
 dir := s.dir(repo)
 resp := protocol.RepoCloneProgress{
 Cloned: repoCloned(dir),
@@ -73,7 +73,7 @@ func (s *Server) repoCloneProgress(repo api.RepoName) (*protocol.RepoCloneProgre
 resp.CloneInProgress = true
 resp.CloneProgress = "This will never finish cloning"
 }
-return &resp, nil
+return &resp
 }

 func (s *Server) handleRepoInfo(w http.ResponseWriter, r *http.Request) {
@@ -128,11 +128,7 @@ func (s *Server) handleRepoCloneProgress(w http.ResponseWriter, r *http.Request)
 Results: make(map[api.RepoName]*protocol.RepoCloneProgress, len(req.Repos)),
 }
 for _, repoName := range req.Repos {
-result, err := s.repoCloneProgress(repoName)
-if err != nil {
-http.Error(w, err.Error(), http.StatusInternalServerError)
-return
-}
+result := s.repoCloneProgress(repoName)
 resp.Results[repoName] = result
 }

@@ -762,9 +762,9 @@ func TestCloneRepo_EnsureValidity(t *testing.T) {
 var (
 remote = t.TempDir()
 reposDir = t.TempDir()
-cmd = func(name string, arg ...string) string {
+cmd = func(name string, arg ...string) {
 t.Helper()
-return runCmd(t, remote, name, arg...)
+runCmd(t, remote, name, arg...)
 }
 )

@@ -780,9 +780,9 @@ func TestCloneRepo_EnsureValidity(t *testing.T) {
 var (
 remote = t.TempDir()
 reposDir = t.TempDir()
-cmd = func(name string, arg ...string) string {
+cmd = func(name string, arg ...string) {
 t.Helper()
-return runCmd(t, remote, name, arg...)
+runCmd(t, remote, name, arg...)
 }
 )

@@ -940,9 +940,9 @@ func TestSyncRepoState(t *testing.T) {
 db := database.NewDB(dbtest.NewDB(t))
 remoteDir := t.TempDir()

-cmd := func(name string, arg ...string) string {
+cmd := func(name string, arg ...string) {
 t.Helper()
-return runCmd(t, remoteDir, name, arg...)
+runCmd(t, remoteDir, name, arg...)
 }

 // Setup a repo with a commit so we can see if we can clone it.
@@ -223,7 +223,7 @@ func (s *Service) search(ctx context.Context, p *protocol.Request, sender matchS
 if p.IsStructuralPat {
 return filteredStructuralSearch(ctx, zipPath, zf, &p.PatternInfo, p.Repo, sender)
 } else {
-return regexSearch(ctx, rg, zf, p.Limit, p.PatternMatchesContent, p.PatternMatchesPath, p.IsNegated, sender)
+return regexSearch(ctx, rg, zf, p.PatternMatchesContent, p.PatternMatchesPath, p.IsNegated, sender)
 }
 }

@@ -299,12 +299,12 @@ func (rg *readerGrep) FindZip(zf *store.ZipFile, f *store.SrcFile, limit int) (p
 func regexSearchBatch(ctx context.Context, rg *readerGrep, zf *store.ZipFile, limit int, patternMatchesContent, patternMatchesPaths bool, isPatternNegated bool) ([]protocol.FileMatch, bool, error) {
 ctx, cancel, sender := newLimitedStreamCollector(ctx, limit)
 defer cancel()
-err := regexSearch(ctx, rg, zf, limit, patternMatchesContent, patternMatchesPaths, isPatternNegated, sender)
+err := regexSearch(ctx, rg, zf, patternMatchesContent, patternMatchesPaths, isPatternNegated, sender)
 return sender.Collected(), sender.LimitHit(), err
 }

 // regexSearch concurrently searches files in zr looking for matches using rg.
-func regexSearch(ctx context.Context, rg *readerGrep, zf *store.ZipFile, limit int, patternMatchesContent, patternMatchesPaths bool, isPatternNegated bool, sender matchSender) error {
+func regexSearch(ctx context.Context, rg *readerGrep, zf *store.ZipFile, patternMatchesContent, patternMatchesPaths bool, isPatternNegated bool, sender matchSender) error {
 var err error
 span, ctx := ot.StartSpanFromContext(ctx, "RegexSearch")
 ext.Component.Set(span, "regex_search")
@@ -12,13 +12,11 @@ const grafanaProcLine = `grafana: /usr/share/grafana/bin/grafana-server -config

 const jaegerProcLine = `jaeger: env QUERY_BASE_PATH=/-/debug/jaeger jaeger --memory.max-traces=20000 >> /var/opt/sourcegraph/jaeger.log 2>&1`

-func maybeMonitoring() ([]string, error) {
+func maybeMonitoring() []string {
 if os.Getenv("DISABLE_OBSERVABILITY") != "" {
 log15.Info("WARNING: Running with monitoring disabled")
-return []string{""}, nil
+return []string{""}
 }
-return []string{
-prometheusProcLine,
-grafanaProcLine,
-jaegerProcLine}, nil
+
+return []string{prometheusProcLine, grafanaProcLine, jaegerProcLine}
 }
@@ -149,11 +149,7 @@ func Main() {
 }
 procfile = append(procfile, ProcfileAdditions...)

-monitoringLines, err := maybeMonitoring()
-if err != nil {
-log.Fatal(err)
-}
-if len(monitoringLines) != 0 {
+if monitoringLines := maybeMonitoring(); len(monitoringLines) != 0 {
 procfile = append(procfile, monitoringLines...)
 }

@@ -15,7 +15,7 @@ var durations = map[string][]float64{}

 // queryGraphQL performs a GraphQL request and stores its latency not the global durations
 // map. If the verbose flag is set, a line with the request's latency is printed.
-func queryGraphQL(ctx context.Context, queryName, query string, variables map[string]interface{}, target interface{}) error {
+func queryGraphQL(_ context.Context, queryName, query string, variables map[string]interface{}, target interface{}) error {
 requestStart := time.Now()

 if err := internal.GraphQLClient().GraphQL(internal.SourcegraphAccessToken, query, variables, target); err != nil {
@@ -125,7 +125,7 @@ type uploadState struct {
 // returns a map from repository names to the state of that repository. Each repository
 // state has a flag indicating whether or not its commit graph is stale, and an entry
 // for each upload belonging to that repository including that upload's state.
-func queryRepoState(ctx context.Context, repoNames []string, uploads []uploadMeta) (map[string]repoState, error) {
+func queryRepoState(_ context.Context, repoNames []string, uploads []uploadMeta) (map[string]repoState, error) {
 uploadIDs := make([]string, 0, len(uploads))
 for _, upload := range uploads {
 uploadIDs = append(uploadIDs, upload.id)
@@ -9,7 +9,6 @@ import (
 "github.com/sourcegraph/sourcegraph/dev/sg/internal/db"
 "github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
 "github.com/sourcegraph/sourcegraph/dev/sg/internal/stdout"
-"github.com/sourcegraph/sourcegraph/internal/database/migration/definition"
 "github.com/sourcegraph/sourcegraph/lib/errors"
 "github.com/sourcegraph/sourcegraph/lib/output"
 )
@@ -18,12 +17,7 @@ import (
 func Revert(databases []db.Database, commit string) error {
 versionsByDatabase := make(map[string][]int, len(databases))
 for _, database := range databases {
-definitions, err := readDefinitions(database)
-if err != nil {
-return err
-}
-
-versions, err := selectMigrationsDefinedInCommit(database, definitions, commit)
+versions, err := selectMigrationsDefinedInCommit(database, commit)
 if err != nil {
 return err
 }
@@ -84,7 +78,7 @@ func Revert(databases []db.Database, commit string) error {

 // selectMigrationsDefinedInCommit returns the identifiers of migrations defined in the given
 // commit for the given schema.a
-func selectMigrationsDefinedInCommit(database db.Database, ds *definition.Definitions, commit string) ([]int, error) {
+func selectMigrationsDefinedInCommit(database db.Database, commit string) ([]int, error) {
 migrationsDir := filepath.Join("migrations", database.Name)

 output, err := run.GitCmd("diff", "--name-only", commit+".."+commit+"~1", migrationsDir)
@@ -8,7 +8,7 @@ import (
 "github.com/sourcegraph/sourcegraph/lib/process"
 )

-func nameToColor(s string, v ...interface{}) output.Style {
+func nameToColor(s string) output.Style {
 h := fnv.New32()
 h.Write([]byte(s))
 // We don't use 256 colors because some of those are too dark/bright and hard to read
@@ -56,7 +56,7 @@ func setupExec(ctx context.Context, args []string) error {
 go func() {
 for range c {
 writeOrangeLinef("\n💡 You may need to restart your shell for the changes to work in this terminal.")
-writeOrangeLinef(" Close this terminal and open a new one or type the following command and press ENTER: " + filepath.Base(usershell.ShellPath(ctx)))
+writeOrangeLinef(" Close this terminal and open a new one or type the following command and press ENTER: %s", filepath.Base(usershell.ShellPath(ctx)))
 os.Exit(0)
 }
 }()
@@ -39,10 +39,7 @@ func (s *Serve) Start() error {
 s.Addr = ln.Addr().String()

 s.Info.Printf("listening on http://%s", s.Addr)
-h, err := s.handler()
-if err != nil {
-return errors.Wrap(err, "configuring server")
-}
+h := s.handler()

 if err := (&http.Server{Handler: h}).Serve(ln); err != nil {
 return errors.Wrap(err, "serving")
@@ -70,14 +67,14 @@ type Repo struct {
 URI string
 }

-func (s *Serve) handler() (http.Handler, error) {
+func (s *Serve) handler() http.Handler {
 s.Info.Printf("serving git repositories from %s", s.Root)
 s.configureRepos()

 // Start the HTTP server.
 mux := &http.ServeMux{}

-mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+mux.HandleFunc("/", func(w http.ResponseWriter, _ *http.Request) {
 w.Header().Set("Content-Type", "text/html; charset=utf-8")
 err := indexHTML.Execute(w, map[string]interface{}{
 "Explain": explainAddr(s.Addr),
@@ -91,7 +88,7 @@ func (s *Serve) handler() (http.Handler, error) {
 }
 })

-mux.HandleFunc("/v1/list-repos", func(w http.ResponseWriter, r *http.Request) {
+mux.HandleFunc("/v1/list-repos", func(w http.ResponseWriter, _ *http.Request) {
 var repos []Repo
 var reposRootIsRepo bool
 for _, name := range s.configureRepos() {
@@ -139,7 +136,7 @@ func (s *Serve) handler() (http.Handler, error) {
 s.Info.Printf("%s %s", r.Method, r.URL.Path)
 }
 mux.ServeHTTP(w, r)
-}), nil
+})
 }

 type httpDir struct {
@@ -38,7 +38,7 @@ func TestReposHandler(t *testing.T) {
 t.Run(tc.name, func(t *testing.T) {
 root := gitInitRepos(t, tc.repos...)

-h, err := (&Serve{
+h := (&Serve{
 Info: testLogger(t),
 Debug: discardLogger,
 Addr: testAddress,
@@ -46,9 +46,6 @@ func TestReposHandler(t *testing.T) {

 updatingServerInfo: 2, // disables background updates
 }).handler()
-if err != nil {
-t.Fatal(err)
-}

 var want []Repo
 for _, name := range tc.repos {
@@ -71,7 +68,7 @@ func TestReposHandler(t *testing.T) {
 // This is the difference to above, we point our root at the git repo
 root = filepath.Join(root, "project-root")

-h, err := (&Serve{
+h := (&Serve{
 Info: testLogger(t),
 Debug: discardLogger,
 Addr: testAddress,
@@ -79,9 +76,6 @@ func TestReposHandler(t *testing.T) {

 updatingServerInfo: 2, // disables background updates
 }).handler()
-if err != nil {
-t.Fatal(err)
-}

 // project-root is served from /repos, etc
 want := []Repo{{Name: "project-root", URI: "/repos"}}
@@ -31,7 +31,7 @@ func formatRawOrDockerCommand(spec CommandSpec, dir string, options Options) com
 Command: flatten(
 "docker", "run", "--rm",
 dockerResourceFlags(options.ResourceOptions),
-dockerVolumeFlags(dir, spec.ScriptPath),
+dockerVolumeFlags(dir),
 dockerWorkingdirectoryFlags(spec.Dir),
 // If the env vars will be part of the command line args, we need to quote them
 dockerEnvFlags(quoteEnv(spec.Env)),
@@ -50,7 +50,7 @@ func dockerResourceFlags(options ResourceOptions) []string {
 }
 }

-func dockerVolumeFlags(wd, scriptPath string) []string {
+func dockerVolumeFlags(wd string) []string {
 return []string{"-v", wd + ":/data"}
 }

@@ -30,7 +30,7 @@ const firecrackerContainerDir = "/work"
 // The name value supplied here refers to the Firecracker virtual machine, which must have
 // also been the name supplied to a successful invocation of setupFirecracker. Additionally,
 // the virtual machine must not yet have been torn down (via teardownFirecracker).
-func formatFirecrackerCommand(spec CommandSpec, name, repoDir string, options Options) command {
+func formatFirecrackerCommand(spec CommandSpec, name string, options Options) command {
 rawOrDockerCommand := formatRawOrDockerCommand(spec, firecrackerContainerDir, options)

 innerCommand := strings.Join(rawOrDockerCommand.Command, " ")
@@ -114,7 +114,7 @@ func callWithInstrumentedLock(operations *Operations, f func() error) error {

 // teardownFirecracker issues a stop and a remove request for the Firecracker VM with
 // the given name.
-func teardownFirecracker(ctx context.Context, runner commandRunner, logger *Logger, name string, options Options, operations *Operations) error {
+func teardownFirecracker(ctx context.Context, runner commandRunner, logger *Logger, name string, operations *Operations) error {
 removeCommand := command{
 Key: "teardown.firecracker.remove",
 Command: flatten("ignite", "rm", "-f", name),
@@ -23,7 +23,6 @@ func TestFormatFirecrackerCommandRaw(t *testing.T) {
 Operation: makeTestOperation(),
 },
 "deadbeef",
-"/proj/src",
 Options{},
 )

@@ -51,7 +50,6 @@ func TestFormatFirecrackerCommandDockerScript(t *testing.T) {
 Operation: makeTestOperation(),
 },
 "deadbeef",
-"/proj/src",
 Options{
 ResourceOptions: ResourceOptions{
 NumCPUs: 4,
@@ -125,10 +123,9 @@ func TestSetupFirecracker(t *testing.T) {

 func TestTeardownFirecracker(t *testing.T) {
 runner := NewMockCommandRunner()
-options := Options{}
 operations := NewOperations(&observation.TestContext)

-if err := teardownFirecracker(context.Background(), runner, nil, "deadbeef", options, operations); err != nil {
+if err := teardownFirecracker(context.Background(), runner, nil, "deadbeef", operations); err != nil {
 t.Fatalf("unexpected error tearing down virtual machine: %s", err)
 }

@@ -119,11 +119,11 @@ func (r *firecrackerRunner) Setup(ctx context.Context) error {
 }

 func (r *firecrackerRunner) Teardown(ctx context.Context) error {
-return teardownFirecracker(ctx, defaultRunner, r.logger, r.name, r.options, r.operations)
+return teardownFirecracker(ctx, defaultRunner, r.logger, r.name, r.operations)
 }

 func (r *firecrackerRunner) Run(ctx context.Context, command CommandSpec) error {
-return runCommand(ctx, formatFirecrackerCommand(command, r.name, r.dir, r.options), r.logger)
+return runCommand(ctx, formatFirecrackerCommand(command, r.name, r.options), r.logger)
 }

 type runnerWrapper struct{}
@@ -256,7 +256,7 @@ func writeFiles(workspaceFileContentsByPath map[string][]byte, logger *command.L
 return nil
 }

-func createHoneyEvent(ctx context.Context, job executor.Job, err error, duration time.Duration) honey.Event {
+func createHoneyEvent(_ context.Context, job executor.Job, err error, duration time.Duration) honey.Event {
 fields := map[string]interface{}{
 "duration_ms": duration.Milliseconds(),
 "recordID": job.RecordID(),
@@ -46,7 +46,7 @@ func (p *provider) Config() schema.AuthProviders {
 func (p *provider) Refresh(ctx context.Context) error {
 p.mu.Lock()
 defer p.mu.Unlock()
-p.oidc, p.refreshErr = newProvider(ctx, p.config.Issuer)
+p.oidc, p.refreshErr = newProvider(p.config.Issuer)
 return p.refreshErr
 }

@@ -120,7 +120,7 @@ type providerExtraClaims struct {

 var mockNewProvider func(issuerURL string) (*oidcProvider, error)

-func newProvider(ctx context.Context, issuerURL string) (*oidcProvider, error) {
+func newProvider(issuerURL string) (*oidcProvider, error) {
 if mockNewProvider != nil {
 return mockNewProvider(issuerURL)
 }
@@ -32,7 +32,7 @@ import (
 "github.com/sourcegraph/sourcegraph/schema"
 )

-func testGitLabWebhook(db *sql.DB, userID int32) func(*testing.T) {
+func testGitLabWebhook(db *sql.DB) func(*testing.T) {
 return func(t *testing.T) {
 ctx := context.Background()

@@ -22,5 +22,5 @@ func TestWebhooksIntegration(t *testing.T) {

 t.Run("GitHubWebhook", testGitHubWebhook(sqlDB, user.ID))
 t.Run("BitbucketWebhook", testBitbucketWebhook(sqlDB, user.ID))
-t.Run("GitLabWebhook", testGitLabWebhook(sqlDB, user.ID))
+t.Run("GitLabWebhook", testGitLabWebhook(sqlDB))
 }
@@ -28,7 +28,7 @@ func Init(ctx context.Context, db database.DB, config *Config, enterpriseService
 },
 }

-resolver, err := newResolver(ctx, db, config, resolverObservationContext, services)
+resolver, err := newResolver(db, config, resolverObservationContext, services)
 if err != nil {
 return err
 }
@@ -38,7 +38,7 @@ func Init(ctx context.Context, db database.DB, config *Config, enterpriseService
 return nil
 }

-func newResolver(ctx context.Context, db database.DB, config *Config, observationContext *observation.Context, services *Services) (gql.CodeIntelResolver, error) {
+func newResolver(db database.DB, config *Config, observationContext *observation.Context, services *Services) (gql.CodeIntelResolver, error) {
 policyMatcher := policies.NewMatcher(
 services.gitserverClient,
 policies.NoopExtractor,
@@ -191,10 +191,7 @@ func (r *CachedLocationResolver) cachedPath(ctx context.Context, id api.RepoID,
 }

 // Resolve new value and store in cache
-resolver, err := r.resolvePath(ctx, parentResolver.resolver, path)
-if err != nil {
-return nil, err
-}
+resolver := r.resolvePath(parentResolver.resolver, path)
 parentResolver.children[path] = resolver
 return resolver, nil
 }
@@ -237,8 +234,8 @@ func (r *CachedLocationResolver) resolveCommit(ctx context.Context, repositoryRe

 // Path resolves the git tree entry with the given commit resolver and relative path. This method must be
 // called only when constructing a resolver to populate the cache.
-func (r *CachedLocationResolver) resolvePath(ctx context.Context, commitResolver *gql.GitCommitResolver, path string) (*gql.GitTreeEntryResolver, error) {
-return gql.NewGitTreeEntryResolver(r.db, commitResolver, gql.CreateFileInfo(path, true)), nil
+func (r *CachedLocationResolver) resolvePath(commitResolver *gql.GitCommitResolver, path string) *gql.GitTreeEntryResolver {
+return gql.NewGitTreeEntryResolver(r.db, commitResolver, gql.CreateFileInfo(path, true))
 }

 // resolveLocations creates a slide of LocationResolvers for the given list of adjusted locations. The
@@ -240,7 +240,7 @@ func TestResolveLocations(t *testing.T) {
 backend.Mocks.Repos.GetCommit = nil
 })

-git.Mocks.ResolveRevision = func(spec string, opt git.ResolveRevisionOptions) (api.CommitID, error) {
+git.Mocks.ResolveRevision = func(spec string, _ git.ResolveRevisionOptions) (api.CommitID, error) {
 if spec == "deadbeef3" {
 return "", &gitdomain.RevisionNotFoundError{}
 }
@@ -22,7 +22,7 @@ func TestPrefetcherUploads(t *testing.T) {
 5: {ID: 5},
 }

-mockResolver.GetUploadsByIDsFunc.SetDefaultHook(func(ctx context.Context, ids ...int) ([]dbstore.Upload, error) {
+mockResolver.GetUploadsByIDsFunc.SetDefaultHook(func(_ context.Context, ids ...int) ([]dbstore.Upload, error) {
 matching := make([]dbstore.Upload, 0, len(ids))
 for _, id := range ids {
 matching = append(matching, uploads[id])
@@ -93,7 +93,7 @@ func TestPrefetcherIndexes(t *testing.T) {
 5: {ID: 5},
 }

-mockResolver.GetIndexesByIDsFunc.SetDefaultHook(func(ctx context.Context, ids ...int) ([]dbstore.Index, error) {
+mockResolver.GetIndexesByIDsFunc.SetDefaultHook(func(_ context.Context, ids ...int) ([]dbstore.Index, error) {
 matching := make([]dbstore.Index, 0, len(ids))
 for _, id := range ids {
 matching = append(matching, indexes[id])
@@ -114,7 +114,7 @@ func (r *Resolver) LSIFUploadsByRepo(ctx context.Context, args *gql.LSIFReposito
 }})
 endObservation.OnCancel(ctx, 1, observation.Args{})

-opts, err := makeGetUploadsOptions(ctx, args)
+opts, err := makeGetUploadsOptions(args)
 if err != nil {
 return nil, err
 }
@@ -203,7 +203,7 @@ func (r *Resolver) LSIFIndexesByRepo(ctx context.Context, args *gql.LSIFReposito
 }})
 endObservation.OnCancel(ctx, 1, observation.Args{})

-opts, err := makeGetIndexesOptions(ctx, args)
+opts, err := makeGetIndexesOptions(args)
 if err != nil {
 return nil, err
 }
@@ -616,8 +616,8 @@ func (r *Resolver) PreviewGitObjectFilter(ctx context.Context, id graphql.ID, ar

 // makeGetUploadsOptions translates the given GraphQL arguments into options defined by the
 // store.GetUploads operations.
-func makeGetUploadsOptions(ctx context.Context, args *gql.LSIFRepositoryUploadsQueryArgs) (store.GetUploadsOptions, error) {
-repositoryID, err := resolveRepositoryID(ctx, args.RepositoryID)
+func makeGetUploadsOptions(args *gql.LSIFRepositoryUploadsQueryArgs) (store.GetUploadsOptions, error) {
+repositoryID, err := resolveRepositoryID(args.RepositoryID)
 if err != nil {
 return store.GetUploadsOptions{}, err
 }
@@ -658,8 +658,8 @@ func makeGetUploadsOptions(ctx context.Context, args *gql.LSIFRepositoryUploadsQ

 // makeGetIndexesOptions translates the given GraphQL arguments into options defined by the
 // store.GetIndexes operations.
-func makeGetIndexesOptions(ctx context.Context, args *gql.LSIFRepositoryIndexesQueryArgs) (store.GetIndexesOptions, error) {
-repositoryID, err := resolveRepositoryID(ctx, args.RepositoryID)
+func makeGetIndexesOptions(args *gql.LSIFRepositoryIndexesQueryArgs) (store.GetIndexesOptions, error) {
+repositoryID, err := resolveRepositoryID(args.RepositoryID)
 if err != nil {
 return store.GetIndexesOptions{}, err
 }
@@ -679,7 +679,7 @@ func makeGetIndexesOptions(ctx context.Context, args *gql.LSIFRepositoryIndexesQ
 }

 // resolveRepositoryByID gets a repository's internal identifier from a GraphQL identifier.
-func resolveRepositoryID(ctx context.Context, id graphql.ID) (int, error) {
+func resolveRepositoryID(id graphql.ID) (int, error) {
 if id == "" {
 return 0, nil
 }
@@ -89,7 +89,7 @@ func TestDeleteLSIFIndexUnauthenticated(t *testing.T) {
 }

 func TestMakeGetUploadsOptions(t *testing.T) {
-opts, err := makeGetUploadsOptions(context.Background(), &gql.LSIFRepositoryUploadsQueryArgs{
+opts, err := makeGetUploadsOptions(&gql.LSIFRepositoryUploadsQueryArgs{
 LSIFUploadsQueryArgs: &gql.LSIFUploadsQueryArgs{
 ConnectionArgs: graphqlutil.ConnectionArgs{
 First: intPtr(5),
@@ -120,7 +120,7 @@ func TestMakeGetUploadsOptions(t *testing.T) {
 }

 func TestMakeGetUploadsOptionsDefaults(t *testing.T) {
-opts, err := makeGetUploadsOptions(context.Background(), &gql.LSIFRepositoryUploadsQueryArgs{
+opts, err := makeGetUploadsOptions(&gql.LSIFRepositoryUploadsQueryArgs{
 LSIFUploadsQueryArgs: &gql.LSIFUploadsQueryArgs{},
 })
 if err != nil {
@@ -142,7 +142,7 @@ func TestMakeGetUploadsOptionsDefaults(t *testing.T) {
 }

 func TestMakeGetIndexesOptions(t *testing.T) {
-opts, err := makeGetIndexesOptions(context.Background(), &gql.LSIFRepositoryIndexesQueryArgs{
+opts, err := makeGetIndexesOptions(&gql.LSIFRepositoryIndexesQueryArgs{
 LSIFIndexesQueryArgs: &gql.LSIFIndexesQueryArgs{
 ConnectionArgs: graphqlutil.ConnectionArgs{
 First: intPtr(5),
@@ -170,7 +170,7 @@ func TestMakeGetIndexesOptions(t *testing.T) {
 }

 func TestMakeGetIndexesOptionsDefaults(t *testing.T) {
-opts, err := makeGetIndexesOptions(context.Background(), &gql.LSIFRepositoryIndexesQueryArgs{
+opts, err := makeGetIndexesOptions(&gql.LSIFRepositoryIndexesQueryArgs{
 LSIFIndexesQueryArgs: &gql.LSIFIndexesQueryArgs{},
 })
 if err != nil {
@@ -211,7 +211,7 @@ func (r *Resolver) UpdateCodeMonitor(ctx context.Context, args *graphqlbackend.U
 return nil, err
 }

-toCreate, toDelete, err := splitActionIDs(ctx, args, actionIDs)
+toCreate, toDelete, err := splitActionIDs(args, actionIDs)
 if len(toDelete) == len(actionIDs) && len(toCreate) == 0 {
 return nil, errors.Errorf("you tried to delete all actions, but every monitor must be connected to at least 1 action")
 }
@@ -436,7 +436,7 @@ func (r *Resolver) actionIDsForMonitorIDInt64(ctx context.Context, monitorID int

 // splitActionIDs splits actions into three buckets: create, delete and update.
 // Note: args is mutated. After splitActionIDs, args only contains actions to be updated.
-func splitActionIDs(ctx context.Context, args *graphqlbackend.UpdateCodeMonitorArgs, actionIDs []graphql.ID) (toCreate []*graphqlbackend.CreateActionArgs, toDelete []graphql.ID, err error) {
+func splitActionIDs(args *graphqlbackend.UpdateCodeMonitorArgs, actionIDs []graphql.ID) (toCreate []*graphqlbackend.CreateActionArgs, toDelete []graphql.ID, err error) {
 aMap := make(map[graphql.ID]struct{}, len(actionIDs))
 for _, id := range actionIDs {
 aMap[id] = struct{}{}
@@ -182,7 +182,7 @@ func TestSingleNotebookCRUD(t *testing.T) {
 }

 testGetNotebook(t, db, schema, user1)
-testCreateNotebook(t, db, schema, user1, user2, org)
+testCreateNotebook(t, schema, user1, user2, org)
 testUpdateNotebook(t, db, schema, user1, user2, org)
 testDeleteNotebook(t, db, schema, user1, user2, org)
 }
@@ -205,7 +205,7 @@ func testGetNotebook(t *testing.T, db database.DB, schema *graphql.Schema, user
 compareNotebookAPIResponses(t, wantNotebookResponse, response.Node, false)
 }

-func testCreateNotebook(t *testing.T, db database.DB, schema *graphql.Schema, user1 *types.User, user2 *types.User, org *types.Org) {
+func testCreateNotebook(t *testing.T, schema *graphql.Schema, user1 *types.User, user2 *types.User, org *types.Org) {
 tests := []struct {
 name string
 namespaceUserID int32
@@ -506,19 +506,16 @@ func createNotebooks(t *testing.T, db *sql.DB, notebooksToCreate []*notebooks.No
 return createdNotebooks
 }

-func createNotebookStars(t *testing.T, db *sql.DB, notebookID int64, userIDs ...int32) []*notebooks.NotebookStar {
+func createNotebookStars(t *testing.T, db *sql.DB, notebookID int64, userIDs ...int32) {
 t.Helper()
 n := notebooks.Notebooks(db)
 internalCtx := actor.WithInternalActor(context.Background())
-createdStars := make([]*notebooks.NotebookStar, 0, len(userIDs))
 for _, userID := range userIDs {
-createdStar, err := n.CreateNotebookStar(internalCtx, notebookID, userID)
+_, err := n.CreateNotebookStar(internalCtx, notebookID, userID)
 if err != nil {
 t.Fatal(err)
 }
-createdStars = append(createdStars, createdStar)
 }
-return createdStars
 }

 func TestListNotebooks(t *testing.T) {
@@ -34,9 +34,7 @@ func listLocalRegistryExtensions(ctx context.Context, db database.DB, args graph
 if err != nil {
 return nil, err
 }
-if err := prefixLocalExtensionID(vs...); err != nil {
-return nil, err
-}
+prefixLocalExtensionID(vs...)

 releasesByExtensionID, err := getLatestForBatch(ctx, db, vs)
 if err != nil {
@@ -17,9 +17,7 @@ func init() {
 if err != nil {
 return nil, err
 }
-if err := prefixLocalExtensionID(x); err != nil {
-return nil, err
-}
+prefixLocalExtensionID(x)
 return &extensionDBResolver{db: db, v: x}, nil
 }

@@ -38,14 +36,13 @@ func init() {

 // prefixLocalExtensionID adds the local registry's extension ID prefix (from
 // GetLocalRegistryExtensionIDPrefix) to all extensions' extension IDs in the list.
-func prefixLocalExtensionID(xs ...*stores.Extension) error {
+func prefixLocalExtensionID(xs ...*stores.Extension) {
 prefix := registry.GetLocalRegistryExtensionIDPrefix()
 if prefix == nil {
-return nil
+return
 }
 for _, x := range xs {
 x.NonCanonicalExtensionID = *prefix + "/" + x.NonCanonicalExtensionID
 x.NonCanonicalRegistry = *prefix
 }
-return nil
 }
@@ -30,9 +30,7 @@ func registryExtensionByIDInt32(ctx context.Context, db database.DB, id int32) (
 if err != nil {
 return nil, err
 }
-if err := prefixLocalExtensionID(x); err != nil {
-return nil, err
-}
+prefixLocalExtensionID(x)
 return &extensionDBResolver{db: db, v: x}, nil
 }

@@ -132,9 +132,9 @@ func (q *requestQueue) enqueue(meta *requestMeta) (updated bool) {

 // remove removes the sync request from the queue if the request.acquired matches the
 // acquired argument.
-func (q *requestQueue) remove(typ requestType, id int32, acquired bool) (removed bool) {
+func (q *requestQueue) remove(typ requestType, id int32, acquired bool) {
 if id == 0 {
-return false
+return
 }

 q.mu.Lock()
@@ -147,10 +147,7 @@ func (q *requestQueue) remove(typ requestType, id int32, acquired bool) (removed
 request := q.index[key]
 if request != nil && request.acquired == acquired {
 heap.Remove(q, request.index)
-return true
 }
-
-return false
 }

 // acquireNext acquires the next sync request. The acquired request must be removed from
@@ -12,7 +12,6 @@ import (
 "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/policies"
 "github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/stores/dbstore"
 "github.com/sourcegraph/sourcegraph/internal/observation"
-"github.com/sourcegraph/sourcegraph/internal/timeutil"
 )

 func init() {
@@ -20,9 +19,8 @@ func init() {
 }

 func TestIndexScheduler(t *testing.T) {
-now := timeutil.Now()
 dbStore := testIndexSchedulerMockDBStore()
-policyMatcher := testIndexSchedulerMockPolicyMatcher(now)
+policyMatcher := testIndexSchedulerMockPolicyMatcher()
 indexEnqueuer := NewMockIndexEnqueuer()

 scheduler := &IndexScheduler{
@@ -128,7 +126,7 @@ func testIndexSchedulerMockDBStore() *MockDBStore {
 return dbStore
 }

-func testIndexSchedulerMockPolicyMatcher(now time.Time) *MockPolicyMatcher {
+func testIndexSchedulerMockPolicyMatcher() *MockPolicyMatcher {
 policyMatches := map[int]map[string][]policies.PolicyMatch{
 50: {
 "deadbeef01": {},
@@ -17,7 +17,7 @@ import (
 func TestUploadExpirer(t *testing.T) {
 now := timeutil.Now()
 dbStore := testUploadExpirerMockDBStore(now)
-policyMatcher := testUploadExpirerMockPolicyMatcher(now)
+policyMatcher := testUploadExpirerMockPolicyMatcher()

 uploadExpirer := &uploadExpirer{
 dbStore: dbStore,
@@ -202,7 +202,7 @@ func testUploadExpirerMockDBStore(now time.Time) *MockDBStore {
 return dbStore
 }

-func testUploadExpirerMockPolicyMatcher(now time.Time) *MockPolicyMatcher {
+func testUploadExpirerMockPolicyMatcher() *MockPolicyMatcher {
 policyMatches := map[int]map[string][]policies.PolicyMatch{
 50: {
 "deadbeef01": {{PolicyDuration: days(1)}}, // 1 = 1
@ -19,6 +19,7 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)

//nolint:unparam // unparam complains that `u` always has same value across call-sites, but that's OK
func mustURL(t *testing.T, u string) *url.URL {
parsed, err := url.Parse(u)
if err != nil {
@ -98,7 +99,8 @@ func TestProvider_FetchUserPerms(t *testing.T) {
},
}

mockListAffiliatedRepositories = func(ctx context.Context, visibility github.Visibility, page int, affiliations ...github.RepositoryAffiliation) ([]*github.Repository, bool, int, error) {
//nolint:unparam // Returning constant value for 'int' result is OK
mockListAffiliatedRepositories = func(_ context.Context, _ github.Visibility, page int, _ ...github.RepositoryAffiliation) ([]*github.Repository, bool, int, error) {
switch page {
case 1:
return []*github.Repository{
@ -114,10 +116,11 @@ func TestProvider_FetchUserPerms(t *testing.T) {
return []*github.Repository{}, false, 1, nil
}

mockOrgNoRead = &github.OrgDetails{Org: github.Org{Login: "not-sourcegraph"}, DefaultRepositoryPermission: "none"}
mockOrgNoRead2 = &github.OrgDetails{Org: github.Org{Login: "not-sourcegraph-2"}, DefaultRepositoryPermission: "none"}
mockOrgRead = &github.OrgDetails{Org: github.Org{Login: "sourcegraph"}, DefaultRepositoryPermission: "read"}
mockListOrgDetails = func(ctx context.Context, page int) (orgs []github.OrgDetailsAndMembership, hasNextPage bool, rateLimitCost int, err error) {
mockOrgNoRead = &github.OrgDetails{Org: github.Org{Login: "not-sourcegraph"}, DefaultRepositoryPermission: "none"}
mockOrgNoRead2 = &github.OrgDetails{Org: github.Org{Login: "not-sourcegraph-2"}, DefaultRepositoryPermission: "none"}
mockOrgRead = &github.OrgDetails{Org: github.Org{Login: "sourcegraph"}, DefaultRepositoryPermission: "read"}
//nolint:unparam // Returning constant value for 'int' result is OK
mockListOrgDetails = func(_ context.Context, page int) (orgs []github.OrgDetailsAndMembership, hasNextPage bool, rateLimitCost int, err error) {
switch page {
case 1:
return []github.OrgDetailsAndMembership{{
@ -138,7 +141,8 @@ func TestProvider_FetchUserPerms(t *testing.T) {
return nil, false, 1, nil
}

mockListOrgRepositories = func(ctx context.Context, org string, page int, repoType string) (repos []*github.Repository, hasNextPage bool, rateLimitCost int, err error) {
//nolint:unparam // Returning constant value for 'int' result is OK
mockListOrgRepositories = func(_ context.Context, org string, page int, _ string) (repos []*github.Repository, hasNextPage bool, rateLimitCost int, err error) {
switch org {
case mockOrgRead.Login:
switch page {
@ -212,7 +216,7 @@ func TestProvider_FetchUserPerms(t *testing.T) {
}
// should call with token
calledWithToken := false
mockClient.MockWithToken = func(token string) client {
mockClient.MockWithToken = func(_ string) client {
calledWithToken = true
return mockClient
}
@ -283,7 +287,7 @@ func TestProvider_FetchUserPerms(t *testing.T) {
mockClient := &mockClient{
MockListAffiliatedRepositories: mockListAffiliatedRepositories,
MockGetAuthenticatedUserOrgsDetailsAndMembership: mockListOrgDetails,
MockGetAuthenticatedUserTeams: func(ctx context.Context, page int) (teams []*github.Team, hasNextPage bool, rateLimitCost int, err error) {
MockGetAuthenticatedUserTeams: func(_ context.Context, page int) (teams []*github.Team, hasNextPage bool, rateLimitCost int, err error) {
switch page {
case 1:
return []*github.Team{
@ -301,7 +305,7 @@ func TestProvider_FetchUserPerms(t *testing.T) {
return nil, false, 1, nil
},
MockListOrgRepositories: mockListOrgRepositories,
MockListTeamRepositories: func(ctx context.Context, org, team string, page int) (repos []*github.Repository, hasNextPage bool, rateLimitCost int, err error) {
MockListTeamRepositories: func(_ context.Context, org, team string, page int) (repos []*github.Repository, hasNextPage bool, rateLimitCost int, err error) {
switch org {
case "not-sourcegraph":
switch team {
@ -366,7 +370,7 @@ func TestProvider_FetchUserPerms(t *testing.T) {
callsToListOrgRepos++
return mockListOrgRepositories(ctx, org, page, repoType)
},
MockListTeamRepositories: func(ctx context.Context, org, team string, page int) (repos []*github.Repository, hasNextPage bool, rateLimitCost int, err error) {
MockListTeamRepositories: func(_ context.Context, _, _ string, _ int) (repos []*github.Repository, hasNextPage bool, rateLimitCost int, err error) {
callsToListTeamRepos++
return []*github.Repository{
{ID: "MDEwOlJlcG9zaXRvcnkyNDI2nsteam1="},
@ -565,7 +569,8 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
},
}

mockListCollaborators = func(ctx context.Context, owner, repo string, page int, affiliation github.CollaboratorAffiliation) ([]*github.Collaborator, bool, error) {
//nolint:unparam // Allow returning nil error on all code paths
mockListCollaborators = func(_ context.Context, _, _ string, page int, _ github.CollaboratorAffiliation) ([]*github.Collaborator, bool, error) {
switch page {
case 1:
return []*github.Collaborator{
@ -625,7 +630,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
}
return mockListCollaborators(ctx, owner, repo, page, affiliation)
},
MockGetOrganization: func(ctx context.Context, login string) (org *github.OrgDetails, err error) {
MockGetOrganization: func(_ context.Context, login string) (org *github.OrgDetails, err error) {
if login == "user" {
return nil, &github.OrgNotFoundError{}
}
@ -667,7 +672,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
}
return mockListCollaborators(ctx, owner, repo, page, affiliation)
},
MockGetOrganization: func(ctx context.Context, login string) (org *github.OrgDetails, err error) {
MockGetOrganization: func(_ context.Context, login string) (org *github.OrgDetails, err error) {
if login == "org" {
return &github.OrgDetails{
DefaultRepositoryPermission: "read",
@ -676,7 +681,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
t.Fatalf("unexpected call to GetOrganization with %q", login)
return nil, nil
},
MockListOrganizationMembers: func(ctx context.Context, owner string, page int, adminOnly bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
MockListOrganizationMembers: func(_ context.Context, _ string, page int, adminOnly bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
if adminOnly {
t.Fatal("unexpected adminOnly ListOrganizationMembers")
}
@ -731,7 +736,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {

p.client = mockClientFunc(&mockClient{
MockListRepositoryCollaborators: mockListCollaborators,
MockListOrganizationMembers: func(ctx context.Context, owner string, page int, adminOnly bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
MockListOrganizationMembers: func(_ context.Context, _ string, page int, adminOnly bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
if adminOnly {
return []*github.Collaborator{
{DatabaseID: 9999},
@ -759,7 +764,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
MockGetRepository: func(ctx context.Context, owner, repo string) (*github.Repository, error) {
return &mockInternalOrgRepo, nil
},
MockGetOrganization: func(ctx context.Context, login string) (org *github.OrgDetails, err error) {
MockGetOrganization: func(_ context.Context, login string) (org *github.OrgDetails, err error) {
if login == "org" {
return &github.OrgDetails{
DefaultRepositoryPermission: "none",
@ -843,7 +848,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
}
return mockListCollaborators(ctx, owner, repo, page, affiliation)
},
MockGetOrganization: func(ctx context.Context, login string) (org *github.OrgDetails, err error) {
MockGetOrganization: func(_ context.Context, login string) (org *github.OrgDetails, err error) {
if login == "org" {
return &github.OrgDetails{
DefaultRepositoryPermission: "none",
@ -852,7 +857,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
t.Fatalf("unexpected call to GetOrganization with %q", login)
return nil, nil
},
MockListOrganizationMembers: func(ctx context.Context, org string, page int, adminOnly bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
MockListOrganizationMembers: func(_ context.Context, org string, _ int, adminOnly bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
if org != "org" {
t.Fatalf("unexpected call to list org members with %q", org)
}
@ -863,7 +868,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
{DatabaseID: 3456},
}, false, nil
},
MockListRepositoryTeams: func(ctx context.Context, owner, repo string, page int) (teams []*github.Team, hasNextPage bool, _ error) {
MockListRepositoryTeams: func(_ context.Context, _, _ string, page int) (teams []*github.Team, hasNextPage bool, _ error) {
switch page {
case 1:
return []*github.Team{
@ -877,7 +882,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {

return []*github.Team{}, false, nil
},
MockListTeamMembers: func(ctx context.Context, owner, team string, page int) (users []*github.Collaborator, hasNextPage bool, _ error) {
MockListTeamMembers: func(_ context.Context, _, team string, page int) (users []*github.Collaborator, hasNextPage bool, _ error) {
switch page {
case 1:
return []*github.Collaborator{
@ -937,7 +942,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
}
return mockListCollaborators(ctx, owner, repo, page, affiliation)
},
MockGetOrganization: func(ctx context.Context, login string) (org *github.OrgDetails, err error) {
MockGetOrganization: func(_ context.Context, login string) (org *github.OrgDetails, err error) {
if login == "org" {
return &github.OrgDetails{
DefaultRepositoryPermission: "read",
@ -946,7 +951,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
t.Fatalf("unexpected call to GetOrganization with %q", login)
return nil, nil
},
MockListOrganizationMembers: func(ctx context.Context, owner string, page int, adminOnly bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
MockListOrganizationMembers: func(_ context.Context, _ string, page int, _ bool) (users []*github.Collaborator, hasNextPage bool, _ error) {
callsToListOrgMembers++

switch page {
@ -1046,7 +1051,7 @@ func TestProvider_FetchRepoPerms(t *testing.T) {
{Slug: "team2"},
}, false, nil
},
MockListTeamMembers: func(ctx context.Context, owner, team string, page int) (users []*github.Collaborator, hasNextPage bool, _ error) {
MockListTeamMembers: func(_ context.Context, _, team string, _ int) (users []*github.Collaborator, hasNextPage bool, _ error) {
switch team {
case "team1":
return []*github.Collaborator{

@ -8,8 +8,6 @@ import (
"testing"

"github.com/davecgh/go-spew/spew"
"golang.org/x/oauth2"

"github.com/sourcegraph/sourcegraph/cmd/frontend/auth/providers"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/auth"
@ -368,14 +366,9 @@ func (m mockAuthnProvider) Refresh(ctx context.Context) error {
panic("should not be called")
}

func acct(t *testing.T, userID int32, serviceType, serviceID, accountID, oauthTok string) *extsvc.Account {
func acct(t *testing.T, userID int32, serviceType, serviceID, accountID string) *extsvc.Account {
var data extsvc.AccountData

var authData *oauth2.Token
if oauthTok != "" {
authData = &oauth2.Token{AccessToken: oauthTok}
}

if serviceType == extsvc.TypeGitLab {
gitlabAcctID, err := strconv.Atoi(accountID)
if err != nil {
@ -384,7 +377,7 @@ func acct(t *testing.T, userID int32, serviceType, serviceID, accountID, oauthTo

gitlab.SetExternalAccountData(&data, &gitlab.User{
ID: int32(gitlabAcctID),
}, authData)
}, nil)
}

return &extsvc.Account{

@ -92,16 +92,16 @@ func Test_GitLab_FetchAccount(t *testing.T) {
{
description: "1 account, matches",
user: &types.User{ID: 123},
current: []*extsvc.Account{acct(t, 1, "saml", "https://okta.mine/", "bl", "")},
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101", ""),
current: []*extsvc.Account{acct(t, 1, "saml", "https://okta.mine/", "bl")},
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101"),
},
{
description: "many accounts, none match",
user: &types.User{ID: 123},
current: []*extsvc.Account{
acct(t, 1, "saml", "https://okta.mine/", "nomatch", ""),
acct(t, 1, "saml", "nomatch", "bl", ""),
acct(t, 1, "nomatch", "https://okta.mine/", "bl", ""),
acct(t, 1, "saml", "https://okta.mine/", "nomatch"),
acct(t, 1, "saml", "nomatch", "bl"),
acct(t, 1, "nomatch", "https://okta.mine/", "bl"),
},
expMine: nil,
},
@ -109,11 +109,11 @@ func Test_GitLab_FetchAccount(t *testing.T) {
description: "many accounts, 1 match",
user: &types.User{ID: 123},
current: []*extsvc.Account{
acct(t, 1, "saml", "nomatch", "bl", ""),
acct(t, 1, "nomatch", "https://okta.mine/", "bl", ""),
acct(t, 1, "saml", "https://okta.mine/", "bl", ""),
acct(t, 1, "saml", "nomatch", "bl"),
acct(t, 1, "nomatch", "https://okta.mine/", "bl"),
acct(t, 1, "saml", "https://okta.mine/", "bl"),
},
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101", ""),
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101"),
},
{
description: "no user",
@ -134,7 +134,7 @@ func Test_GitLab_FetchAccount(t *testing.T) {
{
description: "username match",
user: &types.User{ID: 123, Username: "b.l"},
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101", ""),
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101"),
},
{
description: "no username match",
@ -182,13 +182,13 @@ func Test_GitLab_FetchAccount(t *testing.T) {
{
description: "1 authn provider matches",
user: &types.User{ID: 123},
current: []*extsvc.Account{acct(t, 1, "openidconnect", "https://onelogin.mine/", "bl", "")},
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101", ""),
current: []*extsvc.Account{acct(t, 1, "openidconnect", "https://onelogin.mine/", "bl")},
expMine: acct(t, 123, extsvc.TypeGitLab, "https://gitlab.mine/", "101"),
},
{
description: "0 authn providers match",
user: &types.User{ID: 123},
current: []*extsvc.Account{acct(t, 1, "openidconnect", "https://onelogin.mine/", "nomatch", "")},
current: []*extsvc.Account{acct(t, 1, "openidconnect", "https://onelogin.mine/", "nomatch")},
expMine: nil,
},
},

@ -20,10 +20,8 @@ import (
// to connection issues.
func NewAuthzProviders(conns []*types.PerforceConnection) (ps []authz.Provider, problems []string, warnings []string) {
for _, c := range conns {
p, err := newAuthzProvider(c.URN, c.Authorization, c.P4Port, c.P4User, c.P4Passwd, c.Depots)
if err != nil {
problems = append(problems, err.Error())
} else if p != nil {
p := newAuthzProvider(c.URN, c.Authorization, c.P4Port, c.P4User, c.P4Passwd, c.Depots)
if p != nil {
ps = append(ps, p)
}
}
@ -36,9 +34,10 @@ func newAuthzProvider(
a *schema.PerforceAuthorization,
host, user, password string,
depots []string,
) (authz.Provider, error) {
) authz.Provider {
// Call this function from ValidateAuthz if this function starts returning an error.
if a == nil {
return nil, nil
return nil
}

var depotIDs []extsvc.RepoID
@ -54,12 +53,12 @@ func newAuthzProvider(
}
}

return NewProvider(urn, host, user, password, depotIDs), nil
return NewProvider(urn, host, user, password, depotIDs)
}

// ValidateAuthz validates the authorization fields of the given Perforce
// external service config.
func ValidateAuthz(cfg *schema.PerforceConnection) error {
_, err := newAuthzProvider("", cfg.Authorization, cfg.P4Port, cfg.P4User, cfg.P4Passwd, cfg.Depots)
return err
func ValidateAuthz(_ *schema.PerforceConnection) error {
// newAuthzProvider always succeeds, so directly return nil here.
return nil
}

@ -94,11 +94,11 @@ func (b *bulkProcessor) Process(ctx context.Context, job *btypes.ChangesetJob) (
case btypes.ChangesetJobTypeDetach:
return b.detach(ctx, job)
case btypes.ChangesetJobTypeReenqueue:
return b.reenqueueChangeset(ctx, job)
return b.reenqueueChangeset(ctx)
case btypes.ChangesetJobTypeMerge:
return b.mergeChangeset(ctx, job)
case btypes.ChangesetJobTypeClose:
return b.closeChangeset(ctx, job)
return b.closeChangeset(ctx)
case btypes.ChangesetJobTypePublish:
return b.publishChangeset(ctx, job)

@ -149,7 +149,7 @@ func (b *bulkProcessor) detach(ctx context.Context, job *btypes.ChangesetJob) er
return b.tx.EnqueueChangeset(ctx, b.ch, global.DefaultReconcilerEnqueueState(), "")
}

func (b *bulkProcessor) reenqueueChangeset(ctx context.Context, job *btypes.ChangesetJob) error {
func (b *bulkProcessor) reenqueueChangeset(ctx context.Context) error {
svc := service.New(b.tx)
_, _, err := svc.ReenqueueChangeset(ctx, b.ch.ID)
return err
@ -189,7 +189,7 @@ func (b *bulkProcessor) mergeChangeset(ctx context.Context, job *btypes.Changese
return nil
}

func (b *bulkProcessor) closeChangeset(ctx context.Context, job *btypes.ChangesetJob) (err error) {
func (b *bulkProcessor) closeChangeset(ctx context.Context) (err error) {
cs := &sources.Changeset{
Changeset: b.ch,
TargetRepo: b.repo,

@ -20,11 +20,6 @@ type parser struct {
allowErrors bool
}

// context holds settings active within a given scope during parsing.
type context struct {
field string // name of the field currently in scope (or "")
}

// Parse parses the input string and returns its parse tree. Returned errors are of
// type *ParseError, which includes the error position and message.
//
@ -38,8 +33,7 @@ type context struct {
func Parse(input string) (ParseTree, error) {
tokens := Scan(input)
p := parser{tokens: tokens}
ctx := context{field: ""}
exprs, err := p.parseExprList(ctx)
exprs, err := p.parseExprList()
if err != nil {
return nil, err
}
@ -51,8 +45,7 @@ func Parse(input string) (ParseTree, error) {
func ParseAllowingErrors(input string) ParseTree {
tokens := Scan(input)
p := parser{tokens: tokens, allowErrors: true}
ctx := context{field: ""}
exprs, err := p.parseExprList(ctx)
exprs, err := p.parseExprList()
if err != nil {
panic(fmt.Sprintf("(bug) error returned by parseExprList despite allowErrors=true (this should never happen): %v", err))
}
@ -85,7 +78,7 @@ func (p *parser) next() Token {
}

// exprList := {exprSign} | exprSign (sep exprSign)*
func (p *parser) parseExprList(ctx context) (exprList []*Expr, err error) {
func (p *parser) parseExprList() (exprList []*Expr, err error) {
if p.peek().Type == TokenEOF {
return nil, nil
}
@ -100,7 +93,7 @@ func (p *parser) parseExprList(ctx context) (exprList []*Expr, err error) {
continue
}

expr, err := p.parseExprSign(ctx)
expr, err := p.parseExprSign()
if err != nil {
return nil, err
}
@ -111,7 +104,7 @@ func (p *parser) parseExprList(ctx context) (exprList []*Expr, err error) {
}

// exprSign := {"-"} expr
func (p *parser) parseExprSign(ctx context) (*Expr, error) {
func (p *parser) parseExprSign() (*Expr, error) {
tok := p.next()
switch tok.Type {
case TokenMinus:
@ -121,7 +114,7 @@ func (p *parser) parseExprSign(ctx context) (*Expr, error) {
p.backup()
}

expr, err := p.parseExpr(ctx)
expr, err := p.parseExpr()
if err != nil {
return nil, err
}
@ -135,7 +128,7 @@ func (p *parser) parseExprSign(ctx context) (*Expr, error) {
}

// expr := exprField | lit | quoted | pattern
func (p *parser) parseExpr(ctx context) (*Expr, error) {
func (p *parser) parseExpr() (*Expr, error) {
tok := p.next()
switch tok.Type {
case TokenLiteral:

@ -2281,6 +2281,7 @@ func testBatchSpec(user int32) *btypes.BatchSpec {
}
}

//nolint:unparam // unparam complains that `extState` always has same value across call-sites, but that's OK
func testChangeset(repoID api.RepoID, batchChange int64, extState btypes.ChangesetExternalState) *btypes.Changeset {
changeset := &btypes.Changeset{
RepoID: repoID,

@ -33,10 +33,10 @@ func NewGitLabSource(svc *types.ExternalService, cf *httpcli.Factory) (*GitLabSo
if err := jsonc.Unmarshal(svc.Config, &c); err != nil {
return nil, errors.Errorf("external service id=%d config error: %s", svc.ID, err)
}
return newGitLabSource(&c, cf, nil)
return newGitLabSource(&c, cf)
}

func newGitLabSource(c *schema.GitLabConnection, cf *httpcli.Factory, au auth.Authenticator) (*GitLabSource, error) {
func newGitLabSource(c *schema.GitLabConnection, cf *httpcli.Factory) (*GitLabSource, error) {
baseURL, err := url.Parse(c.Url)
if err != nil {
return nil, err
@ -59,15 +59,13 @@ func newGitLabSource(c *schema.GitLabConnection, cf *httpcli.Factory, au auth.Au

// Don't modify passed-in parameter.
var authr auth.Authenticator
if au == nil && c.Token != "" {
if c.Token != "" {
switch c.TokenType {
case "oauth":
authr = &auth.OAuthBearerToken{Token: c.Token}
default:
authr = &gitlab.SudoableToken{Token: c.Token}
}
} else {
authr = au
}

provider := gitlab.NewClientProvider(baseURL, cli)

@ -696,7 +696,7 @@ func TestGitLabSource_ChangesetSource(t *testing.T) {

p := newGitLabChangesetSourceTestProvider(t)
p.changeset.Changeset.Metadata = mr
p.mockCreateComment(commentBody, mr, inner)
p.mockCreateComment(commentBody, inner)

have := p.source.CreateComment(p.ctx, p.changeset, commentBody)
if !errors.Is(have, inner) {
@ -709,7 +709,7 @@ func TestGitLabSource_ChangesetSource(t *testing.T) {

p := newGitLabChangesetSourceTestProvider(t)
p.changeset.Changeset.Metadata = mr
p.mockCreateComment(commentBody, mr, nil)
p.mockCreateComment(commentBody, nil)

if err := p.source.CreateComment(p.ctx, p.changeset, commentBody); err != nil {
t.Errorf("unexpected error: %+v", err)
@ -943,6 +943,7 @@ func (p *gitLabChangesetSourceTestProvider) mockGetMergeRequest(expected gitlab.
}
}

//nolint:unparam // unparam complains that `pageSize` always has same value across call-sites, but that's OK
func (p *gitLabChangesetSourceTestProvider) mockGetMergeRequestNotes(expectedIID gitlab.ID, notes []*gitlab.Note, pageSize int, err error) {
gitlab.MockGetMergeRequestNotes = func(client *gitlab.Client, ctx context.Context, project *gitlab.Project, iid gitlab.ID) func() ([]*gitlab.Note, error) {
p.testCommonParams(ctx, client, project)
@ -957,6 +958,7 @@ func (p *gitLabChangesetSourceTestProvider) mockGetMergeRequestNotes(expectedIID
}
}

//nolint:unparam // unparam complains that `pageSize` always has same value across call-sites, but that's OK
func (p *gitLabChangesetSourceTestProvider) mockGetMergeRequestResourceStateEvents(expectedIID gitlab.ID, events []*gitlab.ResourceStateEvent, pageSize int, err error) {
gitlab.MockGetMergeRequestResourceStateEvents = func(client *gitlab.Client, ctx context.Context, project *gitlab.Project, iid gitlab.ID) func() ([]*gitlab.ResourceStateEvent, error) {
p.testCommonParams(ctx, client, project)
@ -971,6 +973,7 @@ func (p *gitLabChangesetSourceTestProvider) mockGetMergeRequestResourceStateEven
}
}

//nolint:unparam // unparam complains that `pageSize` always has same value across call-sites, but that's OK
func (p *gitLabChangesetSourceTestProvider) mockGetMergeRequestPipelines(expectedIID gitlab.ID, pipelines []*gitlab.Pipeline, pageSize int, err error) {
gitlab.MockGetMergeRequestPipelines = func(client *gitlab.Client, ctx context.Context, project *gitlab.Project, iid gitlab.ID) func() ([]*gitlab.Pipeline, error) {
p.testCommonParams(ctx, client, project)
@ -1006,7 +1009,7 @@ func (p *gitLabChangesetSourceTestProvider) mockUpdateMergeRequest(expectedMR, u
}
}

func (p *gitLabChangesetSourceTestProvider) mockCreateComment(expected string, mr *gitlab.MergeRequest, err error) {
func (p *gitLabChangesetSourceTestProvider) mockCreateComment(expected string, err error) {
gitlab.MockCreateMergeRequestNote = func(client *gitlab.Client, ctx context.Context, project *gitlab.Project, mr *gitlab.MergeRequest, body string) error {
p.testCommonParams(ctx, client, project)
if expected != body {
@ -1099,7 +1102,7 @@ func paginatedPipelineIterator(pipelines []*gitlab.Pipeline, pageSize int) func(
func TestGitLabSource_WithAuthenticator(t *testing.T) {
t.Run("supported", func(t *testing.T) {
var src ChangesetSource
src, err := newGitLabSource(&schema.GitLabConnection{}, nil, nil)
src, err := newGitLabSource(&schema.GitLabConnection{}, nil)
if err != nil {
t.Errorf("unexpected non-nil error: %v", err)
}
@ -1123,7 +1126,7 @@ func TestGitLabSource_WithAuthenticator(t *testing.T) {
} {
t.Run(name, func(t *testing.T) {
var src ChangesetSource
src, err := newGitLabSource(&schema.GitLabConnection{}, nil, nil)
src, err := newGitLabSource(&schema.GitLabConnection{}, nil)
if err != nil {
t.Errorf("unexpected non-nil error: %v", err)
}
@ -1153,7 +1156,7 @@ func TestDecorateMergeRequestData(t *testing.T) {
src, err := newGitLabSource(&schema.GitLabConnection{
Url: "https://gitlab.com",
Token: os.Getenv("GITLAB_TOKEN"),
}, cf, nil)
}, cf)

assert.Nil(t, err)
return src

@ -36,11 +36,10 @@ func TestMain(m *testing.M) {
os.Exit(m.Run())
}

func newClientFactory(t testing.TB, name string, mws ...httpcli.Middleware) (*httpcli.Factory, func(testing.TB)) {
func newClientFactory(t testing.TB, name string) (*httpcli.Factory, func(testing.TB)) {
cassete := filepath.Join("testdata", "sources", strings.ReplaceAll(name, " ", "-"))
rec := newRecorder(t, cassete, update(name))
mws = append(mws, httpcli.GitHubProxyRedirectMiddleware, gitserverRedirectMiddleware)
mw := httpcli.NewMiddleware(mws...)
mw := httpcli.NewMiddleware(httpcli.GitHubProxyRedirectMiddleware, gitserverRedirectMiddleware)
return httpcli.NewFactory(mw, httptestutil.NewRecorderOpt(rec)),
func(t testing.TB) { save(t, rec) }
}

@ -108,7 +108,7 @@ func (s *sourcer) loadBatchesSource(ctx context.Context, tx SourcerStore, extern
if err != nil {
return nil, errors.Wrap(err, "loading external service")
}
css, err := buildChangesetSource(tx, s.cf, extSvc)
css, err := buildChangesetSource(s.cf, extSvc)
if err != nil {
return nil, errors.Wrap(err, "building changeset source")
}
@ -272,7 +272,7 @@ func loadExternalService(ctx context.Context, s database.ExternalServiceStore, o

// buildChangesetSource get an authenticated ChangesetSource for the given repo
// to load the changeset state from.
func buildChangesetSource(store SourcerStore, cf *httpcli.Factory, externalService *types.ExternalService) (ChangesetSource, error) {
func buildChangesetSource(cf *httpcli.Factory, externalService *types.ExternalService) (ChangesetSource, error) {
switch externalService.Kind {
case extsvc.KindGitHub:
return NewGithubSource(externalService, cf)

@ -1475,6 +1475,7 @@ func bbsChangeset(id int64, t time.Time) *btypes.Changeset {
}
}

//nolint:unparam // unparam complains that `id` always has same value across call-sites, but that's OK
func glChangeset(id int64, t time.Time) *btypes.Changeset {
return &btypes.Changeset{
ID: id,
@ -1549,6 +1550,7 @@ func ghReviewDismissed(id int64, t time.Time, login, reviewer string) *btypes.Ch
}
}

//nolint:unparam // unparam complains that `id` always has same value across call-sites, but that's OK
func ghReadyForReview(id int64, t time.Time, login string) *btypes.ChangesetEvent {
return &btypes.ChangesetEvent{
ChangesetID: id,
@ -1575,6 +1577,7 @@ func ghConvertToDraft(id int64, t time.Time, login string) *btypes.ChangesetEven
}
}

//nolint:unparam // unparam complains that `id` always has same value across call-sites, but that's OK
func glUnmarkWorkInProgress(id int64, t time.Time, login string) *btypes.ChangesetEvent {
return &btypes.ChangesetEvent{
ChangesetID: id,

@ -168,7 +168,7 @@ func TestComputeBitbucketBuildStatus(t *testing.T) {

now := timeutil.Now()
sha := "abcdef"
statusEvent := func(minutesSinceSync int, key, state string) *btypes.ChangesetEvent {
statusEvent := func(key, state string) *btypes.ChangesetEvent {
commit := &bitbucketserver.CommitStatus{
Commit: sha,
Status: bitbucketserver.BuildStatus{
@ -206,61 +206,61 @@ func TestComputeBitbucketBuildStatus(t *testing.T) {
{
name: "single success",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "SUCCESSFUL"),
statusEvent("ctx1", "SUCCESSFUL"),
},
want: btypes.ChangesetCheckStatePassed,
},
{
name: "single pending",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "INPROGRESS"),
statusEvent("ctx1", "INPROGRESS"),
},
want: btypes.ChangesetCheckStatePending,
},
{
name: "single error",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "FAILED"),
statusEvent("ctx1", "FAILED"),
},
want: btypes.ChangesetCheckStateFailed,
},
{
name: "pending + error",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "INPROGRESS"),
statusEvent(1, "ctx2", "FAILED"),
statusEvent("ctx1", "INPROGRESS"),
statusEvent("ctx2", "FAILED"),
},
want: btypes.ChangesetCheckStatePending,
},
{
name: "pending + success",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "INPROGRESS"),
statusEvent(1, "ctx2", "SUCCESSFUL"),
statusEvent("ctx1", "INPROGRESS"),
statusEvent("ctx2", "SUCCESSFUL"),
},
want: btypes.ChangesetCheckStatePending,
},
{
name: "success + error",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "SUCCESSFUL"),
statusEvent(1, "ctx2", "FAILED"),
statusEvent("ctx1", "SUCCESSFUL"),
statusEvent("ctx2", "FAILED"),
},
want: btypes.ChangesetCheckStateFailed,
},
{
name: "success x2",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "SUCCESSFUL"),
statusEvent(1, "ctx2", "SUCCESSFUL"),
statusEvent("ctx1", "SUCCESSFUL"),
statusEvent("ctx2", "SUCCESSFUL"),
},
want: btypes.ChangesetCheckStatePassed,
},
{
name: "later events have precedence",
events: []*btypes.ChangesetEvent{
statusEvent(1, "ctx1", "INPROGRESS"),
statusEvent(1, "ctx1", "SUCCESSFUL"),
statusEvent("ctx1", "INPROGRESS"),
statusEvent("ctx1", "SUCCESSFUL"),
},
want: btypes.ChangesetCheckStatePassed,
},
@ -833,6 +833,7 @@ func TestComputeLabels(t *testing.T) {
}
}

//nolint:unparam // unparam complains that `state` always has same value across call-sites, but that's OK
func bitbucketChangeset(updatedAt time.Time, state, reviewStatus string) *btypes.Changeset {
return &btypes.Changeset{
ExternalServiceType: extsvc.TypeBitbucketServer,

@ -668,6 +668,7 @@ func createWorkspaces(t *testing.T, ctx context.Context, s *Store) []*btypes.Bat
}

func workspacesIDs(t *testing.T, workspaces []*btypes.BatchSpecWorkspace) []int64 {
t.Helper()
ids := make([]int64, len(workspaces))
for i, w := range workspaces {
ids[i] = w.ID

@ -983,7 +983,7 @@ func testStoreChangesetSpecsCurrentState(t *testing.T, ctx context.Context, s *S
})
}

func testStoreChangesetSpecsCurrentStateAndTextSearch(t *testing.T, ctx context.Context, s *Store, clock ct.Clock) {
func testStoreChangesetSpecsCurrentStateAndTextSearch(t *testing.T, ctx context.Context, s *Store, _ ct.Clock) {
repoStore := database.ReposWith(s)
esStore := database.ExternalServicesWith(s)

@ -1007,16 +1007,16 @@ func testStoreChangesetSpecsCurrentStateAndTextSearch(t *testing.T, ctx context.

// Now we'll add three old and new pairs of changeset specs. Two will have
// matching statuses, and a different two will have matching names.
createChangesetSpecPair := func(t *testing.T, ctx context.Context, s *Store, oldBatchSpec, newBatchSpec *btypes.BatchSpec, opts ct.TestSpecOpts) (old, new *btypes.ChangesetSpec) {
createChangesetSpecPair := func(t *testing.T, ctx context.Context, s *Store, oldBatchSpec, newBatchSpec *btypes.BatchSpec, opts ct.TestSpecOpts) (old *btypes.ChangesetSpec) {
opts.BatchSpec = oldBatchSpec.ID
old = ct.CreateChangesetSpec(t, ctx, s, opts)

opts.BatchSpec = newBatchSpec.ID
new = ct.CreateChangesetSpec(t, ctx, s, opts)
_ = ct.CreateChangesetSpec(t, ctx, s, opts)

return old, new
return old
}
oldOpenFoo, _ := createChangesetSpecPair(t, ctx, s, oldBatchSpec, newBatchSpec, ct.TestSpecOpts{
oldOpenFoo := createChangesetSpecPair(t, ctx, s, oldBatchSpec, newBatchSpec, ct.TestSpecOpts{
User: user.ID,
Repo: repo.ID,
BatchSpec: oldBatchSpec.ID,
@ -1024,7 +1024,7 @@ func testStoreChangesetSpecsCurrentStateAndTextSearch(t *testing.T, ctx context.
Published: true,
HeadRef: "open-foo",
})
oldOpenBar, _ := createChangesetSpecPair(t, ctx, s, oldBatchSpec, newBatchSpec, ct.TestSpecOpts{
oldOpenBar := createChangesetSpecPair(t, ctx, s, oldBatchSpec, newBatchSpec, ct.TestSpecOpts{
User: user.ID,
Repo: repo.ID,
BatchSpec: oldBatchSpec.ID,
@ -1032,7 +1032,7 @@ func testStoreChangesetSpecsCurrentStateAndTextSearch(t *testing.T, ctx context.
Published: true,
HeadRef: "open-bar",
})
oldClosedFoo, _ := createChangesetSpecPair(t, ctx, s, oldBatchSpec, newBatchSpec, ct.TestSpecOpts{
oldClosedFoo := createChangesetSpecPair(t, ctx, s, oldBatchSpec, newBatchSpec, ct.TestSpecOpts{
User: user.ID,
Repo: repo.ID,
BatchSpec: oldBatchSpec.ID,

@ -146,7 +146,7 @@ func (s *batchSpecWorkspaceExecutionWorkerStore) markFinal(ctx context.Context,
return false, err
}

executionResults, stepResults, err := extractCacheEntries(ctx, events)
executionResults, stepResults, err := extractCacheEntries(events)
if err != nil {
return false, err
}
@ -225,7 +225,7 @@ func (s *batchSpecWorkspaceExecutionWorkerStore) MarkComplete(ctx context.Contex
return s.Store.MarkFailed(ctx, id, fmt.Sprintf(fmtStr, args...), options)
}

executionResults, stepResults, err := extractCacheEntries(ctx, events)
executionResults, stepResults, err := extractCacheEntries(events)
if err != nil {
return rollbackAndMarkFailed(err, fmt.Sprintf("failed to extract cache entries: %s", err))
}
@ -355,7 +355,7 @@ SET
WHERE id = %s
`

func extractCacheEntries(ctx context.Context, events []*batcheslib.LogEvent) (executionResults, stepResults []*btypes.BatchSpecExecutionCacheEntry, err error) {
func extractCacheEntries(events []*batcheslib.LogEvent) (executionResults, stepResults []*btypes.BatchSpecExecutionCacheEntry, err error) {
for _, e := range events {
switch m := e.Metadata.(type) {
case *batcheslib.CacheResultMetadata:

@ -302,6 +302,7 @@ func insertNearestUploads(t testing.TB, db *sql.DB, repositoryID int, uploads ma
}
}

//nolint:unparam // unparam complains that `repositoryID` always has same value across call-sites, but that's OK
func insertLinks(t testing.TB, db *sql.DB, repositoryID int, links map[string]commitgraph.LinkRelationship) {
if len(links) == 0 {
return
@ -336,6 +337,7 @@ func toCommitGraphView(uploads []Upload) *commitgraph.CommitGraphView {
return commitGraphView
}

//nolint:unparam // unparam complains that `repositoryID` always has same value across call-sites, but that's OK
func getVisibleUploads(t testing.TB, db *sql.DB, repositoryID int, commits []string) map[string][]int {
idsByCommit := map[string][]int{}
for _, commit := range commits {
@ -357,6 +359,7 @@ func getVisibleUploads(t testing.TB, db *sql.DB, repositoryID int, commits []str
return idsByCommit
}

//nolint:unparam // unparam complains that `repositoryID` always has same value across call-sites, but that's OK
func getUploadsVisibleAtTip(t testing.TB, db *sql.DB, repositoryID int) []int {
query := sqlf.Sprintf(
`SELECT upload_id FROM lsif_uploads_visible_at_tip WHERE repository_id = %s AND is_default_branch ORDER BY upload_id`,

@ -86,7 +86,7 @@ func (m *committedAtMigrator) handleSourcedCommits(ctx context.Context, tx *dbst
// come back to this and figure out how to batch these so we're not doing so many
// gitserver roundtrips on these kind of background tasks for code intelligence.
for _, commit := range sourcedCommits.Commits {
if err := m.handleCommit(ctx, tx, sourcedCommits.RepositoryID, sourcedCommits.RepositoryName, commit); err != nil {
if err := m.handleCommit(ctx, tx, sourcedCommits.RepositoryID, commit); err != nil {
return err
}
}
@ -99,7 +99,7 @@ func (m *committedAtMigrator) handleSourcedCommits(ctx context.Context, tx *dbst
return nil
}

func (m *committedAtMigrator) handleCommit(ctx context.Context, tx *dbstore.Store, repositoryID int, repositoryName, commit string) error {
func (m *committedAtMigrator) handleCommit(ctx context.Context, tx *dbstore.Store, repositoryID int, commit string) error {
_, commitDate, revisionExists, err := m.gitserverClient.CommitDate(ctx, repositoryID, commit)
if err != nil && !gitdomain.IsRepoNotExist(err) {
return errors.Wrap(err, "gitserver.CommitDate")

@ -22,7 +22,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/batch"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/timeutil"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/codeintel/lsif/protocol"
"github.com/sourcegraph/sourcegraph/lib/codeintel/precise"
@ -336,7 +335,7 @@ func (s *Store) WriteDocumentationSearch(
return errors.Wrap(err, "upsertTags")
}

if err := tx.replaceSearchRecords(ctx, upload, repositoryNameID, languageNameID, pages, tagIDs, tableSuffix, timeutil.Now()); err != nil {
if err := tx.replaceSearchRecords(ctx, upload, repositoryNameID, languageNameID, pages, tagIDs, tableSuffix); err != nil {
return errors.Wrap(err, "replaceSearchRecords")
}

@ -508,7 +507,6 @@ func (s *Store) replaceSearchRecords(
pages []*precise.DocumentationPageData,
tagIDs map[string]int,
tableSuffix string,
now time.Time,
) error {
tx, err := s.Transact(ctx)
if err != nil {

@ -46,7 +46,7 @@ func newTriggerQueryEnqueuer(ctx context.Context, store edb.CodeMonitorStore) go
return goroutine.NewPeriodicGoroutine(ctx, 1*time.Minute, enqueueActive)
}

func newTriggerQueryResetter(ctx context.Context, s edb.CodeMonitorStore, metrics codeMonitorsMetrics) *dbworker.Resetter {
func newTriggerQueryResetter(_ context.Context, s edb.CodeMonitorStore, metrics codeMonitorsMetrics) *dbworker.Resetter {
workerStore := createDBWorkerStoreForTriggerJobs(s)

options := dbworker.ResetterOptions{
@ -92,7 +92,7 @@ func newActionRunner(ctx context.Context, s edb.CodeMonitorStore, metrics codeMo
return worker
}

func newActionJobResetter(ctx context.Context, s edb.CodeMonitorStore, metrics codeMonitorsMetrics) *dbworker.Resetter {
func newActionJobResetter(_ context.Context, s edb.CodeMonitorStore, metrics codeMonitorsMetrics) *dbworker.Resetter {
workerStore := createDBWorkerStoreForActionJobs(s)

options := dbworker.ResetterOptions{

@ -50,7 +50,7 @@ const varAllowed = "abcdefghijklmnopqrstuvwxyzABCEDEFGHIJKLMNOPQRSTUVWXYZ1234567

// scanTemplate scans an input string to produce a Template. Recognized
// metavariable syntax is `$(varAllowed+)`.
func scanTemplate(buf []byte) (*Template, error) {
func scanTemplate(buf []byte) *Template {
// Tracks whether the current token is a variable.
var isVariable bool

@ -148,7 +148,7 @@ func scanTemplate(buf []byte) (*Template, error) {
}
}
t := Template(result)
return &t, nil
return &t
}

func toJSON(atom Atom) interface{} {
@ -202,11 +202,8 @@ var builtinVariables = map[string]struct{}{
"email": empty,
}

func templatize(pattern string) (string, error) {
t, err := scanTemplate([]byte(pattern))
if err != nil {
return "", err
}
func templatize(pattern string) string {
t := scanTemplate([]byte(pattern))
var templatized []string
for _, atom := range *t {
switch a := atom.(type) {
@ -223,14 +220,11 @@ func templatize(pattern string) (string, error) {
templatized = append(templatized, a.Name)
}
}
return strings.Join(templatized, ""), nil
return strings.Join(templatized, "")
}

func substituteMetaVariables(pattern string, env *MetaEnvironment) (string, error) {
templated, err := templatize(pattern)
if err != nil {
return "", err
}
templated := templatize(pattern)
t, err := template.New("").Parse(templated)
if err != nil {
return "", err

@ -9,10 +9,7 @@ import (

func Test_scanTemplate(t *testing.T) {
test := func(input string) string {
t, err := scanTemplate([]byte(input))
if err != nil {
return fmt.Sprintf("Error: %s", err)
}
t := scanTemplate([]byte(input))
return toJSONString(t)
}

@ -63,23 +60,15 @@ func Test_scanTemplate(t *testing.T) {
}

func Test_templatize(t *testing.T) {
test := func(input string) string {
t, err := templatize(input)
if err != nil {
return fmt.Sprintf("Error: %s", err)
}
return t
}

autogold.Want(
"basic templatize",
"artifcats: {{.Repo}}").
Equal(t, test("artifcats: $repo"))
Equal(t, templatize("artifcats: $repo"))

autogold.Want(
"exclude regex var in templatize",
"artifcats: {{.Repo}} $1").
Equal(t, test("artifcats: $repo $1"))
Equal(t, templatize("artifcats: $repo $1"))
}

func Test_substituteMetaVariables(t *testing.T) {

@ -12,9 +12,8 @@ import (

func TestEnqueueActionEmailsForQueryIDInt64QueryByRecordID(t *testing.T) {
ctx, db, s := newTestStore(t)
_, _, _, userCTX := newTestUser(ctx, t, db)
fixtures, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
_, _, userCTX := newTestUser(ctx, t, db)
fixtures := s.insertTestMonitor(userCTX, t)

triggerJobs, err := s.EnqueueQueryTriggerJobs(ctx)
require.NoError(t, err)
@ -42,9 +41,8 @@ func TestEnqueueActionEmailsForQueryIDInt64QueryByRecordID(t *testing.T) {

func TestGetActionJobMetadata(t *testing.T) {
ctx, db, s := newTestStore(t)
userName, _, _, userCTX := newTestUser(ctx, t, db)
fixtures, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
userName, _, userCTX := newTestUser(ctx, t, db)
fixtures := s.insertTestMonitor(userCTX, t)

triggerJobs, err := s.EnqueueQueryTriggerJobs(ctx)
require.NoError(t, err)
@ -77,9 +75,8 @@ func TestGetActionJobMetadata(t *testing.T) {

func TestScanActionJobs(t *testing.T) {
ctx, db, s := newTestStore(t)
_, _, _, userCTX := newTestUser(ctx, t, db)
fixtures, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
_, _, userCTX := newTestUser(ctx, t, db)
fixtures := s.insertTestMonitor(userCTX, t)

triggerJobs, err := s.EnqueueQueryTriggerJobs(ctx)
require.NoError(t, err)

@ -9,9 +9,8 @@ import (

func TestQueryTriggerForJob(t *testing.T) {
ctx, db, s := newTestStore(t)
_, _, _, userCTX := newTestUser(ctx, t, db)
fixtures, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
_, _, userCTX := newTestUser(ctx, t, db)
fixtures := s.insertTestMonitor(userCTX, t)

triggerJobs, err := s.EnqueueQueryTriggerJobs(ctx)
require.NoError(t, err)
@ -26,9 +25,8 @@ func TestQueryTriggerForJob(t *testing.T) {

func TestSetQueryTriggerNextRun(t *testing.T) {
ctx, db, s := newTestStore(t)
_, id, _, userCTX := newTestUser(ctx, t, db)
fixtures, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
_, id, userCTX := newTestUser(ctx, t, db)
fixtures := s.insertTestMonitor(userCTX, t)

triggerJobs, err := s.EnqueueQueryTriggerJobs(ctx)
require.NoError(t, err)
@ -60,11 +58,10 @@ func TestSetQueryTriggerNextRun(t *testing.T) {

func TestResetTriggerQueryTimestamps(t *testing.T) {
ctx, db, s := newTestStore(t)
_, _, _, userCTX := newTestUser(ctx, t, db)
fixtures, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
_, _, userCTX := newTestUser(ctx, t, db)
fixtures := s.insertTestMonitor(userCTX, t)

err = s.ResetQueryTriggerTimestamps(ctx, fixtures.query.ID)
err := s.ResetQueryTriggerTimestamps(ctx, fixtures.query.ID)
require.NoError(t, err)

got, err := s.GetQueryTriggerForMonitor(ctx, fixtures.monitor.ID)

@ -8,9 +8,8 @@ import (

func TestListRecipients(t *testing.T) {
ctx, db, s := newTestStore(t)
_, _, _, userCTX := newTestUser(ctx, t, db)
fixtures, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
_, _, userCTX := newTestUser(ctx, t, db)
fixtures := s.insertTestMonitor(userCTX, t)

rs, err := s.ListRecipients(ctx, ListRecipientsOpts{EmailID: &fixtures.emails[0].ID})
require.NoError(t, err)

@ -19,10 +19,9 @@ func TestCodeMonitorStoreSlackWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

action, err := s.CreateSlackWebhookAction(ctx, fixtures.monitor.ID, true, false, url1)
require.NoError(t, err)
@ -37,10 +36,9 @@ func TestCodeMonitorStoreSlackWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

action, err := s.CreateSlackWebhookAction(ctx, fixtures.monitor.ID, true, false, url1)
require.NoError(t, err)
@ -59,7 +57,7 @@ func TestCodeMonitorStoreSlackWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)

_, err := s.UpdateSlackWebhookAction(ctx, 383838, false, false, url2)
@ -70,10 +68,9 @@ func TestCodeMonitorStoreSlackWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

action1, err := s.CreateSlackWebhookAction(ctx, fixtures.monitor.ID, true, false, url1)
require.NoError(t, err)
@ -95,10 +92,9 @@ func TestCodeMonitorStoreSlackWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

count, err := s.CountSlackWebhookActions(ctx, fixtures.monitor.ID)
require.NoError(t, err)
@ -116,10 +112,9 @@ func TestCodeMonitorStoreSlackWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

actions, err := s.ListSlackWebhookActions(ctx, ListActionsOpts{MonitorID: &fixtures.monitor.ID})
require.NoError(t, err)

@ -16,7 +16,7 @@ type testFixtures struct {
recipients [2]*Recipient
}

func (s *codeMonitorStore) insertTestMonitor(ctx context.Context, t *testing.T) (*testFixtures, error) {
func (s *codeMonitorStore) insertTestMonitor(ctx context.Context, t *testing.T) *testFixtures {
t.Helper()

fixtures := testFixtures{}
@ -62,5 +62,5 @@ func (s *codeMonitorStore) insertTestMonitor(ctx context.Context, t *testing.T)
require.NoError(t, err)
// TODO(camdencheek): add other action types (webhooks) here
}
return &fixtures, nil
return &fixtures
}

@ -23,9 +23,8 @@ FROM cm_trigger_jobs;
func TestDeleteOldJobLogs(t *testing.T) {
retentionInDays := 7
ctx, db, s := newTestStore(t)
_, _, _, userCTX := newTestUser(ctx, t, db)
_, err := s.insertTestMonitor(userCTX, t)
require.NoError(t, err)
_, _, userCTX := newTestUser(ctx, t, db)
_ = s.insertTestMonitor(userCTX, t)

// Add 1 job and date it back to a long time ago.
triggerJobs, err := s.EnqueueQueryTriggerJobs(ctx)

@ -19,10 +19,9 @@ func TestCodeMonitorStoreWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

action, err := s.CreateWebhookAction(ctx, fixtures.monitor.ID, true, false, url1)
require.NoError(t, err)
@ -37,10 +36,9 @@ func TestCodeMonitorStoreWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

action, err := s.CreateWebhookAction(ctx, fixtures.monitor.ID, true, false, url1)
require.NoError(t, err)
@ -59,7 +57,7 @@ func TestCodeMonitorStoreWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)

_, err := s.UpdateWebhookAction(ctx, 383838, false, false, url2)
@ -70,10 +68,9 @@ func TestCodeMonitorStoreWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

action1, err := s.CreateWebhookAction(ctx, fixtures.monitor.ID, true, false, url1)
require.NoError(t, err)
@ -95,10 +92,9 @@ func TestCodeMonitorStoreWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

count, err := s.CountWebhookActions(ctx, fixtures.monitor.ID)
require.NoError(t, err)
@ -116,10 +112,9 @@ func TestCodeMonitorStoreWebhooks(t *testing.T) {
t.Parallel()

db := database.NewDB(dbtest.NewDB(t))
_, _, _, ctx := newTestUser(ctx, t, db)
_, _, ctx := newTestUser(ctx, t, db)
s := CodeMonitors(db)
fixtures, err := s.insertTestMonitor(ctx, t)
require.NoError(t, err)
fixtures := s.insertTestMonitor(ctx, t)

actions, err := s.ListWebhookActions(ctx, ListActionsOpts{MonitorID: &fixtures.monitor.ID})
require.NoError(t, err)

@ -247,14 +247,14 @@ func newTestStore(t *testing.T) (context.Context, dbutil.DB, *codeMonitorStore)
return ctx, db, CodeMonitorsWithClock(db, func() time.Time { return now })
}

func newTestUser(ctx context.Context, t *testing.T, db dbutil.DB) (name string, id int32, namespace graphql.ID, userContext context.Context) {
func newTestUser(ctx context.Context, t *testing.T, db dbutil.DB) (name string, id int32, userContext context.Context) {
t.Helper()

name = "cm-user1"
id = insertTestUser(ctx, t, db, name, true)
namespace = relay.MarshalID("User", id)
_ = relay.MarshalID("User", id)
ctx = actor.WithActor(ctx, actor.FromUser(id))
return name, id, namespace, ctx
return name, id, ctx
}

func insertTestUser(ctx context.Context, t *testing.T, db dbutil.DB, name string, isAdmin bool) (userID int32) {

@ -252,7 +252,7 @@ func (s *permsStore) SetUserPermissions(ctx context.Context, p *authz.UserPermis
var page *upsertRepoPermissionsPage
page, addQueue, removeQueue, hasNextPage = newUpsertRepoPermissionsPage(addQueue, removeQueue)

if q, err := upsertRepoPermissionsBatchQuery(page, allAdded, allRemoved, []uint32{uint32(p.UserID)}, p.Perm, updatedAt); err != nil {
if q, err := upsertRepoPermissionsBatchQuery(page, allAdded, []uint32{uint32(p.UserID)}, p.Perm, updatedAt); err != nil {
return err
} else if err = txs.execute(ctx, q); err != nil {
return errors.Wrap(err, "execute upsert repo permissions batch query")
@ -956,7 +956,7 @@ func (s *permsStore) GrantPendingPermissions(ctx context.Context, userID int32,
var page *upsertRepoPermissionsPage
page, addQueue, _, hasNextPage = newUpsertRepoPermissionsPage(addQueue, nil)

if q, err := upsertRepoPermissionsBatchQuery(page, allRepoIDs, nil, allUserIDs, p.Perm, updatedAt); err != nil {
if q, err := upsertRepoPermissionsBatchQuery(page, allRepoIDs, allUserIDs, p.Perm, updatedAt); err != nil {
return err
} else if err = txs.execute(ctx, q); err != nil {
return errors.Wrap(err, "execute upsert repo permissions batch query")
@ -1038,7 +1038,7 @@ func newUpsertRepoPermissionsPage(addQueue, removeQueue []uint32) (
// and deletion (for `removedRepoIDs`) of `userIDs` using upsert.
//
// Pages should be set up using the helper function `newUpsertRepoPermissionsPage`
func upsertRepoPermissionsBatchQuery(page *upsertRepoPermissionsPage, allAddedRepoIDs, allRemovedRepoIDs, userIDs []uint32, perm authz.Perms, updatedAt time.Time) (*sqlf.Query, error) {
func upsertRepoPermissionsBatchQuery(page *upsertRepoPermissionsPage, allAddedRepoIDs, userIDs []uint32, perm authz.Perms, updatedAt time.Time) (*sqlf.Query, error) {
// If changing the parameters used in this query, make sure to run relevant tests
// named `postgresParameterLimitTest` using "go test -slow-tests".
const format = `
@ -1730,6 +1730,7 @@ WHERE perms.repo_id IN
return m, nil
}

//nolint:unparam // unparam complains that `title` always has same value across call-sites, but that's OK
func (s *permsStore) observe(ctx context.Context, family, title string) (context.Context, func(*error, ...otlog.Field)) {
began := s.clock()
tr, ctx := trace.New(ctx, "database.PermsStore."+family, title)

@@ -22,7 +22,7 @@ import (

const schemaErrorPrefix = "insights oob migration schema error"

func getLangStatsInsights(ctx context.Context, settingsRow api.Settings) []insights.LangStatsInsight {
func getLangStatsInsights(settingsRow api.Settings) []insights.LangStatsInsight {
prefix := "codeStatsInsights."
var raw map[string]json.RawMessage
results := make([]insights.LangStatsInsight, 0)
@@ -48,7 +48,7 @@ func getLangStatsInsights(ctx context.Context, settingsRow api.Settings) []insig
return results
}

func getFrontendInsights(ctx context.Context, settingsRow api.Settings) []insights.SearchInsight {
func getFrontendInsights(settingsRow api.Settings) []insights.SearchInsight {
prefix := "searchInsights."
var raw map[string]json.RawMessage
results := make([]insights.SearchInsight, 0)
@@ -75,7 +75,7 @@ func getFrontendInsights(ctx context.Context, settingsRow api.Settings) []insigh
return results
}

func getBackendInsights(ctx context.Context, setting api.Settings) []insights.SearchInsight {
func getBackendInsights(setting api.Settings) []insights.SearchInsight {
prefix := "insights.allrepos"

results := make([]insights.SearchInsight, 0)
@@ -103,7 +103,7 @@ func getBackendInsights(ctx context.Context, setting api.Settings) []insights.Se
return results
}

func getDashboards(ctx context.Context, settingsRow api.Settings) []insights.SettingDashboard {
func getDashboards(settingsRow api.Settings) []insights.SettingDashboard {
prefix := "insights.dashboards"

results := make([]insights.SettingDashboard, 0)
@@ -206,9 +206,9 @@ func (m *migrator) performMigrationForRow(ctx context.Context, jobStoreTx *store
return nil
}

langStatsInsights := getLangStatsInsights(ctx, *settings)
frontendInsights := getFrontendInsights(ctx, *settings)
backendInsights := getBackendInsights(ctx, *settings)
langStatsInsights := getLangStatsInsights(*settings)
frontendInsights := getFrontendInsights(*settings)
backendInsights := getBackendInsights(*settings)

// here we are constructing a total set of all of the insights defined in this specific settings block. This will help guide us
// to understand which insights are created here, versus which are referenced from elsewhere. This will be useful for example
@@ -255,7 +255,7 @@ func (m *migrator) performMigrationForRow(ctx context.Context, jobStoreTx *store
}
}

dashboards := getDashboards(ctx, *settings)
dashboards := getDashboards(*settings)
totalDashboards := len(dashboards)
if totalDashboards != job.MigratedDashboards {
err = jobStoreTx.UpdateTotalDashboards(ctx, job.UserId, job.OrgId, totalDashboards)
@@ -318,23 +318,20 @@ func (m *migrator) createSpecialCaseDashboard(ctx context.Context, subjectName s
}
defer func() { err = tx.Store.Done(err) }()

created, _, err := m.createDashboard(ctx, tx, specialCaseDashboardTitle(subjectName), insightReferences, migration)
created, err := m.createDashboard(ctx, tx, specialCaseDashboardTitle(subjectName), insightReferences, migration)
if err != nil {
return nil, errors.Wrap(err, "CreateSpecialCaseDashboard")
}
return created, nil
}

func (m *migrator) createDashboard(ctx context.Context, tx *store.DBDashboardStore, title string, insightReferences []string, migration migrationContext) (_ *types.Dashboard, _ []string, err error) {
func (m *migrator) createDashboard(ctx context.Context, tx *store.DBDashboardStore, title string, insightReferences []string, migration migrationContext) (_ *types.Dashboard, err error) {
var mapped []string
var failed []string

for _, reference := range insightReferences {
id, exists, err := m.lookupUniqueId(ctx, migration, reference)
id, _, err := m.lookupUniqueId(ctx, migration, reference)
if err != nil {
return nil, nil, err
} else if !exists {
failed = append(failed, reference)
return nil, err
}
mapped = append(mapped, id)
}
@@ -358,10 +355,10 @@ func (m *migrator) createDashboard(ctx context.Context, tx *store.DBDashboardSto
OrgID: migration.orgIds,
})
if err != nil {
return nil, nil, errors.Wrap(err, "CreateDashboard")
return nil, errors.Wrap(err, "CreateDashboard")
}

return created, failed, nil
return created, nil
}

// migrationContext represents a context for which we are currently migrating. If we are migrating a user setting we would populate this with their
@@ -411,7 +408,7 @@ func (m *migrator) migrateDashboard(ctx context.Context, from insights.SettingDa
return nil
}

_, _, err = m.createDashboard(ctx, tx, from.Title, from.InsightIds, migrationContext)
_, err = m.createDashboard(ctx, tx, from.Title, from.InsightIds, migrationContext)
if err != nil {
return err
}
@@ -37,7 +37,7 @@ type dashboardConnectionResolver struct {
err error
}

func (d *dashboardConnectionResolver) compute(ctx context.Context) ([]*types.Dashboard, int64, error) {
func (d *dashboardConnectionResolver) compute(ctx context.Context) ([]*types.Dashboard, error) {
d.once.Do(func() {
args := store.DashboardQueryArgs{}
if d.args.After != nil {
@@ -80,11 +80,11 @@ func (d *dashboardConnectionResolver) compute(ctx context.Context) ([]*types.Das
}
}
})
return d.dashboards, d.next, d.err
return d.dashboards, d.err
}

func (d *dashboardConnectionResolver) Nodes(ctx context.Context) ([]graphqlbackend.InsightsDashboardResolver, error) {
dashboards, _, err := d.compute(ctx)
dashboards, err := d.compute(ctx)
if err != nil {
return nil, err
}
@@ -97,7 +97,7 @@ func (d *dashboardConnectionResolver) Nodes(ctx context.Context) ([]graphqlbacke
}

func (d *dashboardConnectionResolver) PageInfo(ctx context.Context) (*graphqlutil.PageInfo, error) {
_, _, err := d.compute(ctx)
_, err := d.compute(ctx)
if err != nil {
return nil, err
}
@@ -54,7 +54,7 @@ const (
)

func (s *DBSettingsMigrationJobsStore) GetNextSettingsMigrationJobs(ctx context.Context, jobType SettingsMigrationJobType) ([]*SettingsMigrationJob, error) {
where := getWhereForSubjectType(ctx, jobType)
where := getWhereForSubjectType(jobType)
q := sqlf.Sprintf(getSettingsMigrationJobsSql, where)
return scanSettingsMigrationJobs(s.Query(ctx, q))
}
@@ -97,7 +97,7 @@ func scanSettingsMigrationJobs(rows *sql.Rows, queryErr error) (_ []*SettingsMig
}

func (s *DBSettingsMigrationJobsStore) UpdateTotalInsights(ctx context.Context, userId *int, orgId *int, totalInsights int) error {
q := sqlf.Sprintf(updateTotalInsightsSql, totalInsights, getWhereForSubject(ctx, userId, orgId))
q := sqlf.Sprintf(updateTotalInsightsSql, totalInsights, getWhereForSubject(userId, orgId))
row := s.QueryRow(ctx, q)
if row.Err() != nil {
return row.Err()
@@ -111,7 +111,7 @@ UPDATE insights_settings_migration_jobs SET total_insights = %s WHERE %s
`

func (s *DBSettingsMigrationJobsStore) UpdateMigratedInsights(ctx context.Context, userId *int, orgId *int, migratedInsights int) error {
q := sqlf.Sprintf(updateMigratedInsightsSql, migratedInsights, getWhereForSubject(ctx, userId, orgId))
q := sqlf.Sprintf(updateMigratedInsightsSql, migratedInsights, getWhereForSubject(userId, orgId))
row := s.QueryRow(ctx, q)
if row.Err() != nil {
return row.Err()
@@ -125,7 +125,7 @@ UPDATE insights_settings_migration_jobs SET migrated_insights = %s WHERE %s
`

func (s *DBSettingsMigrationJobsStore) UpdateTotalDashboards(ctx context.Context, userId *int, orgId *int, totalDashboards int) error {
q := sqlf.Sprintf(updateTotalDashboardsSql, totalDashboards, getWhereForSubject(ctx, userId, orgId))
q := sqlf.Sprintf(updateTotalDashboardsSql, totalDashboards, getWhereForSubject(userId, orgId))
row := s.QueryRow(ctx, q)
if row.Err() != nil {
return row.Err()
@@ -139,7 +139,7 @@ UPDATE insights_settings_migration_jobs SET total_dashboards = %s WHERE %s
`

func (s *DBSettingsMigrationJobsStore) UpdateMigratedDashboards(ctx context.Context, userId *int, orgId *int, migratedDashboards int) error {
q := sqlf.Sprintf(updateMigratedDashboardsSql, migratedDashboards, getWhereForSubject(ctx, userId, orgId))
q := sqlf.Sprintf(updateMigratedDashboardsSql, migratedDashboards, getWhereForSubject(userId, orgId))
row := s.QueryRow(ctx, q)
if row.Err() != nil {
return row.Err()
@@ -153,7 +153,7 @@ UPDATE insights_settings_migration_jobs SET migrated_dashboards = %s WHERE %s
`

func (s *DBSettingsMigrationJobsStore) UpdateRuns(ctx context.Context, userId *int, orgId *int, runs int) error {
q := sqlf.Sprintf(updateRunsSql, runs, getWhereForSubject(ctx, userId, orgId))
q := sqlf.Sprintf(updateRunsSql, runs, getWhereForSubject(userId, orgId))
row := s.QueryRow(ctx, q)
if row.Err() != nil {
return row.Err()
@@ -167,7 +167,7 @@ UPDATE insights_settings_migration_jobs SET runs = %s WHERE %s
`

func (s *DBSettingsMigrationJobsStore) MarkCompleted(ctx context.Context, userId *int, orgId *int) error {
q := sqlf.Sprintf(markCompletedSql, s.Now(), getWhereForSubject(ctx, userId, orgId))
q := sqlf.Sprintf(markCompletedSql, s.Now(), getWhereForSubject(userId, orgId))
row := s.QueryRow(ctx, q)
if row.Err() != nil {
return row.Err()
@@ -191,7 +191,7 @@ SELECT COUNT(*) from insights_settings_migration_jobs;
`

func (s *DBSettingsMigrationJobsStore) IsJobTypeComplete(ctx context.Context, jobType SettingsMigrationJobType) (bool, error) {
where := getWhereForSubjectType(ctx, jobType)
where := getWhereForSubjectType(jobType)
q := sqlf.Sprintf(countIncompleteJobsSql, where)

count, _, err := basestore.ScanFirstInt(s.Query(ctx, q))
@@ -204,7 +204,7 @@ SELECT COUNT(*) FROM insights_settings_migration_jobs
WHERE %s AND completed_at IS NULL;
`

func getWhereForSubject(ctx context.Context, userId *int, orgId *int) *sqlf.Query {
func getWhereForSubject(userId *int, orgId *int) *sqlf.Query {
if userId != nil {
return sqlf.Sprintf("user_id = %s", *userId)
} else if orgId != nil {
@@ -214,7 +214,7 @@ func getWhereForSubject(ctx context.Context, userId *int, orgId *int) *sqlf.Quer
}
}

func getWhereForSubjectType(ctx context.Context, jobType SettingsMigrationJobType) *sqlf.Query {
func getWhereForSubjectType(jobType SettingsMigrationJobType) *sqlf.Query {
if jobType == UserJob {
return sqlf.Sprintf("user_id IS NOT NULL")
} else if jobType == OrgJob {
@@ -11,15 +11,15 @@ import (
// Resolve will populate the relationship fields of the registered issues and pull
// requests objects.
func Resolve(trackingIssues, issues []*Issue, pullRequests []*PullRequest) error {
linkPullRequestsAndIssues(trackingIssues, issues, pullRequests)
linkPullRequestsAndIssues(issues, pullRequests)
linkTrackingIssues(trackingIssues, issues, pullRequests)
return checkForCycles(trackingIssues, issues, pullRequests)
return checkForCycles(issues)
}

// linkPullRequestsAndIssues populates the LinkedPullRequests and LinkedIssues fields of
// each resolved issue and pull request value. A pull request and an issue are linked if
// the pull request body contains a reference to the issue number.
func linkPullRequestsAndIssues(trackingIssues, issues []*Issue, pullRequests []*PullRequest) {
func linkPullRequestsAndIssues(issues []*Issue, pullRequests []*PullRequest) {
for _, issue := range issues {
patterns := []*regexp.Regexp{
// TODO(efritz) - should probably match repository as well
@@ -70,7 +70,7 @@ func linkTrackingIssues(trackingIssues, issues []*Issue, pullRequests []*PullReq
// checkForCycles checks for a cycle over the tracked issues relationship in the set of resolved
// issues. We currently check this condition because the rendering pass does not check for cycles
// and can create an infinite loop.
func checkForCycles(trackingIssues, issues []*Issue, pullRequests []*PullRequest) error {
func checkForCycles(issues []*Issue) error {
for _, issue := range issues {
if !visitNode(issue, map[string]struct{}{}) {
// TODO(efritz) - we should try to proactively cut cycles
@@ -347,7 +347,7 @@ func MustValidateDefaults() {
}

// mustValidate panics if the configuration does not pass validation.
func mustValidate(name string, cfg conftypes.RawUnified) conftypes.RawUnified {
func mustValidate(name string, cfg conftypes.RawUnified) {
problems, err := Validate(cfg)
if err != nil {
panic(fmt.Sprintf("Error with %q: %s", name, err))
@@ -355,5 +355,4 @@ func mustValidate(name string, cfg conftypes.RawUnified) conftypes.RawUnified {
if len(problems) > 0 {
panic(fmt.Sprintf("conf: problems with default configuration for %q:\n %s", name, strings.Join(problems.Messages(), "\n ")))
}
return cfg
}
@@ -87,7 +87,7 @@ func TestIterateRepoGitserverStatus(t *testing.T) {

var noShardCount int
// Iterate again against repos with no shard
err = GitserverRepos(db).IterateRepoGitserverStatus(ctx, IterateRepoGitserverStatusOptions{OnlyWithoutShard: true}, func(repo types.RepoGitserverStatus) error {
err = GitserverRepos(db).IterateRepoGitserverStatus(ctx, IterateRepoGitserverStatusOptions{OnlyWithoutShard: true}, func(_ types.RepoGitserverStatus) error {
noShardCount++
return nil
})
@@ -172,13 +172,11 @@ func TestOrgs_GetOrgsWithRepositoriesByUserID(t *testing.T) {
return user
}

createOrgMember := func(ctx context.Context, db *sql.DB, userID int32, orgID int32) *types.OrgMembership {
member, err := OrgMembers(db).Create(ctx, orgID, userID)
createOrgMember := func(ctx context.Context, db *sql.DB, userID int32, orgID int32) {
_, err := OrgMembers(db).Create(ctx, orgID, userID)
if err != nil {
t.Fatal(err)
return nil
}
return member
}

t.Parallel()
@@ -58,6 +58,7 @@ func AuthzQueryConds(ctx context.Context, db DB) (*sqlf.Query, error) {
return q, nil
}

//nolint:unparam // unparam complains that `perms` always has same value across call-sites, but that's OK, as we only support read permissions right now.
func authzQuery(bypassAuthz, usePermissionsUserMapping bool, authenticatedUserID int32, perms authz.Perms) *sqlf.Query {
const queryFmtString = `(
%s -- TRUE or FALSE to indicate whether to bypass the check

@@ -115,7 +115,7 @@ func TestAuthzQueryConds(t *testing.T) {
},
{
name: "authenticated user is a site admin",
setup: func(t *testing.T) (context.Context, DB) {
setup: func(_ *testing.T) (context.Context, DB) {
users := NewMockUserStoreFrom(db.Users())
users.GetByCurrentAuthUserFunc.SetDefaultReturn(&types.User{ID: 1, SiteAdmin: true}, nil)
mockDB := NewMockDBFrom(db)
@@ -141,7 +141,7 @@ func TestAuthzQueryConds(t *testing.T) {
},
{
name: "authenticated user is not a site admin",
setup: func(t *testing.T) (context.Context, DB) {
setup: func(_ *testing.T) (context.Context, DB) {
users := NewMockUserStoreFrom(db.Users())
users.GetByCurrentAuthUserFunc.SetDefaultReturn(&types.User{ID: 1}, nil)
mockDB := NewMockDBFrom(db)
@@ -192,7 +192,7 @@ func idsToQueries(ids []int32) []*sqlf.Query {
return queries
}

func getSearchContextsQueryConditions(opts ListSearchContextsOptions) ([]*sqlf.Query, error) {
func getSearchContextsQueryConditions(opts ListSearchContextsOptions) []*sqlf.Query {
namespaceConds := []*sqlf.Query{}
if opts.NoNamespace {
namespaceConds = append(namespaceConds, sqlf.Sprintf("(sc.namespace_user_id IS NULL AND sc.namespace_org_id IS NULL)"))
@@ -223,7 +223,7 @@ func getSearchContextsQueryConditions(opts ListSearchContextsOptions) ([]*sqlf.Q
conds = append(conds, sqlf.Sprintf("1 = 1"))
}

return conds, nil
return conds
}

func (s *searchContextsStore) listSearchContexts(ctx context.Context, cond *sqlf.Query, orderBy *sqlf.Query, limit int32, offset int32) ([]*types.SearchContext, error) {
@@ -240,19 +240,13 @@ func (s *searchContextsStore) listSearchContexts(ctx context.Context, cond *sqlf
}

func (s *searchContextsStore) ListSearchContexts(ctx context.Context, pageOpts ListSearchContextsPageOptions, opts ListSearchContextsOptions) ([]*types.SearchContext, error) {
conds, err := getSearchContextsQueryConditions(opts)
if err != nil {
return nil, err
}
conds := getSearchContextsQueryConditions(opts)
orderBy := getSearchContextOrderByClause(opts.OrderBy, opts.OrderByDescending)
return s.listSearchContexts(ctx, sqlf.Join(conds, "\n AND "), orderBy, pageOpts.First, pageOpts.After)
}

func (s *searchContextsStore) CountSearchContexts(ctx context.Context, opts ListSearchContextsOptions) (int32, error) {
conds, err := getSearchContextsQueryConditions(opts)
if err != nil {
return -1, err
}
conds := getSearchContextsQueryConditions(opts)
permissionsCond, err := searchContextsPermissionsCondition(ctx, s.Handle().DB())
if err != nil {
return -1, err
@@ -126,7 +126,7 @@ func (o *settingsStore) GetLatest(ctx context.Context, subject api.SettingsSubje
if err != nil {
return nil, err
}
settings, err := parseQueryRows(ctx, rows)
settings, err := parseQueryRows(rows)
if err != nil {
return nil, err
}
@@ -193,10 +193,10 @@ func (o *settingsStore) ListAll(ctx context.Context, impreciseSubstring string)
if err != nil {
return nil, err
}
return parseQueryRows(ctx, rows)
return parseQueryRows(rows)
}

func parseQueryRows(ctx context.Context, rows *sql.Rows) ([]*api.Settings, error) {
func parseQueryRows(rows *sql.Rows) ([]*api.Settings, error) {
settings := []*api.Settings{}
defer rows.Close()
for rows.Next() {
@@ -100,7 +100,7 @@ type cachedRepo struct {

// getRepositoryFromCache attempts to get a response from the redis cache.
// It returns nil error for cache-hit condition and non-nil error for cache-miss.
func (c *Client) getRepositoryFromCache(ctx context.Context, key string) *cachedRepo {
func (c *Client) getRepositoryFromCache(_ context.Context, key string) *cachedRepo {
b, ok := c.repoCache.Get(key)
if !ok {
return nil

@@ -1733,7 +1733,7 @@ var GetRepositoryMock func(ctx context.Context, owner, name string) (*Repository

// cachedGetRepository caches the getRepositoryFromAPI call.
func (c *V3Client) cachedGetRepository(ctx context.Context, key string, getRepositoryFromAPI func(ctx context.Context) (repo *Repository, keys []string, err error)) (*Repository, error) {
if cached := c.getRepositoryFromCache(ctx, key); cached != nil {
if cached := c.getRepositoryFromCache(key); cached != nil {
reposGitHubCacheCounter.WithLabelValues("hit").Inc()
if cached.NotFound {
return nil, ErrRepoNotFound
@@ -140,6 +140,7 @@ func (c *V3Client) get(ctx context.Context, requestURI string, result interface{
return c.request(ctx, req, result)
}

//nolint:unparam // Return *httpResponseState for consistency with other methods
func (c *V3Client) post(ctx context.Context, requestURI string, payload, result interface{}) (*httpResponseState, error) {
body, err := json.Marshal(payload)
if err != nil {
@@ -510,7 +511,7 @@ func (c *V3Client) ListTeamMembers(ctx context.Context, owner, team string, page

// getRepositoryFromCache attempts to get a response from the redis cache.
// It returns nil error for cache-hit condition and non-nil error for cache-miss.
func (c *V3Client) getRepositoryFromCache(ctx context.Context, key string) *cachedRepo {
func (c *V3Client) getRepositoryFromCache(key string) *cachedRepo {
b, ok := c.repoCache.Get(strings.ToLower(key))
if !ok {
return nil

@@ -167,7 +167,7 @@ type cachedProj struct {

// getProjectFromCache attempts to get a response from the redis cache.
// It returns nil error for cache-hit condition and non-nil error for cache-miss.
func (c *Client) getProjectFromCache(ctx context.Context, key string) *cachedProj {
func (c *Client) getProjectFromCache(_ context.Context, key string) *cachedProj {
b, ok := c.projCache.Get(strings.ToLower(key))
if !ok {
return nil
@@ -1074,6 +1074,7 @@ func (c *ClientImplementor) httpPostWithURI(ctx context.Context, repo api.RepoNa
return c.do(ctx, repo, "POST", uri, b)
}

//nolint:unparam // unparam complains that `method` always has same value across call-sites, but that's OK
// do performs a request to a gitserver instance based on the address in the uri argument.
func (c *ClientImplementor) do(ctx context.Context, repo api.RepoName, method, uri string, payload []byte) (resp *http.Response, err error) {
parsedURL, err := url.ParseRequestURI(uri)

@@ -443,6 +443,7 @@ func TestExpJitterDelay(t *testing.T) {
}
}

//nolint:unparam // unparam complains that `code` always has same value across call-sites, but that's OK
func newFakeClient(code int, body []byte, err error) Doer {
return DoerFunc(func(r *http.Request) (*http.Response, error) {
rr := httptest.NewRecorder()
@@ -66,7 +66,7 @@ func TestGetLang_language(t *testing.T) {
lang, err := getLang(context.Background(),
test.file,
make([]byte, fileReadBufferSize),
makeFileReader(context.Background(), test.file.Path, test.file.Contents))
makeFileReader(test.file.Contents))
if err != nil {
t.Fatal(err)
}
@@ -77,7 +77,7 @@ func TestGetLang_language(t *testing.T) {
}
}

func makeFileReader(ctx context.Context, path, contents string) func(context.Context, string) (io.ReadCloser, error) {
func makeFileReader(contents string) func(context.Context, string) (io.ReadCloser, error) {
return func(ctx context.Context, path string) (io.ReadCloser, error) {
return io.NopCloser(strings.NewReader(contents)), nil
}
@@ -129,7 +129,7 @@ func TestGet_readFile(t *testing.T) {
}
for _, test := range tests {
t.Run(test.file.Name(), func(t *testing.T) {
fr := makeFileReader(context.Background(), test.file.(fi).Path, test.file.(fi).Contents)
fr := makeFileReader(test.file.(fi).Contents)
lang, err := getLang(context.Background(), test.file, make([]byte, fileReadBufferSize), fr)
if err != nil {
t.Fatal(err)
Some files were not shown because too many files have changed in this diff.