dotcom: Remove on-demand cloning of repositories (#63321)

Historically, sourcegraph.com has been the only instance. It was
connected to GitHub.com and GitLab.com only.
Configuration needed to be as simple as possible, and we wanted everyone to
be able to try it on any repo. So public repos were added on-demand when
browsed from these code hosts.

Since then, dotcom has no longer been the only instance, and on-demand
cloning is a special case that only exists for sourcegraph.com.
This causes a bunch of additional complexity and various extra code
paths that we don't test well enough today.

We want to make dotcom simpler to understand, so we've made the decision
to disable that feature, and instead we will maintain a list of
repositories that we have on the instance.
We already disallowed several repos half a year ago by heavily restricting
the maximum size of repos with few stars.
This change is basically just a continuation of that effort.

In the diff, you'll mostly find deletions. This PR does not do much
other than removing the code paths that were only enabled in dotcom mode
in the repo syncer, and then removes code that became unused as a result
of that.

## Test plan

Ran a dotcom-mode instance locally; it did not behave differently from a
regular instance with regard to repo cloning.
We will need to verify during the rollout that we're not suddenly
hitting code paths that don't scale to the dotcom size.

## Changelog

Dotcom no longer clones repos on demand.
This commit is contained in:
Erik Seliger 2024-06-26 14:53:14 -07:00 committed by GitHub
parent 94bf178504
commit 83d0f6876c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
96 changed files with 315 additions and 5227 deletions

View File

@ -44,17 +44,6 @@ export class RepoSeeOtherError extends Error {
}
}
const REPO_DENIED_ERROR_NAME = 'RepoDeniedError' as const
export class RepoDeniedError extends Error {
public readonly name = REPO_DENIED_ERROR_NAME
constructor(public readonly reason: string) {
super(`Repository could not be added on-demand: ${reason}`)
}
}
export const isRepoDeniedErrorLike = (value: unknown): value is RepoDeniedError =>
isErrorLike(value) && value.name === REPO_DENIED_ERROR_NAME
// Will work even for errors that came from GraphQL, background pages, comlink webworkers, etc.
// TODO remove error message assertion after https://github.com/sourcegraph/sourcegraph/issues/9697 and https://github.com/sourcegraph/sourcegraph/issues/9693 are fixed
/** Returns the redirect URL if the passed value is like a RepoSeeOtherError, otherwise `false`. */

View File

@ -1,7 +1,6 @@
import { useEffect } from 'react'
import AlertCircleIcon from 'mdi-react/AlertCircleIcon'
import AlertIcon from 'mdi-react/AlertIcon'
import SourceRepositoryIcon from 'mdi-react/SourceRepositoryIcon'
import type { ErrorLike } from '@sourcegraph/common'
@ -10,8 +9,6 @@ import {
isCloneInProgressErrorLike,
isRevisionNotFoundErrorLike,
isRepoNotFoundErrorLike,
isRepoDeniedErrorLike,
RepoDeniedError,
} from '@sourcegraph/shared/src/backend/errors'
import { RepoQuestionIcon } from '@sourcegraph/shared/src/components/icons'
import { displayRepoName } from '@sourcegraph/shared/src/components/RepoLink'
@ -47,12 +44,6 @@ export const RepoContainerError: React.FunctionComponent<React.PropsWithChildren
)
}
if (isRepoDeniedErrorLike(repoFetchError)) {
return (
<RepoDeniedPage repoFetchError={repoFetchError} repoName={repoName} telemetryRecorder={telemetryRecorder} />
)
}
if (isCloneInProgressErrorLike(repoFetchError)) {
return (
<CloneInProgressPage
@ -77,24 +68,6 @@ export const RepoContainerError: React.FunctionComponent<React.PropsWithChildren
return <OtherRepoErrorPage repoFetchError={repoFetchError} telemetryRecorder={telemetryRecorder} />
}
interface RepoDeniedPageProps extends TelemetryV2Props {
repoFetchError: RepoDeniedError
repoName: string
}
export const RepoDeniedPage: React.FunctionComponent<React.PropsWithChildren<RepoDeniedPageProps>> = props => {
const { repoName, repoFetchError, telemetryRecorder } = props
useEffect(() => telemetryRecorder.recordEvent('repo.error.repoDenied', 'view'), [telemetryRecorder])
return (
<HeroPage
icon={AlertIcon}
title={displayRepoName(repoName)}
body={<Text className="mt-4">Repository cannot be added on-demand: {repoFetchError.reason}.</Text>}
/>
)
}
export const CloneInProgressPage: React.FunctionComponent<React.PropsWithChildren<RepoContainerErrorProps>> = props => {
const { repoName, viewerCanAdminister, repoFetchError, telemetryRecorder } = props

View File

@ -7,7 +7,6 @@ import {
CloneInProgressError,
RepoNotFoundError,
RepoSeeOtherError,
RepoDeniedError,
RevisionNotFoundError,
} from '@sourcegraph/shared/src/backend/errors'
import {
@ -126,9 +125,6 @@ export const resolveRepoRevision = memoizeObservable(
{ repoName, revision: revision || '' }
).pipe(
map(({ data, errors }) => {
if (errors?.length === 1 && errors[0].extensions?.code === 'ErrRepoDenied') {
throw new RepoDeniedError(errors[0].message)
}
if (!data) {
throw createAggregateError(errors)
}

View File

@ -33,7 +33,6 @@ go_library(
"//internal/codeintel/dependencies",
"//internal/conf",
"//internal/database",
"//internal/database/dbcache",
"//internal/dotcom",
"//internal/encryption/keyring",
"//internal/env",
@ -56,12 +55,10 @@ go_library(
"//internal/rcache",
"//internal/repos",
"//internal/repoupdater",
"//internal/repoupdater/protocol",
"//internal/trace",
"//internal/txemail",
"//internal/txemail/txtypes",
"//internal/types",
"//internal/vcs",
"//lib/errors",
"//schema",
"@com_github_grafana_regexp//:regexp",
@ -101,7 +98,6 @@ go_test(
"//internal/dotcom",
"//internal/encryption",
"//internal/encryption/keyring",
"//internal/errcode",
"//internal/extsvc",
"//internal/extsvc/auth",
"//internal/extsvc/awscodecommit",
@ -116,11 +112,8 @@ go_test(
"//internal/fileutil",
"//internal/gitserver",
"//internal/gitserver/gitdomain",
"//internal/httpcli",
"//internal/rcache",
"//internal/repos",
"//internal/repoupdater",
"//internal/repoupdater/protocol",
"//internal/txemail",
"//internal/txemail/txtypes",
"//internal/types",

View File

@ -284,9 +284,8 @@ func TestExternalService_ListNamespaces(t *testing.T) {
}`
githubSource := types.ExternalService{
Kind: extsvc.KindGitHub,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(githubConnection),
Kind: extsvc.KindGitHub,
Config: extsvc.NewUnencryptedConfig(githubConnection),
}
gitlabConnection := `
@ -296,9 +295,8 @@ func TestExternalService_ListNamespaces(t *testing.T) {
}`
gitlabSource := types.ExternalService{
Kind: extsvc.KindGitLab,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(gitlabConnection),
Kind: extsvc.KindGitLab,
Config: extsvc.NewUnencryptedConfig(gitlabConnection),
}
githubOrg := &types.ExternalServiceNamespace{
@ -315,10 +313,9 @@ func TestExternalService_ListNamespaces(t *testing.T) {
}`
githubExternalService := types.ExternalService{
ID: 1,
Kind: extsvc.KindGitHub,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
ID: 1,
Kind: extsvc.KindGitHub,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
}
gitlabExternalServiceConfig := `
@ -329,10 +326,9 @@ func TestExternalService_ListNamespaces(t *testing.T) {
}`
gitlabExternalService := types.ExternalService{
ID: 2,
Kind: extsvc.KindGitLab,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(gitlabExternalServiceConfig),
ID: 2,
Kind: extsvc.KindGitLab,
Config: extsvc.NewUnencryptedConfig(gitlabExternalServiceConfig),
}
gitlabRepository := &types.Repo{
@ -468,10 +464,9 @@ func TestExternalService_DiscoverRepos(t *testing.T) {
}`
githubSource := types.ExternalService{
ID: 1,
Kind: extsvc.KindGitHub,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(githubConnection),
ID: 1,
Kind: extsvc.KindGitHub,
Config: extsvc.NewUnencryptedConfig(githubConnection),
}
gitlabConnection := `
@ -481,10 +476,9 @@ func TestExternalService_DiscoverRepos(t *testing.T) {
}`
gitlabSource := types.ExternalService{
ID: 2,
Kind: extsvc.KindGitLab,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(gitlabConnection),
ID: 2,
Kind: extsvc.KindGitLab,
Config: extsvc.NewUnencryptedConfig(gitlabConnection),
}
githubRepository := &types.Repo{
@ -530,10 +524,9 @@ func TestExternalService_DiscoverRepos(t *testing.T) {
}`
githubExternalService := types.ExternalService{
ID: 1,
Kind: extsvc.KindGitHub,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
ID: 1,
Kind: extsvc.KindGitHub,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
}
gitlabExternalServiceConfig := `
@ -544,10 +537,9 @@ func TestExternalService_DiscoverRepos(t *testing.T) {
}`
gitlabExternalService := types.ExternalService{
ID: 2,
Kind: extsvc.KindGitLab,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(gitlabExternalServiceConfig),
ID: 2,
Kind: extsvc.KindGitLab,
Config: extsvc.NewUnencryptedConfig(gitlabExternalServiceConfig),
}
var idDoesNotExist int64 = 99

View File

@ -3,30 +3,21 @@ package backend
import (
"context"
"fmt"
"github.com/sourcegraph/sourcegraph/internal/env"
"net/http"
"strconv"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/inventory"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbcache"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -41,10 +32,6 @@ type ReposService interface {
}
// NewRepos uses the provided `database.DB` to initialize a new RepoService.
//
// NOTE: The underlying cache is reused from Repos global variable to actually
// make cache be useful. This is mostly a workaround for now until we come up a
// more idiomatic solution.
func NewRepos(logger log.Logger, db database.DB, client gitserver.Client) ReposService {
repoStore := db.Repos()
logger = logger.Scoped("repos")
@ -53,7 +40,6 @@ func NewRepos(logger log.Logger, db database.DB, client gitserver.Client) ReposS
db: db,
gitserverClient: client,
store: repoStore,
cache: dbcache.NewIndexableReposLister(logger, repoStore),
}
}
@ -61,9 +47,7 @@ type repos struct {
logger log.Logger
db database.DB
gitserverClient gitserver.Client
cf httpcli.Doer
store database.RepoStore
cache *dbcache.IndexableReposLister
}
func (s *repos) Get(ctx context.Context, repo api.RepoID) (_ *types.Repo, err error) {
@ -77,8 +61,7 @@ func (s *repos) Get(ctx context.Context, repo api.RepoID) (_ *types.Repo, err er
return s.store.Get(ctx, repo)
}
// GetByName retrieves the repository with the given name. It will lazy sync a repo
// not yet present in the database under certain conditions. See repos.Syncer.SyncRepo.
// GetByName retrieves the repository with the given name.
func (s *repos) GetByName(ctx context.Context, name api.RepoName) (_ *types.Repo, err error) {
if Mocks.Repos.GetByName != nil {
return Mocks.Repos.GetByName(ctx, name)
@ -87,125 +70,7 @@ func (s *repos) GetByName(ctx context.Context, name api.RepoName) (_ *types.Repo
ctx, done := startTrace(ctx, "GetByName", name, &err)
defer done()
repo, err := s.store.GetByName(ctx, name)
if err == nil {
return repo, nil
}
if !errcode.IsNotFound(err) {
return nil, err
}
if errcode.IsNotFound(err) && !dotcom.SourcegraphDotComMode() {
// The repo doesn't exist and we're not on sourcegraph.com, we should not lazy
// clone it.
return nil, err
}
newName, err := s.addRepoToSourcegraphDotCom(ctx, name)
if err != nil {
return nil, err
}
return s.store.GetByName(ctx, newName)
}
// addRepoToSourcegraphDotCom adds the repository with the given name to the database by calling
// repo-updater when in sourcegraph.com mode. It's possible that the repo has
// been renamed on the code host in which case a different name may be returned.
// name is assumed to not exist as a repo in the database.
func (s *repos) addRepoToSourcegraphDotCom(ctx context.Context, name api.RepoName) (addedName api.RepoName, err error) {
ctx, done := startTrace(ctx, "Add", name, &err)
defer done()
// Avoid hitting repo-updater (and incurring a hit against our GitHub/etc. API rate
// limit) for repositories that don't exist or private repositories that people attempt to
// access.
codehost := extsvc.CodeHostOf(name, extsvc.PublicCodeHosts...)
if codehost == nil {
return "", &database.RepoNotFoundErr{Name: name}
}
// Verify repo exists and is cloneable publicly before continuing to put load
// on repo-updater.
// For package hosts, we have no good metric to figure this out at the moment.
if !codehost.IsPackageHost() {
if err := s.isGitRepoPubliclyCloneable(ctx, name); err != nil {
return "", err
}
}
// Looking up the repo in repo-updater makes it sync that repo to the
// database on sourcegraph.com if that repo is from github.com or gitlab.com
lookupResult, err := repoupdater.DefaultClient.RepoLookup(ctx, protocol.RepoLookupArgs{Repo: name})
if lookupResult != nil && lookupResult.Repo != nil {
return lookupResult.Repo.Name, err
}
return "", err
}
var metricIsRepoCloneable = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "src_frontend_repo_add_is_cloneable",
Help: "temporary metric to measure if this codepath is valuable on sourcegraph.com",
}, []string{"status"})
// isGitRepoPubliclyCloneable checks if a git repo with the given name would be
// cloneable without auth - ie. if sourcegraph.com could clone it with a cloud_default
// external service. This is explicitly without any auth, so we don't consume
// any API rate limit, since many users visit private or bogus repos.
// We deduce the unauthenticated clone URL from the repo name by simply adding .git
// to it.
// Name is verified by the caller to be for either of our public cloud default
// hosts.
func (s *repos) isGitRepoPubliclyCloneable(ctx context.Context, name api.RepoName) error {
// This is on the request path, don't block for too long if upstream is struggling.
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
status := "unknown"
defer func() {
metricIsRepoCloneable.WithLabelValues(status).Inc()
}()
// Speak git smart protocol to check if repo exists without cloning.
remoteURL, err := vcs.ParseURL("https://" + string(name) + ".git/info/refs?service=git-upload-pack")
if err != nil {
// No idea how to construct a remote URL for this repo, bail.
return &database.RepoNotFoundErr{Name: api.RepoName(name)}
}
req, err := http.NewRequestWithContext(ctx, http.MethodGet, remoteURL.String(), nil)
if err != nil {
return errors.Wrap(err, "failed to construct request to check if repository exists")
}
cf := httpcli.ExternalDoer
if s.cf != nil {
cf = s.cf
}
resp, err := cf.Do(req)
if err != nil {
return errors.Wrap(err, "failed to check if repository exists")
}
// No interest in the response body.
_ = resp.Body.Close()
if resp.StatusCode != http.StatusOK {
if ctx.Err() != nil {
status = "timeout"
} else {
status = "fail"
}
// Not cloneable without auth.
return &database.RepoNotFoundErr{Name: api.RepoName(name)}
}
status = "success"
return nil
return s.store.GetByName(ctx, name)
}
func (s *repos) List(ctx context.Context, opt database.ReposListOptions) (repos []*types.Repo, err error) {
@ -242,10 +107,6 @@ func (s *repos) ListIndexable(ctx context.Context) (repos []types.MinimalRepo, e
done()
}()
if dotcom.SourcegraphDotComMode() {
return s.cache.List(ctx)
}
return s.store.ListMinimalRepos(ctx, database.ReposListOptions{
OnlyCloned: true,
})

View File

@ -7,9 +7,7 @@ import (
"fmt"
"io"
"io/fs"
"net/http"
"os"
"strings"
"testing"
mockrequire "github.com/derision-test/go-mockgen/v2/testutil/require"
@ -23,14 +21,10 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/fileutil"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/internal/types"
)
@ -67,82 +61,6 @@ func TestReposService_List(t *testing.T) {
require.Equal(t, wantRepos, repos)
}
func TestRepos_AddRepoToSourcegraphDotCom(t *testing.T) {
var s repos
ctx := testContext()
const repoName = "github.com/my/repo"
const newName = "github.com/my/repo2"
calledRepoLookup := false
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
calledRepoLookup = true
if args.Repo != repoName {
t.Errorf("got %q, want %q", args.Repo, repoName)
}
return &protocol.RepoLookupResult{
Repo: &protocol.RepoInfo{Name: newName, Description: "d"},
}, nil
}
defer func() { repoupdater.MockRepoLookup = nil }()
// The repoName could change if it has been renamed on the code host
s = repos{
logger: logtest.Scoped(t),
cf: httpcli.DoerFunc(func(r *http.Request) (*http.Response, error) {
require.Equal(t, "https://github.com/my/repo.git/info/refs?service=git-upload-pack", r.URL.String())
return &http.Response{StatusCode: 200, Body: io.NopCloser(strings.NewReader(""))}, nil
}),
}
addedName, err := s.addRepoToSourcegraphDotCom(ctx, repoName)
if err != nil {
t.Fatal(err)
}
if addedName != newName {
t.Fatalf("Want %q, got %q", newName, addedName)
}
if !calledRepoLookup {
t.Error("!calledRepoLookup")
}
// Verify that non 200 codes return the right error.
s = repos{
logger: logtest.Scoped(t),
cf: httpcli.DoerFunc(func(r *http.Request) (*http.Response, error) {
require.Equal(t, "https://github.com/my/repo.git/info/refs?service=git-upload-pack", r.URL.String())
return &http.Response{StatusCode: 401, Body: io.NopCloser(strings.NewReader(""))}, nil
}),
}
_, err = s.addRepoToSourcegraphDotCom(ctx, repoName)
require.Error(t, err)
require.IsType(t, &database.RepoNotFoundErr{}, err)
}
func TestRepos_Add_NonPublicCodehosts(t *testing.T) {
var s repos
ctx := testContext()
const repoName = "github.private.corp/my/repo"
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
t.Fatal("unexpected call to repo-updater for non public code host")
return nil, nil
}
defer func() { repoupdater.MockRepoLookup = nil }()
gitserver.MockIsRepoCloneable = func(name api.RepoName) error {
t.Fatal("unexpected call to gitserver for non public code host")
return nil
}
defer func() { gitserver.MockIsRepoCloneable = nil }()
// The repoName could change if it has been renamed on the code host
_, err := s.addRepoToSourcegraphDotCom(ctx, repoName)
if !errcode.IsNotFound(err) {
t.Fatalf("expected a not found error, got: %v", err)
}
}
type gitObjectInfo string
func (oid gitObjectInfo) OID() gitdomain.OID {
@ -160,13 +78,6 @@ func TestReposGetInventory(t *testing.T) {
wantRootOID = "oid-root"
)
gitserverClient := gitserver.NewMockClient()
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
if args.Repo != wantRepoName {
t.Errorf("got %q, want %q", args.Repo, wantRepoName)
}
return &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{Name: wantRepoName}}, nil
}
defer func() { repoupdater.MockRepoLookup = nil }()
gitserverClient.StatFunc.SetDefaultHook(func(_ context.Context, _ api.RepoName, commit api.CommitID, path string) (fs.FileInfo, error) {
if commit != wantCommitID {
t.Errorf("got commit %q, want %q", commit, wantCommitID)

View File

@ -10,8 +10,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -22,18 +20,7 @@ func TestRepos_ResolveRev_noRevSpecified_getsDefaultBranch(t *testing.T) {
const wantRepo = "a"
want := strings.Repeat("a", 40)
calledRepoLookup := false
client := gitserver.NewMockClient()
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
calledRepoLookup = true
if args.Repo != wantRepo {
t.Errorf("got %q, want %q", args.Repo, wantRepo)
}
return &protocol.RepoLookupResult{
Repo: &protocol.RepoInfo{Name: wantRepo},
}, nil
}
defer func() { repoupdater.MockRepoLookup = nil }()
var calledVCSRepoResolveRevision bool
client.ResolveRevisionFunc.SetDefaultHook(func(context.Context, api.RepoName, string, gitserver.ResolveRevisionOptions) (api.CommitID, error) {
calledVCSRepoResolveRevision = true
@ -45,9 +32,6 @@ func TestRepos_ResolveRev_noRevSpecified_getsDefaultBranch(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if calledRepoLookup {
t.Error("calledRepoLookup")
}
if !calledVCSRepoResolveRevision {
t.Error("!calledVCSRepoResolveRevision")
}
@ -63,17 +47,6 @@ func TestRepos_ResolveRev_noCommitIDSpecified_resolvesRev(t *testing.T) {
const wantRepo = "a"
want := strings.Repeat("a", 40)
calledRepoLookup := false
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
calledRepoLookup = true
if args.Repo != wantRepo {
t.Errorf("got %q, want %q", args.Repo, wantRepo)
}
return &protocol.RepoLookupResult{
Repo: &protocol.RepoInfo{Name: wantRepo},
}, nil
}
defer func() { repoupdater.MockRepoLookup = nil }()
var calledVCSRepoResolveRevision bool
client := gitserver.NewMockClient()
client.ResolveRevisionFunc.SetDefaultHook(func(context.Context, api.RepoName, string, gitserver.ResolveRevisionOptions) (api.CommitID, error) {
@ -85,9 +58,6 @@ func TestRepos_ResolveRev_noCommitIDSpecified_resolvesRev(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if calledRepoLookup {
t.Error("calledRepoLookup")
}
if !calledVCSRepoResolveRevision {
t.Error("!calledVCSRepoResolveRevision")
}
@ -103,17 +73,6 @@ func TestRepos_ResolveRev_commitIDSpecified_resolvesCommitID(t *testing.T) {
const wantRepo = "a"
want := strings.Repeat("a", 40)
calledRepoLookup := false
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
calledRepoLookup = true
if args.Repo != wantRepo {
t.Errorf("got %q, want %q", args.Repo, wantRepo)
}
return &protocol.RepoLookupResult{
Repo: &protocol.RepoInfo{Name: wantRepo},
}, nil
}
defer func() { repoupdater.MockRepoLookup = nil }()
var calledVCSRepoResolveRevision bool
client := gitserver.NewMockClient()
client.ResolveRevisionFunc.SetDefaultHook(func(context.Context, api.RepoName, string, gitserver.ResolveRevisionOptions) (api.CommitID, error) {
@ -125,9 +84,6 @@ func TestRepos_ResolveRev_commitIDSpecified_resolvesCommitID(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if calledRepoLookup {
t.Error("calledRepoLookup")
}
if !calledVCSRepoResolveRevision {
t.Error("!calledVCSRepoResolveRevision")
}
@ -143,17 +99,6 @@ func TestRepos_ResolveRev_commitIDSpecified_failsToResolve(t *testing.T) {
const wantRepo = "a"
want := errors.New("x")
calledRepoLookup := false
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
calledRepoLookup = true
if args.Repo != wantRepo {
t.Errorf("got %q, want %q", args.Repo, wantRepo)
}
return &protocol.RepoLookupResult{
Repo: &protocol.RepoInfo{Name: wantRepo},
}, nil
}
defer func() { repoupdater.MockRepoLookup = nil }()
var calledVCSRepoResolveRevision bool
client := gitserver.NewMockClient()
client.ResolveRevisionFunc.SetDefaultHook(func(context.Context, api.RepoName, string, gitserver.ResolveRevisionOptions) (api.CommitID, error) {
@ -165,9 +110,6 @@ func TestRepos_ResolveRev_commitIDSpecified_failsToResolve(t *testing.T) {
if !errors.Is(err, want) {
t.Fatalf("got err %v, want %v", err, want)
}
if calledRepoLookup {
t.Error("calledRepoLookup")
}
if !calledVCSRepoResolveRevision {
t.Error("!calledVCSRepoResolveRevision")
}

View File

@ -1224,10 +1224,9 @@ func TestExternalServiceNamespaces(t *testing.T) {
}`
githubExternalService := types.ExternalService{
ID: 1,
Kind: extsvc.KindGitHub,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
ID: 1,
Kind: extsvc.KindGitHub,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
}
id := relay.MarshalID("ExternalServiceNamespace", namespace)
@ -1515,10 +1514,9 @@ func TestExternalServiceRepositories(t *testing.T) {
}`
githubExternalService := types.ExternalService{
ID: 1,
Kind: extsvc.KindGitHub,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
ID: 1,
Kind: extsvc.KindGitHub,
Config: extsvc.NewUnencryptedConfig(githubExternalServiceConfig),
}
externalServiceGraphqlID := MarshalExternalServiceID(githubExternalService.ID)

View File

@ -960,26 +960,11 @@ func (r *schemaResolver) RepositoryRedirect(ctx context.Context, args *repositor
if errcode.IsNotFound(err) {
return nil, nil
}
if errcode.IsRepoDenied(err) {
return nil, repositoryDeniedError{err}
}
return nil, err
}
return &repositoryRedirect{repo: NewRepositoryResolver(r.db, r.gitserverClient, repo)}, nil
}
type repositoryDeniedError struct {
error
}
func (r repositoryDeniedError) Error() string {
return r.error.Error()
}
func (r repositoryDeniedError) Extensions() map[string]any {
return map[string]any{"code": "ErrRepoDenied"}
}
func (r *schemaResolver) PhabricatorRepo(ctx context.Context, args *struct {
Name *string
// TODO(chris): Remove URI in favor of Name.

View File

@ -251,10 +251,6 @@ func newCommon(w http.ResponseWriter, r *http.Request, db database.DB, title str
dangerouslyServeError(w, r, db, errors.New("repository could not be cloned"), http.StatusInternalServerError)
return nil, nil
}
if errcode.IsRepoDenied(err) {
serveError(w, r, db, err, http.StatusNotFound)
return nil, nil
}
if gitdomain.IsRepoNotExist(err) {
if gitdomain.IsCloneInProgress(err) {
// Repo is cloning.

View File

@ -127,7 +127,6 @@ func TestChangesetCountsOverTimeIntegration(t *testing.T) {
Name: githubRepo.Name,
VCS: protocol.VCSInfo{URL: githubRepo.URI},
})
defer mockState.Unmock()
bstore := store.New(db, observation.TestContextTB(t), nil)

View File

@ -167,7 +167,7 @@ func (h *BitbucketCloudWebhook) parseEvent(r *http.Request) (interface{}, *types
}
}
if extSvc == nil || err != nil {
if extSvc == nil {
return nil, nil, &httpError{http.StatusUnauthorized, err}
}

View File

@ -206,11 +206,10 @@ func testBitbucketCloudWebhook(db *sql.DB) func(*testing.T) {
// Set up mocks to prevent the diffstat computation from trying to
// use a real gitserver, and so we can control what diff is used to
// create the diffstat.
state := bt.MockChangesetSyncState(&protocol.RepoInfo{
bt.MockChangesetSyncState(&protocol.RepoInfo{
Name: "repo",
VCS: protocol.VCSInfo{URL: "https://example.com/repo/"},
})
defer state.Unmock()
u, err := extsvc.WebhookURL(extsvc.KindBitbucketCloud, es.ID, cfg, esURL)
assert.Nil(t, err)

View File

@ -156,11 +156,10 @@ func testBitbucketServerWebhook(db database.DB, userID int32) func(*testing.T) {
// Set up mocks to prevent the diffstat computation from trying to
// use a real gitserver, and so we can control what diff is used to
// create the diffstat.
state := bt.MockChangesetSyncState(&protocol.RepoInfo{
bt.MockChangesetSyncState(&protocol.RepoInfo{
Name: "repo",
VCS: protocol.VCSInfo{URL: "https://example.com/repo/"},
})
defer state.Unmock()
gsClient := gitserver.NewMockClient()
for _, ch := range changesets {

View File

@ -150,11 +150,10 @@ func testGitHubWebhook(db database.DB, userID int32) func(*testing.T) {
// Set up mocks to prevent the diffstat computation from trying to
// use a real gitserver, and so we can control what diff is used to
// create the diffstat.
state := bt.MockChangesetSyncState(&protocol.RepoInfo{
bt.MockChangesetSyncState(&protocol.RepoInfo{
Name: "repo",
VCS: protocol.VCSInfo{URL: "https://example.com/repo/"},
})
defer state.Unmock()
src, err := sourcer.ForChangeset(ctx, s, changeset, sources.AuthenticationStrategyUserCredential, githubRepo)
if err != nil {

View File

@ -35,7 +35,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/symbols"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/schema"
)
func printConfigValidation(logger log.Logger) {
@ -265,30 +264,10 @@ func overrideExtSvcConfig(ctx context.Context, logger log.Logger, db database.DB
return false, errors.Wrapf(err, "marshaling extsvc config ([%v][%v])", key, i)
}
// When overriding external service config from a file we allow setting the value
// of the cloud_default column.
var cloudDefault bool
switch key {
case extsvc.KindGitHub:
var c schema.GitHubConnection
if err = json.Unmarshal(marshaledCfg, &c); err != nil {
return false, err
}
cloudDefault = c.CloudDefault
case extsvc.KindGitLab:
var c schema.GitLabConnection
if err = json.Unmarshal(marshaledCfg, &c); err != nil {
return false, err
}
cloudDefault = c.CloudDefault
}
toAdd[&types.ExternalService{
Kind: key,
DisplayName: fmt.Sprintf("%s #%d", key, i+1),
Config: extsvc.NewUnencryptedConfig(string(marshaledCfg)),
CloudDefault: cloudDefault,
Kind: key,
DisplayName: fmt.Sprintf("%s #%d", key, i+1),
Config: extsvc.NewUnencryptedConfig(string(marshaledCfg)),
}] = true
}
}
@ -364,7 +343,7 @@ func overrideExtSvcConfig(ctx context.Context, logger log.Logger, db database.DB
if err != nil {
return false, err
}
update := &database.ExternalServiceUpdate{DisplayName: &extSvc.DisplayName, Config: &rawConfig, CloudDefault: &extSvc.CloudDefault}
update := &database.ExternalServiceUpdate{DisplayName: &extSvc.DisplayName, Config: &rawConfig}
if err := extsvcs.Update(ctx, ps, id, update); err != nil {
return false, errors.Wrap(err, "ExternalServices.Update")

View File

@ -10,7 +10,6 @@ go_library(
"//internal/api",
"//internal/conf/reposource",
"//internal/database",
"//internal/dotcom",
"//internal/extsvc",
"//internal/trace",
"//internal/types",

View File

@ -10,7 +10,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
@ -57,13 +56,6 @@ func RepoSourceCloneURLToRepoName(ctx context.Context, db database.DB, cloneURL
},
}
if dotcom.SourcegraphDotComMode() {
// We want to check these first as they'll be able to decode the majority of
// repos. If our cloud_default services are unable to decode the clone url then
// we fall back to going through all services until we find a match.
opt.OnlyCloudDefault = true
}
for {
svcs, err := db.ExternalServices().List(ctx, opt)
if err != nil {
@ -84,12 +76,6 @@ func RepoSourceCloneURLToRepoName(ctx context.Context, db database.DB, cloneURL
}
}
if opt.OnlyCloudDefault {
// Try again without narrowing down to cloud_default external services
opt.OnlyCloudDefault = false
continue
}
if len(svcs) < opt.Limit {
break // Less results than limit means we've reached end
}

View File

@ -73,7 +73,6 @@ type RedactedExternalService struct {
LastSyncAt time.Time
NextSyncAt time.Time
Unrestricted bool
CloudDefault bool
HasWebhooks *bool
TokenExpiresAt *time.Time
}
@ -94,7 +93,6 @@ func convertExtSvcToRedacted(ctx context.Context, extSvc *types.ExternalService)
LastSyncAt: extSvc.LastSyncAt,
NextSyncAt: extSvc.NextSyncAt,
Unrestricted: extSvc.Unrestricted,
CloudDefault: extSvc.CloudDefault,
HasWebhooks: extSvc.HasWebhooks,
TokenExpiresAt: extSvc.TokenExpiresAt,
}, nil

View File

@ -122,7 +122,6 @@ const (
"LastSyncAt": "0001-01-01T00:00:00Z",
"NextSyncAt": "0001-01-01T00:00:00Z",
"Unrestricted": false,
"CloudDefault": false,
"HasWebhooks": null,
"TokenExpiresAt": null
},
@ -151,7 +150,6 @@ const (
"LastSyncAt": "0001-01-01T00:00:00Z",
"NextSyncAt": "0001-01-01T00:00:00Z",
"Unrestricted": false,
"CloudDefault": false,
"HasWebhooks": null,
"TokenExpiresAt": null
},
@ -175,7 +173,6 @@ const (
"LastSyncAt": "0001-01-01T00:00:00Z",
"NextSyncAt": "0001-01-01T00:00:00Z",
"Unrestricted": false,
"CloudDefault": false,
"HasWebhooks": null,
"TokenExpiresAt": null
}

View File

@ -44,8 +44,6 @@ go_library(
"//internal/api",
"//internal/conf",
"//internal/database",
"//internal/diskusage",
"//internal/dotcom",
"//internal/env",
"//internal/errcode",
"//internal/extsvc/gitolite",
@ -64,7 +62,6 @@ go_library(
"//internal/limiter",
"//internal/perforce",
"//internal/ratelimit",
"//internal/repoupdater",
"//internal/security",
"//internal/trace",
"//internal/types",

View File

@ -9,7 +9,6 @@ import (
"os"
"os/exec"
"path/filepath"
"sort"
"strconv"
"strings"
"time"
@ -27,8 +26,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/diskusage"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/gitserver/connection"
@ -44,7 +41,6 @@ type JanitorConfig struct {
JanitorInterval time.Duration
ShardID string
DesiredPercentFree int
DisableDeleteReposOnWrongShard bool
}
@ -53,36 +49,6 @@ func NewJanitor(ctx context.Context, cfg JanitorConfig, db database.DB, fs gitse
actor.WithInternalActor(ctx),
goroutine.HandlerFunc(func(ctx context.Context) error {
logger.Info("Starting janitor run")
// On Sourcegraph.com, we clone repos lazily, meaning whatever github.com
// repo is visited will be cloned eventually. So over time, we would always
// accumulate terabytes of repos, of which many are probably not visited
// often. Thus, we have this special cleanup worker for Sourcegraph.com that
// will remove repos that have not been changed in a long time (thats the
// best metric we have here today) once our disks are running full.
// On customer instances, this worker is useless, because repos are always
// managed by an external service connection and they will be recloned
// ASAP.
if dotcom.SourcegraphDotComMode() {
func() {
logger := logger.Scoped("dotcom-repo-cleaner")
start := time.Now()
logger.Info("Starting dotcom repo cleaner")
usage, err := fs.DiskUsage()
if err != nil {
logger.Error("getting free disk space", log.Error(err))
return
}
toFree := howManyBytesToFree(logger, usage, cfg.DesiredPercentFree)
if err := freeUpSpace(ctx, logger, db, fs, cfg.ShardID, usage, cfg.DesiredPercentFree, toFree); err != nil {
logger.Error("error freeing up space", log.Error(err))
}
logger.Info("dotcom repo cleaner finished", log.Int64("toFree", toFree), log.Bool("failed", err != nil), log.String("duration", time.Since(start).String()))
}()
}
gitserverAddrs := connection.NewGitserverAddresses(conf.Get())
// TODO: Should this return an error?
@ -189,10 +155,6 @@ var (
Name: "src_gitserver_repos_recloned",
Help: "number of repos removed and re-cloned due to age",
})
reposRemovedDiskPressure = promauto.NewCounter(prometheus.CounterOpts{
Name: "src_gitserver_repos_removed_disk_pressure",
Help: "number of repos removed due to not enough disk space",
})
janitorRunning = promauto.NewGauge(prometheus.GaugeOpts{
Name: "src_gitserver_janitor_running",
Help: "set to 1 when the gitserver janitor background job is running",
@ -709,137 +671,6 @@ func checkRepoFlaggedForCorruption(gitDir common.GitDir) (bool, error) {
return true, nil
}
// howManyBytesToFree returns the number of bytes that should be freed to make sure
// there is sufficient disk space free to satisfy s.DesiredPercentFree.
func howManyBytesToFree(logger log.Logger, usage diskusage.DiskUsage, desiredPercentFree int) int64 {
actualFreeBytes := usage.Free()
// Free up space if necessary.
diskSizeBytes := usage.Size()
desiredFreeBytes := uint64(float64(desiredPercentFree) / 100.0 * float64(diskSizeBytes))
howManyBytesToFree := int64(desiredFreeBytes - actualFreeBytes)
if howManyBytesToFree < 0 {
howManyBytesToFree = 0
}
const G = float64(1024 * 1024 * 1024)
logger.Debug(
"howManyBytesToFree",
log.Int("desired percent free", desiredPercentFree),
log.Float64("actual percent free", float64(actualFreeBytes)/float64(diskSizeBytes)*100.0),
log.Float64("amount to free in GiB", float64(howManyBytesToFree)/G),
)
return howManyBytesToFree
}
// freeUpSpace removes git directories under the fs, in order from least
// recently to most recently used, until it has freed howManyBytesToFree.
func freeUpSpace(ctx context.Context, logger log.Logger, db database.DB, fs gitserverfs.FS, shardID string, usage diskusage.DiskUsage, desiredPercentFree int, howManyBytesToFree int64) error {
if howManyBytesToFree <= 0 {
return nil
}
logger = logger.Scoped("freeUpSpace")
// Get the git directories and their mod times.
gitDirs, err := findGitDirs(fs)
if err != nil {
return errors.Wrap(err, "finding git dirs")
}
dirModTimes := make(map[common.GitDir]time.Time, len(gitDirs))
for _, d := range gitDirs {
mt, err := gitDirModTime(d)
if err != nil {
// If we get an error here, we move it to the end of the queue,
// since it's the janitor's job to clean/fix this.
logger.Warn("computing mod time of git dir failed", log.String("dir", string(d)), log.Error(err))
dirModTimes[d] = time.Now()
continue
}
dirModTimes[d] = mt
}
// Sort the repos from least to most recently used.
sort.Slice(gitDirs, func(i, j int) bool {
return dirModTimes[gitDirs[i]].Before(dirModTimes[gitDirs[j]])
})
// Remove repos until howManyBytesToFree is met or exceeded.
var spaceFreed int64
diskSizeBytes := usage.Size()
for _, d := range gitDirs {
if spaceFreed >= howManyBytesToFree {
return nil
}
// Fast-exit if the context has been canceled.
select {
case <-ctx.Done():
return ctx.Err()
default:
}
repoName := fs.ResolveRepoName(d)
delta, err := fs.DirSize(d.Path())
if err != nil {
logger.Warn("failed to get dir size", log.String("dir", string(d)), log.Error(err))
continue
}
if err := fs.RemoveRepo(repoName); err != nil {
logger.Warn("failed to remove least recently used repo", log.String("dir", string(d)), log.Error(err))
continue
}
// Set as not_cloned in the database.
if err := db.GitserverRepos().SetCloneStatus(ctx, repoName, types.CloneStatusNotCloned, shardID); err != nil {
logger.Warn("failed to update clone status", log.Error(err))
}
spaceFreed += delta
reposRemovedDiskPressure.Inc()
// Report the new disk usage situation after removing this repo.
usage, err := fs.DiskUsage()
if err != nil {
return errors.Wrap(err, "finding the amount of space free on disk")
}
actualFreeBytes := usage.Free()
G := float64(1024 * 1024 * 1024)
logger.Warn("removed least recently used repo",
log.String("repo", string(d)),
log.Duration("how old", time.Since(dirModTimes[d])),
log.Float64("free space in GiB", float64(actualFreeBytes)/G),
log.Float64("actual percent of disk space free", float64(actualFreeBytes)/float64(diskSizeBytes)*100.0),
log.Float64("desired percent of disk space free", float64(desiredPercentFree)),
log.Float64("space freed in GiB", float64(spaceFreed)/G),
log.Float64("how much space to free in GiB", float64(howManyBytesToFree)/G))
}
// Check.
if spaceFreed < howManyBytesToFree {
return errors.Errorf("only freed %d bytes, wanted to free %d", spaceFreed, howManyBytesToFree)
}
return nil
}
func gitDirModTime(d common.GitDir) (time.Time, error) {
head, err := os.Stat(d.Path("HEAD"))
if err != nil {
return time.Time{}, errors.Wrap(err, "getting repository modification time")
}
return head.ModTime(), nil
}
// findGitDirs collects the GitDirs of all repos in the FS.
func findGitDirs(fs gitserverfs.FS) ([]common.GitDir, error) {
var dirs []common.GitDir
return dirs, fs.ForEachRepo(func(_ api.RepoName, dir common.GitDir) (done bool) {
dirs = append(dirs, dir)
return false
})
}
// gitIsNonBareBestEffort returns true if the repository is not a bare
// repo. If we fail to check or the repository is bare we return false.
//

View File

@ -2,13 +2,11 @@ package internal
import (
"context"
"io"
"io/fs"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"testing"
"time"
@ -717,212 +715,6 @@ func TestCleanupOldLocks(t *testing.T) {
}
}
func TestHowManyBytesToFree(t *testing.T) {
const G = 1024 * 1024 * 1024
logger := logtest.Scoped(t)
tcs := []struct {
name string
diskSize uint64
bytesFree uint64
want int64
}{
{
name: "if there is already enough space, no space is freed",
diskSize: 10 * G,
bytesFree: 1.5 * G,
want: 0,
},
{
name: "if there is exactly enough space, no space is freed",
diskSize: 10 * G,
bytesFree: 1 * G,
want: 0,
},
{
name: "if there not enough space, some space is freed",
diskSize: 10 * G,
bytesFree: 0.5 * G,
want: int64(0.5 * G),
},
}
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
b := howManyBytesToFree(
logger,
&fakeDiskUsage{
diskSize: tc.diskSize,
bytesFree: tc.bytesFree,
},
10,
)
if b != tc.want {
t.Errorf("s.howManyBytesToFree(...) is %v, want 0", b)
}
})
}
}
type fakeDiskUsage struct {
bytesFree uint64
diskSize uint64
}
func (f *fakeDiskUsage) Free() uint64 {
return f.bytesFree
}
func (f *fakeDiskUsage) Size() uint64 {
return f.diskSize
}
func (f *fakeDiskUsage) PercentUsed() float32 {
return 1
}
func (f *fakeDiskUsage) Available() uint64 {
return 1
}
// assertPaths checks that all paths under want exist. It excludes non-empty directories
func assertPaths(t *testing.T, root string, want ...string) {
t.Helper()
notfound := make(map[string]struct{})
for _, p := range want {
notfound[p] = struct{}{}
}
var unwanted []string
err := filepath.Walk(root, func(path string, info fs.FileInfo, err error) error {
if err != nil {
return err
}
if info.IsDir() {
if empty, err := isEmptyDir(path); err != nil {
t.Fatal(err)
} else if !empty {
return nil
}
}
rel, err := filepath.Rel(root, path)
if err != nil {
return err
}
if _, ok := notfound[rel]; ok {
delete(notfound, rel)
} else {
unwanted = append(unwanted, rel)
}
return nil
})
if err != nil {
t.Fatal(err)
}
if len(notfound) > 0 {
var paths []string
for p := range notfound {
paths = append(paths, p)
}
sort.Strings(paths)
t.Errorf("did not find expected paths: %s", strings.Join(paths, " "))
}
if len(unwanted) > 0 {
sort.Strings(unwanted)
t.Errorf("found unexpected paths: %s", strings.Join(unwanted, " "))
}
}
func isEmptyDir(path string) (bool, error) {
f, err := os.Open(path)
if err != nil {
return false, err
}
defer f.Close()
_, err = f.Readdirnames(1)
if err == io.EOF {
return true, nil
}
return false, err
}
func TestFreeUpSpace(t *testing.T) {
logger := logtest.Scoped(t)
root := t.TempDir()
fs := gitserverfs.New(observation.TestContextTB(t), root)
require.NoError(t, fs.Initialize())
t.Run("no error if no space requested and no repos", func(t *testing.T) {
if err := freeUpSpace(context.Background(), logger, newMockedGitserverDB(), fs, "test-gitserver", &fakeDiskUsage{}, 10, 0); err != nil {
t.Fatal(err)
}
})
t.Run("error if space requested and no repos", func(t *testing.T) {
if err := freeUpSpace(context.Background(), logger, newMockedGitserverDB(), fs, "test-gitserver", &fakeDiskUsage{}, 10, 1); err == nil {
t.Fatal("want error")
}
})
t.Run("oldest repo gets removed to free up space", func(t *testing.T) {
r1 := filepath.Join(root, "repo1")
r2 := filepath.Join(root, "repo2")
if err := makeFakeRepo(r1, 1000); err != nil {
t.Fatal(err)
}
if err := makeFakeRepo(r2, 1000); err != nil {
t.Fatal(err)
}
// Force the modification time of r2 to be after that of r1.
fi1, err := os.Stat(r1)
if err != nil {
t.Fatal(err)
}
mtime2 := fi1.ModTime().Add(time.Second)
if err := os.Chtimes(r2, time.Now(), mtime2); err != nil {
t.Fatal(err)
}
db := dbmocks.NewMockDB()
gr := dbmocks.NewMockGitserverRepoStore()
db.GitserverReposFunc.SetDefaultReturn(gr)
// Run.
if err := freeUpSpace(context.Background(), logger, db, fs, "test-gitserver", &fakeDiskUsage{}, 10, 1000); err != nil {
t.Fatal(err)
}
// Check.
assertPaths(t, root,
".tmp",
".p4home",
"repo2/.git/HEAD",
"repo2/.git/space_eater")
rds, err := fs.DirSize(root)
require.NoError(t, err)
wantSize := int64(1000)
if rds > wantSize {
t.Errorf("repo dir size is %d, want no more than %d", rds, wantSize)
}
if len(gr.SetCloneStatusFunc.History()) == 0 {
t.Fatal("expected gitserverRepos.SetCloneStatus to be called, but wasn't")
}
require.Equal(t, gr.SetCloneStatusFunc.History()[0].Arg2, types.CloneStatusNotCloned)
})
}
func makeFakeRepo(d string, sizeBytes int) error {
gd := filepath.Join(d, ".git")
if err := os.MkdirAll(gd, 0o700); err != nil {
return errors.Wrap(err, "creating .git dir and any parents")
}
if err := os.WriteFile(filepath.Join(gd, "HEAD"), nil, 0o666); err != nil {
return errors.Wrap(err, "creating HEAD file")
}
if err := os.WriteFile(filepath.Join(gd, "space_eater"), make([]byte, sizeBytes), 0o666); err != nil {
return errors.Wrapf(err, "writing to space_eater file")
}
return nil
}
func prepareEmptyGitRepo(t *testing.T, dir string) common.GitDir {
t.Helper()
cmd := exec.Command("git", "init", ".")

View File

@ -31,7 +31,6 @@ type FS interface {
TempDir(prefix string) (string, error)
IgnorePath(string) bool
P4HomeDir() (string, error)
VisitRepos(func(api.RepoName, common.GitDir) (done bool, _ error)) error
RepoCloned(api.RepoName) (bool, error)
RemoveRepo(api.RepoName) error
ForEachRepo(func(api.RepoName, common.GitDir) (done bool)) error
@ -96,10 +95,6 @@ func (r *realGitserverFS) P4HomeDir() (string, error) {
return makeP4HomeDir(r.reposDir)
}
func (r *realGitserverFS) VisitRepos(visit func(api.RepoName, common.GitDir) (done bool, _ error)) error {
return nil
}
func (r *realGitserverFS) RepoCloned(name api.RepoName) (bool, error) {
return repoCloned(r.RepoDir(name))
}

View File

@ -54,9 +54,6 @@ type MockFS struct {
// TempDirFunc is an instance of a mock function object controlling the
// behavior of the method TempDir.
TempDirFunc *FSTempDirFunc
// VisitReposFunc is an instance of a mock function object controlling
// the behavior of the method VisitRepos.
VisitReposFunc *FSVisitReposFunc
}
// NewMockFS creates a new mock of the FS interface. All methods return zero
@ -123,11 +120,6 @@ func NewMockFS() *MockFS {
return
},
},
VisitReposFunc: &FSVisitReposFunc{
defaultHook: func(func(api.RepoName, common.GitDir) (bool, error)) (r0 error) {
return
},
},
}
}
@ -195,11 +187,6 @@ func NewStrictMockFS() *MockFS {
panic("unexpected invocation of MockFS.TempDir")
},
},
VisitReposFunc: &FSVisitReposFunc{
defaultHook: func(func(api.RepoName, common.GitDir) (bool, error)) error {
panic("unexpected invocation of MockFS.VisitRepos")
},
},
}
}
@ -243,9 +230,6 @@ func NewMockFSFrom(i FS) *MockFS {
TempDirFunc: &FSTempDirFunc{
defaultHook: i.TempDir,
},
VisitReposFunc: &FSVisitReposFunc{
defaultHook: i.VisitRepos,
},
}
}
@ -1467,104 +1451,3 @@ func (c FSTempDirFuncCall) Args() []interface{} {
func (c FSTempDirFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// FSVisitReposFunc describes the behavior when the VisitRepos method of the
// parent MockFS instance is invoked.
type FSVisitReposFunc struct {
defaultHook func(func(api.RepoName, common.GitDir) (bool, error)) error
hooks []func(func(api.RepoName, common.GitDir) (bool, error)) error
history []FSVisitReposFuncCall
mutex sync.Mutex
}
// VisitRepos delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockFS) VisitRepos(v0 func(api.RepoName, common.GitDir) (bool, error)) error {
r0 := m.VisitReposFunc.nextHook()(v0)
m.VisitReposFunc.appendCall(FSVisitReposFuncCall{v0, r0})
return r0
}
// SetDefaultHook sets function that is called when the VisitRepos method of
// the parent MockFS instance is invoked and the hook queue is empty.
func (f *FSVisitReposFunc) SetDefaultHook(hook func(func(api.RepoName, common.GitDir) (bool, error)) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// VisitRepos method of the parent MockFS instance invokes the hook at the
// front of the queue and discards it. After the queue is empty, the default
// hook function is invoked for any future action.
func (f *FSVisitReposFunc) PushHook(hook func(func(api.RepoName, common.GitDir) (bool, error)) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *FSVisitReposFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(func(api.RepoName, common.GitDir) (bool, error)) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *FSVisitReposFunc) PushReturn(r0 error) {
f.PushHook(func(func(api.RepoName, common.GitDir) (bool, error)) error {
return r0
})
}
func (f *FSVisitReposFunc) nextHook() func(func(api.RepoName, common.GitDir) (bool, error)) error {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *FSVisitReposFunc) appendCall(r0 FSVisitReposFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of FSVisitReposFuncCall objects describing the
// invocations of this function.
func (f *FSVisitReposFunc) History() []FSVisitReposFuncCall {
f.mutex.Lock()
history := make([]FSVisitReposFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// FSVisitReposFuncCall is an object that describes an invocation of method
// VisitRepos on an instance of MockFS.
type FSVisitReposFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 func(api.RepoName, common.GitDir) (bool, error)
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c FSVisitReposFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c FSVisitReposFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}

View File

@ -20,13 +20,11 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
"github.com/sourcegraph/sourcegraph/internal/grpc/chunk"
"github.com/sourcegraph/sourcegraph/internal/grpc/streamio"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -100,7 +98,7 @@ func (gs *grpcServer) CreateCommitFromPatchBinary(s proto.GitserverService_Creat
}
repoName := api.RepoName(metadata.GetRepo())
if err := gs.checkRepoExists(s.Context(), repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -172,7 +170,7 @@ func (gs *grpcServer) Archive(req *proto.ArchiveRequest, ss proto.GitserverServi
repoName := api.RepoName(req.GetRepo())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ss.Context(), repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -232,7 +230,7 @@ func (gs *grpcServer) GetObject(ctx context.Context, req *proto.GetObjectRequest
repoName := api.RepoName(req.GetRepo())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -295,7 +293,7 @@ func (gs *grpcServer) Search(req *proto.SearchRequest, ss proto.GitserverService
repoName := api.RepoName(req.GetRepo())
if err := gs.checkRepoExists(ss.Context(), repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -660,7 +658,7 @@ func (gs *grpcServer) MergeBase(ctx context.Context, req *proto.MergeBaseRequest
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -707,7 +705,7 @@ func (gs *grpcServer) GetCommit(ctx context.Context, req *proto.GetCommitRequest
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -766,7 +764,7 @@ func (gs *grpcServer) Blame(req *proto.BlameRequest, ss proto.GitserverService_B
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -844,7 +842,7 @@ func (gs *grpcServer) DefaultBranch(ctx context.Context, req *proto.DefaultBranc
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -904,7 +902,7 @@ func (gs *grpcServer) ReadFile(req *proto.ReadFileRequest, ss proto.GitserverSer
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -961,7 +959,7 @@ func (gs *grpcServer) ResolveRevision(ctx context.Context, req *proto.ResolveRev
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -1018,7 +1016,7 @@ func (gs *grpcServer) RevAtTime(ctx context.Context, req *proto.RevAtTimeRequest
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -1059,7 +1057,7 @@ func (gs *grpcServer) ListRefs(req *proto.ListRefsRequest, ss proto.GitserverSer
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ss.Context(), repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -1158,7 +1156,7 @@ func (gs *grpcServer) RawDiff(req *proto.RawDiffRequest, ss proto.GitserverServi
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -1218,7 +1216,7 @@ func (gs *grpcServer) ContributorCounts(ctx context.Context, req *proto.Contribu
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -1267,7 +1265,7 @@ func (gs *grpcServer) FirstEverCommit(ctx context.Context, request *proto.FirstE
repoName := api.RepoName(request.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -1329,7 +1327,7 @@ func (gs *grpcServer) BehindAhead(ctx context.Context, req *proto.BehindAheadReq
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -1376,7 +1374,7 @@ func (gs *grpcServer) ChangedFiles(req *proto.ChangedFilesRequest, ss proto.Gits
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -1457,7 +1455,7 @@ func (gs *grpcServer) Stat(ctx context.Context, req *proto.StatRequest) (*proto.
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return nil, err
}
@ -1524,7 +1522,7 @@ func (gs *grpcServer) ReadDir(req *proto.ReadDirRequest, ss proto.GitserverServi
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -1641,7 +1639,7 @@ func (gs *grpcServer) CommitLog(req *proto.CommitLogRequest, ss proto.GitserverS
repoName := api.RepoName(req.GetRepoName())
repoDir := gs.fs.RepoDir(repoName)
if err := gs.checkRepoExists(ctx, repoName); err != nil {
if err := gs.checkRepoExists(repoName); err != nil {
return err
}
@ -1738,7 +1736,7 @@ func (gs *grpcServer) CommitLog(req *proto.CommitLogRequest, ss proto.GitserverS
// On Sourcegraph.com, not all repos are managed by the scheduler. We thus
// need to enqueue a manual update of a repo that is visited but not cloned to
// ensure it is cloned and managed.
func (gs *grpcServer) checkRepoExists(ctx context.Context, repo api.RepoName) error {
func (gs *grpcServer) checkRepoExists(repo api.RepoName) error {
cloned, err := gs.fs.RepoCloned(repo)
if err != nil {
return status.New(codes.Internal, errors.Wrap(err, "failed to check if repo is cloned").Error()).Err()
@ -1748,19 +1746,6 @@ func (gs *grpcServer) checkRepoExists(ctx context.Context, repo api.RepoName) er
return nil
}
// On sourcegraph.com, not all repos are managed by the scheduler. We thus
// need to enqueue a manual clone of a repo that is visited but not cloned.
if dotcom.SourcegraphDotComMode() {
if conf.Get().DisableAutoGitUpdates {
gs.logger.Debug("not cloning on demand as DisableAutoGitUpdates is set")
} else {
_, err := repoupdater.DefaultClient.EnqueueRepoUpdate(ctx, repo)
if err != nil {
return errors.Wrap(err, "failed to enqueue repo clone")
}
}
}
cloneProgress, locked := gs.locker.Status(repo)
// We checked above that the repo is not cloned. So if the repo is currently

View File

@ -35,7 +35,6 @@ type Config struct {
SyncRepoStateBatchSize int
SyncRepoStateUpdatePerSecond int
JanitorReposDesiredPercentFree int
JanitorInterval time.Duration
JanitorDisableDeleteReposOnWrongShard bool
@ -75,15 +74,6 @@ func (c *Config) Load() {
c.SyncRepoStateBatchSize = c.GetInt("SRC_REPOS_SYNC_STATE_BATCH_SIZE", "500", "Number of updates to perform per batch")
c.SyncRepoStateUpdatePerSecond = c.GetInt("SRC_REPOS_SYNC_STATE_UPSERT_PER_SEC", "500", "The number of updated rows allowed per second across all gitserver instances")
// Align these variables with the 'disk_space_remaining' alerts in monitoring
c.JanitorReposDesiredPercentFree = c.GetInt("SRC_REPOS_DESIRED_PERCENT_FREE", "10", "Target percentage of free space on disk.")
if c.JanitorReposDesiredPercentFree < 0 {
c.AddError(errors.Errorf("negative value given for SRC_REPOS_DESIRED_PERCENT_FREE: %d", c.JanitorReposDesiredPercentFree))
}
if c.JanitorReposDesiredPercentFree > 100 {
c.AddError(errors.Errorf("excessively high value given for SRC_REPOS_DESIRED_PERCENT_FREE: %d", c.JanitorReposDesiredPercentFree))
}
c.JanitorInterval = c.GetInterval("SRC_REPOS_JANITOR_INTERVAL", "1m", "Interval between cleanup runs")
c.JanitorDisableDeleteReposOnWrongShard = c.GetBool("SRC_REPOS_JANITOR_DISABLE_DELETE_REPOS_ON_WRONG_SHARD", "false", "Disable deleting repos on wrong shard")

View File

@ -1,7 +1,6 @@
package shared
import (
"strconv"
"testing"
"time"
)
@ -31,9 +30,6 @@ func TestConfigDefaults(t *testing.T) {
if have, want := config.SyncRepoStateUpdatePerSecond, 500; have != want {
t.Errorf("invalid value for SyncRepoStateUpdatePerSecond: have=%d want=%d", have, want)
}
if have, want := config.JanitorReposDesiredPercentFree, 10; have != want {
t.Errorf("invalid value for JanitorReposDesiredPercentFree: have=%d want=%d", have, want)
}
if have, want := config.JanitorInterval, time.Minute; have != want {
t.Errorf("invalid value for JanitorInterval: have=%s want=%s", have, want)
}
@ -42,47 +38,6 @@ func TestConfigDefaults(t *testing.T) {
}
}
func TestConfig_PercentFree(t *testing.T) {
tests := []struct {
i int
want int
wantErr bool
}{
{i: -1, wantErr: true},
{i: -4, wantErr: true},
{i: 300, wantErr: true},
{i: 0, want: 0},
{i: 50, want: 50},
{i: 100, want: 100},
}
for i, tt := range tests {
t.Run(strconv.Itoa(i), func(t *testing.T) {
config := Config{}
config.SetMockGetter(mapGetter(map[string]string{"SRC_REPOS_DESIRED_PERCENT_FREE": strconv.Itoa(tt.i)}))
config.Load()
err := config.Validate()
if err != nil {
if !tt.wantErr {
t.Fatalf("unexpected validation error: %s", err)
} else {
// An error was expected and it was returned, so we can end the test here.
return
}
}
if tt.wantErr && err == nil {
t.Fatal("unexpected nil validation error")
}
if have, want := config.JanitorReposDesiredPercentFree, tt.want; have != want {
t.Errorf("invalid value for JanitorReposDesiredPercentFree: have=%d want=%d", have, want)
}
})
}
}
func mapGetter(env map[string]string) func(name, defaultValue, description string) string {
return func(name, defaultValue, description string) string {
if v, ok := env[name]; ok {

View File

@ -143,7 +143,6 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
server.JanitorConfig{
ShardID: hostname,
JanitorInterval: config.JanitorInterval,
DesiredPercentFree: config.JanitorReposDesiredPercentFree,
DisableDeleteReposOnWrongShard: config.JanitorDisableDeleteReposOnWrongShard,
},
db,

View File

@ -13,7 +13,6 @@ go_library(
"//internal/api",
"//internal/batches/syncer",
"//internal/database",
"//internal/errcode",
"//internal/repos",
"//internal/repoupdater/protocol",
"//internal/repoupdater/v1:repoupdater",
@ -40,30 +39,21 @@ go_test(
"requires-network",
],
deps = [
"//cmd/repo-updater/internal/gitserver",
"//cmd/repo-updater/internal/scheduler",
"//internal/api",
"//internal/conf/reposource",
"//internal/database",
"//internal/database/dbmocks",
"//internal/database/dbtest",
"//internal/extsvc",
"//internal/extsvc/awscodecommit",
"//internal/extsvc/github",
"//internal/extsvc/gitlab",
"//internal/grpc",
"//internal/grpc/defaults",
"//internal/observation",
"//internal/repos",
"//internal/repoupdater",
"//internal/repoupdater/protocol",
"//internal/repoupdater/v1:repoupdater",
"//internal/timeutil",
"//internal/types",
"//internal/types/typestest",
"//lib/errors",
"@com_github_google_go_cmp//cmp",
"@com_github_google_go_cmp//cmp/cmpopts",
"@com_github_inconshreveable_log15//:log15",
"@com_github_sourcegraph_log//logtest",
"@org_golang_google_grpc//:go_default_library",

View File

@ -3,7 +3,6 @@ package repoupdater
import (
"context"
"fmt"
"time"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
@ -15,7 +14,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/batches/syncer"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/repos"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/repoupdater/v1"
@ -44,50 +42,6 @@ func (s *Server) RepoUpdateSchedulerInfo(_ context.Context, req *proto.RepoUpdat
return res.ToProto(), nil
}
func (s *Server) RepoLookup(ctx context.Context, req *proto.RepoLookupRequest) (result *proto.RepoLookupResponse, err error) {
// NOTE: Internal actor is required to have full visibility of the repo table
// (i.e. bypass repository authorization).
ctx = actor.WithInternalActor(ctx)
repoName := api.RepoName(req.GetRepo())
// Sourcegraph.com: this is on the user path, do not block forever if codehost is
// being bad. Ideally block before cloudflare 504s the request (1min). Other: we
// only speak to our database, so response should be in a few ms.
ctx, cancel := context.WithTimeout(ctx, 30*time.Second)
defer cancel()
tr, ctx := trace.New(ctx, "repoLookup", attribute.String("repo", string(repoName)))
defer func() {
s.Logger.Debug("repoLookup", log.String("result", fmt.Sprint(result)), log.Error(err))
tr.SetError(err)
tr.End()
}()
if repoName == "" {
return nil, errors.New("Repo must be set (is blank)")
}
repo, err := s.Syncer.SyncRepo(ctx, repoName, true)
if err != nil {
if errcode.IsNotFound(err) {
return (&protocol.RepoLookupResult{ErrorNotFound: true}).ToProto(), nil
}
if errcode.IsUnauthorized(err) || errcode.IsForbidden(err) {
return (&protocol.RepoLookupResult{ErrorUnauthorized: true}).ToProto(), nil
}
if errcode.IsTemporary(err) {
return (&protocol.RepoLookupResult{ErrorTemporarilyUnavailable: true}).ToProto(), nil
}
if errcode.IsRepoDenied(err) {
return (&protocol.RepoLookupResult{ErrorRepoDenied: err.Error()}).ToProto(), nil
}
return nil, err
}
return (&protocol.RepoLookupResult{Repo: protocol.NewRepoInfo(repo)}).ToProto(), nil
}
func (s *Server) EnqueueRepoUpdate(ctx context.Context, req *proto.EnqueueRepoUpdateRequest) (resp *proto.EnqueueRepoUpdateResponse, err error) {
// NOTE: Internal actor is required to have full visibility of the repo table
// (i.e. bypass repository authorization).

View File

@ -8,35 +8,25 @@ import (
"reflect"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"google.golang.org/grpc"
"github.com/sourcegraph/log/logtest"
"github.com/sourcegraph/sourcegraph/cmd/repo-updater/internal/gitserver"
"github.com/sourcegraph/sourcegraph/cmd/repo-updater/internal/scheduler"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/awscodecommit"
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitlab"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/repos"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/repoupdater/v1"
"github.com/sourcegraph/sourcegraph/internal/timeutil"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/types/typestest"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -141,462 +131,6 @@ func TestServer_EnqueueRepoUpdate(t *testing.T) {
}
}
func TestServer_RepoLookup(t *testing.T) {
logger := logtest.Scoped(t)
db := dbtest.NewDB(t)
store := repos.NewStore(logger, database.NewDB(logger, db))
ctx := context.Background()
clock := timeutil.NewFakeClock(time.Now(), 0)
now := clock.Now()
githubSource := types.ExternalService{
Kind: extsvc.KindGitHub,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(`{
"url": "https://github.com",
"token": "secret-token",
"repos": ["owner/name"]
}`),
}
awsSource := types.ExternalService{
Kind: extsvc.KindAWSCodeCommit,
Config: extsvc.NewUnencryptedConfig(`
{
"region": "us-east-1",
"accessKeyID": "abc",
"secretAccessKey": "abc",
"gitCredentials": {
"username": "user",
"password": "pass"
}
}
`),
}
gitlabSource := types.ExternalService{
Kind: extsvc.KindGitLab,
CloudDefault: true,
Config: extsvc.NewUnencryptedConfig(`
{
"url": "https://gitlab.com",
"token": "abc",
"projectQuery": ["none"]
}
`),
}
npmSource := types.ExternalService{
Kind: extsvc.KindNpmPackages,
Config: extsvc.NewUnencryptedConfig(`
{
"registry": "npm.org"
}
`),
}
if err := store.ExternalServiceStore().Upsert(ctx, &githubSource, &awsSource, &gitlabSource, &npmSource); err != nil {
t.Fatal(err)
}
githubRepository := &types.Repo{
Name: "github.com/foo/bar",
Description: "The description",
Archived: false,
Fork: false,
CreatedAt: now,
UpdatedAt: now,
ExternalRepo: api.ExternalRepoSpec{
ID: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
ServiceType: extsvc.TypeGitHub,
ServiceID: "https://github.com/",
},
Sources: map[string]*types.SourceInfo{
githubSource.URN(): {
ID: githubSource.URN(),
CloneURL: "git@github.com:foo/bar.git",
},
},
Metadata: &github.Repository{
ID: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
URL: "github.com/foo/bar",
DatabaseID: 1234,
Description: "The description",
NameWithOwner: "foo/bar",
},
}
awsCodeCommitRepository := &types.Repo{
Name: "git-codecommit.us-west-1.amazonaws.com/stripe-go",
Description: "The stripe-go lib",
Archived: false,
Fork: false,
CreatedAt: now,
ExternalRepo: api.ExternalRepoSpec{
ID: "f001337a-3450-46fd-b7d2-650c0EXAMPLE",
ServiceType: extsvc.TypeAWSCodeCommit,
ServiceID: "arn:aws:codecommit:us-west-1:999999999999:",
},
Sources: map[string]*types.SourceInfo{
awsSource.URN(): {
ID: awsSource.URN(),
CloneURL: "git@git-codecommit.us-west-1.amazonaws.com/v1/repos/stripe-go",
},
},
Metadata: &awscodecommit.Repository{
ARN: "arn:aws:codecommit:us-west-1:999999999999:stripe-go",
AccountID: "999999999999",
ID: "f001337a-3450-46fd-b7d2-650c0EXAMPLE",
Name: "stripe-go",
Description: "The stripe-go lib",
HTTPCloneURL: "https://git-codecommit.us-west-1.amazonaws.com/v1/repos/stripe-go",
LastModified: &now,
},
}
gitlabRepository := &types.Repo{
Name: "gitlab.com/gitlab-org/gitaly",
Description: "Gitaly is a Git RPC service for handling all the git calls made by GitLab",
URI: "gitlab.com/gitlab-org/gitaly",
CreatedAt: now,
UpdatedAt: now,
ExternalRepo: api.ExternalRepoSpec{
ID: "2009901",
ServiceType: extsvc.TypeGitLab,
ServiceID: "https://gitlab.com/",
},
Sources: map[string]*types.SourceInfo{
gitlabSource.URN(): {
ID: gitlabSource.URN(),
CloneURL: "https://gitlab.com/gitlab-org/gitaly.git",
},
},
Metadata: &gitlab.Project{
ProjectCommon: gitlab.ProjectCommon{
ID: 2009901,
PathWithNamespace: "gitlab-org/gitaly",
Description: "Gitaly is a Git RPC service for handling all the git calls made by GitLab",
WebURL: "https://gitlab.com/gitlab-org/gitaly",
HTTPURLToRepo: "https://gitlab.com/gitlab-org/gitaly.git",
SSHURLToRepo: "git@gitlab.com:gitlab-org/gitaly.git",
},
Visibility: "",
Archived: false,
},
}
npmRepository := &types.Repo{
Name: "npm/package",
URI: "npm/package",
ExternalRepo: api.ExternalRepoSpec{
ID: "npm/package",
ServiceType: extsvc.TypeNpmPackages,
ServiceID: extsvc.TypeNpmPackages,
},
Sources: map[string]*types.SourceInfo{
npmSource.URN(): {
ID: npmSource.URN(),
CloneURL: "npm/package",
},
},
Metadata: &reposource.NpmMetadata{Package: func() *reposource.NpmPackageName {
p, _ := reposource.NewNpmPackageName("", "package")
return p
}()},
}
testCases := []struct {
name string
args protocol.RepoLookupArgs
stored types.Repos
result *protocol.RepoLookupResult
src repos.Source
assert typestest.ReposAssertion
assertDelay time.Duration
err string
}{
{
name: "found - aws code commit",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("git-codecommit.us-west-1.amazonaws.com/stripe-go"),
},
stored: []*types.Repo{awsCodeCommitRepository},
result: &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{
ExternalRepo: api.ExternalRepoSpec{
ID: "f001337a-3450-46fd-b7d2-650c0EXAMPLE",
ServiceType: extsvc.TypeAWSCodeCommit,
ServiceID: "arn:aws:codecommit:us-west-1:999999999999:",
},
Name: "git-codecommit.us-west-1.amazonaws.com/stripe-go",
Description: "The stripe-go lib",
VCS: protocol.VCSInfo{URL: "git@git-codecommit.us-west-1.amazonaws.com/v1/repos/stripe-go"},
Links: &protocol.RepoLinks{
Root: "https://us-west-1.console.aws.amazon.com/codesuite/codecommit/repositories/stripe-go/browse",
Tree: "https://us-west-1.console.aws.amazon.com/codesuite/codecommit/repositories/stripe-go/browse/{rev}/--/{path}",
Blob: "https://us-west-1.console.aws.amazon.com/codesuite/codecommit/repositories/stripe-go/browse/{rev}/--/{path}",
Commit: "https://us-west-1.console.aws.amazon.com/codesuite/codecommit/repositories/stripe-go/commit/{commit}",
},
}},
},
{
name: "not synced from non public codehost",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("github.private.corp/a/b"),
},
src: repos.NewFakeSource(&githubSource, nil),
result: &protocol.RepoLookupResult{ErrorNotFound: true},
err: fmt.Sprintf("repository not found (name=%s notfound=%v)", api.RepoName("github.private.corp/a/b"), true),
},
{
name: "synced - npm package host",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("npm/package"),
},
stored: []*types.Repo{},
src: repos.NewFakeSource(&npmSource, nil, npmRepository),
result: &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{
ExternalRepo: npmRepository.ExternalRepo,
Name: npmRepository.Name,
VCS: protocol.VCSInfo{URL: string(npmRepository.Name)},
}},
assert: typestest.AssertReposEqual(npmRepository),
},
{
name: "synced - github.com cloud default",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("github.com/foo/bar"),
},
stored: []*types.Repo{},
src: repos.NewFakeSource(&githubSource, nil, githubRepository),
result: &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{
ExternalRepo: api.ExternalRepoSpec{
ID: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
ServiceType: extsvc.TypeGitHub,
ServiceID: "https://github.com/",
},
Name: "github.com/foo/bar",
Description: "The description",
VCS: protocol.VCSInfo{URL: "git@github.com:foo/bar.git"},
Links: &protocol.RepoLinks{
Root: "github.com/foo/bar",
Tree: "github.com/foo/bar/tree/{rev}/{path}",
Blob: "github.com/foo/bar/blob/{rev}/{path}",
Commit: "github.com/foo/bar/commit/{commit}",
},
}},
assert: typestest.AssertReposEqual(githubRepository),
},
{
name: "found - github.com already exists",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("github.com/foo/bar"),
},
stored: []*types.Repo{githubRepository},
src: repos.NewFakeSource(&githubSource, nil, githubRepository),
result: &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{
ExternalRepo: api.ExternalRepoSpec{
ID: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
ServiceType: extsvc.TypeGitHub,
ServiceID: "https://github.com/",
},
Name: "github.com/foo/bar",
Description: "The description",
VCS: protocol.VCSInfo{URL: "git@github.com:foo/bar.git"},
Links: &protocol.RepoLinks{
Root: "github.com/foo/bar",
Tree: "github.com/foo/bar/tree/{rev}/{path}",
Blob: "github.com/foo/bar/blob/{rev}/{path}",
Commit: "github.com/foo/bar/commit/{commit}",
},
}},
},
{
name: "not found - github.com",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("github.com/foo/bar"),
},
src: repos.NewFakeSource(&githubSource, github.ErrRepoNotFound),
result: &protocol.RepoLookupResult{ErrorNotFound: true},
err: fmt.Sprintf("repository not found (name=%s notfound=%v)", api.RepoName("github.com/foo/bar"), true),
assert: typestest.AssertReposEqual(),
},
{
name: "unauthorized - github.com",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("github.com/foo/bar"),
},
src: repos.NewFakeSource(&githubSource, &github.APIError{Code: http.StatusUnauthorized}),
result: &protocol.RepoLookupResult{ErrorUnauthorized: true},
err: fmt.Sprintf("not authorized (name=%s noauthz=%v)", api.RepoName("github.com/foo/bar"), true),
assert: typestest.AssertReposEqual(),
},
{
name: "temporarily unavailable - github.com",
args: protocol.RepoLookupArgs{
Repo: api.RepoName("github.com/foo/bar"),
},
src: repos.NewFakeSource(&githubSource, &github.APIError{Message: "API rate limit exceeded"}),
result: &protocol.RepoLookupResult{ErrorTemporarilyUnavailable: true},
err: fmt.Sprintf(
"repository temporarily unavailable (name=%s istemporary=%v)",
api.RepoName("github.com/foo/bar"),
true,
),
assert: typestest.AssertReposEqual(),
},
{
name: "synced - gitlab.com",
args: protocol.RepoLookupArgs{Repo: gitlabRepository.Name},
stored: []*types.Repo{},
src: repos.NewFakeSource(&gitlabSource, nil, gitlabRepository),
result: &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{
Name: "gitlab.com/gitlab-org/gitaly",
Description: "Gitaly is a Git RPC service for handling all the git calls made by GitLab",
Fork: false,
Archived: false,
VCS: protocol.VCSInfo{
URL: "https://gitlab.com/gitlab-org/gitaly.git",
},
Links: &protocol.RepoLinks{
Root: "https://gitlab.com/gitlab-org/gitaly",
Tree: "https://gitlab.com/gitlab-org/gitaly/tree/{rev}/{path}",
Blob: "https://gitlab.com/gitlab-org/gitaly/blob/{rev}/{path}",
Commit: "https://gitlab.com/gitlab-org/gitaly/commit/{commit}",
},
ExternalRepo: gitlabRepository.ExternalRepo,
}},
assert: typestest.AssertReposEqual(gitlabRepository),
},
{
name: "found - gitlab.com",
args: protocol.RepoLookupArgs{Repo: gitlabRepository.Name},
stored: []*types.Repo{gitlabRepository},
src: repos.NewFakeSource(&gitlabSource, nil, gitlabRepository),
result: &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{
Name: "gitlab.com/gitlab-org/gitaly",
Description: "Gitaly is a Git RPC service for handling all the git calls made by GitLab",
Fork: false,
Archived: false,
VCS: protocol.VCSInfo{
URL: "https://gitlab.com/gitlab-org/gitaly.git",
},
Links: &protocol.RepoLinks{
Root: "https://gitlab.com/gitlab-org/gitaly",
Tree: "https://gitlab.com/gitlab-org/gitaly/tree/{rev}/{path}",
Blob: "https://gitlab.com/gitlab-org/gitaly/blob/{rev}/{path}",
Commit: "https://gitlab.com/gitlab-org/gitaly/commit/{commit}",
},
ExternalRepo: gitlabRepository.ExternalRepo,
}},
},
{
name: "Private repos are not supported on sourcegraph.com",
args: protocol.RepoLookupArgs{
Repo: githubRepository.Name,
},
src: repos.NewFakeSource(&githubSource, nil, githubRepository.With(func(r *types.Repo) {
r.Private = true
})),
result: &protocol.RepoLookupResult{ErrorNotFound: true},
err: fmt.Sprintf("repository not found (name=%s notfound=%v)", githubRepository.Name, true),
},
{
name: "Private repos that used to be public should be removed asynchronously",
args: protocol.RepoLookupArgs{
Repo: githubRepository.Name,
},
src: repos.NewFakeSource(&githubSource, github.ErrRepoNotFound),
stored: []*types.Repo{githubRepository.With(func(r *types.Repo) {
r.UpdatedAt = r.UpdatedAt.Add(-time.Hour)
})},
result: &protocol.RepoLookupResult{Repo: &protocol.RepoInfo{
ExternalRepo: api.ExternalRepoSpec{
ID: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
ServiceType: extsvc.TypeGitHub,
ServiceID: "https://github.com/",
},
Name: "github.com/foo/bar",
Description: "The description",
VCS: protocol.VCSInfo{URL: "git@github.com:foo/bar.git"},
Links: &protocol.RepoLinks{
Root: "github.com/foo/bar",
Tree: "github.com/foo/bar/tree/{rev}/{path}",
Blob: "github.com/foo/bar/blob/{rev}/{path}",
Commit: "github.com/foo/bar/commit/{commit}",
},
}},
assertDelay: time.Second,
assert: typestest.AssertReposEqual(),
},
}
for _, tc := range testCases {
tc := tc
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
_, err := db.ExecContext(ctx, "DELETE FROM repo")
if err != nil {
t.Fatal(err)
}
rs := tc.stored.Clone()
err = store.RepoStore().Create(ctx, rs...)
if err != nil {
t.Fatal(err)
}
clock := clock
logger := logtest.Scoped(t)
syncer := &repos.Syncer{
Now: clock.Now,
Store: store,
Sourcer: repos.NewFakeSourcer(nil, tc.src),
ObsvCtx: observation.TestContextTB(t),
}
scheduler := scheduler.NewUpdateScheduler(logtest.Scoped(t), dbmocks.NewMockDB(), gitserver.NewMockRepositoryServiceClient())
s := &Server{
Logger: logger,
Syncer: syncer,
Store: store,
Scheduler: scheduler,
}
gs := grpc.NewServer(defaults.ServerOptions(logger)...)
proto.RegisterRepoUpdaterServiceServer(gs, s)
srv := httptest.NewServer(internalgrpc.MultiplexHandlers(gs, http.NotFoundHandler()))
defer srv.Close()
cli := repoupdater.NewClient(srv.URL)
if tc.err == "" {
tc.err = "<nil>"
}
res, err := cli.RepoLookup(ctx, tc.args)
if have, want := fmt.Sprint(err), tc.err; have != want {
t.Fatalf("have err: %q, want: %q", have, want)
}
if diff := cmp.Diff(res, tc.result, cmpopts.IgnoreFields(protocol.RepoInfo{}, "ID")); diff != "" {
t.Fatalf("response mismatch(-have, +want): %s", diff)
}
if tc.assert != nil {
if tc.assertDelay != 0 {
time.Sleep(tc.assertDelay)
}
rs, err := store.RepoStore().List(ctx, database.ReposListOptions{})
if err != nil {
t.Fatal(err)
}
tc.assert(t, rs)
}
})
}
}
type fakeScheduler struct{}
func (s *fakeScheduler) UpdateOnce(_ api.RepoID, _ api.RepoName) {}

View File

@ -18,7 +18,6 @@ go_library(
"//internal/api",
"//internal/conf",
"//internal/database",
"//internal/dotcom",
"//internal/limiter",
"//internal/ratelimit",
"//internal/repoupdater/protocol",

View File

@ -18,7 +18,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/limiter"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
@ -104,48 +103,46 @@ func (s *UpdateScheduler) Start() {
ctx, cancel := context.WithCancel(actor.WithInternalActor(context.Background()))
s.cancelCtx = cancel
if !dotcom.SourcegraphDotComMode() {
s.logger.Info("hydrating update scheduler")
s.logger.Info("hydrating update scheduler")
// Hydrate the scheduler with the initial set of repos.
// This is done to preset the intervals from the database state, so that
// repos that haven't changed in a while don't need to be refetched once
// after a restart until we restore the previous schedule.
var nextCursor int
errors := 0
for {
var (
rs []types.RepoGitserverStatus
err error
)
rs, nextCursor, err = s.db.GitserverRepos().IterateRepoGitserverStatus(ctx, database.IterateRepoGitserverStatusOptions{
NextCursor: nextCursor,
BatchSize: 1000,
})
if err != nil {
errors++
s.logger.Error("failed to iterate gitserver repos", log.Error(err), log.Int("errors", errors))
if errors > 5 {
s.logger.Error("too many errors, stopping initial hydration of update queue, the queue will build up lazily")
return
}
time.Sleep(time.Second)
continue
// Hydrate the scheduler with the initial set of repos.
// This is done to preset the intervals from the database state, so that
// repos that haven't changed in a while don't need to be refetched once
// after a restart until we restore the previous schedule.
var nextCursor int
errors := 0
for {
var (
rs []types.RepoGitserverStatus
err error
)
rs, nextCursor, err = s.db.GitserverRepos().IterateRepoGitserverStatus(ctx, database.IterateRepoGitserverStatusOptions{
NextCursor: nextCursor,
BatchSize: 1000,
})
if err != nil {
errors++
s.logger.Error("failed to iterate gitserver repos", log.Error(err), log.Int("errors", errors))
if errors > 5 {
s.logger.Error("too many errors, stopping initial hydration of update queue, the queue will build up lazily")
return
}
for _, r := range rs {
cr := configuredRepo{
ID: r.ID,
Name: r.Name,
}
if !s.schedule.upsert(cr) {
interval := initialInterval(r)
s.schedule.updateInterval(cr, interval)
}
time.Sleep(time.Second)
continue
}
for _, r := range rs {
cr := configuredRepo{
ID: r.ID,
Name: r.Name,
}
if nextCursor == 0 {
break
if !s.schedule.upsert(cr) {
interval := initialInterval(r)
s.schedule.updateInterval(cr, interval)
}
}
if nextCursor == 0 {
break
}
s.logger.Info("hydrated update scheduler")
}

View File

@ -84,7 +84,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
// bit more to do in this method, though, and the process will be marked ready
// further down this function.
mustRegisterMetrics(log.Scoped("MustRegisterMetrics"), db, dotcom.SourcegraphDotComMode())
mustRegisterMetrics(log.Scoped("MustRegisterMetrics"), db)
store := repos.NewStore(logger.Scoped("store"), db)
{
@ -130,7 +130,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
routines := []goroutine.BackgroundRoutine{
makeGRPCServer(logger, server),
newUnclonedReposManager(ctx, logger, dotcom.SourcegraphDotComMode(), updateScheduler, store),
newUnclonedReposManager(ctx, logger, updateScheduler, store),
phabricator.NewRepositorySyncWorker(ctx, db, log.Scoped("PhabricatorRepositorySyncWorker"), store),
// Run git fetches scheduler
updateScheduler,
@ -139,7 +139,6 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
routines = append(routines,
syncer.Routines(ctx, store, repos.RunOptions{
EnqueueInterval: conf.RepoListUpdateInterval,
IsDotCom: dotcom.SourcegraphDotComMode(),
MinSyncInterval: conf.RepoListUpdateInterval,
})...,
)
@ -331,7 +330,7 @@ func watchSyncer(
// the uncloned repositories on gitserver and update the scheduler with the list.
// It also ensures that if any of our indexable repos are missing from the cloned
// list they will be added for cloning ASAP.
func newUnclonedReposManager(ctx context.Context, logger log.Logger, isSourcegraphDotCom bool, sched *scheduler.UpdateScheduler, store repos.Store) goroutine.BackgroundRoutine {
func newUnclonedReposManager(ctx context.Context, logger log.Logger, sched *scheduler.UpdateScheduler, store repos.Store) goroutine.BackgroundRoutine {
return goroutine.NewPeriodicGoroutine(
actor.WithInternalActor(ctx),
goroutine.HandlerFunc(func(ctx context.Context) error {
@ -342,21 +341,7 @@ func newUnclonedReposManager(ctx context.Context, logger log.Logger, isSourcegra
baseRepoStore := database.ReposWith(logger, store)
if isSourcegraphDotCom {
// Fetch ALL indexable repos that are NOT cloned so that we can add them to the
// scheduler.
opts := database.ListSourcegraphDotComIndexableReposOptions{
CloneStatus: types.CloneStatusNotCloned,
}
indexable, err := baseRepoStore.ListSourcegraphDotComIndexableRepos(ctx, opts)
if err != nil {
return errors.Wrap(err, "listing indexable repos")
}
// Ensure that uncloned indexable repos are known to the scheduler
sched.EnsureScheduled(indexable)
}
// Next, move any repos managed by the scheduler that are uncloned to the front
// Move any repos managed by the scheduler that are uncloned to the front
// of the queue.
managed := sched.ListRepoIDs()
@ -395,7 +380,7 @@ func watchAuthzProviders(ctx context.Context, db database.DB) {
}()
}
func mustRegisterMetrics(logger log.Logger, db dbutil.DB, sourcegraphDotCom bool) {
func mustRegisterMetrics(logger log.Logger, db dbutil.DB) {
scanCount := func(sql string) (float64, error) {
row := db.QueryRowContext(context.Background(), sql)
var count int64
@ -484,7 +469,6 @@ select round((select cast(count(*) as float) from latest_state where state = 'er
SELECT extract(epoch from max(now() - last_sync_at))
FROM external_services AS es
WHERE deleted_at IS NULL
AND NOT cloud_default
AND last_sync_at IS NOT NULL
-- Exclude any external services that are currently syncing since it's possible they may sync for more
-- than our max backoff time.
@ -510,18 +494,10 @@ AND NOT EXISTS(SELECT FROM external_service_sync_jobs WHERE external_service_id
// Count the number of repos owned by site level external services that haven't
// been fetched in 8 hours.
//
// We always return zero for Sourcegraph.com because we currently have a lot of
// repos owned by the Starburst service in this state and until that's resolved
// it would just be noise.
promauto.NewGaugeFunc(prometheus.GaugeOpts{
Name: "src_repoupdater_stale_repos",
Help: "The number of repos that haven't been fetched in at least 8 hours",
}, func() float64 {
if sourcegraphDotCom {
return 0
}
count, err := scanCount(`
select count(*)
from gitserver_repos
@ -531,8 +507,7 @@ where last_fetched < now() - interval '8 hours'
from external_service_repos
join external_services es on external_service_repos.external_service_id = es.id
join repo r on external_service_repos.repo_id = r.id
where not es.cloud_default
and gitserver_repos.repo_id = repo_id
where gitserver_repos.repo_id = repo_id
and es.deleted_at is null
and r.deleted_at is null
)

View File

@ -33,7 +33,6 @@ go_library(
"//internal/env",
"//internal/goroutine",
"//internal/observation",
"//internal/repoupdater",
"//internal/uploadstore",
"//internal/workerutil/dbworker",
"//internal/workerutil/dbworker/store",

View File

@ -10,7 +10,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
)
type autoindexingDependencyScheduler struct{}
@ -46,6 +45,5 @@ func (j *autoindexingDependencyScheduler) Routines(_ context.Context, observatio
services.UploadsService,
services.DependenciesService,
services.AutoIndexingService,
repoupdater.DefaultClient,
), nil
}

View File

@ -6330,7 +6330,7 @@ sum by (container_label_io_kubernetes_pod_name) (rate(container_cpu_usage_second
<p class="subtitle">Disk space remaining</p>
Indicates disk space remaining for each gitserver instance, which is used to determine when to start evicting least-used repository clones from disk (default 10%, configured by `SRC_REPOS_DESIRED_PERCENT_FREE`).
Indicates disk space remaining for each gitserver instance. When disk space is low, gitserver may experience slowdowns or fails to fetch repositories.
Refer to the [alerts reference](./alerts.md#gitserver-disk-space-remaining) for 2 alerts related to this panel.
@ -6869,30 +6869,6 @@ sum by (job_name) (rate(src_gitserver_janitor_job_duration_seconds_count{instanc
<br />
#### gitserver: repos_removed
<p class="subtitle">Repositories removed due to disk pressure</p>
Repositories removed due to disk pressure
This panel has no related alerts.
To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=100230` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Source team](https://handbook.sourcegraph.com/departments/engineering/teams/source).*</sub>
<details>
<summary>Technical details</summary>
Query:
```
sum by (instance) (rate(src_gitserver_repos_removed_disk_pressure{instance=~`${shard:regex}`}[5m]))
```
</details>
<br />
#### gitserver: non_existent_repos_removed
<p class="subtitle">Repositories removed because they are not defined in the DB</p>
@ -6901,7 +6877,7 @@ Repositoriess removed because they are not defined in the DB
This panel has no related alerts.
To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=100240` on your Sourcegraph instance.
To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=100230` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Source team](https://handbook.sourcegraph.com/departments/engineering/teams/source).*</sub>
@ -6925,7 +6901,7 @@ the rate of successful sg maintenance jobs and the reason why they were triggere
This panel has no related alerts.
To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=100250` on your Sourcegraph instance.
To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=100240` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Source team](https://handbook.sourcegraph.com/departments/engineering/teams/source).*</sub>
@ -6949,7 +6925,7 @@ the rate of successful git prune jobs over 1h and whether they were skipped
This panel has no related alerts.
To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=100260` on your Sourcegraph instance.
To see this panel, visit `/-/debug/grafana/d/gitserver/gitserver?viewPanel=100250` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Source team](https://handbook.sourcegraph.com/departments/engineering/teams/source).*</sub>

View File

@ -88,22 +88,6 @@ If you see a certificate expiry warning you may need to delete your certificate
On macOS, the certificate can be removed from here: `~/Library/Application\ Support/Caddy/certificates/local/sourcegraph.test`
## Running out of disk space
If you see errors similar to this:
```
gitserver | ERROR cleanup: error freeing up space, error: only freed 1124101958 bytes, wanted to free 29905298227
```
You are probably low on disk space. By default it tries to cleanup when there is less than 10% of available disk space.
You can override that by setting this env variable:
```bash
# means 5%. You may want to put that into .bashrc for convinience
SRC_REPOS_DESIRED_PERCENT_FREE=5
```
## CPU/RAM/bandwidth/battery usage
On first install, the program will use quite a bit of bandwidth to concurrently download all the Go and Node packages. After packages have been installed, the Javascript assets will be compiled into a single Javascript file, which can take up to 5 minutes, and can be heavy on the CPU at times.
@ -154,4 +138,3 @@ If files do not normally have group permissions in your environment (e.g. if you
When trying to install `sg` with the pre-built binaries on WSL2 you may run into this error message: `failed to set max open files: invalid argument`. The default configuration of WSL2 does not allow the user to modify the number of open files by default [which `sg` requires](https://github.com/sourcegraph/sourcegraph/blob/379369e3d92c9b28d5891d3251922c7737ed810b/dev/sg/main.go#L75:L90) to start. To work around this you can modify the file limits for your given session with `sudo prlimit --nofile=20000 --pid $$; ulimit -n 20000` then re-run the installation script.
Note: this change will be reverted when your session ends. You will need to reset these limits every time you open a new session and want to use `sg`.

View File

@ -92,10 +92,6 @@ func ProvidersFromConfig(
opt.AfterID = svcs[len(svcs)-1].ID // Advance the cursor
for _, svc := range svcs {
if svc.CloudDefault { // Only public repos in CloudDefault services
continue
}
cfg, err := extsvc.ParseEncryptableConfig(ctx, svc.Kind, svc.Config)
if err != nil {
seriousProblems = append(seriousProblems, fmt.Sprintf("Could not parse config of external service %d: %v", svc.ID, err))

View File

@ -94,7 +94,6 @@ func TestExecutor_ExecutePlan(t *testing.T) {
Name: repo.Name,
VCS: protocol.VCSInfo{URL: repo.URI},
})
defer state.Unmock()
mockExternalURL(t, "https://sourcegraph.test")

View File

@ -42,7 +42,6 @@ func TestReconcilerProcess_IntegrationTest(t *testing.T) {
Name: repo.Name,
VCS: protocol.VCSInfo{URL: repo.URI},
})
defer state.Unmock()
mockExternalURL(t, "https://sourcegraph.test")

View File

@ -44,7 +44,6 @@ go_library(
"//internal/gitserver",
"//internal/gitserver/gitdomain",
"//internal/rbac",
"//internal/repoupdater",
"//internal/repoupdater/protocol",
"//internal/timeutil",
"//internal/types",

View File

@ -11,7 +11,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
)
@ -20,8 +19,6 @@ type MockedChangesetSyncState struct {
DiffStat *diff.Stat
MockClient *gitserver.MockClient
mockRepoLookup func(protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)
}
// MockChangesetSyncState sets up mocks such that invoking SetDerivedState() with
@ -33,14 +30,6 @@ func MockChangesetSyncState(repo *protocol.RepoInfo) *MockedChangesetSyncState {
state := &MockedChangesetSyncState{
// This diff.Stat matches the testGitHubDiff below
DiffStat: &diff.Stat{Added: 2, Deleted: 4},
mockRepoLookup: repoupdater.MockRepoLookup,
}
repoupdater.MockRepoLookup = func(args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
return &protocol.RepoLookupResult{
Repo: repo,
}, nil
}
gitserverClient := gitserver.NewMockClient()
@ -86,8 +75,3 @@ func generateFakeCommitID() string {
return commitID
}
// Unmock resets the mocks set up by MockGitHubChangesetSync.
func (state *MockedChangesetSyncState) Unmock() {
repoupdater.MockRepoLookup = state.mockRepoLookup
}

View File

@ -62,7 +62,6 @@ go_test(
"//internal/database/dbmocks",
"//internal/gitserver",
"//internal/observation",
"//internal/repoupdater/protocol",
"//internal/types",
"//lib/codeintel/autoindex/config",
"@com_github_google_go_cmp//cmp",

View File

@ -17,10 +17,6 @@ type (
InferenceService = jobselector.InferenceService
)
type RepoUpdaterClient interface {
dependencies.RepoUpdaterClient
}
type UploadService interface {
dependencies.UploadService
summary.UploadService

View File

@ -72,7 +72,6 @@ func NewDependencyIndexSchedulers(
uploadSvc UploadService,
depsSvc DependenciesService,
autoindexingSvc *Service,
repoUpdater RepoUpdaterClient,
) []goroutine.BackgroundRoutine {
return background.NewDependencyIndexSchedulers(
scopedContext("dependencies", observationCtx),
@ -81,7 +80,6 @@ func NewDependencyIndexSchedulers(
depsSvc,
autoindexingSvc.store,
autoindexingSvc.indexEnqueuer,
repoUpdater,
DependenciesConfigInst,
)
}

View File

@ -28,14 +28,11 @@ go_library(
"//internal/conf/reposource",
"//internal/database",
"//internal/database/dbutil",
"//internal/dotcom",
"//internal/env",
"//internal/errcode",
"//internal/executor",
"//internal/extsvc",
"//internal/observation",
"//internal/packagefilters",
"//internal/repoupdater/protocol",
"//internal/types",
"//internal/workerutil",
"//internal/workerutil/dbworker",
@ -77,7 +74,6 @@ go_test(
"//internal/executor",
"//internal/extsvc",
"//internal/observation",
"//internal/repoupdater/protocol",
"//internal/types",
"//internal/workerutil",
"//internal/workerutil/dbworker/store",

View File

@ -8,7 +8,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/codeintel/uploads/shared"
uploadsshared "github.com/sourcegraph/sourcegraph/internal/codeintel/uploads/shared"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/internal/types"
)
@ -35,10 +34,6 @@ type IndexEnqueuer interface {
QueueIndexesForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) (err error)
}
type RepoUpdaterClient interface {
RepoLookup(ctx context.Context, args protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)
}
type UploadService interface {
GetUploadByID(ctx context.Context, id int) (shared.Upload, bool, error)
ReferencesForUpload(ctx context.Context, uploadID int) (shared.PackageReferenceScanner, error)

View File

@ -17,9 +17,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
"github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker"
@ -36,7 +33,6 @@ func NewDependencyIndexingScheduler(
externalServiceStore ExternalServiceStore,
gitserverRepoStore GitserverRepoStore,
indexEnqueuer IndexEnqueuer,
repoUpdater RepoUpdaterClient,
metrics workerutil.WorkerObservability,
config *Config,
) *workerutil.Worker[dependencyIndexingJob] {
@ -49,7 +45,6 @@ func NewDependencyIndexingScheduler(
gitserverRepoStore: gitserverRepoStore,
indexEnqueuer: indexEnqueuer,
workerStore: dependencyIndexingStore,
repoUpdater: repoUpdater,
}
return dbworker.NewWorker[dependencyIndexingJob](rootContext, dependencyIndexingStore, handler, workerutil.WorkerOptions{
@ -69,7 +64,6 @@ type dependencyIndexingSchedulerHandler struct {
extsvcStore ExternalServiceStore
gitserverRepoStore GitserverRepoStore
workerStore dbworkerstore.Store[dependencyIndexingJob]
repoUpdater RepoUpdaterClient
}
const requeueBackoff = time.Second * 30
@ -188,18 +182,8 @@ func (h *dependencyIndexingSchedulerHandler) Handle(ctx context.Context, logger
// otherwise skip them.
difference := setDifference(repoNames, listedRepoNames)
if dotcom.SourcegraphDotComMode() {
for _, repo := range difference {
if _, err := h.repoUpdater.RepoLookup(ctx, protocol.RepoLookupArgs{Repo: repo}); errcode.IsNotFound(err) {
delete(repoToPackages, repo)
} else if err != nil {
return errors.Wrapf(err, "repoUpdater.RepoLookup", "repo", repo)
}
}
} else {
for _, repo := range difference {
delete(repoToPackages, repo)
}
for _, repo := range difference {
delete(repoToPackages, repo)
}
}

View File

@ -21,115 +21,6 @@ func TestDependencyIndexingSchedulerHandler(t *testing.T) {
mockUploadsSvc := NewMockUploadService()
mockRepoStore := NewMockReposStore()
mockExtSvcStore := NewMockExternalServiceStore()
mockRepoUpdater := NewMockRepoUpdaterClient()
mockScanner := NewMockPackageReferenceScanner()
mockWorkerStore := NewMockWorkerStore[dependencyIndexingJob]()
mockRepoStore.ListMinimalReposFunc.PushReturn([]types.MinimalRepo{
{
ID: 0,
Name: "",
Stars: 0,
},
}, nil)
mockUploadsSvc.GetUploadByIDFunc.SetDefaultReturn(shared.Upload{ID: 42, RepositoryID: 50, Indexer: "lsif-go"}, true, nil)
mockUploadsSvc.ReferencesForUploadFunc.SetDefaultReturn(mockScanner, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/sample/text", Version: "v2.2.0"}}, true, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/sample/text", Version: "v3.2.0"}}, true, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/cheese/burger", Version: "v3.2.2"}}, true, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/cheese/burger", Version: "v2.2.1"}}, true, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/cheese/burger", Version: "v4.2.3"}}, true, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/sample/text", Version: "v1.2.0"}}, true, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/banana/world", Version: "v0.0.1"}}, true, nil)
mockScanner.NextFunc.SetDefaultReturn(shared.PackageReference{}, false, nil)
mockGitserverReposStore := NewMockGitserverRepoStore()
mockGitserverReposStore.GetByNamesFunc.PushReturn(map[api.RepoName]*types.GitserverRepo{
"github.com/sample/text": {
CloneStatus: types.CloneStatusCloned,
},
"github.com/cheese/burger": {
CloneStatus: types.CloneStatusCloned,
},
"github.com/banana/world": {
CloneStatus: types.CloneStatusCloned,
},
}, nil)
indexEnqueuer := NewMockIndexEnqueuer()
dotcom.MockSourcegraphDotComMode(t, true)
handler := &dependencyIndexingSchedulerHandler{
uploadsSvc: mockUploadsSvc,
repoStore: mockRepoStore,
indexEnqueuer: indexEnqueuer,
extsvcStore: mockExtSvcStore,
workerStore: mockWorkerStore,
gitserverRepoStore: mockGitserverReposStore,
repoUpdater: mockRepoUpdater,
}
logger := logtest.Scoped(t)
job := dependencyIndexingJob{
UploadID: 42,
ExternalServiceKind: "",
ExternalServiceSync: time.Time{},
}
if err := handler.Handle(context.Background(), logger, job); err != nil {
t.Fatalf("unexpected error performing update: %s", err)
}
if len(mockExtSvcStore.ListFunc.History()) != 0 {
t.Errorf("unexpected number of calls to extsvcStore.List. want=%d have=%d", 0, len(mockExtSvcStore.ListFunc.History()))
}
if len(indexEnqueuer.QueueIndexesForPackageFunc.History()) != 7 {
t.Errorf("unexpected number of calls to QueueIndexesForPackage. want=%d have=%d", 6, len(indexEnqueuer.QueueIndexesForPackageFunc.History()))
} else {
var packages []dependencies.MinimialVersionedPackageRepo
for _, call := range indexEnqueuer.QueueIndexesForPackageFunc.History() {
packages = append(packages, call.Arg1)
}
sort.Slice(packages, func(i, j int) bool {
for _, pair := range [][2]string{
{packages[i].Scheme, packages[j].Scheme},
{string(packages[i].Name), string(packages[j].Name)},
{packages[i].Version, packages[j].Version},
} {
if pair[0] < pair[1] {
return true
}
if pair[1] < pair[0] {
break
}
}
return false
})
expectedPackages := []dependencies.MinimialVersionedPackageRepo{
{Scheme: "gomod", Name: "https://github.com/banana/world", Version: "v0.0.1"},
{Scheme: "gomod", Name: "https://github.com/cheese/burger", Version: "v2.2.1"},
{Scheme: "gomod", Name: "https://github.com/cheese/burger", Version: "v3.2.2"},
{Scheme: "gomod", Name: "https://github.com/cheese/burger", Version: "v4.2.3"},
{Scheme: "gomod", Name: "https://github.com/sample/text", Version: "v1.2.0"},
{Scheme: "gomod", Name: "https://github.com/sample/text", Version: "v2.2.0"},
{Scheme: "gomod", Name: "https://github.com/sample/text", Version: "v3.2.0"},
}
if diff := cmp.Diff(expectedPackages, packages); diff != "" {
t.Errorf("unexpected packages (-want +got):\n%s", diff)
}
}
}
func TestDependencyIndexingSchedulerHandlerCustomer(t *testing.T) {
mockUploadsSvc := NewMockUploadService()
mockRepoStore := NewMockReposStore()
mockExtSvcStore := NewMockExternalServiceStore()
mockRepoUpdater := NewMockRepoUpdaterClient()
mockScanner := NewMockPackageReferenceScanner()
mockWorkerStore := NewMockWorkerStore[dependencyIndexingJob]()
mockUploadsSvc.GetUploadByIDFunc.SetDefaultReturn(shared.Upload{ID: 42, RepositoryID: 50, Indexer: "lsif-go"}, true, nil)
@ -170,7 +61,6 @@ func TestDependencyIndexingSchedulerHandlerCustomer(t *testing.T) {
extsvcStore: mockExtSvcStore,
workerStore: mockWorkerStore,
gitserverRepoStore: mockGitserverReposStore,
repoUpdater: mockRepoUpdater,
}
logger := logtest.Scoped(t)
@ -183,10 +73,6 @@ func TestDependencyIndexingSchedulerHandlerCustomer(t *testing.T) {
t.Fatalf("unexpected error performing update: %s", err)
}
if len(mockRepoUpdater.RepoLookupFunc.History()) != 0 {
t.Errorf("unexpected number of calls to repoUpdater.RepoLookup. want=%d have=%d", 0, len(mockRepoUpdater.RepoLookupFunc.History()))
}
if len(mockExtSvcStore.ListFunc.History()) != 0 {
t.Errorf("unexpected number of calls to extsvcStore.List. want=%d have=%d", 0, len(mockExtSvcStore.ListFunc.History()))
}
@ -233,7 +119,6 @@ func TestDependencyIndexingSchedulerHandlerRequeueNotCloned(t *testing.T) {
mockUploadsSvc := NewMockUploadService()
mockRepoStore := NewMockReposStore()
mockExtSvcStore := NewMockExternalServiceStore()
mockRepoUpdater := NewMockRepoUpdaterClient()
mockScanner := NewMockPackageReferenceScanner()
mockWorkerStore := NewMockWorkerStore[dependencyIndexingJob]()
mockUploadsSvc.GetUploadByIDFunc.SetDefaultReturn(shared.Upload{ID: 42, RepositoryID: 50, Indexer: "lsif-go"}, true, nil)
@ -264,7 +149,6 @@ func TestDependencyIndexingSchedulerHandlerRequeueNotCloned(t *testing.T) {
extsvcStore: mockExtSvcStore,
gitserverRepoStore: mockGitserverReposStore,
workerStore: mockWorkerStore,
repoUpdater: mockRepoUpdater,
}
job := dependencyIndexingJob{
@ -290,68 +174,6 @@ func TestDependencyIndexingSchedulerHandlerRequeueNotCloned(t *testing.T) {
}
}
func TestDependencyIndexingSchedulerHandlerSkipNonExistant(t *testing.T) {
mockUploadsSvc := NewMockUploadService()
mockExtSvcStore := NewMockExternalServiceStore()
mockRepoUpdater := NewMockRepoUpdaterClient()
mockScanner := NewMockPackageReferenceScanner()
mockWorkerStore := NewMockWorkerStore[dependencyIndexingJob]()
mockRepoStore := NewMockReposStore()
mockUploadsSvc.GetUploadByIDFunc.SetDefaultReturn(shared.Upload{ID: 42, RepositoryID: 50, Indexer: "lsif-go"}, true, nil)
mockUploadsSvc.ReferencesForUploadFunc.SetDefaultReturn(mockScanner, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/sample/text", Version: "v3.2.0"}}, true, nil)
mockScanner.NextFunc.PushReturn(shared.PackageReference{Package: shared.Package{UploadID: 42, Scheme: "gomod", Name: "https://github.com/cheese/burger", Version: "v4.2.3"}}, true, nil)
mockScanner.NextFunc.SetDefaultReturn(shared.PackageReference{}, false, nil)
mockGitserverReposStore := NewMockGitserverRepoStore()
mockGitserverReposStore.GetByNamesFunc.PushReturn(map[api.RepoName]*types.GitserverRepo{
"github.com/sample/text": {
CloneStatus: types.CloneStatusCloned,
},
"github.com/cheese/burger": {
CloneStatus: types.CloneStatusNotCloned,
},
}, nil)
indexEnqueuer := NewMockIndexEnqueuer()
dotcom.MockSourcegraphDotComMode(t, true)
handler := &dependencyIndexingSchedulerHandler{
uploadsSvc: mockUploadsSvc,
indexEnqueuer: indexEnqueuer,
extsvcStore: mockExtSvcStore,
gitserverRepoStore: mockGitserverReposStore,
workerStore: mockWorkerStore,
repoUpdater: mockRepoUpdater,
repoStore: mockRepoStore,
}
job := dependencyIndexingJob{
UploadID: 42,
ExternalServiceKind: "",
ExternalServiceSync: time.Time{},
}
logger := logtest.Scoped(t)
if err := handler.Handle(context.Background(), logger, job); err != nil {
t.Fatalf("unexpected error performing update: %s", err)
}
if len(mockWorkerStore.RequeueFunc.History()) != 0 {
t.Errorf("unexpected number of calls to Requeue. want=%d have=%d", 0, len(mockWorkerStore.RequeueFunc.History()))
}
if len(mockExtSvcStore.ListFunc.History()) != 0 {
t.Errorf("unexpected number of calls to extsvcStore.List. want=%d have=%d", 0, len(mockExtSvcStore.ListFunc.History()))
}
if len(indexEnqueuer.QueueIndexesForPackageFunc.History()) != 1 {
t.Errorf("unexpected number of calls to QueueIndexesForPackage. want=%d have=%d", 1, len(indexEnqueuer.QueueIndexesForPackageFunc.History()))
}
}
func TestDependencyIndexingSchedulerHandlerShouldSkipRepository(t *testing.T) {
mockUploadsSvc := NewMockUploadService()
mockExtSvcStore := NewMockExternalServiceStore()

View File

@ -21,7 +21,6 @@ import (
database "github.com/sourcegraph/sourcegraph/internal/database"
basestore "github.com/sourcegraph/sourcegraph/internal/database/basestore"
executor "github.com/sourcegraph/sourcegraph/internal/executor"
protocol "github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
types "github.com/sourcegraph/sourcegraph/internal/types"
workerutil "github.com/sourcegraph/sourcegraph/internal/workerutil"
store1 "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
@ -1055,162 +1054,6 @@ func (c IndexEnqueuerQueueIndexesForPackageFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// MockRepoUpdaterClient is a mock implementation of the RepoUpdaterClient
// interface (from the package
// github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing/internal/background/dependencies)
// used for unit testing.
type MockRepoUpdaterClient struct {
// RepoLookupFunc is an instance of a mock function object controlling
// the behavior of the method RepoLookup.
RepoLookupFunc *RepoUpdaterClientRepoLookupFunc
}
// NewMockRepoUpdaterClient creates a new mock of the RepoUpdaterClient
// interface. All methods return zero values for all results, unless
// overwritten.
func NewMockRepoUpdaterClient() *MockRepoUpdaterClient {
return &MockRepoUpdaterClient{
RepoLookupFunc: &RepoUpdaterClientRepoLookupFunc{
defaultHook: func(context.Context, protocol.RepoLookupArgs) (r0 *protocol.RepoLookupResult, r1 error) {
return
},
},
}
}
// NewStrictMockRepoUpdaterClient creates a new mock of the
// RepoUpdaterClient interface. All methods panic on invocation, unless
// overwritten.
func NewStrictMockRepoUpdaterClient() *MockRepoUpdaterClient {
return &MockRepoUpdaterClient{
RepoLookupFunc: &RepoUpdaterClientRepoLookupFunc{
defaultHook: func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
panic("unexpected invocation of MockRepoUpdaterClient.RepoLookup")
},
},
}
}
// NewMockRepoUpdaterClientFrom creates a new mock of the
// MockRepoUpdaterClient interface. All methods delegate to the given
// implementation, unless overwritten.
func NewMockRepoUpdaterClientFrom(i RepoUpdaterClient) *MockRepoUpdaterClient {
return &MockRepoUpdaterClient{
RepoLookupFunc: &RepoUpdaterClientRepoLookupFunc{
defaultHook: i.RepoLookup,
},
}
}
// RepoUpdaterClientRepoLookupFunc describes the behavior when the
// RepoLookup method of the parent MockRepoUpdaterClient instance is
// invoked.
type RepoUpdaterClientRepoLookupFunc struct {
defaultHook func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)
hooks []func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)
history []RepoUpdaterClientRepoLookupFuncCall
mutex sync.Mutex
}
// RepoLookup delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockRepoUpdaterClient) RepoLookup(v0 context.Context, v1 protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
r0, r1 := m.RepoLookupFunc.nextHook()(v0, v1)
m.RepoLookupFunc.appendCall(RepoUpdaterClientRepoLookupFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the RepoLookup method of
// the parent MockRepoUpdaterClient instance is invoked and the hook queue
// is empty.
func (f *RepoUpdaterClientRepoLookupFunc) SetDefaultHook(hook func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// RepoLookup method of the parent MockRepoUpdaterClient instance invokes
// the hook at the front of the queue and discards it. After the queue is
// empty, the default hook function is invoked for any future action.
func (f *RepoUpdaterClientRepoLookupFunc) PushHook(hook func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *RepoUpdaterClientRepoLookupFunc) SetDefaultReturn(r0 *protocol.RepoLookupResult, r1 error) {
f.SetDefaultHook(func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *RepoUpdaterClientRepoLookupFunc) PushReturn(r0 *protocol.RepoLookupResult, r1 error) {
f.PushHook(func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
return r0, r1
})
}
// nextHook pops the hook at the front of the queue, falling back to the
// default hook once the queue is empty. Guarded by f.mutex for concurrent use.
func (f *RepoUpdaterClientRepoLookupFunc) nextHook() func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	if len(f.hooks) == 0 {
		return f.defaultHook
	}
	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}
// appendCall records a single invocation so it can later be inspected via History.
func (f *RepoUpdaterClientRepoLookupFunc) appendCall(r0 RepoUpdaterClientRepoLookupFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}
// History returns a sequence of RepoUpdaterClientRepoLookupFuncCall objects
// describing the invocations of this function.
func (f *RepoUpdaterClientRepoLookupFunc) History() []RepoUpdaterClientRepoLookupFuncCall {
f.mutex.Lock()
history := make([]RepoUpdaterClientRepoLookupFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// RepoUpdaterClientRepoLookupFuncCall is an object that describes an
// invocation of method RepoLookup on an instance of MockRepoUpdaterClient.
type RepoUpdaterClientRepoLookupFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 protocol.RepoLookupArgs
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 *protocol.RepoLookupResult
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c RepoUpdaterClientRepoLookupFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c RepoUpdaterClientRepoLookupFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// MockReposStore is a mock implementation of the ReposStore interface (from
// the package
// github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing/internal/background/dependencies)

View File

@ -56,7 +56,6 @@ func NewDependencyIndexSchedulers(
depsSvc dependencies.DependenciesService,
store store.Store,
indexEnqueuer dependencies.IndexEnqueuer,
repoUpdater dependencies.RepoUpdaterClient,
config *dependencies.Config,
) []goroutine.BackgroundRoutine {
metrics := dependencies.NewResetterMetrics(observationCtx)
@ -85,7 +84,6 @@ func NewDependencyIndexSchedulers(
externalServiceStore,
gitserverRepoStore,
indexEnqueuer,
repoUpdater,
workerutil.NewMetrics(observationCtx, "codeintel_dependency_index_queueing"),
config,
),

View File

@ -15,7 +15,6 @@ import (
store "github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing/internal/store"
shared "github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing/shared"
shared1 "github.com/sourcegraph/sourcegraph/internal/codeintel/uploads/shared"
protocol "github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
)
// MockStore is a mock implementation of the Store interface (from the
@ -2617,162 +2616,6 @@ func (c InferenceServiceInferIndexJobsFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// MockRepoUpdaterClient is a mock implementation of the RepoUpdaterClient
// interface (from the package
// github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing) used
// for unit testing.
type MockRepoUpdaterClient struct {
// RepoLookupFunc is an instance of a mock function object controlling
// the behavior of the method RepoLookup.
RepoLookupFunc *RepoUpdaterClientRepoLookupFunc
}
// NewMockRepoUpdaterClient creates a new mock of the RepoUpdaterClient
// interface. All methods return zero values for all results, unless
// overwritten.
func NewMockRepoUpdaterClient() *MockRepoUpdaterClient {
return &MockRepoUpdaterClient{
RepoLookupFunc: &RepoUpdaterClientRepoLookupFunc{
defaultHook: func(context.Context, protocol.RepoLookupArgs) (r0 *protocol.RepoLookupResult, r1 error) {
return
},
},
}
}
// NewStrictMockRepoUpdaterClient creates a new mock of the
// RepoUpdaterClient interface. All methods panic on invocation, unless
// overwritten.
func NewStrictMockRepoUpdaterClient() *MockRepoUpdaterClient {
return &MockRepoUpdaterClient{
RepoLookupFunc: &RepoUpdaterClientRepoLookupFunc{
defaultHook: func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
panic("unexpected invocation of MockRepoUpdaterClient.RepoLookup")
},
},
}
}
// NewMockRepoUpdaterClientFrom creates a new mock of the
// MockRepoUpdaterClient interface. All methods delegate to the given
// implementation, unless overwritten.
func NewMockRepoUpdaterClientFrom(i RepoUpdaterClient) *MockRepoUpdaterClient {
return &MockRepoUpdaterClient{
RepoLookupFunc: &RepoUpdaterClientRepoLookupFunc{
defaultHook: i.RepoLookup,
},
}
}
// RepoUpdaterClientRepoLookupFunc describes the behavior when the
// RepoLookup method of the parent MockRepoUpdaterClient instance is
// invoked.
type RepoUpdaterClientRepoLookupFunc struct {
defaultHook func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)
hooks []func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)
history []RepoUpdaterClientRepoLookupFuncCall
mutex sync.Mutex
}
// RepoLookup delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockRepoUpdaterClient) RepoLookup(v0 context.Context, v1 protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
r0, r1 := m.RepoLookupFunc.nextHook()(v0, v1)
m.RepoLookupFunc.appendCall(RepoUpdaterClientRepoLookupFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the RepoLookup method of
// the parent MockRepoUpdaterClient instance is invoked and the hook queue
// is empty.
func (f *RepoUpdaterClientRepoLookupFunc) SetDefaultHook(hook func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// RepoLookup method of the parent MockRepoUpdaterClient instance invokes
// the hook at the front of the queue and discards it. After the queue is
// empty, the default hook function is invoked for any future action.
func (f *RepoUpdaterClientRepoLookupFunc) PushHook(hook func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *RepoUpdaterClientRepoLookupFunc) SetDefaultReturn(r0 *protocol.RepoLookupResult, r1 error) {
f.SetDefaultHook(func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *RepoUpdaterClientRepoLookupFunc) PushReturn(r0 *protocol.RepoLookupResult, r1 error) {
f.PushHook(func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
return r0, r1
})
}
// nextHook pops the hook at the front of the queue, falling back to the
// default hook once the queue is empty. Guarded by f.mutex for concurrent use.
func (f *RepoUpdaterClientRepoLookupFunc) nextHook() func(context.Context, protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	if len(f.hooks) == 0 {
		return f.defaultHook
	}
	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}
// appendCall records a single invocation so it can later be inspected via History.
func (f *RepoUpdaterClientRepoLookupFunc) appendCall(r0 RepoUpdaterClientRepoLookupFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}
// History returns a sequence of RepoUpdaterClientRepoLookupFuncCall objects
// describing the invocations of this function.
func (f *RepoUpdaterClientRepoLookupFunc) History() []RepoUpdaterClientRepoLookupFuncCall {
f.mutex.Lock()
history := make([]RepoUpdaterClientRepoLookupFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// RepoUpdaterClientRepoLookupFuncCall is an object that describes an
// invocation of method RepoLookup on an instance of MockRepoUpdaterClient.
type RepoUpdaterClientRepoLookupFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 protocol.RepoLookupArgs
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 *protocol.RepoLookupResult
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c RepoUpdaterClientRepoLookupFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c RepoUpdaterClientRepoLookupFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// MockUploadService is a mock implementation of the UploadService interface
// (from the package
// github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing) used

View File

@ -318,7 +318,6 @@ go_test(
"@com_github_sourcegraph_zoekt//:zoekt",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@com_github_tidwall_gjson//:gjson",
"@org_golang_google_protobuf//proto",
"@org_golang_google_protobuf//testing/protocmp",
"@org_golang_google_protobuf//types/known/structpb",

View File

@ -1,27 +0,0 @@
load("//dev:go_defs.bzl", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "dbcache",
srcs = ["cached_indexable_repos.go"],
importpath = "github.com/sourcegraph/sourcegraph/internal/database/dbcache",
visibility = ["//:__subpackages__"],
deps = [
"//internal/database",
"//internal/types",
"//lib/errors",
"@com_github_sourcegraph_log//:log",
],
)
go_test(
name = "dbcache_test",
timeout = "short",
srcs = ["cached_indexable_repos_test.go"],
embed = [":dbcache"],
deps = [
"//internal/database",
"//internal/database/dbtest",
"@com_github_sourcegraph_log//logtest",
],
)

View File

@ -1,120 +0,0 @@
package dbcache
import (
"context"
"sync"
"sync/atomic"
"time"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// indexableReposMaxAge is how long we cache the list of indexable repos. The list
// changes very rarely, so we can cache for a while.
const indexableReposMaxAge = time.Minute
// cachedRepos is an immutable snapshot of the indexable repo list together
// with the time it was fetched from the database.
type cachedRepos struct {
	// minimalRepos is the cached repo list; nil means "never fetched".
	minimalRepos []types.MinimalRepo
	// fetched is when minimalRepos was last loaded from the store.
	fetched time.Time
}

// repos returns the current cached repos and boolean value indicating
// whether an update is required
//
// A nil receiver or a nil repo slice is treated as an empty, stale cache.
// The returned slice is a fresh copy, so callers may mutate it freely.
func (c *cachedRepos) repos() ([]types.MinimalRepo, bool) {
	// Safe on a nil *cachedRepos: atomic.Value.Load may not have stored anything yet.
	if c == nil {
		return nil, true
	}
	if c.minimalRepos == nil {
		return nil, true
	}
	// Copy the slice and report staleness based on cache age.
	return append([]types.MinimalRepo{}, c.minimalRepos...), time.Since(c.fetched) > indexableReposMaxAge
}
var globalReposCache = reposCache{}
// NewIndexableReposLister returns a lister backed by the package-level
// globalReposCache, so every instance shares one cache of indexable repos.
func NewIndexableReposLister(logger log.Logger, store database.RepoStore) *IndexableReposLister {
	return &IndexableReposLister{
		logger:     logger,
		store:      store,
		reposCache: &globalReposCache,
	}
}
// reposCache holds the cached repo snapshot and serializes refreshes.
type reposCache struct {
	// cacheAllRepos stores a *cachedRepos; reads are lock-free via atomic.Value.
	cacheAllRepos atomic.Value
	// mu guards refreshCache so only one goroutine queries the database at a time.
	mu sync.Mutex
}

// IndexableReposLister holds the list of indexable repos which are cached for
// indexableReposMaxAge.
type IndexableReposLister struct {
	logger log.Logger
	store  database.RepoStore
	// Embedded pointer so all listers created via NewIndexableReposLister
	// share the same underlying cache.
	*reposCache
}
// List lists ALL indexable repos. These include all repos with a minimum number of stars.
//
// The values are cached for up to indexableReposMaxAge. If the cache has expired, we return
// stale data and start a background refresh.
func (s *IndexableReposLister) List(ctx context.Context) (results []types.MinimalRepo, err error) {
	cache := &(s.cacheAllRepos)
	// Type assertion deliberately ignores failure: an empty atomic.Value
	// yields nil, which cachedRepos.repos handles as "needs update".
	cached, _ := cache.Load().(*cachedRepos)
	repos, needsUpdate := cached.repos()
	if !needsUpdate {
		return repos, nil
	}
	// We don't have any repos yet, fetch them
	if len(repos) == 0 {
		return s.refreshCache(ctx)
	}
	// We have existing repos, return the stale data and start background refresh
	go func() {
		// Detach from the caller's context so the refresh survives the request,
		// but bound it so a stuck query cannot leak this goroutine forever.
		newCtx, cancel := context.WithTimeout(context.Background(), 2*time.Minute)
		defer cancel()
		_, err := s.refreshCache(newCtx)
		if err != nil {
			s.logger.Error("Refreshing indexable repos cache", log.Error(err))
		}
	}()
	return repos, nil
}
// refreshCache reloads the indexable repo list from the database and stores a
// fresh snapshot in the cache. s.mu ensures at most one concurrent refresh;
// losers of the race return the snapshot written by the winner.
func (s *IndexableReposLister) refreshCache(ctx context.Context) ([]types.MinimalRepo, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	cache := &(s.cacheAllRepos)
	// Check whether another routine already did the work
	cached, _ := cache.Load().(*cachedRepos)
	repos, needsUpdate := cached.repos()
	if !needsUpdate {
		return repos, nil
	}
	opts := database.ListSourcegraphDotComIndexableReposOptions{
		// Zoekt can only index a repo which has been cloned.
		CloneStatus: types.CloneStatusCloned,
	}
	repos, err := s.store.ListSourcegraphDotComIndexableRepos(ctx, opts)
	if err != nil {
		return nil, errors.Wrap(err, "querying for indexable repos")
	}
	cache.Store(&cachedRepos{
		// Copy since repos will be mutated by the caller
		minimalRepos: append([]types.MinimalRepo{}, repos...),
		fetched:      time.Now(),
	})
	return repos, nil
}

View File

@ -1,30 +0,0 @@
package dbcache
import (
"context"
"testing"
"github.com/sourcegraph/log/logtest"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
)
// BenchmarkIndexableRepos_List_Empty measures List against an empty database,
// i.e. the cost of a cache miss followed by a refresh that finds no repos.
func BenchmarkIndexableRepos_List_Empty(b *testing.B) {
	logger := logtest.Scoped(b)
	db := database.NewDB(logger, dbtest.NewDB(b))

	ctx := context.Background()

	b.ResetTimer()
	for range b.N {
		// A fresh lister per iteration forces the cache-miss path each time.
		if _, err := NewIndexableReposLister(logger, db.Repos()).List(ctx); err != nil {
			b.Fatal(err)
		}
	}
}

View File

@ -37099,9 +37099,6 @@ type MockGitserverRepoStore struct {
// ListPurgeableReposFunc is an instance of a mock function object
// controlling the behavior of the method ListPurgeableRepos.
ListPurgeableReposFunc *GitserverRepoStoreListPurgeableReposFunc
// ListReposWithLastErrorFunc is an instance of a mock function object
// controlling the behavior of the method ListReposWithLastError.
ListReposWithLastErrorFunc *GitserverRepoStoreListReposWithLastErrorFunc
// LogCorruptionFunc is an instance of a mock function object
// controlling the behavior of the method LogCorruption.
LogCorruptionFunc *GitserverRepoStoreLogCorruptionFunc
@ -37120,10 +37117,6 @@ type MockGitserverRepoStore struct {
// SetRepoSizeFunc is an instance of a mock function object controlling
// the behavior of the method SetRepoSize.
SetRepoSizeFunc *GitserverRepoStoreSetRepoSizeFunc
// TotalErroredCloudDefaultReposFunc is an instance of a mock function
// object controlling the behavior of the method
// TotalErroredCloudDefaultRepos.
TotalErroredCloudDefaultReposFunc *GitserverRepoStoreTotalErroredCloudDefaultReposFunc
// UpdateFunc is an instance of a mock function object controlling the
// behavior of the method Update.
UpdateFunc *GitserverRepoStoreUpdateFunc
@ -37180,11 +37173,6 @@ func NewMockGitserverRepoStore() *MockGitserverRepoStore {
return
},
},
ListReposWithLastErrorFunc: &GitserverRepoStoreListReposWithLastErrorFunc{
defaultHook: func(context.Context) (r0 []api.RepoName, r1 error) {
return
},
},
LogCorruptionFunc: &GitserverRepoStoreLogCorruptionFunc{
defaultHook: func(context.Context, api.RepoName, string, string) (r0 error) {
return
@ -37215,11 +37203,6 @@ func NewMockGitserverRepoStore() *MockGitserverRepoStore {
return
},
},
TotalErroredCloudDefaultReposFunc: &GitserverRepoStoreTotalErroredCloudDefaultReposFunc{
defaultHook: func(context.Context) (r0 int, r1 error) {
return
},
},
UpdateFunc: &GitserverRepoStoreUpdateFunc{
defaultHook: func(context.Context, ...*types.GitserverRepo) (r0 error) {
return
@ -37283,11 +37266,6 @@ func NewStrictMockGitserverRepoStore() *MockGitserverRepoStore {
panic("unexpected invocation of MockGitserverRepoStore.ListPurgeableRepos")
},
},
ListReposWithLastErrorFunc: &GitserverRepoStoreListReposWithLastErrorFunc{
defaultHook: func(context.Context) ([]api.RepoName, error) {
panic("unexpected invocation of MockGitserverRepoStore.ListReposWithLastError")
},
},
LogCorruptionFunc: &GitserverRepoStoreLogCorruptionFunc{
defaultHook: func(context.Context, api.RepoName, string, string) error {
panic("unexpected invocation of MockGitserverRepoStore.LogCorruption")
@ -37318,11 +37296,6 @@ func NewStrictMockGitserverRepoStore() *MockGitserverRepoStore {
panic("unexpected invocation of MockGitserverRepoStore.SetRepoSize")
},
},
TotalErroredCloudDefaultReposFunc: &GitserverRepoStoreTotalErroredCloudDefaultReposFunc{
defaultHook: func(context.Context) (int, error) {
panic("unexpected invocation of MockGitserverRepoStore.TotalErroredCloudDefaultRepos")
},
},
UpdateFunc: &GitserverRepoStoreUpdateFunc{
defaultHook: func(context.Context, ...*types.GitserverRepo) error {
panic("unexpected invocation of MockGitserverRepoStore.Update")
@ -37370,9 +37343,6 @@ func NewMockGitserverRepoStoreFrom(i database.GitserverRepoStore) *MockGitserver
ListPurgeableReposFunc: &GitserverRepoStoreListPurgeableReposFunc{
defaultHook: i.ListPurgeableRepos,
},
ListReposWithLastErrorFunc: &GitserverRepoStoreListReposWithLastErrorFunc{
defaultHook: i.ListReposWithLastError,
},
LogCorruptionFunc: &GitserverRepoStoreLogCorruptionFunc{
defaultHook: i.LogCorruption,
},
@ -37391,9 +37361,6 @@ func NewMockGitserverRepoStoreFrom(i database.GitserverRepoStore) *MockGitserver
SetRepoSizeFunc: &GitserverRepoStoreSetRepoSizeFunc{
defaultHook: i.SetRepoSize,
},
TotalErroredCloudDefaultReposFunc: &GitserverRepoStoreTotalErroredCloudDefaultReposFunc{
defaultHook: i.TotalErroredCloudDefaultRepos,
},
UpdateFunc: &GitserverRepoStoreUpdateFunc{
defaultHook: i.Update,
},
@ -38287,115 +38254,6 @@ func (c GitserverRepoStoreListPurgeableReposFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// GitserverRepoStoreListReposWithLastErrorFunc controls the mock behavior of
// MockGitserverRepoStore.ListReposWithLastError: a queue of one-shot hooks, a
// default hook, and a history of recorded invocations, all guarded by mutex.
type GitserverRepoStoreListReposWithLastErrorFunc struct {
	defaultHook func(context.Context) ([]api.RepoName, error)
	hooks       []func(context.Context) ([]api.RepoName, error)
	history     []GitserverRepoStoreListReposWithLastErrorFuncCall
	mutex       sync.Mutex
}

// ListReposWithLastError dispatches to the next queued hook (or the default
// hook when the queue is empty) and records the call and its results.
func (m *MockGitserverRepoStore) ListReposWithLastError(ctx context.Context) ([]api.RepoName, error) {
	repos, err := m.ListReposWithLastErrorFunc.nextHook()(ctx)
	m.ListReposWithLastErrorFunc.appendCall(GitserverRepoStoreListReposWithLastErrorFuncCall{ctx, repos, err})
	return repos, err
}

// SetDefaultHook replaces the function invoked whenever the hook queue is
// empty.
func (f *GitserverRepoStoreListReposWithLastErrorFunc) SetDefaultHook(hook func(context.Context) ([]api.RepoName, error)) {
	f.defaultHook = hook
}

// PushHook enqueues a one-shot hook. Each invocation consumes the hook at the
// front of the queue; once the queue is empty, the default hook is used.
func (f *GitserverRepoStoreListReposWithLastErrorFunc) PushHook(hook func(context.Context) ([]api.RepoName, error)) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.hooks = append(f.hooks, hook)
}

// SetDefaultReturn is shorthand for SetDefaultHook with a function that
// returns the given values.
func (f *GitserverRepoStoreListReposWithLastErrorFunc) SetDefaultReturn(r0 []api.RepoName, r1 error) {
	f.SetDefaultHook(func(context.Context) ([]api.RepoName, error) {
		return r0, r1
	})
}

// PushReturn is shorthand for PushHook with a function that returns the given
// values.
func (f *GitserverRepoStoreListReposWithLastErrorFunc) PushReturn(r0 []api.RepoName, r1 error) {
	f.PushHook(func(context.Context) ([]api.RepoName, error) {
		return r0, r1
	})
}

// nextHook pops the front of the hook queue, falling back to the default hook
// when the queue is empty.
func (f *GitserverRepoStoreListReposWithLastErrorFunc) nextHook() func(context.Context) ([]api.RepoName, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	next := f.hooks[0]
	f.hooks = f.hooks[1:]
	return next
}

// appendCall records a single invocation in the history under the lock.
func (f *GitserverRepoStoreListReposWithLastErrorFunc) appendCall(call GitserverRepoStoreListReposWithLastErrorFuncCall) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.history = append(f.history, call)
}

// History returns a snapshot of all recorded invocations of
// ListReposWithLastError.
func (f *GitserverRepoStoreListReposWithLastErrorFunc) History() []GitserverRepoStoreListReposWithLastErrorFuncCall {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	return append([]GitserverRepoStoreListReposWithLastErrorFuncCall(nil), f.history...)
}

// GitserverRepoStoreListReposWithLastErrorFuncCall describes one invocation of
// ListReposWithLastError on a MockGitserverRepoStore.
type GitserverRepoStoreListReposWithLastErrorFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method invocation.
	Arg0 context.Context
	// Result0 is the value of the 1st result returned from this method invocation.
	Result0 []api.RepoName
	// Result1 is the value of the 2nd result returned from this method invocation.
	Result1 error
}

// Args returns the invocation's arguments as an interface slice.
func (c GitserverRepoStoreListReposWithLastErrorFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0}
}

// Results returns the invocation's results as an interface slice.
func (c GitserverRepoStoreListReposWithLastErrorFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}
// GitserverRepoStoreLogCorruptionFunc describes the behavior when the
// LogCorruption method of the parent MockGitserverRepoStore instance is
// invoked.
@ -39072,115 +38930,6 @@ func (c GitserverRepoStoreSetRepoSizeFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// GitserverRepoStoreTotalErroredCloudDefaultReposFunc controls the mock
// behavior of MockGitserverRepoStore.TotalErroredCloudDefaultRepos: a queue of
// one-shot hooks, a default hook, and a history of recorded invocations, all
// guarded by mutex.
type GitserverRepoStoreTotalErroredCloudDefaultReposFunc struct {
	defaultHook func(context.Context) (int, error)
	hooks       []func(context.Context) (int, error)
	history     []GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall
	mutex       sync.Mutex
}

// TotalErroredCloudDefaultRepos dispatches to the next queued hook (or the
// default hook when the queue is empty) and records the call and its results.
func (m *MockGitserverRepoStore) TotalErroredCloudDefaultRepos(ctx context.Context) (int, error) {
	total, err := m.TotalErroredCloudDefaultReposFunc.nextHook()(ctx)
	m.TotalErroredCloudDefaultReposFunc.appendCall(GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall{ctx, total, err})
	return total, err
}

// SetDefaultHook replaces the function invoked whenever the hook queue is
// empty.
func (f *GitserverRepoStoreTotalErroredCloudDefaultReposFunc) SetDefaultHook(hook func(context.Context) (int, error)) {
	f.defaultHook = hook
}

// PushHook enqueues a one-shot hook. Each invocation consumes the hook at the
// front of the queue; once the queue is empty, the default hook is used.
func (f *GitserverRepoStoreTotalErroredCloudDefaultReposFunc) PushHook(hook func(context.Context) (int, error)) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.hooks = append(f.hooks, hook)
}

// SetDefaultReturn is shorthand for SetDefaultHook with a function that
// returns the given values.
func (f *GitserverRepoStoreTotalErroredCloudDefaultReposFunc) SetDefaultReturn(r0 int, r1 error) {
	f.SetDefaultHook(func(context.Context) (int, error) {
		return r0, r1
	})
}

// PushReturn is shorthand for PushHook with a function that returns the given
// values.
func (f *GitserverRepoStoreTotalErroredCloudDefaultReposFunc) PushReturn(r0 int, r1 error) {
	f.PushHook(func(context.Context) (int, error) {
		return r0, r1
	})
}

// nextHook pops the front of the hook queue, falling back to the default hook
// when the queue is empty.
func (f *GitserverRepoStoreTotalErroredCloudDefaultReposFunc) nextHook() func(context.Context) (int, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	next := f.hooks[0]
	f.hooks = f.hooks[1:]
	return next
}

// appendCall records a single invocation in the history under the lock.
func (f *GitserverRepoStoreTotalErroredCloudDefaultReposFunc) appendCall(call GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.history = append(f.history, call)
}

// History returns a snapshot of all recorded invocations of
// TotalErroredCloudDefaultRepos.
func (f *GitserverRepoStoreTotalErroredCloudDefaultReposFunc) History() []GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	return append([]GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall(nil), f.history...)
}

// GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall describes one
// invocation of TotalErroredCloudDefaultRepos on a MockGitserverRepoStore.
type GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method invocation.
	Arg0 context.Context
	// Result0 is the value of the 1st result returned from this method invocation.
	Result0 int
	// Result1 is the value of the 2nd result returned from this method invocation.
	Result1 error
}

// Args returns the invocation's arguments as an interface slice.
func (c GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0}
}

// Results returns the invocation's results as an interface slice.
func (c GitserverRepoStoreTotalErroredCloudDefaultReposFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}
// GitserverRepoStoreUpdateFunc describes the behavior when the Update
// method of the parent MockGitserverRepoStore instance is invoked.
type GitserverRepoStoreUpdateFunc struct {
@ -60070,10 +59819,6 @@ type MockRepoStore struct {
// ListMinimalReposFunc is an instance of a mock function object
// controlling the behavior of the method ListMinimalRepos.
ListMinimalReposFunc *RepoStoreListMinimalReposFunc
// ListSourcegraphDotComIndexableReposFunc is an instance of a mock
// function object controlling the behavior of the method
// ListSourcegraphDotComIndexableRepos.
ListSourcegraphDotComIndexableReposFunc *RepoStoreListSourcegraphDotComIndexableReposFunc
// MetadataFunc is an instance of a mock function object controlling the
// behavior of the method Metadata.
MetadataFunc *RepoStoreMetadataFunc
@ -60173,11 +59918,6 @@ func NewMockRepoStore() *MockRepoStore {
return
},
},
ListSourcegraphDotComIndexableReposFunc: &RepoStoreListSourcegraphDotComIndexableReposFunc{
defaultHook: func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) (r0 []types.MinimalRepo, r1 error) {
return
},
},
MetadataFunc: &RepoStoreMetadataFunc{
defaultHook: func(context.Context, ...api.RepoID) (r0 []*types.SearchedRepo, r1 error) {
return
@ -60290,11 +60030,6 @@ func NewStrictMockRepoStore() *MockRepoStore {
panic("unexpected invocation of MockRepoStore.ListMinimalRepos")
},
},
ListSourcegraphDotComIndexableReposFunc: &RepoStoreListSourcegraphDotComIndexableReposFunc{
defaultHook: func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error) {
panic("unexpected invocation of MockRepoStore.ListSourcegraphDotComIndexableRepos")
},
},
MetadataFunc: &RepoStoreMetadataFunc{
defaultHook: func(context.Context, ...api.RepoID) ([]*types.SearchedRepo, error) {
panic("unexpected invocation of MockRepoStore.Metadata")
@ -60377,9 +60112,6 @@ func NewMockRepoStoreFrom(i database.RepoStore) *MockRepoStore {
ListMinimalReposFunc: &RepoStoreListMinimalReposFunc{
defaultHook: i.ListMinimalRepos,
},
ListSourcegraphDotComIndexableReposFunc: &RepoStoreListSourcegraphDotComIndexableReposFunc{
defaultHook: i.ListSourcegraphDotComIndexableRepos,
},
MetadataFunc: &RepoStoreMetadataFunc{
defaultHook: i.Metadata,
},
@ -62035,119 +61767,6 @@ func (c RepoStoreListMinimalReposFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// RepoStoreListSourcegraphDotComIndexableReposFunc controls the mock behavior
// of MockRepoStore.ListSourcegraphDotComIndexableRepos: a queue of one-shot
// hooks, a default hook, and a history of recorded invocations, all guarded by
// mutex.
type RepoStoreListSourcegraphDotComIndexableReposFunc struct {
	defaultHook func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error)
	hooks       []func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error)
	history     []RepoStoreListSourcegraphDotComIndexableReposFuncCall
	mutex       sync.Mutex
}

// ListSourcegraphDotComIndexableRepos dispatches to the next queued hook (or
// the default hook when the queue is empty) and records the call, its
// arguments, and its results.
func (m *MockRepoStore) ListSourcegraphDotComIndexableRepos(ctx context.Context, opts database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error) {
	repos, err := m.ListSourcegraphDotComIndexableReposFunc.nextHook()(ctx, opts)
	m.ListSourcegraphDotComIndexableReposFunc.appendCall(RepoStoreListSourcegraphDotComIndexableReposFuncCall{ctx, opts, repos, err})
	return repos, err
}

// SetDefaultHook replaces the function invoked whenever the hook queue is
// empty.
func (f *RepoStoreListSourcegraphDotComIndexableReposFunc) SetDefaultHook(hook func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error)) {
	f.defaultHook = hook
}

// PushHook enqueues a one-shot hook. Each invocation consumes the hook at the
// front of the queue; once the queue is empty, the default hook is used.
func (f *RepoStoreListSourcegraphDotComIndexableReposFunc) PushHook(hook func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error)) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.hooks = append(f.hooks, hook)
}

// SetDefaultReturn is shorthand for SetDefaultHook with a function that
// returns the given values.
func (f *RepoStoreListSourcegraphDotComIndexableReposFunc) SetDefaultReturn(r0 []types.MinimalRepo, r1 error) {
	f.SetDefaultHook(func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error) {
		return r0, r1
	})
}

// PushReturn is shorthand for PushHook with a function that returns the given
// values.
func (f *RepoStoreListSourcegraphDotComIndexableReposFunc) PushReturn(r0 []types.MinimalRepo, r1 error) {
	f.PushHook(func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error) {
		return r0, r1
	})
}

// nextHook pops the front of the hook queue, falling back to the default hook
// when the queue is empty.
func (f *RepoStoreListSourcegraphDotComIndexableReposFunc) nextHook() func(context.Context, database.ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	next := f.hooks[0]
	f.hooks = f.hooks[1:]
	return next
}

// appendCall records a single invocation in the history under the lock.
func (f *RepoStoreListSourcegraphDotComIndexableReposFunc) appendCall(call RepoStoreListSourcegraphDotComIndexableReposFuncCall) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.history = append(f.history, call)
}

// History returns a snapshot of all recorded invocations of
// ListSourcegraphDotComIndexableRepos.
func (f *RepoStoreListSourcegraphDotComIndexableReposFunc) History() []RepoStoreListSourcegraphDotComIndexableReposFuncCall {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	return append([]RepoStoreListSourcegraphDotComIndexableReposFuncCall(nil), f.history...)
}

// RepoStoreListSourcegraphDotComIndexableReposFuncCall describes one
// invocation of ListSourcegraphDotComIndexableRepos on a MockRepoStore.
type RepoStoreListSourcegraphDotComIndexableReposFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method invocation.
	Arg1 database.ListSourcegraphDotComIndexableReposOptions
	// Result0 is the value of the 1st result returned from this method invocation.
	Result0 []types.MinimalRepo
	// Result1 is the value of the 2nd result returned from this method invocation.
	Result1 error
}

// Args returns the invocation's arguments as an interface slice.
func (c RepoStoreListSourcegraphDotComIndexableReposFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1}
}

// Results returns the invocation's results as an interface slice.
func (c RepoStoreListSourcegraphDotComIndexableReposFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}
// RepoStoreMetadataFunc describes the behavior when the Metadata method of
// the parent MockRepoStore instance is invoked.
type RepoStoreMetadataFunc struct {

View File

@ -67,8 +67,7 @@ type ExternalServiceStore interface {
// GetLatestSyncErrors returns the most recent sync failure message for
// each external service. If the latest sync did not have an error, the
// string will be empty. We exclude cloud_default external services as they
// are never synced.
// string will be empty.
GetLatestSyncErrors(ctx context.Context) ([]*SyncError, error)
// GetByID returns the external service for id.
@ -254,9 +253,6 @@ type ExternalServicesListOptions struct {
UpdatedAfter time.Time
// Possible values are ASC or DESC. Defaults to DESC.
OrderByDirection string
// When true, will only return services that have the cloud_default flag set to
// true.
OnlyCloudDefault bool
// When specified, only include external services which contain repository with a given ID.
RepoID api.RepoID
@ -286,9 +282,6 @@ func (o ExternalServicesListOptions) sqlConditions() []*sqlf.Query {
if !o.UpdatedAfter.IsZero() {
conds = append(conds, sqlf.Sprintf(`updated_at > %s`, o.UpdatedAfter))
}
if o.OnlyCloudDefault {
conds = append(conds, sqlf.Sprintf("cloud_default = true"))
}
if o.CodeHostID != 0 {
conds = append(conds, sqlf.Sprintf("code_host_id = %s", o.CodeHostID))
}
@ -535,18 +528,6 @@ func validatePerforceConnection(perforceValidators []PerforceValidatorFunc, _ in
return err
}
// disablePermsSyncingForExternalService strips the "enforcePermissions" and
// "authorization" fields from an external service config, when present.
func disablePermsSyncingForExternalService(config string) (string, error) {
	stripped, err := jsonc.Remove(config, "enforcePermissions")
	if err != nil {
		// Best effort: even if "enforcePermissions" could not be removed,
		// still try to remove "authorization" from the original config.
		stripped = config
	}
	return jsonc.Remove(stripped, "authorization")
}
func (e *externalServiceStore) Create(ctx context.Context, confGet func() *conf.Unified, es *types.ExternalService) (err error) {
rawConfig, err := es.Config.Decrypt(ctx)
if err != nil {
@ -563,18 +544,6 @@ func (e *externalServiceStore) Create(ctx context.Context, confGet func() *conf.
return err
}
// 🚨 SECURITY: For all code host connections on Sourcegraph.com,
// we always want to disable repository permissions to prevent
// permission syncing from trying to sync permissions from public code.
if dotcom.SourcegraphDotComMode() {
rawConfig, err = disablePermsSyncingForExternalService(rawConfig)
if err != nil {
return err
}
es.Config.Set(rawConfig)
}
es.CreatedAt = timeutil.Now()
es.UpdatedAt = es.CreatedAt
@ -611,7 +580,6 @@ func (e *externalServiceStore) Create(ctx context.Context, confGet func() *conf.
es.CreatedAt,
es.UpdatedAt,
es.Unrestricted,
es.CloudDefault,
es.HasWebhooks,
es.CodeHostID,
es.CreatorID,
@ -622,8 +590,8 @@ func (e *externalServiceStore) Create(ctx context.Context, confGet func() *conf.
const createExternalServiceQueryFmtstr = `
INSERT INTO external_services
(kind, display_name, config, encryption_key_id, created_at, updated_at, unrestricted, cloud_default, has_webhooks, code_host_id, creator_id, last_updater_id)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
(kind, display_name, config, encryption_key_id, created_at, updated_at, unrestricted, has_webhooks, code_host_id, creator_id, last_updater_id)
VALUES(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
RETURNING id
`
@ -664,18 +632,6 @@ func (e *externalServiceStore) Upsert(ctx context.Context, svcs ...*types.Extern
return errors.Wrapf(err, "validating service of kind %q", s.Kind)
}
// 🚨 SECURITY: For all code host connections on Sourcegraph.com,
// we always want to disable repository permissions to prevent
// permission syncing from trying to sync permissions from public code.
if dotcom.SourcegraphDotComMode() {
rawConfig, err = disablePermsSyncingForExternalService(rawConfig)
if err != nil {
return err
}
s.Config.Set(rawConfig)
}
e.recalculateFields(s, string(normalized))
chID, err := ensureCodeHost(ctx, tx, s.Kind, string(normalized))
@ -739,7 +695,6 @@ func (e *externalServiceStore) Upsert(ctx context.Context, svcs ...*types.Extern
&dbutil.NullTime{Time: &svcs[i].LastSyncAt},
&dbutil.NullTime{Time: &svcs[i].NextSyncAt},
&svcs[i].Unrestricted,
&svcs[i].CloudDefault,
&keyID,
&dbutil.NullBool{B: svcs[i].HasWebhooks},
&svcs[i].CodeHostID,
@ -777,7 +732,6 @@ func (e *externalServiceStore) upsertExternalServicesQuery(ctx context.Context,
dbutil.NullTimeColumn(s.LastSyncAt),
dbutil.NullTimeColumn(s.NextSyncAt),
s.Unrestricted,
s.CloudDefault,
s.HasWebhooks,
s.CodeHostID,
s.CreatorID,
@ -792,7 +746,7 @@ func (e *externalServiceStore) upsertExternalServicesQuery(ctx context.Context,
}
const upsertExternalServicesQueryValueFmtstr = `
(COALESCE(NULLIF(%s, 0), (SELECT nextval('external_services_id_seq'))), UPPER(%s), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
(COALESCE(NULLIF(%s, 0), (SELECT nextval('external_services_id_seq'))), UPPER(%s), %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
`
const upsertExternalServicesQueryFmtstr = `
@ -808,7 +762,6 @@ INSERT INTO external_services (
last_sync_at,
next_sync_at,
unrestricted,
cloud_default,
has_webhooks,
code_host_id,
creator_id,
@ -827,7 +780,6 @@ SET
last_sync_at = excluded.last_sync_at,
next_sync_at = excluded.next_sync_at,
unrestricted = excluded.unrestricted,
cloud_default = excluded.cloud_default,
has_webhooks = excluded.has_webhooks,
code_host_id = excluded.code_host_id,
last_updater_id = excluded.last_updater_id
@ -842,7 +794,6 @@ RETURNING
last_sync_at,
next_sync_at,
unrestricted,
cloud_default,
encryption_key_id,
has_webhooks,
code_host_id,
@ -854,7 +805,6 @@ RETURNING
type ExternalServiceUpdate struct {
DisplayName *string
Config *string
CloudDefault *bool
TokenExpiresAt *time.Time
LastSyncAt *time.Time
NextSyncAt *time.Time
@ -922,17 +872,6 @@ func (e *externalServiceStore) Update(ctx context.Context, ps []schema.AuthProvi
return err
}
// 🚨 SECURITY: For all code host connections on Sourcegraph.com,
// we always want to disable repository permissions to prevent
// permission syncing from trying to sync permissions from public code.
if dotcom.SourcegraphDotComMode() {
unredactedConfig, err = disablePermsSyncingForExternalService(unredactedConfig)
if err != nil {
return err
}
newSvc.Config.Set(unredactedConfig)
}
chID, err := ensureCodeHost(ctx, tx, externalService.Kind, string(normalized))
if err != nil {
return err
@ -950,7 +889,7 @@ func (e *externalServiceStore) Update(ctx context.Context, ps []schema.AuthProvi
}
if update.Config != nil {
unrestricted := calcUnrestricted(string(normalized), dotcom.SourcegraphDotComMode(), conf.PermissionsUserMapping().Enabled)
unrestricted := calcUnrestricted(string(normalized), conf.PermissionsUserMapping().Enabled)
updates = append(updates,
sqlf.Sprintf(
@ -959,10 +898,6 @@ func (e *externalServiceStore) Update(ctx context.Context, ps []schema.AuthProvi
))
}
if update.CloudDefault != nil {
updates = append(updates, sqlf.Sprintf("cloud_default = %s", update.CloudDefault))
}
if update.TokenExpiresAt != nil {
updates = append(updates, sqlf.Sprintf("token_expires_at = %s", update.TokenExpiresAt))
}
@ -1469,7 +1404,7 @@ FROM external_services es
ON es.id = essj.external_service_id
AND essj.state IN ('completed', 'errored', 'failed')
AND essj.finished_at IS NOT NULL
WHERE es.deleted_at IS NULL AND NOT es.cloud_default
WHERE es.deleted_at IS NULL
ORDER BY es.id, essj.finished_at DESC
`)
@ -1497,7 +1432,6 @@ func (e *externalServiceStore) List(ctx context.Context, opt ExternalServicesLis
last_sync_at,
next_sync_at,
unrestricted,
cloud_default,
has_webhooks,
token_expires_at,
code_host_id,
@ -1541,7 +1475,6 @@ func (e *externalServiceStore) List(ctx context.Context, opt ExternalServicesLis
&lastSyncAt,
&nextSyncAt,
&h.Unrestricted,
&h.CloudDefault,
&hasWebhooks,
&tokenExpiresAt,
&h.CodeHostID,
@ -1697,17 +1630,7 @@ WHERE EXISTS(
// calcUnrestricted determines whether or not permissions should be enforced
// on an external service.
//
// isDotComMode and permissionsUserMappingEnabled can be passed via
//
// dotcom.SourcegraphDotComMode() and globals.PermissionsUserMapping().Enabled
//
// respectively.
func calcUnrestricted(config string, isDotComMode bool, permissionsUserMappingEnabled bool) bool {
if isDotComMode {
return false
}
func calcUnrestricted(config string, permissionsUserMappingEnabled bool) bool {
// If PermissionsUserMapping is enabled, we return false since permissions
// will be managed by the explicit permissions API.
if permissionsUserMappingEnabled {
@ -1737,7 +1660,7 @@ func calcUnrestricted(config string, isDotComMode bool, permissionsUserMappingEn
// calculated depending on the external service configuration, namely
// `Unrestricted` and `HasWebhooks`.
func (e *externalServiceStore) recalculateFields(es *types.ExternalService, rawConfig string) {
es.Unrestricted = calcUnrestricted(rawConfig, dotcom.SourcegraphDotComMode(), conf.PermissionsUserMapping().Enabled)
es.Unrestricted = calcUnrestricted(rawConfig, conf.PermissionsUserMapping().Enabled)
hasWebhooks := false
cfg, err := extsvc.ParseConfig(es.Kind, rawConfig)

View File

@ -16,7 +16,6 @@ import (
"github.com/lib/pq"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/tidwall/gjson"
"github.com/sourcegraph/log"
@ -45,15 +44,14 @@ import (
func TestExternalServicesListOptions_sqlConditions(t *testing.T) {
tests := []struct {
name string
kinds []string
afterID int64
updatedAfter time.Time
wantQuery string
onlyCloudDefault bool
includeDeleted bool
wantArgs []any
repoID api.RepoID
name string
kinds []string
afterID int64
updatedAfter time.Time
wantQuery string
includeDeleted bool
wantArgs []any
repoID api.RepoID
}{
{
name: "no condition",
@ -83,11 +81,6 @@ func TestExternalServicesListOptions_sqlConditions(t *testing.T) {
wantQuery: "deleted_at IS NULL AND updated_at > $1",
wantArgs: []any{time.Date(2013, 0o4, 19, 0, 0, 0, 0, time.UTC)},
},
{
name: "has OnlyCloudDefault",
onlyCloudDefault: true,
wantQuery: "deleted_at IS NULL AND cloud_default = true",
},
{
name: "includeDeleted",
includeDeleted: true,
@ -103,12 +96,11 @@ func TestExternalServicesListOptions_sqlConditions(t *testing.T) {
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
opts := ExternalServicesListOptions{
Kinds: test.kinds,
AfterID: test.afterID,
UpdatedAfter: test.updatedAfter,
OnlyCloudDefault: test.onlyCloudDefault,
IncludeDeleted: test.includeDeleted,
RepoID: test.repoID,
Kinds: test.kinds,
AfterID: test.afterID,
UpdatedAfter: test.updatedAfter,
IncludeDeleted: test.includeDeleted,
RepoID: test.repoID,
}
q := sqlf.Join(opts.sqlConditions(), "AND")
if diff := cmp.Diff(test.wantQuery, q.Query(sqlf.PostgresBindVar)); diff != "" {
@ -148,7 +140,7 @@ func TestExternalServicesStore_Create(t *testing.T) {
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc", "webhooks": [{"org": "org", "secret": "secret"}]}`),
},
codeHostURL: "https://github.com/",
wantUnrestricted: false,
wantUnrestricted: true,
wantHasWebhooks: true,
},
{
@ -159,7 +151,7 @@ func TestExternalServicesStore_Create(t *testing.T) {
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc"}`),
},
codeHostURL: "https://github.com/",
wantUnrestricted: false,
wantUnrestricted: true,
wantHasWebhooks: false,
},
{
@ -187,29 +179,7 @@ func TestExternalServicesStore_Create(t *testing.T) {
}`),
},
codeHostURL: "https://github.com/",
wantUnrestricted: false,
},
{
name: "dotcom: auto-add authorization to code host connections for GitHub",
externalService: &types.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "GITHUB #4",
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc"}`),
},
codeHostURL: "https://github.com/",
wantUnrestricted: false,
wantHasWebhooks: false,
},
{
name: "dotcom: auto-add authorization to code host connections for GitLab",
externalService: &types.ExternalService{
Kind: extsvc.KindGitLab,
DisplayName: "GITLAB #1",
Config: extsvc.NewUnencryptedConfig(`{"url": "https://gitlab.com", "projectQuery": ["none"], "token": "abc"}`),
},
codeHostURL: "https://gitlab.com/",
wantUnrestricted: false,
wantHasWebhooks: false,
wantUnrestricted: true,
},
{
name: "Empty config not allowed",
@ -351,7 +321,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
esID int64
update *ExternalServiceUpdate
wantUnrestricted bool
wantCloudDefault bool
wantHasWebhooks bool
wantTokenExpiresAt bool
wantLastSyncAt time.Time
@ -366,7 +335,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
Config: pointers.Ptr(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "def", "authorization": {}, "webhooks": [{"org": "org", "secret": "secret"}]}`),
},
wantUnrestricted: false,
wantCloudDefault: false,
wantHasWebhooks: true,
},
{
@ -377,7 +345,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
Config: pointers.Ptr(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "def"}`),
},
wantUnrestricted: false,
wantCloudDefault: false,
wantHasWebhooks: false,
},
{
@ -394,28 +361,8 @@ func TestExternalServicesStore_Update(t *testing.T) {
}`),
},
wantUnrestricted: false,
wantCloudDefault: false,
wantHasWebhooks: false,
},
{
name: "set cloud_default true",
esID: es.ID,
update: &ExternalServiceUpdate{
DisplayName: pointers.Ptr("GITHUB (updated) #4"),
CloudDefault: pointers.Ptr(true),
Config: pointers.Ptr(`
{
"url": "https://github.com",
"repositoryQuery": ["none"],
"token": "def",
"authorization": {},
"webhooks": [{"org": "org", "secret": "secret"}]
}`),
},
wantUnrestricted: false,
wantCloudDefault: true,
wantHasWebhooks: true,
},
{
name: "update token_expires_at",
esID: es.ID,
@ -424,7 +371,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
Config: pointers.Ptr(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "def"}`),
TokenExpiresAt: pointers.Ptr(time.Now()),
},
wantCloudDefault: true,
wantTokenExpiresAt: true,
},
{
@ -451,7 +397,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
Config: pointers.Ptr(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "def"}`),
LastSyncAt: pointers.Ptr(now),
},
wantCloudDefault: true,
wantTokenExpiresAt: true,
wantLastSyncAt: now,
},
@ -464,7 +409,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
LastSyncAt: pointers.Ptr(now),
NextSyncAt: pointers.Ptr(now),
},
wantCloudDefault: true,
wantTokenExpiresAt: true,
wantNextSyncAt: now,
},
@ -529,10 +473,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
t.Fatalf("Want unrestricted = %v, but got %v", test.wantUnrestricted, got.Unrestricted)
}
if test.wantCloudDefault != got.CloudDefault {
t.Fatalf("Want cloud_default = %v, but got %v", test.wantCloudDefault, got.CloudDefault)
}
if !test.wantLastSyncAt.IsZero() && !test.wantLastSyncAt.Equal(got.LastSyncAt) {
t.Fatalf("Want last_sync_at = %v, but got %v", test.wantLastSyncAt, got.LastSyncAt)
}
@ -562,173 +502,6 @@ func TestExternalServicesStore_Update(t *testing.T) {
}
}
func TestDisablePermsSyncingForExternalService(t *testing.T) {
tests := []struct {
name string
config string
want string
}{
{
name: "github with authorization",
config: `
{
// Useful comments
"url": "https://github.com",
"repositoryQuery": ["none"],
"token": "def",
"authorization": {}
}`,
want: `
{
// Useful comments
"url": "https://github.com",
"repositoryQuery": ["none"],
"token": "def"
}`,
},
{
name: "github without authorization",
config: `
{
// Useful comments
"url": "https://github.com",
"repositoryQuery": ["none"],
"token": "def"
}`,
want: `
{
// Useful comments
"url": "https://github.com",
"repositoryQuery": ["none"],
"token": "def"
}`,
},
{
name: "azure devops with enforce permissions",
config: `
{
// Useful comments
"url": "https://dev.azure.com",
"username": "horse",
"token": "abc",
"enforcePermissions": true
}`,
want: `
{
// Useful comments
"url": "https://dev.azure.com",
"username": "horse",
"token": "abc"
}`,
},
{
name: "azure devops without enforce permissions",
config: `
{
// Useful comments
"url": "https://dev.azure.com",
"username": "horse",
"token": "abc"
}`,
want: `
{
// Useful comments
"url": "https://dev.azure.com",
"username": "horse",
"token": "abc"
}`,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
got, err := disablePermsSyncingForExternalService(test.config)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(test.want, got); diff != "" {
t.Fatalf("Mismatch (-want +got):\n%s", diff)
}
})
}
}
// This test ensures under Sourcegraph.com mode, every call of `Create`,
// `Upsert` and `Update` removes the "authorization" field in the external
// service config automatically.
func TestExternalServicesStore_DisablePermsSyncingForExternalService(t *testing.T) {
if testing.Short() {
t.Skip()
}
logger := logtest.Scoped(t)
db := NewDB(logger, dbtest.NewDB(t))
ctx := context.Background()
user, err := db.Users().Create(ctx, NewUser{Username: "foo"})
if err != nil {
t.Fatal(err)
}
dotcom.MockSourcegraphDotComMode(t, true)
confGet := func() *conf.Unified {
return &conf.Unified{}
}
externalServices := db.ExternalServices()
// Test Create method
es := &types.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "GITHUB #1",
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc", "authorization": {}}`),
}
err = externalServices.Create(ctx, confGet, es)
require.NoError(t, err)
got, err := externalServices.GetByID(ctx, es.ID)
require.NoError(t, err)
cfg, err := got.Config.Decrypt(ctx)
if err != nil {
t.Fatal(err)
}
exists := gjson.Get(cfg, "authorization").Exists()
assert.False(t, exists, `"authorization" field exists, but should not`)
// Reset Config field and test Upsert method
es.Config.Set(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc", "authorization": {}}`)
err = externalServices.Upsert(ctx, es)
require.NoError(t, err)
got, err = externalServices.GetByID(ctx, es.ID)
require.NoError(t, err)
cfg, err = got.Config.Decrypt(ctx)
if err != nil {
t.Fatal(err)
}
exists = gjson.Get(cfg, "authorization").Exists()
assert.False(t, exists, `"authorization" field exists, but should not`)
// Reset Config field and test Update method
es.Config.Set(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc", "authorization": {}}`)
err = externalServices.Update(ctx,
conf.Get().AuthProviders,
es.ID,
&ExternalServiceUpdate{
Config: &cfg,
LastUpdaterID: &user.ID,
},
)
require.NoError(t, err)
got, err = externalServices.GetByID(ctx, es.ID)
require.NoError(t, err)
cfg, err = got.Config.Decrypt(ctx)
if err != nil {
t.Fatal(err)
}
exists = gjson.Get(cfg, "authorization").Exists()
assert.False(t, exists, `"authorization" field exists, but should not`)
}
func TestCountRepoCount(t *testing.T) {
if testing.Short() {
t.Skip()
@ -1524,10 +1297,9 @@ func TestExternalServicesStore_List(t *testing.T) {
}
ess := []*types.ExternalService{
{
Kind: extsvc.KindGitHub,
DisplayName: "GITHUB #1",
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc", "authorization": {}}`),
CloudDefault: true,
Kind: extsvc.KindGitHub,
DisplayName: "GITHUB #1",
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc", "authorization": {}}`),
},
{
Kind: extsvc.KindGitHub,
@ -1652,19 +1424,6 @@ VALUES (1, 1, ''), (2, 1, '')
}
})
t.Run("list cloud default services", func(t *testing.T) {
ess, err := db.ExternalServices().List(ctx, ExternalServicesListOptions{
OnlyCloudDefault: true,
})
if err != nil {
t.Fatal(err)
}
// We should find all cloud default services
if len(ess) != 1 {
t.Fatalf("Want 0 external services but got %d", len(ess))
}
})
t.Run("list including deleted", func(t *testing.T) {
ess, err := db.ExternalServices().List(ctx, ExternalServicesListOptions{
IncludeDeleted: true,
@ -2300,52 +2059,6 @@ func TestExternalServiceStore_UpdateSyncJobCounters(t *testing.T) {
}
}
func TestExternalServicesStore_OneCloudDefaultPerKind(t *testing.T) {
if testing.Short() {
t.Skip()
}
t.Parallel()
logger := logtest.Scoped(t)
db := NewDB(logger, dbtest.NewDB(t))
ctx := context.Background()
now := time.Now()
makeService := func(cloudDefault bool) *types.ExternalService {
cfg := `{"url": "https://github.com", "token": "abc", "repositoryQuery": ["none"]}`
svc := &types.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "Github - Test",
Config: extsvc.NewUnencryptedConfig(cfg),
CreatedAt: now,
UpdatedAt: now,
CloudDefault: cloudDefault,
}
return svc
}
t.Run("non default", func(t *testing.T) {
gh := makeService(false)
if err := db.ExternalServices().Upsert(ctx, gh); err != nil {
t.Fatalf("Upsert error: %s", err)
}
})
t.Run("first default", func(t *testing.T) {
gh := makeService(true)
if err := db.ExternalServices().Upsert(ctx, gh); err != nil {
t.Fatalf("Upsert error: %s", err)
}
})
t.Run("second default", func(t *testing.T) {
gh := makeService(true)
if err := db.ExternalServices().Upsert(ctx, gh); err == nil {
t.Fatal("Expected an error")
}
})
}
func TestExternalServiceStore_SyncDue(t *testing.T) {
if testing.Short() {
t.Skip()
@ -2758,14 +2471,7 @@ func TestExternalServices_CleanupSyncJobs(t *testing.T) {
}
func TestCalcUnrestricted(t *testing.T) {
// Separate test for dotcom mode to test a mix of cases
t.Run("dotcom mode always returns false", func(t *testing.T) {
require.False(t, calcUnrestricted("", true, false))
require.False(t, calcUnrestricted(`{"authorization": {}}`, true, false))
require.False(t, calcUnrestricted(`{"authorization": {}, "enforcePermissions": false}`, true, false))
})
otherTests := map[string]struct {
tts := map[string]struct {
authorization bool
enforcePermissions bool
permissionsUserMapping bool
@ -2777,7 +2483,7 @@ func TestCalcUnrestricted(t *testing.T) {
"enforcePermissions and no permissionsUserMapping returns restrictred": {enforcePermissions: true, want: false},
}
for testName, test := range otherTests {
for testName, test := range tts {
t.Run(testName, func(t *testing.T) {
var vals []string
if test.authorization {
@ -2788,7 +2494,7 @@ func TestCalcUnrestricted(t *testing.T) {
}
conf := fmt.Sprintf("{%s}", strings.Join(vals, ","))
require.Equal(t, test.want, calcUnrestricted(conf, false, test.permissionsUserMapping))
require.Equal(t, test.want, calcUnrestricted(conf, test.permissionsUserMapping))
})
}
}

View File

@ -56,15 +56,9 @@ type GitserverRepoStore interface {
// a matching row does not yet exist a new one will be created.
// If the size value hasn't changed, the row will not be updated.
SetRepoSize(ctx context.Context, name api.RepoName, size int64, shardID string) error
// ListReposWithLastError iterates over repos w/ non-empty last_error field and calls the repoFn for these repos.
// note that this currently filters out any repos which do not have an associated external service where cloud_default = true.
ListReposWithLastError(ctx context.Context) ([]api.RepoName, error)
// ListPurgeableRepos returns all purgeable repos. These are repos that
// are cloned on disk but have been deleted or blocked.
ListPurgeableRepos(ctx context.Context, options ListPurgableReposOptions) ([]api.RepoName, error)
// TotalErroredCloudDefaultRepos returns the total number of repos which have a non-empty last_error field. Note that this is only
// counting repos with an associated cloud_default external service.
TotalErroredCloudDefaultRepos(ctx context.Context) (int, error)
// UpdateRepoSizes sets repo sizes according to input map. Key is repoID, value is repo_size_bytes.
UpdateRepoSizes(ctx context.Context, logger log.Logger, shardID string, repos map[api.RepoName]int64) (int, error)
// GetLastSyncOutput returns the last stored output from a repo sync (clone or fetch), or ok: false if
@ -150,51 +144,6 @@ WHERE
locked_data.repo_id = gr.repo_id
`
func (s *gitserverRepoStore) TotalErroredCloudDefaultRepos(ctx context.Context) (int, error) {
count, _, err := basestore.ScanFirstInt(s.Query(ctx, sqlf.Sprintf(totalErroredCloudDefaultReposQuery)))
return count, err
}
const totalErroredCloudDefaultReposQuery = `
SELECT
COUNT(*)
FROM gitserver_repos gr
JOIN repo r ON r.id = gr.repo_id
JOIN external_service_repos esr ON gr.repo_id = esr.repo_id
JOIN external_services es on esr.external_service_id = es.id
WHERE
gr.last_error != ''
AND r.blocked IS NULL
AND r.deleted_at IS NULL
AND es.cloud_default IS TRUE
`
func (s *gitserverRepoStore) ListReposWithLastError(ctx context.Context) ([]api.RepoName, error) {
rows, err := s.Query(ctx, sqlf.Sprintf(nonemptyLastErrorQuery))
return scanLastErroredRepos(rows, err)
}
const nonemptyLastErrorQuery = `
SELECT
repo.name
FROM repo
JOIN gitserver_repos gr ON repo.id = gr.repo_id
JOIN external_service_repos esr ON repo.id = esr.repo_id
JOIN external_services es on esr.external_service_id = es.id
WHERE
gr.last_error != ''
AND repo.blocked IS NULL
AND repo.deleted_at IS NULL
AND es.cloud_default IS TRUE
`
func scanLastErroredRepoRow(scanner dbutil.Scanner) (name api.RepoName, err error) {
err = scanner.Scan(&name)
return name, err
}
var scanLastErroredRepos = basestore.NewSliceScanner(scanLastErroredRepoRow)
type ListPurgableReposOptions struct {
// DeletedBefore will filter the deleted repos to only those that were deleted
// before the given time. The zero value will not apply filtering.

View File

@ -196,152 +196,6 @@ func TestListPurgeableRepos(t *testing.T) {
}
}
func TestListReposWithLastError(t *testing.T) {
if testing.Short() {
t.Skip()
}
type testRepo struct {
name string
cloudDefault bool
hasLastError bool
blocked bool
}
type testCase struct {
name string
testRepos []testRepo
expectedReposFound []api.RepoName
}
testCases := []testCase{
{
name: "get repos with last error",
testRepos: []testRepo{
{
name: "github.com/sourcegraph/repo1",
cloudDefault: true,
hasLastError: true,
},
{
name: "github.com/sourcegraph/repo2",
cloudDefault: true,
},
},
expectedReposFound: []api.RepoName{"github.com/sourcegraph/repo1"},
},
{
name: "filter out non cloud_default repos",
testRepos: []testRepo{
{
name: "github.com/sourcegraph/repo1",
cloudDefault: false,
hasLastError: true,
},
{
name: "github.com/sourcegraph/repo2",
cloudDefault: true,
hasLastError: true,
},
},
expectedReposFound: []api.RepoName{"github.com/sourcegraph/repo2"},
},
{
name: "no cloud_default repos with non-empty last errors",
testRepos: []testRepo{
{
name: "github.com/sourcegraph/repo1",
cloudDefault: false,
hasLastError: true,
},
{
name: "github.com/sourcegraph/repo2",
cloudDefault: true,
hasLastError: false,
},
},
expectedReposFound: nil,
},
{
name: "filter out blocked repos",
testRepos: []testRepo{
{
name: "github.com/sourcegraph/repo1",
cloudDefault: true,
hasLastError: true,
blocked: true,
},
{
name: "github.com/sourcegraph/repo2",
cloudDefault: true,
hasLastError: true,
},
},
expectedReposFound: []api.RepoName{"github.com/sourcegraph/repo2"},
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
ctx := context.Background()
logger := logtest.Scoped(t)
db := NewDB(logger, dbtest.NewDB(t))
now := time.Now()
cloudDefaultService := createTestExternalService(ctx, t, now, db, true)
nonCloudDefaultService := createTestExternalService(ctx, t, now, db, false)
for i, tr := range tc.testRepos {
testRepo := &types.Repo{
Name: api.RepoName(tr.name),
URI: tr.name,
ExternalRepo: api.ExternalRepoSpec{
ID: fmt.Sprintf("repo%d-external", i),
ServiceType: extsvc.TypeGitHub,
ServiceID: "https://github.com",
},
}
if tr.cloudDefault {
testRepo = testRepo.With(
typestest.Opt.RepoSources(cloudDefaultService.URN()),
)
} else {
testRepo = testRepo.With(
typestest.Opt.RepoSources(nonCloudDefaultService.URN()),
)
}
createTestRepos(ctx, t, db, types.Repos{testRepo})
if tr.hasLastError {
if err := db.GitserverRepos().SetLastError(ctx, testRepo.Name, "an error", "test"); err != nil {
t.Fatal(err)
}
}
if tr.blocked {
q := sqlf.Sprintf(`UPDATE repo SET blocked = %s WHERE name = %s`, []byte(`{"reason": "test"}`), testRepo.Name)
if _, err := db.ExecContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...); err != nil {
t.Fatal(err)
}
}
}
// Iterate and collect repos
foundRepos, err := db.GitserverRepos().ListReposWithLastError(ctx)
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(tc.expectedReposFound, foundRepos); diff != "" {
t.Fatalf("mismatch in expected repos with last_error, (-want, +got)\n%s", diff)
}
total, err := db.GitserverRepos().TotalErroredCloudDefaultRepos(ctx)
if err != nil {
t.Fatal(err)
}
if total != len(tc.expectedReposFound) {
t.Fatalf("expected %d total errored repos, got %d instead", len(tc.expectedReposFound), total)
}
})
}
}
func TestReposWithLastOutput(t *testing.T) {
if testing.Short() {
t.Skip()
@ -372,7 +226,7 @@ func TestReposWithLastOutput(t *testing.T) {
logger := logtest.Scoped(t)
db := NewDB(logger, dbtest.NewDB(t))
now := time.Now()
cloudDefaultService := createTestExternalService(ctx, t, now, db, true)
svc := createTestExternalService(ctx, t, now, db)
for i, tr := range testRepos {
t.Run(tr.title, func(t *testing.T) {
testRepo := &types.Repo{
@ -386,7 +240,7 @@ func TestReposWithLastOutput(t *testing.T) {
},
}
testRepo = testRepo.With(
typestest.Opt.RepoSources(cloudDefaultService.URN()),
typestest.Opt.RepoSources(svc.URN()),
)
createTestRepos(ctx, t, db, types.Repos{testRepo})
if err := db.GitserverRepos().SetLastOutput(ctx, testRepo.Name, tr.lastOutput); err != nil {
@ -406,14 +260,13 @@ func TestReposWithLastOutput(t *testing.T) {
}
}
func createTestExternalService(ctx context.Context, t *testing.T, now time.Time, db DB, cloudDefault bool) types.ExternalService {
func createTestExternalService(ctx context.Context, t *testing.T, now time.Time, db DB) types.ExternalService {
service := types.ExternalService{
Kind: extsvc.KindGitHub,
DisplayName: "Github - Test",
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc"}`),
CreatedAt: now,
UpdatedAt: now,
CloudDefault: cloudDefault,
Kind: extsvc.KindGitHub,
DisplayName: "Github - Test",
Config: extsvc.NewUnencryptedConfig(`{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc"}`),
CreatedAt: now,
UpdatedAt: now,
}
// Create a new external service

View File

@ -81,15 +81,6 @@ type RepoStore interface {
GetReposSetByIDs(context.Context, ...api.RepoID) (map[api.RepoID]*types.Repo, error)
GetRepoDescriptionsByIDs(context.Context, ...api.RepoID) (map[api.RepoID]string, error)
List(context.Context, ReposListOptions) ([]*types.Repo, error)
// ListSourcegraphDotComIndexableRepos returns a list of repos to be indexed for search on sourcegraph.com.
// This includes all non-forked, non-archived repos with >= listSourcegraphDotComIndexableReposMinStars stars,
// plus all repos from the following data sources:
// - src.fedoraproject.org
// - maven
// - NPM
// - JDK
// THIS QUERY SHOULD NEVER BE USED OUTSIDE OF SOURCEGRAPH.COM.
ListSourcegraphDotComIndexableRepos(context.Context, ListSourcegraphDotComIndexableReposOptions) ([]types.MinimalRepo, error)
ListMinimalRepos(context.Context, ReposListOptions) ([]types.MinimalRepo, error)
Metadata(context.Context, ...api.RepoID) ([]*types.SearchedRepo, error)
StreamMinimalRepos(context.Context, ReposListOptions, func(*types.MinimalRepo)) error
@ -1298,92 +1289,6 @@ const embeddedReposQueryFmtstr = `
SELECT DISTINCT ON (repo_id) repo_id, true embedded FROM repo_embedding_jobs WHERE state = 'completed'
`
type ListSourcegraphDotComIndexableReposOptions struct {
// CloneStatus if set will only return indexable repos of that clone
// status.
CloneStatus types.CloneStatus
}
// listSourcegraphDotComIndexableReposMinStars is the minimum number of stars needed for a public
// repo to be indexed on sourcegraph.com.
const listSourcegraphDotComIndexableReposMinStars = 5
func (s *repoStore) ListSourcegraphDotComIndexableRepos(ctx context.Context, opts ListSourcegraphDotComIndexableReposOptions) (results []types.MinimalRepo, err error) {
tr, ctx := trace.New(ctx, "repos.ListIndexable")
defer tr.EndWithErr(&err)
var joins, where []*sqlf.Query
if opts.CloneStatus != types.CloneStatusUnknown {
if opts.CloneStatus == types.CloneStatusCloned {
// **Performance optimization case**:
//
// sourcegraph.com (at the time of this comment) has 2.8M cloned and 10k uncloned _indexable_ repos.
// At this scale, it is much faster (and logically equivalent) to perform an anti-join on the inverse
// set (i.e., filter out non-cloned repos) than a join on the target set (i.e., retaining cloned repos).
//
// If these scales change significantly this optimization should be reconsidered. The original query
// plans informing this change are available at https://github.com/sourcegraph/sourcegraph/pull/44129.
joins = append(joins, sqlf.Sprintf("LEFT JOIN gitserver_repos gr ON gr.repo_id = repo.id AND gr.clone_status <> %s", types.CloneStatusCloned))
where = append(where, sqlf.Sprintf("gr.repo_id IS NULL"))
} else {
// Normal case: Filter out rows that do not have a gitserver repo with the target status
joins = append(joins, sqlf.Sprintf("JOIN gitserver_repos gr ON gr.repo_id = repo.id AND gr.clone_status = %s", opts.CloneStatus))
}
}
if len(where) == 0 {
where = append(where, sqlf.Sprintf("TRUE"))
}
q := sqlf.Sprintf(
listSourcegraphDotComIndexableReposQuery,
sqlf.Join(joins, "\n"),
listSourcegraphDotComIndexableReposMinStars,
sqlf.Join(where, "\nAND"),
)
rows, err := s.Query(ctx, q)
if err != nil {
return nil, errors.Wrap(err, "querying indexable repos")
}
defer rows.Close()
for rows.Next() {
var r types.MinimalRepo
if err := rows.Scan(&r.ID, &r.Name, &dbutil.NullInt{N: &r.Stars}); err != nil {
return nil, errors.Wrap(err, "scanning indexable repos")
}
results = append(results, r)
}
if err = rows.Err(); err != nil {
return nil, errors.Wrap(err, "scanning indexable repos")
}
return results, nil
}
// N.B. This query's exact conditions are mirrored in the Postgres index
// repo_dotcom_indexable_repos_idx. Any substantial changes to this query
// may require an associated index redefinition.
const listSourcegraphDotComIndexableReposQuery = `
SELECT
repo.id,
repo.name,
repo.stars
FROM repo
%s
WHERE
deleted_at IS NULL AND
blocked IS NULL AND
(
(repo.stars >= %s AND NOT COALESCE(fork, false) AND NOT archived)
OR
lower(repo.name) ~ '^(src\.fedoraproject\.org|maven|npm|jdk)'
) AND
%s
ORDER BY stars DESC NULLS LAST
`
// Create inserts repos and their sources, respectively in the repo and external_service_repos table.
// Associated external services must already exist.
func (s *repoStore) Create(ctx context.Context, repos ...*types.Repo) (err error) {

View File

@ -2,7 +2,6 @@ package database
import (
"context"
"encoding/json"
"fmt"
"reflect"
"sort"
@ -2826,122 +2825,6 @@ func TestRepos_Create(t *testing.T) {
})
}
func TestListSourcegraphDotComIndexableRepos(t *testing.T) {
if testing.Short() {
t.Skip()
}
t.Parallel()
logger := logtest.Scoped(t)
db := NewDB(logger, dbtest.NewDB(t))
reposToAdd := []types.Repo{
{
ID: api.RepoID(1),
Name: "github.com/foo/bar1",
Stars: 20,
},
{
ID: api.RepoID(2),
Name: "github.com/baz/bar2",
Stars: 30,
},
{
ID: api.RepoID(3),
Name: "github.com/baz/bar3",
Stars: 15,
Private: true,
},
{
ID: api.RepoID(4),
Name: "github.com/foo/bar4",
Stars: 1, // Not enough stars
},
{
ID: api.RepoID(5),
Name: "github.com/foo/bar5",
Stars: 400,
Blocked: &types.RepoBlock{
At: time.Now().UTC().Unix(),
Reason: "Failed to index too many times.",
},
},
}
ctx := context.Background()
// Add an external service
_, err := db.ExecContext(
ctx,
`INSERT INTO external_services(id, kind, display_name, config, cloud_default) VALUES (1, 'github', 'github', '{}', true);`,
)
if err != nil {
t.Fatal(err)
}
for _, r := range reposToAdd {
blocked, err := json.Marshal(r.Blocked)
if err != nil {
t.Fatal(err)
}
_, err = db.ExecContext(ctx,
`INSERT INTO repo(id, name, stars, private, blocked) VALUES ($1, $2, $3, $4, NULLIF($5, 'null'::jsonb))`,
r.ID, r.Name, r.Stars, r.Private, blocked,
)
if err != nil {
t.Fatal(err)
}
if r.Private {
if _, err := db.ExecContext(ctx, `INSERT INTO external_service_repos VALUES (1, $1, $2);`, r.ID, r.Name); err != nil {
t.Fatal(err)
}
}
cloned := int(r.ID) > 1
cloneStatus := types.CloneStatusCloned
if !cloned {
cloneStatus = types.CloneStatusNotCloned
}
if _, err := db.ExecContext(ctx, `UPDATE gitserver_repos SET clone_status = $2, shard_id = 'test' WHERE repo_id = $1;`, r.ID, cloneStatus); err != nil {
t.Fatal(err)
}
}
for _, tc := range []struct {
name string
opts ListSourcegraphDotComIndexableReposOptions
want []api.RepoID
}{
{
name: "no opts",
want: []api.RepoID{2, 1, 3},
},
{
name: "only uncloned",
opts: ListSourcegraphDotComIndexableReposOptions{CloneStatus: types.CloneStatusNotCloned},
want: []api.RepoID{1},
},
} {
tc := tc
t.Run(tc.name, func(t *testing.T) {
t.Parallel()
repos, err := db.Repos().ListSourcegraphDotComIndexableRepos(ctx, tc.opts)
if err != nil {
t.Fatal(err)
}
have := make([]api.RepoID, 0, len(repos))
for _, r := range repos {
have = append(have, r.ID)
}
if diff := cmp.Diff(tc.want, have, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("mismatch (-want +have):\n%s", diff)
}
})
}
}
func TestRepoNotFoundFulfillsNotFound(t *testing.T) {
err := &RepoNotFoundErr{
ID: api.RepoID(1),

View File

@ -147,11 +147,6 @@ func IsTemporary(err error) bool {
return errors.AsInterface(err, &e) && e.Temporary()
}
func IsRepoDenied(err error) bool {
var e interface{ IsRepoDenied() bool }
return errors.AsInterface(err, &e) && e.IsRepoDenied()
}
// IsArchived will check if err or one of its causes is an archived error.
// (This is generally going to be in the context of repositories being
// archived.)

View File

@ -18,7 +18,6 @@ go_library(
"//internal/api",
"//internal/database",
"//internal/database/basestore",
"//internal/dotcom",
"//internal/gitserver",
"//internal/goroutine",
"//internal/insights/background/limiter",

View File

@ -14,7 +14,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database"
edb "github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
internalGitserver "github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/insights/background/limiter"
@ -98,7 +97,6 @@ func GetBackgroundJobs(ctx context.Context, logger log.Logger, mainAppDB databas
AllRepoIterator: discovery.NewAllReposIterator(
mainAppDB.Repos(),
time.Now,
dotcom.SourcegraphDotComMode(),
15*time.Minute,
&prometheus.CounterOpts{
Namespace: "src",

View File

@ -18,11 +18,6 @@ type RepoIterator interface {
ForEach(ctx context.Context, each func(repoName string, id api.RepoID) error) error
}
// IndexableReposLister is a subset of the API exposed by the backend.ListIndexable.
type IndexableReposLister interface {
List(ctx context.Context) ([]types.MinimalRepo, error)
}
// RepoStore is a subset of the API exposed by the database.Repos() store.
type RepoStore interface {
List(ctx context.Context, opt database.ReposListOptions) (results []*types.Repo, err error)
@ -34,9 +29,8 @@ type RepoStore interface {
// It caches multiple consecutive uses in order to ensure repository lists (which can be quite
// large, e.g. 500,000+ repositories) are only fetched as frequently as needed.
type AllReposIterator struct {
RepoStore RepoStore
Clock func() time.Time
SourcegraphDotComMode bool // result of dotcom.SourcegraphDotComMode()
RepoStore RepoStore
Clock func() time.Time
// RepositoryListCacheTime describes how long to cache repository lists for. These API calls
// can result in hundreds of thousands of repositories, so choose wisely as it can be expensive
@ -49,8 +43,8 @@ type AllReposIterator struct {
cachedPageRequests map[database.LimitOffset]cachedPageRequest
}
func NewAllReposIterator(repoStore RepoStore, clock func() time.Time, sourcegraphDotComMode bool, repositoryListCacheTime time.Duration, counterOpts *prometheus.CounterOpts) *AllReposIterator {
return &AllReposIterator{RepoStore: repoStore, Clock: clock, SourcegraphDotComMode: sourcegraphDotComMode, RepositoryListCacheTime: repositoryListCacheTime, counter: promauto.NewCounterVec(*counterOpts, []string{"result"})}
func NewAllReposIterator(repoStore RepoStore, clock func() time.Time, repositoryListCacheTime time.Duration, counterOpts *prometheus.CounterOpts) *AllReposIterator {
return &AllReposIterator{RepoStore: repoStore, Clock: clock, RepositoryListCacheTime: repositoryListCacheTime, counter: promauto.NewCounterVec(*counterOpts, []string{"result"})}
}
func (a *AllReposIterator) timeSince(t time.Time) time.Duration {

View File

@ -40,7 +40,7 @@ func TestAllReposIterator(t *testing.T) {
return result, nil
})
iter := NewAllReposIterator(repoStore, clock, false, 15*time.Minute, &prometheus.CounterOpts{Name: "fake_name123"})
iter := NewAllReposIterator(repoStore, clock, 15*time.Minute, &prometheus.CounterOpts{Name: "fake_name123"})
{
// Do we get all 9 repositories?
var each []string

View File

@ -14,158 +14,6 @@ import (
types "github.com/sourcegraph/sourcegraph/internal/types"
)
// MockIndexableReposLister is a mock implementation of the
// IndexableReposLister interface (from the package
// github.com/sourcegraph/sourcegraph/internal/insights/discovery) used for
// unit testing.
type MockIndexableReposLister struct {
// ListFunc is an instance of a mock function object controlling the
// behavior of the method List.
ListFunc *IndexableReposListerListFunc
}
// NewMockIndexableReposLister creates a new mock of the
// IndexableReposLister interface. All methods return zero values for all
// results, unless overwritten.
func NewMockIndexableReposLister() *MockIndexableReposLister {
return &MockIndexableReposLister{
ListFunc: &IndexableReposListerListFunc{
defaultHook: func(context.Context) (r0 []types.MinimalRepo, r1 error) {
return
},
},
}
}
// NewStrictMockIndexableReposLister creates a new mock of the
// IndexableReposLister interface. All methods panic on invocation, unless
// overwritten.
func NewStrictMockIndexableReposLister() *MockIndexableReposLister {
return &MockIndexableReposLister{
ListFunc: &IndexableReposListerListFunc{
defaultHook: func(context.Context) ([]types.MinimalRepo, error) {
panic("unexpected invocation of MockIndexableReposLister.List")
},
},
}
}
// NewMockIndexableReposListerFrom creates a new mock of the
// MockIndexableReposLister interface. All methods delegate to the given
// implementation, unless overwritten.
func NewMockIndexableReposListerFrom(i IndexableReposLister) *MockIndexableReposLister {
return &MockIndexableReposLister{
ListFunc: &IndexableReposListerListFunc{
defaultHook: i.List,
},
}
}
// IndexableReposListerListFunc describes the behavior when the List method
// of the parent MockIndexableReposLister instance is invoked.
type IndexableReposListerListFunc struct {
defaultHook func(context.Context) ([]types.MinimalRepo, error)
hooks []func(context.Context) ([]types.MinimalRepo, error)
history []IndexableReposListerListFuncCall
mutex sync.Mutex
}
// List delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockIndexableReposLister) List(v0 context.Context) ([]types.MinimalRepo, error) {
r0, r1 := m.ListFunc.nextHook()(v0)
m.ListFunc.appendCall(IndexableReposListerListFuncCall{v0, r0, r1})
return r0, r1
}
// SetDefaultHook installs the function that List falls back to once the
// one-shot hook queue has been exhausted.
func (f *IndexableReposListerListFunc) SetDefaultHook(fallback func(context.Context) ([]types.MinimalRepo, error)) {
	f.defaultHook = fallback
}
// PushHook appends a one-shot hook to the queue. Each invocation of List
// consumes the hook at the front of the queue; after the queue drains,
// the default hook handles all further calls.
func (f *IndexableReposListerListFunc) PushHook(hook func(context.Context) ([]types.MinimalRepo, error)) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.hooks = append(f.hooks, hook)
}
// SetDefaultReturn configures the default hook to return the given fixed
// values on every call.
func (f *IndexableReposListerListFunc) SetDefaultReturn(repos []types.MinimalRepo, err error) {
	f.SetDefaultHook(func(context.Context) ([]types.MinimalRepo, error) {
		return repos, err
	})
}
// PushReturn queues a one-shot hook that returns the given fixed values.
func (f *IndexableReposListerListFunc) PushReturn(repos []types.MinimalRepo, err error) {
	f.PushHook(func(context.Context) ([]types.MinimalRepo, error) {
		return repos, err
	})
}
// nextHook pops and returns the front of the one-shot hook queue, or the
// default hook if the queue is empty.
func (f *IndexableReposListerListFunc) nextHook() func(context.Context) ([]types.MinimalRepo, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	if len(f.hooks) > 0 {
		next := f.hooks[0]
		f.hooks = f.hooks[1:]
		return next
	}
	return f.defaultHook
}
// appendCall records one invocation in the history under the mutex.
func (f *IndexableReposListerListFunc) appendCall(call IndexableReposListerListFuncCall) {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	f.history = append(f.history, call)
}
// History returns a snapshot of every recorded invocation of List. The
// returned slice is a copy, so callers may inspect it without holding any
// lock.
func (f *IndexableReposListerListFunc) History() []IndexableReposListerListFuncCall {
	f.mutex.Lock()
	defer f.mutex.Unlock()
	snapshot := make([]IndexableReposListerListFuncCall, len(f.history))
	copy(snapshot, f.history)
	return snapshot
}
// IndexableReposListerListFuncCall is an object that describes an
// invocation of method List on an instance of MockIndexableReposLister.
type IndexableReposListerListFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation (the context).
	Arg0 context.Context
	// Result0 is the value of the 1st result returned from this method
	// invocation (the listed repos).
	Result0 []types.MinimalRepo
	// Result1 is the value of the 2nd result returned from this method
	// invocation (the error, if any).
	Result1 error
}
// Args returns this invocation's arguments packed into an interface
// slice, in declaration order.
func (c IndexableReposListerListFuncCall) Args() []interface{} {
	args := make([]interface{}, 1)
	args[0] = c.Arg0
	return args
}
// Results returns this invocation's results packed into an interface
// slice, in declaration order.
func (c IndexableReposListerListFuncCall) Results() []interface{} {
	results := make([]interface{}, 2)
	results[0] = c.Result0
	results[1] = c.Result1
	return results
}
// MockRepoStore is a mock implementation of the RepoStore interface (from
// the package
// github.com/sourcegraph/sourcegraph/internal/insights/discovery) used for

View File

@ -34,7 +34,6 @@ go_library(
"sources_test_utils.go",
"status_messages.go",
"store.go",
"sync_errored.go",
"sync_worker.go",
"syncer.go",
"testing.go",
@ -198,7 +197,6 @@ go_test(
"@com_github_sourcegraph_zoekt//:zoekt",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@org_golang_x_time//rate",
],
)

View File

@ -219,13 +219,6 @@ func buildSizeConstraintsExcludeFn(constraint string) (excludeFunc, error) {
type operator string
const (
opLess operator = "<"
opLessOrEqual operator = "<="
opGreater operator = ">"
opGreaterOrEqual operator = ">="
)
func newOperator(input string) (operator, error) {
if input != "<" && input != "<=" && input != ">" && input != ">=" {
return "", errors.Newf("invalid operator %q", input)

View File

@ -104,7 +104,7 @@ func NewMockStore() *MockStore {
},
},
EnqueueSyncJobsFunc: &StoreEnqueueSyncJobsFunc{
defaultHook: func(context.Context, bool) (r0 error) {
defaultHook: func(context.Context) (r0 error) {
return
},
},
@ -191,7 +191,7 @@ func NewStrictMockStore() *MockStore {
},
},
EnqueueSyncJobsFunc: &StoreEnqueueSyncJobsFunc{
defaultHook: func(context.Context, bool) error {
defaultHook: func(context.Context) error {
panic("unexpected invocation of MockStore.EnqueueSyncJobs")
},
},
@ -847,24 +847,24 @@ func (c StoreEnqueueSingleSyncJobFuncCall) Results() []interface{} {
// StoreEnqueueSyncJobsFunc describes the behavior when the EnqueueSyncJobs
// method of the parent MockStore instance is invoked.
type StoreEnqueueSyncJobsFunc struct {
defaultHook func(context.Context, bool) error
hooks []func(context.Context, bool) error
defaultHook func(context.Context) error
hooks []func(context.Context) error
history []StoreEnqueueSyncJobsFuncCall
mutex sync.Mutex
}
// EnqueueSyncJobs delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockStore) EnqueueSyncJobs(v0 context.Context, v1 bool) error {
r0 := m.EnqueueSyncJobsFunc.nextHook()(v0, v1)
m.EnqueueSyncJobsFunc.appendCall(StoreEnqueueSyncJobsFuncCall{v0, v1, r0})
func (m *MockStore) EnqueueSyncJobs(v0 context.Context) error {
r0 := m.EnqueueSyncJobsFunc.nextHook()(v0)
m.EnqueueSyncJobsFunc.appendCall(StoreEnqueueSyncJobsFuncCall{v0, r0})
return r0
}
// SetDefaultHook sets function that is called when the EnqueueSyncJobs
// method of the parent MockStore instance is invoked and the hook queue is
// empty.
func (f *StoreEnqueueSyncJobsFunc) SetDefaultHook(hook func(context.Context, bool) error) {
func (f *StoreEnqueueSyncJobsFunc) SetDefaultHook(hook func(context.Context) error) {
f.defaultHook = hook
}
@ -872,7 +872,7 @@ func (f *StoreEnqueueSyncJobsFunc) SetDefaultHook(hook func(context.Context, boo
// EnqueueSyncJobs method of the parent MockStore instance invokes the hook
// at the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *StoreEnqueueSyncJobsFunc) PushHook(hook func(context.Context, bool) error) {
func (f *StoreEnqueueSyncJobsFunc) PushHook(hook func(context.Context) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -881,19 +881,19 @@ func (f *StoreEnqueueSyncJobsFunc) PushHook(hook func(context.Context, bool) err
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *StoreEnqueueSyncJobsFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(context.Context, bool) error {
f.SetDefaultHook(func(context.Context) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *StoreEnqueueSyncJobsFunc) PushReturn(r0 error) {
f.PushHook(func(context.Context, bool) error {
f.PushHook(func(context.Context) error {
return r0
})
}
func (f *StoreEnqueueSyncJobsFunc) nextHook() func(context.Context, bool) error {
func (f *StoreEnqueueSyncJobsFunc) nextHook() func(context.Context) error {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -929,9 +929,6 @@ type StoreEnqueueSyncJobsFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 bool
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
@ -940,7 +937,7 @@ type StoreEnqueueSyncJobsFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c StoreEnqueueSyncJobsFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this

View File

@ -21,7 +21,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/extsvc/phabricator"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/types"
@ -574,12 +573,6 @@ func listRepos(t *testing.T, cf *httpcli.Factory, gc gitserver.Client, svc *type
return repos
}
func newClientFactoryWithOpt(t testing.TB, name string, opt httpcli.Opt) (*httpcli.Factory, func(testing.TB)) {
mw, rec := TestClientFactorySetup(t, name)
return httpcli.NewFactory(mw, opt, httptestutil.NewRecorderOpt(rec)),
func(t testing.TB) { Save(t, rec) }
}
func getAWSEnv(envVar string) string {
s := os.Getenv(envVar)
if s == "" {

View File

@ -69,21 +69,11 @@ type Store interface {
// service if the external service is not deleted and no other job is
// already queued or processing.
//
// Additionally, it also skips queueing up a sync job for cloud_default
// external services. This is done to avoid the sync job for the
// cloud_default triggering a deletion of repos because:
// 1. cloud_default does not define any repos in its config
// 2. repos under the cloud_default are lazily synced the first time a user accesses them
//
// This is a limitation of our current repo syncing architecture. The
// cloud_default flag is only set on sourcegraph.com and manages public GitHub
// and GitLab repositories that have been lazily synced.
//
// It can block if a row-level lock is held on the given external service,
// for example if it's being deleted.
EnqueueSingleSyncJob(ctx context.Context, extSvcID int64) (err error)
// EnqueueSyncJobs enqueues sync jobs for all external services that are due.
EnqueueSyncJobs(ctx context.Context, isCloud bool) (err error)
EnqueueSyncJobs(ctx context.Context) (err error)
// ListSyncJobs returns all sync jobs.
ListSyncJobs(ctx context.Context) ([]SyncJob, error)
}
@ -582,7 +572,6 @@ WITH es AS (
FROM external_services es
WHERE
id = %s
AND NOT cloud_default
AND deleted_at IS NULL
FOR UPDATE
)
@ -600,7 +589,7 @@ WHERE NOT EXISTS (
return s.Exec(ctx, q)
}
func (s *store) EnqueueSyncJobs(ctx context.Context, isDotCom bool) (err error) {
func (s *store) EnqueueSyncJobs(ctx context.Context) (err error) {
tr, ctx := s.trace(ctx, "Store.EnqueueSyncJobs")
defer func(began time.Time) {
@ -610,13 +599,7 @@ func (s *store) EnqueueSyncJobs(ctx context.Context, isDotCom bool) (err error)
tr.End()
}(time.Now())
filter := "TRUE"
// On Sourcegraph.com we don't sync our default sources in the background, they are synced
// on demand instead.
if isDotCom {
filter = "cloud_default = false"
}
q := sqlf.Sprintf(enqueueSyncJobsQueryFmtstr, sqlf.Sprintf(filter))
q := sqlf.Sprintf(enqueueSyncJobsQueryFmtstr)
return s.Exec(ctx, q)
}
@ -629,7 +612,6 @@ WITH due AS (
WHERE (next_sync_at <= clock_timestamp() OR next_sync_at IS NULL)
AND deleted_at IS NULL
AND LOWER(kind) != 'phabricator'
AND %s
FOR UPDATE OF external_services -- We query 'FOR UPDATE' so we don't enqueue
-- sync jobs while an external service is being deleted.
),

View File

@ -36,11 +36,10 @@ func TestStoreEnqueueSyncJobs(t *testing.T) {
services := generateExternalServices(10, mkExternalServices(now)...)
type testCase struct {
name string
stored types.ExternalServices
queued func(types.ExternalServices) []int64
ignoreSiteAdmin bool
err error
name string
stored types.ExternalServices
queued func(types.ExternalServices) []int64
err error
}
var testCases []testCase
@ -61,15 +60,6 @@ func TestStoreEnqueueSyncJobs(t *testing.T) {
queued: func(svcs types.ExternalServices) []int64 { return []int64{} },
})
testCases = append(testCases, testCase{
name: "ignore siteadmin repos",
stored: services.With(func(s *types.ExternalService) {
s.NextSyncAt = now.Add(10 * time.Second)
}),
ignoreSiteAdmin: true,
queued: func(svcs types.ExternalServices) []int64 { return []int64{} },
})
{
i := 0
testCases = append(testCases, testCase{
@ -110,7 +100,7 @@ func TestStoreEnqueueSyncJobs(t *testing.T) {
t.Fatalf("failed to setup store: %v", err)
}
err := store.EnqueueSyncJobs(ctx, tc.ignoreSiteAdmin)
err := store.EnqueueSyncJobs(ctx)
if have, want := fmt.Sprint(err), fmt.Sprint(tc.err); have != want {
t.Errorf("error:\nhave: %v\nwant: %v", have, want)
}
@ -206,36 +196,6 @@ func TestStoreEnqueueSingleSyncJob(t *testing.T) {
t.Fatal(err)
}
assertSyncJobCount(t, store, 2)
// Test that cloud default external services don't get jobs enqueued (no-ops instead of errors)
q = sqlf.Sprintf("UPDATE external_service_sync_jobs SET state='completed'")
if _, err = store.Handle().ExecContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...); err != nil {
t.Fatal(err)
}
service.CloudDefault = true
err = store.ExternalServiceStore().Upsert(ctx, &service)
if err != nil {
t.Fatal(err)
}
err = store.EnqueueSingleSyncJob(ctx, service.ID)
if err != nil {
t.Fatal(err)
}
assertSyncJobCount(t, store, 2)
// Test that cloud default external services don't get jobs enqueued also when there are no job rows.
q = sqlf.Sprintf("DELETE FROM external_service_sync_jobs")
if _, err = store.Handle().ExecContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...); err != nil {
t.Fatal(err)
}
err = store.EnqueueSingleSyncJob(ctx, service.ID)
if err != nil {
t.Fatal(err)
}
assertSyncJobCount(t, store, 0)
}
func assertSyncJobCount(t *testing.T, store repos.Store, want int) {
@ -275,7 +235,7 @@ func TestStoreEnqueuingSyncJobsWhileExtSvcBeingDeleted(t *testing.T) {
},
"EnqueueSyncJobs": func(t *testing.T, ctx context.Context, store repos.Store, _ *types.ExternalService) {
t.Helper()
if err := store.EnqueueSyncJobs(ctx, false); err != nil {
if err := store.EnqueueSyncJobs(ctx); err != nil {
t.Fatal(err)
}
},

View File

@ -1,81 +0,0 @@
package repos
import (
"context"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// syncInterval is how often the repos-with-last-errors worker runs.
const syncInterval = 5 * time.Minute

// erroredRepoGauge counts repos re-synced during the current worker pass;
// it is reset to zero at the start of each pass.
var erroredRepoGauge = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "src_repoupdater_syncer_sync_repos_with_last_error_total",
	Help: "Counts number of repos with non empty_last errors which have been synced.",
})

// totalErroredRepos tracks the total number of repos currently carrying a
// non-empty last_error, as reported by the store.
var totalErroredRepos = promauto.NewGauge(prometheus.GaugeOpts{
	Name: "src_repoupdater_syncer_total_errored_repos",
	Help: "Total number of repos with last error currently.",
})
// newSyncReposWithLastErrorsWorker returns a periodic background routine
// that, every syncInterval, re-syncs each repo whose gitserver_repos row
// has a non-empty last_error. The handler runs under an internal actor so
// it is not subject to user-level permission checks.
func (s *Syncer) newSyncReposWithLastErrorsWorker(ctx context.Context, rateLimiter *ratelimit.InstrumentedLimiter) goroutine.BackgroundRoutine {
	return goroutine.NewPeriodicGoroutine(
		actor.WithInternalActor(ctx),
		goroutine.HandlerFunc(func(ctx context.Context) error {
			s.ObsvCtx.Logger.Info("running worker for SyncReposWithLastErrors", log.Time("time", time.Now()))
			err := s.SyncReposWithLastErrors(ctx, rateLimiter)
			if err != nil {
				return errors.Wrap(err, "Error syncing repos with errors")
			}
			return nil
		}),
		goroutine.WithName("repo-updater.repos-with-last-errors-syncer"),
		goroutine.WithDescription("iterates through all repos which have a non-empty last_error column in the gitserver_repos table, indicating there was an issue updating the repo, and syncs each of these repos. Repos which are no longer visible (i.e. deleted or made private) will be deleted from the DB. Sourcegraph.com only."),
		goroutine.WithInterval(syncInterval),
	)
}
// SyncReposWithLastErrors iterates through all repos which have a non-empty
// last_error column in the gitserver_repos table, indicating there was an issue
// updating the repo, and syncs each of these repos. Repos which are no longer
// visible (i.e. deleted or made private) will be deleted from the DB. Note that
// this is only being run in Sourcegraph Dot com mode.
//
// It returns an error only when listing the errored repos fails or the rate
// limiter is interrupted (e.g. by context cancellation); failures to sync
// individual repos are logged and do not abort the pass.
func (s *Syncer) SyncReposWithLastErrors(ctx context.Context, rateLimiter *ratelimit.InstrumentedLimiter) error {
	erroredRepoGauge.Set(0)
	s.setTotalErroredRepos(ctx)
	repoNames, err := s.Store.GitserverReposStore().ListReposWithLastError(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to list gitserver_repos with last_error not null")
	}
	for _, repoName := range repoNames {
		// Respect the rate limit between individual repo syncs; a context
		// cancellation surfaces here and aborts the remainder of the pass.
		if err := rateLimiter.Wait(ctx); err != nil {
			return errors.Errorf("error waiting for rate limiter: %s", err)
		}
		// Individual sync failures are logged, not returned, so one bad repo
		// does not block the rest.
		if _, err := s.SyncRepo(ctx, repoName, false); err != nil {
			s.ObsvCtx.Logger.Error("error syncing repo", log.String("repo", string(repoName)), log.Error(err))
		}
		// NOTE: incremented even when the sync failed — the gauge counts
		// attempted repos, not successes.
		erroredRepoGauge.Inc()
	}
	// The original `return err` here always evaluated to nil (any non-nil err
	// was returned earlier, and loop errors are scoped to the loop body);
	// return nil explicitly to make that intent clear.
	return nil
}
// setTotalErroredRepos refreshes the totalErroredRepos gauge from the count
// reported by the store. A fetch failure is logged and the gauge is left
// unchanged (best-effort metric, not worth failing the sync pass).
func (s *Syncer) setTotalErroredRepos(ctx context.Context) {
	totalErrored, err := s.Store.GitserverReposStore().TotalErroredCloudDefaultRepos(ctx)
	if err != nil {
		s.ObsvCtx.Logger.Error("error fetching count of total errored repos", log.Error(err))
		return
	}
	totalErroredRepos.Set(float64(totalErrored))
}

View File

@ -14,18 +14,14 @@ import (
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/bytesize"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/licensing"
"github.com/sourcegraph/sourcegraph/internal/metrics"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -62,7 +58,6 @@ func NewSyncer(observationCtx *observation.Context, store Store, sourcer Sourcer
// RunOptions contains options customizing Run behaviour.
type RunOptions struct {
EnqueueInterval func() time.Duration // Defaults to 1 minute
IsDotCom bool // Defaults to false
MinSyncInterval func() time.Duration // Defaults to 1 minute
DequeueInterval time.Duration // Default to 10 seconds
}
@ -99,7 +94,7 @@ func (s *Syncer) Routines(ctx context.Context, store Store, opts RunOptions) []g
return nil
}
if err := store.EnqueueSyncJobs(ctx, opts.IsDotCom); err != nil {
if err := store.EnqueueSyncJobs(ctx); err != nil {
return errors.Wrap(err, "enqueueing sync jobs")
}
@ -112,11 +107,6 @@ func (s *Syncer) Routines(ctx context.Context, store Store, opts RunOptions) []g
routines := []goroutine.BackgroundRoutine{worker, resetter, syncerJanitor, scheduler}
if opts.IsDotCom {
rateLimiter := ratelimit.NewInstrumentedLimiter("SyncReposWithLastErrors", rate.NewLimiter(1, 1))
routines = append(routines, s.newSyncReposWithLastErrorsWorker(ctx, rateLimiter))
}
return routines
}
@ -189,180 +179,6 @@ func (e ErrAccountSuspended) AccountSuspended() bool {
return true
}
// SyncRepo syncs a single repository by name and associates it with an external service.
//
// It works for repos from:
//
// 1. Public "cloud_default" code hosts since we don't sync them in the background
// (which would delete lazy synced repos).
// 2. Any package hosts (i.e. npm, Maven, etc) since callers are expected to store
// repos in the `lsif_dependency_repos` table which is used as the source of truth
// for the next full sync, so lazy added repos don't get wiped.
//
// The "background" boolean flag indicates that we should run this
// sync in the background vs block and call s.syncRepo synchronously.
func (s *Syncer) SyncRepo(ctx context.Context, name api.RepoName, background bool) (repo *types.Repo, err error) {
	logger := s.ObsvCtx.Logger.With(log.String("name", string(name)), log.Bool("background", background))
	logger.Debug("SyncRepo started")
	tr, ctx := trace.New(ctx, "Syncer.SyncRepo", name.Attr())
	defer tr.End()
	// A not-found error is tolerated here: repo stays nil and may be lazily
	// added below. Any other lookup error is fatal.
	repo, err = s.Store.RepoStore().GetByName(ctx, name)
	if err != nil && !errcode.IsNotFound(err) {
		return nil, errors.Wrapf(err, "GetByName failed for %q", name)
	}
	codehost := extsvc.CodeHostOf(name, extsvc.PublicCodeHosts...)
	if codehost == nil {
		// Not a public code host: return the stored repo if we have one,
		// otherwise report not found — we never lazily add such repos.
		if repo != nil {
			return repo, nil
		}
		logger.Debug("no associated code host found, skipping")
		return nil, &database.RepoNotFoundErr{Name: name}
	}
	if repo != nil {
		// Only public repos can be individually synced on sourcegraph.com
		if repo.Private {
			logger.Debug("repo is private, skipping")
			return nil, &database.RepoNotFoundErr{Name: name}
		}
		// Don't sync the repo if it's been updated in the past 1 minute.
		if s.Now().Sub(repo.UpdatedAt) < time.Minute {
			logger.Debug("repo updated recently, skipping")
			return repo, nil
		}
	}
	// Background mode only applies when a (possibly stale) stored copy exists
	// to return immediately; otherwise we fall through to a blocking sync.
	if background && repo != nil {
		logger.Debug("starting background sync in goroutine")
		go func() {
			// Detach from the caller's context but bound the background
			// sync to 3 minutes.
			ctx, cancel := context.WithTimeout(context.Background(), 3*time.Minute)
			defer cancel()
			// We don't care about the return value here, but we still want to ensure that
			// only one is in flight at a time.
			updatedRepo, err, shared := s.syncGroup.Do(string(name), func() (any, error) {
				return s.syncRepo(ctx, codehost, name, repo)
			})
			// On error updatedRepo holds a typed-nil *types.Repo (the inner
			// call's concrete return type), so this assertion prints "<nil>"
			// rather than panicking.
			logger.Debug("syncGroup completed", log.String("updatedRepo", fmt.Sprintf("%v", updatedRepo.(*types.Repo))))
			if err != nil {
				logger.Error("background.SyncRepo", log.Error(err), log.Bool("shared", shared))
			}
		}()
		// Return the stale stored copy while the refresh runs in the
		// background.
		return repo, nil
	}
	logger.Debug("starting foreground sync")
	updatedRepo, err, shared := s.syncGroup.Do(string(name), func() (any, error) {
		return s.syncRepo(ctx, codehost, name, repo)
	})
	if err != nil {
		logger.Warn("foreground sync failed", log.Error(err), log.Bool("shared", shared))
		return nil, err
	}
	return updatedRepo.(*types.Repo), nil
}
// syncRepo performs the actual lazy sync of a single repo against the
// matching cloud-default (or package-host) external service: it sources the
// repo's metadata from the code host, applies the dotcom on-demand policy,
// and upserts the result. If the repo was previously stored and the code
// host reports it gone/forbidden, the stored copy is deleted via the
// deferred check below (which inspects the named return err).
func (s *Syncer) syncRepo(
	ctx context.Context,
	codehost *extsvc.CodeHost,
	name api.RepoName,
	stored *types.Repo,
) (repo *types.Repo, err error) {
	var svc *types.ExternalService
	ctx, save := s.observeSync(ctx, "Syncer.syncRepo", name.Attr())
	// save reads svc and err at return time, so both must be the named
	// variables assigned below.
	defer func() { save(svc, err) }()
	svcs, err := s.Store.ExternalServiceStore().List(ctx, database.ExternalServicesListOptions{
		Kinds: []string{extsvc.TypeToKind(codehost.ServiceType)},
		// Since package host external services have the set of repositories to sync in
		// the lsif_dependency_repos table, we can lazy-sync individual repos without wiping them
		// out in the next full background sync as long as we add them to that table.
		//
		// This permits lazy-syncing of package repos in on-prem instances as well as in cloud.
		OnlyCloudDefault: !codehost.IsPackageHost(),
		LimitOffset: &database.LimitOffset{Limit: 1},
	})
	if err != nil {
		return nil, errors.Wrap(err, "listing external services")
	}
	// Exactly one matching service is required; otherwise the repo cannot be
	// attributed to a service and is treated as not found.
	if len(svcs) != 1 {
		return nil, errors.Wrapf(
			&database.RepoNotFoundErr{Name: name},
			"cloud default external service of type %q not found", codehost.ServiceType,
		)
	}
	svc = svcs[0]
	src, err := s.Sourcer(ctx, svc)
	if err != nil {
		return nil, errors.Wrap(err, "failed to retrieve Sourcer")
	}
	// Only sources that can fetch a single repo's metadata (RepoGetter) can
	// be lazily synced.
	rg, ok := src.(RepoGetter)
	if !ok {
		return nil, errors.Wrapf(
			&database.RepoNotFoundErr{Name: name},
			"can't get repo metadata for service of type %q", codehost.ServiceType,
		)
	}
	// Strip the host prefix from the repo name to get the code-host-relative
	// path, e.g. "github.com/foo/bar" -> "foo/bar".
	path := strings.TrimPrefix(string(name), strings.TrimPrefix(codehost.ServiceID, "https://"))
	if stored != nil {
		// Deferred: if the sync below failed with an error implying the repo
		// is gone (deleted, private, suspended, ...), remove the stored copy
		// and notify listeners of the deletion.
		defer func() {
			s.ObsvCtx.Logger.Debug("deferred deletable repo check")
			if isDeleteableRepoError(err) {
				err2 := s.Store.DeleteExternalServiceRepo(ctx, svc, stored.ID)
				if err2 != nil {
					s.ObsvCtx.Logger.Error(
						"SyncRepo failed to delete",
						log.Object("svc", log.String("name", svc.DisplayName), log.Int64("id", svc.ID)),
						log.String("repo", string(name)),
						log.NamedError("cause", err),
						log.Error(err2),
					)
				}
				s.ObsvCtx.Logger.Debug("external service repo deleted", log.Int32("deleted ID", int32(stored.ID)))
				s.notifyDeleted(ctx, stored.ID)
			}
		}()
	}
	repo, err = rg.GetRepo(ctx, path)
	if err != nil {
		return nil, errors.Wrapf(err, "failed to get repo with path: %q", path)
	}
	// Enforce the dotcom on-demand policy (size/star limits).
	if err := repoCanBeAddedOnDemandDotcom(repo); err != nil {
		return nil, err
	}
	// Private repos are never lazily synced.
	if repo.Private {
		s.ObsvCtx.Logger.Debug("repo is private, skipping")
		return nil, &database.RepoNotFoundErr{Name: name}
	}
	if _, err = s.sync(ctx, svc, repo); err != nil {
		return nil, err
	}
	return repo, nil
}
// isDeleteableRepoError reports whether the error returned from a repo
// sync signals that the repo can safely be deleted from the database.
func isDeleteableRepoError(err error) bool {
	deletable := errcode.IsNotFound(err) ||
		errcode.IsUnauthorized(err) ||
		errcode.IsForbidden(err) ||
		errcode.IsAccountSuspended(err) ||
		errcode.IsUnavailableForLegalReasons(err)
	return deletable
}
func (s *Syncer) notifyDeleted(ctx context.Context, deleted ...api.RepoID) {
var d types.RepoSyncDiff
for _, id := range deleted {
@ -378,13 +194,6 @@ func (s *Syncer) notifyDeleted(ctx context.Context, deleted ...api.RepoID) {
}
}
// ErrCloudDefaultSync is returned by SyncExternalService when an attempt is
// made to sync a cloud default external service. We can't sync these external
// services because their repos are added via the lazy-syncing mechanism on
// sourcegraph.com instead of config (which is empty), so attempting to sync
// them would delete all of the lazy-added repos.
var ErrCloudDefaultSync = errors.New("cloud default external services can't be synced")
// SyncProgress represents running counts for an external service sync.
type SyncProgress struct {
Synced int32 `json:"synced,omitempty"`
@ -461,15 +270,6 @@ func (s *Syncer) SyncExternalService(
logger.Debug("synced external service", log.Duration("backoff duration", interval))
}()
// We have fail-safes in place to prevent enqueuing sync jobs for cloud default
// external services, but in case those fail to prevent a sync for any reason,
// we have this additional check here. Cloud default external services have their
// repos added via the lazy-syncing mechanism on sourcegraph.com instead of config
// (which is empty), so attempting to sync them would delete all of the lazy-added repos.
if svc.CloudDefault {
return ErrCloudDefaultSync
}
src, err := s.Sourcer(ctx, svc)
if err != nil {
return err
@ -922,31 +722,3 @@ func syncErrorReason(err error) string {
return "unknown"
}
}
const (
	// maxSize is the repository size at or above which low-star repos are
	// denied on-demand addition.
	maxSize = 1 * bytesize.GB
	// minStars is the star count below which the maxSize limit applies.
	minStars = 100
)
// ErrRepoDeniedTooBig is returned when a repository exceeds maxSize while
// having fewer than minStars stars, and so may not be added on-demand.
type ErrRepoDeniedTooBig struct{}

func (e ErrRepoDeniedTooBig) Error() string {
	// NOTE: This message is visible to users!
	return "repository is larger than 1GB and has less than 100 stars"
}

// IsRepoDenied marks this error as a repo-denied error for errcode checks.
func (e ErrRepoDeniedTooBig) IsRepoDenied() bool { return true }
// repoCanBeAddedOnDemandDotcom returns nil if the repository can be added
// on-demand on dotcom, and an error explaining why otherwise (e.g. the repo
// is too large for its star count). Repos whose metadata is not a GitHub
// repository are not subject to the size/star restriction.
func repoCanBeAddedOnDemandDotcom(repo *types.Repo) error {
	ghRepo, ok := repo.Metadata.(*github.Repository)
	if !ok {
		return nil
	}
	if ghRepo.SizeBytes() >= maxSize && ghRepo.StargazerCount < minStars {
		return ErrRepoDeniedTooBig{}
	}
	return nil
}

View File

@ -14,10 +14,8 @@ import (
"github.com/keegancsmith/sqlf"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
@ -31,7 +29,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/licensing"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/repos"
"github.com/sourcegraph/sourcegraph/internal/timeutil"
"github.com/sourcegraph/sourcegraph/internal/types"
@ -683,222 +680,6 @@ func TestSyncerSync(t *testing.T) {
}
}
func TestSyncRepo(t *testing.T) {
t.Parallel()
store := getTestRepoStore(t)
servicesPerKind := createExternalServices(t, store, func(svc *types.ExternalService) { svc.CloudDefault = true })
repo := &types.Repo{
ID: 0, // explicitly make default value for sourced repo
Name: "github.com/foo/bar",
Description: "The description",
Archived: false,
Fork: false,
Stars: 100,
ExternalRepo: api.ExternalRepoSpec{
ID: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
ServiceType: extsvc.TypeGitHub,
ServiceID: "https://github.com/",
},
Sources: map[string]*types.SourceInfo{
servicesPerKind[extsvc.KindGitHub].URN(): {
ID: servicesPerKind[extsvc.KindGitHub].URN(),
CloneURL: "git@github.com:foo/bar.git",
},
},
Metadata: &github.Repository{
ID: "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAA==",
URL: "github.com/foo/bar",
DatabaseID: 1234,
Description: "The description",
NameWithOwner: "foo/bar",
StargazerCount: 100,
},
}
now := time.Now().UTC()
oldRepo := repo.With(func(r *types.Repo) {
r.UpdatedAt = now.Add(-time.Hour)
r.CreatedAt = r.UpdatedAt.Add(-time.Hour)
r.Stars = 0
})
testCases := []struct {
name string
repo api.RepoName
background bool // whether to run SyncRepo in the background
before types.Repos // the repos to insert into the database before syncing
sourced *types.Repo // the repo that is returned by the fake sourcer
returned *types.Repo // the expected return value from SyncRepo (which changes meaning depending on background)
after types.Repos // the expected database repos after syncing
diff types.RepoSyncDiff // the expected types.Diff sent by the syncer
}{{
name: "insert",
repo: repo.Name,
background: true,
sourced: repo.Clone(),
returned: repo,
after: types.Repos{repo},
diff: types.RepoSyncDiff{
Added: types.Repos{repo},
},
}, {
name: "update",
repo: repo.Name,
background: true,
before: types.Repos{oldRepo},
sourced: repo.Clone(),
returned: oldRepo,
after: types.Repos{repo},
diff: types.RepoSyncDiff{
Modified: types.ReposModified{
{Repo: repo, Modified: types.RepoModifiedStars},
},
},
}, {
name: "blocking update",
repo: repo.Name,
background: false,
before: types.Repos{oldRepo},
sourced: repo.Clone(),
returned: repo,
after: types.Repos{repo},
diff: types.RepoSyncDiff{
Modified: types.ReposModified{
{Repo: repo, Modified: types.RepoModifiedStars},
},
},
}, {
name: "update name",
repo: repo.Name,
background: true,
before: types.Repos{repo.With(typestest.Opt.RepoName("old/name"))},
sourced: repo.Clone(),
returned: repo,
after: types.Repos{repo},
diff: types.RepoSyncDiff{
Modified: types.ReposModified{
{Repo: repo, Modified: types.RepoModifiedName},
},
},
}, {
name: "archived",
repo: repo.Name,
background: true,
before: types.Repos{repo},
sourced: repo.With(typestest.Opt.RepoArchived(true)),
returned: repo,
after: types.Repos{repo.With(typestest.Opt.RepoArchived(true))},
diff: types.RepoSyncDiff{
Modified: types.ReposModified{
{
Repo: repo.With(typestest.Opt.RepoArchived(true)),
Modified: types.RepoModifiedArchived,
},
},
},
}, {
name: "unarchived",
repo: repo.Name,
background: true,
before: types.Repos{repo.With(typestest.Opt.RepoArchived(true))},
sourced: repo.Clone(),
returned: repo.With(typestest.Opt.RepoArchived(true)),
after: types.Repos{repo},
diff: types.RepoSyncDiff{
Modified: types.ReposModified{
{Repo: repo, Modified: types.RepoModifiedArchived},
},
},
}, {
name: "delete conflicting name",
repo: repo.Name,
background: true,
before: types.Repos{repo.With(typestest.Opt.RepoExternalID("old id"))},
sourced: repo.Clone(),
returned: repo.With(typestest.Opt.RepoExternalID("old id")),
after: types.Repos{repo},
diff: types.RepoSyncDiff{
Modified: types.ReposModified{
{Repo: repo, Modified: types.RepoModifiedExternalRepo},
},
},
}, {
name: "rename and delete conflicting name",
repo: repo.Name,
background: true,
before: types.Repos{
repo.With(typestest.Opt.RepoExternalID("old id")),
repo.With(typestest.Opt.RepoName("old name")),
},
sourced: repo.Clone(),
returned: repo.With(typestest.Opt.RepoExternalID("old id")),
after: types.Repos{repo},
diff: types.RepoSyncDiff{
Modified: types.ReposModified{
{Repo: repo, Modified: types.RepoModifiedName},
},
},
}}
for _, tc := range testCases {
tc := tc
ctx := context.Background()
t.Run(tc.name, func(t *testing.T) {
q := sqlf.Sprintf("DELETE FROM repo")
_, err := store.Handle().ExecContext(ctx, q.Query(sqlf.PostgresBindVar), q.Args()...)
if err != nil {
t.Fatal(err)
}
if len(tc.before) > 0 {
if err := store.RepoStore().Create(ctx, tc.before.Clone()...); err != nil {
t.Fatalf("failed to prepare store: %v", err)
}
}
syncer := &repos.Syncer{
ObsvCtx: observation.TestContextTB(t),
Now: time.Now,
Store: store,
Synced: make(chan types.RepoSyncDiff, 1),
Sourcer: repos.NewFakeSourcer(nil,
repos.NewFakeSource(servicesPerKind[extsvc.KindGitHub], nil, tc.sourced),
),
}
have, err := syncer.SyncRepo(ctx, tc.repo, tc.background)
if err != nil {
t.Fatal(err)
}
if have.ID == 0 {
t.Errorf("expected returned synced repo to have an ID set")
}
opt := cmpopts.IgnoreFields(types.Repo{}, "ID", "CreatedAt", "UpdatedAt")
if diff := cmp.Diff(have, tc.returned, opt); diff != "" {
t.Errorf("returned mismatch: (-have, +want):\n%s", diff)
}
if diff := cmp.Diff(<-syncer.Synced, tc.diff, opt); diff != "" {
t.Errorf("diff mismatch: (-have, +want):\n%s", diff)
}
after, err := store.RepoStore().List(ctx, database.ReposListOptions{})
if err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(types.Repos(after), tc.after, opt); diff != "" {
t.Errorf("repos mismatch: (-have, +want):\n%s", diff)
}
})
}
}
func TestSyncRun(t *testing.T) {
t.Parallel()
store := getTestRepoStore(t)
@ -959,7 +740,6 @@ func TestSyncRun(t *testing.T) {
ctx,
syncer.Routines(ctx, store, repos.RunOptions{
EnqueueInterval: func() time.Duration { return time.Second },
IsDotCom: false,
MinSyncInterval: func() time.Duration { return 1 * time.Millisecond },
DequeueInterval: 1 * time.Millisecond,
})...,
@ -1111,7 +891,6 @@ func TestSyncerMultipleServices(t *testing.T) {
ctx,
syncer.Routines(ctx, store, repos.RunOptions{
EnqueueInterval: func() time.Duration { return time.Second },
IsDotCom: false,
MinSyncInterval: func() time.Duration { return 1 * time.Minute },
DequeueInterval: 1 * time.Millisecond,
})...,
@ -1289,57 +1068,6 @@ func TestOrphanedRepo(t *testing.T) {
assertDeletedRepoCount(ctx, t, store, 1)
}
// TestCloudDefaultExternalServicesDontSync verifies that running a full
// external-service sync against a cloud-default code host connection is
// rejected with repos.ErrCloudDefaultSync instead of syncing any repos.
func TestCloudDefaultExternalServicesDontSync(t *testing.T) {
	t.Parallel()

	store := getTestRepoStore(t)

	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	// A single cloud-default GitHub connection is all the syncer should see.
	now := time.Now()
	service := &types.ExternalService{
		Kind:         extsvc.KindGitHub,
		DisplayName:  "Github - Test1",
		Config:       extsvc.NewUnencryptedConfig(basicGitHubConfig),
		CloudDefault: true,
		CreatedAt:    now,
		UpdatedAt:    now,
	}

	// Persist the service so the syncer can resolve it by ID.
	if err := store.ExternalServiceStore().Upsert(ctx, service); err != nil {
		t.Fatal(err)
	}

	sourcedRepo := &types.Repo{
		Name:     "github.com/org/foo",
		Metadata: &github.Repository{},
		ExternalRepo: api.ExternalRepoSpec{
			ID:          "foo-external-12345",
			ServiceID:   "https://github.com/",
			ServiceType: extsvc.TypeGitHub,
		},
	}

	syncer := &repos.Syncer{
		ObsvCtx: observation.TestContextTB(t),
		Store:   store,
		Now:     time.Now,
		Sourcer: func(ctx context.Context, _ *types.ExternalService) (repos.Source, error) {
			return repos.NewFakeSource(service, nil, sourcedRepo), nil
		},
	}

	// The sync must be refused outright because the service is cloud-default.
	have := syncer.SyncExternalService(ctx, service.ID, 10*time.Second, noopProgressRecorder)
	if want := repos.ErrCloudDefaultSync; !errors.Is(have, want) {
		t.Fatalf("have err: %v, want %v", have, want)
	}
}
func TestDotComPrivateReposDontSync(t *testing.T) {
dotcom.MockSourcegraphDotComMode(t, true)
@ -1616,22 +1344,6 @@ func TestSyncRepoMaintainsOtherSources(t *testing.T) {
// Confirm that there are two relationships
assertSourceCount(ctx, t, store, 2)
// Run syncRepo with only one source
urn := extsvc.URN(extsvc.KindGitHub, svc1.ID)
githubRepo.Sources = map[string]*types.SourceInfo{
urn: {
ID: urn,
CloneURL: "cloneURL",
},
}
_, err := syncer.SyncRepo(ctx, githubRepo.Name, true)
if err != nil {
t.Fatal(err)
}
// We should still have two sources
assertSourceCount(ctx, t, store, 2)
}
func TestNameOnConflictOnRename(t *testing.T) {
@ -1870,169 +1582,6 @@ func assertDeletedRepoCount(ctx context.Context, t *testing.T, store repos.Store
}
}
// TestSyncReposWithLastErrors checks that SyncReposWithLastErrors deletes
// repos whose gitserver clone recorded a "not found"-style last_error, for
// both GitHub and GitLab shaped external services.
func TestSyncReposWithLastErrors(t *testing.T) {
	t.Parallel()
	store := getTestRepoStore(t)
	ctx := context.Background()
	testCases := []struct {
		label     string       // subtest name
		svcKind   string       // external service kind under test
		repoName  api.RepoName // repo that will be inserted with a last_error
		config    string       // raw external service config JSON
		extSvcErr error        // error the fake source reports for the repo
		serviceID string       // external repo ServiceID
	}{
		{
			label:     "github test",
			svcKind:   extsvc.KindGitHub,
			repoName:  api.RepoName("github.com/foo/bar"),
			config:    `{"url": "https://github.com", "repositoryQuery": ["none"], "token": "abc"}`,
			extSvcErr: github.ErrRepoNotFound,
			serviceID: "https://github.com/",
		},
		{
			label:     "gitlab test",
			svcKind:   extsvc.KindGitLab,
			repoName:  api.RepoName("gitlab.com/foo/bar"),
			config:    `{"url": "https://gitlab.com", "projectQuery": ["none"], "token": "abc"}`,
			extSvcErr: gitlab.ProjectNotFoundError{Name: "/foo/bar"},
			serviceID: "https://gitlab.com/",
		},
	}
	// NOTE: subtests share one store; the deleted-repo count below therefore
	// accumulates across iterations (asserted as i+1).
	for i, tc := range testCases {
		t.Run(tc.label, func(t *testing.T) {
			syncer, dbRepos := setupSyncErroredTest(ctx, store, t, tc.svcKind,
				tc.extSvcErr, tc.config, tc.serviceID, tc.repoName)
			if len(dbRepos) != 1 {
				t.Fatalf("should've inserted exactly 1 repo in the db for testing, got %d instead", len(dbRepos))
			}
			// Run the syncer, which should find the repo with non-empty last_error and delete it
			err := syncer.SyncReposWithLastErrors(ctx, ratelimit.NewInstrumentedLimiter("TestSyncRepos", rate.NewLimiter(200, 1)))
			if err != nil {
				t.Fatalf("unexpected error running SyncReposWithLastErrors: %s", err)
			}
			// The sync diff published on the Synced channel must contain
			// exactly the one deleted repo.
			diff := <-syncer.Synced
			deleted := types.Repos{&types.Repo{ID: dbRepos[0].ID}}
			if d := cmp.Diff(types.RepoSyncDiff{Deleted: deleted}, diff); d != "" {
				t.Fatalf("Deleted mismatch (-want +got):\n%s", d)
			}
			// each iteration will result in one more deleted repo.
			assertDeletedRepoCount(ctx, t, store, i+1)
			// Try to fetch the repo to verify that it was deleted by the syncer
			myRepo, err := store.RepoStore().GetByName(ctx, tc.repoName)
			if err == nil {
				t.Fatalf("repo should've been deleted. expected a repo not found error")
			}
			if !errors.Is(err, &database.RepoNotFoundErr{Name: tc.repoName}) {
				t.Fatalf("expected a RepoNotFound error, got %s", err)
			}
			if myRepo != nil {
				t.Fatalf("repo should've been deleted: %v", myRepo)
			}
		})
	}
}
// TestSyncReposWithLastErrorsHitsRateLimiter verifies that
// SyncReposWithLastErrors surfaces a rate-limiter error when the limiter
// cannot admit all repos before the context deadline expires.
//
// NOTE(review): the setup mixes extsvc.KindGitLab with a github.com URL,
// github.ErrRepoNotFound, and github.com repo names — presumably only the
// limiter path matters here, but worth confirming the mismatch is intended.
func TestSyncReposWithLastErrorsHitsRateLimiter(t *testing.T) {
	t.Parallel()
	store := getTestRepoStore(t)
	ctx := context.Background()
	// Two repos but a limiter allowing ~1 op/sec forces a Wait that exceeds
	// the one-second context deadline below.
	repoNames := []api.RepoName{
		"github.com/asdf/jkl",
		"github.com/foo/bar",
	}
	syncer, _ := setupSyncErroredTest(ctx, store, t, extsvc.KindGitLab, github.ErrRepoNotFound, `{"url": "https://github.com", "projectQuery": ["none"], "token": "abc"}`, "https://gitlab.com/", repoNames...)
	ctx, cancel := context.WithTimeout(ctx, time.Second)
	defer cancel()
	// Run the syncer, which should return an error due to hitting the rate limit
	err := syncer.SyncReposWithLastErrors(ctx, ratelimit.NewInstrumentedLimiter("TestSyncRepos", rate.NewLimiter(1, 1)))
	if err == nil {
		t.Fatal("SyncReposWithLastErrors should've returned an error due to hitting rate limit")
	}
	if !strings.Contains(err.Error(), "error waiting for rate limiter: rate: Wait(n=1) would exceed context deadline") {
		t.Fatalf("expected an error from rate limiting, got %s instead", err)
	}
}
// setupSyncErroredTest prepares a store and Syncer for tests around
// SyncReposWithLastErrors: it creates a cloud-default external service of
// the given kind, inserts one repo per name sourced from that service, and
// records a failing clone status ("Not found" last_error) in gitserver_repos
// for each. It returns the configured Syncer (whose fake source reports
// externalSvcError for the repos) and the inserted repos.
func setupSyncErroredTest(ctx context.Context, s repos.Store, t *testing.T,
	serviceType string, externalSvcError error, config, serviceID string, repoNames ...api.RepoName,
) (*repos.Syncer, types.Repos) {
	t.Helper()
	now := time.Now()
	dbRepos := types.Repos{}
	service := types.ExternalService{
		Kind:         serviceType,
		DisplayName:  fmt.Sprintf("%s - Test", serviceType),
		Config:       extsvc.NewUnencryptedConfig(config),
		CreatedAt:    now,
		UpdatedAt:    now,
		CloudDefault: true,
	}
	// Create a new external service
	confGet := func() *conf.Unified {
		return &conf.Unified{}
	}
	err := s.ExternalServiceStore().Create(ctx, confGet, &service)
	if err != nil {
		t.Fatal(err)
	}
	for _, repoName := range repoNames {
		dbRepo := (&types.Repo{
			Name:        repoName,
			Description: "",
			ExternalRepo: api.ExternalRepoSpec{
				ID:          fmt.Sprintf("external-%s", repoName), // TODO: make this something else?
				ServiceID:   serviceID,
				ServiceType: serviceType,
			},
		}).With(typestest.Opt.RepoSources(service.URN()))
		// Insert the repo into our database
		if err := s.RepoStore().Create(ctx, dbRepo); err != nil {
			t.Fatal(err)
		}
		// Log a failure in gitserver_repos for this repo
		if err := s.GitserverReposStore().Update(ctx, &types.GitserverRepo{
			RepoID:      dbRepo.ID,
			ShardID:     "test",
			CloneStatus: types.CloneStatusCloned,
			LastError:   "error fetching repo: Not found",
		}); err != nil {
			t.Fatal(err)
		}
		// Validate that the repo exists and we can fetch it
		_, err := s.RepoStore().GetByName(ctx, dbRepo.Name)
		if err != nil {
			t.Fatal(err)
		}
		dbRepos = append(dbRepos, dbRepo)
	}
	// Synced has capacity 1 so the first diff can be published without a
	// reader; tests drain it after running the sync.
	syncer := &repos.Syncer{
		ObsvCtx: observation.TestContextTB(t),
		Now:     time.Now,
		Store:   s,
		Synced:  make(chan types.RepoSyncDiff, 1),
		Sourcer: repos.NewFakeSourcer(
			nil,
			repos.NewFakeSource(&service,
				externalSvcError,
				dbRepos...),
		),
	}
	return syncer, dbRepos
}
// noopProgressRecorder satisfies the progress-recorder callback expected by
// SyncExternalService in tests; it discards every progress update.
var noopProgressRecorder = func(_ context.Context, _ repos.SyncProgress, _ bool) error {
	return nil
}

View File

@ -15,10 +15,7 @@ go_library(
"//internal/grpc/defaults",
"//internal/repoupdater/protocol",
"//internal/repoupdater/v1:repoupdater",
"//internal/trace",
"//lib/errors",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//status",

View File

@ -8,7 +8,6 @@ import (
"sync"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@ -16,8 +15,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/repoupdater/v1"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
var (
@ -80,52 +77,6 @@ func (c *Client) RepoUpdateSchedulerInfo(
return protocol.RepoUpdateSchedulerInfoResultFromProto(resp), nil
}
// MockRepoLookup mocks (*Client).RepoLookup for tests.
var MockRepoLookup func(protocol.RepoLookupArgs) (*protocol.RepoLookupResult, error)

// RepoLookup retrieves information about the repository on repoupdater.
//
// The gRPC response's error flags are translated into typed errors
// (ErrNotFound, ErrUnauthorized, ErrTemporary, ErrRepoDenied); note that in
// those cases the decoded result is returned alongside the error.
func (c *Client) RepoLookup(
	ctx context.Context,
	args protocol.RepoLookupArgs,
) (result *protocol.RepoLookupResult, err error) {
	// Test seam: bypass the network entirely when a mock is installed.
	if MockRepoLookup != nil {
		return MockRepoLookup(args)
	}

	// Named return values let the deferred func record the final outcome
	// (whether a repo was found, and any error) on the trace span.
	tr, ctx := trace.New(ctx, "repoupdater.RepoLookup",
		args.Repo.Attr())
	defer func() {
		if result != nil {
			tr.SetAttributes(attribute.Bool("found", result.Repo != nil))
		}
		tr.EndWithErr(&err)
	}()

	client, err := c.grpcClient()
	if err != nil {
		return nil, err
	}

	resp, err := client.RepoLookup(ctx, args.ToProto())
	if err != nil {
		return nil, errors.Wrapf(err, "RepoLookup for %+v failed", args)
	}

	res := protocol.RepoLookupResultFromProto(resp)
	// Map in-band error flags on the response to typed errors.
	switch {
	case resp.GetErrorNotFound():
		return res, &ErrNotFound{Repo: args.Repo, IsNotFound: true}
	case resp.GetErrorUnauthorized():
		return res, &ErrUnauthorized{Repo: args.Repo, NoAuthz: true}
	case resp.GetErrorTemporarilyUnavailable():
		return res, &ErrTemporary{Repo: args.Repo, IsTemporary: true}
	case resp.GetErrorRepoDenied() != "":
		return res, &ErrRepoDenied{
			Repo:   args.Repo,
			Reason: resp.GetErrorRepoDenied(),
		}
	}
	return res, nil
}
func (c *Client) RecloneRepository(ctx context.Context, repoName api.RepoName) error {
client, err := c.grpcClient()
if err != nil {

View File

@ -2,7 +2,6 @@ package repoupdater
import (
"fmt"
"net/http"
"github.com/sourcegraph/sourcegraph/internal/api"
)
@ -53,13 +52,3 @@ func (e *ErrTemporary) Temporary() bool {
// Error renders the temporary-unavailability condition, including the repo
// name, for logs and user-facing messages.
func (e *ErrTemporary) Error() string {
	const format = "repository temporarily unavailable (name=%s istemporary=%v)"
	return fmt.Sprintf(format, e.Repo, e.IsTemporary)
}
// ErrRepoDenied happens when the repository cannot be added on-demand
type ErrRepoDenied struct {
	Repo   api.RepoName
	Reason string
}

// IsRepoDenied marks this error so callers can detect denial via an
// interface check rather than a concrete type assertion.
func (e *ErrRepoDenied) IsRepoDenied() bool {
	return true
}

// HTTPStatusCode maps a denied repository to a 404 response.
func (e *ErrRepoDenied) HTTPStatusCode() int {
	return http.StatusNotFound
}

// Error returns the human-readable reason the repository was denied.
func (e *ErrRepoDenied) Error() string {
	return e.Reason
}

View File

@ -88,73 +88,6 @@ type RepoQueueState struct {
Priority int
}
// RepoLookupArgs is a request for information about a repository on repoupdater.
type RepoLookupArgs struct {
	// Repo is the repository name to look up.
	Repo api.RepoName `json:",omitempty"`
}

// ToProto converts the args to their gRPC wire representation.
func (r *RepoLookupArgs) ToProto() *proto.RepoLookupRequest {
	return &proto.RepoLookupRequest{
		Repo: string(r.Repo),
	}
}

// String implements fmt.Stringer for logging and tracing.
func (r *RepoLookupArgs) String() string {
	return fmt.Sprintf("RepoLookupArgs{Repo: %s}", r.Repo)
}
// RepoLookupResult is the response to a repository information request (RepoLookupArgs).
type RepoLookupResult struct {
	// Repo contains information about the repository, if it is found. If an error occurred, it is nil.
	Repo *RepoInfo

	// At most one of the following error fields is expected to be set;
	// clients translate them into typed errors.
	ErrorNotFound               bool   // the repository host reported that the repository was not found
	ErrorUnauthorized           bool   // the repository host rejected the client's authorization
	ErrorTemporarilyUnavailable bool   // the repository host was temporarily unavailable (e.g., rate limit exceeded)
	ErrorRepoDenied             string // the repository cannot be added on-demand on dotcom (e.g. because its too big)
}

// ToProto converts the result to its gRPC wire representation.
func (r *RepoLookupResult) ToProto() *proto.RepoLookupResponse {
	return &proto.RepoLookupResponse{
		Repo:                        r.Repo.ToProto(),
		ErrorNotFound:               r.ErrorNotFound,
		ErrorUnauthorized:           r.ErrorUnauthorized,
		ErrorTemporarilyUnavailable: r.ErrorTemporarilyUnavailable,
		ErrorRepoDenied:             r.ErrorRepoDenied,
	}
}

// RepoLookupResultFromProto decodes a gRPC response into a RepoLookupResult.
func RepoLookupResultFromProto(p *proto.RepoLookupResponse) *RepoLookupResult {
	return &RepoLookupResult{
		Repo:                        RepoInfoFromProto(p.GetRepo()),
		ErrorNotFound:               p.GetErrorNotFound(),
		ErrorUnauthorized:           p.GetErrorUnauthorized(),
		ErrorTemporarilyUnavailable: p.GetErrorTemporarilyUnavailable(),
		ErrorRepoDenied:             p.GetErrorRepoDenied(),
	}
}
// String renders a compact, space-separated summary of the lookup result:
// the repo (when present) followed by whichever error flags are set.
func (r *RepoLookupResult) String() string {
	parts := make([]string, 0, 5)
	add := func(part string) { parts = append(parts, part) }

	if r.Repo != nil {
		add("repo=" + r.Repo.String())
	}
	if r.ErrorNotFound {
		add("notfound")
	}
	if r.ErrorUnauthorized {
		add("unauthorized")
	}
	if r.ErrorTemporarilyUnavailable {
		add("tempunavailable")
	}
	if r.ErrorRepoDenied != "" {
		add("repodenied")
	}

	return fmt.Sprintf("RepoLookupResult{%s}", strings.Join(parts, " "))
}
// RepoInfo is information about a repository that lives on an external service (such as GitHub or GitLab).
type RepoInfo struct {
ID api.RepoID // ID is the unique numeric ID for this repository.

View File

@ -25,11 +25,6 @@ func (a *automaticRetryClient) RepoUpdateSchedulerInfo(ctx context.Context, in *
return a.base.RepoUpdateSchedulerInfo(ctx, in, opts...)
}
// RepoLookup delegates to the wrapped client with the default retry policy
// prepended to any caller-supplied call options.
func (a *automaticRetryClient) RepoLookup(ctx context.Context, in *proto.RepoLookupRequest, opts ...grpc.CallOption) (*proto.RepoLookupResponse, error) {
	return a.base.RepoLookup(ctx, in, append(defaults.RetryPolicy, opts...)...)
}
func (a *automaticRetryClient) EnqueueRepoUpdate(ctx context.Context, in *proto.EnqueueRepoUpdateRequest, opts ...grpc.CallOption) (*proto.EnqueueRepoUpdateResponse, error) {
opts = append(defaults.RetryPolicy, opts...)
return a.base.EnqueueRepoUpdate(ctx, in, opts...)

View File

@ -353,138 +353,6 @@ func (x *RepoQueueState) GetPriority() int64 {
return 0
}
// RepoLookupRequest is the gRPC request message for a repoupdater repo
// lookup. NOTE(review): the protoimpl boilerplate below matches standard
// protoc-gen-go output — this appears to be generated code; regenerate from
// the .proto file rather than editing by hand.
type RepoLookupRequest struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Repo is the repository name to look up.
	Repo string `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"`
}

// Reset restores the message to its zero value.
func (x *RepoLookupRequest) Reset() {
	*x = RepoLookupRequest{}
	if protoimpl.UnsafeEnabled {
		mi := &file_repoupdater_proto_msgTypes[6]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *RepoLookupRequest) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RepoLookupRequest) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *RepoLookupRequest) ProtoReflect() protoreflect.Message {
	mi := &file_repoupdater_proto_msgTypes[6]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RepoLookupRequest.ProtoReflect.Descriptor instead.
func (*RepoLookupRequest) Descriptor() ([]byte, []int) {
	return file_repoupdater_proto_rawDescGZIP(), []int{6}
}

// GetRepo returns the repo name, tolerating a nil receiver.
func (x *RepoLookupRequest) GetRepo() string {
	if x != nil {
		return x.Repo
	}
	return ""
}
// RepoLookupResponse is the gRPC response message for a repoupdater repo
// lookup. NOTE(review): the protoimpl boilerplate below matches standard
// protoc-gen-go output — this appears to be generated code; regenerate from
// the .proto file rather than editing by hand.
type RepoLookupResponse struct {
	state         protoimpl.MessageState
	sizeCache     protoimpl.SizeCache
	unknownFields protoimpl.UnknownFields

	// Repo contains information about the repository, if it is found. If an error occurred, it is nil.
	Repo *RepoInfo `protobuf:"bytes,1,opt,name=repo,proto3" json:"repo,omitempty"`
	// the repository host reported that the repository was not found
	ErrorNotFound bool `protobuf:"varint,2,opt,name=error_not_found,json=errorNotFound,proto3" json:"error_not_found,omitempty"`
	// the repository host rejected the client's authorization
	ErrorUnauthorized bool `protobuf:"varint,3,opt,name=error_unauthorized,json=errorUnauthorized,proto3" json:"error_unauthorized,omitempty"`
	// the repository host was temporarily unavailable (e.g., rate limit exceeded)
	ErrorTemporarilyUnavailable bool `protobuf:"varint,4,opt,name=error_temporarily_unavailable,json=errorTemporarilyUnavailable,proto3" json:"error_temporarily_unavailable,omitempty"`
	// the repository cannot be added because it doesn't fit restrictions.
	ErrorRepoDenied string `protobuf:"bytes,5,opt,name=error_repo_denied,json=errorRepoDenied,proto3" json:"error_repo_denied,omitempty"`
}

// Reset restores the message to its zero value.
func (x *RepoLookupResponse) Reset() {
	*x = RepoLookupResponse{}
	if protoimpl.UnsafeEnabled {
		mi := &file_repoupdater_proto_msgTypes[7]
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		ms.StoreMessageInfo(mi)
	}
}

// String renders the message using the protobuf text format.
func (x *RepoLookupResponse) String() string {
	return protoimpl.X.MessageStringOf(x)
}

func (*RepoLookupResponse) ProtoMessage() {}

// ProtoReflect exposes the message through the protobuf reflection API.
func (x *RepoLookupResponse) ProtoReflect() protoreflect.Message {
	mi := &file_repoupdater_proto_msgTypes[7]
	if protoimpl.UnsafeEnabled && x != nil {
		ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
		if ms.LoadMessageInfo() == nil {
			ms.StoreMessageInfo(mi)
		}
		return ms
	}
	return mi.MessageOf(x)
}

// Deprecated: Use RepoLookupResponse.ProtoReflect.Descriptor instead.
func (*RepoLookupResponse) Descriptor() ([]byte, []int) {
	return file_repoupdater_proto_rawDescGZIP(), []int{7}
}

// GetRepo returns the repo info, tolerating a nil receiver.
func (x *RepoLookupResponse) GetRepo() *RepoInfo {
	if x != nil {
		return x.Repo
	}
	return nil
}

// GetErrorNotFound reports the not-found flag, tolerating a nil receiver.
func (x *RepoLookupResponse) GetErrorNotFound() bool {
	if x != nil {
		return x.ErrorNotFound
	}
	return false
}

// GetErrorUnauthorized reports the unauthorized flag, tolerating a nil receiver.
func (x *RepoLookupResponse) GetErrorUnauthorized() bool {
	if x != nil {
		return x.ErrorUnauthorized
	}
	return false
}

// GetErrorTemporarilyUnavailable reports the temporary-unavailability flag,
// tolerating a nil receiver.
func (x *RepoLookupResponse) GetErrorTemporarilyUnavailable() bool {
	if x != nil {
		return x.ErrorTemporarilyUnavailable
	}
	return false
}

// GetErrorRepoDenied returns the denial reason (empty when not denied),
// tolerating a nil receiver.
func (x *RepoLookupResponse) GetErrorRepoDenied() string {
	if x != nil {
		return x.ErrorRepoDenied
	}
	return ""
}
type RepoInfo struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -515,7 +383,7 @@ type RepoInfo struct {
func (x *RepoInfo) Reset() {
*x = RepoInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[8]
mi := &file_repoupdater_proto_msgTypes[6]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -528,7 +396,7 @@ func (x *RepoInfo) String() string {
func (*RepoInfo) ProtoMessage() {}
func (x *RepoInfo) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[8]
mi := &file_repoupdater_proto_msgTypes[6]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -541,7 +409,7 @@ func (x *RepoInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use RepoInfo.ProtoReflect.Descriptor instead.
func (*RepoInfo) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{8}
return file_repoupdater_proto_rawDescGZIP(), []int{6}
}
func (x *RepoInfo) GetId() int32 {
@ -620,7 +488,7 @@ type VCSInfo struct {
func (x *VCSInfo) Reset() {
*x = VCSInfo{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[9]
mi := &file_repoupdater_proto_msgTypes[7]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -633,7 +501,7 @@ func (x *VCSInfo) String() string {
func (*VCSInfo) ProtoMessage() {}
func (x *VCSInfo) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[9]
mi := &file_repoupdater_proto_msgTypes[7]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -646,7 +514,7 @@ func (x *VCSInfo) ProtoReflect() protoreflect.Message {
// Deprecated: Use VCSInfo.ProtoReflect.Descriptor instead.
func (*VCSInfo) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{9}
return file_repoupdater_proto_rawDescGZIP(), []int{7}
}
func (x *VCSInfo) GetUrl() string {
@ -675,7 +543,7 @@ type RepoLinks struct {
func (x *RepoLinks) Reset() {
*x = RepoLinks{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[10]
mi := &file_repoupdater_proto_msgTypes[8]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -688,7 +556,7 @@ func (x *RepoLinks) String() string {
func (*RepoLinks) ProtoMessage() {}
func (x *RepoLinks) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[10]
mi := &file_repoupdater_proto_msgTypes[8]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -701,7 +569,7 @@ func (x *RepoLinks) ProtoReflect() protoreflect.Message {
// Deprecated: Use RepoLinks.ProtoReflect.Descriptor instead.
func (*RepoLinks) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{10}
return file_repoupdater_proto_rawDescGZIP(), []int{8}
}
func (x *RepoLinks) GetRoot() string {
@ -761,7 +629,7 @@ type ExternalRepoSpec struct {
func (x *ExternalRepoSpec) Reset() {
*x = ExternalRepoSpec{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[11]
mi := &file_repoupdater_proto_msgTypes[9]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -774,7 +642,7 @@ func (x *ExternalRepoSpec) String() string {
func (*ExternalRepoSpec) ProtoMessage() {}
func (x *ExternalRepoSpec) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[11]
mi := &file_repoupdater_proto_msgTypes[9]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -787,7 +655,7 @@ func (x *ExternalRepoSpec) ProtoReflect() protoreflect.Message {
// Deprecated: Use ExternalRepoSpec.ProtoReflect.Descriptor instead.
func (*ExternalRepoSpec) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{11}
return file_repoupdater_proto_rawDescGZIP(), []int{9}
}
func (x *ExternalRepoSpec) GetId() string {
@ -822,7 +690,7 @@ type EnqueueRepoUpdateRequest struct {
func (x *EnqueueRepoUpdateRequest) Reset() {
*x = EnqueueRepoUpdateRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[12]
mi := &file_repoupdater_proto_msgTypes[10]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -835,7 +703,7 @@ func (x *EnqueueRepoUpdateRequest) String() string {
func (*EnqueueRepoUpdateRequest) ProtoMessage() {}
func (x *EnqueueRepoUpdateRequest) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[12]
mi := &file_repoupdater_proto_msgTypes[10]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -848,7 +716,7 @@ func (x *EnqueueRepoUpdateRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use EnqueueRepoUpdateRequest.ProtoReflect.Descriptor instead.
func (*EnqueueRepoUpdateRequest) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{12}
return file_repoupdater_proto_rawDescGZIP(), []int{10}
}
func (x *EnqueueRepoUpdateRequest) GetRepo() string {
@ -873,7 +741,7 @@ type EnqueueRepoUpdateResponse struct {
func (x *EnqueueRepoUpdateResponse) Reset() {
*x = EnqueueRepoUpdateResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[13]
mi := &file_repoupdater_proto_msgTypes[11]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -886,7 +754,7 @@ func (x *EnqueueRepoUpdateResponse) String() string {
func (*EnqueueRepoUpdateResponse) ProtoMessage() {}
func (x *EnqueueRepoUpdateResponse) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[13]
mi := &file_repoupdater_proto_msgTypes[11]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -899,7 +767,7 @@ func (x *EnqueueRepoUpdateResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use EnqueueRepoUpdateResponse.ProtoReflect.Descriptor instead.
func (*EnqueueRepoUpdateResponse) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{13}
return file_repoupdater_proto_rawDescGZIP(), []int{11}
}
func (x *EnqueueRepoUpdateResponse) GetId() int32 {
@ -927,7 +795,7 @@ type EnqueueChangesetSyncRequest struct {
func (x *EnqueueChangesetSyncRequest) Reset() {
*x = EnqueueChangesetSyncRequest{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[14]
mi := &file_repoupdater_proto_msgTypes[12]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -940,7 +808,7 @@ func (x *EnqueueChangesetSyncRequest) String() string {
func (*EnqueueChangesetSyncRequest) ProtoMessage() {}
func (x *EnqueueChangesetSyncRequest) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[14]
mi := &file_repoupdater_proto_msgTypes[12]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -953,7 +821,7 @@ func (x *EnqueueChangesetSyncRequest) ProtoReflect() protoreflect.Message {
// Deprecated: Use EnqueueChangesetSyncRequest.ProtoReflect.Descriptor instead.
func (*EnqueueChangesetSyncRequest) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{14}
return file_repoupdater_proto_rawDescGZIP(), []int{12}
}
func (x *EnqueueChangesetSyncRequest) GetIds() []int64 {
@ -972,7 +840,7 @@ type EnqueueChangesetSyncResponse struct {
func (x *EnqueueChangesetSyncResponse) Reset() {
*x = EnqueueChangesetSyncResponse{}
if protoimpl.UnsafeEnabled {
mi := &file_repoupdater_proto_msgTypes[15]
mi := &file_repoupdater_proto_msgTypes[13]
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
ms.StoreMessageInfo(mi)
}
@ -985,7 +853,7 @@ func (x *EnqueueChangesetSyncResponse) String() string {
func (*EnqueueChangesetSyncResponse) ProtoMessage() {}
func (x *EnqueueChangesetSyncResponse) ProtoReflect() protoreflect.Message {
mi := &file_repoupdater_proto_msgTypes[15]
mi := &file_repoupdater_proto_msgTypes[13]
if protoimpl.UnsafeEnabled && x != nil {
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
if ms.LoadMessageInfo() == nil {
@ -998,7 +866,7 @@ func (x *EnqueueChangesetSyncResponse) ProtoReflect() protoreflect.Message {
// Deprecated: Use EnqueueChangesetSyncResponse.ProtoReflect.Descriptor instead.
func (*EnqueueChangesetSyncResponse) Descriptor() ([]byte, []int) {
return file_repoupdater_proto_rawDescGZIP(), []int{15}
return file_repoupdater_proto_rawDescGZIP(), []int{13}
}
var File_repoupdater_proto protoreflect.FileDescriptor
@ -1043,115 +911,89 @@ var file_repoupdater_proto_rawDesc = []byte{
0x0a, 0x08, 0x75, 0x70, 0x64, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08,
0x52, 0x08, 0x75, 0x70, 0x64, 0x61, 0x74, 0x69, 0x6e, 0x67, 0x12, 0x1a, 0x0a, 0x08, 0x70, 0x72,
0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x08, 0x70, 0x72,
0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0x35, 0x0a, 0x11, 0x52, 0x65, 0x70, 0x6f, 0x4c, 0x6f,
0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72,
0x65, 0x70, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x65, 0x70, 0x6f, 0x4a,
0x04, 0x08, 0x02, 0x10, 0x03, 0x52, 0x06, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x22, 0x89, 0x02,
0x0a, 0x12, 0x52, 0x65, 0x70, 0x6f, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x12, 0x2c, 0x0a, 0x04, 0x72, 0x65, 0x70, 0x6f, 0x18, 0x01, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x18, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72,
0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x04, 0x72, 0x65,
0x70, 0x6f, 0x12, 0x26, 0x0a, 0x0f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x6e, 0x6f, 0x74, 0x5f,
0x66, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0d, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x4e, 0x6f, 0x74, 0x46, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x65, 0x72,
0x72, 0x6f, 0x72, 0x5f, 0x75, 0x6e, 0x61, 0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64,
0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x11, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x55, 0x6e, 0x61,
0x75, 0x74, 0x68, 0x6f, 0x72, 0x69, 0x7a, 0x65, 0x64, 0x12, 0x42, 0x0a, 0x1d, 0x65, 0x72, 0x72,
0x6f, 0x72, 0x5f, 0x74, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x69, 0x6c, 0x79, 0x5f, 0x75,
0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08,
0x52, 0x1b, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x65, 0x6d, 0x70, 0x6f, 0x72, 0x61, 0x72, 0x69,
0x6c, 0x79, 0x55, 0x6e, 0x61, 0x76, 0x61, 0x69, 0x6c, 0x61, 0x62, 0x6c, 0x65, 0x12, 0x2a, 0x0a,
0x11, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x5f, 0x64, 0x65, 0x6e, 0x69,
0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0f, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x52,
0x65, 0x70, 0x6f, 0x44, 0x65, 0x6e, 0x69, 0x65, 0x64, 0x22, 0xc6, 0x02, 0x0a, 0x08, 0x52, 0x65,
0x70, 0x6f, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01,
0x28, 0x05, 0x52, 0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02,
0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65,
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x0b, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04,
0x66, 0x6f, 0x72, 0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x66, 0x6f, 0x72, 0x6b,
0x12, 0x1a, 0x0a, 0x08, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01,
0x28, 0x08, 0x52, 0x08, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07,
0x70, 0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70,
0x72, 0x69, 0x76, 0x61, 0x74, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x76, 0x63, 0x73, 0x5f, 0x69, 0x6e,
0x66, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75,
0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x43, 0x53, 0x49, 0x6e, 0x66,
0x6f, 0x52, 0x07, 0x76, 0x63, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x05, 0x6c, 0x69,
0x6e, 0x6b, 0x73, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x72, 0x65, 0x70, 0x6f,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x4c,
0x69, 0x6e, 0x6b, 0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x65,
0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x18, 0x09, 0x20, 0x01,
0x28, 0x0b, 0x32, 0x20, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72,
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6f,
0x53, 0x70, 0x65, 0x63, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65,
0x70, 0x6f, 0x22, 0x1b, 0x0a, 0x07, 0x56, 0x43, 0x53, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a,
0x03, 0x75, 0x72, 0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22,
0x5f, 0x0a, 0x09, 0x52, 0x65, 0x70, 0x6f, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x12, 0x0a, 0x04,
0x72, 0x6f, 0x6f, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74,
0x12, 0x12, 0x0a, 0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x74, 0x72, 0x65, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x03, 0x20, 0x01,
0x28, 0x09, 0x52, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d,
0x69, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74,
0x22, 0x64, 0x0a, 0x10, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6f,
0x53, 0x70, 0x65, 0x63, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x02, 0x69, 0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
0x74, 0x79, 0x70, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76,
0x69, 0x63, 0x65, 0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69,
0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72,
0x76, 0x69, 0x63, 0x65, 0x49, 0x64, 0x22, 0x2e, 0x0a, 0x18, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75,
0x65, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65,
0x73, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x65, 0x70, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09,
0x52, 0x04, 0x72, 0x65, 0x70, 0x6f, 0x22, 0x3f, 0x0a, 0x19, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75,
0x65, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
0x69, 0x6f, 0x72, 0x69, 0x74, 0x79, 0x22, 0xc6, 0x02, 0x0a, 0x08, 0x52, 0x65, 0x70, 0x6f, 0x49,
0x6e, 0x66, 0x6f, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52,
0x02, 0x69, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28,
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x22, 0x2f, 0x0a, 0x1b, 0x45, 0x6e, 0x71, 0x75, 0x65,
0x75, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52,
0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20,
0x03, 0x28, 0x03, 0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x45, 0x6e, 0x71, 0x75,
0x65, 0x75, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xc2, 0x04, 0x0a, 0x12, 0x52, 0x65, 0x70,
0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12,
0x7f, 0x0a, 0x17, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68,
0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x2e, 0x72, 0x65, 0x70,
0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x49,
0x6e, 0x66, 0x6f, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x72, 0x65, 0x70,
0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f,
0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x49,
0x6e, 0x66, 0x6f, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x01,
0x12, 0x58, 0x0a, 0x0a, 0x52, 0x65, 0x70, 0x6f, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x12, 0x21,
0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
0x52, 0x65, 0x70, 0x6f, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73,
0x74, 0x1a, 0x22, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x4c, 0x6f, 0x6f, 0x6b, 0x75, 0x70, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x02, 0x12, 0x6d, 0x0a, 0x11, 0x45, 0x6e,
0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x12,
0x28, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31,
0x2e, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6f,
0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x71, 0x75, 0x65,
0x75, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70,
0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x02, 0x12, 0x6a, 0x0a, 0x11, 0x52, 0x65, 0x63,
0x6c, 0x6f, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x12, 0x28,
0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e,
0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x20, 0x0a, 0x0b, 0x64, 0x65, 0x73, 0x63, 0x72,
0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x64, 0x65,
0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x6f, 0x72,
0x6b, 0x18, 0x04, 0x20, 0x01, 0x28, 0x08, 0x52, 0x04, 0x66, 0x6f, 0x72, 0x6b, 0x12, 0x1a, 0x0a,
0x08, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52,
0x08, 0x61, 0x72, 0x63, 0x68, 0x69, 0x76, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x72, 0x69,
0x76, 0x61, 0x74, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x70, 0x72, 0x69, 0x76,
0x61, 0x74, 0x65, 0x12, 0x32, 0x0a, 0x08, 0x76, 0x63, 0x73, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18,
0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61,
0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x56, 0x43, 0x53, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x07,
0x76, 0x63, 0x73, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2f, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73,
0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x19, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64,
0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x4c, 0x69, 0x6e, 0x6b,
0x73, 0x52, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x45, 0x0a, 0x0d, 0x65, 0x78, 0x74, 0x65,
0x72, 0x6e, 0x61, 0x6c, 0x5f, 0x72, 0x65, 0x70, 0x6f, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32,
0x20, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31,
0x2e, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6f, 0x53, 0x70, 0x65,
0x63, 0x52, 0x0c, 0x65, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6f, 0x22,
0x1b, 0x0a, 0x07, 0x56, 0x43, 0x53, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x10, 0x0a, 0x03, 0x75, 0x72,
0x6c, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x75, 0x72, 0x6c, 0x22, 0x5f, 0x0a, 0x09,
0x52, 0x65, 0x70, 0x6f, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x72, 0x6f, 0x6f,
0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72, 0x6f, 0x6f, 0x74, 0x12, 0x12, 0x0a,
0x04, 0x74, 0x72, 0x65, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x72, 0x65,
0x65, 0x12, 0x12, 0x0a, 0x04, 0x62, 0x6c, 0x6f, 0x62, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52,
0x04, 0x62, 0x6c, 0x6f, 0x62, 0x12, 0x16, 0x0a, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x18,
0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x22, 0x64, 0x0a,
0x10, 0x45, 0x78, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x52, 0x65, 0x70, 0x6f, 0x53, 0x70, 0x65,
0x63, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, 0x69,
0x64, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f, 0x74, 0x79, 0x70,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
0x54, 0x79, 0x70, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x5f,
0x69, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63,
0x65, 0x49, 0x64, 0x22, 0x2e, 0x0a, 0x18, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65,
0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
0x12, 0x0a, 0x04, 0x72, 0x65, 0x70, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x72,
0x65, 0x70, 0x6f, 0x22, 0x3f, 0x0a, 0x19, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65,
0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x02, 0x69, 0x64,
0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04,
0x6e, 0x61, 0x6d, 0x65, 0x22, 0x2f, 0x0a, 0x1b, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x43,
0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75,
0x65, 0x73, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x69, 0x64, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x03,
0x52, 0x03, 0x69, 0x64, 0x73, 0x22, 0x1e, 0x0a, 0x1c, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x73,
0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xe8, 0x03, 0x0a, 0x12, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70,
0x64, 0x61, 0x74, 0x65, 0x72, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7f, 0x0a, 0x17,
0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75,
0x6c, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x2e, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70,
0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64,
0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70,
0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64,
0x61, 0x74, 0x65, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x72, 0x49, 0x6e, 0x66, 0x6f,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x01, 0x12, 0x6d, 0x0a,
0x11, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61,
0x74, 0x65, 0x12, 0x28, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72,
0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x55,
0x70, 0x64, 0x61, 0x74, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x72,
0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e,
0x71, 0x75, 0x65, 0x75, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x52,
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x02, 0x12, 0x6a, 0x0a, 0x11,
0x52, 0x65, 0x63, 0x6c, 0x6f, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72,
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75,
0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6c, 0x6f, 0x6e,
0x65, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f,
0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x14, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65,
0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x12, 0x2b, 0x2e,
0x79, 0x12, 0x28, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e,
0x76, 0x31, 0x2e, 0x52, 0x65, 0x63, 0x6c, 0x6f, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69,
0x74, 0x6f, 0x72, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x29, 0x2e, 0x72, 0x65,
0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x63,
0x6c, 0x6f, 0x6e, 0x65, 0x52, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x6f, 0x72, 0x79, 0x52, 0x65,
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x00, 0x12, 0x76, 0x0a, 0x14, 0x45, 0x6e, 0x71, 0x75,
0x65, 0x75, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63,
0x12, 0x2b, 0x2e, 0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76,
0x31, 0x2e, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73,
0x65, 0x74, 0x53, 0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e,
0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45,
0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53,
0x79, 0x6e, 0x63, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2c, 0x2e, 0x72, 0x65, 0x70,
0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x6e, 0x71, 0x75,
0x65, 0x75, 0x65, 0x43, 0x68, 0x61, 0x6e, 0x67, 0x65, 0x73, 0x65, 0x74, 0x53, 0x79, 0x6e, 0x63,
0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x02, 0x42, 0x3c, 0x5a,
0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73, 0x6f, 0x75, 0x72,
0x63, 0x65, 0x67, 0x72, 0x61, 0x70, 0x68, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x67, 0x72,
0x61, 0x70, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x72, 0x65, 0x70,
0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x33,
0x79, 0x6e, 0x63, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x03, 0x90, 0x02, 0x02,
0x42, 0x3c, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x73,
0x6f, 0x75, 0x72, 0x63, 0x65, 0x67, 0x72, 0x61, 0x70, 0x68, 0x2f, 0x73, 0x6f, 0x75, 0x72, 0x63,
0x65, 0x67, 0x72, 0x61, 0x70, 0x68, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
0x72, 0x65, 0x70, 0x6f, 0x75, 0x70, 0x64, 0x61, 0x74, 0x65, 0x72, 0x2f, 0x76, 0x31, 0x62, 0x06,
0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
}
var (
@ -1166,7 +1008,7 @@ func file_repoupdater_proto_rawDescGZIP() []byte {
return file_repoupdater_proto_rawDescData
}
var file_repoupdater_proto_msgTypes = make([]protoimpl.MessageInfo, 16)
var file_repoupdater_proto_msgTypes = make([]protoimpl.MessageInfo, 14)
var file_repoupdater_proto_goTypes = []interface{}{
(*RecloneRepositoryRequest)(nil), // 0: repoupdater.v1.RecloneRepositoryRequest
(*RecloneRepositoryResponse)(nil), // 1: repoupdater.v1.RecloneRepositoryResponse
@ -1174,41 +1016,36 @@ var file_repoupdater_proto_goTypes = []interface{}{
(*RepoUpdateSchedulerInfoResponse)(nil), // 3: repoupdater.v1.RepoUpdateSchedulerInfoResponse
(*RepoScheduleState)(nil), // 4: repoupdater.v1.RepoScheduleState
(*RepoQueueState)(nil), // 5: repoupdater.v1.RepoQueueState
(*RepoLookupRequest)(nil), // 6: repoupdater.v1.RepoLookupRequest
(*RepoLookupResponse)(nil), // 7: repoupdater.v1.RepoLookupResponse
(*RepoInfo)(nil), // 8: repoupdater.v1.RepoInfo
(*VCSInfo)(nil), // 9: repoupdater.v1.VCSInfo
(*RepoLinks)(nil), // 10: repoupdater.v1.RepoLinks
(*ExternalRepoSpec)(nil), // 11: repoupdater.v1.ExternalRepoSpec
(*EnqueueRepoUpdateRequest)(nil), // 12: repoupdater.v1.EnqueueRepoUpdateRequest
(*EnqueueRepoUpdateResponse)(nil), // 13: repoupdater.v1.EnqueueRepoUpdateResponse
(*EnqueueChangesetSyncRequest)(nil), // 14: repoupdater.v1.EnqueueChangesetSyncRequest
(*EnqueueChangesetSyncResponse)(nil), // 15: repoupdater.v1.EnqueueChangesetSyncResponse
(*timestamppb.Timestamp)(nil), // 16: google.protobuf.Timestamp
(*RepoInfo)(nil), // 6: repoupdater.v1.RepoInfo
(*VCSInfo)(nil), // 7: repoupdater.v1.VCSInfo
(*RepoLinks)(nil), // 8: repoupdater.v1.RepoLinks
(*ExternalRepoSpec)(nil), // 9: repoupdater.v1.ExternalRepoSpec
(*EnqueueRepoUpdateRequest)(nil), // 10: repoupdater.v1.EnqueueRepoUpdateRequest
(*EnqueueRepoUpdateResponse)(nil), // 11: repoupdater.v1.EnqueueRepoUpdateResponse
(*EnqueueChangesetSyncRequest)(nil), // 12: repoupdater.v1.EnqueueChangesetSyncRequest
(*EnqueueChangesetSyncResponse)(nil), // 13: repoupdater.v1.EnqueueChangesetSyncResponse
(*timestamppb.Timestamp)(nil), // 14: google.protobuf.Timestamp
}
var file_repoupdater_proto_depIdxs = []int32{
4, // 0: repoupdater.v1.RepoUpdateSchedulerInfoResponse.schedule:type_name -> repoupdater.v1.RepoScheduleState
5, // 1: repoupdater.v1.RepoUpdateSchedulerInfoResponse.queue:type_name -> repoupdater.v1.RepoQueueState
16, // 2: repoupdater.v1.RepoScheduleState.due:type_name -> google.protobuf.Timestamp
8, // 3: repoupdater.v1.RepoLookupResponse.repo:type_name -> repoupdater.v1.RepoInfo
9, // 4: repoupdater.v1.RepoInfo.vcs_info:type_name -> repoupdater.v1.VCSInfo
10, // 5: repoupdater.v1.RepoInfo.links:type_name -> repoupdater.v1.RepoLinks
11, // 6: repoupdater.v1.RepoInfo.external_repo:type_name -> repoupdater.v1.ExternalRepoSpec
2, // 7: repoupdater.v1.RepoUpdaterService.RepoUpdateSchedulerInfo:input_type -> repoupdater.v1.RepoUpdateSchedulerInfoRequest
6, // 8: repoupdater.v1.RepoUpdaterService.RepoLookup:input_type -> repoupdater.v1.RepoLookupRequest
12, // 9: repoupdater.v1.RepoUpdaterService.EnqueueRepoUpdate:input_type -> repoupdater.v1.EnqueueRepoUpdateRequest
0, // 10: repoupdater.v1.RepoUpdaterService.RecloneRepository:input_type -> repoupdater.v1.RecloneRepositoryRequest
14, // 11: repoupdater.v1.RepoUpdaterService.EnqueueChangesetSync:input_type -> repoupdater.v1.EnqueueChangesetSyncRequest
3, // 12: repoupdater.v1.RepoUpdaterService.RepoUpdateSchedulerInfo:output_type -> repoupdater.v1.RepoUpdateSchedulerInfoResponse
7, // 13: repoupdater.v1.RepoUpdaterService.RepoLookup:output_type -> repoupdater.v1.RepoLookupResponse
13, // 14: repoupdater.v1.RepoUpdaterService.EnqueueRepoUpdate:output_type -> repoupdater.v1.EnqueueRepoUpdateResponse
1, // 15: repoupdater.v1.RepoUpdaterService.RecloneRepository:output_type -> repoupdater.v1.RecloneRepositoryResponse
15, // 16: repoupdater.v1.RepoUpdaterService.EnqueueChangesetSync:output_type -> repoupdater.v1.EnqueueChangesetSyncResponse
12, // [12:17] is the sub-list for method output_type
7, // [7:12] is the sub-list for method input_type
7, // [7:7] is the sub-list for extension type_name
7, // [7:7] is the sub-list for extension extendee
0, // [0:7] is the sub-list for field type_name
14, // 2: repoupdater.v1.RepoScheduleState.due:type_name -> google.protobuf.Timestamp
7, // 3: repoupdater.v1.RepoInfo.vcs_info:type_name -> repoupdater.v1.VCSInfo
8, // 4: repoupdater.v1.RepoInfo.links:type_name -> repoupdater.v1.RepoLinks
9, // 5: repoupdater.v1.RepoInfo.external_repo:type_name -> repoupdater.v1.ExternalRepoSpec
2, // 6: repoupdater.v1.RepoUpdaterService.RepoUpdateSchedulerInfo:input_type -> repoupdater.v1.RepoUpdateSchedulerInfoRequest
10, // 7: repoupdater.v1.RepoUpdaterService.EnqueueRepoUpdate:input_type -> repoupdater.v1.EnqueueRepoUpdateRequest
0, // 8: repoupdater.v1.RepoUpdaterService.RecloneRepository:input_type -> repoupdater.v1.RecloneRepositoryRequest
12, // 9: repoupdater.v1.RepoUpdaterService.EnqueueChangesetSync:input_type -> repoupdater.v1.EnqueueChangesetSyncRequest
3, // 10: repoupdater.v1.RepoUpdaterService.RepoUpdateSchedulerInfo:output_type -> repoupdater.v1.RepoUpdateSchedulerInfoResponse
11, // 11: repoupdater.v1.RepoUpdaterService.EnqueueRepoUpdate:output_type -> repoupdater.v1.EnqueueRepoUpdateResponse
1, // 12: repoupdater.v1.RepoUpdaterService.RecloneRepository:output_type -> repoupdater.v1.RecloneRepositoryResponse
13, // 13: repoupdater.v1.RepoUpdaterService.EnqueueChangesetSync:output_type -> repoupdater.v1.EnqueueChangesetSyncResponse
10, // [10:14] is the sub-list for method output_type
6, // [6:10] is the sub-list for method input_type
6, // [6:6] is the sub-list for extension type_name
6, // [6:6] is the sub-list for extension extendee
0, // [0:6] is the sub-list for field type_name
}
func init() { file_repoupdater_proto_init() }
@ -1290,30 +1127,6 @@ func file_repoupdater_proto_init() {
}
}
file_repoupdater_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RepoLookupRequest); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_repoupdater_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RepoLookupResponse); i {
case 0:
return &v.state
case 1:
return &v.sizeCache
case 2:
return &v.unknownFields
default:
return nil
}
}
file_repoupdater_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RepoInfo); i {
case 0:
return &v.state
@ -1325,7 +1138,7 @@ func file_repoupdater_proto_init() {
return nil
}
}
file_repoupdater_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
file_repoupdater_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*VCSInfo); i {
case 0:
return &v.state
@ -1337,7 +1150,7 @@ func file_repoupdater_proto_init() {
return nil
}
}
file_repoupdater_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
file_repoupdater_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*RepoLinks); i {
case 0:
return &v.state
@ -1349,7 +1162,7 @@ func file_repoupdater_proto_init() {
return nil
}
}
file_repoupdater_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
file_repoupdater_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*ExternalRepoSpec); i {
case 0:
return &v.state
@ -1361,7 +1174,7 @@ func file_repoupdater_proto_init() {
return nil
}
}
file_repoupdater_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
file_repoupdater_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EnqueueRepoUpdateRequest); i {
case 0:
return &v.state
@ -1373,7 +1186,7 @@ func file_repoupdater_proto_init() {
return nil
}
}
file_repoupdater_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
file_repoupdater_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EnqueueRepoUpdateResponse); i {
case 0:
return &v.state
@ -1385,7 +1198,7 @@ func file_repoupdater_proto_init() {
return nil
}
}
file_repoupdater_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
file_repoupdater_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EnqueueChangesetSyncRequest); i {
case 0:
return &v.state
@ -1397,7 +1210,7 @@ func file_repoupdater_proto_init() {
return nil
}
}
file_repoupdater_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
file_repoupdater_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} {
switch v := v.(*EnqueueChangesetSyncResponse); i {
case 0:
return &v.state
@ -1416,7 +1229,7 @@ func file_repoupdater_proto_init() {
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
RawDescriptor: file_repoupdater_proto_rawDesc,
NumEnums: 0,
NumMessages: 16,
NumMessages: 14,
NumExtensions: 0,
NumServices: 1,
},

View File

@ -11,10 +11,6 @@ service RepoUpdaterService {
rpc RepoUpdateSchedulerInfo(RepoUpdateSchedulerInfoRequest) returns (RepoUpdateSchedulerInfoResponse) {
option idempotency_level = NO_SIDE_EFFECTS;
}
// RepoLookup retrieves information about the repository on repoupdater.
rpc RepoLookup(RepoLookupRequest) returns (RepoLookupResponse) {
option idempotency_level = IDEMPOTENT;
}
// EnqueueRepoUpdate requests that the named repository be updated in the near
// future. It does not wait for the update.
rpc EnqueueRepoUpdate(EnqueueRepoUpdateRequest) returns (EnqueueRepoUpdateResponse) {
@ -62,28 +58,6 @@ message RepoQueueState {
int64 priority = 4;
}
message RepoLookupRequest {
reserved 2;
reserved "update";
// Repo is the repository name to look up.
string repo = 1;
}
message RepoLookupResponse {
// Repo contains information about the repository, if it is found. If an error occurred, it is nil.
RepoInfo repo = 1;
// the repository host reported that the repository was not found
bool error_not_found = 2;
// the repository host rejected the client's authorization
bool error_unauthorized = 3;
// the repository host was temporarily unavailable (e.g., rate limit exceeded)
bool error_temporarily_unavailable = 4;
// the repository cannot be added because it doesn't fit restrictions.
string error_repo_denied = 5;
}
message RepoInfo {
// ID is the unique numeric ID for this repository.
int32 id = 1;

View File

@ -20,7 +20,6 @@ const _ = grpc.SupportPackageIsVersion7
const (
RepoUpdaterService_RepoUpdateSchedulerInfo_FullMethodName = "/repoupdater.v1.RepoUpdaterService/RepoUpdateSchedulerInfo"
RepoUpdaterService_RepoLookup_FullMethodName = "/repoupdater.v1.RepoUpdaterService/RepoLookup"
RepoUpdaterService_EnqueueRepoUpdate_FullMethodName = "/repoupdater.v1.RepoUpdaterService/EnqueueRepoUpdate"
RepoUpdaterService_RecloneRepository_FullMethodName = "/repoupdater.v1.RepoUpdaterService/RecloneRepository"
RepoUpdaterService_EnqueueChangesetSync_FullMethodName = "/repoupdater.v1.RepoUpdaterService/EnqueueChangesetSync"
@ -32,8 +31,6 @@ const (
type RepoUpdaterServiceClient interface {
// RepoUpdateSchedulerInfo returns information about the state of the repo in the update scheduler.
RepoUpdateSchedulerInfo(ctx context.Context, in *RepoUpdateSchedulerInfoRequest, opts ...grpc.CallOption) (*RepoUpdateSchedulerInfoResponse, error)
// RepoLookup retrieves information about the repository on repoupdater.
RepoLookup(ctx context.Context, in *RepoLookupRequest, opts ...grpc.CallOption) (*RepoLookupResponse, error)
// EnqueueRepoUpdate requests that the named repository be updated in the near
// future. It does not wait for the update.
EnqueueRepoUpdate(ctx context.Context, in *EnqueueRepoUpdateRequest, opts ...grpc.CallOption) (*EnqueueRepoUpdateResponse, error)
@ -62,15 +59,6 @@ func (c *repoUpdaterServiceClient) RepoUpdateSchedulerInfo(ctx context.Context,
return out, nil
}
func (c *repoUpdaterServiceClient) RepoLookup(ctx context.Context, in *RepoLookupRequest, opts ...grpc.CallOption) (*RepoLookupResponse, error) {
out := new(RepoLookupResponse)
err := c.cc.Invoke(ctx, RepoUpdaterService_RepoLookup_FullMethodName, in, out, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *repoUpdaterServiceClient) EnqueueRepoUpdate(ctx context.Context, in *EnqueueRepoUpdateRequest, opts ...grpc.CallOption) (*EnqueueRepoUpdateResponse, error) {
out := new(EnqueueRepoUpdateResponse)
err := c.cc.Invoke(ctx, RepoUpdaterService_EnqueueRepoUpdate_FullMethodName, in, out, opts...)
@ -104,8 +92,6 @@ func (c *repoUpdaterServiceClient) EnqueueChangesetSync(ctx context.Context, in
type RepoUpdaterServiceServer interface {
// RepoUpdateSchedulerInfo returns information about the state of the repo in the update scheduler.
RepoUpdateSchedulerInfo(context.Context, *RepoUpdateSchedulerInfoRequest) (*RepoUpdateSchedulerInfoResponse, error)
// RepoLookup retrieves information about the repository on repoupdater.
RepoLookup(context.Context, *RepoLookupRequest) (*RepoLookupResponse, error)
// EnqueueRepoUpdate requests that the named repository be updated in the near
// future. It does not wait for the update.
EnqueueRepoUpdate(context.Context, *EnqueueRepoUpdateRequest) (*EnqueueRepoUpdateResponse, error)
@ -125,9 +111,6 @@ type UnimplementedRepoUpdaterServiceServer struct {
func (UnimplementedRepoUpdaterServiceServer) RepoUpdateSchedulerInfo(context.Context, *RepoUpdateSchedulerInfoRequest) (*RepoUpdateSchedulerInfoResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RepoUpdateSchedulerInfo not implemented")
}
func (UnimplementedRepoUpdaterServiceServer) RepoLookup(context.Context, *RepoLookupRequest) (*RepoLookupResponse, error) {
return nil, status.Errorf(codes.Unimplemented, "method RepoLookup not implemented")
}
// EnqueueRepoUpdate is the forward-compatible default stub: it reports
// codes.Unimplemented until a concrete server implementation overrides it.
func (UnimplementedRepoUpdaterServiceServer) EnqueueRepoUpdate(context.Context, *EnqueueRepoUpdateRequest) (*EnqueueRepoUpdateResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method EnqueueRepoUpdate not implemented")
}
@ -168,24 +151,6 @@ func _RepoUpdaterService_RepoUpdateSchedulerInfo_Handler(srv interface{}, ctx co
return interceptor(ctx, in, info, handler)
}
// _RepoUpdaterService_RepoLookup_Handler decodes an incoming RepoLookup
// request and dispatches it to the registered server, routing through the
// unary interceptor when one is installed.
func _RepoUpdaterService_RepoLookup_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
	req := new(RepoLookupRequest)
	if err := dec(req); err != nil {
		return nil, err
	}
	// Fast path: no interceptor configured, call the server directly.
	if interceptor == nil {
		return srv.(RepoUpdaterServiceServer).RepoLookup(ctx, req)
	}
	info := &grpc.UnaryServerInfo{
		Server:     srv,
		FullMethod: RepoUpdaterService_RepoLookup_FullMethodName,
	}
	// The interceptor invokes this continuation with the (possibly
	// replaced) request value.
	invoke := func(ctx context.Context, r interface{}) (interface{}, error) {
		return srv.(RepoUpdaterServiceServer).RepoLookup(ctx, r.(*RepoLookupRequest))
	}
	return interceptor(ctx, req, info, invoke)
}
func _RepoUpdaterService_EnqueueRepoUpdate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(EnqueueRepoUpdateRequest)
if err := dec(in); err != nil {
@ -251,10 +216,6 @@ var RepoUpdaterService_ServiceDesc = grpc.ServiceDesc{
MethodName: "RepoUpdateSchedulerInfo",
Handler: _RepoUpdaterService_RepoUpdateSchedulerInfo_Handler,
},
{
MethodName: "RepoLookup",
Handler: _RepoUpdaterService_RepoLookup_Handler,
},
{
MethodName: "EnqueueRepoUpdate",
Handler: _RepoUpdaterService_EnqueueRepoUpdate_Handler,

View File

@ -621,7 +621,6 @@ type ExternalService struct {
LastSyncAt time.Time
NextSyncAt time.Time
Unrestricted bool // Whether access to repositories belong to this external service is unrestricted.
CloudDefault bool // Whether this external service is our default public service on Cloud
HasWebhooks *bool // Whether this external service has webhooks configured; calculated from Config
TokenExpiresAt *time.Time // Whether the token in this external services expires, nil indicates never expires.
CodeHostID *int32

View File

@ -5,7 +5,6 @@
- filename: internal/insights/discovery/mocks_temp.go
path: github.com/sourcegraph/sourcegraph/internal/insights/discovery
interfaces:
- IndexableReposLister
- RepoStore
- filename: internal/insights/store/mocks_temp.go
path: github.com/sourcegraph/sourcegraph/internal/insights/store

View File

@ -172,7 +172,6 @@
- ExternalServiceStore
- ReposStore
- IndexEnqueuer
- RepoUpdaterClient
- UploadService
- path: github.com/sourcegraph/sourcegraph/internal/codeintel/uploads/shared
interfaces:
@ -198,7 +197,6 @@
- Store
- path: github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing
interfaces:
- RepoUpdaterClient
- InferenceService
- UploadService
- filename: internal/codeintel/autoindexing/internal/inference/mocks_test.go

View File

@ -192,21 +192,14 @@ func GitServer() *monitoring.Dashboard {
Name: "disk_space_remaining",
Description: "disk space remaining",
Query: "(src_gitserver_disk_space_available{instance=~`${shard:regex}`} / src_gitserver_disk_space_total{instance=~`${shard:regex}`}) * 100",
// Warning alert when we have disk space remaining that is
// approaching the default SRC_REPOS_DESIRED_PERCENT_FREE
Warning: monitoring.Alert().Less(15),
// Critical alert when we have less space remaining than the
// default SRC_REPOS_DESIRED_PERCENT_FREE some amount of time.
// This means that gitserver should be evicting repos, but it's
// either filling up faster than it can evict, or there is an
// issue with the janitor job.
Critical: monitoring.Alert().Less(10).For(10 * time.Minute),
Warning: monitoring.Alert().Less(15),
Critical: monitoring.Alert().Less(10).For(10 * time.Minute),
Panel: monitoring.Panel().LegendFormat("{{instance}}").
Unit(monitoring.Percentage).
With(monitoring.PanelOptions.LegendOnRight()),
Owner: monitoring.ObservableOwnerSource,
Interpretation: `
Indicates disk space remaining for each gitserver instance, which is used to determine when to start evicting least-used repository clones from disk (default 10%, configured by 'SRC_REPOS_DESIRED_PERCENT_FREE').
Indicates disk space remaining for each gitserver instance. When disk space is low, gitserver may experience slowdowns or fails to fetch repositories.
`,
NextSteps: `
- On a warning alert, you may want to provision more disk space: Disk pressure may result in decreased performance, users having to wait for repositories to clone, etc.
@ -492,17 +485,6 @@ func GitServer() *monitoring.Dashboard {
Interpretation: "the rate of failures over 5m (by job)",
},
},
{
{
Name: "repos_removed",
Description: "repositories removed due to disk pressure",
Query: "sum by (instance) (rate(src_gitserver_repos_removed_disk_pressure{instance=~`${shard:regex}`}[5m]))",
NoAlert: true,
Panel: monitoring.Panel().LegendFormat("{{instance}}").Unit(monitoring.Number),
Owner: monitoring.ObservableOwnerSource,
Interpretation: "Repositories removed due to disk pressure",
},
},
{
{
Name: "non_existent_repos_removed",

View File

@ -231,19 +231,6 @@
"description": "Whether the code host connection is in a pending state.",
"type": "boolean",
"default": false
},
"cloudGlobal": {
"title": "CloudGlobal",
"description": "When set to true, this external service will be chosen as our 'Global' GitHub service. Only valid on Sourcegraph.com. Only one service can have this flag set.",
"type": "boolean",
"default": false,
"deprecationMessage": "DEPRECATED: The cloud_default flag should be set in the database instead"
},
"cloudDefault": {
"title": "CloudDefault",
"description": "Only used to override the cloud_default column from a config file specified by EXTSVC_CONFIG_FILE",
"type": "boolean",
"default": false
}
}
}

View File

@ -227,19 +227,6 @@
}
}
}
},
"cloudGlobal": {
"title": "CloudGlobal",
"description": "When set to true, this external service will be chosen as our 'Global' GitLab service. Only valid on Sourcegraph.com. Only one service can have this flag set.",
"type": "boolean",
"default": false,
"deprecationMessage": "DEPRECATED: The cloud_default flag should be set in the database instead"
},
"cloudDefault": {
"title": "CloudDefault",
"description": "Only used to override the cloud_default column from a config file specified by EXTSVC_CONFIG_FILE",
"type": "boolean",
"default": false
}
},
"definitions": {

View File

@ -1303,10 +1303,6 @@ type GitHubConnection struct {
Authorization *GitHubAuthorization `json:"authorization,omitempty"`
// Certificate description: TLS certificate of the GitHub Enterprise instance. This is only necessary if the certificate is self-signed or signed by an internal CA. To get the certificate run `openssl s_client -connect HOST:443 -showcerts < /dev/null 2> /dev/null | openssl x509 -outform PEM`. To escape the value into a JSON string, you may want to use a tool like https://json-escape-text.now.sh.
Certificate string `json:"certificate,omitempty"`
// CloudDefault description: Only used to override the cloud_default column from a config file specified by EXTSVC_CONFIG_FILE
CloudDefault bool `json:"cloudDefault,omitempty"`
// CloudGlobal description: When set to true, this external service will be chosen as our 'Global' GitHub service. Only valid on Sourcegraph.com. Only one service can have this flag set.
CloudGlobal bool `json:"cloudGlobal,omitempty"`
// Exclude description: A list of repositories to never mirror from this GitHub instance. Takes precedence over "orgs", "repos", and "repositoryQuery" configuration.
//
// Supports excluding by name ({"name": "owner/name"}) or by ID ({"id": "MDEwOlJlcG9zaXRvcnkxMTczMDM0Mg=="}).
@ -1420,10 +1416,6 @@ type GitLabConnection struct {
Authorization *GitLabAuthorization `json:"authorization,omitempty"`
// Certificate description: TLS certificate of the GitLab instance. This is only necessary if the certificate is self-signed or signed by an internal CA. To get the certificate run `openssl s_client -connect HOST:443 -showcerts < /dev/null 2> /dev/null | openssl x509 -outform PEM`. To escape the value into a JSON string, you may want to use a tool like https://json-escape-text.now.sh.
Certificate string `json:"certificate,omitempty"`
// CloudDefault description: Only used to override the cloud_default column from a config file specified by EXTSVC_CONFIG_FILE
CloudDefault bool `json:"cloudDefault,omitempty"`
// CloudGlobal description: When set to true, this external service will be chosen as our 'Global' GitLab service. Only valid on Sourcegraph.com. Only one service can have this flag set.
CloudGlobal bool `json:"cloudGlobal,omitempty"`
// Exclude description: A list of projects to never mirror from this GitLab instance. Takes precedence over "projects" and "projectQuery" configuration. Supports excluding by name ({"name": "group/name"}) or by ID ({"id": 42}).
Exclude []*ExcludedGitLabProject `json:"exclude,omitempty"`
// GitURLType description: The type of Git URLs to use for cloning and fetching Git repositories on this GitLab instance.