Reapply "gitserver: Introduce FS layer to encapsulate repo name conversions (#60627)" (#61487) (#61523)

This reverts commit 0e8cbca569.

The initial version didn't work on case-sensitive filesystems, so this is the next attempt at shipping the change.
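
To make the scope concrete, below is a minimal sketch (not part of this commit) of how call sites move from free helpers that take `reposDir` to methods on the `FS` interface added in this diff. The function name `exampleUsage` and the use of `observation.TestContext` are illustrative assumptions, not code from the change.

```go
package example

import (
	"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
	"github.com/sourcegraph/sourcegraph/internal/api"
	"github.com/sourcegraph/sourcegraph/internal/observation"
)

// exampleUsage sketches the new FS API surface; it is illustrative only.
func exampleUsage(reposDir string, repo api.RepoName) error {
	// Before: callers carried reposDir around and converted names themselves:
	//   dir := gitserverfs.RepoDirFromName(reposDir, repo)
	//   name := gitserverfs.RepoNameFromDir(reposDir, dir)
	// After: a single FS value owns the repos dir and the name<->path mapping.
	// (observation.TestContext is what the tests in this diff use; production
	// code wires a real observation context.)
	fs := gitserverfs.New(&observation.TestContext, reposDir)
	if err := fs.Initialize(); err != nil {
		return err
	}

	dir := fs.RepoDir(repo)     // ${reposDir}/${repo}/.git
	_ = fs.ResolveRepoName(dir) // inverse of RepoDir

	cloned, err := fs.RepoCloned(repo)
	if err != nil {
		return err
	}
	if !cloned {
		return nil
	}

	// Removal no longer needs db/shardID/reposDir plumbing; callers update the
	// clone status in the database themselves afterwards.
	return fs.RemoveRepo(repo)
}
```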

## Test plan

Covered by tests, and verified that the previous issue is resolved.
Erik Seliger 2024-04-02 19:20:18 +02:00 committed by GitHub
parent 218bc806ea
commit 17a9768c3a
66 changed files with 2680 additions and 1171 deletions

View File

@ -35,6 +35,8 @@ All notable changes to Sourcegraph are documented in this file.
- Code Monitors now properly ignores monitors associated with soft-deleted users, which previously would have led to an error on the overview page. [#60405](https://github.com/sourcegraph/sourcegraph/pull/60405)
- Fixed a bug where clicking "Exclude Repo" on Azure DevOps or Gerrit repositories would not work. [#60509](https://github.com/sourcegraph/sourcegraph/pull/60509)
- Links in codeintel popovers respect the revision from the URL. [#60545](https://github.com/sourcegraph/sourcegraph/pull/60545)
- Fixed an issue where repositories with a name ending in `.git` failed to clone. [#60627](https://github.com/sourcegraph/sourcegraph/pull/60627)
- Fixed an issue where Sourcegraph could lose track of repositories on gitserver, leaving behind unnecessary data and inconsistent clone status in the UI. [#60627](https://github.com/sourcegraph/sourcegraph/pull/60627)
- The "Commits" button in repository and folder pages links to commits in the current revision instead of in the default branch. [#61408](https://github.com/sourcegraph/sourcegraph/pull/61408)
- The "Commits" button in repository and folder pages uses Perforce language and links to `/-/changelists` for Perforce depots when the experimental feature `perforceChangelistMapping` is enabled. [#61408](https://github.com/sourcegraph/sourcegraph/pull/61408)

View File

@ -7,7 +7,6 @@ go_library(
srcs = [
"cleanup.go",
"clone.go",
"disk.go",
"ensurerevision.go",
"gitservice.go",
"list_gitolite.go",
@ -60,7 +59,6 @@ go_library(
"//internal/hostname",
"//internal/lazyregexp",
"//internal/limiter",
"//internal/metrics",
"//internal/observation",
"//internal/perforce",
"//internal/ratelimit",
@ -77,7 +75,6 @@ go_library(
"@com_github_prometheus_client_golang//prometheus",
"@com_github_prometheus_client_golang//prometheus/promauto",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_mountinfo//:mountinfo",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//status",

View File

@ -29,7 +29,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
du "github.com/sourcegraph/sourcegraph/internal/diskusage"
"github.com/sourcegraph/sourcegraph/internal/diskusage"
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/errcode"
@ -37,6 +37,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/hostname"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -44,13 +45,12 @@ import (
type JanitorConfig struct {
JanitorInterval time.Duration
ShardID string
ReposDir string
DesiredPercentFree int
DisableDeleteReposOnWrongShard bool
}
func NewJanitor(ctx context.Context, cfg JanitorConfig, db database.DB, rcf *wrexec.RecordingCommandFactory, cloneRepo cloneRepoFunc, logger log.Logger) goroutine.BackgroundRoutine {
func NewJanitor(ctx context.Context, cfg JanitorConfig, db database.DB, fs gitserverfs.FS, rcf *wrexec.RecordingCommandFactory, cloneRepo cloneRepoFunc, logger log.Logger) goroutine.BackgroundRoutine {
return goroutine.NewPeriodicGoroutine(
actor.WithInternalActor(ctx),
goroutine.HandlerFunc(func(ctx context.Context) error {
@ -65,22 +65,30 @@ func NewJanitor(ctx context.Context, cfg JanitorConfig, db database.DB, rcf *wre
// managed by an external service connection and they will be recloned
// ASAP.
if dotcom.SourcegraphDotComMode() {
diskSizer := &StatDiskSizer{}
logger := logger.Scoped("dotcom-repo-cleaner")
start := time.Now()
logger.Info("Starting dotcom repo cleaner")
toFree, err := howManyBytesToFree(logger, cfg.ReposDir, diskSizer, cfg.DesiredPercentFree)
if err != nil {
logger.Error("ensuring free disk space", log.Error(err))
} else if err := freeUpSpace(ctx, logger, db, cfg.ShardID, cfg.ReposDir, diskSizer, cfg.DesiredPercentFree, toFree); err != nil {
logger.Error("error freeing up space", log.Error(err))
}
logger.Info("dotcom repo cleaner finished", log.Int64("toFree", toFree), log.Bool("failed", err != nil), log.String("duration", time.Since(start).String()))
func() {
logger := logger.Scoped("dotcom-repo-cleaner")
start := time.Now()
logger.Info("Starting dotcom repo cleaner")
usage, err := fs.DiskUsage()
if err != nil {
logger.Error("getting free disk space", log.Error(err))
return
}
toFree := howManyBytesToFree(logger, usage, cfg.DesiredPercentFree)
if err := freeUpSpace(ctx, logger, db, fs, cfg.ShardID, usage, cfg.DesiredPercentFree, toFree); err != nil {
logger.Error("error freeing up space", log.Error(err))
}
logger.Info("dotcom repo cleaner finished", log.Int64("toFree", toFree), log.Bool("failed", err != nil), log.String("duration", time.Since(start).String()))
}()
}
gitserverAddrs := gitserver.NewGitserverAddresses(conf.Get())
// TODO: Should this return an error?
cleanupRepos(ctx, logger, db, rcf, cfg.ShardID, cfg.ReposDir, cloneRepo, gitserverAddrs, cfg.DisableDeleteReposOnWrongShard)
cleanupRepos(ctx, logger, db, fs, rcf, cfg.ShardID, cloneRepo, gitserverAddrs, cfg.DisableDeleteReposOnWrongShard)
return nil
}),
@ -239,9 +247,9 @@ func cleanupRepos(
ctx context.Context,
logger log.Logger,
db database.DB,
fs gitserverfs.FS,
rcf *wrexec.RecordingCommandFactory,
shardID string,
reposDir string,
cloneRepo cloneRepoFunc,
gitServerAddrs gitserver.GitserverAddresses,
disableDeleteReposOnWrongShard bool,
@ -284,11 +292,10 @@ func cleanupRepos(
}
}()
maybeDeleteWrongShardRepos := func(dir common.GitDir) (done bool, err error) {
maybeDeleteWrongShardRepos := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
// Record the number of repos that should not belong on this instance and
// remove up to SRC_WRONG_SHARD_DELETE_LIMIT in a single Janitor run.
name := gitserverfs.RepoNameFromDir(reposDir, dir)
addr := gitServerAddrs.AddrForRepo(ctx, name)
addr := gitServerAddrs.AddrForRepo(ctx, repoName)
if hostnameMatch(shardID, addr) {
return false, nil
@ -315,7 +322,7 @@ func cleanupRepos(
log.String("target-shard", addr),
log.String("current-shard", shardID),
)
if err := gitserverfs.RemoveRepoDirectory(ctx, logger, db, shardID, reposDir, dir, false); err != nil {
if err := fs.RemoveRepo(repoName); err != nil {
return true, err
}
@ -325,8 +332,7 @@ func cleanupRepos(
return true, nil
}
collectSize := func(dir common.GitDir) (done bool, err error) {
repoName := gitserverfs.RepoNameFromDir(reposDir, dir)
collectSize := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
backend := gitcli.NewBackend(logger, rcf, dir, repoName)
last, err := getLastSizeCalculation(ctx, backend.Config())
if err != nil {
@ -338,39 +344,45 @@ func cleanupRepos(
return false, nil
}
size := gitserverfs.DirSize(dir.Path("."))
size, err := fs.DirSize(dir.Path())
if err != nil {
return false, errors.Wrap(err, "calculating repo size")
}
repoToSize[repoName] = size
return false, setLastSizeCalculation(ctx, backend.Config())
}
maybeRemoveCorrupt := func(dir common.GitDir) (done bool, _ error) {
corrupt, reason, err := checkRepoDirCorrupt(rcf, reposDir, dir)
maybeRemoveCorrupt := func(repoName api.RepoName, dir common.GitDir) (done bool, _ error) {
corrupt, reason, err := checkRepoDirCorrupt(rcf, repoName, dir)
if !corrupt || err != nil {
return false, err
}
repoCorruptedCounter.Inc()
repoName := gitserverfs.RepoNameFromDir(reposDir, dir)
err = db.GitserverRepos().LogCorruption(ctx, repoName, fmt.Sprintf("sourcegraph detected corrupt repo: %s", reason), shardID)
if err != nil {
logger.Warn("failed to log repo corruption", log.String("repo", string(repoName)), log.Error(err))
}
logger.Info("removing corrupt repo", log.String("repo", string(dir)), log.String("reason", reason))
if err := gitserverfs.RemoveRepoDirectory(ctx, logger, db, shardID, reposDir, dir, true); err != nil {
if err := fs.RemoveRepo(repoName); err != nil {
return true, err
}
reposRemoved.WithLabelValues(reason).Inc()
// Set as not_cloned in the database.
if err := db.GitserverRepos().SetCloneStatus(ctx, repoName, types.CloneStatusNotCloned, shardID); err != nil {
return true, errors.Wrap(err, "failed to update clone status")
}
return true, nil
}
maybeRemoveNonExisting := func(dir common.GitDir) (bool, error) {
maybeRemoveNonExisting := func(repoName api.RepoName, dir common.GitDir) (bool, error) {
if !removeNonExistingRepos {
return false, nil
}
repoName := gitserverfs.RepoNameFromDir(reposDir, dir)
_, err := db.GitserverRepos().GetByName(ctx, repoName)
// Repo still exists, nothing to do.
if err == nil {
@ -384,26 +396,27 @@ func cleanupRepos(
}
// The repo does not exist in the DB (or is soft-deleted), continue deleting it.
err = gitserverfs.RemoveRepoDirectory(ctx, logger, db, shardID, reposDir, dir, false)
// TODO: For soft-deleted, it might be nice to attempt to update the clone status,
// but that can only work when we can map a repo on disk back to a repo in DB
// when the name has been modified to have the DELETED- prefix.
err = fs.RemoveRepo(repoName)
if err == nil {
nonExistingReposRemoved.Inc()
}
return true, err
}
ensureGitAttributes := func(dir common.GitDir) (done bool, err error) {
ensureGitAttributes := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
return false, git.SetGitAttributes(dir)
}
ensureAutoGC := func(dir common.GitDir) (done bool, err error) {
repoName := gitserverfs.RepoNameFromDir(reposDir, dir)
ensureAutoGC := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
backend := gitcli.NewBackend(logger, rcf, dir, repoName)
return false, gitSetAutoGC(ctx, backend.Config())
}
maybeReclone := func(dir common.GitDir) (done bool, err error) {
repoName := gitserverfs.RepoNameFromDir(reposDir, dir)
maybeReclone := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
backend := gitcli.NewBackend(logger, rcf, dir, repoName)
repoType, err := git.GetRepositoryType(ctx, backend.Config())
@ -481,7 +494,7 @@ func cleanupRepos(
return true, nil
}
removeStaleLocks := func(gitDir common.GitDir) (done bool, err error) {
removeStaleLocks := func(repoName api.RepoName, gitDir common.GitDir) (done bool, err error) {
// if removing a lock fails, we still want to try the other locks.
var multi error
@ -495,7 +508,7 @@ func cleanupRepos(
multi = errors.Append(multi, err)
}
// we use the same conservative age for locks inside of refs
if err := gitserverfs.BestEffortWalk(gitDir.Path("refs"), func(path string, fi fs.DirEntry) error {
if err := gitserverfs.BestEffortWalk(gitDir.Path("refs"), func(path string, fi os.DirEntry) error {
if fi.IsDir() {
return nil
}
@ -533,21 +546,21 @@ func cleanupRepos(
return false, multi
}
performGC := func(dir common.GitDir) (done bool, err error) {
return false, gitGC(rcf, reposDir, dir)
performGC := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
return false, gitGC(rcf, repoName, dir)
}
performSGMaintenance := func(dir common.GitDir) (done bool, err error) {
performSGMaintenance := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
return false, sgMaintenance(logger, dir)
}
performGitPrune := func(reposDir string, dir common.GitDir) (done bool, err error) {
return false, pruneIfNeeded(rcf, reposDir, dir, looseObjectsLimit)
performGitPrune := func(repoName api.RepoName, dir common.GitDir) (done bool, err error) {
return false, pruneIfNeeded(rcf, repoName, dir, looseObjectsLimit)
}
type cleanupFn struct {
Name string
Do func(common.GitDir) (bool, error)
Do func(api.RepoName, common.GitDir) (bool, error)
}
cleanups := []cleanupFn{
// First, check if we should even be having this repo on disk anymore,
@ -584,9 +597,7 @@ func cleanupRepos(
// reducing storage requirements for the repository. Note: "garbage collect" and
// "sg maintenance" must not be enabled at the same time.
cleanups = append(cleanups, cleanupFn{"sg maintenance", performSGMaintenance})
cleanups = append(cleanups, cleanupFn{"git prune", func(dir common.GitDir) (bool, error) {
return performGitPrune(reposDir, dir)
}})
cleanups = append(cleanups, cleanupFn{"git prune", performGitPrune})
}
if !conf.Get().DisableAutoGitUpdates {
@ -607,7 +618,7 @@ func cleanupRepos(
reposCleaned := 0
err := iterateGitDirs(reposDir, func(gitDir common.GitDir) (done bool) {
err := fs.ForEachRepo(func(repo api.RepoName, gitDir common.GitDir) (done bool) {
for _, cfn := range cleanups {
// Check if context has been canceled, if so skip the rest of the repos.
select {
@ -618,7 +629,7 @@ func cleanupRepos(
}
start := time.Now()
done, err := cfn.Do(gitDir)
done, err := cfn.Do(repo, gitDir)
if err != nil {
logger.Error("error running cleanup command",
log.String("name", cfn.Name),
@ -654,7 +665,7 @@ func cleanupRepos(
logger.Info("Janitor run finished", log.String("duration", time.Since(start).String()))
}
func checkRepoDirCorrupt(rcf *wrexec.RecordingCommandFactory, reposDir string, dir common.GitDir) (bool, string, error) {
func checkRepoDirCorrupt(rcf *wrexec.RecordingCommandFactory, repoName api.RepoName, dir common.GitDir) (bool, string, error) {
// We treat repositories missing HEAD to be corrupt. Both our cloning
// and fetching ensure there is a HEAD file.
if _, err := os.Stat(dir.Path("HEAD")); os.IsNotExist(err) {
@ -669,32 +680,20 @@ func checkRepoDirCorrupt(rcf *wrexec.RecordingCommandFactory, reposDir string, d
// repos as corrupt. Since we often fetch with ensureRevision, this
// leads to most commands failing against the repository. It is safer
// to remove now than try a safe reclone.
if gitIsNonBareBestEffort(rcf, reposDir, dir) {
if gitIsNonBareBestEffort(rcf, repoName, dir) {
return true, "non-bare", nil
}
return false, "", nil
}
// DiskSizer gets information about disk size and free space.
type DiskSizer interface {
BytesFreeOnDisk(mountPoint string) (uint64, error)
DiskSizeBytes(mountPoint string) (uint64, error)
}
// howManyBytesToFree returns the number of bytes that should be freed to make sure
// there is sufficient disk space free to satisfy s.DesiredPercentFree.
func howManyBytesToFree(logger log.Logger, reposDir string, diskSizer DiskSizer, desiredPercentFree int) (int64, error) {
actualFreeBytes, err := diskSizer.BytesFreeOnDisk(reposDir)
if err != nil {
return 0, errors.Wrap(err, "finding the amount of space free on disk")
}
func howManyBytesToFree(logger log.Logger, usage diskusage.DiskUsage, desiredPercentFree int) int64 {
actualFreeBytes := usage.Free()
// Free up space if necessary.
diskSizeBytes, err := diskSizer.DiskSizeBytes(reposDir)
if err != nil {
return 0, errors.Wrap(err, "getting disk size")
}
diskSizeBytes := usage.Size()
desiredFreeBytes := uint64(float64(desiredPercentFree) / 100.0 * float64(diskSizeBytes))
howManyBytesToFree := int64(desiredFreeBytes - actualFreeBytes)
if howManyBytesToFree < 0 {
@ -709,30 +708,12 @@ func howManyBytesToFree(logger log.Logger, reposDir string, diskSizer DiskSizer,
log.Float64("amount to free in GiB", float64(howManyBytesToFree)/G),
)
return howManyBytesToFree, nil
return howManyBytesToFree
}
type StatDiskSizer struct{}
func (s *StatDiskSizer) BytesFreeOnDisk(mountPoint string) (uint64, error) {
usage, err := du.New(mountPoint)
if err != nil {
return 0, err
}
return usage.Available(), nil
}
func (s *StatDiskSizer) DiskSizeBytes(mountPoint string) (uint64, error) {
usage, err := du.New(mountPoint)
if err != nil {
return 0, err
}
return usage.Size(), nil
}
// freeUpSpace removes git directories under ReposDir, in order from least
// freeUpSpace removes git directories under the fs, in order from least
// recently to most recently used, until it has freed howManyBytesToFree.
func freeUpSpace(ctx context.Context, logger log.Logger, db database.DB, shardID string, reposDir string, diskSizer DiskSizer, desiredPercentFree int, howManyBytesToFree int64) error {
func freeUpSpace(ctx context.Context, logger log.Logger, db database.DB, fs gitserverfs.FS, shardID string, usage diskusage.DiskUsage, desiredPercentFree int, howManyBytesToFree int64) error {
if howManyBytesToFree <= 0 {
return nil
}
@ -740,7 +721,7 @@ func freeUpSpace(ctx context.Context, logger log.Logger, db database.DB, shardID
logger = logger.Scoped("freeUpSpace")
// Get the git directories and their mod times.
gitDirs, err := findGitDirs(reposDir)
gitDirs, err := findGitDirs(fs)
if err != nil {
return errors.Wrap(err, "finding git dirs")
}
@ -764,10 +745,7 @@ func freeUpSpace(ctx context.Context, logger log.Logger, db database.DB, shardID
// Remove repos until howManyBytesToFree is met or exceeded.
var spaceFreed int64
diskSizeBytes, err := diskSizer.DiskSizeBytes(reposDir)
if err != nil {
return errors.Wrap(err, "getting disk size")
}
diskSizeBytes := usage.Size()
for _, d := range gitDirs {
if spaceFreed >= howManyBytesToFree {
return nil
@ -780,19 +758,30 @@ func freeUpSpace(ctx context.Context, logger log.Logger, db database.DB, shardID
default:
}
delta := gitserverfs.DirSize(d.Path("."))
if err := gitserverfs.RemoveRepoDirectory(ctx, logger, db, shardID, reposDir, d, true); err != nil {
repoName := fs.ResolveRepoName(d)
delta, err := fs.DirSize(d.Path())
if err != nil {
logger.Warn("failed to get dir size", log.String("dir", string(d)), log.Error(err))
continue
}
if err := fs.RemoveRepo(repoName); err != nil {
logger.Warn("failed to remove least recently used repo", log.String("dir", string(d)), log.Error(err))
continue
}
// Set as not_cloned in the database.
if err := db.GitserverRepos().SetCloneStatus(ctx, repoName, types.CloneStatusNotCloned, shardID); err != nil {
logger.Warn("failed to update clone status", log.Error(err))
}
spaceFreed += delta
reposRemovedDiskPressure.Inc()
// Report the new disk usage situation after removing this repo.
actualFreeBytes, err := diskSizer.BytesFreeOnDisk(reposDir)
usage, err := fs.DiskUsage()
if err != nil {
return errors.Wrap(err, "finding the amount of space free on disk")
}
actualFreeBytes := usage.Free()
G := float64(1024 * 1024 * 1024)
logger.Warn("removed least recently used repo",
@ -820,37 +809,10 @@ func gitDirModTime(d common.GitDir) (time.Time, error) {
return head.ModTime(), nil
}
// iterateGitDirs walks over the reposDir on disk and calls walkFn for each of the
// git directories found on disk.
func iterateGitDirs(reposDir string, walkFn func(common.GitDir) (done bool)) error {
return gitserverfs.BestEffortWalk(reposDir, func(dir string, fi fs.DirEntry) error {
if gitserverfs.IgnorePath(reposDir, dir) {
if fi.IsDir() {
return filepath.SkipDir
}
return nil
}
// Look for $GIT_DIR
if !fi.IsDir() || fi.Name() != ".git" {
return nil
}
// We are sure this is a GIT_DIR after the above check
gitDir := common.GitDir(dir)
if done := walkFn(gitDir); done {
return filepath.SkipAll
}
return filepath.SkipDir
})
}
// findGitDirs collects the GitDirs of all repos under reposDir.
func findGitDirs(reposDir string) ([]common.GitDir, error) {
// findGitDirs collects the GitDirs of all repos in the FS.
func findGitDirs(fs gitserverfs.FS) ([]common.GitDir, error) {
var dirs []common.GitDir
return dirs, iterateGitDirs(reposDir, func(dir common.GitDir) bool {
return dirs, fs.ForEachRepo(func(_ api.RepoName, dir common.GitDir) (done bool) {
dirs = append(dirs, dir)
return false
})
@ -907,10 +869,10 @@ func getRecloneTime(ctx context.Context, c git.GitConfigBackend, dir common.GitD
// Note: it is not always possible to check if a repository is bare since a
// lock file may prevent the check from succeeding. We only want bare
// repositories and want to avoid transient false positives.
func gitIsNonBareBestEffort(rcf *wrexec.RecordingCommandFactory, reposDir string, dir common.GitDir) bool {
func gitIsNonBareBestEffort(rcf *wrexec.RecordingCommandFactory, repoName api.RepoName, dir common.GitDir) bool {
cmd := exec.Command("git", "-C", dir.Path(), "rev-parse", "--is-bare-repository")
dir.Set(cmd)
wrappedCmd := rcf.WrapWithRepoName(context.Background(), log.NoOp(), gitserverfs.RepoNameFromDir(reposDir, dir), cmd)
wrappedCmd := rcf.WrapWithRepoName(context.Background(), log.NoOp(), repoName, cmd)
b, _ := wrappedCmd.Output()
b = bytes.TrimSpace(b)
return bytes.Equal(b, []byte("false"))
@ -919,10 +881,10 @@ func gitIsNonBareBestEffort(rcf *wrexec.RecordingCommandFactory, reposDir string
// gitGC will invoke `git-gc` to clean up any garbage in the repo. It will
// operate synchronously and be aggressive with its internal heuristics when
// deciding to act (meaning it will act now at lower thresholds).
func gitGC(rcf *wrexec.RecordingCommandFactory, reposDir string, dir common.GitDir) error {
func gitGC(rcf *wrexec.RecordingCommandFactory, repoName api.RepoName, dir common.GitDir) error {
cmd := exec.Command("git", "-c", "gc.auto=1", "-c", "gc.autoDetach=false", "gc", "--auto")
dir.Set(cmd)
wrappedCmd := rcf.WrapWithRepoName(context.Background(), log.NoOp(), gitserverfs.RepoNameFromDir(reposDir, dir), cmd)
wrappedCmd := rcf.WrapWithRepoName(context.Background(), log.NoOp(), repoName, cmd)
err := wrappedCmd.Run()
if err != nil {
return errors.Wrapf(executil.WrapCmdError(cmd, err), "failed to git-gc")
@ -1070,7 +1032,7 @@ func lockRepoForGC(dir common.GitDir) (error, func() error) {
// We run git-prune only if there are enough loose objects. This approach is
// adapted from https://gitlab.com/gitlab-org/gitaly.
func pruneIfNeeded(rcf *wrexec.RecordingCommandFactory, reposDir string, dir common.GitDir, limit int) (err error) {
func pruneIfNeeded(rcf *wrexec.RecordingCommandFactory, repo api.RepoName, dir common.GitDir, limit int) (err error) {
needed, err := tooManyLooseObjects(dir, limit)
defer func() {
pruneStatus.WithLabelValues(strconv.FormatBool(err == nil), strconv.FormatBool(!needed)).Inc()
@ -1089,7 +1051,7 @@ func pruneIfNeeded(rcf *wrexec.RecordingCommandFactory, reposDir string, dir com
// continuously trigger repacks until the loose objects expire.
cmd := exec.Command("git", "prune", "--expire", "now")
dir.Set(cmd)
wrappedCmd := rcf.WrapWithRepoName(context.Background(), log.NoOp(), gitserverfs.RepoNameFromDir(reposDir, dir), cmd)
wrappedCmd := rcf.WrapWithRepoName(context.Background(), log.NoOp(), repo, cmd)
err = wrappedCmd.Run()
if err != nil {
return errors.Wrapf(executil.WrapCmdError(cmd, err), "failed to git-prune")

View File

@ -28,6 +28,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -71,13 +72,15 @@ UPDATE gitserver_repos SET repo_size_bytes = 5 where repo_id = 3;
t.Fatalf("unexpected error while inserting test data: %s", err)
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
actor.WithInternalActor(context.Background()),
logger,
db,
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
// Don't actually attempt clones.
return "", nil
@ -88,7 +91,8 @@ UPDATE gitserver_repos SET repo_size_bytes = 5 where repo_id = 3;
// This may be different in practice, but the way we setup the tests
// we only have .git dirs to measure so this is correct.
wantGitDirBytes := gitserverfs.DirSize(root)
wantGitDirBytes, err := fs.DirSize(root)
require.NoError(t, err)
for i := 1; i <= 3; i++ {
repo, err := db.GitserverRepos().GetByID(context.Background(), api.RepoID(i))
@ -132,13 +136,16 @@ func TestCleanupInactive(t *testing.T) {
t.Fatal(err)
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
newMockedGitserverDB(),
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
return "", nil
},
@ -171,13 +178,16 @@ func TestCleanupWrongShard(t *testing.T) {
t.Fatal(err)
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
newMockedGitserverDB(),
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"does-not-exist",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
return "", nil
},
@ -208,13 +218,16 @@ func TestCleanupWrongShard(t *testing.T) {
t.Fatal(err)
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
newMockedGitserverDB(),
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"gitserver-0",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
return "", nil
},
@ -245,13 +258,16 @@ func TestCleanupWrongShard(t *testing.T) {
t.Fatal(err)
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
newMockedGitserverDB(),
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"gitserver-0",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
t.Fatal("clone called")
return "", nil
@ -318,13 +334,16 @@ func TestGitGCAuto(t *testing.T) {
t.Fatalf("expected git to report objects but none found")
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
newMockedGitserverDB(),
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
return "", nil
},
@ -445,13 +464,16 @@ func TestCleanupExpired(t *testing.T) {
return "done", nil
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
newMockedGitserverDB(),
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
cloneRepo,
gitserver.GitserverAddresses{Addresses: []string{"test-gitserver"}},
false,
@ -509,13 +531,16 @@ func TestCleanup_RemoveNonExistentRepos(t *testing.T) {
root := t.TempDir()
repoExists, repoNotExists := initRepos(root)
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
mockDB,
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
return "", nil
},
@ -538,13 +563,16 @@ func TestCleanup_RemoveNonExistentRepos(t *testing.T) {
root := t.TempDir()
repoExists, repoNotExists := initRepos(root)
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
mockDB,
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
return "", nil
},
@ -690,13 +718,16 @@ func TestCleanupOldLocks(t *testing.T) {
}
}
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
cleanupRepos(
context.Background(),
logtest.Scoped(t),
newMockedGitserverDB(),
fs,
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
return "", nil
},
@ -706,7 +737,7 @@ func TestCleanupOldLocks(t *testing.T) {
isRemoved := func(path string) bool {
_, err := os.Stat(path)
return errors.Is(err, fs.ErrNotExist)
return errors.Is(err, os.ErrNotExist)
}
for _, c := range cases {
@ -753,18 +784,14 @@ func TestHowManyBytesToFree(t *testing.T) {
for _, tc := range tcs {
t.Run(tc.name, func(t *testing.T) {
b, err := howManyBytesToFree(
b := howManyBytesToFree(
logger,
"/testroot",
&fakeDiskSizer{
&fakeDiskUsage{
diskSize: tc.diskSize,
bytesFree: tc.bytesFree,
},
10,
)
if err != nil {
t.Fatal(err)
}
if b != tc.want {
t.Errorf("s.howManyBytesToFree(...) is %v, want 0", b)
}
@ -772,17 +799,25 @@ func TestHowManyBytesToFree(t *testing.T) {
}
}
type fakeDiskSizer struct {
type fakeDiskUsage struct {
bytesFree uint64
diskSize uint64
}
func (f *fakeDiskSizer) BytesFreeOnDisk(_ string) (uint64, error) {
return f.bytesFree, nil
func (f *fakeDiskUsage) Free() uint64 {
return f.bytesFree
}
func (f *fakeDiskSizer) DiskSizeBytes(_ string) (uint64, error) {
return f.diskSize, nil
func (f *fakeDiskUsage) Size() uint64 {
return f.diskSize
}
func (f *fakeDiskUsage) PercentUsed() float32 {
return 1
}
func (f *fakeDiskUsage) Available() uint64 {
return 1
}
// assertPaths checks that all paths under want exist. It excludes non-empty directories
@ -849,22 +884,22 @@ func isEmptyDir(path string) (bool, error) {
func TestFreeUpSpace(t *testing.T) {
logger := logtest.Scoped(t)
root := t.TempDir()
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
t.Run("no error if no space requested and no repos", func(t *testing.T) {
if err := freeUpSpace(context.Background(), logger, newMockedGitserverDB(), "test-gitserver", t.TempDir(), &fakeDiskSizer{}, 10, 0); err != nil {
if err := freeUpSpace(context.Background(), logger, newMockedGitserverDB(), fs, "test-gitserver", &fakeDiskUsage{}, 10, 0); err != nil {
t.Fatal(err)
}
})
t.Run("error if space requested and no repos", func(t *testing.T) {
if err := freeUpSpace(context.Background(), logger, newMockedGitserverDB(), "test-gitserver", t.TempDir(), &fakeDiskSizer{}, 10, 1); err == nil {
if err := freeUpSpace(context.Background(), logger, newMockedGitserverDB(), fs, "test-gitserver", &fakeDiskUsage{}, 10, 1); err == nil {
t.Fatal("want error")
}
})
t.Run("oldest repo gets removed to free up space", func(t *testing.T) {
// Set up.
rd := t.TempDir()
r1 := filepath.Join(rd, "repo1")
r2 := filepath.Join(rd, "repo2")
r1 := filepath.Join(root, "repo1")
r2 := filepath.Join(root, "repo2")
if err := makeFakeRepo(r1, 1000); err != nil {
t.Fatal(err)
}
@ -885,16 +920,18 @@ func TestFreeUpSpace(t *testing.T) {
gr := dbmocks.NewMockGitserverRepoStore()
db.GitserverReposFunc.SetDefaultReturn(gr)
// Run.
if err := freeUpSpace(context.Background(), logger, db, "test-gitserver", rd, &fakeDiskSizer{}, 10, 1000); err != nil {
if err := freeUpSpace(context.Background(), logger, db, fs, "test-gitserver", &fakeDiskUsage{}, 10, 1000); err != nil {
t.Fatal(err)
}
// Check.
assertPaths(t, rd,
assertPaths(t, root,
".tmp",
".p4home",
"repo2/.git/HEAD",
"repo2/.git/space_eater")
rds := gitserverfs.DirSize(rd)
rds, err := fs.DirSize(root)
require.NoError(t, err)
wantSize := int64(1000)
if rds > wantSize {
t.Errorf("repo dir size is %d, want no more than %d", rds, wantSize)
@ -1206,7 +1243,7 @@ func TestPruneIfNeeded(t *testing.T) {
}
limit := -1 // always run prune
if err := pruneIfNeeded(wrexec.NewNoOpRecordingCommandFactory(), reposDir, gitDir, limit); err != nil {
if err := pruneIfNeeded(wrexec.NewNoOpRecordingCommandFactory(), "reponame", gitDir, limit); err != nil {
t.Fatal(err)
}
}

View File

@ -5,37 +5,43 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
type CloneStatus struct {
CloneInProgress bool
CloneProgress string
}
// MaybeStartClone checks if a given repository is cloned on disk. If not, it starts
// cloning the repository in the background and returns a NotFound error, if no current
// clone operation is running for that repo yet. If it is already cloning, a NotFound
// error with CloneInProgress: true is returned.
// cloning the repository in the background and returns a CloneStatus.
// Note: If disableAutoGitUpdates is set in the site config, no operation is taken and
// a NotFound error is returned.
func (s *Server) MaybeStartClone(ctx context.Context, repo api.RepoName) (notFound *protocol.NotFoundPayload, cloned bool) {
dir := gitserverfs.RepoDirFromName(s.reposDir, repo)
if repoCloned(dir) {
return nil, true
func (s *Server) MaybeStartClone(ctx context.Context, repo api.RepoName) (cloned bool, status CloneStatus, _ error) {
cloned, err := s.fs.RepoCloned(repo)
if err != nil {
return false, CloneStatus{}, errors.Wrap(err, "determine clone status")
}
if cloned {
return true, CloneStatus{}, nil
}
if conf.Get().DisableAutoGitUpdates {
s.logger.Debug("not cloning on demand as DisableAutoGitUpdates is set")
return &protocol.NotFoundPayload{}, false
return false, CloneStatus{}, nil
}
cloneProgress, err := s.CloneRepo(ctx, repo, CloneOptions{})
if err != nil {
s.logger.Debug("error starting repo clone", log.String("repo", string(repo)), log.Error(err))
return &protocol.NotFoundPayload{CloneInProgress: false}, false
s.logger.Warn("error starting repo clone", log.String("repo", string(repo)), log.Error(err))
return false, CloneStatus{}, nil
}
return &protocol.NotFoundPayload{
return false, CloneStatus{
CloneInProgress: true,
CloneProgress: cloneProgress,
}, false
}, nil
}

View File

@ -17,7 +17,10 @@ go_library(
go_test(
name = "common_test",
srcs = ["queue_test.go"],
srcs = [
"common_test.go",
"queue_test.go",
],
embed = [":common"],
deps = [
"//internal/observation",

View File

@ -14,6 +14,9 @@ type GitDir string
// Path is a helper which returns filepath.Join(dir, elem...)
func (dir GitDir) Path(elem ...string) string {
if len(elem) == 0 {
return string(dir)
}
return filepath.Join(append([]string{string(dir)}, elem...)...)
}

View File

@ -0,0 +1,43 @@
package common
import (
"os/exec"
"testing"
)
func TestGitDirPath(t *testing.T) {
dir := GitDir("/repos/myrepo/.git")
path := dir.Path("objects", "pack")
if path != "/repos/myrepo/.git/objects/pack" {
t.Errorf("Expected /repos/myrepo/.git/objects/pack, got %s", path)
}
path = dir.Path()
if path != "/repos/myrepo/.git" {
t.Errorf("Expected /repos/myrepo/.git, got %s", path)
}
}
func TestGitDirSet(t *testing.T) {
dir := GitDir("/repos/myrepo/.git")
cmd := exec.Command("git", "log")
dir.Set(cmd)
if cmd.Dir != "/repos/myrepo/.git" {
t.Errorf("Expected dir to be set to /repos/myrepo/.git, got %s", cmd.Dir)
}
foundGitDirEnv := false
for _, env := range cmd.Env {
if env == "GIT_DIR=/repos/myrepo/.git" {
foundGitDirEnv = true
break
}
}
if !foundGitDirEnv {
t.Error("Expected GIT_DIR env to be set")
}
}

View File

@ -1,23 +0,0 @@
package internal
import (
"github.com/sourcegraph/sourcegraph/internal/diskusage"
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
)
// getDiskInfo returns disk usage info for the gitserver.
//
// It calculates the total and free disk space for the gitserver's repo
// directory using du.DiskUsage. The results are returned as a
// protocol.DiskInfoResponse struct.
func getDiskInfo(dir string) (*proto.DiskInfoResponse, error) {
usage, err := diskusage.New(dir)
if err != nil {
return nil, err
}
return &proto.DiskInfoResponse{
TotalSpace: usage.Size(),
FreeSpace: usage.Free(),
PercentUsed: usage.PercentUsed(),
}, nil
}

View File

@ -1,3 +1,4 @@
load("//dev:go_mockgen.bzl", "go_mockgen")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
load("//dev:go_defs.bzl", "go_test")
@ -6,18 +7,22 @@ go_library(
srcs = [
"fs.go",
"initfs.go",
"mock.go",
],
importpath = "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs",
visibility = ["//cmd/gitserver:__subpackages__"],
deps = [
"//cmd/gitserver/internal/common",
"//internal/api",
"//internal/database",
"//internal/diskusage",
"//internal/fileutil",
"//internal/gitserver/protocol",
"//internal/types",
"//internal/metrics",
"//internal/observation",
"//lib/errors",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_mountinfo//:mountinfo",
],
)
@ -28,16 +33,20 @@ go_test(
"initfs_test.go",
],
embed = [":gitserverfs"],
tags = ["requires-network"],
deps = [
"//cmd/gitserver/internal/common",
"//internal/api",
"//internal/database",
"//internal/database/dbmocks",
"//internal/database/dbtest",
"//internal/types",
"@com_github_sourcegraph_log//logtest",
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
],
)
go_mockgen(
name = "generate_mocks",
out = "mock.go",
manifests = [
"//:mockgen.yaml",
"//:mockgen.test.yaml",
"//:mockgen.temp.yaml",
],
deps = [":gitserverfs"],
)

View File

@ -1,45 +1,252 @@
package gitserverfs
import (
"context"
"io/fs"
"os"
"path/filepath"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/sourcegraph/log"
"github.com/sourcegraph/mountinfo"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/diskusage"
du "github.com/sourcegraph/sourcegraph/internal/diskusage"
"github.com/sourcegraph/sourcegraph/internal/fileutil"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/metrics"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// TempDirName is the name used for the temporary directory under ReposDir.
const TempDirName = ".tmp"
// P4HomeName is the name used for the directory that git p4 will use as $HOME
// and where it will store cache data.
const P4HomeName = ".p4home"
func MakeP4HomeDir(reposDir string) (string, error) {
p4Home := filepath.Join(reposDir, P4HomeName)
// Ensure the directory exists
if err := os.MkdirAll(p4Home, os.ModePerm); err != nil {
return "", errors.Wrapf(err, "ensuring p4Home exists: %q", p4Home)
}
return p4Home, nil
type FS interface {
// Initialize creates all the necessary directory structures used by gitserverfs.
Initialize() error
DirSize(string) (int64, error)
RepoDir(api.RepoName) common.GitDir
ResolveRepoName(common.GitDir) api.RepoName
TempDir(prefix string) (string, error)
IgnorePath(string) bool
P4HomeDir() (string, error)
VisitRepos(func(api.RepoName, common.GitDir) (done bool, _ error)) error
RepoCloned(api.RepoName) (bool, error)
RemoveRepo(api.RepoName) error
ForEachRepo(func(api.RepoName, common.GitDir) (done bool)) error
DiskUsage() (diskusage.DiskUsage, error)
}
func RepoDirFromName(reposDir string, name api.RepoName) common.GitDir {
func New(observationCtx *observation.Context, reposDir string) FS {
return &realGitserverFS{
logger: observationCtx.Logger.Scoped("gitserverfs"),
observationCtx: observationCtx,
reposDir: reposDir,
clonedState: make(map[api.RepoName]struct{}),
lastCloneStateReset: time.Now(),
}
}
type realGitserverFS struct {
reposDir string
observationCtx *observation.Context
logger log.Logger
// clonedState keeps track of the clone state of a repo.
clonedState map[api.RepoName]struct{}
cloneStateMu sync.Mutex
lastCloneStateReset time.Time
}
func (r *realGitserverFS) Initialize() error {
err := initGitserverFileSystem(r.logger, r.reposDir)
if err != nil {
return err
}
r.registerMetrics()
return nil
}
func (r *realGitserverFS) DirSize(dir string) (int64, error) {
if !filepath.IsAbs(dir) {
return 0, errors.New("dir must be absolute")
}
return dirSize(dir)
}
func (r *realGitserverFS) RepoDir(name api.RepoName) common.GitDir {
// We need to use api.UndeletedRepoName(repo) for the name, as this is a name
// transformation done on the database side that gitserver cannot know about.
dir := repoDirFromName(r.reposDir, api.UndeletedRepoName(name))
// dir is expected to be cleaned, ie. it doesn't allow `..`.
if !strings.HasPrefix(dir.Path(), r.reposDir) {
panic("dir is outside of repos dir")
}
return dir
}
func (r *realGitserverFS) ResolveRepoName(dir common.GitDir) api.RepoName {
return repoNameFromDir(r.reposDir, dir)
}
func (r *realGitserverFS) TempDir(prefix string) (string, error) {
return tempDir(r.reposDir, prefix)
}
func (r *realGitserverFS) IgnorePath(path string) bool {
return ignorePath(r.reposDir, path)
}
func (r *realGitserverFS) P4HomeDir() (string, error) {
return makeP4HomeDir(r.reposDir)
}
func (r *realGitserverFS) VisitRepos(visit func(api.RepoName, common.GitDir) (done bool, _ error)) error {
return nil
}
const cloneStateResetInterval = 5 * time.Minute
func (r *realGitserverFS) RepoCloned(name api.RepoName) (bool, error) {
r.cloneStateMu.Lock()
defer r.cloneStateMu.Unlock()
// Every few minutes, we invalidate the entire cache, in case we fall into
// some bad state, this'll fix the state every now and then.
if time.Since(r.lastCloneStateReset) > cloneStateResetInterval {
r.clonedState = make(map[api.RepoName]struct{})
}
_, cloned := r.clonedState[name]
if cloned {
return true, nil
}
cloned, err := repoCloned(r.RepoDir(name))
if err != nil {
return false, err
}
if cloned {
r.clonedState[name] = struct{}{}
}
return cloned, nil
}
func (r *realGitserverFS) RemoveRepo(name api.RepoName) error {
err := removeRepoDirectory(r.logger, r.reposDir, r.RepoDir(name))
// Mark as not cloned anymore in cache. We even remove it from the cache
// when the deletion failed partially, in the next call we'll recheck if
// the repo is still there.
r.cloneStateMu.Lock()
delete(r.clonedState, name)
r.cloneStateMu.Unlock()
return err
}
// ForEachRepo walks over the reposDir on disk and calls visit for each of the
// git directories found on disk.
func (r *realGitserverFS) ForEachRepo(visit func(api.RepoName, common.GitDir) bool) error {
return BestEffortWalk(r.reposDir, func(dir string, fi fs.DirEntry) error {
if ignorePath(r.reposDir, dir) {
if fi.IsDir() {
return filepath.SkipDir
}
return nil
}
// Look for $GIT_DIR
if !fi.IsDir() || fi.Name() != ".git" {
return nil
}
// We are sure this is a GIT_DIR after the above check
gitDir := common.GitDir(dir)
if done := visit(r.ResolveRepoName(gitDir), gitDir); done {
return filepath.SkipAll
}
return filepath.SkipDir
})
}
func (r *realGitserverFS) DiskUsage() (diskusage.DiskUsage, error) {
return du.New(r.reposDir)
}
var realGitserverFSMetricsRegisterer sync.Once
func (r *realGitserverFS) registerMetrics() {
realGitserverFSMetricsRegisterer.Do(func() {
// report the size of the repos dir
opts := mountinfo.CollectorOpts{Namespace: "gitserver"}
m := mountinfo.NewCollector(r.logger, opts, map[string]string{"reposDir": r.reposDir})
r.observationCtx.Registerer.MustRegister(m)
metrics.MustRegisterDiskMonitor(r.reposDir)
// TODO: Start removal of these.
// TODO(keegan) these are older names for the above disk metric. Keeping
// them to prevent breaking dashboards. Can remove once no
// alert/dashboards use them.
c := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "src_gitserver_disk_space_available",
Help: "Amount of free space disk space on the repos mount.",
}, func() float64 {
usage, err := du.New(r.reposDir)
if err != nil {
r.logger.Error("error getting disk usage info", log.Error(err))
return 0
}
return float64(usage.Available())
})
prometheus.MustRegister(c)
c = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "src_gitserver_disk_space_total",
Help: "Amount of total disk space in the repos directory.",
}, func() float64 {
usage, err := du.New(r.reposDir)
if err != nil {
r.logger.Error("error getting disk usage info", log.Error(err))
return 0
}
return float64(usage.Size())
})
prometheus.MustRegister(c)
})
}
// repoCloned checks if dir or `${dir}/.git` is a valid GIT_DIR.
func repoCloned(dir common.GitDir) (bool, error) {
_, err := os.Stat(dir.Path("HEAD"))
if err == nil {
return true, nil
}
if os.IsNotExist(err) {
return false, nil
}
return false, err
}
// tempDirName is the name used for the temporary directory under ReposDir.
const tempDirName = ".tmp"
// p4HomeName is the name used for the directory that git p4 will use as $HOME
// and where it will store cache data.
const p4HomeName = ".p4home"
func repoDirFromName(reposDir string, name api.RepoName) common.GitDir {
p := string(protocol.NormalizeRepo(name))
return common.GitDir(filepath.Join(reposDir, filepath.FromSlash(p), ".git"))
}
func RepoNameFromDir(reposDir string, dir common.GitDir) api.RepoName {
func repoNameFromDir(reposDir string, dir common.GitDir) api.RepoName {
// dir == ${s.ReposDir}/${name}/.git
parent := filepath.Dir(string(dir)) // remove suffix "/.git"
name := strings.TrimPrefix(parent, reposDir) // remove prefix "${s.ReposDir}"
@ -48,39 +255,39 @@ func RepoNameFromDir(reposDir string, dir common.GitDir) api.RepoName {
return protocol.NormalizeRepo(api.RepoName(name))
}
// TempDir is a wrapper around os.MkdirTemp, but using the given reposDir
// tempDir is a wrapper around os.MkdirTemp, but using the given reposDir
// temporary directory filepath.Join(s.ReposDir, tempDirName).
//
// This directory is cleaned up by gitserver and will be ignored by repository
// listing operations.
func TempDir(reposDir, prefix string) (name string, err error) {
func tempDir(reposDir, prefix string) (name string, err error) {
// TODO: At runtime, this directory always exists. We only need to ensure
// the directory exists here because tests use this function without creating
// the directory first. Ideally, we can remove this later.
tmp := filepath.Join(reposDir, TempDirName)
tmp := filepath.Join(reposDir, tempDirName)
if err := os.MkdirAll(tmp, os.ModePerm); err != nil {
return "", err
}
return os.MkdirTemp(tmp, prefix)
}
func IgnorePath(reposDir string, path string) bool {
func ignorePath(reposDir string, path string) bool {
// We ignore any path which starts with .tmp or .p4home in ReposDir
if filepath.Dir(path) != reposDir {
return false
}
base := filepath.Base(path)
return strings.HasPrefix(base, TempDirName) || strings.HasPrefix(base, P4HomeName)
return strings.HasPrefix(base, tempDirName) || strings.HasPrefix(base, p4HomeName)
}
// RemoveRepoDirectory atomically removes a directory from reposDir.
// removeRepoDirectory atomically removes a directory from reposDir.
//
// It first moves the directory to a temporary location to avoid leaving
// partial state in the event of server restart or concurrent modifications to
// the directory.
//
// Additionally, it removes parent empty directories up until reposDir.
func RemoveRepoDirectory(ctx context.Context, logger log.Logger, db database.DB, shardID string, reposDir string, gitDir common.GitDir, updateCloneStatus bool) error {
func removeRepoDirectory(logger log.Logger, reposDir string, gitDir common.GitDir) error {
dir := string(gitDir)
if _, err := os.Stat(dir); os.IsNotExist(err) {
@ -90,7 +297,7 @@ func RemoveRepoDirectory(ctx context.Context, logger log.Logger, db database.DB,
}
// Rename out of the location, so we can atomically stop using the repo.
tmp, err := TempDir(reposDir, "delete-repo")
tmp, err := tempDir(reposDir, "delete-repo")
if err != nil {
return err
}
@ -107,13 +314,6 @@ func RemoveRepoDirectory(ctx context.Context, logger log.Logger, db database.DB,
// Everything after this point is just cleanup, so any error that occurs
// should not be returned, just logged.
if updateCloneStatus {
// Set as not_cloned in the database.
if err := db.GitserverRepos().SetCloneStatus(ctx, RepoNameFromDir(reposDir, gitDir), types.CloneStatusNotCloned, shardID); err != nil {
logger.Warn("failed to update clone status", log.Error(err))
}
}
// Cleanup empty parent directories. We just attempt to remove and if we
// have a failure we assume it's due to the directory having other
// children. If we checked first we could race with someone else adding a
@ -173,12 +373,10 @@ func BestEffortWalk(root string, walkFn func(path string, entry fs.DirEntry) err
})
}
// DirSize returns the total size in bytes of all the files under d.
func DirSize(d string) int64 {
// dirSize returns the total size in bytes of all the files under d.
func dirSize(d string) (int64, error) {
var size int64
// We don't return an error, so we know that err is always nil and can be
// ignored.
_ = BestEffortWalk(d, func(path string, d fs.DirEntry) error {
return size, BestEffortWalk(d, func(path string, d fs.DirEntry) error {
if d.IsDir() {
return nil
}
@ -190,5 +388,4 @@ func DirSize(d string) int64 {
size += fi.Size()
return nil
})
return size
}

View File

@ -1,20 +1,13 @@
package gitserverfs
import (
"context"
"path/filepath"
"testing"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/types"
)
func TestIgnorePath(t *testing.T) {
@ -24,14 +17,14 @@ func TestIgnorePath(t *testing.T) {
path string
shouldIgnore bool
}{
{path: filepath.Join(reposDir, TempDirName), shouldIgnore: true},
{path: filepath.Join(reposDir, P4HomeName), shouldIgnore: true},
{path: filepath.Join(reposDir, tempDirName), shouldIgnore: true},
{path: filepath.Join(reposDir, p4HomeName), shouldIgnore: true},
// Double check handling of trailing space
{path: filepath.Join(reposDir, P4HomeName+" "), shouldIgnore: true},
{path: filepath.Join(reposDir, p4HomeName+" "), shouldIgnore: true},
{path: filepath.Join(reposDir, "sourcegraph/sourcegraph"), shouldIgnore: false},
} {
t.Run("", func(t *testing.T) {
assert.Equal(t, tc.shouldIgnore, IgnorePath(reposDir, tc.path))
assert.Equal(t, tc.shouldIgnore, ignorePath(reposDir, tc.path))
})
}
}
@ -47,44 +40,13 @@ func TestRemoveRepoDirectory(t *testing.T) {
"example.com/repo/.git/HEAD",
)
// Set them up in the DB
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
db := database.NewDB(logger, dbtest.NewDB(t))
idMapping := make(map[api.RepoName]api.RepoID)
// Set them all as cloned in the DB
for _, r := range []string{
"github.com/foo/baz",
"github.com/foo/survivor",
"github.com/bam/bam",
"example.com/repo",
} {
repo := &types.Repo{
Name: api.RepoName(r),
}
if err := db.Repos().Create(ctx, repo); err != nil {
t.Fatal(err)
}
if err := db.GitserverRepos().Update(ctx, &types.GitserverRepo{
RepoID: repo.ID,
ShardID: "test",
CloneStatus: types.CloneStatusCloned,
}); err != nil {
t.Fatal(err)
}
idMapping[repo.Name] = repo.ID
}
// Remove everything but github.com/foo/survivor
for _, d := range []string{
"github.com/foo/baz/.git",
"github.com/bam/bam/.git",
"example.com/repo/.git",
} {
if err := RemoveRepoDirectory(ctx, logger, db, "test-gitserver", root, common.GitDir(filepath.Join(root, d)), true); err != nil {
if err := removeRepoDirectory(logger, root, common.GitDir(filepath.Join(root, d))); err != nil {
t.Fatalf("failed to remove %s: %s", d, err)
}
}
@ -95,7 +57,7 @@ func TestRemoveRepoDirectory(t *testing.T) {
"github.com/bam/bam/.git",
"example.com/repo/.git",
} {
if err := RemoveRepoDirectory(ctx, logger, db, "test-gitserver", root, common.GitDir(filepath.Join(root, d)), true); err != nil {
if err := removeRepoDirectory(logger, root, common.GitDir(filepath.Join(root, d))); err != nil {
t.Fatalf("failed to remove %s: %s", d, err)
}
}
@ -104,28 +66,6 @@ func TestRemoveRepoDirectory(t *testing.T) {
"github.com/foo/survivor/.git/HEAD",
".tmp",
)
for _, tc := range []struct {
name api.RepoName
status types.CloneStatus
}{
{"github.com/foo/baz", types.CloneStatusNotCloned},
{"github.com/bam/bam", types.CloneStatusNotCloned},
{"example.com/repo", types.CloneStatusNotCloned},
{"github.com/foo/survivor", types.CloneStatusCloned},
} {
id, ok := idMapping[tc.name]
if !ok {
t.Fatal("id mapping not found")
}
r, err := db.GitserverRepos().GetByID(ctx, id)
if err != nil {
t.Fatal(err)
}
if r.CloneStatus != tc.status {
t.Errorf("Want %q, got %q for %q", tc.status, r.CloneStatus, tc.name)
}
}
}
func TestRemoveRepoDirectory_Empty(t *testing.T) {
@ -134,68 +74,13 @@ func TestRemoveRepoDirectory_Empty(t *testing.T) {
mkFiles(t, root,
"github.com/foo/baz/.git/HEAD",
)
db := dbmocks.NewMockDB()
gr := dbmocks.NewMockGitserverRepoStore()
db.GitserverReposFunc.SetDefaultReturn(gr)
logger := logtest.Scoped(t)
if err := RemoveRepoDirectory(context.Background(), logger, db, "test-gitserver", root, common.GitDir(filepath.Join(root, "github.com/foo/baz/.git")), true); err != nil {
if err := removeRepoDirectory(logger, root, common.GitDir(filepath.Join(root, "github.com/foo/baz/.git"))); err != nil {
t.Fatal(err)
}
assertPaths(t, root,
".tmp",
)
if len(gr.SetCloneStatusFunc.History()) == 0 {
t.Fatal("expected gitserverRepos.SetLastError to be called, but wasn't")
}
require.Equal(t, gr.SetCloneStatusFunc.History()[0].Arg2, types.CloneStatusNotCloned)
}
func TestRemoveRepoDirectory_UpdateCloneStatus(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
repo := &types.Repo{
Name: api.RepoName("github.com/foo/baz/"),
}
if err := db.Repos().Create(ctx, repo); err != nil {
t.Fatal(err)
}
if err := db.GitserverRepos().Update(ctx, &types.GitserverRepo{
RepoID: repo.ID,
ShardID: "test",
CloneStatus: types.CloneStatusCloned,
}); err != nil {
t.Fatal(err)
}
root := t.TempDir()
mkFiles(t, root, "github.com/foo/baz/.git/HEAD")
if err := RemoveRepoDirectory(ctx, logger, db, "test-gitserver", root, common.GitDir(filepath.Join(root, "github.com/foo/baz/.git")), false); err != nil {
t.Fatal(err)
}
assertPaths(t, root, ".tmp")
r, err := db.Repos().GetByName(ctx, repo.Name)
if err != nil {
t.Fatal(err)
}
gsRepo, err := db.GitserverRepos().GetByID(ctx, r.ID)
if err != nil {
t.Fatal(err)
}
if gsRepo.CloneStatus != types.CloneStatusCloned {
t.Fatalf("Expected clone_status to be %s, but got %s", types.CloneStatusCloned, gsRepo.CloneStatus)
}
}

View File

@ -10,14 +10,14 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func InitGitserverFileSystem(logger log.Logger, reposDir string) error {
func initGitserverFileSystem(logger log.Logger, reposDir string) error {
// Ensure the ReposDir exists.
if err := os.MkdirAll(reposDir, os.ModePerm); err != nil {
return errors.Wrap(err, "creating SRC_REPOS_DIR")
}
// Ensure the Perforce Dir exists.
p4Home := filepath.Join(reposDir, P4HomeName)
if err := os.MkdirAll(p4Home, os.ModePerm); err != nil {
p4Home, err := makeP4HomeDir(reposDir)
if err != nil {
return errors.Wrapf(err, "ensuring p4Home exists: %q", p4Home)
}
// Ensure the tmp dir exists, is cleaned up, and TMP_DIR is set properly.
@ -40,6 +40,15 @@ func InitGitserverFileSystem(logger log.Logger, reposDir string) error {
return nil
}
func makeP4HomeDir(reposDir string) (string, error) {
p4Home := filepath.Join(reposDir, p4HomeName)
// Ensure the directory exists
if err := os.MkdirAll(p4Home, os.ModePerm); err != nil {
return "", errors.Wrapf(err, "ensuring p4Home exists: %q", p4Home)
}
return p4Home, nil
}
// setupAndClearTmp sets up the tempdir for reposDir as well as clearing it
// out. It returns the temporary directory location.
func setupAndClearTmp(logger log.Logger, reposDir string) (string, error) {
@ -48,8 +57,8 @@ func setupAndClearTmp(logger log.Logger, reposDir string) (string, error) {
// Additionally, we create directories with the prefix .tmp-old which are
// asynchronously removed. We do not remove in place since it may be a
// slow operation to block on. Our tmp dir will be ${s.ReposDir}/.tmp
dir := filepath.Join(reposDir, TempDirName) // .tmp
oldPrefix := TempDirName + "-old"
dir := filepath.Join(reposDir, tempDirName) // .tmp
oldPrefix := tempDirName + "-old"
if _, err := os.Stat(dir); err == nil {
// Rename the current tmp file, so we can asynchronously remove it. Use
// a consistent pattern so if we get interrupted, we can clean it
@ -60,7 +69,7 @@ func setupAndClearTmp(logger log.Logger, reposDir string) (string, error) {
}
// oldTmp dir exists, so we need to use a child of oldTmp as the
// rename target.
if err := os.Rename(dir, filepath.Join(oldTmp, TempDirName)); err != nil {
if err := os.Rename(dir, filepath.Join(oldTmp, tempDirName)); err != nil {
return "", err
}
}
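
The renamed tempDirName constants above keep the existing rename-then-remove scheme: the current .tmp directory is moved under a fresh .tmp-old prefixed directory and deleted in the background, so startup never blocks on a slow removal. A minimal standalone sketch of that idea, with assumed package and function names (this is not the shipped implementation):

package tmpcleanup // hypothetical package, for illustration only

import (
	"os"
	"path/filepath"
)

// clearTmpAsync renames an existing .tmp directory under a uniquely named
// .tmp-old prefixed directory, removes it asynchronously, then recreates an
// empty .tmp and returns its path.
func clearTmpAsync(reposDir string) (string, error) {
	dir := filepath.Join(reposDir, ".tmp")
	if _, err := os.Stat(dir); err == nil {
		// Create a .tmp-old* directory to move the old tmp data into.
		old, err := os.MkdirTemp(reposDir, ".tmp-old")
		if err != nil {
			return "", err
		}
		if err := os.Rename(dir, filepath.Join(old, ".tmp")); err != nil {
			return "", err
		}
		// Remove the old data without blocking the caller.
		go os.RemoveAll(old)
	}
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return "", err
	}
	return dir, nil
}

The consistent .tmp-old prefix matters: if the process is interrupted mid-removal, a later run can find the leftovers and finish deleting them.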

File diff suppressed because it is too large.


@ -15,7 +15,6 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/accesslog"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/lib/gitservice"
@ -76,7 +75,7 @@ func (s *Server) gitServiceHandler() *gitservice.Handler {
return &gitservice.Handler{
Dir: func(d string) string {
return string(gitserverfs.RepoDirFromName(s.reposDir, api.RepoName(d)))
return string(s.fs.RepoDir(api.RepoName(d)))
},
ErrorHook: func(err error, stderr string) {


@ -15,6 +15,7 @@ go_library(
"//cmd/gitserver/internal/common",
"//cmd/gitserver/internal/git",
"//cmd/gitserver/internal/git/gitcli",
"//cmd/gitserver/internal/gitserverfs",
"//cmd/gitserver/internal/vcssyncer",
"//internal/api",
"//internal/database/dbmocks",
@ -23,6 +24,7 @@ go_library(
"//internal/gitserver/v1:gitserver",
"//internal/grpc",
"//internal/grpc/defaults",
"//internal/observation",
"//internal/ratelimit",
"//internal/types",
"//internal/vcs",
@ -30,6 +32,7 @@ go_library(
"//lib/errors",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_log//logtest",
"@com_github_stretchr_testify//require",
"@org_golang_x_time//rate",
],
)


@ -3,13 +3,14 @@ package inttests
import (
"container/list"
"context"
"github.com/sourcegraph/sourcegraph/lib/errors"
"net/http/httptest"
"net/url"
"os"
"path/filepath"
"testing"
"github.com/sourcegraph/sourcegraph/lib/errors"
mockassert "github.com/derision-test/go-mockgen/v2/testutil/assert"
mockrequire "github.com/derision-test/go-mockgen/v2/testutil/require"
"github.com/sourcegraph/log/logtest"
@ -52,14 +53,16 @@ func TestClone(t *testing.T) {
lock := NewMockRepositoryLock()
locker.TryAcquireFunc.SetDefaultReturn(lock, true)
fs := gitserverfs.New(&observation.TestContext, reposDir)
require.NoError(t, fs.Initialize())
getRemoteURLFunc := func(_ context.Context, name api.RepoName) (string, error) { //nolint:unparam
require.Equal(t, repo, name)
return remote, nil
}
s := server.NewServer(&server.ServerOpts{
Logger: logger,
ReposDir: reposDir,
Logger: logger,
FS: fs,
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
return gitcli.NewBackend(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory(), dir, repoName)
},
@ -146,7 +149,7 @@ func TestClone(t *testing.T) {
mockassert.CalledWith(t, gsStore.SetLastErrorFunc, mockassert.Values(mockassert.Skip, repo, "", "test-shard"))
// Check that the repo is in the expected location on disk.
_, err = os.Stat(gitserverfs.RepoDirFromName(reposDir, repo).Path())
_, err = os.Stat(fs.RepoDir(repo).Path())
require.NoError(t, err)
}
@ -166,14 +169,16 @@ func TestClone_Fail(t *testing.T) {
lock := NewMockRepositoryLock()
locker.TryAcquireFunc.SetDefaultReturn(lock, true)
fs := gitserverfs.New(&observation.TestContext, reposDir)
require.NoError(t, fs.Initialize())
getRemoteURLFunc := func(_ context.Context, name api.RepoName) (string, error) { //nolint:unparam
require.Equal(t, repo, name)
return remote, nil
}
s := server.NewServer(&server.ServerOpts{
Logger: logger,
ReposDir: reposDir,
Logger: logger,
FS: fs,
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
return gitcli.NewBackend(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory(), dir, repoName)
},
@ -294,7 +299,7 @@ func TestClone_Fail(t *testing.T) {
require.Contains(t, gsStore.SetLastErrorFunc.History()[0].Arg2, "failed to fetch: exit status 128")
// Check that no repo is in the expected location on disk.
_, err = os.Stat(gitserverfs.RepoDirFromName(reposDir, repo).Path())
_, err = os.Stat(fs.RepoDir(repo).Path())
require.Error(t, err)
require.True(t, os.IsNotExist(err))
}


@ -10,7 +10,7 @@ import (
"sync"
internal "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal"
common "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
api "github.com/sourcegraph/sourcegraph/internal/api"
)
// MockRepositoryLock is a mock implementation of the RepositoryLock
@ -290,17 +290,17 @@ type MockRepositoryLocker struct {
func NewMockRepositoryLocker() *MockRepositoryLocker {
return &MockRepositoryLocker{
AllStatusesFunc: &RepositoryLockerAllStatusesFunc{
defaultHook: func() (r0 map[common.GitDir]string) {
defaultHook: func() (r0 map[api.RepoName]string) {
return
},
},
StatusFunc: &RepositoryLockerStatusFunc{
defaultHook: func(common.GitDir) (r0 string, r1 bool) {
defaultHook: func(api.RepoName) (r0 string, r1 bool) {
return
},
},
TryAcquireFunc: &RepositoryLockerTryAcquireFunc{
defaultHook: func(common.GitDir, string) (r0 internal.RepositoryLock, r1 bool) {
defaultHook: func(api.RepoName, string) (r0 internal.RepositoryLock, r1 bool) {
return
},
},
@ -312,17 +312,17 @@ func NewMockRepositoryLocker() *MockRepositoryLocker {
func NewStrictMockRepositoryLocker() *MockRepositoryLocker {
return &MockRepositoryLocker{
AllStatusesFunc: &RepositoryLockerAllStatusesFunc{
defaultHook: func() map[common.GitDir]string {
defaultHook: func() map[api.RepoName]string {
panic("unexpected invocation of MockRepositoryLocker.AllStatuses")
},
},
StatusFunc: &RepositoryLockerStatusFunc{
defaultHook: func(common.GitDir) (string, bool) {
defaultHook: func(api.RepoName) (string, bool) {
panic("unexpected invocation of MockRepositoryLocker.Status")
},
},
TryAcquireFunc: &RepositoryLockerTryAcquireFunc{
defaultHook: func(common.GitDir, string) (internal.RepositoryLock, bool) {
defaultHook: func(api.RepoName, string) (internal.RepositoryLock, bool) {
panic("unexpected invocation of MockRepositoryLocker.TryAcquire")
},
},
@ -350,15 +350,15 @@ func NewMockRepositoryLockerFrom(i internal.RepositoryLocker) *MockRepositoryLoc
// AllStatuses method of the parent MockRepositoryLocker instance is
// invoked.
type RepositoryLockerAllStatusesFunc struct {
defaultHook func() map[common.GitDir]string
hooks []func() map[common.GitDir]string
defaultHook func() map[api.RepoName]string
hooks []func() map[api.RepoName]string
history []RepositoryLockerAllStatusesFuncCall
mutex sync.Mutex
}
// AllStatuses delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockRepositoryLocker) AllStatuses() map[common.GitDir]string {
func (m *MockRepositoryLocker) AllStatuses() map[api.RepoName]string {
r0 := m.AllStatusesFunc.nextHook()()
m.AllStatusesFunc.appendCall(RepositoryLockerAllStatusesFuncCall{r0})
return r0
@ -367,7 +367,7 @@ func (m *MockRepositoryLocker) AllStatuses() map[common.GitDir]string {
// SetDefaultHook sets function that is called when the AllStatuses method
// of the parent MockRepositoryLocker instance is invoked and the hook queue
// is empty.
func (f *RepositoryLockerAllStatusesFunc) SetDefaultHook(hook func() map[common.GitDir]string) {
func (f *RepositoryLockerAllStatusesFunc) SetDefaultHook(hook func() map[api.RepoName]string) {
f.defaultHook = hook
}
@ -375,7 +375,7 @@ func (f *RepositoryLockerAllStatusesFunc) SetDefaultHook(hook func() map[common.
// AllStatuses method of the parent MockRepositoryLocker instance invokes
// the hook at the front of the queue and discards it. After the queue is
// empty, the default hook function is invoked for any future action.
func (f *RepositoryLockerAllStatusesFunc) PushHook(hook func() map[common.GitDir]string) {
func (f *RepositoryLockerAllStatusesFunc) PushHook(hook func() map[api.RepoName]string) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -383,20 +383,20 @@ func (f *RepositoryLockerAllStatusesFunc) PushHook(hook func() map[common.GitDir
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *RepositoryLockerAllStatusesFunc) SetDefaultReturn(r0 map[common.GitDir]string) {
f.SetDefaultHook(func() map[common.GitDir]string {
func (f *RepositoryLockerAllStatusesFunc) SetDefaultReturn(r0 map[api.RepoName]string) {
f.SetDefaultHook(func() map[api.RepoName]string {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *RepositoryLockerAllStatusesFunc) PushReturn(r0 map[common.GitDir]string) {
f.PushHook(func() map[common.GitDir]string {
func (f *RepositoryLockerAllStatusesFunc) PushReturn(r0 map[api.RepoName]string) {
f.PushHook(func() map[api.RepoName]string {
return r0
})
}
func (f *RepositoryLockerAllStatusesFunc) nextHook() func() map[common.GitDir]string {
func (f *RepositoryLockerAllStatusesFunc) nextHook() func() map[api.RepoName]string {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -431,7 +431,7 @@ func (f *RepositoryLockerAllStatusesFunc) History() []RepositoryLockerAllStatuse
type RepositoryLockerAllStatusesFuncCall struct {
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 map[common.GitDir]string
Result0 map[api.RepoName]string
}
// Args returns an interface slice containing the arguments of this
@ -449,15 +449,15 @@ func (c RepositoryLockerAllStatusesFuncCall) Results() []interface{} {
// RepositoryLockerStatusFunc describes the behavior when the Status method
// of the parent MockRepositoryLocker instance is invoked.
type RepositoryLockerStatusFunc struct {
defaultHook func(common.GitDir) (string, bool)
hooks []func(common.GitDir) (string, bool)
defaultHook func(api.RepoName) (string, bool)
hooks []func(api.RepoName) (string, bool)
history []RepositoryLockerStatusFuncCall
mutex sync.Mutex
}
// Status delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockRepositoryLocker) Status(v0 common.GitDir) (string, bool) {
func (m *MockRepositoryLocker) Status(v0 api.RepoName) (string, bool) {
r0, r1 := m.StatusFunc.nextHook()(v0)
m.StatusFunc.appendCall(RepositoryLockerStatusFuncCall{v0, r0, r1})
return r0, r1
@ -466,7 +466,7 @@ func (m *MockRepositoryLocker) Status(v0 common.GitDir) (string, bool) {
// SetDefaultHook sets function that is called when the Status method of the
// parent MockRepositoryLocker instance is invoked and the hook queue is
// empty.
func (f *RepositoryLockerStatusFunc) SetDefaultHook(hook func(common.GitDir) (string, bool)) {
func (f *RepositoryLockerStatusFunc) SetDefaultHook(hook func(api.RepoName) (string, bool)) {
f.defaultHook = hook
}
@ -474,7 +474,7 @@ func (f *RepositoryLockerStatusFunc) SetDefaultHook(hook func(common.GitDir) (st
// Status method of the parent MockRepositoryLocker instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *RepositoryLockerStatusFunc) PushHook(hook func(common.GitDir) (string, bool)) {
func (f *RepositoryLockerStatusFunc) PushHook(hook func(api.RepoName) (string, bool)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -483,19 +483,19 @@ func (f *RepositoryLockerStatusFunc) PushHook(hook func(common.GitDir) (string,
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *RepositoryLockerStatusFunc) SetDefaultReturn(r0 string, r1 bool) {
f.SetDefaultHook(func(common.GitDir) (string, bool) {
f.SetDefaultHook(func(api.RepoName) (string, bool) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *RepositoryLockerStatusFunc) PushReturn(r0 string, r1 bool) {
f.PushHook(func(common.GitDir) (string, bool) {
f.PushHook(func(api.RepoName) (string, bool) {
return r0, r1
})
}
func (f *RepositoryLockerStatusFunc) nextHook() func(common.GitDir) (string, bool) {
func (f *RepositoryLockerStatusFunc) nextHook() func(api.RepoName) (string, bool) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -530,7 +530,7 @@ func (f *RepositoryLockerStatusFunc) History() []RepositoryLockerStatusFuncCall
type RepositoryLockerStatusFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 common.GitDir
Arg0 api.RepoName
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 string
@ -554,15 +554,15 @@ func (c RepositoryLockerStatusFuncCall) Results() []interface{} {
// RepositoryLockerTryAcquireFunc describes the behavior when the TryAcquire
// method of the parent MockRepositoryLocker instance is invoked.
type RepositoryLockerTryAcquireFunc struct {
defaultHook func(common.GitDir, string) (internal.RepositoryLock, bool)
hooks []func(common.GitDir, string) (internal.RepositoryLock, bool)
defaultHook func(api.RepoName, string) (internal.RepositoryLock, bool)
hooks []func(api.RepoName, string) (internal.RepositoryLock, bool)
history []RepositoryLockerTryAcquireFuncCall
mutex sync.Mutex
}
// TryAcquire delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockRepositoryLocker) TryAcquire(v0 common.GitDir, v1 string) (internal.RepositoryLock, bool) {
func (m *MockRepositoryLocker) TryAcquire(v0 api.RepoName, v1 string) (internal.RepositoryLock, bool) {
r0, r1 := m.TryAcquireFunc.nextHook()(v0, v1)
m.TryAcquireFunc.appendCall(RepositoryLockerTryAcquireFuncCall{v0, v1, r0, r1})
return r0, r1
@ -571,7 +571,7 @@ func (m *MockRepositoryLocker) TryAcquire(v0 common.GitDir, v1 string) (internal
// SetDefaultHook sets function that is called when the TryAcquire method of
// the parent MockRepositoryLocker instance is invoked and the hook queue is
// empty.
func (f *RepositoryLockerTryAcquireFunc) SetDefaultHook(hook func(common.GitDir, string) (internal.RepositoryLock, bool)) {
func (f *RepositoryLockerTryAcquireFunc) SetDefaultHook(hook func(api.RepoName, string) (internal.RepositoryLock, bool)) {
f.defaultHook = hook
}
@ -579,7 +579,7 @@ func (f *RepositoryLockerTryAcquireFunc) SetDefaultHook(hook func(common.GitDir,
// TryAcquire method of the parent MockRepositoryLocker instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *RepositoryLockerTryAcquireFunc) PushHook(hook func(common.GitDir, string) (internal.RepositoryLock, bool)) {
func (f *RepositoryLockerTryAcquireFunc) PushHook(hook func(api.RepoName, string) (internal.RepositoryLock, bool)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -588,19 +588,19 @@ func (f *RepositoryLockerTryAcquireFunc) PushHook(hook func(common.GitDir, strin
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *RepositoryLockerTryAcquireFunc) SetDefaultReturn(r0 internal.RepositoryLock, r1 bool) {
f.SetDefaultHook(func(common.GitDir, string) (internal.RepositoryLock, bool) {
f.SetDefaultHook(func(api.RepoName, string) (internal.RepositoryLock, bool) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *RepositoryLockerTryAcquireFunc) PushReturn(r0 internal.RepositoryLock, r1 bool) {
f.PushHook(func(common.GitDir, string) (internal.RepositoryLock, bool) {
f.PushHook(func(api.RepoName, string) (internal.RepositoryLock, bool) {
return r0, r1
})
}
func (f *RepositoryLockerTryAcquireFunc) nextHook() func(common.GitDir, string) (internal.RepositoryLock, bool) {
func (f *RepositoryLockerTryAcquireFunc) nextHook() func(api.RepoName, string) (internal.RepositoryLock, bool) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -635,7 +635,7 @@ func (f *RepositoryLockerTryAcquireFunc) History() []RepositoryLockerTryAcquireF
type RepositoryLockerTryAcquireFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 common.GitDir
Arg0 api.RepoName
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 string

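A sketch of how a test could use the regenerated MockRepositoryLocker now that its hooks and recorded calls are keyed by api.RepoName instead of common.GitDir (the package name is assumed to be the one containing the generated mocks):

package inttests // assumed: the package that defines the generated mocks

import (
	"testing"

	"github.com/sourcegraph/sourcegraph/internal/api"
)

func TestMockLockerRecordsRepoNames(t *testing.T) {
	locker := NewMockRepositoryLocker()
	lock := NewMockRepositoryLock()
	locker.TryAcquireFunc.SetDefaultReturn(lock, true)

	repo := api.RepoName("github.com/foo/bar")
	if _, ok := locker.TryAcquire(repo, "cloning"); !ok {
		t.Fatal("expected the mock to grant the lock")
	}

	// Call history now records api.RepoName arguments.
	if got := locker.TryAcquireFunc.History()[0].Arg0; got != repo {
		t.Fatalf("unexpected repo recorded: %q", got)
	}
}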

@ -3,13 +3,14 @@ package inttests
import (
"container/list"
"context"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/lib/errors"
"net/http/httptest"
"net/url"
"path/filepath"
"testing"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/require"
"golang.org/x/time/rate"
@ -18,6 +19,7 @@ import (
common "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git/gitcli"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/perforce"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
"github.com/sourcegraph/sourcegraph/internal/api"
@ -70,13 +72,15 @@ func TestClient_ResolveRevision(t *testing.T) {
db := newMockDB()
ctx := context.Background()
fs := gitserverfs.New(&observation.TestContext, filepath.Join(root, "repos"))
require.NoError(t, fs.Initialize())
getRemoteURLFunc := func(_ context.Context, name api.RepoName) (string, error) { //nolint:unparam
return remote, nil
}
s := server.NewServer(&server.ServerOpts{
Logger: logger,
ReposDir: filepath.Join(root, "repos"),
Logger: logger,
FS: fs,
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
return gitcli.NewBackend(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory(), dir, repoName)
},


@ -2,8 +2,6 @@ package inttests
import (
"context"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/lib/errors"
"net"
"net/http"
"os"
@ -13,15 +11,20 @@ import (
"strings"
"testing"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/lib/errors"
"golang.org/x/time/rate"
sglog "github.com/sourcegraph/log"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/require"
server "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal"
common "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git/gitcli"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
@ -30,6 +33,7 @@ import (
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
@ -76,13 +80,15 @@ func InitGitserver() {
})
db.ReposFunc.SetDefaultReturn(r)
fs := gitserverfs.New(&observation.TestContext, filepath.Join(root, "repos"))
require.NoError(&t, fs.Initialize())
getRemoteURLFunc := func(_ context.Context, name api.RepoName) (string, error) { //nolint:unparam // context is unused but required by the interface, error is not used in this test
return filepath.Join(root, "remotes", string(name)), nil
}
s := server.NewServer(&server.ServerOpts{
Logger: sglog.Scoped("server"),
ReposDir: filepath.Join(root, "repos"),
Logger: sglog.Scoped("server"),
FS: fs,
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
return gitcli.NewBackend(logtest.Scoped(&t), wrexec.NewNoOpRecordingCommandFactory(), dir, repoName)
},


@ -3,11 +3,11 @@ package internal
import (
"sync"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/internal/api"
)
// RepositoryLock is returned by RepositoryLocker.TryAcquire. It allows
// updating the status of a directory lock, as well as releasing the lock.
// updating the status of a repo lock, as well as releasing the lock.
type RepositoryLock interface {
// SetStatus updates the status for the lock. If the lock has been released,
// this is a noop.
@ -16,51 +16,46 @@ type RepositoryLock interface {
Release()
}
// RepositoryLocker provides locks for doing operations to a repository
// directory. When a repository is locked, only the owner of the lock is
// allowed to run commands against it.
//
// Repositories are identified by the absolute path to their $GIT_DIR.
//
// The directory's $GIT_DIR does not have to exist when locked. The owner of
// the lock may remove the directory's $GIT_DIR while holding the lock.
// RepositoryLocker provides locks for doing operations to a repository.
// When a repository is locked, only the owner of the lock is allowed to perform
// writing operations against it.
//
// The main use of RepositoryLocker is to prevent concurrent clones. However,
// it is also used during maintenance tasks such as recloning/migrating/etc.
type RepositoryLocker interface {
// TryAcquire acquires the lock for dir. If it is already held, ok is false
// TryAcquire acquires the lock for repo. If it is already held, ok is false
// and lock is nil. Otherwise a non-nil lock is returned and true. When
// finished with the lock you must call lock.Release.
TryAcquire(dir common.GitDir, initialStatus string) (lock RepositoryLock, ok bool)
// Status returns the status of the locked directory dir. If dir is not
// locked, then locked is false.
Status(dir common.GitDir) (status string, locked bool)
// AllStatuses returns the status of all locked directories.
AllStatuses() map[common.GitDir]string
TryAcquire(repo api.RepoName, initialStatus string) (lock RepositoryLock, ok bool)
// Status returns the status of the locked repo. If repo is not locked, then
// locked is false.
Status(repo api.RepoName) (status string, locked bool)
// AllStatuses returns the status of all locked repositories.
AllStatuses() map[api.RepoName]string
}
func NewRepositoryLocker() RepositoryLocker {
return &repositoryLocker{
status: make(map[common.GitDir]string),
status: make(map[api.RepoName]string),
}
}
type repositoryLocker struct {
// mu protects status
mu sync.RWMutex
// status tracks directories that are locked. The value is the status. If
// a directory is in status, the directory is locked.
status map[common.GitDir]string
// status tracks repos that are locked. The value is the status. If
// a repo is in status, the repo is locked.
status map[api.RepoName]string
}
func (rl *repositoryLocker) TryAcquire(dir common.GitDir, initialStatus string) (lock RepositoryLock, ok bool) {
func (rl *repositoryLocker) TryAcquire(repo api.RepoName, initialStatus string) (lock RepositoryLock, ok bool) {
rl.mu.Lock()
_, failed := rl.status[dir]
_, failed := rl.status[repo]
if !failed {
if rl.status == nil {
rl.status = make(map[common.GitDir]string)
rl.status = make(map[api.RepoName]string)
}
rl.status[dir] = initialStatus
rl.status[repo] = initialStatus
}
rl.mu.Unlock()
@ -71,32 +66,32 @@ func (rl *repositoryLocker) TryAcquire(dir common.GitDir, initialStatus string)
return &repositoryLock{
unlock: func() {
rl.mu.Lock()
delete(rl.status, dir)
delete(rl.status, repo)
rl.mu.Unlock()
},
setStatus: func(status string) {
rl.mu.Lock()
rl.status[dir] = status
rl.status[repo] = status
rl.mu.Unlock()
},
dir: dir,
repo: repo,
}, true
}
func (rl *repositoryLocker) Status(dir common.GitDir) (status string, locked bool) {
func (rl *repositoryLocker) Status(repo api.RepoName) (status string, locked bool) {
rl.mu.RLock()
defer rl.mu.RUnlock()
status, locked = rl.status[dir]
status, locked = rl.status[repo]
return
}
func (rl *repositoryLocker) AllStatuses() map[common.GitDir]string {
func (rl *repositoryLocker) AllStatuses() map[api.RepoName]string {
rl.mu.RLock()
defer rl.mu.RUnlock()
statuses := make(map[common.GitDir]string, len(rl.status))
for dir, status := range rl.status {
statuses[dir] = status
statuses := make(map[api.RepoName]string, len(rl.status))
for repo, status := range rl.status {
statuses[repo] = status
}
return statuses
@ -105,7 +100,7 @@ func (rl *repositoryLocker) AllStatuses() map[common.GitDir]string {
type repositoryLock struct {
unlock func()
setStatus func(status string)
dir common.GitDir
repo api.RepoName
mu sync.Mutex
done bool

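With this change the locker is keyed by api.RepoName rather than by the repository's $GIT_DIR, so callers no longer have to resolve a directory before taking a lock. A hedged usage sketch based on the interface above (package name is made up; it assumes the code lives under cmd/gitserver so the internal packages are importable):

package locktest // hypothetical package, for illustration only

import (
	"testing"

	internal "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal"
	"github.com/sourcegraph/sourcegraph/internal/api"
)

func TestLockerKeyedByRepoName(t *testing.T) {
	locker := internal.NewRepositoryLocker()
	repo := api.RepoName("github.com/foo/bar")

	lock, ok := locker.TryAcquire(repo, "starting clone")
	if !ok {
		t.Fatal("expected to acquire the lock")
	}
	// A second acquisition for the same repo fails while the lock is held.
	if _, ok := locker.TryAcquire(repo, "concurrent clone"); ok {
		t.Fatal("expected second TryAcquire to fail")
	}

	lock.SetStatus("fetching")
	if status, locked := locker.Status(repo); !locked || status != "fetching" {
		t.Fatalf("unexpected status %q (locked=%v)", status, locked)
	}

	lock.Release()
	if _, locked := locker.Status(repo); locked {
		t.Fatal("expected the lock to be released")
	}
}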

@ -76,7 +76,7 @@ func NewMockService() *MockService {
},
},
MaybeStartCloneFunc: &ServiceMaybeStartCloneFunc{
defaultHook: func(context.Context, api.RepoName) (r0 *protocol.NotFoundPayload, r1 bool) {
defaultHook: func(context.Context, api.RepoName) (r0 bool, r1 CloneStatus, r2 error) {
return
},
},
@ -123,7 +123,7 @@ func NewStrictMockService() *MockService {
},
},
MaybeStartCloneFunc: &ServiceMaybeStartCloneFunc{
defaultHook: func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool) {
defaultHook: func(context.Context, api.RepoName) (bool, CloneStatus, error) {
panic("unexpected invocation of MockService.MaybeStartClone")
},
},
@ -149,7 +149,7 @@ type surrogateMockService interface {
EnsureRevision(context.Context, api.RepoName, string) bool
IsRepoCloneable(context.Context, api.RepoName) (protocol.IsRepoCloneableResponse, error)
LogIfCorrupt(context.Context, api.RepoName, error)
MaybeStartClone(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool)
MaybeStartClone(context.Context, api.RepoName) (bool, CloneStatus, error)
RepoUpdate(context.Context, *protocol.RepoUpdateRequest) protocol.RepoUpdateResponse
SearchWithObservability(context.Context, trace.Trace, *protocol.SearchRequest, func(*protocol.CommitMatch) error) (bool, error)
}
@ -728,24 +728,24 @@ func (c ServiceLogIfCorruptFuncCall) Results() []interface{} {
// ServiceMaybeStartCloneFunc describes the behavior when the
// MaybeStartClone method of the parent MockService instance is invoked.
type ServiceMaybeStartCloneFunc struct {
defaultHook func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool)
hooks []func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool)
defaultHook func(context.Context, api.RepoName) (bool, CloneStatus, error)
hooks []func(context.Context, api.RepoName) (bool, CloneStatus, error)
history []ServiceMaybeStartCloneFuncCall
mutex sync.Mutex
}
// MaybeStartClone delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockService) MaybeStartClone(v0 context.Context, v1 api.RepoName) (*protocol.NotFoundPayload, bool) {
r0, r1 := m.MaybeStartCloneFunc.nextHook()(v0, v1)
m.MaybeStartCloneFunc.appendCall(ServiceMaybeStartCloneFuncCall{v0, v1, r0, r1})
return r0, r1
func (m *MockService) MaybeStartClone(v0 context.Context, v1 api.RepoName) (bool, CloneStatus, error) {
r0, r1, r2 := m.MaybeStartCloneFunc.nextHook()(v0, v1)
m.MaybeStartCloneFunc.appendCall(ServiceMaybeStartCloneFuncCall{v0, v1, r0, r1, r2})
return r0, r1, r2
}
// SetDefaultHook sets function that is called when the MaybeStartClone
// method of the parent MockService instance is invoked and the hook queue
// is empty.
func (f *ServiceMaybeStartCloneFunc) SetDefaultHook(hook func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool)) {
func (f *ServiceMaybeStartCloneFunc) SetDefaultHook(hook func(context.Context, api.RepoName) (bool, CloneStatus, error)) {
f.defaultHook = hook
}
@ -753,7 +753,7 @@ func (f *ServiceMaybeStartCloneFunc) SetDefaultHook(hook func(context.Context, a
// MaybeStartClone method of the parent MockService instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *ServiceMaybeStartCloneFunc) PushHook(hook func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool)) {
func (f *ServiceMaybeStartCloneFunc) PushHook(hook func(context.Context, api.RepoName) (bool, CloneStatus, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -761,20 +761,20 @@ func (f *ServiceMaybeStartCloneFunc) PushHook(hook func(context.Context, api.Rep
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *ServiceMaybeStartCloneFunc) SetDefaultReturn(r0 *protocol.NotFoundPayload, r1 bool) {
f.SetDefaultHook(func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool) {
return r0, r1
func (f *ServiceMaybeStartCloneFunc) SetDefaultReturn(r0 bool, r1 CloneStatus, r2 error) {
f.SetDefaultHook(func(context.Context, api.RepoName) (bool, CloneStatus, error) {
return r0, r1, r2
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *ServiceMaybeStartCloneFunc) PushReturn(r0 *protocol.NotFoundPayload, r1 bool) {
f.PushHook(func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool) {
return r0, r1
func (f *ServiceMaybeStartCloneFunc) PushReturn(r0 bool, r1 CloneStatus, r2 error) {
f.PushHook(func(context.Context, api.RepoName) (bool, CloneStatus, error) {
return r0, r1, r2
})
}
func (f *ServiceMaybeStartCloneFunc) nextHook() func(context.Context, api.RepoName) (*protocol.NotFoundPayload, bool) {
func (f *ServiceMaybeStartCloneFunc) nextHook() func(context.Context, api.RepoName) (bool, CloneStatus, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -815,10 +815,13 @@ type ServiceMaybeStartCloneFuncCall struct {
Arg1 api.RepoName
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 *protocol.NotFoundPayload
Result0 bool
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 bool
Result1 CloneStatus
// Result2 is the value of the 3rd result returned from this method
// invocation.
Result2 error
}
// Args returns an interface slice containing the arguments of this
@ -830,7 +833,7 @@ func (c ServiceMaybeStartCloneFuncCall) Args() []interface{} {
// Results returns an interface slice containing the results of this
// invocation.
func (c ServiceMaybeStartCloneFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
return []interface{}{c.Result0, c.Result1, c.Result2}
}
// ServiceRepoUpdateFunc describes the behavior when the RepoUpdate method

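These generated mocks follow the usual go-mockgen hook queue: SetDefaultHook/SetDefaultReturn define the fallback behavior, PushHook/PushReturn enqueue one-shot behaviors, and History records every call. A sketch of driving the new MaybeStartClone signature through the mock; it assumes a test file in the same package, and because the CloneStatus fields are not shown in this diff only its zero value is used:

package internal // assumed to sit next to the generated mocks in cmd/gitserver/internal

import (
	"context"
	"testing"

	"github.com/sourcegraph/sourcegraph/internal/api"
)

func TestMockMaybeStartClone(t *testing.T) {
	svc := NewMockService()

	// MaybeStartClone now reports (cloned bool, CloneStatus, error) instead of
	// (*protocol.NotFoundPayload, bool).
	var status CloneStatus // zero value; the concrete fields are not part of this diff
	svc.MaybeStartCloneFunc.SetDefaultReturn(false, status, nil)

	cloned, st, err := svc.MaybeStartClone(context.Background(), api.RepoName("github.com/foo/bar"))
	if err != nil || cloned {
		t.Fatalf("unexpected result: cloned=%v err=%v", cloned, err)
	}
	_ = st

	// Every invocation is recorded and can be asserted on afterwards.
	if len(svc.MaybeStartCloneFunc.History()) != 1 {
		t.Fatal("expected exactly one recorded call")
	}
}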

@ -20,7 +20,6 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/executil"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git/gitcli"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/perforce"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/sshagent"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/urlredactor"
@ -45,21 +44,20 @@ func (s *Server) CreateCommitFromPatch(ctx context.Context, req protocol.CreateC
var resp protocol.CreateCommitFromPatchResponse
repo := string(protocol.NormalizeRepo(req.Repo))
repoDir := filepath.Join(s.reposDir, repo)
repoGitDir := filepath.Join(repoDir, ".git")
if _, err := os.Stat(repoGitDir); os.IsNotExist(err) {
repoGitDir = filepath.Join(s.reposDir, repo)
if _, err := os.Stat(repoGitDir); os.IsNotExist(err) {
resp.SetError(repo, "", "", errors.Wrap(err, "gitserver: repo does not exist"))
return resp
}
repo := req.Repo
cloned, err := s.fs.RepoCloned(repo)
if err != nil {
resp.SetError(repo, "", "", errors.Wrap(err, "failed to check if repo is cloned"))
return resp
}
if !cloned {
resp.SetError(repo, "", "", errors.Wrap(err, "gitserver: repo does not exist"))
return resp
}
var (
remoteURL *vcs.URL
err error
)
repoGitDir := s.fs.RepoDir(repo)
var remoteURL *vcs.URL
if req.Push != nil && req.Push.RemoteURL != "" {
remoteURL, err = vcs.ParseURL(req.Push.RemoteURL)
@ -114,7 +112,7 @@ func (s *Server) CreateCommitFromPatch(ctx context.Context, req protocol.CreateC
}()
// Ensure tmp directory exists
tmpRepoDir, err := gitserverfs.TempDir(s.reposDir, "patch-repo-")
tmpRepoDir, err := s.fs.TempDir("patch-repo-")
if err != nil {
resp.SetError(repo, "", "", errors.Wrap(err, "gitserver: make tmp repo"))
return resp
@ -160,7 +158,7 @@ func (s *Server) CreateCommitFromPatch(ctx context.Context, req protocol.CreateC
tmpGitPathEnv := "GIT_DIR=" + filepath.Join(tmpRepoDir, ".git")
tmpObjectsDir := filepath.Join(tmpRepoDir, ".git", "objects")
repoObjectsDir := filepath.Join(repoGitDir, "objects")
repoObjectsDir := repoGitDir.Path("objects")
altObjectsEnv := "GIT_ALTERNATE_OBJECT_DIRECTORIES=" + repoObjectsDir
@ -295,7 +293,7 @@ func (s *Server) CreateCommitFromPatch(ctx context.Context, req protocol.CreateC
resp.ChangelistId = cid
} else {
cmd = exec.CommandContext(ctx, "git", "push", "--force", remoteURL.String(), fmt.Sprintf("%s:%s", cmtHash, ref))
cmd.Dir = repoGitDir
repoGitDir.Set(cmd)
// If the protocol is SSH and a private key was given, we want to
// use it for communication with the code host.
@ -332,7 +330,7 @@ func (s *Server) CreateCommitFromPatch(ctx context.Context, req protocol.CreateC
if req.PushRef == nil {
cmd = exec.CommandContext(ctx, "git", "update-ref", "--", ref, cmtHash)
cmd.Dir = repoGitDir
repoGitDir.Set(cmd)
if out, err = run(cmd, "creating ref", false); err != nil {
logger.Error("Failed to create ref for commit.", log.String("commit", cmtHash), log.String("output", string(out)))
@ -348,7 +346,7 @@ func (s *Server) CreateCommitFromPatch(ctx context.Context, req protocol.CreateC
//
// The ref prefix `ref/<ref type>/` is stripped away from the returned
// refs.
func (s *Server) repoRemoteRefs(ctx context.Context, remoteURL *vcs.URL, repoName, prefix string) (map[string]string, error) {
func (s *Server) repoRemoteRefs(ctx context.Context, remoteURL *vcs.URL, repoName api.RepoName, prefix string) (map[string]string, error) {
// The expected output of this git command is a list of:
// <commit hash> <ref name>
cmd := exec.Command("git", "ls-remote", remoteURL.String(), prefix+"*")
@ -393,7 +391,7 @@ func (s *Server) shelveChangelist(ctx context.Context, req protocol.CreateCommit
repo := string(req.Repo)
baseCommit := string(req.BaseCommit)
p4home, err := gitserverfs.MakeP4HomeDir(s.reposDir)
p4home, err := s.fs.P4HomeDir()
if err != nil {
return "", err
}
@ -424,7 +422,7 @@ func (s *Server) shelveChangelist(ctx context.Context, req protocol.CreateCommit
p4client := strings.TrimPrefix(req.TargetRef, "refs/heads/")
// do all work in (another) temporary directory
tmpClientDir, err := gitserverfs.TempDir(s.reposDir, "perforce-client-")
tmpClientDir, err := s.fs.TempDir("perforce-client-")
if err != nil {
return "", errors.Wrap(err, "gitserver: make tmp repo for Perforce client")
}
@ -461,7 +459,6 @@ func (s *Server) shelveChangelist(ctx context.Context, req protocol.CreateCommit
// check to see if there's a changelist for this target branch already
args := perforce.GetChangeListByClientArguments{
P4Home: p4home,
P4Port: p4port,
P4User: p4user,
P4Passwd: p4passwd,
@ -470,7 +467,7 @@ func (s *Server) shelveChangelist(ctx context.Context, req protocol.CreateCommit
Client: p4client,
}
cl, err := perforce.GetChangelistByClient(ctx, args)
cl, err := perforce.GetChangelistByClient(ctx, s.fs, args)
if err == nil && cl.ID != "" {
return cl.ID, nil
}
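
The patch above is applied inside a throwaway repository whose object database borrows the real repo's objects through GIT_ALTERNATE_OBJECT_DIRECTORIES, so the base commit is visible without copying any objects. A simplified sketch of that environment setup (helper name, paths, and arguments are illustrative only, not the shipped code):

package patchsketch // hypothetical package, for illustration only

import (
	"os"
	"os/exec"
	"path/filepath"
)

// applyPatchInScratchRepo runs `git apply` in a temporary repository while
// pointing GIT_ALTERNATE_OBJECT_DIRECTORIES at the real repository's objects
// directory, mirroring the trick used by CreateCommitFromPatch above.
func applyPatchInScratchRepo(tmpRepoDir, realGitDir, patchFile string) error {
	cmd := exec.Command("git", "apply", "--index", patchFile)
	cmd.Dir = tmpRepoDir
	cmd.Env = append(os.Environ(),
		"GIT_DIR="+filepath.Join(tmpRepoDir, ".git"),
		"GIT_ALTERNATE_OBJECT_DIRECTORIES="+filepath.Join(realGitDir, "objects"),
	)
	return cmd.Run()
}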


@ -17,12 +17,6 @@ import (
// GetChangeListByIDArguments are the arguments for GetChangelistByID.
type GetChangeListByIDArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -34,7 +28,7 @@ type GetChangeListByIDArguments struct {
ChangelistID string
}
func GetChangelistByID(ctx context.Context, args GetChangeListByIDArguments) (*p4types.Changelist, error) {
func GetChangelistByID(ctx context.Context, fs gitserverfs.FS, args GetChangeListByIDArguments) (*p4types.Changelist, error) {
options := []P4OptionFunc{
WithAuthentication(args.P4User, args.P4Passwd),
WithHost(args.P4Port),
@ -50,13 +44,18 @@ func GetChangelistByID(ctx context.Context, args GetChangeListByIDArguments) (*p
"-e", args.ChangelistID, // start from this changelist and go up
))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-changelist-")
p4home, err := fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-changelist-")
if err != nil {
return nil, errors.Wrap(err, "could not create temp dir to invoke 'p4 changes'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {
@ -85,10 +84,6 @@ func GetChangelistByID(ctx context.Context, args GetChangeListByIDArguments) (*p
// GetChangeListByClientArguments are the arguments for GetChangelistByClient.
type GetChangeListByClientArguments struct {
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -103,7 +98,7 @@ type GetChangeListByClientArguments struct {
Client string
}
func GetChangelistByClient(ctx context.Context, args GetChangeListByClientArguments) (*p4types.Changelist, error) {
func GetChangelistByClient(ctx context.Context, fs gitserverfs.FS, args GetChangeListByClientArguments) (*p4types.Changelist, error) {
options := []P4OptionFunc{
WithAuthentication(args.P4User, args.P4Passwd),
WithHost(args.P4Port),
@ -120,7 +115,12 @@ func GetChangelistByClient(ctx context.Context, args GetChangeListByClientArgume
"-c", args.Client,
))
cmd := NewBaseCommand(ctx, args.P4Home, args.WorkDir, options...)
p4home, err := fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home dir")
}
cmd := NewBaseCommand(ctx, p4home, args.WorkDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {


@ -4,17 +4,12 @@ import (
"context"
"strings"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// IsDepotPathCloneableArguments are the arguments for IsDepotPathCloneable.
type IsDepotPathCloneableArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -26,12 +21,9 @@ type IsDepotPathCloneableArguments struct {
DepotPath string
}
func IsDepotPathCloneable(ctx context.Context, args IsDepotPathCloneableArguments) error {
func IsDepotPathCloneable(ctx context.Context, fs gitserverfs.FS, args IsDepotPathCloneableArguments) error {
// start with a test and set up trust if necessary
if err := P4TestWithTrust(ctx, P4TestWithTrustArguments{
ReposDir: args.ReposDir,
P4Home: args.P4Home,
if err := P4TestWithTrust(ctx, fs, P4TestWithTrustArguments{
P4Port: args.P4Port,
P4User: args.P4User,
P4Passwd: args.P4Passwd,
@ -48,10 +40,7 @@ func IsDepotPathCloneable(ctx context.Context, args IsDepotPathCloneableArgument
depot := strings.Split(strings.TrimLeft(args.DepotPath, "/"), "/")[0]
// get a list of depots that match the supplied depot (if it's defined)
depots, err := P4Depots(ctx, P4DepotsArguments{
ReposDir: args.ReposDir,
P4Home: args.P4Home,
depots, err := P4Depots(ctx, fs, P4DepotsArguments{
P4Port: args.P4Port,
P4User: args.P4User,


@ -56,12 +56,6 @@ type perforceDepot struct {
// P4DepotsArguments contains the arguments for P4Depots.
type P4DepotsArguments struct {
// ReposDir is the directory where the repos are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4Port is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -76,7 +70,7 @@ type P4DepotsArguments struct {
// P4Depots returns all of the depots to which the user has access on the host
// and whose names match the given nameFilter, which can contain asterisks (*) for wildcards
// If nameFilter is blank, all depots are returned.
func P4Depots(ctx context.Context, args P4DepotsArguments) ([]perforceDepot, error) {
func P4Depots(ctx context.Context, fs gitserverfs.FS, args P4DepotsArguments) ([]perforceDepot, error) {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
@ -91,13 +85,18 @@ func P4Depots(ctx context.Context, args P4DepotsArguments) ([]perforceDepot, err
options = append(options, WithArguments("-Mj", "-ztag", "depots", "-e", args.NameFilter))
}
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-depots-")
p4home, err := fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-depots-")
if err != nil {
return nil, errors.Wrap(err, "could not create temp dir to invoke 'p4 depots'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {


@ -13,13 +13,6 @@ import (
// P4GroupMembersArguments are the arguments for P4GroupMembers.
type P4GroupMembersArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
@ -33,7 +26,7 @@ type P4GroupMembersArguments struct {
}
// P4GroupMembers returns all usernames that are members of the given group.
func P4GroupMembers(ctx context.Context, args P4GroupMembersArguments) ([]string, error) {
func P4GroupMembers(ctx context.Context, fs gitserverfs.FS, args P4GroupMembersArguments) ([]string, error) {
options := []P4OptionFunc{
WithAuthentication(args.P4User, args.P4Passwd),
WithHost(args.P4Port),
@ -41,13 +34,18 @@ func P4GroupMembers(ctx context.Context, args P4GroupMembersArguments) ([]string
options = append(options, WithArguments("-Mj", "-ztag", "group", "-o", args.Group))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-group-")
p4home, err := fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-group-")
if err != nil {
return nil, errors.Wrap(err, "could not create temp dir to invoke 'p4 group'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {
if ctxerr := ctx.Err(); ctxerr != nil {


@ -13,12 +13,6 @@ import (
// P4TestWithTrustArguments are the arguments for P4TestWithTrust.
type P4TestWithTrustArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -28,26 +22,24 @@ type P4TestWithTrustArguments struct {
}
// P4TestWithTrust attempts to test the Perforce server and performs a trust operation when needed.
func P4TestWithTrust(ctx context.Context, args P4TestWithTrustArguments) error {
func P4TestWithTrust(ctx context.Context, fs gitserverfs.FS, args P4TestWithTrustArguments) error {
// Attempt to check connectivity, may be prompted to trust.
err := P4Test(ctx, P4TestArguments(args))
err := P4Test(ctx, fs, P4TestArguments(args))
if err == nil {
return nil // The test worked, session still valid for the user
}
// If the output indicates that we have to run p4trust first, do that.
if strings.Contains(err.Error(), "To allow connection use the 'p4 trust' command.") {
err := P4Trust(ctx, P4TrustArguments{
ReposDir: args.ReposDir,
P4Home: args.P4Home,
P4Port: args.P4Port,
err := P4Trust(ctx, fs, P4TrustArguments{
P4Port: args.P4Port,
})
if err != nil {
return errors.Wrap(err, "trust")
}
// Now attempt to run p4test again.
err = P4Test(ctx, P4TestArguments(args))
err = P4Test(ctx, fs, P4TestArguments(args))
if err != nil {
return errors.Wrap(err, "testing connection after trust")
}
@ -60,12 +52,6 @@ func P4TestWithTrust(ctx context.Context, args P4TestWithTrustArguments) error {
// P4UserIsSuperUserArguments are the arguments for P4UserIsSuperUser.
type P4UserIsSuperUserArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4Port is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -78,7 +64,7 @@ type P4UserIsSuperUserArguments struct {
// If the user is a super user, no error is returned. If not, ErrIsNotSuperUser
// is returned.
// Other errors may occur.
func P4UserIsSuperUser(ctx context.Context, args P4UserIsSuperUserArguments) error {
func P4UserIsSuperUser(ctx context.Context, fs gitserverfs.FS, args P4UserIsSuperUserArguments) error {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
@ -90,13 +76,18 @@ func P4UserIsSuperUser(ctx context.Context, args P4UserIsSuperUserArguments) err
// Validate the user has "super" access with "-u" option, see https://www.perforce.com/perforce/r12.1/manuals/cmdref/protects.html
options = append(options, WithArguments("protects", "-u", args.P4User))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-protects-")
p4home, err := fs.P4HomeDir()
if err != nil {
return errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-protects-")
if err != nil {
return errors.Wrap(err, "could not create temp dir to invoke 'p4 protects'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {
if ctxerr := ctx.Err(); ctxerr != nil {
@ -121,19 +112,12 @@ var ErrIsNotSuperUser = errors.New("the user does not have super access")
// P4TrustArguments are the arguments to P4Trust.
type P4TrustArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
}
// P4Trust blindly accepts fingerprint of the Perforce server.
func P4Trust(ctx context.Context, args P4TrustArguments) error {
func P4Trust(ctx context.Context, fs gitserverfs.FS, args P4TrustArguments) error {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
@ -143,13 +127,18 @@ func P4Trust(ctx context.Context, args P4TrustArguments) error {
options = append(options, WithArguments("trust", "-y", "-f"))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-trust-")
p4home, err := fs.P4HomeDir()
if err != nil {
return errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-trust-")
if err != nil {
return errors.Wrap(err, "could not create temp dir to invoke 'p4 trust'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {
@ -166,12 +155,6 @@ func P4Trust(ctx context.Context, args P4TrustArguments) error {
// P4TestArguments are the arguments to the P4Test function.
type P4TestArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -182,7 +165,7 @@ type P4TestArguments struct {
// P4Test uses `p4 login -s` to test the Perforce connection: port, user, passwd.
// If the command times out after 10 seconds, it will be tried one more time.
func P4Test(ctx context.Context, args P4TestArguments) error {
func P4Test(ctx context.Context, fs gitserverfs.FS, args P4TestArguments) error {
runCommand := func() error {
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
defer cancel()
@ -198,13 +181,18 @@ func P4Test(ctx context.Context, args P4TestArguments) error {
// so it seems like the perfect alternative to `p4 ping`.
options = append(options, WithArguments("login", "-s"))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-login-")
p4home, err := fs.P4HomeDir()
if err != nil {
return errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-login-")
if err != nil {
return errors.Wrap(err, "could not create temp dir to invoke 'p4 login'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {

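All of the p4 helper functions in this package now derive $HOME (P4HomeDir) and their scratch directories from the shared gitserverfs.FS instead of threading ReposDir/P4Home strings through every argument struct. A hedged example of calling one of them with an FS; the package name is made up, and the caller is assumed to live under cmd/gitserver so the internal packages are importable:

package p4check // hypothetical package, for illustration only

import (
	"context"
	"log"

	"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
	"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/perforce"
	"github.com/sourcegraph/sourcegraph/internal/observation"
)

// checkPerforce verifies connectivity to a Perforce server, letting
// P4TestWithTrust run `p4 trust` on demand and retry the login check.
func checkPerforce(ctx context.Context, reposDir, port, user, passwd string) {
	fs := gitserverfs.New(&observation.TestContext, reposDir)
	if err := fs.Initialize(); err != nil {
		log.Fatal(err)
	}
	err := perforce.P4TestWithTrust(ctx, fs, perforce.P4TestWithTrustArguments{
		P4Port:   port,
		P4User:   user,
		P4Passwd: passwd,
	})
	if err != nil {
		log.Fatalf("perforce connection check failed: %v", err)
	}
}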

@ -14,12 +14,6 @@ import (
// P4ProtectsForUserArguments are the arguments for P4ProtectsForUser.
type P4ProtectsForUserArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -32,7 +26,7 @@ type P4ProtectsForUserArguments struct {
}
// P4ProtectsForUser returns all protect definitions that apply to the given username.
func P4ProtectsForUser(ctx context.Context, args P4ProtectsForUserArguments) ([]*p4types.Protect, error) {
func P4ProtectsForUser(ctx context.Context, fs gitserverfs.FS, args P4ProtectsForUserArguments) ([]*p4types.Protect, error) {
options := []P4OptionFunc{
WithAuthentication(args.P4User, args.P4Passwd),
WithHost(args.P4Port),
@ -42,13 +36,18 @@ func P4ProtectsForUser(ctx context.Context, args P4ProtectsForUserArguments) ([]
// requires super access.
options = append(options, WithArguments("-Mj", "-ztag", "protects", "-u", args.Username))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-protects-")
p4home, err := fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-protects-")
if err != nil {
return nil, errors.Wrap(err, "could not create temp dir to invoke 'p4 protects'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {
if ctxerr := ctx.Err(); ctxerr != nil {
@ -71,12 +70,6 @@ func P4ProtectsForUser(ctx context.Context, args P4ProtectsForUserArguments) ([]
}
type P4ProtectsForDepotArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -89,7 +82,7 @@ type P4ProtectsForDepotArguments struct {
}
// P4ProtectsForDepot returns all protect definitions that apply to the given depot.
func P4ProtectsForDepot(ctx context.Context, args P4ProtectsForDepotArguments) ([]*p4types.Protect, error) {
func P4ProtectsForDepot(ctx context.Context, fs gitserverfs.FS, args P4ProtectsForDepotArguments) ([]*p4types.Protect, error) {
options := []P4OptionFunc{
WithAuthentication(args.P4User, args.P4Passwd),
WithHost(args.P4Port),
@ -99,13 +92,18 @@ func P4ProtectsForDepot(ctx context.Context, args P4ProtectsForDepotArguments) (
// access.
options = append(options, WithArguments("-Mj", "-ztag", "protects", "-a", args.Depot))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-protects-")
p4home, err := fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-protects-")
if err != nil {
return nil, errors.Wrap(err, "could not create temp dir to invoke 'p4 protects'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {


@ -12,12 +12,6 @@ import (
)
type P4UsersArguments struct {
// ReposDir is the directory where the repositories are stored.
ReposDir string
// P4Home is the path to the directory that 'p4' will use as $HOME
// and where it will store cache data.
P4Home string
// P4PORT is the address of the Perforce server.
P4Port string
// P4User is the Perforce username to authenticate with.
@ -27,7 +21,7 @@ type P4UsersArguments struct {
}
// P4Users returns all of the users known to the Perforce server.
func P4Users(ctx context.Context, args P4UsersArguments) ([]perforce.User, error) {
func P4Users(ctx context.Context, fs gitserverfs.FS, args P4UsersArguments) ([]perforce.User, error) {
options := []P4OptionFunc{
WithAuthentication(args.P4User, args.P4Passwd),
WithHost(args.P4Port),
@ -35,13 +29,18 @@ func P4Users(ctx context.Context, args P4UsersArguments) ([]perforce.User, error
options = append(options, WithArguments("-Mj", "-ztag", "users"))
scratchDir, err := gitserverfs.TempDir(args.ReposDir, "p4-users-")
p4home, err := fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home dir")
}
scratchDir, err := fs.TempDir("p4-users-")
if err != nil {
return nil, errors.Wrap(err, "could not create temp dir to invoke 'p4 users'")
}
defer os.Remove(scratchDir)
cmd := NewBaseCommand(ctx, args.P4Home, scratchDir, options...)
cmd := NewBaseCommand(ctx, p4home, scratchDir, options...)
out, err := cmd.CombinedOutput()
if err != nil {
if ctxerr := ctx.Err(); ctxerr != nil {


@ -3,8 +3,6 @@ package internal
import (
"context"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -13,33 +11,35 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func repoCloneProgress(reposDir string, locker RepositoryLocker, repo api.RepoName) *protocol.RepoCloneProgress {
dir := gitserverfs.RepoDirFromName(reposDir, repo)
resp := protocol.RepoCloneProgress{
Cloned: repoCloned(dir),
func repoCloneProgress(fs gitserverfs.FS, locker RepositoryLocker, repo api.RepoName) (*protocol.RepoCloneProgress, error) {
cloned, err := fs.RepoCloned(repo)
if err != nil {
return nil, errors.Wrap(err, "determine clone status")
}
resp.CloneProgress, resp.CloneInProgress = locker.Status(dir)
resp := protocol.RepoCloneProgress{
Cloned: cloned,
}
resp.CloneProgress, resp.CloneInProgress = locker.Status(repo)
if isAlwaysCloningTest(repo) {
resp.CloneInProgress = true
resp.CloneProgress = "This will never finish cloning"
}
return &resp
return &resp, nil
}
func deleteRepo(
ctx context.Context,
logger log.Logger,
db database.DB,
shardID string,
reposDir string,
fs gitserverfs.FS,
repo api.RepoName,
) error {
// The repo may be deleted in the database, in this case we need to get the
// original name in order to find it on disk
err := gitserverfs.RemoveRepoDirectory(ctx, logger, db, shardID, reposDir, gitserverfs.RepoDirFromName(reposDir, api.UndeletedRepoName(repo)), true)
err := fs.RemoveRepo(repo)
if err != nil {
return errors.Wrap(err, "removing repo directory")
}
err = db.GitserverRepos().SetCloneStatus(ctx, repo, types.CloneStatusNotCloned, shardID)
if err != nil {
return errors.Wrap(err, "setting clone status after delete")


@ -8,7 +8,6 @@ import (
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
@ -63,7 +62,8 @@ func testDeleteRepo(t *testing.T, deletedInDB bool) {
Repo: repoName,
})
size := gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.reposDir, repoName).Path("."))
size, err := s.fs.DirSize(string(s.fs.RepoDir(repoName)))
require.NoError(t, err)
want := &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
@ -95,9 +95,10 @@ func testDeleteRepo(t *testing.T, deletedInDB bool) {
}
// Now we can delete it
require.NoError(t, deleteRepo(ctx, logger, db, "", reposDir, dbRepo.Name))
require.NoError(t, deleteRepo(ctx, db, "", s.fs, dbRepo.Name))
size = gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.reposDir, repoName).Path("."))
size, err = s.fs.DirSize(string(s.fs.RepoDir(repoName)))
require.NoError(t, err)
if size != 0 {
t.Fatalf("Size should be 0, got %d", size)
}

View File

@ -12,7 +12,6 @@ import (
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git/gitcli"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/search"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/conf"
@ -84,7 +83,6 @@ func (s *Server) SearchWithObservability(ctx context.Context, tr trace.Trace, ar
// search handles the core logic of the search. It is passed a matchesBuf so it doesn't need to
// concern itself with event types, and all instrumentation is handled in the calling function.
func (s *Server) search(ctx context.Context, args *protocol.SearchRequest, onMatch func(*protocol.CommitMatch) error) (limitHit bool, err error) {
args.Repo = protocol.NormalizeRepo(args.Repo)
if args.Limit == 0 {
args.Limit = math.MaxInt32
}
@ -122,7 +120,7 @@ func (s *Server) search(ctx context.Context, args *protocol.SearchRequest, onMat
searcher := &search.CommitSearcher{
Logger: s.logger,
RepoName: args.Repo,
RepoDir: gitserverfs.RepoDirFromName(s.reposDir, args.Repo).Path(),
RepoDir: string(s.fs.RepoDir(args.Repo)),
Revisions: args.Revisions,
Query: mt,
IncludeDiff: args.IncludeDiff,

View File

@ -92,8 +92,9 @@ type ServerOpts struct {
// Logger should be used for all logging and logger creation.
Logger log.Logger
// ReposDir is the path to the base directory for gitserver storage.
ReposDir string
// FS is the file system to use for the gitserver. It allows finding repos by
// name on disk and mapping a dir on disk back to a repo name.
FS gitserverfs.FS
// GetBackendFunc is a function which returns the git backend for a
// repository.
@ -160,7 +161,6 @@ func NewServer(opt *ServerOpts) *Server {
return &Server{
logger: opt.Logger,
reposDir: opt.ReposDir,
getBackendFunc: opt.GetBackendFunc,
getRemoteURLFunc: opt.GetRemoteURLFunc,
getVCSSyncer: opt.GetVCSSyncer,
@ -171,6 +171,7 @@ func NewServer(opt *ServerOpts) *Server {
rpsLimiter: opt.RPSLimiter,
recordingCommandFactory: opt.RecordingCommandFactory,
perforce: opt.Perforce,
fs: opt.FS,
repoUpdateLocks: make(map[api.RepoName]*locks),
cloneLimiter: cloneLimiter,
@ -184,8 +185,9 @@ type Server struct {
// logger should be used for all logging and logger creation.
logger log.Logger
// reposDir is the path to the base directory for gitserver storage.
reposDir string
// fs is the file system to use for the gitserver. It allows finding repos by
// name on disk and mapping a dir on disk back to a repo name.
fs gitserverfs.FS
// getBackendFunc is a function which returns the git backend for a
// repository.
@ -218,9 +220,6 @@ type Server struct {
// locker is used to lock repositories while fetching to prevent concurrent work.
locker RepositoryLocker
// skipCloneForTests is set by tests to avoid clones.
skipCloneForTests bool
// ctx is the context we use for all background jobs. It is done when the
// server is stopped. Do not directly call this, rather call
// Server.context()
@ -386,12 +385,6 @@ func (p *clonePipelineRoutine) cloneJobConsumer(ctx context.Context, tasks <-cha
}
}
// repoCloned checks if dir or `${dir}/.git` is a valid GIT_DIR.
var repoCloned = func(dir common.GitDir) bool {
_, err := os.Stat(dir.Path("HEAD"))
return !os.IsNotExist(err)
}
// Stop cancels the running background jobs and returns when done.
func (s *Server) Stop() {
// idempotent so we can just always set and cancel
@ -460,8 +453,13 @@ func (s *Server) IsRepoCloneable(ctx context.Context, repo api.RepoName) (protoc
return protocol.IsRepoCloneableResponse{}, errors.Wrap(err, "GetVCSSyncer")
}
cloned, err := s.fs.RepoCloned(repo)
if err != nil {
return protocol.IsRepoCloneableResponse{}, errors.Wrap(err, "determine if repo is cloned")
}
resp := protocol.IsRepoCloneableResponse{
Cloned: repoCloned(gitserverfs.RepoDirFromName(s.reposDir, repo)),
Cloned: cloned,
}
err = syncer.IsCloneable(ctx, repo)
if err != nil {
@ -478,13 +476,19 @@ func (s *Server) IsRepoCloneable(ctx context.Context, repo api.RepoName) (protoc
// This function will not return until the update is complete.
// Canceling the context will not cancel the update, but it will let the caller
// escape the function early.
func (s *Server) RepoUpdate(ctx context.Context, req *protocol.RepoUpdateRequest) (resp protocol.RepoUpdateResponse) {
func (s *Server) RepoUpdate(ctx context.Context, req *protocol.RepoUpdateRequest) protocol.RepoUpdateResponse {
logger := s.logger.Scoped("handleRepoUpdate")
req.Repo = protocol.NormalizeRepo(req.Repo)
dir := gitserverfs.RepoDirFromName(s.reposDir, req.Repo)
var resp protocol.RepoUpdateResponse
dir := s.fs.RepoDir(req.Repo)
if !repoCloned(dir) {
cloned, err := s.fs.RepoCloned(req.Repo)
if err != nil {
resp.Error = errors.Wrap(err, "determining cloned status").Error()
return resp
}
if !cloned {
_, cloneErr := s.CloneRepo(ctx, req.Repo, CloneOptions{Block: true})
if cloneErr != nil {
logger.Warn("error cloning repo", log.String("repo", string(req.Repo)), log.Error(cloneErr))
@ -610,11 +614,9 @@ func (s *Server) CloneRepo(ctx context.Context, repo api.RepoName, opts CloneOpt
return "This will never finish cloning", nil
}
dir := gitserverfs.RepoDirFromName(s.reposDir, repo)
// PERF: Before doing the network request to check if isCloneable, lets
// ensure we are not already cloning.
if progress, cloneInProgress := s.locker.Status(dir); cloneInProgress {
if progress, cloneInProgress := s.locker.Status(repo); cloneInProgress {
return progress, nil
}
@ -662,17 +664,14 @@ func (s *Server) CloneRepo(ctx context.Context, repo api.RepoName, opts CloneOpt
// Mark this repo as currently being cloned. We have to check again whether someone else is already
// cloning, since we released the lock. We released the lock because isCloneable is a potentially
// slow operation.
lock, ok := s.locker.TryAcquire(dir, "starting clone")
lock, ok := s.locker.TryAcquire(repo, "starting clone")
if !ok {
// Someone else beat us to it
status, _ := s.locker.Status(dir)
status, _ := s.locker.Status(repo)
return status, nil
}
if s.skipCloneForTests {
lock.Release()
return "", nil
}
dir := s.fs.RepoDir(repo)
if opts.Block {
// Use serverCtx here since we want to let the clone proceed, even if
@ -759,22 +758,35 @@ func (s *Server) doClone(
// We clone to a temporary location first to avoid having incomplete
// clones in the repo tree. This also avoids leaving behind corrupt clones
// if the clone is interrupted.
tmpDir, err := gitserverfs.TempDir(s.reposDir, "clone-")
tmpDir, err := s.fs.TempDir("clone-")
if err != nil {
return err
}
defer os.RemoveAll(tmpDir)
tmpPath := filepath.Join(tmpDir, ".git")
cloned, err := s.fs.RepoCloned(repo)
if err != nil {
return errors.Wrap(err, "checking if repo is cloned")
}
// It may already be cloned
if !repoCloned(dir) {
if !cloned {
if err := s.db.GitserverRepos().SetCloneStatus(ctx, repo, types.CloneStatusCloning, s.hostname); err != nil {
s.logger.Error("Setting clone status in DB", log.Error(err))
}
}
defer func() {
// Use a background context to ensure we still update the DB even if we time out
if err := s.db.GitserverRepos().SetCloneStatus(context.Background(), repo, cloneStatus(repoCloned(dir), false), s.hostname); err != nil {
cloned, err := s.fs.RepoCloned(repo)
if err != nil {
s.logger.Error("failed to check if repo is cloned", log.Error(err))
} else if err := s.db.GitserverRepos().SetCloneStatus(
// Use a background context to ensure we still update the DB even if we time out
context.Background(),
repo,
cloneStatus(cloned, false),
s.hostname,
); err != nil {
s.logger.Error("Setting clone status in DB", log.Error(err))
}
}()
@ -815,7 +827,7 @@ func (s *Server) doClone(
return errors.Wrapf(cloneErr, "clone failed. Output: %s", output.String())
}
if err := postRepoFetchActions(ctx, logger, s.db, s.getBackendFunc(common.GitDir(tmpPath), repo), s.hostname, repo, common.GitDir(tmpPath), syncer); err != nil {
if err := postRepoFetchActions(ctx, logger, s.fs, s.db, s.getBackendFunc(common.GitDir(tmpPath), repo), s.hostname, repo, common.GitDir(tmpPath), syncer); err != nil {
return err
}
@ -904,6 +916,7 @@ func (w *linebasedBufferedWriter) Bytes() []byte {
func postRepoFetchActions(
ctx context.Context,
logger log.Logger,
fs gitserverfs.FS,
db database.DB,
backend git.GitBackend,
shardID string,
@ -942,8 +955,10 @@ func postRepoFetchActions(
}
// Successfully updated, best-effort calculation of the repo size.
repoSizeBytes := gitserverfs.DirSize(dir.Path("."))
if err := db.GitserverRepos().SetRepoSize(ctx, repo, repoSizeBytes, shardID); err != nil {
repoSizeBytes, err := fs.DirSize(dir.Path())
if err != nil {
errs = errors.Append(errs, errors.Wrap(err, "failed to calculate repo size"))
} else if err := db.GitserverRepos().SetRepoSize(ctx, repo, repoSizeBytes, shardID); err != nil {
errs = errors.Append(errs, errors.Wrap(err, "failed to set repo size"))
}
@ -1168,8 +1183,7 @@ func (s *Server) doBackgroundRepoUpdate(repo api.RepoName, revspec string) error
return err
}
repo = protocol.NormalizeRepo(repo)
dir := gitserverfs.RepoDirFromName(s.reposDir, repo)
dir := s.fs.RepoDir(repo)
remoteURL, err := s.getRemoteURL(ctx, repo)
if err != nil {
@ -1208,7 +1222,7 @@ func (s *Server) doBackgroundRepoUpdate(repo api.RepoName, revspec string) error
}
}
return postRepoFetchActions(ctx, logger, s.db, s.getBackendFunc(dir, repo), s.hostname, repo, dir, syncer)
return postRepoFetchActions(ctx, logger, s.fs, s.db, s.getBackendFunc(dir, repo), s.hostname, repo, dir, syncer)
}(ctx)
if errors.Is(err, context.DeadlineExceeded) {
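For orientation: the FS abstraction that this commit threads through the server is defined in the gitserverfs package, which is not part of this excerpt. Below is a rough sketch of its surface reconstructed from the call sites in this diff; the method set comes from usages shown here, but the exact signatures, return types, and the DiskUsage stand-in are assumptions, not the verbatim definition.

package gitserverfs

import (
	"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
	"github.com/sourcegraph/sourcegraph/internal/api"
)

// DiskUsage is a stand-in for the usage type returned by the real package;
// its method set is inferred from the DiskInfo handler in this diff, and the
// numeric return types are assumptions.
type DiskUsage interface {
	Size() uint64
	Free() uint64
	PercentUsed() float32
}

// FS sketches the filesystem abstraction introduced by this commit,
// reconstructed from call sites in the diff. Signatures are assumptions.
type FS interface {
	// Initialize prepares the on-disk layout; tests call it right after New.
	Initialize() error
	// RepoDir maps a repo name to its git directory on disk.
	RepoDir(name api.RepoName) common.GitDir
	// RepoCloned reports whether the repo already exists on disk.
	RepoCloned(name api.RepoName) (bool, error)
	// RemoveRepo deletes the repo's directory from disk.
	RemoveRepo(name api.RepoName) error
	// TempDir creates a gitserver-managed temporary directory with the given prefix.
	TempDir(prefix string) (string, error)
	// DirSize returns the size in bytes of the directory at the given path.
	DirSize(path string) (int64, error)
	// P4HomeDir returns (creating it if needed) the P4HOME directory.
	P4HomeDir() (string, error)
	// DiskUsage reports usage of the mount backing the repos directory.
	DiskUsage() (DiskUsage, error)
}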

View File

@ -35,7 +35,7 @@ import (
type service interface {
CreateCommitFromPatch(ctx context.Context, req protocol.CreateCommitFromPatchRequest, patchReader io.Reader) protocol.CreateCommitFromPatchResponse
LogIfCorrupt(context.Context, api.RepoName, error)
MaybeStartClone(ctx context.Context, repo api.RepoName) (notFound *protocol.NotFoundPayload, cloned bool)
MaybeStartClone(ctx context.Context, repo api.RepoName) (cloned bool, status CloneStatus, _ error)
IsRepoCloneable(ctx context.Context, repo api.RepoName) (protocol.IsRepoCloneableResponse, error)
RepoUpdate(ctx context.Context, req *protocol.RepoUpdateRequest) protocol.RepoUpdateResponse
CloneRepo(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error)
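MaybeStartClone now returns a CloneStatus value in place of the previous *protocol.NotFoundPayload. The type definition is not included in this excerpt; judging from the test setups and the maybeStartClone handler further down, it is presumably a small value type along these lines (a sketch, not the verbatim definition):

// CloneStatus is a sketch inferred from its usages in this diff.
type CloneStatus struct {
	CloneInProgress bool   // whether a clone is currently running
	CloneProgress   string // human-readable progress, e.g. "cloning"
}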
@ -46,24 +46,24 @@ type service interface {
func NewGRPCServer(server *Server) proto.GitserverServiceServer {
return &grpcServer{
logger: server.logger,
reposDir: server.reposDir,
db: server.db,
hostname: server.hostname,
subRepoChecker: authz.DefaultSubRepoPermsChecker,
locker: server.locker,
getBackendFunc: server.getBackendFunc,
svc: server,
fs: server.fs,
}
}
type grpcServer struct {
logger log.Logger
reposDir string
db database.DB
hostname string
subRepoChecker authz.SubRepoPermissionChecker
locker RepositoryLocker
getBackendFunc Backender
fs gitserverfs.FS
svc service
@ -113,7 +113,16 @@ func (gs *grpcServer) CreateCommitFromPatchBinary(s proto.GitserverService_Creat
}
func (gs *grpcServer) DiskInfo(_ context.Context, _ *proto.DiskInfoRequest) (*proto.DiskInfoResponse, error) {
return getDiskInfo(gs.reposDir)
usage, err := gs.fs.DiskUsage()
if err != nil {
return nil, err
}
return &proto.DiskInfoResponse{
TotalSpace: usage.Size(),
FreeSpace: usage.Free(),
PercentUsed: usage.PercentUsed(),
}, nil
}
func (gs *grpcServer) Exec(req *proto.ExecRequest, ss proto.GitserverService_ExecServer) error {
@ -136,7 +145,7 @@ func (gs *grpcServer) Exec(req *proto.ExecRequest, ss proto.GitserverService_Exe
}
repoName := api.RepoName(req.GetRepo())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
backend := gs.getBackendFunc(repoDir, repoName)
if err := gs.maybeStartClone(ctx, repoName); err != nil {
@ -255,7 +264,7 @@ func (gs *grpcServer) Archive(req *proto.ArchiveRequest, ss proto.GitserverServi
)
repoName := api.RepoName(req.GetRepo())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
if err := gs.maybeStartClone(ss.Context(), repoName); err != nil {
return err
@ -331,7 +340,7 @@ func (gs *grpcServer) Archive(req *proto.ArchiveRequest, ss proto.GitserverServi
func (gs *grpcServer) GetObject(ctx context.Context, req *proto.GetObjectRequest) (*proto.GetObjectResponse, error) {
repoName := api.RepoName(req.GetRepo())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
// Log which actor is accessing the repo.
accesslog.Record(ctx, string(repoName), log.String("objectname", req.GetObjectName()))
@ -407,10 +416,14 @@ func (gs *grpcServer) Search(req *proto.SearchRequest, ss proto.GitserverService
})
}
func (gs *grpcServer) RepoClone(ctx context.Context, in *proto.RepoCloneRequest) (*proto.RepoCloneResponse, error) {
repo := protocol.NormalizeRepo(api.RepoName(in.GetRepo()))
func (gs *grpcServer) RepoClone(ctx context.Context, req *proto.RepoCloneRequest) (*proto.RepoCloneResponse, error) {
if req.GetRepo() == "" {
return nil, status.New(codes.InvalidArgument, "repo must be specified").Err()
}
if _, err := gs.svc.CloneRepo(ctx, repo, CloneOptions{Block: false}); err != nil {
repoName := api.RepoName(req.GetRepo())
if _, err := gs.svc.CloneRepo(ctx, repoName, CloneOptions{Block: false}); err != nil {
return &proto.RepoCloneResponse{Error: err.Error()}, nil
}
@ -424,19 +437,26 @@ func (gs *grpcServer) RepoCloneProgress(_ context.Context, req *proto.RepoCloneP
repoName := api.RepoName(req.GetRepoName())
progress := repoCloneProgress(gs.reposDir, gs.locker, repoName)
progress, err := repoCloneProgress(gs.fs, gs.locker, repoName)
if err != nil {
return nil, err
}
return progress.ToProto(), nil
}
func (gs *grpcServer) RepoDelete(ctx context.Context, req *proto.RepoDeleteRequest) (*proto.RepoDeleteResponse, error) {
repo := req.GetRepo()
if err := deleteRepo(ctx, gs.logger, gs.db, gs.hostname, gs.reposDir, api.RepoName(repo)); err != nil {
gs.logger.Error("failed to delete repository", log.String("repo", repo), log.Error(err))
return &proto.RepoDeleteResponse{}, status.Errorf(codes.Internal, "failed to delete repository %s: %s", repo, err)
if req.GetRepo() == "" {
return nil, status.New(codes.InvalidArgument, "repo must be specified").Err()
}
gs.logger.Info("deleted repository", log.String("repo", repo))
repoName := api.RepoName(req.GetRepo())
if err := deleteRepo(ctx, gs.db, gs.hostname, gs.fs, repoName); err != nil {
gs.logger.Error("failed to delete repository", log.String("repo", string(repoName)), log.Error(err))
return &proto.RepoDeleteResponse{}, status.Errorf(codes.Internal, "failed to delete repository %s: %s", repoName, err)
}
gs.logger.Info("deleted repository", log.String("repo", string(repoName)))
return &proto.RepoDeleteResponse{}, nil
}
@ -465,20 +485,12 @@ func (gs *grpcServer) IsRepoCloneable(ctx context.Context, req *proto.IsRepoClon
}
func (gs *grpcServer) IsPerforcePathCloneable(ctx context.Context, req *proto.IsPerforcePathCloneableRequest) (*proto.IsPerforcePathCloneableResponse, error) {
if req.DepotPath == "" {
if req.GetDepotPath() == "" {
return nil, status.Error(codes.InvalidArgument, "no DepotPath given")
}
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.IsDepotPathCloneable(ctx, perforce.IsDepotPathCloneableArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.IsDepotPathCloneable(ctx, gs.fs, perforce.IsDepotPathCloneableArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -493,16 +505,8 @@ func (gs *grpcServer) IsPerforcePathCloneable(ctx context.Context, req *proto.Is
}
func (gs *grpcServer) CheckPerforceCredentials(ctx context.Context, req *proto.CheckPerforceCredentialsRequest) (*proto.CheckPerforceCredentialsResponse, error) {
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.P4TestWithTrust(ctx, gs.fs, perforce.P4TestWithTrustArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -519,16 +523,8 @@ func (gs *grpcServer) CheckPerforceCredentials(ctx context.Context, req *proto.C
}
func (gs *grpcServer) PerforceUsers(ctx context.Context, req *proto.PerforceUsersRequest) (*proto.PerforceUsersResponse, error) {
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.P4TestWithTrust(ctx, gs.fs, perforce.P4TestWithTrustArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -548,10 +544,7 @@ func (gs *grpcServer) PerforceUsers(ctx context.Context, req *proto.PerforceUser
log.String("p4port", conn.GetP4Port()),
)
users, err := perforce.P4Users(ctx, perforce.P4UsersArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
users, err := perforce.P4Users(ctx, gs.fs, perforce.P4UsersArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -572,16 +565,8 @@ func (gs *grpcServer) PerforceUsers(ctx context.Context, req *proto.PerforceUser
}
func (gs *grpcServer) PerforceProtectsForUser(ctx context.Context, req *proto.PerforceProtectsForUserRequest) (*proto.PerforceProtectsForUserResponse, error) {
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.P4TestWithTrust(ctx, gs.fs, perforce.P4TestWithTrustArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -602,16 +587,13 @@ func (gs *grpcServer) PerforceProtectsForUser(ctx context.Context, req *proto.Pe
)
args := perforce.P4ProtectsForUserArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
Username: req.GetUsername(),
}
protects, err := perforce.P4ProtectsForUser(ctx, args)
protects, err := perforce.P4ProtectsForUser(ctx, gs.fs, args)
if err != nil {
return nil, err
}
@ -627,16 +609,8 @@ func (gs *grpcServer) PerforceProtectsForUser(ctx context.Context, req *proto.Pe
}
func (gs *grpcServer) PerforceProtectsForDepot(ctx context.Context, req *proto.PerforceProtectsForDepotRequest) (*proto.PerforceProtectsForDepotResponse, error) {
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.P4TestWithTrust(ctx, gs.fs, perforce.P4TestWithTrustArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -656,9 +630,7 @@ func (gs *grpcServer) PerforceProtectsForDepot(ctx context.Context, req *proto.P
log.String("p4port", conn.GetP4Port()),
)
protects, err := perforce.P4ProtectsForDepot(ctx, perforce.P4ProtectsForDepotArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
protects, err := perforce.P4ProtectsForDepot(ctx, gs.fs, perforce.P4ProtectsForDepotArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -679,16 +651,8 @@ func (gs *grpcServer) PerforceProtectsForDepot(ctx context.Context, req *proto.P
}
func (gs *grpcServer) PerforceGroupMembers(ctx context.Context, req *proto.PerforceGroupMembersRequest) (*proto.PerforceGroupMembersResponse, error) {
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.P4TestWithTrust(ctx, gs.fs, perforce.P4TestWithTrustArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -709,9 +673,6 @@ func (gs *grpcServer) PerforceGroupMembers(ctx context.Context, req *proto.Perfo
)
args := perforce.P4GroupMembersArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -719,7 +680,7 @@ func (gs *grpcServer) PerforceGroupMembers(ctx context.Context, req *proto.Perfo
Group: req.GetGroup(),
}
members, err := perforce.P4GroupMembers(ctx, args)
members, err := perforce.P4GroupMembers(ctx, gs.fs, args)
if err != nil {
return nil, err
}
@ -730,16 +691,8 @@ func (gs *grpcServer) PerforceGroupMembers(ctx context.Context, req *proto.Perfo
}
func (gs *grpcServer) IsPerforceSuperUser(ctx context.Context, req *proto.IsPerforceSuperUserRequest) (*proto.IsPerforceSuperUserResponse, error) {
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.P4TestWithTrust(ctx, gs.fs, perforce.P4TestWithTrustArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -752,9 +705,7 @@ func (gs *grpcServer) IsPerforceSuperUser(ctx context.Context, req *proto.IsPerf
return nil, status.Error(codes.InvalidArgument, err.Error())
}
err = perforce.P4UserIsSuperUser(ctx, perforce.P4UserIsSuperUserArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err = perforce.P4UserIsSuperUser(ctx, gs.fs, perforce.P4UserIsSuperUserArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -775,16 +726,8 @@ func (gs *grpcServer) IsPerforceSuperUser(ctx context.Context, req *proto.IsPerf
}
func (gs *grpcServer) PerforceGetChangelist(ctx context.Context, req *proto.PerforceGetChangelistRequest) (*proto.PerforceGetChangelistResponse, error) {
p4home, err := gitserverfs.MakeP4HomeDir(gs.reposDir)
if err != nil {
return nil, status.Error(codes.Internal, err.Error())
}
conn := req.GetConnectionDetails()
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
err := perforce.P4TestWithTrust(ctx, gs.fs, perforce.P4TestWithTrustArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -804,11 +747,7 @@ func (gs *grpcServer) PerforceGetChangelist(ctx context.Context, req *proto.Perf
log.String("p4port", conn.GetP4Port()),
)
changelist, err := perforce.GetChangelistByID(ctx, perforce.GetChangeListByIDArguments{
ReposDir: gs.reposDir,
P4Home: p4home,
changelist, err := perforce.GetChangelistByID(ctx, gs.fs, perforce.GetChangeListByIDArguments{
P4Port: conn.GetP4Port(),
P4User: conn.GetP4User(),
P4Passwd: conn.GetP4Passwd(),
@ -853,7 +792,7 @@ func (gs *grpcServer) MergeBase(ctx context.Context, req *proto.MergeBaseRequest
}
repoName := api.RepoName(req.GetRepoName())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
if err := gs.maybeStartClone(ctx, repoName); err != nil {
return nil, err
@ -901,7 +840,7 @@ func (gs *grpcServer) GetCommit(ctx context.Context, req *proto.GetCommitRequest
}
repoName := api.RepoName(req.GetRepoName())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
if err := gs.maybeStartClone(ctx, repoName); err != nil {
return nil, err
@ -976,7 +915,7 @@ func (gs *grpcServer) Blame(req *proto.BlameRequest, ss proto.GitserverService_B
}
repoName := api.RepoName(req.GetRepoName())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
if err := gs.maybeStartClone(ctx, repoName); err != nil {
return err
@ -1075,7 +1014,7 @@ func (gs *grpcServer) DefaultBranch(ctx context.Context, req *proto.DefaultBranc
}
repoName := api.RepoName(req.GetRepoName())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
if err := gs.maybeStartClone(ctx, repoName); err != nil {
return nil, err
@ -1136,7 +1075,7 @@ func (gs *grpcServer) ReadFile(req *proto.ReadFileRequest, ss proto.GitserverSer
}
repoName := api.RepoName(req.GetRepoName())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
if err := gs.maybeStartClone(ctx, repoName); err != nil {
return err
@ -1215,7 +1154,7 @@ func (gs *grpcServer) ResolveRevision(ctx context.Context, req *proto.ResolveRev
}
repoName := api.RepoName(req.GetRepoName())
repoDir := gitserverfs.RepoDirFromName(gs.reposDir, repoName)
repoDir := gs.fs.RepoDir(repoName)
if err := gs.maybeStartClone(ctx, repoName); err != nil {
return nil, err
@ -1262,21 +1201,16 @@ func (gs *grpcServer) ResolveRevision(ctx context.Context, req *proto.ResolveRev
}
func (gs *grpcServer) maybeStartClone(ctx context.Context, repo api.RepoName) error {
// Ensure that the repo is cloned and, if not, start a background clone, then
// return a well-known NotFound payload error.
if notFoundPayload, cloned := gs.svc.MaybeStartClone(ctx, repo); !cloned {
s, err := status.New(codes.NotFound, "repo not found").WithDetails(&proto.RepoNotFoundPayload{
CloneInProgress: notFoundPayload.CloneInProgress,
CloneProgress: notFoundPayload.CloneProgress,
Repo: string(repo),
})
if err != nil {
return err
}
return s.Err()
cloned, state, err := gs.svc.MaybeStartClone(ctx, repo)
if err != nil {
return status.New(codes.Internal, "failed to check if repo is cloned").Err()
}
return nil
if cloned {
return nil
}
return newRepoNotFoundError(repo, state.CloneInProgress, state.CloneProgress)
}
func hasAccessToCommit(ctx context.Context, repoName api.RepoName, files []string, checker authz.SubRepoPermissionChecker) (bool, error) {
@ -1301,3 +1235,15 @@ func hasAccessToCommit(ctx context.Context, repoName api.RepoName, files []strin
}
return false, nil
}
func newRepoNotFoundError(repo api.RepoName, cloneInProgress bool, cloneProgress string) error {
s, err := status.New(codes.NotFound, "repo not found").WithDetails(&proto.RepoNotFoundPayload{
CloneInProgress: cloneInProgress,
CloneProgress: cloneProgress,
Repo: string(repo),
})
if err != nil {
return err
}
return s.Err()
}
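
newRepoNotFoundError attaches a RepoNotFoundPayload to a NotFound gRPC status. As a usage illustration only, a client could read that payload back roughly as sketched below; describeRepoNotFound is a hypothetical helper and not part of this change.

package example

import (
	"fmt"

	proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// describeRepoNotFound shows how the payload attached by newRepoNotFoundError
// could be extracted from an error returned by a gitserver RPC.
func describeRepoNotFound(err error) string {
	st, ok := status.FromError(err)
	if !ok || st.Code() != codes.NotFound {
		return ""
	}
	for _, d := range st.Details() {
		if p, ok := d.(*proto.RepoNotFoundPayload); ok {
			return fmt.Sprintf("repo %s not found (clone in progress: %v, progress: %q)",
				p.Repo, p.CloneInProgress, p.CloneProgress)
		}
	}
	return st.Message()
}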

View File

@ -26,12 +26,12 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/authz"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
v1 "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
@ -58,8 +58,8 @@ func TestGRPCServer_Blame(t *testing.T) {
})
t.Run("checks for uncloned repo", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(&protocol.NotFoundPayload{CloneInProgress: true, CloneProgress: "cloning"}, false)
gs := &grpcServer{svc: svc}
svc.MaybeStartCloneFunc.SetDefaultReturn(false, CloneStatus{CloneInProgress: true, CloneProgress: "cloning"}, nil)
gs := &grpcServer{svc: svc, fs: gitserverfs.NewMockFS()}
err := gs.Blame(&v1.BlameRequest{RepoName: "therepo", Commit: "deadbeef", Path: "thepath"}, mockSS)
require.Error(t, err)
assertGRPCStatusCode(t, err, codes.NotFound)
@ -71,10 +71,11 @@ func TestGRPCServer_Blame(t *testing.T) {
srp := authz.NewMockSubRepoPermissionChecker()
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
b := git.NewMockGitBackend()
hr := git.NewMockBlameHunkReader()
@ -116,7 +117,7 @@ func TestGRPCServer_Blame(t *testing.T) {
srp.EnabledFunc.SetDefaultReturn(false)
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
hr := git.NewMockBlameHunkReader()
hr.ReadFunc.PushReturn(&gitdomain.Hunk{CommitID: "deadbeef"}, nil)
@ -125,6 +126,7 @@ func TestGRPCServer_Blame(t *testing.T) {
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},
@ -197,8 +199,8 @@ func TestGRPCServer_DefaultBranch(t *testing.T) {
})
t.Run("checks for uncloned repo", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(&protocol.NotFoundPayload{CloneInProgress: true, CloneProgress: "cloning"}, false)
gs := &grpcServer{svc: svc}
svc.MaybeStartCloneFunc.SetDefaultReturn(false, CloneStatus{CloneInProgress: true, CloneProgress: "cloning"}, nil)
gs := &grpcServer{svc: svc, fs: gitserverfs.NewMockFS()}
_, err := gs.DefaultBranch(ctx, &v1.DefaultBranchRequest{RepoName: "therepo"})
require.Error(t, err)
assertGRPCStatusCode(t, err, codes.NotFound)
@ -209,12 +211,13 @@ func TestGRPCServer_DefaultBranch(t *testing.T) {
t.Run("e2e", func(t *testing.T) {
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
b.SymbolicRefHeadFunc.SetDefaultReturn("refs/heads/main", nil)
b.RevParseHeadFunc.SetDefaultReturn("deadbeef", nil)
gs := &grpcServer{
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},
@ -259,8 +262,8 @@ func TestGRPCServer_MergeBase(t *testing.T) {
})
t.Run("checks for uncloned repo", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(&protocol.NotFoundPayload{CloneInProgress: true, CloneProgress: "cloning"}, false)
gs := &grpcServer{svc: svc}
svc.MaybeStartCloneFunc.SetDefaultReturn(false, CloneStatus{CloneInProgress: true, CloneProgress: "cloning"}, nil)
gs := &grpcServer{svc: svc, fs: gitserverfs.NewMockFS()}
_, err := gs.MergeBase(ctx, &v1.MergeBaseRequest{RepoName: "therepo", Base: []byte("master"), Head: []byte("b2")})
require.Error(t, err)
assertGRPCStatusCode(t, err, codes.NotFound)
@ -270,9 +273,10 @@ func TestGRPCServer_MergeBase(t *testing.T) {
})
t.Run("revision not found", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
gs := &grpcServer{
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
b := git.NewMockGitBackend()
b.MergeBaseFunc.SetDefaultReturn("", &gitdomain.RevisionNotFoundError{Repo: "therepo", Spec: "b2"})
@ -288,11 +292,12 @@ func TestGRPCServer_MergeBase(t *testing.T) {
t.Run("e2e", func(t *testing.T) {
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
b.MergeBaseFunc.SetDefaultReturn("deadbeef", nil)
gs := &grpcServer{
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},
@ -332,8 +337,8 @@ func TestGRPCServer_ReadFile(t *testing.T) {
})
t.Run("checks for uncloned repo", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(&protocol.NotFoundPayload{CloneInProgress: true, CloneProgress: "cloning"}, false)
gs := &grpcServer{svc: svc}
svc.MaybeStartCloneFunc.SetDefaultReturn(false, CloneStatus{CloneInProgress: true, CloneProgress: "cloning"}, nil)
gs := &grpcServer{svc: svc, fs: gitserverfs.NewMockFS()}
err := gs.ReadFile(&v1.ReadFileRequest{RepoName: "therepo", Commit: "deadbeef", Path: "thepath"}, mockSS)
require.Error(t, err)
assertGRPCStatusCode(t, err, codes.NotFound)
@ -345,10 +350,11 @@ func TestGRPCServer_ReadFile(t *testing.T) {
srp := authz.NewMockSubRepoPermissionChecker()
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
b := git.NewMockGitBackend()
b.ReadFileFunc.SetDefaultReturn(io.NopCloser(bytes.NewReader([]byte("filecontent"))), nil)
@ -388,12 +394,13 @@ func TestGRPCServer_ReadFile(t *testing.T) {
srp.EnabledFunc.SetDefaultReturn(false)
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
b.ReadFileFunc.SetDefaultReturn(io.NopCloser(bytes.NewReader([]byte("filecontent"))), nil)
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},
@ -468,8 +475,8 @@ func TestGRPCServer_Archive(t *testing.T) {
})
t.Run("checks for uncloned repo", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(&protocol.NotFoundPayload{CloneInProgress: true, CloneProgress: "cloning"}, false)
gs := &grpcServer{svc: svc}
svc.MaybeStartCloneFunc.SetDefaultReturn(false, CloneStatus{CloneInProgress: true, CloneProgress: "cloning"}, nil)
gs := &grpcServer{svc: svc, fs: gitserverfs.NewMockFS()}
err := gs.Archive(&v1.ArchiveRequest{Repo: "therepo", Treeish: "HEAD", Format: proto.ArchiveFormat_ARCHIVE_FORMAT_ZIP}, mockSS)
require.Error(t, err)
assertGRPCStatusCode(t, err, codes.NotFound)
@ -481,10 +488,11 @@ func TestGRPCServer_Archive(t *testing.T) {
srp := authz.NewMockSubRepoPermissionChecker()
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
b := git.NewMockGitBackend()
b.ArchiveReaderFunc.SetDefaultReturn(io.NopCloser(bytes.NewReader([]byte("filecontent"))), nil)
@ -524,12 +532,13 @@ func TestGRPCServer_Archive(t *testing.T) {
srp.EnabledForRepoFunc.SetDefaultReturn(false, nil)
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
b.ArchiveReaderFunc.SetDefaultReturn(io.NopCloser(bytes.NewReader([]byte("filecontent"))), nil)
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},
@ -600,8 +609,8 @@ func TestGRPCServer_GetCommit(t *testing.T) {
})
t.Run("checks for uncloned repo", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(&protocol.NotFoundPayload{CloneInProgress: true, CloneProgress: "cloning"}, false)
gs := &grpcServer{svc: svc}
svc.MaybeStartCloneFunc.SetDefaultReturn(false, CloneStatus{CloneInProgress: true, CloneProgress: "cloning"}, nil)
gs := &grpcServer{svc: svc, fs: gitserverfs.NewMockFS()}
_, err := gs.GetCommit(ctx, &v1.GetCommitRequest{RepoName: "therepo", Commit: "deadbeef"})
require.Error(t, err)
assertGRPCStatusCode(t, err, codes.NotFound)
@ -613,11 +622,12 @@ func TestGRPCServer_GetCommit(t *testing.T) {
srp := authz.NewMockSubRepoPermissionChecker()
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},
@ -673,12 +683,13 @@ func TestGRPCServer_GetCommit(t *testing.T) {
srp.EnabledForRepoFunc.SetDefaultReturn(false, nil)
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
b.GetCommitFunc.PushReturn(&git.GitCommitWithFiles{Commit: &gitdomain.Commit{Committer: &gitdomain.Signature{}}}, nil)
gs := &grpcServer{
subRepoChecker: srp,
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},
@ -712,8 +723,8 @@ func TestGRPCServer_ResolveRevision(t *testing.T) {
})
t.Run("checks for uncloned repo", func(t *testing.T) {
svc := NewMockService()
svc.MaybeStartCloneFunc.SetDefaultReturn(&protocol.NotFoundPayload{CloneInProgress: true, CloneProgress: "cloning"}, false)
gs := &grpcServer{svc: svc}
svc.MaybeStartCloneFunc.SetDefaultReturn(false, CloneStatus{CloneInProgress: true, CloneProgress: "cloning"}, nil)
gs := &grpcServer{svc: svc, fs: gitserverfs.NewMockFS()}
_, err := gs.ResolveRevision(ctx, &v1.ResolveRevisionRequest{RepoName: "therepo"})
require.Error(t, err)
assertGRPCStatusCode(t, err, codes.NotFound)
@ -724,11 +735,12 @@ func TestGRPCServer_ResolveRevision(t *testing.T) {
t.Run("e2e", func(t *testing.T) {
svc := NewMockService()
// Repo is cloned, proceed!
svc.MaybeStartCloneFunc.SetDefaultReturn(nil, true)
svc.MaybeStartCloneFunc.SetDefaultReturn(true, CloneStatus{}, nil)
b := git.NewMockGitBackend()
b.ResolveRevisionFunc.SetDefaultReturn("deadbeef", nil)
gs := &grpcServer{
svc: svc,
fs: gitserverfs.NewMockFS(),
getBackendFunc: func(common.GitDir, api.RepoName) git.GitBackend {
return b
},

View File

@ -5,7 +5,6 @@ import (
"container/list"
"context"
"fmt"
"github.com/sourcegraph/sourcegraph/internal/actor"
"io"
"os"
"os/exec"
@ -14,6 +13,8 @@ import (
"testing"
"time"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/assert"
@ -75,32 +76,6 @@ func TestExecRequest(t *testing.T) {
Stderr: "teststderr",
}},
},
{
Name: "NonexistingRepo",
Request: &v1.ExecRequest{
Repo: "github.com/gorilla/doesnotexist",
Args: [][]byte{[]byte("diff")},
},
ExpectedCode: codes.NotFound,
ExpectedError: "repo not found",
ExpectedDetails: []any{&v1.RepoNotFoundPayload{
Repo: "github.com/gorilla/doesnotexist",
CloneInProgress: false,
}},
},
{
Name: "UnclonedRepo",
Request: &v1.ExecRequest{
Repo: "github.com/nicksnyder/go-i18n",
Args: [][]byte{[]byte("diff")},
},
ExpectedCode: codes.NotFound,
ExpectedError: "repo not found",
ExpectedDetails: []any{&v1.RepoNotFoundPayload{
Repo: "github.com/nicksnyder/go-i18n",
CloneInProgress: true,
}},
},
{
Name: "Error",
Request: &v1.ExecRequest{
@ -140,14 +115,15 @@ func TestExecRequest(t *testing.T) {
db := dbmocks.NewMockDB()
gr := dbmocks.NewMockGitserverRepoStore()
db.GitserverReposFunc.SetDefaultReturn(gr)
reposDir := t.TempDir()
fs := gitserverfs.New(&observation.TestContext, t.TempDir())
require.NoError(t, fs.Initialize())
s := NewServer(&ServerOpts{
Logger: logtest.Scoped(t),
ReposDir: reposDir,
Logger: logtest.Scoped(t),
FS: fs,
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
backend := git.NewMockGitBackend()
backend.ExecFunc.SetDefaultHook(func(ctx context.Context, args ...string) (io.ReadCloser, error) {
if !gitcli.IsAllowedGitCmd(logtest.Scoped(t), args, gitserverfs.RepoDirFromName(reposDir, repoName)) {
if !gitcli.IsAllowedGitCmd(logtest.Scoped(t), args, fs.RepoDir(repoName)) {
return nil, gitcli.ErrBadGitCommand
}
@ -200,15 +176,16 @@ func TestExecRequest(t *testing.T) {
RPSLimiter: ratelimit.NewInstrumentedLimiter("GitserverTest", rate.NewLimiter(rate.Inf, 10)),
})
s.skipCloneForTests = true
gs := NewGRPCServer(s)
origRepoCloned := repoCloned
repoCloned = func(dir common.GitDir) bool {
return dir == gitserverfs.RepoDirFromName(reposDir, "github.com/gorilla/mux") || dir == gitserverfs.RepoDirFromName(reposDir, "my-mux")
}
t.Cleanup(func() { repoCloned = origRepoCloned })
svc := NewMockServiceFrom(s)
svc.MaybeStartCloneFunc.SetDefaultHook(func(ctx context.Context, repo api.RepoName) (bool, CloneStatus, error) {
if repo == "github.com/gorilla/mux" || repo == "my-mux" {
return true, CloneStatus{}, nil
}
cloneProgress, err := s.CloneRepo(ctx, repo, CloneOptions{})
return false, CloneStatus{CloneProgress: cloneProgress, CloneInProgress: err != nil}, nil
})
gs.(*grpcServer).svc = svc
vcssyncer.TestGitRepoExists = func(ctx context.Context, repoName api.RepoName) error {
if strings.Contains(string(repoName), "nicksnyder/go-i18n") {
@ -318,9 +295,11 @@ func makeTestServer(ctx context.Context, t *testing.T, repoDir, remote string, d
}
cloneQueue := NewCloneQueue(obctx, list.New())
fs := gitserverfs.New(obctx, repoDir)
require.NoError(t, fs.Initialize())
s := NewServer(&ServerOpts{
Logger: logger,
ReposDir: repoDir,
Logger: logger,
FS: fs,
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
return gitcli.NewBackend(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory(), dir, repoName)
},
@ -401,8 +380,10 @@ func TestCloneRepo(t *testing.T) {
// Verify the gitserver repo entry exists.
assertRepoState(types.CloneStatusNotCloned, 0, nil)
repoDir := gitserverfs.RepoDirFromName(reposDir, repoName)
remoteDir := filepath.Join(reposDir, "remote")
s := makeTestServer(ctx, t, reposDir, remoteDir, db)
repoDir := s.fs.RepoDir(repoName)
if err := os.Mkdir(remoteDir, os.ModePerm); err != nil {
t.Fatal(err)
}
@ -415,8 +396,6 @@ func TestCloneRepo(t *testing.T) {
// Add a bad tag
cmd("git", "tag", "HEAD")
s := makeTestServer(ctx, t, reposDir, remoteDir, db)
// Enqueue repo clone.
_, err := s.CloneRepo(ctx, repoName, CloneOptions{})
require.NoError(t, err)
@ -425,13 +404,14 @@ func TestCloneRepo(t *testing.T) {
// outside of a test. We only know this works since our test only starts
// one clone and will have nothing else attempt to lock.
for range 1000 {
_, cloning := s.locker.Status(repoDir)
_, cloning := s.locker.Status(repoName)
if !cloning {
break
}
time.Sleep(10 * time.Millisecond)
}
wantRepoSize := gitserverfs.DirSize(repoDir.Path("."))
wantRepoSize, err := s.fs.DirSize(string(s.fs.RepoDir(repoName)))
require.NoError(t, err)
assertRepoState(types.CloneStatusCloned, wantRepoSize, err)
cmdExecDir = repoDir.Path(".")
@ -615,7 +595,8 @@ func TestHandleRepoUpdate(t *testing.T) {
Repo: repoName,
})
size := gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.reposDir, repoName).Path("."))
size, err := s.fs.DirSize(string(s.fs.RepoDir(repoName)))
require.NoError(t, err)
want := &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
@ -647,7 +628,8 @@ func TestHandleRepoUpdate(t *testing.T) {
Repo: repoName,
})
size = gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.reposDir, repoName).Path("."))
size, err = s.fs.DirSize(string(s.fs.RepoDir(repoName)))
require.NoError(t, err)
want = &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
@ -701,11 +683,14 @@ func TestHandleRepoUpdate(t *testing.T) {
Repo: repoName,
})
// we compute the new size
wantSize, err := s.fs.DirSize(string(s.fs.RepoDir(repoName)))
require.NoError(t, err)
want = &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
CloneStatus: types.CloneStatusCloned,
RepoSizeBytes: gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.reposDir, repoName).Path(".")), // we compute the new size
RepoSizeBytes: wantSize,
}
fromDB, err = db.GitserverRepos().GetByID(ctx, dbRepo.ID)
if err != nil {
@ -787,7 +772,7 @@ func TestCloneRepo_EnsureValidity(t *testing.T) {
_, err := s.CloneRepo(ctx, repoName, CloneOptions{Block: true})
require.NoError(t, err)
dst := gitserverfs.RepoDirFromName(s.reposDir, repoName)
dst := s.fs.RepoDir(repoName)
head, err := os.ReadFile(fmt.Sprintf("%s/HEAD", dst))
if os.IsNotExist(err) {
t.Fatal("expected a reconstituted HEAD, but no file exists")
@ -817,7 +802,7 @@ func TestCloneRepo_EnsureValidity(t *testing.T) {
t.Fatalf("expected no error, got %v", err)
}
dst := gitserverfs.RepoDirFromName(s.reposDir, "example.com/foo/bar")
dst := s.fs.RepoDir("example.com/foo/bar")
head, err := os.ReadFile(fmt.Sprintf("%s/HEAD", dst))
if os.IsNotExist(err) {
@ -936,7 +921,7 @@ func TestSyncRepoState(t *testing.T) {
t.Fatal(err)
}
err = syncRepoState(ctx, logger, db, s.locker, hostname, reposDir, gitserver.GitserverAddresses{Addresses: []string{hostname}}, 10, 10, true)
err = syncRepoState(ctx, logger, db, s.locker, hostname, s.fs, gitserver.GitserverAddresses{Addresses: []string{hostname}}, 10, 10, true)
if err != nil {
t.Fatal(err)
}
@ -961,7 +946,7 @@ func TestSyncRepoState(t *testing.T) {
t.Fatal(err)
}
err = syncRepoState(ctx, logger, db, s.locker, hostname, reposDir, gitserver.GitserverAddresses{Addresses: []string{hostname}}, 10, 10, true)
err = syncRepoState(ctx, logger, db, s.locker, hostname, s.fs, gitserver.GitserverAddresses{Addresses: []string{hostname}}, 10, 10, true)
if err != nil {
t.Fatal(err)
}
@ -1161,13 +1146,13 @@ func TestServer_IsRepoCloneable_InternalActor(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
reposDir := t.TempDir()
isCloneableCalled := false
fs := gitserverfs.New(&observation.TestContext, t.TempDir())
require.NoError(t, fs.Initialize())
s := NewServer(&ServerOpts{
Logger: logtest.Scoped(t),
ReposDir: reposDir,
Logger: logtest.Scoped(t),
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
return git.NewMockGitBackend()
},
@ -1195,6 +1180,7 @@ func TestServer_IsRepoCloneable_InternalActor(t *testing.T) {
RecordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
Locker: NewRepositoryLocker(),
RPSLimiter: ratelimit.NewInstrumentedLimiter("GitserverTest", rate.NewLimiter(rate.Inf, 10)),
FS: fs,
})
_, err := s.IsRepoCloneable(context.Background(), "foo")

View File

@ -9,11 +9,7 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/mountinfo"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
du "github.com/sourcegraph/sourcegraph/internal/diskusage"
"github.com/sourcegraph/sourcegraph/internal/metrics"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
@ -24,44 +20,6 @@ func (s *Server) RegisterMetrics(observationCtx *observation.Context, db dbutil.
// See https://github.com/sourcegraph/sourcegraph/issues/54317 for details.
s.logger.Warn("Disabling 'echo' metric")
}
// report the size of the repos dir
logger := s.logger
opts := mountinfo.CollectorOpts{Namespace: "gitserver"}
m := mountinfo.NewCollector(logger, opts, map[string]string{"reposDir": s.reposDir})
observationCtx.Registerer.MustRegister(m)
metrics.MustRegisterDiskMonitor(s.reposDir)
// TODO: Start removal of these.
// TODO(keegan) these are older names for the above disk metric. Keeping
// them to prevent breaking dashboards. Can remove once no
// alert/dashboards use them.
c := prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "src_gitserver_disk_space_available",
Help: "Amount of free space disk space on the repos mount.",
}, func() float64 {
usage, err := du.New(s.reposDir)
if err != nil {
s.logger.Error("error getting disk usage info", log.Error(err))
return 0
}
return float64(usage.Available())
})
prometheus.MustRegister(c)
c = prometheus.NewGaugeFunc(prometheus.GaugeOpts{
Name: "src_gitserver_disk_space_total",
Help: "Amount of total disk space in the repos directory.",
}, func() float64 {
usage, err := du.New(s.reposDir)
if err != nil {
s.logger.Error("error getting disk usage info", log.Error(err))
return 0
}
return float64(usage.Size())
})
prometheus.MustRegister(c)
}
func registerEchoMetric(logger log.Logger) {

View File

@ -14,7 +14,6 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
@ -45,7 +44,7 @@ func NewRepoStateSyncer(
db database.DB,
locker RepositoryLocker,
shardID string,
reposDir string,
fs gitserverfs.FS,
interval time.Duration,
batchSize int,
perSecond int,
@ -78,7 +77,7 @@ func NewRepoStateSyncer(
fullSync = fullSync || currentPinned != previousPinned
previousPinned = currentPinned
if err := syncRepoState(ctx, logger, db, locker, shardID, reposDir, gitServerAddrs, batchSize, perSecond, fullSync); err != nil {
if err := syncRepoState(ctx, logger, db, locker, shardID, fs, gitServerAddrs, batchSize, perSecond, fullSync); err != nil {
// after a failed full sync, we should attempt it again in the next
// invocation.
fullSync = true
@ -102,7 +101,7 @@ func syncRepoState(
db database.DB,
locker RepositoryLocker,
shardID string,
reposDir string,
fs gitserverfs.FS,
gitServerAddrs gitserver.GitserverAddresses,
batchSize int,
perSecond int,
@ -189,11 +188,6 @@ func syncRepoState(
for _, repo := range repos {
repoSyncStateCounter.WithLabelValues("check").Inc()
// We may have a deleted repo, we need to extract the original name both to
// ensure that the shard check is correct and also so that we can find the
// directory.
repo.Name = api.UndeletedRepoName(repo.Name)
// Ensure we're only dealing with repos we are responsible for.
addr := gitServerAddrs.AddrForRepo(ctx, repo.Name)
if !hostnameMatch(shardID, addr) {
@ -202,9 +196,12 @@ func syncRepoState(
}
repoSyncStateCounter.WithLabelValues("this_shard").Inc()
dir := gitserverfs.RepoDirFromName(reposDir, repo.Name)
cloned := repoCloned(dir)
_, cloning := locker.Status(dir)
cloned, err := fs.RepoCloned(repo.Name)
if err != nil {
// Failed to determine the cloned state; skip this record for now.
continue
}
_, cloning := locker.Status(repo.Name)
var shouldUpdate bool
if repo.ShardID != shardID {

View File

@ -85,6 +85,7 @@ go_test(
],
deps = [
"//cmd/gitserver/internal/common",
"//cmd/gitserver/internal/gitserverfs",
"//internal/api",
"//internal/codeintel/dependencies",
"//internal/conf/reposource",

View File

@ -27,8 +27,8 @@ func NewGoModulesSyncer(
connection *schema.GoModulesConnection,
svc *dependencies.Service,
client *gomodproxy.Client,
fs gitserverfs.FS,
getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error),
reposDir string,
) VCSSyncer {
placeholder, err := reposource.ParseGoVersionedPackage("sourcegraph.com/placeholder@v0.0.0")
if err != nil {
@ -42,15 +42,15 @@ func NewGoModulesSyncer(
placeholder: placeholder,
svc: svc,
configDeps: connection.Dependencies,
source: &goModulesSyncer{client: client, reposDir: reposDir},
reposDir: reposDir,
source: &goModulesSyncer{client: client, fs: fs},
fs: fs,
getRemoteURLSource: getRemoteURLSource,
}
}
type goModulesSyncer struct {
client *gomodproxy.Client
reposDir string
client *gomodproxy.Client
fs gitserverfs.FS
}
func (s goModulesSyncer) ParseVersionedPackageFromNameAndVersion(name reposource.PackageName, version string) (reposource.VersionedPackage, error) {
@ -76,8 +76,14 @@ func (s *goModulesSyncer) Download(ctx context.Context, dir string, dep reposour
}
defer zip.Close()
tmpdir, err := s.fs.TempDir("gomod-zips")
if err != nil {
return errors.Wrap(err, "create temp dir")
}
defer os.RemoveAll(tmpdir)
mod := dep.(*reposource.GoVersionedPackage).Module
if err = unzip(mod, zip, s.reposDir, dir); err != nil {
if err = unzip(mod, zip, tmpdir, dir); err != nil {
return errors.Wrap(err, "failed to unzip go module")
}
@ -86,18 +92,11 @@ func (s *goModulesSyncer) Download(ctx context.Context, dir string, dep reposour
// unzip the given go module zip into workDir, skipping any files that aren't
// valid according to modzip.CheckZip or that are potentially malicious.
func unzip(mod module.Version, zipContent io.Reader, reposDir string, workDir string) (err error) {
func unzip(mod module.Version, zipContent io.Reader, tmpdir, workDir string) (err error) {
// We cannot unzip in a streaming fashion, so we write the zip file to
// a temporary file. Otherwise, we would need to load the entire zip into
// memory, which isn't great for multi-megabyte+ files.
// Create a tmpdir that gitserver manages.
tmpdir, err := gitserverfs.TempDir(reposDir, "gomod-zips")
if err != nil {
return err
}
defer os.RemoveAll(tmpdir)
// Write the whole package to a temporary file.
zip, zipLen, err := writeZipToTemp(tmpdir, zipContent)
if err != nil {

View File

@ -15,6 +15,7 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/unpack"
@ -36,7 +37,7 @@ const (
jvmMajorVersion0 = 44
)
func NewJVMPackagesSyncer(connection *schema.JVMPackagesConnection, svc *dependencies.Service, getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error), cacheDir string, reposDir string) VCSSyncer {
func NewJVMPackagesSyncer(connection *schema.JVMPackagesConnection, svc *dependencies.Service, getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error), cacheDir string, fs gitserverfs.FS) VCSSyncer {
placeholder, err := reposource.ParseMavenVersionedPackage("com.sourcegraph:sourcegraph:1.0.0")
if err != nil {
panic(fmt.Sprintf("expected placeholder package to parse but got %v", err))
@ -51,7 +52,7 @@ func NewJVMPackagesSyncer(connection *schema.JVMPackagesConnection, svc *depende
placeholder: placeholder,
svc: svc,
configDeps: connection.Maven.Dependencies,
reposDir: reposDir,
fs: fs,
source: &jvmPackagesSyncer{
coursier: chandle,
config: connection,

View File

@ -4,8 +4,6 @@ import (
"archive/zip"
"context"
"fmt"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"os"
"os/exec"
"path"
@ -14,10 +12,14 @@ import (
"strings"
"testing"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -217,7 +219,9 @@ func TestJVMCloneCommand(t *testing.T) {
depsSvc := dependencies.TestService(database.NewDB(logger, dbtest.NewDB(t)))
cacheDir := filepath.Join(dir, "cache")
s := NewJVMPackagesSyncer(&schema.JVMPackagesConnection{Maven: schema.Maven{Dependencies: []string{}}}, depsSvc, testGetRemoteURLSource, cacheDir, dir).(*vcsPackagesSyncer)
fs := gitserverfs.New(&observation.TestContext, dir)
require.NoError(t, fs.Initialize())
s := NewJVMPackagesSyncer(&schema.JVMPackagesConnection{Maven: schema.Maven{Dependencies: []string{}}}, depsSvc, testGetRemoteURLSource, cacheDir, fs).(*vcsPackagesSyncer)
bareGitDirectory := path.Join(dir, "git")
s.runCloneCommand(t, bareGitDirectory, []string{exampleVersionedPackage})

View File

@ -11,6 +11,7 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
@ -26,8 +27,8 @@ func NewNpmPackagesSyncer(
connection schema.NpmPackagesConnection,
svc *dependencies.Service,
client npm.Client,
fs gitserverfs.FS,
getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error),
reposDir string,
) VCSSyncer {
placeholder, err := reposource.ParseNpmVersionedPackage("@sourcegraph/placeholder@1.0.0")
if err != nil {
@ -41,7 +42,7 @@ func NewNpmPackagesSyncer(
placeholder: placeholder,
svc: svc,
configDeps: connection.Dependencies,
reposDir: reposDir,
fs: fs,
source: &npmPackagesSyncer{client: client},
getRemoteURLSource: getRemoteURLSource,
}

View File

@ -6,8 +6,6 @@ import (
"compress/gzip"
"context"
"fmt"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"io"
"io/fs"
"os"
@ -18,11 +16,16 @@ import (
"testing"
"time"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -118,12 +121,15 @@ func TestNpmCloneCommand(t *testing.T) {
depsSvc := dependencies.TestService(database.NewDB(logger, dbtest.NewDB(t)))
fs := gitserverfs.New(&observation.TestContext, dir)
require.NoError(t, fs.Initialize())
s := NewNpmPackagesSyncer(
schema.NpmPackagesConnection{Dependencies: []string{}},
depsSvc,
&client,
fs,
testGetRemoteURLSource,
dir,
).(*vcsPackagesSyncer)
bareGitDirectory := path.Join(dir, "git")

View File

@ -38,7 +38,7 @@ type vcsPackagesSyncer struct {
configDeps []string
source packagesSource
svc dependenciesService
reposDir string
fs gitserverfs.FS
getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error)
}
@ -306,7 +306,7 @@ func (s *vcsPackagesSyncer) fetchVersions(ctx context.Context, name reposource.P
// gitPushDependencyTag is responsible for cleaning up temporary directories
// created in the process.
func (s *vcsPackagesSyncer) gitPushDependencyTag(ctx context.Context, bareGitDirectory string, dep reposource.VersionedPackage) error {
workDir, err := gitserverfs.TempDir(s.reposDir, s.Type())
workDir, err := s.fs.TempDir(s.Type())
if err != nil {
return err
}
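
A recurring pattern in these hunks: the package syncers stop threading a raw `reposDir` string and instead hold a `gitserverfs.FS` handle, asking it for managed temporary directories. As a rough sketch, this is the slice of that interface the call sites in this diff rely on — the method names are taken from usages in the commit, and the authoritative (larger) definition lives in `cmd/gitserver/internal/gitserverfs`:

```go
// Sketch only: the subset of gitserverfs.FS exercised by the hunks in this
// commit, not the full interface.
type FS interface {
	// Initialize prepares the repos directory before any other call
	// (see fs.Initialize() in the tests and in Main below).
	Initialize() error
	// TempDir creates a gitserver-managed temporary directory with the
	// given prefix, e.g. s.fs.TempDir(s.Type()) above.
	TempDir(prefix string) (string, error)
	// P4HomeDir returns a writable directory handed to `git p4` as $HOME.
	P4HomeDir() (string, error)
}
```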

View File

@ -18,9 +18,11 @@ import (
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -36,6 +38,9 @@ func TestVcsDependenciesSyncer_Fetch(t *testing.T) {
}
depsService := &fakeDepsService{deps: map[reposource.PackageName]dependencies.PackageRepoReference{}}
root := t.TempDir()
fs := gitserverfs.New(&observation.TestContext, root)
require.NoError(t, fs.Initialize())
remoteURL := &vcs.URL{URL: url.URL{Path: "fake/foo"}}
s := vcsPackagesSyncer{
@ -45,7 +50,7 @@ func TestVcsDependenciesSyncer_Fetch(t *testing.T) {
placeholder: placeholder,
source: depsSource,
svc: depsService,
reposDir: t.TempDir(),
fs: fs,
getRemoteURLSource: func(ctx context.Context, name api.RepoName) (RemoteURLSource, error) {
return RemoteURLSourceFunc(func(_ context.Context) (*vcs.URL, error) {
return remoteURL, nil

View File

@ -16,6 +16,7 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/executil"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/perforce"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/urlredactor"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
@ -26,37 +27,30 @@ import (
type perforceDepotSyncer struct {
logger log.Logger
recordingCommandFactory *wrexec.RecordingCommandFactory
fs gitserverfs.FS
// MaxChanges indicates to only import at most n changes when possible.
MaxChanges int
// maxChanges indicates to only import at most n changes when possible.
maxChanges int
// P4Client configures the client to use with p4 and enables use of a client spec
// p4Client configures the client to use with p4 and enables use of a client spec
// to find the list of interesting files in p4.
P4Client string
p4Client string
// FusionConfig contains information about the experimental p4-fusion client.
FusionConfig fusionConfig
// P4Home is a directory we will pass to `git p4` commands as the
// $HOME directory as it requires this to write cache data.
P4Home string
// reposDir is the directory where repositories are cloned.
reposDir string
// fusionConfig contains information about the experimental p4-fusion client.
fusionConfig fusionConfig
// getRemoteURLSource returns the RemoteURLSource for the given repository.
getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error)
}
func NewPerforceDepotSyncer(logger log.Logger, r *wrexec.RecordingCommandFactory, connection *schema.PerforceConnection, getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error), reposDir, p4Home string) VCSSyncer {
func NewPerforceDepotSyncer(logger log.Logger, r *wrexec.RecordingCommandFactory, fs gitserverfs.FS, connection *schema.PerforceConnection, getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error)) VCSSyncer {
return &perforceDepotSyncer{
logger: logger.Scoped("PerforceDepotSyncer"),
recordingCommandFactory: r,
MaxChanges: int(connection.MaxChanges),
P4Client: connection.P4Client,
FusionConfig: configureFusionClient(connection),
reposDir: reposDir,
P4Home: p4Home,
fs: fs,
maxChanges: int(connection.MaxChanges),
p4Client: connection.P4Client,
fusionConfig: configureFusionClient(connection),
getRemoteURLSource: getRemoteURLSource,
}
}
@ -82,10 +76,7 @@ func (s *perforceDepotSyncer) IsCloneable(ctx context.Context, repoName api.Repo
return errors.Wrap(err, "invalid perforce remote URL")
}
return perforce.IsDepotPathCloneable(ctx, perforce.IsDepotPathCloneableArguments{
ReposDir: s.reposDir,
P4Home: s.P4Home,
return perforce.IsDepotPathCloneable(ctx, s.fs, perforce.IsDepotPathCloneableArguments{
P4Port: host,
P4User: username,
P4Passwd: password,
@ -119,10 +110,7 @@ func (s *perforceDepotSyncer) Clone(ctx context.Context, repo api.RepoName, _ co
// First, do a quick check if we can reach the Perforce server.
tryWrite(s.logger, progressWriter, "Checking Perforce server connection\n")
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: s.reposDir,
P4Home: s.P4Home,
err = perforce.P4TestWithTrust(ctx, s.fs, perforce.P4TestWithTrustArguments{
P4Port: p4port,
P4User: p4user,
P4Passwd: p4passwd,
@ -133,7 +121,7 @@ func (s *perforceDepotSyncer) Clone(ctx context.Context, repo api.RepoName, _ co
tryWrite(s.logger, progressWriter, "Perforce server connection succeeded\n")
var cmd *exec.Cmd
if s.FusionConfig.Enabled {
if s.fusionConfig.Enabled {
tryWrite(s.logger, progressWriter, "Converting depot using p4-fusion\n")
cmd = s.buildP4FusionCmd(ctx, depot, p4user, tmpPath, p4port)
} else {
@ -143,7 +131,10 @@ func (s *perforceDepotSyncer) Clone(ctx context.Context, repo api.RepoName, _ co
args = append(args, depot+"@all", tmpPath)
cmd = exec.CommandContext(ctx, "git", args...)
}
cmd.Env = s.p4CommandEnv(tmpPath, p4port, p4user, p4passwd)
cmd.Env, err = s.p4CommandEnv(tmpPath, p4port, p4user, p4passwd)
if err != nil {
return errors.Wrap(err, "failed to build p4 command env")
}
redactor := urlredactor.New(remoteURL)
wrCmd := s.recordingCommandFactory.WrapWithRepoName(ctx, s.logger, repo, cmd).WithRedactorFunc(redactor.Redact)
@ -197,18 +188,18 @@ func (s *perforceDepotSyncer) Clone(ctx context.Context, repo api.RepoName, _ co
func (s *perforceDepotSyncer) buildP4FusionCmd(ctx context.Context, depot, username, src, port string) *exec.Cmd {
return exec.CommandContext(ctx, "p4-fusion",
"--path", depot+"...",
"--client", s.FusionConfig.Client,
"--client", s.fusionConfig.Client,
"--user", username,
"--src", src,
"--networkThreads", strconv.Itoa(s.FusionConfig.NetworkThreads),
"--printBatch", strconv.Itoa(s.FusionConfig.PrintBatch),
"--networkThreads", strconv.Itoa(s.fusionConfig.NetworkThreads),
"--printBatch", strconv.Itoa(s.fusionConfig.PrintBatch),
"--port", port,
"--lookAhead", strconv.Itoa(s.FusionConfig.LookAhead),
"--retries", strconv.Itoa(s.FusionConfig.Retries),
"--refresh", strconv.Itoa(s.FusionConfig.Refresh),
"--maxChanges", strconv.Itoa(s.FusionConfig.MaxChanges),
"--includeBinaries", strconv.FormatBool(s.FusionConfig.IncludeBinaries),
"--fsyncEnable", strconv.FormatBool(s.FusionConfig.FsyncEnable),
"--lookAhead", strconv.Itoa(s.fusionConfig.LookAhead),
"--retries", strconv.Itoa(s.fusionConfig.Retries),
"--refresh", strconv.Itoa(s.fusionConfig.Refresh),
"--maxChanges", strconv.Itoa(s.fusionConfig.MaxChanges),
"--includeBinaries", strconv.FormatBool(s.fusionConfig.IncludeBinaries),
"--fsyncEnable", strconv.FormatBool(s.fusionConfig.FsyncEnable),
"--noColor", "true",
// We don't want an empty commit for a sane merge base across branches,
// since we don't use them and the empty commit breaks changelist parsing.
@ -234,10 +225,7 @@ func (s *perforceDepotSyncer) Fetch(ctx context.Context, repoName api.RepoName,
}
// First, do a quick check if we can reach the Perforce server.
err = perforce.P4TestWithTrust(ctx, perforce.P4TestWithTrustArguments{
ReposDir: s.reposDir,
P4Home: s.P4Home,
err = perforce.P4TestWithTrust(ctx, s.fs, perforce.P4TestWithTrustArguments{
P4Port: p4port,
P4User: p4user,
P4Passwd: p4passwd,
@ -247,7 +235,7 @@ func (s *perforceDepotSyncer) Fetch(ctx context.Context, repoName api.RepoName,
}
var cmd *wrexec.Cmd
if s.FusionConfig.Enabled {
if s.fusionConfig.Enabled {
// Example: p4-fusion --path //depot/... --user $P4USER --src clones/ --networkThreads 64 --printBatch 10 --port $P4PORT --lookAhead 2000 --retries 10 --refresh 100
root, _ := filepath.Split(string(dir))
cmd = wrexec.Wrap(ctx, nil, s.buildP4FusionCmd(ctx, depot, p4user, root+".git", p4port))
@ -256,7 +244,10 @@ func (s *perforceDepotSyncer) Fetch(ctx context.Context, repoName api.RepoName,
args := append([]string{"p4", "sync"}, s.p4CommandOptions()...)
cmd = wrexec.CommandContext(ctx, nil, "git", args...)
}
cmd.Env = s.p4CommandEnv(string(dir), p4port, p4user, p4passwd)
cmd.Env, err = s.p4CommandEnv(string(dir), p4port, p4user, p4passwd)
if err != nil {
return nil, errors.Wrap(err, "failed to build p4 command env")
}
dir.Set(cmd.Cmd)
// TODO(keegancsmith)(indradhanush) This is running a remote command and
@ -267,14 +258,19 @@ func (s *perforceDepotSyncer) Fetch(ctx context.Context, repoName api.RepoName,
return nil, errors.Wrapf(err, "failed to update with output %q", urlredactor.New(remoteURL).Redact(string(output)))
}
if !s.FusionConfig.Enabled {
if !s.fusionConfig.Enabled {
p4home, err := s.fs.P4HomeDir()
if err != nil {
return nil, errors.Wrap(err, "failed to create p4home")
}
// Force update "master" to "refs/remotes/p4/master" where changes are synced into
cmd = wrexec.CommandContext(ctx, nil, "git", "branch", "-f", "master", "refs/remotes/p4/master")
cmd.Cmd.Env = append(os.Environ(),
"P4PORT="+p4port,
"P4USER="+p4user,
"P4PASSWD="+p4passwd,
"HOME="+s.P4Home,
"HOME="+p4home,
)
dir.Set(cmd.Cmd)
if output, err := cmd.CombinedOutput(); err != nil {
@ -295,16 +291,16 @@ func (s *perforceDepotSyncer) Fetch(ctx context.Context, repoName api.RepoName,
func (s *perforceDepotSyncer) p4CommandOptions() []string {
flags := []string{}
if s.MaxChanges > 0 {
flags = append(flags, "--max-changes", strconv.Itoa(s.MaxChanges))
if s.maxChanges > 0 {
flags = append(flags, "--max-changes", strconv.Itoa(s.maxChanges))
}
if s.P4Client != "" {
if s.p4Client != "" {
flags = append(flags, "--use-client-spec")
}
return flags
}
func (s *perforceDepotSyncer) p4CommandEnv(cmdCWD, p4port, p4user, p4passwd string) []string {
func (s *perforceDepotSyncer) p4CommandEnv(cmdCWD, p4port, p4user, p4passwd string) ([]string, error) {
env := append(
os.Environ(),
"P4PORT="+p4port,
@ -313,17 +309,20 @@ func (s *perforceDepotSyncer) p4CommandEnv(cmdCWD, p4port, p4user, p4passwd stri
"P4CLIENTPATH="+cmdCWD,
)
if s.P4Client != "" {
env = append(env, "P4CLIENT="+s.P4Client)
if s.p4Client != "" {
env = append(env, "P4CLIENT="+s.p4Client)
}
if s.P4Home != "" {
// git p4 commands write to $HOME/.gitp4-usercache.txt, we should pass in a
// directory under our control and ensure that it is writeable.
env = append(env, "HOME="+s.P4Home)
p4home, err := s.fs.P4HomeDir()
if err != nil {
return nil, err
}
return env
// git p4 commands write to $HOME/.gitp4-usercache.txt, we should pass in a
// directory under our control and ensure that it is writeable.
env = append(env, "HOME="+p4home)
return env, nil
}
// fusionConfig allows configuration of the p4-fusion client.

View File

@ -5,20 +5,25 @@ import (
"testing"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
)
func TestP4DepotSyncer_p4CommandEnv(t *testing.T) {
fs := gitserverfs.NewMockFS()
fs.P4HomeDirFunc.SetDefaultReturn("p4home", nil)
syncer := &perforceDepotSyncer{
logger: logtest.Scoped(t),
recordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
P4Client: "client",
P4Home: "p4home",
p4Client: "client",
fs: fs,
}
cwd := t.TempDir()
vars := syncer.p4CommandEnv(cwd, "host", "username", "password")
vars, err := syncer.p4CommandEnv(cwd, "host", "username", "password")
require.NoError(t, err)
assertEnv := func(key, value string) {
var match string
for _, s := range vars {

View File

@ -25,8 +25,8 @@ func NewPythonPackagesSyncer(
connection *schema.PythonPackagesConnection,
svc *dependencies.Service,
client *pypi.Client,
fs gitserverfs.FS,
getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error),
reposDir string,
) VCSSyncer {
return &vcsPackagesSyncer{
logger: log.Scoped("PythonPackagesSyncer"),
@ -35,16 +35,16 @@ func NewPythonPackagesSyncer(
placeholder: reposource.ParseVersionedPackage("sourcegraph.com/placeholder@v0.0.0"),
svc: svc,
configDeps: connection.Dependencies,
source: &pythonPackagesSyncer{client: client, reposDir: reposDir},
reposDir: reposDir,
source: &pythonPackagesSyncer{client: client, fs: fs},
getRemoteURLSource: getRemoteURLSource,
fs: fs,
}
}
// pythonPackagesSyncer implements packagesSource
type pythonPackagesSyncer struct {
client *pypi.Client
reposDir string
client *pypi.Client
fs gitserverfs.FS
}
func (pythonPackagesSyncer) ParseVersionedPackageFromNameAndVersion(name reposource.PackageName, version string) (reposource.VersionedPackage, error) {
@ -76,7 +76,11 @@ func (s *pythonPackagesSyncer) Download(ctx context.Context, dir string, dep rep
}
defer pkgData.Close()
if err = unpackPythonPackage(pkgData, packageURL, s.reposDir, dir); err != nil {
mkdirTemp := func() (string, error) {
return s.fs.TempDir("pypi-packages")
}
if err = unpackPythonPackage(pkgData, packageURL, mkdirTemp, dir); err != nil {
return errors.Wrap(err, "failed to unzip python module")
}
@ -86,7 +90,7 @@ func (s *pythonPackagesSyncer) Download(ctx context.Context, dir string, dep rep
// unpackPythonPackage unpacks the given python package archive into workDir, skipping any
// files that aren't valid or that are potentially malicious. It detects the kind of archive
// and compression used with the given packageURL.
func unpackPythonPackage(pkg io.Reader, packageURL, reposDir, workDir string) error {
func unpackPythonPackage(pkg io.Reader, packageURL string, mkdirTemp func() (string, error), workDir string) error {
logger := log.Scoped("unpackPythonPackage")
u, err := url.Parse(packageURL)
if err != nil {
@ -128,7 +132,7 @@ func unpackPythonPackage(pkg io.Reader, packageURL, reposDir, workDir string) er
// memory, which isn't great for multi-megabyte+ files.
// Create a tmpdir that gitserver manages.
tmpdir, err := gitserverfs.TempDir(reposDir, "pypi-packages")
tmpdir, err := mkdirTemp()
if err != nil {
return err
}
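
Injecting the temp-dir provider as a closure keeps `unpackPythonPackage` independent of the gitserver filesystem layer: production code hands in `fs.TempDir`, while the tests below hand in a plain `os.MkdirTemp`. A hedged sketch of the two wirings (variable names illustrative only):

```go
// Production wiring (mirrors pythonPackagesSyncer.Download above): temp dirs
// come from the gitserver-managed FS.
err := unpackPythonPackage(pkgData, packageURL,
	func() (string, error) { return s.fs.TempDir("pypi-packages") }, workDir)

// Test wiring (mirrors the tests below): any throwaway directory will do.
err = unpackPythonPackage(pkgData, packageURL,
	func() (string, error) { return os.MkdirTemp("", "pypi-packages") }, workDir)
```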

View File

@ -57,7 +57,7 @@ func TestUnpackPythonPackage_TGZ(t *testing.T) {
pkg := bytes.NewReader(createTgz(t, files))
tmp := t.TempDir()
if err := unpackPythonPackage(pkg, "https://some.where/pckg.tar.gz", tmp, tmp); err != nil {
if err := unpackPythonPackage(pkg, "https://some.where/pckg.tar.gz", func() (string, error) { return tmp, nil }, tmp); err != nil {
t.Fatal()
}
@ -127,7 +127,7 @@ func TestUnpackPythonPackage_ZIP(t *testing.T) {
}
tmp := t.TempDir()
if err := unpackPythonPackage(&zipBuf, "https://some.where/pckg.zip", tmp, tmp); err != nil {
if err := unpackPythonPackage(&zipBuf, "https://some.where/pckg.zip", func() (string, error) { return os.MkdirTemp(tmp, "zip") }, tmp); err != nil {
t.Fatal()
}
@ -171,13 +171,13 @@ func TestUnpackPythonPackage_InvalidZip(t *testing.T) {
pkg := bytes.NewReader(createTgz(t, files))
if err := unpackPythonPackage(pkg, "https://some.where/pckg.whl", t.TempDir(), t.TempDir()); err == nil {
if err := unpackPythonPackage(pkg, "https://some.where/pckg.whl", func() (string, error) { return t.TempDir(), nil }, t.TempDir()); err == nil {
t.Fatal("no error returned from unpack package")
}
}
func TestUnpackPythonPackage_UnsupportedFormat(t *testing.T) {
if err := unpackPythonPackage(bytes.NewReader([]byte{}), "https://some.where/pckg.exe", "", ""); err == nil {
if err := unpackPythonPackage(bytes.NewReader([]byte{}), "https://some.where/pckg.exe", func() (string, error) { return "", nil }, ""); err == nil {
t.Fatal()
}
}
@ -211,7 +211,7 @@ func TestUnpackPythonPackage_Wheel(t *testing.T) {
}
tmp := t.TempDir()
if err := unpackPythonPackage(b, wheelURL, tmp, tmp); err != nil {
if err := unpackPythonPackage(b, wheelURL, func() (string, error) { return os.MkdirTemp(tmp, "wheel") }, tmp); err != nil {
t.Fatal(err)
}

View File

@ -11,6 +11,7 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
@ -24,8 +25,8 @@ func NewRubyPackagesSyncer(
connection *schema.RubyPackagesConnection,
svc *dependencies.Service,
client *rubygems.Client,
fs gitserverfs.FS,
getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error),
reposDir string,
) VCSSyncer {
return &vcsPackagesSyncer{
logger: log.Scoped("RubyPackagesSyncer"),
@ -34,8 +35,8 @@ func NewRubyPackagesSyncer(
placeholder: reposource.NewRubyVersionedPackage("sourcegraph/placeholder", "0.0.0"),
svc: svc,
configDeps: connection.Dependencies,
reposDir: reposDir,
source: &rubyDependencySource{client: client},
fs: fs,
getRemoteURLSource: getRemoteURLSource,
}
}

View File

@ -8,6 +8,7 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
@ -21,8 +22,8 @@ func NewRustPackagesSyncer(
connection *schema.RustPackagesConnection,
svc *dependencies.Service,
client *crates.Client,
fs gitserverfs.FS,
getRemoteURLSource func(ctx context.Context, name api.RepoName) (RemoteURLSource, error),
reposDir string,
) VCSSyncer {
return &vcsPackagesSyncer{
logger: log.Scoped("RustPackagesSyncer"),
@ -31,8 +32,8 @@ func NewRustPackagesSyncer(
placeholder: reposource.ParseRustVersionedPackage("sourcegraph.com/placeholder@0.0.0"),
svc: svc,
configDeps: connection.Dependencies,
reposDir: reposDir,
source: &rustDependencySource{client: client},
fs: fs,
getRemoteURLSource: getRemoteURLSource,
}
}

View File

@ -2,10 +2,12 @@ package vcssyncer
import (
"context"
jsoniter "github.com/json-iterator/go"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"io"
jsoniter "github.com/json-iterator/go"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
@ -70,10 +72,10 @@ type NewVCSSyncerOpts struct {
RepoStore database.RepoStore
DepsSvc *dependencies.Service
Repo api.RepoName
ReposDir string
CoursierCacheDir string
RecordingCommandFactory *wrexec.RecordingCommandFactory
Logger log.Logger
FS gitserverfs.FS
GetRemoteURLSource func(ctx context.Context, repo api.RepoName) (RemoteURLSource, error)
}
@ -115,18 +117,13 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
return nil, err
}
p4Home, err := gitserverfs.MakeP4HomeDir(opts.ReposDir)
if err != nil {
return nil, err
}
return NewPerforceDepotSyncer(opts.Logger, opts.RecordingCommandFactory, &c, opts.GetRemoteURLSource, opts.ReposDir, p4Home), nil
return NewPerforceDepotSyncer(opts.Logger, opts.RecordingCommandFactory, opts.FS, &c, opts.GetRemoteURLSource), nil
case extsvc.TypeJVMPackages:
var c schema.JVMPackagesConnection
if _, err := extractOptions(&c); err != nil {
return nil, err
}
return NewJVMPackagesSyncer(&c, opts.DepsSvc, opts.GetRemoteURLSource, opts.CoursierCacheDir, opts.ReposDir), nil
return NewJVMPackagesSyncer(&c, opts.DepsSvc, opts.GetRemoteURLSource, opts.CoursierCacheDir, opts.FS), nil
case extsvc.TypeNpmPackages:
var c schema.NpmPackagesConnection
urn, err := extractOptions(&c)
@ -137,7 +134,7 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
if err != nil {
return nil, err
}
return NewNpmPackagesSyncer(c, opts.DepsSvc, cli, opts.GetRemoteURLSource, opts.ReposDir), nil
return NewNpmPackagesSyncer(c, opts.DepsSvc, cli, opts.FS, opts.GetRemoteURLSource), nil
case extsvc.TypeGoModules:
var c schema.GoModulesConnection
urn, err := extractOptions(&c)
@ -145,7 +142,7 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
return nil, err
}
cli := gomodproxy.NewClient(urn, c.Urls, httpcli.ExternalClientFactory)
return NewGoModulesSyncer(&c, opts.DepsSvc, cli, opts.GetRemoteURLSource, opts.ReposDir), nil
return NewGoModulesSyncer(&c, opts.DepsSvc, cli, opts.FS, opts.GetRemoteURLSource), nil
case extsvc.TypePythonPackages:
var c schema.PythonPackagesConnection
urn, err := extractOptions(&c)
@ -156,7 +153,7 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
if err != nil {
return nil, err
}
return NewPythonPackagesSyncer(&c, opts.DepsSvc, cli, opts.GetRemoteURLSource, opts.ReposDir), nil
return NewPythonPackagesSyncer(&c, opts.DepsSvc, cli, opts.FS, opts.GetRemoteURLSource), nil
case extsvc.TypeRustPackages:
var c schema.RustPackagesConnection
urn, err := extractOptions(&c)
@ -167,7 +164,7 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
if err != nil {
return nil, err
}
return NewRustPackagesSyncer(&c, opts.DepsSvc, cli, opts.GetRemoteURLSource, opts.ReposDir), nil
return NewRustPackagesSyncer(&c, opts.DepsSvc, cli, opts.FS, opts.GetRemoteURLSource), nil
case extsvc.TypeRubyPackages:
var c schema.RubyPackagesConnection
urn, err := extractOptions(&c)
@ -178,7 +175,7 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
if err != nil {
return nil, err
}
return NewRubyPackagesSyncer(&c, opts.DepsSvc, cli, opts.GetRemoteURLSource, opts.ReposDir), nil
return NewRubyPackagesSyncer(&c, opts.DepsSvc, cli, opts.FS, opts.GetRemoteURLSource), nil
}
return NewGitRepoSyncer(opts.Logger, opts.RecordingCommandFactory, opts.GetRemoteURLSource), nil

View File

@ -10,10 +10,12 @@ import (
"github.com/sourcegraph/log/logtest"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
api "github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/types"
)
@ -25,6 +27,9 @@ func TestGetVCSSyncer(t *testing.T) {
}
tempCoursierCacheDir := filepath.Join(tempReposDir, "coursier")
fs := gitserverfs.New(&observation.TestContext, tempReposDir)
require.NoError(t, fs.Initialize())
repo := api.RepoName("foo/bar")
extsvcStore := dbmocks.NewMockExternalServiceStore()
repoStore := dbmocks.NewMockRepoStore()
@ -57,7 +62,7 @@ func TestGetVCSSyncer(t *testing.T) {
RepoStore: repoStore,
DepsSvc: new(dependencies.Service),
Repo: repo,
ReposDir: tempReposDir,
FS: fs,
CoursierCacheDir: tempCoursierCacheDir,
Logger: logtest.Scoped(t),
})

View File

@ -7,12 +7,13 @@ import (
"database/sql"
"encoding/json"
"fmt"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"net/http"
"os/exec"
"path/filepath"
"strings"
"github.com/sourcegraph/sourcegraph/internal/vcs"
"github.com/sourcegraph/log"
"google.golang.org/grpc"
@ -67,7 +68,8 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
}
// Prepare the file system.
if err := gitserverfs.InitGitserverFileSystem(logger, config.ReposDir); err != nil {
fs := gitserverfs.New(observationCtx, config.ReposDir)
if err := fs.Initialize(); err != nil {
return err
}
@ -92,8 +94,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
locker := server.NewRepositoryLocker()
hostname := config.ExternalAddress
gitserver := server.NewServer(&server.ServerOpts{
Logger: logger,
ReposDir: config.ReposDir,
Logger: logger,
GetBackendFunc: func(dir common.GitDir, repoName api.RepoName) git.GitBackend {
return git.NewObservableBackend(gitcli.NewBackend(logger, recordingCommandFactory, dir, repoName))
},
@ -106,10 +107,10 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
RepoStore: db.Repos(),
DepsSvc: dependencies.NewService(observationCtx, db),
Repo: repo,
ReposDir: config.ReposDir,
CoursierCacheDir: config.CoursierCacheDir,
RecordingCommandFactory: recordingCommandFactory,
Logger: logger,
FS: fs,
GetRemoteURLSource: func(ctx context.Context, repo api.RepoName) (vcssyncer.RemoteURLSource, error) {
return vcssyncer.RemoteURLSourceFunc(func(ctx context.Context) (*vcs.URL, error) {
rawURL, err := getRemoteURLFunc(ctx, logger, db, repo)
@ -128,10 +129,10 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
return u, nil
}), nil
},
})
},
FS: fs,
Hostname: hostname,
DB: db,
CloneQueue: cloneQueue,
@ -180,7 +181,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
db,
locker,
hostname,
config.ReposDir,
fs,
config.SyncRepoStateInterval,
config.SyncRepoStateBatchSize,
config.SyncRepoStateUpdatePerSecond,
@ -190,11 +191,11 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
server.JanitorConfig{
ShardID: hostname,
JanitorInterval: config.JanitorInterval,
ReposDir: config.ReposDir,
DesiredPercentFree: config.JanitorReposDesiredPercentFree,
DisableDeleteReposOnWrongShard: config.JanitorDisableDeleteReposOnWrongShard,
},
db,
fs,
recordingCommandFactory,
gitserver.CloneRepo,
logger,

View File

@ -111,5 +111,6 @@ write_source_files(
"//internal/workerutil/dbworker/store/mocks:generate_mocks",
"//internal/workerutil:generate_mocks",
"//lib/background:generate_mocks",
"//cmd/gitserver/internal/gitserverfs:generate_mocks",
],
)

View File

@ -31,7 +31,7 @@ func (r RepoName) Equal(o RepoName) bool {
// RepoHashedName is the hashed name of a repo
type RepoHashedName string
var deletedRegex = lazyregexp.New("DELETED-[0-9]+\\.[0-9]+-")
var deletedRegex = lazyregexp.New("^DELETED-[0-9]+\\.[0-9]+-")
// UndeletedRepoName will "undelete" a repo name. When we soft-delete a repo we
// change its name in the database, this function extracts the original repo

View File

@ -32,6 +32,16 @@ func TestUndeletedRepoName(t *testing.T) {
have: RepoName("DELETED-1650977466.716686-github.com/owner/repo"),
want: RepoName("github.com/owner/repo"),
},
{
name: "Double deleted",
have: RepoName("DELETED-1650977466.716686-DELETED-1650977466.716686-github.com/owner/repo"),
want: RepoName("DELETED-1650977466.716686-github.com/owner/repo"),
},
{
name: "Not actually deleted",
have: RepoName("github.com/DELETED-1650977466.716686-owner/repo"),
want: RepoName("github.com/DELETED-1650977466.716686-owner/repo"),
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {

View File

@ -174,13 +174,20 @@ type GitserverAddresses struct {
func (g *GitserverAddresses) AddrForRepo(ctx context.Context, repoName api.RepoName) string {
addrForRepoInvoked.Inc()
// Normalizing the name in case the caller didn't.
name := string(protocol.NormalizeRepo(repoName))
// We undelete the repo name for the addr function so that we can still reach the
// right gitserver after a repo has been deleted (and the name changed by that).
// Ideally we wouldn't need this, but as long as we use RepoName as the identifier
// in gitserver, we have to do this.
name := string(api.UndeletedRepoName(repoName))
if pinnedAddr, ok := g.PinnedServers[name]; ok {
return pinnedAddr
}
return addrForKey(name, g.Addresses)
// We use the normalize function here, because that's what we did previously.
// Ideally, this would not be required, but it would reshuffle GitHub.com repos
// with uppercase characters in the name. So until we have a better migration
// strategy, we keep this old behavior in.
return addrForKey(string(protocol.NormalizeRepo(api.RepoName(name))), g.Addresses)
}
// addrForKey returns the gitserver address to use for the given string key,

View File

@ -16,41 +16,49 @@ func TestAddrForRepo(t *testing.T) {
}
ctx := context.Background()
t.Run("no deduplicated forks", func(t *testing.T) {
testCases := []struct {
name string
repo api.RepoName
want string
}{
{
name: "repo1",
repo: api.RepoName("repo1"),
want: "gitserver-3",
},
{
name: "check we normalise",
repo: api.RepoName("repo1.git"),
want: "gitserver-3",
},
{
name: "another repo",
repo: api.RepoName("github.com/sourcegraph/sourcegraph.git"),
want: "gitserver-2",
},
{
name: "pinned repo", // different server address that the hashing function would normally yield
repo: api.RepoName("repo2"),
want: "gitserver-1",
},
}
testCases := []struct {
name string
repo api.RepoName
want string
}{
{
name: "repo1",
repo: api.RepoName("repo1"),
want: "gitserver-3",
},
{
name: "check we target the original instance prior to deletion",
repo: api.RepoName("DELETED-123123.123123-repo1"),
want: "gitserver-3",
},
{
name: "deletion and pinning work together",
repo: api.RepoName("DELETED-123123.123123-repo2"),
want: "gitserver-1",
},
{
name: "another repo",
repo: api.RepoName("github.com/sourcegraph/sourcegraph"),
want: "gitserver-2",
},
{
name: "case sensitive repo",
repo: api.RepoName("github.com/sourcegraph/Sourcegraph"),
want: "gitserver-2",
},
{
name: "pinned repo", // different server address that the hashing function would normally yield
repo: api.RepoName("repo2"),
want: "gitserver-1",
},
}
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
got := ga.AddrForRepo(ctx, tc.repo)
if got != tc.want {
t.Fatalf("Want %q, got %q", tc.want, got)
}
})
}
})
for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
got := ga.AddrForRepo(ctx, tc.repo)
if got != tc.want {
t.Fatalf("Want %q, got %q", tc.want, got)
}
})
}
}

View File

@ -601,20 +601,18 @@ func (c *RemoteGitCommand) sendExec(ctx context.Context) (_ io.ReadCloser, err e
}
}()
repoName := protocol.NormalizeRepo(c.repo)
// Check that ctx is not expired.
if err := ctx.Err(); err != nil {
return nil, err
}
client, err := c.execer.ClientForRepo(ctx, repoName)
client, err := c.execer.ClientForRepo(ctx, c.repo)
if err != nil {
return nil, err
}
req := &proto.ExecRequest{
Repo: string(repoName),
Repo: string(c.repo),
Args: stringsToByteSlices(c.args[1:]),
NoTimeout: c.noTimeout,
}
@ -656,9 +654,7 @@ func (c *clientImplementor) Search(ctx context.Context, args *protocol.SearchReq
})
defer endObservation(1, observation.Args{})
repoName := protocol.NormalizeRepo(args.Repo)
client, err := c.ClientForRepo(ctx, repoName)
client, err := c.ClientForRepo(ctx, args.Repo)
if err != nil {
return false, err
}
@ -871,11 +867,7 @@ func (c *clientImplementor) Remove(ctx context.Context, repo api.RepoName) (err
})
defer endObservation(1, observation.Args{})
// In case the repo has already been deleted from the database we need to pass
// the old name in order to land on the correct gitserver instance
undeletedName := api.UndeletedRepoName(repo)
client, err := c.ClientForRepo(ctx, undeletedName)
client, err := c.ClientForRepo(ctx, repo)
if err != nil {
return err
}

View File

@ -313,13 +313,6 @@ func (r *RepoCloneResponse) FromProto(p *proto.RepoCloneResponse) {
}
}
type NotFoundPayload struct {
CloneInProgress bool `json:"cloneInProgress"` // If true, exec returned with noop because clone is in progress.
// CloneProgress is a progress message from the running clone command.
CloneProgress string `json:"cloneProgress,omitempty"`
}
// IsRepoCloneableRequest is a request to determine if a repo is cloneable.
type IsRepoCloneableRequest struct {
// Repo is the repository to check.
@ -560,11 +553,11 @@ func (r *CreateCommitFromPatchResponse) FromProto(res *proto.CreateCommitFromPat
}
// SetError adds the supplied error related details to e.
func (e *CreateCommitFromPatchResponse) SetError(repo, command, out string, err error) {
func (e *CreateCommitFromPatchResponse) SetError(repo api.RepoName, command, out string, err error) {
if e.Error == nil {
e.Error = &CreateCommitFromPatchError{}
}
e.Error.RepositoryName = repo
e.Error.RepositoryName = string(repo)
e.Error.Command = command
e.Error.CombinedOutput = out
e.Error.InternalError = err.Error()

View File

@ -25,21 +25,9 @@ func NormalizeRepo(input api.RepoName) api.RepoName {
host, repoPath = strings.ToLower(repo[:slash]), repo[slash:]
}
trimGit := func(s string) string {
s = strings.TrimSuffix(s, ".git")
return strings.TrimSuffix(s, "/")
}
switch host {
case "github.com":
repoPath = trimGit(repoPath)
if host == "github.com" {
// GitHub is fully case-insensitive.
repoPath = strings.ToLower(repoPath)
case "go":
// support suffix ".git"
default:
repoPath = trimGit(repoPath)
}
return api.RepoName(host + repoPath)

View File

@ -8,16 +8,14 @@ import (
func TestNormalizeRepo(t *testing.T) {
cases := map[api.RepoName]api.RepoName{
"FooBar.git": "FooBar",
"foobar": "foobar",
"FooBar": "FooBar",
"foo/bar": "foo/bar",
"gitHub.Com/FooBar.git": "github.com/foobar",
"myServer.Com/FooBar.git": "myserver.com/FooBar",
"myServer.Com/FooBar/.git": "myserver.com/FooBar",
"foobar": "foobar",
"FooBar": "FooBar",
"foo/bar": "foo/bar",
"github.com/FooBar.git": "github.com/foobar.git",
// support repos with suffix .git for Go
"go/git.foo.org/bar.git": "go/git.foo.org/bar.git",
// Case insensitivity:
"gitHub.Com/FooBar": "github.com/foobar",
"myServer.Com/FooBar": "myserver.com/FooBar",
// trying to escape gitserver root
"/etc/passwd": "etc/passwd",

View File

@ -149,3 +149,7 @@
- GitBackend
- GitConfigBackend
- BlameHunkReader
- filename: cmd/gitserver/internal/gitserverfs/mock.go
path: github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs
interfaces:
- FS
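
The new mockgen entry generates a `MockFS` for the `FS` interface, which is what lets tests stub filesystem behavior without touching disk. A hypothetical usage sketch — `P4HomeDirFunc.SetDefaultReturn` appears in the perforce syncer test above; the `TempDirFunc` hook is assumed to follow the same generated-mock convention:

```go
fs := gitserverfs.NewMockFS()
// Stub the P4 home directory, as the perforce syncer test above does.
fs.P4HomeDirFunc.SetDefaultReturn("/tmp/p4home", nil)
// Assumption: each interface method gets a corresponding SetDefaultHook.
fs.TempDirFunc.SetDefaultHook(func(prefix string) (string, error) {
	return os.MkdirTemp("", prefix)
})
```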