Mirror of https://github.com/sourcegraph/sourcegraph.git, synced 2026-02-06 15:51:43 +00:00
vcssyncer: Move clone implementation details into syncer (#57688)
Today, the doClone method performs a bunch of side effects: disabling LFS smudge for git, triggering the Perforce changelist mapper, redacting logs on the assumption that every VCSSyncer implementation has the same kind of potential leakage, and so forth. This PR replaces the CloneCommand function on the syncer with a proper Clone method and moves responsibility for the above into the syncer (not yet the changelist mapper, because that will need a bit more refactoring I don't want to do in this PR).

There are two main motivations for this PR:

- Make the cloning process less dependent on the implementation: all we care about is that there is a valid git repo in tmpDir after invocation.
- Allow us to run multiple steps in the cloning process (which we already did implicitly, just without any visibility), without the dirty hacks we previously needed in the packages syncer, where we ran the actual command in the getter method and returned a bogus command. This will enable us to repack an exported Perforce depot after a successful conversion.

Nice side effect: the packages syncer feels a little less hacky, too.
parent 140d93ba25, commit 30f38a5d39
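To make the shape of the refactor concrete, here is a minimal sketch of the before/after syncer interface. It is an illustration under assumptions, not the exact code in this commit: the parameter lists (remoteURL, tmpDir, progress) and the interface names are hypothetical.

package vcssyncer

import (
	"context"
	"io"
	"os/exec"

	"github.com/sourcegraph/sourcegraph/internal/api"
	"github.com/sourcegraph/sourcegraph/internal/vcs"
)

// Old shape (sketch): the syncer only handed gitserver a command to run,
// so clone-time side effects (LFS smudge config, log redaction, ...) lived
// in gitserver's doClone rather than in the syncer implementations.
type cloneCommandSyncer interface {
	CloneCommand(ctx context.Context, remoteURL *vcs.URL, tmpPath string) (*exec.Cmd, error)
}

// New shape (sketch): the syncer owns the whole clone. gitserver only cares
// that tmpDir holds a valid git repository when Clone returns, so a syncer
// can run several internal steps (fetch, convert, repack, ...) and stream
// progress through the writer. Names are illustrative, not the exact
// signature introduced here.
type cloneSyncer interface {
	Clone(ctx context.Context, repo api.RepoName, remoteURL *vcs.URL, tmpDir string, progress io.Writer) error
}

The point of the new shape is that gitserver no longer needs to know how a syncer produces the repository: any number of steps can happen behind Clone, as long as tmpDir ends up containing a valid git repo.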
@@ -16,22 +16,18 @@ import (
"time"

"github.com/stretchr/testify/require"
"golang.org/x/time/rate"

"github.com/sourcegraph/log/logtest"

"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
"github.com/sourcegraph/sourcegraph/lib/errors"
@@ -368,30 +364,6 @@ func TestCleanupExpired(t *testing.T) {
t.Fatal(err)
}

getRemoteURL := func(ctx context.Context, name api.RepoName) (string, error) {
if name == "repo-boom" {
return "", errors.Errorf("boom")
}
return remote, nil
}

logger := logtest.Scoped(t)
s := &Server{
Logger: logger,
ObservationCtx: observation.TestContextTB(t),
ReposDir: root,
GetRemoteURLFunc: getRemoteURL,
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
return vcssyncer.NewGitRepoSyncer(wrexec.NewNoOpRecordingCommandFactory()), nil
},
Hostname: "test-gitserver",
DB: database.NewDB(logger, dbtest.NewDB(t)),
RecordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
Locker: NewRepositoryLocker(),
RPSLimiter: ratelimit.NewInstrumentedLimiter("test", rate.NewLimiter(100, 10)),
}
s.Handler() // Handler as a side-effect sets up Server

modTime := func(path string) time.Time {
t.Helper()
fi, err := os.Stat(filepath.Join(path, "HEAD"))
@@ -439,13 +411,6 @@ func TestCleanupExpired(t *testing.T) {
}

now := time.Now()
repoNewTime := modTime(repoNew)
repoOldTime := modTime(repoOld)
repoGCNewTime := modTime(repoGCNew)
repoGCOldTime := modTime(repoGCOld)
repoCorruptTime := modTime(repoBoom)
repoPerforceTime := modTime(repoPerforce)
repoPerforceGCOldTime := modTime(repoPerforceGCOld)
repoBoomTime := modTime(repoBoom)
repoBoomRecloneTime := recloneTime(repoBoom)
@@ -453,6 +418,15 @@ func TestCleanupExpired(t *testing.T) {
t.Fatal(err)
}

calledClone := []api.RepoName{}
cloneRepo := func(ctx context.Context, repo api.RepoName, opts CloneOptions) (cloneProgress string, err error) {
if repo == "repo-boom" {
return "", errors.New("boom")
}
calledClone = append(calledClone, repo)
return "done", nil
}

cleanupRepos(
context.Background(),
logtest.Scoped(t),
@@ -460,35 +434,10 @@ func TestCleanupExpired(t *testing.T) {
wrexec.NewNoOpRecordingCommandFactory(),
"test-gitserver",
root,
s.CloneRepo,
cloneRepo,
gitserver.GitserverAddresses{Addresses: []string{"test-gitserver"}},
)

// repos that shouldn't be re-cloned
if repoNewTime.Before(modTime(repoNew)) {
t.Error("expected repoNew to not be modified")
}
if repoGCNewTime.Before(modTime(repoGCNew)) {
t.Error("expected repoGCNew to not be modified")
}
if repoPerforceTime.Before(modTime(repoPerforce)) {
t.Error("expected repoPerforce to not be modified")
}
if repoPerforceGCOldTime.Before(modTime(repoPerforceGCOld)) {
t.Error("expected repoPerforceGCOld to not be modified")
}

// repos that should be recloned
if !repoOldTime.Before(modTime(repoOld)) {
t.Error("expected repoOld to be recloned during clean up")
}
if !repoGCOldTime.Before(modTime(repoGCOld)) {
t.Error("expected repoGCOld to be recloned during clean up")
}
if !repoCorruptTime.Before(modTime(repoCorrupt)) {
t.Error("expected repoCorrupt to be recloned during clean up")
}

// repos that fail to clone need to have recloneTime updated
if repoBoomTime.Before(modTime(repoBoom)) {
t.Fatal("expected repoBoom to fail to re-clone due to hardcoding getRemoteURL failure")
@@ -503,6 +452,8 @@ func TestCleanupExpired(t *testing.T) {
if _, err := os.Stat(repoNonBare); err == nil {
t.Fatal("non-bare repo was not removed")
}

require.Equal(t, []api.RepoName{"repo-corrupt", "repo-gc-old", "repo-old"}, calledClone)
}

func TestCleanup_RemoveNonExistentRepos(t *testing.T) {
@@ -12,6 +12,8 @@ go_library(
"//internal/trace",
"//internal/wrexec",
"//lib/errors",
"//lib/process",
"@com_github_sourcegraph_conc//pool",
"@com_github_sourcegraph_log//:log",
"@io_opentelemetry_go_otel//attribute",
],
@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"io"
"io/fs"
"os"
"os/exec"
"path"
@@ -13,6 +14,7 @@ import (
"syscall"
"time"

"github.com/sourcegraph/conc/pool"
"github.com/sourcegraph/log"
"go.opentelemetry.io/otel/attribute"
@@ -21,6 +23,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/trace" //nolint:staticcheck // OT is deprecated
"github.com/sourcegraph/sourcegraph/internal/wrexec"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/lib/process"
)

// ShortGitCommandTimeout returns the timeout for git commands that should not
@@ -84,9 +87,7 @@ func RunCommand(ctx context.Context, cmd wrexec.Cmder) (exitCode int, err error)
attribute.StringSlice("args", cmd.Unwrap().Args),
attribute.String("dir", cmd.Unwrap().Dir))
defer func() {
if err != nil {
tr.SetAttributes(attribute.Int("exitCode", exitCode))
}
tr.SetAttributes(attribute.Int("exitCode", exitCode))
tr.EndWithErr(&err)
}()
@@ -108,9 +109,8 @@ func RunCommandCombinedOutput(ctx context.Context, cmd wrexec.Cmder) ([]byte, er
return buf.Bytes(), err
}

// RunRemoteGitCommand runs the command after applying the remote options. If
// progress is not nil, all output is written to it in a separate goroutine.
func RunRemoteGitCommand(ctx context.Context, cmd wrexec.Cmder, configRemoteOpts bool, progress io.Writer) ([]byte, error) {
// RunRemoteGitCommand runs the command after applying the remote options.
func RunRemoteGitCommand(ctx context.Context, cmd wrexec.Cmder, configRemoteOpts bool) ([]byte, error) {
if configRemoteOpts {
// Inherit process environment. This allows admins to configure
// variables like http_proxy/etc.
@@ -120,27 +120,14 @@ func RunRemoteGitCommand(ctx context.Context, cmd wrexec.Cmder, configRemoteOpts
configureRemoteGitCommand(cmd.Unwrap(), tlsExternal())
}

var b interface {
Bytes() []byte
}

if progress != nil {
var pw progressWriter
mr := io.MultiWriter(&pw, progress)
cmd.Unwrap().Stdout = mr
cmd.Unwrap().Stderr = mr
b = &pw
} else {
var buf bytes.Buffer
cmd.Unwrap().Stdout = &buf
cmd.Unwrap().Stderr = &buf
b = &buf
}
var buf bytes.Buffer
cmd.Unwrap().Stdout = &buf
cmd.Unwrap().Stderr = &buf

// We don't care about exitStatus, we just rely on error.
_, err := RunCommand(ctx, cmd)

return b.Bytes(), err
return buf.Bytes(), err
}

// tlsExternal will create a new cache for this gitserer process and store the certificates set in
@@ -148,66 +135,6 @@ func RunRemoteGitCommand(ctx context.Context, cmd wrexec.Cmder, configRemoteOpts
// This creates a long lived
var tlsExternal = conf.Cached(getTlsExternalDoNotInvoke)

// progressWriter is an io.Writer that writes to a buffer.
// '\r' resets the write offset to the index after last '\n' in the buffer,
// or the beginning of the buffer if a '\n' has not been written yet.
//
// This exists to remove intermediate progress reports from "git clone
// --progress".
type progressWriter struct {
// writeOffset is the offset in buf where the next write should begin.
writeOffset int

// afterLastNewline is the index after the last '\n' in buf
// or 0 if there is no '\n' in buf.
afterLastNewline int

buf []byte
}

func (w *progressWriter) Write(p []byte) (n int, err error) {
l := len(p)
for {
if len(p) == 0 {
// If p ends in a '\r' we still want to include that in the buffer until it is overwritten.
break
}
idx := bytes.IndexAny(p, "\r\n")
if idx == -1 {
w.buf = append(w.buf[:w.writeOffset], p...)
w.writeOffset = len(w.buf)
break
}
switch p[idx] {
case '\n':
w.buf = append(w.buf[:w.writeOffset], p[:idx+1]...)
w.writeOffset = len(w.buf)
w.afterLastNewline = len(w.buf)
p = p[idx+1:]
case '\r':
w.buf = append(w.buf[:w.writeOffset], p[:idx+1]...)
// Record that our next write should overwrite the data after the most recent newline.
// Don't slice it off immediately here, because we want to be able to return that output
// until it is overwritten.
w.writeOffset = w.afterLastNewline
p = p[idx+1:]
default:
panic(fmt.Sprintf("unexpected char %q", p[idx]))
}
}
return l, nil
}

// String returns the contents of the buffer as a string.
func (w *progressWriter) String() string {
return string(w.buf)
}

// Bytes returns the contents of the buffer.
func (w *progressWriter) Bytes() []byte {
return w.buf
}

type tlsConfig struct {
// Whether to not verify the SSL certificate when fetching or pushing over
// HTTPS.
@ -298,6 +225,10 @@ func getTlsExternalDoNotInvoke() *tlsConfig {
|
||||
}
|
||||
}
|
||||
|
||||
func ConfigureRemoteGitCommand(cmd *exec.Cmd) {
|
||||
configureRemoteGitCommand(cmd, tlsExternal())
|
||||
}
|
||||
|
||||
func configureRemoteGitCommand(cmd *exec.Cmd, tlsConf *tlsConfig) {
|
||||
// We split here in case the first command is an absolute path to the executable
|
||||
// which allows us to safely match lower down
|
||||
@ -384,3 +315,119 @@ func WrapCmdError(cmd *exec.Cmd, err error) error {
|
||||
}
|
||||
return errors.Wrapf(err, "%s %s failed", cmd.Path, strings.Join(cmd.Args, " "))
|
||||
}
|
||||
|
||||
type RedactorFunc func(string) string
|
||||
|
||||
// The passed cmd should be bound to the passed context.
|
||||
func RunCommandWriteOutput(ctx context.Context, cmd wrexec.Cmder, writer io.Writer, redactor RedactorFunc) (int, error) {
|
||||
exitStatus := UnsetExitStatus
|
||||
|
||||
// Create a cancel context so that on exit we always properly close the command
|
||||
// pipes attached later by process.PipeOutputUnbuffered.
|
||||
ctx, cancel := context.WithCancel(ctx)
|
||||
defer cancel()
|
||||
|
||||
// Make sure we only write to the writer from one goroutine at a time, either
|
||||
// stdout or stderr.
|
||||
syncWriter := newSynchronizedWriter(writer)
|
||||
|
||||
outputRedactor := func(w io.Writer, r io.Reader) error {
|
||||
sc := process.NewOutputScannerWithSplit(r, scanLinesWithCRLF)
|
||||
for sc.Scan() {
|
||||
line := sc.Text()
|
||||
if _, err := fmt.Fprint(w, redactor(line)); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// We can ignore ErrClosed because we get that if a process crashes, it will
|
||||
// be handled by cmd.Wait.
|
||||
if err := sc.Err(); err != nil && !errors.Is(err, fs.ErrClosed) {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
eg, err := process.PipeProcessOutput(
|
||||
ctx,
|
||||
cmd,
|
||||
syncWriter,
|
||||
syncWriter,
|
||||
outputRedactor,
|
||||
)
|
||||
if err != nil {
|
||||
return exitStatus, errors.Wrap(err, "failed to pipe output")
|
||||
}
|
||||
|
||||
if err = cmd.Start(); err != nil {
|
||||
return exitStatus, errors.Wrap(err, "failed to start command")
|
||||
}
|
||||
|
||||
// Wait for either the command to finish (aka the pipewriters get closed), or
|
||||
// for a context cancelation.
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
case err := <-watchErrGroup(eg):
|
||||
if err != nil {
|
||||
return exitStatus, errors.Wrap(err, "failed to read output")
|
||||
}
|
||||
}
|
||||
|
||||
err = cmd.Wait()
|
||||
|
||||
if ps := cmd.Unwrap().ProcessState; ps != nil && ps.Sys() != nil {
|
||||
if ws, ok := ps.Sys().(syscall.WaitStatus); ok {
|
||||
exitStatus = ws.ExitStatus()
|
||||
}
|
||||
}
|
||||
|
||||
return exitStatus, errors.Wrap(err, "command failed")
|
||||
}
|
||||
|
||||
// watchErrGroup turns a pool.ErrorPool into a channel that will receive the error
|
||||
// returned from the pool once it returns.
|
||||
func watchErrGroup(g *pool.ErrorPool) <-chan error {
|
||||
ch := make(chan error)
|
||||
go func() {
|
||||
ch <- g.Wait()
|
||||
close(ch)
|
||||
}()
|
||||
|
||||
return ch
|
||||
}
|
||||
|
||||
// scanLinesWithCRLF is a modified version of bufio.ScanLines that retains
|
||||
// the trailing newline byte(s) in the returned token and splits on either CR
|
||||
// or LF.
|
||||
func scanLinesWithCRLF(data []byte, atEOF bool) (advance int, token []byte, err error) {
|
||||
if atEOF && len(data) == 0 {
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
if i := bytes.IndexAny(data, "\r\n"); i >= 0 {
|
||||
// We have a full newline-terminated line.
|
||||
return i + 1, data[0 : i+1], nil
|
||||
}
|
||||
|
||||
// If we're at EOF, we have a final, non-terminated line. Return it.
|
||||
if atEOF {
|
||||
return len(data), data, nil
|
||||
}
|
||||
|
||||
// Request more data.
|
||||
return 0, nil, nil
|
||||
}
|
||||
|
||||
func newSynchronizedWriter(w io.Writer) *synchronizedWriter {
|
||||
return &synchronizedWriter{writer: w}
|
||||
}
|
||||
|
||||
type synchronizedWriter struct {
|
||||
mu sync.Mutex
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
func (sw *synchronizedWriter) Write(p []byte) (n int, err error) {
|
||||
sw.mu.Lock()
|
||||
defer sw.mu.Unlock()
|
||||
return sw.writer.Write(p)
|
||||
}
|
||||
|
||||
@ -231,116 +231,3 @@ func TestConfigureRemoteGitCommand_tls(t *testing.T) {
|
||||
assert.Equal(t, want, cmd.Env)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProgressWriter(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
writes []string
|
||||
text string
|
||||
}{
|
||||
{
|
||||
name: "identity",
|
||||
writes: []string{"hello"},
|
||||
text: "hello",
|
||||
},
|
||||
{
|
||||
name: "single write begin newline",
|
||||
writes: []string{"\nhelloworld"},
|
||||
text: "\nhelloworld",
|
||||
},
|
||||
{
|
||||
name: "single write contains newline",
|
||||
writes: []string{"hello\nworld"},
|
||||
text: "hello\nworld",
|
||||
},
|
||||
{
|
||||
name: "single write end newline",
|
||||
writes: []string{"helloworld\n"},
|
||||
text: "helloworld\n",
|
||||
},
|
||||
{
|
||||
name: "first write end newline",
|
||||
writes: []string{"hello\n", "world"},
|
||||
text: "hello\nworld",
|
||||
},
|
||||
{
|
||||
name: "second write begin newline",
|
||||
writes: []string{"hello", "\nworld"},
|
||||
text: "hello\nworld",
|
||||
},
|
||||
{
|
||||
name: "single write begin return",
|
||||
writes: []string{"\rhelloworld"},
|
||||
text: "helloworld",
|
||||
},
|
||||
{
|
||||
name: "single write contains return",
|
||||
writes: []string{"hello\rworld"},
|
||||
text: "world",
|
||||
},
|
||||
{
|
||||
name: "single write end return",
|
||||
writes: []string{"helloworld\r"},
|
||||
text: "helloworld\r",
|
||||
},
|
||||
{
|
||||
name: "first write contains return",
|
||||
writes: []string{"hel\rlo", "world"},
|
||||
text: "loworld",
|
||||
},
|
||||
{
|
||||
name: "first write end return",
|
||||
writes: []string{"hello\r", "world"},
|
||||
text: "world",
|
||||
},
|
||||
{
|
||||
name: "second write begin return",
|
||||
writes: []string{"hello", "\rworld"},
|
||||
text: "world",
|
||||
},
|
||||
{
|
||||
name: "second write contains return",
|
||||
writes: []string{"hello", "wor\rld"},
|
||||
text: "ld",
|
||||
},
|
||||
{
|
||||
name: "second write ends return",
|
||||
writes: []string{"hello", "world\r"},
|
||||
text: "helloworld\r",
|
||||
},
|
||||
{
|
||||
name: "third write",
|
||||
writes: []string{"hello", "world\r", "hola"},
|
||||
text: "hola",
|
||||
},
|
||||
{
|
||||
name: "progress one write",
|
||||
writes: []string{"progress\n1%\r20%\r100%\n"},
|
||||
text: "progress\n100%\n",
|
||||
},
|
||||
{
|
||||
name: "progress multiple writes",
|
||||
writes: []string{"progress\n", "1%\r", "2%\r", "100%"},
|
||||
text: "progress\n100%",
|
||||
},
|
||||
{
|
||||
name: "one two three four",
|
||||
writes: []string{"one\ntwotwo\nthreethreethree\rfourfourfourfour\n"},
|
||||
text: "one\ntwotwo\nfourfourfourfour\n",
|
||||
},
|
||||
{
|
||||
name: "real git",
|
||||
writes: []string{"Cloning into bare repository '/Users/nick/.sourcegraph/repos/github.com/nicksnyder/go-i18n/.git'...\nremote: Counting objects: 2148, done. \nReceiving objects: 0% (1/2148) \rReceiving objects: 100% (2148/2148), 473.65 KiB | 366.00 KiB/s, done.\nResolving deltas: 0% (0/1263) \rResolving deltas: 100% (1263/1263), done.\n"},
|
||||
text: "Cloning into bare repository '/Users/nick/.sourcegraph/repos/github.com/nicksnyder/go-i18n/.git'...\nremote: Counting objects: 2148, done. \nReceiving objects: 100% (2148/2148), 473.65 KiB | 366.00 KiB/s, done.\nResolving deltas: 100% (1263/1263), done.\n",
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
var w progressWriter
|
||||
for _, write := range testCase.writes {
|
||||
_, _ = w.Write([]byte(write))
|
||||
}
|
||||
assert.Equal(t, testCase.text, w.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -43,5 +43,6 @@ go_test(
|
||||
"//internal/wrexec",
|
||||
"//lib/errors",
|
||||
"@com_github_google_go_cmp//cmp",
|
||||
"@com_github_stretchr_testify//require",
|
||||
],
|
||||
)
|
||||
|
||||
@ -267,3 +267,14 @@ func CheckSpecArgSafety(spec string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// MakeBareRepo initializes a new bare repo at the given dir.
|
||||
func MakeBareRepo(ctx context.Context, dir string) error {
|
||||
cmd := exec.CommandContext(ctx, "git", "init", "--bare", ".")
|
||||
cmd.Dir = dir
|
||||
out, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to create bare repo: %s", string(out))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@ -10,9 +10,25 @@ import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
|
||||
)
|
||||
|
||||
func TestMakeBareRepo(t *testing.T) {
|
||||
dir := t.TempDir()
|
||||
ctx := context.Background()
|
||||
|
||||
require.NoError(t, MakeBareRepo(ctx, dir))
|
||||
|
||||
// Now verify we created a valid repo.
|
||||
c := exec.CommandContext(ctx, "git", "rev-parse", "HEAD")
|
||||
c.Dir = dir
|
||||
out, err := c.CombinedOutput()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, "HEAD\n", string(out))
|
||||
}
|
||||
|
||||
func BenchmarkQuickRevParseHeadQuickSymbolicRefHead_packed_refs(b *testing.B) {
|
||||
tmp := b.TempDir()
|
||||
|
||||
|
||||
@ -3,11 +3,15 @@ load("//dev:go_defs.bzl", "go_test")
|
||||
|
||||
go_library(
|
||||
name = "integration_tests",
|
||||
srcs = ["test_utils.go"],
|
||||
srcs = [
|
||||
"mocks.go",
|
||||
"test_utils.go",
|
||||
],
|
||||
importpath = "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/integration_tests",
|
||||
visibility = ["//cmd/gitserver:__subpackages__"],
|
||||
deps = [
|
||||
"//cmd/gitserver/internal",
|
||||
"//cmd/gitserver/internal/common",
|
||||
"//cmd/gitserver/internal/vcssyncer",
|
||||
"//internal/api",
|
||||
"//internal/database/dbmocks",
|
||||
@ -32,6 +36,7 @@ go_test(
|
||||
timeout = "short",
|
||||
srcs = [
|
||||
"archivereader_test.go",
|
||||
"clone_test.go",
|
||||
"commits_test.go",
|
||||
"main_test.go",
|
||||
"object_test.go",
|
||||
@ -44,6 +49,7 @@ go_test(
|
||||
env = {"COURSIER_CACHE_DIR": "/tmp"},
|
||||
deps = [
|
||||
"//cmd/gitserver/internal",
|
||||
"//cmd/gitserver/internal/gitserverfs",
|
||||
"//cmd/gitserver/internal/perforce",
|
||||
"//cmd/gitserver/internal/vcssyncer",
|
||||
"//internal/actor",
|
||||
@ -51,7 +57,6 @@ go_test(
|
||||
"//internal/authz",
|
||||
"//internal/authz/subrepoperms",
|
||||
"//internal/conf",
|
||||
"//internal/database",
|
||||
"//internal/database/dbmocks",
|
||||
"//internal/extsvc",
|
||||
"//internal/gitserver",
|
||||
@ -63,9 +68,12 @@ go_test(
|
||||
"//internal/observation",
|
||||
"//internal/ratelimit",
|
||||
"//internal/types",
|
||||
"//internal/vcs",
|
||||
"//internal/wrexec",
|
||||
"//lib/errors",
|
||||
"//schema",
|
||||
"@com_github_derision_test_go_mockgen//testutil/assert",
|
||||
"@com_github_derision_test_go_mockgen//testutil/require",
|
||||
"@com_github_google_go_cmp//cmp",
|
||||
"@com_github_sourcegraph_log//:log",
|
||||
"@com_github_sourcegraph_log//logtest",
|
||||
|
||||
@ -106,7 +106,7 @@ func TestClient_ArchiveReader(t *testing.T) {
|
||||
return "", errors.Errorf("no remote for %s", test.name)
|
||||
},
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
return vcssyncer.NewGitRepoSyncer(wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
RecordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
|
||||
Locker: server.NewRepositoryLocker(),
|
||||
|
||||
cmd/gitserver/internal/integration_tests/clone_test.go (new file, 253 lines)
@@ -0,0 +1,253 @@
|
||||
package inttests
|
||||
|
||||
import (
|
||||
"container/list"
|
||||
"context"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
|
||||
mockassert "github.com/derision-test/go-mockgen/testutil/assert"
|
||||
mockrequire "github.com/derision-test/go-mockgen/testutil/require"
|
||||
"github.com/sourcegraph/log/logtest"
|
||||
"github.com/stretchr/testify/require"
|
||||
"golang.org/x/time/rate"
|
||||
|
||||
server "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/perforce"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
|
||||
"github.com/sourcegraph/sourcegraph/internal/api"
|
||||
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
|
||||
"github.com/sourcegraph/sourcegraph/internal/gitserver"
|
||||
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
|
||||
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
|
||||
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
|
||||
"github.com/sourcegraph/sourcegraph/internal/observation"
|
||||
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
|
||||
"github.com/sourcegraph/sourcegraph/internal/types"
|
||||
"github.com/sourcegraph/sourcegraph/internal/vcs"
|
||||
"github.com/sourcegraph/sourcegraph/internal/wrexec"
|
||||
)
|
||||
|
||||
func TestClone(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
reposDir := filepath.Join(root, "repos")
|
||||
remote := createSimpleGitRepo(t, root)
|
||||
|
||||
logger := logtest.Scoped(t)
|
||||
db := newMockDB()
|
||||
gsStore := dbmocks.NewMockGitserverRepoStore()
|
||||
db.GitserverReposFunc.SetDefaultReturn(gsStore)
|
||||
ctx := context.Background()
|
||||
repo := api.RepoName("github.com/test/repo")
|
||||
|
||||
locker := NewMockRepositoryLocker()
|
||||
lock := NewMockRepositoryLock()
|
||||
locker.TryAcquireFunc.SetDefaultReturn(lock, true)
|
||||
|
||||
s := server.Server{
|
||||
Logger: logger,
|
||||
ReposDir: reposDir,
|
||||
GetRemoteURLFunc: func(_ context.Context, name api.RepoName) (string, error) {
|
||||
require.Equal(t, repo, name)
|
||||
return remote, nil
|
||||
},
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
require.Equal(t, repo, name)
|
||||
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
DB: db,
|
||||
Perforce: perforce.NewService(ctx, observation.TestContextTB(t), logger, db, list.New()),
|
||||
RecordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
|
||||
Locker: locker,
|
||||
RPSLimiter: ratelimit.NewInstrumentedLimiter("GitserverTest", rate.NewLimiter(100, 10)),
|
||||
Hostname: "test-shard",
|
||||
}
|
||||
|
||||
grpcServer := defaults.NewServer(logtest.Scoped(t))
|
||||
proto.RegisterGitserverServiceServer(grpcServer, &server.GRPCServer{Server: &s})
|
||||
|
||||
handler := internalgrpc.MultiplexHandlers(grpcServer, s.Handler())
|
||||
srv := httptest.NewServer(handler)
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
u, _ := url.Parse(srv.URL)
|
||||
addrs := []string{u.Host}
|
||||
source := gitserver.NewTestClientSource(t, addrs)
|
||||
|
||||
cli := gitserver.NewTestClient(t).WithClientSource(source)
|
||||
|
||||
// Requesting a repo update should figure out that the repo is not yet
|
||||
// cloned and call clone. We expect that clone to succeed.
|
||||
_, err := cli.RequestRepoUpdate(ctx, repo, 0)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Should have acquired a lock.
|
||||
mockassert.CalledOnce(t, locker.TryAcquireFunc)
|
||||
// Should have reported status. 21 lines is the output git currently produces.
|
||||
// This number might need to be adjusted over time, but before doing so please
|
||||
// check that the calls actually use the args you would expect them to use.
|
||||
mockassert.CalledN(t, lock.SetStatusFunc, 21)
|
||||
// Should have released the lock.
|
||||
mockassert.CalledOnce(t, lock.ReleaseFunc)
|
||||
|
||||
// Check it was set to cloning first, then cloned.
|
||||
mockassert.CalledN(t, gsStore.SetCloneStatusFunc, 2)
|
||||
mockassert.CalledWith(t, gsStore.SetCloneStatusFunc, mockassert.Values(mockassert.Skip, repo, types.CloneStatusCloning, "test-shard"))
|
||||
mockassert.CalledWith(t, gsStore.SetCloneStatusFunc, mockassert.Values(mockassert.Skip, repo, types.CloneStatusCloned, "test-shard"))
|
||||
|
||||
// Last output should have been stored for the repo.
|
||||
mockrequire.CalledOnce(t, gsStore.SetLastOutputFunc)
|
||||
haveLastOutput := gsStore.SetLastOutputFunc.History()[0].Arg2
|
||||
require.Contains(t, haveLastOutput, "Creating bare repo\n")
|
||||
require.Contains(t, haveLastOutput, "Created bare repo at ")
|
||||
require.Contains(t, haveLastOutput, "Fetching remote contents\n")
|
||||
// Ensure the path is properly redacted. The redactor just takes the whole
|
||||
// remote URL as redacted so this is expected.
|
||||
require.Contains(t, haveLastOutput, "From <redacted>")
|
||||
// Double newlines should not be part of our standard output. They are not
|
||||
// forbidden, but we currently don't use them. So this will guard against
|
||||
// regressions in the log processing to make sure we don't accidentally
|
||||
// introduce blank newlines for CRLF parsing. (Yes this took a while to get
|
||||
// right).
|
||||
require.NotContains(t, haveLastOutput, "\n\n")
|
||||
|
||||
// Check that it was called exactly once total.
|
||||
mockassert.CalledOnce(t, gsStore.SetLastErrorFunc)
|
||||
// And that it was called for the right repo, setting the last error to empty.
|
||||
mockassert.CalledWith(t, gsStore.SetLastErrorFunc, mockassert.Values(mockassert.Skip, repo, "", "test-shard"))
|
||||
|
||||
// Check that the repo is in the expected location on disk.
|
||||
_, err = os.Stat(gitserverfs.RepoDirFromName(reposDir, repo).Path())
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestClone_Fail(t *testing.T) {
|
||||
root := t.TempDir()
|
||||
reposDir := filepath.Join(root, "repos")
|
||||
remote := filepath.Join(root, "non-existing")
|
||||
|
||||
logger := logtest.Scoped(t)
|
||||
db := newMockDB()
|
||||
gsStore := dbmocks.NewMockGitserverRepoStore()
|
||||
db.GitserverReposFunc.SetDefaultReturn(gsStore)
|
||||
ctx := context.Background()
|
||||
repo := api.RepoName("github.com/test/repo")
|
||||
|
||||
locker := NewMockRepositoryLocker()
|
||||
lock := NewMockRepositoryLock()
|
||||
locker.TryAcquireFunc.SetDefaultReturn(lock, true)
|
||||
|
||||
s := server.Server{
|
||||
Logger: logger,
|
||||
ReposDir: reposDir,
|
||||
GetRemoteURLFunc: func(_ context.Context, name api.RepoName) (string, error) {
|
||||
require.Equal(t, repo, name)
|
||||
return remote, nil
|
||||
},
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
require.Equal(t, repo, name)
|
||||
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
DB: db,
|
||||
Perforce: perforce.NewService(ctx, observation.TestContextTB(t), logger, db, list.New()),
|
||||
RecordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
|
||||
Locker: locker,
|
||||
RPSLimiter: ratelimit.NewInstrumentedLimiter("GitserverTest", rate.NewLimiter(100, 10)),
|
||||
Hostname: "test-shard",
|
||||
}
|
||||
|
||||
grpcServer := defaults.NewServer(logtest.Scoped(t))
|
||||
proto.RegisterGitserverServiceServer(grpcServer, &server.GRPCServer{Server: &s})
|
||||
|
||||
handler := internalgrpc.MultiplexHandlers(grpcServer, s.Handler())
|
||||
srv := httptest.NewServer(handler)
|
||||
t.Cleanup(srv.Close)
|
||||
|
||||
u, _ := url.Parse(srv.URL)
|
||||
addrs := []string{u.Host}
|
||||
source := gitserver.NewTestClientSource(t, addrs)
|
||||
|
||||
cli := gitserver.NewTestClient(t).WithClientSource(source)
|
||||
|
||||
// Requesting a repo update should figure out that the repo is not yet
|
||||
// cloned and call clone. We expect that clone to fail, because vcssyncer.IsCloneable
|
||||
// fails here.
|
||||
resp, err := cli.RequestRepoUpdate(ctx, repo, 0)
|
||||
require.NoError(t, err)
|
||||
// Note that this error is from IsCloneable(), not from Clone().
|
||||
require.Contains(t, resp.Error, "error cloning repo: repo github.com/test/repo not cloneable: exit status 128")
|
||||
|
||||
// No lock should have been acquired.
|
||||
mockassert.NotCalled(t, locker.TryAcquireFunc)
|
||||
|
||||
// Check we reported an error.
|
||||
// Check that it was called exactly once total.
|
||||
mockrequire.CalledOnce(t, gsStore.SetLastErrorFunc)
|
||||
// And that it was called for the right repo, setting the last error value.
|
||||
mockassert.CalledWith(t, gsStore.SetLastErrorFunc, mockassert.Values(mockassert.Skip, repo, mockassert.Skip, "test-shard"))
|
||||
require.Contains(t, gsStore.SetLastErrorFunc.History()[0].Arg2, `error cloning repo: repo github.com/test/repo not cloneable: exit status 128 - output: "fatal:`)
|
||||
|
||||
// And no other DB activity has happened.
|
||||
mockassert.NotCalled(t, gsStore.SetCloneStatusFunc)
|
||||
mockassert.NotCalled(t, gsStore.SetLastOutputFunc)
|
||||
|
||||
// ===================
|
||||
|
||||
// Now, fake that the IsCloneable check passes, then Clone will be called
|
||||
// and is expected to fail.
|
||||
vcssyncer.TestGitRepoExists = func(ctx context.Context, remoteURL *vcs.URL) error {
|
||||
return nil
|
||||
}
|
||||
t.Cleanup(func() {
|
||||
vcssyncer.TestGitRepoExists = nil
|
||||
})
|
||||
// Reset mock counters.
|
||||
gsStore = dbmocks.NewMockGitserverRepoStore()
|
||||
db.GitserverReposFunc.SetDefaultReturn(gsStore)
|
||||
|
||||
// Requesting another repo update should figure out that the repo is not yet
|
||||
// cloned and call clone. We expect that clone to fail, but in the vcssyncer.Clone
|
||||
// stage this time, not vcssyncer.IsCloneable.
|
||||
resp, err = cli.RequestRepoUpdate(ctx, repo, 0)
|
||||
require.NoError(t, err)
|
||||
require.Contains(t, resp.Error, "failed to clone github.com/test/repo: clone failed. Output: Creating bare repo\nCreated bare repo at")
|
||||
|
||||
// Should have acquired a lock.
|
||||
mockassert.CalledOnce(t, locker.TryAcquireFunc)
|
||||
// Should have reported status. 7 lines is the output git currently produces.
|
||||
// This number might need to be adjusted over time, but before doing so please
|
||||
// check that the calls actually use the args you would expect them to use.
|
||||
mockassert.CalledN(t, lock.SetStatusFunc, 7)
|
||||
// Should have released the lock.
|
||||
mockassert.CalledOnce(t, lock.ReleaseFunc)
|
||||
|
||||
// Check it was set to cloning first, then uncloned again (since clone failed).
|
||||
mockassert.CalledN(t, gsStore.SetCloneStatusFunc, 2)
|
||||
mockassert.CalledWith(t, gsStore.SetCloneStatusFunc, mockassert.Values(mockassert.Skip, repo, types.CloneStatusCloning, "test-shard"))
|
||||
mockassert.CalledWith(t, gsStore.SetCloneStatusFunc, mockassert.Values(mockassert.Skip, repo, types.CloneStatusNotCloned, "test-shard"))
|
||||
|
||||
// Last output should have been stored for the repo.
|
||||
mockrequire.CalledOnce(t, gsStore.SetLastOutputFunc)
|
||||
haveLastOutput := gsStore.SetLastOutputFunc.History()[0].Arg2
|
||||
require.Contains(t, haveLastOutput, "Creating bare repo\n")
|
||||
require.Contains(t, haveLastOutput, "Created bare repo at ")
|
||||
require.Contains(t, haveLastOutput, "Fetching remote contents\n")
|
||||
// Check that also git output made it here.
|
||||
require.Contains(t, haveLastOutput, "does not appear to be a git repository\n")
|
||||
|
||||
// Check that it was called exactly once total.
|
||||
mockrequire.CalledOnce(t, gsStore.SetLastErrorFunc)
|
||||
// And that it was called for the right repo, setting the last error to empty.
|
||||
mockassert.CalledWith(t, gsStore.SetLastErrorFunc, mockassert.Values(mockassert.Skip, repo, mockassert.Skip, "test-shard"))
|
||||
require.Contains(t, gsStore.SetLastErrorFunc.History()[0].Arg2, "Creating bare repo\n")
|
||||
require.Contains(t, gsStore.SetLastErrorFunc.History()[0].Arg2, "failed to fetch: exit status 128")
|
||||
|
||||
// Check that no repo is in the expected location on disk.
|
||||
_, err = os.Stat(gitserverfs.RepoDirFromName(reposDir, repo).Path())
|
||||
require.Error(t, err)
|
||||
require.True(t, os.IsNotExist(err))
|
||||
}
|
||||
cmd/gitserver/internal/integration_tests/mocks.go (new file, 544 lines)
@@ -0,0 +1,544 @@
|
||||
// Code generated by go-mockgen 1.3.7; DO NOT EDIT.
|
||||
//
|
||||
// This file was generated by running `sg generate` (or `go-mockgen`) at the root of
|
||||
// this repository. To add additional mocks to this or another package, add a new entry
|
||||
// to the mockgen.yaml file in the root of this repository.
|
||||
|
||||
package inttests
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
internal "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal"
|
||||
common "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
|
||||
)
|
||||
|
||||
// MockRepositoryLock is a mock implementation of the RepositoryLock
|
||||
// interface (from the package
|
||||
// github.com/sourcegraph/sourcegraph/cmd/gitserver/internal) used for unit
|
||||
// testing.
|
||||
type MockRepositoryLock struct {
|
||||
// ReleaseFunc is an instance of a mock function object controlling the
|
||||
// behavior of the method Release.
|
||||
ReleaseFunc *RepositoryLockReleaseFunc
|
||||
// SetStatusFunc is an instance of a mock function object controlling
|
||||
// the behavior of the method SetStatus.
|
||||
SetStatusFunc *RepositoryLockSetStatusFunc
|
||||
}
|
||||
|
||||
// NewMockRepositoryLock creates a new mock of the RepositoryLock interface.
|
||||
// All methods return zero values for all results, unless overwritten.
|
||||
func NewMockRepositoryLock() *MockRepositoryLock {
|
||||
return &MockRepositoryLock{
|
||||
ReleaseFunc: &RepositoryLockReleaseFunc{
|
||||
defaultHook: func() {
|
||||
return
|
||||
},
|
||||
},
|
||||
SetStatusFunc: &RepositoryLockSetStatusFunc{
|
||||
defaultHook: func(string) {
|
||||
return
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewStrictMockRepositoryLock creates a new mock of the RepositoryLock
|
||||
// interface. All methods panic on invocation, unless overwritten.
|
||||
func NewStrictMockRepositoryLock() *MockRepositoryLock {
|
||||
return &MockRepositoryLock{
|
||||
ReleaseFunc: &RepositoryLockReleaseFunc{
|
||||
defaultHook: func() {
|
||||
panic("unexpected invocation of MockRepositoryLock.Release")
|
||||
},
|
||||
},
|
||||
SetStatusFunc: &RepositoryLockSetStatusFunc{
|
||||
defaultHook: func(string) {
|
||||
panic("unexpected invocation of MockRepositoryLock.SetStatus")
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewMockRepositoryLockFrom creates a new mock of the MockRepositoryLock
|
||||
// interface. All methods delegate to the given implementation, unless
|
||||
// overwritten.
|
||||
func NewMockRepositoryLockFrom(i internal.RepositoryLock) *MockRepositoryLock {
|
||||
return &MockRepositoryLock{
|
||||
ReleaseFunc: &RepositoryLockReleaseFunc{
|
||||
defaultHook: i.Release,
|
||||
},
|
||||
SetStatusFunc: &RepositoryLockSetStatusFunc{
|
||||
defaultHook: i.SetStatus,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// RepositoryLockReleaseFunc describes the behavior when the Release method
|
||||
// of the parent MockRepositoryLock instance is invoked.
|
||||
type RepositoryLockReleaseFunc struct {
|
||||
defaultHook func()
|
||||
hooks []func()
|
||||
history []RepositoryLockReleaseFuncCall
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// Release delegates to the next hook function in the queue and stores the
|
||||
// parameter and result values of this invocation.
|
||||
func (m *MockRepositoryLock) Release() {
|
||||
m.ReleaseFunc.nextHook()()
|
||||
m.ReleaseFunc.appendCall(RepositoryLockReleaseFuncCall{})
|
||||
return
|
||||
}
|
||||
|
||||
// SetDefaultHook sets function that is called when the Release method of
|
||||
// the parent MockRepositoryLock instance is invoked and the hook queue is
|
||||
// empty.
|
||||
func (f *RepositoryLockReleaseFunc) SetDefaultHook(hook func()) {
|
||||
f.defaultHook = hook
|
||||
}
|
||||
|
||||
// PushHook adds a function to the end of hook queue. Each invocation of the
|
||||
// Release method of the parent MockRepositoryLock instance invokes the hook
|
||||
// at the front of the queue and discards it. After the queue is empty, the
|
||||
// default hook function is invoked for any future action.
|
||||
func (f *RepositoryLockReleaseFunc) PushHook(hook func()) {
|
||||
f.mutex.Lock()
|
||||
f.hooks = append(f.hooks, hook)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// SetDefaultReturn calls SetDefaultHook with a function that returns the
|
||||
// given values.
|
||||
func (f *RepositoryLockReleaseFunc) SetDefaultReturn() {
|
||||
f.SetDefaultHook(func() {
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// PushReturn calls PushHook with a function that returns the given values.
|
||||
func (f *RepositoryLockReleaseFunc) PushReturn() {
|
||||
f.PushHook(func() {
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (f *RepositoryLockReleaseFunc) nextHook() func() {
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
|
||||
if len(f.hooks) == 0 {
|
||||
return f.defaultHook
|
||||
}
|
||||
|
||||
hook := f.hooks[0]
|
||||
f.hooks = f.hooks[1:]
|
||||
return hook
|
||||
}
|
||||
|
||||
func (f *RepositoryLockReleaseFunc) appendCall(r0 RepositoryLockReleaseFuncCall) {
|
||||
f.mutex.Lock()
|
||||
f.history = append(f.history, r0)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// History returns a sequence of RepositoryLockReleaseFuncCall objects
|
||||
// describing the invocations of this function.
|
||||
func (f *RepositoryLockReleaseFunc) History() []RepositoryLockReleaseFuncCall {
|
||||
f.mutex.Lock()
|
||||
history := make([]RepositoryLockReleaseFuncCall, len(f.history))
|
||||
copy(history, f.history)
|
||||
f.mutex.Unlock()
|
||||
|
||||
return history
|
||||
}
|
||||
|
||||
// RepositoryLockReleaseFuncCall is an object that describes an invocation
|
||||
// of method Release on an instance of MockRepositoryLock.
|
||||
type RepositoryLockReleaseFuncCall struct{}
|
||||
|
||||
// Args returns an interface slice containing the arguments of this
|
||||
// invocation.
|
||||
func (c RepositoryLockReleaseFuncCall) Args() []interface{} {
|
||||
return []interface{}{}
|
||||
}
|
||||
|
||||
// Results returns an interface slice containing the results of this
|
||||
// invocation.
|
||||
func (c RepositoryLockReleaseFuncCall) Results() []interface{} {
|
||||
return []interface{}{}
|
||||
}
|
||||
|
||||
// RepositoryLockSetStatusFunc describes the behavior when the SetStatus
|
||||
// method of the parent MockRepositoryLock instance is invoked.
|
||||
type RepositoryLockSetStatusFunc struct {
|
||||
defaultHook func(string)
|
||||
hooks []func(string)
|
||||
history []RepositoryLockSetStatusFuncCall
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// SetStatus delegates to the next hook function in the queue and stores the
|
||||
// parameter and result values of this invocation.
|
||||
func (m *MockRepositoryLock) SetStatus(v0 string) {
|
||||
m.SetStatusFunc.nextHook()(v0)
|
||||
m.SetStatusFunc.appendCall(RepositoryLockSetStatusFuncCall{v0})
|
||||
return
|
||||
}
|
||||
|
||||
// SetDefaultHook sets function that is called when the SetStatus method of
|
||||
// the parent MockRepositoryLock instance is invoked and the hook queue is
|
||||
// empty.
|
||||
func (f *RepositoryLockSetStatusFunc) SetDefaultHook(hook func(string)) {
|
||||
f.defaultHook = hook
|
||||
}
|
||||
|
||||
// PushHook adds a function to the end of hook queue. Each invocation of the
|
||||
// SetStatus method of the parent MockRepositoryLock instance invokes the
|
||||
// hook at the front of the queue and discards it. After the queue is empty,
|
||||
// the default hook function is invoked for any future action.
|
||||
func (f *RepositoryLockSetStatusFunc) PushHook(hook func(string)) {
|
||||
f.mutex.Lock()
|
||||
f.hooks = append(f.hooks, hook)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// SetDefaultReturn calls SetDefaultHook with a function that returns the
|
||||
// given values.
|
||||
func (f *RepositoryLockSetStatusFunc) SetDefaultReturn() {
|
||||
f.SetDefaultHook(func(string) {
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
// PushReturn calls PushHook with a function that returns the given values.
|
||||
func (f *RepositoryLockSetStatusFunc) PushReturn() {
|
||||
f.PushHook(func(string) {
|
||||
return
|
||||
})
|
||||
}
|
||||
|
||||
func (f *RepositoryLockSetStatusFunc) nextHook() func(string) {
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
|
||||
if len(f.hooks) == 0 {
|
||||
return f.defaultHook
|
||||
}
|
||||
|
||||
hook := f.hooks[0]
|
||||
f.hooks = f.hooks[1:]
|
||||
return hook
|
||||
}
|
||||
|
||||
func (f *RepositoryLockSetStatusFunc) appendCall(r0 RepositoryLockSetStatusFuncCall) {
|
||||
f.mutex.Lock()
|
||||
f.history = append(f.history, r0)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// History returns a sequence of RepositoryLockSetStatusFuncCall objects
|
||||
// describing the invocations of this function.
|
||||
func (f *RepositoryLockSetStatusFunc) History() []RepositoryLockSetStatusFuncCall {
|
||||
f.mutex.Lock()
|
||||
history := make([]RepositoryLockSetStatusFuncCall, len(f.history))
|
||||
copy(history, f.history)
|
||||
f.mutex.Unlock()
|
||||
|
||||
return history
|
||||
}
|
||||
|
||||
// RepositoryLockSetStatusFuncCall is an object that describes an invocation
|
||||
// of method SetStatus on an instance of MockRepositoryLock.
|
||||
type RepositoryLockSetStatusFuncCall struct {
|
||||
// Arg0 is the value of the 1st argument passed to this method
|
||||
// invocation.
|
||||
Arg0 string
|
||||
}
|
||||
|
||||
// Args returns an interface slice containing the arguments of this
|
||||
// invocation.
|
||||
func (c RepositoryLockSetStatusFuncCall) Args() []interface{} {
|
||||
return []interface{}{c.Arg0}
|
||||
}
|
||||
|
||||
// Results returns an interface slice containing the results of this
|
||||
// invocation.
|
||||
func (c RepositoryLockSetStatusFuncCall) Results() []interface{} {
|
||||
return []interface{}{}
|
||||
}
|
||||
|
||||
// MockRepositoryLocker is a mock implementation of the RepositoryLocker
|
||||
// interface (from the package
|
||||
// github.com/sourcegraph/sourcegraph/cmd/gitserver/internal) used for unit
|
||||
// testing.
|
||||
type MockRepositoryLocker struct {
|
||||
// StatusFunc is an instance of a mock function object controlling the
|
||||
// behavior of the method Status.
|
||||
StatusFunc *RepositoryLockerStatusFunc
|
||||
// TryAcquireFunc is an instance of a mock function object controlling
|
||||
// the behavior of the method TryAcquire.
|
||||
TryAcquireFunc *RepositoryLockerTryAcquireFunc
|
||||
}
|
||||
|
||||
// NewMockRepositoryLocker creates a new mock of the RepositoryLocker
|
||||
// interface. All methods return zero values for all results, unless
|
||||
// overwritten.
|
||||
func NewMockRepositoryLocker() *MockRepositoryLocker {
|
||||
return &MockRepositoryLocker{
|
||||
StatusFunc: &RepositoryLockerStatusFunc{
|
||||
defaultHook: func(common.GitDir) (r0 string, r1 bool) {
|
||||
return
|
||||
},
|
||||
},
|
||||
TryAcquireFunc: &RepositoryLockerTryAcquireFunc{
|
||||
defaultHook: func(common.GitDir, string) (r0 internal.RepositoryLock, r1 bool) {
|
||||
return
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewStrictMockRepositoryLocker creates a new mock of the RepositoryLocker
|
||||
// interface. All methods panic on invocation, unless overwritten.
|
||||
func NewStrictMockRepositoryLocker() *MockRepositoryLocker {
|
||||
return &MockRepositoryLocker{
|
||||
StatusFunc: &RepositoryLockerStatusFunc{
|
||||
defaultHook: func(common.GitDir) (string, bool) {
|
||||
panic("unexpected invocation of MockRepositoryLocker.Status")
|
||||
},
|
||||
},
|
||||
TryAcquireFunc: &RepositoryLockerTryAcquireFunc{
|
||||
defaultHook: func(common.GitDir, string) (internal.RepositoryLock, bool) {
|
||||
panic("unexpected invocation of MockRepositoryLocker.TryAcquire")
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// NewMockRepositoryLockerFrom creates a new mock of the
|
||||
// MockRepositoryLocker interface. All methods delegate to the given
|
||||
// implementation, unless overwritten.
|
||||
func NewMockRepositoryLockerFrom(i internal.RepositoryLocker) *MockRepositoryLocker {
|
||||
return &MockRepositoryLocker{
|
||||
StatusFunc: &RepositoryLockerStatusFunc{
|
||||
defaultHook: i.Status,
|
||||
},
|
||||
TryAcquireFunc: &RepositoryLockerTryAcquireFunc{
|
||||
defaultHook: i.TryAcquire,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
// RepositoryLockerStatusFunc describes the behavior when the Status method
|
||||
// of the parent MockRepositoryLocker instance is invoked.
|
||||
type RepositoryLockerStatusFunc struct {
|
||||
defaultHook func(common.GitDir) (string, bool)
|
||||
hooks []func(common.GitDir) (string, bool)
|
||||
history []RepositoryLockerStatusFuncCall
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// Status delegates to the next hook function in the queue and stores the
|
||||
// parameter and result values of this invocation.
|
||||
func (m *MockRepositoryLocker) Status(v0 common.GitDir) (string, bool) {
|
||||
r0, r1 := m.StatusFunc.nextHook()(v0)
|
||||
m.StatusFunc.appendCall(RepositoryLockerStatusFuncCall{v0, r0, r1})
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// SetDefaultHook sets function that is called when the Status method of the
|
||||
// parent MockRepositoryLocker instance is invoked and the hook queue is
|
||||
// empty.
|
||||
func (f *RepositoryLockerStatusFunc) SetDefaultHook(hook func(common.GitDir) (string, bool)) {
|
||||
f.defaultHook = hook
|
||||
}
|
||||
|
||||
// PushHook adds a function to the end of hook queue. Each invocation of the
|
||||
// Status method of the parent MockRepositoryLocker instance invokes the
|
||||
// hook at the front of the queue and discards it. After the queue is empty,
|
||||
// the default hook function is invoked for any future action.
|
||||
func (f *RepositoryLockerStatusFunc) PushHook(hook func(common.GitDir) (string, bool)) {
|
||||
f.mutex.Lock()
|
||||
f.hooks = append(f.hooks, hook)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// SetDefaultReturn calls SetDefaultHook with a function that returns the
|
||||
// given values.
|
||||
func (f *RepositoryLockerStatusFunc) SetDefaultReturn(r0 string, r1 bool) {
|
||||
f.SetDefaultHook(func(common.GitDir) (string, bool) {
|
||||
return r0, r1
|
||||
})
|
||||
}
|
||||
|
||||
// PushReturn calls PushHook with a function that returns the given values.
|
||||
func (f *RepositoryLockerStatusFunc) PushReturn(r0 string, r1 bool) {
|
||||
f.PushHook(func(common.GitDir) (string, bool) {
|
||||
return r0, r1
|
||||
})
|
||||
}
|
||||
|
||||
func (f *RepositoryLockerStatusFunc) nextHook() func(common.GitDir) (string, bool) {
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
|
||||
if len(f.hooks) == 0 {
|
||||
return f.defaultHook
|
||||
}
|
||||
|
||||
hook := f.hooks[0]
|
||||
f.hooks = f.hooks[1:]
|
||||
return hook
|
||||
}
|
||||
|
||||
func (f *RepositoryLockerStatusFunc) appendCall(r0 RepositoryLockerStatusFuncCall) {
|
||||
f.mutex.Lock()
|
||||
f.history = append(f.history, r0)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// History returns a sequence of RepositoryLockerStatusFuncCall objects
|
||||
// describing the invocations of this function.
|
||||
func (f *RepositoryLockerStatusFunc) History() []RepositoryLockerStatusFuncCall {
|
||||
f.mutex.Lock()
|
||||
history := make([]RepositoryLockerStatusFuncCall, len(f.history))
|
||||
copy(history, f.history)
|
||||
f.mutex.Unlock()
|
||||
|
||||
return history
|
||||
}
|
||||
|
||||
// RepositoryLockerStatusFuncCall is an object that describes an invocation
|
||||
// of method Status on an instance of MockRepositoryLocker.
|
||||
type RepositoryLockerStatusFuncCall struct {
|
||||
// Arg0 is the value of the 1st argument passed to this method
|
||||
// invocation.
|
||||
Arg0 common.GitDir
|
||||
// Result0 is the value of the 1st result returned from this method
|
||||
// invocation.
|
||||
Result0 string
|
||||
// Result1 is the value of the 2nd result returned from this method
|
||||
// invocation.
|
||||
Result1 bool
|
||||
}
|
||||
|
||||
// Args returns an interface slice containing the arguments of this
|
||||
// invocation.
|
||||
func (c RepositoryLockerStatusFuncCall) Args() []interface{} {
|
||||
return []interface{}{c.Arg0}
|
||||
}
|
||||
|
||||
// Results returns an interface slice containing the results of this
|
||||
// invocation.
|
||||
func (c RepositoryLockerStatusFuncCall) Results() []interface{} {
|
||||
return []interface{}{c.Result0, c.Result1}
|
||||
}
|
||||
|
||||
// RepositoryLockerTryAcquireFunc describes the behavior when the TryAcquire
|
||||
// method of the parent MockRepositoryLocker instance is invoked.
|
||||
type RepositoryLockerTryAcquireFunc struct {
|
||||
defaultHook func(common.GitDir, string) (internal.RepositoryLock, bool)
|
||||
hooks []func(common.GitDir, string) (internal.RepositoryLock, bool)
|
||||
history []RepositoryLockerTryAcquireFuncCall
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// TryAcquire delegates to the next hook function in the queue and stores
|
||||
// the parameter and result values of this invocation.
|
||||
func (m *MockRepositoryLocker) TryAcquire(v0 common.GitDir, v1 string) (internal.RepositoryLock, bool) {
|
||||
r0, r1 := m.TryAcquireFunc.nextHook()(v0, v1)
|
||||
m.TryAcquireFunc.appendCall(RepositoryLockerTryAcquireFuncCall{v0, v1, r0, r1})
|
||||
return r0, r1
|
||||
}
|
||||
|
||||
// SetDefaultHook sets function that is called when the TryAcquire method of
|
||||
// the parent MockRepositoryLocker instance is invoked and the hook queue is
|
||||
// empty.
|
||||
func (f *RepositoryLockerTryAcquireFunc) SetDefaultHook(hook func(common.GitDir, string) (internal.RepositoryLock, bool)) {
|
||||
f.defaultHook = hook
|
||||
}
|
||||
|
||||
// PushHook adds a function to the end of hook queue. Each invocation of the
|
||||
// TryAcquire method of the parent MockRepositoryLocker instance invokes the
|
||||
// hook at the front of the queue and discards it. After the queue is empty,
|
||||
// the default hook function is invoked for any future action.
|
||||
func (f *RepositoryLockerTryAcquireFunc) PushHook(hook func(common.GitDir, string) (internal.RepositoryLock, bool)) {
|
||||
f.mutex.Lock()
|
||||
f.hooks = append(f.hooks, hook)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// SetDefaultReturn calls SetDefaultHook with a function that returns the
|
||||
// given values.
|
||||
func (f *RepositoryLockerTryAcquireFunc) SetDefaultReturn(r0 internal.RepositoryLock, r1 bool) {
|
||||
f.SetDefaultHook(func(common.GitDir, string) (internal.RepositoryLock, bool) {
|
||||
return r0, r1
|
||||
})
|
||||
}
|
||||
|
||||
// PushReturn calls PushHook with a function that returns the given values.
|
||||
func (f *RepositoryLockerTryAcquireFunc) PushReturn(r0 internal.RepositoryLock, r1 bool) {
|
||||
f.PushHook(func(common.GitDir, string) (internal.RepositoryLock, bool) {
|
||||
return r0, r1
|
||||
})
|
||||
}
|
||||
|
||||
func (f *RepositoryLockerTryAcquireFunc) nextHook() func(common.GitDir, string) (internal.RepositoryLock, bool) {
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
|
||||
if len(f.hooks) == 0 {
|
||||
return f.defaultHook
|
||||
}
|
||||
|
||||
hook := f.hooks[0]
|
||||
f.hooks = f.hooks[1:]
|
||||
return hook
|
||||
}
|
||||
|
||||
func (f *RepositoryLockerTryAcquireFunc) appendCall(r0 RepositoryLockerTryAcquireFuncCall) {
|
||||
f.mutex.Lock()
|
||||
f.history = append(f.history, r0)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// History returns a sequence of RepositoryLockerTryAcquireFuncCall objects
|
||||
// describing the invocations of this function.
|
||||
func (f *RepositoryLockerTryAcquireFunc) History() []RepositoryLockerTryAcquireFuncCall {
|
||||
f.mutex.Lock()
|
||||
history := make([]RepositoryLockerTryAcquireFuncCall, len(f.history))
|
||||
copy(history, f.history)
|
||||
f.mutex.Unlock()
|
||||
|
||||
return history
|
||||
}
|
||||
|
||||
// RepositoryLockerTryAcquireFuncCall is an object that describes an
|
||||
// invocation of method TryAcquire on an instance of MockRepositoryLocker.
|
||||
type RepositoryLockerTryAcquireFuncCall struct {
|
||||
// Arg0 is the value of the 1st argument passed to this method
|
||||
// invocation.
|
||||
Arg0 common.GitDir
|
||||
// Arg1 is the value of the 2nd argument passed to this method
|
||||
// invocation.
|
||||
Arg1 string
|
||||
// Result0 is the value of the 1st result returned from this method
|
||||
// invocation.
|
||||
Result0 internal.RepositoryLock
|
||||
// Result1 is the value of the 2nd result returned from this method
|
||||
// invocation.
|
||||
Result1 bool
|
||||
}
|
||||
|
||||
// Args returns an interface slice containing the arguments of this
|
||||
// invocation.
|
||||
func (c RepositoryLockerTryAcquireFuncCall) Args() []interface{} {
|
||||
return []interface{}{c.Arg0, c.Arg1}
|
||||
}
|
||||
|
||||
// Results returns an interface slice containing the results of this
|
||||
// invocation.
|
||||
func (c RepositoryLockerTryAcquireFuncCall) Results() []interface{} {
|
||||
return []interface{}{c.Result0, c.Result1}
|
||||
}
|
||||
@ -16,7 +16,6 @@ import (
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/perforce"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
|
||||
"github.com/sourcegraph/sourcegraph/internal/api"
|
||||
"github.com/sourcegraph/sourcegraph/internal/database"
|
||||
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
|
||||
"github.com/sourcegraph/sourcegraph/internal/extsvc"
|
||||
"github.com/sourcegraph/sourcegraph/internal/gitserver"
|
||||
@ -77,7 +76,7 @@ func TestClient_ResolveRevisions(t *testing.T) {
|
||||
return remote, nil
|
||||
},
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
return vcssyncer.NewGitRepoSyncer(wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
DB: db,
|
||||
Perforce: perforce.NewService(ctx, observation.TestContextTB(t), logger, db, list.New()),
|
||||
@ -117,7 +116,7 @@ func TestClient_ResolveRevisions(t *testing.T) {
|
||||
|
||||
}
|
||||
|
||||
func newMockDB() database.DB {
|
||||
func newMockDB() *dbmocks.MockDB {
|
||||
db := dbmocks.NewMockDB()
|
||||
db.GitserverReposFunc.SetDefaultReturn(dbmocks.NewMockGitserverRepoStore())
|
||||
db.FeatureFlagsFunc.SetDefaultReturn(dbmocks.NewMockFeatureFlagStore())
|
||||
|
||||
@ -81,7 +81,7 @@ func InitGitserver() {
|
||||
return filepath.Join(root, "remotes", string(name)), nil
|
||||
},
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
return vcssyncer.NewGitRepoSyncer(wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
return vcssyncer.NewGitRepoSyncer(logger, wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
GlobalBatchLogSemaphore: semaphore.NewWeighted(32),
|
||||
DB: db,
|
||||
|
||||
@ -128,7 +128,7 @@ func TestCheckSSRFHeader(t *testing.T) {
|
||||
return "https://" + string(name) + ".git", nil
|
||||
},
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
return vcssyncer.NewGitRepoSyncer(wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
DB: db,
|
||||
Locker: NewRepositoryLocker(),
|
||||
|
||||
@ -155,7 +155,7 @@ func (s *Server) createCommitFromPatch(ctx context.Context, req protocol.CreateC
|
||||
t := time.Now()
|
||||
|
||||
// runRemoteGitCommand since one of our commands could be git push
|
||||
out, err := executil.RunRemoteGitCommand(ctx, s.RecordingCommandFactory.Wrap(ctx, s.Logger, cmd), true, nil)
|
||||
out, err := executil.RunRemoteGitCommand(ctx, s.RecordingCommandFactory.Wrap(ctx, s.Logger, cmd), true)
|
||||
logger := logger.With(
|
||||
log.String("prefix", prefix),
|
||||
log.String("command", redactor.Redact(argsToString(cmd.Args))),
|
||||
|
||||
@ -592,47 +592,49 @@ func (s *Server) repoUpdate(req *protocol.RepoUpdateRequest) protocol.RepoUpdate
|
||||
defer cancel1()
|
||||
ctx, cancel2 := context.WithTimeout(ctx, conf.GitLongCommandTimeout())
|
||||
defer cancel2()
|
||||
|
||||
if !repoCloned(dir) && !s.skipCloneForTests {
|
||||
_, err := s.CloneRepo(ctx, req.Repo, CloneOptions{Block: true})
|
||||
if err != nil {
|
||||
logger.Warn("error cloning repo", log.String("repo", string(req.Repo)), log.Error(err))
|
||||
resp.Error = err.Error()
|
||||
}
|
||||
return resp
|
||||
}
|
||||
|
||||
var statusErr, updateErr error
|
||||
|
||||
if debounce(req.Repo, req.Since) {
|
||||
updateErr = s.doRepoUpdate(ctx, req.Repo, "")
|
||||
}
|
||||
|
||||
// attempts to acquire these values are not contingent on the success of
|
||||
// the update.
|
||||
lastFetched, err := repoLastFetched(dir)
|
||||
if err != nil {
|
||||
statusErr = err
|
||||
} else {
|
||||
var statusErr, updateErr error
|
||||
|
||||
if debounce(req.Repo, req.Since) {
|
||||
updateErr = s.doRepoUpdate(ctx, req.Repo, "")
|
||||
}
|
||||
|
||||
// attempts to acquire these values are not contingent on the success of
|
||||
// the update.
|
||||
lastFetched, err := repoLastFetched(dir)
|
||||
if err != nil {
|
||||
statusErr = err
|
||||
} else {
|
||||
resp.LastFetched = &lastFetched
|
||||
}
|
||||
lastChanged, err := repoLastChanged(dir)
|
||||
if err != nil {
|
||||
statusErr = err
|
||||
} else {
|
||||
resp.LastChanged = &lastChanged
|
||||
}
|
||||
if statusErr != nil {
|
||||
logger.Error("failed to get status of repo", log.String("repo", string(req.Repo)), log.Error(statusErr))
|
||||
// report this error in-band, but still produce a valid response with the
|
||||
// other information.
|
||||
resp.Error = statusErr.Error()
|
||||
}
|
||||
// If an error occurred during update, report it but don't actually make
|
||||
// it into an http error; we want the client to get the information cleanly.
|
||||
// An update error "wins" over a status error.
|
||||
if updateErr != nil {
|
||||
resp.Error = updateErr.Error()
|
||||
} else {
|
||||
s.Perforce.EnqueueChangelistMappingJob(perforce.NewChangelistMappingJob(req.Repo, dir))
|
||||
}
|
||||
resp.LastFetched = &lastFetched
|
||||
}
|
||||
lastChanged, err := repoLastChanged(dir)
|
||||
if err != nil {
|
||||
statusErr = err
|
||||
} else {
|
||||
resp.LastChanged = &lastChanged
|
||||
}
|
||||
if statusErr != nil {
|
||||
logger.Error("failed to get status of repo", log.String("repo", string(req.Repo)), log.Error(statusErr))
|
||||
// report this error in-band, but still produce a valid response with the
|
||||
// other information.
|
||||
resp.Error = statusErr.Error()
|
||||
}
|
||||
// If an error occurred during update, report it but don't actually make
|
||||
// it into an http error; we want the client to get the information cleanly.
|
||||
// An update error "wins" over a status error.
|
||||
if updateErr != nil {
|
||||
resp.Error = updateErr.Error()
|
||||
} else {
|
||||
s.Perforce.EnqueueChangelistMappingJob(perforce.NewChangelistMappingJob(req.Repo, dir))
|
||||
}
|
||||
|
||||
return resp
|
||||
@ -1454,68 +1456,66 @@ func (s *Server) doClone(
|
||||
// We clone to a temporary location first to avoid having incomplete
|
||||
// clones in the repo tree. This also avoids leaving behind corrupt clones
|
||||
// if the clone is interrupted.
|
||||
tmpPath, err := gitserverfs.TempDir(s.ReposDir, "clone-")
|
||||
tmpDir, err := gitserverfs.TempDir(s.ReposDir, "clone-")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer os.RemoveAll(tmpPath)
|
||||
tmpPath = filepath.Join(tmpPath, ".git")
|
||||
tmp := common.GitDir(tmpPath)
|
||||
defer os.RemoveAll(tmpDir)
|
||||
tmpPath := filepath.Join(tmpDir, ".git")
|
||||
|
||||
// It may already be cloned
|
||||
if !repoCloned(dir) {
|
||||
if err := s.DB.GitserverRepos().SetCloneStatus(ctx, repo, types.CloneStatusCloning, s.Hostname); err != nil {
|
||||
s.Logger.Warn("Setting clone status in DB", log.Error(err))
|
||||
s.Logger.Error("Setting clone status in DB", log.Error(err))
|
||||
}
|
||||
}
|
||||
defer func() {
|
||||
// Use a background context to ensure we still update the DB even if we time out
|
||||
if err := s.DB.GitserverRepos().SetCloneStatus(context.Background(), repo, cloneStatus(repoCloned(dir), false), s.Hostname); err != nil {
|
||||
s.Logger.Warn("Setting clone status in DB", log.Error(err))
|
||||
s.Logger.Error("Setting clone status in DB", log.Error(err))
|
||||
}
|
||||
}()
|
||||
|
||||
cmd, err := syncer.CloneCommand(ctx, remoteURL, tmpPath)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get clone command")
|
||||
}
|
||||
if cmd.Env == nil {
|
||||
cmd.Env = os.Environ()
|
||||
logger.Info("cloning repo", log.String("tmp", tmpDir), log.String("dst", dstPath))
|
||||
|
||||
progressReader, progressWriter := io.Pipe()
|
||||
// We also capture the entire output in memory for the call to SetLastOutput
|
||||
// further down.
|
||||
// TODO: This might require a lot of memory depending on the amount of logs
|
||||
// produced, the ideal solution would be that readCloneProgress stores it in
|
||||
// chunks.
|
||||
output := &linebasedBufferedWriter{}
|
||||
eg := readCloneProgress(s.DB, logger, lock, io.TeeReader(progressReader, output), repo)
|
||||
|
||||
cloneErr := syncer.Clone(ctx, repo, remoteURL, dir, tmpPath, progressWriter)
|
||||
progressWriter.Close()
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
s.Logger.Error("reading clone progress", log.Error(err))
|
||||
}
|
||||
|
||||
// see issue #7322: skip LFS content in repositories with Git LFS configured
|
||||
cmd.Env = append(cmd.Env, "GIT_LFS_SKIP_SMUDGE=1")
|
||||
logger.Info("cloning repo", log.String("tmp", tmpPath), log.String("dst", dstPath))
|
||||
|
||||
pr, pw := io.Pipe()
|
||||
defer pw.Close()
|
||||
|
||||
redactor := urlredactor.New(remoteURL)
|
||||
|
||||
go readCloneProgress(s.DB, logger, redactor, lock, pr, repo)
|
||||
|
||||
output, err := executil.RunRemoteGitCommand(ctx, s.RecordingCommandFactory.WrapWithRepoName(ctx, s.Logger, repo, cmd).WithRedactorFunc(redactor.Redact), true, pw)
|
||||
redactedOutput := redactor.Redact(string(output))
|
||||
// best-effort update the output of the clone
|
||||
if err := s.DB.GitserverRepos().SetLastOutput(context.Background(), repo, redactedOutput); err != nil {
|
||||
s.Logger.Warn("Setting last output in DB", log.Error(err))
|
||||
if err := s.DB.GitserverRepos().SetLastOutput(context.Background(), repo, output.String()); err != nil {
|
||||
s.Logger.Error("Setting last output in DB", log.Error(err))
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "clone failed. Output: %s", redactedOutput)
|
||||
if cloneErr != nil {
|
||||
// TODO: Should we really return the entire output here in an error?
|
||||
// It could be a super big error string.
|
||||
return errors.Wrapf(cloneErr, "clone failed. Output: %s", output.String())
|
||||
}
|
||||
|
||||
if testRepoCorrupter != nil {
|
||||
testRepoCorrupter(ctx, tmp)
|
||||
testRepoCorrupter(ctx, common.GitDir(tmpPath))
|
||||
}
|
||||
|
||||
if err := postRepoFetchActions(ctx, logger, s.DB, s.Hostname, s.RecordingCommandFactory, s.ReposDir, repo, tmp, remoteURL, syncer); err != nil {
|
||||
if err := postRepoFetchActions(ctx, logger, s.DB, s.Hostname, s.RecordingCommandFactory, s.ReposDir, repo, common.GitDir(tmpPath), remoteURL, syncer); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if opts.Overwrite {
|
||||
// remove the current repo by putting it into our temporary directory
|
||||
err := fileutil.RenameAndSync(dstPath, filepath.Join(filepath.Dir(tmpPath), "old"))
|
||||
err := fileutil.RenameAndSync(dstPath, filepath.Join(filepath.Dir(tmpDir), "old"))
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return errors.Wrapf(err, "failed to remove old clone")
|
||||
}
|
||||
@ -1536,6 +1536,65 @@ func (s *Server) doClone(
|
||||
return nil
|
||||
}
|
||||
|
||||
// linebasedBufferedWriter is an io.Writer that writes to a buffer.
|
||||
// '\r' resets the write offset to the index after last '\n' in the buffer,
|
||||
// or the beginning of the buffer if a '\n' has not been written yet.
|
||||
//
|
||||
// This exists to remove intermediate progress reports from "git clone
|
||||
// --progress".
|
||||
type linebasedBufferedWriter struct {
|
||||
// writeOffset is the offset in buf where the next write should begin.
|
||||
writeOffset int
|
||||
|
||||
// afterLastNewline is the index after the last '\n' in buf
|
||||
// or 0 if there is no '\n' in buf.
|
||||
afterLastNewline int
|
||||
|
||||
buf []byte
|
||||
}
|
||||
|
||||
func (w *linebasedBufferedWriter) Write(p []byte) (n int, err error) {
|
||||
l := len(p)
|
||||
for {
|
||||
if len(p) == 0 {
|
||||
// If p ends in a '\r' we still want to include that in the buffer until it is overwritten.
|
||||
break
|
||||
}
|
||||
idx := bytes.IndexAny(p, "\r\n")
|
||||
if idx == -1 {
|
||||
w.buf = append(w.buf[:w.writeOffset], p...)
|
||||
w.writeOffset = len(w.buf)
|
||||
break
|
||||
}
|
||||
w.buf = append(w.buf[:w.writeOffset], p[:idx+1]...)
|
||||
switch p[idx] {
|
||||
case '\n':
|
||||
w.writeOffset = len(w.buf)
|
||||
w.afterLastNewline = len(w.buf)
|
||||
p = p[idx+1:]
|
||||
case '\r':
|
||||
// Record that our next write should overwrite the data after the most recent newline.
|
||||
// Don't slice it off immediately here, because we want to be able to return that output
|
||||
// until it is overwritten.
|
||||
w.writeOffset = w.afterLastNewline
|
||||
p = p[idx+1:]
|
||||
default:
|
||||
panic(fmt.Sprintf("unexpected char %q", p[idx]))
|
||||
}
|
||||
}
|
||||
return l, nil
|
||||
}
|
||||
|
||||
// String returns the contents of the buffer as a string.
|
||||
func (w *linebasedBufferedWriter) String() string {
|
||||
return string(w.buf)
|
||||
}
|
||||
|
||||
// Bytes returns the contents of the buffer.
|
||||
func (w *linebasedBufferedWriter) Bytes() []byte {
|
||||
return w.buf
|
||||
}
|
||||
|
||||
func postRepoFetchActions(
|
||||
ctx context.Context,
|
||||
logger log.Logger,
|
||||
@ -1547,58 +1606,66 @@ func postRepoFetchActions(
|
||||
dir common.GitDir,
|
||||
remoteURL *vcs.URL,
|
||||
syncer vcssyncer.VCSSyncer,
|
||||
) error {
|
||||
if err := git.RemoveBadRefs(ctx, dir); err != nil {
|
||||
logger.Warn("failed to remove bad refs", log.String("repo", string(repo)), log.Error(err))
|
||||
) (errs error) {
|
||||
// Note: We use a multi error in this function to try to make as many of the
|
||||
// post repo fetch actions succeed.
|
||||
|
||||
// We run setHEAD first, because other commands further down can fail when no
|
||||
// head exists.
|
||||
if err := setHEAD(ctx, logger, rcf, repo, dir, syncer, remoteURL); err != nil {
|
||||
errs = errors.Append(errs, errors.Wrapf(err, "failed to ensure HEAD exists for repo %q", repo))
|
||||
}
|
||||
|
||||
if err := setHEAD(ctx, logger, rcf, repo, dir, syncer, remoteURL); err != nil {
|
||||
return errors.Wrapf(err, "failed to ensure HEAD exists for repo %q", repo)
|
||||
if err := git.RemoveBadRefs(ctx, dir); err != nil {
|
||||
errs = errors.Append(errs, errors.Wrapf(err, "failed to remove bad refs for repo %q", repo))
|
||||
}
|
||||
|
||||
if err := git.SetRepositoryType(rcf, reposDir, dir, syncer.Type()); err != nil {
|
||||
return errors.Wrapf(err, "failed to set repository type for repo %q", repo)
|
||||
errs = errors.Append(errs, errors.Wrapf(err, "failed to set repository type for repo %q", repo))
|
||||
}
|
||||
|
||||
if err := git.SetGitAttributes(dir); err != nil {
|
||||
return errors.Wrap(err, "setting git attributes")
|
||||
errs = errors.Append(errs, errors.Wrap(err, "setting git attributes"))
|
||||
}
|
||||
|
||||
if err := gitSetAutoGC(rcf, reposDir, dir); err != nil {
|
||||
return errors.Wrap(err, "setting git gc mode")
|
||||
errs = errors.Append(errs, errors.Wrap(err, "setting git gc mode"))
|
||||
}
|
||||
|
||||
// Update the last-changed stamp on disk.
|
||||
if err := setLastChanged(logger, dir); err != nil {
|
||||
return errors.Wrap(err, "failed to update last changed time")
|
||||
errs = errors.Append(errs, errors.Wrap(err, "failed to update last changed time"))
|
||||
}
|
||||
|
||||
// Successfully updated, best-effort updating of db fetch state based on
|
||||
// disk state.
|
||||
if err := setLastFetched(ctx, db, shardID, dir, repo); err != nil {
|
||||
logger.Warn("failed setting last fetch in DB", log.Error(err))
|
||||
errs = errors.Append(errs, errors.Wrap(err, "failed setting last fetch in DB"))
|
||||
}
|
||||
|
||||
// Successfully updated, best-effort calculation of the repo size.
|
||||
repoSizeBytes := gitserverfs.DirSize(dir.Path("."))
|
||||
if err := db.GitserverRepos().SetRepoSize(ctx, repo, repoSizeBytes, shardID); err != nil {
|
||||
logger.Warn("failed to set repo size", log.Error(err))
|
||||
errs = errors.Append(errs, errors.Wrap(err, "failed to set repo size"))
|
||||
}
|
||||
|
||||
return nil
|
||||
return errs
|
||||
}
|
||||
|
||||
// readCloneProgress scans the reader and saves the most recent line of output
|
||||
// as the lock status.
|
||||
func readCloneProgress(db database.DB, logger log.Logger, redactor *urlredactor.URLRedactor, lock RepositoryLock, pr io.Reader, repo api.RepoName) {
|
||||
// as the lock status, writes to a log file if siteConfig.cloneProgressLog is
|
||||
// enabled, and optionally to the database when the feature flag `clone-progress-logging`
|
||||
// is enabled.
|
||||
func readCloneProgress(db database.DB, logger log.Logger, lock RepositoryLock, pr io.Reader, repo api.RepoName) *errgroup.Group {
|
||||
// Use a background context to ensure we still update the DB even if we
|
||||
// time out. IE we intentionally don't take an input ctx.
|
||||
ctx := featureflag.WithFlags(context.Background(), db.FeatureFlags())
|
||||
enableExperimentalDBCloneProgress := featureflag.FromContext(ctx).GetBoolOr("clone-progress-logging", false)
|
||||
|
||||
var logFile *os.File
|
||||
var err error
|
||||
|
||||
if conf.Get().CloneProgressLog {
|
||||
var err error
|
||||
logFile, err = os.CreateTemp("", "")
|
||||
if err != nil {
|
||||
logger.Warn("failed to create temporary clone log file", log.Error(err), log.String("repo", string(repo)))
|
||||
@ -1612,38 +1679,37 @@ func readCloneProgress(db database.DB, logger log.Logger, redactor *urlredactor.
|
||||
scan := bufio.NewScanner(pr)
|
||||
scan.Split(scanCRLF)
|
||||
store := db.GitserverRepos()
|
||||
for scan.Scan() {
|
||||
progress := scan.Text()
|
||||
// 🚨 SECURITY: The output could include the clone url with may contain a sensitive token.
|
||||
// Redact the full url and any found HTTP credentials to be safe.
|
||||
//
|
||||
// e.g.
|
||||
// $ git clone http://token@github.com/foo/bar
|
||||
// Cloning into 'nick'...
|
||||
// fatal: repository 'http://token@github.com/foo/bar/' not found
|
||||
redactedProgress := redactor.Redact(progress)
|
||||
|
||||
lock.SetStatus(redactedProgress)
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
eg.Go(func() error {
|
||||
for scan.Scan() {
|
||||
progress := scan.Text()
|
||||
lock.SetStatus(progress)
|
||||
|
||||
if logFile != nil {
|
||||
// Failing to write here is non-fatal and we don't want to spam our logs if there
|
||||
// are issues
|
||||
_, _ = fmt.Fprintln(logFile, progress)
|
||||
}
|
||||
// Only write to the database persisted status if line indicates progress
|
||||
// which is recognized by presence of a '%'. We filter these writes not to waste
|
||||
// rate-limit tokens on log lines that would not be relevant to the user.
|
||||
if featureflag.FromContext(ctx).GetBoolOr("clone-progress-logging", false) &&
|
||||
strings.Contains(redactedProgress, "%") &&
|
||||
dbWritesLimiter.Allow() {
|
||||
if err := store.SetCloningProgress(ctx, repo, redactedProgress); err != nil {
|
||||
logger.Error("error updating cloning progress in the db", log.Error(err))
|
||||
if logFile != nil {
|
||||
// Failing to write here is non-fatal and we don't want to spam our logs if there
|
||||
// are issues
|
||||
_, _ = fmt.Fprintln(logFile, progress)
|
||||
}
|
||||
// Only write to the database persisted status if line indicates progress
|
||||
// which is recognized by presence of a '%'. We filter these writes not to waste
|
||||
// rate-limit tokens on log lines that would not be relevant to the user.
|
||||
if enableExperimentalDBCloneProgress {
|
||||
if strings.Contains(progress, "%") && dbWritesLimiter.Allow() {
|
||||
if err := store.SetCloningProgress(ctx, repo, progress); err != nil {
|
||||
logger.Error("error updating cloning progress in the db", log.Error(err))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
if err := scan.Err(); err != nil {
|
||||
logger.Error("error reporting progress", log.Error(err))
|
||||
}
|
||||
if err := scan.Err(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
|
||||
return eg
|
||||
}
|
||||
|
||||
// scanCRLF is similar to bufio.ScanLines except it splits on both '\r' and '\n'
|
||||
@ -1822,9 +1888,12 @@ func (s *Server) doBackgroundRepoUpdate(repo api.RepoName, revspec string) error
|
||||
// return until this fetch has completed or definitely-failed,
|
||||
// either way they can't still be in use. we don't care exactly
|
||||
// when the cleanup happens, just that it does.
|
||||
// TODO: Should be done in janitor.
|
||||
defer git.CleanTmpPackFiles(s.Logger, dir)
|
||||
|
||||
output, err := syncer.Fetch(ctx, remoteURL, repo, dir, revspec)
|
||||
// TODO: Move the redaction also into the VCSSyncer layer here, to be in line
|
||||
// with what clone does.
|
||||
redactedOutput := urlredactor.New(remoteURL).Redact(string(output))
|
||||
// best-effort update the output of the fetch
|
||||
if err := s.DB.GitserverRepos().SetLastOutput(context.Background(), repo, redactedOutput); err != nil {
|
||||
@ -1861,7 +1930,7 @@ func setHEAD(ctx context.Context, logger log.Logger, rcf *wrexec.RecordingComman
|
||||
}
|
||||
dir.Set(cmd)
|
||||
r := urlredactor.New(remoteURL)
|
||||
output, err := executil.RunRemoteGitCommand(ctx, rcf.WrapWithRepoName(ctx, logger, repoName, cmd).WithRedactorFunc(r.Redact), true, nil)
|
||||
output, err := executil.RunRemoteGitCommand(ctx, rcf.WrapWithRepoName(ctx, logger, repoName, cmd).WithRedactorFunc(r.Redact), true)
|
||||
if err != nil {
|
||||
logger.Error("Failed to fetch remote info", log.Error(err), log.String("output", string(output)))
|
||||
return errors.Wrap(err, "failed to fetch remote info")
|
||||
|
||||
@ -181,7 +181,7 @@ func TestExecRequest(t *testing.T) {
|
||||
return "https://" + string(name) + ".git", nil
|
||||
},
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
return vcssyncer.NewGitRepoSyncer(wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
DB: db,
|
||||
RecordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
|
||||
@ -310,7 +310,7 @@ func makeTestServer(ctx context.Context, t *testing.T, repoDir, remote string, d
|
||||
ReposDir: repoDir,
|
||||
GetRemoteURLFunc: staticGetRemoteURL(remote),
|
||||
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
return vcssyncer.NewGitRepoSyncer(wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
|
||||
},
|
||||
DB: db,
|
||||
CloneQueue: cloneQueue,
|
||||
@ -458,7 +458,7 @@ func TestCloneRepoRecordsFailures(t *testing.T) {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
assertRepoState := func(status types.CloneStatus, size int64, wantErr error) {
|
||||
assertRepoState := func(status types.CloneStatus, size int64, wantErr string) {
|
||||
t.Helper()
|
||||
fromDB, err := db.GitserverRepos().GetByID(ctx, dbRepo.ID)
|
||||
if err != nil {
|
||||
@ -466,15 +466,11 @@ func TestCloneRepoRecordsFailures(t *testing.T) {
|
||||
}
|
||||
assert.Equal(t, status, fromDB.CloneStatus)
|
||||
assert.Equal(t, size, fromDB.RepoSizeBytes)
|
||||
var errString string
|
||||
if wantErr != nil {
|
||||
errString = wantErr.Error()
|
||||
}
|
||||
assert.Equal(t, errString, fromDB.LastError)
|
||||
assert.Equal(t, wantErr, fromDB.LastError)
|
||||
}
|
||||
|
||||
// Verify the gitserver repo entry exists.
|
||||
assertRepoState(types.CloneStatusNotCloned, 0, nil)
|
||||
assertRepoState(types.CloneStatusNotCloned, 0, "")
|
||||
|
||||
reposDir := t.TempDir()
|
||||
s := makeTestServer(ctx, t, reposDir, remote, db)
|
||||
@ -482,7 +478,7 @@ func TestCloneRepoRecordsFailures(t *testing.T) {
|
||||
for _, tc := range []struct {
|
||||
name string
|
||||
getVCSSyncer func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error)
|
||||
wantErr error
|
||||
wantErr string
|
||||
}{
|
||||
{
|
||||
name: "Not cloneable",
|
||||
@ -493,18 +489,20 @@ func TestCloneRepoRecordsFailures(t *testing.T) {
|
||||
})
|
||||
return m, nil
|
||||
},
|
||||
wantErr: errors.New("error cloning repo: repo example.com/foo/bar not cloneable: not_cloneable"),
|
||||
wantErr: "error cloning repo: repo example.com/foo/bar not cloneable: not_cloneable",
|
||||
},
|
||||
{
|
||||
name: "Failing clone",
|
||||
getVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
|
||||
m := vcssyncer.NewMockVCSSyncer()
|
||||
m.CloneCommandFunc.SetDefaultHook(func(ctx context.Context, url *vcs.URL, s string) (*exec.Cmd, error) {
|
||||
return exec.Command("git", "clone", "/dev/null"), nil
|
||||
m.CloneFunc.SetDefaultHook(func(_ context.Context, _ api.RepoName, _ *vcs.URL, _ common.GitDir, _ string, w io.Writer) error {
|
||||
_, err := fmt.Fprint(w, "fatal: repository '/dev/null' does not exist")
|
||||
require.NoError(t, err)
|
||||
return &exec.ExitError{ProcessState: &os.ProcessState{}}
|
||||
})
|
||||
return m, nil
|
||||
},
|
||||
wantErr: errors.New("failed to clone example.com/foo/bar: clone failed. Output: fatal: repository '/dev/null' does not exist: exit status 128"),
|
||||
wantErr: "failed to clone example.com/foo/bar: clone failed. Output: fatal: repository '/dev/null' does not exist: exit status 0",
|
||||
},
|
||||
} {
|
||||
t.Run(tc.name, func(t *testing.T) {
|
||||
@ -868,19 +866,10 @@ func TestCloneRepo_EnsureValidity(t *testing.T) {
|
||||
t.Cleanup(func() { testRepoCorrupter = nil })
|
||||
// Use block so we get clone errors right here and don't have to rely on the
|
||||
// clone queue. There's no other reason for blocking here, just convenience/simplicity.
|
||||
if _, err := s.CloneRepo(ctx, repoName, CloneOptions{Block: true}); err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
_, err := s.CloneRepo(ctx, repoName, CloneOptions{Block: true})
|
||||
require.NoError(t, err)
|
||||
|
||||
dst := gitserverfs.RepoDirFromName(s.ReposDir, repoName)
|
||||
for i := 0; i < 1000; i++ {
|
||||
_, cloning := s.Locker.Status(dst)
|
||||
if !cloning {
|
||||
break
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
head, err := os.ReadFile(fmt.Sprintf("%s/HEAD", dst))
|
||||
if os.IsNotExist(err) {
|
||||
t.Fatal("expected a reconstituted HEAD, but no file exists")
|
||||
@ -906,19 +895,11 @@ func TestCloneRepo_EnsureValidity(t *testing.T) {
|
||||
cmd("sh", "-c", fmt.Sprintf(": > %s/HEAD", tmpDir))
|
||||
}
|
||||
t.Cleanup(func() { testRepoCorrupter = nil })
|
||||
if _, err := s.CloneRepo(ctx, "example.com/foo/bar", CloneOptions{}); err != nil {
|
||||
if _, err := s.CloneRepo(ctx, "example.com/foo/bar", CloneOptions{Block: true}); err != nil {
|
||||
t.Fatalf("expected no error, got %v", err)
|
||||
}
|
||||
|
||||
// wait for repo to be cloned
|
||||
dst := gitserverfs.RepoDirFromName(s.ReposDir, "example.com/foo/bar")
|
||||
for i := 0; i < 1000; i++ {
|
||||
_, cloning := s.Locker.Status(dst)
|
||||
if !cloning {
|
||||
break
|
||||
}
|
||||
time.Sleep(10 * time.Millisecond)
|
||||
}
|
||||
|
||||
head, err := os.ReadFile(fmt.Sprintf("%s/HEAD", dst))
|
||||
if os.IsNotExist(err) {
|
||||
@ -1420,3 +1401,116 @@ error: Could not read d24d09b8bc5d1ea2c3aa24455f4578db6aa3afda`,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestLinebasedBufferedWriter(t *testing.T) {
|
||||
testCases := []struct {
|
||||
name string
|
||||
writes []string
|
||||
text string
|
||||
}{
|
||||
{
|
||||
name: "identity",
|
||||
writes: []string{"hello"},
|
||||
text: "hello",
|
||||
},
|
||||
{
|
||||
name: "single write begin newline",
|
||||
writes: []string{"\nhelloworld"},
|
||||
text: "\nhelloworld",
|
||||
},
|
||||
{
|
||||
name: "single write contains newline",
|
||||
writes: []string{"hello\nworld"},
|
||||
text: "hello\nworld",
|
||||
},
|
||||
{
|
||||
name: "single write end newline",
|
||||
writes: []string{"helloworld\n"},
|
||||
text: "helloworld\n",
|
||||
},
|
||||
{
|
||||
name: "first write end newline",
|
||||
writes: []string{"hello\n", "world"},
|
||||
text: "hello\nworld",
|
||||
},
|
||||
{
|
||||
name: "second write begin newline",
|
||||
writes: []string{"hello", "\nworld"},
|
||||
text: "hello\nworld",
|
||||
},
|
||||
{
|
||||
name: "single write begin return",
|
||||
writes: []string{"\rhelloworld"},
|
||||
text: "helloworld",
|
||||
},
|
||||
{
|
||||
name: "single write contains return",
|
||||
writes: []string{"hello\rworld"},
|
||||
text: "world",
|
||||
},
|
||||
{
|
||||
name: "single write end return",
|
||||
writes: []string{"helloworld\r"},
|
||||
text: "helloworld\r",
|
||||
},
|
||||
{
|
||||
name: "first write contains return",
|
||||
writes: []string{"hel\rlo", "world"},
|
||||
text: "loworld",
|
||||
},
|
||||
{
|
||||
name: "first write end return",
|
||||
writes: []string{"hello\r", "world"},
|
||||
text: "world",
|
||||
},
|
||||
{
|
||||
name: "second write begin return",
|
||||
writes: []string{"hello", "\rworld"},
|
||||
text: "world",
|
||||
},
|
||||
{
|
||||
name: "second write contains return",
|
||||
writes: []string{"hello", "wor\rld"},
|
||||
text: "ld",
|
||||
},
|
||||
{
|
||||
name: "second write ends return",
|
||||
writes: []string{"hello", "world\r"},
|
||||
text: "helloworld\r",
|
||||
},
|
||||
{
|
||||
name: "third write",
|
||||
writes: []string{"hello", "world\r", "hola"},
|
||||
text: "hola",
|
||||
},
|
||||
{
|
||||
name: "progress one write",
|
||||
writes: []string{"progress\n1%\r20%\r100%\n"},
|
||||
text: "progress\n100%\n",
|
||||
},
|
||||
{
|
||||
name: "progress multiple writes",
|
||||
writes: []string{"progress\n", "1%\r", "2%\r", "100%"},
|
||||
text: "progress\n100%",
|
||||
},
|
||||
{
|
||||
name: "one two three four",
|
||||
writes: []string{"one\ntwotwo\nthreethreethree\rfourfourfourfour\n"},
|
||||
text: "one\ntwotwo\nfourfourfourfour\n",
|
||||
},
|
||||
{
|
||||
name: "real git",
|
||||
writes: []string{"Cloning into bare repository '/Users/nick/.sourcegraph/repos/github.com/nicksnyder/go-i18n/.git'...\nremote: Counting objects: 2148, done. \nReceiving objects: 0% (1/2148) \rReceiving objects: 100% (2148/2148), 473.65 KiB | 366.00 KiB/s, done.\nResolving deltas: 0% (0/1263) \rResolving deltas: 100% (1263/1263), done.\n"},
|
||||
text: "Cloning into bare repository '/Users/nick/.sourcegraph/repos/github.com/nicksnyder/go-i18n/.git'...\nremote: Counting objects: 2148, done. \nReceiving objects: 100% (2148/2148), 473.65 KiB | 366.00 KiB/s, done.\nResolving deltas: 100% (1263/1263), done.\n",
|
||||
},
|
||||
}
|
||||
for _, testCase := range testCases {
|
||||
t.Run(testCase.name, func(t *testing.T) {
|
||||
var w linebasedBufferedWriter
|
||||
for _, write := range testCase.writes {
|
||||
_, _ = w.Write([]byte(write))
|
||||
}
|
||||
assert.Equal(t, testCase.text, w.String())
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@ -17,12 +17,14 @@ go_library(
|
||||
"ruby_packages.go",
|
||||
"rust_packages.go",
|
||||
"syncer.go",
|
||||
"util.go",
|
||||
],
|
||||
importpath = "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer",
|
||||
visibility = ["//cmd/gitserver:__subpackages__"],
|
||||
deps = [
|
||||
"//cmd/gitserver/internal/common",
|
||||
"//cmd/gitserver/internal/executil",
|
||||
"//cmd/gitserver/internal/git",
|
||||
"//cmd/gitserver/internal/gitserverfs",
|
||||
"//cmd/gitserver/internal/perforce",
|
||||
"//cmd/gitserver/internal/urlredactor",
|
||||
@ -97,6 +99,7 @@ go_test(
|
||||
"//internal/testutil",
|
||||
"//internal/types",
|
||||
"//internal/vcs",
|
||||
"//internal/wrexec",
|
||||
"//lib/errors",
|
||||
"//schema",
|
||||
"@com_github_google_go_cmp//cmp",
|
||||
|
||||
@ -2,6 +2,7 @@ package vcssyncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
@ -12,6 +13,7 @@ import (
|
||||
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/executil"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/urlredactor"
|
||||
"github.com/sourcegraph/sourcegraph/internal/vcs"
|
||||
"github.com/sourcegraph/sourcegraph/internal/wrexec"
|
||||
@ -20,11 +22,12 @@ import (
|
||||
|
||||
// gitRepoSyncer is a syncer for Git repositories.
|
||||
type gitRepoSyncer struct {
|
||||
logger log.Logger
|
||||
recordingCommandFactory *wrexec.RecordingCommandFactory
|
||||
}
|
||||
|
||||
func NewGitRepoSyncer(r *wrexec.RecordingCommandFactory) *gitRepoSyncer {
|
||||
return &gitRepoSyncer{recordingCommandFactory: r}
|
||||
func NewGitRepoSyncer(logger log.Logger, r *wrexec.RecordingCommandFactory) *gitRepoSyncer {
|
||||
return &gitRepoSyncer{logger: logger.Scoped("GitRepoSyncer"), recordingCommandFactory: r}
|
||||
}
|
||||
|
||||
func (s *gitRepoSyncer) Type() string {
|
||||
@ -50,7 +53,7 @@ func (s *gitRepoSyncer) IsCloneable(ctx context.Context, repoName api.RepoName,
|
||||
|
||||
r := urlredactor.New(remoteURL)
|
||||
cmd := exec.CommandContext(ctx, "git", args...)
|
||||
out, err := executil.RunRemoteGitCommand(ctx, s.recordingCommandFactory.WrapWithRepoName(ctx, log.NoOp(), repoName, cmd).WithRedactorFunc(r.Redact), true, nil)
|
||||
out, err := executil.RunRemoteGitCommand(ctx, s.recordingCommandFactory.WrapWithRepoName(ctx, log.NoOp(), repoName, cmd).WithRedactorFunc(r.Redact), true)
|
||||
if err != nil {
|
||||
if ctxerr := ctx.Err(); ctxerr != nil {
|
||||
err = ctxerr
|
||||
@ -63,21 +66,46 @@ func (s *gitRepoSyncer) IsCloneable(ctx context.Context, repoName api.RepoName,
|
||||
return nil
|
||||
}
|
||||
|
||||
// CloneCommand returns the command to be executed for cloning a Git repository.
|
||||
func (s *gitRepoSyncer) CloneCommand(ctx context.Context, remoteURL *vcs.URL, tmpPath string) (cmd *exec.Cmd, err error) {
|
||||
// Clone clones a Git repository into tmpPath, reporting redacted progress logs
|
||||
// via the progressWriter.
|
||||
// We "clone" a repository by first creating a bare repo and then fetching the
|
||||
// configured refs into it from the remote.
|
||||
func (s *gitRepoSyncer) Clone(ctx context.Context, repo api.RepoName, remoteURL *vcs.URL, targetDir common.GitDir, tmpPath string, progressWriter io.Writer) (err error) {
|
||||
// First, make sure the tmpPath exists.
|
||||
if err := os.MkdirAll(tmpPath, os.ModePerm); err != nil {
|
||||
return nil, errors.Wrapf(err, "clone failed to create tmp dir")
|
||||
return errors.Wrapf(err, "clone failed to create tmp dir")
|
||||
}
|
||||
|
||||
cmd = exec.CommandContext(ctx, "git", "init", "--bare", ".")
|
||||
// Next, initialize a bare repo in that tmp path.
|
||||
tryWrite(s.logger, progressWriter, "Creating bare repo\n")
|
||||
if err := git.MakeBareRepo(ctx, tmpPath); err != nil {
|
||||
return &common.GitCommandError{Err: err}
|
||||
}
|
||||
tryWrite(s.logger, progressWriter, "Created bare repo at %s\n", tmpPath)
|
||||
|
||||
// Now we build our fetch command. We don't actually clone, instead we init
|
||||
// a bare repository and fetch all refs from remote once into local refs.
|
||||
cmd, _ := s.fetchCommand(ctx, remoteURL)
|
||||
cmd.Dir = tmpPath
|
||||
if err := cmd.Run(); err != nil {
|
||||
return nil, errors.Wrapf(&common.GitCommandError{Err: err}, "clone setup failed")
|
||||
if cmd.Env == nil {
|
||||
cmd.Env = os.Environ()
|
||||
}
|
||||
// see issue #7322: skip LFS content in repositories with Git LFS configured.
|
||||
cmd.Env = append(cmd.Env, "GIT_LFS_SKIP_SMUDGE=1")
|
||||
executil.ConfigureRemoteGitCommand(cmd)
|
||||
|
||||
tryWrite(s.logger, progressWriter, "Fetching remote contents\n")
|
||||
redactor := urlredactor.New(remoteURL)
|
||||
wrCmd := s.recordingCommandFactory.WrapWithRepoName(ctx, s.logger, repo, cmd).WithRedactorFunc(redactor.Redact)
|
||||
// Note: Using RunCommandWriteOutput here does NOT store the output of the
|
||||
// command as the command output of the wrexec command, because the pipes are
|
||||
// already used.
|
||||
exitCode, err := executil.RunCommandWriteOutput(ctx, wrCmd, progressWriter, redactor.Redact)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to fetch: exit status %d", exitCode)
|
||||
}
|
||||
|
||||
cmd, _ = s.fetchCommand(ctx, remoteURL)
|
||||
cmd.Dir = tmpPath
|
||||
return cmd, nil
|
||||
return nil
|
||||
}
|
||||
|
||||
// Fetch tries to fetch updates of a Git repository.
|
||||
@ -85,9 +113,9 @@ func (s *gitRepoSyncer) Fetch(ctx context.Context, remoteURL *vcs.URL, repoName
|
||||
cmd, configRemoteOpts := s.fetchCommand(ctx, remoteURL)
|
||||
dir.Set(cmd)
|
||||
r := urlredactor.New(remoteURL)
|
||||
output, err := executil.RunRemoteGitCommand(ctx, s.recordingCommandFactory.WrapWithRepoName(ctx, log.NoOp(), repoName, cmd).WithRedactorFunc(r.Redact), configRemoteOpts, nil)
|
||||
output, err := executil.RunRemoteGitCommand(ctx, s.recordingCommandFactory.WrapWithRepoName(ctx, log.NoOp(), repoName, cmd).WithRedactorFunc(r.Redact), configRemoteOpts)
|
||||
if err != nil {
|
||||
return nil, &common.GitCommandError{Err: err, Output: urlredactor.New(remoteURL).Redact(string(output))}
|
||||
return nil, &common.GitCommandError{Err: err, Output: r.Redact(string(output))}
|
||||
}
|
||||
return output, nil
|
||||
}
|
||||
|
||||
@ -8,6 +8,7 @@ package vcssyncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os/exec"
|
||||
"sync"
|
||||
|
||||
@ -21,9 +22,9 @@ import (
|
||||
// github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer) used
|
||||
// for unit testing.
|
||||
type MockVCSSyncer struct {
|
||||
// CloneCommandFunc is an instance of a mock function object controlling
|
||||
// the behavior of the method CloneCommand.
|
||||
CloneCommandFunc *VCSSyncerCloneCommandFunc
|
||||
// CloneFunc is an instance of a mock function object controlling the
|
||||
// behavior of the method Clone.
|
||||
CloneFunc *VCSSyncerCloneFunc
|
||||
// FetchFunc is an instance of a mock function object controlling the
|
||||
// behavior of the method Fetch.
|
||||
FetchFunc *VCSSyncerFetchFunc
|
||||
@ -42,8 +43,8 @@ type MockVCSSyncer struct {
|
||||
// methods return zero values for all results, unless overwritten.
|
||||
func NewMockVCSSyncer() *MockVCSSyncer {
|
||||
return &MockVCSSyncer{
|
||||
CloneCommandFunc: &VCSSyncerCloneCommandFunc{
|
||||
defaultHook: func(context.Context, *vcs.URL, string) (r0 *exec.Cmd, r1 error) {
|
||||
CloneFunc: &VCSSyncerCloneFunc{
|
||||
defaultHook: func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) (r0 error) {
|
||||
return
|
||||
},
|
||||
},
|
||||
@ -74,9 +75,9 @@ func NewMockVCSSyncer() *MockVCSSyncer {
|
||||
// methods panic on invocation, unless overwritten.
|
||||
func NewStrictMockVCSSyncer() *MockVCSSyncer {
|
||||
return &MockVCSSyncer{
|
||||
CloneCommandFunc: &VCSSyncerCloneCommandFunc{
|
||||
defaultHook: func(context.Context, *vcs.URL, string) (*exec.Cmd, error) {
|
||||
panic("unexpected invocation of MockVCSSyncer.CloneCommand")
|
||||
CloneFunc: &VCSSyncerCloneFunc{
|
||||
defaultHook: func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error {
|
||||
panic("unexpected invocation of MockVCSSyncer.Clone")
|
||||
},
|
||||
},
|
||||
FetchFunc: &VCSSyncerFetchFunc{
|
||||
@ -106,8 +107,8 @@ func NewStrictMockVCSSyncer() *MockVCSSyncer {
|
||||
// All methods delegate to the given implementation, unless overwritten.
|
||||
func NewMockVCSSyncerFrom(i VCSSyncer) *MockVCSSyncer {
|
||||
return &MockVCSSyncer{
|
||||
CloneCommandFunc: &VCSSyncerCloneCommandFunc{
|
||||
defaultHook: i.CloneCommand,
|
||||
CloneFunc: &VCSSyncerCloneFunc{
|
||||
defaultHook: i.Clone,
|
||||
},
|
||||
FetchFunc: &VCSSyncerFetchFunc{
|
||||
defaultHook: i.Fetch,
|
||||
@ -124,35 +125,34 @@ func NewMockVCSSyncerFrom(i VCSSyncer) *MockVCSSyncer {
|
||||
}
|
||||
}
|
||||
|
||||
// VCSSyncerCloneCommandFunc describes the behavior when the CloneCommand
|
||||
// method of the parent MockVCSSyncer instance is invoked.
|
||||
type VCSSyncerCloneCommandFunc struct {
|
||||
defaultHook func(context.Context, *vcs.URL, string) (*exec.Cmd, error)
|
||||
hooks []func(context.Context, *vcs.URL, string) (*exec.Cmd, error)
|
||||
history []VCSSyncerCloneCommandFuncCall
|
||||
// VCSSyncerCloneFunc describes the behavior when the Clone method of the
|
||||
// parent MockVCSSyncer instance is invoked.
|
||||
type VCSSyncerCloneFunc struct {
|
||||
defaultHook func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error
|
||||
hooks []func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error
|
||||
history []VCSSyncerCloneFuncCall
|
||||
mutex sync.Mutex
|
||||
}
|
||||
|
||||
// CloneCommand delegates to the next hook function in the queue and stores
|
||||
// the parameter and result values of this invocation.
|
||||
func (m *MockVCSSyncer) CloneCommand(v0 context.Context, v1 *vcs.URL, v2 string) (*exec.Cmd, error) {
|
||||
r0, r1 := m.CloneCommandFunc.nextHook()(v0, v1, v2)
|
||||
m.CloneCommandFunc.appendCall(VCSSyncerCloneCommandFuncCall{v0, v1, v2, r0, r1})
|
||||
return r0, r1
|
||||
// Clone delegates to the next hook function in the queue and stores the
|
||||
// parameter and result values of this invocation.
|
||||
func (m *MockVCSSyncer) Clone(v0 context.Context, v1 api.RepoName, v2 *vcs.URL, v3 common.GitDir, v4 string, v5 io.Writer) error {
|
||||
r0 := m.CloneFunc.nextHook()(v0, v1, v2, v3, v4, v5)
|
||||
m.CloneFunc.appendCall(VCSSyncerCloneFuncCall{v0, v1, v2, v3, v4, v5, r0})
|
||||
return r0
|
||||
}
|
||||
|
||||
// SetDefaultHook sets function that is called when the CloneCommand method
|
||||
// of the parent MockVCSSyncer instance is invoked and the hook queue is
|
||||
// empty.
|
||||
func (f *VCSSyncerCloneCommandFunc) SetDefaultHook(hook func(context.Context, *vcs.URL, string) (*exec.Cmd, error)) {
|
||||
// SetDefaultHook sets function that is called when the Clone method of the
|
||||
// parent MockVCSSyncer instance is invoked and the hook queue is empty.
|
||||
func (f *VCSSyncerCloneFunc) SetDefaultHook(hook func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error) {
|
||||
f.defaultHook = hook
|
||||
}
|
||||
|
||||
// PushHook adds a function to the end of hook queue. Each invocation of the
|
||||
// CloneCommand method of the parent MockVCSSyncer instance invokes the hook
|
||||
// at the front of the queue and discards it. After the queue is empty, the
|
||||
// default hook function is invoked for any future action.
|
||||
func (f *VCSSyncerCloneCommandFunc) PushHook(hook func(context.Context, *vcs.URL, string) (*exec.Cmd, error)) {
|
||||
// Clone method of the parent MockVCSSyncer instance invokes the hook at the
|
||||
// front of the queue and discards it. After the queue is empty, the default
|
||||
// hook function is invoked for any future action.
|
||||
func (f *VCSSyncerCloneFunc) PushHook(hook func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error) {
|
||||
f.mutex.Lock()
|
||||
f.hooks = append(f.hooks, hook)
|
||||
f.mutex.Unlock()
|
||||
@ -160,20 +160,20 @@ func (f *VCSSyncerCloneCommandFunc) PushHook(hook func(context.Context, *vcs.URL
|
||||
|
||||
// SetDefaultReturn calls SetDefaultHook with a function that returns the
|
||||
// given values.
|
||||
func (f *VCSSyncerCloneCommandFunc) SetDefaultReturn(r0 *exec.Cmd, r1 error) {
|
||||
f.SetDefaultHook(func(context.Context, *vcs.URL, string) (*exec.Cmd, error) {
|
||||
return r0, r1
|
||||
func (f *VCSSyncerCloneFunc) SetDefaultReturn(r0 error) {
|
||||
f.SetDefaultHook(func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error {
|
||||
return r0
|
||||
})
|
||||
}
|
||||
|
||||
// PushReturn calls PushHook with a function that returns the given values.
|
||||
func (f *VCSSyncerCloneCommandFunc) PushReturn(r0 *exec.Cmd, r1 error) {
|
||||
f.PushHook(func(context.Context, *vcs.URL, string) (*exec.Cmd, error) {
|
||||
return r0, r1
|
||||
func (f *VCSSyncerCloneFunc) PushReturn(r0 error) {
|
||||
f.PushHook(func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error {
|
||||
return r0
|
||||
})
|
||||
}
|
||||
|
||||
func (f *VCSSyncerCloneCommandFunc) nextHook() func(context.Context, *vcs.URL, string) (*exec.Cmd, error) {
|
||||
func (f *VCSSyncerCloneFunc) nextHook() func(context.Context, api.RepoName, *vcs.URL, common.GitDir, string, io.Writer) error {
|
||||
f.mutex.Lock()
|
||||
defer f.mutex.Unlock()
|
||||
|
||||
@ -186,53 +186,59 @@ func (f *VCSSyncerCloneCommandFunc) nextHook() func(context.Context, *vcs.URL, s
|
||||
return hook
|
||||
}
|
||||
|
||||
func (f *VCSSyncerCloneCommandFunc) appendCall(r0 VCSSyncerCloneCommandFuncCall) {
|
||||
func (f *VCSSyncerCloneFunc) appendCall(r0 VCSSyncerCloneFuncCall) {
|
||||
f.mutex.Lock()
|
||||
f.history = append(f.history, r0)
|
||||
f.mutex.Unlock()
|
||||
}
|
||||
|
||||
// History returns a sequence of VCSSyncerCloneCommandFuncCall objects
|
||||
// describing the invocations of this function.
|
||||
func (f *VCSSyncerCloneCommandFunc) History() []VCSSyncerCloneCommandFuncCall {
|
||||
// History returns a sequence of VCSSyncerCloneFuncCall objects describing
|
||||
// the invocations of this function.
|
||||
func (f *VCSSyncerCloneFunc) History() []VCSSyncerCloneFuncCall {
|
||||
f.mutex.Lock()
|
||||
history := make([]VCSSyncerCloneCommandFuncCall, len(f.history))
|
||||
history := make([]VCSSyncerCloneFuncCall, len(f.history))
|
||||
copy(history, f.history)
|
||||
f.mutex.Unlock()
|
||||
|
||||
return history
|
||||
}
|
||||
|
||||
// VCSSyncerCloneCommandFuncCall is an object that describes an invocation
|
||||
// of method CloneCommand on an instance of MockVCSSyncer.
|
||||
type VCSSyncerCloneCommandFuncCall struct {
|
||||
// VCSSyncerCloneFuncCall is an object that describes an invocation of
|
||||
// method Clone on an instance of MockVCSSyncer.
|
||||
type VCSSyncerCloneFuncCall struct {
|
||||
// Arg0 is the value of the 1st argument passed to this method
|
||||
// invocation.
|
||||
Arg0 context.Context
|
||||
// Arg1 is the value of the 2nd argument passed to this method
|
||||
// invocation.
|
||||
Arg1 *vcs.URL
|
||||
Arg1 api.RepoName
|
||||
// Arg2 is the value of the 3rd argument passed to this method
|
||||
// invocation.
|
||||
Arg2 string
|
||||
Arg2 *vcs.URL
|
||||
// Arg3 is the value of the 4th argument passed to this method
|
||||
// invocation.
|
||||
Arg3 common.GitDir
|
||||
// Arg4 is the value of the 5th argument passed to this method
|
||||
// invocation.
|
||||
Arg4 string
|
||||
// Arg5 is the value of the 6th argument passed to this method
|
||||
// invocation.
|
||||
Arg5 io.Writer
|
||||
// Result0 is the value of the 1st result returned from this method
|
||||
// invocation.
|
||||
Result0 *exec.Cmd
|
||||
// Result1 is the value of the 2nd result returned from this method
|
||||
// invocation.
|
||||
Result1 error
|
||||
Result0 error
|
||||
}
|
||||
|
||||
// Args returns an interface slice containing the arguments of this
|
||||
// invocation.
|
||||
func (c VCSSyncerCloneCommandFuncCall) Args() []interface{} {
|
||||
return []interface{}{c.Arg0, c.Arg1, c.Arg2}
|
||||
func (c VCSSyncerCloneFuncCall) Args() []interface{} {
|
||||
return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3, c.Arg4, c.Arg5}
|
||||
}
|
||||
|
||||
// Results returns an interface slice containing the results of this
|
||||
// invocation.
|
||||
func (c VCSSyncerCloneCommandFuncCall) Results() []interface{} {
|
||||
return []interface{}{c.Result0, c.Result1}
|
||||
func (c VCSSyncerCloneFuncCall) Results() []interface{} {
|
||||
return []interface{}{c.Result0}
|
||||
}
|
||||
|
||||
// VCSSyncerFetchFunc describes the behavior when the Fetch method of the
|
||||
|
||||
@ -14,6 +14,7 @@ import (
|
||||
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/executil"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/git"
|
||||
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
|
||||
"github.com/sourcegraph/sourcegraph/internal/api"
|
||||
"github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
|
||||
@ -82,24 +83,30 @@ func (s *vcsPackagesSyncer) RemoteShowCommand(ctx context.Context, remoteURL *vc
|
||||
return exec.CommandContext(ctx, "git", "remote", "show", "./"), nil
|
||||
}
|
||||
|
||||
func (s *vcsPackagesSyncer) CloneCommand(ctx context.Context, remoteURL *vcs.URL, bareGitDirectory string) (*exec.Cmd, error) {
|
||||
err := os.MkdirAll(bareGitDirectory, 0o755)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
// Clone writes a package and all requested versions of it into a synthetic git
|
||||
// repo at tmpPath by creating one head per version.
|
||||
// It reports redacted progress logs via the progressWriter.
|
||||
func (s *vcsPackagesSyncer) Clone(ctx context.Context, repo api.RepoName, remoteURL *vcs.URL, targetDir common.GitDir, tmpPath string, progressWriter io.Writer) (err error) {
|
||||
// First, make sure the tmpPath exists.
|
||||
if err := os.MkdirAll(tmpPath, os.ModePerm); err != nil {
|
||||
return errors.Wrapf(err, "clone failed to create tmp dir")
|
||||
}
|
||||
|
||||
cmd := exec.CommandContext(ctx, "git", "--bare", "init")
|
||||
if _, err := runCommandInDirectory(ctx, cmd, bareGitDirectory, s.placeholder); err != nil {
|
||||
return nil, err
|
||||
// Next, initialize a bare repo in that tmp path.
|
||||
tryWrite(s.logger, progressWriter, "Creating bare repo\n")
|
||||
if err := git.MakeBareRepo(ctx, tmpPath); err != nil {
|
||||
return &common.GitCommandError{Err: err}
|
||||
}
|
||||
tryWrite(s.logger, progressWriter, "Created bare repo at %s\n", tmpPath)
|
||||
|
||||
// The Fetch method is responsible for cleaning up temporary directories.
|
||||
if _, err := s.Fetch(ctx, remoteURL, "", common.GitDir(bareGitDirectory), ""); err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to fetch repo for %s", remoteURL)
|
||||
// TODO: We should have more fine-grained progress reporting here.
|
||||
tryWrite(s.logger, progressWriter, "Fetching package revisions\n")
|
||||
if _, err := s.Fetch(ctx, remoteURL, "", common.GitDir(tmpPath), ""); err != nil {
|
||||
return errors.Wrapf(err, "failed to fetch repo for %s", repo)
|
||||
}
|
||||
|
||||
// no-op command to satisfy VCSSyncer interface, see docstring for more details.
|
||||
return exec.CommandContext(ctx, "git", "--version"), nil
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *vcsPackagesSyncer) Fetch(ctx context.Context, remoteURL *vcs.URL, _ api.RepoName, dir common.GitDir, revspec string) ([]byte, error) {
|
||||
|
||||
@ -4,6 +4,7 @@ import (
|
||||
"bufio"
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"net/url"
|
||||
"os"
|
||||
"os/exec"
|
||||
@ -48,7 +49,7 @@ func TestVcsDependenciesSyncer_Fetch(t *testing.T) {
|
||||
remoteURL := &vcs.URL{URL: url.URL{Path: "fake/foo"}}
|
||||
|
||||
dir := common.GitDir(t.TempDir())
|
||||
_, err := s.CloneCommand(ctx, remoteURL, string(dir))
|
||||
err := s.Clone(ctx, "repo", remoteURL, common.GitDir(dir), string(dir), io.Discard)
|
||||
require.NoError(t, err)
|
||||
|
||||
depsService.Add("foo@0.0.1")
|
||||
@ -375,9 +376,8 @@ func (s *vcsPackagesSyncer) runCloneCommand(t *testing.T, examplePackageURL, bar
|
||||
URL: url.URL{Path: examplePackageURL},
|
||||
}
|
||||
s.configDeps = dependencies
|
||||
cmd, err := s.CloneCommand(context.Background(), &u, bareGitDirectory)
|
||||
err := s.Clone(context.Background(), "repo", &u, common.GitDir(bareGitDirectory), string(bareGitDirectory), io.Discard)
|
||||
assert.Nil(t, err)
|
||||
assert.Nil(t, cmd.Run())
|
||||
}
|
||||
|
||||
func (s *vcsPackagesSyncer) assertDownloadCounts(t *testing.T, depsSource *fakeDepsSource, want map[string]int) {
|
||||
|
||||
@ -2,11 +2,14 @@ package vcssyncer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
|
||||
"github.com/sourcegraph/log"
|
||||
|
||||
"github.com/sourcegraph/sourcegraph/internal/api"
|
||||
"github.com/sourcegraph/sourcegraph/schema"
|
||||
|
||||
@ -19,74 +22,100 @@ import (
|
||||
"github.com/sourcegraph/sourcegraph/lib/errors"
|
||||
)
|
||||
|
||||
// PerforceDepotSyncer is a syncer for Perforce depots.
|
||||
type PerforceDepotSyncer struct {
|
||||
// perforceDepotSyncer is a syncer for Perforce depots.
|
||||
type perforceDepotSyncer struct {
|
||||
logger log.Logger
|
||||
recordingCommandFactory *wrexec.RecordingCommandFactory
|
||||
|
||||
// MaxChanges indicates to only import at most n changes when possible.
|
||||
MaxChanges int
|
||||
|
||||
// Client configures the client to use with p4 and enables use of a client spec
|
||||
// P4Client configures the client to use with p4 and enables use of a client spec
|
||||
// to find the list of interesting files in p4.
|
||||
Client string
|
||||
P4Client string
|
||||
|
||||
// FusionConfig contains information about the experimental p4-fusion client.
|
||||
FusionConfig FusionConfig
|
||||
FusionConfig fusionConfig
|
||||
|
||||
// P4Home is a directory we will pass to `git p4` commands as the
|
||||
// $HOME directory as it requires this to write cache data.
|
||||
P4Home string
|
||||
}
|
||||
|
||||
func NewPerforceDepotSyncer(connection *schema.PerforceConnection, p4Home string) VCSSyncer {
|
||||
return &PerforceDepotSyncer{
|
||||
MaxChanges: int(connection.MaxChanges),
|
||||
Client: connection.P4Client,
|
||||
FusionConfig: configureFusionClient(connection),
|
||||
P4Home: p4Home,
|
||||
func NewPerforceDepotSyncer(logger log.Logger, r *wrexec.RecordingCommandFactory, connection *schema.PerforceConnection, p4Home string) VCSSyncer {
|
||||
return &perforceDepotSyncer{
|
||||
logger: logger.Scoped("PerforceDepotSyncer"),
|
||||
recordingCommandFactory: r,
|
||||
MaxChanges: int(connection.MaxChanges),
|
||||
P4Client: connection.P4Client,
|
||||
FusionConfig: configureFusionClient(connection),
|
||||
P4Home: p4Home,
|
||||
}
|
||||
}
|
||||
|
||||
func (s *PerforceDepotSyncer) Type() string {
|
||||
func (s *perforceDepotSyncer) Type() string {
|
||||
return "perforce"
|
||||
}
|
||||
|
||||
// IsCloneable checks to see if the Perforce remote URL is cloneable.
|
||||
func (s *PerforceDepotSyncer) IsCloneable(ctx context.Context, _ api.RepoName, remoteURL *vcs.URL) error {
|
||||
func (s *perforceDepotSyncer) IsCloneable(ctx context.Context, _ api.RepoName, remoteURL *vcs.URL) error {
|
||||
username, password, host, path, err := perforce.DecomposePerforceRemoteURL(remoteURL)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "decompose")
|
||||
return errors.Wrap(err, "invalid perforce remote URL")
|
||||
}
|
||||
|
||||
return perforce.IsDepotPathCloneable(ctx, s.P4Home, host, username, password, path)
|
||||
}
|
||||
|
||||
// CloneCommand returns the command to be executed for cloning a Perforce depot as a Git repository.
func (s *PerforceDepotSyncer) CloneCommand(ctx context.Context, remoteURL *vcs.URL, tmpPath string) (*exec.Cmd, error) {
    username, password, p4port, depot, err := perforce.DecomposePerforceRemoteURL(remoteURL)
    if err != nil {
        return nil, errors.Wrap(err, "decompose")
// Clone writes a Perforce depot into tmpPath, using a Perforce-to-git-conversion.
// It reports redacted progress logs via the progressWriter.
func (s *perforceDepotSyncer) Clone(ctx context.Context, repo api.RepoName, remoteURL *vcs.URL, targetDir common.GitDir, tmpPath string, progressWriter io.Writer) (err error) {
    // First, make sure the tmpPath exists.
    if err := os.MkdirAll(tmpPath, os.ModePerm); err != nil {
        return errors.Wrapf(err, "clone failed to create tmp dir")
    }

    err = perforce.P4TestWithTrust(ctx, s.P4Home, p4port, username, password)
    p4user, p4passwd, p4port, depot, err := perforce.DecomposePerforceRemoteURL(remoteURL)
    if err != nil {
        return nil, errors.Wrap(err, "test with trust")
        return errors.Wrap(err, "invalid perforce remote URL")
    }

    // First, do a quick check if we can reach the Perforce server.
    tryWrite(s.logger, progressWriter, "Checking Perforce server connection\n")
    err = perforce.P4TestWithTrust(ctx, s.P4Home, p4port, p4user, p4passwd)
    if err != nil {
        return errors.Wrap(err, "verifying connection to perforce server")
    }
    tryWrite(s.logger, progressWriter, "Perforce server connection succeeded\n")

    var cmd *exec.Cmd
    if s.FusionConfig.Enabled {
        cmd = s.buildP4FusionCmd(ctx, depot, username, tmpPath, p4port)
        tryWrite(s.logger, progressWriter, "Converting depot using p4-fusion\n")
        cmd = s.buildP4FusionCmd(ctx, depot, p4user, tmpPath, p4port)
    } else {
        tryWrite(s.logger, progressWriter, "Converting depot using git-p4\n")
        // Example: git p4 clone --bare --max-changes 1000 //Sourcegraph/@all /tmp/clone-584194180/.git
        args := append([]string{"p4", "clone", "--bare"}, s.p4CommandOptions()...)
        args = append(args, depot+"@all", tmpPath)
        cmd = exec.CommandContext(ctx, "git", args...)
    }
    cmd.Env = s.p4CommandEnv(p4port, username, password)
    cmd.Env = s.p4CommandEnv(p4port, p4user, p4passwd)

    return cmd, nil
    redactor := urlredactor.New(remoteURL)
    wrCmd := s.recordingCommandFactory.WrapWithRepoName(ctx, s.logger, repo, cmd).WithRedactorFunc(redactor.Redact)
    // Note: Using RunCommandWriteOutput here does NOT store the output of the
    // command as the command output of the wrexec command, because the pipes are
    // already used.
    exitCode, err := executil.RunCommandWriteOutput(ctx, wrCmd, progressWriter, redactor.Redact)
    if err != nil {
        return errors.Wrapf(err, "failed to run p4->git conversion: exit code %d", exitCode)
    }

    return nil
}

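To make the new flow concrete, here is a hypothetical caller sketch, not part of this change: it feeds an io.Pipe to Clone as the progress writer and echoes each already-redacted progress line. The types and import paths are taken from the surrounding code; the helper itself is an assumption.

package vcssyncer

import (
    "bufio"
    "context"
    "fmt"
    "io"

    "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
    "github.com/sourcegraph/sourcegraph/internal/api"
    "github.com/sourcegraph/sourcegraph/internal/vcs"
)

// cloneWithProgress is a hypothetical helper that drives a syncer's Clone and
// prints each progress line as it arrives. It relies only on the Clone
// signature introduced above.
func cloneWithProgress(ctx context.Context, s VCSSyncer, repo api.RepoName, remoteURL *vcs.URL, targetDir common.GitDir, tmpPath string) error {
    pr, pw := io.Pipe()
    done := make(chan struct{})
    go func() {
        defer close(done)
        scanner := bufio.NewScanner(pr)
        for scanner.Scan() {
            fmt.Println("clone progress:", scanner.Text())
        }
    }()

    err := s.Clone(ctx, repo, remoteURL, targetDir, tmpPath, pw)
    pw.Close() // unblock the scanner goroutine
    <-done
    return err
}
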
func (s *PerforceDepotSyncer) buildP4FusionCmd(ctx context.Context, depot, username, src, port string) *exec.Cmd {
    // Example: p4-fusion --path //depot/... --user $P4USER --src clones/ --networkThreads 64 --printBatch 10 --port $P4PORT --lookAhead 2000 --retries 10 --refresh 100 --noColor true
    // Example: p4-fusion --path //depot/... --user $P4USER --src clones/ --networkThreads 64 --printBatch 10 --port $P4PORT --lookAhead 2000 --retries 10 --refresh 100 --noColor true --noBaseCommit true
func (s *perforceDepotSyncer) buildP4FusionCmd(ctx context.Context, depot, username, src, port string) *exec.Cmd {
    return exec.CommandContext(ctx, "p4-fusion",
        "--path", depot+"...",
        "--client", s.FusionConfig.Client,
@ -109,28 +138,29 @@ func (s *PerforceDepotSyncer) buildP4FusionCmd(ctx context.Context, depot, usern
}

// Fetch tries to fetch updates of a Perforce depot as a Git repository.
func (s *PerforceDepotSyncer) Fetch(ctx context.Context, remoteURL *vcs.URL, _ api.RepoName, dir common.GitDir, _ string) ([]byte, error) {
    username, password, host, depot, err := perforce.DecomposePerforceRemoteURL(remoteURL)
func (s *perforceDepotSyncer) Fetch(ctx context.Context, remoteURL *vcs.URL, _ api.RepoName, dir common.GitDir, _ string) ([]byte, error) {
    p4user, p4passwd, p4port, depot, err := perforce.DecomposePerforceRemoteURL(remoteURL)
    if err != nil {
        return nil, errors.Wrap(err, "decompose")
        return nil, errors.Wrap(err, "invalid perforce remote URL")
    }

    err = perforce.P4TestWithTrust(ctx, s.P4Home, host, username, password)
    // First, do a quick check if we can reach the Perforce server.
    err = perforce.P4TestWithTrust(ctx, s.P4Home, p4port, p4user, p4passwd)
    if err != nil {
        return nil, errors.Wrap(err, "test with trust")
        return nil, errors.Wrap(err, "verifying connection to perforce server")
    }

    var cmd *wrexec.Cmd
    if s.FusionConfig.Enabled {
        // Example: p4-fusion --path //depot/... --user $P4USER --src clones/ --networkThreads 64 --printBatch 10 --port $P4PORT --lookAhead 2000 --retries 10 --refresh 100
        root, _ := filepath.Split(string(dir))
        cmd = wrexec.Wrap(ctx, nil, s.buildP4FusionCmd(ctx, depot, username, root+".git", host))
        cmd = wrexec.Wrap(ctx, nil, s.buildP4FusionCmd(ctx, depot, p4user, root+".git", p4port))
    } else {
        // Example: git p4 sync --max-changes 1000
        args := append([]string{"p4", "sync"}, s.p4CommandOptions()...)
        cmd = wrexec.CommandContext(ctx, nil, "git", args...)
    }
    cmd.Env = s.p4CommandEnv(host, username, password)
    cmd.Env = s.p4CommandEnv(p4port, p4user, p4passwd)
    dir.Set(cmd.Cmd)

    // TODO(keegancsmith)(indradhanush) This is running a remote command and
@ -145,9 +175,9 @@ func (s *PerforceDepotSyncer) Fetch(ctx context.Context, remoteURL *vcs.URL, _ a
    // Force update "master" to "refs/remotes/p4/master" where changes are synced into
    cmd = wrexec.CommandContext(ctx, nil, "git", "branch", "-f", "master", "refs/remotes/p4/master")
    cmd.Cmd.Env = append(os.Environ(),
        "P4PORT="+host,
        "P4USER="+username,
        "P4PASSWD="+password,
        "P4PORT="+p4port,
        "P4USER="+p4user,
        "P4PASSWD="+p4passwd,
        "HOME="+s.P4Home,
    )
    dir.Set(cmd.Cmd)
@ -160,31 +190,32 @@ func (s *PerforceDepotSyncer) Fetch(ctx context.Context, remoteURL *vcs.URL, _ a
}

// RemoteShowCommand returns the command to be executed for showing Git remote of a Perforce depot.
func (s *PerforceDepotSyncer) RemoteShowCommand(ctx context.Context, _ *vcs.URL) (cmd *exec.Cmd, err error) {
func (s *perforceDepotSyncer) RemoteShowCommand(ctx context.Context, _ *vcs.URL) (cmd *exec.Cmd, err error) {
    // Remote info is encoded as in the current repository
    return exec.CommandContext(ctx, "git", "remote", "show", "./"), nil
}

func (s *PerforceDepotSyncer) p4CommandOptions() []string {
func (s *perforceDepotSyncer) p4CommandOptions() []string {
    flags := []string{}
    if s.MaxChanges > 0 {
        flags = append(flags, "--max-changes", strconv.Itoa(s.MaxChanges))
    }
    if s.Client != "" {
    if s.P4Client != "" {
        flags = append(flags, "--use-client-spec")
    }
    return flags
}

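As a hypothetical illustration of the helper above: a syncer with MaxChanges set and a client spec configured produces the git-p4 flags shown below. The concrete values are made up.

package vcssyncer

import "fmt"

// printExampleOptions is an illustrative snippet, not part of the diff: it
// shows the flags p4CommandOptions yields for a configured syncer.
func printExampleOptions() {
    s := &perforceDepotSyncer{MaxChanges: 1000, P4Client: "my-client-spec"}
    fmt.Println(s.p4CommandOptions())
    // prints: [--max-changes 1000 --use-client-spec]
}
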
func (s *PerforceDepotSyncer) p4CommandEnv(port, username, password string) []string {
    env := append(os.Environ(),
        "P4PORT="+port,
        "P4USER="+username,
        "P4PASSWD="+password,
func (s *perforceDepotSyncer) p4CommandEnv(p4port, p4user, p4passwd string) []string {
    env := append(
        os.Environ(),
        "P4PORT="+p4port,
        "P4USER="+p4user,
        "P4PASSWD="+p4passwd,
    )

    if s.Client != "" {
        env = append(env, "P4CLIENT="+s.Client)
    if s.P4Client != "" {
        env = append(env, "P4CLIENT="+s.P4Client)
    }

    if s.P4Home != "" {
@ -196,8 +227,8 @@ func (s *PerforceDepotSyncer) p4CommandEnv(port, username, password string) []st
    return env
}

// FusionConfig allows configuration of the p4-fusion client
type FusionConfig struct {
// fusionConfig allows configuration of the p4-fusion client.
type fusionConfig struct {
    // Enabled: Enable the p4-fusion client for cloning and fetching repos
    Enabled bool
    // Client: The client spec tht should be used
@ -229,9 +260,9 @@ type FusionConfig struct {
    FsyncEnable bool
}

func configureFusionClient(conn *schema.PerforceConnection) FusionConfig {
func configureFusionClient(conn *schema.PerforceConnection) fusionConfig {
    // Set up default settings first
    fc := FusionConfig{
    fc := fusionConfig{
        Enabled: false,
        Client: conn.P4Client,
        LookAhead: 2000,

@ -3,12 +3,18 @@ package vcssyncer
import (
    "strings"
    "testing"

    "github.com/sourcegraph/log/logtest"

    "github.com/sourcegraph/sourcegraph/internal/wrexec"
)

func TestP4DepotSyncer_p4CommandEnv(t *testing.T) {
    syncer := &PerforceDepotSyncer{
        Client: "client",
        P4Home: "p4home",
    syncer := &perforceDepotSyncer{
        logger: logtest.Scoped(t),
        recordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
        P4Client: "client",
        P4Home: "p4home",
    }
    vars := syncer.p4CommandEnv("host", "username", "password")
    assertEnv := func(key, value string) {

@ -2,10 +2,13 @@ package vcssyncer

import (
    "context"
    "io"
    "os/exec"

    jsoniter "github.com/json-iterator/go"

    "github.com/sourcegraph/log"

    "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/common"
    "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
    "github.com/sourcegraph/sourcegraph/internal/actor"
@ -34,8 +37,23 @@ type VCSSyncer interface {
    // IsCloneable checks to see if the VCS remote URL is cloneable. Any non-nil
    // error indicates there is a problem.
    IsCloneable(ctx context.Context, repoName api.RepoName, remoteURL *vcs.URL) error
    // CloneCommand returns the command to be executed for cloning from remote.
    CloneCommand(ctx context.Context, remoteURL *vcs.URL, tmpPath string) (cmd *exec.Cmd, err error)
    // Clone should clone the repo onto disk into the given tmpPath.
    //
    // For now, regardless of the VCSSyncer implementation, the result that ends
    // up in tmpPath is expected to be a valid Git repository and should be initially
    // optimized (repacked, commit-graph written, etc).
    //
    // targetDir is passed for reporting purposes, but should not be written to
    // during this process.
    //
    // Progress can be reported by writing to the progressWriter.
    // 🚨 SECURITY:
    // Content written to this writer should NEVER contain sensitive information.
    // The VCSSyncer implementation is responsible of redacting potentially
    // sensitive data like secrets.
    // Progress reported through the progressWriter will be streamed line-by-line
    // with both LF and CR being valid line terminators.
    Clone(ctx context.Context, repo api.RepoName, remoteURL *vcs.URL, targetDir common.GitDir, tmpPath string, progressWriter io.Writer) error
    // Fetch tries to fetch updates from the remote to given directory.
    // The revspec parameter is optional and specifies that the client is specifically
    // interested in fetching the provided revspec (example "v2.3.4^0").
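The progress contract above accepts both LF and CR as line terminators, so carriage-return progress updates from the conversion tools still arrive line by line. A consumer could split such a stream with a bufio.SplitFunc along these lines; this is a sketch, not code from the repository (note that a CRLF pair yields one extra empty token):

package vcssyncer

import (
    "bufio"
    "bytes"
)

// scanCROrLF is a hypothetical bufio.SplitFunc that treats both '\n' and '\r'
// as line terminators, matching the progressWriter contract described above.
func scanCROrLF(data []byte, atEOF bool) (advance int, token []byte, err error) {
    if atEOF && len(data) == 0 {
        return 0, nil, nil
    }
    if i := bytes.IndexAny(data, "\r\n"); i >= 0 {
        // Return the line without its terminator and consume the terminator.
        return i + 1, data[:i], nil
    }
    if atEOF {
        // Final, unterminated line.
        return len(data), data, nil
    }
    // Request more data.
    return 0, nil, nil
}

var _ bufio.SplitFunc = scanCROrLF
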
@ -56,6 +74,7 @@ type NewVCSSyncerOpts struct {
    ReposDir string
    CoursierCacheDir string
    RecordingCommandFactory *wrexec.RecordingCommandFactory
    Logger log.Logger
}

func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error) {
@ -101,7 +120,7 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
            return nil, err
        }

        return NewPerforceDepotSyncer(&c, p4Home), nil
        return NewPerforceDepotSyncer(opts.Logger, opts.RecordingCommandFactory, &c, p4Home), nil
    case extsvc.TypeJVMPackages:
        var c schema.JVMPackagesConnection
        if _, err := extractOptions(&c); err != nil {
@ -161,7 +180,8 @@ func NewVCSSyncer(ctx context.Context, opts *NewVCSSyncerOpts) (VCSSyncer, error
        }
        return NewRubyPackagesSyncer(&c, opts.DepsSvc, cli, opts.ReposDir), nil
    }
    return NewGitRepoSyncer(opts.RecordingCommandFactory), nil

    return NewGitRepoSyncer(opts.Logger, opts.RecordingCommandFactory), nil
}

type notFoundError struct{ error }

@ -8,6 +8,8 @@ import (

    "github.com/stretchr/testify/require"

    "github.com/sourcegraph/log/logtest"

    api "github.com/sourcegraph/sourcegraph/internal/api"
    "github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies"
    "github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
@ -57,6 +59,7 @@ func TestGetVCSSyncer(t *testing.T) {
        Repo: repo,
        ReposDir: tempReposDir,
        CoursierCacheDir: tempCoursierCacheDir,
        Logger: logtest.Scoped(t),
    })
    if err != nil {
        t.Fatal(err)

cmd/gitserver/internal/vcssyncer/util.go (new file, 16 lines)
@ -0,0 +1,16 @@
package vcssyncer

import (
    "fmt"
    "io"

    "github.com/sourcegraph/log"
)

// tryWrite tries to write the formatted string to the given writer, logging any errors
// to the logger.
func tryWrite(logger log.Logger, w io.Writer, format string, a ...any) {
    if _, err := fmt.Fprintf(w, format, a...); err != nil {
        logger.Error("failed to write log message", log.Error(err))
    }
}

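Usage mirrors the calls in the Perforce syncer above: writes are best-effort, and a failed write only produces a log entry instead of failing the clone. A hypothetical wrapper:

package vcssyncer

import (
    "io"

    "github.com/sourcegraph/log"
)

// reportProgress is a hypothetical helper showing how tryWrite is meant to be
// used: errors from the progress writer are logged and otherwise ignored.
func reportProgress(logger log.Logger, w io.Writer, changelists int) {
    tryWrite(logger, w, "Converted %d changelists\n", changelists)
}
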
@ -98,6 +98,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
                ReposDir: config.ReposDir,
                CoursierCacheDir: config.CoursierCacheDir,
                RecordingCommandFactory: recordingCommandFactory,
                Logger: logger,
            })
        },
        Hostname: config.ExternalAddress,

@ -30,7 +30,6 @@ go_library(
        "@com_github_grafana_regexp//:regexp",
        "@com_github_rjeczalik_notify//:notify",
        "@com_github_sourcegraph_conc//pool",
        "@org_golang_x_sync//errgroup",
        "@org_golang_x_sync//semaphore",
    ],
)

@ -5,7 +5,7 @@ import (
    "io"
    "os/exec"

    "golang.org/x/sync/errgroup"
    "github.com/sourcegraph/conc/pool"

    "github.com/sourcegraph/sourcegraph/dev/sg/internal/secrets"
    "github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
@ -111,7 +111,7 @@ type startedCmd struct {
    stdoutBuf *prefixSuffixSaver
    stderrBuf *prefixSuffixSaver

    outEg *errgroup.Group
    outEg *pool.ErrorPool
}

func (sc *startedCmd) Wait() error {

@ -11,7 +11,7 @@ go_library(
    visibility = ["//visibility:public"],
    deps = [
        "//lib/errors",
        "@org_golang_x_sync//errgroup",
        "@com_github_sourcegraph_conc//pool",
    ],
)

@ -8,7 +8,7 @@ import (
    "io"
    "io/fs"

    "golang.org/x/sync/errgroup"
    "github.com/sourcegraph/conc/pool"

    "github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -26,34 +26,44 @@ type cmdPiper interface {
    StderrPipe() (io.ReadCloser, error)
}

// NewOutputScannerWithSplit creates a new bufio.Scanner using the given split
// function with well-working defaults for the initial and max buf sizes.
func NewOutputScannerWithSplit(r io.Reader, split bufio.SplitFunc) *bufio.Scanner {
    scanner := bufio.NewScanner(r)
    scanner.Split(split)
    buf := make([]byte, initialBufSize)
    scanner.Buffer(buf, maxTokenSize)
    return scanner
}

// PipeOutput reads stdout/stderr output of the given command into the two
// io.Writers.
//
// It returns a errgroup.Group. The caller *must* call the Wait() method of the
// errgroup.Group after waiting for the *exec.Cmd to finish.
// errgroup.Group **before** waiting for the *exec.Cmd to finish.
//
// The passed in context should be canceled when done.
//
// See this issue for more details: https://github.com/golang/go/issues/21922
func PipeOutput(ctx context.Context, c cmdPiper, stdoutWriter, stderrWriter io.Writer) (*errgroup.Group, error) {
func PipeOutput(ctx context.Context, c cmdPiper, stdoutWriter, stderrWriter io.Writer) (*pool.ErrorPool, error) {
    pipe := func(w io.Writer, r io.Reader) error {
        scanner := bufio.NewScanner(r)
        scanner.Split(scanLinesWithNewline)

        buf := make([]byte, initialBufSize)
        scanner.Buffer(buf, maxTokenSize)
        scanner := NewOutputScannerWithSplit(r, scanLinesWithNewline)

        for scanner.Scan() {
            fmt.Fprint(w, scanner.Text())
            if _, err := fmt.Fprint(w, scanner.Text()); err != nil {
                return err
            }
        }

        return scanner.Err()
    }

    return pipeProcessOutput(ctx, c, stdoutWriter, stderrWriter, pipe)
    return PipeProcessOutput(ctx, c, stdoutWriter, stderrWriter, pipe)
}

// PipeOutputUnbuffered is the unbuffered version of PipeOutput and uses
// io.Copy instead of piping output line-based to the output.
func PipeOutputUnbuffered(ctx context.Context, c cmdPiper, stdoutWriter, stderrWriter io.Writer) (*errgroup.Group, error) {
func PipeOutputUnbuffered(ctx context.Context, c cmdPiper, stdoutWriter, stderrWriter io.Writer) (*pool.ErrorPool, error) {
    pipe := func(w io.Writer, r io.Reader) error {
        _, err := io.Copy(w, r)
        // We can ignore ErrClosed because we get that if a process crashes
@ -63,31 +73,45 @@ func PipeOutputUnbuffered(ctx context.Context, c cmdPiper, stdoutWriter, stderrW
        return nil
    }

    return pipeProcessOutput(ctx, c, stdoutWriter, stderrWriter, pipe)
    return PipeProcessOutput(ctx, c, stdoutWriter, stderrWriter, pipe)
}

func pipeProcessOutput(ctx context.Context, c cmdPiper, stdoutWriter, stderrWriter io.Writer, fn pipe) (*errgroup.Group, error) {
func PipeProcessOutput(ctx context.Context, c cmdPiper, stdoutWriter, stderrWriter io.Writer, fn pipe) (*pool.ErrorPool, error) {
    stdoutPipe, err := c.StdoutPipe()
    if err != nil {
        return nil, err
        return nil, errors.Wrap(err, "failed to attach stdout pipe")
    }

    stderrPipe, err := c.StderrPipe()
    if err != nil {
        return nil, err
        return nil, errors.Wrap(err, "failed to attach stderr pipe")
    }

    go func() {
        // We start a goroutine here to make sure that our pipes are closed
        // when the context is canceled.
        // There is a deadlock condition due the following strange decisions:
        //
        // See cmd/executor/internal/command/run.go for more details.
        // 1. The pipes attached to a command are not closed if the context
        // attached to the command is canceled. The pipes are only closed
        // after Wait has been called.
        // 2. According to the docs, we are not meant to call cmd.Wait() until
        // we have complete read the pipes attached to the command.
        //
        // Since we're following the expected usage, we block on a wait group
        // tracking the consumption of stdout and stderr pipes in two separate
        // goroutines between calls to Start and Wait. This means that if there
        // is a reason the command is abandoned but the pipes are not closed
        // (such as context cancellation), we will hang indefinitely.
        //
        // To be defensive, we'll forcibly close both pipes when the context has
        // finished. These may return an ErrClosed condition, but we don't really
        // care: the command package doesn't surface errors when closing the pipes
        // either.
        <-ctx.Done()
        stdoutPipe.Close()
        stderrPipe.Close()
    }()

    eg := &errgroup.Group{}
    eg := pool.New().WithErrors()

    eg.Go(func() error { return fn(stdoutWriter, stdoutPipe) })
    eg.Go(func() error { return fn(stderrWriter, stderrPipe) })

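Given the corrected comment above, a typical caller now wires things up roughly as follows: create the pipes before Start, drain the returned error pool, and only then reap the process. The helper below is a sketch that assumes it lives in the same package as PipeOutput; the function name and writers are illustrative.

package process

import (
    "context"
    "os"
    "os/exec"
)

// runWithPipes is a hypothetical caller sketch: attach the pipes via
// PipeOutput before Start, then Wait on the returned error pool *before*
// calling cmd.Wait, as the updated doc comment requires.
func runWithPipes(ctx context.Context, name string, args ...string) error {
    cmd := exec.CommandContext(ctx, name, args...)

    p, err := PipeOutput(ctx, cmd, os.Stdout, os.Stderr)
    if err != nil {
        return err
    }

    if err := cmd.Start(); err != nil {
        return err
    }

    // Drain stdout/stderr first...
    if err := p.Wait(); err != nil {
        return err
    }

    // ...then reap the process.
    return cmd.Wait()
}
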
@ -404,3 +404,10 @@
- path: github.com/sourcegraph/sourcegraph/internal/telemetry
  interfaces:
    - EventsStore
- filename: cmd/gitserver/internal/integration_tests/mocks.go
  package: inttests
  sources:
    - path: github.com/sourcegraph/sourcegraph/cmd/gitserver/internal
      interfaces:
        - RepositoryLocker
        - RepositoryLock
