Remove HTTP for inter-service RPC (#59093)

Starting with the upcoming release, gRPC is the only supported protocol for inter-service RPC. This PR removes the old HTTP client and server implementations, along with a few leftovers from the transition.
Erik Seliger authored on 2024-01-11 19:46:32 +01:00, committed by GitHub
parent 1caacec5d5
commit bb09a4ac1f
129 changed files with 2879 additions and 6932 deletions
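For orientation, a minimal sketch (not part of the diff) of what a caller looks like after this change: the gitserver client is constructed with gitserver.NewClient and a caller-chosen scope string, and all calls go over gRPC, so none of the per-caller HTTP doers removed below are wired through anymore. The reclone function and repo name here are hypothetical, and the Remove/RequestRepoClone method shapes are inferred from the mocks in the updated TestRecloneRepository below, so treat them as assumptions rather than the exact interface.

package example

import (
    "context"

    "github.com/sourcegraph/sourcegraph/internal/api"
    "github.com/sourcegraph/sourcegraph/internal/gitserver"
)

// reclone sketches the two gitserver RPCs the updated RecloneRepository
// resolver issues: remove the on-disk copy, then request a fresh clone.
// Method signatures are inferred from the test mocks and may differ.
func reclone(ctx context.Context, repo api.RepoName) error {
    // The scope string ("backend.external-services", "graphql.check-connection",
    // etc. in the diff below) labels the caller, presumably for observability;
    // it replaces the HTTP client/doer wiring this PR deletes.
    client := gitserver.NewClient("example.reclone")
    if err := client.Remove(ctx, repo); err != nil {
        return err
    }
    _, err := client.RequestRepoClone(ctx, repo)
    return err
}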

View File

@@ -20,6 +20,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitlab"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/observation"
internalrepos "github.com/sourcegraph/sourcegraph/internal/repos"
@@ -440,5 +441,5 @@ func newGenericSourcer(logger log.Logger, db database.DB) internalrepos.Sourcer
db = database.NewDBWith(sourcerLogger.Scoped("db"), db)
dependenciesService := dependencies.NewService(observation.NewContext(logger), db)
cf := httpcli.NewExternalClientFactory(httpcli.NewLoggingMiddleware(sourcerLogger))
return internalrepos.NewSourcer(sourcerLogger, db, cf, internalrepos.WithDependenciesService(dependenciesService))
return internalrepos.NewSourcer(sourcerLogger, db, cf, gitserver.NewClient("backend.external-services"), internalrepos.WithDependenciesService(dependenciesService))
}

View File

@@ -486,7 +486,6 @@ go_test(
"//internal/authz/permssync",
"//internal/binary",
"//internal/conf",
"//internal/conf/conftypes",
"//internal/database",
"//internal/database/basestore",
"//internal/database/dbmocks",

View File

@@ -17,6 +17,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gqlutil"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
@@ -267,6 +268,7 @@ func (r *externalServiceResolver) CheckConnection(ctx context.Context) (*externa
r.db,
r.externalService,
httpcli.ExternalClientFactory,
gitserver.NewClient("graphql.check-connection"),
)
if err != nil {
return nil, errors.Wrap(err, "failed to create source")

View File

@@ -1,33 +1,30 @@
package graphqlbackend
import (
"context"
"encoding/base64"
"encoding/json"
"flag"
"fmt"
"io"
"log" //nolint:logging // TODO move all logging to sourcegraph/log
"net/http"
"net/http/httptest"
"net/url"
"os"
"reflect"
"sync/atomic"
"testing"
mockassert "github.com/derision-test/go-mockgen/testutil/assert"
"github.com/grafana/regexp"
"github.com/graph-gophers/graphql-go"
"github.com/inconshreveable/log15" //nolint:logging // TODO move all logging to sourcegraph/log
sglog "github.com/sourcegraph/log"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestMain(m *testing.M) {
@@ -91,28 +88,6 @@ func TestRepository(t *testing.T) {
func TestRecloneRepository(t *testing.T) {
resetMocks()
var gitserverCalled atomic.Bool
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
resp := protocol.RepoUpdateResponse{}
gitserverCalled.Store(true)
json.NewEncoder(w).Encode(&resp)
}))
defer srv.Close()
serverURL, err := url.Parse(srv.URL)
assert.Nil(t, err)
conf.Mock(&conf.Unified{
ServiceConnectionConfig: conftypes.ServiceConnections{
GitServers: []string{serverURL.Host},
}, SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
defer conf.Mock(nil)
repos := dbmocks.NewMockRepoStore()
repos.GetFunc.SetDefaultReturn(&types.Repo{ID: 1, Name: "github.com/gorilla/mux"}, nil)
@@ -127,32 +102,19 @@ func TestRecloneRepository(t *testing.T) {
db.UsersFunc.SetDefaultReturn(users)
db.GitserverReposFunc.SetDefaultReturn(gitserverRepos)
called := backend.Mocks.Repos.MockDeleteRepositoryFromDisk(t, 1)
repoID := MarshalRepositoryID(1)
repoID := base64.StdEncoding.EncodeToString([]byte("Repository:1"))
gc := gitserver.NewMockClient()
gc.RequestRepoCloneFunc.SetDefaultReturn(&protocol.RepoCloneResponse{}, nil)
r := newSchemaResolver(db, gc)
RunTests(t, []*Test{
{
Schema: mustParseGraphQLSchema(t, db),
Query: fmt.Sprintf(`
mutation {
recloneRepository(repo: "%s") {
alwaysNil
}
}
`, repoID),
ExpectedResult: `
{
"recloneRepository": {
"alwaysNil": null
}
}
`,
},
})
_, err := r.RecloneRepository(context.Background(), &struct{ Repo graphql.ID }{Repo: repoID})
require.NoError(t, err)
assert.True(t, *called)
assert.True(t, gitserverCalled.Load())
// To reclone, we first make a request to delete the repository, followed by a request
// to clone the repository again.
mockassert.CalledN(t, gc.RemoveFunc, 1)
mockassert.CalledN(t, gc.RequestRepoCloneFunc, 1)
}
func TestDeleteRepositoryFromDisk(t *testing.T) {

View File

@@ -247,11 +247,10 @@ func RegisterInternalServices(
gitService := &gitServiceHandler{Gitserver: gsClient}
m.Path("/git/{RepoName:.*}/info/refs").Methods("GET").Name(gitInfoRefs).Handler(trace.Route(handler(gitService.serveInfoRefs())))
m.Path("/git/{RepoName:.*}/git-upload-pack").Methods("GET", "POST").Name(gitUploadPack).Handler(trace.Route(handler(gitService.serveGitUploadPack())))
m.Path("/repos/index").Methods("POST").Handler(trace.Route(handler(indexer.serveList)))
// TODO: Can be removed after 5.3 is cut.
m.Path("/configuration").Methods("POST").Handler(trace.Route(handler(serveConfiguration)))
m.Path("/ranks/{RepoName:.*}/documents").Methods("GET").Handler(trace.Route(handler(indexer.serveDocumentRanks)))
m.Path("/search/configuration").Methods("GET", "POST").Handler(trace.Route(handler(indexer.serveConfiguration)))
m.Path("/search/index-status").Methods("POST").Handler(trace.Route(handler(indexer.handleIndexStatusUpdate)))
m.Path("/lsif/upload").Methods("POST").Handler(trace.Route(newCodeIntelUploadHandler(false)))
m.Path("/scip/upload").Methods("POST").Handler(trace.Route(newCodeIntelUploadHandler(false)))
m.Path("/scip/upload").Methods("HEAD").Handler(trace.Route(noopHandler))

View File

@@ -15,6 +15,7 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// TODO: Can be removed after 5.3 is cut.
func serveConfiguration(w http.ResponseWriter, _ *http.Request) error {
raw := conf.Raw()
err := json.NewEncoder(w).Encode(raw)

View File

@@ -135,7 +135,7 @@ func (h *handler) handleBranch(w http.ResponseWriter, branch string) {
}
w.Header().Set("Content-Type", "application/json; charset=utf-8")
w.Write(raw)
_, _ = w.Write(raw)
}
func (h *handler) handleWebhook(w http.ResponseWriter, r *http.Request) {

View File

@@ -1,15 +1,10 @@
package httpapi
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
"github.com/gorilla/mux"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sourcegraph/log"
@@ -183,72 +178,6 @@ type searchIndexerServer struct {
MinLastChangedDisabled bool
}
// serveConfiguration is _only_ used by the zoekt index server. Zoekt does
// not depend on frontend and therefore does not have access to `conf.Watch`.
// Additionally, it only cares about certain search specific settings so this
// search specific endpoint is used rather than serving the entire site settings
// from /.internal/configuration.
//
// This endpoint also supports batch requests to avoid managing concurrency in
// zoekt. On vertically scaled instances we have observed zoekt requesting
// this endpoint concurrently leading to socket starvation.
func (h *searchIndexerServer) serveConfiguration(w http.ResponseWriter, r *http.Request) error {
ctx := r.Context()
if err := r.ParseForm(); err != nil {
return err
}
indexedIDs := make([]api.RepoID, 0, len(r.Form["repoID"]))
for _, idStr := range r.Form["repoID"] {
id, err := strconv.Atoi(idStr)
if err != nil {
http.Error(w, fmt.Sprintf("invalid repo id %s: %s", idStr, err), http.StatusBadRequest)
return nil
}
indexedIDs = append(indexedIDs, api.RepoID(id))
}
var clientFingerprint searchbackend.ConfigFingerprint
err := clientFingerprint.FromHeaders(r.Header)
if err != nil {
http.Error(w, fmt.Sprintf("invalid fingerprint: %s", err), http.StatusBadRequest)
return nil
}
response, err := h.doSearchConfiguration(ctx, searchConfigurationParameters{
repoIDs: indexedIDs,
fingerprint: clientFingerprint,
})
if err != nil {
var parameterErr *parameterError
code := http.StatusInternalServerError
if errors.As(err, &parameterErr) {
code = http.StatusBadRequest
}
http.Error(w, err.Error(), code)
return nil
}
response.fingerprint.ToHeaders(w.Header())
jsonOptions := make([][]byte, 0, len(response.options))
for _, opt := range response.options {
marshalled, err := json.Marshal(opt)
if err != nil {
_, _ = w.Write([]byte(err.Error()))
}
jsonOptions = append(jsonOptions, marshalled)
}
_, _ = w.Write(bytes.Join(jsonOptions, []byte("\n")))
return nil
}
func (h *searchIndexerServer) doSearchConfiguration(ctx context.Context, parameters searchConfigurationParameters) (*searchConfigurationResponse, error) {
siteConfig := conf.Get().SiteConfiguration
@@ -387,32 +316,6 @@ type searchConfigurationResponse struct {
fingerprint searchbackend.ConfigFingerprint
}
// serveList is used by zoekt to get the list of repositories for it to index.
func (h *searchIndexerServer) serveList(w http.ResponseWriter, r *http.Request) error {
var parameters listParameters
err := json.NewDecoder(r.Body).Decode(&parameters)
if err != nil {
return err
}
repoIDs, err := h.doList(r.Context(), &parameters)
if err != nil {
return err
}
// TODO: Avoid batching up so much in memory by:
// 1. Changing the schema from object of arrays to array of objects.
// 2. Stream out each object marshalled rather than marshall the full list in memory.
data := struct {
RepoIDs []api.RepoID
}{
RepoIDs: repoIDs,
}
return json.NewEncoder(w).Encode(&data)
}
func (h *searchIndexerServer) doList(ctx context.Context, parameters *listParameters) (repoIDS []api.RepoID, err error) {
indexable, err := h.ListIndexable(ctx)
if err != nil {
@@ -460,47 +363,6 @@ var metricGetVersion = promauto.NewCounter(prometheus.CounterOpts{
Help: "The total number of times we poll gitserver for the version of a indexable branch.",
})
func (h *searchIndexerServer) serveDocumentRanks(w http.ResponseWriter, r *http.Request) error {
return serveRank(h.Ranking.GetDocumentRanks, w, r)
}
func serveRank[T []float64 | citypes.RepoPathRanks](
f func(ctx context.Context, name api.RepoName) (r T, err error),
w http.ResponseWriter,
r *http.Request,
) error {
ctx := r.Context()
repoName := api.RepoName(mux.Vars(r)["RepoName"])
rank, err := f(ctx, repoName)
if err != nil {
if errcode.IsNotFound(err) {
http.Error(w, err.Error(), http.StatusNotFound)
return nil
}
return err
}
b, err := json.Marshal(rank)
if err != nil {
return err
}
_, _ = w.Write(b)
return nil
}
func (h *searchIndexerServer) handleIndexStatusUpdate(_ http.ResponseWriter, r *http.Request) error {
var args indexStatusUpdateArgs
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
return errors.Wrap(err, "failed to decode request args")
}
return h.doIndexStatusUpdate(r.Context(), &args)
}
func (h *searchIndexerServer) doIndexStatusUpdate(ctx context.Context, args *indexStatusUpdateArgs) error {
var (
ids = make([]int32, len(args.Repositories))

View File

@@ -1,13 +1,7 @@
package httpapi
import (
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
@@ -55,203 +49,141 @@ func TestServeConfiguration(t *testing.T) {
}
rankingService := &fakeRankingService{}
t.Run("gRPC", func(t *testing.T) {
// Set up the GRPC server
grpcServer := searchIndexerGRPCServer{
server: &searchIndexerServer{
RepoStore: repoStore,
gitserverClient: gsClient,
Ranking: rankingService,
SearchContextsRepoRevs: searchContextRepoRevsFunc,
},
}
// Setup: create a request for repos 5 and 6, and the non-existent repo 1
requestedRepoIDs := []int32{1, 5, 6}
// Execute the first request (no fingerprint)
var initialRequest proto.SearchConfigurationRequest
initialRequest.RepoIds = requestedRepoIDs
initialRequest.Fingerprint = nil
initialResponse, err := grpcServer.SearchConfiguration(context.Background(), &initialRequest)
if err != nil {
t.Fatalf("SearchConfiguration: %s", err)
}
// Verify: Check to see that the response contains an error
// for the non-existent repo 1
var responseRepo1 *proto.ZoektIndexOptions
foundRepo1 := false
var receivedRepositories []*proto.ZoektIndexOptions
for _, repo := range initialResponse.GetUpdatedOptions() {
if repo.RepoId == 1 {
responseRepo1 = repo
foundRepo1 = true
continue
}
sort.Slice(repo.LanguageMap, func(i, j int) bool {
return repo.LanguageMap[i].Language > repo.LanguageMap[j].Language
})
receivedRepositories = append(receivedRepositories, repo)
}
if !foundRepo1 {
t.Errorf("expected to find repo ID 1 in response: %v", receivedRepositories)
}
if foundRepo1 && !strings.Contains(responseRepo1.Error, "repo not found") {
t.Errorf("expected to find repo not found error in repo 1: %v", responseRepo1)
}
languageMap := make([]*proto.LanguageMapping, 0)
for lang, engine := range ctags_config.DefaultEngines {
languageMap = append(languageMap, &proto.LanguageMapping{Language: lang, Ctags: proto.CTagsParserType(engine)})
}
sort.Slice(languageMap, func(i, j int) bool {
return languageMap[i].Language > languageMap[j].Language
})
// Verify: Check to see that the response contains the expected repos 5 and 6
expectedRepo5 := &proto.ZoektIndexOptions{
RepoId: 5,
Name: "5",
Priority: 5,
Public: true,
Symbols: true,
Branches: []*proto.ZoektRepositoryBranch{{Name: "HEAD", Version: "!HEAD"}},
LanguageMap: languageMap,
}
expectedRepo6 := &proto.ZoektIndexOptions{
RepoId: 6,
Name: "6",
Priority: 6,
Public: true,
Symbols: true,
Branches: []*proto.ZoektRepositoryBranch{
{Name: "HEAD", Version: "!HEAD"},
{Name: "a", Version: "!a"},
{Name: "b", Version: "!b"},
},
LanguageMap: languageMap,
}
expectedRepos := []*proto.ZoektIndexOptions{
expectedRepo5,
expectedRepo6,
}
sort.Slice(receivedRepositories, func(i, j int) bool {
return receivedRepositories[i].RepoId < receivedRepositories[j].RepoId
})
sort.Slice(expectedRepos, func(i, j int) bool {
return expectedRepos[i].RepoId < expectedRepos[j].RepoId
})
if diff := cmp.Diff(expectedRepos, receivedRepositories, protocmp.Transform()); diff != "" {
t.Fatalf("mismatch in response repositories (-want, +got):\n%s", diff)
}
if initialResponse.GetFingerprint() == nil {
t.Fatalf("expected fingerprint to be set in initial response")
}
// Setup: run a second request with the fingerprint from the first response
// Note: when fingerprint is set we only return a subset. We simulate this by setting RepoStore to only list repo number 5
grpcServer.server.RepoStore = &fakeRepoStore{Repos: repos[:1]}
var fingerprintedRequest proto.SearchConfigurationRequest
fingerprintedRequest.RepoIds = requestedRepoIDs
fingerprintedRequest.Fingerprint = initialResponse.GetFingerprint()
// Execute the second request
fingerprintedResponse, err := grpcServer.SearchConfiguration(context.Background(), &fingerprintedRequest)
if err != nil {
t.Fatalf("SearchConfiguration: %s", err)
}
fingerprintedResponses := fingerprintedResponse.GetUpdatedOptions()
for _, res := range fingerprintedResponses {
sort.Slice(res.LanguageMap, func(i, j int) bool {
return res.LanguageMap[i].Language > res.LanguageMap[j].Language
})
}
// Verify that the response contains the expected repo 5
if diff := cmp.Diff(fingerprintedResponses, []*proto.ZoektIndexOptions{expectedRepo5}, protocmp.Transform()); diff != "" {
t.Errorf("mismatch in fingerprinted repositories (-want, +got):\n%s", diff)
}
if fingerprintedResponse.GetFingerprint() == nil {
t.Fatalf("expected fingerprint to be set in fingerprinted response")
}
})
t.Run("REST", func(t *testing.T) {
srv := &searchIndexerServer{
// Set up the GRPC server
grpcServer := searchIndexerGRPCServer{
server: &searchIndexerServer{
RepoStore: repoStore,
gitserverClient: gsClient,
Ranking: rankingService,
SearchContextsRepoRevs: searchContextRepoRevsFunc,
},
}
// Setup: create a request for repos 5 and 6, and the non-existent repo 1
requestedRepoIDs := []int32{1, 5, 6}
// Execute the first request (no fingerprint)
var initialRequest proto.SearchConfigurationRequest
initialRequest.RepoIds = requestedRepoIDs
initialRequest.Fingerprint = nil
initialResponse, err := grpcServer.SearchConfiguration(context.Background(), &initialRequest)
if err != nil {
t.Fatalf("SearchConfiguration: %s", err)
}
// Verify: Check to see that the response contains an error
// for the non-existent repo 1
var responseRepo1 *proto.ZoektIndexOptions
foundRepo1 := false
var receivedRepositories []*proto.ZoektIndexOptions
for _, repo := range initialResponse.GetUpdatedOptions() {
if repo.RepoId == 1 {
responseRepo1 = repo
foundRepo1 = true
continue
}
data := url.Values{
"repoID": []string{"1", "5", "6"},
}
req := httptest.NewRequest("POST", "/", strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
w := httptest.NewRecorder()
if err := srv.serveConfiguration(w, req); err != nil {
t.Fatal(err)
}
sort.Slice(repo.LanguageMap, func(i, j int) bool {
return repo.LanguageMap[i].Language > repo.LanguageMap[j].Language
})
receivedRepositories = append(receivedRepositories, repo)
}
resp := w.Result()
body, _ := io.ReadAll(resp.Body)
if !foundRepo1 {
t.Errorf("expected to find repo ID 1 in response: %v", receivedRepositories)
}
// This is a very fragile test since it will depend on changes to
// searchbackend.GetIndexOptions. If this becomes a problem we can make it
// more robust by shifting around responsibilities.
want := `{"Name":"","RepoID":1,"Public":false,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":false,"Error":"repo not found: id=1","LanguageMap":null}
{"Name":"5","RepoID":5,"Public":true,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":true,"Branches":[{"Name":"HEAD","Version":"!HEAD"}],"Priority":5,"LanguageMap":{"c_sharp":3,"go":3,"javascript":3,"kotlin":3,"python":3,"ruby":3,"rust":3,"scala":3,"typescript":3,"zig":3}}
{"Name":"6","RepoID":6,"Public":true,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":true,"Branches":[{"Name":"HEAD","Version":"!HEAD"},{"Name":"a","Version":"!a"},{"Name":"b","Version":"!b"}],"Priority":6,"LanguageMap":{"c_sharp":3,"go":3,"javascript":3,"kotlin":3,"python":3,"ruby":3,"rust":3,"scala":3,"typescript":3,"zig":3}}`
if foundRepo1 && !strings.Contains(responseRepo1.Error, "repo not found") {
t.Errorf("expected to find repo not found error in repo 1: %v", responseRepo1)
}
if d := cmp.Diff(want, string(body)); d != "" {
t.Fatalf("mismatch (-want, +got):\n%s", d)
}
languageMap := make([]*proto.LanguageMapping, 0)
for lang, engine := range ctags_config.DefaultEngines {
languageMap = append(languageMap, &proto.LanguageMapping{Language: lang, Ctags: proto.CTagsParserType(engine)})
}
// when fingerprint is set we only return a subset. We simulate this by setting RepoStore to only list repo number 5
srv.RepoStore = &fakeRepoStore{Repos: repos[:1]}
req = httptest.NewRequest("POST", "/", strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("X-Sourcegraph-Config-Fingerprint", resp.Header.Get("X-Sourcegraph-Config-Fingerprint"))
w = httptest.NewRecorder()
if err := srv.serveConfiguration(w, req); err != nil {
t.Fatal(err)
}
resp = w.Result()
body, _ = io.ReadAll(resp.Body)
// We want the same as before, except we only want to get back 5.
//
// This is a very fragile test since it will depend on changes to
// searchbackend.GetIndexOptions. If this becomes a problem we can make it
// more robust by shifting around responsibilities.
want = `{"Name":"5","RepoID":5,"Public":true,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":true,"Branches":[{"Name":"HEAD","Version":"!HEAD"}],"Priority":5,"LanguageMap":{"c_sharp":3,"go":3,"javascript":3,"kotlin":3,"python":3,"ruby":3,"rust":3,"scala":3,"typescript":3,"zig":3}}`
if d := cmp.Diff(want, string(body)); d != "" {
t.Fatalf("mismatch (-want, +got):\n%s", d)
}
sort.Slice(languageMap, func(i, j int) bool {
return languageMap[i].Language > languageMap[j].Language
})
// Verify: Check to see that the response contains the expected repos 5 and 6
expectedRepo5 := &proto.ZoektIndexOptions{
RepoId: 5,
Name: "5",
Priority: 5,
Public: true,
Symbols: true,
Branches: []*proto.ZoektRepositoryBranch{{Name: "HEAD", Version: "!HEAD"}},
LanguageMap: languageMap,
}
expectedRepo6 := &proto.ZoektIndexOptions{
RepoId: 6,
Name: "6",
Priority: 6,
Public: true,
Symbols: true,
Branches: []*proto.ZoektRepositoryBranch{
{Name: "HEAD", Version: "!HEAD"},
{Name: "a", Version: "!a"},
{Name: "b", Version: "!b"},
},
LanguageMap: languageMap,
}
expectedRepos := []*proto.ZoektIndexOptions{
expectedRepo5,
expectedRepo6,
}
sort.Slice(receivedRepositories, func(i, j int) bool {
return receivedRepositories[i].RepoId < receivedRepositories[j].RepoId
})
sort.Slice(expectedRepos, func(i, j int) bool {
return expectedRepos[i].RepoId < expectedRepos[j].RepoId
})
if diff := cmp.Diff(expectedRepos, receivedRepositories, protocmp.Transform()); diff != "" {
t.Fatalf("mismatch in response repositories (-want, +got):\n%s", diff)
}
if initialResponse.GetFingerprint() == nil {
t.Fatalf("expected fingerprint to be set in initial response")
}
// Setup: run a second request with the fingerprint from the first response
// Note: when fingerprint is set we only return a subset. We simulate this by setting RepoStore to only list repo number 5
grpcServer.server.RepoStore = &fakeRepoStore{Repos: repos[:1]}
var fingerprintedRequest proto.SearchConfigurationRequest
fingerprintedRequest.RepoIds = requestedRepoIDs
fingerprintedRequest.Fingerprint = initialResponse.GetFingerprint()
// Execute the second request
fingerprintedResponse, err := grpcServer.SearchConfiguration(context.Background(), &fingerprintedRequest)
if err != nil {
t.Fatalf("SearchConfiguration: %s", err)
}
fingerprintedResponses := fingerprintedResponse.GetUpdatedOptions()
for _, res := range fingerprintedResponses {
sort.Slice(res.LanguageMap, func(i, j int) bool {
return res.LanguageMap[i].Language > res.LanguageMap[j].Language
})
}
// Verify that the response contains the expected repo 5
if diff := cmp.Diff(fingerprintedResponses, []*proto.ZoektIndexOptions{expectedRepo5}, protocmp.Transform()); diff != "" {
t.Errorf("mismatch in fingerprinted repositories (-want, +got):\n%s", diff)
}
if fingerprintedResponse.GetFingerprint() == nil {
t.Fatalf("expected fingerprint to be set in fingerprinted response")
}
}
func TestReposIndex(t *testing.T) {
@@ -320,84 +252,38 @@ func TestReposIndex(t *testing.T) {
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
t.Run("gRPC", func(t *testing.T) {
grpcServer := &searchIndexerGRPCServer{
server: &searchIndexerServer{
ListIndexable: fakeListIndexable(tc.indexable),
RepoStore: &fakeRepoStore{
Repos: allRepos,
},
Indexers: suffixIndexers(true),
},
}
resp, err := grpcServer.List(context.Background(), tc.parameters.grpcRequest)
if err != nil {
t.Fatal(err)
}
expectedRepoIDs := make([]api.RepoID, len(tc.want))
for i, name := range tc.want {
for _, repo := range allRepos {
if string(repo.Name) == name {
expectedRepoIDs[i] = repo.ID
}
}
}
var receivedRepoIDs []api.RepoID
for _, id := range resp.GetRepoIds() {
receivedRepoIDs = append(receivedRepoIDs, api.RepoID(id))
}
if d := cmp.Diff(expectedRepoIDs, receivedRepoIDs, cmpopts.EquateEmpty()); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
})
t.Run("REST", func(t *testing.T) {
srv := &searchIndexerServer{
grpcServer := &searchIndexerGRPCServer{
server: &searchIndexerServer{
ListIndexable: fakeListIndexable(tc.indexable),
RepoStore: &fakeRepoStore{
Repos: allRepos,
},
Indexers: suffixIndexers(true),
}
},
}
req := httptest.NewRequest("POST", "/", bytes.NewReader([]byte(tc.parameters.restBody)))
w := httptest.NewRecorder()
if err := srv.serveList(w, req); err != nil {
t.Fatal(err)
}
resp, err := grpcServer.List(context.Background(), tc.parameters.grpcRequest)
if err != nil {
t.Fatal(err)
}
resp := w.Result()
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
t.Errorf("got status %v", resp.StatusCode)
}
var data struct {
RepoIDs []api.RepoID
}
if err := json.Unmarshal(body, &data); err != nil {
t.Fatal(err)
}
wantIDs := make([]api.RepoID, len(tc.want))
for i, name := range tc.want {
for _, repo := range allRepos {
if string(repo.Name) == name {
wantIDs[i] = repo.ID
}
expectedRepoIDs := make([]api.RepoID, len(tc.want))
for i, name := range tc.want {
for _, repo := range allRepos {
if string(repo.Name) == name {
expectedRepoIDs[i] = repo.ID
}
}
if d := cmp.Diff(wantIDs, data.RepoIDs); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
})
}
var receivedRepoIDs []api.RepoID
for _, id := range resp.GetRepoIds() {
receivedRepoIDs = append(receivedRepoIDs, api.RepoID(id))
}
if d := cmp.Diff(expectedRepoIDs, receivedRepoIDs, cmpopts.EquateEmpty()); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
})
}
}
@@ -511,90 +397,45 @@ func TestRepoRankFromConfig(t *testing.T) {
}
func TestIndexStatusUpdate(t *testing.T) {
logger := logtest.Scoped(t)
t.Run("REST", func(t *testing.T) {
logger := logtest.Scoped(t)
wantRepoID := uint32(1234)
wantBranches := []zoekt.RepositoryBranch{{Name: "main", Version: "f00b4r"}}
body := `{"Repositories": [{"RepoID": 1234, "Branches": [{"Name": "main", "Version": "f00b4r"}]}]}`
wantBranches := []zoekt.RepositoryBranch{{Name: "main", Version: "f00b4r"}}
called := false
called := false
zoektReposStore := dbmocks.NewMockZoektReposStore()
zoektReposStore.UpdateIndexStatusesFunc.SetDefaultHook(func(_ context.Context, indexed zoekt.ReposMap) error {
entry, ok := indexed[1234]
if !ok {
t.Fatalf("wrong repo ID")
}
if d := cmp.Diff(entry.Branches, wantBranches); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
called = true
return nil
})
db := dbmocks.NewMockDB()
db.ZoektReposFunc.SetDefaultReturn(zoektReposStore)
srv := &searchIndexerServer{db: db, logger: logger}
req := httptest.NewRequest("POST", "/", bytes.NewReader([]byte(body)))
w := httptest.NewRecorder()
if err := srv.handleIndexStatusUpdate(w, req); err != nil {
t.Fatal(err)
zoektReposStore := dbmocks.NewMockZoektReposStore()
zoektReposStore.UpdateIndexStatusesFunc.SetDefaultHook(func(_ context.Context, indexed zoekt.ReposMap) error {
entry, ok := indexed[wantRepoID]
if !ok {
t.Fatalf("wrong repo ID")
}
resp := w.Result()
if resp.StatusCode != http.StatusOK {
t.Errorf("got status %v", resp.StatusCode)
}
if !called {
t.Fatalf("not called")
if d := cmp.Diff(entry.Branches, wantBranches); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
called = true
return nil
})
t.Run("gRPC", func(t *testing.T) {
logger := logtest.Scoped(t)
db := dbmocks.NewMockDB()
db.ZoektReposFunc.SetDefaultReturn(zoektReposStore)
wantRepoID := uint32(1234)
wantBranches := []zoekt.RepositoryBranch{{Name: "main", Version: "f00b4r"}}
parameters := indexStatusUpdateArgs{
Repositories: []indexStatusUpdateRepository{
{RepoID: wantRepoID, Branches: wantBranches},
},
}
called := false
srv := &searchIndexerGRPCServer{server: &searchIndexerServer{db: db, logger: logger}}
zoektReposStore := dbmocks.NewMockZoektReposStore()
zoektReposStore.UpdateIndexStatusesFunc.SetDefaultHook(func(_ context.Context, indexed zoekt.ReposMap) error {
entry, ok := indexed[wantRepoID]
if !ok {
t.Fatalf("wrong repo ID")
}
if d := cmp.Diff(entry.Branches, wantBranches); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
called = true
return nil
})
_, err := srv.UpdateIndexStatus(context.Background(), parameters.ToProto())
if err != nil {
t.Fatal(err)
}
db := dbmocks.NewMockDB()
db.ZoektReposFunc.SetDefaultReturn(zoektReposStore)
parameters := indexStatusUpdateArgs{
Repositories: []indexStatusUpdateRepository{
{RepoID: wantRepoID, Branches: wantBranches},
},
}
srv := &searchIndexerGRPCServer{server: &searchIndexerServer{db: db, logger: logger}}
_, err := srv.UpdateIndexStatus(context.Background(), parameters.ToProto())
if err != nil {
t.Fatal(err)
}
if !called {
t.Fatalf("not called")
}
})
if !called {
t.Fatalf("not called")
}
}
func TestRepoPathRanks_RoundTrip(t *testing.T) {

View File

@@ -50,7 +50,6 @@ go_test(
"//internal/extsvc/gitlab/webhooks",
"//internal/grpc",
"//internal/grpc/defaults",
"//internal/httpcli",
"//internal/repos",
"//internal/repoupdater",
"//internal/repoupdater/protocol",

View File

@@ -33,7 +33,6 @@ import (
gitlabwebhooks "github.com/sourcegraph/sourcegraph/internal/extsvc/gitlab/webhooks"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/repos"
"github.com/sourcegraph/sourcegraph/internal/repoupdater"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
@@ -151,15 +150,7 @@ func TestGitHubHandler(t *testing.T) {
server := httptest.NewServer(internalgrpc.MultiplexHandlers(gs, mux))
defer server.Close()
cf := httpcli.NewExternalClientFactory()
opts := []httpcli.Opt{}
doer, err := cf.Doer(opts...)
if err != nil {
t.Fatal(err)
}
repoupdater.DefaultClient = repoupdater.NewClient(server.URL)
repoupdater.DefaultClient.HTTPClient = doer
payload, err := os.ReadFile(filepath.Join("testdata", "github-push.json"))
if err != nil {

View File

@@ -62,7 +62,6 @@ go_library(
"//internal/observation",
"//internal/perforce",
"//internal/ratelimit",
"//internal/search/streaming/http",
"//internal/security",
"//internal/trace",
"//internal/types",
@@ -74,13 +73,11 @@ go_library(
"@com_github_mxk_go_flowrate//flowrate",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_prometheus_client_golang//prometheus/promauto",
"@com_github_sourcegraph_conc//:conc",
"@com_github_sourcegraph_log//:log",
"@com_github_sourcegraph_mountinfo//:mountinfo",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
"@org_golang_x_sync//errgroup",
"@org_golang_x_sync//semaphore",
"@org_golang_x_time//rate",
@@ -95,8 +92,8 @@ go_test(
"list_gitolite_test.go",
"main_test.go",
"p4exec_test.go",
"repo_info_test.go",
"server_test.go",
"serverutil_test.go",
],
data = glob(["testdata/**"]),
embed = [":internal"],
@@ -116,6 +113,7 @@ go_test(
"//cmd/gitserver/internal/vcssyncer",
"//internal/actor",
"//internal/api",
"//internal/conf",
"//internal/database",
"//internal/database/dbmocks",
"//internal/database/dbtest",

View File

@@ -1,10 +1,6 @@
package internal
import (
"net/http"
"google.golang.org/protobuf/encoding/protojson"
"github.com/sourcegraph/sourcegraph/internal/diskusage"
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
)
@@ -25,21 +21,3 @@ func getDiskInfo(dir string) (*proto.DiskInfoResponse, error) {
PercentUsed: usage.PercentUsed(),
}, nil
}
func (s *Server) handleDiskInfo(w http.ResponseWriter, r *http.Request) {
resp, err := getDiskInfo(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonBytes, err := protojson.Marshal(resp)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
w.Write(jsonBytes)
}

View File

@@ -20,7 +20,6 @@ go_library(
"//internal/gitserver/v1:gitserver",
"//internal/grpc",
"//internal/grpc/defaults",
"//internal/httpcli",
"//internal/observation",
"//internal/ratelimit",
"//internal/types",

View File

@@ -22,7 +22,6 @@ import (
server "github.com/sourcegraph/sourcegraph/cmd/gitserver/internal"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
@@ -31,7 +30,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestClient_ArchiveReader(t *testing.T) {
@@ -185,97 +183,41 @@ func TestClient_ArchiveReader(t *testing.T) {
})
}
t.Run("grpc", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(true),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
for _, test := range tests {
repoName := api.RepoName(test.name)
called := false
for _, test := range tests {
repoName := api.RepoName(test.name)
called := false
mkClient := func(t *testing.T, addrs []string) gitserver.Client {
t.Helper()
mkClient := func(t *testing.T, addrs []string) gitserver.Client {
t.Helper()
source := gitserver.NewTestClientSource(t, addrs, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
source := gitserver.NewTestClientSource(t, addrs, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
base := proto.NewGitserverServiceClient(cc)
mockArchive := func(ctx context.Context, in *proto.ArchiveRequest, opts ...grpc.CallOption) (proto.GitserverService_ArchiveClient, error) {
called = true
return base.Archive(ctx, in, opts...)
}
mockRepoUpdate := func(ctx context.Context, in *proto.RepoUpdateRequest, opts ...grpc.CallOption) (*proto.RepoUpdateResponse, error) {
base := proto.NewGitserverServiceClient(cc)
mockArchive := func(ctx context.Context, in *proto.ArchiveRequest, opts ...grpc.CallOption) (proto.GitserverService_ArchiveClient, error) {
called = true
return base.Archive(ctx, in, opts...)
}
mockRepoUpdate := func(ctx context.Context, in *proto.RepoUpdateRequest, opts ...grpc.CallOption) (*proto.RepoUpdateResponse, error) {
base := proto.NewGitserverServiceClient(cc)
return base.RepoUpdate(ctx, in, opts...)
}
cli := gitserver.NewMockGitserverServiceClient()
cli.ArchiveFunc.SetDefaultHook(mockArchive)
cli.RepoUpdateFunc.SetDefaultHook(mockRepoUpdate)
return cli
return base.RepoUpdate(ctx, in, opts...)
}
})
return gitserver.NewTestClient(t).WithClientSource(source)
}
runArchiveReaderTestfunc(t, mkClient, repoName, test)
if !called {
t.Error("archiveReader: GitserverServiceClient should have been called")
}
}
})
t.Run("http", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
for _, test := range tests {
repoName := api.RepoName(test.name)
called := false
mkClient := func(t *testing.T, addrs []string) gitserver.Client {
t.Helper()
source := gitserver.NewTestClientSource(t, addrs, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockArchive := func(ctx context.Context, in *proto.ArchiveRequest, opts ...grpc.CallOption) (proto.GitserverService_ArchiveClient, error) {
called = true
base := proto.NewGitserverServiceClient(cc)
return base.Archive(ctx, in, opts...)
}
cli := gitserver.NewMockGitserverServiceClient()
cli.ArchiveFunc.SetDefaultHook(mockArchive)
return cli
}
})
return gitserver.NewTestClient(t).WithClientSource(source)
}
runArchiveReaderTestfunc(t, mkClient, repoName, test)
if called {
t.Error("archiveReader: GitserverServiceClient should have been called")
}
cli := gitserver.NewMockGitserverServiceClient()
cli.ArchiveFunc.SetDefaultHook(mockArchive)
cli.RepoUpdateFunc.SetDefaultHook(mockRepoUpdate)
return cli
}
})
return gitserver.NewTestClient(t).WithClientSource(source)
}
})
runArchiveReaderTestfunc(t, mkClient, repoName, test)
if !called {
t.Error("archiveReader: GitserverServiceClient should have been called")
}
}
}
func createSimpleGitRepo(t *testing.T, root string) string {

View File

@@ -5,10 +5,8 @@ import (
"testing"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestGetObject(t *testing.T) {
@@ -51,37 +49,9 @@ func TestGetObject(t *testing.T) {
})
}
t.Run("gRPC", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(true),
},
},
})
for label, test := range tests {
source := gitserver.NewTestClientSource(t, GitserverAddresses)
cli := gitserver.NewTestClient(t).WithClientSource(source)
runTest(t, label, test, cli)
}
})
t.Run("HTTP", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
for label, test := range tests {
source := gitserver.NewTestClientSource(t, GitserverAddresses)
cli := gitserver.NewTestClient(t).WithClientSource(source)
runTest(t, label, test, cli)
}
})
}
func boolPointer(b bool) *bool {
return &b
for label, test := range tests {
source := gitserver.NewTestClientSource(t, GitserverAddresses)
cli := gitserver.NewTestClient(t).WithClientSource(source)
runTest(t, label, test, cli)
}
}

View File

@@ -25,7 +25,6 @@ import (
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/types"
@@ -105,7 +104,7 @@ func InitGitserver() {
serverAddress := l.Addr().String()
source := gitserver.NewTestClientSource(&t, []string{serverAddress})
testGitserverClient = gitserver.NewTestClient(&t).WithDoer(httpcli.InternalDoer).WithClientSource(source)
testGitserverClient = gitserver.NewTestClient(&t).WithClientSource(source)
GitserverAddresses = []string{serverAddress}
}

View File

@@ -2,26 +2,12 @@ package internal
import (
"context"
"encoding/json"
"net/http"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
"github.com/sourcegraph/sourcegraph/internal/security"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func (s *Server) handleListGitolite(w http.ResponseWriter, r *http.Request) {
repos, err := defaultGitolite.listRepos(r.Context(), r.URL.Query().Get("gitolite"))
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := json.NewEncoder(w).Encode(repos); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
var defaultGitolite = gitoliteFetcher{client: gitoliteClient{}}
type gitoliteFetcher struct {

View File

@@ -2,22 +2,11 @@ package internal
import (
"context"
"net/http"
"net/http/httptest"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"golang.org/x/time/rate"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
"github.com/sourcegraph/sourcegraph/schema"
)
@@ -115,63 +104,6 @@ func Test_Gitolite_listRepos(t *testing.T) {
}
}
func TestCheckSSRFHeader(t *testing.T) {
db := dbmocks.NewMockDB()
gr := dbmocks.NewMockGitserverRepoStore()
db.GitserverReposFunc.SetDefaultReturn(gr)
s := &Server{
Logger: logtest.Scoped(t),
ObservationCtx: observation.TestContextTB(t),
ReposDir: "/testroot",
skipCloneForTests: true,
GetRemoteURLFunc: func(ctx context.Context, name api.RepoName) (string, error) {
return "https://" + string(name) + ".git", nil
},
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
},
DB: db,
Locker: NewRepositoryLocker(),
RPSLimiter: ratelimit.NewInstrumentedLimiter("GitserverTest", rate.NewLimiter(rate.Inf, 10)),
}
h := s.Handler()
oldFetcher := defaultGitolite
t.Cleanup(func() {
defaultGitolite = oldFetcher
})
defaultGitolite = gitoliteFetcher{
client: stubGitoliteClient{
ListRepos_: func(ctx context.Context, host string) ([]*gitolite.Repo, error) {
return []*gitolite.Repo{}, nil
},
},
}
t.Run("header missing", func(t *testing.T) {
rw := httptest.NewRecorder()
r, err := http.NewRequest("GET", "/list-gitolite?gitolite=127.0.0.1", nil)
if err != nil {
t.Fatal(err)
}
h.ServeHTTP(rw, r)
assert.Equal(t, 400, rw.Code)
})
t.Run("header supplied", func(t *testing.T) {
rw := httptest.NewRecorder()
r, err := http.NewRequest("GET", "/list-gitolite?gitolite=127.0.0.1", nil)
if err != nil {
t.Fatal(err)
}
r.Header.Set("X-Requested-With", "Sourcegraph")
h.ServeHTTP(rw, r)
assert.Equal(t, 200, rw.Code)
})
}
type stubGitoliteClient struct {
ListRepos_ func(ctx context.Context, host string) ([]*gitolite.Repo, error)
}

View File

@@ -3,10 +3,8 @@ package internal
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strconv"
@@ -116,83 +114,6 @@ func (gs *GRPCServer) doP4Exec(ctx context.Context, logger log.Logger, req *p4Ex
return nil
}
func (s *Server) handleP4Exec(w http.ResponseWriter, r *http.Request) {
var req p4ExecRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if len(req.Args) < 1 {
http.Error(w, "args must be greater than or equal to 1", http.StatusBadRequest)
return
}
// Make sure the subcommand is explicitly allowed
allowlist := []string{"protects", "groups", "users", "group", "changes"}
allowed := false
for _, arg := range allowlist {
if req.Args[0] == arg {
allowed = true
break
}
}
if !allowed {
http.Error(w, fmt.Sprintf("subcommand %q is not allowed", req.Args[0]), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Log which actor is accessing p4-exec.
//
// p4-exec is currently only used for fetching user based permissions information
// so, we don't have a repo name.
accesslog.Record(r.Context(), "<no-repo>",
log.String("p4user", req.P4User),
log.String("p4port", req.P4Port),
log.Strings("args", req.Args),
)
// Make sure credentials are valid before heavier operation
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
s.p4execHTTP(w, r, &req)
}
func (s *Server) p4execHTTP(w http.ResponseWriter, r *http.Request, req *p4ExecRequest) {
logger := s.Logger.Scoped("p4exec")
// Flush writes more aggressively than standard net/http so that clients
// with a context deadline see as much partial response body as possible.
if fw := newFlushingResponseWriter(logger, w); fw != nil {
w = fw
defer fw.Close()
}
ctx, cancel := context.WithTimeout(r.Context(), time.Minute)
defer cancel()
w.Header().Set("Trailer", "X-Exec-Error")
w.Header().Add("Trailer", "X-Exec-Exit-Status")
w.Header().Add("Trailer", "X-Exec-Stderr")
w.WriteHeader(http.StatusOK)
execStatus := s.p4Exec(ctx, logger, req, r.UserAgent(), w)
w.Header().Set("X-Exec-Error", errorString(execStatus.Err))
w.Header().Set("X-Exec-Exit-Status", strconv.Itoa(execStatus.ExitStatus))
w.Header().Set("X-Exec-Stderr", execStatus.Stderr)
}
func (s *Server) p4Exec(ctx context.Context, logger log.Logger, req *p4ExecRequest, userAgent string, w io.Writer) execStatus {
start := time.Now()
var cmdStart time.Time // set once we have ensured commit

View File

@@ -8,7 +8,6 @@ import (
"net/http/httptest"
"net/url"
"os/exec"
"strings"
"testing"
"github.com/google/go-cmp/cmp"
@@ -77,184 +76,115 @@ func TestServer_handleP4Exec(t *testing.T) {
}
}
t.Run("gRPC", func(t *testing.T) {
readAll := func(execClient proto.GitserverService_P4ExecClient) ([]byte, error) {
var buf bytes.Buffer
for {
resp, err := execClient.Recv()
if errors.Is(err, io.EOF) {
return buf.Bytes(), nil
}
readAll := func(execClient proto.GitserverService_P4ExecClient) ([]byte, error) {
var buf bytes.Buffer
for {
resp, err := execClient.Recv()
if errors.Is(err, io.EOF) {
return buf.Bytes(), nil
}
if err != nil {
return buf.Bytes(), err
}
if err != nil {
return buf.Bytes(), err
}
_, err = buf.Write(resp.GetData())
if err != nil {
t.Fatalf("failed to write data: %v", err)
}
_, err = buf.Write(resp.GetData())
if err != nil {
t.Fatalf("failed to write data: %v", err)
}
}
}
t.Run("users", func(t *testing.T) {
executil.UpdateRunCommandMock(defaultMockRunCommand)
t.Run("users", func(t *testing.T) {
executil.UpdateRunCommandMock(defaultMockRunCommand)
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
stream, err := client.P4Exec(context.Background(), &proto.P4ExecRequest{
Args: [][]byte{[]byte("users")},
})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
data, err := readAll(stream)
s, ok := status.FromError(err)
if !ok {
t.Fatal("received non-status error from p4exec call")
}
if diff := cmp.Diff("the answer to life the universe and everything", s.Message()); diff != "" {
t.Fatalf("unexpected error in stream (-want +got):\n%s", diff)
}
expectedData := []byte("admin <admin@joe-perforce-server> (admin) accessed 2021/01/31")
if diff := cmp.Diff(expectedData, data); diff != "" {
t.Fatalf("unexpected data (-want +got):\n%s", diff)
}
stream, err := client.P4Exec(context.Background(), &proto.P4ExecRequest{
Args: [][]byte{[]byte("users")},
})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
t.Run("empty request", func(t *testing.T) {
executil.UpdateRunCommandMock(defaultMockRunCommand)
data, err := readAll(stream)
s, ok := status.FromError(err)
if !ok {
t.Fatal("received non-status error from p4exec call")
}
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
if diff := cmp.Diff("the answer to life the universe and everything", s.Message()); diff != "" {
t.Fatalf("unexpected error in stream (-want +got):\n%s", diff)
}
stream, err := client.P4Exec(context.Background(), &proto.P4ExecRequest{})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
_, err = readAll(stream)
if status.Code(err) != codes.InvalidArgument {
t.Fatalf("expected InvalidArgument error, got %v", err)
}
})
t.Run("disallowed command", func(t *testing.T) {
executil.UpdateRunCommandMock(defaultMockRunCommand)
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
stream, err := client.P4Exec(context.Background(), &proto.P4ExecRequest{
Args: [][]byte{[]byte("bad_command")},
})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
_, err = readAll(stream)
if status.Code(err) != codes.InvalidArgument {
t.Fatalf("expected InvalidArgument error, got %v", err)
}
})
t.Run("context cancelled", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
executil.UpdateRunCommandMock(func(ctx context.Context, _ *exec.Cmd) (int, error) {
// fake a context cancellation that occurs while the process is running
cancel()
return 0, ctx.Err()
})
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
stream, err := client.P4Exec(ctx, &proto.P4ExecRequest{
Args: [][]byte{[]byte("users")},
})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
_, err = readAll(stream)
if !(errors.Is(err, context.Canceled) || status.Code(err) == codes.Canceled) {
t.Fatalf("expected context cancelation error, got %v", err)
}
})
expectedData := []byte("admin <admin@joe-perforce-server> (admin) accessed 2021/01/31")
if diff := cmp.Diff(expectedData, data); diff != "" {
t.Fatalf("unexpected data (-want +got):\n%s", diff)
}
})
t.Run("HTTP", func(t *testing.T) {
t.Run("empty request", func(t *testing.T) {
executil.UpdateRunCommandMock(defaultMockRunCommand)
tests := []Test{
{
Name: "Command",
Request: newRequest("POST", "/p4-exec", strings.NewReader(`{"args": ["users"]}`)),
ExpectedCode: http.StatusOK,
ExpectedBody: "admin <admin@joe-perforce-server> (admin) accessed 2021/01/31",
ExpectedTrailers: http.Header{
"X-Exec-Error": {"the answer to life the universe and everything"},
"X-Exec-Exit-Status": {"42"},
"X-Exec-Stderr": {"teststderr"},
},
},
{
Name: "Error",
Request: newRequest("POST", "/p4-exec", strings.NewReader(`{"args": ["bad_command"]}`)),
ExpectedCode: http.StatusBadRequest,
ExpectedBody: "subcommand \"bad_command\" is not allowed",
},
{
Name: "EmptyBody",
Request: newRequest("POST", "/p4-exec", nil),
ExpectedCode: http.StatusBadRequest,
ExpectedBody: `EOF`,
},
{
Name: "EmptyInput",
Request: newRequest("POST", "/p4-exec", strings.NewReader("{}")),
ExpectedCode: http.StatusBadRequest,
ExpectedBody: `args must be greater than or equal to 1`,
},
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
stream, err := client.P4Exec(context.Background(), &proto.P4ExecRequest{})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
executil.UpdateRunCommandMock(defaultMockRunCommand)
_, err = readAll(stream)
if status.Code(err) != codes.InvalidArgument {
t.Fatalf("expected InvalidArgument error, got %v", err)
}
})
handler, _, closeFunc := startServer(t)
t.Cleanup(closeFunc)
t.Run("disallowed command", func(t *testing.T) {
w := httptest.ResponseRecorder{Body: new(bytes.Buffer)}
handler.ServeHTTP(&w, test.Request)
executil.UpdateRunCommandMock(defaultMockRunCommand)
res := w.Result()
if res.StatusCode != test.ExpectedCode {
t.Errorf("wrong status: expected %d, got %d", test.ExpectedCode, w.Code)
}
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
body, err := io.ReadAll(res.Body)
if err != nil {
t.Fatal(err)
}
if strings.TrimSpace(string(body)) != test.ExpectedBody {
t.Errorf("wrong body: expected %q, got %q", test.ExpectedBody, string(body))
}
stream, err := client.P4Exec(context.Background(), &proto.P4ExecRequest{
Args: [][]byte{[]byte("bad_command")},
})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
for k, v := range test.ExpectedTrailers {
if got := res.Trailer.Get(k); got != v[0] {
t.Errorf("wrong trailer %q: expected %q, got %q", k, v[0], got)
}
}
})
_, err = readAll(stream)
if status.Code(err) != codes.InvalidArgument {
t.Fatalf("expected InvalidArgument error, got %v", err)
}
})
t.Run("context cancelled", func(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
executil.UpdateRunCommandMock(func(ctx context.Context, _ *exec.Cmd) (int, error) {
// fake a context cancellation that occurs while the process is running
cancel()
return 0, ctx.Err()
})
_, client, closeFunc := startServer(t)
t.Cleanup(closeFunc)
stream, err := client.P4Exec(ctx, &proto.P4ExecRequest{
Args: [][]byte{[]byte("users")},
})
if err != nil {
t.Fatalf("failed to call P4Exec: %v", err)
}
_, err = readAll(stream)
if !(errors.Is(err, context.Canceled) || status.Code(err) == codes.Canceled) {
t.Fatalf("expected context cancelation error, got %v", err)
}
})
}

View File

@@ -4,7 +4,6 @@ import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io/fs"
"net/http"
@@ -35,26 +34,6 @@ import (
var patchID uint64
func (s *Server) handleCreateCommitFromPatchBinary(w http.ResponseWriter, r *http.Request) {
var req protocol.CreateCommitFromPatchRequest
var resp protocol.CreateCommitFromPatchResponse
var status int
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
resp := new(protocol.CreateCommitFromPatchResponse)
resp.SetError("", "", "", errors.Wrap(err, "decoding CreateCommitFromPatchRequest"))
status = http.StatusBadRequest
} else {
status, resp = s.createCommitFromPatch(r.Context(), req)
}
w.WriteHeader(status)
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) createCommitFromPatch(ctx context.Context, req protocol.CreateCommitFromPatchRequest) (int, protocol.CreateCommitFromPatchResponse) {
logger := s.Logger.Scoped("createCommitFromPatch").
With(

View File

@@ -2,8 +2,6 @@ package internal
import (
"context"
"encoding/json"
"net/http"
"github.com/sourcegraph/log"
@@ -28,42 +26,6 @@ func repoCloneProgress(reposDir string, locker RepositoryLocker, repo api.RepoNa
return &resp
}
func (s *Server) handleRepoCloneProgress(w http.ResponseWriter, r *http.Request) {
var req protocol.RepoCloneProgressRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
resp := protocol.RepoCloneProgressResponse{
Results: make(map[api.RepoName]*protocol.RepoCloneProgress, len(req.Repos)),
}
for _, repoName := range req.Repos {
result := repoCloneProgress(s.ReposDir, s.Locker, repoName)
resp.Results[repoName] = result
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handleRepoDelete(w http.ResponseWriter, r *http.Request) {
var req protocol.RepoDeleteRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := deleteRepo(r.Context(), s.Logger, s.DB, s.Hostname, s.ReposDir, req.Repo); err != nil {
s.Logger.Error("failed to delete repository", log.String("repo", string(req.Repo)), log.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
s.Logger.Info("deleted repository", log.String("repo", string(req.Repo)))
}
func deleteRepo(
ctx context.Context,
logger log.Logger,

View File

@@ -0,0 +1,124 @@
package internal
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/gitserverfs"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
"github.com/sourcegraph/sourcegraph/internal/types"
)
func TestDeleteRepo(t *testing.T) {
testDeleteRepo(t, false)
}
func TestDeleteRepoWhenDeleteInDB(t *testing.T) {
// We also want to ensure that we can delete repo data on disk for a repo that
// has already been deleted in the DB.
testDeleteRepo(t, true)
}
func testDeleteRepo(t *testing.T, deletedInDB bool) {
logger := logtest.Scoped(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
remote := t.TempDir()
repoName := api.RepoName("example.com/foo/bar")
db := database.NewDB(logger, dbtest.NewDB(t))
dbRepo := &types.Repo{
Name: repoName,
Description: "Test",
}
// Insert the repo into our database
if err := db.Repos().Create(ctx, dbRepo); err != nil {
t.Fatal(err)
}
repo := remote
cmd := func(name string, arg ...string) string {
t.Helper()
return runCmd(t, repo, name, arg...)
}
_ = makeSingleCommitRepo(cmd)
// Add a bad tag
cmd("git", "tag", "HEAD")
reposDir := t.TempDir()
s := makeTestServer(ctx, t, reposDir, remote, db)
// We need some of the side effects here
_ = s.Handler()
// This will perform an initial clone
s.repoUpdate(&protocol.RepoUpdateRequest{
Repo: repoName,
})
size := gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.ReposDir, repoName).Path("."))
want := &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
CloneStatus: types.CloneStatusCloned,
RepoSizeBytes: size,
}
fromDB, err := db.GitserverRepos().GetByID(ctx, dbRepo.ID)
if err != nil {
t.Fatal(err)
}
// We don't expect an error
if diff := cmp.Diff(want, fromDB, ignoreVolatileGitserverRepoFields); diff != "" {
t.Fatal(diff)
}
if deletedInDB {
if err := db.Repos().Delete(ctx, dbRepo.ID); err != nil {
t.Fatal(err)
}
repos, err := db.Repos().List(ctx, database.ReposListOptions{IncludeDeleted: true, IDs: []api.RepoID{dbRepo.ID}})
if err != nil {
t.Fatal(err)
}
if len(repos) != 1 {
t.Fatalf("Expected 1 repo, got %d", len(repos))
}
dbRepo = repos[0]
}
// Now we can delete it
require.NoError(t, deleteRepo(ctx, logger, db, "", reposDir, dbRepo.Name))
size = gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.ReposDir, repoName).Path("."))
if size != 0 {
t.Fatalf("Size should be 0, got %d", size)
}
// Check status in gitserver_repos
want = &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
CloneStatus: types.CloneStatusNotCloned,
RepoSizeBytes: size,
}
fromDB, err = db.GitserverRepos().GetByID(ctx, dbRepo.ID)
if err != nil {
t.Fatal(err)
}
// We don't expect an error
if diff := cmp.Diff(want, fromDB, ignoreVolatileGitserverRepoFields); diff != "" {
t.Fatal(diff)
}
}

View File

@ -6,8 +6,6 @@ import (
"bytes"
"container/list"
"context"
"encoding/gob"
"encoding/json"
"fmt"
"io"
"net/http"
@ -18,7 +16,6 @@ import (
"strings"
"sync"
"sync/atomic"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus"
@ -28,7 +25,6 @@ import (
"golang.org/x/sync/semaphore"
"golang.org/x/time/rate"
"github.com/sourcegraph/conc"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/accesslog"
@ -55,7 +51,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/limiter"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
streamhttp "github.com/sourcegraph/sourcegraph/internal/search/streaming/http"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/vcs"
@ -225,35 +220,6 @@ func shortGitCommandSlow(args []string) time.Duration {
}
}
// 🚨 SECURITY: headerXRequestedWithMiddleware will ensure that the X-Requested-With
// header contains the correct value. See "What does X-Requested-With do, anyway?" in
// https://github.com/sourcegraph/sourcegraph/pull/27931.
func headerXRequestedWithMiddleware(next http.Handler) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
l := log.Scoped("gitserver")
// Do not apply the middleware to /ping and /git endpoints.
//
// 1. /ping is used by health check services that most likely don't set this header
// at all.
//
// 2. /git may be used to run "git fetch" from another gitserver instance over
// HTTP and the fetchCommand does not set this header yet.
if strings.HasPrefix(r.URL.Path, "/ping") || strings.HasPrefix(r.URL.Path, "/git") {
next.ServeHTTP(w, r)
return
}
if value := r.Header.Get("X-Requested-With"); value != "Sourcegraph" {
l.Error("header X-Requested-With is not set or is invalid", log.String("path", r.URL.Path))
http.Error(w, "header X-Requested-With is not set or is invalid", http.StatusBadRequest)
return
}
next.ServeHTTP(w, r)
}
}
// Handler returns the http.Handler that should be used to serve requests.
func (s *Server) Handler() http.Handler {
s.ctx, s.cancel = context.WithCancel(context.Background())
@ -279,40 +245,7 @@ func (s *Server) Handler() http.Handler {
})
mux := http.NewServeMux()
mux.HandleFunc("/archive", trace.WithRouteName("archive", accesslog.HTTPMiddleware(
s.Logger.Scoped("archive.accesslog"),
conf.DefaultClient(),
s.handleArchive,
)))
mux.HandleFunc("/exec", trace.WithRouteName("exec", accesslog.HTTPMiddleware(
s.Logger.Scoped("exec.accesslog"),
conf.DefaultClient(),
s.handleExec,
)))
mux.HandleFunc("/search", trace.WithRouteName("search", s.handleSearch))
mux.HandleFunc("/batch-log", trace.WithRouteName("batch-log", s.handleBatchLog))
mux.HandleFunc("/p4-exec", trace.WithRouteName("p4-exec", accesslog.HTTPMiddleware(
s.Logger.Scoped("p4-exec.accesslog"),
conf.DefaultClient(),
s.handleP4Exec,
)))
mux.HandleFunc("/list-gitolite", trace.WithRouteName("list-gitolite", s.handleListGitolite))
mux.HandleFunc("/is-repo-cloneable", trace.WithRouteName("is-repo-cloneable", s.handleIsRepoCloneable))
mux.HandleFunc("/repo-clone-progress", trace.WithRouteName("repo-clone-progress", s.handleRepoCloneProgress))
mux.HandleFunc("/delete", trace.WithRouteName("delete", s.handleRepoDelete))
mux.HandleFunc("/repo-update", trace.WithRouteName("repo-update", s.handleRepoUpdate))
mux.HandleFunc("/repo-clone", trace.WithRouteName("repo-clone", s.handleRepoClone))
mux.HandleFunc("/create-commit-from-patch-binary", trace.WithRouteName("create-commit-from-patch-binary", s.handleCreateCommitFromPatchBinary))
mux.HandleFunc("/disk-info", trace.WithRouteName("disk-info", s.handleDiskInfo))
mux.HandleFunc("/is-perforce-path-cloneable", trace.WithRouteName("is-perforce-path-cloneable", s.handleIsPerforcePathCloneable))
mux.HandleFunc("/check-perforce-credentials", trace.WithRouteName("check-perforce-credentials", s.handleCheckPerforceCredentials))
mux.HandleFunc("/commands/get-object", trace.WithRouteName("commands/get-object", s.handleGetObject))
mux.HandleFunc("/perforce-users", trace.WithRouteName("perforce-users", s.handlePerforceUsers))
mux.HandleFunc("/perforce-protects-for-user", trace.WithRouteName("perforce-protects-for-user", s.handlePerforceProtectsForUser))
mux.HandleFunc("/perforce-protects-for-depot", trace.WithRouteName("perforce-protects-for-depot", s.handlePerforceProtectsForDepot))
mux.HandleFunc("/perforce-group-members", trace.WithRouteName("perforce-group-members", s.handlePerforceGroupMembers))
mux.HandleFunc("/is-perforce-super-user", trace.WithRouteName("is-perforce-super-user", s.handleIsPerforceSuperUser))
mux.HandleFunc("/perforce-get-changelist", trace.WithRouteName("perforce-get-changelist", s.handlePerforceGetChangelist))
mux.HandleFunc("/ping", trace.WithRouteName("ping", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
}))
@ -332,8 +265,7 @@ func (s *Server) Handler() http.Handler {
},
)))
// 🚨 SECURITY: This must be wrapped in headerXRequestedWithMiddleware.
return headerXRequestedWithMiddleware(mux)
return mux
}
func addrForRepo(ctx context.Context, repoName api.RepoName, gitServerAddrs gitserver.GitserverAddresses) string {
@ -511,29 +443,6 @@ func (s *Server) acquireCloneableLimiter(ctx context.Context) (context.Context,
return s.cloneableLimiter.Acquire(ctx)
}
func (s *Server) handleIsRepoCloneable(w http.ResponseWriter, r *http.Request) {
var req protocol.IsRepoCloneableRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if req.Repo == "" {
http.Error(w, "no Repo given", http.StatusBadRequest)
return
}
resp, err := s.isRepoCloneable(r.Context(), req.Repo)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) isRepoCloneable(ctx context.Context, repo api.RepoName) (protocol.IsRepoCloneableResponse, error) {
// We use an internal actor here as the repo may be private. It is safe since all
// we return is a bool indicating whether the repo is cloneable or not. Perhaps
@ -561,25 +470,6 @@ func (s *Server) isRepoCloneable(ctx context.Context, repo api.RepoName) (protoc
return resp, nil
}
// handleRepoUpdate is a synchronous (waits for update to complete or
// time out) method so it can yield errors. Updates are not
// unconditional; we debounce them based on the provided
// interval, to avoid spam.
func (s *Server) handleRepoUpdate(w http.ResponseWriter, r *http.Request) {
var req protocol.RepoUpdateRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
resp := s.repoUpdate(&req)
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) repoUpdate(req *protocol.RepoUpdateRequest) protocol.RepoUpdateResponse {
logger := s.Logger.Scoped("handleRepoUpdate")
var resp protocol.RepoUpdateResponse
@ -640,169 +530,6 @@ func (s *Server) repoUpdate(req *protocol.RepoUpdateRequest) protocol.RepoUpdate
return resp
}
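The debouncing described above comes down to skipping a fetch when the previous one happened within the interval the caller asked for. A minimal, self-contained sketch of that idea (hypothetical helper and package name, not the actual implementation):
package sketch
import "time"
// shouldUpdate reports whether a repo should be re-fetched, given when it was
// last fetched and the debounce interval the caller asked for. A zero or
// negative interval means "always update".
func shouldUpdate(lastFetched time.Time, interval time.Duration) bool {
	if interval <= 0 {
		return true
	}
	return time.Since(lastFetched) >= interval
}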
// handleRepoClone is an asynchronous (does not wait for update to complete or
// time out) call to clone a repository.
// Asynchronous errors will have to be checked in the gitserver_repos table under last_error.
func (s *Server) handleRepoClone(w http.ResponseWriter, r *http.Request) {
logger := s.Logger.Scoped("handleRepoClone")
var req protocol.RepoCloneRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
var resp protocol.RepoCloneResponse
req.Repo = protocol.NormalizeRepo(req.Repo)
_, err := s.CloneRepo(context.Background(), req.Repo, CloneOptions{Block: false})
if err != nil {
logger.Warn("error cloning repo", log.String("repo", string(req.Repo)), log.Error(err))
resp.Error = err.Error()
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handleArchive(w http.ResponseWriter, r *http.Request) {
var (
logger = s.Logger.Scoped("handleArchive")
q = r.URL.Query()
treeish = q.Get("treeish")
repo = q.Get("repo")
format = q.Get("format")
pathspecs = q["path"]
)
// Log which actor is accessing the repo.
accesslog.Record(r.Context(), repo,
log.String("treeish", treeish),
log.String("format", format),
log.Strings("path", pathspecs),
)
if err := git.CheckSpecArgSafety(treeish); err != nil {
w.WriteHeader(http.StatusBadRequest)
s.Logger.Error("gitserver.archive.CheckSpecArgSafety", log.Error(err))
return
}
if repo == "" || format == "" {
w.WriteHeader(http.StatusBadRequest)
logger.Error("gitserver.archive", log.String("error", "empty repo or format"))
return
}
req := &protocol.ExecRequest{
Repo: api.RepoName(repo),
Args: []string{
"archive",
// Suppresses fatal error when the repo contains paths matching **/.git/** and instead
// includes those files (to allow archiving such invalid repos). This is unexpected
// behavior; the --worktree-attributes flag should merely let us specify a gitattributes
// file that contains `**/.git/** export-ignore`, but it actually makes everything work as
// desired. Tested by the "repo with .git dir" test case.
"--worktree-attributes",
"--format=" + format,
},
}
if format == string(gitserver.ArchiveFormatZip) {
// Compression level of 0 (no compression) seems to perform the
// best overall on fast network links, but this has not been tuned
// thoroughly.
req.Args = append(req.Args, "-0")
}
req.Args = append(req.Args, treeish, "--")
req.Args = append(req.Args, pathspecs...)
s.execHTTP(w, r, req)
}
func (s *Server) handleSearch(w http.ResponseWriter, r *http.Request) {
logger := s.Logger.Scoped("handleSearch")
tr, ctx := trace.New(r.Context(), "handleSearch")
defer tr.End()
// Decode the request
protocol.RegisterGob()
var args protocol.SearchRequest
if err := gob.NewDecoder(r.Body).Decode(&args); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
eventWriter, err := streamhttp.NewWriter(w)
if err != nil {
tr.SetError(err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var matchesBufMux sync.Mutex
matchesBuf := streamhttp.NewJSONArrayBuf(8*1024, func(data []byte) error {
tr.AddEvent("flushing data", attribute.Int("data.len", len(data)))
return eventWriter.EventBytes("matches", data)
})
// Start a goroutine that periodically flushes the buffer
var flusherWg conc.WaitGroup
flusherCtx, flusherCancel := context.WithCancel(context.Background())
defer flusherCancel()
flusherWg.Go(func() {
flushTicker := time.NewTicker(50 * time.Millisecond)
defer flushTicker.Stop()
for {
select {
case <-flushTicker.C:
matchesBufMux.Lock()
matchesBuf.Flush()
matchesBufMux.Unlock()
case <-flusherCtx.Done():
return
}
}
})
// Create a callback that appends the match to the buffer
var haveFlushed atomic.Bool
onMatch := func(match *protocol.CommitMatch) error {
matchesBufMux.Lock()
defer matchesBufMux.Unlock()
err := matchesBuf.Append(match)
if err != nil {
return err
}
// If we haven't sent any results yet, flush immediately
if !haveFlushed.Load() {
haveFlushed.Store(true)
return matchesBuf.Flush()
}
return nil
}
// Run the search
limitHit, searchErr := s.searchWithObservability(ctx, tr, &args, onMatch)
if writeErr := eventWriter.Event("done", protocol.NewSearchEventDone(limitHit, searchErr)); writeErr != nil {
if !errors.Is(writeErr, syscall.EPIPE) {
logger.Error("failed to send done event", log.Error(writeErr))
}
}
// Clean up the flusher goroutine, then do one final flush
flusherCancel()
flusherWg.Wait()
matchesBuf.Flush()
}
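The handler above mixes two flushing strategies: the very first match is flushed immediately so clients see something quickly, while later matches are batched and flushed by the 50ms ticker goroutine. Stripped of the event-stream plumbing (and with the mutex elided), the core of the callback looks roughly like this — buffered is a hypothetical stand-in, not the actual type:
package sketch
import "sync/atomic"
// buffered is a stand-in for the JSON array buffer used above.
type buffered interface {
	Append(v any) error
	Flush() error
}
// onResult appends a result and flushes immediately only for the very first
// one; everything after that is left to the periodic flusher goroutine.
func onResult(buf buffered, haveFlushed *atomic.Bool, v any) error {
	if err := buf.Append(v); err != nil {
		return err
	}
	if !haveFlushed.Load() {
		haveFlushed.Store(true)
		return buf.Flush()
	}
	return nil
}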
func (s *Server) performGitLogCommand(ctx context.Context, repoCommit api.RepoCommit, format string) (output string, isRepoCloned bool, err error) {
ctx, _, endObservation := s.operations.batchLogSingle.With(ctx, &err, observation.Args{
Attrs: append(
@ -903,44 +630,6 @@ func (s *Server) batchGitLogInstrumentedHandler(ctx context.Context, req protoco
return protocol.BatchLogResponse{Results: results}, nil
}
func (s *Server) handleBatchLog(w http.ResponseWriter, r *http.Request) {
// 🚨 SECURITY: Only allow POST requests.
if strings.ToUpper(r.Method) != http.MethodPost {
http.Error(w, "", http.StatusMethodNotAllowed)
return
}
s.operations = s.ensureOperations()
// Read request body
var req protocol.BatchLogRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Validate request parameters
if len(req.RepoCommits) == 0 {
// Early exit: implicitly writes 200 OK
_ = json.NewEncoder(w).Encode(protocol.BatchLogResponse{Results: []protocol.BatchLogResult{}})
return
}
if !strings.HasPrefix(req.Format, "--format=") {
http.Error(w, "format parameter expected to be of the form `--format=<git log format>`", http.StatusUnprocessableEntity)
return
}
// Handle unexpected error conditions
resp, err := s.batchGitLogInstrumentedHandler(r.Context(), req)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
// Write payload to client: implicitly writes 200 OK
_ = json.NewEncoder(w).Encode(resp)
}
// ensureOperations returns the non-nil operations value supplied to this server
// via RegisterMetrics (when constructed as part of the gitserver binary), or
// constructs and memoizes a no-op operations value (for use in tests).
@ -952,35 +641,6 @@ func (s *Server) ensureOperations() *operations {
return s.operations
}
func (s *Server) handleExec(w http.ResponseWriter, r *http.Request) {
// 🚨 SECURITY: Only allow POST requests.
// See https://github.com/sourcegraph/security-issues/issues/213.
if strings.ToUpper(r.Method) != http.MethodPost {
http.Error(w, "", http.StatusMethodNotAllowed)
return
}
var req protocol.ExecRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Log which actor is accessing the repo.
args := req.Args
cmd := ""
if len(req.Args) > 0 {
cmd = req.Args[0]
args = args[1:]
}
accesslog.Record(r.Context(), string(req.Repo),
log.String("cmd", cmd),
log.Strings("args", args),
)
s.execHTTP(w, r, &req)
}
var blockedCommandExecutedCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "src_gitserver_exec_blocked_command_received",
Help: "Incremented each time a command not in the allowlist for gitserver is executed",
@ -1000,6 +660,7 @@ type execStatus struct {
Err error
}
// TODO: eseliger
// exec runs a git command. After the first write to w, it must not return an error.
// TODO(@camdencheek): once gRPC is the only consumer of this, do everything with errors
// because gRPC can handle trailing errors on a stream.
@ -1182,48 +843,6 @@ func (s *Server) exec(ctx context.Context, logger log.Logger, req *protocol.Exec
}, nil
}
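The TODO above refers to the gRPC pattern the new server relies on: a server-streaming handler can send any number of payload messages and then end the RPC with a non-OK status, so the error reaches the client as a trailer after whatever output was already streamed. A rough sketch of that shape (hypothetical stream interface, not the generated one):
package sketch
import (
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)
// byteStream is a stand-in for a generated server-stream interface; only Send
// matters for this sketch.
type byteStream interface {
	Send(data []byte) error
}
// runAndStream streams whatever run produces and, if run fails, ends the RPC
// with an error status so the failure arrives after any partial output that
// was already sent.
func runAndStream(ss byteStream, run func(send func([]byte) error) error) error {
	if err := run(ss.Send); err != nil {
		return status.New(codes.Unknown, err.Error()).Err()
	}
	return nil
}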
// execHTTP translates the results of an exec into the expected HTTP statuses and payloads
func (s *Server) execHTTP(w http.ResponseWriter, r *http.Request, req *protocol.ExecRequest) {
logger := s.Logger.Scoped("exec").With(log.Strings("req.Args", req.Args))
// Flush writes more aggressively than standard net/http so that clients
// with a context deadline see as much partial response body as possible.
if fw := newFlushingResponseWriter(logger, w); fw != nil {
w = fw
defer fw.Close()
}
ctx := r.Context()
w.Header().Set("Content-Type", "application/octet-stream")
w.Header().Set("Cache-Control", "no-cache")
w.Header().Set("Trailer", "X-Exec-Error")
w.Header().Add("Trailer", "X-Exec-Exit-Status")
w.Header().Add("Trailer", "X-Exec-Stderr")
execStatus, err := s.exec(ctx, logger, req, r.UserAgent(), w)
w.Header().Set("X-Exec-Error", errorString(execStatus.Err))
w.Header().Set("X-Exec-Exit-Status", strconv.Itoa(execStatus.ExitStatus))
w.Header().Set("X-Exec-Stderr", execStatus.Stderr)
if err != nil {
if v := (&NotFoundError{}); errors.As(err, &v) {
w.WriteHeader(http.StatusNotFound)
_ = json.NewEncoder(w).Encode(v.Payload)
} else if errors.Is(err, ErrInvalidCommand) {
w.WriteHeader(http.StatusBadRequest)
_, _ = w.Write([]byte("invalid command"))
} else {
// If it's not a well-known error, send the error text
// and a generic error code.
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte(err.Error()))
}
}
}
func setLastFetched(ctx context.Context, db database.DB, shardID string, dir common.GitDir, name api.RepoName) error {
lastFetched, err := repoLastFetched(dir)
if err != nil {
@ -2027,387 +1646,3 @@ func setLastChanged(logger log.Logger, dir common.GitDir) error {
return nil
}
// errorString returns the error string. If err is nil it returns the empty
// string.
func errorString(err error) string {
if err == nil {
return ""
}
return err.Error()
}
func (s *Server) handleIsPerforcePathCloneable(w http.ResponseWriter, r *http.Request) {
var req protocol.IsPerforcePathCloneableRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if req.DepotPath == "" {
http.Error(w, "no DepotPath given", http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.IsDepotPathCloneable(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd, req.DepotPath)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := json.NewEncoder(w).Encode(protocol.IsPerforcePathCloneableResponse{}); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handleCheckPerforceCredentials(w http.ResponseWriter, r *http.Request) {
var req protocol.CheckPerforceCredentialsRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
if err := json.NewEncoder(w).Encode(protocol.CheckPerforceCredentialsResponse{}); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handleGetObject(w http.ResponseWriter, r *http.Request) {
var req protocol.GetObjectRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, errors.Wrap(err, "decoding body").Error(), http.StatusBadRequest)
return
}
// Log which actor is accessing the repo.
accesslog.Record(r.Context(), string(req.Repo), log.String("objectname", req.ObjectName))
obj, err := git.GetObject(r.Context(), s.RecordingCommandFactory, s.ReposDir, req.Repo, req.ObjectName)
if err != nil {
http.Error(w, errors.Wrap(err, "getting object").Error(), http.StatusInternalServerError)
return
}
resp := protocol.GetObjectResponse{
Object: *obj,
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handlePerforceUsers(w http.ResponseWriter, r *http.Request) {
var req protocol.PerforceUsersRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
accesslog.Record(
r.Context(),
"<no-repo>",
log.String("p4user", req.P4User),
log.String("p4port", req.P4Port),
)
users, err := perforce.P4Users(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
resp := &protocol.PerforceUsersResponse{
Users: make([]protocol.PerforceUser, 0, len(users)),
}
for _, user := range users {
resp.Users = append(resp.Users, protocol.PerforceUser{
Username: user.Username,
Email: user.Email,
})
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handlePerforceProtectsForUser(w http.ResponseWriter, r *http.Request) {
var req protocol.PerforceProtectsForUserRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
accesslog.Record(
r.Context(),
"<no-repo>",
log.String("p4user", req.P4User),
log.String("p4port", req.P4Port),
)
protects, err := perforce.P4ProtectsForUser(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd, req.Username)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonProtects := make([]protocol.PerforceProtect, len(protects))
for i, p := range protects {
jsonProtects[i] = protocol.PerforceProtect{
Level: p.Level,
EntityType: p.EntityType,
EntityName: p.EntityName,
Match: p.Match,
IsExclusion: p.IsExclusion,
Host: p.Host,
}
}
resp := &protocol.PerforceProtectsForUserResponse{
Protects: jsonProtects,
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handlePerforceProtectsForDepot(w http.ResponseWriter, r *http.Request) {
var req protocol.PerforceProtectsForDepotRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
accesslog.Record(
r.Context(),
"<no-repo>",
log.String("p4user", req.P4User),
log.String("p4port", req.P4Port),
)
protects, err := perforce.P4ProtectsForDepot(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd, req.Depot)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
jsonProtects := make([]protocol.PerforceProtect, len(protects))
for i, p := range protects {
jsonProtects[i] = protocol.PerforceProtect{
Level: p.Level,
EntityType: p.EntityType,
EntityName: p.EntityName,
Match: p.Match,
IsExclusion: p.IsExclusion,
Host: p.Host,
}
}
resp := &protocol.PerforceProtectsForDepotResponse{
Protects: jsonProtects,
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handlePerforceGroupMembers(w http.ResponseWriter, r *http.Request) {
var req protocol.PerforceGroupMembersRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
accesslog.Record(
r.Context(),
"<no-repo>",
log.String("p4user", req.P4User),
log.String("p4port", req.P4Port),
)
members, err := perforce.P4GroupMembers(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd, req.Group)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
resp := &protocol.PerforceGroupMembersResponse{
Usernames: members,
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handleIsPerforceSuperUser(w http.ResponseWriter, r *http.Request) {
var req protocol.IsPerforceSuperUserRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
accesslog.Record(
r.Context(),
"<no-repo>",
log.String("p4user", req.P4User),
log.String("p4port", req.P4Port),
)
err = perforce.P4UserIsSuperUser(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
if err == perforce.ErrIsNotSuperUser {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
resp := &protocol.IsPerforceSuperUserResponse{}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}
func (s *Server) handlePerforceGetChangelist(w http.ResponseWriter, r *http.Request) {
var req protocol.PerforceGetChangelistRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
p4home, err := gitserverfs.MakeP4HomeDir(s.ReposDir)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
err = perforce.P4TestWithTrust(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd)
if err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
accesslog.Record(
r.Context(),
"<no-repo>",
log.String("p4user", req.P4User),
log.String("p4port", req.P4Port),
)
changelist, err := perforce.GetChangelistByID(r.Context(), p4home, req.P4Port, req.P4User, req.P4Passwd, req.ChangelistID)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
resp := &protocol.PerforceGetChangelistResponse{
Changelist: protocol.PerforceChangelist{
ID: changelist.ID,
CreationDate: changelist.CreationDate,
State: string(changelist.State),
Author: changelist.Author,
Title: changelist.Title,
Message: changelist.Message,
},
}
if err := json.NewEncoder(w).Encode(resp); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
}

View File

@ -214,11 +214,15 @@ func (gs *GRPCServer) doExec(ctx context.Context, logger log.Logger, req *protoc
}
gRPCStatus := codes.Unknown
if strings.Contains(execStatus.Err.Error(), "signal: killed") {
if execStatus.Err != nil && strings.Contains(execStatus.Err.Error(), "signal: killed") {
gRPCStatus = codes.Aborted
}
s, err := status.New(gRPCStatus, execStatus.Err.Error()).WithDetails(&proto.ExecStatusPayload{
var errString string
if execStatus.Err != nil {
errString = execStatus.Err.Error()
}
s, err := status.New(gRPCStatus, errString).WithDetails(&proto.ExecStatusPayload{
StatusCode: int32(execStatus.ExitStatus),
Stderr: execStatus.Stderr,
})
@ -228,6 +232,7 @@ func (gs *GRPCServer) doExec(ctx context.Context, logger log.Logger, req *protoc
}
return s.Err()
}
return nil
}
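On the caller side, the exit code and stderr that previously traveled as HTTP trailers now ride along as status details on the returned error. A minimal sketch of unpacking them, assuming the generated types in internal/gitserver/v1 are imported as proto, as in the server code above:
package sketch
import (
	"google.golang.org/grpc/status"
	proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
)
// execErrInfo extracts the exit status and stderr that the server attaches to
// an Exec error as status details, if present.
func execErrInfo(err error) (exitCode int32, stderr string, ok bool) {
	st, isStatus := status.FromError(err)
	if !isStatus {
		return 0, "", false
	}
	for _, d := range st.Details() {
		if p, isPayload := d.(*proto.ExecStatusPayload); isPayload {
			return p.GetStatusCode(), p.GetStderr(), true
		}
	}
	return 0, "", false
}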

View File

@ -1,14 +1,10 @@
package internal
import (
"bytes"
"container/list"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/http/httptest"
"os"
"os/exec"
"path/filepath"
@ -22,6 +18,8 @@ import (
"github.com/stretchr/testify/require"
"golang.org/x/sync/semaphore"
"golang.org/x/time/rate"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/sourcegraph/log/logtest"
@ -31,11 +29,13 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/perforce"
"github.com/sourcegraph/sourcegraph/cmd/gitserver/internal/vcssyncer"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
v1 "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
"github.com/sourcegraph/sourcegraph/internal/limiter"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
@ -46,124 +46,101 @@ import (
)
type Test struct {
Name string
Request *http.Request
ExpectedCode int
ExpectedBody string
ExpectedTrailers http.Header
}
func newRequest(method, path string, body io.Reader) *http.Request {
r := httptest.NewRequest(method, path, body)
r.Header.Add("X-Requested-With", "Sourcegraph")
return r
Name string
Request *v1.ExecRequest
ExpectedCode codes.Code
ExpectedBody string
ExpectedDetails []any
}
func TestExecRequest(t *testing.T) {
conf.Mock(&conf.Unified{})
t.Cleanup(func() { conf.Mock(nil) })
tests := []Test{
{
Name: "HTTP GET",
Request: newRequest("GET", "/exec", strings.NewReader("{}")),
ExpectedCode: http.StatusMethodNotAllowed,
ExpectedBody: "",
},
{
Name: "Command",
Request: newRequest("POST", "/exec", strings.NewReader(`{"repo": "github.com/gorilla/mux", "args": ["testcommand"]}`)),
ExpectedCode: http.StatusOK,
ExpectedBody: "teststdout",
ExpectedTrailers: http.Header{
"X-Exec-Error": {""},
"X-Exec-Exit-Status": {"42"},
"X-Exec-Stderr": {"teststderr"},
},
},
{
Name: "CommandWithURL",
Request: newRequest("POST", "/exec", strings.NewReader(`{"repo": "my-mux", "url": "https://github.com/gorilla/mux.git", "args": ["testcommand"]}`)),
ExpectedCode: http.StatusOK,
ExpectedBody: "teststdout",
ExpectedTrailers: http.Header{
"X-Exec-Error": {""},
"X-Exec-Exit-Status": {"42"},
"X-Exec-Stderr": {"teststderr"},
Name: "Command",
Request: &v1.ExecRequest{
Repo: "github.com/gorilla/mux",
Args: [][]byte{[]byte("testcommand")},
},
ExpectedCode: codes.Unknown,
ExpectedDetails: []any{&v1.ExecStatusPayload{
StatusCode: 42,
Stderr: "teststderr",
}},
},
{
Name: "echo",
Request: newRequest(
"POST", "/exec", strings.NewReader(
`{"repo": "github.com/gorilla/mux", "args": ["testecho", "hi"]}`,
),
),
ExpectedCode: http.StatusOK,
ExpectedBody: "hi",
ExpectedTrailers: http.Header{
"X-Exec-Error": {""},
"X-Exec-Exit-Status": {"0"},
"X-Exec-Stderr": {""},
Request: &v1.ExecRequest{
Repo: "github.com/gorilla/mux",
Args: [][]byte{[]byte("testecho"), []byte("hi")},
},
ExpectedCode: codes.OK,
ExpectedBody: "hi",
},
{
Name: "stdin",
Request: newRequest(
"POST", "/exec", strings.NewReader(
`{"repo": "github.com/gorilla/mux", "args": ["testcat"], "stdin": "aGk="}`,
),
),
ExpectedCode: http.StatusOK,
Request: &v1.ExecRequest{
Repo: "github.com/gorilla/mux",
Args: [][]byte{[]byte("testcat")},
Stdin: []byte("hi"),
},
ExpectedCode: codes.OK,
ExpectedBody: "hi",
ExpectedTrailers: http.Header{
"X-Exec-Error": {""},
"X-Exec-Exit-Status": {"0"},
"X-Exec-Stderr": {""},
},
{
Name: "NonexistingRepo",
Request: &v1.ExecRequest{
Repo: "github.com/gorilla/doesnotexist",
Args: [][]byte{[]byte("testcommand")},
},
ExpectedCode: codes.NotFound,
ExpectedBody: "repo not found",
ExpectedDetails: []any{&v1.NotFoundPayload{
Repo: "github.com/gorilla/doesnotexist",
CloneInProgress: false,
}},
},
{
Name: "NonexistingRepo",
Request: newRequest("POST", "/exec", strings.NewReader(`{"repo": "github.com/gorilla/doesnotexist", "args": ["testcommand"]}`)),
ExpectedCode: http.StatusNotFound,
ExpectedBody: `{"cloneInProgress":false}`,
},
{
Name: "NonexistingRepoWithURL",
Request: newRequest(
"POST", "/exec", strings.NewReader(`{"repo": "my-doesnotexist", "url": "https://github.com/gorilla/doesntexist.git", "args": ["testcommand"]}`)),
ExpectedCode: http.StatusNotFound,
ExpectedBody: `{"cloneInProgress":false}`,
},
{
Name: "UnclonedRepoWithoutURL",
Request: newRequest("POST", "/exec", strings.NewReader(`{"repo": "github.com/nicksnyder/go-i18n", "args": ["testcommand"]}`)),
ExpectedCode: http.StatusNotFound,
ExpectedBody: `{"cloneInProgress":true}`, // we now fetch the URL from GetRemoteURL so it works.
},
{
Name: "UnclonedRepoWithURL",
Request: newRequest("POST", "/exec", strings.NewReader(`{"repo": "github.com/nicksnyder/go-i18n", "url": "https://github.com/nicksnyder/go-i18n.git", "args": ["testcommand"]}`)),
ExpectedCode: http.StatusNotFound,
ExpectedBody: `{"cloneInProgress":true}`,
},
{
Name: "Error",
Request: newRequest("POST", "/exec", strings.NewReader(`{"repo": "github.com/gorilla/mux", "args": ["testerror"]}`)),
ExpectedCode: http.StatusOK,
ExpectedTrailers: http.Header{
"X-Exec-Error": {"testerror"},
"X-Exec-Exit-Status": {"0"},
"X-Exec-Stderr": {""},
Name: "UnclonedRepo",
Request: &v1.ExecRequest{
Repo: "github.com/nicksnyder/go-i18n",
Args: [][]byte{[]byte("testcommand")},
},
ExpectedCode: codes.NotFound,
ExpectedBody: "repo not found",
ExpectedDetails: []any{&v1.NotFoundPayload{
Repo: "github.com/nicksnyder/go-i18n",
CloneInProgress: true,
}},
},
{
Name: "Error",
Request: &v1.ExecRequest{
Repo: "github.com/gorilla/mux",
Args: [][]byte{[]byte("testerror")},
},
ExpectedCode: codes.Unknown,
ExpectedBody: "testerror",
ExpectedDetails: []any{&v1.ExecStatusPayload{
StatusCode: 1,
Stderr: "teststderr",
}},
},
{
Name: "EmptyInput",
Request: newRequest("POST", "/exec", strings.NewReader("{}")),
ExpectedCode: http.StatusBadRequest,
Request: &v1.ExecRequest{},
ExpectedCode: codes.InvalidArgument,
ExpectedBody: "invalid command",
},
{
Name: "BadCommand",
Request: newRequest("POST", "/exec", strings.NewReader(`{"repo":"github.com/sourcegraph/sourcegraph", "args": ["invalid-command"]}`)),
ExpectedCode: http.StatusBadRequest,
Name: "BadCommand",
Request: &v1.ExecRequest{
Repo: "github.com/sourcegraph/sourcegraph",
Args: [][]byte{[]byte("invalid-command")},
},
ExpectedCode: codes.InvalidArgument,
ExpectedBody: "invalid command",
},
}
@ -188,7 +165,10 @@ func TestExecRequest(t *testing.T) {
Locker: NewRepositoryLocker(),
RPSLimiter: ratelimit.NewInstrumentedLimiter("GitserverTest", rate.NewLimiter(rate.Inf, 10)),
}
h := s.Handler()
// Initialize side-effects.
_ = s.Handler()
gs := &GRPCServer{Server: s}
origRepoCloned := repoCloned
repoCloned = func(dir common.GitDir) bool {
@ -211,7 +191,8 @@ func TestExecRequest(t *testing.T) {
_, _ = cmd.Stderr.Write([]byte("teststderr"))
return 42, nil
case "testerror":
return 0, errors.New("testerror")
_, _ = cmd.Stderr.Write([]byte("teststderr"))
return 1, errors.New("testerror")
case "testecho", "testcat":
// We do an actual exec in this case to test that code path.
exe := strings.TrimPrefix(cmd.Args[1], "test")
@ -236,37 +217,46 @@ func TestExecRequest(t *testing.T) {
for _, test := range tests {
t.Run(test.Name, func(t *testing.T) {
w := httptest.ResponseRecorder{Body: new(bytes.Buffer)}
h.ServeHTTP(&w, test.Request)
ss := gitserver.NewMockGitserverService_ExecServer()
ss.ContextFunc.SetDefaultReturn(context.Background())
var receivedData []byte
ss.SendFunc.SetDefaultHook(func(er *v1.ExecResponse) error {
receivedData = append(receivedData, er.GetData()...)
return nil
})
err := gs.Exec(test.Request, ss)
res := w.Result()
if res.StatusCode != test.ExpectedCode {
t.Errorf("wrong status: expected %d, got %d", test.ExpectedCode, w.Code)
}
body, err := io.ReadAll(res.Body)
if err != nil {
if test.ExpectedCode == codes.OK && err != nil {
t.Fatal(err)
}
if strings.TrimSpace(string(body)) != test.ExpectedBody {
t.Errorf("wrong body: expected %q, got %q", test.ExpectedBody, string(body))
if test.ExpectedCode != codes.OK {
if err == nil {
t.Fatal("expected error to be returned")
}
s, ok := status.FromError(err)
require.True(t, ok)
require.Equal(t, test.ExpectedCode, s.Code())
if len(test.ExpectedDetails) > 0 {
if diff := cmp.Diff(test.ExpectedDetails, s.Details(), cmpopts.IgnoreUnexported(v1.ExecStatusPayload{}, v1.NotFoundPayload{})); diff != "" {
t.Fatalf("unexpected error details (-want +got):\n%s", diff)
}
}
if strings.TrimSpace(s.Message()) != test.ExpectedBody {
t.Errorf("wrong error body: expected %q, got %q", test.ExpectedBody, s.Message())
}
return
}
for k, v := range test.ExpectedTrailers {
if got := res.Trailer.Get(k); got != v[0] {
t.Errorf("wrong trailer %q: expected %q, got %q", k, v[0], got)
}
if strings.TrimSpace(string(receivedData)) != test.ExpectedBody {
t.Errorf("wrong body: expected %q, got %q", test.ExpectedBody, string(receivedData))
}
})
}
}
func staticGetRemoteURL(remote string) func(context.Context, api.RepoName) (string, error) {
return func(context.Context, api.RepoName) (string, error) {
return remote, nil
}
}
// makeSingleCommitRepo creates a new repo with a single commit and returns
// the HEAD SHA
func makeSingleCommitRepo(cmd func(string, ...string) string) string {
@ -305,10 +295,12 @@ func makeTestServer(ctx context.Context, t *testing.T, repoDir, remote string, d
cloneQueue := NewCloneQueue(obctx, list.New())
s := &Server{
Logger: logger,
ObservationCtx: obctx,
ReposDir: repoDir,
GetRemoteURLFunc: staticGetRemoteURL(remote),
Logger: logger,
ObservationCtx: obctx,
ReposDir: repoDir,
GetRemoteURLFunc: func(context.Context, api.RepoName) (string, error) {
return remote, nil
},
GetVCSSyncer: func(ctx context.Context, name api.RepoName) (vcssyncer.VCSSyncer, error) {
return vcssyncer.NewGitRepoSyncer(logtest.Scoped(t), wrexec.NewNoOpRecordingCommandFactory()), nil
},
@ -525,130 +517,6 @@ var ignoreVolatileGitserverRepoFields = cmpopts.IgnoreFields(
"CloningProgress",
)
func TestHandleRepoDelete(t *testing.T) {
testHandleRepoDelete(t, false)
}
func TestHandleRepoDeleteWhenDeleteInDB(t *testing.T) {
// We also want to ensure that we can delete repo data on disk for a repo that
// has already been deleted in the DB.
testHandleRepoDelete(t, true)
}
func testHandleRepoDelete(t *testing.T, deletedInDB bool) {
logger := logtest.Scoped(t)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
remote := t.TempDir()
repoName := api.RepoName("example.com/foo/bar")
db := database.NewDB(logger, dbtest.NewDB(t))
dbRepo := &types.Repo{
Name: repoName,
Description: "Test",
}
// Insert the repo into our database
if err := db.Repos().Create(ctx, dbRepo); err != nil {
t.Fatal(err)
}
repo := remote
cmd := func(name string, arg ...string) string {
t.Helper()
return runCmd(t, repo, name, arg...)
}
_ = makeSingleCommitRepo(cmd)
// Add a bad tag
cmd("git", "tag", "HEAD")
reposDir := t.TempDir()
s := makeTestServer(ctx, t, reposDir, remote, db)
// We need some of the side effects here
_ = s.Handler()
rr := httptest.NewRecorder()
updateReq := protocol.RepoUpdateRequest{
Repo: repoName,
}
body, err := json.Marshal(updateReq)
if err != nil {
t.Fatal(err)
}
// This will perform an initial clone
req := newRequest("GET", "/repo-update", bytes.NewReader(body))
s.handleRepoUpdate(rr, req)
size := gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.ReposDir, repoName).Path("."))
want := &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
CloneStatus: types.CloneStatusCloned,
RepoSizeBytes: size,
}
fromDB, err := db.GitserverRepos().GetByID(ctx, dbRepo.ID)
if err != nil {
t.Fatal(err)
}
// We don't expect an error
if diff := cmp.Diff(want, fromDB, ignoreVolatileGitserverRepoFields); diff != "" {
t.Fatal(diff)
}
if deletedInDB {
if err := db.Repos().Delete(ctx, dbRepo.ID); err != nil {
t.Fatal(err)
}
repos, err := db.Repos().List(ctx, database.ReposListOptions{IncludeDeleted: true, IDs: []api.RepoID{dbRepo.ID}})
if err != nil {
t.Fatal(err)
}
if len(repos) != 1 {
t.Fatalf("Expected 1 repo, got %d", len(repos))
}
dbRepo = repos[0]
}
// Now we can delete it
deleteReq := protocol.RepoDeleteRequest{
Repo: dbRepo.Name,
}
body, err = json.Marshal(deleteReq)
if err != nil {
t.Fatal(err)
}
req = newRequest("GET", "/delete", bytes.NewReader(body))
s.handleRepoDelete(rr, req)
size = gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.ReposDir, repoName).Path("."))
if size != 0 {
t.Fatalf("Size should be 0, got %d", size)
}
// Check status in gitserver_repos
want = &types.GitserverRepo{
RepoID: dbRepo.ID,
ShardID: "",
CloneStatus: types.CloneStatusNotCloned,
RepoSizeBytes: size,
}
fromDB, err = db.GitserverRepos().GetByID(ctx, dbRepo.ID)
if err != nil {
t.Fatal(err)
}
// We don't expect an error
if diff := cmp.Diff(want, fromDB, ignoreVolatileGitserverRepoFields); diff != "" {
t.Fatal(diff)
}
}
func TestHandleRepoUpdate(t *testing.T) {
logger := logtest.Scoped(t)
ctx, cancel := context.WithCancel(context.Background())
@ -683,23 +551,14 @@ func TestHandleRepoUpdate(t *testing.T) {
// We need the side effects here
_ = s.Handler()
rr := httptest.NewRecorder()
updateReq := protocol.RepoUpdateRequest{
Repo: repoName,
}
body, err := json.Marshal(updateReq)
if err != nil {
t.Fatal(err)
}
// Confirm that failing to clone the repo stores the error
oldRemoteURLFunc := s.GetRemoteURLFunc
s.GetRemoteURLFunc = func(ctx context.Context, name api.RepoName) (string, error) {
return "https://invalid.example.com/", nil
}
req := newRequest("GET", "/repo-update", bytes.NewReader(body))
s.handleRepoUpdate(rr, req)
s.repoUpdate(&protocol.RepoUpdateRequest{
Repo: repoName,
})
size := gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.ReposDir, repoName).Path("."))
want := &types.GitserverRepo{
@ -728,8 +587,9 @@ func TestHandleRepoUpdate(t *testing.T) {
// This will perform an initial clone
s.GetRemoteURLFunc = oldRemoteURLFunc
req = newRequest("GET", "/repo-update", bytes.NewReader(body))
s.handleRepoUpdate(rr, req)
s.repoUpdate(&protocol.RepoUpdateRequest{
Repo: repoName,
})
size = gitserverfs.DirSize(gitserverfs.RepoDirFromName(s.ReposDir, repoName).Path("."))
want = &types.GitserverRepo{
@ -756,8 +616,9 @@ func TestHandleRepoUpdate(t *testing.T) {
t.Cleanup(func() { doBackgroundRepoUpdateMock = nil })
// This will trigger an update since the repo is already cloned
req = newRequest("GET", "/repo-update", bytes.NewReader(body))
s.handleRepoUpdate(rr, req)
s.repoUpdate(&protocol.RepoUpdateRequest{
Repo: repoName,
})
want = &types.GitserverRepo{
RepoID: dbRepo.ID,
@ -780,8 +641,9 @@ func TestHandleRepoUpdate(t *testing.T) {
doBackgroundRepoUpdateMock = nil
// This will trigger an update since the repo is already cloned
req = newRequest("GET", "/repo-update", bytes.NewReader(body))
s.handleRepoUpdate(rr, req)
s.repoUpdate(&protocol.RepoUpdateRequest{
Repo: repoName,
})
want = &types.GitserverRepo{
RepoID: dbRepo.ID,
@ -1060,11 +922,10 @@ func TestSyncRepoState(t *testing.T) {
}
type BatchLogTest struct {
Name string
Request *http.Request
ExpectedCode int
ExpectedBody string
RunCommandMock func(ctx context.Context, cmd *exec.Cmd) (int, error)
Name string
Request *v1.BatchLogRequest
ExpectedCode codes.Code
ExpectedBody *protocol.BatchLogResponse
}
func TestHandleBatchLog(t *testing.T) {
@ -1087,32 +948,26 @@ func TestHandleBatchLog(t *testing.T) {
t.Cleanup(func() { executil.UpdateRunCommandMock(nil) })
tests := []BatchLogTest{
{
Name: "bad request",
Request: newRequest("POST", "/batch-log", strings.NewReader(``)),
ExpectedCode: http.StatusBadRequest,
ExpectedBody: "EOF", // the particular error when parsing empty payload
},
{
Name: "empty",
Request: newRequest("POST", "/batch-log", strings.NewReader(`{}`)),
ExpectedCode: http.StatusOK,
ExpectedBody: mustEncodeJSONResponse(protocol.BatchLogResponse{
Request: &v1.BatchLogRequest{},
ExpectedCode: codes.OK,
ExpectedBody: &protocol.BatchLogResponse{
Results: []protocol.BatchLogResult{},
}),
},
},
{
Name: "all resolved",
Request: newRequest("POST", "/batch-log", strings.NewReader(`{
"repoCommits": [
{"repo": "github.com/foo/bar", "commitId": "deadbeef1"},
{"repo": "github.com/foo/baz", "commitId": "deadbeef2"},
{"repo": "github.com/foo/bonk", "commitId": "deadbeef3"}
],
"format": "--format=test"
}`)),
ExpectedCode: http.StatusOK,
ExpectedBody: mustEncodeJSONResponse(protocol.BatchLogResponse{
Request: &v1.BatchLogRequest{
RepoCommits: []*v1.RepoCommit{
{Repo: "github.com/foo/bar", Commit: "deadbeef1"},
{Repo: "github.com/foo/baz", Commit: "deadbeef2"},
{Repo: "github.com/foo/bonk", Commit: "deadbeef3"},
},
Format: "--format=test",
},
ExpectedCode: codes.OK,
ExpectedBody: &protocol.BatchLogResponse{
Results: []protocol.BatchLogResult{
{
RepoCommit: api.RepoCommit{Repo: "github.com/foo/bar", CommitID: "deadbeef1"},
@ -1130,20 +985,20 @@ func TestHandleBatchLog(t *testing.T) {
CommandError: "",
},
},
}),
},
},
{
Name: "partially resolved",
Request: newRequest("POST", "/batch-log", strings.NewReader(`{
"repoCommits": [
{"repo": "github.com/foo/bar", "commitId": "deadbeef1"},
{"repo": "github.com/foo/baz", "commitId": "dumbmilk1"},
{"repo": "github.com/foo/honk", "commitId": "deadbeef3"}
],
"format": "--format=test"
}`)),
ExpectedCode: http.StatusOK,
ExpectedBody: mustEncodeJSONResponse(protocol.BatchLogResponse{
Request: &v1.BatchLogRequest{
RepoCommits: []*v1.RepoCommit{
{Repo: "github.com/foo/bar", Commit: "deadbeef1"},
{Repo: "github.com/foo/baz", Commit: "dumbmilk1"},
{Repo: "github.com/foo/honk", Commit: "deadbeef3"},
},
Format: "--format=test",
},
ExpectedCode: codes.OK,
ExpectedBody: &protocol.BatchLogResponse{
Results: []protocol.BatchLogResult{
{
RepoCommit: api.RepoCommit{Repo: "github.com/foo/bar", CommitID: "deadbeef1"},
@ -1163,7 +1018,7 @@ func TestHandleBatchLog(t *testing.T) {
CommandError: "repo not found",
},
},
}),
},
},
}
@ -1177,129 +1032,35 @@ func TestHandleBatchLog(t *testing.T) {
RecordingCommandFactory: wrexec.NewNoOpRecordingCommandFactory(),
Locker: NewRepositoryLocker(),
}
h := server.Handler()
// Initialize side effects.
_ = server.Handler()
w := httptest.ResponseRecorder{Body: new(bytes.Buffer)}
h.ServeHTTP(&w, test.Request)
gs := &GRPCServer{Server: server}
res := w.Result()
if res.StatusCode != test.ExpectedCode {
t.Errorf("wrong status: expected %d, got %d", test.ExpectedCode, w.Code)
}
res, err := gs.BatchLog(context.Background(), test.Request)
body, err := io.ReadAll(res.Body)
if err != nil {
if test.ExpectedCode == codes.OK && err != nil {
t.Fatal(err)
}
if strings.TrimSpace(string(body)) != test.ExpectedBody {
t.Errorf("wrong body: expected %q, got %q", test.ExpectedBody, string(body))
if test.ExpectedCode != codes.OK {
if err == nil {
t.Fatal("expected error to be returned")
}
s, ok := status.FromError(err)
require.True(t, ok)
require.Equal(t, test.ExpectedCode, s.Code())
return
}
var have protocol.BatchLogResponse
have.FromProto(res)
require.Equal(t, test.ExpectedBody, &have)
})
}
}
func TestHeaderXRequestedWithMiddleware(t *testing.T) {
test := headerXRequestedWithMiddleware(
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.Write([]byte("success"))
w.WriteHeader(http.StatusOK)
}),
)
assertBody := func(result *http.Response, want string) {
b, err := io.ReadAll(result.Body)
if err != nil {
t.Fatalf("failed to read body: %v", err)
}
data := string(b)
if data != want {
t.Fatalf(`Expected body to contain %q, but found %q`, want, data)
}
}
failureExpectation := "header X-Requested-With is not set or is invalid\n"
t.Run("x-requested-with not set", func(t *testing.T) {
r := httptest.NewRequest(http.MethodGet, "/", nil)
w := httptest.NewRecorder()
test(w, r)
result := w.Result()
defer result.Body.Close()
if result.StatusCode != http.StatusBadRequest {
t.Fatalf("expected HTTP status code %d, but got %d", http.StatusBadRequest, result.StatusCode)
}
assertBody(result, failureExpectation)
})
t.Run("x-requested-with invalid value", func(t *testing.T) {
r := httptest.NewRequest(http.MethodGet, "/", nil)
r.Header.Add("X-Requested-With", "foo")
w := httptest.NewRecorder()
test(w, r)
result := w.Result()
defer result.Body.Close()
if result.StatusCode != http.StatusBadRequest {
t.Fatalf("expected HTTP status code %d, but got %d", http.StatusBadRequest, result.StatusCode)
}
assertBody(result, failureExpectation)
})
t.Run("x-requested-with correct value", func(t *testing.T) {
r := httptest.NewRequest(http.MethodGet, "/", nil)
r.Header.Add("X-Requested-With", "Sourcegraph")
w := httptest.NewRecorder()
test(w, r)
result := w.Result()
defer result.Body.Close()
if result.StatusCode != http.StatusOK {
t.Fatalf("expected HTTP status code %d, but got %d", http.StatusOK, result.StatusCode)
}
assertBody(result, "success")
})
t.Run("check skippped for /ping", func(t *testing.T) {
r := httptest.NewRequest(http.MethodGet, "/ping", nil)
w := httptest.NewRecorder()
test(w, r)
result := w.Result()
defer result.Body.Close()
if result.StatusCode != http.StatusOK {
t.Fatalf("expected HTTP status code %d, but got %d", http.StatusOK, result.StatusCode)
}
})
t.Run("check skipped for /git", func(t *testing.T) {
r := httptest.NewRequest(http.MethodGet, "/git/foo/bar", nil)
w := httptest.NewRecorder()
test(w, r)
result := w.Result()
defer result.Body.Close()
if result.StatusCode != http.StatusOK {
t.Fatalf("expected HTTP status code %d, but got %d", http.StatusOK, result.StatusCode)
}
})
}
func TestLogIfCorrupt(t *testing.T) {
logger := logtest.Scoped(t)
ctx, cancel := context.WithCancel(context.Background())
@ -1367,11 +1128,6 @@ func TestLogIfCorrupt(t *testing.T) {
})
}
func mustEncodeJSONResponse(value any) string {
encoded, _ := json.Marshal(value)
return strings.TrimSpace(string(encoded))
}
func TestStdErrIndicatesCorruption(t *testing.T) {
bad := []string{
"error: packfile .git/objects/pack/pack-a.pack does not match index",

View File

@ -3,11 +3,8 @@ package internal
import (
"fmt"
"io"
"net/http"
"os"
"reflect"
"strings"
"sync"
"time"
"github.com/sourcegraph/log"
@ -107,111 +104,6 @@ func (l *limitWriter) Write(p []byte) (int, error) {
return n, err
}
// flushingResponseWriter is a http.ResponseWriter that flushes all writes
// to the underlying connection within a certain time period after Write is
// called (instead of buffering them indefinitely).
//
// This lets, e.g., clients with a context deadline see as much partial response
// body as possible.
type flushingResponseWriter struct {
// mu ensures we don't concurrently call Flush and Write. It also protects
// state.
mu sync.Mutex
w http.ResponseWriter
flusher http.Flusher
closed bool
doFlush bool
}
var logUnflushableResponseWriterOnce sync.Once
// newFlushingResponseWriter creates a new flushing response writer. Callers
// must call Close to free the resources created by the writer.
//
// If w does not support flushing, it returns nil.
func newFlushingResponseWriter(logger log.Logger, w http.ResponseWriter) *flushingResponseWriter {
// We panic if we don't implement the needed interfaces.
flusher := hackilyGetHTTPFlusher(w)
if flusher == nil {
logUnflushableResponseWriterOnce.Do(func() {
logger.Warn("unable to flush HTTP response bodies - Diff search performance and completeness will be affected",
log.String("type", reflect.TypeOf(w).String()))
})
return nil
}
w.Header().Set("Transfer-Encoding", "chunked")
f := &flushingResponseWriter{w: w, flusher: flusher}
go f.periodicFlush()
return f
}
// hackilyGetHTTPFlusher attempts to get an http.Flusher from w. It (hackily) handles the case where w is a
// nethttp.statusCodeTracker (which wraps http.ResponseWriter and does not implement http.Flusher). See
// https://github.com/opentracing-contrib/go-stdlib/pull/11#discussion_r164295773 and
// https://github.com/sourcegraph/sourcegraph/issues/9045.
//
// I (@sqs) wrote this hack instead of fixing it upstream immediately because there seems to be some reluctance to merge
// a fix (because it'd make the http.ResponseWriter falsely appear to implement many interfaces that it doesn't
// actually implement, so it would break the correctness of Go type-assertion impl checks).
func hackilyGetHTTPFlusher(w http.ResponseWriter) http.Flusher {
if f, ok := w.(http.Flusher); ok {
return f
}
if reflect.TypeOf(w).String() == "*nethttp.statusCodeTracker" {
v := reflect.ValueOf(w).Elem()
if v.Kind() == reflect.Struct {
if rwv := v.FieldByName("ResponseWriter"); rwv.IsValid() {
f, ok := rwv.Interface().(http.Flusher)
if ok {
return f
}
}
}
}
return nil
}
// Header implements http.ResponseWriter.
func (f *flushingResponseWriter) Header() http.Header { return f.w.Header() }
// WriteHeader implements http.ResponseWriter.
func (f *flushingResponseWriter) WriteHeader(code int) { f.w.WriteHeader(code) }
// Write implements http.ResponseWriter.
func (f *flushingResponseWriter) Write(p []byte) (int, error) {
f.mu.Lock()
n, err := f.w.Write(p)
if n > 0 {
f.doFlush = true
}
f.mu.Unlock()
return n, err
}
func (f *flushingResponseWriter) periodicFlush() {
for {
time.Sleep(100 * time.Millisecond)
f.mu.Lock()
if f.closed {
f.mu.Unlock()
break
}
if f.doFlush {
f.flusher.Flush()
}
f.mu.Unlock()
}
}
// Close signals to the flush goroutine to stop.
func (f *flushingResponseWriter) Close() {
f.mu.Lock()
f.closed = true
f.mu.Unlock()
}
// mapToLoggerField translates a map to log context fields.
func mapToLoggerField(m map[string]any) []log.Field {
LogFields := []log.Field{}

View File

@ -1,45 +0,0 @@
package internal
import (
"net/http/httptest"
"testing"
"time"
)
func TestFlushingResponseWriter(t *testing.T) {
flush := make(chan struct{})
fw := &flushingResponseWriter{
w: httptest.NewRecorder(),
flusher: flushFunc(func() {
flush <- struct{}{}
}),
}
done := make(chan struct{})
go func() {
fw.periodicFlush()
close(done)
}()
_, _ = fw.Write([]byte("hi"))
select {
case <-flush:
close(flush)
case <-time.After(5 * time.Second):
t.Fatal("periodic flush did not happen")
}
fw.Close()
select {
case <-done:
case <-time.After(5 * time.Second):
t.Fatal("periodic flush goroutine did not close")
}
}
type flushFunc func()
func (f flushFunc) Flush() {
f()
}

View File

@ -71,7 +71,6 @@ go_test(
"@com_github_google_go_cmp//cmp/cmpopts",
"@com_github_inconshreveable_log15//:log15",
"@com_github_sourcegraph_log//logtest",
"@io_opentelemetry_go_otel_trace//:trace",
"@org_golang_google_grpc//:go_default_library",
],
)

View File

@ -23,7 +23,7 @@ func (s *RepoUpdaterServiceServer) RepoUpdateSchedulerInfo(_ context.Context, re
}
func (s *RepoUpdaterServiceServer) RepoLookup(ctx context.Context, req *proto.RepoLookupRequest) (*proto.RepoLookupResponse, error) {
res, err := s.Server.repoLookup(ctx, api.RepoName(req.Repo))
res, err := s.Server.repoLookup(ctx, api.RepoName(req.GetRepo()))
if err != nil {
return nil, err
}
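Switching from req.Repo to req.GetRepo() follows the usual protobuf-Go convention: generated getters tolerate a nil receiver, so a missing request message yields the zero value instead of a panic. In isolation (assuming the same proto import as the handler above; the helper name is hypothetical):
// repoNameOrEmpty is safe to call with a nil request: generated protobuf
// getters check for a nil receiver and return the zero value.
func repoNameOrEmpty(req *proto.RepoLookupRequest) string {
	return req.GetRepo()
}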

View File

@ -3,7 +3,6 @@ package repoupdater
import (
"context"
"encoding/json"
"fmt"
"net/http"
"time"
@ -42,60 +41,9 @@ func (s *Server) Handler() http.Handler {
mux.HandleFunc("/healthz", trace.WithRouteName("healthz", func(w http.ResponseWriter, _ *http.Request) {
w.WriteHeader(http.StatusOK)
}))
mux.HandleFunc("/repo-update-scheduler-info", trace.WithRouteName("repo-update-scheduler-info", s.handleRepoUpdateSchedulerInfo))
mux.HandleFunc("/repo-lookup", trace.WithRouteName("repo-lookup", s.handleRepoLookup))
mux.HandleFunc("/enqueue-repo-update", trace.WithRouteName("enqueue-repo-update", s.handleEnqueueRepoUpdate))
mux.HandleFunc("/enqueue-changeset-sync", trace.WithRouteName("enqueue-changeset-sync", s.handleEnqueueChangesetSync))
return mux
}
func (s *Server) handleRepoUpdateSchedulerInfo(w http.ResponseWriter, r *http.Request) {
var args protocol.RepoUpdateSchedulerInfoArgs
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
s.respond(w, http.StatusBadRequest, err)
return
}
result := s.Scheduler.ScheduleInfo(args.ID)
s.respond(w, http.StatusOK, result)
}
func (s *Server) handleRepoLookup(w http.ResponseWriter, r *http.Request) {
var args protocol.RepoLookupArgs
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
result, err := s.repoLookup(r.Context(), args.Repo)
if err != nil {
if r.Context().Err() != nil {
http.Error(w, "request canceled", http.StatusGatewayTimeout)
return
}
s.Logger.Error("repoLookup failed", log.String("name", string(args.Repo)), log.Error(err))
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
s.respond(w, http.StatusOK, result)
}
func (s *Server) handleEnqueueRepoUpdate(w http.ResponseWriter, r *http.Request) {
var req protocol.RepoUpdateRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
s.respond(w, http.StatusBadRequest, err)
return
}
result, status, err := s.enqueueRepoUpdate(r.Context(), &req)
if err != nil {
s.Logger.Warn("enqueueRepoUpdate failed", log.String("req", fmt.Sprint(req)), log.Error(err))
s.respond(w, status, err)
return
}
s.respond(w, status, result)
}
func (s *Server) enqueueRepoUpdate(ctx context.Context, req *protocol.RepoUpdateRequest) (resp *protocol.RepoUpdateResponse, httpStatus int, err error) {
tr, ctx := trace.New(ctx, "enqueueRepoUpdate", attribute.Stringer("req", req))
defer func() {
@ -129,30 +77,6 @@ func (s *Server) enqueueRepoUpdate(ctx context.Context, req *protocol.RepoUpdate
}, http.StatusOK, nil
}
func (s *Server) respond(w http.ResponseWriter, code int, v any) {
switch val := v.(type) {
case error:
if val != nil {
s.Logger.Error("response value error", log.Error(val))
w.Header().Set("Content-Type", "text/plain; charset=utf-8")
w.WriteHeader(code)
fmt.Fprintf(w, "%v", val)
}
default:
w.Header().Set("Content-Type", "application/json")
bs, err := json.Marshal(v)
if err != nil {
s.respond(w, http.StatusInternalServerError, err)
return
}
w.WriteHeader(code)
if _, err = w.Write(bs); err != nil {
s.Logger.Error("failed to write response", log.Error(err))
}
}
}
var mockRepoLookup func(api.RepoName) (*protocol.RepoLookupResult, error)
func (s *Server) repoLookup(ctx context.Context, repoName api.RepoName) (result *protocol.RepoLookupResult, err error) {
@ -196,28 +120,3 @@ func (s *Server) repoLookup(ctx context.Context, repoName api.RepoName) (result
return &protocol.RepoLookupResult{Repo: protocol.NewRepoInfo(repo)}, nil
}
func (s *Server) handleEnqueueChangesetSync(w http.ResponseWriter, r *http.Request) {
if s.ChangesetSyncRegistry == nil {
s.Logger.Warn("ChangesetSyncer is nil")
s.respond(w, http.StatusForbidden, nil)
return
}
var req protocol.ChangesetSyncRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
s.respond(w, http.StatusBadRequest, err)
return
}
if len(req.IDs) == 0 {
s.respond(w, http.StatusBadRequest, errors.New("no ids provided"))
return
}
err := s.ChangesetSyncRegistry.EnqueueChangesetSyncs(r.Context(), req.IDs)
if err != nil {
resp := protocol.ChangesetSyncResponse{Error: err.Error()}
s.respond(w, http.StatusInternalServerError, resp)
return
}
s.respond(w, http.StatusOK, nil)
}
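With the JSON endpoints and the respond() helper gone, error handling moves from hand-picked HTTP status codes to gRPC status errors, as other handlers in this commit do (for example the searcher Server.Search below, which returns codes.InvalidArgument for bad parameters). A rough sketch of that shape using only grpc-go; the helper and the code choices here are illustrative, not the repository's actual mapping:

```go
package main

import (
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// toStatusErr is a hypothetical helper: instead of writing a status code and
// a JSON body the way the removed respond() did, a gRPC handler returns a
// status error and lets the transport serialize it for the client.
func toStatusErr(err error, badRequest bool) error {
	if err == nil {
		return nil
	}
	if badRequest {
		return status.Error(codes.InvalidArgument, err.Error())
	}
	return status.Error(codes.Internal, err.Error())
}

func main() {
	fmt.Println(toStatusErr(fmt.Errorf("no ids provided"), true))
}
```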

View File

@ -1,9 +1,7 @@
package repoupdater
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net/http"
"net/http/httptest"
@ -14,7 +12,6 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"go.opentelemetry.io/otel/trace"
"google.golang.org/grpc"
"github.com/sourcegraph/log/logtest"
@ -43,108 +40,6 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func TestServer_handleRepoLookup(t *testing.T) {
logger := logtest.Scoped(t)
s := &Server{Logger: logger}
h := ObservedHandler(
logger,
NewHandlerMetrics(),
trace.NewNoopTracerProvider(),
)(s.Handler())
repoLookup := func(t *testing.T, repo api.RepoName) (resp *protocol.RepoLookupResult, statusCode int) {
t.Helper()
rr := httptest.NewRecorder()
body, err := json.Marshal(protocol.RepoLookupArgs{Repo: repo})
if err != nil {
t.Fatal(err)
}
req := httptest.NewRequest("GET", "/repo-lookup", bytes.NewReader(body))
fmt.Printf("h: %v rr: %v req: %v\n", h, rr, req)
h.ServeHTTP(rr, req)
if rr.Code == http.StatusOK {
if err := json.NewDecoder(rr.Body).Decode(&resp); err != nil {
t.Fatal(err)
}
}
return resp, rr.Code
}
repoLookupResult := func(t *testing.T, repo api.RepoName) protocol.RepoLookupResult {
t.Helper()
resp, statusCode := repoLookup(t, repo)
if statusCode != http.StatusOK {
t.Fatalf("http non-200 status %d", statusCode)
}
return *resp
}
t.Run("args", func(t *testing.T) {
called := false
mockRepoLookup = func(repoName api.RepoName) (*protocol.RepoLookupResult, error) {
called = true
if want := api.RepoName("github.com/a/b"); repoName != want {
t.Errorf("got owner %q, want %q", repoName, want)
}
return &protocol.RepoLookupResult{Repo: nil}, nil
}
defer func() { mockRepoLookup = nil }()
repoLookupResult(t, "github.com/a/b")
if !called {
t.Error("!called")
}
})
t.Run("not found", func(t *testing.T) {
mockRepoLookup = func(api.RepoName) (*protocol.RepoLookupResult, error) {
return &protocol.RepoLookupResult{Repo: nil}, nil
}
defer func() { mockRepoLookup = nil }()
if got, want := repoLookupResult(t, "github.com/a/b"), (protocol.RepoLookupResult{}); !reflect.DeepEqual(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
})
t.Run("unexpected error", func(t *testing.T) {
mockRepoLookup = func(api.RepoName) (*protocol.RepoLookupResult, error) {
return nil, errors.New("x")
}
defer func() { mockRepoLookup = nil }()
result, statusCode := repoLookup(t, "github.com/a/b")
if result != nil {
t.Errorf("got result %+v, want nil", result)
}
if want := http.StatusInternalServerError; statusCode != want {
t.Errorf("got HTTP status code %d, want %d", statusCode, want)
}
})
t.Run("found", func(t *testing.T) {
want := protocol.RepoLookupResult{
Repo: &protocol.RepoInfo{
ExternalRepo: api.ExternalRepoSpec{
ID: "a",
ServiceType: extsvc.TypeGitHub,
ServiceID: "https://github.com/",
},
Name: "github.com/c/d",
Description: "b",
Fork: true,
},
}
mockRepoLookup = func(api.RepoName) (*protocol.RepoLookupResult, error) {
return &want, nil
}
defer func() { mockRepoLookup = nil }()
if got := repoLookupResult(t, "github.com/c/d"); !reflect.DeepEqual(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
})
}
func TestServer_EnqueueRepoUpdate(t *testing.T) {
ctx := context.Background()

View File

@ -109,7 +109,7 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
sourceMetrics := repos.NewSourceMetrics()
sourceMetrics.MustRegister(prometheus.DefaultRegisterer)
src := repos.NewSourcer(sourcerLogger, db, cf, repos.WithDependenciesService(dependencies.NewService(observationCtx, db)), repos.ObservedSource(sourcerLogger, sourceMetrics))
src := repos.NewSourcer(sourcerLogger, db, cf, gitserver.NewClient("repo-updater.sourcer"), repos.WithDependenciesService(dependencies.NewService(observationCtx, db)), repos.ObservedSource(sourcerLogger, sourceMetrics))
syncer := repos.NewSyncer(observationCtx, store, src)
updateScheduler := scheduler.NewUpdateScheduler(logger, db, gitserver.NewClient("repos.updatescheduler"))
server := &repoupdater.Server{

View File

@ -38,8 +38,6 @@ go_library(
"//internal/search",
"//internal/search/backend",
"//internal/search/casetransform",
"//internal/search/searcher",
"//internal/search/streaming/http",
"//internal/search/zoekt",
"//internal/searcher/v1:searcher",
"//internal/trace",
@ -135,6 +133,7 @@ go_test(
"//cmd/searcher/protocol",
"//internal/api",
"//internal/comby",
"//internal/conf",
"//internal/errcode",
"//internal/gitserver",
"//internal/grpc",
@ -142,7 +141,6 @@ go_test(
"//internal/observation",
"//internal/search",
"//internal/search/backend",
"//internal/search/searcher",
"//internal/searcher/v1:searcher",
"//lib/errors",
"//schema",

View File

@ -4,6 +4,7 @@ import (
"bytes"
"context"
"io"
"net/http"
"net/http/httptest"
"sort"
"strings"
@ -135,7 +136,7 @@ Hello world example in go`, typeFile},
Service: service,
})
handler := internalgrpc.MultiplexHandlers(grpcServer, service)
handler := internalgrpc.MultiplexHandlers(grpcServer, http.HandlerFunc(http.NotFound))
ts := httptest.NewServer(handler)
@ -260,7 +261,7 @@ unchanged.md
FetchTimeout: fetchTimeoutForCI(t),
}
m, err := doSearch(ts.URL, &req)
m, err := doSearch(t, ts.URL, &req)
if err != nil {
t.Fatal(err)
}
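Both searcher tests in this commit now stand up a single httptest server whose handler is internalgrpc.MultiplexHandlers(grpcServer, http.HandlerFunc(http.NotFound)). The routing idea behind that helper, sketched with plain grpc-go and the standard library (the real helper presumably also wires up h2c so the plaintext test server can carry gRPC traffic; that detail is omitted here):

```go
package main

import (
	"net/http"
	"strings"

	"google.golang.org/grpc"
)

// multiplex sends HTTP/2 requests with a gRPC content type to the gRPC
// server and everything else to the fallback handler.
func multiplex(grpcServer *grpc.Server, fallback http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		fallback.ServeHTTP(w, r)
	})
}

func main() {
	_ = multiplex(grpc.NewServer(), http.NotFoundHandler())
}
```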

View File

@ -13,11 +13,6 @@ package search
import (
"context"
"encoding/json"
"math"
"net"
"net/http"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
@ -30,8 +25,6 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/searcher/protocol"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/search/searcher"
streamhttp "github.com/sourcegraph/sourcegraph/internal/search/streaming/http"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -62,81 +55,6 @@ type Service struct {
MaxTotalPathsLength int
}
// ServeHTTP handles HTTP based search requests
func (s *Service) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx := r.Context()
var p protocol.Request
dec := json.NewDecoder(r.Body)
if err := dec.Decode(&p); err != nil {
http.Error(w, "failed to decode form: "+err.Error(), http.StatusBadRequest)
return
}
if !p.PatternMatchesContent && !p.PatternMatchesPath {
// BACKCOMPAT: Old frontends send neither of these fields, but we still want to
// search file content in that case.
p.PatternMatchesContent = true
}
if err := validateParams(&p); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
s.streamSearch(ctx, w, p)
}
// isNetOpError returns true if net.OpError is contained in err. This is
// useful to ignore errors when the connection has gone away.
func isNetOpError(err error) bool {
return errors.HasType(err, (*net.OpError)(nil))
}
func (s *Service) streamSearch(ctx context.Context, w http.ResponseWriter, p protocol.Request) {
if p.Limit == 0 {
// No limit for streaming search since upstream limits
// will either be sent in the request, or propagated by
// a cancelled context.
p.Limit = math.MaxInt32
}
eventWriter, err := streamhttp.NewWriter(w)
if err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var bufMux sync.Mutex
matchesBuf := streamhttp.NewJSONArrayBuf(32*1024, func(data []byte) error {
return eventWriter.EventBytes("matches", data)
})
onMatches := func(match protocol.FileMatch) {
bufMux.Lock()
if err := matchesBuf.Append(match); err != nil && !isNetOpError(err) {
s.Log.Warn("failed appending match to buffer", log.Error(err))
}
bufMux.Unlock()
}
ctx, cancel, stream := newLimitedStream(ctx, p.Limit, onMatches)
defer cancel()
err = s.search(ctx, &p, stream)
doneEvent := searcher.EventDone{
LimitHit: stream.LimitHit(),
}
if err != nil {
doneEvent.Error = err.Error()
}
// Flush remaining matches before sending a different event
if err := matchesBuf.Flush(); err != nil && !isNetOpError(err) {
s.Log.Warn("failed to flush matches", log.Error(err))
}
if err := eventWriter.Event("done", doneEvent); err != nil && !isNetOpError(err) {
s.Log.Warn("failed to send done event", log.Error(err))
}
}
func (s *Service) search(ctx context.Context, p *protocol.Request, sender matchSender) (err error) {
metricRunning.Inc()
defer metricRunning.Dec()

View File

@ -2,13 +2,15 @@ package search
import (
"context"
"math"
"strings"
"sync"
"github.com/sourcegraph/sourcegraph/cmd/searcher/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/searcher/v1"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/sourcegraph/sourcegraph/cmd/searcher/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/searcher/v1"
)
type Server struct {
@ -17,8 +19,24 @@ type Server struct {
}
func (s *Server) Search(req *proto.SearchRequest, stream proto.SearcherService_SearchServer) error {
var unmarshaledReq protocol.Request
unmarshaledReq.FromProto(req)
var p protocol.Request
p.FromProto(req)
if !p.PatternMatchesContent && !p.PatternMatchesPath {
// BACKCOMPAT: Old frontends send neither of these fields, but we still want to
// search file content in that case.
p.PatternMatchesContent = true
}
if err := validateParams(&p); err != nil {
return status.Error(codes.InvalidArgument, err.Error())
}
if p.Limit == 0 {
// No limit for streaming search since upstream limits
// will either be sent in the request, or propagated by
// a cancelled context.
p.Limit = math.MaxInt32
}
// mu protects the stream from concurrent writes.
var mu sync.Mutex
@ -33,10 +51,10 @@ func (s *Server) Search(req *proto.SearchRequest, stream proto.SearcherService_S
})
}
ctx, cancel, matchStream := newLimitedStream(stream.Context(), int(req.PatternInfo.Limit), onMatches)
ctx, cancel, matchStream := newLimitedStream(stream.Context(), int(p.PatternInfo.Limit), onMatches)
defer cancel()
err := s.Service.search(ctx, &unmarshaledReq, matchStream)
err := s.Service.search(ctx, &p, matchStream)
if err != nil {
return convertToGRPCError(ctx, err)
}
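One detail worth noting in the new Search handler: the comment above says `mu` protects the stream from concurrent writes, because grpc-go server streams do not allow concurrent Send calls while the search itself can produce matches from multiple goroutines. A generic sketch of that serialization pattern (the type and names are hypothetical, not part of the searcher package):

```go
package main

import "sync"

// serializedSender funnels results from concurrent producers through one
// mutex so the underlying send function is never called concurrently, the
// same reason the Search handler above guards stream.Send with a mutex.
type serializedSender[T any] struct {
	mu   sync.Mutex
	send func(T) error
}

func (s *serializedSender[T]) Send(v T) error {
	s.mu.Lock()
	defer s.mu.Unlock()
	return s.send(v)
}

func main() {
	s := &serializedSender[string]{send: func(string) error { return nil }}
	_ = s.Send("match")
}
```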

View File

@ -4,10 +4,10 @@ import (
"archive/tar"
"bytes"
"context"
"encoding/json"
"io"
"net/http"
"net/http/httptest"
"net/url"
"os"
"os/exec"
"runtime"
@ -22,15 +22,19 @@ import (
"github.com/sourcegraph/log/logtest"
"github.com/sourcegraph/zoekt"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/search/backend"
proto "github.com/sourcegraph/sourcegraph/internal/searcher/v1"
v1 "github.com/sourcegraph/sourcegraph/internal/searcher/v1"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/cmd/searcher/internal/search"
"github.com/sourcegraph/sourcegraph/cmd/searcher/protocol"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/search/backend"
"github.com/sourcegraph/sourcegraph/internal/search/searcher"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
type fileType int
@ -71,23 +75,27 @@ func main() {
arg protocol.PatternInfo
contextLines int32
want autogold.Value
}{{
arg: protocol.PatternInfo{Pattern: "foo"},
want: autogold.Expect(""),
}, {
arg: protocol.PatternInfo{Pattern: "World", IsCaseSensitive: true},
want: autogold.Expect("README.md:1:1:\n# Hello World\n"),
}, {
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
want: autogold.Expect(`README.md:3:3:
}{
{
arg: protocol.PatternInfo{Pattern: "foo"},
want: autogold.Expect(""),
},
{
arg: protocol.PatternInfo{Pattern: "World", IsCaseSensitive: true},
want: autogold.Expect("README.md:1:1:\n# Hello World\n"),
},
{
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
want: autogold.Expect(`README.md:3:3:
Hello world example in go
main.go:6:6:
fmt.Println("Hello world")
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
contextLines: 1,
want: autogold.Expect(`README.md:2:3:
},
{
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
contextLines: 1,
want: autogold.Expect(`README.md:2:3:
Hello world example in go
main.go:5:7:
@ -95,10 +103,11 @@ func main() {
fmt.Println("Hello world")
}
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
contextLines: 2,
want: autogold.Expect(`README.md:1:3:
},
{
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
contextLines: 2,
want: autogold.Expect(`README.md:1:3:
# Hello World
Hello world example in go
@ -108,10 +117,11 @@ func main() {
fmt.Println("Hello world")
}
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
contextLines: 999,
want: autogold.Expect(`README.md:1:3:
},
{
arg: protocol.PatternInfo{Pattern: "world", IsCaseSensitive: true},
contextLines: 999,
want: autogold.Expect(`README.md:1:3:
# Hello World
Hello world example in go
@ -124,148 +134,174 @@ func main() {
fmt.Println("Hello world")
}
`),
}, {
arg: protocol.PatternInfo{Pattern: "world"},
want: autogold.Expect(`README.md:1:1:
},
{
arg: protocol.PatternInfo{Pattern: "world"},
want: autogold.Expect(`README.md:1:1:
# Hello World
README.md:3:3:
Hello world example in go
main.go:6:6:
fmt.Println("Hello world")
`),
}, {
arg: protocol.PatternInfo{Pattern: "func.*main"},
want: autogold.Expect(""),
}, {
arg: protocol.PatternInfo{Pattern: "func.*main", IsRegExp: true},
want: autogold.Expect("main.go:5:5:\nfunc main() {\n"),
}, {
// https://github.com/sourcegraph/sourcegraph/issues/8155
arg: protocol.PatternInfo{Pattern: "^func", IsRegExp: true},
want: autogold.Expect("main.go:5:5:\nfunc main() {\n"),
}, {
arg: protocol.PatternInfo{Pattern: "^FuNc", IsRegExp: true},
want: autogold.Expect("main.go:5:5:\nfunc main() {\n"),
}, {
// Ensure we handle CaseInsensitive regexp searches with
// special uppercase chars in pattern.
arg: protocol.PatternInfo{Pattern: `printL\B`, IsRegExp: true},
want: autogold.Expect(`main.go:6:6:
},
{
arg: protocol.PatternInfo{Pattern: "func.*main"},
want: autogold.Expect(""),
},
{
arg: protocol.PatternInfo{Pattern: "func.*main", IsRegExp: true},
want: autogold.Expect("main.go:5:5:\nfunc main() {\n"),
},
{
// https://github.com/sourcegraph/sourcegraph/issues/8155
arg: protocol.PatternInfo{Pattern: "^func", IsRegExp: true},
want: autogold.Expect("main.go:5:5:\nfunc main() {\n"),
},
{
arg: protocol.PatternInfo{Pattern: "^FuNc", IsRegExp: true},
want: autogold.Expect("main.go:5:5:\nfunc main() {\n"),
},
{
// Ensure we handle CaseInsensitive regexp searches with
// special uppercase chars in pattern.
arg: protocol.PatternInfo{Pattern: `printL\B`, IsRegExp: true},
want: autogold.Expect(`main.go:6:6:
fmt.Println("Hello world")
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", ExcludePattern: "README.md"},
want: autogold.Expect(`main.go:6:6:
},
{
arg: protocol.PatternInfo{Pattern: "world", ExcludePattern: "README.md"},
want: autogold.Expect(`main.go:6:6:
fmt.Println("Hello world")
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.md$`}},
want: autogold.Expect(`README.md:1:1:
},
{
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.md$`}},
want: autogold.Expect(`README.md:1:1:
# Hello World
README.md:3:3:
Hello world example in go
`),
}, {
arg: protocol.PatternInfo{Pattern: "w", IncludePatterns: []string{`\.(md|txt)$`, `\.txt$`}},
want: autogold.Expect("abc.txt:1:1:\nw\n"),
}, {
arg: protocol.PatternInfo{Pattern: "world", ExcludePattern: "README\\.md"},
want: autogold.Expect(`main.go:6:6:
},
{
arg: protocol.PatternInfo{Pattern: "w", IncludePatterns: []string{`\.(md|txt)$`, `\.txt$`}},
want: autogold.Expect("abc.txt:1:1:\nw\n"),
},
{
arg: protocol.PatternInfo{Pattern: "world", ExcludePattern: "README\\.md"},
want: autogold.Expect(`main.go:6:6:
fmt.Println("Hello world")
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{"\\.md"}},
want: autogold.Expect(`README.md:1:1:
},
{
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{"\\.md"}},
want: autogold.Expect(`README.md:1:1:
# Hello World
README.md:3:3:
Hello world example in go
`),
}, {
arg: protocol.PatternInfo{Pattern: "w", IncludePatterns: []string{"\\.(md|txt)", "README"}},
want: autogold.Expect(`README.md:1:1:
},
{
arg: protocol.PatternInfo{Pattern: "w", IncludePatterns: []string{"\\.(md|txt)", "README"}},
want: autogold.Expect(`README.md:1:1:
# Hello World
README.md:3:3:
Hello world example in go
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)$`}, PathPatternsAreCaseSensitive: true},
want: autogold.Expect(`main.go:6:6:
},
{
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)$`}, PathPatternsAreCaseSensitive: true},
want: autogold.Expect(`main.go:6:6:
fmt.Println("Hello world")
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)$`}, PathPatternsAreCaseSensitive: true},
contextLines: 1,
want: autogold.Expect(`main.go:5:7:
},
{
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)$`}, PathPatternsAreCaseSensitive: true},
contextLines: 1,
want: autogold.Expect(`main.go:5:7:
func main() {
fmt.Println("Hello world")
}
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)$`}, PathPatternsAreCaseSensitive: true},
contextLines: 2,
want: autogold.Expect(`main.go:4:7:
},
{
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)$`}, PathPatternsAreCaseSensitive: true},
contextLines: 2,
want: autogold.Expect(`main.go:4:7:
func main() {
fmt.Println("Hello world")
}
`),
}, {
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)`}, PathPatternsAreCaseSensitive: true},
want: autogold.Expect(`main.go:6:6:
},
{
arg: protocol.PatternInfo{Pattern: "world", IncludePatterns: []string{`\.(MD|go)`}, PathPatternsAreCaseSensitive: true},
want: autogold.Expect(`main.go:6:6:
fmt.Println("Hello world")
`),
}, {
arg: protocol.PatternInfo{Pattern: "doesnotmatch"},
want: autogold.Expect(""),
}, {
arg: protocol.PatternInfo{Pattern: "", IsRegExp: false, IncludePatterns: []string{"\\.png"}, PatternMatchesPath: true},
want: autogold.Expect("milton.png\n"),
}, {
arg: protocol.PatternInfo{Pattern: "package main\n\nimport \"fmt\"", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:3:
},
{
arg: protocol.PatternInfo{Pattern: "doesnotmatch"},
want: autogold.Expect(""),
},
{
arg: protocol.PatternInfo{Pattern: "", IsRegExp: false, IncludePatterns: []string{"\\.png"}, PatternMatchesPath: true},
want: autogold.Expect("milton.png\n"),
},
{
arg: protocol.PatternInfo{Pattern: "package main\n\nimport \"fmt\"", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:3:
package main
import "fmt"
`),
}, {
arg: protocol.PatternInfo{Pattern: "package main\n\\s*import \"fmt\"", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:3:
},
{
arg: protocol.PatternInfo{Pattern: "package main\n\\s*import \"fmt\"", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:3:
package main
import "fmt"
`),
}, {
arg: protocol.PatternInfo{Pattern: "package main\n", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect("main.go:1:2:\npackage main\n\n"),
}, {
arg: protocol.PatternInfo{Pattern: "package main\n\\s*", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:3:
},
{
arg: protocol.PatternInfo{Pattern: "package main\n", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect("main.go:1:2:\npackage main\n\n"),
},
{
arg: protocol.PatternInfo{Pattern: "package main\n\\s*", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:3:
package main
import "fmt"
`),
}, {
arg: protocol.PatternInfo{Pattern: "\nfunc", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect("main.go:4:5:\n\nfunc main() {\n"),
}, {
arg: protocol.PatternInfo{Pattern: "\n\\s*func", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:3:5:
},
{
arg: protocol.PatternInfo{Pattern: "\nfunc", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect("main.go:4:5:\n\nfunc main() {\n"),
},
{
arg: protocol.PatternInfo{Pattern: "\n\\s*func", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:3:5:
import "fmt"
func main() {
`),
}, {
arg: protocol.PatternInfo{Pattern: "package main\n\nimport \"fmt\"\n\nfunc main\\(\\) {", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:5:
},
{
arg: protocol.PatternInfo{Pattern: "package main\n\nimport \"fmt\"\n\nfunc main\\(\\) {", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`main.go:1:5:
package main
import "fmt"
func main() {
`),
}, {
arg: protocol.PatternInfo{Pattern: "\n", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`README.md:1:3:
},
{
arg: protocol.PatternInfo{Pattern: "\n", IsCaseSensitive: false, IsRegExp: true, PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect(`README.md:1:3:
# Hello World
Hello world example in go
@ -279,9 +315,10 @@ fmt.Println("Hello world")
}
`),
}, {
arg: protocol.PatternInfo{Pattern: "^$", IsRegExp: true},
want: autogold.Expect(`README.md:2:2:
},
{
arg: protocol.PatternInfo{Pattern: "^$", IsRegExp: true},
want: autogold.Expect(`README.md:2:2:
main.go:2:2:
@ -292,64 +329,73 @@ main.go:8:8:
milton.png:1:1:
`),
}, {
arg: protocol.PatternInfo{
Pattern: "filename contains regex metachars",
IncludePatterns: []string{regexp.QuoteMeta("file++.plus")},
IsStructuralPat: true,
IsRegExp: true, // To test for a regression, imply that IsStructuralPat takes precedence.
},
want: autogold.Expect(`file++.plus:1:1:
{
arg: protocol.PatternInfo{
Pattern: "filename contains regex metachars",
IncludePatterns: []string{regexp.QuoteMeta("file++.plus")},
IsStructuralPat: true,
IsRegExp: true, // To test for a regression, imply that IsStructuralPat takes precedence.
},
want: autogold.Expect(`file++.plus:1:1:
filename contains regex metachars
`),
}, {
arg: protocol.PatternInfo{Pattern: "World", IsNegated: true},
want: autogold.Expect(`abc.txt
},
{
arg: protocol.PatternInfo{Pattern: "World", IsNegated: true},
want: autogold.Expect(`abc.txt
file++.plus
milton.png
nonutf8.txt
symlink
`),
}, {
arg: protocol.PatternInfo{Pattern: "World", IsCaseSensitive: true, IsNegated: true},
want: autogold.Expect(`abc.txt
},
{
arg: protocol.PatternInfo{Pattern: "World", IsCaseSensitive: true, IsNegated: true},
want: autogold.Expect(`abc.txt
file++.plus
main.go
milton.png
nonutf8.txt
symlink
`),
}, {
arg: protocol.PatternInfo{Pattern: "fmt", IsNegated: true},
want: autogold.Expect(`README.md
},
{
arg: protocol.PatternInfo{Pattern: "fmt", IsNegated: true},
want: autogold.Expect(`README.md
abc.txt
file++.plus
milton.png
nonutf8.txt
symlink
`),
}, {
arg: protocol.PatternInfo{Pattern: "go", IsNegated: true, PatternMatchesPath: true, ExcludePattern: "\\.txt"},
want: autogold.Expect(`README.md
},
{
arg: protocol.PatternInfo{Pattern: "go", IsNegated: true, PatternMatchesPath: true, ExcludePattern: "\\.txt"},
want: autogold.Expect(`README.md
file++.plus
milton.png
symlink
`),
}, {
arg: protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect("abc.txt\nsymlink:1:1:\nabc.txt\n"),
}, {
arg: protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: false, PatternMatchesContent: true},
want: autogold.Expect("symlink:1:1:\nabc.txt\n"),
}, {
arg: protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: true, PatternMatchesContent: false},
want: autogold.Expect("abc.txt\n"),
}, {
arg: protocol.PatternInfo{Pattern: "utf8", PatternMatchesPath: false, PatternMatchesContent: true},
want: autogold.Expect(`nonutf8.txt:1:1:
},
{
arg: protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: true, PatternMatchesContent: true},
want: autogold.Expect("abc.txt\nsymlink:1:1:\nabc.txt\n"),
},
{
arg: protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: false, PatternMatchesContent: true},
want: autogold.Expect("symlink:1:1:\nabc.txt\n"),
},
{
arg: protocol.PatternInfo{Pattern: "abc", PatternMatchesPath: true, PatternMatchesContent: false},
want: autogold.Expect("abc.txt\n"),
},
{
arg: protocol.PatternInfo{Pattern: "utf8", PatternMatchesPath: false, PatternMatchesContent: true},
want: autogold.Expect(`nonutf8.txt:1:1:
file contains invalid utf8 � characters
`),
}}
}}
zoektURL := newZoekt(t, &zoekt.Repository{}, nil)
s := newStore(t, files)
@ -358,12 +404,29 @@ file contains invalid utf8 � characters
return hdr.Name == "ignore.me"
}, nil
}
ts := httptest.NewServer(&search.Service{
service := &search.Service{
Store: s,
Log: s.Log,
Indexed: backend.ZoektDial(zoektURL),
}
grpcServer := defaults.NewServer(logtest.Scoped(t))
proto.RegisterSearcherServiceServer(grpcServer, &search.Server{
Service: service,
})
handler := internalgrpc.MultiplexHandlers(grpcServer, http.HandlerFunc(http.NotFound))
ts := httptest.NewServer(handler)
t.Cleanup(func() {
ts.Close()
})
conf.Mock(&conf.Unified{})
t.Cleanup(func() {
conf.Mock(nil)
})
defer ts.Close()
for i, test := range cases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
@ -379,7 +442,7 @@ file contains invalid utf8 <20> characters
FetchTimeout: fetchTimeoutForCI(t),
NumContextLines: test.contextLines,
}
m, err := doSearch(ts.URL, &req)
m, err := doSearch(t, ts.URL, &req)
if err != nil {
t.Fatalf("%s failed: %s", test.arg.String(), err)
}
@ -517,62 +580,68 @@ func TestSearch_badrequest(t *testing.T) {
},
}
zoektURL := newZoekt(t, &zoekt.Repository{}, nil)
store := newStore(t, nil)
ts := httptest.NewServer(&search.Service{
Store: store,
Log: store.Log,
})
defer ts.Close()
service := &search.Service{
Store: store,
Log: store.Log,
Indexed: backend.ZoektDial(zoektURL),
}
for _, p := range cases {
p.PatternInfo.PatternMatchesContent = true
_, err := doSearch(ts.URL, &p)
if err == nil {
t.Fatalf("%v expected to fail", p)
}
grpcServer := defaults.NewServer(logtest.Scoped(t))
proto.RegisterSearcherServiceServer(grpcServer, &search.Server{
Service: service,
})
handler := internalgrpc.MultiplexHandlers(grpcServer, http.HandlerFunc(http.NotFound))
ts := httptest.NewServer(handler)
t.Cleanup(func() {
ts.Close()
})
for i, p := range cases {
t.Run(strconv.Itoa(i), func(t *testing.T) {
p.PatternInfo.PatternMatchesContent = true
_, err := doSearch(t, ts.URL, &p)
if err == nil {
t.Fatalf("%v expected to fail", p)
}
})
}
}
func doSearch(u string, p *protocol.Request) ([]protocol.FileMatch, error) {
reqBody, err := json.Marshal(p)
func doSearch(t *testing.T, urlString string, p *protocol.Request) ([]protocol.FileMatch, error) {
u, err := url.Parse(urlString)
if err != nil {
return nil, err
}
resp, err := http.Post(u, "application/json", bytes.NewReader(reqBody))
conn, err := defaults.Dial(u.Host, logtest.Scoped(t))
if err != nil {
return nil, err
}
c := v1.NewSearcherServiceClient(conn)
cc, err := c.Search(context.Background(), p.ToProto())
if err != nil {
return nil, err
}
if resp.StatusCode != 200 {
body, err := io.ReadAll(resp.Body)
var matches []protocol.FileMatch
for {
msg, err := cc.Recv()
if err != nil {
if err == io.EOF {
return matches, nil
}
return nil, err
}
return nil, errors.Errorf("non-200 response: code=%d body=%s", resp.StatusCode, string(body))
if m := msg.GetFileMatch(); m != nil {
var fm protocol.FileMatch
fm.FromProto(m)
matches = append(matches, fm)
}
}
var ed searcher.EventDone
var matches []protocol.FileMatch
dec := searcher.StreamDecoder{
OnMatches: func(newMatches []*protocol.FileMatch) {
for _, match := range newMatches {
matches = append(matches, *match)
}
},
OnDone: func(e searcher.EventDone) {
ed = e
},
OnUnknown: func(event []byte, _ []byte) {
panic("unknown event")
},
}
if err := dec.ReadAll(resp.Body); err != nil {
return nil, err
}
if ed.Error != "" {
return nil, errors.New(ed.Error)
}
return matches, err
}
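The rewritten doSearch helper above follows the standard grpc-go client convention for server streaming: call Recv in a loop and treat io.EOF as a clean end of stream. Stated as a generic sketch (receiver and drain are hypothetical names, not part of the searcher client):

```go
package main

import (
	"errors"
	"io"
)

// receiver matches the Recv method that generated streaming clients expose.
type receiver[T any] interface {
	Recv() (T, error)
}

// drain collects every message from a server-streaming RPC, returning the
// accumulated results when the server closes the stream (io.EOF) and the
// error otherwise.
func drain[T any](r receiver[T]) ([]T, error) {
	var out []T
	for {
		msg, err := r.Recv()
		if err != nil {
			if errors.Is(err, io.EOF) {
				return out, nil
			}
			return nil, err
		}
		out = append(out, msg)
	}
}

func main() {}
```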
func newStore(t *testing.T, files map[string]struct {

View File

@ -6,7 +6,6 @@ import (
"bytes"
"context"
"io"
"net"
"os"
"os/exec"
"path/filepath"
@ -317,12 +316,3 @@ func emptyTar(t *testing.T) io.ReadCloser {
}
return io.NopCloser(bytes.NewReader(buf.Bytes()))
}
func TestIsNetOpError(t *testing.T) {
if !isNetOpError(&net.OpError{}) {
t.Fatal("should be net.OpError")
}
if isNetOpError(errors.New("hi")) {
t.Fatal("should not be net.OpError")
}
}

View File

@ -13,7 +13,6 @@ go_library(
"//cmd/searcher/internal/search",
"//internal/actor",
"//internal/api",
"//internal/conf",
"//internal/debugserver",
"//internal/env",
"//internal/gitserver",
@ -21,12 +20,10 @@ go_library(
"//internal/goroutine",
"//internal/grpc",
"//internal/grpc/defaults",
"//internal/instrumentation",
"//internal/observation",
"//internal/search",
"//internal/searcher/v1:searcher",
"//internal/service",
"//internal/trace",
"//lib/errors",
"@com_github_keegancsmith_tmpfriend//:tmpfriend",
"@com_github_sourcegraph_log//:log",

View File

@ -21,19 +21,16 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/searcher/internal/search"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/instrumentation"
"github.com/sourcegraph/sourcegraph/internal/observation"
sharedsearch "github.com/sourcegraph/sourcegraph/internal/search"
proto "github.com/sourcegraph/sourcegraph/internal/searcher/v1"
"github.com/sourcegraph/sourcegraph/internal/service"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -175,11 +172,6 @@ func Start(ctx context.Context, observationCtx *observation.Context, ready servi
}
sService.Store.Start()
// Set up handler middleware
handler := actor.HTTPMiddleware(logger, sService)
handler = trace.HTTPMiddleware(logger, handler, conf.DefaultClient())
handler = instrumentation.HTTPMiddleware("", handler)
ctx, cancel := context.WithCancel(ctx)
defer cancel()
@ -203,7 +195,7 @@ func Start(ctx context.Context, observationCtx *observation.Context, ready servi
_, _ = w.Write([]byte("ok"))
return
}
handler.ServeHTTP(w, r)
http.NotFoundHandler().ServeHTTP(w, r)
})),
}

View File

@ -19,7 +19,6 @@ go_library(
"//cmd/symbols/squirrel",
"//cmd/symbols/types",
"//internal/database",
"//internal/env",
"//internal/grpc",
"//internal/grpc/chunk",
"//internal/grpc/defaults",
@ -58,7 +57,6 @@ go_test(
"//internal/endpoint",
"//internal/gitserver/gitdomain",
"//internal/grpc/defaults",
"//internal/httpcli",
"//internal/observation",
"//internal/search",
"//internal/search/result",

View File

@ -2,7 +2,6 @@ package api
import (
"context"
"encoding/json"
"fmt"
"net/http"
@ -15,7 +14,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/search/result"
proto "github.com/sourcegraph/sourcegraph/internal/symbols/v1"
internaltypes "github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
const maxNumSymbolResults = 500
@ -62,7 +60,6 @@ func NewHandler(
handleStatus func(http.ResponseWriter, *http.Request),
ctagsBinary string,
) http.Handler {
searchFuncWrapper := func(ctx context.Context, args search.SymbolsParameters) (result.Symbols, error) {
// Massage the arguments to ensure that First is set to a reasonable value.
if args.First < 0 || args.First > maxNumSymbolResults {
@ -87,10 +84,8 @@ func NewHandler(
// Initialize the legacy JSON API server
mux := http.NewServeMux()
mux.HandleFunc("/search", handleSearchWith(jsonLogger, searchFuncWrapper))
mux.HandleFunc("/healthz", handleHealthCheck(jsonLogger))
addHandlers(mux, searchFunc, readFileFunc)
if handleStatus != nil {
mux.HandleFunc("/status", handleStatus)
}
@ -98,40 +93,6 @@ func NewHandler(
return internalgrpc.MultiplexHandlers(grpcServer, mux)
}
func handleSearchWith(l logger.Logger, searchFunc types.SearchFunc) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
var args search.SymbolsParameters
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
resultSymbols, err := searchFunc(r.Context(), args)
if err != nil {
// Ignore reporting errors where client disconnected
if r.Context().Err() == context.Canceled && errors.Is(err, context.Canceled) {
return
}
argsStr := fmt.Sprintf("%+v", args)
l.Error("symbol search failed",
logger.String("arguments", argsStr),
logger.Error(err),
)
if err := json.NewEncoder(w).Encode(search.SymbolsResponse{Err: err.Error()}); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
return
}
if err := json.NewEncoder(w).Encode(search.SymbolsResponse{Symbols: resultSymbols}); err != nil {
http.Error(w, err.Error(), http.StatusInternalServerError)
}
}
}
func handleHealthCheck(l logger.Logger) http.HandlerFunc {
return func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)

View File

@ -4,28 +4,17 @@ package api
import (
"context"
"net/http"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/sourcegraph/sourcegraph/cmd/symbols/squirrel"
"github.com/sourcegraph/sourcegraph/cmd/symbols/types"
"github.com/sourcegraph/sourcegraph/internal/grpc/chunk"
proto "github.com/sourcegraph/sourcegraph/internal/symbols/v1"
internaltypes "github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// addHandlers adds handlers that require cgo.
func addHandlers(
mux *http.ServeMux,
searchFunc types.SearchFunc,
readFileFunc func(context.Context, internaltypes.RepoCommitPath) ([]byte, error),
) {
mux.HandleFunc("/localCodeIntel", squirrel.LocalCodeIntelHandler(readFileFunc))
mux.HandleFunc("/symbolInfo", squirrel.NewSymbolInfoHandler(searchFunc, readFileFunc))
}
func convertSquirrelErrorToGrpcError(err error) *status.Status {
if errors.Is(err, squirrel.UnrecognizedFileExtensionError) {
return status.New(codes.InvalidArgument, err.Error())

View File

@ -7,29 +7,11 @@ import (
"encoding/json"
"net/http"
"github.com/sourcegraph/sourcegraph/cmd/symbols/types"
"github.com/sourcegraph/sourcegraph/internal/env"
proto "github.com/sourcegraph/sourcegraph/internal/symbols/v1"
internaltypes "github.com/sourcegraph/sourcegraph/internal/types"
"google.golang.org/grpc/status"
proto "github.com/sourcegraph/sourcegraph/internal/symbols/v1"
)
// addHandlers adds handlers that do not require cgo, which speeds up compile times but omits local
// code intelligence features. This non-cgo variant must only be used for development. Release
// builds of Sourcegraph must be built with cgo, or else they'll miss critical features.
func addHandlers(
mux *http.ServeMux,
searchFunc types.SearchFunc,
readFileFunc func(context.Context, internaltypes.RepoCommitPath) ([]byte, error),
) {
if !env.InsecureDev {
panic("must build with cgo (non-cgo variant is only for local dev)")
}
mux.HandleFunc("/localCodeIntel", jsonResponseHandler(internaltypes.LocalCodeIntelPayload{Symbols: []internaltypes.Symbol{}}))
mux.HandleFunc("/symbolInfo", jsonResponseHandler(internaltypes.SymbolInfo{}))
}
func jsonResponseHandler(v any) http.HandlerFunc {
data, _ := json.Marshal(v)
return func(w http.ResponseWriter, r *http.Request) {

View File

@ -22,7 +22,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/diskcache"
"github.com/sourcegraph/sourcegraph/internal/endpoint"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/search"
"github.com/sourcegraph/sourcegraph/internal/search/result"
@ -81,7 +80,6 @@ func TestHandler(t *testing.T) {
client := symbolsclient.Client{
Endpoints: endpoint.Static(server.URL),
GRPCConnectionCache: connectionCache,
HTTPClient: httpcli.InternalDoer,
}
x := result.Symbol{Name: "x", Path: "a.js", Line: 0, Character: 4}

View File

@ -6,7 +6,6 @@ go_library(
srcs = [
"breadcrumbs.go",
"hover.go",
"http_handlers.go",
"lang_java.go",
"lang_python.go",
"lang_starlark.go",
@ -28,7 +27,6 @@ go_library(
"//lib/errors",
"@com_github_fatih_color//:color",
"@com_github_grafana_regexp//:regexp",
"@com_github_inconshreveable_log15//:log15",
"@com_github_smacker_go_tree_sitter//:go-tree-sitter",
"@com_github_smacker_go_tree_sitter//cpp",
"@com_github_smacker_go_tree_sitter//csharp",

View File

@ -1,128 +0,0 @@
package squirrel
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"os"
"strings"
"github.com/inconshreveable/log15" //nolint:logging // TODO move all logging to sourcegraph/log
symbolsTypes "github.com/sourcegraph/sourcegraph/cmd/symbols/types"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// LocalCodeIntelHandler responds to /localCodeIntel
func LocalCodeIntelHandler(readFile readFileFunc) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
// Read the args from the request body.
body, err := io.ReadAll(r.Body)
if err != nil {
log15.Error("failed to read request body", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var args types.RepoCommitPath
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&args); err != nil {
log15.Error("failed to decode request body", "err", err, "body", string(body))
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
squirrel := New(readFile, nil)
defer squirrel.Close()
// Compute the local code intel payload.
payload, err := squirrel.LocalCodeIntel(r.Context(), args)
if payload != nil && os.Getenv("SQUIRREL_DEBUG") == "true" {
debugStringBuilder := &strings.Builder{}
fmt.Fprintln(debugStringBuilder, "👉 /localCodeIntel repo:", args.Repo, "commit:", args.Commit, "path:", args.Path)
contents, err := readFile(r.Context(), args)
if err != nil {
log15.Error("failed to read file from gitserver", "err", err)
} else {
prettyPrintLocalCodeIntelPayload(debugStringBuilder, *payload, string(contents))
fmt.Fprintln(debugStringBuilder, "✅ /localCodeIntel repo:", args.Repo, "commit:", args.Commit, "path:", args.Path)
fmt.Println(" ")
fmt.Println(bracket(debugStringBuilder.String()))
fmt.Println(" ")
}
}
if err != nil {
_ = json.NewEncoder(w).Encode(nil)
// Log the error if it's not an unrecognized file extension or unsupported language error.
if !errors.Is(err, UnrecognizedFileExtensionError) && !errors.Is(err, UnsupportedLanguageError) {
log15.Error("failed to generate local code intel payload", "err", err)
}
return
}
// Write the response.
w.Header().Set("Content-Type", "application/json")
err = json.NewEncoder(w).Encode(payload)
if err != nil {
log15.Error("failed to write response: %s", "error", err)
http.Error(w, fmt.Sprintf("failed to generate local code intel payload: %s", err), http.StatusInternalServerError)
return
}
}
}
// NewSymbolInfoHandler responds to /symbolInfo
func NewSymbolInfoHandler(symbolSearch symbolsTypes.SearchFunc, readFile readFileFunc) func(w http.ResponseWriter, r *http.Request) {
return func(w http.ResponseWriter, r *http.Request) {
// Read the args from the request body.
body, err := io.ReadAll(r.Body)
if err != nil {
log15.Error("failed to read request body", "err", err)
http.Error(w, err.Error(), http.StatusInternalServerError)
return
}
var args types.RepoCommitPathPoint
if err := json.NewDecoder(bytes.NewReader(body)).Decode(&args); err != nil {
log15.Error("failed to decode request body", "err", err, "body", string(body))
http.Error(w, err.Error(), http.StatusBadRequest)
return
}
// Find the symbol.
squirrel := New(readFile, symbolSearch)
defer squirrel.Close()
result, err := squirrel.SymbolInfo(r.Context(), args)
if os.Getenv("SQUIRREL_DEBUG") == "true" {
debugStringBuilder := &strings.Builder{}
fmt.Fprintln(debugStringBuilder, "👉 /symbolInfo repo:", args.Repo, "commit:", args.Commit, "path:", args.Path, "row:", args.Row, "column:", args.Column)
squirrel.breadcrumbs.pretty(debugStringBuilder, readFile)
if result == nil {
fmt.Fprintln(debugStringBuilder, "❌ no definition found")
} else {
fmt.Fprintln(debugStringBuilder, "✅ /symbolInfo", *result)
}
fmt.Println(" ")
fmt.Println(bracket(debugStringBuilder.String()))
fmt.Println(" ")
}
if err != nil {
_ = json.NewEncoder(w).Encode(nil)
log15.Error("failed to get definition", "err", err)
return
}
// Write the response.
w.Header().Set("Content-Type", "application/json")
err = json.NewEncoder(w).Encode(result)
if err != nil {
log15.Error("failed to write response: %s", "error", err)
http.Error(w, fmt.Sprintf("failed to get definition: %s", err), http.StatusInternalServerError)
return
}
}
}

View File

@ -4,7 +4,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "repo",
srcs = [
"document_ranks.go",
"handler.go",
"janitor.go",
"scheduler.go",
@ -19,9 +18,8 @@ go_library(
"//cmd/worker/shared/init/db",
"//internal/actor",
"//internal/api",
"//internal/api/internalapi",
"//internal/codeintel/context",
"//internal/codeintel/types",
"//internal/codeintel/ranking",
"//internal/conf",
"//internal/conf/conftypes",
"//internal/database",
@ -33,7 +31,6 @@ go_library(
"//internal/featureflag",
"//internal/gitserver",
"//internal/goroutine",
"//internal/httpcli",
"//internal/observation",
"//internal/paths",
"//internal/uploadstore",

View File

@ -1,61 +0,0 @@
package repo
import (
"context"
"encoding/json"
"io"
"net/http"
"net/url"
"strings"
"github.com/sourcegraph/sourcegraph/internal/api/internalapi"
"github.com/sourcegraph/sourcegraph/internal/codeintel/types"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func getDocumentRanks(ctx context.Context, repoName string) (types.RepoPathRanks, error) {
root, err := url.Parse(internalapi.Client.URL)
if err != nil {
return types.RepoPathRanks{}, err
}
u := root.ResolveReference(&url.URL{
Path: "/.internal/ranks/" + strings.Trim(repoName, "/") + "/documents",
})
req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
if err != nil {
return types.RepoPathRanks{}, err
}
resp, err := httpcli.InternalDoer.Do(req)
if err != nil {
return types.RepoPathRanks{}, err
}
if resp.StatusCode != http.StatusOK {
b, err := io.ReadAll(io.LimitReader(resp.Body, 1024))
_ = resp.Body.Close()
if err != nil {
return types.RepoPathRanks{}, err
}
return types.RepoPathRanks{}, &url.Error{
Op: "Get",
URL: u.String(),
Err: errors.Errorf("%s: %s", resp.Status, string(b)),
}
}
b, err := io.ReadAll(resp.Body)
if err != nil {
return types.RepoPathRanks{}, err
}
ranks := types.RepoPathRanks{}
err = json.Unmarshal(b, &ranks)
if err != nil {
return types.RepoPathRanks{}, err
}
return ranks, nil
}

View File

@ -9,6 +9,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
codeintelContext "github.com/sourcegraph/sourcegraph/internal/codeintel/context"
"github.com/sourcegraph/sourcegraph/internal/codeintel/ranking"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -32,6 +33,7 @@ type handler struct {
getQdrantInserter func() (db.VectorInserter, error)
contextService embed.ContextService
repoEmbeddingJobsStore bgrepo.RepoEmbeddingJobsStore
rankingService *ranking.Service
}
var _ workerutil.Handler[*bgrepo.RepoEmbeddingJob] = &handler{}
@ -161,7 +163,7 @@ func (h *handler) Handle(ctx context.Context, logger log.Logger, record *bgrepo.
}
}
ranks, err := getDocumentRanks(ctx, string(repo.Name))
ranks, err := h.rankingService.GetDocumentRanks(ctx, repo.Name)
if err != nil {
return err
}
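The worker handler no longer fetches ranks over the internal HTTP API (the file removed earlier in this commit); it now asks an injected ranking service directly. A hypothetical, dependency-free sketch of that shape: the interface, field names, and return type below are made up for illustration and are not the ranking.Service API.

```go
package main

import "context"

// rankSource is a stand-in for "wherever document ranks come from"; injecting
// it lets the handler be tested without a running frontend, which is the
// point of replacing the removed HTTP fetch with a service call.
type rankSource interface {
	GetDocumentRanks(ctx context.Context, repoName string) (map[string]float64, error)
}

type rankedHandler struct {
	ranks rankSource
}

func (h *rankedHandler) handle(ctx context.Context, repoName string) error {
	_, err := h.ranks.GetDocumentRanks(ctx, repoName)
	return err
}

func main() {}
```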

View File

@ -8,6 +8,7 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/worker/shared/init/codeintel"
workerdb "github.com/sourcegraph/sourcegraph/cmd/worker/shared/init/db"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/codeintel/ranking"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/embeddings"
repoembeddingsbg "github.com/sourcegraph/sourcegraph/internal/embeddings/background/repo"
@ -68,6 +69,7 @@ func (s *repoEmbeddingJob) Routines(_ context.Context, observationCtx *observati
getQdrantInserter,
services.ContextService,
repoembeddingsbg.NewRepoEmbeddingJobsStore(db),
services.RankingService,
),
}, nil
}
@ -82,6 +84,7 @@ func newRepoEmbeddingJobWorker(
getQdrantInserter func() (vdb.VectorInserter, error),
contextService embed.ContextService,
repoEmbeddingJobsStore repoembeddingsbg.RepoEmbeddingJobsStore,
rankingService *ranking.Service,
) *workerutil.Worker[*repoembeddingsbg.RepoEmbeddingJob] {
handler := &handler{
db: db,
@ -90,6 +93,7 @@ func newRepoEmbeddingJobWorker(
getQdrantInserter: getQdrantInserter,
contextService: contextService,
repoEmbeddingJobsStore: repoEmbeddingJobsStore,
rankingService: rankingService,
}
return dbworker.NewWorker[*repoembeddingsbg.RepoEmbeddingJob](ctx, workerStore, handler, workerutil.WorkerOptions{
Name: "repo_embedding_job_worker",

View File

@ -8,7 +8,6 @@ PORT=${PORT:-"7080"}
URL="http://localhost:$PORT"
DATA=${DATA:-"/tmp/sourcegraph-data"}
SOURCEGRAPH_LICENSE_GENERATION_KEY=${SOURCEGRAPH_LICENSE_GENERATION_KEY:-""}
SG_FEATURE_FLAG_GRPC=${SG_FEATURE_FLAG_GRPC:-"true"}
DB_STARTUP_TIMEOUT="10s"
echo "--- Checking for existing Sourcegraph instance at $URL"
@ -48,7 +47,6 @@ docker run "$@" \
--publish "$PORT":7080 \
-e ALLOW_SINGLE_DOCKER_CODE_INSIGHTS=t \
-e SOURCEGRAPH_LICENSE_GENERATION_KEY="$SOURCEGRAPH_LICENSE_GENERATION_KEY" \
-e SG_FEATURE_FLAG_GRPC="$SG_FEATURE_FLAG_GRPC" \
-e DB_STARTUP_TIMEOUT="$DB_STARTUP_TIMEOUT" \
-e SOURCEGRAPH_5_1_DB_MIGRATION=true \
--volume "$DATA/config:/etc/sourcegraph" \

View File

@ -707,37 +707,6 @@ Generated query for warning alert: `max((sum by (code) (increase(searcher_servic
<br />
## frontend: internalapi_error_responses
<p class="subtitle">internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> frontend: 5%+ internal API error responses every 5m by route for 15m0s
**Next steps**
- May not be a substantial issue; check the `frontend` logs for potential causes.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#frontend-internalapi-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_frontend_internalapi_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Source team](https://handbook.sourcegraph.com/departments/engineering/teams/source).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2.."}[5m])) / ignoring (code) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count[5m])) * 100) >= 5)`
</details>
<br />
## frontend: 99th_percentile_gitserver_duration
<p class="subtitle">99th percentile successful gitserver query duration over 5m</p>
@ -1572,43 +1541,6 @@ Generated query for warning alert: `max((sum(src_gitserver_lsremote_queue)) >= 2
<br />
## gitserver: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> gitserver: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Next steps**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `repo-updater` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs gitserver` for logs indicating request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs gitserver` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#gitserver-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_gitserver_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Source team](https://handbook.sourcegraph.com/departments/engineering/teams/source).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2..",job="gitserver"}[5m])) / ignoring (category) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count{job="gitserver"}[5m]))) >= 2)`
</details>
<br />
## gitserver: gitserver_site_configuration_duration_since_last_successful_update_by_instance
<p class="subtitle">maximum duration since last successful site configuration update (all "gitserver" instances)</p>
@ -2383,43 +2315,6 @@ Generated query for critical alert: `max((max(src_codeintel_upload_queued_durati
<br />
## precise-code-intel-worker: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> precise-code-intel-worker: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Next steps**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `repo-updater` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs precise-code-intel-worker` for logs indicating request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs precise-code-intel-worker` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#precise-code-intel-worker-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_precise-code-intel-worker_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Code intelligence team](https://handbook.sourcegraph.com/departments/engineering/teams/code-intelligence).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2..",job="precise-code-intel-worker"}[5m])) / ignoring (category) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count{job="precise-code-intel-worker"}[5m]))) >= 2)`
</details>
<br />
## precise-code-intel-worker: mean_blocked_seconds_per_conn_request
<p class="subtitle">mean blocked seconds per conn request</p>
@ -3396,43 +3291,6 @@ Generated query for warning alert: `max((max(src_query_runner_worker_total{job=~
<br />
## worker: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> worker: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Next steps**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `repo-updater` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs worker` for logs indicating request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs worker` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#worker-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_worker_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Code intelligence team](https://handbook.sourcegraph.com/departments/engineering/teams/code-intelligence).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2..",job="worker"}[5m])) / ignoring (category) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count{job="worker"}[5m]))) >= 2)`
</details>
<br />
## worker: mean_blocked_seconds_per_conn_request
<p class="subtitle">mean blocked seconds per conn request</p>
@ -4645,43 +4503,6 @@ Generated query for critical alert: `max((max(max_over_time(src_conf_client_time
<br />
## repo-updater: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> repo-updater: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Next steps**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `repo-updater` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs repo-updater` for logs indicating request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs repo-updater` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#repo-updater-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_repo-updater_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Source team](https://handbook.sourcegraph.com/departments/engineering/teams/source).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2..",job="repo-updater"}[5m])) / ignoring (category) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count{job="repo-updater"}[5m]))) >= 2)`
</details>
<br />
## repo-updater: mean_blocked_seconds_per_conn_request
<p class="subtitle">mean blocked seconds per conn request</p>
@ -5164,43 +4985,6 @@ Generated query for critical alert: `max((sum by (app_name, db_name) (increase(s
<br />
## searcher: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> searcher: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Next steps**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `searcher` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs searcher` for logs indicating request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs searcher` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#searcher-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_searcher_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Search Platform team](https://handbook.sourcegraph.com/departments/engineering/teams/search/core).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2..",job="searcher"}[5m])) / ignoring (category) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count{job="searcher"}[5m]))) >= 2)`
</details>
<br />
## searcher: container_cpu_usage
<p class="subtitle">container cpu usage total (1m average) across all cores by instance</p>
@ -5586,43 +5370,6 @@ Generated query for critical alert: `max((sum by (app_name, db_name) (increase(s
<br />
## symbols: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> symbols: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Next steps**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `symbols` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs symbols` for logs indicating request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs symbols` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#symbols-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_symbols_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Code intelligence team](https://handbook.sourcegraph.com/departments/engineering/teams/code-intelligence).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2..",job="symbols"}[5m])) / ignoring (category) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count{job="symbols"}[5m]))) >= 2)`
</details>
<br />
## symbols: container_cpu_usage
<p class="subtitle">container cpu usage total (1m average) across all cores by instance</p>
@ -8006,43 +7753,6 @@ Generated query for critical alert: `max((sum by (app_name, db_name) (increase(s
<br />
## embeddings: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> embeddings: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Next steps**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `embeddings` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs embeddings` for logs indicating request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs embeddings` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#embeddings-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_embeddings_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Cody team](https://handbook.sourcegraph.com/departments/engineering/teams/cody).*</sub>
<details>
<summary>Technical details</summary>
Generated query for warning alert: `max((sum by (category) (increase(src_frontend_internal_request_duration_seconds_count{code!~"2..",job="embeddings"}[5m])) / ignoring (category) group_left () sum(increase(src_frontend_internal_request_duration_seconds_count{job="embeddings"}[5m]))) >= 2)`
</details>
<br />
## embeddings: container_cpu_usage
<p class="subtitle">container cpu usage total (1m average) across all cores by instance</p>

File diff suppressed because it is too large

View File

@ -10,15 +10,10 @@ go_library(
importpath = "github.com/sourcegraph/sourcegraph/internal/api/internalapi",
visibility = ["//:__subpackages__"],
deps = [
"//internal/actor",
"//internal/api/internalapi/v1:internalapi",
"//internal/conf/conftypes",
"//internal/env",
"//internal/grpc/defaults",
"//internal/httpcli",
"//lib/errors",
"@com_github_prometheus_client_golang//prometheus",
"@com_github_prometheus_client_golang//prometheus/promauto",
"@com_github_sourcegraph_log//:log",
"@org_golang_google_grpc//:go_default_library",
],

View File

@ -1,30 +1,19 @@
package internalapi
import (
"bytes"
"context"
"encoding/json"
"fmt"
"net"
"net/http"
"net/url"
"strconv"
"strings"
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/actor"
proto "github.com/sourcegraph/sourcegraph/internal/api/internalapi/v1"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
var frontendInternal = func() *url.URL {
@ -32,10 +21,6 @@ var frontendInternal = func() *url.URL {
return mustParseSourcegraphInternalURL(rawURL)
}()
// NOTE: this intentionally does not use the site configuration option because we need to make the decision
// about whether or not to use gRPC to fetch the site configuration in the first place.
var enableGRPC = env.MustGetBool("SRC_GRPC_ENABLE_CONF", false, "Enable gRPC for configuration updates")
func defaultFrontendInternal() string {
return "sourcegraph-frontend-internal"
}
@ -61,12 +46,6 @@ var Client = &internalClient{
}),
}
var requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Name: "src_frontend_internal_request_duration_seconds",
Help: "Time (in seconds) spent on request.",
Buckets: prometheus.DefBuckets,
}, []string{"category", "code"})
// MockClientConfiguration mocks (*internalClient).Configuration.
var MockClientConfiguration func() (conftypes.RawUnified, error)
@ -75,102 +54,17 @@ func (c *internalClient) Configuration(ctx context.Context) (conftypes.RawUnifie
return MockClientConfiguration()
}
if enableGRPC {
cc, err := c.getConfClient()
if err != nil {
return conftypes.RawUnified{}, err
}
resp, err := cc.GetConfig(ctx, &proto.GetConfigRequest{})
if err != nil {
return conftypes.RawUnified{}, err
}
var raw conftypes.RawUnified
raw.FromProto(resp.RawUnified)
return raw, nil
}
var cfg conftypes.RawUnified
err := c.postInternal(ctx, "configuration", nil, &cfg)
return cfg, err
}
// postInternal sends an HTTP post request to the internal route.
func (c *internalClient) postInternal(ctx context.Context, route string, reqBody, respBody any) error {
return c.meteredPost(ctx, "/.internal/"+route, reqBody, respBody)
}
func (c *internalClient) meteredPost(ctx context.Context, route string, reqBody, respBody any) error {
start := time.Now()
statusCode, err := c.post(ctx, route, reqBody, respBody)
d := time.Since(start)
code := strconv.Itoa(statusCode)
cc, err := c.getConfClient()
if err != nil {
code = "error"
return conftypes.RawUnified{}, err
}
requestDuration.WithLabelValues(route, code).Observe(d.Seconds())
return err
}
// post sends an HTTP post request to the provided route. If reqBody is
// non-nil it will Marshal it as JSON and set that as the Request body. If
// respBody is non-nil the response body will be JSON unmarshalled to resp.
func (c *internalClient) post(ctx context.Context, route string, reqBody, respBody any) (int, error) {
var data []byte
if reqBody != nil {
var err error
data, err = json.Marshal(reqBody)
if err != nil {
return -1, err
}
}
req, err := http.NewRequest("POST", c.URL+route, bytes.NewBuffer(data))
resp, err := cc.GetConfig(ctx, &proto.GetConfigRequest{})
if err != nil {
return -1, err
return conftypes.RawUnified{}, err
}
req.Header.Set("Content-Type", "application/json")
// Check if we have an actor, if not, ensure that we use our internal actor since
// this is an internal request.
a := actor.FromContext(ctx)
if !a.IsAuthenticated() && !a.IsInternal() {
ctx = actor.WithInternalActor(ctx)
}
resp, err := httpcli.InternalDoer.Do(req.WithContext(ctx))
if err != nil {
return -1, err
}
defer resp.Body.Close()
if err := checkAPIResponse(resp); err != nil {
return resp.StatusCode, err
}
if respBody != nil {
return resp.StatusCode, json.NewDecoder(resp.Body).Decode(respBody)
}
return resp.StatusCode, nil
}
func checkAPIResponse(resp *http.Response) error {
if 200 > resp.StatusCode || resp.StatusCode > 299 {
buf := new(bytes.Buffer)
_, _ = buf.ReadFrom(resp.Body)
b := buf.Bytes()
errString := string(b)
if errString != "" {
return errors.Errorf(
"internal API response error code %d: %s (%s)",
resp.StatusCode,
errString,
resp.Request.URL,
)
}
return errors.Errorf("internal API response error code %d (%s)", resp.StatusCode, resp.Request.URL)
}
return nil
var raw conftypes.RawUnified
raw.FromProto(resp.RawUnified)
return raw, nil
}
// mustParseSourcegraphInternalURL parses a frontend internal URL string and panics if it is invalid.
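With the HTTP fallback removed, `Configuration` is served exclusively over gRPC. Below is a minimal, hypothetical caller-side sketch, not part of this diff; the error handling and the printed `Site` field name are assumptions for illustration.

```go
package main

import (
	"context"
	"log"

	"github.com/sourcegraph/sourcegraph/internal/api/internalapi"
)

func main() {
	// Configuration now always goes through the gRPC config service; any
	// failure here is a gRPC error (e.g. frontend-internal being unreachable).
	raw, err := internalapi.Client.Configuration(context.Background())
	if err != nil {
		log.Fatalf("fetching site configuration: %v", err)
	}
	// RawUnified carries the raw site configuration JSON (assumed field name).
	log.Printf("fetched %d bytes of site config", len(raw.Site))
}
```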

View File

@ -95,6 +95,7 @@ go_test(
"//internal/extsvc/gerrit",
"//internal/extsvc/github",
"//internal/extsvc/gitlab",
"//internal/extsvc/gitolite",
"//internal/extsvc/versions",
"//internal/github_apps/auth",
"//internal/github_apps/store",

View File

@ -23,6 +23,7 @@ import (
azuredevops "github.com/sourcegraph/sourcegraph/internal/extsvc/azuredevops"
bitbucketcloud "github.com/sourcegraph/sourcegraph/internal/extsvc/bitbucketcloud"
gerrit "github.com/sourcegraph/sourcegraph/internal/extsvc/gerrit"
gitolite "github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
store1 "github.com/sourcegraph/sourcegraph/internal/github_apps/store"
gitserver "github.com/sourcegraph/sourcegraph/internal/gitserver"
gitdomain "github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
@ -10949,6 +10950,9 @@ type MockGitserverClient struct {
// ListDirectoryChildrenFunc is an instance of a mock function object
// controlling the behavior of the method ListDirectoryChildren.
ListDirectoryChildrenFunc *GitserverClientListDirectoryChildrenFunc
// ListGitoliteReposFunc is an instance of a mock function object
// controlling the behavior of the method ListGitoliteRepos.
ListGitoliteReposFunc *GitserverClientListGitoliteReposFunc
// ListRefsFunc is an instance of a mock function object controlling the
// behavior of the method ListRefs.
ListRefsFunc *GitserverClientListRefsFunc
@ -11191,6 +11195,11 @@ func NewMockGitserverClient() *MockGitserverClient {
return
},
},
ListGitoliteReposFunc: &GitserverClientListGitoliteReposFunc{
defaultHook: func(context.Context, string) (r0 []*gitolite.Repo, r1 error) {
return
},
},
ListRefsFunc: &GitserverClientListRefsFunc{
defaultHook: func(context.Context, api.RepoName) (r0 []gitdomain.Ref, r1 error) {
return
@ -11488,6 +11497,11 @@ func NewStrictMockGitserverClient() *MockGitserverClient {
panic("unexpected invocation of MockGitserverClient.ListDirectoryChildren")
},
},
ListGitoliteReposFunc: &GitserverClientListGitoliteReposFunc{
defaultHook: func(context.Context, string) ([]*gitolite.Repo, error) {
panic("unexpected invocation of MockGitserverClient.ListGitoliteRepos")
},
},
ListRefsFunc: &GitserverClientListRefsFunc{
defaultHook: func(context.Context, api.RepoName) ([]gitdomain.Ref, error) {
panic("unexpected invocation of MockGitserverClient.ListRefs")
@ -11724,6 +11738,9 @@ func NewMockGitserverClientFrom(i gitserver.Client) *MockGitserverClient {
ListDirectoryChildrenFunc: &GitserverClientListDirectoryChildrenFunc{
defaultHook: i.ListDirectoryChildren,
},
ListGitoliteReposFunc: &GitserverClientListGitoliteReposFunc{
defaultHook: i.ListGitoliteRepos,
},
ListRefsFunc: &GitserverClientListRefsFunc{
defaultHook: i.ListRefs,
},
@ -15275,6 +15292,117 @@ func (c GitserverClientListDirectoryChildrenFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// GitserverClientListGitoliteReposFunc describes the behavior when the
// ListGitoliteRepos method of the parent MockGitserverClient instance is
// invoked.
type GitserverClientListGitoliteReposFunc struct {
defaultHook func(context.Context, string) ([]*gitolite.Repo, error)
hooks []func(context.Context, string) ([]*gitolite.Repo, error)
history []GitserverClientListGitoliteReposFuncCall
mutex sync.Mutex
}
// ListGitoliteRepos delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockGitserverClient) ListGitoliteRepos(v0 context.Context, v1 string) ([]*gitolite.Repo, error) {
r0, r1 := m.ListGitoliteReposFunc.nextHook()(v0, v1)
m.ListGitoliteReposFunc.appendCall(GitserverClientListGitoliteReposFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the ListGitoliteRepos
// method of the parent MockGitserverClient instance is invoked and the hook
// queue is empty.
func (f *GitserverClientListGitoliteReposFunc) SetDefaultHook(hook func(context.Context, string) ([]*gitolite.Repo, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// ListGitoliteRepos method of the parent MockGitserverClient instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverClientListGitoliteReposFunc) PushHook(hook func(context.Context, string) ([]*gitolite.Repo, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverClientListGitoliteReposFunc) SetDefaultReturn(r0 []*gitolite.Repo, r1 error) {
f.SetDefaultHook(func(context.Context, string) ([]*gitolite.Repo, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverClientListGitoliteReposFunc) PushReturn(r0 []*gitolite.Repo, r1 error) {
f.PushHook(func(context.Context, string) ([]*gitolite.Repo, error) {
return r0, r1
})
}
func (f *GitserverClientListGitoliteReposFunc) nextHook() func(context.Context, string) ([]*gitolite.Repo, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverClientListGitoliteReposFunc) appendCall(r0 GitserverClientListGitoliteReposFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of GitserverClientListGitoliteReposFuncCall
// objects describing the invocations of this function.
func (f *GitserverClientListGitoliteReposFunc) History() []GitserverClientListGitoliteReposFuncCall {
f.mutex.Lock()
history := make([]GitserverClientListGitoliteReposFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverClientListGitoliteReposFuncCall is an object that describes an
// invocation of method ListGitoliteRepos on an instance of
// MockGitserverClient.
type GitserverClientListGitoliteReposFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 []*gitolite.Repo
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverClientListGitoliteReposFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverClientListGitoliteReposFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
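For context, a hypothetical test sketch (not part of this diff) of how the generated `ListGitoliteRepos` mock above could be used. The test name and canned values are illustrative, and the snippet assumes it lives in the same package as `MockGitserverClient`, with imports for `context`, `testing`, and the `gitolite` package.

```go
func TestListGitoliteRepos_UsesMock(t *testing.T) {
	gs := NewMockGitserverClient()
	// Stub the new method with a canned result.
	gs.ListGitoliteReposFunc.SetDefaultReturn([]*gitolite.Repo{
		{Name: "repo-a", URL: "git@gitolite.example.com:repo-a"},
	}, nil)

	repos, err := gs.ListGitoliteRepos(context.Background(), "gitolite.example.com")
	if err != nil {
		t.Fatal(err)
	}
	if len(repos) != 1 || repos[0].Name != "repo-a" {
		t.Fatalf("unexpected repos: %+v", repos)
	}

	// The generated History helper records every invocation and its arguments.
	if calls := gs.ListGitoliteReposFunc.History(); len(calls) != 1 || calls[0].Arg1 != "gitolite.example.com" {
		t.Fatalf("unexpected call history: %+v", calls)
	}
}
```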
// GitserverClientListRefsFunc describes the behavior when the ListRefs
// method of the parent MockGitserverClient instance is invoked.
type GitserverClientListRefsFunc struct {

View File

@ -11,7 +11,6 @@ go_library(
"computed.go",
"conf.go",
"diff.go",
"grpc.go",
"helpers.go",
"init.go",
"log_sinks.go",
@ -60,7 +59,6 @@ go_test(
"client_test.go",
"computed_test.go",
"diff_test.go",
"grpc_test.go",
"mocks_test.go",
"validate_test.go",
],

View File

@ -1,20 +0,0 @@
package conf
import (
"context"
"os"
"strconv"
)
const envGRPCEnabled = "SG_FEATURE_FLAG_GRPC"
func IsGRPCEnabled(ctx context.Context) bool {
if val, err := strconv.ParseBool(os.Getenv(envGRPCEnabled)); err == nil {
return val
}
if c := Get(); c.ExperimentalFeatures != nil && c.ExperimentalFeatures.EnableGRPC != nil {
return *c.ExperimentalFeatures.EnableGRPC
}
return true
}

View File

@ -1,45 +0,0 @@
package conf
import (
"context"
"testing"
)
func TestIsGRPCEnabled(t *testing.T) {
tests := []struct {
name string
envValue string
expected bool
}{
{
name: "enabled",
envValue: "true",
expected: true,
},
{
name: "disabled",
envValue: "false",
expected: false,
},
{
name: "empty env var - default true",
envValue: "",
expected: true,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
t.Setenv(envGRPCEnabled, test.envValue)
actual := IsGRPCEnabled(context.Background())
if actual != test.expected {
t.Errorf("expected %v but got %v", test.expected, actual)
}
})
}
}

View File

@ -9,9 +9,10 @@ import (
"strings"
"github.com/sourcegraph/conc/pool"
"github.com/sourcegraph/sourcegraph/lib/errors"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/endpoint"

View File

@ -18,6 +18,7 @@ go_library(
"//internal/database",
"//internal/env",
"//internal/extsvc",
"//internal/gitserver",
"//internal/goroutine",
"//internal/httpcli",
"//internal/observation",

View File

@ -12,6 +12,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/observation"
@ -50,7 +51,7 @@ func (j *syncingJob) Routines(_ context.Context, observationCtx *observation.Con
sourcerCF := httpcli.NewExternalClientFactory(
httpcli.NewLoggingMiddleware(sourcerLogger),
)
sourcer := repos.NewSourcer(sourcerLogger, db, sourcerCF)
sourcer := repos.NewSourcer(sourcerLogger, db, sourcerCF, gitserver.NewClient("extsvc.version-syncer"))
store := db.ExternalServices()
handler := goroutine.HandlerFunc(func(ctx context.Context) error {

View File

@ -8,11 +8,9 @@ go_library(
"client.go",
"commands.go",
"git_command.go",
"gitolite.go",
"mock.go",
"mocks_temp.go",
"observability.go",
"proxy.go",
"retry.go",
"stream_client.go",
"stream_hunks.go",
@ -34,9 +32,7 @@ go_library(
"//internal/grpc/defaults",
"//internal/grpc/streamio",
"//internal/honey",
"//internal/httpcli",
"//internal/lazyregexp",
"//internal/limiter",
"//internal/metrics",
"//internal/observation",
"//internal/perforce",
@ -54,8 +50,8 @@ go_library(
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_grpc//codes",
"@org_golang_google_grpc//metadata",
"@org_golang_google_grpc//status",
"@org_golang_google_protobuf//encoding/protojson",
"@org_golang_x_exp//slices",
"@org_golang_x_sync//errgroup",
"@org_golang_x_sync//semaphore",
@ -89,7 +85,6 @@ go_test(
"//internal/gitserver/protocol",
"//internal/gitserver/v1:gitserver",
"//internal/grpc",
"//internal/httpcli",
"//internal/types",
"//lib/errors",
"//schema",
@ -98,6 +93,5 @@ go_test(
"@com_github_stretchr_testify//assert",
"@com_github_stretchr_testify//require",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//encoding/protojson",
],
)

File diff suppressed because it is too large

View File

@ -1,13 +1,9 @@
package gitserver_test
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"math/rand"
"net/http"
"os/exec"
"path/filepath"
"reflect"
@ -22,17 +18,12 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/stretchr/testify/require"
"google.golang.org/grpc"
"google.golang.org/protobuf/encoding/protojson"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestClient_Archive_ProtoRoundTrip(t *testing.T) {
@ -291,97 +282,7 @@ func TestClient_ListGitolite_ProtoRoundTrip(t *testing.T) {
}
}
func TestClient_Remove(t *testing.T) {
test := func(t *testing.T, called *bool) {
repo := api.RepoName("github.com/sourcegraph/sourcegraph")
addrs := []string{"172.16.8.1:8080", "172.16.8.2:8080"}
expected := "http://172.16.8.1:8080"
source := gitserver.NewTestClientSource(t, addrs, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockRepoDelete := func(ctx context.Context, in *proto.RepoDeleteRequest, opts ...grpc.CallOption) (*proto.RepoDeleteResponse, error) {
*called = true
return nil, nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.RepoDeleteFunc.SetDefaultHook(mockRepoDelete)
return cli
}
})
cli := gitserver.NewTestClient(t).
WithDoer(httpcli.DoerFunc(func(r *http.Request) (*http.Response, error) {
switch r.URL.String() {
// Ensure that the request was received by the "expected" gitserver instance - where
// expected is the gitserver instance according to the Rendezvous hashing scheme.
// For anything else apart from this we return an error.
case expected + "/delete":
return &http.Response{
StatusCode: 200,
Body: io.NopCloser(bytes.NewBufferString("{}")),
}, nil
default:
return nil, errors.Newf("unexpected URL: %q", r.URL.String())
}
})).
WithClientSource(source)
err := cli.Remove(context.Background(), repo)
if err != nil {
t.Fatalf("expected URL %q, but got err %q", expected, err)
}
}
t.Run("GRPC", func(t *testing.T) {
called := false
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(true),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
test(t, &called)
if !called {
t.Fatal("grpc client not called")
}
})
t.Run("HTTP", func(t *testing.T) {
called := false
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
test(t, &called)
if called {
t.Fatal("grpc client called")
}
})
}
func TestClient_BatchLogGRPC(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(true),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
func TestClient_BatchLog(t *testing.T) {
addrs := []string{"172.16.8.1:8080"}
called := false
@ -472,105 +373,6 @@ func TestClient_BatchLogGRPC(t *testing.T) {
}
}
func TestClient_BatchLog(t *testing.T) {
addrs := []string{"172.16.8.1:8080", "172.16.8.2:8080", "172.16.8.3:8080"}
source := gitserver.NewTestClientSource(t, addrs, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(conn *grpc.ClientConn) proto.GitserverServiceClient {
mockBatchLog := func(ctx context.Context, in *proto.BatchLogRequest, opts ...grpc.CallOption) (*proto.BatchLogResponse, error) {
var out []*proto.BatchLogResult
for _, repoCommit := range in.GetRepoCommits() {
out = append(out, &proto.BatchLogResult{
RepoCommit: repoCommit,
CommandOutput: fmt.Sprintf("out<%s: %s@%s>", fmt.Sprintf("http://%s/batch-log", conn.Target()), repoCommit.GetRepo(), repoCommit.GetCommit()),
CommandError: nil,
})
}
return &proto.BatchLogResponse{
Results: out,
}, nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.BatchLogFunc.SetDefaultHook(mockBatchLog)
return cli
}
})
cli := gitserver.NewTestClient(t).
WithDoer(httpcli.DoerFunc(func(r *http.Request) (*http.Response, error) {
var req protocol.BatchLogRequest
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
return nil, err
}
var results []protocol.BatchLogResult
for _, repoCommit := range req.RepoCommits {
results = append(results, protocol.BatchLogResult{
RepoCommit: repoCommit,
CommandOutput: fmt.Sprintf("out<%s: %s@%s>", r.URL.String(), repoCommit.Repo, repoCommit.CommitID),
CommandError: "",
})
}
encoded, _ := json.Marshal(protocol.BatchLogResponse{Results: results})
body := io.NopCloser(strings.NewReader(strings.TrimSpace(string(encoded))))
return &http.Response{StatusCode: 200, Body: body}, nil
})).
WithClientSource(source)
opts := gitserver.BatchLogOptions{
RepoCommits: []api.RepoCommit{
{Repo: api.RepoName("github.com/test/foo"), CommitID: api.CommitID("deadbeef01")},
{Repo: api.RepoName("github.com/test/bar"), CommitID: api.CommitID("deadbeef02")},
{Repo: api.RepoName("github.com/test/baz"), CommitID: api.CommitID("deadbeef03")},
{Repo: api.RepoName("github.com/test/bonk"), CommitID: api.CommitID("deadbeef04")},
{Repo: api.RepoName("github.com/test/quux"), CommitID: api.CommitID("deadbeef05")},
{Repo: api.RepoName("github.com/test/honk"), CommitID: api.CommitID("deadbeef06")},
{Repo: api.RepoName("github.com/test/xyzzy"), CommitID: api.CommitID("deadbeef07")},
{Repo: api.RepoName("github.com/test/lorem"), CommitID: api.CommitID("deadbeef08")},
{Repo: api.RepoName("github.com/test/ipsum"), CommitID: api.CommitID("deadbeef09")},
{Repo: api.RepoName("github.com/test/fnord"), CommitID: api.CommitID("deadbeef10")},
},
Format: "--format=test",
}
results := map[api.RepoCommit]gitserver.RawBatchLogResult{}
var mu sync.Mutex
if err := cli.BatchLog(context.Background(), opts, func(repoCommit api.RepoCommit, gitLogResult gitserver.RawBatchLogResult) error {
mu.Lock()
defer mu.Unlock()
results[repoCommit] = gitLogResult
return nil
}); err != nil {
t.Fatalf("unexpected error performing batch log: %s", err)
}
expectedResults := map[api.RepoCommit]gitserver.RawBatchLogResult{
// Shard 1
{Repo: "github.com/test/baz", CommitID: "deadbeef03"}: {Stdout: "out<http://172.16.8.1:8080/batch-log: github.com/test/baz@deadbeef03>"},
{Repo: "github.com/test/quux", CommitID: "deadbeef05"}: {Stdout: "out<http://172.16.8.1:8080/batch-log: github.com/test/quux@deadbeef05>"},
{Repo: "github.com/test/honk", CommitID: "deadbeef06"}: {Stdout: "out<http://172.16.8.1:8080/batch-log: github.com/test/honk@deadbeef06>"},
// Shard 2
{Repo: "github.com/test/bar", CommitID: "deadbeef02"}: {Stdout: "out<http://172.16.8.2:8080/batch-log: github.com/test/bar@deadbeef02>"},
{Repo: "github.com/test/xyzzy", CommitID: "deadbeef07"}: {Stdout: "out<http://172.16.8.2:8080/batch-log: github.com/test/xyzzy@deadbeef07>"},
// Shard 3
{Repo: "github.com/test/foo", CommitID: "deadbeef01"}: {Stdout: "out<http://172.16.8.3:8080/batch-log: github.com/test/foo@deadbeef01>"},
{Repo: "github.com/test/bonk", CommitID: "deadbeef04"}: {Stdout: "out<http://172.16.8.3:8080/batch-log: github.com/test/bonk@deadbeef04>"},
{Repo: "github.com/test/lorem", CommitID: "deadbeef08"}: {Stdout: "out<http://172.16.8.3:8080/batch-log: github.com/test/lorem@deadbeef08>"},
{Repo: "github.com/test/ipsum", CommitID: "deadbeef09"}: {Stdout: "out<http://172.16.8.3:8080/batch-log: github.com/test/ipsum@deadbeef09>"},
{Repo: "github.com/test/fnord", CommitID: "deadbeef10"}: {Stdout: "out<http://172.16.8.3:8080/batch-log: github.com/test/fnord@deadbeef10>"},
}
if diff := cmp.Diff(expectedResults, results); diff != "" {
t.Errorf("unexpected results (-want +got):\n%s", diff)
}
}
func TestLocalGitCommand(t *testing.T) {
// creating a repo with 1 committed file
root := gitserver.CreateRepoDir(t)
@ -667,97 +469,31 @@ func TestClient_IsRepoCloneableGRPC(t *testing.T) {
})
}
t.Run("GRPC", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(true),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
for _, tc := range testCases {
for _, tc := range testCases {
called := false
source := gitserver.NewTestClientSource(t, []string{gitserverAddr}, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockIsRepoCloneable := func(ctx context.Context, in *proto.IsRepoCloneableRequest, opts ...grpc.CallOption) (*proto.IsRepoCloneableResponse, error) {
called = true
if api.RepoName(in.Repo) != tc.repo {
t.Errorf("got %q, want %q", in.Repo, tc.repo)
}
return tc.mockResponse.ToProto(), nil
called := false
source := gitserver.NewTestClientSource(t, []string{gitserverAddr}, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockIsRepoCloneable := func(ctx context.Context, in *proto.IsRepoCloneableRequest, opts ...grpc.CallOption) (*proto.IsRepoCloneableResponse, error) {
called = true
if api.RepoName(in.Repo) != tc.repo {
t.Errorf("got %q, want %q", in.Repo, tc.repo)
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.IsRepoCloneableFunc.SetDefaultHook(mockIsRepoCloneable)
return cli
return tc.mockResponse.ToProto(), nil
}
})
client := gitserver.NewTestClient(t).WithClientSource(source)
runTests(t, client, tc)
if !called {
t.Fatal("IsRepoCloneable: grpc client not called")
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.IsRepoCloneableFunc.SetDefaultHook(mockIsRepoCloneable)
return cli
}
}
})
t.Run("HTTP", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
expected := fmt.Sprintf("http://%s", gitserverAddr)
for _, tc := range testCases {
called := false
source := gitserver.NewTestClientSource(t, []string{gitserverAddr}, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockIsRepoCloneable := func(ctx context.Context, in *proto.IsRepoCloneableRequest, opts ...grpc.CallOption) (*proto.IsRepoCloneableResponse, error) {
called = true
if api.RepoName(in.Repo) != tc.repo {
t.Errorf("got %q, want %q", in.Repo, tc.repo)
}
return tc.mockResponse.ToProto(), nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.IsRepoCloneableFunc.SetDefaultHook(mockIsRepoCloneable)
return cli
}
})
client := gitserver.NewTestClient(t).WithClientSource(source)
client := gitserver.NewTestClient(t).
WithDoer(httpcli.DoerFunc(func(r *http.Request) (*http.Response, error) {
switch r.URL.String() {
case expected + "/is-repo-cloneable":
encoded, _ := json.Marshal(tc.mockResponse)
body := io.NopCloser(strings.NewReader(strings.TrimSpace(string(encoded))))
return &http.Response{
StatusCode: 200,
Body: body,
}, nil
default:
return nil, errors.Newf("unexpected URL: %q", r.URL.String())
}
})).
WithClientSource(source)
runTests(t, client, tc)
if called {
t.Fatal("IsRepoCloneable: http client should be called")
}
runTests(t, client, tc)
if !called {
t.Fatal("IsRepoCloneable: grpc client not called")
}
})
}
}
func TestClient_SystemsInfo(t *testing.T) {
@ -799,110 +535,40 @@ func TestClient_SystemsInfo(t *testing.T) {
}
}
t.Run("GRPC", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(true),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
var called atomic.Bool
source := gitserver.NewTestClientSource(t, []string{gitserverAddr1, gitserverAddr2}, func(o *gitserver.TestClientSourceOptions) {
responseByAddress := make(map[string]*proto.DiskInfoResponse, len(expectedResponses))
for _, response := range expectedResponses {
responseByAddress[response.Address] = &proto.DiskInfoResponse{
FreeSpace: response.FreeSpace,
TotalSpace: response.TotalSpace,
PercentUsed: response.PercentUsed,
}
var called atomic.Bool
source := gitserver.NewTestClientSource(t, []string{gitserverAddr1, gitserverAddr2}, func(o *gitserver.TestClientSourceOptions) {
responseByAddress := make(map[string]*proto.DiskInfoResponse, len(expectedResponses))
for _, response := range expectedResponses {
responseByAddress[response.Address] = &proto.DiskInfoResponse{
FreeSpace: response.FreeSpace,
TotalSpace: response.TotalSpace,
PercentUsed: response.PercentUsed,
}
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockDiskInfo := func(ctx context.Context, in *proto.DiskInfoRequest, opts ...grpc.CallOption) (*proto.DiskInfoResponse, error) {
address := cc.Target()
response, ok := responseByAddress[address]
if !ok {
t.Fatalf("received unexpected address %q", address)
}
called.Store(true)
return response, nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.DiskInfoFunc.SetDefaultHook(mockDiskInfo)
return cli
}
})
client := gitserver.NewTestClient(t).WithClientSource(source)
runTest(t, client)
if !called.Load() {
t.Fatal("DiskInfo: grpc client not called")
}
})
t.Run("HTTP", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
called := false
source := gitserver.NewTestClientSource(t, []string{gitserverAddr1, gitserverAddr2}, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockDiskInfo := func(ctx context.Context, in *proto.DiskInfoRequest, opts ...grpc.CallOption) (*proto.DiskInfoResponse, error) {
called = true
return nil, nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.DiskInfoFunc.SetDefaultHook(mockDiskInfo)
return cli
}
})
client := gitserver.NewTestClient(t).
WithDoer(httpcli.DoerFunc(func(r *http.Request) (*http.Response, error) {
responseByAddress := make(map[string]*proto.DiskInfoResponse, len(expectedResponses))
for _, response := range expectedResponses {
responseByAddress[fmt.Sprintf("http://%s/disk-info", response.Address)] = &proto.DiskInfoResponse{
FreeSpace: response.FreeSpace,
TotalSpace: response.TotalSpace,
PercentUsed: response.PercentUsed,
}
}
address := r.URL.String()
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockDiskInfo := func(ctx context.Context, in *proto.DiskInfoRequest, opts ...grpc.CallOption) (*proto.DiskInfoResponse, error) {
address := cc.Target()
response, ok := responseByAddress[address]
if !ok {
return nil, errors.Newf("unexpected URL: %q", address)
t.Fatalf("received unexpected address %q", address)
}
encoded, _ := protojson.Marshal(response)
body := io.NopCloser(strings.NewReader(string(encoded)))
return &http.Response{
StatusCode: 200,
Body: body,
}, nil
})).
WithClientSource(source)
runTest(t, client)
if called {
t.Fatal("DiskInfo: http client should be called")
called.Store(true)
return response, nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.DiskInfoFunc.SetDefaultHook(mockDiskInfo)
return cli
}
})
client := gitserver.NewTestClient(t).WithClientSource(source)
runTest(t, client)
if !called.Load() {
t.Fatal("DiskInfo: grpc client not called")
}
}
func TestClient_SystemInfo(t *testing.T) {
@ -921,86 +587,25 @@ func TestClient_SystemInfo(t *testing.T) {
require.Equal(t, mockResponse.TotalSpace, info.TotalSpace)
}
t.Run("GRPC", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(true),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
called := false
source := gitserver.NewTestClientSource(t, []string{gitserverAddr}, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockDiskInfo := func(ctx context.Context, in *proto.DiskInfoRequest, opts ...grpc.CallOption) (*proto.DiskInfoResponse, error) {
called = true
return mockResponse, nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.DiskInfoFunc.SetDefaultHook(mockDiskInfo)
return cli
called := false
source := gitserver.NewTestClientSource(t, []string{gitserverAddr}, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockDiskInfo := func(ctx context.Context, in *proto.DiskInfoRequest, opts ...grpc.CallOption) (*proto.DiskInfoResponse, error) {
called = true
return mockResponse, nil
}
})
client := gitserver.NewTestClient(t).WithClientSource(source)
runTest(t, client, gitserverAddr)
if !called {
t.Fatal("DiskInfo: grpc client not called")
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.DiskInfoFunc.SetDefaultHook(mockDiskInfo)
return cli
}
})
t.Run("HTTP", func(t *testing.T) {
conf.Mock(&conf.Unified{
SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
t.Cleanup(func() {
conf.Mock(nil)
})
expected := fmt.Sprintf("http://%s", gitserverAddr)
client := gitserver.NewTestClient(t).WithClientSource(source)
called := false
source := gitserver.NewTestClientSource(t, []string{gitserverAddr}, func(o *gitserver.TestClientSourceOptions) {
o.ClientFunc = func(cc *grpc.ClientConn) proto.GitserverServiceClient {
mockDiskInfo := func(ctx context.Context, in *proto.DiskInfoRequest, opts ...grpc.CallOption) (*proto.DiskInfoResponse, error) {
called = true
return mockResponse, nil
}
cli := gitserver.NewStrictMockGitserverServiceClient()
cli.DiskInfoFunc.SetDefaultHook(mockDiskInfo)
return cli
}
})
client := gitserver.NewTestClient(t).
WithDoer(httpcli.DoerFunc(func(r *http.Request) (*http.Response, error) {
switch r.URL.String() {
case expected + "/disk-info":
encoded, _ := json.Marshal(mockResponse)
body := io.NopCloser(strings.NewReader(strings.TrimSpace(string(encoded))))
return &http.Response{
StatusCode: 200,
Body: body,
}, nil
default:
return nil, errors.Newf("unexpected URL: %q", r.URL.String())
}
})).
WithClientSource(source)
runTest(t, client, gitserverAddr)
if called {
t.Fatal("DiskInfo: http client should be called")
}
})
runTest(t, client, gitserverAddr)
if !called {
t.Fatal("DiskInfo: grpc client not called")
}
}
type fuzzTime time.Time
@ -1014,7 +619,3 @@ func (fuzzTime) Generate(rand *rand.Rand, _ int) reflect.Value {
}
var _ quick.Generator = fuzzTime{}
func boolPointer(b bool) *bool {
return &b
}

View File

@ -5,11 +5,9 @@ import (
"bytes"
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"io/fs"
"net/http"
"net/mail"
"os"
stdlibpath "path"
@ -32,7 +30,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/authz"
"github.com/sourcegraph/sourcegraph/internal/byteutils"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/fileutil"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
@ -2579,120 +2576,81 @@ func (c *clientImplementor) ArchiveReader(
return nil, err
}
if conf.IsGRPCEnabled(ctx) {
client, err := c.clientSource.ClientForRepo(ctx, c.userAgent, repo)
if err != nil {
return nil, err
}
client, err := c.clientSource.ClientForRepo(ctx, c.userAgent, repo)
if err != nil {
return nil, err
}
req := options.ToProto(string(repo)) // HACK: ArchiveOptions doesn't have a repository here, so we have to add it ourselves.
req := options.ToProto(string(repo)) // HACK: ArchiveOptions doesn't have a repository here, so we have to add it ourselves.
ctx, cancel := context.WithCancel(ctx)
ctx, cancel := context.WithCancel(ctx)
stream, err := client.Archive(ctx, req)
if err != nil {
cancel()
return nil, err
}
stream, err := client.Archive(ctx, req)
if err != nil {
cancel()
return nil, err
}
// first message from the gRPC stream needs to be read to check for errors before continuing
// to read the rest of the stream. If the first message is an error, we cancel the stream
// and return the error.
// first message from the gRPC stream needs to be read to check for errors before continuing
// to read the rest of the stream. If the first message is an error, we cancel the stream
// and return the error.
//
// This is necessary to provide parity between the REST and gRPC implementations of
// ArchiveReader. Users of cli.ArchiveReader may assume error handling occurs immediately,
// as is the case with the HTTP implementation where errors are returned as soon as the
// function returns. gRPC is asynchronous, so we have to start consuming messages from
// the stream to see any errors from the server. Reading the first message ensures we
// handle any errors synchronously, similar to the HTTP implementation.
firstMessage, firstError := stream.Recv()
if firstError != nil {
// Hack: The ArchiveReader.Read() implementation handles surfacing any
// "revision not found" errors returned from the invoked git binary.
//
// This is necessary to provide parity between the REST and gRPC implementations of
// ArchiveReader. Users of cli.ArchiveReader may assume error handling occurs immediately,
// as is the case with the HTTP implementation where errors are returned as soon as the
// function returns. gRPC is asynchronous, so we have to start consuming messages from
// the stream to see any errors from the server. Reading the first message ensures we
// handle any errors synchronously, similar to the HTTP implementation.
// In order to maintain parity with the HTTP API, we return this error in the ArchiveReader.Read() method
// instead of returning it immediately.
firstMessage, firstError := stream.Recv()
if firstError != nil {
// Hack: The ArchiveReader.Read() implementation handles surfacing any
// "revision not found" errors returned from the invoked git binary.
//
// In order to maintain parity with the HTTP API, we return this error in the ArchiveReader.Read() method
// instead of returning it immediately.
// We return early only if this isn't a revision not found error.
// We return early only if this isn't a revision not found error.
err := convertGRPCErrorToGitDomainError(firstError)
err := convertGRPCErrorToGitDomainError(firstError)
var cse *CommandStatusError
if !errors.As(err, &cse) || !isRevisionNotFound(cse.Stderr) {
cancel()
return nil, convertGRPCErrorToGitDomainError(err)
}
}
firstMessageRead := false
// Create a reader to read from the gRPC stream.
r := streamio.NewReader(func() ([]byte, error) {
// Check if we've read the first message yet. If not, read it and return.
if !firstMessageRead {
firstMessageRead = true
if firstError != nil {
return nil, firstError
}
return firstMessage.GetData(), nil
}
// Receive the next message from the stream.
msg, err := stream.Recv()
if err != nil {
return nil, convertGRPCErrorToGitDomainError(err)
}
// Return the data from the received message.
return msg.GetData(), nil
})
return &archiveReader{
base: &readCloseWrapper{r: r, closeFn: cancel},
repo: repo,
spec: options.Treeish,
}, nil
} else {
// Fall back to http request
u := c.archiveURL(ctx, repo, options)
resp, err := c.do(ctx, repo, u.String(), nil)
if err != nil {
return nil, err
}
switch resp.StatusCode {
case http.StatusOK:
return &archiveReader{
base: &cmdReader{
rc: resp.Body,
trailer: resp.Trailer,
},
repo: repo,
spec: options.Treeish,
}, nil
case http.StatusNotFound:
var payload protocol.NotFoundPayload
if err := json.NewDecoder(resp.Body).Decode(&payload); err != nil {
resp.Body.Close()
return nil, err
}
resp.Body.Close()
return nil, &badRequestError{
error: &gitdomain.RepoNotExistError{
Repo: repo,
CloneInProgress: payload.CloneInProgress,
CloneProgress: payload.CloneProgress,
},
}
default:
resp.Body.Close()
return nil, errors.Errorf("unexpected status code: %d", resp.StatusCode)
var cse *CommandStatusError
if !errors.As(err, &cse) || !isRevisionNotFound(cse.Stderr) {
cancel()
return nil, convertGRPCErrorToGitDomainError(err)
}
}
firstMessageRead := false
// Create a reader to read from the gRPC stream.
r := streamio.NewReader(func() ([]byte, error) {
// Check if we've read the first message yet. If not, read it and return.
if !firstMessageRead {
firstMessageRead = true
if firstError != nil {
return nil, firstError
}
return firstMessage.GetData(), nil
}
// Receive the next message from the stream.
msg, err := stream.Recv()
if err != nil {
return nil, convertGRPCErrorToGitDomainError(err)
}
// Return the data from the received message.
return msg.GetData(), nil
})
return &archiveReader{
base: &readCloseWrapper{r: r, closeFn: cancel},
repo: repo,
spec: options.Treeish,
}, nil
}
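A caller-side sketch (hypothetical, not part of this diff) of the error behavior described in the comments above: setup failures are returned by `ArchiveReader` itself, while "revision not found" is only surfaced on the first `Read`. The narrowed `archiveClient` interface is an assumption for this sketch; the concrete client signature may differ.

```go
package main

import (
	"context"
	"io"
	"os"

	"github.com/sourcegraph/sourcegraph/internal/api"
	"github.com/sourcegraph/sourcegraph/internal/gitserver"
	"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
	"github.com/sourcegraph/sourcegraph/lib/errors"
)

// archiveClient narrows the gitserver client to the single method used here.
type archiveClient interface {
	ArchiveReader(ctx context.Context, repo api.RepoName, opts gitserver.ArchiveOptions) (io.ReadCloser, error)
}

func dumpArchive(ctx context.Context, c archiveClient, repo api.RepoName, treeish string) error {
	r, err := c.ArchiveReader(ctx, repo, gitserver.ArchiveOptions{Treeish: treeish})
	if err != nil {
		return err // e.g. repo does not exist, gRPC connection errors
	}
	defer r.Close()
	if _, err := io.Copy(os.Stdout, r); err != nil {
		// Revision errors are expected to be surfaced lazily by the reader,
		// as the comments above note.
		var revErr *gitdomain.RevisionNotFoundError
		if errors.As(err, &revErr) {
			return errors.Wrapf(err, "treeish %q not found", treeish)
		}
		return err
	}
	return nil
}
```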
func addNameOnly(opt CommitsOptions, checker authz.SubRepoPermissionChecker) CommitsOptions {

View File

@ -5,11 +5,9 @@ import (
"context"
"fmt"
"io"
"net/http"
"os"
"os/exec"
"path/filepath"
"strconv"
"strings"
"syscall"
@ -176,8 +174,6 @@ type RemoteGitCommand struct {
}
type execer interface {
httpPost(ctx context.Context, repo api.RepoName, op string, payload any) (resp *http.Response, err error)
AddrForRepo(ctx context.Context, repo api.RepoName) string
ClientForRepo(ctx context.Context, repo api.RepoName) (proto.GitserverServiceClient, error)
}
@ -251,44 +247,3 @@ func (c *RemoteGitCommand) String() string { return fmt.Sprintf("%q", c.args) }
func (c *RemoteGitCommand) StdoutReader(ctx context.Context) (io.ReadCloser, error) {
return c.sendExec(ctx)
}
type cmdReader struct {
rc io.ReadCloser
trailer http.Header
}
func (c *cmdReader) Read(p []byte) (int, error) {
n, err := c.rc.Read(p)
if err == io.EOF {
statusCode, err := strconv.Atoi(c.trailer.Get("X-Exec-Exit-Status"))
if err != nil {
return n, errors.Wrap(err, "failed to parse exit status code")
}
errorMessage := c.trailer.Get("X-Exec-Error")
// did the command exit cleanly?
if statusCode == 0 && errorMessage == "" {
// yes - propagate io.EOF
return n, io.EOF
}
// no - report it
stderr := c.trailer.Get("X-Exec-Stderr")
err = &CommandStatusError{
Stderr: stderr,
StatusCode: int32(statusCode),
Message: errorMessage,
}
return n, err
}
return n, err
}
func (c *cmdReader) Close() error {
return c.rc.Close()
}

View File

@ -1,85 +0,0 @@
package gitserver
import (
"context"
"encoding/json"
"net/http"
"net/url"
"os"
"path/filepath"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
proto "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
)
type GitoliteLister struct {
addrs func() []string
httpClient httpcli.Doer
grpcClient ClientSource
userAgent string
}
func NewGitoliteLister(cli httpcli.Doer) *GitoliteLister {
return &GitoliteLister{
httpClient: cli,
addrs: func() []string {
return conns.get().Addresses
},
grpcClient: conns,
userAgent: filepath.Base(os.Args[0]),
}
}
func (c *GitoliteLister) ListRepos(ctx context.Context, gitoliteHost string) (list []*gitolite.Repo, err error) {
addrs := c.addrs()
if len(addrs) == 0 {
panic("unexpected state: no gitserver addresses")
}
if conf.IsGRPCEnabled(ctx) {
client, err := c.grpcClient.ClientForRepo(ctx, c.userAgent, "")
if err != nil {
return nil, err
}
req := &proto.ListGitoliteRequest{
GitoliteHost: gitoliteHost,
}
grpcResp, err := client.ListGitolite(ctx, req)
if err != nil {
return nil, err
}
list = make([]*gitolite.Repo, len(grpcResp.Repos))
for i, r := range grpcResp.GetRepos() {
list[i] = &gitolite.Repo{
Name: r.GetName(),
URL: r.GetUrl(),
}
}
return list, nil
} else {
// The gitserver calls the shared Gitolite server in response to this request, so
// we need to only call a single gitserver (or else we'd get duplicate results).
addr := addrForKey(gitoliteHost, addrs)
req, err := http.NewRequest("GET", "http://"+addr+"/list-gitolite?gitolite="+url.QueryEscape(gitoliteHost), nil)
if err != nil {
return nil, err
}
// Set header so that the handler knows the request is from us
req.Header.Set("X-Requested-With", "Sourcegraph")
resp, err := c.httpClient.Do(req.WithContext(ctx))
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = json.NewDecoder(resp.Body).Decode(&list)
return list, err
}
}
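With the `GitoliteLister` and its HTTP fallback removed, listing Gitolite repositories is expected to go through the gitserver client's new `ListGitoliteRepos` method shown in the mock changes above. A minimal sketch, not part of this diff; the client name string is illustrative.

```go
package main

import (
	"context"
	"fmt"

	"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
	"github.com/sourcegraph/sourcegraph/internal/gitserver"
)

func listGitoliteRepos(ctx context.Context, host string) ([]*gitolite.Repo, error) {
	// NewClient takes a caller identifier, mirroring the call sites updated
	// elsewhere in this change (e.g. "backend.external-services").
	client := gitserver.NewClient("example.gitolite-lister")
	return client.ListGitoliteRepos(ctx, host)
}

func main() {
	repos, err := listGitoliteRepos(context.Background(), "gitolite.example.com")
	if err != nil {
		fmt.Println("listing gitolite repos:", err)
		return
	}
	for _, r := range repos {
		fmt.Println(r.Name, r.URL)
	}
}
```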

View File

@ -12,6 +12,7 @@ import (
v1 "github.com/sourcegraph/sourcegraph/internal/gitserver/v1"
grpc "google.golang.org/grpc"
metadata "google.golang.org/grpc/metadata"
)
// MockGitserverServiceClient is a mock implementation of the
@ -3054,3 +3055,877 @@ func (c GitserverServiceClientSearchFuncCall) Args() []interface{} {
func (c GitserverServiceClientSearchFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// MockGitserverService_ExecServer is a mock implementation of the
// GitserverService_ExecServer interface (from the package
// github.com/sourcegraph/sourcegraph/internal/gitserver/v1) used for unit
// testing.
type MockGitserverService_ExecServer struct {
// ContextFunc is an instance of a mock function object controlling the
// behavior of the method Context.
ContextFunc *GitserverService_ExecServerContextFunc
// RecvMsgFunc is an instance of a mock function object controlling the
// behavior of the method RecvMsg.
RecvMsgFunc *GitserverService_ExecServerRecvMsgFunc
// SendFunc is an instance of a mock function object controlling the
// behavior of the method Send.
SendFunc *GitserverService_ExecServerSendFunc
// SendHeaderFunc is an instance of a mock function object controlling
// the behavior of the method SendHeader.
SendHeaderFunc *GitserverService_ExecServerSendHeaderFunc
// SendMsgFunc is an instance of a mock function object controlling the
// behavior of the method SendMsg.
SendMsgFunc *GitserverService_ExecServerSendMsgFunc
// SetHeaderFunc is an instance of a mock function object controlling
// the behavior of the method SetHeader.
SetHeaderFunc *GitserverService_ExecServerSetHeaderFunc
// SetTrailerFunc is an instance of a mock function object controlling
// the behavior of the method SetTrailer.
SetTrailerFunc *GitserverService_ExecServerSetTrailerFunc
}
// NewMockGitserverService_ExecServer creates a new mock of the
// GitserverService_ExecServer interface. All methods return zero values for
// all results, unless overwritten.
func NewMockGitserverService_ExecServer() *MockGitserverService_ExecServer {
return &MockGitserverService_ExecServer{
ContextFunc: &GitserverService_ExecServerContextFunc{
defaultHook: func() (r0 context.Context) {
return
},
},
RecvMsgFunc: &GitserverService_ExecServerRecvMsgFunc{
defaultHook: func(interface{}) (r0 error) {
return
},
},
SendFunc: &GitserverService_ExecServerSendFunc{
defaultHook: func(*v1.ExecResponse) (r0 error) {
return
},
},
SendHeaderFunc: &GitserverService_ExecServerSendHeaderFunc{
defaultHook: func(metadata.MD) (r0 error) {
return
},
},
SendMsgFunc: &GitserverService_ExecServerSendMsgFunc{
defaultHook: func(interface{}) (r0 error) {
return
},
},
SetHeaderFunc: &GitserverService_ExecServerSetHeaderFunc{
defaultHook: func(metadata.MD) (r0 error) {
return
},
},
SetTrailerFunc: &GitserverService_ExecServerSetTrailerFunc{
defaultHook: func(metadata.MD) {
return
},
},
}
}
// NewStrictMockGitserverService_ExecServer creates a new mock of the
// GitserverService_ExecServer interface. All methods panic on invocation,
// unless overwritten.
func NewStrictMockGitserverService_ExecServer() *MockGitserverService_ExecServer {
return &MockGitserverService_ExecServer{
ContextFunc: &GitserverService_ExecServerContextFunc{
defaultHook: func() context.Context {
panic("unexpected invocation of MockGitserverService_ExecServer.Context")
},
},
RecvMsgFunc: &GitserverService_ExecServerRecvMsgFunc{
defaultHook: func(interface{}) error {
panic("unexpected invocation of MockGitserverService_ExecServer.RecvMsg")
},
},
SendFunc: &GitserverService_ExecServerSendFunc{
defaultHook: func(*v1.ExecResponse) error {
panic("unexpected invocation of MockGitserverService_ExecServer.Send")
},
},
SendHeaderFunc: &GitserverService_ExecServerSendHeaderFunc{
defaultHook: func(metadata.MD) error {
panic("unexpected invocation of MockGitserverService_ExecServer.SendHeader")
},
},
SendMsgFunc: &GitserverService_ExecServerSendMsgFunc{
defaultHook: func(interface{}) error {
panic("unexpected invocation of MockGitserverService_ExecServer.SendMsg")
},
},
SetHeaderFunc: &GitserverService_ExecServerSetHeaderFunc{
defaultHook: func(metadata.MD) error {
panic("unexpected invocation of MockGitserverService_ExecServer.SetHeader")
},
},
SetTrailerFunc: &GitserverService_ExecServerSetTrailerFunc{
defaultHook: func(metadata.MD) {
panic("unexpected invocation of MockGitserverService_ExecServer.SetTrailer")
},
},
}
}
// NewMockGitserverService_ExecServerFrom creates a new mock of the
// MockGitserverService_ExecServer interface. All methods delegate to the
// given implementation, unless overwritten.
func NewMockGitserverService_ExecServerFrom(i v1.GitserverService_ExecServer) *MockGitserverService_ExecServer {
return &MockGitserverService_ExecServer{
ContextFunc: &GitserverService_ExecServerContextFunc{
defaultHook: i.Context,
},
RecvMsgFunc: &GitserverService_ExecServerRecvMsgFunc{
defaultHook: i.RecvMsg,
},
SendFunc: &GitserverService_ExecServerSendFunc{
defaultHook: i.Send,
},
SendHeaderFunc: &GitserverService_ExecServerSendHeaderFunc{
defaultHook: i.SendHeader,
},
SendMsgFunc: &GitserverService_ExecServerSendMsgFunc{
defaultHook: i.SendMsg,
},
SetHeaderFunc: &GitserverService_ExecServerSetHeaderFunc{
defaultHook: i.SetHeader,
},
SetTrailerFunc: &GitserverService_ExecServerSetTrailerFunc{
defaultHook: i.SetTrailer,
},
}
}
// GitserverService_ExecServerContextFunc describes the behavior when the
// Context method of the parent MockGitserverService_ExecServer instance is
// invoked.
type GitserverService_ExecServerContextFunc struct {
defaultHook func() context.Context
hooks []func() context.Context
history []GitserverService_ExecServerContextFuncCall
mutex sync.Mutex
}
// Context delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockGitserverService_ExecServer) Context() context.Context {
r0 := m.ContextFunc.nextHook()()
m.ContextFunc.appendCall(GitserverService_ExecServerContextFuncCall{r0})
return r0
}
// SetDefaultHook sets function that is called when the Context method of
// the parent MockGitserverService_ExecServer instance is invoked and the
// hook queue is empty.
func (f *GitserverService_ExecServerContextFunc) SetDefaultHook(hook func() context.Context) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// Context method of the parent MockGitserverService_ExecServer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverService_ExecServerContextFunc) PushHook(hook func() context.Context) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverService_ExecServerContextFunc) SetDefaultReturn(r0 context.Context) {
f.SetDefaultHook(func() context.Context {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverService_ExecServerContextFunc) PushReturn(r0 context.Context) {
f.PushHook(func() context.Context {
return r0
})
}
func (f *GitserverService_ExecServerContextFunc) nextHook() func() context.Context {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverService_ExecServerContextFunc) appendCall(r0 GitserverService_ExecServerContextFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of GitserverService_ExecServerContextFuncCall
// objects describing the invocations of this function.
func (f *GitserverService_ExecServerContextFunc) History() []GitserverService_ExecServerContextFuncCall {
f.mutex.Lock()
history := make([]GitserverService_ExecServerContextFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverService_ExecServerContextFuncCall is an object that describes an
// invocation of method Context on an instance of
// MockGitserverService_ExecServer.
type GitserverService_ExecServerContextFuncCall struct {
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 context.Context
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverService_ExecServerContextFuncCall) Args() []interface{} {
return []interface{}{}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverService_ExecServerContextFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// GitserverService_ExecServerRecvMsgFunc describes the behavior when the
// RecvMsg method of the parent MockGitserverService_ExecServer instance is
// invoked.
type GitserverService_ExecServerRecvMsgFunc struct {
defaultHook func(interface{}) error
hooks []func(interface{}) error
history []GitserverService_ExecServerRecvMsgFuncCall
mutex sync.Mutex
}
// RecvMsg delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockGitserverService_ExecServer) RecvMsg(v0 interface{}) error {
r0 := m.RecvMsgFunc.nextHook()(v0)
m.RecvMsgFunc.appendCall(GitserverService_ExecServerRecvMsgFuncCall{v0, r0})
return r0
}
// SetDefaultHook sets function that is called when the RecvMsg method of
// the parent MockGitserverService_ExecServer instance is invoked and the
// hook queue is empty.
func (f *GitserverService_ExecServerRecvMsgFunc) SetDefaultHook(hook func(interface{}) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// RecvMsg method of the parent MockGitserverService_ExecServer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverService_ExecServerRecvMsgFunc) PushHook(hook func(interface{}) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverService_ExecServerRecvMsgFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(interface{}) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverService_ExecServerRecvMsgFunc) PushReturn(r0 error) {
f.PushHook(func(interface{}) error {
return r0
})
}
func (f *GitserverService_ExecServerRecvMsgFunc) nextHook() func(interface{}) error {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverService_ExecServerRecvMsgFunc) appendCall(r0 GitserverService_ExecServerRecvMsgFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of GitserverService_ExecServerRecvMsgFuncCall
// objects describing the invocations of this function.
func (f *GitserverService_ExecServerRecvMsgFunc) History() []GitserverService_ExecServerRecvMsgFuncCall {
f.mutex.Lock()
history := make([]GitserverService_ExecServerRecvMsgFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverService_ExecServerRecvMsgFuncCall is an object that describes an
// invocation of method RecvMsg on an instance of
// MockGitserverService_ExecServer.
type GitserverService_ExecServerRecvMsgFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 interface{}
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverService_ExecServerRecvMsgFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverService_ExecServerRecvMsgFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// GitserverService_ExecServerSendFunc describes the behavior when the Send
// method of the parent MockGitserverService_ExecServer instance is invoked.
type GitserverService_ExecServerSendFunc struct {
defaultHook func(*v1.ExecResponse) error
hooks []func(*v1.ExecResponse) error
history []GitserverService_ExecServerSendFuncCall
mutex sync.Mutex
}
// Send delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockGitserverService_ExecServer) Send(v0 *v1.ExecResponse) error {
r0 := m.SendFunc.nextHook()(v0)
m.SendFunc.appendCall(GitserverService_ExecServerSendFuncCall{v0, r0})
return r0
}
// SetDefaultHook sets function that is called when the Send method of the
// parent MockGitserverService_ExecServer instance is invoked and the hook
// queue is empty.
func (f *GitserverService_ExecServerSendFunc) SetDefaultHook(hook func(*v1.ExecResponse) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// Send method of the parent MockGitserverService_ExecServer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverService_ExecServerSendFunc) PushHook(hook func(*v1.ExecResponse) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverService_ExecServerSendFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(*v1.ExecResponse) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverService_ExecServerSendFunc) PushReturn(r0 error) {
f.PushHook(func(*v1.ExecResponse) error {
return r0
})
}
func (f *GitserverService_ExecServerSendFunc) nextHook() func(*v1.ExecResponse) error {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverService_ExecServerSendFunc) appendCall(r0 GitserverService_ExecServerSendFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of GitserverService_ExecServerSendFuncCall
// objects describing the invocations of this function.
func (f *GitserverService_ExecServerSendFunc) History() []GitserverService_ExecServerSendFuncCall {
f.mutex.Lock()
history := make([]GitserverService_ExecServerSendFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverService_ExecServerSendFuncCall is an object that describes an
// invocation of method Send on an instance of
// MockGitserverService_ExecServer.
type GitserverService_ExecServerSendFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 *v1.ExecResponse
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverService_ExecServerSendFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverService_ExecServerSendFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// GitserverService_ExecServerSendHeaderFunc describes the behavior when the
// SendHeader method of the parent MockGitserverService_ExecServer instance
// is invoked.
type GitserverService_ExecServerSendHeaderFunc struct {
defaultHook func(metadata.MD) error
hooks []func(metadata.MD) error
history []GitserverService_ExecServerSendHeaderFuncCall
mutex sync.Mutex
}
// SendHeader delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockGitserverService_ExecServer) SendHeader(v0 metadata.MD) error {
r0 := m.SendHeaderFunc.nextHook()(v0)
m.SendHeaderFunc.appendCall(GitserverService_ExecServerSendHeaderFuncCall{v0, r0})
return r0
}
// SetDefaultHook sets function that is called when the SendHeader method of
// the parent MockGitserverService_ExecServer instance is invoked and the
// hook queue is empty.
func (f *GitserverService_ExecServerSendHeaderFunc) SetDefaultHook(hook func(metadata.MD) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// SendHeader method of the parent MockGitserverService_ExecServer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverService_ExecServerSendHeaderFunc) PushHook(hook func(metadata.MD) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverService_ExecServerSendHeaderFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(metadata.MD) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverService_ExecServerSendHeaderFunc) PushReturn(r0 error) {
f.PushHook(func(metadata.MD) error {
return r0
})
}
func (f *GitserverService_ExecServerSendHeaderFunc) nextHook() func(metadata.MD) error {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverService_ExecServerSendHeaderFunc) appendCall(r0 GitserverService_ExecServerSendHeaderFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of
// GitserverService_ExecServerSendHeaderFuncCall objects describing the
// invocations of this function.
func (f *GitserverService_ExecServerSendHeaderFunc) History() []GitserverService_ExecServerSendHeaderFuncCall {
f.mutex.Lock()
history := make([]GitserverService_ExecServerSendHeaderFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverService_ExecServerSendHeaderFuncCall is an object that describes
// an invocation of method SendHeader on an instance of
// MockGitserverService_ExecServer.
type GitserverService_ExecServerSendHeaderFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 metadata.MD
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverService_ExecServerSendHeaderFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverService_ExecServerSendHeaderFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// GitserverService_ExecServerSendMsgFunc describes the behavior when the
// SendMsg method of the parent MockGitserverService_ExecServer instance is
// invoked.
type GitserverService_ExecServerSendMsgFunc struct {
defaultHook func(interface{}) error
hooks []func(interface{}) error
history []GitserverService_ExecServerSendMsgFuncCall
mutex sync.Mutex
}
// SendMsg delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockGitserverService_ExecServer) SendMsg(v0 interface{}) error {
r0 := m.SendMsgFunc.nextHook()(v0)
m.SendMsgFunc.appendCall(GitserverService_ExecServerSendMsgFuncCall{v0, r0})
return r0
}
// SetDefaultHook sets function that is called when the SendMsg method of
// the parent MockGitserverService_ExecServer instance is invoked and the
// hook queue is empty.
func (f *GitserverService_ExecServerSendMsgFunc) SetDefaultHook(hook func(interface{}) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// SendMsg method of the parent MockGitserverService_ExecServer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverService_ExecServerSendMsgFunc) PushHook(hook func(interface{}) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverService_ExecServerSendMsgFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(interface{}) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverService_ExecServerSendMsgFunc) PushReturn(r0 error) {
f.PushHook(func(interface{}) error {
return r0
})
}
func (f *GitserverService_ExecServerSendMsgFunc) nextHook() func(interface{}) error {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverService_ExecServerSendMsgFunc) appendCall(r0 GitserverService_ExecServerSendMsgFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of GitserverService_ExecServerSendMsgFuncCall
// objects describing the invocations of this function.
func (f *GitserverService_ExecServerSendMsgFunc) History() []GitserverService_ExecServerSendMsgFuncCall {
f.mutex.Lock()
history := make([]GitserverService_ExecServerSendMsgFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverService_ExecServerSendMsgFuncCall is an object that describes an
// invocation of method SendMsg on an instance of
// MockGitserverService_ExecServer.
type GitserverService_ExecServerSendMsgFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 interface{}
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverService_ExecServerSendMsgFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverService_ExecServerSendMsgFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// GitserverService_ExecServerSetHeaderFunc describes the behavior when the
// SetHeader method of the parent MockGitserverService_ExecServer instance
// is invoked.
type GitserverService_ExecServerSetHeaderFunc struct {
defaultHook func(metadata.MD) error
hooks []func(metadata.MD) error
history []GitserverService_ExecServerSetHeaderFuncCall
mutex sync.Mutex
}
// SetHeader delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockGitserverService_ExecServer) SetHeader(v0 metadata.MD) error {
r0 := m.SetHeaderFunc.nextHook()(v0)
m.SetHeaderFunc.appendCall(GitserverService_ExecServerSetHeaderFuncCall{v0, r0})
return r0
}
// SetDefaultHook sets function that is called when the SetHeader method of
// the parent MockGitserverService_ExecServer instance is invoked and the
// hook queue is empty.
func (f *GitserverService_ExecServerSetHeaderFunc) SetDefaultHook(hook func(metadata.MD) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// SetHeader method of the parent MockGitserverService_ExecServer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverService_ExecServerSetHeaderFunc) PushHook(hook func(metadata.MD) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverService_ExecServerSetHeaderFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(metadata.MD) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverService_ExecServerSetHeaderFunc) PushReturn(r0 error) {
f.PushHook(func(metadata.MD) error {
return r0
})
}
func (f *GitserverService_ExecServerSetHeaderFunc) nextHook() func(metadata.MD) error {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverService_ExecServerSetHeaderFunc) appendCall(r0 GitserverService_ExecServerSetHeaderFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of
// GitserverService_ExecServerSetHeaderFuncCall objects describing the
// invocations of this function.
func (f *GitserverService_ExecServerSetHeaderFunc) History() []GitserverService_ExecServerSetHeaderFuncCall {
f.mutex.Lock()
history := make([]GitserverService_ExecServerSetHeaderFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverService_ExecServerSetHeaderFuncCall is an object that describes
// an invocation of method SetHeader on an instance of
// MockGitserverService_ExecServer.
type GitserverService_ExecServerSetHeaderFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 metadata.MD
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverService_ExecServerSetHeaderFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverService_ExecServerSetHeaderFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// GitserverService_ExecServerSetTrailerFunc describes the behavior when the
// SetTrailer method of the parent MockGitserverService_ExecServer instance
// is invoked.
type GitserverService_ExecServerSetTrailerFunc struct {
defaultHook func(metadata.MD)
hooks []func(metadata.MD)
history []GitserverService_ExecServerSetTrailerFuncCall
mutex sync.Mutex
}
// SetTrailer delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockGitserverService_ExecServer) SetTrailer(v0 metadata.MD) {
m.SetTrailerFunc.nextHook()(v0)
m.SetTrailerFunc.appendCall(GitserverService_ExecServerSetTrailerFuncCall{v0})
return
}
// SetDefaultHook sets function that is called when the SetTrailer method of
// the parent MockGitserverService_ExecServer instance is invoked and the
// hook queue is empty.
func (f *GitserverService_ExecServerSetTrailerFunc) SetDefaultHook(hook func(metadata.MD)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// SetTrailer method of the parent MockGitserverService_ExecServer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *GitserverService_ExecServerSetTrailerFunc) PushHook(hook func(metadata.MD)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *GitserverService_ExecServerSetTrailerFunc) SetDefaultReturn() {
f.SetDefaultHook(func(metadata.MD) {
return
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *GitserverService_ExecServerSetTrailerFunc) PushReturn() {
f.PushHook(func(metadata.MD) {
return
})
}
func (f *GitserverService_ExecServerSetTrailerFunc) nextHook() func(metadata.MD) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *GitserverService_ExecServerSetTrailerFunc) appendCall(r0 GitserverService_ExecServerSetTrailerFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of
// GitserverService_ExecServerSetTrailerFuncCall objects describing the
// invocations of this function.
func (f *GitserverService_ExecServerSetTrailerFunc) History() []GitserverService_ExecServerSetTrailerFuncCall {
f.mutex.Lock()
history := make([]GitserverService_ExecServerSetTrailerFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// GitserverService_ExecServerSetTrailerFuncCall is an object that describes
// an invocation of method SetTrailer on an instance of
// MockGitserverService_ExecServer.
type GitserverService_ExecServerSetTrailerFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 metadata.MD
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c GitserverService_ExecServerSetTrailerFuncCall) Args() []interface{} {
return []interface{}{c.Arg0}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c GitserverService_ExecServerSetTrailerFuncCall) Results() []interface{} {
return []interface{}{}
}

View File

@ -15,6 +15,7 @@ import (
diff "github.com/sourcegraph/go-diff/diff"
api "github.com/sourcegraph/sourcegraph/internal/api"
gitolite "github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
gitdomain "github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
protocol "github.com/sourcegraph/sourcegraph/internal/gitserver/protocol"
perforce "github.com/sourcegraph/sourcegraph/internal/perforce"
@ -117,6 +118,9 @@ type MockClient struct {
// ListDirectoryChildrenFunc is an instance of a mock function object
// controlling the behavior of the method ListDirectoryChildren.
ListDirectoryChildrenFunc *ClientListDirectoryChildrenFunc
// ListGitoliteReposFunc is an instance of a mock function object
// controlling the behavior of the method ListGitoliteRepos.
ListGitoliteReposFunc *ClientListGitoliteReposFunc
// ListRefsFunc is an instance of a mock function object controlling the
// behavior of the method ListRefs.
ListRefsFunc *ClientListRefsFunc
@ -359,6 +363,11 @@ func NewMockClient() *MockClient {
return
},
},
ListGitoliteReposFunc: &ClientListGitoliteReposFunc{
defaultHook: func(context.Context, string) (r0 []*gitolite.Repo, r1 error) {
return
},
},
ListRefsFunc: &ClientListRefsFunc{
defaultHook: func(context.Context, api.RepoName) (r0 []gitdomain.Ref, r1 error) {
return
@ -656,6 +665,11 @@ func NewStrictMockClient() *MockClient {
panic("unexpected invocation of MockClient.ListDirectoryChildren")
},
},
ListGitoliteReposFunc: &ClientListGitoliteReposFunc{
defaultHook: func(context.Context, string) ([]*gitolite.Repo, error) {
panic("unexpected invocation of MockClient.ListGitoliteRepos")
},
},
ListRefsFunc: &ClientListRefsFunc{
defaultHook: func(context.Context, api.RepoName) ([]gitdomain.Ref, error) {
panic("unexpected invocation of MockClient.ListRefs")
@ -891,6 +905,9 @@ func NewMockClientFrom(i Client) *MockClient {
ListDirectoryChildrenFunc: &ClientListDirectoryChildrenFunc{
defaultHook: i.ListDirectoryChildren,
},
ListGitoliteReposFunc: &ClientListGitoliteReposFunc{
defaultHook: i.ListGitoliteRepos,
},
ListRefsFunc: &ClientListRefsFunc{
defaultHook: i.ListRefs,
},
@ -4387,6 +4404,114 @@ func (c ClientListDirectoryChildrenFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// ClientListGitoliteReposFunc describes the behavior when the
// ListGitoliteRepos method of the parent MockClient instance is invoked.
type ClientListGitoliteReposFunc struct {
defaultHook func(context.Context, string) ([]*gitolite.Repo, error)
hooks []func(context.Context, string) ([]*gitolite.Repo, error)
history []ClientListGitoliteReposFuncCall
mutex sync.Mutex
}
// ListGitoliteRepos delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockClient) ListGitoliteRepos(v0 context.Context, v1 string) ([]*gitolite.Repo, error) {
r0, r1 := m.ListGitoliteReposFunc.nextHook()(v0, v1)
m.ListGitoliteReposFunc.appendCall(ClientListGitoliteReposFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the ListGitoliteRepos
// method of the parent MockClient instance is invoked and the hook queue is
// empty.
func (f *ClientListGitoliteReposFunc) SetDefaultHook(hook func(context.Context, string) ([]*gitolite.Repo, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// ListGitoliteRepos method of the parent MockClient instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *ClientListGitoliteReposFunc) PushHook(hook func(context.Context, string) ([]*gitolite.Repo, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *ClientListGitoliteReposFunc) SetDefaultReturn(r0 []*gitolite.Repo, r1 error) {
f.SetDefaultHook(func(context.Context, string) ([]*gitolite.Repo, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *ClientListGitoliteReposFunc) PushReturn(r0 []*gitolite.Repo, r1 error) {
f.PushHook(func(context.Context, string) ([]*gitolite.Repo, error) {
return r0, r1
})
}
func (f *ClientListGitoliteReposFunc) nextHook() func(context.Context, string) ([]*gitolite.Repo, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *ClientListGitoliteReposFunc) appendCall(r0 ClientListGitoliteReposFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of ClientListGitoliteReposFuncCall objects
// describing the invocations of this function.
func (f *ClientListGitoliteReposFunc) History() []ClientListGitoliteReposFuncCall {
f.mutex.Lock()
history := make([]ClientListGitoliteReposFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// ClientListGitoliteReposFuncCall is an object that describes an invocation
// of method ListGitoliteRepos on an instance of MockClient.
type ClientListGitoliteReposFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 []*gitolite.Repo
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c ClientListGitoliteReposFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c ClientListGitoliteReposFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// ClientListRefsFunc describes the behavior when the ListRefs method of the
// parent MockClient instance is invoked.
type ClientListRefsFunc struct {

View File

@ -1,56 +0,0 @@
package gitserver
import (
"net/http"
"net/http/httputil"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/limiter"
"github.com/sourcegraph/sourcegraph/internal/trace"
"go.opentelemetry.io/otel/attribute"
)
// DefaultReverseProxy is the default ReverseProxy. It uses the same transport and HTTP
// limiter as the default client.
var DefaultReverseProxy = NewReverseProxy(defaultClient.Transport, defaultLimiter)
var defaultClient, _ = clientFactory.Client()
// NewReverseProxy returns a new gitserver.ReverseProxy instantiated with the given
// transport and HTTP limiter.
func NewReverseProxy(transport http.RoundTripper, httpLimiter limiter.Limiter) *ReverseProxy {
return &ReverseProxy{
Transport: transport,
HTTPLimiter: httpLimiter,
}
}
// ReverseProxy is a gitserver reverse proxy.
type ReverseProxy struct {
Transport http.RoundTripper
// Limits concurrency of outstanding HTTP posts
HTTPLimiter limiter.Limiter
}
// ServeHTTP creates a one-shot proxy with the given director and proxies the given request
// to gitserver. The director must rewrite the request to the correct gitserver address, which
// should be obtained via a gitserver client's AddrForRepo method.
func (p *ReverseProxy) ServeHTTP(repo api.RepoName, method, op string, director func(req *http.Request), res http.ResponseWriter, req *http.Request) {
tr, _ := trace.New(req.Context(), "ReverseProxy.ServeHTTP",
repo.Attr(),
attribute.String("method", method),
attribute.String("op", op))
defer tr.End()
p.HTTPLimiter.Acquire()
defer p.HTTPLimiter.Release()
tr.AddEvent("Acquired HTTP limiter")
proxy := &httputil.ReverseProxy{
Director: director,
Transport: p.Transport,
}
proxy.ServeHTTP(res, req)
}

View File

@ -306,7 +306,7 @@ func TestCommitScanner(t *testing.T) {
cmd := exec.Command("git", (&CommitSearcher{IncludeModifiedFiles: includeModifiedFiles}).gitArgs()...)
cmd.Dir = dir
cmd.Stdout = &buf
cmd.Run()
require.NoError(t, cmd.Run())
return buf.Bytes()
}

View File

@ -189,6 +189,7 @@ go_test(
"//lib/errors",
"//lib/pointers",
"//schema",
"@com_github_derision_test_go_mockgen//testutil/require",
"@com_github_google_go_cmp//cmp",
"@com_github_google_go_cmp//cmp/cmpopts",
"@com_github_google_uuid//:uuid",

View File

@ -8,7 +8,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/conf/reposource"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/jsonc"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -21,15 +20,11 @@ type GitoliteSource struct {
svc *types.ExternalService
conn *schema.GitoliteConnection
excluder repoExcluder
// gitoliteLister allows us to list Gitolite repos. In practice, we ask
// gitserver to talk to gitolite because it holds the ssh keys required for
// authentication.
lister *gitserver.GitoliteLister
gc gitserver.Client
}
// NewGitoliteSource returns a new GitoliteSource from the given external service.
func NewGitoliteSource(ctx context.Context, svc *types.ExternalService, cf *httpcli.Factory) (*GitoliteSource, error) {
func NewGitoliteSource(ctx context.Context, svc *types.ExternalService, gc gitserver.Client) (*GitoliteSource, error) {
rawConfig, err := svc.Config.Decrypt(ctx)
if err != nil {
return nil, errors.Errorf("external service id=%d config error: %s", svc.ID, err)
@ -39,17 +34,6 @@ func NewGitoliteSource(ctx context.Context, svc *types.ExternalService, cf *http
return nil, errors.Wrapf(err, "external service id=%d config error", svc.ID)
}
gitoliteDoer, err := cf.Doer(
httpcli.NewMaxIdleConnsPerHostOpt(500),
// The provided httpcli.Factory is one used for external services - however,
// GitoliteSource asks gitserver to communicate to gitolite instead, so we
// have to ensure that the actor transport used for internal clients is provided.
httpcli.ActorTransportOpt,
)
if err != nil {
return nil, err
}
var ex repoExcluder
for _, r := range c.Exclude {
ex.AddRule().
@ -60,12 +44,10 @@ func NewGitoliteSource(ctx context.Context, svc *types.ExternalService, cf *http
return nil, err
}
lister := gitserver.NewGitoliteLister(gitoliteDoer)
return &GitoliteSource{
svc: svc,
conn: &c,
lister: lister,
gc: gc.Scoped("repos.gitolite"),
excluder: ex,
}, nil
}
@ -80,7 +62,7 @@ func (s *GitoliteSource) CheckConnection(ctx context.Context) error {
// ListRepos returns all Gitolite repositories accessible to all connections configured
// in Sourcegraph via the external services configuration.
func (s *GitoliteSource) ListRepos(ctx context.Context, results chan SourceResult) {
all, err := s.lister.ListRepos(ctx, s.conn.Host)
all, err := s.gc.ListGitoliteRepos(ctx, s.conn.Host)
if err != nil {
results <- SourceResult{Source: s, Err: err}
return
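// For reference, a hedged sketch of driving the reworked source end to end from within
// this package. The scope string and helper name are illustrative, and it assumes
// SourceResult carries the repo and error as in the surrounding code.
func listGitoliteRepos(ctx context.Context, svc *types.ExternalService) ([]*types.Repo, error) {
	// The source now reaches gitolite through gitserver's gRPC API instead of an HTTP lister.
	gc := gitserver.NewClient("repos.gitolite-example")
	src, err := NewGitoliteSource(ctx, svc, gc)
	if err != nil {
		return nil, err
	}

	results := make(chan SourceResult)
	go func() {
		src.ListRepos(ctx, results)
		close(results)
	}()

	var out []*types.Repo
	for res := range results {
		if res.Err != nil {
			return nil, res.Err
		}
		out = append(out, res.Repo)
	}
	return out, nil
}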

View File

@ -4,21 +4,33 @@ import (
"context"
"testing"
mockrequire "github.com/derision-test/go-mockgen/testutil/require"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/types/typestest"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestGitoliteSource(t *testing.T) {
cf, save := newClientFactoryWithOpt(t, "basic", httpcli.ExternalTransportOpt)
defer save(t)
gc := gitserver.NewMockClient()
gc.ScopedFunc.SetDefaultReturn(gc)
svc := typestest.MakeExternalService(t, extsvc.VariantGitolite, &schema.GitoliteConnection{})
ctx := context.Background()
_, err := NewGitoliteSource(ctx, svc, cf)
s, err := NewGitoliteSource(ctx, svc, gc)
if err != nil {
t.Fatal(err)
}
res := make(chan SourceResult)
go func() {
s.ListRepos(ctx, res)
close(res)
}()
for range res {
}
mockrequire.Called(t, gc.ListGitoliteReposFunc)
}

View File

@ -11,6 +11,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/auth"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -25,9 +26,9 @@ type Sourcer func(context.Context, *types.ExternalService) (Source, error)
// http.Clients needed to contact the respective upstream code host APIs.
//
// The provided decorator functions will be applied to the Source.
func NewSourcer(logger log.Logger, db database.DB, cf *httpcli.Factory, decs ...func(Source) Source) Sourcer {
func NewSourcer(logger log.Logger, db database.DB, cf *httpcli.Factory, gc gitserver.Client, decs ...func(Source) Source) Sourcer {
return func(ctx context.Context, svc *types.ExternalService) (Source, error) {
src, err := NewSource(ctx, logger.Scoped("source"), db, svc, cf)
src, err := NewSource(ctx, logger.Scoped("source"), db, svc, cf, gc)
if err != nil {
return nil, err
}
@ -41,7 +42,10 @@ func NewSourcer(logger log.Logger, db database.DB, cf *httpcli.Factory, decs ...
}
// NewSource returns a repository yielding Source from the given ExternalService configuration.
func NewSource(ctx context.Context, logger log.Logger, db database.DB, svc *types.ExternalService, cf *httpcli.Factory) (Source, error) {
func NewSource(ctx context.Context, logger log.Logger, db database.DB, svc *types.ExternalService, cf *httpcli.Factory, gc gitserver.Client) (Source, error) {
if gc == nil {
gc = gitserver.NewClient("repos.sourcer")
}
switch strings.ToUpper(svc.Kind) {
case extsvc.KindGitHub:
return NewGitHubSource(ctx, logger.Scoped("GithubSource"), db, svc, cf)
@ -56,7 +60,7 @@ func NewSource(ctx context.Context, logger log.Logger, db database.DB, svc *type
case extsvc.KindBitbucketCloud:
return NewBitbucketCloudSource(ctx, logger.Scoped("BitbucketCloudSource"), svc, cf)
case extsvc.KindGitolite:
return NewGitoliteSource(ctx, svc, cf)
return NewGitoliteSource(ctx, svc, gc)
case extsvc.KindPhabricator:
return NewPhabricatorSource(ctx, logger.Scoped("PhabricatorSource"), svc, cf)
case extsvc.KindAWSCodeCommit:

View File

@ -17,7 +17,9 @@ import (
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/extsvc/gitolite"
"github.com/sourcegraph/sourcegraph/internal/extsvc/phabricator"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/httptestutil"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
@ -121,7 +123,7 @@ func TestSources_ListRepos_YieldExistingRepos(t *testing.T) {
cf, save := NewClientFactory(t, name)
defer save(t)
repos := listRepos(t, cf, tc.svc)
repos := listRepos(t, cf, nil, tc.svc)
var haveNames []string
for _, r := range repos {
@ -140,10 +142,6 @@ func TestSources_ListRepos_Excluded(t *testing.T) {
conf.Mock(&conf.Unified{
ServiceConnectionConfig: conftypes.ServiceConnections{
GitServers: []string{"127.0.0.1:3178"},
}, SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
defer conf.Mock(nil)
@ -151,9 +149,18 @@ func TestSources_ListRepos_Excluded(t *testing.T) {
rcache.SetupForTest(t)
ratelimit.SetupForTest(t)
gitoliteGs := gitserver.NewMockClient()
gitoliteGs.ScopedFunc.SetDefaultReturn(gitoliteGs)
gitoliteGs.ListGitoliteReposFunc.SetDefaultReturn([]*gitolite.Repo{
{Name: "baz", URL: "gitolite.mycorp.com/baz.git"},
{Name: "foo", URL: "gitolite.mycorp.com/foo.git"},
{Name: "testing", URL: "gitolite.mycorp.com/testing.git"},
}, nil)
tests := []struct {
svc *types.ExternalService
wantNames []string
gc gitserver.Client
}{
{
svc: typestest.MakeExternalService(t, extsvc.VariantGitHub, &schema.GitHubConnection{
@ -267,6 +274,7 @@ func TestSources_ListRepos_Excluded(t *testing.T) {
"gitolite.mycorp.com/foo",
"gitolite.mycorp.com/testing",
},
gc: gitoliteGs,
},
}
@ -277,7 +285,7 @@ func TestSources_ListRepos_Excluded(t *testing.T) {
cf, save := NewClientFactory(t, name)
defer save(t)
repos := listRepos(t, cf, tc.svc)
repos := listRepos(t, cf, tc.gc, tc.svc)
var haveNames []string
for _, r := range repos {
@ -297,10 +305,6 @@ func TestSources_ListRepos_RepositoryPathPattern(t *testing.T) {
conf.Mock(&conf.Unified{
ServiceConnectionConfig: conftypes.ServiceConnections{
GitServers: []string{"127.0.0.1:3178"},
}, SiteConfiguration: schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
EnableGRPC: boolPointer(false),
},
},
})
defer conf.Mock(nil)
@ -308,10 +312,21 @@ func TestSources_ListRepos_RepositoryPathPattern(t *testing.T) {
ratelimit.SetupForTest(t)
rcache.SetupForTest(t)
gitoliteGs := gitserver.NewMockClient()
gitoliteGs.ScopedFunc.SetDefaultReturn(gitoliteGs)
gitoliteGs.ListGitoliteReposFunc.SetDefaultReturn([]*gitolite.Repo{
{Name: "bar", URL: "gitolite.mycorp.com/bar.git"},
{Name: "baz", URL: "gitolite.mycorp.com/baz.git"},
{Name: "foo", URL: "gitolite.mycorp.com/foo.git"},
{Name: "gitolite-admin", URL: "gitolite.mycorp.com/gitolite-admin.git"},
{Name: "testing", URL: "gitolite.mycorp.com/testing.git"},
}, nil)
tests := []struct {
svc *types.ExternalService
wantNames []string
wantURIs []string
gc gitserver.Client
}{
{
svc: typestest.MakeExternalService(t, extsvc.VariantGitHub, &schema.GitHubConnection{
@ -394,6 +409,7 @@ func TestSources_ListRepos_RepositoryPathPattern(t *testing.T) {
"gitolite.mycorp.com/gitolite-admin",
"gitolite.mycorp.com/testing",
},
gc: gitoliteGs,
},
}
@ -404,7 +420,7 @@ func TestSources_ListRepos_RepositoryPathPattern(t *testing.T) {
cf, save := NewClientFactory(t, name)
defer save(t)
repos := listRepos(t, cf, tc.svc)
repos := listRepos(t, cf, tc.gc, tc.svc)
var haveURIs, haveNames []string
for _, r := range repos {
@ -431,7 +447,7 @@ func TestSources_Phabricator(t *testing.T) {
Token: os.Getenv("PHABRICATOR_TOKEN"),
})
repos := listRepos(t, cf, svc)
repos := listRepos(t, cf, nil, svc)
if len(repos) == 0 {
t.Fatalf("no repos yielded")
@ -491,7 +507,7 @@ func TestSources_ListRepos_GitLab_NameTransformations(t *testing.T) {
},
})
repos := listRepos(t, cf, svc)
repos := listRepos(t, cf, nil, svc)
haveNames := types.Repos(repos).Names()
sort.Strings(haveNames)
@ -520,7 +536,7 @@ func TestSources_ListRepos_BitbucketServer_Archived(t *testing.T) {
Repos: []string{"sour/vegeta", "PUBLIC/archived-repo"},
})
repos := listRepos(t, cf, svc)
repos := listRepos(t, cf, nil, svc)
wantArchived := map[string]bool{
"vegeta": false,
@ -537,13 +553,13 @@ func TestSources_ListRepos_BitbucketServer_Archived(t *testing.T) {
}
}
func listRepos(t *testing.T, cf *httpcli.Factory, svc *types.ExternalService) []*types.Repo {
func listRepos(t *testing.T, cf *httpcli.Factory, gc gitserver.Client, svc *types.ExternalService) []*types.Repo {
t.Helper()
ctx := context.Background()
logger := logtest.NoOp(t)
sourcer := NewSourcer(logger, dbmocks.NewMockDB(), cf)
sourcer := NewSourcer(logger, dbmocks.NewMockDB(), cf, gc)
src, err := sourcer(ctx, svc)
if err != nil {

View File

@ -1,7 +1,6 @@
package repos
import (
"net/http"
"path/filepath"
"strings"
"testing"
@ -40,22 +39,10 @@ func Update(name string) bool {
func TestClientFactorySetup(t testing.TB, name string, mws ...httpcli.Middleware) (httpcli.Middleware, *recorder.Recorder) {
cassete := filepath.Join("testdata", "sources", strings.ReplaceAll(name, " ", "-"))
rec := NewRecorder(t, cassete, Update(name))
mws = append(mws, GitserverRedirectMiddleware)
mw := httpcli.NewMiddleware(mws...)
return mw, rec
}
func GitserverRedirectMiddleware(cli httpcli.Doer) httpcli.Doer {
return httpcli.DoerFunc(func(req *http.Request) (*http.Response, error) {
if req.URL.Hostname() == "gitserver" {
// Start local git server first
req.URL.Host = "127.0.0.1:3178"
req.URL.Scheme = "http"
}
return cli.Do(req)
})
}
func NewRecorder(t testing.TB, file string, record bool) *recorder.Recorder {
rec, err := httptestutil.NewRecorder(file, record, func(i *cassette.Interaction) error {
// The ratelimit.Monitor type resets its internal timestamp if it's

View File

@ -1,22 +0,0 @@
---
version: 1
interactions:
- request:
body: ""
form: {}
headers: {}
url: http://127.0.0.1:3178/list-gitolite?gitolite=ssh%3A%2F%2Fgit%40127.0.0.1%3A2222
method: GET
response:
body: |
[{"Name":"bar","URL":"ssh://git@127.0.0.1:2222:bar"},{"Name":"baz","URL":"ssh://git@127.0.0.1:2222:baz"},{"Name":"foo","URL":"ssh://git@127.0.0.1:2222:foo"},{"Name":"gitolite-admin","URL":"ssh://git@127.0.0.1:2222:gitolite-admin"},{"Name":"testing","URL":"ssh://git@127.0.0.1:2222:testing"}]
headers:
Content-Length:
- "292"
Content-Type:
- text/plain; charset=utf-8
Date:
- Tue, 07 May 2019 16:03:13 GMT
status: 200 OK
code: 200
duration: ""

View File

@ -1,22 +0,0 @@
---
version: 1
interactions:
- request:
body: ""
form: {}
headers: {}
url: http://127.0.0.1:3178/list-gitolite?gitolite=ssh%3A%2F%2Fgit%40127.0.0.1%3A2222
method: GET
response:
body: |
[{"Name":"bar","URL":"ssh://git@127.0.0.1:2222:bar"},{"Name":"baz","URL":"ssh://git@127.0.0.1:2222:baz"},{"Name":"foo","URL":"ssh://git@127.0.0.1:2222:foo"},{"Name":"gitolite-admin","URL":"ssh://git@127.0.0.1:2222:gitolite-admin"},{"Name":"testing","URL":"ssh://git@127.0.0.1:2222:testing"}]
headers:
Content-Length:
- "292"
Content-Type:
- text/plain; charset=utf-8
Date:
- Tue, 07 May 2019 16:45:47 GMT
status: 200 OK
code: 200
duration: ""

View File

@ -1,22 +0,0 @@
---
version: 1
interactions:
- request:
body: ""
form: {}
headers: {}
url: http://127.0.0.1:3178/list-gitolite?gitolite=ssh%3A%2F%2Fgit%40127.0.0.1%3A2222
method: GET
response:
body: |
[{"Name":"bar","URL":"ssh://git@127.0.0.1:2222:bar"},{"Name":"baz","URL":"ssh://git@127.0.0.1:2222:baz"},{"Name":"foo","URL":"ssh://git@127.0.0.1:2222:foo"},{"Name":"gitolite-admin","URL":"ssh://git@127.0.0.1:2222:gitolite-admin"},{"Name":"testing","URL":"ssh://git@127.0.0.1:2222:testing"}]
headers:
Content-Length:
- "292"
Content-Type:
- text/plain; charset=utf-8
Date:
- Fri, 03 May 2019 14:24:37 GMT
status: 200 OK
code: 200
duration: ""

View File

@ -1,4 +1,3 @@
load("//dev:go_defs.bzl", "go_test")
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
@ -12,9 +11,7 @@ go_library(
visibility = ["//:__subpackages__"],
deps = [
"//internal/api",
"//internal/conf",
"//internal/grpc/defaults",
"//internal/httpcli",
"//internal/repoupdater/protocol",
"//internal/repoupdater/v1:repoupdater",
"//internal/trace",
@ -26,10 +23,3 @@ go_library(
"@org_golang_google_grpc//status",
],
)
go_test(
name = "repoupdater_test",
timeout = "short",
srcs = ["client_test.go"],
embed = [":repoupdater"],
)

View File

@ -1,12 +1,8 @@
package repoupdater
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"os"
"sync"
@ -17,9 +13,7 @@ import (
"google.golang.org/grpc/status"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/repoupdater/protocol"
proto "github.com/sourcegraph/sourcegraph/internal/repoupdater/v1"
"github.com/sourcegraph/sourcegraph/internal/trace"
@ -27,8 +21,6 @@ import (
)
var (
defaultDoer, _ = httpcli.NewInternalClientFactory("repoupdater").Doer()
// DefaultClient is the default Client. Unless overwritten, it is
// connected to the server specified by the REPO_UPDATER_URL
// environment variable.
@ -45,12 +37,6 @@ func repoUpdaterURLDefault() string {
// Client is a repoupdater client.
type Client struct {
// URL to repoupdater server.
URL string
// HTTP client to use
HTTPClient httpcli.Doer
// grpcClient is a function that lazily creates a grpc client.
// Any implementation should not recreate the client more than once.
grpcClient func() (proto.RepoUpdaterServiceClient, error)
@ -59,8 +45,6 @@ type Client struct {
// NewClient will initialize a new repoupdater Client with the given serverURL.
func NewClient(serverURL string) *Client {
return &Client{
URL: serverURL,
HTTPClient: defaultDoer,
grpcClient: sync.OnceValues(func() (proto.RepoUpdaterServiceClient, error) {
u, err := url.Parse(serverURL)
if err != nil {
@ -84,31 +68,16 @@ func (c *Client) RepoUpdateSchedulerInfo(
ctx context.Context,
args protocol.RepoUpdateSchedulerInfoArgs,
) (result *protocol.RepoUpdateSchedulerInfoResult, err error) {
if conf.IsGRPCEnabled(ctx) {
client, err := c.grpcClient()
if err != nil {
return nil, err
}
req := &proto.RepoUpdateSchedulerInfoRequest{Id: int32(args.ID)}
resp, err := client.RepoUpdateSchedulerInfo(ctx, req)
if err != nil {
return nil, err
}
return protocol.RepoUpdateSchedulerInfoResultFromProto(resp), nil
}
resp, err := c.httpPost(ctx, "repo-update-scheduler-info", args)
client, err := c.grpcClient()
if err != nil {
return nil, err
}
if resp.StatusCode != http.StatusOK {
stack := fmt.Sprintf("RepoScheduleInfo: %+v", args)
return nil, errors.Wrap(errors.Errorf("http status %d", resp.StatusCode), stack)
req := &proto.RepoUpdateSchedulerInfoRequest{Id: int32(args.ID)}
resp, err := client.RepoUpdateSchedulerInfo(ctx, req)
if err != nil {
return nil, err
}
defer resp.Body.Close()
err = json.NewDecoder(resp.Body).Decode(&result)
return result, err
return protocol.RepoUpdateSchedulerInfoResultFromProto(resp), nil
}
// MockRepoLookup mocks (*Client).RepoLookup for tests.
@ -132,75 +101,29 @@ func (c *Client) RepoLookup(
tr.EndWithErr(&err)
}()
if conf.IsGRPCEnabled(ctx) {
client, err := c.grpcClient()
if err != nil {
return nil, err
}
resp, err := client.RepoLookup(ctx, args.ToProto())
if err != nil {
return nil, errors.Wrapf(err, "RepoLookup for %+v failed", args)
}
res := protocol.RepoLookupResultFromProto(resp)
switch {
case resp.GetErrorNotFound():
return res, &ErrNotFound{Repo: args.Repo, IsNotFound: true}
case resp.GetErrorUnauthorized():
return res, &ErrUnauthorized{Repo: args.Repo, NoAuthz: true}
case resp.GetErrorTemporarilyUnavailable():
return res, &ErrTemporary{Repo: args.Repo, IsTemporary: true}
case resp.GetErrorRepoDenied() != "":
return res, &ErrRepoDenied{
Repo: args.Repo,
Reason: resp.GetErrorRepoDenied(),
}
}
return res, nil
}
resp, err := c.httpPost(ctx, "repo-lookup", args)
client, err := c.grpcClient()
if err != nil {
return nil, err
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
// best-effort inclusion of body in error message
body, _ := io.ReadAll(io.LimitReader(resp.Body, 200))
return nil, errors.Errorf(
"RepoLookup for %+v failed with http status %d: %s",
args,
resp.StatusCode,
string(body),
)
resp, err := client.RepoLookup(ctx, args.ToProto())
if err != nil {
return nil, errors.Wrapf(err, "RepoLookup for %+v failed", args)
}
err = json.NewDecoder(resp.Body).Decode(&result)
if err == nil && result != nil {
switch {
case result.ErrorNotFound:
err = &ErrNotFound{
Repo: args.Repo,
IsNotFound: true,
}
case result.ErrorUnauthorized:
err = &ErrUnauthorized{
Repo: args.Repo,
NoAuthz: true,
}
case result.ErrorTemporarilyUnavailable:
err = &ErrTemporary{
Repo: args.Repo,
IsTemporary: true,
}
case result.ErrorRepoDenied != "":
err = &ErrRepoDenied{
Repo: args.Repo,
Reason: result.ErrorRepoDenied,
}
res := protocol.RepoLookupResultFromProto(resp)
switch {
case resp.GetErrorNotFound():
return res, &ErrNotFound{Repo: args.Repo, IsNotFound: true}
case resp.GetErrorUnauthorized():
return res, &ErrUnauthorized{Repo: args.Repo, NoAuthz: true}
case resp.GetErrorTemporarilyUnavailable():
return res, &ErrTemporary{Repo: args.Repo, IsTemporary: true}
case resp.GetErrorRepoDenied() != "":
return res, &ErrRepoDenied{
Repo: args.Repo,
Reason: resp.GetErrorRepoDenied(),
}
}
return result, err
return res, nil
}
// MockEnqueueRepoUpdate mocks (*Client).EnqueueRepoUpdate for tests.
@ -213,50 +136,22 @@ func (c *Client) EnqueueRepoUpdate(ctx context.Context, repo api.RepoName) (*pro
return MockEnqueueRepoUpdate(ctx, repo)
}
if conf.IsGRPCEnabled(ctx) {
client, err := c.grpcClient()
if err != nil {
return nil, err
}
req := proto.EnqueueRepoUpdateRequest{Repo: string(repo)}
resp, err := client.EnqueueRepoUpdate(ctx, &req)
if err != nil {
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, &repoNotFoundError{repo: string(repo), responseBody: s.Message()}
}
return nil, err
}
return protocol.RepoUpdateResponseFromProto(resp), nil
}
req := &protocol.RepoUpdateRequest{
Repo: repo,
}
resp, err := c.httpPost(ctx, "enqueue-repo-update", req)
client, err := c.grpcClient()
if err != nil {
return nil, err
}
defer resp.Body.Close()
bs, err := io.ReadAll(resp.Body)
req := proto.EnqueueRepoUpdateRequest{Repo: string(repo)}
resp, err := client.EnqueueRepoUpdate(ctx, &req)
if err != nil {
return nil, errors.Wrap(err, "failed to read response body")
}
if s, ok := status.FromError(err); ok && s.Code() == codes.NotFound {
return nil, &repoNotFoundError{repo: string(repo), responseBody: s.Message()}
}
var res protocol.RepoUpdateResponse
if resp.StatusCode == http.StatusNotFound {
return nil, &repoNotFoundError{string(repo), string(bs)}
} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return nil, errors.New(string(bs))
} else if err = json.Unmarshal(bs, &res); err != nil {
return nil, err
}
return &res, nil
return protocol.RepoUpdateResponseFromProto(resp), nil
}
type repoNotFoundError struct {
@ -277,66 +172,12 @@ func (c *Client) EnqueueChangesetSync(ctx context.Context, ids []int64) error {
return MockEnqueueChangesetSync(ctx, ids)
}
if conf.IsGRPCEnabled(ctx) {
client, err := c.grpcClient()
if err != nil {
return err
}
// empty response can be ignored
_, err = client.EnqueueChangesetSync(ctx, &proto.EnqueueChangesetSyncRequest{Ids: ids})
return err
}
req := protocol.ChangesetSyncRequest{IDs: ids}
resp, err := c.httpPost(ctx, "enqueue-changeset-sync", req)
client, err := c.grpcClient()
if err != nil {
return err
}
defer resp.Body.Close()
bs, err := io.ReadAll(resp.Body)
if err != nil {
return errors.Wrap(err, "failed to read response body")
}
var res protocol.ChangesetSyncResponse
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
return errors.New(string(bs))
} else if err = json.Unmarshal(bs, &res); err != nil {
return err
}
if res.Error == "" {
return nil
}
return errors.New(res.Error)
}
func (c *Client) httpPost(ctx context.Context, method string, payload any) (resp *http.Response, err error) {
reqBody, err := json.Marshal(payload)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", c.URL+"/"+method, bytes.NewReader(reqBody))
if err != nil {
return nil, err
}
return c.do(ctx, req)
}
func (c *Client) do(ctx context.Context, req *http.Request) (_ *http.Response, err error) {
tr, ctx := trace.New(ctx, "repoupdater.do")
defer tr.EndWithErr(&err)
req.Header.Set("Content-Type", "application/json")
req = req.WithContext(ctx)
if c.HTTPClient != nil {
return c.HTTPClient.Do(req)
}
return http.DefaultClient.Do(req)
// empty response can be ignored
_, err = client.EnqueueChangesetSync(ctx, &proto.EnqueueChangesetSyncRequest{Ids: ids})
return err
}
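// The exported surface of the client is unchanged for callers; only the transport is
// now gRPC, dialed lazily from the configured URL. A short usage sketch; the address
// and repository name are placeholders.
func lookupExample(ctx context.Context) (*protocol.RepoLookupResult, error) {
	c := NewClient("http://repo-updater:3182") // parsed for its host; the gRPC connection is created on first use
	// Not-found, auth, and temporary failures surface as typed errors (ErrNotFound, ErrUnauthorized, ErrTemporary).
	return c.RepoLookup(ctx, protocol.RepoLookupArgs{
		Repo: api.RepoName("github.com/example/repo"),
	})
}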

View File

@ -1,14 +0,0 @@
package repoupdater
import "testing"
func TestNewClient(t *testing.T) {
t.Run("successful creation of client with custom URL", func(t *testing.T) {
expected := "foo"
c := NewClient(expected)
if c.URL != expected {
t.Errorf("Expected URL %q, but got %q", expected, c.URL)
}
})
}

View File

@ -15,6 +15,7 @@ go_library(
"//internal/endpoint",
"//internal/gitserver",
"//internal/gitserver/gitdomain",
"//internal/grpc/defaults",
"//internal/search",
"//internal/search/query",
"//internal/search/repos",

View File

@ -17,6 +17,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/endpoint"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/search"
"github.com/sourcegraph/sourcegraph/internal/search/query"
searchrepos "github.com/sourcegraph/sourcegraph/internal/search/repos"
@ -25,10 +26,11 @@ import (
)
type Observer struct {
Logger log.Logger
Db database.DB
Zoekt zoekt.Streamer
Searcher *endpoint.Map
SearcherGRPCConnectionCache *defaults.ConnectionCache
// Inputs are used to generate alert messages based on the query.
*search.Inputs
@ -47,7 +49,7 @@ type Observer struct {
// raising NoResolvedRepos alerts with suggestions when we know the original
// query does not contain any repos to search.
func (o *Observer) reposExist(ctx context.Context, options search.RepoOptions) bool {
repositoryResolver := searchrepos.NewResolver(o.Logger, o.Db, gitserver.NewClient("search.alertobserver"), o.Searcher, o.Zoekt)
repositoryResolver := searchrepos.NewResolver(o.Logger, o.Db, gitserver.NewClient("search.alertobserver"), o.Searcher, o.SearcherGRPCConnectionCache, o.Zoekt)
it := repositoryResolver.Iterator(ctx, options)
for it.Next() {
resolved := it.Current()
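SearcherGRPCConnectionCache is threaded through so that resolvers and jobs reuse one gRPC connection per searcher endpoint instead of redialing. A simplified sketch of that idea, under the assumption that it is essentially a keyed cache of *grpc.ClientConn values; this is not the actual defaults.ConnectionCache implementation, and the address is a placeholder.

// Sketch: a minimal per-endpoint gRPC connection cache.
package main

import (
	"sync"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

type connCache struct {
	mu    sync.Mutex
	conns map[string]*grpc.ClientConn
}

func newConnCache() *connCache {
	return &connCache{conns: make(map[string]*grpc.ClientConn)}
}

// GetConnection returns a cached connection for addr, dialing on first use.
func (c *connCache) GetConnection(addr string) (*grpc.ClientConn, error) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if conn, ok := c.conns[addr]; ok {
		return conn, nil
	}
	conn, err := grpc.Dial(addr, grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		return nil, err
	}
	c.conns[addr] = conn
	return conn, nil
}

func main() {
	cache := newConnCache()
	if _, err := cache.GetConnection("searcher-0:3181"); err != nil {
		panic(err)
	}
}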

View File

@ -25,7 +25,6 @@ go_library(
"//internal/ctags_config",
"//internal/grpc/defaults",
"//internal/honey",
"//internal/httpcli",
"//internal/trace",
"//internal/types",
"//lib/errors",
@ -41,7 +40,6 @@ go_library(
"@com_github_sourcegraph_zoekt//cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1:configuration",
"@com_github_sourcegraph_zoekt//grpc/protos/zoekt/webserver/v1:webserver",
"@com_github_sourcegraph_zoekt//query",
"@com_github_sourcegraph_zoekt//rpc",
"@com_github_sourcegraph_zoekt//stream",
"@io_opentelemetry_go_otel//attribute",
"@org_golang_google_grpc//:go_default_library",

View File

@ -9,49 +9,8 @@ import (
"github.com/sourcegraph/zoekt/query"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
"github.com/sourcegraph/sourcegraph/internal/conf"
)
// switchableZoektGRPCClient is a zoekt.Streamer that can switch between
// gRPC and HTTP backends.
type switchableZoektGRPCClient struct {
httpClient zoekt.Streamer
grpcClient zoekt.Streamer
}
func (c *switchableZoektGRPCClient) StreamSearch(ctx context.Context, q query.Q, opts *zoekt.SearchOptions, sender zoekt.Sender) error {
if conf.IsGRPCEnabled(ctx) {
return c.grpcClient.StreamSearch(ctx, q, opts, sender)
} else {
return c.httpClient.StreamSearch(ctx, q, opts, sender)
}
}
func (c *switchableZoektGRPCClient) Search(ctx context.Context, q query.Q, opts *zoekt.SearchOptions) (*zoekt.SearchResult, error) {
if conf.IsGRPCEnabled(ctx) {
return c.grpcClient.Search(ctx, q, opts)
} else {
return c.httpClient.Search(ctx, q, opts)
}
}
func (c *switchableZoektGRPCClient) List(ctx context.Context, q query.Q, opts *zoekt.ListOptions) (*zoekt.RepoList, error) {
if conf.IsGRPCEnabled(ctx) {
return c.grpcClient.List(ctx, q, opts)
} else {
return c.httpClient.List(ctx, q, opts)
}
}
func (c *switchableZoektGRPCClient) Close() {
c.httpClient.Close()
}
func (c *switchableZoektGRPCClient) String() string {
return c.httpClient.String()
}
// zoektGRPCClient is a zoekt.Streamer that uses gRPC for its RPC layer
type zoektGRPCClient struct {
endpoint string

View File

@ -60,13 +60,6 @@ func (s *HorizontalSearcher) StreamSearch(ctx context.Context, q query.Q, opts *
var mu sync.Mutex
dedupper := dedupper{}
// GobCache exists, so we only pay the cost of marshalling a query once
// when we aggregate it out over all the replicas. Zoekt's RPC layers
// unwrap this before passing it on to the Zoekt evaluation layers.
if !conf.IsGRPCEnabled(ctx) {
q = &query.GobCache{Q: q}
}
ch := make(chan error, len(clients))
for endpoint, c := range clients {
go func(endpoint string, c zoekt.Streamer) {
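For context, the loop above fans a single streaming search out over every replica and collects errors on a channel. A condensed sketch of that fan-out pattern with a stand-in Streamer interface; the real HorizontalSearcher additionally guards the sender with a mutex and deduplicates results across replicas.

// Sketch: fan one streaming search out over all replicas, return first error.
// Streamer is a stand-in for zoekt.Streamer; send must be safe for concurrent use.
package fanout

import (
	"context"
	"fmt"
)

type Result struct{ Files int }

type Streamer interface {
	StreamSearch(ctx context.Context, query string, send func(*Result)) error
}

func streamAll(ctx context.Context, clients map[string]Streamer, query string, send func(*Result)) error {
	ch := make(chan error, len(clients))
	for endpoint, c := range clients {
		go func(endpoint string, c Streamer) {
			err := c.StreamSearch(ctx, query, send)
			if err != nil {
				err = fmt.Errorf("%s: %w", endpoint, err)
			}
			ch <- err
		}(endpoint, c)
	}
	// Wait for every replica; surface the first failure.
	var firstErr error
	for range clients {
		if err := <-ch; err != nil && firstErr == nil {
			firstErr = err
		}
	}
	return firstErr
}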

View File

@ -6,30 +6,11 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/zoekt"
proto "github.com/sourcegraph/zoekt/grpc/protos/zoekt/webserver/v1"
"github.com/sourcegraph/zoekt/rpc"
zoektstream "github.com/sourcegraph/zoekt/stream"
"google.golang.org/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
)
// We don't use the normal factory for internal requests because we disable
// retries. Currently our retry framework copies the full body on every
// request, this is prohibitive when zoekt generates a large query.
//
// Once our retry framework supports the use of Request.GetBody we can switch
// back to the normal internal request factory.
var zoektHTTPClient, _ = httpcli.NewFactory(
httpcli.NewMiddleware(
httpcli.ContextErrorMiddleware,
),
httpcli.NewMaxIdleConnsPerHostOpt(500),
// This will also generate a metric named "src_zoekt_webserver_requests_total".
httpcli.MeteredTransportOpt("zoekt_webserver"),
httpcli.TracedTransportOpt,
).Client()
// ZoektStreamFunc is a convenience function to create a stream receiver from a
// function.
type ZoektStreamFunc func(*zoekt.SearchResult)
@ -92,29 +73,14 @@ func (c *cachedStreamerCloser) Close() {
c.Streamer.Close()
}
// ZoektDial connects to a Searcher HTTP RPC server at address (host:port).
func ZoektDial(endpoint string) zoekt.Streamer {
return &switchableZoektGRPCClient{
httpClient: ZoektDialHTTP(endpoint),
grpcClient: ZoektDialGRPC(endpoint),
}
}
// ZoektDialHTTP connects to a Searcher HTTP RPC server at address (host:port).
func ZoektDialHTTP(endpoint string) zoekt.Streamer {
client := rpc.Client(endpoint)
streamClient := zoektstream.NewClient("http://"+endpoint, zoektHTTPClient).WithSearcher(client)
return NewMeteredSearcher(endpoint, streamClient)
}
// maxRecvMsgSize is the max message size we can receive from Zoekt without erroring.
// By default, this caps at 4MB, but Zoekt can send payloads significantly larger
// than that depending on the type of search being executed.
// 128MiB is a best guess at reasonable size that will rarely fail.
const maxRecvMsgSize = 128 * 1024 * 1024 // 128MiB
// ZoektDialGRPC connects to a Searcher gRPC server at address (host:port).
func ZoektDialGRPC(endpoint string) zoekt.Streamer {
// ZoektDial connects to a Searcher gRPC server at address (host:port).
func ZoektDial(endpoint string) zoekt.Streamer {
conn, err := defaults.Dial(
endpoint,
log.Scoped("zoekt"),

View File

@ -96,7 +96,7 @@ func (j *SearchJob) Run(ctx context.Context, clients job.RuntimeClients, stream
return doSearch(args)
}
repos := searchrepos.NewResolver(clients.Logger, clients.DB, clients.Gitserver, clients.SearcherURLs, clients.Zoekt)
repos := searchrepos.NewResolver(clients.Logger, clients.DB, clients.Gitserver, clients.SearcherURLs, clients.SearcherGRPCConnectionCache, clients.Zoekt)
it := repos.Iterator(ctx, j.RepoOpts)
p := pool.New().WithContext(ctx).WithMaxGoroutines(4).WithFirstError()

View File

@ -33,6 +33,7 @@ go_library(
"//internal/featureflag",
"//internal/gitserver",
"//internal/gitserver/gitdomain",
"//internal/grpc/defaults",
"//internal/own/search",
"//internal/search",
"//internal/search/alert",

View File

@ -41,12 +41,13 @@ func (j *alertJob) Run(ctx context.Context, clients job.RuntimeClients, stream s
jobAlert, err := j.child.Run(ctx, clients, statsObserver)
ao := searchalert.Observer{
Logger: clients.Logger,
Db: clients.DB,
Zoekt: clients.Zoekt,
Searcher: clients.SearcherURLs,
SearcherGRPCConnectionCache: clients.SearcherGRPCConnectionCache,
Inputs: j.inputs,
HasResults: countingStream.Count() > 0,
}
if err != nil {
ao.Error(ctx, err)

View File

@ -105,5 +105,5 @@ func (e Exhaustive) ResolveRepositoryRevSpec(ctx context.Context, clients job.Ru
}
func reposNewResolver(clients job.RuntimeClients) *repos.Resolver {
return repos.NewResolver(clients.Logger, clients.DB, clients.Gitserver, clients.SearcherURLs, clients.Zoekt)
return repos.NewResolver(clients.Logger, clients.DB, clients.Gitserver, clients.SearcherURLs, clients.SearcherGRPCConnectionCache, clients.Zoekt)
}

View File

@ -12,6 +12,7 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/searcher/protocol"
"github.com/sourcegraph/sourcegraph/internal/endpoint"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/search"
"github.com/sourcegraph/sourcegraph/internal/search/job"
"github.com/sourcegraph/sourcegraph/internal/search/query"
@ -93,14 +94,14 @@ func (j *fileContainsFilterJob) Run(ctx context.Context, clients job.RuntimeClie
defer func() { finish(alert, err) }()
filteredStream := streaming.StreamFunc(func(event streaming.SearchEvent) {
event = j.filterEvent(ctx, clients.SearcherURLs, event)
event = j.filterEvent(ctx, clients.SearcherURLs, clients.SearcherGRPCConnectionCache, event)
stream.Send(event)
})
return j.child.Run(ctx, clients, filteredStream)
}
func (j *fileContainsFilterJob) filterEvent(ctx context.Context, searcherURLs *endpoint.Map, event streaming.SearchEvent) streaming.SearchEvent {
func (j *fileContainsFilterJob) filterEvent(ctx context.Context, searcherURLs *endpoint.Map, searcherGRPCConnectionCache *defaults.ConnectionCache, event streaming.SearchEvent) streaming.SearchEvent {
// Don't filter out files with zero chunks because if the file contained
// a result, we still want to return a match for the file even if it
// has no matched ranges left.
@ -110,7 +111,7 @@ func (j *fileContainsFilterJob) filterEvent(ctx context.Context, searcherURLs *e
case *result.FileMatch:
filtered = append(filtered, j.filterFileMatch(v))
case *result.CommitMatch:
cm := j.filterCommitMatch(ctx, searcherURLs, v)
cm := j.filterCommitMatch(ctx, searcherURLs, searcherGRPCConnectionCache, v)
if cm != nil {
filtered = append(filtered, cm)
}
@ -159,7 +160,7 @@ func matchesAny(val string, matchers []*regexp.Regexp) bool {
return false
}
func (j *fileContainsFilterJob) filterCommitMatch(ctx context.Context, searcherURLs *endpoint.Map, cm *result.CommitMatch) result.Match {
func (j *fileContainsFilterJob) filterCommitMatch(ctx context.Context, searcherURLs *endpoint.Map, searcherGRPCConnectionCache *defaults.ConnectionCache, cm *result.CommitMatch) result.Match {
// Skip any commit matches -- we only handle diff matches
if cm.DiffPreview == nil {
return nil
@ -187,15 +188,14 @@ func (j *fileContainsFilterJob) filterCommitMatch(ctx context.Context, searcherU
PatternMatchesContent: true,
}
onMatch := func(fms []*protocol.FileMatch) {
	for _, fm := range fms {
		matchedFileCounts[fm.Path] += 1
	}
}
onMatch := func(fm *protocol.FileMatch) {
	matchedFileCounts[fm.Path] += 1
}
_, err := searcher.Search(
ctx,
searcherURLs,
searcherGRPCConnectionCache,
cm.Repo.Name,
cm.Repo.ID,
"",

View File

@ -256,9 +256,9 @@ func TestFileContainsFilterJob(t *testing.T) {
s.Send(tc.inputEvent)
return nil, nil
})
searcher.MockSearch = func(_ context.Context, _ api.RepoName, _ api.RepoID, _ api.CommitID, p *search.TextPatternInfo, _ time.Duration, onMatches func([]*protocol.FileMatch)) (limitHit bool, err error) {
searcher.MockSearch = func(_ context.Context, _ api.RepoName, _ api.RepoID, _ api.CommitID, p *search.TextPatternInfo, _ time.Duration, onMatch func(*protocol.FileMatch)) (limitHit bool, err error) {
if len(p.IncludePatterns) > 0 {
onMatches([]*protocol.FileMatch{{Path: "file4"}})
onMatch(&protocol.FileMatch{Path: "file4"})
}
return false, nil
}
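The MockSearch change above mirrors the searcher callback moving from a batch of matches to one match per call. A small sketch of how a call site that still wants a slice can adapt, using a simplified FileMatch type that stands in for the searcher protocol type.

// Sketch: collect per-match callbacks back into a slice, the inverse of the
// onMatches -> onMatch change in this hunk.
package main

import "fmt"

type FileMatch struct{ Path string }

// collectMatches returns a per-match callback plus an accessor for everything
// it has seen, for callers that previously received a batch.
func collectMatches() (onMatch func(*FileMatch), collected func() []*FileMatch) {
	var all []*FileMatch
	onMatch = func(fm *FileMatch) { all = append(all, fm) }
	collected = func() []*FileMatch { return all }
	return onMatch, collected
}

func main() {
	onMatch, collected := collectMatches()
	// The search implementation now invokes the callback once per match.
	onMatch(&FileMatch{Path: "file4"})
	onMatch(&FileMatch{Path: "file7"})
	fmt.Println(len(collected()), "matches") // 2 matches
}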
