grpc: add support for frontend's indexed-search internal API methods (#47710)

Co-authored-by: Camden Cheek <camden@ccheek.com>
Geoffrey Gilmore 2023-03-07 18:15:54 -08:00 committed by GitHub
parent ca4f167337
commit 834f893d18
18 changed files with 1145 additions and 370 deletions

View File

@ -8,6 +8,7 @@ import (
gcontext "github.com/gorilla/context"
"github.com/gorilla/mux"
"github.com/graph-gophers/graphql-go"
"google.golang.org/grpc"
"github.com/sourcegraph/log"
@ -137,6 +138,7 @@ func healthCheckMiddleware(next http.Handler) http.Handler {
func newInternalHTTPHandler(
schema *graphql.Schema,
db database.DB,
grpcServer *grpc.Server,
enterpriseJobs jobutil.EnterpriseJobs,
newCodeIntelUploadHandler enterprise.NewCodeIntelUploadHandler,
rankingService enterprise.RankingService,
@ -145,23 +147,30 @@ func newInternalHTTPHandler(
) http.Handler {
internalMux := http.NewServeMux()
logger := log.Scoped("internal", "internal http handlers")
internalRouter := router.NewInternal(mux.NewRouter().PathPrefix("/.internal/").Subrouter())
internalhttpapi.RegisterInternalServices(
internalRouter,
grpcServer,
db,
enterpriseJobs,
schema,
newCodeIntelUploadHandler,
rankingService,
newComputeStreamHandler,
rateLimitWatcher,
)
internalMux.Handle("/.internal/", gziphandler.GzipHandler(
actor.HTTPMiddleware(
logger,
featureflag.Middleware(db.FeatureFlags(),
internalhttpapi.NewInternalHandler(
router.NewInternal(mux.NewRouter().PathPrefix("/.internal/").Subrouter()),
db,
enterpriseJobs,
schema,
newCodeIntelUploadHandler,
rankingService,
newComputeStreamHandler,
rateLimitWatcher,
),
featureflag.Middleware(
db.FeatureFlags(),
internalRouter,
),
),
))
h := http.Handler(internalMux)
h = gcontext.ClearHandler(h)
h = tracepkg.HTTPMiddleware(logger, h, conf.DefaultClient())

View File

@ -36,6 +36,8 @@ import (
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
internalgrpc "github.com/sourcegraph/sourcegraph/internal/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/internal/httpserver"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/oobmigration"
@ -248,7 +250,6 @@ func Main(ctx context.Context, observationCtx *observation.Context, ready servic
// We only want to run this task once Sourcegraph is ready to serve user requests.
goroutine.Go(func() { bg.AppReady(logger) })
goroutine.MonitorBackgroundRoutines(context.Background(), routines...)
return nil
}
@ -315,16 +316,21 @@ func makeInternalAPI(
return nil, err
}
grpcServer := defaults.NewServer(logger)
// The internal HTTP handler does not include the auth handlers.
internalHandler := newInternalHTTPHandler(
schema,
db,
grpcServer,
enterprise.EnterpriseSearchJobs,
enterprise.NewCodeIntelUploadHandler,
enterprise.RankingService,
enterprise.NewComputeStreamHandler,
rateLimiter,
)
internalHandler = internalgrpc.MultiplexHandlers(grpcServer, internalHandler)
httpServer := &http.Server{
Handler: internalHandler,
ReadTimeout: 75 * time.Second,
@ -390,3 +396,8 @@ func redispoolRegisterDB(db database.DB) error {
})
})
}
// GetInternalAddr returns the address of the internal HTTP API server.
func GetInternalAddr() string {
return httpAddrInternal
}
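
For context, internalgrpc.MultiplexHandlers lets the existing internal listener carry both protocols: gRPC requests are peeled off before the HTTP middleware stack, while REST requests continue through it. The sketch below is a common shape for such a multiplexer and is only an illustration of the pattern, not the actual internal/grpc implementation; package and function names here are hypothetical.

package example

import (
	"net/http"
	"strings"

	"golang.org/x/net/http2"
	"golang.org/x/net/http2/h2c"
	"google.golang.org/grpc"
)

// multiplexHandlers routes HTTP/2 requests with a gRPC content type to the
// grpc.Server and everything else to the plain HTTP handler.
func multiplexHandlers(grpcServer *grpc.Server, httpHandler http.Handler) http.Handler {
	mixed := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.ProtoMajor == 2 && strings.HasPrefix(r.Header.Get("Content-Type"), "application/grpc") {
			grpcServer.ServeHTTP(w, r)
			return
		}
		httpHandler.ServeHTTP(w, r)
	})
	// h2c enables cleartext HTTP/2, which gRPC needs on this internal,
	// non-TLS listener.
	return h2c.NewHandler(mixed, &http2.Server{})
}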

View File

@ -12,8 +12,10 @@ import (
"github.com/gorilla/mux"
"github.com/gorilla/schema"
"github.com/graph-gophers/graphql-go"
sglog "github.com/sourcegraph/log"
"google.golang.org/grpc"
proto "github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise"
@ -181,14 +183,16 @@ func NewHandler(
return m
}
// NewInternalHandler returns a new API handler for internal endpoints that uses
// the provided API router, which must have been created by httpapi/router.NewInternal.
// RegisterInternalServices registers REST and gRPC handlers for Sourcegraph's internal API on the
// provided mux.Router and gRPC server.
//
// 🚨 SECURITY: This handler should not be served on a publicly exposed port. 🚨
// This handler is not guaranteed to provide the same authorization checks as
// public API handlers.
func NewInternalHandler(
func RegisterInternalServices(
m *mux.Router,
s *grpc.Server,
db database.DB,
enterpriseJobs jobutil.EnterpriseJobs,
schema *graphql.Schema,
@ -196,11 +200,8 @@ func NewInternalHandler(
rankingService enterprise.RankingService,
newComputeStreamHandler enterprise.NewComputeStreamHandler,
rateLimitWatcher graphqlbackend.LimitWatcher,
) http.Handler {
) {
logger := sglog.Scoped("InternalHandler", "frontend internal HTTP API handler")
if m == nil {
m = apirouter.New(nil)
}
m.StrictSlash(true)
handler := jsonMiddleware(&errorHandler{
@ -232,6 +233,8 @@ func NewInternalHandler(
m.Get(apirouter.DocumentRanks).Handler(trace.Route(handler(indexer.serveDocumentRanks)))
m.Get(apirouter.UpdateIndexStatus).Handler(trace.Route(handler(indexer.handleIndexStatusUpdate)))
proto.RegisterZoektConfigurationServiceServer(s, &searchIndexerGRPCServer{server: indexer})
m.Get(apirouter.ExternalURL).Handler(trace.Route(handler(serveExternalURL)))
m.Get(apirouter.SendEmail).Handler(trace.Route(handler(serveSendEmail)))
gitService := &gitServiceHandler{
@ -254,8 +257,6 @@ func NewInternalHandler(
log.Printf("API no route: %s %s from %s", r.Method, r.URL, r.Referer())
http.Error(w, "no route", http.StatusNotFound)
})
return m
}
var schemaDecoder = schema.NewDecoder()
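
With the ZoektConfigurationService registered on the gRPC server above (proto.RegisterZoektConfigurationServiceServer) alongside the REST routes, zoekt-sourcegraph-indexserver can call the same endpoints over gRPC. A hedged sketch of a client call follows; NewZoektConfigurationServiceClient is the constructor name that standard protoc-gen-go-grpc codegen produces for this service, and the address and hostname are illustrative, not taken from this commit.

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"

	proto "github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1"
)

func main() {
	// Address and credentials are illustrative; the real internal API address
	// comes from deployment configuration and is plaintext inside the cluster.
	conn, err := grpc.Dial("sourcegraph-frontend-internal:80",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := proto.NewZoektConfigurationServiceClient(conn)

	// List mirrors the serveList REST handler: given the indexserver's
	// hostname, it returns the repository IDs that host should index.
	resp, err := client.List(context.Background(), &proto.ListRequest{Hostname: "zoekt-indexserver-0"})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("repos to index: %v", resp.GetRepoIds())
}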

View File

@ -1,6 +1,7 @@
package httpapi
import (
"bytes"
"context"
"encoding/json"
"fmt"
@ -13,6 +14,10 @@ import (
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/sourcegraph/log"
"github.com/sourcegraph/zoekt"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
proto "github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1"
"github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise"
"github.com/sourcegraph/sourcegraph/internal/api"
@ -48,6 +53,112 @@ func repoRankFromConfig(siteConfig schema.SiteConfiguration, repoName string) fl
return val
}
type searchIndexerGRPCServer struct {
server *searchIndexerServer
proto.ZoektConfigurationServiceServer
}
func (s *searchIndexerGRPCServer) SearchConfiguration(ctx context.Context, request *proto.SearchConfigurationRequest) (*proto.SearchConfigurationResponse, error) {
repoIDs := make([]api.RepoID, 0, len(request.GetRepoIds()))
for _, repoID := range request.GetRepoIds() {
repoIDs = append(repoIDs, api.RepoID(repoID))
}
var fingerprint searchbackend.ConfigFingerprint
fingerprint.FromProto(request.GetFingerprint())
parameters := searchConfigurationParameters{
fingerprint: fingerprint,
repoIDs: repoIDs,
}
r, err := s.server.doSearchConfiguration(ctx, parameters)
if err != nil {
var parameterErr *parameterError
if errors.As(err, &parameterErr) {
return nil, status.Error(codes.InvalidArgument, err.Error())
}
return nil, err
}
options := make([]*proto.ZoektIndexOptions, 0, len(r.options))
for _, o := range r.options {
options = append(options, o.ToProto())
}
return &proto.SearchConfigurationResponse{
UpdatedOptions: options,
Fingerprint: r.fingerprint.ToProto(),
}, nil
}
func (s *searchIndexerGRPCServer) List(ctx context.Context, r *proto.ListRequest) (*proto.ListResponse, error) {
indexedIDs := make([]api.RepoID, 0, len(r.GetIndexedIds()))
for _, repoID := range r.GetIndexedIds() {
indexedIDs = append(indexedIDs, api.RepoID(repoID))
}
var parameters listParameters
parameters.IndexedIDs = indexedIDs
parameters.Hostname = r.GetHostname()
repoIDs, err := s.server.doList(ctx, &parameters)
if err != nil {
return nil, err
}
var response proto.ListResponse
response.RepoIds = make([]int32, 0, len(repoIDs))
for _, repoID := range repoIDs {
response.RepoIds = append(response.RepoIds, int32(repoID))
}
return &response, nil
}
func (s *searchIndexerGRPCServer) RepositoryRank(ctx context.Context, request *proto.RepositoryRankRequest) (*proto.RepositoryRankResponse, error) {
ranks, err := s.server.Ranking.GetRepoRank(ctx, api.RepoName(request.Repository))
if err != nil {
if errcode.IsNotFound(err) {
return nil, status.Error(codes.NotFound, err.Error())
}
return nil, err
}
return &proto.RepositoryRankResponse{
Rank: ranks,
}, nil
}
func (s *searchIndexerGRPCServer) DocumentRanks(ctx context.Context, request *proto.DocumentRanksRequest) (*proto.DocumentRanksResponse, error) {
ranks, err := s.server.Ranking.GetDocumentRanks(ctx, api.RepoName(request.Repository))
if err != nil {
if errcode.IsNotFound(err) {
return nil, status.Error(codes.NotFound, err.Error())
}
return nil, err
}
return repoPathRanksToProto(&ranks), nil
}
func (s *searchIndexerGRPCServer) UpdateIndexStatus(ctx context.Context, req *proto.UpdateIndexStatusRequest) (*proto.UpdateIndexStatusResponse, error) {
var request indexStatusUpdateArgs
request.FromProto(req)
err := s.server.doIndexStatusUpdate(ctx, &request)
if err != nil {
return nil, err
}
return &proto.UpdateIndexStatusResponse{}, nil
}
var _ proto.ZoektConfigurationServiceServer = &searchIndexerGRPCServer{}
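
The adapter above maps domain errors onto canonical gRPC codes (InvalidArgument for bad parameters, NotFound for missing ranking data), so callers branch on status codes rather than parsing HTTP bodies. A hedged caller-side sketch, assuming the standard generated client interface proto.ZoektConfigurationServiceClient and an illustrative repository name; Rank is assumed to be the repeated-double field populated above.

package example

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"

	proto "github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1"
)

// repositoryRank shows how the status codes set by searchIndexerGRPCServer
// surface to a caller in place of HTTP status codes.
func repositoryRank(ctx context.Context, client proto.ZoektConfigurationServiceClient, repo string) ([]float64, error) {
	resp, err := client.RepositoryRank(ctx, &proto.RepositoryRankRequest{Repository: repo})
	if err != nil {
		switch status.Code(err) {
		case codes.NotFound:
			// No ranking data exists for this repository; treat it as unranked.
			return nil, nil
		case codes.InvalidArgument:
			// Malformed request, the gRPC analogue of the REST handlers
			// answering 400 Bad Request.
			return nil, err
		default:
			return nil, err
		}
	}
	return resp.GetRank(), nil
}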
// searchIndexerServer has handlers that zoekt-sourcegraph-indexserver
// interacts with (search-indexer).
type searchIndexerServer struct {
@ -98,8 +209,6 @@ type searchIndexerServer struct {
// this endpoint concurrently leading to socket starvation.
func (h *searchIndexerServer) serveConfiguration(w http.ResponseWriter, r *http.Request) error {
ctx := r.Context()
siteConfig := conf.Get().SiteConfiguration
if err := r.ParseForm(); err != nil {
return err
}
@ -114,23 +223,70 @@ func (h *searchIndexerServer) serveConfiguration(w http.ResponseWriter, r *http.
indexedIDs = append(indexedIDs, api.RepoID(id))
}
if len(indexedIDs) == 0 {
http.Error(w, "at least one repoID required", http.StatusBadRequest)
var clientFingerprint searchbackend.ConfigFingerprint
err := clientFingerprint.FromHeaders(r.Header)
if err != nil {
http.Error(w, fmt.Sprintf("invalid fingerprint: %s", err), http.StatusBadRequest)
return nil
}
response, err := h.doSearchConfiguration(ctx, searchConfigurationParameters{
repoIDs: indexedIDs,
fingerprint: clientFingerprint,
})
if err != nil {
var parameterErr *parameterError
code := http.StatusInternalServerError
if errors.As(err, &parameterErr) {
code = http.StatusBadRequest
}
http.Error(w, err.Error(), code)
return nil
}
response.fingerprint.ToHeaders(w.Header())
jsonOptions := make([][]byte, 0, len(response.options))
for _, opt := range response.options {
marshalled, err := json.Marshal(opt)
if err != nil {
_, _ = w.Write([]byte(err.Error()))
}
jsonOptions = append(jsonOptions, marshalled)
}
_, _ = w.Write(bytes.Join(jsonOptions, []byte("\n")))
return nil
}
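
The REST handler above now writes one JSON-encoded index-options object per line (GetIndexOptions no longer returns pre-joined bytes). A hedged consumer-side sketch of decoding that body; the struct, function, and field subset here are illustrative, not taken from zoekt.

package example

import (
	"encoding/json"
	"io"
)

// indexOptionsLine mirrors a subset of the JSON fields written per line by
// serveConfiguration.
type indexOptionsLine struct {
	Name     string
	RepoID   int32
	Symbols  bool
	Branches []struct{ Name, Version string }
	Error    string `json:",omitempty"`
}

// decodeIndexOptions reads the newline-delimited JSON stream. json.Decoder
// treats the separating newlines as whitespace, so a plain decode loop is
// enough.
func decodeIndexOptions(body io.Reader) ([]indexOptionsLine, error) {
	var out []indexOptionsLine
	dec := json.NewDecoder(body)
	for {
		var opts indexOptionsLine
		if err := dec.Decode(&opts); err == io.EOF {
			return out, nil
		} else if err != nil {
			return nil, err
		}
		out = append(out, opts)
	}
}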
func (h *searchIndexerServer) doSearchConfiguration(ctx context.Context, parameters searchConfigurationParameters) (*searchConfigurationResponse, error) {
siteConfig := conf.Get().SiteConfiguration
if len(parameters.repoIDs) == 0 {
return nil, &parameterError{err: "at least one repoID required"}
}
var minLastChanged time.Time
nextFingerPrint := parameters.fingerprint
if !h.MinLastChangedDisabled {
var err error
minLastChanged, err = searchbackend.ParseAndSetConfigFingerprint(w, r, &siteConfig)
fp, err := searchbackend.NewConfigFingerprint(&siteConfig)
if err != nil {
return err
return nil, err
}
minLastChanged = parameters.fingerprint.ChangesSince(fp)
nextFingerPrint = *fp
}
// Preload repos to support fast lookups by repo ID.
repos, loadReposErr := h.RepoStore.List(ctx, database.ReposListOptions{
IDs: indexedIDs,
IDs: parameters.repoIDs,
// When minLastChanged is non-zero we will only return the
// repositories that have changed since minLastChanged. This takes
// into account repo metadata, repo content and search context
@ -147,19 +303,19 @@ func (h *searchIndexerServer) serveConfiguration(w http.ResponseWriter, r *http.
// If we used MinLastChanged, we should only return information for the
// repositories that we found from List.
if !minLastChanged.IsZero() {
filtered := indexedIDs[:0]
for _, id := range indexedIDs {
filtered := parameters.repoIDs[:0]
for _, id := range parameters.repoIDs {
if _, ok := reposMap[id]; ok {
filtered = append(filtered, id)
}
}
indexedIDs = filtered
parameters.repoIDs = filtered
}
rankingLastUpdatedAt, err := h.Ranking.LastUpdatedAt(ctx, indexedIDs)
rankingLastUpdatedAt, err := h.Ranking.LastUpdatedAt(ctx, parameters.repoIDs)
if err != nil {
h.logger.Warn("failed to get ranking last updated timestamps, falling back to no ranking",
log.Int("repos", len(indexedIDs)),
log.Int("repos", len(parameters.repoIDs)),
log.Error(err),
)
rankingLastUpdatedAt = make(map[api.RepoID]time.Time)
@ -209,7 +365,7 @@ func (h *searchIndexerServer) serveConfiguration(w http.ResponseWriter, r *http.
}, nil
}
revisionsForRepo, revisionsForRepoErr := h.SearchContextsRepoRevs(ctx, indexedIDs)
revisionsForRepo, revisionsForRepoErr := h.SearchContextsRepoRevs(ctx, parameters.repoIDs)
getSearchContextRevisions := func(repoID api.RepoID) ([]string, error) {
if revisionsForRepoErr != nil {
return nil, revisionsForRepoErr
@ -217,49 +373,81 @@ func (h *searchIndexerServer) serveConfiguration(w http.ResponseWriter, r *http.
return revisionsForRepo[repoID], nil
}
b := searchbackend.GetIndexOptions(
indexOptions := searchbackend.GetIndexOptions(
&siteConfig,
getRepoIndexOptions,
getSearchContextRevisions,
indexedIDs...,
parameters.repoIDs...,
)
_, _ = w.Write(b)
return nil
return &searchConfigurationResponse{
options: indexOptions,
fingerprint: nextFingerPrint,
}, nil
}
type parameterError struct {
err string
}
func (e *parameterError) Error() string { return e.err }
type searchConfigurationParameters struct {
repoIDs []api.RepoID
fingerprint searchbackend.ConfigFingerprint
}
type searchConfigurationResponse struct {
options []searchbackend.ZoektIndexOptions
fingerprint searchbackend.ConfigFingerprint
}
// serveList is used by zoekt to get the list of repositories for it to index.
func (h *searchIndexerServer) serveList(w http.ResponseWriter, r *http.Request) error {
var opt struct {
// Hostname is used to determine the subset of repos to return
Hostname string
// IndexedIDs are the repository IDs of indexed repos by Hostname.
IndexedIDs []api.RepoID
}
err := json.NewDecoder(r.Body).Decode(&opt)
var parameters listParameters
err := json.NewDecoder(r.Body).Decode(&parameters)
if err != nil {
return err
}
indexable, err := h.ListIndexable(r.Context())
repoIDs, err := h.doList(r.Context(), &parameters)
if err != nil {
return err
}
// TODO: Avoid batching up so much in memory by:
// 1. Changing the schema from object of arrays to array of objects.
// 2. Stream out each object marshalled rather than marshalling the full list in memory.
data := struct {
RepoIDs []api.RepoID
}{
RepoIDs: repoIDs,
}
return json.NewEncoder(w).Encode(&data)
}
func (h *searchIndexerServer) doList(ctx context.Context, parameters *listParameters) (repoIDS []api.RepoID, err error) {
indexable, err := h.ListIndexable(ctx)
if err != nil {
return nil, err
}
if h.Indexers.Enabled() {
indexed := make(map[uint32]*zoekt.MinimalRepoListEntry, len(opt.IndexedIDs))
indexed := make(map[uint32]*zoekt.MinimalRepoListEntry, len(parameters.IndexedIDs))
add := func(r *types.MinimalRepo) { indexed[uint32(r.ID)] = nil }
if len(opt.IndexedIDs) > 0 {
opts := database.ReposListOptions{IDs: opt.IndexedIDs}
err = h.RepoStore.StreamMinimalRepos(r.Context(), opts, add)
if len(parameters.IndexedIDs) > 0 {
opts := database.ReposListOptions{IDs: parameters.IndexedIDs}
err = h.RepoStore.StreamMinimalRepos(ctx, opts, add)
if err != nil {
return err
return nil, err
}
}
indexable, err = h.Indexers.ReposSubset(r.Context(), opt.Hostname, indexed, indexable)
indexable, err = h.Indexers.ReposSubset(ctx, parameters.Hostname, indexed, indexable)
if err != nil {
return err
return nil, err
}
}
@ -268,18 +456,18 @@ func (h *searchIndexerServer) serveList(w http.ResponseWriter, r *http.Request)
// 2. Stream out each object marshalled rather than marshalling the full list in memory.
ids := make([]api.RepoID, 0, len(indexable))
for _, r := range indexable {
ids = append(ids, r.ID)
}
data := struct {
RepoIDs []api.RepoID
}{
RepoIDs: ids,
}
return ids, nil
}
return json.NewEncoder(w).Encode(&data)
type listParameters struct {
// Hostname is used to determine the subset of repos to return
Hostname string
// IndexedIDs are the repository IDs of indexed repos by Hostname.
IndexedIDs []api.RepoID
}
var metricGetVersion = promauto.NewCounter(prometheus.CounterOpts{
@ -322,29 +510,107 @@ func serveRank[T []float64 | citypes.RepoPathRanks](
return nil
}
func (h *searchIndexerServer) handleIndexStatusUpdate(w http.ResponseWriter, r *http.Request) error {
var body struct {
Repositories []struct {
RepoID uint32
Branches []zoekt.RepositoryBranch
}
func (h *searchIndexerServer) handleIndexStatusUpdate(_ http.ResponseWriter, r *http.Request) error {
var args indexStatusUpdateArgs
if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
return errors.Wrap(err, "failed to decode request args")
}
if err := json.NewDecoder(r.Body).Decode(&body); err != nil {
return errors.Wrap(err, "failed to decode request body")
}
return h.doIndexStatusUpdate(r.Context(), &args)
}
func (h *searchIndexerServer) doIndexStatusUpdate(ctx context.Context, args *indexStatusUpdateArgs) error {
var (
ids = make([]int32, len(body.Repositories))
minimal = make(map[uint32]*zoekt.MinimalRepoListEntry, len(body.Repositories))
ids = make([]int32, len(args.Repositories))
minimal = make(map[uint32]*zoekt.MinimalRepoListEntry, len(args.Repositories))
)
for i, repo := range body.Repositories {
for i, repo := range args.Repositories {
ids[i] = int32(repo.RepoID)
minimal[repo.RepoID] = &zoekt.MinimalRepoListEntry{Branches: repo.Branches}
}
h.logger.Info("updating index status", log.Int32s("repositories", ids))
return h.db.ZoektRepos().UpdateIndexStatuses(r.Context(), minimal)
return h.db.ZoektRepos().UpdateIndexStatuses(ctx, minimal)
}
type indexStatusUpdateArgs struct {
Repositories []indexStatusUpdateRepository
}
type indexStatusUpdateRepository struct {
RepoID uint32
Branches []zoekt.RepositoryBranch
}
func (a *indexStatusUpdateArgs) FromProto(req *proto.UpdateIndexStatusRequest) {
a.Repositories = make([]indexStatusUpdateRepository, 0, len(req.Repositories))
for _, repo := range req.Repositories {
branches := make([]zoekt.RepositoryBranch, 0, len(repo.Branches))
for _, b := range repo.Branches {
branches = append(branches, zoekt.RepositoryBranch{
Name: b.Name,
Version: b.Version,
})
}
a.Repositories = append(a.Repositories, struct {
RepoID uint32
Branches []zoekt.RepositoryBranch
}{
RepoID: repo.RepoId,
Branches: branches,
})
}
}
func (a *indexStatusUpdateArgs) ToProto() *proto.UpdateIndexStatusRequest {
repos := make([]*proto.UpdateIndexStatusRequest_Repository, 0, len(a.Repositories))
for _, repo := range a.Repositories {
branches := make([]*proto.ZoektRepositoryBranch, 0, len(repo.Branches))
for _, b := range repo.Branches {
branches = append(branches, &proto.ZoektRepositoryBranch{
Name: b.Name,
Version: b.Version,
})
}
repos = append(repos, &proto.UpdateIndexStatusRequest_Repository{
RepoId: repo.RepoID,
Branches: branches,
})
}
return &proto.UpdateIndexStatusRequest{
Repositories: repos,
}
}
func repoPathRanksToProto(r *citypes.RepoPathRanks) *proto.DocumentRanksResponse {
paths := make(map[string]float64, len(r.Paths))
for path, counts := range r.Paths {
paths[path] = counts
}
return &proto.DocumentRanksResponse{
Paths: paths,
MeanRank: r.MeanRank,
}
}
func repoPathRanksFromProto(x *proto.DocumentRanksResponse) *citypes.RepoPathRanks {
protoPaths := x.GetPaths()
paths := make(map[string]float64, len(protoPaths))
for path, counts := range protoPaths {
paths[path] = counts
}
return &citypes.RepoPathRanks{
Paths: paths,
MeanRank: x.MeanRank,
}
}

View File

@ -8,13 +8,19 @@ import (
"net/http"
"net/http/httptest"
"net/url"
"sort"
"strings"
"testing"
"testing/quick"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/sourcegraph/log/logtest"
"github.com/sourcegraph/zoekt"
"google.golang.org/protobuf/testing/protocmp"
proto "github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1"
"github.com/sourcegraph/sourcegraph/internal/api"
citypes "github.com/sourcegraph/sourcegraph/internal/codeintel/types"
@ -41,63 +47,187 @@ func TestServeConfiguration(t *testing.T) {
return api.CommitID("!" + spec), nil
})
srv := &searchIndexerServer{
RepoStore: &fakeRepoStore{Repos: repos},
gitserverClient: gsClient,
SearchContextsRepoRevs: func(ctx context.Context, repoIDs []api.RepoID) (map[api.RepoID][]string, error) {
return map[api.RepoID][]string{6: {"a", "b"}}, nil
},
Ranking: &fakeRankingService{},
repoStore := &fakeRepoStore{Repos: repos}
searchContextRepoRevsFunc := func(ctx context.Context, repoIDs []api.RepoID) (map[api.RepoID][]string, error) {
return map[api.RepoID][]string{6: {"a", "b"}}, nil
}
rankingService := &fakeRankingService{}
data := url.Values{
"repoID": []string{"1", "5", "6"},
}
req := httptest.NewRequest("POST", "/", strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
w := httptest.NewRecorder()
if err := srv.serveConfiguration(w, req); err != nil {
t.Fatal(err)
}
t.Run("gRPC", func(t *testing.T) {
resp := w.Result()
body, _ := io.ReadAll(resp.Body)
// Set up the GRPC server
grpcServer := searchIndexerGRPCServer{
server: &searchIndexerServer{
RepoStore: repoStore,
gitserverClient: gsClient,
Ranking: rankingService,
SearchContextsRepoRevs: searchContextRepoRevsFunc,
},
}
// This is a very fragile test since it will depend on changes to
// searchbackend.GetIndexOptions. If this becomes a problem we can make it
// more robust by shifting around responsibilities.
want := `{"Name":"","RepoID":0,"Public":false,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":false,"Error":"repo not found: id=1"}
// Setup: create a request for repos 5 and 6, and the non-existent repo 1
requestedRepoIDs := []int32{1, 5, 6}
// Execute the first request (no fingerprint)
var initialRequest proto.SearchConfigurationRequest
initialRequest.RepoIds = requestedRepoIDs
initialRequest.Fingerprint = nil
initialResponse, err := grpcServer.SearchConfiguration(context.Background(), &initialRequest)
if err != nil {
t.Fatalf("SearchConfiguration: %s", err)
}
// Verify: Check to see that the response contains an error
// for the non-existent repo 1
var responseRepo1 *proto.ZoektIndexOptions
foundRepo1 := false
var receivedRepositories []*proto.ZoektIndexOptions
for _, repo := range initialResponse.GetUpdatedOptions() {
if repo.RepoId == 1 {
responseRepo1 = repo
foundRepo1 = true
continue
}
receivedRepositories = append(receivedRepositories, repo)
}
if !foundRepo1 {
t.Errorf("expected to find repo ID 1 in response: %v", receivedRepositories)
}
if foundRepo1 && !strings.Contains(responseRepo1.Error, "repo not found") {
t.Errorf("expected to find repo not found error in repo 1: %v", responseRepo1)
}
// Verify: Check to see that the response contains the expected repos 5 and 6
expectedRepo5 := &proto.ZoektIndexOptions{
RepoId: 5,
Name: "5",
Priority: 5,
Public: true,
Symbols: true,
Branches: []*proto.ZoektRepositoryBranch{{Name: "HEAD", Version: "!HEAD"}},
}
expectedRepo6 := &proto.ZoektIndexOptions{
RepoId: 6,
Name: "6",
Priority: 6,
Public: true,
Symbols: true,
Branches: []*proto.ZoektRepositoryBranch{
{Name: "HEAD", Version: "!HEAD"},
{Name: "a", Version: "!a"},
{Name: "b", Version: "!b"},
},
}
expectedRepos := []*proto.ZoektIndexOptions{
expectedRepo5,
expectedRepo6,
}
sort.Slice(receivedRepositories, func(i, j int) bool {
return receivedRepositories[i].RepoId < receivedRepositories[j].RepoId
})
sort.Slice(expectedRepos, func(i, j int) bool {
return expectedRepos[i].RepoId < expectedRepos[j].RepoId
})
if diff := cmp.Diff(expectedRepos, receivedRepositories, protocmp.Transform()); diff != "" {
t.Fatalf("mismatch in response repositories (-want, +got):\n%s", diff)
}
if initialResponse.GetFingerprint() == nil {
t.Fatalf("expected fingerprint to be set in initial response")
}
// Setup: run a second request with the fingerprint from the first response
// Note: when fingerprint is set we only return a subset. We simulate this by setting RepoStore to only list repo number 5
grpcServer.server.RepoStore = &fakeRepoStore{Repos: repos[:1]}
var fingerprintedRequest proto.SearchConfigurationRequest
fingerprintedRequest.RepoIds = requestedRepoIDs
fingerprintedRequest.Fingerprint = initialResponse.GetFingerprint()
// Execute the second request
fingerprintedResponse, err := grpcServer.SearchConfiguration(context.Background(), &fingerprintedRequest)
if err != nil {
t.Fatalf("SearchConfiguration: %s", err)
}
// Verify that the response contains the expected repo 5
if diff := cmp.Diff(fingerprintedResponse.GetUpdatedOptions(), []*proto.ZoektIndexOptions{expectedRepo5}, protocmp.Transform()); diff != "" {
t.Errorf("mismatch in fingerprinted repositories (-want, +got):\n%s", diff)
}
if fingerprintedResponse.GetFingerprint() == nil {
t.Fatalf("expected fingerprint to be set in fingerprinted response")
}
})
t.Run("REST", func(t *testing.T) {
srv := &searchIndexerServer{
RepoStore: repoStore,
gitserverClient: gsClient,
Ranking: rankingService,
SearchContextsRepoRevs: searchContextRepoRevsFunc,
}
data := url.Values{
"repoID": []string{"1", "5", "6"},
}
req := httptest.NewRequest("POST", "/", strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
w := httptest.NewRecorder()
if err := srv.serveConfiguration(w, req); err != nil {
t.Fatal(err)
}
resp := w.Result()
body, _ := io.ReadAll(resp.Body)
// This is a very fragile test since it will depend on changes to
// searchbackend.GetIndexOptions. If this becomes a problem we can make it
// more robust by shifting around responsibilities.
want := `{"Name":"","RepoID":1,"Public":false,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":false,"Error":"repo not found: id=1"}
{"Name":"5","RepoID":5,"Public":true,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":true,"Branches":[{"Name":"HEAD","Version":"!HEAD"}],"Priority":5}
{"Name":"6","RepoID":6,"Public":true,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":true,"Branches":[{"Name":"HEAD","Version":"!HEAD"},{"Name":"a","Version":"!a"},{"Name":"b","Version":"!b"}],"Priority":6}`
if d := cmp.Diff(want, string(body)); d != "" {
t.Fatalf("mismatch (-want, +got):\n%s", d)
}
if d := cmp.Diff(want, string(body)); d != "" {
t.Fatalf("mismatch (-want, +got):\n%s", d)
}
// when fingerprint is set we only return a subset. We simulate this by setting RepoStore to only list repo number 5
srv.RepoStore = &fakeRepoStore{Repos: repos[:1]}
req = httptest.NewRequest("POST", "/", strings.NewReader(data.Encode()))
req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
req.Header.Set("X-Sourcegraph-Config-Fingerprint", resp.Header.Get("X-Sourcegraph-Config-Fingerprint"))
w = httptest.NewRecorder()
if err := srv.serveConfiguration(w, req); err != nil {
t.Fatal(err)
}
resp = w.Result()
body, _ = io.ReadAll(resp.Body)
// We want the same as before, except we only want to get back 5.
//
// This is a very fragile test since it will depend on changes to
// searchbackend.GetIndexOptions. If this becomes a problem we can make it
// more robust by shifting around responsibilities.
want = `{"Name":"5","RepoID":5,"Public":true,"Fork":false,"Archived":false,"LargeFiles":null,"Symbols":true,"Branches":[{"Name":"HEAD","Version":"!HEAD"}],"Priority":5}`
if d := cmp.Diff(want, string(body)); d != "" {
t.Fatalf("mismatch (-want, +got):\n%s", d)
}
})
if d := cmp.Diff(want, string(body)); d != "" {
t.Fatalf("mismatch (-want, +got):\n%s", d)
}
}
func TestReposIndex(t *testing.T) {
@ -110,79 +240,140 @@ func TestReposIndex(t *testing.T) {
indexableRepos := allRepos[:2]
cases := []struct {
name string
indexable []types.MinimalRepo
body string
want []string
}{{
type parameters struct {
restBody string
grpcRequest *proto.ListRequest
}
type testCase struct {
name string
indexable []types.MinimalRepo
parameters parameters
want []string
}
cases := []testCase{{
name: "indexers",
indexable: allRepos,
body: `{"Hostname": "foo"}`,
want: []string{"github.com/popular/foo", "github.com/alice/foo"},
parameters: parameters{
restBody: `{"Hostname": "foo"}`,
grpcRequest: &proto.ListRequest{Hostname: "foo"},
},
want: []string{"github.com/popular/foo", "github.com/alice/foo"},
}, {
name: "indexedids",
indexable: allRepos,
body: `{"Hostname": "foo", "IndexedIDs": [4]}`,
want: []string{"github.com/popular/foo", "github.com/alice/foo", "github.com/alice/bar"},
parameters: parameters{
restBody: `{"Hostname": "foo", "IndexedIDs": [4]}`,
grpcRequest: &proto.ListRequest{Hostname: "foo", IndexedIds: []int32{4}},
},
want: []string{"github.com/popular/foo", "github.com/alice/foo", "github.com/alice/bar"},
}, {
name: "dot-com indexers",
indexable: indexableRepos,
body: `{"Hostname": "foo"}`,
want: []string{"github.com/popular/foo"},
parameters: parameters{
restBody: `{"Hostname": "foo"}`,
grpcRequest: &proto.ListRequest{Hostname: "foo"},
},
want: []string{"github.com/popular/foo"},
}, {
name: "dot-com indexedids",
indexable: indexableRepos,
body: `{"Hostname": "foo", "IndexedIDs": [2]}`,
want: []string{"github.com/popular/foo", "github.com/popular/bar"},
parameters: parameters{
restBody: `{"Hostname": "foo", "IndexedIDs": [2]}`,
grpcRequest: &proto.ListRequest{Hostname: "foo", IndexedIds: []int32{2}},
},
want: []string{"github.com/popular/foo", "github.com/popular/bar"},
}, {
name: "none",
indexable: allRepos,
body: `{"Hostname": "baz"}`,
want: []string{},
parameters: parameters{
restBody: `{"Hostname": "baz"}`,
grpcRequest: &proto.ListRequest{Hostname: "baz"},
},
want: []string{},
}}
for _, tc := range cases {
t.Run(tc.name, func(t *testing.T) {
srv := &searchIndexerServer{
ListIndexable: fakeListIndexable(tc.indexable),
RepoStore: &fakeRepoStore{
Repos: allRepos,
},
Indexers: suffixIndexers(true),
}
t.Run("gRPC", func(t *testing.T) {
grpcServer := &searchIndexerGRPCServer{
server: &searchIndexerServer{
ListIndexable: fakeListIndexable(tc.indexable),
RepoStore: &fakeRepoStore{
Repos: allRepos,
},
Indexers: suffixIndexers(true),
},
}
req := httptest.NewRequest("POST", "/", bytes.NewReader([]byte(tc.body)))
w := httptest.NewRecorder()
if err := srv.serveList(w, req); err != nil {
t.Fatal(err)
}
resp, err := grpcServer.List(context.Background(), tc.parameters.grpcRequest)
if err != nil {
t.Fatal(err)
}
resp := w.Result()
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
t.Errorf("got status %v", resp.StatusCode)
}
var data struct {
RepoIDs []api.RepoID
}
if err := json.Unmarshal(body, &data); err != nil {
t.Fatal(err)
}
wantIDs := make([]api.RepoID, len(tc.want))
for i, name := range tc.want {
for _, repo := range allRepos {
if string(repo.Name) == name {
wantIDs[i] = repo.ID
expectedRepoIDs := make([]api.RepoID, len(tc.want))
for i, name := range tc.want {
for _, repo := range allRepos {
if string(repo.Name) == name {
expectedRepoIDs[i] = repo.ID
}
}
}
}
if d := cmp.Diff(wantIDs, data.RepoIDs); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
var receivedRepoIDs []api.RepoID
for _, id := range resp.GetRepoIds() {
receivedRepoIDs = append(receivedRepoIDs, api.RepoID(id))
}
if d := cmp.Diff(expectedRepoIDs, receivedRepoIDs, cmpopts.EquateEmpty()); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
})
t.Run("REST", func(t *testing.T) {
srv := &searchIndexerServer{
ListIndexable: fakeListIndexable(tc.indexable),
RepoStore: &fakeRepoStore{
Repos: allRepos,
},
Indexers: suffixIndexers(true),
}
req := httptest.NewRequest("POST", "/", bytes.NewReader([]byte(tc.parameters.restBody)))
w := httptest.NewRecorder()
if err := srv.serveList(w, req); err != nil {
t.Fatal(err)
}
resp := w.Result()
body, _ := io.ReadAll(resp.Body)
if resp.StatusCode != http.StatusOK {
t.Errorf("got status %v", resp.StatusCode)
}
var data struct {
RepoIDs []api.RepoID
}
if err := json.Unmarshal(body, &data); err != nil {
t.Fatal(err)
}
wantIDs := make([]api.RepoID, len(tc.want))
for i, name := range tc.want {
for _, repo := range allRepos {
if string(repo.Name) == name {
wantIDs[i] = repo.ID
}
}
}
if d := cmp.Diff(wantIDs, data.RepoIDs); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
})
})
}
}
@ -296,43 +487,106 @@ func TestRepoRankFromConfig(t *testing.T) {
}
func TestIndexStatusUpdate(t *testing.T) {
logger := logtest.Scoped(t)
body := `{"Repositories": [{"RepoID": 1234, "Branches": [{"Name": "main", "Version": "f00b4r"}]}]}`
wantBranches := []zoekt.RepositoryBranch{{Name: "main", Version: "f00b4r"}}
called := false
t.Run("REST", func(t *testing.T) {
logger := logtest.Scoped(t)
zoektReposStore := database.NewMockZoektReposStore()
zoektReposStore.UpdateIndexStatusesFunc.SetDefaultHook(func(_ context.Context, indexed map[uint32]*zoekt.MinimalRepoListEntry) error {
entry, ok := indexed[1234]
if !ok {
t.Fatalf("wrong repo ID")
body := `{"Repositories": [{"RepoID": 1234, "Branches": [{"Name": "main", "Version": "f00b4r"}]}]}`
wantBranches := []zoekt.RepositoryBranch{{Name: "main", Version: "f00b4r"}}
called := false
zoektReposStore := database.NewMockZoektReposStore()
zoektReposStore.UpdateIndexStatusesFunc.SetDefaultHook(func(_ context.Context, indexed map[uint32]*zoekt.MinimalRepoListEntry) error {
entry, ok := indexed[1234]
if !ok {
t.Fatalf("wrong repo ID")
}
if d := cmp.Diff(entry.Branches, wantBranches); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
called = true
return nil
})
db := database.NewMockDB()
db.ZoektReposFunc.SetDefaultReturn(zoektReposStore)
srv := &searchIndexerServer{db: db, logger: logger}
req := httptest.NewRequest("POST", "/", bytes.NewReader([]byte(body)))
w := httptest.NewRecorder()
if err := srv.handleIndexStatusUpdate(w, req); err != nil {
t.Fatal(err)
}
if d := cmp.Diff(entry.Branches, wantBranches); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
resp := w.Result()
if resp.StatusCode != http.StatusOK {
t.Errorf("got status %v", resp.StatusCode)
}
if !called {
t.Fatalf("not called")
}
called = true
return nil
})
db := database.NewMockDB()
db.ZoektReposFunc.SetDefaultReturn(zoektReposStore)
t.Run("gRPC", func(t *testing.T) {
logger := logtest.Scoped(t)
srv := &searchIndexerServer{db: db, logger: logger}
wantRepoID := uint32(1234)
wantBranches := []zoekt.RepositoryBranch{{Name: "main", Version: "f00b4r"}}
req := httptest.NewRequest("POST", "/", bytes.NewReader([]byte(body)))
w := httptest.NewRecorder()
called := false
if err := srv.handleIndexStatusUpdate(w, req); err != nil {
t.Fatal(err)
zoektReposStore := database.NewMockZoektReposStore()
zoektReposStore.UpdateIndexStatusesFunc.SetDefaultHook(func(_ context.Context, indexed map[uint32]*zoekt.MinimalRepoListEntry) error {
entry, ok := indexed[wantRepoID]
if !ok {
t.Fatalf("wrong repo ID")
}
if d := cmp.Diff(entry.Branches, wantBranches); d != "" {
t.Fatalf("ids mismatch (-want +got):\n%s", d)
}
called = true
return nil
})
db := database.NewMockDB()
db.ZoektReposFunc.SetDefaultReturn(zoektReposStore)
parameters := indexStatusUpdateArgs{
Repositories: []indexStatusUpdateRepository{
{RepoID: wantRepoID, Branches: wantBranches},
},
}
srv := &searchIndexerGRPCServer{server: &searchIndexerServer{db: db, logger: logger}}
_, err := srv.UpdateIndexStatus(context.Background(), parameters.ToProto())
if err != nil {
t.Fatal(err)
}
if !called {
t.Fatalf("not called")
}
})
}
func TestRepoPathRanks_RoundTrip(t *testing.T) {
var diff string
f := func(original citypes.RepoPathRanks) bool {
converted := repoPathRanksFromProto(repoPathRanksToProto(&original))
if diff = cmp.Diff(&original, converted); diff != "" {
return false
}
return true
}
resp := w.Result()
if resp.StatusCode != http.StatusOK {
t.Errorf("got status %v", resp.StatusCode)
}
if !called {
t.Fatalf("not called")
if err := quick.Check(f, nil); err != nil {
t.Errorf("mismatch (-want +got):\n%s", diff)
}
}

View File

@ -0,0 +1,15 @@
package shared
import (
"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/cli"
"github.com/sourcegraph/sourcegraph/internal/debugserver"
)
// GRPCWebUIDebugEndpoints returns debug endpoints that serve the GRPCWebUI instances that target
// this frontend instance.
func GRPCWebUIDebugEndpoints() []debugserver.Endpoint {
addr := cli.GetInternalAddr()
return []debugserver.Endpoint{
debugserver.NewGRPCWebUIEndpoint("frontend-internal", addr),
}
}

View File

@ -20,7 +20,7 @@ func (svc) Name() string { return "frontend" }
func (svc) Configure() (env.Config, []debugserver.Endpoint) {
CLILoadConfig()
return nil, nil
return nil, GRPCWebUIDebugEndpoints()
}
func (svc) Start(ctx context.Context, observationCtx *observation.Context, ready service.ReadyFunc, config env.Config) error {

View File

@ -8,5 +8,5 @@ import (
// this gitserver instance.
func GRPCWebUIDebugEndpoint() debugserver.Endpoint {
addr := getAddr()
return debugserver.NewGRPCWebUIEndpoint(addr)
return debugserver.NewGRPCWebUIEndpoint("gitserver", addr)
}

View File

@ -8,5 +8,5 @@ import (
// this searcher instance.
func GRPCWebUIDebugEndpoint() debugserver.Endpoint {
addr := getAddr()
return debugserver.NewGRPCWebUIEndpoint(addr)
return debugserver.NewGRPCWebUIEndpoint("searcher", addr)
}

View File

@ -7,5 +7,5 @@ import (
// GRPCWebUIDebugEndpoint returns a debug endpoint that serves the GRPCWebUI that targets
// this symbols instance.
func GRPCWebUIDebugEndpoint() debugserver.Endpoint {
return debugserver.NewGRPCWebUIEndpoint(addr)
return debugserver.NewGRPCWebUIEndpoint("symbols", addr)
}

View File

@ -21,7 +21,7 @@ func (svc) Name() string { return "frontend" }
func (svc) Configure() (env.Config, []debugserver.Endpoint) {
frontend_shared.CLILoadConfig()
codeintel.LoadConfig()
return nil, nil
return nil, frontend_shared.GRPCWebUIDebugEndpoints()
}
func (svc) Start(ctx context.Context, observationCtx *observation.Context, ready service.ReadyFunc, config env.Config) error {

go.mod
View File

@ -290,10 +290,11 @@ replace (
// that they always use the current version of go-grpc-middleware that they're developing). Until this issue is fixed,
// we'll need to ensure that we explicitly depend on the latest version of go-grpc-middleware (v2.0.0-rc.3) as of this writing.
github.com/grpc-ecosystem/go-grpc-middleware/v2 => github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.3
)
require (
github.com/sourcegraph/zoekt v0.0.0-20230303172250-3b0248719b12
github.com/sourcegraph/zoekt v0.0.0-20230308015635-8df0b5af86f0
github.com/stretchr/objx v0.5.0 // indirect
)

go.sum
View File

@ -1551,8 +1551,8 @@ github.com/sourcegraph/scip v0.2.4-0.20221213205653-aa0e511dcfef h1:fWPxLkDObzzK
github.com/sourcegraph/scip v0.2.4-0.20221213205653-aa0e511dcfef/go.mod h1:ymcTuv+6D5OEZB/84TRPQvUpDK7v7zXnWBJl79hb7ns=
github.com/sourcegraph/yaml v1.0.1-0.20200714132230-56936252f152 h1:z/MpntplPaW6QW95pzcAR/72Z5TWDyDnSo0EOcyij9o=
github.com/sourcegraph/yaml v1.0.1-0.20200714132230-56936252f152/go.mod h1:GIjDIg/heH5DOkXY3YJ/wNhfHsQHoXGjl8G8amsYQ1I=
github.com/sourcegraph/zoekt v0.0.0-20230303172250-3b0248719b12 h1:XO7th5aa0YMebq14wz6HC8guKkz9pjvlW1QKIbXchZk=
github.com/sourcegraph/zoekt v0.0.0-20230303172250-3b0248719b12/go.mod h1:pYHdTfl7eBIQOALuugYItJp9gZXDR0Cx7v1Kr7GGwz4=
github.com/sourcegraph/zoekt v0.0.0-20230308015635-8df0b5af86f0 h1:ezmau/P6/AtXjX66yvtd9mZSuHiVvl4mLUcKyFxxT5w=
github.com/sourcegraph/zoekt v0.0.0-20230308015635-8df0b5af86f0/go.mod h1:G3dj/2qn2M/cjDexzuINNETdXFdqIJyAHd42UAGwuqc=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v0.0.0-20170901052352-ee1bd8ee15a1/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=

View File

@ -5,16 +5,19 @@ import (
"net/http"
"github.com/fullstorydev/grpcui/standalone"
"google.golang.org/grpc"
"github.com/sourcegraph/sourcegraph/internal/grpc/defaults"
"github.com/sourcegraph/sourcegraph/lib/errors"
"google.golang.org/grpc"
)
const gRPCWebUIPath = "/debug/grpcui"
// NewGRPCWebUIEndpoint returns a new Endpoint that serves a gRPC Web UI instance
// that targets the gRPC server specified by target.
func NewGRPCWebUIEndpoint(target string) Endpoint {
//
// serviceName is the name of the gRPC service that will be displayed on the debug page.
func NewGRPCWebUIEndpoint(serviceName, target string) Endpoint {
var handler http.Handler = &grpcHandler{
target: target,
dialOpts: defaults.DialOptions(),
@ -27,7 +30,7 @@ func NewGRPCWebUIEndpoint(target string) Endpoint {
handler = http.StripPrefix(gRPCWebUIPath, handler)
return Endpoint{
Name: "gRPC Web UI",
Name: fmt.Sprintf("gRPC Web UI (%s)", serviceName),
Path: fmt.Sprintf("%s/", gRPCWebUIPath),
// gRPC Web UI serves multiple assets, so we need to forward _all_ requests under this path

View File

@ -6,76 +6,77 @@ import (
"time"
"github.com/mitchellh/hashstructure"
"google.golang.org/protobuf/types/known/timestamppb"
proto "github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/schema"
)
const configFingerprintHeader = "X-Sourcegraph-Config-Fingerprint"
// ParseAndSetConfigFingerprint will set the current config fingerprint in
// w. If r specifies a config fingerprint, we return the minimum time for a
// repository to have changed.
//
// A config fingerprint represents a point in time that indexed search
// configuration was generated. It is an opaque identifier sent to clients to
// allow efficient calculation of what has changed since the last
// request. zoekt-sourcegraph-indexserver reads and sets these headers to
// reduce the amount of work required when it polls.
func ParseAndSetConfigFingerprint(w http.ResponseWriter, r *http.Request, siteConfig *schema.SiteConfiguration) (minLastChanged time.Time, err error) {
// Before we load anything generate a config fingerprint representing the
// point in time just before loading. This is sent to the client via a
// header for use in the next call.
fingerprint, err := newConfigFingerprint(siteConfig)
if err != nil {
return time.Time{}, err
}
w.Header().Set(configFingerprintHeader, fingerprint.Marshal())
// If the user specified a fingerprint to diff against, we can use it to
// reduce the amount of work we do. minLastChanged being zero means we
// check every repository.
old, err := parseConfigFingerprint(r.Header.Get(configFingerprintHeader))
if err != nil {
return time.Time{}, err
}
// Different site config may affect any repository, so we need to load
// them all in.
if !old.SameConfig(fingerprint) {
return time.Time{}, nil
}
// We can just load what has changed since the last config fingerprint.
return old.Since(), nil
}
// configFingerprint represents a point in time that indexed search
// ConfigFingerprint represents a point in time that indexed search
// configuration was generated. It is an opaque identifier sent to clients to
// allow efficient calculation of what has changed since the last request.
type configFingerprint struct {
type ConfigFingerprint struct {
ts time.Time
hash uint64
}
// newConfigFingerprint returns a ConfigFingerprint for the current time and sc.
func newConfigFingerprint(sc *schema.SiteConfiguration) (*configFingerprint, error) {
// NewConfigFingerprint returns a ConfigFingerprint for the current time and sc.
func NewConfigFingerprint(sc *schema.SiteConfiguration) (*ConfigFingerprint, error) {
hash, err := hashstructure.Hash(sc, nil)
if err != nil {
return nil, err
}
return &configFingerprint{
return &ConfigFingerprint{
ts: time.Now(),
hash: hash,
}, nil
}
const configFingerprintHeader = "X-Sourcegraph-Config-Fingerprint"
func (c *ConfigFingerprint) FromHeaders(header http.Header) error {
fingerprint, err := parseConfigFingerprint(header.Get(configFingerprintHeader))
if err != nil {
return err
}
*c = *fingerprint
return nil
}
func (c *ConfigFingerprint) ToHeaders(headers http.Header) {
headers.Set(configFingerprintHeader, c.Marshal())
}
func (c *ConfigFingerprint) FromProto(p *proto.Fingerprint) {
// Note: In comparison to parseConfigFingerprint, protobuf's
// schema evolution through field addition means that we don't need to
// encode specific version numbers.
ts := p.GetGeneratedAt().AsTime()
identifier := p.GetIdentifier()
*c = ConfigFingerprint{
ts: ts.Truncate(time.Second),
hash: identifier,
}
}
func (c *ConfigFingerprint) ToProto() *proto.Fingerprint {
return &proto.Fingerprint{
Identifier: c.hash,
GeneratedAt: timestamppb.New(c.ts.Truncate(time.Second)),
}
}
// parseConfigFingerprint unmarshals s and returns ConfigFingerprint. This is
// the inverse of Marshal.
func parseConfigFingerprint(s string) (_ *configFingerprint, err error) {
func parseConfigFingerprint(s string) (_ *ConfigFingerprint, err error) {
// We support no cursor.
if len(s) == 0 {
return &configFingerprint{}, nil
return &ConfigFingerprint{}, nil
}
var (
@ -87,7 +88,7 @@ func parseConfigFingerprint(s string) (_ *configFingerprint, err error) {
// ignore different versions
if n >= 1 && version != 1 {
return &configFingerprint{}, nil
return &ConfigFingerprint{}, nil
}
if err != nil {
@ -99,32 +100,59 @@ func parseConfigFingerprint(s string) (_ *configFingerprint, err error) {
return nil, errors.Wrapf(err, "malformed search-config-fingerprint 1: %q", s)
}
return &configFingerprint{
return &ConfigFingerprint{
ts: ts,
hash: hash,
}, nil
}
// Marshal returns an opaque string for c to send to clients.
func (c *configFingerprint) Marshal() string {
func (c *ConfigFingerprint) Marshal() string {
ts := c.ts.UTC().Truncate(time.Second)
return fmt.Sprintf("search-config-fingerprint 1 %s %x", ts.Format(time.RFC3339), c.hash)
}
// Since returns the time to return changes since. Note: It does not return
// ChangesSince compares the two fingerprints and returns a timestamp that the caller
// can use to determine if any repositories have changed since the last request.
func (c *ConfigFingerprint) ChangesSince(other *ConfigFingerprint) time.Time {
if c == nil || other == nil {
// Load all repositories.
return time.Time{}
}
older, newer := c, other
if other.ts.Before(c.ts) {
older, newer = other, c
}
if !older.sameConfig(newer) {
// Different site configuration could have changed the set of
// repositories we need to index. Load everything.
return time.Time{}
}
// Otherwise, only load repositories that have changed since the older
// fingerprint.
return older.paddedTimestamp()
}
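
A minimal usage sketch of the exported fingerprint API, mirroring how doSearchConfiguration in this commit uses it; the helper name is hypothetical and the code would sit in this package (using its existing time and schema imports) alongside ConfigFingerprint.

// minLastChangedFor computes the cutoff for "which repositories changed since
// the indexserver last asked". A zero time means the caller must reload
// everything (no fingerprint was sent, or the site configuration changed in
// between).
func minLastChangedFor(client *ConfigFingerprint, sc *schema.SiteConfiguration) (time.Time, *ConfigFingerprint, error) {
	current, err := NewConfigFingerprint(sc)
	if err != nil {
		return time.Time{}, nil, err
	}
	// current is handed back to the indexserver (via ToHeaders or ToProto)
	// so it can send it on its next poll.
	return client.ChangesSince(current), current, nil
}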
// since returns the time to return changes since. Note: It does not return
// the exact time the fingerprint was generated, but instead some time in the
// past to allow for time skew and races.
func (c *configFingerprint) Since() time.Time {
func (c *ConfigFingerprint) paddedTimestamp() time.Time {
if c.ts.IsZero() {
return c.ts
}
// 90s is the same value recommended by the TOTP spec.
return c.ts.Add(-90 * time.Second)
}
// SameConfig returns true if c2 was generated with the same site
// sameConfig returns true if c2 was generated with the same site
// configuration.
func (c *configFingerprint) SameConfig(c2 *configFingerprint) bool {
func (c *ConfigFingerprint) sameConfig(c2 *ConfigFingerprint) bool {
// ts being zero indicates a missing cursor or non-fatal unmarshalling of
// the cursor.
if c.ts.IsZero() || c2.ts.IsZero() {

View File

@ -1,70 +1,113 @@
package backend
import (
"math/rand"
"net/http"
"net/http/httptest"
"reflect"
"testing"
"testing/quick"
"time"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestParseAndSetConfigFingerprint(t *testing.T) {
mk := func(sc *schema.SiteConfiguration) *configFingerprint {
func TestConfigFingerprintChangesSince(t *testing.T) {
mk := func(sc *schema.SiteConfiguration, timestamp time.Time) *ConfigFingerprint {
t.Helper()
fingerprint, err := newConfigFingerprint(sc)
fingerprint, err := NewConfigFingerprint(sc)
if err != nil {
t.Fatal(err)
}
fingerprint.ts = timestamp
return fingerprint
}
parseAndSet := func(fingerprint string, sc *schema.SiteConfiguration) time.Time {
t.Helper()
r := httptest.NewRequest("GET", "/", nil)
r.Header.Set("X-Sourcegraph-Config-Fingerprint", fingerprint)
w := httptest.NewRecorder()
minLastChanged, err := ParseAndSetConfigFingerprint(w, r, sc)
if err != nil {
t.Fatal(err)
}
zero := time.Time{}
now := time.Now()
oneDayAhead := now.Add(24 * time.Hour)
got, err := parseConfigFingerprint(w.Result().Header.Get("X-Sourcegraph-Config-Fingerprint"))
if err != nil {
t.Fatal(err)
}
want := mk(sc)
if !got.SameConfig(want) {
t.Fatal("expected same config in response fingerprint")
}
return minLastChanged
}
sc1 := &schema.SiteConfiguration{
fingerprintV1 := mk(&schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
SearchIndexBranches: map[string][]string{
"foo": {"dev"},
},
},
}
sc2 := &schema.SiteConfiguration{
}, now)
fingerprintV2 := mk(&schema.SiteConfiguration{
ExperimentalFeatures: &schema.ExperimentalFeatures{
SearchIndexBranches: map[string][]string{
"foo": {"dev", "qa"},
},
},
}
}, oneDayAhead)
if got := parseAndSet("", sc1); !got.IsZero() {
t.Fatal("expect no min last changed for missing fingerprint")
}
for _, tc := range []struct {
name string
fingerPrintA *ConfigFingerprint
fingerPrintB *ConfigFingerprint
if got := parseAndSet(mk(sc1).Marshal(), sc2); !got.IsZero() {
t.Fatal("expect no min last changed for different site config")
}
timeLowerBound time.Time
timeUpperBound time.Time
}{
{
name: "missing fingerprint A",
fingerPrintA: nil,
fingerPrintB: fingerprintV1,
if got := parseAndSet(mk(sc1).Marshal(), sc1); got.IsZero() {
t.Fatal("expect min last changed for same site config")
timeLowerBound: zero,
timeUpperBound: zero,
},
{
name: "missing fingerprint B",
fingerPrintA: nil,
fingerPrintB: fingerprintV1,
timeLowerBound: zero,
timeUpperBound: zero,
},
{
name: "same fingerprint",
fingerPrintA: fingerprintV1,
fingerPrintB: fingerprintV1,
timeLowerBound: fingerprintV1.ts.Add(-3 * time.Minute),
timeUpperBound: fingerprintV1.ts.Add(3 * time.Minute),
},
{
name: "different fingerprint",
fingerPrintA: fingerprintV1,
fingerPrintB: fingerprintV2,
timeLowerBound: zero,
timeUpperBound: zero,
},
} {
t.Run(tc.name, func(t *testing.T) {
got := tc.fingerPrintA.ChangesSince(tc.fingerPrintB)
if tc.timeLowerBound.Equal(got) {
return
}
if got.After(tc.timeLowerBound) && got.Before(tc.timeUpperBound) {
return
}
t.Errorf("got %s, not in range [%s, %s)",
got.Format(time.RFC3339),
tc.timeLowerBound.Format(time.RFC3339),
tc.timeUpperBound.Format(time.RFC3339),
)
})
}
}
@ -85,9 +128,9 @@ func TestConfigFingerprint(t *testing.T) {
}
var seq time.Duration
mk := func(sc *schema.SiteConfiguration) *configFingerprint {
mk := func(sc *schema.SiteConfiguration) *ConfigFingerprint {
t.Helper()
cf, err := newConfigFingerprint(sc)
cf, err := NewConfigFingerprint(sc)
if err != nil {
t.Fatal(err)
}
@ -105,14 +148,90 @@ func TestConfigFingerprint(t *testing.T) {
cfB := mk(sc1)
cfC := mk(sc2)
if !cfA.SameConfig(cfB) {
if !cfA.sameConfig(cfB) {
t.Fatal("expected same config for A and B")
}
if cfA.SameConfig(cfC) {
if cfA.sameConfig(cfC) {
t.Fatal("expected different config for A and C")
}
}
func TestSiteConfigFingerprint_RoundTrip(t *testing.T) {
type roundTripper func(t *testing.T, original *ConfigFingerprint) (converted *ConfigFingerprint)
for _, test := range []struct {
transportName string
roundTripper roundTripper
}{
{
transportName: "gRPC",
roundTripper: func(_ *testing.T, original *ConfigFingerprint) *ConfigFingerprint {
converted := &ConfigFingerprint{}
converted.FromProto(original.ToProto())
return converted
},
},
{
transportName: "HTTP headers",
roundTripper: func(t *testing.T, original *ConfigFingerprint) *ConfigFingerprint {
echoHandler := func(w http.ResponseWriter, r *http.Request) {
// echo back the fingerprint in the response
var fingerprint ConfigFingerprint
err := fingerprint.FromHeaders(r.Header)
if err != nil {
t.Fatalf("error converting from request in echoHandler: %s", err)
}
fingerprint.ToHeaders(w.Header())
}
w := httptest.NewRecorder()
r := httptest.NewRequest("GET", "/", nil)
original.ToHeaders(r.Header)
echoHandler(w, r)
var converted ConfigFingerprint
err := converted.FromHeaders(w.Result().Header)
if err != nil {
t.Fatalf("error converting from response outside echoHandler: %s", err)
}
return &converted
},
},
} {
t.Run(test.transportName, func(t *testing.T) {
var diff string
f := func(ts fuzzTime, hash uint64) bool {
original := ConfigFingerprint{
ts: time.Time(ts),
hash: hash,
}
converted := test.roundTripper(t, &original)
if diff = cmp.Diff(original.hash, converted.hash); diff != "" {
diff = "hash: " + diff
return false
}
if diff = cmp.Diff(original.ts, converted.ts, cmpopts.EquateApproxTime(time.Second)); diff != "" {
diff = "ts: " + diff
return false
}
return true
}
if err := quick.Check(f, nil); err != nil {
t.Errorf("fingerprint diff (-want +got):\n%s", diff)
}
})
}
}
func TestConfigFingerprint_Marshal(t *testing.T) {
// Use a fixed time for this test case
now, err := time.Parse(time.RFC3339, "2006-01-02T15:04:05Z")
@ -120,7 +239,7 @@ func TestConfigFingerprint_Marshal(t *testing.T) {
t.Fatal(err)
}
cf := configFingerprint{
cf := ConfigFingerprint{
ts: now,
hash: 123,
}
@ -180,7 +299,7 @@ func TestConfigFingerprint_parse(t *testing.T) {
}
}
func testMarshal(t *testing.T, cf *configFingerprint) {
func testMarshal(t *testing.T, cf *ConfigFingerprint) {
t.Helper()
v := cf.Marshal()
@ -191,15 +310,27 @@ func testMarshal(t *testing.T, cf *configFingerprint) {
t.Fatal(err)
}
if !cf.SameConfig(got) {
if !cf.sameConfig(got) {
t.Error("expected same config")
}
since := got.Since()
since := got.paddedTimestamp()
if since.After(cf.ts) {
t.Error("since should not be after ts")
t.Error("since should not be after Timestamp")
}
if since.Before(cf.ts.Add(-time.Hour)) {
t.Error("since should not be before ts - hour")
t.Error("since should not be before Timestamp - hour")
}
}
type fuzzTime time.Time
func (fuzzTime) Generate(rand *rand.Rand, _ int) reflect.Value {
// The maximum representable year in RFC 3339 is 9999, so we'll use that as our upper bound.
maxDate := time.Date(9999, 1, 1, 0, 0, 0, 0, time.UTC)
ts := time.Unix(rand.Int63n(maxDate.Unix()), rand.Int63n(int64(time.Second)))
return reflect.ValueOf(fuzzTime(ts))
}
var _ quick.Generator = fuzzTime{}

View File

@ -1,24 +1,23 @@
package backend
import (
"bytes"
"encoding/json"
"github.com/grafana/regexp"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/zoekt"
"golang.org/x/exp/slices"
proto "github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver/protos/sourcegraph/zoekt/configuration/v1"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/schema"
)
// zoektIndexOptions are options which change what we index for a
// ZoektIndexOptions are options which change what we index for a
// repository. Every time a repository is indexed by zoekt this structure is
// fetched. See getIndexOptions in the zoekt codebase.
//
// We only specify a subset of the fields.
type zoektIndexOptions struct {
// We only specify a subset of the fields from zoekt.IndexOptions.
type ZoektIndexOptions struct {
// Name is the Repository Name.
Name string
@ -58,6 +57,53 @@ type zoektIndexOptions struct {
Error string `json:",omitempty"`
}
func (o *ZoektIndexOptions) FromProto(p *proto.ZoektIndexOptions) {
o.Name = p.GetName()
o.RepoID = api.RepoID(p.GetRepoId())
o.Public = p.GetPublic()
o.Fork = p.GetFork()
o.Archived = p.GetArchived()
o.LargeFiles = p.GetLargeFiles()
o.Symbols = p.GetSymbols()
o.Priority = p.GetPriority()
o.DocumentRanksVersion = p.GetDocumentRanksVersion()
o.Error = p.GetError()
branches := make([]zoekt.RepositoryBranch, 0, len(p.GetBranches()))
for _, b := range p.GetBranches() {
branches = append(branches, zoekt.RepositoryBranch{
Name: b.GetName(),
Version: b.GetVersion(),
})
}
o.Branches = branches
}
func (o *ZoektIndexOptions) ToProto() *proto.ZoektIndexOptions {
branches := make([]*proto.ZoektRepositoryBranch, 0, len(o.Branches))
for _, b := range o.Branches {
branches = append(branches, &proto.ZoektRepositoryBranch{
Name: b.Name,
Version: b.Version,
})
}
return &proto.ZoektIndexOptions{
Name: o.Name,
RepoId: int32(o.RepoID),
Public: o.Public,
Fork: o.Fork,
Archived: o.Archived,
LargeFiles: o.LargeFiles,
Symbols: o.Symbols,
Branches: branches,
Priority: o.Priority,
DocumentRanksVersion: o.DocumentRanksVersion,
Error: o.Error,
}
}
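Taken together, ToProto and FromProto give ZoektIndexOptions a symmetric wire representation for the new gRPC configuration endpoint, and the fuzz test further below asserts the round trip is lossless. A minimal sketch of the conversion in isolation; the import path for this backend package is an assumption, not taken from this diff.
package main

import (
	"fmt"

	"github.com/sourcegraph/zoekt"

	// Assumed import path for the package shown in this diff.
	"github.com/sourcegraph/sourcegraph/internal/search/backend"
)

func main() {
	original := backend.ZoektIndexOptions{
		Name:    "github.com/sourcegraph/sourcegraph",
		RepoID:  1,
		Symbols: true,
		Branches: []zoekt.RepositoryBranch{
			{Name: "HEAD", Version: "deadbeef"},
		},
	}

	// Convert to the protobuf message that goes over the wire, then back.
	var converted backend.ZoektIndexOptions
	converted.FromProto(original.ToProto())

	fmt.Println(converted.Name, converted.RepoID, len(converted.Branches)) // github.com/sourcegraph/sourcegraph 1 1
}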
// RepoIndexOptions are the options used by GetIndexOptions for a specific
// repository.
type RepoIndexOptions struct {
@ -100,12 +146,12 @@ func GetIndexOptions(
getRepoIndexOptions getRepoIndexOptsFn,
getSearchContextRevisions func(repoID api.RepoID) ([]string, error),
repos ...api.RepoID,
) []byte {
) []ZoektIndexOptions {
// Limit concurrency to 32 to avoid too many active network requests and
// strain on gitserver (as ported from zoekt-sourcegraph-indexserver). In
// future we want a more intelligent global limit based on scale.
// the future we want a more intelligent global limit based on scale.
sema := make(chan struct{}, 32)
results := make([][]byte, len(repos))
results := make([]ZoektIndexOptions, len(repos))
getSiteConfigRevisions := siteConfigRevisionsRuleFunc(c)
for i := range repos {
@ -121,7 +167,7 @@ func GetIndexOptions(
sema <- struct{}{}
}
return bytes.Join(results, []byte{'\n'})
return results
}
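Since GetIndexOptions now returns structured values instead of newline-delimited JSON, callers get exactly one ZoektIndexOptions per requested repo, in order, with per-repo failures carried in the Error field. A hedged sketch of a caller, written as if it sat alongside GetIndexOptions in this package (so the proto, api, and schema imports above are available); the helper name, stub callbacks, and the idea of mapping straight into proto messages for a gRPC response are illustrative assumptions:
// buildConfigurationResponse is a hypothetical helper showing how the
// slice-returning API composes with ToProto.
func buildConfigurationResponse(conf *schema.SiteConfiguration, repos ...api.RepoID) []*proto.ZoektIndexOptions {
	// Stub callbacks; real callers pass closures over the database and gitserver.
	getRepoIndexOptions := func(repoID api.RepoID) (*RepoIndexOptions, error) {
		return &RepoIndexOptions{
			Name:   "example/repo",
			RepoID: repoID,
			GetVersion: func(branch string) (string, error) {
				return "deadbeef", nil // pretend every branch resolves
			},
		}, nil
	}
	getSearchContextRevs := func(api.RepoID) ([]string, error) { return nil, nil }

	// One ZoektIndexOptions per requested repo, in the same order.
	options := GetIndexOptions(conf, getRepoIndexOptions, getSearchContextRevs, repos...)

	resp := make([]*proto.ZoektIndexOptions, 0, len(options))
	for i := range options {
		// Per-repo failures travel in the Error field; they do not abort the batch.
		resp = append(resp, options[i].ToProto())
	}
	return resp
}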
func getIndexOptions(
@ -130,13 +176,16 @@ func getIndexOptions(
getRepoIndexOptions func(repoID api.RepoID) (*RepoIndexOptions, error),
getSearchContextRevisions func(repoID api.RepoID) ([]string, error),
getSiteConfigRevisions revsRuleFunc,
) []byte {
) ZoektIndexOptions {
opts, err := getRepoIndexOptions(repoID)
if err != nil {
return marshal(&zoektIndexOptions{Error: err.Error()})
return ZoektIndexOptions{
RepoID: repoID,
Error: err.Error(),
}
}
o := &zoektIndexOptions{
o := ZoektIndexOptions{
Name: opts.Name,
RepoID: opts.RepoID,
Public: opts.Public,
@ -162,7 +211,10 @@ func getIndexOptions(
// Add all branches that are referenced by search contexts
revs, err := getSearchContextRevisions(opts.RepoID)
if err != nil {
return marshal(&zoektIndexOptions{Error: err.Error()})
return ZoektIndexOptions{
RepoID: opts.RepoID,
Error: err.Error(),
}
}
for _, rev := range revs {
branches[rev] = struct{}{}
@ -175,7 +227,10 @@ func getIndexOptions(
for branch := range branches {
v, err := opts.GetVersion(branch)
if err != nil {
return marshal(&zoektIndexOptions{Error: err.Error()})
return ZoektIndexOptions{
RepoID: opts.RepoID,
Error: err.Error(),
}
}
// If we failed to resolve a branch, skip it
@ -208,7 +263,7 @@ func getIndexOptions(
o.Branches = o.Branches[:64]
}
return marshal(o)
return o
}
type revsRuleFunc func(*RepoIndexOptions) (revs []string)
@ -259,8 +314,3 @@ func getBoolPtr(b *bool, default_ bool) bool {
}
return *b
}
func marshal(o *zoektIndexOptions) []byte {
b, _ := json.Marshal(o)
return b
}

View File

@ -1,10 +1,9 @@
package backend
import (
"bytes"
"encoding/json"
"fmt"
"testing"
"testing/quick"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/zoekt"
@ -14,6 +13,24 @@ import (
"github.com/sourcegraph/sourcegraph/schema"
)
func TestZoektIndexOptions_RoundTrip(t *testing.T) {
var diff string
f := func(original ZoektIndexOptions) bool {
var converted ZoektIndexOptions
converted.FromProto(original.ToProto())
if diff = cmp.Diff(original, converted); diff != "" {
return false
}
return true
}
if err := quick.Check(f, nil); err != nil {
t.Errorf("ZoektIndexOptions diff (-want +got):\n%s", diff)
}
}
func TestGetIndexOptions(t *testing.T) {
const (
REPO = api.RepoID(iota + 1)
@ -47,14 +64,14 @@ func TestGetIndexOptions(t *testing.T) {
conf schema.SiteConfiguration
searchContextRevs []string
repo api.RepoID
want zoektIndexOptions
want ZoektIndexOptions
}
cases := []caseT{{
name: "default",
conf: schema.SiteConfiguration{},
repo: REPO,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Symbols: true,
@ -66,7 +83,7 @@ func TestGetIndexOptions(t *testing.T) {
name: "public",
conf: schema.SiteConfiguration{},
repo: PUBLIC,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 5,
Name: "repo-05",
Public: true,
@ -79,7 +96,7 @@ func TestGetIndexOptions(t *testing.T) {
name: "fork",
conf: schema.SiteConfiguration{},
repo: FORK,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 6,
Name: "repo-06",
Fork: true,
@ -92,7 +109,7 @@ func TestGetIndexOptions(t *testing.T) {
name: "archived",
conf: schema.SiteConfiguration{},
repo: ARCHIVED,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 7,
Name: "repo-07",
Archived: true,
@ -107,7 +124,7 @@ func TestGetIndexOptions(t *testing.T) {
SearchIndexSymbolsEnabled: boolPtr(false),
},
repo: REPO,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Branches: []zoekt.RepositoryBranch{
@ -120,7 +137,7 @@ func TestGetIndexOptions(t *testing.T) {
SearchLargeFiles: []string{"**/*.jar", "*.bin", "!**/excluded.zip", "\\!included.zip"},
},
repo: REPO,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Symbols: true,
@ -133,7 +150,7 @@ func TestGetIndexOptions(t *testing.T) {
name: "conf index branches",
conf: withBranches(schema.SiteConfiguration{}, REPO, "a", "", "b"),
repo: REPO,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Symbols: true,
@ -151,7 +168,7 @@ func TestGetIndexOptions(t *testing.T) {
},
}},
repo: REPO,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Symbols: true,
@ -171,7 +188,7 @@ func TestGetIndexOptions(t *testing.T) {
},
}},
repo: REPO,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Symbols: true,
@ -187,7 +204,7 @@ func TestGetIndexOptions(t *testing.T) {
conf: schema.SiteConfiguration{},
repo: REPO,
searchContextRevs: []string{"rev1", "rev2"},
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Symbols: true,
@ -201,7 +218,7 @@ func TestGetIndexOptions(t *testing.T) {
name: "with a priority value",
conf: schema.SiteConfiguration{},
repo: PRIORITY,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 4,
Name: "repo-04",
Symbols: true,
@ -214,7 +231,7 @@ func TestGetIndexOptions(t *testing.T) {
name: "with rank",
conf: schema.SiteConfiguration{},
repo: RANKED,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 8,
Name: "repo-08",
Symbols: true,
@ -242,7 +259,7 @@ func TestGetIndexOptions(t *testing.T) {
name: "limit branches",
conf: withBranches(schema.SiteConfiguration{}, REPO, branches...),
repo: REPO,
want: zoektIndexOptions{
want: ZoektIndexOptions{
RepoID: 1,
Name: "repo-01",
Symbols: true,
@ -279,14 +296,10 @@ func TestGetIndexOptions(t *testing.T) {
t.Run(tc.name, func(t *testing.T) {
getSearchContextRevisions := func(api.RepoID) ([]string, error) { return tc.searchContextRevs, nil }
b := GetIndexOptions(&tc.conf, getRepoIndexOptions, getSearchContextRevisions, tc.repo)
got := GetIndexOptions(&tc.conf, getRepoIndexOptions, getSearchContextRevisions, tc.repo)
var got zoektIndexOptions
if err := json.Unmarshal(b, &got); err != nil {
t.Fatal(err)
}
if diff := cmp.Diff(tc.want, got); diff != "" {
want := []ZoektIndexOptions{tc.want}
if diff := cmp.Diff(want, got); diff != "" {
t.Fatal("mismatch (-want, +got):\n", diff)
}
})
@ -351,15 +364,14 @@ func TestGetIndexOptions_getVersion(t *testing.T) {
}, nil
}
b := GetIndexOptions(&conf, getRepoIndexOptions, getSearchContextRevs, 1)
var got zoektIndexOptions
if err := json.Unmarshal(b, &got); err != nil {
t.Fatal(err)
resp := GetIndexOptions(&conf, getRepoIndexOptions, getSearchContextRevs, 1)
if len(resp) != 1 {
t.Fatalf("expected 1 index options returned, got %d", len(resp))
}
got := resp[0]
if got.Error != tc.wantErr {
t.Fatalf("expected error %v, got body %s and error %v", tc.wantErr, b, got.Error)
t.Fatalf("expected error %v, got index options %+v and error %v", tc.wantErr, got, got.Error)
}
if tc.wantErr != "" {
return
@ -378,14 +390,14 @@ func TestGetIndexOptions_batch(t *testing.T) {
}
var (
repos []api.RepoID
want []zoektIndexOptions
want []ZoektIndexOptions
)
for repo := api.RepoID(1); repo < 100; repo++ {
repos = append(repos, repo)
if isError(repo) {
want = append(want, zoektIndexOptions{Error: "error"})
want = append(want, ZoektIndexOptions{Error: "error"})
} else {
want = append(want, zoektIndexOptions{
want = append(want, ZoektIndexOptions{
Symbols: true,
Branches: []zoekt.RepositoryBranch{
{Name: "HEAD", Version: fmt.Sprintf("!HEAD-%d", repo)},
@ -406,14 +418,8 @@ func TestGetIndexOptions_batch(t *testing.T) {
getSearchContextRevs := func(api.RepoID) ([]string, error) { return nil, nil }
b := GetIndexOptions(&schema.SiteConfiguration{}, getRepoIndexOptions, getSearchContextRevs, repos...)
dec := json.NewDecoder(bytes.NewReader(b))
got := make([]zoektIndexOptions, len(repos))
for i := range repos {
if err := dec.Decode(&got[i]); err != nil {
t.Fatal(err)
}
}
got := GetIndexOptions(&schema.SiteConfiguration{}, getRepoIndexOptions, getSearchContextRevs, repos...)
if diff := cmp.Diff(want, got); diff != "" {
t.Fatal("mismatch (-want, +got):\n", diff)
}