Rewrite precise-code-intel-api-server in Go (#9703)

This commit is contained in:
Eric Fritz 2020-04-24 08:22:14 -05:00 committed by GitHub
parent bde46f4ae8
commit d49aee8694
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
92 changed files with 9932 additions and 4 deletions

1
.github/CODEOWNERS vendored
View File

@ -224,6 +224,7 @@ Dockerfile @sourcegraph/distribution
# Precise code intel
/cmd/precise-code-intel/ @sourcegraph/code-intel
/cmd/precise-code-intel-api-server/ @sourcegraph/code-intel
/cmd/precise-code-intel-bundle-manager/ @sourcegraph/code-intel
/internal/lsif @sourcegraph/code-intel
/internal/codeintel @sourcegraph/code-intel

View File

@ -0,0 +1,20 @@
FROM sourcegraph/alpine:3.10@sha256:4d05cd5669726fc38823e92320659a6d1ef7879e62268adec5df658a0bacf65c
ARG COMMIT_SHA="unknown"
ARG DATE="unknown"
ARG VERSION="unknown"
LABEL org.opencontainers.image.revision=${COMMIT_SHA}
LABEL org.opencontainers.image.created=${DATE}
LABEL org.opencontainers.image.version=${VERSION}
LABEL com.sourcegraph.github.url=https://github.com/sourcegraph/sourcegraph/commit/${COMMIT_SHA}
# hadolint ignore=DL3018
RUN apk update && apk add --no-cache \
    tini
COPY ./precise-code-intel-api-server /usr/local/bin/precise-code-intel-api-server
EXPOSE 3186
# NOTE: the Go toolchain env var is GO111MODULE (singular); the previous
# GO111MODULES spelling was a typo and had no effect.
ENV GO111MODULE=on LANG=en_US.utf8 LOG_LEVEL=debug
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/precise-code-intel-api-server"]

View File

@ -0,0 +1,28 @@
#!/usr/bin/env bash
# This script builds the precise-code-intel-api-server docker image.

# Fail fast (and on unset variables) before doing anything else, so a failed
# cd cannot allow the build to continue from the wrong directory.
set -eu
cd "$(dirname "${BASH_SOURCE[0]}")/../.."

# Scratch directory holding the linux binary handed to docker build; removed on exit.
OUTPUT=$(mktemp -d -t sgdockerbuild_XXXXXXX)
cleanup() {
  rm -rf "$OUTPUT"
}
trap cleanup EXIT

# Environment for building linux binaries
export GO111MODULE=on
export GOARCH=amd64
export GOOS=linux

echo "--- go build"
pkg="github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server"
go build -trimpath -ldflags "-X github.com/sourcegraph/sourcegraph/internal/version.version=$VERSION" -buildmode exe -tags dist -o "$OUTPUT/$(basename "$pkg")" "$pkg"

echo "--- docker build"
docker build -f cmd/precise-code-intel-api-server/Dockerfile -t "$IMAGE" "$OUTPUT" \
  --progress=plain \
  --build-arg COMMIT_SHA \
  --build-arg DATE \
  --build-arg VERSION

View File

@ -0,0 +1,32 @@
package main
import (
"log"
"time"
"github.com/sourcegraph/sourcegraph/internal/env"
)
// Raw configuration values read from the environment at package init; they
// are validated by the mustGet / mustParseInterval helpers below.
var (
	rawBundleManagerURL = env.Get("PRECISE_CODE_INTEL_BUNDLE_MANAGER_URL", "", "HTTP address for internal LSIF bundle manager server.")
	rawJanitorInterval  = env.Get("PRECISE_CODE_INTEL_JANITOR_INTERVAL", "1m", "Interval between cleanup runs.")
)
// mustGet returns the given raw value if it is non-empty, and fatally logs
// otherwise. Use it for configuration that has no usable default.
func mustGet(rawValue, name string) string {
	if rawValue == "" {
		log.Fatalf("invalid value %q for %s: no value supplied", rawValue, name)
	}
	return rawValue
}
// mustParseInterval parses the given raw value as a time.Duration, fatally
// logging if the value is malformed.
func mustParseInterval(rawValue, name string) time.Duration {
	d, err := time.ParseDuration(rawValue)
	if err != nil {
		log.Fatalf("invalid duration %q for %s: %s", rawValue, name, err)
	}
	return d
}

View File

@ -0,0 +1,44 @@
package api
import (
"context"
"errors"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// CodeIntelAPI is the main interface into precise code intelligence data.
type CodeIntelAPI interface {
	// FindClosestDumps returns the set of dumps that can most accurately answer code intelligence
	// queries for the given file. These dump IDs should be subsequently passed to invocations of
	// Definitions, References, and Hover.
	FindClosestDumps(ctx context.Context, repositoryID int, commit, file string) ([]db.Dump, error)

	// Definitions returns the list of source locations that define the symbol at the given position.
	// This may include remote definitions if the remote repository is also indexed.
	Definitions(ctx context.Context, file string, line, character, uploadID int) ([]ResolvedLocation, error)

	// References returns the list of source locations that reference the symbol at the given position.
	// This may include references from other dumps and repositories.
	References(ctx context.Context, repositoryID int, commit string, limit int, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error)

	// Hover returns the hover text and range for the symbol at the given position.
	Hover(ctx context.Context, file string, line, character, uploadID int) (string, bundles.Range, bool, error)
}

// codeIntelAPI is the default CodeIntelAPI implementation, backed by the
// database layer and the bundle manager client.
type codeIntelAPI struct {
	db                  db.DB
	bundleManagerClient bundles.BundleManagerClient
}

// Compile-time check that codeIntelAPI satisfies CodeIntelAPI.
var _ CodeIntelAPI = &codeIntelAPI{}

// ErrMissingDump occurs when an upload ID does not resolve to a known dump.
var ErrMissingDump = errors.New("no dump")

// New creates a CodeIntelAPI backed by the given database and bundle manager client.
func New(db db.DB, bundleManagerClient bundles.BundleManagerClient) CodeIntelAPI {
	return &codeIntelAPI{
		db:                  db,
		bundleManagerClient: bundleManagerClient,
	}
}

View File

@ -0,0 +1,141 @@
package api
import (
"bytes"
"compress/gzip"
"encoding/json"
"io/ioutil"
"math"
"unicode"
"unicode/utf16"
)
// decodeAndTestFilter decodes the filter and determines if identifier is a member of the underlying
// set. Returns an error if the encoded filter is malformed (improperly compressed or invalid JSON).
func decodeAndTestFilter(encodedFilter []byte, identifier string) (bool, error) {
	// The serialized filter is gzipped JSON produced by the TypeScript
	// implementation: a flat array of 32-bit buckets plus the number of
	// hash functions used when the filter was built.
	payload := struct {
		Buckets          []int `json:"buckets"`
		NumHashFunctions int32 `json:"numHashFunctions"`
	}{}

	r, err := gzip.NewReader(bytes.NewReader(encodedFilter))
	if err != nil {
		return false, err
	}
	f, err := ioutil.ReadAll(r)
	if err != nil {
		return false, err
	}
	if err := json.Unmarshal(f, &payload); err != nil {
		return false, err
	}

	// Each bucket holds 32 bits of the filter, so the total capacity is
	// len(buckets)*32 bits. (The previous math.Ceil of an integer-valued
	// float was a no-op and has been removed.) Hash the identifier once per
	// configured hash function; each hash selects one bit position, and the
	// identifier is a member (modulo false positives) only if every selected
	// bit is set.
	locations := hashLocations(
		identifier,
		int32(len(payload.Buckets)*32),
		payload.NumHashFunctions,
	)
	for _, b := range locations {
		if (payload.Buckets[int(math.Floor(float64(b)/32))] & (1 << (b % 32))) == 0 {
			return false, nil
		}
	}

	return true, nil
}
// The following code is a port of bloomfilter 0.0.18 from npm. We chose not to recreate all the bloom
// filters stored in Postgres because we want a transitionary period where both services (TS and Go) can
// exist and be behaviorally equivalent.
//
// There are not a large number of differences, but there are some subtle ones around overflow behavior
// and UTF-8/16 encoding. The accompanying test suite uses filters generated by the original TypeScript
// code to ensure that they can be read without a migration step. We may want to run a migration step to
// simplify this dependency, but it is in no way urgent.
//
// The original code available at https://github.com/jasondavies/bloomfilter.js.

// hashLocations returns k bit positions for v, each in the range [0, m),
// via double hashing: two FNV-1a hashes seed a linear sequence of locations.
//
// Original notes:
// See http://willwhim.wpengine.com/2011/09/03/producing-n-hash-functions-by-hashing-only-once/.
func hashLocations(v string, m, k int32) []int32 {
	a := fowlerNollVo1a(v, 0)
	b := fowlerNollVo1a(v, 1576284489) // The seed value is chosen randomly
	x := a % int32(m)
	r := make([]int32, k)
	for i := int32(0); i < k; i++ {
		// Go's % can yield a negative remainder; shift into [0, m) so
		// callers can index buckets directly.
		if x < 0 {
			r[i] = x + int32(m)
		} else {
			r[i] = x
		}
		x = (x + b) % int32(m)
	}
	return r
}
// fowlerNollVo1a hashes v one byte at a time over its UTF-16 code units:
// for each code unit the high byte (when non-zero) is folded in before the
// low byte. The int32 conversions intentionally truncate to mirror the
// 32-bit bitwise arithmetic of the original JavaScript implementation.
//
// Original notes:
// Fowler/Noll/Vo hashing. This function optionally takes a seed value that is incorporated
// into the offset basis. Almost any choice of offset basis will serve so long as it is non-zero,
// according to http://www.isthe.com/chongo/tech/comp/fnv/index.html.
func fowlerNollVo1a(v string, seed int32) int32 {
	// 2166136261 is the 32-bit FNV offset basis.
	q := 2166136261
	a := int64(int32(q) ^ seed)
	for _, r := range utf16Runes(v) {
		c := int64(r)
		if d := c & 0xff00; d != 0 {
			a = (fowlerNollVoMultiply(int32(a ^ int64(d>>8))))
		}
		a = fowlerNollVoMultiply(int32(a) ^ int32(c&0xff))
	}
	return fowlerNollVoMix(int32(a))
}
// fowlerNollVoMultiply multiplies a by the 32-bit FNV prime (16777619) using
// the same shift-and-add decomposition as the original JavaScript port. Each
// shifted term wraps in int32 before being widened to int64, which must be
// preserved exactly for behavioral equivalence with the TypeScript filters.
//
// Original notes:
// Equivalent to `a * 16777619 mod 2**32`.
func fowlerNollVoMultiply(a int32) int64 {
	// 16777619 = 2^0 + 2^1 + 2^4 + 2^7 + 2^8 + 2^24.
	sum := int64(a)
	for _, shift := range []uint{1, 4, 7, 8, 24} {
		sum += int64(a << shift)
	}
	return sum
}
// fowlerNollVoMix applies the post-hash mixing function to improve the bit
// dispersion of the raw FNV hash. The steps must run in exactly this order,
// and the shifts deliberately wrap in int32.
//
// Original notes:
// See https://web.archive.org/web/20131019013225/http://home.comcast.net/~bretm/hash/6.html.
func fowlerNollVoMix(a int32) int32 {
	a = a + (a << 13)
	a = a ^ int32(uint32(a)>>7)
	a = a + (a << 3)
	a = a ^ int32(uint32(a)>>17)
	a = a + (a << 5)
	return a
}
// utf16Runes converts the given string into a slice of UTF-16 encoded runes.
// Runes inside the basic multilingual plane are passed through unchanged;
// runes outside it are replaced by the two runes of their surrogate pair.
//
// This is a necessary step as existing filters were created in TypeScript,
// which treated strings as encoded in UTF-16, not UTF-8. Without this
// translation we won't be able to retrieve identifiers containing runes
// outside the basic multilingual plane.
func utf16Runes(v string) []rune {
	var runes []rune
	for _, r := range v {
		// EncodeRune reports a rune that needs no surrogate pair by
		// returning U+FFFD for both halves.
		hi, lo := utf16.EncodeRune(r)
		if hi != unicode.ReplacementChar || lo != unicode.ReplacementChar {
			runes = append(runes, hi, lo)
		} else {
			runes = append(runes, r)
		}
	}
	return runes
}

View File

@ -0,0 +1,54 @@
package api
import (
"fmt"
"testing"
)
// TestTestTypeScriptGeneratedBloomFilters ensures that filters serialized by
// the original TypeScript implementation can be read by the Go port: every
// word used to build a filter must test positive, and every word from a
// disjoint corpus must test negative.
func TestTestTypeScriptGeneratedBloomFilters(t *testing.T) {
	type filterTestCase struct {
		filterFile  string
		includeFile string
		excludeFile string
	}

	// Filters generated from the lorem-ipsum corpus at every combination of
	// filter size and hash function count, plus one filter whose members
	// fall outside the basic multilingual plane.
	var testCases []filterTestCase
	for _, size := range []string{"64kb", "32kb", "96kb", "128kb"} {
		for _, numHashFunctions := range []string{"16", "08", "24", "32"} {
			testCases = append(testCases, filterTestCase{
				filterFile:  size + "-" + numHashFunctions,
				includeFile: "lorem-ipsum",
				excludeFile: "corporate-ipsum",
			})
		}
	}
	testCases = append(testCases, filterTestCase{filterFile: "emojis", includeFile: "emojis", excludeFile: "lorem-ipsum"})

	for _, testCase := range testCases {
		t.Run(fmt.Sprintf("filter=%s", testCase.filterFile), func(t *testing.T) {
			for _, v := range readTestWords(t, testCase.includeFile) {
				if exists, err := decodeAndTestFilter(readTestFilter(t, "stress", testCase.filterFile), v); err != nil {
					t.Fatalf("unexpected error decoding filter: %s", err)
				} else if !exists {
					t.Errorf("expected %s to be in bloom filter", v)
				}
			}
			for _, v := range readTestWords(t, testCase.excludeFile) {
				if exists, err := decodeAndTestFilter(readTestFilter(t, "stress", testCase.filterFile), v); err != nil {
					t.Fatalf("unexpected error decoding filter: %s", err)
				} else if exists {
					t.Errorf("expected %s not to be in bloom filter", v)
				}
			}
		})
	}
}

View File

@ -0,0 +1,93 @@
package api
import (
"context"
"encoding/base64"
"encoding/json"
"strings"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// Cursor holds the complete state necessary to page through a reference result set.
// Only the fields relevant to the current phase are meaningful; the comment on each
// field lists the phases that use it.
type Cursor struct {
	Phase                  string                // common
	DumpID                 int                   // common
	Path                   string                // same-dump/definition-monikers
	Line                   int                   // same-dump
	Character              int                   // same-dump
	Monikers               []bundles.MonikerData // same-dump/definition-monikers
	SkipResults            int                   // same-dump/definition-monikers
	Identifier             string                // same-repo/remote-repo
	Scheme                 string                // same-repo/remote-repo
	Name                   string                // same-repo/remote-repo
	Version                string                // same-repo/remote-repo
	DumpIDs                []int                 // same-repo/remote-repo
	TotalDumpsWhenBatching int                   // same-repo/remote-repo
	SkipDumpsWhenBatching  int                   // same-repo/remote-repo
	SkipDumpsInBatch       int                   // same-repo/remote-repo
	SkipResultsInDump      int                   // same-repo/remote-repo
}

// EncodeCursor returns an encoding of the given cursor suitable for a URL.
func EncodeCursor(cursor Cursor) string {
	// Marshaling a Cursor cannot fail: it contains only strings, ints, and
	// slices thereof, so the error is deliberately discarded.
	rawEncoded, _ := json.Marshal(cursor)
	return base64.RawURLEncoding.EncodeToString(rawEncoded)
}

// decodeCursor is the inverse of EncodeCursor.
func decodeCursor(rawEncoded string) (Cursor, error) {
	raw, err := base64.RawURLEncoding.DecodeString(rawEncoded)
	if err != nil {
		return Cursor{}, err
	}

	// raw is already a []byte; the previous []byte(raw) conversion was a
	// redundant copy.
	var cursor Cursor
	err = json.Unmarshal(raw, &cursor)
	return cursor, err
}
// DecodeOrCreateCursor decodes and returns the raw cursor, or creates a new initial page cursor
// if a raw cursor is not supplied.
//
// NOTE(review): the db and bundle client calls below use context.Background()
// rather than a caller-supplied ctx, so cancellation does not propagate —
// consider threading a context through; confirm with callers.
func DecodeOrCreateCursor(path string, line, character, uploadID int, rawCursor string, db db.DB, bundleManagerClient bundles.BundleManagerClient) (Cursor, error) {
	// A non-empty raw cursor wins: resume paging from the decoded state.
	if rawCursor != "" {
		cursor, err := decodeCursor(rawCursor)
		if err != nil {
			return Cursor{}, err
		}
		return cursor, nil
	}

	// First page: resolve the dump and gather the monikers at the requested
	// position to seed the same-dump phase.
	dump, exists, err := db.GetDumpByID(context.Background(), uploadID)
	if err != nil {
		return Cursor{}, err
	}
	if !exists {
		return Cursor{}, ErrMissingDump
	}

	// Strip the dump root so the path matches what the bundle stores.
	pathInBundle := strings.TrimPrefix(path, dump.Root)
	bundleClient := bundleManagerClient.BundleClient(dump.ID)
	rangeMonikers, err := bundleClient.MonikersByPosition(context.Background(), pathInBundle, line, character)
	if err != nil {
		return Cursor{}, err
	}

	// Flatten the per-range moniker lists into a single ordered list.
	var flattened []bundles.MonikerData
	for _, monikers := range rangeMonikers {
		flattened = append(flattened, monikers...)
	}

	return Cursor{
		Phase:       "same-dump",
		DumpID:      dump.ID,
		Path:        pathInBundle,
		Line:        line,
		Character:   character,
		Monikers:    flattened,
		SkipResults: 0,
	}, nil
}

View File

@ -0,0 +1,113 @@
package api
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/mocks"
)
// TestSerializationRoundTrip ensures EncodeCursor and decodeCursor are exact
// inverses for a cursor with every field populated.
func TestSerializationRoundTrip(t *testing.T) {
	c := Cursor{
		Phase:     "same-repo",
		DumpID:    42,
		Path:      "/foo/bar/baz.go",
		Line:      10,
		Character: 50,
		Monikers: []bundles.MonikerData{
			{Kind: "k1", Scheme: "s1", Identifier: "i1", PackageInformationID: "pid1"},
			{Kind: "k2", Scheme: "s2", Identifier: "i2", PackageInformationID: "pid2"},
			{Kind: "k3", Scheme: "s3", Identifier: "i3", PackageInformationID: "pid3"},
		},
		SkipResults:            1,
		Identifier:             "x",
		Scheme:                 "gomod",
		Name:                   "leftpad",
		Version:                "0.1.0",
		DumpIDs:                []int{1, 2, 3, 4, 5},
		TotalDumpsWhenBatching: 5,
		SkipDumpsWhenBatching:  4,
		SkipDumpsInBatch:       3,
		SkipResultsInDump:      2,
	}

	roundtripped, err := decodeCursor(EncodeCursor(c))
	if err != nil {
		t.Fatalf("unexpected error decoding cursor: %s", err)
	}
	if diff := cmp.Diff(c, roundtripped); diff != "" {
		t.Errorf("unexpected cursor (-want +got):\n%s", diff)
	}
}
// TestDecodeOrCreateCursor ensures that, when no raw cursor is supplied, a
// fresh same-dump cursor is built from the dump and the monikers at the
// requested position (with the dump root stripped from the path).
func TestDecodeOrCreateCursor(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()

	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient})
	setMockBundleClientMonikersByPosition(t, mockBundleClient, "main.go", 10, 20, [][]bundles.MonikerData{{testMoniker1}, {testMoniker2}})

	expectedCursor := Cursor{
		Phase:     "same-dump",
		DumpID:    42,
		Path:      "main.go",
		Line:      10,
		Character: 20,
		Monikers:  []bundles.MonikerData{testMoniker1, testMoniker2},
	}
	if cursor, err := DecodeOrCreateCursor("sub1/main.go", 10, 20, 42, "", mockDB, mockBundleManagerClient); err != nil {
		t.Fatalf("unexpected error decoding cursor: %s", err)
	} else if diff := cmp.Diff(expectedCursor, cursor); diff != "" {
		t.Errorf("unexpected cursor (-want +got):\n%s", diff)
	}
}
// TestDecodeOrCreateCursorUnknownDump ensures that an unresolvable upload ID
// surfaces ErrMissingDump rather than a fabricated cursor.
func TestDecodeOrCreateCursorUnknownDump(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	setMockDBGetDumpByID(t, mockDB, nil)

	if _, err := DecodeOrCreateCursor("sub1/main.go", 10, 20, 42, "", mockDB, mockBundleManagerClient); err != ErrMissingDump {
		// Fixed stray space in the format string ("have =%q" -> "have=%q").
		t.Fatalf("unexpected error decoding cursor. want=%q have=%q", ErrMissingDump, err)
	}
}
// TestDecodeOrCreateCursorExisting ensures that a supplied raw cursor is
// decoded as-is and that neither the db nor the bundle manager is consulted.
func TestDecodeOrCreateCursorExisting(t *testing.T) {
	expectedCursor := Cursor{
		Phase:     "same-repo",
		DumpID:    42,
		Path:      "/foo/bar/baz.go",
		Line:      10,
		Character: 50,
		Monikers: []bundles.MonikerData{
			{Kind: "k1", Scheme: "s1", Identifier: "i1", PackageInformationID: "pid1"},
			{Kind: "k2", Scheme: "s2", Identifier: "i2", PackageInformationID: "pid2"},
			{Kind: "k3", Scheme: "s3", Identifier: "i3", PackageInformationID: "pid3"},
		},
		SkipResults:            1,
		Identifier:             "x",
		Scheme:                 "gomod",
		Name:                   "leftpad",
		Version:                "0.1.0",
		DumpIDs:                []int{1, 2, 3, 4, 5},
		TotalDumpsWhenBatching: 5,
		SkipDumpsWhenBatching:  4,
		SkipDumpsInBatch:       3,
		SkipResultsInDump:      2,
	}

	// No hooks are configured on the mocks: any unexpected call would
	// exercise default (zero-value) behavior rather than the fixture above.
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()

	if cursor, err := DecodeOrCreateCursor("", 0, 0, 0, EncodeCursor(expectedCursor), mockDB, mockBundleManagerClient); err != nil {
		t.Fatalf("unexpected error decoding cursor: %s", err)
	} else if diff := cmp.Diff(expectedCursor, cursor); diff != "" {
		t.Errorf("unexpected cursor (-want +got):\n%s", diff)
	}
}

View File

@ -0,0 +1,77 @@
package api
import (
"context"
"strings"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// Definitions returns the list of source locations that define the symbol at the given position.
// This may include remote definitions if the remote repository is also indexed. ErrMissingDump
// is returned when the upload ID does not resolve to a known dump.
func (api *codeIntelAPI) Definitions(ctx context.Context, file string, line, character, uploadID int) ([]ResolvedLocation, error) {
	dump, exists, err := api.db.GetDumpByID(ctx, uploadID)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, ErrMissingDump
	}

	// Queries against a bundle use paths relative to the dump root.
	return api.definitionsRaw(ctx, dump, api.bundleManagerClient.BundleClient(dump.ID), strings.TrimPrefix(file, dump.Root), line, character)
}
// definitionsRaw returns the definition locations for the symbol at the given position in the
// given bundle. If the position has no local definitions, the monikers attached to the range
// are consulted: import monikers are resolved in their defining dump, and all other monikers
// are searched in this dump's own definitions table.
func (api *codeIntelAPI) definitionsRaw(ctx context.Context, dump db.Dump, bundleClient bundles.BundleClient, pathInBundle string, line, character int) ([]ResolvedLocation, error) {
	locations, err := bundleClient.Definitions(ctx, pathInBundle, line, character)
	if err != nil {
		return nil, err
	}
	if len(locations) > 0 {
		return resolveLocationsWithDump(dump, locations), nil
	}

	// Use the request context (previously context.Background()) so that
	// caller cancellation propagates to the bundle manager requests.
	rangeMonikers, err := bundleClient.MonikersByPosition(ctx, pathInBundle, line, character)
	if err != nil {
		return nil, err
	}

	for _, monikers := range rangeMonikers {
		for _, moniker := range monikers {
			if moniker.Kind == "import" {
				locations, _, err := lookupMoniker(api.db, api.bundleManagerClient, dump.ID, pathInBundle, "definitions", moniker, 0, 0)
				if err != nil {
					return nil, err
				}
				if len(locations) > 0 {
					return locations, nil
				}
			} else {
				// This symbol was not imported from another database. We search the definitions
				// table of our own database in case there was a definition that wasn't properly
				// attached to a result set but did have the correct monikers attached.
				locations, _, err := bundleClient.MonikerResults(ctx, "definitions", moniker.Scheme, moniker.Identifier, 0, 0)
				if err != nil {
					return nil, err
				}
				if len(locations) > 0 {
					return resolveLocationsWithDump(dump, locations), nil
				}
			}
		}
	}

	return nil, nil
}
// definitionRaw returns the first definition for the symbol at the given position, along with
// a boolean indicating whether any definition was found.
func (api *codeIntelAPI) definitionRaw(ctx context.Context, dump db.Dump, bundleClient bundles.BundleClient, pathInBundle string, line, character int) (ResolvedLocation, bool, error) {
	resolved, err := api.definitionsRaw(ctx, dump, bundleClient, pathInBundle, line, character)
	if err != nil {
		return ResolvedLocation{}, false, err
	}
	if len(resolved) == 0 {
		return ResolvedLocation{}, false, nil
	}
	return resolved[0], true, nil
}

View File

@ -0,0 +1,116 @@
package api
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/mocks"
)
// TestDefinitions ensures locations reported by the bundle are resolved
// against the dump root before being returned.
func TestDefinitions(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()

	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient})
	setMockBundleClientDefinitions(t, mockBundleClient, "main.go", 10, 50, []bundles.Location{
		{DumpID: 42, Path: "foo.go", Range: testRange1},
		{DumpID: 42, Path: "bar.go", Range: testRange2},
		{DumpID: 42, Path: "baz.go", Range: testRange3},
	})

	api := New(mockDB, mockBundleManagerClient)
	definitions, err := api.Definitions(context.Background(), "sub1/main.go", 10, 50, 42)
	if err != nil {
		// This is the success path; the message previously read "expected error".
		t.Fatalf("unexpected error getting definitions: %s", err)
	}

	expectedDefinitions := []ResolvedLocation{
		{Dump: testDump1, Path: "sub1/foo.go", Range: testRange1},
		{Dump: testDump1, Path: "sub1/bar.go", Range: testRange2},
		{Dump: testDump1, Path: "sub1/baz.go", Range: testRange3},
	}
	if diff := cmp.Diff(expectedDefinitions, definitions); diff != "" {
		t.Errorf("unexpected definitions (-want +got):\n%s", diff)
	}
}
// TestDefinitionsUnknownDump ensures an unresolvable upload ID surfaces
// ErrMissingDump.
func TestDefinitionsUnknownDump(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	setMockDBGetDumpByID(t, mockDB, nil)

	api := New(mockDB, mockBundleManagerClient)
	if _, err := api.Definitions(context.Background(), "sub1/main.go", 10, 50, 25); err != ErrMissingDump {
		t.Fatalf("unexpected error getting definitions. want=%q have=%q", ErrMissingDump, err)
	}
}
// TestDefinitionViaSameDumpMoniker ensures that when the position has no local
// definitions, a non-import moniker is used to search this dump's own
// definitions table.
func TestDefinitionViaSameDumpMoniker(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()

	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient})
	setMockBundleClientDefinitions(t, mockBundleClient, "main.go", 10, 50, nil)
	setMockBundleClientMonikersByPosition(t, mockBundleClient, "main.go", 10, 50, [][]bundles.MonikerData{{testMoniker2}})
	setMockBundleClientMonikerResults(t, mockBundleClient, "definitions", "gomod", "pad", 0, 0, []bundles.Location{
		{DumpID: 42, Path: "foo.go", Range: testRange1},
		{DumpID: 42, Path: "bar.go", Range: testRange2},
		{DumpID: 42, Path: "baz.go", Range: testRange3},
	}, 3)

	api := New(mockDB, mockBundleManagerClient)
	definitions, err := api.Definitions(context.Background(), "sub1/main.go", 10, 50, 42)
	if err != nil {
		// This is the success path; the message previously read "expected error".
		t.Fatalf("unexpected error getting definitions: %s", err)
	}

	expectedDefinitions := []ResolvedLocation{
		{Dump: testDump1, Path: "sub1/foo.go", Range: testRange1},
		{Dump: testDump1, Path: "sub1/bar.go", Range: testRange2},
		{Dump: testDump1, Path: "sub1/baz.go", Range: testRange3},
	}
	if diff := cmp.Diff(expectedDefinitions, definitions); diff != "" {
		t.Errorf("unexpected definitions (-want +got):\n%s", diff)
	}
}
// TestDefinitionViaRemoteDumpMoniker ensures that an import moniker is
// resolved through its package information to the defining dump, and that
// locations are resolved against that dump's root.
func TestDefinitionViaRemoteDumpMoniker(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient1 := mocks.NewMockBundleClient()
	mockBundleClient2 := mocks.NewMockBundleClient()

	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1, 50: testDump2})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient1, 50: mockBundleClient2})
	setMockBundleClientDefinitions(t, mockBundleClient1, "main.go", 10, 50, nil)
	setMockBundleClientMonikersByPosition(t, mockBundleClient1, "main.go", 10, 50, [][]bundles.MonikerData{{testMoniker1}})
	setMockBundleClientPackageInformation(t, mockBundleClient1, "main.go", "1234", testPackageInformation)
	setMockDBGetPackage(t, mockDB, "gomod", "leftpad", "0.1.0", testDump2, true)
	setMockBundleClientMonikerResults(t, mockBundleClient2, "definitions", "gomod", "pad", 0, 0, []bundles.Location{
		{DumpID: 50, Path: "foo.go", Range: testRange1},
		{DumpID: 50, Path: "bar.go", Range: testRange2},
		{DumpID: 50, Path: "baz.go", Range: testRange3},
	}, 15)

	api := New(mockDB, mockBundleManagerClient)
	definitions, err := api.Definitions(context.Background(), "sub1/main.go", 10, 50, 42)
	if err != nil {
		// This is the success path; the message previously read "expected error".
		t.Fatalf("unexpected error getting definitions: %s", err)
	}

	expectedDefinitions := []ResolvedLocation{
		{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
		{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
		{Dump: testDump2, Path: "sub2/baz.go", Range: testRange3},
	}
	if diff := cmp.Diff(expectedDefinitions, definitions); diff != "" {
		t.Errorf("unexpected definitions (-want +got):\n%s", diff)
	}
}

View File

@ -0,0 +1,33 @@
package api
import (
"context"
"strings"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// FindClosestDumps returns the set of dumps that can most accurately answer code intelligence
// queries for the given file. These dump IDs should be subsequently passed to invocations of
// Definitions, References, and Hover. Candidate dumps whose bundle does not actually contain
// the file are filtered out.
func (api *codeIntelAPI) FindClosestDumps(ctx context.Context, repositoryID int, commit, file string) ([]db.Dump, error) {
	candidates, err := api.db.FindClosestDumps(ctx, repositoryID, commit, file)
	if err != nil {
		return nil, err
	}

	var dumps []db.Dump
	for _, dump := range candidates {
		// The bundle stores paths relative to the dump root.
		pathInBundle := strings.TrimPrefix(file, dump.Root)
		exists, err := api.bundleManagerClient.BundleClient(dump.ID).Exists(ctx, pathInBundle)
		if err != nil {
			return nil, err
		}
		if exists {
			dumps = append(dumps, dump)
		}
	}

	return dumps, nil
}

View File

@ -0,0 +1,51 @@
package api
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/mocks"
)
// TestFindClosestDatabase ensures that candidate dumps whose bundles do not
// contain the queried file are filtered from the result.
func TestFindClosestDatabase(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient1 := mocks.NewMockBundleClient()
	mockBundleClient2 := mocks.NewMockBundleClient()
	mockBundleClient3 := mocks.NewMockBundleClient()
	mockBundleClient4 := mocks.NewMockBundleClient()

	setMockDBFindClosestDumps(t, mockDB, 42, testCommit, "s1/main.go", []db.Dump{
		{ID: 50, Root: "s1/"},
		{ID: 51, Root: "s1/"},
		{ID: 52, Root: "s1/"},
		{ID: 53, Root: "s2/"},
	})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{
		50: mockBundleClient1,
		51: mockBundleClient2,
		52: mockBundleClient3,
		53: mockBundleClient4,
	})
	// Dumps 51 and 53 report the file missing; note dump 53's root "s2/" is
	// not a prefix of the query path, so its bundle sees the full path.
	setMockBundleClientExists(t, mockBundleClient1, "main.go", true)
	setMockBundleClientExists(t, mockBundleClient2, "main.go", false)
	setMockBundleClientExists(t, mockBundleClient3, "main.go", true)
	setMockBundleClientExists(t, mockBundleClient4, "s1/main.go", false)

	api := New(mockDB, mockBundleManagerClient)
	dumps, err := api.FindClosestDumps(context.Background(), 42, testCommit, "s1/main.go")
	if err != nil {
		t.Fatalf("unexpected error finding closest database: %s", err)
	}

	expected := []db.Dump{
		{ID: 50, Root: "s1/"},
		{ID: 52, Root: "s1/"},
	}
	if diff := cmp.Diff(expected, dumps); diff != "" {
		t.Errorf("unexpected dumps (-want +got):\n%s", diff)
	}
}

View File

@ -0,0 +1,275 @@
package api
import (
"context"
"encoding/hex"
"fmt"
"io/ioutil"
"strings"
"testing"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/mocks"
)
// Shared fixtures for the tests in this package: a stable commit, dumps with
// distinct roots, monikers, package information, and a handful of ranges.
var (
	testCommit             = "0000000000000000000000000000000000000000"
	testDump1              = db.Dump{ID: 42, Root: "sub1/"}
	testDump2              = db.Dump{ID: 50, Root: "sub2/"}
	testDump3              = db.Dump{ID: 51, Root: "sub3/"}
	testDump4              = db.Dump{ID: 52, Root: "sub4/"}
	testMoniker1           = bundles.MonikerData{Kind: "import", Scheme: "gomod", Identifier: "pad", PackageInformationID: "1234"}
	testMoniker2           = bundles.MonikerData{Kind: "export", Scheme: "gomod", Identifier: "pad", PackageInformationID: "1234"}
	testMoniker3           = bundles.MonikerData{Kind: "export", Scheme: "gomod", Identifier: "pad"}
	testPackageInformation = bundles.PackageInformationData{Name: "leftpad", Version: "0.1.0"}

	testRange1 = bundles.Range{
		Start: bundles.Position{Line: 10, Character: 50},
		End:   bundles.Position{Line: 10, Character: 55},
	}
	testRange2 = bundles.Range{
		Start: bundles.Position{Line: 11, Character: 50},
		End:   bundles.Position{Line: 11, Character: 55},
	}
	testRange3 = bundles.Range{
		Start: bundles.Position{Line: 12, Character: 50},
		End:   bundles.Position{Line: 12, Character: 55},
	}
	testRange4 = bundles.Range{
		Start: bundles.Position{Line: 13, Character: 50},
		End:   bundles.Position{Line: 13, Character: 55},
	}
	testRange5 = bundles.Range{
		Start: bundles.Position{Line: 14, Character: 50},
		End:   bundles.Position{Line: 14, Character: 55},
	}
)
func setMockDBGetDumpByID(t *testing.T, mockDB *mocks.MockDB, dumps map[int]db.Dump) {
mockDB.GetDumpByIDFunc.SetDefaultHook(func(ctx context.Context, id int) (db.Dump, bool, error) {
dump, ok := dumps[id]
return dump, ok, nil
})
}
func setMockDBGetPackage(t *testing.T, mockDB *mocks.MockDB, expectedScheme, expectedName, expectedVersion string, dump db.Dump, exists bool) {
mockDB.GetPackageFunc.SetDefaultHook(func(ctx context.Context, scheme, name, version string) (db.Dump, bool, error) {
if scheme != expectedScheme {
t.Errorf("unexpected scheme for GetPackage. want=%s have=%s", expectedScheme, scheme)
}
if name != expectedName {
t.Errorf("unexpected name for GetPackage. want=%s have=%s", expectedName, name)
}
if version != expectedVersion {
t.Errorf("unexpected version for GetPackage. want=%s have=%s", expectedVersion, version)
}
return dump, exists, nil
})
}
func setMockDBFindClosestDumps(t *testing.T, mockDB *mocks.MockDB, expectedRepositoryID int, expectedCommit, expectedFile string, dumps []db.Dump) {
mockDB.FindClosestDumpsFunc.SetDefaultHook(func(ctx context.Context, repositoryID int, commit, file string) ([]db.Dump, error) {
if repositoryID != expectedRepositoryID {
t.Errorf("unexpected repository id for FindClosestDumps. want=%d have=%d", expectedRepositoryID, repositoryID)
}
if commit != expectedCommit {
t.Errorf("unexpected commit for FindClosestDumps. want=%s have=%s", expectedCommit, commit)
}
if file != expectedFile {
t.Errorf("unexpected file for FindClosestDumps. want=%s have=%s", expectedFile, file)
}
return dumps, nil
})
}
func setMockDBSameRepoPager(t *testing.T, mockDB *mocks.MockDB, expectedRepositoryID int, expectedCommit, expectedScheme, expectedName, expectedVersion string, expectedLimit, totalCount int, pager db.ReferencePager) {
mockDB.SameRepoPagerFunc.SetDefaultHook(func(ctx context.Context, repositoryID int, commit, scheme, name, version string, limit int) (int, db.ReferencePager, error) {
if repositoryID != expectedRepositoryID {
t.Errorf("unexpected repository id for SameRepoPager. want=%v have=%v", expectedRepositoryID, repositoryID)
}
if commit != expectedCommit {
t.Errorf("unexpected commit for SameRepoPager. want=%s have=%s", expectedCommit, commit)
}
if scheme != expectedScheme {
t.Errorf("unexpected scheme for SameRepoPager. want=%s have=%s", expectedScheme, scheme)
}
if name != expectedName {
t.Errorf("unexpected name for SameRepoPager. want=%s have=%s", expectedName, name)
}
if version != expectedVersion {
t.Errorf("unexpected version for SameRepoPager. want=%s have=%s", expectedVersion, version)
}
if limit != expectedLimit {
t.Errorf("unexpected limit for SameRepoPager. want=%d have=%d", expectedLimit, limit)
}
return totalCount, pager, nil
})
}
// setMockDBPackageReferencePager configures the mock DB's PackageReferencePager
// method to assert that it is invoked with the expected arguments and to
// return the given total count and reference pager on every call.
func setMockDBPackageReferencePager(t *testing.T, mockDB *mocks.MockDB, expectedScheme, expectedName, expectedVersion string, expectedRepositoryID, expectedLimit int, totalCount int, pager db.ReferencePager) {
	mockDB.PackageReferencePagerFunc.SetDefaultHook(func(ctx context.Context, gotScheme, gotName, gotVersion string, gotRepositoryID, gotLimit int) (int, db.ReferencePager, error) {
		if gotScheme != expectedScheme {
			t.Errorf("unexpected scheme for PackageReferencePager. want=%s have=%s", expectedScheme, gotScheme)
		}
		if gotName != expectedName {
			t.Errorf("unexpected name for PackageReferencePager. want=%s have=%s", expectedName, gotName)
		}
		if gotVersion != expectedVersion {
			t.Errorf("unexpected version for PackageReferencePager. want=%s have=%s", expectedVersion, gotVersion)
		}
		if gotRepositoryID != expectedRepositoryID {
			t.Errorf("unexpected repository id for PackageReferencePager. want=%d have=%d", expectedRepositoryID, gotRepositoryID)
		}
		if gotLimit != expectedLimit {
			t.Errorf("unexpected limit for PackageReferencePager. want=%d have=%d", expectedLimit, gotLimit)
		}
		return totalCount, pager, nil
	})
}
// setMockReferencePagerPageFromOffset configures the mock reference pager's
// PageFromOffset method to assert the expected offset and to return the given
// page of references on every call.
func setMockReferencePagerPageFromOffset(t *testing.T, mockReferencePager *mocks.MockReferencePager, expectedOffset int, references []db.Reference) {
	mockReferencePager.PageFromOffsetFunc.SetDefaultHook(func(gotOffset int) ([]db.Reference, error) {
		if gotOffset != expectedOffset {
			t.Errorf("unexpected offset for PageFromOffset. want=%d have=%d", expectedOffset, gotOffset)
		}
		return references, nil
	})
}
// setMockBundleManagerClientBundleClient configures the mock bundle manager
// client's BundleClient method to return the client registered for the given
// bundle id. Requests for an unregistered id are reported as test failures.
func setMockBundleManagerClientBundleClient(t *testing.T, mockBundleManagerClient *mocks.MockBundleManagerClient, bundleClients map[int]bundles.BundleClient) {
	mockBundleManagerClient.BundleClientFunc.SetDefaultHook(func(id int) bundles.BundleClient {
		client, ok := bundleClients[id]
		if !ok {
			t.Errorf("unexpected bundle id for BundleClient: %d", id)
		}
		return client
	})
}
// setMockBundleClientExists configures the mock bundle client's Exists method
// to assert the expected path and to return the given existence flag on every
// call.
func setMockBundleClientExists(t *testing.T, mockBundleClient *mocks.MockBundleClient, expectedPath string, exists bool) {
	mockBundleClient.ExistsFunc.SetDefaultHook(func(ctx context.Context, gotPath string) (bool, error) {
		if gotPath != expectedPath {
			t.Errorf("unexpected path for Exists. want=%s have=%s", expectedPath, gotPath)
		}
		return exists, nil
	})
}
// setMockBundleClientDefinitions configures the mock bundle client's
// Definitions method to assert the expected path and position and to return
// the given locations on every call.
func setMockBundleClientDefinitions(t *testing.T, mockBundleClient *mocks.MockBundleClient, expectedPath string, expectedLine, expectedCharacter int, locations []bundles.Location) {
	mockBundleClient.DefinitionsFunc.SetDefaultHook(func(ctx context.Context, gotPath string, gotLine, gotCharacter int) ([]bundles.Location, error) {
		if gotPath != expectedPath {
			t.Errorf("unexpected path for Definitions. want=%s have=%s", expectedPath, gotPath)
		}
		if gotLine != expectedLine {
			t.Errorf("unexpected line for Definitions. want=%d have=%d", expectedLine, gotLine)
		}
		if gotCharacter != expectedCharacter {
			t.Errorf("unexpected character for Definitions. want=%d have=%d", expectedCharacter, gotCharacter)
		}
		return locations, nil
	})
}
// setMockBundleClientReferences configures the mock bundle client's References
// method to assert the expected path and position and to return the given
// locations on every call.
func setMockBundleClientReferences(t *testing.T, mockBundleClient *mocks.MockBundleClient, expectedPath string, expectedLine, expectedCharacter int, locations []bundles.Location) {
	mockBundleClient.ReferencesFunc.SetDefaultHook(func(ctx context.Context, gotPath string, gotLine, gotCharacter int) ([]bundles.Location, error) {
		if gotPath != expectedPath {
			t.Errorf("unexpected path for References. want=%s have=%s", expectedPath, gotPath)
		}
		if gotLine != expectedLine {
			t.Errorf("unexpected line for References. want=%d have=%d", expectedLine, gotLine)
		}
		if gotCharacter != expectedCharacter {
			t.Errorf("unexpected character for References. want=%d have=%d", expectedCharacter, gotCharacter)
		}
		return locations, nil
	})
}
// setMockBundleClientHover configures the mock bundle client's Hover method to
// assert the expected path and position and to return the given hover text,
// range, and existence flag on every call.
func setMockBundleClientHover(t *testing.T, mockBundleClient *mocks.MockBundleClient, expectedPath string, expectedLine, expectedCharacter int, text string, r bundles.Range, exists bool) {
	mockBundleClient.HoverFunc.SetDefaultHook(func(ctx context.Context, path string, line, character int) (string, bundles.Range, bool, error) {
		if path != expectedPath {
			// Message previously read "unexpected path Hover"; add the
			// missing "for" to match every sibling helper in this file.
			t.Errorf("unexpected path for Hover. want=%s have=%s", expectedPath, path)
		}
		if line != expectedLine {
			t.Errorf("unexpected line for Hover. want=%d have=%d", expectedLine, line)
		}
		if character != expectedCharacter {
			t.Errorf("unexpected character for Hover. want=%d have=%d", expectedCharacter, character)
		}
		return text, r, exists, nil
	})
}
// setMockBundleClientMonikersByPosition configures the mock bundle client's
// MonikersByPosition method to assert the expected path and position and to
// return the given monikers on every call.
//
// Use t.Errorf (not t.Fatalf) inside the hook: hooks may be invoked from
// goroutines other than the test goroutine, where Fatalf must not be called,
// and every sibling helper in this file reports mismatches with Errorf.
func setMockBundleClientMonikersByPosition(t *testing.T, mockBundleClient *mocks.MockBundleClient, expectedPath string, expectedLine, expectedCharacter int, monikers [][]bundles.MonikerData) {
	mockBundleClient.MonikersByPositionFunc.SetDefaultHook(func(ctx context.Context, path string, line, character int) ([][]bundles.MonikerData, error) {
		if path != expectedPath {
			t.Errorf("unexpected path for MonikersByPosition. want=%s have=%s", expectedPath, path)
		}
		if line != expectedLine {
			t.Errorf("unexpected line for MonikersByPosition. want=%d have=%d", expectedLine, line)
		}
		if character != expectedCharacter {
			t.Errorf("unexpected character for MonikersByPosition. want=%d have=%d", expectedCharacter, character)
		}
		return monikers, nil
	})
}
// setMockBundleClientMonikerResults configures the mock bundle client's
// MonikerResults method to assert the expected model type, moniker, and
// pagination arguments and to return the given locations and total count on
// every call.
func setMockBundleClientMonikerResults(t *testing.T, mockBundleClient *mocks.MockBundleClient, expectedModelType, expectedScheme, expectedIdentifier string, expectedSkip, expectedTake int, locations []bundles.Location, totalCount int) {
	mockBundleClient.MonikerResultsFunc.SetDefaultHook(func(ctx context.Context, gotModelType, gotScheme, gotIdentifier string, gotSkip, gotTake int) ([]bundles.Location, int, error) {
		if gotModelType != expectedModelType {
			t.Errorf("unexpected model type for MonikerResults. want=%s have=%s", expectedModelType, gotModelType)
		}
		if gotScheme != expectedScheme {
			t.Errorf("unexpected scheme for MonikerResults. want=%s have=%s", expectedScheme, gotScheme)
		}
		if gotIdentifier != expectedIdentifier {
			t.Errorf("unexpected identifier for MonikerResults. want=%s have=%s", expectedIdentifier, gotIdentifier)
		}
		if gotSkip != expectedSkip {
			t.Errorf("unexpected skip for MonikerResults. want=%d have=%d", expectedSkip, gotSkip)
		}
		if gotTake != expectedTake {
			t.Errorf("unexpected take for MonikerResults. want=%d have=%d", expectedTake, gotTake)
		}
		return locations, totalCount, nil
	})
}
// setMockBundleClientPackageInformation configures the mock bundle client's
// PackageInformation method to assert the expected path and package
// information id and to return the given package information on every call.
func setMockBundleClientPackageInformation(t *testing.T, mockBundleClient *mocks.MockBundleClient, expectedPath, expectedPackageInformationID string, packageInformation bundles.PackageInformationData) {
	mockBundleClient.PackageInformationFunc.SetDefaultHook(func(ctx context.Context, gotPath, gotPackageInformationID string) (bundles.PackageInformationData, error) {
		if gotPath != expectedPath {
			t.Errorf("unexpected path for PackageInformation. want=%s have=%s", expectedPath, gotPath)
		}
		if gotPackageInformationID != expectedPackageInformationID {
			t.Errorf("unexpected package information id for PackageInformation. want=%s have=%s", expectedPackageInformationID, gotPackageInformationID)
		}
		return packageInformation, nil
	})
}
// readTestFilter reads a hex-encoded bloom filter fixture from the testdata
// directory and returns its decoded bytes. The test fails fatally if the
// fixture cannot be read or decoded.
func readTestFilter(t *testing.T, dirname, filename string) []byte {
	path := fmt.Sprintf("../../testdata/filters/%s/%s", dirname, filename)

	encoded, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatalf("unexpected error reading: %s", err)
	}

	decoded, err := hex.DecodeString(strings.TrimSpace(string(encoded)))
	if err != nil {
		t.Fatalf("unexpected error decoding: %s", err)
	}
	return decoded
}
// readTestWords reads a newline-separated word list fixture from the testdata
// directory. The test fails fatally if the fixture cannot be read.
func readTestWords(t *testing.T, filename string) []string {
	path := fmt.Sprintf("../../testdata/words/%s", filename)

	content, err := ioutil.ReadFile(path)
	if err != nil {
		t.Fatalf("unexpected error reading %s: %s", filename, err)
	}

	trimmed := strings.TrimSpace(string(content))
	return strings.Split(trimmed, "\n")
}

View File

@ -0,0 +1,40 @@
package api
import (
"context"
"strings"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
)
// Hover returns the hover text and range for the symbol at the given position.
// If the dump contains no hover data for the position, the hover text attached
// to the symbol's definition (possibly in another dump) is returned instead.
func (api *codeIntelAPI) Hover(ctx context.Context, file string, line, character, uploadID int) (string, bundles.Range, bool, error) {
	dump, exists, err := api.db.GetDumpByID(ctx, uploadID)
	if err != nil {
		return "", bundles.Range{}, false, err
	}
	if !exists {
		return "", bundles.Range{}, false, ErrMissingDump
	}

	// Bundle paths are relative to the dump root.
	pathInBundle := strings.TrimPrefix(file, dump.Root)
	bundleClient := api.bundleManagerClient.BundleClient(dump.ID)

	text, hoverRange, exists, err := bundleClient.Hover(ctx, pathInBundle, line, character)
	if err != nil {
		return "", bundles.Range{}, false, err
	}
	if exists {
		return text, hoverRange, true, nil
	}

	// No hover text here; fall back to the hover text attached to the
	// definition of the symbol at this position.
	definition, exists, err := api.definitionRaw(ctx, dump, bundleClient, pathInBundle, line, character)
	if err != nil || !exists {
		return "", bundles.Range{}, false, err
	}

	definitionPath := strings.TrimPrefix(definition.Path, definition.Dump.Root)
	definitionBundleClient := api.bundleManagerClient.BundleClient(definition.Dump.ID)
	return definitionBundleClient.Hover(ctx, definitionPath, definition.Range.Start.Line, definition.Range.Start.Character)
}

View File

@ -0,0 +1,108 @@
package api
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/mocks"
)
// TestHover asserts that hover text present in the symbol's own dump is
// returned directly, along with its range.
func TestHover(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient})
	setMockBundleClientHover(t, mockBundleClient, "main.go", 10, 50, "text", testRange1, true)

	api := New(mockDB, mockBundleManagerClient)
	text, r, exists, err := api.Hover(context.Background(), "sub1/main.go", 10, 50, 42)
	if err != nil {
		// Message fixed: this branch fires when an UNexpected error occurs.
		t.Fatalf("unexpected error getting hover text: %s", err)
	}
	if !exists {
		t.Fatalf("expected hover text to exist.")
	}

	if text != "text" {
		t.Errorf("unexpected text. want=%s have=%s", "text", text)
	}
	if diff := cmp.Diff(testRange1, r); diff != "" {
		t.Errorf("unexpected range (-want +got):\n%s", diff)
	}
}
// TestHoverUnknownDump asserts that requesting hover text for an unknown
// upload id yields ErrMissingDump.
func TestHoverUnknownDump(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	setMockDBGetDumpByID(t, mockDB, nil)

	api := New(mockDB, mockBundleManagerClient)
	_, _, _, err := api.Hover(context.Background(), "sub1/main.go", 10, 50, 42)
	if err != ErrMissingDump {
		t.Fatalf("unexpected error getting hover text. want=%q have=%q", ErrMissingDump, err)
	}
}
// TestHoverRemoteDefinitionHoverText asserts that when the local dump has no
// hover data, the hover text is resolved via the symbol's moniker to the dump
// containing its definition.
func TestHoverRemoteDefinitionHoverText(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient1 := mocks.NewMockBundleClient()
	mockBundleClient2 := mocks.NewMockBundleClient()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1, 50: testDump2})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient1, 50: mockBundleClient2})
	setMockBundleClientHover(t, mockBundleClient1, "main.go", 10, 50, "", bundles.Range{}, false)
	setMockBundleClientDefinitions(t, mockBundleClient1, "main.go", 10, 50, nil)
	setMockBundleClientMonikersByPosition(t, mockBundleClient1, "main.go", 10, 50, [][]bundles.MonikerData{{testMoniker1}})
	setMockBundleClientPackageInformation(t, mockBundleClient1, "main.go", "1234", testPackageInformation)
	setMockDBGetPackage(t, mockDB, "gomod", "leftpad", "0.1.0", testDump2, true)
	setMockBundleClientMonikerResults(t, mockBundleClient2, "definitions", "gomod", "pad", 0, 0, []bundles.Location{
		{DumpID: 50, Path: "foo.go", Range: testRange1},
		{DumpID: 50, Path: "bar.go", Range: testRange2},
		{DumpID: 50, Path: "baz.go", Range: testRange3},
	}, 15)
	setMockBundleClientHover(t, mockBundleClient2, "foo.go", 10, 50, "text", testRange4, true)

	api := New(mockDB, mockBundleManagerClient)
	text, r, exists, err := api.Hover(context.Background(), "sub1/main.go", 10, 50, 42)
	if err != nil {
		// Message fixed: this branch fires when an UNexpected error occurs.
		t.Fatalf("unexpected error getting hover text: %s", err)
	}
	if !exists {
		t.Fatalf("expected hover text to exist.")
	}

	if text != "text" {
		t.Errorf("unexpected text. want=%s have=%s", "text", text)
	}
	if diff := cmp.Diff(testRange4, r); diff != "" {
		t.Errorf("unexpected range (-want +got):\n%s", diff)
	}
}
// TestHoverUnknownDefinition asserts that no hover text is returned (and no
// error raised) when the symbol's definition cannot be resolved in any dump.
func TestHoverUnknownDefinition(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient})
	setMockBundleClientHover(t, mockBundleClient, "main.go", 10, 50, "", bundles.Range{}, false)
	setMockBundleClientDefinitions(t, mockBundleClient, "main.go", 10, 50, nil)
	setMockBundleClientMonikersByPosition(t, mockBundleClient, "main.go", 10, 50, [][]bundles.MonikerData{{testMoniker1}})
	setMockBundleClientPackageInformation(t, mockBundleClient, "main.go", "1234", testPackageInformation)
	setMockDBGetPackage(t, mockDB, "gomod", "leftpad", "0.1.0", db.Dump{}, false)

	api := New(mockDB, mockBundleManagerClient)
	_, _, exists, err := api.Hover(context.Background(), "sub1/main.go", 10, 50, 42)
	if err != nil {
		t.Fatalf("unexpected error getting hover text: %s", err)
	}
	if exists {
		t.Errorf("unexpected hover text")
	}
}

View File

@ -0,0 +1,35 @@
package api
import (
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// ResolvedLocation is a range within a particular dump, with the path
// qualified by the dump's root so that it is relative to the repository root
// rather than to the dump.
type ResolvedLocation struct {
	Dump  db.Dump       `json:"dump"`  // the dump that produced this location
	Path  string        `json:"path"`  // dump.Root + the bundle-relative path
	Range bundles.Range `json:"range"` // the target range within the file
}
// sliceLocations returns the subslice of locations covering the half-open
// interval [lo, hi), clamping the upper bound to the slice length. A nil
// slice is returned when lo is past the end of the input.
func sliceLocations(locations []bundles.Location, lo, hi int) []bundles.Location {
	n := len(locations)
	if lo >= n {
		return nil
	}
	if hi > n {
		hi = n
	}
	return locations[lo:hi]
}
// resolveLocationsWithDump qualifies each of the given locations with the
// given dump, prefixing each path with the dump root so that it is relative
// to the repository root rather than to the dump.
func resolveLocationsWithDump(dump db.Dump, locations []bundles.Location) []ResolvedLocation {
	if len(locations) == 0 {
		// Preserve a nil result for empty input; ResolvedLocation carries
		// json tags and nil serializes as null, matching prior behavior.
		return nil
	}

	// Pre-size the result to avoid repeated growth during append.
	resolvedLocations := make([]ResolvedLocation, 0, len(locations))
	for _, location := range locations {
		resolvedLocations = append(resolvedLocations, ResolvedLocation{
			Dump:  dump,
			Path:  dump.Root + location.Path,
			Range: location.Range,
		})
	}
	return resolvedLocations
}

View File

@ -0,0 +1,40 @@
package api
import (
"context"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// lookupMoniker returns the page of locations (and the total result count)
// attached to the given moniker in the dump that defines its package. An
// empty result is returned when the moniker carries no package information or
// when no dump provides the package.
func lookupMoniker(
	db db.DB,
	bundleManagerClient bundles.BundleManagerClient,
	dumpID int,
	path string,
	modelType string,
	moniker bundles.MonikerData,
	skip int,
	take int,
) ([]ResolvedLocation, int, error) {
	if moniker.PackageInformationID == "" {
		return nil, 0, nil
	}

	// Resolve the moniker's package name and version from the source bundle.
	packageInformation, err := bundleManagerClient.BundleClient(dumpID).PackageInformation(context.Background(), path, moniker.PackageInformationID)
	if err != nil {
		return nil, 0, err
	}

	// Find the dump that provides that package.
	dump, exists, err := db.GetPackage(context.Background(), moniker.Scheme, packageInformation.Name, packageInformation.Version)
	if err != nil || !exists {
		return nil, 0, err
	}

	// Query the providing dump's moniker table for the requested page.
	locations, count, err := bundleManagerClient.BundleClient(dump.ID).MonikerResults(context.Background(), modelType, moniker.Scheme, moniker.Identifier, skip, take)
	if err != nil {
		return nil, 0, err
	}
	return resolveLocationsWithDump(dump, locations), count, nil
}

View File

@ -0,0 +1,78 @@
package api
import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/mocks"
)
// TestLookupMoniker asserts that moniker lookup resolves the package's dump
// and returns the requested page of locations qualified by that dump's root.
func TestLookupMoniker(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient1 := mocks.NewMockBundleClient()
	mockBundleClient2 := mocks.NewMockBundleClient()
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient1, 50: mockBundleClient2})
	setMockBundleClientPackageInformation(t, mockBundleClient1, "sub2/main.go", "1234", testPackageInformation)
	setMockDBGetPackage(t, mockDB, "gomod", "leftpad", "0.1.0", testDump2, true)
	setMockBundleClientMonikerResults(t, mockBundleClient2, "definitions", "gomod", "pad", 10, 5, []bundles.Location{
		{DumpID: 42, Path: "foo.go", Range: testRange1},
		{DumpID: 42, Path: "bar.go", Range: testRange2},
		{DumpID: 42, Path: "baz.go", Range: testRange3},
		{DumpID: 42, Path: "bar.go", Range: testRange4},
		{DumpID: 42, Path: "baz.go", Range: testRange5},
	}, 15)

	locations, totalCount, err := lookupMoniker(mockDB, mockBundleManagerClient, 42, "sub2/main.go", "definitions", testMoniker2, 10, 5)
	if err != nil {
		t.Fatalf("unexpected error querying moniker: %s", err)
	}
	if totalCount != 15 {
		// Message fixed: the condition checks 15, so report want=15 (was 5).
		t.Errorf("unexpected total count. want=%d have=%d", 15, totalCount)
	}

	expectedLocations := []ResolvedLocation{
		{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
		{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
		{Dump: testDump2, Path: "sub2/baz.go", Range: testRange3},
		{Dump: testDump2, Path: "sub2/bar.go", Range: testRange4},
		{Dump: testDump2, Path: "sub2/baz.go", Range: testRange5},
	}
	if diff := cmp.Diff(expectedLocations, locations); diff != "" {
		t.Errorf("unexpected definitions (-want +got):\n%s", diff)
	}
}
// TestLookupMonikerNoPackageInformationID asserts that a moniker without
// package information produces an empty result without error.
func TestLookupMonikerNoPackageInformationID(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()

	_, totalCount, err := lookupMoniker(mockDB, mockBundleManagerClient, 42, "sub/main.go", "definitions", testMoniker3, 10, 5)
	if err != nil {
		t.Fatalf("unexpected error querying moniker: %s", err)
	}

	if totalCount != 0 {
		t.Errorf("unexpected total count. want=%d have=%d", 0, totalCount)
	}
}
// TestLookupMonikerNoPackage asserts that an empty result is produced without
// error when no dump provides the moniker's package.
func TestLookupMonikerNoPackage(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient})
	setMockBundleClientPackageInformation(t, mockBundleClient, "main.go", "1234", testPackageInformation)
	setMockDBGetPackage(t, mockDB, "gomod", "leftpad", "0.1.0", db.Dump{}, false)

	_, totalCount, err := lookupMoniker(mockDB, mockBundleManagerClient, 42, "main.go", "definitions", testMoniker1, 10, 5)
	if err != nil {
		t.Fatalf("unexpected error querying moniker: %s", err)
	}

	if totalCount != 0 {
		t.Errorf("unexpected total count. want=%d have=%d", 0, totalCount)
	}
}

View File

@ -0,0 +1,351 @@
package api
import (
"context"
"fmt"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// References returns the list of source locations that reference the symbol at
// the given position. This may include references from other dumps and
// repositories.
func (api *codeIntelAPI) References(ctx context.Context, repositoryID int, commit string, limit int, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error) {
	resolver := &ReferencePageResolver{
		db:                  api.db,
		bundleManagerClient: api.bundleManagerClient,
		repositoryID:        repositoryID,
		commit:              commit,
		limit:               limit,
	}

	return resolver.resolvePage(ctx, cursor)
}
// ReferencePageResolver gathers one page of reference results by walking a
// cursor through its phases (same-dump, definition-monikers, same-repo,
// remote-repo). The limit field is decremented as results are gathered.
type ReferencePageResolver struct {
	db                  db.DB                        // dump/package metadata store
	bundleManagerClient bundles.BundleManagerClient  // access to per-dump bundle data
	repositoryID        int                          // repository being queried
	commit              string                       // commit being queried
	remoteDumpLimit     int                          // max dumps gathered per reference batch
	limit               int                          // remaining number of locations for this page
}
// resolvePage dispatches the given cursor (and each successor cursor) until
// the page limit is satisfied or no further results remain, accumulating all
// locations gathered along the way.
func (s *ReferencePageResolver) resolvePage(ctx context.Context, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error) {
	var page []ResolvedLocation

	for {
		locations, nextCursor, hasNextCursor, err := s.dispatchCursorHandler(ctx, cursor)
		if err != nil {
			return nil, Cursor{}, false, err
		}

		page = append(page, locations...)
		s.limit -= len(locations)

		if !hasNextCursor || s.limit <= 0 {
			return page, nextCursor, hasNextCursor, nil
		}

		cursor = nextCursor
	}
}
// dispatchCursorHandler invokes the handler for the given cursor's phase. An
// error is returned for unrecognized phases.
func (s *ReferencePageResolver) dispatchCursorHandler(ctx context.Context, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error) {
	switch cursor.Phase {
	case "same-dump":
		return s.handleSameDumpCursor(ctx, cursor)
	case "definition-monikers":
		return s.handleDefinitionMonikersCursor(ctx, cursor)
	case "same-repo":
		return s.handleSameRepoCursor(ctx, cursor)
	case "remote-repo":
		return s.handleRemoteRepoCursor(ctx, cursor)
	}

	return nil, Cursor{}, false, fmt.Errorf("unknown cursor phase %s", cursor.Phase)
}
// handleSameDumpCursor returns the page of reference locations within the same
// dump as the cursor position: the bundle's own references, plus rows from its
// moniker (reference) table that aren't already present.
func (s *ReferencePageResolver) handleSameDumpCursor(ctx context.Context, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error) {
	dump, exists, err := s.db.GetDumpByID(ctx, cursor.DumpID)
	if err != nil {
		return nil, Cursor{}, false, err
	}
	if !exists {
		return nil, Cursor{}, false, ErrMissingDump
	}
	bundleClient := s.bundleManagerClient.BundleClient(dump.ID)

	locations, err := bundleClient.References(ctx, cursor.Path, cursor.Line, cursor.Character)
	if err != nil {
		return nil, Cursor{}, false, err
	}

	hashLocation := func(location bundles.Location) string {
		return fmt.Sprintf(
			"%s:%d:%d:%d:%d",
			location.Path,
			location.Range.Start.Line,
			location.Range.Start.Character,
			location.Range.End.Line,
			location.Range.End.Character,
		)
	}

	// Track the locations gathered so far so the moniker search below does
	// not introduce duplicates. (Renamed from `dumpIDs`, which was
	// misleading: the set holds hashed locations, not dump identifiers.)
	seenLocations := map[string]struct{}{}
	for _, location := range locations {
		seenLocations[hashLocation(location)] = struct{}{}
	}

	// Search the references table of the current dump. This search is necessary because
	// we want a 'Find References' operation on a reference to also return references to
	// the governing definition, and those may not be fully linked in the LSIF data. This
	// method returns a cursor if there are reference rows remaining for a subsequent page.
	for _, moniker := range cursor.Monikers {
		results, _, err := bundleClient.MonikerResults(ctx, "reference", moniker.Scheme, moniker.Identifier, 0, 0)
		if err != nil {
			return nil, Cursor{}, false, err
		}

		for _, location := range results {
			if _, ok := seenLocations[hashLocation(location)]; !ok {
				locations = append(locations, location)
			}
		}
	}

	resolvedLocations := resolveLocationsWithDump(dump, sliceLocations(locations, cursor.SkipResults, cursor.SkipResults+s.limit))

	// If there may be more results in this dump, stay in this phase with an
	// advanced offset; otherwise move to the definition-monikers phase.
	if newOffset := cursor.SkipResults + s.limit; newOffset <= len(locations) {
		newCursor := Cursor{
			Phase:       cursor.Phase,
			DumpID:      cursor.DumpID,
			Path:        cursor.Path,
			Line:        cursor.Line,
			Character:   cursor.Character,
			Monikers:    cursor.Monikers,
			SkipResults: newOffset,
		}
		return resolvedLocations, newCursor, true, nil
	}

	newCursor := Cursor{
		DumpID:      cursor.DumpID,
		Phase:       "definition-monikers",
		Path:        cursor.Path,
		Monikers:    cursor.Monikers,
		SkipResults: 0,
	}
	return resolvedLocations, newCursor, true, nil
}
// handleDefinitionMonikersCursor returns reference locations attached to the
// cursor's import monikers, and prepares (from the first moniker that carries
// package information) the "same-repo" cursor used by the next phase once
// this one is exhausted.
func (s *ReferencePageResolver) handleDefinitionMonikersCursor(ctx context.Context, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error) {
	var hasNextPhaseCursor = false
	var nextPhaseCursor Cursor

	// Build the next-phase cursor from the first moniker attached to
	// package information; later monikers are ignored for this purpose.
	for _, moniker := range cursor.Monikers {
		if moniker.PackageInformationID == "" {
			continue
		}

		packageInformation, err := s.bundleManagerClient.BundleClient(cursor.DumpID).PackageInformation(ctx, cursor.Path, moniker.PackageInformationID)
		if err != nil {
			return nil, Cursor{}, false, err
		}

		hasNextPhaseCursor = true
		nextPhaseCursor = Cursor{
			DumpID:                 cursor.DumpID,
			Phase:                  "same-repo",
			Scheme:                 moniker.Scheme,
			Identifier:             moniker.Identifier,
			Name:                   packageInformation.Name,
			Version:                packageInformation.Version,
			DumpIDs:                nil,
			TotalDumpsWhenBatching: 0,
			SkipDumpsWhenBatching:  0,
			SkipDumpsInBatch:       0,
			SkipResultsInDump:      0,
		}
		break
	}

	// Page through the references attached to the first import moniker that
	// yields results. Only "import" monikers are considered here; exported
	// symbols are handled by the later phases.
	for _, moniker := range cursor.Monikers {
		if moniker.Kind != "import" {
			continue
		}

		locations, count, err := lookupMoniker(s.db, s.bundleManagerClient, cursor.DumpID, cursor.Path, "reference", moniker, cursor.SkipResults, s.limit)
		if err != nil {
			return nil, Cursor{}, false, err
		}
		if len(locations) == 0 {
			continue
		}

		// More results remain for this moniker: stay in this phase with an
		// advanced offset.
		if newOffset := cursor.SkipResults + len(locations); newOffset < count {
			newCursor := Cursor{
				Phase:       cursor.Phase,
				DumpID:      cursor.DumpID,
				Path:        cursor.Path,
				Monikers:    cursor.Monikers,
				SkipResults: newOffset,
			}
			return locations, newCursor, true, nil
		}

		// Moniker exhausted: hand off to the next phase (if any).
		return locations, nextPhaseCursor, hasNextPhaseCursor, nil
	}

	return nil, nextPhaseCursor, hasNextPhaseCursor, nil
}
// handleSameRepoCursor returns reference locations from other dumps in the
// same repository that depend on the cursor's package. When this phase is
// exhausted, it hands off to the "remote-repo" phase.
func (s *ReferencePageResolver) handleSameRepoCursor(ctx context.Context, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error) {
	locations, newCursor, hasNewCursor, err := s.resolveLocationsViaReferencePager(ctx, cursor, func(ctx context.Context) (int, db.ReferencePager, error) {
		return s.db.SameRepoPager(ctx, s.repositoryID, s.commit, cursor.Scheme, cursor.Name, cursor.Version, s.remoteDumpLimit)
	})
	if err != nil || hasNewCursor {
		return locations, newCursor, hasNewCursor, err
	}

	// Same-repo results exhausted: construct the remote-repo phase cursor
	// with fresh batching state.
	newCursor = Cursor{
		DumpID:                 cursor.DumpID,
		Phase:                  "remote-repo",
		Scheme:                 cursor.Scheme,
		Identifier:             cursor.Identifier,
		Name:                   cursor.Name,
		Version:                cursor.Version,
		DumpIDs:                nil,
		TotalDumpsWhenBatching: 0,
		SkipDumpsWhenBatching:  0,
		SkipDumpsInBatch:       0,
		SkipResultsInDump:      0,
	}
	return locations, newCursor, true, nil
}
// handleRemoteRepoCursor returns reference locations from dumps in other
// repositories that depend on the cursor's package.
func (s *ReferencePageResolver) handleRemoteRepoCursor(ctx context.Context, cursor Cursor) ([]ResolvedLocation, Cursor, bool, error) {
	createPager := func(ctx context.Context) (int, db.ReferencePager, error) {
		return s.db.PackageReferencePager(ctx, cursor.Scheme, cursor.Name, cursor.Version, s.repositoryID, s.remoteDumpLimit)
	}

	return s.resolveLocationsViaReferencePager(ctx, cursor, createPager)
}
// resolveLocationsViaReferencePager gathers reference locations from a batch
// of dumps that depend on the cursor's package. When the cursor carries no
// batch, a new batch of up to remoteDumpLimit dump ids is gathered from the
// pager (filtering candidates through their bloom filters). It then walks the
// batch, querying each dump's moniker table, and returns as soon as it has a
// page of results along with a cursor describing where to resume.
func (s *ReferencePageResolver) resolveLocationsViaReferencePager(ctx context.Context, cursor Cursor, createPager func(context.Context) (int, db.ReferencePager, error)) ([]ResolvedLocation, Cursor, bool, error) {
	dumpID := cursor.DumpID
	scheme := cursor.Scheme
	identifier := cursor.Identifier
	limit := s.limit

	// No batch in progress: gather the next batch of dump ids.
	if len(cursor.DumpIDs) == 0 {
		totalCount, pager, err := createPager(ctx)
		if err != nil {
			return nil, Cursor{}, false, err
		}

		// NOTE: identifier/limit deliberately shadow the outer variables
		// here; within this block, limit is the batch size
		// (remoteDumpLimit), not the page size.
		identifier := cursor.Identifier
		offset := cursor.SkipDumpsWhenBatching
		limit := s.remoteDumpLimit
		newOffset := offset

		var packageRefs []db.Reference
		for len(packageRefs) < limit && newOffset < totalCount {
			page, err := pager.PageFromOffset(newOffset)
			if err != nil {
				// Close the pager's transaction, folding its error (if any)
				// into the returned error.
				return nil, Cursor{}, false, pager.CloseTx(err)
			}
			if len(page) == 0 {
				// Shouldn't happen, but just in case of a bug we
				// don't want this to throw up into an infinite loop.
				break
			}

			// Keep only references whose bloom filter (probably) contains
			// the identifier; scanned counts how many rows were consumed.
			filtered, scanned := applyBloomFilter(page, identifier, limit-len(packageRefs))
			packageRefs = append(packageRefs, filtered...)
			newOffset += scanned
		}

		var dumpIDs []int
		for _, ref := range packageRefs {
			dumpIDs = append(dumpIDs, ref.DumpID)
		}

		// Record the batch and batching progress on the cursor.
		cursor.DumpIDs = dumpIDs
		cursor.SkipDumpsWhenBatching = newOffset
		cursor.TotalDumpsWhenBatching = totalCount

		if err := pager.CloseTx(nil); err != nil {
			return nil, Cursor{}, false, err
		}
	}

	for i, batchDumpID := range cursor.DumpIDs {
		// Skip the remote reference that show up for ourselves - we've already gathered
		// these in the previous step of the references query.
		if i < cursor.SkipDumpsInBatch || batchDumpID == dumpID {
			continue
		}

		dump, exists, err := s.db.GetDumpByID(ctx, batchDumpID)
		if err != nil {
			return nil, Cursor{}, false, err
		}
		if !exists {
			// The dump was deleted since the batch was gathered; move on.
			continue
		}

		bundleClient := s.bundleManagerClient.BundleClient(batchDumpID)
		results, count, err := bundleClient.MonikerResults(ctx, "reference", scheme, identifier, cursor.SkipResultsInDump, limit)
		if err != nil {
			return nil, Cursor{}, false, err
		}
		if len(results) == 0 {
			continue
		}
		resolvedLocations := resolveLocationsWithDump(dump, results)

		// More results remain in this dump: resume here next page.
		if newResultOffset := cursor.SkipResultsInDump + len(results); newResultOffset < count {
			newCursor := cursor
			newCursor.SkipResultsInDump = newResultOffset
			return resolvedLocations, newCursor, true, nil
		}

		// This dump is exhausted but more remain in the batch.
		if i+1 < len(cursor.DumpIDs) {
			newCursor := cursor
			newCursor.SkipDumpsInBatch = i + 1
			newCursor.SkipResultsInDump = 0
			return resolvedLocations, newCursor, true, nil
		}

		// Batch exhausted but more dumps remain to be batched: clear the
		// batch so the next call gathers a fresh one.
		if cursor.SkipDumpsWhenBatching < cursor.TotalDumpsWhenBatching {
			newCursor := cursor
			newCursor.DumpIDs = []int{}
			newCursor.SkipDumpsInBatch = 0
			newCursor.SkipResultsInDump = 0
			return resolvedLocations, newCursor, true, nil
		}

		// Everything exhausted: final page.
		return resolvedLocations, Cursor{}, false, nil
	}

	return nil, Cursor{}, false, nil
}
// applyBloomFilter returns the references whose bloom filter (probably)
// contains the given identifier, stopping once limit matches have been
// gathered. The second return value is the number of references scanned from
// the input.
func applyBloomFilter(refs []db.Reference, identifier string, limit int) ([]db.Reference, int) {
	var matching []db.Reference
	for i, ref := range refs {
		// References whose filter fails to decode are skipped rather than
		// treated as matches.
		ok, err := decodeAndTestFilter([]byte(ref.Filter), identifier)
		if err != nil || !ok {
			continue
		}

		matching = append(matching, ref)
		if len(matching) >= limit {
			// Stop early and report how many input rows were consumed.
			return matching, i + 1
		}
	}

	return matching, len(refs)
}

View File

@ -0,0 +1,695 @@
package api
import (
"context"
"fmt"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/mocks"
)
// TestHandleSameDumpCursor asserts that the same-dump phase merges bundle
// references with deduplicated moniker results, and advances either within
// the phase (partial results) or to the definition-monikers phase (end of
// result set).
func TestHandleSameDumpCursor(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient})
	setMockBundleClientReferences(t, mockBundleClient, "main.go", 23, 34, []bundles.Location{
		{DumpID: 42, Path: "foo.go", Range: testRange1},
		{DumpID: 42, Path: "bar.go", Range: testRange2},
		{DumpID: 42, Path: "baz.go", Range: testRange3},
	})
	setMockBundleClientMonikerResults(t, mockBundleClient, "reference", "gomod", "pad", 0, 0, []bundles.Location{
		{DumpID: 42, Path: "foo.go", Range: testRange1},
		{DumpID: 42, Path: "bonk.go", Range: testRange4},
		{DumpID: 42, Path: "quux.go", Range: testRange5},
	}, 3)

	rpr := &ReferencePageResolver{
		db:                  mockDB,
		bundleManagerClient: mockBundleManagerClient,
		repositoryID:        100,
		commit:              testCommit,
		limit:               5,
	}

	t.Run("partial results", func(t *testing.T) {
		references, newCursor, hasNewCursor, err := rpr.dispatchCursorHandler(context.Background(), Cursor{
			Phase:       "same-dump",
			DumpID:      42,
			Path:        "main.go",
			Line:        23,
			Character:   34,
			Monikers:    []bundles.MonikerData{{Kind: "export", Scheme: "gomod", Identifier: "pad"}},
			SkipResults: 0,
		})
		if err != nil {
			// Message fixed: this branch fires on an UNexpected error.
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump1, Path: "sub1/foo.go", Range: testRange1},
			{Dump: testDump1, Path: "sub1/bar.go", Range: testRange2},
			{Dump: testDump1, Path: "sub1/baz.go", Range: testRange3},
			{Dump: testDump1, Path: "sub1/bonk.go", Range: testRange4},
			{Dump: testDump1, Path: "sub1/quux.go", Range: testRange5},
		}
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		expectedNewCursor := Cursor{
			Phase:       "same-dump",
			DumpID:      42,
			Path:        "main.go",
			Line:        23,
			Character:   34,
			Monikers:    []bundles.MonikerData{{Kind: "export", Scheme: "gomod", Identifier: "pad"}},
			SkipResults: 5,
		}
		if !hasNewCursor {
			t.Errorf("expected new cursor")
		} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
			t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
		}
	})

	t.Run("end of result set", func(t *testing.T) {
		references, newCursor, hasNewCursor, err := rpr.dispatchCursorHandler(context.Background(), Cursor{
			Phase:       "same-dump",
			DumpID:      42,
			Path:        "main.go",
			Line:        23,
			Character:   34,
			Monikers:    []bundles.MonikerData{{Kind: "export", Scheme: "gomod", Identifier: "pad"}},
			SkipResults: 1,
		})
		if err != nil {
			// Message fixed: this branch fires on an UNexpected error.
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump1, Path: "sub1/bar.go", Range: testRange2},
			{Dump: testDump1, Path: "sub1/baz.go", Range: testRange3},
			{Dump: testDump1, Path: "sub1/bonk.go", Range: testRange4},
			{Dump: testDump1, Path: "sub1/quux.go", Range: testRange5},
		}
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		expectedNewCursor := Cursor{
			Phase:       "definition-monikers",
			DumpID:      42,
			Path:        "main.go",
			Monikers:    []bundles.MonikerData{{Kind: "export", Scheme: "gomod", Identifier: "pad"}},
			SkipResults: 0,
		}
		if !hasNewCursor {
			t.Errorf("expected new cursor")
		} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
			t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
		}
	})
}
// TestHandleDefinitionMonikersCursor exercises the "definition-monikers" phase of
// reference-page resolution. A partial page keeps the cursor in the same phase with
// an advanced offset; exhausting the result set advances the cursor to the
// "same-repo" phase seeded with the defining package's identity.
func TestHandleDefinitionMonikersCursor(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient1 := mocks.NewMockBundleClient()
	mockBundleClient2 := mocks.NewMockBundleClient()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1, 50: testDump2})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{42: mockBundleClient1, 50: mockBundleClient2})
	setMockBundleClientPackageInformation(t, mockBundleClient1, "main.go", "1234", testPackageInformation)
	setMockDBGetPackage(t, mockDB, "gomod", "leftpad", "0.1.0", testDump2, true)

	rpr := &ReferencePageResolver{
		db:                  mockDB,
		bundleManagerClient: mockBundleManagerClient,
		repositoryID:        100,
		commit:              testCommit,
		limit:               5,
	}

	t.Run("partial results", func(t *testing.T) {
		setMockBundleClientMonikerResults(t, mockBundleClient2, "reference", "gomod", "pad", 0, 5, []bundles.Location{
			{DumpID: 50, Path: "foo.go", Range: testRange1},
			{DumpID: 50, Path: "bar.go", Range: testRange2},
			{DumpID: 50, Path: "baz.go", Range: testRange3},
			{DumpID: 50, Path: "bonk.go", Range: testRange4},
			{DumpID: 50, Path: "quux.go", Range: testRange5},
		}, 10)

		references, newCursor, hasNewCursor, err := rpr.dispatchCursorHandler(context.Background(), Cursor{
			Phase:       "definition-monikers",
			DumpID:      42,
			Path:        "main.go",
			Monikers:    []bundles.MonikerData{{Kind: "import", Scheme: "gomod", Identifier: "pad", PackageInformationID: "1234"}},
			SkipResults: 0,
		})
		if err != nil {
			// This branch means the call failed; the message must not claim an error was expected.
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
			{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
			{Dump: testDump2, Path: "sub2/baz.go", Range: testRange3},
			{Dump: testDump2, Path: "sub2/bonk.go", Range: testRange4},
			{Dump: testDump2, Path: "sub2/quux.go", Range: testRange5},
		}
		// want first, got second, to match the "(-want +got)" message.
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		expectedNewCursor := Cursor{
			Phase:       "definition-monikers",
			DumpID:      42,
			Path:        "main.go",
			Monikers:    []bundles.MonikerData{{Kind: "import", Scheme: "gomod", Identifier: "pad", PackageInformationID: "1234"}},
			SkipResults: 5,
		}
		if !hasNewCursor {
			t.Errorf("expected new cursor")
		} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
			t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
		}
	})

	t.Run("end of result set", func(t *testing.T) {
		setMockBundleClientMonikerResults(t, mockBundleClient2, "reference", "gomod", "pad", 5, 5, []bundles.Location{
			{DumpID: 50, Path: "foo.go", Range: testRange1},
			{DumpID: 50, Path: "bar.go", Range: testRange2},
			{DumpID: 50, Path: "baz.go", Range: testRange3},
			{DumpID: 50, Path: "bonk.go", Range: testRange4},
			{DumpID: 50, Path: "quux.go", Range: testRange5},
		}, 10)

		references, newCursor, hasNewCursor, err := rpr.dispatchCursorHandler(context.Background(), Cursor{
			Phase:       "definition-monikers",
			DumpID:      42,
			Path:        "main.go",
			Monikers:    []bundles.MonikerData{{Kind: "import", Scheme: "gomod", Identifier: "pad", PackageInformationID: "1234"}},
			SkipResults: 5,
		})
		if err != nil {
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
			{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
			{Dump: testDump2, Path: "sub2/baz.go", Range: testRange3},
			{Dump: testDump2, Path: "sub2/bonk.go", Range: testRange4},
			{Dump: testDump2, Path: "sub2/quux.go", Range: testRange5},
		}
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		// The moniker result set is exhausted, so the resolver moves on to the
		// same-repo phase using the package resolved via GetPackage above.
		expectedNewCursor := Cursor{
			Phase:      "same-repo",
			DumpID:     42,
			Scheme:     "gomod",
			Identifier: "pad",
			Name:       "leftpad",
			Version:    "0.1.0",
		}
		if !hasNewCursor {
			t.Errorf("expected new cursor")
		} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
			t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
		}
	})
}
// TestHandleSameRepoCursor exercises the "same-repo" phase of reference-page
// resolution: references are gathered from other dumps in the same repository
// via the same-repo pager. A full page from one dump keeps the cursor in the
// same phase; exhausting all dumps advances it to the "remote-repo" phase.
func TestHandleSameRepoCursor(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient1 := mocks.NewMockBundleClient()
	mockBundleClient2 := mocks.NewMockBundleClient()
	mockBundleClient3 := mocks.NewMockBundleClient()
	mockReferencePager := mocks.NewMockReferencePager()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1, 50: testDump2, 51: testDump3, 52: testDump4})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{50: mockBundleClient1, 51: mockBundleClient2, 52: mockBundleClient3})
	setMockDBSameRepoPager(t, mockDB, 100, testCommit, "gomod", "leftpad", "0.1.0", 5, 3, mockReferencePager)
	setMockReferencePagerPageFromOffset(t, mockReferencePager, 0, []db.Reference{
		{DumpID: 50, Filter: readTestFilter(t, "normal", "1")},
		{DumpID: 51, Filter: readTestFilter(t, "normal", "1")},
		{DumpID: 52, Filter: readTestFilter(t, "normal", "1")},
	})

	t.Run("partial results", func(t *testing.T) {
		// The first dump alone satisfies the page limit, with more results remaining.
		setMockBundleClientMonikerResults(t, mockBundleClient1, "reference", "gomod", "bar", 0, 5, []bundles.Location{
			{DumpID: 50, Path: "foo.go", Range: testRange1},
			{DumpID: 50, Path: "bar.go", Range: testRange2},
			{DumpID: 51, Path: "baz.go", Range: testRange3},
			{DumpID: 51, Path: "bonk.go", Range: testRange4},
			{DumpID: 52, Path: "quux.go", Range: testRange5},
		}, 10)

		rpr := &ReferencePageResolver{
			db:                  mockDB,
			bundleManagerClient: mockBundleManagerClient,
			repositoryID:        100,
			commit:              testCommit,
			remoteDumpLimit:     5,
			limit:               5,
		}

		references, newCursor, hasNewCursor, err := rpr.resolvePage(context.Background(), Cursor{
			Phase:      "same-repo",
			DumpID:     42,
			Scheme:     "gomod",
			Identifier: "bar",
			Name:       "leftpad",
			Version:    "0.1.0",
		})
		if err != nil {
			// This branch means the call failed; the message must not claim an error was expected.
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
			{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
			{Dump: testDump2, Path: "sub2/baz.go", Range: testRange3},
			{Dump: testDump2, Path: "sub2/bonk.go", Range: testRange4},
			{Dump: testDump2, Path: "sub2/quux.go", Range: testRange5},
		}
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		expectedNewCursor := Cursor{
			Phase:                  "same-repo",
			DumpID:                 42,
			Scheme:                 "gomod",
			Identifier:             "bar",
			Name:                   "leftpad",
			Version:                "0.1.0",
			DumpIDs:                []int{50, 51, 52},
			TotalDumpsWhenBatching: 3,
			SkipDumpsWhenBatching:  3,
			SkipDumpsInBatch:       0,
			SkipResultsInDump:      5,
		}
		if !hasNewCursor {
			t.Errorf("expected new cursor")
		} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
			t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
		}
	})

	t.Run("multiple pages", func(t *testing.T) {
		// Each dump in the batch is drained in turn to fill a single page.
		setMockBundleClientMonikerResults(t, mockBundleClient1, "reference", "gomod", "bar", 0, 5, []bundles.Location{
			{DumpID: 50, Path: "foo.go", Range: testRange1},
			{DumpID: 50, Path: "bar.go", Range: testRange2},
		}, 2)
		setMockBundleClientMonikerResults(t, mockBundleClient2, "reference", "gomod", "bar", 0, 3, []bundles.Location{
			{DumpID: 51, Path: "baz.go", Range: testRange3},
			{DumpID: 51, Path: "bonk.go", Range: testRange4},
		}, 2)
		setMockBundleClientMonikerResults(t, mockBundleClient3, "reference", "gomod", "bar", 0, 1, []bundles.Location{
			{DumpID: 52, Path: "quux.go", Range: testRange5},
		}, 1)

		rpr := &ReferencePageResolver{
			db:                  mockDB,
			bundleManagerClient: mockBundleManagerClient,
			repositoryID:        100,
			commit:              testCommit,
			remoteDumpLimit:     5,
			limit:               5,
		}

		references, newCursor, hasNewCursor, err := rpr.resolvePage(context.Background(), Cursor{
			Phase:      "same-repo",
			DumpID:     42,
			Scheme:     "gomod",
			Identifier: "bar",
			Name:       "leftpad",
			Version:    "0.1.0",
		})
		if err != nil {
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
			{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
			{Dump: testDump3, Path: "sub3/baz.go", Range: testRange3},
			{Dump: testDump3, Path: "sub3/bonk.go", Range: testRange4},
			{Dump: testDump4, Path: "sub4/quux.go", Range: testRange5},
		}
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		// All same-repo dumps are exhausted, so the resolver moves to remote repos.
		expectedNewCursor := Cursor{
			Phase:      "remote-repo",
			DumpID:     42,
			Scheme:     "gomod",
			Identifier: "bar",
			Name:       "leftpad",
			Version:    "0.1.0",
		}
		if !hasNewCursor {
			t.Errorf("expected new cursor")
		} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
			t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
		}
	})
}
// TestHandleSameRepoCursorMultipleDumpBatches starts mid-batch in the
// "same-repo" phase (one dump of the current batch already consumed) and
// asserts that draining the remaining dump empties the batch but keeps the
// phase, since another batch of dumps is still available from the pager.
func TestHandleSameRepoCursorMultipleDumpBatches(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()
	mockReferencePager := mocks.NewMockReferencePager()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1, 50: testDump2, 51: testDump3, 52: testDump4})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{51: mockBundleClient})
	setMockDBSameRepoPager(t, mockDB, 100, testCommit, "gomod", "leftpad", "0.1.0", 2, 3, mockReferencePager)
	setMockReferencePagerPageFromOffset(t, mockReferencePager, 0, []db.Reference{
		{DumpID: 50, Filter: readTestFilter(t, "normal", "1")},
		{DumpID: 51, Filter: readTestFilter(t, "normal", "1")},
	})
	setMockBundleClientMonikerResults(t, mockBundleClient, "reference", "gomod", "bar", 0, 5, []bundles.Location{
		{DumpID: 51, Path: "baz.go", Range: testRange3},
		{DumpID: 51, Path: "bonk.go", Range: testRange4},
	}, 2)

	rpr := &ReferencePageResolver{
		db:                  mockDB,
		bundleManagerClient: mockBundleManagerClient,
		repositoryID:        100,
		commit:              testCommit,
		remoteDumpLimit:     2,
		limit:               5,
	}

	references, newCursor, hasNewCursor, err := rpr.dispatchCursorHandler(context.Background(), Cursor{
		Phase:                  "same-repo",
		DumpID:                 42,
		Scheme:                 "gomod",
		Identifier:             "bar",
		Name:                   "leftpad",
		Version:                "0.1.0",
		DumpIDs:                []int{50, 51},
		TotalDumpsWhenBatching: 3,
		SkipDumpsWhenBatching:  2,
		SkipDumpsInBatch:       1, // dump 50 already consumed; resume at dump 51
		SkipResultsInDump:      0,
	})
	if err != nil {
		// This branch means the call failed; the message must not claim an error was expected.
		t.Fatalf("unexpected error getting references: %s", err)
	}

	expectedReferences := []ResolvedLocation{
		{Dump: testDump3, Path: "sub3/baz.go", Range: testRange3},
		{Dump: testDump3, Path: "sub3/bonk.go", Range: testRange4},
	}
	if diff := cmp.Diff(expectedReferences, references); diff != "" {
		t.Errorf("unexpected references (-want +got):\n%s", diff)
	}

	// Batch exhausted (DumpIDs empty) but 2 of 3 total dumps seen, so the
	// cursor stays in the same-repo phase to fetch the next batch.
	expectedNewCursor := Cursor{
		Phase:                  "same-repo",
		DumpID:                 42,
		Scheme:                 "gomod",
		Identifier:             "bar",
		Name:                   "leftpad",
		Version:                "0.1.0",
		DumpIDs:                []int{},
		TotalDumpsWhenBatching: 3,
		SkipDumpsWhenBatching:  2,
		SkipDumpsInBatch:       0,
		SkipResultsInDump:      0,
	}
	if !hasNewCursor {
		t.Errorf("expected new cursor")
	} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
		t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
	}
}
//
//
//
//
// TestHandleRemoteRepoCursor exercises the "remote-repo" phase of
// reference-page resolution: references come from dumps in other repositories
// via the package-reference pager. A full page keeps the cursor in the phase;
// exhausting all remote dumps terminates pagination (no new cursor).
func TestHandleRemoteRepoCursor(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient1 := mocks.NewMockBundleClient()
	mockBundleClient2 := mocks.NewMockBundleClient()
	mockBundleClient3 := mocks.NewMockBundleClient()
	mockReferencePager := mocks.NewMockReferencePager()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1, 50: testDump2, 51: testDump3, 52: testDump4})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{50: mockBundleClient1, 51: mockBundleClient2, 52: mockBundleClient3})
	setMockDBPackageReferencePager(t, mockDB, "gomod", "leftpad", "0.1.0", 100, 5, 3, mockReferencePager)
	setMockReferencePagerPageFromOffset(t, mockReferencePager, 0, []db.Reference{
		{DumpID: 50, Filter: readTestFilter(t, "normal", "1")},
		{DumpID: 51, Filter: readTestFilter(t, "normal", "1")},
		{DumpID: 52, Filter: readTestFilter(t, "normal", "1")},
	})

	t.Run("partial results", func(t *testing.T) {
		// The first remote dump alone fills the page, with more results remaining.
		setMockBundleClientMonikerResults(t, mockBundleClient1, "reference", "gomod", "bar", 0, 5, []bundles.Location{
			{DumpID: 50, Path: "foo.go", Range: testRange1},
			{DumpID: 50, Path: "bar.go", Range: testRange2},
			{DumpID: 51, Path: "baz.go", Range: testRange3},
			{DumpID: 51, Path: "bonk.go", Range: testRange4},
			{DumpID: 52, Path: "quux.go", Range: testRange5},
		}, 10)

		rpr := &ReferencePageResolver{
			db:                  mockDB,
			bundleManagerClient: mockBundleManagerClient,
			repositoryID:        100,
			commit:              testCommit,
			remoteDumpLimit:     5,
			limit:               5,
		}

		references, newCursor, hasNewCursor, err := rpr.resolvePage(context.Background(), Cursor{
			Phase:      "remote-repo",
			DumpID:     42,
			Scheme:     "gomod",
			Identifier: "bar",
			Name:       "leftpad",
			Version:    "0.1.0",
		})
		if err != nil {
			// This branch means the call failed; the message must not claim an error was expected.
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
			{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
			{Dump: testDump2, Path: "sub2/baz.go", Range: testRange3},
			{Dump: testDump2, Path: "sub2/bonk.go", Range: testRange4},
			{Dump: testDump2, Path: "sub2/quux.go", Range: testRange5},
		}
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		expectedNewCursor := Cursor{
			Phase:                  "remote-repo",
			DumpID:                 42,
			Scheme:                 "gomod",
			Identifier:             "bar",
			Name:                   "leftpad",
			Version:                "0.1.0",
			DumpIDs:                []int{50, 51, 52},
			TotalDumpsWhenBatching: 3,
			SkipDumpsWhenBatching:  3,
			SkipDumpsInBatch:       0,
			SkipResultsInDump:      5,
		}
		if !hasNewCursor {
			t.Errorf("expected new cursor")
		} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
			t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
		}
	})

	t.Run("multiple pages", func(t *testing.T) {
		// Each remote dump is drained in turn to fill a single page.
		setMockBundleClientMonikerResults(t, mockBundleClient1, "reference", "gomod", "bar", 0, 5, []bundles.Location{
			{DumpID: 50, Path: "foo.go", Range: testRange1},
			{DumpID: 50, Path: "bar.go", Range: testRange2},
		}, 2)
		setMockBundleClientMonikerResults(t, mockBundleClient2, "reference", "gomod", "bar", 0, 3, []bundles.Location{
			{DumpID: 51, Path: "baz.go", Range: testRange3},
			{DumpID: 51, Path: "bonk.go", Range: testRange4},
		}, 2)
		setMockBundleClientMonikerResults(t, mockBundleClient3, "reference", "gomod", "bar", 0, 1, []bundles.Location{
			{DumpID: 52, Path: "quux.go", Range: testRange5},
		}, 1)

		rpr := &ReferencePageResolver{
			db:                  mockDB,
			bundleManagerClient: mockBundleManagerClient,
			repositoryID:        100,
			commit:              testCommit,
			remoteDumpLimit:     5,
			limit:               5,
		}

		references, _, hasNewCursor, err := rpr.resolvePage(context.Background(), Cursor{
			Phase:      "remote-repo",
			DumpID:     42,
			Scheme:     "gomod",
			Identifier: "bar",
			Name:       "leftpad",
			Version:    "0.1.0",
		})
		if err != nil {
			t.Fatalf("unexpected error getting references: %s", err)
		}

		expectedReferences := []ResolvedLocation{
			{Dump: testDump2, Path: "sub2/foo.go", Range: testRange1},
			{Dump: testDump2, Path: "sub2/bar.go", Range: testRange2},
			{Dump: testDump3, Path: "sub3/baz.go", Range: testRange3},
			{Dump: testDump3, Path: "sub3/bonk.go", Range: testRange4},
			{Dump: testDump4, Path: "sub4/quux.go", Range: testRange5},
		}
		if diff := cmp.Diff(expectedReferences, references); diff != "" {
			t.Errorf("unexpected references (-want +got):\n%s", diff)
		}

		// remote-repo is the final phase; exhausting it ends pagination.
		if hasNewCursor {
			t.Errorf("unexpected new cursor")
		}
	})
}
// TestHandleRemoteRepoCursorMultipleDumpBatches starts mid-batch in the
// "remote-repo" phase (one dump of the current batch already consumed) and
// asserts that draining the remaining dump empties the batch but keeps the
// phase, since another batch of remote dumps is still available.
func TestHandleRemoteRepoCursorMultipleDumpBatches(t *testing.T) {
	mockDB := mocks.NewMockDB()
	mockBundleManagerClient := mocks.NewMockBundleManagerClient()
	mockBundleClient := mocks.NewMockBundleClient()
	mockReferencePager := mocks.NewMockReferencePager()
	setMockDBGetDumpByID(t, mockDB, map[int]db.Dump{42: testDump1, 50: testDump2, 51: testDump3, 52: testDump4})
	setMockBundleManagerClientBundleClient(t, mockBundleManagerClient, map[int]bundles.BundleClient{51: mockBundleClient})
	setMockDBPackageReferencePager(t, mockDB, "gomod", "leftpad", "0.1.0", 100, 2, 3, mockReferencePager)
	setMockReferencePagerPageFromOffset(t, mockReferencePager, 0, []db.Reference{
		{DumpID: 50, Filter: readTestFilter(t, "normal", "1")},
		{DumpID: 51, Filter: readTestFilter(t, "normal", "1")},
	})
	setMockBundleClientMonikerResults(t, mockBundleClient, "reference", "gomod", "bar", 0, 5, []bundles.Location{
		{DumpID: 51, Path: "baz.go", Range: testRange3},
		{DumpID: 51, Path: "bonk.go", Range: testRange4},
	}, 2)

	rpr := &ReferencePageResolver{
		db:                  mockDB,
		bundleManagerClient: mockBundleManagerClient,
		repositoryID:        100,
		commit:              testCommit,
		remoteDumpLimit:     2,
		limit:               5,
	}

	references, newCursor, hasNewCursor, err := rpr.dispatchCursorHandler(context.Background(), Cursor{
		Phase:                  "remote-repo",
		DumpID:                 42,
		Scheme:                 "gomod",
		Identifier:             "bar",
		Name:                   "leftpad",
		Version:                "0.1.0",
		DumpIDs:                []int{50, 51},
		TotalDumpsWhenBatching: 3,
		SkipDumpsWhenBatching:  2,
		SkipDumpsInBatch:       1, // dump 50 already consumed; resume at dump 51
		SkipResultsInDump:      0,
	})
	if err != nil {
		// This branch means the call failed; the message must not claim an error was expected.
		t.Fatalf("unexpected error getting references: %s", err)
	}

	expectedReferences := []ResolvedLocation{
		{Dump: testDump3, Path: "sub3/baz.go", Range: testRange3},
		{Dump: testDump3, Path: "sub3/bonk.go", Range: testRange4},
	}
	if diff := cmp.Diff(expectedReferences, references); diff != "" {
		t.Errorf("unexpected references (-want +got):\n%s", diff)
	}

	// Batch exhausted (DumpIDs empty) but 2 of 3 total dumps seen, so the
	// cursor stays in the remote-repo phase to fetch the next batch.
	expectedNewCursor := Cursor{
		Phase:                  "remote-repo",
		DumpID:                 42,
		Scheme:                 "gomod",
		Identifier:             "bar",
		Name:                   "leftpad",
		Version:                "0.1.0",
		DumpIDs:                []int{},
		TotalDumpsWhenBatching: 3,
		SkipDumpsWhenBatching:  2,
		SkipDumpsInBatch:       0,
		SkipResultsInDump:      0,
	}
	if !hasNewCursor {
		t.Errorf("expected new cursor")
	} else if diff := cmp.Diff(expectedNewCursor, newCursor); diff != "" {
		t.Errorf("unexpected new cursor (-want +got):\n%s", diff)
	}
}
// TestApplyBloomFilter checks that references whose bloom filters do not
// contain the identifier are skipped, and that scanning stops as soon as the
// requested number of matching references has been collected.
func TestApplyBloomFilter(t *testing.T) {
	references := []db.Reference{
		{DumpID: 1, Filter: readTestFilter(t, "normal", "1")},   // bar
		{DumpID: 2, Filter: readTestFilter(t, "normal", "2")},   // no bar
		{DumpID: 3, Filter: readTestFilter(t, "normal", "3")},   // bar
		{DumpID: 4, Filter: readTestFilter(t, "normal", "4")},   // bar
		{DumpID: 5, Filter: readTestFilter(t, "normal", "5")},   // no bar
		{DumpID: 6, Filter: readTestFilter(t, "normal", "6")},   // bar
		{DumpID: 7, Filter: readTestFilter(t, "normal", "7")},   // bar
		{DumpID: 8, Filter: readTestFilter(t, "normal", "8")},   // no bar
		{DumpID: 9, Filter: readTestFilter(t, "normal", "9")},   // bar
		{DumpID: 10, Filter: readTestFilter(t, "normal", "10")}, // bar
		{DumpID: 11, Filter: readTestFilter(t, "normal", "11")}, // no bar
		{DumpID: 12, Filter: readTestFilter(t, "normal", "12")}, // bar
	}

	for _, tc := range []struct {
		limit           int
		expectedScanned int
		expectedDumpIDs []int
	}{
		{1, 1, []int{1}},
		{2, 3, []int{1, 3}},
		{6, 9, []int{1, 3, 4, 6, 7, 9}},
		{7, 10, []int{1, 3, 4, 6, 7, 9, 10}},
		{8, 12, []int{1, 3, 4, 6, 7, 9, 10, 12}},
		{12, 12, []int{1, 3, 4, 6, 7, 9, 10, 12}},
	} {
		t.Run(fmt.Sprintf("limit=%d", tc.limit), func(t *testing.T) {
			filtered, scanned := applyBloomFilter(references, "bar", tc.limit)
			if scanned != tc.expectedScanned {
				t.Errorf("unexpected scanned. want=%d have=%d", tc.expectedScanned, scanned)
			}

			var ids []int
			for _, ref := range filtered {
				ids = append(ids, ref.DumpID)
			}
			if diff := cmp.Diff(tc.expectedDumpIDs, ids); diff != "" {
				t.Errorf("unexpected filtered references ids (-want +got):\n%s", diff)
			}
		})
	}
}

View File

@ -0,0 +1,191 @@
package bundles
import (
"context"
"encoding/json"
"fmt"
"net/http"
"net/url"
)
// BundleClient is the interface to the precise-code-intel-bundle-manager service scoped to a particular dump.
type BundleClient interface {
	// Exists determines if the given path exists in the dump.
	Exists(ctx context.Context, path string) (bool, error)

	// Definitions retrieves a list of definition locations for the symbol under the given location.
	Definitions(ctx context.Context, path string, line, character int) ([]Location, error)

	// References retrieves a list of reference locations for the symbol under the given location.
	References(ctx context.Context, path string, line, character int) ([]Location, error)

	// Hover retrieves the hover text for the symbol under the given location. The boolean return
	// value indicates whether hover data exists for the symbol.
	Hover(ctx context.Context, path string, line, character int) (string, Range, bool, error)

	// MonikersByPosition retrieves a list of monikers attached to the symbol under the given location. There may
	// be multiple ranges enclosing this point. The returned monikers are partitioned such that inner ranges occur
	// first in the result, and outer ranges occur later.
	MonikersByPosition(ctx context.Context, path string, line, character int) ([][]MonikerData, error)

	// MonikerResults retrieves a page of locations attached to a moniker and a total count of such locations.
	MonikerResults(ctx context.Context, modelType, scheme, identifier string, skip, take int) ([]Location, int, error)

	// PackageInformation retrieves package information data by its identifier.
	PackageInformation(ctx context.Context, path, packageInformationID string) (PackageInformationData, error)
}
// bundleClientImpl is the concrete HTTP-backed BundleClient. All of its
// requests are scoped to a single dump (bundleID) served by the bundle
// manager at bundleManagerURL.
type bundleClientImpl struct {
	bundleManagerURL string // base URL of the precise-code-intel-bundle-manager
	bundleID         int    // identifier of the dump this client queries
}

// Compile-time check that bundleClientImpl satisfies BundleClient.
var _ BundleClient = &bundleClientImpl{}
// Exists determines if the given path exists in the dump.
func (c *bundleClientImpl) Exists(ctx context.Context, path string) (bool, error) {
	var exists bool
	if err := c.request(ctx, "exists", map[string]interface{}{"path": path}, &exists); err != nil {
		return false, err
	}
	return exists, nil
}
// Definitions retrieves a list of definition locations for the symbol under the given location.
func (c *bundleClientImpl) Definitions(ctx context.Context, path string, line, character int) ([]Location, error) {
	args := map[string]interface{}{
		"path":      path,
		"line":      line,
		"character": character,
	}

	var locations []Location
	// Return nil on error rather than stamping bundle IDs onto a
	// partially-decoded result.
	if err := c.request(ctx, "definitions", args, &locations); err != nil {
		return nil, err
	}

	c.addBundleIDToLocations(locations)
	return locations, nil
}
// References retrieves a list of reference locations for the symbol under the given location.
func (c *bundleClientImpl) References(ctx context.Context, path string, line, character int) ([]Location, error) {
	args := map[string]interface{}{
		"path":      path,
		"line":      line,
		"character": character,
	}

	var locations []Location
	// Return nil on error rather than stamping bundle IDs onto a
	// partially-decoded result.
	if err := c.request(ctx, "references", args, &locations); err != nil {
		return nil, err
	}

	c.addBundleIDToLocations(locations)
	return locations, nil
}
// Hover retrieves the hover text for the symbol under the given location. The
// boolean return value indicates whether hover data exists for the symbol: the
// bundle manager responds with a JSON null when there is none.
func (c *bundleClientImpl) Hover(ctx context.Context, path string, line, character int) (string, Range, bool, error) {
	args := map[string]interface{}{
		"path":      path,
		"line":      line,
		"character": character,
	}

	// Decode into a raw message first so a null body can be distinguished
	// from an object.
	var raw *json.RawMessage
	if err := c.request(ctx, "hover", args, &raw); err != nil {
		return "", Range{}, false, err
	}
	if raw == nil {
		return "", Range{}, false, nil
	}

	var payload struct {
		Text  string `json:"text"`
		Range Range  `json:"range"`
	}
	if err := json.Unmarshal(*raw, &payload); err != nil {
		return "", Range{}, false, err
	}

	return payload.Text, payload.Range, true, nil
}
// MonikersByPosition retrieves a list of monikers attached to the symbol under the given location. There may
// be multiple ranges enclosing this point. The returned monikers are partitioned such that inner ranges occur
// first in the result, and outer ranges occur later.
func (c *bundleClientImpl) MonikersByPosition(ctx context.Context, path string, line, character int) (monikers [][]MonikerData, err error) {
	err = c.request(ctx, "monikersByPosition", map[string]interface{}{
		"path":      path,
		"line":      line,
		"character": character,
	}, &monikers)
	return monikers, err
}
// MonikerResults retrieves a page of locations attached to a moniker and a total count of such locations.
func (c *bundleClientImpl) MonikerResults(ctx context.Context, modelType, scheme, identifier string, skip, take int) ([]Location, int, error) {
	args := map[string]interface{}{
		"modelType":  modelType,
		"scheme":     scheme,
		"identifier": identifier,
	}
	// Zero values are omitted from the query so the bundle manager applies
	// its own defaults.
	if skip != 0 {
		args["skip"] = skip
	}
	if take != 0 {
		args["take"] = take
	}

	var target struct {
		Locations []Location `json:"locations"`
		Count     int        `json:"count"`
	}
	// Return zero values on error rather than stamping bundle IDs onto a
	// partially-decoded result.
	if err := c.request(ctx, "monikerResults", args, &target); err != nil {
		return nil, 0, err
	}

	c.addBundleIDToLocations(target.Locations)
	return target.Locations, target.Count, nil
}
// PackageInformation retrieves package information data by its identifier.
func (c *bundleClientImpl) PackageInformation(ctx context.Context, path, packageInformationID string) (PackageInformationData, error) {
	var pkg PackageInformationData
	err := c.request(ctx, "packageInformation", map[string]interface{}{
		"path":                 path,
		"packageInformationId": packageInformationID,
	}, &pkg)
	return pkg, err
}
// request performs a GET against the bundle manager for the given dump-scoped
// endpoint (e.g. "exists", "definitions"), serializing qs into the query
// string and decoding the JSON response body into target. A non-200 response
// is returned as an error.
func (c *bundleClientImpl) request(ctx context.Context, path string, qs map[string]interface{}, target interface{}) error {
	values := url.Values{}
	for k, v := range qs {
		values.Set(k, fmt.Sprintf("%v", v))
	}

	// Named u to avoid shadowing the net/url package.
	u, err := url.Parse(fmt.Sprintf("%s/dbs/%d/%s", c.bundleManagerURL, c.bundleID, path))
	if err != nil {
		return err
	}
	u.RawQuery = values.Encode()

	// Attach ctx so the request is canceled when the caller gives up
	// (previously a TODO; the context was ignored).
	req, err := http.NewRequestWithContext(ctx, "GET", u.String(), nil)
	if err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}

	return json.NewDecoder(resp.Body).Decode(target)
}
// addBundleIDToLocations stamps the client's bundle identifier onto each
// location in place, since the bundle manager's responses omit the dump ID.
func (c *bundleClientImpl) addBundleIDToLocations(locations []Location) {
	for i := 0; i < len(locations); i++ {
		locations[i].DumpID = c.bundleID
	}
}

View File

@ -0,0 +1,292 @@
package bundles
import (
"context"
"net/http"
"net/http/httptest"
"net/url"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestExists asserts that Exists hits the expected bundle manager endpoint
// and decodes a boolean response body.
func TestExists(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		assertRequest(t, r, "GET", "/dbs/42/exists", map[string]string{
			"path": "main.go",
		})
		_, _ = w.Write([]byte(`true`))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client := &bundleClientImpl{bundleManagerURL: server.URL, bundleID: 42}
	exists, err := client.Exists(context.Background(), "main.go")
	if err != nil {
		t.Fatalf("unexpected error querying exists: %s", err)
	}
	if !exists {
		t.Errorf("unexpected path to exist")
	}
}
// TestExistsBadResponse asserts that a non-200 response from the bundle
// manager surfaces as an error.
func TestExistsBadResponse(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client := &bundleClientImpl{bundleManagerURL: server.URL, bundleID: 42}
	if _, err := client.Exists(context.Background(), "main.go"); err == nil {
		t.Fatalf("unexpected nil error querying exists")
	}
}
// TestDefinitions asserts that Definitions sends the position as query
// parameters and stamps the client's bundle ID onto each decoded location.
func TestDefinitions(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		assertRequest(t, r, "GET", "/dbs/42/definitions", map[string]string{
			"path":      "main.go",
			"line":      "10",
			"character": "20",
		})

		_, _ = w.Write([]byte(`[
			{"path": "foo.go", "range": {"start": {"line": 1, "character": 2}, "end": {"line": 3, "character": 4}}},
			{"path": "bar.go", "range": {"start": {"line": 5, "character": 6}, "end": {"line": 7, "character": 8}}}
		]`))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client := &bundleClientImpl{bundleManagerURL: server.URL, bundleID: 42}
	definitions, err := client.Definitions(context.Background(), "main.go", 10, 20)
	if err != nil {
		t.Fatalf("unexpected error querying definitions: %s", err)
	}

	expected := []Location{
		{DumpID: 42, Path: "foo.go", Range: Range{Start: Position{1, 2}, End: Position{3, 4}}},
		{DumpID: 42, Path: "bar.go", Range: Range{Start: Position{5, 6}, End: Position{7, 8}}},
	}
	if diff := cmp.Diff(expected, definitions); diff != "" {
		t.Errorf("unexpected definitions (-want +got):\n%s", diff)
	}
}
// TestReferences asserts that References sends the position as query
// parameters and stamps the client's bundle ID onto each decoded location.
func TestReferences(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		assertRequest(t, r, "GET", "/dbs/42/references", map[string]string{
			"path":      "main.go",
			"line":      "10",
			"character": "20",
		})

		_, _ = w.Write([]byte(`[
			{"path": "foo.go", "range": {"start": {"line": 1, "character": 2}, "end": {"line": 3, "character": 4}}},
			{"path": "bar.go", "range": {"start": {"line": 5, "character": 6}, "end": {"line": 7, "character": 8}}}
		]`))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client := &bundleClientImpl{bundleManagerURL: server.URL, bundleID: 42}
	references, err := client.References(context.Background(), "main.go", 10, 20)
	if err != nil {
		t.Fatalf("unexpected error querying references: %s", err)
	}

	expected := []Location{
		{DumpID: 42, Path: "foo.go", Range: Range{Start: Position{1, 2}, End: Position{3, 4}}},
		{DumpID: 42, Path: "bar.go", Range: Range{Start: Position{5, 6}, End: Position{7, 8}}},
	}
	if diff := cmp.Diff(expected, references); diff != "" {
		t.Errorf("unexpected references (-want +got):\n%s", diff)
	}
}
// TestHover asserts that Hover decodes a non-null response into hover text
// plus its enclosing range and reports that the hover exists.
func TestHover(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		assertRequest(t, r, "GET", "/dbs/42/hover", map[string]string{
			"path":      "main.go",
			"line":      "10",
			"character": "20",
		})

		_, _ = w.Write([]byte(`{
			"text": "starts the program",
			"range": {"start": {"line": 1, "character": 2}, "end": {"line": 3, "character": 4}}
		}`))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client := &bundleClientImpl{bundleManagerURL: server.URL, bundleID: 42}
	text, hoverRange, exists, err := client.Hover(context.Background(), "main.go", 10, 20)
	if err != nil {
		t.Fatalf("unexpected error querying hover: %s", err)
	}
	if !exists {
		t.Errorf("expected hover text to exist")
		return
	}

	expectedText := "starts the program"
	expectedRange := Range{
		Start: Position{1, 2},
		End:   Position{3, 4},
	}
	if text != expectedText {
		t.Errorf("unexpected hover text. want=%v have=%v", expectedText, text)
	} else if diff := cmp.Diff(expectedRange, hoverRange); diff != "" {
		t.Errorf("unexpected hover range (-want +got):\n%s", diff)
	}
}
// TestHoverNull asserts that a JSON null response body is reported as
// "no hover data" rather than as an error.
func TestHoverNull(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		assertRequest(t, r, "GET", "/dbs/42/hover", map[string]string{
			"path":      "main.go",
			"line":      "10",
			"character": "20",
		})
		_, _ = w.Write([]byte(`null`))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client := &bundleClientImpl{bundleManagerURL: server.URL, bundleID: 42}
	_, _, exists, err := client.Hover(context.Background(), "main.go", 10, 20)
	if err != nil {
		t.Fatalf("unexpected error querying hover: %s", err)
	}
	if exists {
		t.Errorf("unexpected hover text")
	}
}
// TestMonikersByPosition asserts that MonikersByPosition decodes the nested
// moniker lists (one list per enclosing range) returned by the bundle manager.
func TestMonikersByPosition(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		assertRequest(t, r, "GET", "/dbs/42/monikersByPosition", map[string]string{
			"path":      "main.go",
			"line":      "10",
			"character": "20",
		})

		_, _ = w.Write([]byte(`[
			[{
				"kind": "import",
				"scheme": "gomod",
				"identifier": "pad1"
			}],
			[{
				"kind": "import",
				"scheme": "gomod",
				"identifier": "pad2",
				"packageInformationID": "123"
			}, {
				"kind": "export",
				"scheme": "gomod",
				"identifier": "pad2",
				"packageInformationID": "123"
			}]
		]`))
	}
	server := httptest.NewServer(http.HandlerFunc(handler))
	defer server.Close()

	client := &bundleClientImpl{bundleManagerURL: server.URL, bundleID: 42}
	monikers, err := client.MonikersByPosition(context.Background(), "main.go", 10, 20)
	if err != nil {
		t.Fatalf("unexpected error querying monikers by position: %s", err)
	}

	expected := [][]MonikerData{
		{
			{Kind: "import", Scheme: "gomod", Identifier: "pad1"},
		},
		{
			{Kind: "import", Scheme: "gomod", Identifier: "pad2", PackageInformationID: "123"},
			{Kind: "export", Scheme: "gomod", Identifier: "pad2", PackageInformationID: "123"},
		},
	}
	if diff := cmp.Diff(expected, monikers); diff != "" {
		t.Errorf("unexpected moniker data (-want +got):\n%s", diff)
	}
}
// TestMonikerResults asserts that MonikerResults forwards the paging
// parameters (omitting zero-valued skip), decodes the locations plus total
// count, and stamps the client's bundle ID onto each location.
func TestMonikerResults(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		// skip is 0, so it must not appear in the query string.
		assertRequest(t, r, "GET", "/dbs/42/monikerResults", map[string]string{
			"modelType":  "definitions",
			"scheme":     "gomod",
			"identifier": "leftpad",
			"take":       "25",
		})

		_, _ = w.Write([]byte(`{
			"locations": [
				{"path": "foo.go", "range": {"start": {"line": 1, "character": 2}, "end": {"line": 3, "character": 4}}},
				{"path": "bar.go", "range": {"start": {"line": 5, "character": 6}, "end": {"line": 7, "character": 8}}}
			],
			"count": 5
		}`))
	}))
	defer ts.Close()

	expected := []Location{
		{DumpID: 42, Path: "foo.go", Range: Range{Start: Position{1, 2}, End: Position{3, 4}}},
		{DumpID: 42, Path: "bar.go", Range: Range{Start: Position{5, 6}, End: Position{7, 8}}},
	}

	client := &bundleClientImpl{bundleManagerURL: ts.URL, bundleID: 42}
	locations, count, err := client.MonikerResults(context.Background(), "definitions", "gomod", "leftpad", 0, 25)
	if err != nil {
		t.Fatalf("unexpected error querying moniker results: %s", err)
	}
	// The payload above declares a total count of 5 (previously the failure
	// message printed want=2 even though 5 was the asserted value).
	if count != 5 {
		t.Errorf("unexpected count. want=%v have=%v", 5, count)
	}
	if diff := cmp.Diff(expected, locations); diff != "" {
		t.Errorf("unexpected locations (-want +got):\n%s", diff)
	}
}
// TestPackageInformation checks that package information is requested from the
// bundle manager with the correct parameters and decoded properly.
func TestPackageInformation(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		assertRequest(t, r, "GET", "/dbs/42/packageInformation", map[string]string{
			"path":                 "main.go",
			"packageInformationId": "123",
		})
		_, _ = w.Write([]byte(`{"name": "leftpad", "version": "0.1.0"}`))
	}
	ts := httptest.NewServer(http.HandlerFunc(handler))
	defer ts.Close()

	client := &bundleClientImpl{bundleManagerURL: ts.URL, bundleID: 42}
	packageInformation, err := client.PackageInformation(context.Background(), "main.go", "123")
	if err != nil {
		t.Fatalf("unexpected error querying package information: %s", err)
	}

	want := PackageInformationData{
		Name:    "leftpad",
		Version: "0.1.0",
	}
	if diff := cmp.Diff(want, packageInformation); diff != "" {
		t.Errorf("unexpected package information (-want +got):\n%s", diff)
	}
}
// assertRequest fails the test (without stopping it) if the request's method,
// path, or query parameters do not match the expected values.
func assertRequest(t *testing.T, r *http.Request, expectedMethod, expectedPath string, expectedQuery map[string]string) {
	if have := r.Method; have != expectedMethod {
		t.Errorf("unexpected method. want=%s have=%s", expectedMethod, have)
	}
	if have := r.URL.Path; have != expectedPath {
		t.Errorf("unexpected path. want=%s have=%s", expectedPath, have)
	}
	if query := r.URL.Query(); !compareQuery(query, expectedQuery) {
		t.Errorf("unexpected query. want=%v have=%s", expectedQuery, query.Encode())
	}
}
func compareQuery(query url.Values, expected map[string]string) bool {
values := map[string]string{}
for k, v := range query {
values[k] = v[0]
}
return cmp.Diff(expected, values) == ""
}

View File

@ -0,0 +1,61 @@
package bundles
import (
"context"
"fmt"
"io"
"net/http"
"net/url"
)
// BundleManagerClient is the interface to the precise-code-intel-bundle-manager service.
// It is the entry point for both storing raw uploads and querying processed dumps.
type BundleManagerClient interface {
	// BundleClient creates a client that can answer intelligence queries for a single dump.
	BundleClient(bundleID int) BundleClient

	// SendUpload transfers a raw LSIF upload to the bundle manager to be stored on disk.
	SendUpload(ctx context.Context, bundleID int, r io.Reader) error
}
// bundleManagerClientImpl is the concrete HTTP-based BundleManagerClient.
type bundleManagerClientImpl struct {
	// bundleManagerURL is the base URL of the bundle manager service.
	bundleManagerURL string
}

// Compile-time check that bundleManagerClientImpl satisfies BundleManagerClient.
var _ BundleManagerClient = &bundleManagerClientImpl{}
// New creates a client for the bundle manager service reachable at the given base URL.
func New(bundleManagerURL string) BundleManagerClient {
	client := &bundleManagerClientImpl{
		bundleManagerURL: bundleManagerURL,
	}
	return client
}
// BundleClient creates a client that can answer intelligence queries for a single dump.
func (c *bundleManagerClientImpl) BundleClient(bundleID int) BundleClient {
	client := &bundleClientImpl{bundleManagerURL: c.bundleManagerURL, bundleID: bundleID}
	return client
}
// SendUpload transfers a raw LSIF upload to the bundle manager to be stored on disk.
// The reader is streamed as the POST request body. A non-200 response is reported
// as an error.
func (c *bundleManagerClientImpl) SendUpload(ctx context.Context, bundleID int, r io.Reader) error {
	// Renamed from `url` to avoid shadowing the net/url package.
	u, err := url.Parse(fmt.Sprintf("%s/uploads/%d", c.bundleManagerURL, bundleID))
	if err != nil {
		return err
	}

	// Bind the request to ctx so cancellation aborts the transfer
	// (resolves the previous "TODO - use context").
	req, err := http.NewRequestWithContext(ctx, "POST", u.String(), r)
	if err != nil {
		return err
	}

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	// Close the body so the underlying connection can be reused
	// (previously leaked).
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status %d", resp.StatusCode)
	}
	return nil
}

View File

@ -0,0 +1,49 @@
package bundles
import (
"bytes"
"context"
"io/ioutil"
"net/http"
"net/http/httptest"
"testing"
"github.com/google/go-cmp/cmp"
)
// TestSendUpload checks that the upload payload is POSTed unmodified to the
// expected bundle manager endpoint.
func TestSendUpload(t *testing.T) {
	ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.Method != "POST" {
			t.Errorf("unexpected method. want=%s have=%s", "POST", r.Method)
		}
		// Message previously said "unexpected method" for a path mismatch.
		if r.URL.Path != "/uploads/42" {
			t.Errorf("unexpected path. want=%s have=%s", "/uploads/42", r.URL.Path)
		}

		// This handler runs on the server's goroutine, so it must not call
		// t.Fatalf (FailNow is only valid from the test goroutine); report
		// with Errorf and bail out of the handler instead.
		content, err := ioutil.ReadAll(r.Body)
		if err != nil {
			t.Errorf("unexpected error reading payload: %s", err)
			return
		}
		if diff := cmp.Diff([]byte("payload\n"), content); diff != "" {
			t.Errorf("unexpected request payload (-want +got):\n%s", diff)
		}
	}))
	defer ts.Close()

	client := &bundleManagerClientImpl{bundleManagerURL: ts.URL}
	err := client.SendUpload(context.Background(), 42, bytes.NewReader([]byte("payload\n")))
	if err != nil {
		t.Fatalf("unexpected error sending upload: %s", err)
	}
}
// TestSendUploadBadResponse checks that a non-200 response from the bundle
// manager surfaces as an error to the caller.
func TestSendUploadBadResponse(t *testing.T) {
	handler := func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusInternalServerError)
	}
	ts := httptest.NewServer(http.HandlerFunc(handler))
	defer ts.Close()

	client := &bundleManagerClientImpl{bundleManagerURL: ts.URL}
	if err := client.SendUpload(context.Background(), 42, bytes.NewReader([]byte("payload\n"))); err == nil {
		t.Fatalf("unexpected nil error sending upload")
	}
}

View File

@ -0,0 +1,34 @@
package bundles
// Location is an LSP-like location scoped to a dump.
// Location is an LSP-like location scoped to a dump.
type Location struct {
	DumpID int    `json:"dumpId"` // identifier of the dump containing this location
	Path   string `json:"path"`   // file path relative to the dump root
	Range  Range  `json:"range"`  // extent of the location within the file
}

// Range is an inclusive bounds within a file.
type Range struct {
	Start Position `json:"start"`
	End   Position `json:"end"`
}

// Position is a unique position within a file.
type Position struct {
	Line      int `json:"line"`
	Character int `json:"character"`
}

// MonikerData describes a moniker within a dump.
type MonikerData struct {
	Kind                 string `json:"kind"`       // e.g. "import" or "export"
	Scheme               string `json:"scheme"`     // e.g. "gomod"
	Identifier           string `json:"identifier"` // symbol identifier within the scheme
	// PackageInformationID may be empty when the moniker is not attached to
	// package information.
	PackageInformationID string `json:"packageInformationID"`
}

// PackageInformationData describes a package within a package manager system.
type PackageInformationData struct {
	Name    string `json:"name"`
	Version string `json:"version"`
}

View File

@ -0,0 +1,88 @@
package db
import (
"fmt"
"github.com/keegancsmith/sqlf"
)
// MaxTraversalLimit is the maximum size of the CTE result set when traversing commit ancestor
// and descendants. This value affects how stale an upload can be while still serving code
// intelligence for a nearby commit.
//
// The constant is interpolated into the LIMIT clause of visibleIDsCTE below.
const MaxTraversalLimit = 100
// visibleIDsCTE defines a CTE `visible_ids` that returns an ordered list of dump identifiers
// given a previously defined CTE `lineage`. The dump identifiers returned exclude the dumps
// shadowed by another dump: one dump shadows another when it has the same indexer value, has
// a root value enclosing the other, and when it is at a commit closer to the target commit
// value.
//
// NOTE(review): the `%%%%` sequences appear to double-escape the LIKE wildcard
// through sqlf's fmt-style processing so a literal `%` reaches Postgres —
// confirm against the sqlf package before changing.
var visibleIDsCTE = `
	-- Limit the visibility to the maximum traversal depth and approximate
	-- each commit's depth by its row number.
	limited_lineage AS (
		SELECT a.*, row_number() OVER() as n from lineage a LIMIT ` + fmt.Sprintf("%d", MaxTraversalLimit) + `
	),
	-- Correlate commits to dumps and filter out commits without LSIF data
	lineage_with_dumps AS (
		SELECT a.*, d.root, d.indexer, d.id as dump_id FROM limited_lineage a
		JOIN lsif_dumps d ON d.repository_id = a.repository_id AND d."commit" = a."commit"
	),
	visible_ids AS (
		-- Remove dumps where there exists another visible dump of smaller depth with an
		-- overlapping root from the same indexer. Such dumps would not be returned with
		-- a closest commit query so we don't want to return results for them in global
		-- find-reference queries either.
		SELECT DISTINCT t1.dump_id as id FROM lineage_with_dumps t1 WHERE NOT EXISTS (
			SELECT 1 FROM lineage_with_dumps t2
			WHERE t2.n < t1.n AND t1.indexer = t2.indexer AND (
				t2.root LIKE (t1.root || '%%%%') OR
				t1.root LIKE (t2.root || '%%%%')
			)
		)
	)
`
// withAncestorLineage prepares the given query by defining the CTE `visible_ids`. The set of
// candidate dumps are chosen by tracing the commit graph backwards (towards ancestors).
//
// repositoryID and commit seed the recursive lineage CTE; any additional args
// bind the placeholders in the caller-supplied query fragment, in order.
func withAncestorLineage(query string, repositoryID int, commit string, args ...interface{}) *sqlf.Query {
	queryWithCTEs := `
		WITH
		RECURSIVE lineage(id, "commit", parent, repository_id) AS (
			SELECT c.* FROM lsif_commits c WHERE c.repository_id = %s AND c."commit" = %s
			UNION
			SELECT c.* FROM lineage a JOIN lsif_commits c ON a.repository_id = c.repository_id AND a.parent = c."commit"
		), ` + visibleIDsCTE + " " + query
	return sqlf.Sprintf(queryWithCTEs, append([]interface{}{repositoryID, commit}, args...)...)
}
// withBidirectionalLineage prepares the given query by defining the CTE `visible_ids`. The set of
// candidate dumps are chosen by tracing the commit graph both forwards and backwards. The resulting
// order of dumps are interleaved such that two dumps with a similar "distance" are near each other
// in the result set. This prevents the resulting dumps from preferring one direction over the other.
//
// repositoryID and commit seed both directions of the recursive lineage CTE
// (hence each is bound twice); any additional args bind the placeholders in
// the caller-supplied query fragment, in order.
func withBidirectionalLineage(query string, repositoryID int, commit string, args ...interface{}) *sqlf.Query {
	queryWithCTEs := `
		WITH
		RECURSIVE lineage(id, "commit", parent_commit, repository_id, direction) AS (
			SELECT l.* FROM (
				-- seed recursive set with commit looking in ancestor direction
				SELECT c.*, 'A' FROM lsif_commits c WHERE c.repository_id = %s AND c."commit" = %s
				UNION
				-- seed recursive set with commit looking in descendant direction
				SELECT c.*, 'D' FROM lsif_commits c WHERE c.repository_id = %s AND c."commit" = %s
			) l
			UNION
			SELECT * FROM (
				WITH l_inner AS (SELECT * FROM lineage)
				-- get next ancestors (multiple parents for merge commits)
				SELECT c.*, 'A' FROM l_inner l JOIN lsif_commits c ON l.direction = 'A' AND c.repository_id = l.repository_id AND c."commit" = l.parent_commit
				UNION
				-- get next descendants
				SELECT c.*, 'D' FROM l_inner l JOIN lsif_commits c ON l.direction = 'D' and c.repository_id = l.repository_id AND c.parent_commit = l."commit"
			) subquery
		), ` + visibleIDsCTE + " " + query
	return sqlf.Sprintf(queryWithCTEs, append([]interface{}{repositoryID, commit, repositoryID, commit}, args...)...)
}

View File

@ -0,0 +1,99 @@
package db
import (
"context"
"database/sql"
"time"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/sourcegraph/internal/db/dbutil"
)
// DB is the interface to Postgres that deals with LSIF-specific tables.
//
//   - lsif_commits
//   - lsif_packages
//   - lsif_references
//   - lsif_uploads
//
// These tables are kept separate from the remainder of Sourcegraph tablespace.
// All methods accept a context for cancellation. The concrete implementation
// backed by *sql.DB is dbImpl, below.
type DB interface {
	// GetUploadByID returns an upload by its identifier and boolean flag indicating its existence.
	GetUploadByID(ctx context.Context, id int) (Upload, bool, error)

	// GetUploadsByRepo returns a list of uploads for a particular repo and the total count of records matching the given conditions.
	GetUploadsByRepo(ctx context.Context, repositoryID int, state, term string, visibleAtTip bool, limit, offset int) ([]Upload, int, error)

	// Enqueue inserts a new upload with a "queued" state, returning its identifier and a TxCloser that must be closed to commit the transaction.
	Enqueue(ctx context.Context, commit, root, tracingContext string, repositoryID int, indexerName string) (int, TxCloser, error)

	// GetStates returns the states for the uploads with the given identifiers.
	GetStates(ctx context.Context, ids []int) (map[int]string, error)

	// DeleteUploadByID deletes an upload by its identifier. If the upload was visible at the tip of its repository's default branch,
	// the visibility of all uploads for that repository are recalculated. The given function is expected to return the newest commit
	// on the default branch when invoked.
	DeleteUploadByID(ctx context.Context, id int, getTipCommit func(repositoryID int) (string, error)) (bool, error)

	// ResetStalled moves all unlocked uploads processing for more than `StalledUploadMaxAge` back to the queued state.
	// This method returns a list of updated upload identifiers.
	ResetStalled(ctx context.Context, now time.Time) ([]int, error)

	// GetDumpByID returns a dump by its identifier and boolean flag indicating its existence.
	GetDumpByID(ctx context.Context, id int) (Dump, bool, error)

	// FindClosestDumps returns the set of dumps that can most accurately answer queries for the given repository, commit, and file.
	FindClosestDumps(ctx context.Context, repositoryID int, commit, file string) ([]Dump, error)

	// DeleteOldestDump deletes the oldest dump that is not currently visible at the tip of its repository's default branch.
	// This method returns the deleted dump's identifier and a flag indicating its (previous) existence.
	DeleteOldestDump(ctx context.Context) (int, bool, error)

	// GetPackage returns the dump that provides the package with the given scheme, name, and version and a flag indicating its existence.
	GetPackage(ctx context.Context, scheme, name, version string) (Dump, bool, error)

	// SameRepoPager returns a ReferencePager for dumps that belong to the given repository and commit and reference the package with the
	// given scheme, name, and version.
	SameRepoPager(ctx context.Context, repositoryID int, commit, scheme, name, version string, limit int) (int, ReferencePager, error)

	// PackageReferencePager returns a ReferencePager for dumps that belong to a remote repository (distinct from the given repository id)
	// and reference the package with the given scheme, name, and version. All resulting dumps are visible at the tip of their repository's
	// default branch.
	PackageReferencePager(ctx context.Context, scheme, name, version string, repositoryID, limit int) (int, ReferencePager, error)
}
// dbImpl is the concrete DB implementation backed by a Postgres connection.
type dbImpl struct {
	db *sql.DB
}

// Compile-time check that dbImpl satisfies the DB interface.
var _ DB = &dbImpl{}
// New creates a new instance of DB connected to the given Postgres DSN.
func New(postgresDSN string) (DB, error) {
	handle, err := dbutil.NewDB(postgresDSN, "precise-code-intel-api-server")
	if err != nil {
		return nil, err
	}
	return &dbImpl{db: handle}, nil
}
// query performs Query on the underlying connection, rendering the sqlf query
// with Postgres bind variables. The caller is responsible for closing the
// returned rows.
func (db *dbImpl) query(ctx context.Context, query *sqlf.Query) (*sql.Rows, error) {
	return db.db.QueryContext(ctx, query.Query(sqlf.PostgresBindVar), query.Args()...)
}
// queryRow performs QueryRow on the underlying connection, rendering the sqlf
// query with Postgres bind variables. Errors are deferred to the row's Scan.
func (db *dbImpl) queryRow(ctx context.Context, query *sqlf.Query) *sql.Row {
	return db.db.QueryRowContext(ctx, query.Query(sqlf.PostgresBindVar), query.Args()...)
}
// beginTx performs BeginTx on the underlying connection and wraps the transaction.
// The wrapped tx must later be resolved via closeTx (see callers such as
// updateDumpsVisibleFromTip) to commit or roll back.
func (db *dbImpl) beginTx(ctx context.Context) (*transactionWrapper, error) {
	tx, err := db.db.BeginTx(ctx, nil)
	if err != nil {
		return nil, err
	}
	return &transactionWrapper{tx}, nil
}

View File

@ -0,0 +1,7 @@
package db
import "github.com/sourcegraph/sourcegraph/internal/db/dbtesting"
// init sets the database name suffix used by dbtesting for this package's
// tests, keeping its test database distinct from other packages'.
func init() {
	dbtesting.DBNameSuffix = "precise-code-intel-api-server"
}

View File

@ -0,0 +1,168 @@
package db
import (
"context"
"time"
"github.com/keegancsmith/sqlf"
)
// Dump is a subset of the lsif_uploads table (queried via the lsif_dumps view) and stores
// only processed records.
//
// The pointer-typed fields (FailureSummary, FailureStacktrace, StartedAt,
// FinishedAt) may be nil.
type Dump struct {
	ID                int        `json:"id"`
	Commit            string     `json:"commit"`
	Root              string     `json:"root"`
	VisibleAtTip      bool       `json:"visibleAtTip"`
	UploadedAt        time.Time  `json:"uploadedAt"`
	State             string     `json:"state"`
	FailureSummary    *string    `json:"failureSummary"`
	FailureStacktrace *string    `json:"failureStacktrace"`
	StartedAt         *time.Time `json:"startedAt"`
	FinishedAt        *time.Time `json:"finishedAt"`
	TracingContext    string     `json:"tracingContext"`
	RepositoryID      int        `json:"repositoryId"`
	Indexer           string     `json:"indexer"`
}
// GetDumpByID returns a dump by its identifier and boolean flag indicating its existence.
func (db *dbImpl) GetDumpByID(ctx context.Context, id int) (Dump, bool, error) {
	query := `
		SELECT
			d.id,
			d.commit,
			d.root,
			d.visible_at_tip,
			d.uploaded_at,
			d.state,
			d.failure_summary,
			d.failure_stacktrace,
			d.started_at,
			d.finished_at,
			d.tracing_context,
			d.repository_id,
			d.indexer
		FROM lsif_dumps d WHERE id = %d
	`
	// A missing row is reported via the boolean alone; ignoreErrNoRows is
	// assumed to map the no-rows error to nil (behavior matches TestGetDumpByID).
	dump, err := scanDump(db.queryRow(ctx, sqlf.Sprintf(query, id)))
	if err != nil {
		return Dump{}, false, ignoreErrNoRows(err)
	}
	return dump, true, nil
}
// FindClosestDumps returns the set of dumps that can most accurately answer queries for the given repository, commit, and file.
//
// Both queries run in a single transaction. The error result is named so the
// deferred closeTx can commit or roll back AND surface any commit error to
// the caller — previously the result was unnamed, so the deferred assignment
// updated a dead local and commit errors were silently dropped (compare
// updateDumpsVisibleFromTip, which already names err).
func (db *dbImpl) FindClosestDumps(ctx context.Context, repositoryID int, commit, file string) (_ []Dump, err error) {
	tw, err := db.beginTx(ctx)
	if err != nil {
		return nil, err
	}
	defer func() {
		err = closeTx(tw.tx, err)
	}()

	// Select the dump identifiers visible for this commit whose root
	// encloses the requested file.
	visibleIDsQuery := `
		SELECT d.dump_id FROM lineage_with_dumps d
		WHERE %s LIKE (d.root || '%%%%') AND d.dump_id IN (SELECT * FROM visible_ids)
		ORDER BY d.n
	`
	ids, err := scanInts(tw.query(ctx, withBidirectionalLineage(visibleIDsQuery, repositoryID, commit, file)))
	if err != nil {
		return nil, err
	}
	if len(ids) == 0 {
		return nil, nil
	}

	// Hydrate the full dump records for the visible identifiers.
	query := `
		SELECT
			d.id,
			d.commit,
			d.root,
			d.visible_at_tip,
			d.uploaded_at,
			d.state,
			d.failure_summary,
			d.failure_stacktrace,
			d.started_at,
			d.finished_at,
			d.tracing_context,
			d.repository_id,
			d.indexer
		FROM lsif_dumps d WHERE id IN (%s)
	`
	dumps, err := scanDumps(tw.query(ctx, sqlf.Sprintf(query, sqlf.Join(intsToQueries(ids), ", "))))
	if err != nil {
		return nil, err
	}

	return deduplicateDumps(dumps), nil
}
// deduplicateDumps returns a copy of the given slice of dumps with duplicate
// identifiers removed. The first dump with a unique identifier is retained.
func deduplicateDumps(allDumps []Dump) []Dump {
	var unique []Dump
	seen := make(map[int]struct{}, len(allDumps))
	for _, d := range allDumps {
		if _, dup := seen[d.ID]; dup {
			continue
		}
		seen[d.ID] = struct{}{}
		unique = append(unique, d)
	}
	return unique
}
// DeleteOldestDump deletes the oldest dump that is not currently visible at the tip of its repository's default branch.
// This method returns the deleted dump's identifier and a flag indicating its (previous) existence.
func (db *dbImpl) DeleteOldestDump(ctx context.Context) (int, bool, error) {
	// Candidates come from the lsif_dumps view, but the delete targets the
	// backing lsif_uploads table; RETURNING yields the deleted identifier.
	query := `
		DELETE FROM lsif_uploads
		WHERE id IN (
			SELECT id FROM lsif_dumps
			WHERE visible_at_tip = false
			ORDER BY uploaded_at
			LIMIT 1
		) RETURNING id
	`
	// An empty dump set produces no row; ignoreErrNoRows is assumed to map
	// that to a nil error so the boolean alone reports non-existence.
	id, err := scanInt(db.queryRow(ctx, sqlf.Sprintf(query)))
	if err != nil {
		return 0, false, ignoreErrNoRows(err)
	}
	return id, true, nil
}
// updateDumpsVisibleFromTip recalculates the visible_at_tip flag of all dumps of the given repository.
//
// When tw is nil a new transaction is opened and resolved via closeTx through
// the named error result; otherwise the caller's transaction is used and the
// caller remains responsible for committing it.
func (db *dbImpl) updateDumpsVisibleFromTip(ctx context.Context, tw *transactionWrapper, repositoryID int, tipCommit string) (err error) {
	if tw == nil {
		tw, err = db.beginTx(ctx)
		if err != nil {
			return err
		}
		defer func() {
			err = closeTx(tw.tx, err)
		}()
	}
	// Update dump records by:
	//   (1) unsetting the visibility flag of all previously visible dumps, and
	//   (2) setting the visibility flag of all currently visible dumps
	query := `
		UPDATE lsif_dumps d
		SET visible_at_tip = id IN (SELECT * from visible_ids)
		WHERE d.repository_id = %s AND (d.id IN (SELECT * from visible_ids) OR d.visible_at_tip)
	`
	// The trailing repositoryID argument binds the %s in the query above;
	// withAncestorLineage itself consumes repositoryID and tipCommit for the CTE.
	_, err = tw.exec(ctx, withAncestorLineage(query, repositoryID, tipCommit, repositoryID))
	return err
}

View File

@ -0,0 +1,590 @@
package db
import (
"context"
"fmt"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbtesting"
)
// TestGetDumpByID checks that a missing identifier is reported via the boolean
// (not an error), and that an inserted upload round-trips through GetDumpByID
// with every field intact.
func TestGetDumpByID(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// Dump does not exist initially
	if _, exists, err := db.GetDumpByID(context.Background(), 1); err != nil {
		t.Fatalf("unexpected error getting dump: %s", err)
	} else if exists {
		t.Fatal("unexpected record")
	}
	uploadedAt := time.Unix(1587396557, 0).UTC()
	startedAt := uploadedAt.Add(time.Minute)
	finishedAt := uploadedAt.Add(time.Minute * 2)
	expected := Dump{
		ID:                1,
		Commit:            makeCommit(1),
		Root:              "sub/",
		VisibleAtTip:      true,
		UploadedAt:        uploadedAt,
		State:             "completed",
		FailureSummary:    nil,
		FailureStacktrace: nil,
		StartedAt:         &startedAt,
		FinishedAt:        &finishedAt,
		TracingContext:    `{"id": 42}`,
		RepositoryID:      50,
		Indexer:           "lsif-go",
	}
	// Insert an upload with the exact field values we expect the dump to carry.
	insertUploads(t, db.db, Upload{
		ID:                expected.ID,
		Commit:            expected.Commit,
		Root:              expected.Root,
		VisibleAtTip:      expected.VisibleAtTip,
		UploadedAt:        expected.UploadedAt,
		State:             expected.State,
		FailureSummary:    expected.FailureSummary,
		FailureStacktrace: expected.FailureStacktrace,
		StartedAt:         expected.StartedAt,
		FinishedAt:        expected.FinishedAt,
		TracingContext:    expected.TracingContext,
		RepositoryID:      expected.RepositoryID,
		Indexer:           expected.Indexer,
	})
	if dump, exists, err := db.GetDumpByID(context.Background(), 1); err != nil {
		t.Fatalf("unexpected error getting dump: %s", err)
	} else if !exists {
		t.Fatal("expected record to exist")
	} else if diff := cmp.Diff(expected, dump); diff != "" {
		t.Errorf("unexpected dump (-want +got):\n%s", diff)
	}
}
// TestFindClosestDumps checks nearest-dump resolution over a branching and
// merging commit graph. Bracketed commits in the diagram carry uploads.
func TestFindClosestDumps(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This database has the following commit graph:
	//
	// [1] --+--- 2 --------+--5 -- 6 --+-- [7]
	//       |              |          |
	//       +-- [3] -- 4 --+          +--- 8
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(1)},
		makeCommit(4): {makeCommit(3)},
		makeCommit(5): {makeCommit(2), makeCommit(4)},
		makeCommit(6): {makeCommit(5)},
		makeCommit(7): {makeCommit(6)},
		makeCommit(8): {makeCommit(6)},
	})
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1)},
		Upload{ID: 2, Commit: makeCommit(3)},
		Upload{ID: 3, Commit: makeCommit(7)},
	)
	// Commits equidistant from several dumps (e.g. 5, 8) may resolve to any
	// of them, hence anyOfIDs.
	testFindClosestDumps(t, db, []FindClosestDumpsTestCase{
		{commit: makeCommit(1), file: "file.ts", anyOfIDs: []int{1}},
		{commit: makeCommit(2), file: "file.ts", anyOfIDs: []int{1}},
		{commit: makeCommit(3), file: "file.ts", anyOfIDs: []int{2}},
		{commit: makeCommit(4), file: "file.ts", anyOfIDs: []int{2}},
		{commit: makeCommit(6), file: "file.ts", anyOfIDs: []int{3}},
		{commit: makeCommit(7), file: "file.ts", anyOfIDs: []int{3}},
		{commit: makeCommit(5), file: "file.ts", anyOfIDs: []int{1, 2, 3}},
		{commit: makeCommit(8), file: "file.ts", anyOfIDs: []int{1, 2}},
	})
}
// TestFindClosestDumpsAlternateCommitGraph checks that a dump on one branch is
// not visible from commits on unrelated branches (only ancestors/descendants
// of the dumped commit resolve to it).
func TestFindClosestDumpsAlternateCommitGraph(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This database has the following commit graph:
	//
	// 1 --+-- [2] ---- 3
	//     |
	//     +--- 4 --+-- 5 -- 6
	//              |
	//              +-- 7 -- 8
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(1)},
		makeCommit(5): {makeCommit(4)},
		makeCommit(6): {makeCommit(5)},
		makeCommit(7): {makeCommit(4)},
		makeCommit(8): {makeCommit(7)},
	})
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(2)},
	)
	testFindClosestDumps(t, db, []FindClosestDumpsTestCase{
		{commit: makeCommit(1), allOfIDs: []int{1}},
		{commit: makeCommit(2), allOfIDs: []int{1}},
		{commit: makeCommit(3), allOfIDs: []int{1}},
		{commit: makeCommit(4)},
		{commit: makeCommit(6)},
		{commit: makeCommit(7)},
		{commit: makeCommit(5)},
		{commit: makeCommit(8)},
	})
}
// TestFindClosestDumpsDistinctRoots checks that a dump only answers queries
// for files under its root, and that a nearby commit (including an ancestor)
// can resolve to a dump whose root matches the file.
func TestFindClosestDumpsDistinctRoots(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This database has the following commit graph:
	//
	// 1 --+-- [2]
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
	})
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(2), Root: "root1/"},
		Upload{ID: 2, Commit: makeCommit(2), Root: "root2/"},
	)
	testFindClosestDumps(t, db, []FindClosestDumpsTestCase{
		{commit: makeCommit(1), file: "blah"},
		{commit: makeCommit(2), file: "root1/file.ts", allOfIDs: []int{1}},
		{commit: makeCommit(1), file: "root2/file.ts", allOfIDs: []int{2}},
		{commit: makeCommit(2), file: "root2/file.ts", allOfIDs: []int{2}},
		{commit: makeCommit(1), file: "root3/file.ts"},
	})
}
// TestFindClosestDumpsOverlappingRoots checks shadowing between dumps with
// enclosing roots from the same indexer, and that dumps from distinct
// indexers do not shadow one another.
func TestFindClosestDumpsOverlappingRoots(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This database has the following commit graph:
	//
	// 1 -- 2 --+-- 3 --+-- 5 -- 6
	//          |       |
	//          +-- 4 --+
	//
	// With the following LSIF dumps:
	//
	// | Commit | Root    | Indexer |
	// | ------ + ------- + ------- |
	// | 1      | root3/  | lsif-go |
	// | 1      | root4/  | lsif-py |
	// | 2      | root1/  | lsif-go |
	// | 2      | root2/  | lsif-go |
	// | 2      |         | lsif-py | (overwrites root4/ at commit 1)
	// | 3      | root1/  | lsif-go | (overwrites root1/ at commit 2)
	// | 4      |         | lsif-py | (overwrites (root) at commit 2)
	// | 5      | root2/  | lsif-go | (overwrites root2/ at commit 2)
	// | 6      | root1/  | lsif-go | (overwrites root1/ at commit 2)
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(2)},
		makeCommit(5): {makeCommit(3), makeCommit(4)},
		makeCommit(6): {makeCommit(5)},
	})
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1), Root: "root3/"},
		Upload{ID: 2, Commit: makeCommit(1), Root: "root4/", Indexer: "lsif-py"},
		Upload{ID: 3, Commit: makeCommit(2), Root: "root1/"},
		Upload{ID: 4, Commit: makeCommit(2), Root: "root2/"},
		Upload{ID: 5, Commit: makeCommit(2), Root: "", Indexer: "lsif-py"},
		Upload{ID: 6, Commit: makeCommit(3), Root: "root1/"},
		Upload{ID: 7, Commit: makeCommit(4), Root: "", Indexer: "lsif-py"},
		Upload{ID: 8, Commit: makeCommit(5), Root: "root2/"},
		Upload{ID: 9, Commit: makeCommit(6), Root: "root1/"},
	)
	testFindClosestDumps(t, db, []FindClosestDumpsTestCase{
		{commit: makeCommit(4), file: "root1/file.ts", allOfIDs: []int{7, 3}},
		{commit: makeCommit(5), file: "root2/file.ts", allOfIDs: []int{8, 7}},
		{commit: makeCommit(3), file: "root3/file.ts", allOfIDs: []int{5, 1}},
		{commit: makeCommit(1), file: "root4/file.ts", allOfIDs: []int{2}},
		{commit: makeCommit(2), file: "root4/file.ts", allOfIDs: []int{5}},
	})
}
// TestFindClosestDumpsMaxTraversalLimit checks that the bidirectional lineage
// traversal stops after MaxTraversalLimit commits, so a dump just beyond the
// interleaved traversal horizon is not found.
func TestFindClosestDumpsMaxTraversalLimit(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This repository has the following commit graph (ancestors to the left):
	//
	// MAX_TRAVERSAL_LIMIT -- ... -- 2 -- 1 -- 0
	//
	// At commit `50`, the traversal limit will be reached before visiting commit `0`
	// because commits are visited in this order:
	//
	// | depth | commit |
	// | ----- | ------ |
	// | 1     | 50     | (with direction 'A')
	// | 2     | 50     | (with direction 'D')
	// | 3     | 51     |
	// | 4     | 49     |
	// | 5     | 52     |
	// | 6     | 48     |
	// | ...   |        |
	// | 99    | 99     |
	// | 100   | 1      | (limit reached)
	commits := map[string][]string{}
	for i := 0; i < MaxTraversalLimit; i++ {
		commits[makeCommit(i)] = []string{makeCommit(i + 1)}
	}
	insertCommits(t, db.db, commits)
	insertUploads(t, db.db, Upload{ID: 1, Commit: makeCommit(0)})
	testFindClosestDumps(t, db, []FindClosestDumpsTestCase{
		{commit: makeCommit(0), file: "file.ts", allOfIDs: []int{1}},
		{commit: makeCommit(1), file: "file.ts", allOfIDs: []int{1}},
		{commit: makeCommit(MaxTraversalLimit/2 - 1), file: "file.ts", allOfIDs: []int{1}},
		{commit: makeCommit(MaxTraversalLimit / 2), file: "file.ts"},
	})
}
// TestDeleteOldestDump checks that pruning deletes dumps oldest-first while
// skipping dumps visible at the tip, and reports non-prunability on an empty set.
func TestDeleteOldestDump(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// Cannot prune empty dump set
	if _, prunable, err := db.DeleteOldestDump(context.Background()); err != nil {
		t.Fatalf("unexpected error pruning dumps: %s", err)
	} else if prunable {
		t.Fatal("unexpectedly prunable")
	}
	t1 := time.Unix(1587396557, 0).UTC()
	t2 := t1.Add(time.Minute)
	t3 := t1.Add(time.Minute * 2)
	t4 := t1.Add(time.Minute * 3)
	insertUploads(t, db.db,
		Upload{ID: 1, UploadedAt: t1},
		Upload{ID: 2, UploadedAt: t2, VisibleAtTip: true},
		Upload{ID: 3, UploadedAt: t3},
		Upload{ID: 4, UploadedAt: t4},
	)
	// Prune oldest
	if id, prunable, err := db.DeleteOldestDump(context.Background()); err != nil {
		t.Fatalf("unexpected error pruning dumps: %s", err)
	} else if !prunable {
		t.Fatal("unexpectedly non-prunable")
	} else if id != 1 {
		t.Errorf("unexpected pruned identifier. want=%d have=%d", 1, id)
	}
	// Prune next oldest (skips visible at tip)
	if id, prunable, err := db.DeleteOldestDump(context.Background()); err != nil {
		t.Fatalf("unexpected error pruning dumps: %s", err)
	} else if !prunable {
		t.Fatal("unexpectedly non-prunable")
	} else if id != 3 {
		t.Errorf("unexpected pruned identifier. want=%d have=%d", 3, id)
	}
}
// FindClosestDumpsTestCase describes one FindClosestDumps invocation and the
// identifiers it should return. anyOfIDs asserts exactly one result whose ID
// is in the list; allOfIDs asserts the full result set; when both are empty,
// no dumps are expected.
type FindClosestDumpsTestCase struct {
	commit   string
	file     string
	anyOfIDs []int
	allOfIDs []int
}
// testFindClosestDumps runs each closest-dump test case as a subtest against
// the given database, dispatching to the assertion matching the case's
// expectations (anyOfIDs, allOfIDs, or empty result).
func testFindClosestDumps(t *testing.T, db DB, testCases []FindClosestDumpsTestCase) {
	for _, tc := range testCases {
		tc := tc
		t.Run(fmt.Sprintf("commit=%s file=%s", tc.commit, tc.file), func(t *testing.T) {
			dumps, err := db.FindClosestDumps(context.Background(), 50, tc.commit, tc.file)
			if err != nil {
				t.Fatalf("unexpected error finding closest dumps: %s", err)
			}
			switch {
			case len(tc.anyOfIDs) > 0:
				testAnyOf(t, dumps, tc.anyOfIDs)
			case len(tc.allOfIDs) > 0:
				testAllOf(t, dumps, tc.allOfIDs)
			case len(dumps) != 0:
				t.Errorf("unexpected nearest dump length. want=%d have=%d", 0, len(dumps))
			}
		})
	}
}
// testAnyOf asserts that exactly one dump was returned and that its
// identifier is one of the expected values.
func testAnyOf(t *testing.T, dumps []Dump, expectedIDs []int) {
	switch {
	case len(dumps) != 1:
		t.Errorf("unexpected nearest dump length. want=%d have=%d", 1, len(dumps))
	case !testPresence(dumps[0].ID, expectedIDs):
		t.Errorf("unexpected nearest dump ids. want one of %v have=%v", expectedIDs, dumps[0].ID)
	}
}
// testAllOf asserts that every expected identifier is present in the returned
// dumps and that the result set has the expected size.
func testAllOf(t *testing.T, dumps []Dump, expectedIDs []int) {
	if len(dumps) != len(expectedIDs) {
		// Report the expected length rather than the hard-coded 1 copied
		// from testAnyOf.
		t.Errorf("unexpected nearest dump length. want=%d have=%d", len(expectedIDs), len(dumps))
	}
	var dumpIDs []int
	for _, dump := range dumps {
		dumpIDs = append(dumpIDs, dump.ID)
	}
	for _, expectedID := range expectedIDs {
		if !testPresence(expectedID, dumpIDs) {
			t.Errorf("unexpected nearest dump ids. want all of %v have=%v", expectedIDs, dumpIDs)
			return
		}
	}
}
// testPresence reports whether needle occurs in haystack.
func testPresence(needle int, haystack []int) bool {
	for i := range haystack {
		if haystack[i] == needle {
			return true
		}
	}
	return false
}
// TestUpdateDumpsVisibleFromTipOverlappingRootsSameIndexer checks that, for a
// single indexer, only dumps not shadowed by a closer dump with an enclosing
// root remain visible from the given tip (commit 6 here, so the dump at
// commit 7 stays invisible).
func TestUpdateDumpsVisibleFromTipOverlappingRootsSameIndexer(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This database has the following commit graph:
	//
	// [1] -- [2] -- [3] -- [4] -- 5 -- [6] -- [7]
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1), Root: "r1/"},
		Upload{ID: 2, Commit: makeCommit(2), Root: "r2/"},
		Upload{ID: 3, Commit: makeCommit(3)},
		Upload{ID: 4, Commit: makeCommit(4), Root: "r3/"},
		Upload{ID: 5, Commit: makeCommit(6), Root: "r4/"},
		Upload{ID: 6, Commit: makeCommit(7), Root: "r5/"},
	)
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(3)},
		makeCommit(5): {makeCommit(4)},
		makeCommit(6): {makeCommit(5)},
		makeCommit(7): {makeCommit(6)},
	})
	err := db.updateDumpsVisibleFromTip(context.Background(), nil, 50, makeCommit(6))
	if err != nil {
		t.Fatalf("unexpected error updating dumps visible from tip: %s", err)
	}
	visibilities := getDumpVisibilities(t, db.db)
	expected := map[int]bool{1: false, 2: false, 3: false, 4: true, 5: true, 6: false}
	if diff := cmp.Diff(expected, visibilities); diff != "" {
		t.Errorf("unexpected visibility (-want +got):\n%s", diff)
	}
}
// TestUpdateDumpsVisibleFromTipOverlappingRoots checks visibility
// recalculation when dumps from two indexers (lsif-go and the lsif-tsc
// variants) have overlapping roots: shadowing only applies within the same
// indexer.
func TestUpdateDumpsVisibleFromTipOverlappingRoots(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This database has the following commit graph:
	//
	// [1] -- 2 -- [3] -- [4] -- [5] -- [6] -- [7]
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1), Root: "r1/"},
		Upload{ID: 2, Commit: makeCommit(3), Root: "r2/"},
		Upload{ID: 3, Commit: makeCommit(4), Root: "r1/"},
		Upload{ID: 4, Commit: makeCommit(6), Root: "r3/"},
		Upload{ID: 5, Commit: makeCommit(7), Root: "r4/"},
		Upload{ID: 6, Commit: makeCommit(1), Root: "r1/", Indexer: "lsif-tsc"},
		Upload{ID: 7, Commit: makeCommit(3), Root: "r2/", Indexer: "lsif-tsc"},
		Upload{ID: 8, Commit: makeCommit(4), Indexer: "lsif-tsc"},
		Upload{ID: 9, Commit: makeCommit(5), Root: "r3/", Indexer: "lsif-tsc"},
	)
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(3)},
		makeCommit(5): {makeCommit(4)},
		makeCommit(6): {makeCommit(5)},
		makeCommit(7): {makeCommit(6)},
	})
	err := db.updateDumpsVisibleFromTip(context.Background(), nil, 50, makeCommit(6))
	if err != nil {
		t.Fatalf("unexpected error updating dumps visible from tip: %s", err)
	}
	visibilities := getDumpVisibilities(t, db.db)
	expected := map[int]bool{1: false, 2: true, 3: true, 4: true, 5: false, 6: false, 7: false, 8: false, 9: true}
	if diff := cmp.Diff(expected, visibilities); diff != "" {
		// Print the computed diff like every sibling visibility test does;
		// previously this branch discarded the diff and re-printed both maps.
		t.Errorf("unexpected visibility (-want +got):\n%s", diff)
	}
}
// TestUpdateDumpsVisibleFromTipBranchingPaths checks visibility recalculation
// across a commit graph with multiple branches merging into the tip's
// lineage. Bracketed commits in the diagram carry uploads.
func TestUpdateDumpsVisibleFromTipBranchingPaths(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}
	// This database has the following commit graph:
	//
	// 1 --+-- [2] --- 3 ---+
	//     |                |
	//     +--- 4 --- [5] --+ -- [8] --+-- [9]
	//     |                           |
	//     +-- [6] --- 7 --------------+
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(2), Root: "r2/"},
		Upload{ID: 2, Commit: makeCommit(5), Root: "r2/a/"},
		Upload{ID: 3, Commit: makeCommit(5), Root: "r2/b/"},
		Upload{ID: 4, Commit: makeCommit(6), Root: "r1/a/"},
		Upload{ID: 5, Commit: makeCommit(6), Root: "r1/b/"},
		Upload{ID: 6, Commit: makeCommit(8), Root: "r1/"},
		Upload{ID: 7, Commit: makeCommit(9), Root: "r3/"},
	)
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(1)},
		makeCommit(5): {makeCommit(4)},
		makeCommit(8): {makeCommit(5), makeCommit(3)},
		makeCommit(9): {makeCommit(7), makeCommit(8)},
		makeCommit(6): {makeCommit(1)},
		makeCommit(7): {makeCommit(6)},
	})
	err := db.updateDumpsVisibleFromTip(context.Background(), nil, 50, makeCommit(9))
	if err != nil {
		t.Fatalf("unexpected error updating dumps visible from tip: %s", err)
	}
	visibilities := getDumpVisibilities(t, db.db)
	expected := map[int]bool{1: false, 2: true, 3: true, 4: false, 5: false, 6: true, 7: true}
	if diff := cmp.Diff(expected, visibilities); diff != "" {
		t.Errorf("unexpected visibility (-want +got):\n%s", diff)
	}
}
// TestUpdateDumpsVisibleFromTipMaxTraversalLimit asserts that the commit-graph
// walk is bounded by MaxTraversalLimit: a dump exactly at the limit is still
// visible, and one commit further it falls out of range.
func TestUpdateDumpsVisibleFromTipMaxTraversalLimit(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	// This repository has the following commit graph (ancestors to the left):
	//
	// (MAX_TRAVERSAL_LIMIT + 1) -- ... -- 2 -- 1 -- 0
	commits := map[string][]string{}
	for i := 0; i < MaxTraversalLimit+1; i++ {
		commits[makeCommit(i)] = []string{makeCommit(i + 1)}
	}
	insertCommits(t, db.db, commits)

	// The sole dump sits exactly MaxTraversalLimit commits away from commit 0.
	insertUploads(t, db.db, Upload{ID: 1, Commit: fmt.Sprintf("%040d", MaxTraversalLimit)})

	// checkVisibilities recomputes visibility from the given tip and asserts
	// the resulting per-dump visibility map. Extracted because the original
	// repeated this stanza verbatim three times.
	checkVisibilities := func(tip string, expected map[int]bool) {
		if err := db.updateDumpsVisibleFromTip(context.Background(), nil, 50, tip); err != nil {
			t.Fatalf("unexpected error updating dumps visible from tip: %s", err)
		}
		visibilities := getDumpVisibilities(t, db.db)
		if diff := cmp.Diff(expected, visibilities); diff != "" {
			t.Errorf("unexpected visibility (-want +got):\n%s", diff)
		}
	}

	// From the dump's own commit the distance is zero: visible.
	checkVisibilities(makeCommit(MaxTraversalLimit), map[int]bool{1: true})
	// From commit 1 the dump is MaxTraversalLimit-1 commits away: visible.
	checkVisibilities(makeCommit(1), map[int]bool{1: true})
	// From commit 0 the dump is exactly MaxTraversalLimit commits away,
	// which exceeds the walk: no longer visible.
	checkVisibilities(makeCommit(0), map[int]bool{1: false})
}

View File

@ -0,0 +1,164 @@
package db
import (
"context"
"database/sql"
"fmt"
"testing"
"github.com/keegancsmith/sqlf"
)
// PackageModel mirrors a row of the lsif_packages table for test insertion.
type PackageModel struct {
	Scheme  string // moniker scheme, e.g. "gomod"
	Name    string // package name
	Version string // package version
	DumpID  int    // identifier of the dump providing the package
}
// ReferenceModel mirrors a row of the lsif_references table for test insertion.
type ReferenceModel struct {
	Scheme  string // moniker scheme, e.g. "gomod"
	Name    string // package name
	Version string // package version
	DumpID  int    // identifier of the dump referencing the package
	Filter  []byte // serialized bloom filter payload
}
// insertUploads populates the lsif_uploads table with the given upload models.
// Zero-valued fields are defaulted: Commit derives from the ID via makeCommit,
// State becomes "completed", RepositoryID becomes 50, and Indexer becomes
// "lsif-go". Fails the test on the first insertion error.
func insertUploads(t *testing.T, db *sql.DB, uploads ...Upload) {
	for _, upload := range uploads {
		// Apply test-friendly defaults so callers only specify the fields
		// their assertions depend on.
		if upload.Commit == "" {
			upload.Commit = makeCommit(upload.ID)
		}
		if upload.State == "" {
			upload.State = "completed"
		}
		if upload.RepositoryID == 0 {
			upload.RepositoryID = 50
		}
		if upload.Indexer == "" {
			upload.Indexer = "lsif-go"
		}

		query := sqlf.Sprintf(`
			INSERT INTO lsif_uploads (
				id,
				commit,
				root,
				visible_at_tip,
				uploaded_at,
				state,
				failure_summary,
				failure_stacktrace,
				started_at,
				finished_at,
				tracing_context,
				repository_id,
				indexer
			) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
		`,
			upload.ID,
			upload.Commit,
			upload.Root,
			upload.VisibleAtTip,
			upload.UploadedAt,
			upload.State,
			upload.FailureSummary,
			upload.FailureStacktrace,
			upload.StartedAt,
			upload.FinishedAt,
			upload.TracingContext,
			upload.RepositoryID,
			upload.Indexer,
		)

		if _, err := db.ExecContext(context.Background(), query.Query(sqlf.PostgresBindVar), query.Args()...); err != nil {
			t.Fatalf("unexpected error while inserting dump: %s", err)
		}
	}
}
// insertPackages populates the lsif_packages table with the given package models.
// Rows are inserted one at a time; the test fails on the first error.
func insertPackages(t *testing.T, db *sql.DB, packages ...PackageModel) {
	for _, pkg := range packages {
		query := sqlf.Sprintf(`
			INSERT INTO lsif_packages (
				scheme,
				name,
				version,
				dump_id
			) VALUES (%s, %s, %s, %s)
		`,
			pkg.Scheme,
			pkg.Name,
			pkg.Version,
			pkg.DumpID,
		)

		if _, err := db.ExecContext(context.Background(), query.Query(sqlf.PostgresBindVar), query.Args()...); err != nil {
			t.Fatalf("unexpected error while inserting package: %s", err)
		}
	}
}
// insertReferences populates the lsif_references table with the given reference models.
// Rows are inserted one at a time; the test fails on the first error.
func insertReferences(t *testing.T, db *sql.DB, references ...ReferenceModel) {
	for _, reference := range references {
		query := sqlf.Sprintf(`
			INSERT INTO lsif_references (
				scheme,
				name,
				version,
				dump_id,
				filter
			) VALUES (%s, %s, %s, %s, %s)
		`,
			reference.Scheme,
			reference.Name,
			reference.Version,
			reference.DumpID,
			reference.Filter,
		)

		if _, err := db.ExecContext(context.Background(), query.Query(sqlf.PostgresBindVar), query.Args()...); err != nil {
			t.Fatalf("unexpected error while inserting reference: %s", err)
		}
	}
}
// insertCommits populates the lsif_commits table with the given commit-parent
// map (child commit -> parent commits). Root commits (no parents) are inserted
// with a NULL parent_commit. All rows use repository id 50 to match the other
// test fixtures. Fails the test on error.
func insertCommits(t *testing.T, db *sql.DB, commits map[string][]string) {
	var values []*sqlf.Query
	for k, vs := range commits {
		if len(vs) == 0 {
			values = append(values, sqlf.Sprintf("(%d, %s, %v)", 50, k, nil))
		}

		for _, v := range vs {
			values = append(values, sqlf.Sprintf("(%d, %s, %s)", 50, k, v))
		}
	}

	// An empty VALUES list would produce invalid SQL ("... VALUES ") and
	// there is nothing to insert anyway, so bail out early.
	if len(values) == 0 {
		return
	}

	query := sqlf.Sprintf(
		"INSERT INTO lsif_commits (repository_id, commit, parent_commit) VALUES %s",
		sqlf.Join(values, ", "),
	)

	if _, err := db.ExecContext(context.Background(), query.Query(sqlf.PostgresBindVar), query.Args()...); err != nil {
		t.Fatalf("unexpected error while inserting commits: %s", err)
	}
}
// getDumpVisibilities returns a map from dump identifiers to its visibility
// (the visible_at_tip column). Fails the test on error.
func getDumpVisibilities(t *testing.T, db *sql.DB) map[int]bool {
	visibilities, err := scanVisibilities(db.Query("SELECT id, visible_at_tip FROM lsif_dumps"))
	if err != nil {
		t.Fatalf("unexpected error while scanning dump visibility: %s", err)
	}
	return visibilities
}
// makeCommit returns a fake 40-character commit hash: the decimal value of i
// left-padded with zeroes to the width of a git SHA-1.
func makeCommit(i int) string {
	const shaWidth = 40
	return fmt.Sprintf("%0*d", shaWidth, i)
}

View File

@ -0,0 +1,38 @@
package db
import (
"context"
"github.com/keegancsmith/sqlf"
)
// GetPackage returns the dump that provides the package with the given scheme, name, and version and a flag indicating its existence.
// A missing package is reported as (Dump{}, false, nil); other query errors are returned as-is.
func (db *dbImpl) GetPackage(ctx context.Context, scheme, name, version string) (Dump, bool, error) {
	// NOTE(review): LIMIT 1 without an ORDER BY means that when several dumps
	// provide the same package, which one is returned is unspecified — confirm
	// callers are indifferent to the choice.
	query := `
		SELECT
			d.id,
			d.commit,
			d.root,
			d.visible_at_tip,
			d.uploaded_at,
			d.state,
			d.failure_summary,
			d.failure_stacktrace,
			d.started_at,
			d.finished_at,
			d.tracing_context,
			d.repository_id,
			d.indexer
		FROM lsif_packages p
		JOIN lsif_dumps d ON p.dump_id = d.id
		WHERE p.scheme = %s AND p.name = %s AND p.version = %s
		LIMIT 1
	`

	// ignoreErrNoRows presumably maps sql.ErrNoRows to nil, turning "no such
	// package" into the (zero value, false, nil) return.
	dump, err := scanDump(db.queryRow(ctx, sqlf.Sprintf(query, scheme, name, version)))
	if err != nil {
		return Dump{}, false, ignoreErrNoRows(err)
	}

	return dump, true, nil
}

View File

@ -0,0 +1,76 @@
package db
import (
"context"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbtesting"
)
// TestGetPackage asserts that GetPackage reports a missing package as
// non-existent, and returns the full dump record once the package is inserted.
func TestGetPackage(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	// Package does not exist initially
	if _, exists, err := db.GetPackage(context.Background(), "gomod", "leftpad", "0.1.0"); err != nil {
		t.Fatalf("unexpected error getting package: %s", err)
	} else if exists {
		t.Fatal("unexpected record")
	}

	uploadedAt := time.Unix(1587396557, 0).UTC()
	startedAt := uploadedAt.Add(time.Minute)
	finishedAt := uploadedAt.Add(time.Minute * 2)
	// The expected dump mirrors the upload inserted below field-for-field.
	expected := Dump{
		ID:                1,
		Commit:            makeCommit(1),
		Root:              "sub/",
		VisibleAtTip:      true,
		UploadedAt:        uploadedAt,
		State:             "completed",
		FailureSummary:    nil,
		FailureStacktrace: nil,
		StartedAt:         &startedAt,
		FinishedAt:        &finishedAt,
		TracingContext:    `{"id": 42}`,
		RepositoryID:      50,
		Indexer:           "lsif-go",
	}

	insertUploads(t, db.db, Upload{
		ID:                expected.ID,
		Commit:            expected.Commit,
		Root:              expected.Root,
		VisibleAtTip:      expected.VisibleAtTip,
		UploadedAt:        expected.UploadedAt,
		State:             expected.State,
		FailureSummary:    expected.FailureSummary,
		FailureStacktrace: expected.FailureStacktrace,
		StartedAt:         expected.StartedAt,
		FinishedAt:        expected.FinishedAt,
		TracingContext:    expected.TracingContext,
		RepositoryID:      expected.RepositoryID,
		Indexer:           expected.Indexer,
	})

	insertPackages(t, db.db, PackageModel{
		Scheme:  "gomod",
		Name:    "leftpad",
		Version: "0.1.0",
		DumpID:  1,
	})

	if dump, exists, err := db.GetPackage(context.Background(), "gomod", "leftpad", "0.1.0"); err != nil {
		t.Fatalf("unexpected error getting package: %s", err)
	} else if !exists {
		t.Fatal("expected record to exist")
	} else if diff := cmp.Diff(expected, dump); diff != "" {
		t.Errorf("unexpected dump (-want +got):\n%s", diff)
	}
}

View File

@ -0,0 +1,35 @@
package db
import "database/sql"
// ReferencePager holds state for a reference result in a SQL transaction. Each page
// requested should have a consistent view into the database. The embedded TxCloser
// must be invoked by the caller once paging is complete.
type ReferencePager interface {
	TxCloser

	// PageFromOffset returns the page of references that starts at the given offset.
	PageFromOffset(offset int) ([]Reference, error)
}
// referencePager is the concrete ReferencePager: it pairs a transaction closer
// with a paging callback bound to that transaction.
type referencePager struct {
	*txCloser
	pageFromOffset func(offset int) ([]Reference, error) // fetches one page within the tx
}
// PageFromOffset returns the page of references that starts at the given offset.
// It simply delegates to the callback captured at construction time.
func (rp *referencePager) PageFromOffset(offset int) ([]Reference, error) {
	return rp.pageFromOffset(offset)
}
// newReferencePager constructs a pager over the given transaction using the
// supplied page-fetching callback. Ownership of the transaction transfers to
// the pager; the caller must invoke CloseTx when done.
func newReferencePager(tx *sql.Tx, pageFromOffset func(offset int) ([]Reference, error)) ReferencePager {
	return &referencePager{
		txCloser:       &txCloser{tx},
		pageFromOffset: pageFromOffset,
	}
}
// newEmptyReferencePager returns a pager that yields no references for any
// offset. The pager still owns the given transaction, so the caller must
// close it as usual.
func newEmptyReferencePager(tx *sql.Tx) ReferencePager {
	noPages := func(int) ([]Reference, error) {
		return nil, nil
	}
	return newReferencePager(tx, noPages)
}

View File

@ -0,0 +1,111 @@
package db
import (
"context"
"github.com/keegancsmith/sqlf"
)
// Reference is a subset of the lsif_references table which links a dump to a package from
// which it imports. The filter field encodes a bloom filter of the set of identifiers it
// imports from the package, which can be used to quickly filter out (on the server side)
// dumps that import a package but not the target identifier.
type Reference struct {
	DumpID int    // identifier of the importing dump
	Filter []byte // serialized bloom filter of imported identifiers
}
// SameRepoPager returns a ReferencePager for dumps that belong to the given repository and commit and reference the package with the
// given scheme, name, and version. The returned count is the total number of matching
// references; the pager owns a transaction that the caller must close.
func (db *dbImpl) SameRepoPager(ctx context.Context, repositoryID int, commit, scheme, name, version string, limit int) (_ int, _ ReferencePager, err error) {
	tw, err := db.beginTx(ctx)
	if err != nil {
		return 0, nil, err
	}
	// The transaction is only closed here on error; on success ownership
	// transfers to the returned pager.
	defer func() {
		if err != nil {
			err = closeTx(tw.tx, err)
		}
	}()

	// Resolve the dumps visible from the given commit. withBidirectionalLineage
	// presumably wraps the query in a commit-graph CTE exposing visible_ids —
	// confirm against its definition.
	visibleIDsQuery := `SELECT id FROM visible_ids`
	visibleIDs, err := scanInts(tw.query(ctx, withBidirectionalLineage(visibleIDsQuery, repositoryID, commit)))
	if err != nil {
		return 0, nil, err
	}

	if len(visibleIDs) == 0 {
		// No visible dumps: return an empty pager that still owns the tx.
		return 0, newEmptyReferencePager(tw.tx), nil
	}

	conds := []*sqlf.Query{
		sqlf.Sprintf("r.scheme = %s", scheme),
		sqlf.Sprintf("r.name = %s", name),
		sqlf.Sprintf("r.version = %s", version),
		sqlf.Sprintf("r.dump_id IN (%s)", sqlf.Join(intsToQueries(visibleIDs), ", ")),
	}

	countQuery := `SELECT COUNT(1) FROM lsif_references r WHERE %s`
	totalCount, err := scanInt(tw.queryRow(ctx, sqlf.Sprintf(countQuery, sqlf.Join(conds, " AND "))))
	if err != nil {
		return 0, nil, err
	}

	// Pages are ordered by dump root so repeated calls within the same tx
	// produce stable, non-overlapping windows.
	pageFromOffset := func(offset int) ([]Reference, error) {
		query := `
			SELECT d.id, r.filter FROM lsif_references r
			LEFT JOIN lsif_dumps d on r.dump_id = d.id
			WHERE %s ORDER BY d.root LIMIT %d OFFSET %d
		`
		return scanReferences(tw.query(ctx, sqlf.Sprintf(query, sqlf.Join(conds, " AND "), limit, offset)))
	}

	return totalCount, newReferencePager(tw.tx, pageFromOffset), nil
}
// PackageReferencePager returns a ReferencePager for dumps that belong to a remote repository (distinct from the given repository id)
// and reference the package with the given scheme, name, and version. All resulting dumps are visible at the tip of their repository's
// default branch. The returned count is the total number of matching references; the
// pager owns a transaction that the caller must close.
func (db *dbImpl) PackageReferencePager(ctx context.Context, scheme, name, version string, repositoryID, limit int) (_ int, _ ReferencePager, err error) {
	tw, err := db.beginTx(ctx)
	if err != nil {
		return 0, nil, err
	}
	// The transaction is only closed here on error; on success ownership
	// transfers to the returned pager.
	defer func() {
		if err != nil {
			err = closeTx(tw.tx, err)
		}
	}()

	conds := []*sqlf.Query{
		sqlf.Sprintf("r.scheme = %s", scheme),
		sqlf.Sprintf("r.name = %s", name),
		sqlf.Sprintf("r.version = %s", version),
		sqlf.Sprintf("d.repository_id != %s", repositoryID),
		sqlf.Sprintf("d.visible_at_tip = true"),
	}

	countQuery := `
		SELECT COUNT(1) FROM lsif_references r
		LEFT JOIN lsif_dumps d ON r.dump_id = d.id
		WHERE %s
	`
	totalCount, err := scanInt(tw.queryRow(ctx, sqlf.Sprintf(countQuery, sqlf.Join(conds, " AND "))))
	if err != nil {
		return 0, nil, err
	}

	// Pages are ordered by (repository_id, root) so repeated calls within the
	// same tx produce stable, non-overlapping windows.
	pageFromOffset := func(offset int) ([]Reference, error) {
		query := `
			SELECT d.id, r.filter FROM lsif_references r
			LEFT JOIN lsif_dumps d ON r.dump_id = d.id
			WHERE %s ORDER BY d.repository_id, d.root LIMIT %d OFFSET %d
		`
		return scanReferences(tw.query(ctx, sqlf.Sprintf(query, sqlf.Join(conds, " AND "), limit, offset)))
	}

	return totalCount, newReferencePager(tw.tx, pageFromOffset), nil
}

View File

@ -0,0 +1,357 @@
package db
import (
"context"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbtesting"
)
// TestSameRepoPager asserts that all references reachable from the given
// commit are counted and returned in a single page when the limit allows.
func TestSameRepoPager(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(2), Root: "sub1/"},
		Upload{ID: 2, Commit: makeCommit(3), Root: "sub2/"},
		Upload{ID: 3, Commit: makeCommit(4), Root: "sub3/"},
		Upload{ID: 4, Commit: makeCommit(3), Root: "sub4/"},
		Upload{ID: 5, Commit: makeCommit(2), Root: "sub5/"},
	)

	insertReferences(t, db.db,
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 1, Filter: []byte("f1")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 2, Filter: []byte("f2")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 3, Filter: []byte("f3")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 4, Filter: []byte("f4")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 5, Filter: []byte("f5")},
	)

	// Linear commit graph 1 -> 2 -> 3 -> 4; paging from commit 1 sees all dumps.
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(3)},
	})

	totalCount, pager, err := db.SameRepoPager(context.Background(), 50, makeCommit(1), "gomod", "leftpad", "0.1.0", 5)
	if err != nil {
		t.Fatalf("unexpected error getting pager: %s", err)
	}
	defer func() { _ = pager.CloseTx(nil) }()

	if totalCount != 5 {
		t.Errorf("unexpected dump. want=%d have=%d", 5, totalCount)
	}

	// Results ordered by dump root (sub1/ .. sub5/).
	expected := []Reference{
		{DumpID: 1, Filter: []byte("f1")},
		{DumpID: 2, Filter: []byte("f2")},
		{DumpID: 3, Filter: []byte("f3")},
		{DumpID: 4, Filter: []byte("f4")},
		{DumpID: 5, Filter: []byte("f5")},
	}
	if references, err := pager.PageFromOffset(0); err != nil {
		t.Fatalf("unexpected error getting next page: %s", err)
	} else if diff := cmp.Diff(expected, references); diff != "" {
		t.Errorf("unexpected references (-want +got):\n%s", diff)
	}
}
// TestSameRepoPagerEmpty asserts that an empty database yields a zero count
// and a usable (closeable) pager.
func TestSameRepoPagerEmpty(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	totalCount, pager, err := db.SameRepoPager(context.Background(), 50, makeCommit(1), "gomod", "leftpad", "0.1.0", 5)
	if err != nil {
		t.Fatalf("unexpected error getting pager: %s", err)
	}
	defer func() { _ = pager.CloseTx(nil) }()

	if totalCount != 0 {
		t.Errorf("unexpected dump. want=%d have=%d", 0, totalCount)
	}
}
// TestSameRepoPagerMultiplePages asserts that PageFromOffset returns stable,
// correctly-windowed pages when the result set spans several pages.
func TestSameRepoPagerMultiplePages(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1), Root: "sub1/"},
		Upload{ID: 2, Commit: makeCommit(1), Root: "sub2/"},
		Upload{ID: 3, Commit: makeCommit(1), Root: "sub3/"},
		Upload{ID: 4, Commit: makeCommit(1), Root: "sub4/"},
		Upload{ID: 5, Commit: makeCommit(1), Root: "sub5/"},
		Upload{ID: 6, Commit: makeCommit(1), Root: "sub6/"},
		Upload{ID: 7, Commit: makeCommit(1), Root: "sub7/"},
		Upload{ID: 8, Commit: makeCommit(1), Root: "sub8/"},
		Upload{ID: 9, Commit: makeCommit(1), Root: "sub9/"},
	)

	insertReferences(t, db.db,
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 1, Filter: []byte("f1")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 2, Filter: []byte("f2")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 3, Filter: []byte("f3")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 4, Filter: []byte("f4")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 5, Filter: []byte("f5")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 6, Filter: []byte("f6")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 7, Filter: []byte("f7")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 8, Filter: []byte("f8")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 9, Filter: []byte("f9")},
	)

	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
	})

	// Page size 3 over 9 results forces multiple pages.
	totalCount, pager, err := db.SameRepoPager(context.Background(), 50, makeCommit(1), "gomod", "leftpad", "0.1.0", 3)
	if err != nil {
		t.Fatalf("unexpected error getting pager: %s", err)
	}
	defer func() { _ = pager.CloseTx(nil) }()

	if totalCount != 9 {
		t.Errorf("unexpected dump. want=%d have=%d", 9, totalCount)
	}

	expected := []Reference{
		{DumpID: 1, Filter: []byte("f1")},
		{DumpID: 2, Filter: []byte("f2")},
		{DumpID: 3, Filter: []byte("f3")},
		{DumpID: 4, Filter: []byte("f4")},
		{DumpID: 5, Filter: []byte("f5")},
		{DumpID: 6, Filter: []byte("f6")},
		{DumpID: 7, Filter: []byte("f7")},
		{DumpID: 8, Filter: []byte("f8")},
		{DumpID: 9, Filter: []byte("f9")},
	}

	// Probe every offset (not just page boundaries); each window is limit-wide,
	// clamped at the end of the result set.
	for lo := 0; lo < len(expected); lo++ {
		hi := lo + 3
		if hi > len(expected) {
			hi = len(expected)
		}

		if references, err := pager.PageFromOffset(lo); err != nil {
			t.Fatalf("unexpected error getting page at offset %d: %s", lo, err)
		} else if diff := cmp.Diff(expected[lo:hi], references); diff != "" {
			t.Errorf("unexpected references at offset %d (-want +got):\n%s", lo, diff)
		}
	}
}
// TestSameRepoPagerVisibility asserts that references whose dumps are shadowed
// by a closer dump with the same root are excluded from both the count and the
// returned pages.
func TestSameRepoPagerVisibility(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1), Root: "sub1/"}, // not visible
		Upload{ID: 2, Commit: makeCommit(2), Root: "sub2/"}, // not visible
		Upload{ID: 3, Commit: makeCommit(3), Root: "sub1/"},
		Upload{ID: 4, Commit: makeCommit(4), Root: "sub2/"},
		Upload{ID: 5, Commit: makeCommit(5), Root: "sub5/"},
	)

	insertReferences(t, db.db,
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 1, Filter: []byte("f1")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 2, Filter: []byte("f2")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 3, Filter: []byte("f3")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 4, Filter: []byte("f4")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 5, Filter: []byte("f5")},
	)

	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(3)},
		makeCommit(5): {makeCommit(4)},
		makeCommit(6): {makeCommit(5)},
	})

	totalCount, pager, err := db.SameRepoPager(context.Background(), 50, makeCommit(6), "gomod", "leftpad", "0.1.0", 5)
	if err != nil {
		t.Fatalf("unexpected error getting pager: %s", err)
	}
	defer func() { _ = pager.CloseTx(nil) }()

	// Only dumps 3, 4, and 5 are visible from commit 6. The previous message
	// reported want=5 while the assertion checked for 3.
	if totalCount != 3 {
		t.Errorf("unexpected dump. want=%d have=%d", 3, totalCount)
	}

	expected := []Reference{
		{DumpID: 3, Filter: []byte("f3")},
		{DumpID: 4, Filter: []byte("f4")},
		{DumpID: 5, Filter: []byte("f5")},
	}
	if references, err := pager.PageFromOffset(0); err != nil {
		t.Fatalf("unexpected error getting next page: %s", err)
	} else if diff := cmp.Diff(expected, references); diff != "" {
		t.Errorf("unexpected references (-want +got):\n%s", diff)
	}
}
// TestPackageReferencePager asserts that references are restricted to remote
// repositories (repository id != 50) whose dumps are visible at tip.
func TestPackageReferencePager(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	// Dump 1 belongs to the requesting repo (50) and dump 6 is not visible
	// at tip; both must be excluded below.
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1), VisibleAtTip: true},
		Upload{ID: 2, Commit: makeCommit(2), VisibleAtTip: true, RepositoryID: 51},
		Upload{ID: 3, Commit: makeCommit(3), VisibleAtTip: true, RepositoryID: 52},
		Upload{ID: 4, Commit: makeCommit(4), VisibleAtTip: true, RepositoryID: 53},
		Upload{ID: 5, Commit: makeCommit(5), VisibleAtTip: true, RepositoryID: 54},
		Upload{ID: 6, Commit: makeCommit(6), VisibleAtTip: false, RepositoryID: 55},
		Upload{ID: 7, Commit: makeCommit(6), VisibleAtTip: true, RepositoryID: 56},
	)

	insertReferences(t, db.db,
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 1, Filter: []byte("f1")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 2, Filter: []byte("f2")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 3, Filter: []byte("f3")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 4, Filter: []byte("f4")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 5, Filter: []byte("f5")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 6, Filter: []byte("f6")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 7, Filter: []byte("f7")},
	)

	totalCount, pager, err := db.PackageReferencePager(context.Background(), "gomod", "leftpad", "0.1.0", 50, 5)
	if err != nil {
		t.Fatalf("unexpected error getting pager: %s", err)
	}
	defer func() { _ = pager.CloseTx(nil) }()

	if totalCount != 5 {
		t.Errorf("unexpected dump. want=%d have=%d", 5, totalCount)
	}

	expected := []Reference{
		{DumpID: 2, Filter: []byte("f2")},
		{DumpID: 3, Filter: []byte("f3")},
		{DumpID: 4, Filter: []byte("f4")},
		{DumpID: 5, Filter: []byte("f5")},
		{DumpID: 7, Filter: []byte("f7")},
	}
	if references, err := pager.PageFromOffset(0); err != nil {
		t.Fatalf("unexpected error getting next page: %s", err)
	} else if diff := cmp.Diff(expected, references); diff != "" {
		t.Errorf("unexpected references (-want +got):\n%s", diff)
	}
}
// TestPackageReferencePagerEmpty asserts that an empty database yields a zero
// count and a usable (closeable) pager.
func TestPackageReferencePagerEmpty(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	totalCount, pager, err := db.PackageReferencePager(context.Background(), "gomod", "leftpad", "0.1.0", 50, 5)
	if err != nil {
		t.Fatalf("unexpected error getting pager: %s", err)
	}
	defer func() { _ = pager.CloseTx(nil) }()

	if totalCount != 0 {
		t.Errorf("unexpected dump. want=%d have=%d", 0, totalCount)
	}
}
// TestPackageReferencePagerPages asserts the windowing behavior of
// PageFromOffset across a multi-page remote-reference result set.
func TestPackageReferencePagerPages(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(1), VisibleAtTip: true, RepositoryID: 51},
		Upload{ID: 2, Commit: makeCommit(2), VisibleAtTip: true, RepositoryID: 52},
		Upload{ID: 3, Commit: makeCommit(3), VisibleAtTip: true, RepositoryID: 53},
		Upload{ID: 4, Commit: makeCommit(4), VisibleAtTip: true, RepositoryID: 54},
		Upload{ID: 5, Commit: makeCommit(5), VisibleAtTip: true, RepositoryID: 55},
		Upload{ID: 6, Commit: makeCommit(6), VisibleAtTip: true, RepositoryID: 56},
		Upload{ID: 7, Commit: makeCommit(7), VisibleAtTip: true, RepositoryID: 57},
		Upload{ID: 8, Commit: makeCommit(8), VisibleAtTip: true, RepositoryID: 58},
		Upload{ID: 9, Commit: makeCommit(9), VisibleAtTip: true, RepositoryID: 59},
	)

	insertReferences(t, db.db,
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 1, Filter: []byte("f1")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 2, Filter: []byte("f2")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 3, Filter: []byte("f3")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 4, Filter: []byte("f4")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 5, Filter: []byte("f5")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 6, Filter: []byte("f6")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 7, Filter: []byte("f7")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 8, Filter: []byte("f8")},
		ReferenceModel{Scheme: "gomod", Name: "leftpad", Version: "0.1.0", DumpID: 9, Filter: []byte("f9")},
	)

	// Page size 3 over 9 results forces multiple pages.
	totalCount, pager, err := db.PackageReferencePager(context.Background(), "gomod", "leftpad", "0.1.0", 50, 3)
	if err != nil {
		t.Fatalf("unexpected error getting pager: %s", err)
	}
	defer func() { _ = pager.CloseTx(nil) }()

	if totalCount != 9 {
		t.Errorf("unexpected dump. want=%d have=%d", 9, totalCount)
	}

	// Each case probes an offset and expects the [lo, hi) window of the
	// (repository_id, root)-ordered result set.
	testCases := []struct {
		offset int
		lo     int
		hi     int
	}{
		{0, 0, 3},
		{1, 1, 4},
		{2, 2, 5},
		{3, 3, 6},
		{4, 4, 7},
		{5, 5, 8},
		{6, 6, 9},
		{7, 7, 9},
		{8, 8, 9},
	}

	expected := []Reference{
		{DumpID: 1, Filter: []byte("f1")},
		{DumpID: 2, Filter: []byte("f2")},
		{DumpID: 3, Filter: []byte("f3")},
		{DumpID: 4, Filter: []byte("f4")},
		{DumpID: 5, Filter: []byte("f5")},
		{DumpID: 6, Filter: []byte("f6")},
		{DumpID: 7, Filter: []byte("f7")},
		{DumpID: 8, Filter: []byte("f8")},
		{DumpID: 9, Filter: []byte("f9")},
	}

	for _, testCase := range testCases {
		if references, err := pager.PageFromOffset(testCase.offset); err != nil {
			t.Fatalf("unexpected error getting page at offset %d: %s", testCase.offset, err)
		} else if diff := cmp.Diff(expected[testCase.lo:testCase.hi], references); diff != "" {
			t.Errorf("unexpected references at offset %d (-want +got):\n%s", testCase.offset, diff)
		}
	}
}

View File

@ -0,0 +1,203 @@
package db
import (
"database/sql"
)
// Scanner is the common interface shared by *sql.Row and *sql.Rows, letting the
// scanX helpers below work with either a single-row or multi-row result.
type Scanner interface {
	// Scan copies the values of the current row into the values pointed at by dest.
	Scan(dest ...interface{}) error
}
// scanDump populates a Dump value from the given scanner. The column order must
// match the SELECT lists used by the dump queries in this package.
func scanDump(scanner Scanner) (dump Dump, err error) {
	err = scanner.Scan(
		&dump.ID,
		&dump.Commit,
		&dump.Root,
		&dump.VisibleAtTip,
		&dump.UploadedAt,
		&dump.State,
		&dump.FailureSummary,
		&dump.FailureStacktrace,
		&dump.StartedAt,
		&dump.FinishedAt,
		&dump.TracingContext,
		&dump.RepositoryID,
		&dump.Indexer,
	)
	return dump, err
}
// scanDumps reads the given set of dump rows and returns a slice of resulting values.
// This method should be called directly with the return value of `*db.queryRows`,
// which is why it accepts (and forwards) the query's error alongside the rows.
func scanDumps(rows *sql.Rows, err error) ([]Dump, error) {
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var dumps []Dump
	for rows.Next() {
		dump, err := scanDump(rows)
		if err != nil {
			return nil, err
		}

		dumps = append(dumps, dump)
	}

	return dumps, nil
}
// scanUpload populates an Upload value from the given scanner. Unlike scanDump,
// this also expects a trailing rank column.
func scanUpload(scanner Scanner) (upload Upload, err error) {
	err = scanner.Scan(
		&upload.ID,
		&upload.Commit,
		&upload.Root,
		&upload.VisibleAtTip,
		&upload.UploadedAt,
		&upload.State,
		&upload.FailureSummary,
		&upload.FailureStacktrace,
		&upload.StartedAt,
		&upload.FinishedAt,
		&upload.TracingContext,
		&upload.RepositoryID,
		&upload.Indexer,
		&upload.Rank,
	)
	return upload, err
}
// scanUploads reads the given set of upload rows and returns a slice of resulting
// values. This method should be called directly with the return value of `*db.queryRows`.
func scanUploads(rows *sql.Rows, err error) ([]Upload, error) {
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var uploads []Upload
	for rows.Next() {
		upload, err := scanUpload(rows)
		if err != nil {
			return nil, err
		}

		uploads = append(uploads, upload)
	}

	return uploads, nil
}
// scanReference populates a Reference value from the given scanner; expects
// (dump_id, filter) columns.
func scanReference(scanner Scanner) (reference Reference, err error) {
	err = scanner.Scan(&reference.DumpID, &reference.Filter)
	return reference, err
}
// scanReferences reads the given set of reference rows and returns a slice of resulting
// values. This method should be called directly with the return value of `*db.queryRows`.
func scanReferences(rows *sql.Rows, err error) ([]Reference, error) {
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var references []Reference
	for rows.Next() {
		reference, err := scanReference(rows)
		if err != nil {
			return nil, err
		}

		references = append(references, reference)
	}

	return references, nil
}
// scanInt populates an integer value from the given scanner; expects a single
// integer column.
func scanInt(scanner Scanner) (value int, err error) {
	err = scanner.Scan(&value)
	return value, err
}
// scanInts reads the given set of `(int)` rows and returns a slice of resulting values.
// This method should be called directly with the return value of `*db.queryRows`,
// which is why it accepts (and forwards) the query's error alongside the rows.
func scanInts(rows *sql.Rows, err error) ([]int, error) {
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	var values []int
	for rows.Next() {
		// Inlined equivalent of scanInt(rows).
		var value int
		if scanErr := rows.Scan(&value); scanErr != nil {
			return nil, scanErr
		}
		values = append(values, value)
	}

	return values, nil
}
// scanState populates an integer and string from the given scanner; expects
// (id, state) columns.
func scanState(scanner Scanner) (repositoryID int, state string, err error) {
	err = scanner.Scan(&repositoryID, &state)
	return repositoryID, state, err
}
// scanStates reads the given set of `(id, state)` rows and returns a map from id to its
// state. This method should be called directly with the return value of `*db.queryRows`.
// Duplicate ids keep the last-scanned state.
func scanStates(rows *sql.Rows, err error) (map[int]string, error) {
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	states := map[int]string{}
	for rows.Next() {
		repositoryID, state, err := scanState(rows)
		if err != nil {
			return nil, err
		}

		states[repositoryID] = state
	}

	return states, nil
}
// scanVisibility populates an integer and boolean from the given scanner; expects
// (id, visible_at_tip) columns.
func scanVisibility(scanner Scanner) (repositoryID int, visibleAtTip bool, err error) {
	err = scanner.Scan(&repositoryID, &visibleAtTip)
	return repositoryID, visibleAtTip, err
}
// scanVisibilities reads the given set of `(id, visible_at_tip)` rows and returns a map
// from id to its visibility. This method should be called directly with the return value
// of `*db.queryRows`. Duplicate ids keep the last-scanned value.
func scanVisibilities(rows *sql.Rows, err error) (map[int]bool, error) {
	if err != nil {
		return nil, err
	}
	defer rows.Close()

	visibilities := map[int]bool{}
	for rows.Next() {
		repositoryID, visibleAtTip, err := scanVisibility(rows)
		if err != nil {
			return nil, err
		}

		visibilities[repositoryID] = visibleAtTip
	}

	return visibilities, nil
}

View File

@ -0,0 +1,55 @@
package db
import (
"context"
"database/sql"
"github.com/hashicorp/go-multierror"
"github.com/keegancsmith/sqlf"
)
// TxCloser is a convenience wrapper for closing SQL transactions.
type TxCloser interface {
	// CloseTx commits the transaction on a nil error value and performs a rollback
	// otherwise. If an error occurs during commit or rollback of the transaction,
	// the error is added to the resulting error value.
	CloseTx(err error) error
}
// txCloser is the trivial TxCloser implementation over a raw transaction.
type txCloser struct {
	tx *sql.Tx
}

// CloseTx commits on nil error and rolls back otherwise; see closeTx.
func (txc *txCloser) CloseTx(err error) error {
	return closeTx(txc.tx, err)
}
// closeTx finishes the given transaction: commit when err is nil, rollback
// otherwise. A rollback failure is appended to the original error so neither
// failure is lost.
func closeTx(tx *sql.Tx, err error) error {
	if err == nil {
		return tx.Commit()
	}

	if rollErr := tx.Rollback(); rollErr != nil {
		return multierror.Append(err, rollErr)
	}
	return err
}
// transactionWrapper adapts a *sql.Tx to the same query/queryRow/exec surface
// used elsewhere in this package, rendering sqlf queries with Postgres bind vars.
type transactionWrapper struct {
	tx *sql.Tx
}

// query performs QueryContext on the underlying transaction.
func (tw *transactionWrapper) query(ctx context.Context, query *sqlf.Query) (*sql.Rows, error) {
	return tw.tx.QueryContext(ctx, query.Query(sqlf.PostgresBindVar), query.Args()...)
}

// queryRow performs QueryRow on the underlying transaction.
func (tw *transactionWrapper) queryRow(ctx context.Context, query *sqlf.Query) *sql.Row {
	return tw.tx.QueryRowContext(ctx, query.Query(sqlf.PostgresBindVar), query.Args()...)
}

// exec performs Exec on the underlying transaction.
func (tw *transactionWrapper) exec(ctx context.Context, query *sqlf.Query) (sql.Result, error) {
	return tw.tx.ExecContext(ctx, query.Query(sqlf.PostgresBindVar), query.Args()...)
}

View File

@ -0,0 +1,240 @@
package db
import (
"context"
"time"
"github.com/keegancsmith/sqlf"
)
// StalledUploadMaxAge is the maximum allowable duration between updating the state of an
// upload as "processing" and locking the upload row during processing. An unlocked row that
// is marked as processing likely indicates that the worker that dequeued the upload has died.
// There should be a nearly-zero delay between these states during normal operation.
// Compared against started_at in ResetStalled.
const StalledUploadMaxAge = time.Second * 5
// Upload is a subset of the lsif_uploads table and stores both processed and unprocessed
// records.
type Upload struct {
	ID           int       `json:"id"`
	Commit       string    `json:"commit"`
	Root         string    `json:"root"`
	VisibleAtTip bool      `json:"visibleAtTip"`
	UploadedAt   time.Time `json:"uploadedAt"`
	State        string    `json:"state"`
	// FailureSummary and FailureStacktrace are nullable and only populated for
	// uploads whose processing failed.
	FailureSummary    *string    `json:"failureSummary"`
	FailureStacktrace *string    `json:"failureStacktrace"`
	StartedAt         *time.Time `json:"startedAt"`
	FinishedAt        *time.Time `json:"finishedAt"`
	TracingContext    string     `json:"tracingContext"`
	RepositoryID      int        `json:"repositoryId"`
	Indexer           string     `json:"indexer"`
	// Rank is the upload's position in the queue (1-based); nil when the
	// upload is not in the queued state.
	Rank *int `json:"placeInQueue"`
}
// GetUploadByID fetches the upload with the given identifier. The boolean
// return value reports whether such a record exists. Queued uploads also carry
// their rank, computed over all queued uploads ordered by upload time.
func (db *dbImpl) GetUploadByID(ctx context.Context, id int) (Upload, bool, error) {
	query := `
		SELECT
			u.id,
			u.commit,
			u.root,
			u.visible_at_tip,
			u.uploaded_at,
			u.state,
			u.failure_summary,
			u.failure_stacktrace,
			u.started_at,
			u.finished_at,
			u.tracing_context,
			u.repository_id,
			u.indexer,
			s.rank
		FROM lsif_uploads u
		LEFT JOIN (
			SELECT r.id, RANK() OVER (ORDER BY r.uploaded_at) as rank
			FROM lsif_uploads r
			WHERE r.state = 'queued'
		) s
		ON u.id = s.id
		WHERE u.id = %s
	`

	row := db.queryRow(ctx, sqlf.Sprintf(query, id))

	upload, err := scanUpload(row)
	if err != nil {
		// A missing row is reported via the boolean flag, not an error.
		return Upload{}, false, ignoreErrNoRows(err)
	}

	return upload, true, nil
}
// GetUploadsByRepo returns one page of uploads for a particular repo along with
// the total count of records matching the given conditions. Both the count and
// the page are read in a single transaction for a consistent view.
func (db *dbImpl) GetUploadsByRepo(ctx context.Context, repositoryID int, state, term string, visibleAtTip bool, limit, offset int) (_ []Upload, _ int, err error) {
	tw, err := db.beginTx(ctx)
	if err != nil {
		return nil, 0, err
	}
	defer func() {
		err = closeTx(tw.tx, err)
	}()

	// Build the WHERE clause shared by the count and page queries.
	conds := []*sqlf.Query{sqlf.Sprintf("u.repository_id = %s", repositoryID)}
	if state != "" {
		conds = append(conds, sqlf.Sprintf("u.state = %s", state))
	}
	if term != "" {
		conds = append(conds, makeSearchCondition(term))
	}
	if visibleAtTip {
		conds = append(conds, sqlf.Sprintf("u.visible_at_tip = true"))
	}
	whereClause := sqlf.Join(conds, " AND ")

	countQuery := `SELECT COUNT(1) FROM lsif_uploads u WHERE %s`
	count, err := scanInt(tw.queryRow(ctx, sqlf.Sprintf(countQuery, whereClause)))
	if err != nil {
		return nil, 0, err
	}

	query := `
		SELECT
			u.id,
			u.commit,
			u.root,
			u.visible_at_tip,
			u.uploaded_at,
			u.state,
			u.failure_summary,
			u.failure_stacktrace,
			u.started_at,
			u.finished_at,
			u.tracing_context,
			u.repository_id,
			u.indexer,
			s.rank
		FROM lsif_uploads u
		LEFT JOIN (
			SELECT r.id, RANK() OVER (ORDER BY r.uploaded_at) as rank
			FROM lsif_uploads r
			WHERE r.state = 'queued'
		) s
		ON u.id = s.id
		WHERE %s ORDER BY uploaded_at DESC LIMIT %d OFFSET %d
	`
	uploads, err := scanUploads(tw.query(ctx, sqlf.Sprintf(query, whereClause, limit, offset)))
	if err != nil {
		return nil, 0, err
	}

	return uploads, count, nil
}
// makeSearchCondition returns a disjunction of LIKE clauses against all searchable columns of an upload.
func makeSearchCondition(term string) *sqlf.Query {
	columns := []string{
		"commit",
		"root",
		"indexer",
		"failure_summary",
		"failure_stacktrace",
	}

	var clauses []*sqlf.Query
	for _, column := range columns {
		// The term is passed as a bind parameter; only the column name is interpolated.
		clauses = append(clauses, sqlf.Sprintf("u."+column+" LIKE %s", "%"+term+"%"))
	}

	return sqlf.Sprintf("(%s)", sqlf.Join(clauses, " OR "))
}
// Enqueue inserts a new upload with a "queued" state, returning its identifier and a TxCloser
// that the caller must close to commit the transaction. The record only becomes visible to
// other transactions once CloseTx is invoked with a nil error.
func (db *dbImpl) Enqueue(ctx context.Context, commit, root, tracingContext string, repositoryID int, indexerName string) (_ int, _ TxCloser, err error) {
	tw, err := db.beginTx(ctx)
	if err != nil {
		return 0, nil, err
	}
	defer func() {
		// Roll back on failure only; on success ownership of the open
		// transaction transfers to the caller via the returned TxCloser.
		if err != nil {
			err = closeTx(tw.tx, err)
		}
	}()

	query := `
		INSERT INTO lsif_uploads (commit, root, tracing_context, repository_id, indexer)
		VALUES (%s, %s, %s, %s, %s)
		RETURNING id
	`

	uploadID, err := scanInt(tw.queryRow(ctx, sqlf.Sprintf(query, commit, root, tracingContext, repositoryID, indexerName)))
	if err != nil {
		return 0, nil, err
	}

	return uploadID, &txCloser{tw.tx}, nil
}
// GetStates returns the states for the uploads with the given identifiers.
// Identifiers with no corresponding upload are absent from the returned map.
func (db *dbImpl) GetStates(ctx context.Context, ids []int) (map[int]string, error) {
	// Guard the empty case: `IN ()` is invalid Postgres syntax and would
	// otherwise surface a confusing query error for a trivially empty request.
	if len(ids) == 0 {
		return map[int]string{}, nil
	}

	query := `SELECT id, state FROM lsif_uploads WHERE id IN (%s)`
	return scanStates(db.query(ctx, sqlf.Sprintf(query, sqlf.Join(intsToQueries(ids), ", "))))
}
// DeleteUploadByID deletes an upload by its identifier. If the upload was visible at the tip
// of its repository's default branch, the visibility of all uploads for that repository is
// recalculated. The given function is expected to return the newest commit on the default
// branch when invoked.
func (db *dbImpl) DeleteUploadByID(ctx context.Context, id int, getTipCommit func(repositoryID int) (string, error)) (_ bool, err error) {
	tw, err := db.beginTx(ctx)
	if err != nil {
		return false, err
	}
	defer func() {
		err = closeTx(tw.tx, err)
	}()

	query := `
		DELETE FROM lsif_uploads
		WHERE id = %s
		RETURNING repository_id, visible_at_tip
	`

	repositoryID, visibleAtTip, err := scanVisibility(tw.queryRow(ctx, sqlf.Sprintf(query, id)))
	if err != nil {
		// A missing row is reported via the boolean flag, not an error.
		return false, ignoreErrNoRows(err)
	}

	// If the deleted upload was not visible at tip, no other rows are affected.
	if !visibleAtTip {
		return true, nil
	}

	tipCommit, err := getTipCommit(repositoryID)
	if err != nil {
		return false, err
	}

	if err := db.updateDumpsVisibleFromTip(ctx, tw, repositoryID, tipCommit); err != nil {
		return false, err
	}

	return true, nil
}
// ResetStalled moves all unlocked uploads processing for more than `StalledUploadMaxAge`
// back to the queued state, returning the identifiers of the updated uploads. Rows locked
// by a live worker are skipped (FOR UPDATE SKIP LOCKED).
func (db *dbImpl) ResetStalled(ctx context.Context, now time.Time) ([]int, error) {
	query := `
		UPDATE lsif_uploads u SET state = 'queued', started_at = null WHERE id = ANY(
			SELECT id FROM lsif_uploads
			WHERE state = 'processing' AND %s - started_at > (%s * interval '1 second')
			FOR UPDATE SKIP LOCKED
		)
		RETURNING u.id
	`

	return scanInts(db.query(ctx, sqlf.Sprintf(query, now.UTC(), StalledUploadMaxAge/time.Second)))
}

View File

@ -0,0 +1,426 @@
package db
import (
"context"
"errors"
"fmt"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbtesting"
)
// printableRank renders an optional upload rank (a nullable *int) for use in
// test failure messages, printing "nil" when the rank is unset.
type printableRank struct{ value *int }

// String implements fmt.Stringer.
func (r printableRank) String() string {
	if r.value == nil {
		return "nil"
	}
	// Dereference before formatting: fmt formats a pointer under %d as its
	// address, so %d on r.value would print the pointer, not the rank.
	return fmt.Sprintf("%d", *r.value)
}
// TestGetUploadByID verifies that GetUploadByID reports a missing record via
// its boolean flag and round-trips every column, including the nullable
// failure and timestamp fields.
func TestGetUploadByID(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	// Upload does not exist initially
	if _, exists, err := db.GetUploadByID(context.Background(), 1); err != nil {
		t.Fatalf("unexpected error getting upload: %s", err)
	} else if exists {
		t.Fatal("unexpected record")
	}

	uploadedAt := time.Unix(1587396557, 0).UTC()
	startedAt := uploadedAt.Add(time.Minute)
	// Rank is nil because the upload is in the "processing" (not "queued") state.
	expected := Upload{
		ID:                1,
		Commit:            makeCommit(1),
		Root:              "sub/",
		VisibleAtTip:      true,
		UploadedAt:        uploadedAt,
		State:             "processing",
		FailureSummary:    nil,
		FailureStacktrace: nil,
		StartedAt:         &startedAt,
		FinishedAt:        nil,
		TracingContext:    `{"id": 42}`,
		RepositoryID:      123,
		Indexer:           "lsif-go",
		Rank:              nil,
	}

	insertUploads(t, db.db, expected)

	if upload, exists, err := db.GetUploadByID(context.Background(), 1); err != nil {
		t.Fatalf("unexpected error getting upload: %s", err)
	} else if !exists {
		t.Fatal("expected record to exist")
	} else if diff := cmp.Diff(expected, upload); diff != "" {
		t.Errorf("unexpected upload (-want +got):\n%s", diff)
	}
}
// TestGetQueuedUploadRank verifies that an upload's rank reflects its position
// in the queue ordered by upload time, and that non-queued uploads have no rank.
func TestGetQueuedUploadRank(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	// Upload times are deliberately out of ID order so rank must come from
	// uploaded_at, not from the identifier.
	t1 := time.Unix(1587396557, 0).UTC()
	t2 := t1.Add(+time.Minute * 5)
	t3 := t1.Add(+time.Minute * 3)
	t4 := t1.Add(+time.Minute * 1)
	t5 := t1.Add(+time.Minute * 4)
	t6 := t1.Add(+time.Minute * 2)

	insertUploads(t, db.db,
		Upload{ID: 1, UploadedAt: t1, State: "queued"},
		Upload{ID: 2, UploadedAt: t2, State: "queued"},
		Upload{ID: 3, UploadedAt: t3, State: "queued"},
		Upload{ID: 4, UploadedAt: t4, State: "queued"},
		Upload{ID: 5, UploadedAt: t5, State: "queued"},
		Upload{ID: 6, UploadedAt: t6, State: "processing"},
	)

	if upload, _, _ := db.GetUploadByID(context.Background(), 1); upload.Rank == nil || *upload.Rank != 1 {
		t.Errorf("unexpected rank. want=%d have=%s", 1, printableRank{upload.Rank})
	}
	if upload, _, _ := db.GetUploadByID(context.Background(), 2); upload.Rank == nil || *upload.Rank != 5 {
		t.Errorf("unexpected rank. want=%d have=%s", 5, printableRank{upload.Rank})
	}
	if upload, _, _ := db.GetUploadByID(context.Background(), 3); upload.Rank == nil || *upload.Rank != 3 {
		t.Errorf("unexpected rank. want=%d have=%s", 3, printableRank{upload.Rank})
	}
	if upload, _, _ := db.GetUploadByID(context.Background(), 4); upload.Rank == nil || *upload.Rank != 2 {
		t.Errorf("unexpected rank. want=%d have=%s", 2, printableRank{upload.Rank})
	}
	if upload, _, _ := db.GetUploadByID(context.Background(), 5); upload.Rank == nil || *upload.Rank != 4 {
		t.Errorf("unexpected rank. want=%d have=%s", 4, printableRank{upload.Rank})
	}

	// Only considers queued uploads to determine rank
	if upload, _, _ := db.GetUploadByID(context.Background(), 6); upload.Rank != nil {
		t.Errorf("unexpected rank. want=%s have=%s", "nil", printableRank{upload.Rank})
	}
}
// TestGetUploadsByRepo exercises the filter combinations (state, search term,
// visibility) and the pagination of GetUploadsByRepo against a fixed fixture
// of ten uploads.
func TestGetUploadsByRepo(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	// Uploads are returned newest-first, so descending timestamps give the
	// expected ID ordering below.
	t1 := time.Unix(1587396557, 0).UTC()
	t2 := t1.Add(-time.Minute * 1)
	t3 := t1.Add(-time.Minute * 2)
	t4 := t1.Add(-time.Minute * 3)
	t5 := t1.Add(-time.Minute * 4)
	t6 := t1.Add(-time.Minute * 5)
	t7 := t1.Add(-time.Minute * 6)
	t8 := t1.Add(-time.Minute * 7)
	t9 := t1.Add(-time.Minute * 8)
	t10 := t1.Add(-time.Minute * 9)
	failureSummary := "unlucky 333"

	// NOTE: upload 4 belongs to repository 51 and is excluded from every
	// query below (which target repository 50).
	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(3331), UploadedAt: t1, Root: "sub1/", State: "queued"},
		Upload{ID: 2, UploadedAt: t2, VisibleAtTip: true, State: "errored", FailureSummary: &failureSummary, Indexer: "lsif-tsc"},
		Upload{ID: 3, Commit: makeCommit(3333), UploadedAt: t3, Root: "sub2/", State: "queued"},
		Upload{ID: 4, UploadedAt: t4, State: "queued", RepositoryID: 51},
		Upload{ID: 5, Commit: makeCommit(3333), UploadedAt: t5, Root: "sub1/", VisibleAtTip: true, State: "processing", Indexer: "lsif-tsc"},
		Upload{ID: 6, UploadedAt: t6, Root: "sub2/", State: "processing"},
		Upload{ID: 7, UploadedAt: t7, Root: "sub1/", VisibleAtTip: true, Indexer: "lsif-tsc"},
		Upload{ID: 8, UploadedAt: t8, VisibleAtTip: true, Indexer: "lsif-tsc"},
		Upload{ID: 9, UploadedAt: t9, State: "queued"},
		Upload{ID: 10, UploadedAt: t10, Root: "sub1/", Indexer: "lsif-tsc"},
	)

	testCases := []struct {
		state        string
		term         string
		visibleAtTip bool
		expectedIDs  []int
	}{
		{expectedIDs: []int{1, 2, 3, 5, 6, 7, 8, 9, 10}},
		{state: "completed", expectedIDs: []int{7, 8, 10}},
		{term: "sub", expectedIDs: []int{1, 3, 5, 6, 7, 10}}, // searches root
		{term: "003", expectedIDs: []int{1, 3, 5}},           // searches commits
		{term: "333", expectedIDs: []int{1, 2, 3, 5}},        // searches commits and failure summary
		{term: "tsc", expectedIDs: []int{2, 5, 7, 8, 10}},    // searches indexer
		{visibleAtTip: true, expectedIDs: []int{2, 5, 7, 8}},
	}

	for _, testCase := range testCases {
		name := fmt.Sprintf("state=%s term=%s visibleAtTip=%v", testCase.state, testCase.term, testCase.visibleAtTip)

		t.Run(name, func(t *testing.T) {
			// Page through the expected results three at a time, starting at
			// every offset, to exercise LIMIT/OFFSET behavior.
			for lo := 0; lo < len(testCase.expectedIDs); lo++ {
				hi := lo + 3
				if hi > len(testCase.expectedIDs) {
					hi = len(testCase.expectedIDs)
				}

				uploads, totalCount, err := db.GetUploadsByRepo(context.Background(), 50, testCase.state, testCase.term, testCase.visibleAtTip, 3, lo)
				if err != nil {
					t.Fatalf("unexpected error getting uploads for repo: %s", err)
				}
				if totalCount != len(testCase.expectedIDs) {
					t.Errorf("unexpected total count. want=%d have=%d", len(testCase.expectedIDs), totalCount)
				}

				var ids []int
				for _, upload := range uploads {
					ids = append(ids, upload.ID)
				}

				if diff := cmp.Diff(testCase.expectedIDs[lo:hi], ids); diff != "" {
					t.Errorf("unexpected upload ids at offset %d (-want +got):\n%s", lo, diff)
				}
			}
		})
	}
}
// TestEnqueue verifies that an enqueued upload is invisible until the returned
// TxCloser commits the transaction, and that the committed record has the
// expected queued-state column values and rank.
func TestEnqueue(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	id, closer, err := db.Enqueue(context.Background(), makeCommit(1), "sub/", `{"id": 42}`, 50, "lsif-go")
	if err != nil {
		t.Fatalf("unexpected error enqueueing upload: %s", err)
	}

	// Upload does not exist before transaction commit
	if _, exists, err := db.GetUploadByID(context.Background(), id); err != nil {
		t.Fatalf("unexpected error getting upload: %s", err)
	} else if exists {
		t.Fatal("unexpected record")
	}

	// Commit transaction
	_ = closer.CloseTx(nil)

	// The only queued upload, so its rank is 1.
	rank := 1
	expected := Upload{
		ID:                id,
		Commit:            makeCommit(1),
		Root:              "sub/",
		VisibleAtTip:      false,
		UploadedAt:        time.Time{},
		State:             "queued",
		FailureSummary:    nil,
		FailureStacktrace: nil,
		StartedAt:         nil,
		FinishedAt:        nil,
		TracingContext:    `{"id": 42}`,
		RepositoryID:      50,
		Indexer:           "lsif-go",
		Rank:              &rank,
	}

	if upload, exists, err := db.GetUploadByID(context.Background(), id); err != nil {
		t.Fatalf("unexpected error getting upload: %s", err)
	} else if !exists {
		t.Fatal("expected record to exist")
	} else {
		// Update auto-generated timestamp
		expected.UploadedAt = upload.UploadedAt

		if diff := cmp.Diff(expected, upload); diff != "" {
			t.Errorf("unexpected upload (-want +got):\n%s", diff)
		}
	}
}
// TestEnqueueRollback verifies that closing the Enqueue transaction with a
// non-nil error rolls back the insert, leaving no record behind.
func TestEnqueueRollback(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	id, closer, err := db.Enqueue(context.Background(), makeCommit(1), "sub/", `{"id": 42}`, 50, "lsif-go")
	if err != nil {
		t.Fatalf("unexpected error enqueueing upload: %s", err)
	}
	// A non-nil error triggers rollback instead of commit.
	_ = closer.CloseTx(errors.New(""))

	// Upload does not exist after rollback
	if _, exists, err := db.GetUploadByID(context.Background(), id); err != nil {
		t.Fatalf("unexpected error getting upload: %s", err)
	} else if exists {
		t.Fatal("unexpected record")
	}
}
// TestGetStates verifies that GetStates returns states only for the requested,
// existing identifiers: upload 3 is not requested and id 6 does not exist, so
// neither appears in the result.
func TestGetStates(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	// Upload 2 has no explicit state; the expected map below shows it as
	// "completed" — presumably the fixture default. TODO confirm against insertUploads.
	insertUploads(t, db.db,
		Upload{ID: 1, State: "queued"},
		Upload{ID: 2},
		Upload{ID: 3, State: "processing"},
		Upload{ID: 4, State: "errored"},
	)

	expected := map[int]string{
		1: "queued",
		2: "completed",
		4: "errored",
	}

	if states, err := db.GetStates(context.Background(), []int{1, 2, 4, 6}); err != nil {
		t.Fatalf("unexpected error getting states: %s", err)
	} else if diff := cmp.Diff(expected, states); diff != "" {
		t.Errorf("unexpected upload states (-want +got):\n%s", diff)
	}
}
// TestDeleteUploadByID verifies deletion of an upload that is not visible at
// tip: the record is removed and the tip-commit callback is never invoked.
func TestDeleteUploadByID(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	insertUploads(t, db.db,
		Upload{ID: 1},
	)

	var called bool
	getTipCommit := func(repositoryID int) (string, error) {
		called = true
		return "", nil
	}

	if found, err := db.DeleteUploadByID(context.Background(), 1, getTipCommit); err != nil {
		t.Fatalf("unexpected error deleting upload: %s", err)
	} else if !found {
		t.Fatalf("expected record to exist")
	} else if called {
		t.Fatalf("unexpected call to getTipCommit")
	}

	// Upload no longer exists
	if _, exists, err := db.GetUploadByID(context.Background(), 1); err != nil {
		t.Fatalf("unexpected error getting upload: %s", err)
	} else if exists {
		t.Fatal("unexpected record")
	}
}
// TestDeleteUploadByIDMissingRow verifies that deleting a nonexistent upload
// reports found=false without surfacing an error.
func TestDeleteUploadByIDMissingRow(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	getTipCommit := func(repositoryID int) (string, error) {
		return "", nil
	}

	if found, err := db.DeleteUploadByID(context.Background(), 1, getTipCommit); err != nil {
		t.Fatalf("unexpected error deleting upload: %s", err)
	} else if found {
		t.Fatalf("unexpected record")
	}
}
// TestDeleteUploadByIDUpdatesVisibility verifies that deleting an upload that
// was visible at tip triggers the tip-commit callback and recalculates the
// visibility of the repository's remaining uploads.
func TestDeleteUploadByIDUpdatesVisibility(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	insertUploads(t, db.db,
		Upload{ID: 1, Commit: makeCommit(4), Root: "sub1/", VisibleAtTip: true},
		Upload{ID: 2, Commit: makeCommit(3), Root: "sub2/", VisibleAtTip: true},
		Upload{ID: 3, Commit: makeCommit(2), Root: "sub1/", VisibleAtTip: false},
		Upload{ID: 4, Commit: makeCommit(1), Root: "sub2/", VisibleAtTip: false},
	)

	// Linear commit graph: 1 <- 2 <- 3 <- 4 (tip).
	insertCommits(t, db.db, map[string][]string{
		makeCommit(1): {},
		makeCommit(2): {makeCommit(1)},
		makeCommit(3): {makeCommit(2)},
		makeCommit(4): {makeCommit(3)},
	})

	var called bool
	getTipCommit := func(repositoryID int) (string, error) {
		called = true
		return makeCommit(4), nil
	}

	if found, err := db.DeleteUploadByID(context.Background(), 1, getTipCommit); err != nil {
		t.Fatalf("unexpected error deleting upload: %s", err)
	} else if !found {
		t.Fatalf("expected record to exist")
	} else if !called {
		t.Fatalf("expected call to getTipCommit")
	}

	// With upload 1 gone, upload 3 becomes the closest sub1/ upload to tip.
	expected := map[int]bool{2: true, 3: true, 4: false}
	visibilities := getDumpVisibilities(t, db.db)
	if diff := cmp.Diff(expected, visibilities); diff != "" {
		t.Errorf("unexpected visibility (-want +got):\n%s", diff)
	}
}
// TestResetStalled verifies that ResetStalled requeues only uploads that have
// been processing longer than StalledUploadMaxAge and whose rows are not
// locked by another transaction.
func TestResetStalled(t *testing.T) {
	if testing.Short() {
		t.Skip()
	}
	dbtesting.SetupGlobalTestDB(t)
	db := &dbImpl{db: dbconn.Global}

	now := time.Unix(1587396557, 0).UTC()
	t1 := now.Add(-time.Second * 6) // old
	t2 := now.Add(-time.Second * 2) // new enough
	t3 := now.Add(-time.Second * 3) // new enough
	t4 := now.Add(-time.Second * 8) // old
	t5 := now.Add(-time.Second * 8) // old

	insertUploads(t, db.db,
		Upload{ID: 1, State: "processing", StartedAt: &t1},
		Upload{ID: 2, State: "processing", StartedAt: &t2},
		Upload{ID: 3, State: "processing", StartedAt: &t3},
		Upload{ID: 4, State: "processing", StartedAt: &t4},
		Upload{ID: 5, State: "processing", StartedAt: &t5},
	)

	tx, err := db.db.BeginTx(context.Background(), nil)
	if err != nil {
		t.Fatal(err)
	}
	defer func() { _ = tx.Rollback() }()

	// Row lock upload 5 in a transaction which should be skipped by ResetStalled
	rows, err := tx.Query(`SELECT * FROM lsif_uploads WHERE id = 5 FOR UPDATE`)
	if err != nil {
		t.Fatal(err)
	}
	// Close the result set immediately; the row lock is held by the open
	// transaction (until the deferred rollback), but leaving the rows open
	// would pin and leak the underlying connection.
	if err := rows.Close(); err != nil {
		t.Fatal(err)
	}

	// Uploads 1 and 4 are old and unlocked; 2 and 3 are too recent; 5 is locked.
	expected := []int{1, 4}
	if ids, err := db.ResetStalled(context.Background(), now); err != nil {
		t.Fatalf("unexpected error resetting stalled uploads: %s", err)
	} else if diff := cmp.Diff(expected, ids); diff != "" {
		t.Errorf("unexpected ids (-want +got):\n%s", diff)
	}
}

View File

@ -0,0 +1,25 @@
package db
import (
"database/sql"
"github.com/keegancsmith/sqlf"
)
// ignoreErrNoRows returns the given error if it's not sql.ErrNoRows.
func ignoreErrNoRows(err error) error {
if err == sql.ErrNoRows {
return nil
}
return err
}
// intsToQueries renders each integer as a literal query fragment, suitable for
// joining into an IN clause.
func intsToQueries(values []int) []*sqlf.Query {
	var fragments []*sqlf.Query
	for _, v := range values {
		fragments = append(fragments, sqlf.Sprintf("%d", v))
	}
	return fragments
}

View File

@ -0,0 +1,68 @@
package janitor
import (
"context"
"time"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
)
// Janitor periodically performs best-effort cleanup of upload records.
type Janitor struct {
	db              db.DB         // backing store for upload records
	janitorInterval time.Duration // delay between cleanup passes
}

// JanitorOpts configures a Janitor.
type JanitorOpts struct {
	DB              db.DB
	JanitorInterval time.Duration
}
// NewJanitor constructs a Janitor from the given options.
func NewJanitor(opts JanitorOpts) *Janitor {
	janitor := Janitor{
		db:              opts.DB,
		janitorInterval: opts.JanitorInterval,
	}
	return &janitor
}
// Start runs the janitor loop, performing one cleanup pass per interval.
// A failed pass is logged and does not stop the loop.
//
// NOTE(review): Start never returns and has no cancellation mechanism — the
// loop runs for the lifetime of the process. Confirm this is intentional.
func (j *Janitor) Start() {
	for {
		if err := j.step(); err != nil {
			log15.Error("Failed to run janitor process", "error", err)
		}

		time.Sleep(j.janitorInterval)
	}
}
// step performs a single best-effort cleanup pass, stopping at the first
// failing task. See the following methods for more specifics.
//   - resetStalled
func (j *Janitor) step() error {
	cleanupFns := []func() error{
		j.resetStalled,
	}

	for _, fn := range cleanupFns {
		if err := fn(); err != nil {
			return err
		}
	}
	return nil
}
// resetStalled moves all uploads that have been in the PROCESSING state for a while back
// to QUEUED. For each updated upload record, the conversion process that was responsible
// for handling the upload did not hold a row lock, indicating that it has died.
func (j *Janitor) resetStalled() error {
	ids, err := j.db.ResetStalled(context.Background(), time.Now())
	if err == nil {
		for _, uploadID := range ids {
			log15.Debug("Reset stalled upload", "uploadID", uploadID)
		}
	}
	return err
}

View File

@ -0,0 +1,7 @@
package mocks
//go:generate env GOBIN=$PWD/.bin GO111MODULE=on go install github.com/efritz/go-mockgen
//go:generate $PWD/.bin/go-mockgen -f github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db -i DB -o mock_db.go
//go:generate $PWD/.bin/go-mockgen -f github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db -i ReferencePager -o mock_reference_pager.go
//go:generate $PWD/.bin/go-mockgen -f github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles -i BundleManagerClient -o mock_bundle_manager_client.go
//go:generate $PWD/.bin/go-mockgen -f github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles -i BundleClient -o mock_bundle_client.go

View File

@ -0,0 +1,921 @@
// Code generated by github.com/efritz/go-mockgen 0.1.0; DO NOT EDIT.
package mocks
import (
"context"
bundles "github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"sync"
)
// MockBundleClient is a mock implementation of the BundleClient interface
// (from the package
// github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles)
// used for unit testing.
type MockBundleClient struct {
	// DefinitionsFunc is an instance of a mock function object controlling
	// the behavior of the method Definitions.
	DefinitionsFunc *BundleClientDefinitionsFunc
	// ExistsFunc is an instance of a mock function object controlling the
	// behavior of the method Exists.
	ExistsFunc *BundleClientExistsFunc
	// HoverFunc is an instance of a mock function object controlling the
	// behavior of the method Hover.
	HoverFunc *BundleClientHoverFunc
	// MonikerResultsFunc is an instance of a mock function object
	// controlling the behavior of the method MonikerResults.
	MonikerResultsFunc *BundleClientMonikerResultsFunc
	// MonikersByPositionFunc is an instance of a mock function object
	// controlling the behavior of the method MonikersByPosition.
	MonikersByPositionFunc *BundleClientMonikersByPositionFunc
	// PackageInformationFunc is an instance of a mock function object
	// controlling the behavior of the method PackageInformation.
	PackageInformationFunc *BundleClientPackageInformationFunc
	// ReferencesFunc is an instance of a mock function object controlling
	// the behavior of the method References.
	ReferencesFunc *BundleClientReferencesFunc
}
// NewMockBundleClient creates a new mock of the BundleClient interface. All
// methods return zero values for all results, unless overwritten.
// (Generated code; regenerate via the go:generate directives rather than editing.)
func NewMockBundleClient() *MockBundleClient {
	return &MockBundleClient{
		DefinitionsFunc: &BundleClientDefinitionsFunc{
			defaultHook: func(context.Context, string, int, int) ([]bundles.Location, error) {
				return nil, nil
			},
		},
		ExistsFunc: &BundleClientExistsFunc{
			defaultHook: func(context.Context, string) (bool, error) {
				return false, nil
			},
		},
		HoverFunc: &BundleClientHoverFunc{
			defaultHook: func(context.Context, string, int, int) (string, bundles.Range, bool, error) {
				return "", bundles.Range{}, false, nil
			},
		},
		MonikerResultsFunc: &BundleClientMonikerResultsFunc{
			defaultHook: func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error) {
				return nil, 0, nil
			},
		},
		MonikersByPositionFunc: &BundleClientMonikersByPositionFunc{
			defaultHook: func(context.Context, string, int, int) ([][]bundles.MonikerData, error) {
				return nil, nil
			},
		},
		PackageInformationFunc: &BundleClientPackageInformationFunc{
			defaultHook: func(context.Context, string, string) (bundles.PackageInformationData, error) {
				return bundles.PackageInformationData{}, nil
			},
		},
		ReferencesFunc: &BundleClientReferencesFunc{
			defaultHook: func(context.Context, string, int, int) ([]bundles.Location, error) {
				return nil, nil
			},
		},
	}
}
// NewMockBundleClientFrom creates a new mock of the BundleClient interface.
// All methods delegate to the given implementation, unless overwritten.
func NewMockBundleClientFrom(i bundles.BundleClient) *MockBundleClient {
	return &MockBundleClient{
		DefinitionsFunc: &BundleClientDefinitionsFunc{
			defaultHook: i.Definitions,
		},
		ExistsFunc: &BundleClientExistsFunc{
			defaultHook: i.Exists,
		},
		HoverFunc: &BundleClientHoverFunc{
			defaultHook: i.Hover,
		},
		MonikerResultsFunc: &BundleClientMonikerResultsFunc{
			defaultHook: i.MonikerResults,
		},
		MonikersByPositionFunc: &BundleClientMonikersByPositionFunc{
			defaultHook: i.MonikersByPosition,
		},
		PackageInformationFunc: &BundleClientPackageInformationFunc{
			defaultHook: i.PackageInformation,
		},
		ReferencesFunc: &BundleClientReferencesFunc{
			defaultHook: i.References,
		},
	}
}
// BundleClientDefinitionsFunc describes the behavior when the Definitions
// method of the parent MockBundleClient instance is invoked.
type BundleClientDefinitionsFunc struct {
	defaultHook func(context.Context, string, int, int) ([]bundles.Location, error)
	hooks       []func(context.Context, string, int, int) ([]bundles.Location, error)
	history     []BundleClientDefinitionsFuncCall
	mutex       sync.Mutex
}

// Definitions delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockBundleClient) Definitions(v0 context.Context, v1 string, v2 int, v3 int) ([]bundles.Location, error) {
	r0, r1 := m.DefinitionsFunc.nextHook()(v0, v1, v2, v3)
	m.DefinitionsFunc.appendCall(BundleClientDefinitionsFuncCall{v0, v1, v2, v3, r0, r1})
	return r0, r1
}

// SetDefaultHook sets function that is called when the Definitions method
// of the parent MockBundleClient instance is invoked and the hook queue is
// empty.
func (f *BundleClientDefinitionsFunc) SetDefaultHook(hook func(context.Context, string, int, int) ([]bundles.Location, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// Definitions method of the parent MockBundleClient instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *BundleClientDefinitionsFunc) PushHook(hook func(context.Context, string, int, int) ([]bundles.Location, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns
// the given values.
func (f *BundleClientDefinitionsFunc) SetDefaultReturn(r0 []bundles.Location, r1 error) {
	f.SetDefaultHook(func(context.Context, string, int, int) ([]bundles.Location, error) {
		return r0, r1
	})
}

// PushReturn calls PushHook with a function that returns the given
// values.
func (f *BundleClientDefinitionsFunc) PushReturn(r0 []bundles.Location, r1 error) {
	f.PushHook(func(context.Context, string, int, int) ([]bundles.Location, error) {
		return r0, r1
	})
}

// nextHook pops the front of the hook queue, falling back to the default
// hook when the queue is empty.
func (f *BundleClientDefinitionsFunc) nextHook() func(context.Context, string, int, int) ([]bundles.Location, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation in the call history.
func (f *BundleClientDefinitionsFunc) appendCall(r0 BundleClientDefinitionsFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleClientDefinitionsFuncCall objects
// describing the invocations of this function.
func (f *BundleClientDefinitionsFunc) History() []BundleClientDefinitionsFuncCall {
	f.mutex.Lock()
	history := make([]BundleClientDefinitionsFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleClientDefinitionsFuncCall is an object that describes an invocation
// of method Definitions on an instance of MockBundleClient.
type BundleClientDefinitionsFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 string
	// Arg2 is the value of the 3rd argument passed to this method
	// invocation.
	Arg2 int
	// Arg3 is the value of the 4th argument passed to this method
	// invocation.
	Arg3 int
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 []bundles.Location
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleClientDefinitionsFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleClientDefinitionsFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}
// BundleClientExistsFunc describes the behavior when the Exists method of
// the parent MockBundleClient instance is invoked.
type BundleClientExistsFunc struct {
	defaultHook func(context.Context, string) (bool, error)
	hooks       []func(context.Context, string) (bool, error)
	history     []BundleClientExistsFuncCall
	mutex       sync.Mutex
}

// Exists delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockBundleClient) Exists(v0 context.Context, v1 string) (bool, error) {
	r0, r1 := m.ExistsFunc.nextHook()(v0, v1)
	m.ExistsFunc.appendCall(BundleClientExistsFuncCall{v0, v1, r0, r1})
	return r0, r1
}

// SetDefaultHook sets function that is called when the Exists method of the
// parent MockBundleClient instance is invoked and the hook queue is empty.
func (f *BundleClientExistsFunc) SetDefaultHook(hook func(context.Context, string) (bool, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// Exists method of the parent MockBundleClient instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *BundleClientExistsFunc) PushHook(hook func(context.Context, string) (bool, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns
// the given values.
func (f *BundleClientExistsFunc) SetDefaultReturn(r0 bool, r1 error) {
	f.SetDefaultHook(func(context.Context, string) (bool, error) {
		return r0, r1
	})
}

// PushReturn calls PushHook with a function that returns the given
// values.
func (f *BundleClientExistsFunc) PushReturn(r0 bool, r1 error) {
	f.PushHook(func(context.Context, string) (bool, error) {
		return r0, r1
	})
}

// nextHook pops the front of the hook queue, falling back to the default
// hook when the queue is empty.
func (f *BundleClientExistsFunc) nextHook() func(context.Context, string) (bool, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation in the call history.
func (f *BundleClientExistsFunc) appendCall(r0 BundleClientExistsFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleClientExistsFuncCall objects
// describing the invocations of this function.
func (f *BundleClientExistsFunc) History() []BundleClientExistsFuncCall {
	f.mutex.Lock()
	history := make([]BundleClientExistsFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleClientExistsFuncCall is an object that describes an invocation of
// method Exists on an instance of MockBundleClient.
type BundleClientExistsFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 string
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 bool
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleClientExistsFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleClientExistsFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}
// BundleClientHoverFunc describes the behavior when the Hover method of the
// parent MockBundleClient instance is invoked.
type BundleClientHoverFunc struct {
	defaultHook func(context.Context, string, int, int) (string, bundles.Range, bool, error)
	hooks       []func(context.Context, string, int, int) (string, bundles.Range, bool, error)
	history     []BundleClientHoverFuncCall
	mutex       sync.Mutex
}

// Hover delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockBundleClient) Hover(v0 context.Context, v1 string, v2 int, v3 int) (string, bundles.Range, bool, error) {
	r0, r1, r2, r3 := m.HoverFunc.nextHook()(v0, v1, v2, v3)
	m.HoverFunc.appendCall(BundleClientHoverFuncCall{v0, v1, v2, v3, r0, r1, r2, r3})
	return r0, r1, r2, r3
}

// SetDefaultHook sets the function that is called when the Hover method of
// the parent MockBundleClient instance is invoked and the hook queue is
// empty.
func (f *BundleClientHoverFunc) SetDefaultHook(hook func(context.Context, string, int, int) (string, bundles.Range, bool, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// Hover method of the parent MockBundleClient instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *BundleClientHoverFunc) PushHook(hook func(context.Context, string, int, int) (string, bundles.Range, bool, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *BundleClientHoverFunc) SetDefaultReturn(r0 string, r1 bundles.Range, r2 bool, r3 error) {
	f.SetDefaultHook(func(context.Context, string, int, int) (string, bundles.Range, bool, error) {
		return r0, r1, r2, r3
	})
}

// PushReturn calls PushHook with a function that returns the given values.
func (f *BundleClientHoverFunc) PushReturn(r0 string, r1 bundles.Range, r2 bool, r3 error) {
	f.PushHook(func(context.Context, string, int, int) (string, bundles.Range, bool, error) {
		return r0, r1, r2, r3
	})
}

// nextHook returns the hook at the front of the queue, or the default hook
// if the queue is empty.
func (f *BundleClientHoverFunc) nextHook() func(context.Context, string, int, int) (string, bundles.Range, bool, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation of the mocked method in the history.
func (f *BundleClientHoverFunc) appendCall(r0 BundleClientHoverFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleClientHoverFuncCall objects
// describing the invocations of this function.
func (f *BundleClientHoverFunc) History() []BundleClientHoverFuncCall {
	f.mutex.Lock()
	history := make([]BundleClientHoverFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleClientHoverFuncCall is an object that describes an invocation of
// method Hover on an instance of MockBundleClient.
type BundleClientHoverFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 string
	// Arg2 is the value of the 3rd argument passed to this method
	// invocation.
	Arg2 int
	// Arg3 is the value of the 4th argument passed to this method
	// invocation.
	Arg3 int
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 string
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 bundles.Range
	// Result2 is the value of the 3rd result returned from this method
	// invocation.
	Result2 bool
	// Result3 is the value of the 4th result returned from this method
	// invocation.
	Result3 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleClientHoverFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleClientHoverFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1, c.Result2, c.Result3}
}
// BundleClientMonikerResultsFunc describes the behavior when the
// MonikerResults method of the parent MockBundleClient instance is invoked.
type BundleClientMonikerResultsFunc struct {
	defaultHook func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error)
	hooks       []func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error)
	history     []BundleClientMonikerResultsFuncCall
	mutex       sync.Mutex
}

// MonikerResults delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockBundleClient) MonikerResults(v0 context.Context, v1 string, v2 string, v3 string, v4 int, v5 int) ([]bundles.Location, int, error) {
	r0, r1, r2 := m.MonikerResultsFunc.nextHook()(v0, v1, v2, v3, v4, v5)
	m.MonikerResultsFunc.appendCall(BundleClientMonikerResultsFuncCall{v0, v1, v2, v3, v4, v5, r0, r1, r2})
	return r0, r1, r2
}

// SetDefaultHook sets the function that is called when the MonikerResults
// method of the parent MockBundleClient instance is invoked and the hook
// queue is empty.
func (f *BundleClientMonikerResultsFunc) SetDefaultHook(hook func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// MonikerResults method of the parent MockBundleClient instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *BundleClientMonikerResultsFunc) PushHook(hook func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *BundleClientMonikerResultsFunc) SetDefaultReturn(r0 []bundles.Location, r1 int, r2 error) {
	f.SetDefaultHook(func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error) {
		return r0, r1, r2
	})
}

// PushReturn calls PushHook with a function that returns the given values.
func (f *BundleClientMonikerResultsFunc) PushReturn(r0 []bundles.Location, r1 int, r2 error) {
	f.PushHook(func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error) {
		return r0, r1, r2
	})
}

// nextHook returns the hook at the front of the queue, or the default hook
// if the queue is empty.
func (f *BundleClientMonikerResultsFunc) nextHook() func(context.Context, string, string, string, int, int) ([]bundles.Location, int, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation of the mocked method in the history.
func (f *BundleClientMonikerResultsFunc) appendCall(r0 BundleClientMonikerResultsFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleClientMonikerResultsFuncCall objects
// describing the invocations of this function.
func (f *BundleClientMonikerResultsFunc) History() []BundleClientMonikerResultsFuncCall {
	f.mutex.Lock()
	history := make([]BundleClientMonikerResultsFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleClientMonikerResultsFuncCall is an object that describes an
// invocation of method MonikerResults on an instance of MockBundleClient.
type BundleClientMonikerResultsFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 string
	// Arg2 is the value of the 3rd argument passed to this method
	// invocation.
	Arg2 string
	// Arg3 is the value of the 4th argument passed to this method
	// invocation.
	Arg3 string
	// Arg4 is the value of the 5th argument passed to this method
	// invocation.
	Arg4 int
	// Arg5 is the value of the 6th argument passed to this method
	// invocation.
	Arg5 int
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 []bundles.Location
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 int
	// Result2 is the value of the 3rd result returned from this method
	// invocation.
	Result2 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleClientMonikerResultsFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3, c.Arg4, c.Arg5}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleClientMonikerResultsFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1, c.Result2}
}
// BundleClientMonikersByPositionFunc describes the behavior when the
// MonikersByPosition method of the parent MockBundleClient instance is
// invoked.
type BundleClientMonikersByPositionFunc struct {
	defaultHook func(context.Context, string, int, int) ([][]bundles.MonikerData, error)
	hooks       []func(context.Context, string, int, int) ([][]bundles.MonikerData, error)
	history     []BundleClientMonikersByPositionFuncCall
	mutex       sync.Mutex
}

// MonikersByPosition delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockBundleClient) MonikersByPosition(v0 context.Context, v1 string, v2 int, v3 int) ([][]bundles.MonikerData, error) {
	r0, r1 := m.MonikersByPositionFunc.nextHook()(v0, v1, v2, v3)
	m.MonikersByPositionFunc.appendCall(BundleClientMonikersByPositionFuncCall{v0, v1, v2, v3, r0, r1})
	return r0, r1
}

// SetDefaultHook sets the function that is called when the
// MonikersByPosition method of the parent MockBundleClient instance is
// invoked and the hook queue is empty.
func (f *BundleClientMonikersByPositionFunc) SetDefaultHook(hook func(context.Context, string, int, int) ([][]bundles.MonikerData, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// MonikersByPosition method of the parent MockBundleClient instance invokes
// the hook at the front of the queue and discards it. After the queue is
// empty, the default hook function is invoked for any future action.
func (f *BundleClientMonikersByPositionFunc) PushHook(hook func(context.Context, string, int, int) ([][]bundles.MonikerData, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *BundleClientMonikersByPositionFunc) SetDefaultReturn(r0 [][]bundles.MonikerData, r1 error) {
	f.SetDefaultHook(func(context.Context, string, int, int) ([][]bundles.MonikerData, error) {
		return r0, r1
	})
}

// PushReturn calls PushHook with a function that returns the given values.
func (f *BundleClientMonikersByPositionFunc) PushReturn(r0 [][]bundles.MonikerData, r1 error) {
	f.PushHook(func(context.Context, string, int, int) ([][]bundles.MonikerData, error) {
		return r0, r1
	})
}

// nextHook returns the hook at the front of the queue, or the default hook
// if the queue is empty.
func (f *BundleClientMonikersByPositionFunc) nextHook() func(context.Context, string, int, int) ([][]bundles.MonikerData, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation of the mocked method in the history.
func (f *BundleClientMonikersByPositionFunc) appendCall(r0 BundleClientMonikersByPositionFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleClientMonikersByPositionFuncCall
// objects describing the invocations of this function.
func (f *BundleClientMonikersByPositionFunc) History() []BundleClientMonikersByPositionFuncCall {
	f.mutex.Lock()
	history := make([]BundleClientMonikersByPositionFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleClientMonikersByPositionFuncCall is an object that describes an
// invocation of method MonikersByPosition on an instance of
// MockBundleClient.
type BundleClientMonikersByPositionFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 string
	// Arg2 is the value of the 3rd argument passed to this method
	// invocation.
	Arg2 int
	// Arg3 is the value of the 4th argument passed to this method
	// invocation.
	Arg3 int
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 [][]bundles.MonikerData
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleClientMonikersByPositionFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleClientMonikersByPositionFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}
// BundleClientPackageInformationFunc describes the behavior when the
// PackageInformation method of the parent MockBundleClient instance is
// invoked.
type BundleClientPackageInformationFunc struct {
	defaultHook func(context.Context, string, string) (bundles.PackageInformationData, error)
	hooks       []func(context.Context, string, string) (bundles.PackageInformationData, error)
	history     []BundleClientPackageInformationFuncCall
	mutex       sync.Mutex
}

// PackageInformation delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockBundleClient) PackageInformation(v0 context.Context, v1 string, v2 string) (bundles.PackageInformationData, error) {
	r0, r1 := m.PackageInformationFunc.nextHook()(v0, v1, v2)
	m.PackageInformationFunc.appendCall(BundleClientPackageInformationFuncCall{v0, v1, v2, r0, r1})
	return r0, r1
}

// SetDefaultHook sets the function that is called when the
// PackageInformation method of the parent MockBundleClient instance is
// invoked and the hook queue is empty.
func (f *BundleClientPackageInformationFunc) SetDefaultHook(hook func(context.Context, string, string) (bundles.PackageInformationData, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// PackageInformation method of the parent MockBundleClient instance invokes
// the hook at the front of the queue and discards it. After the queue is
// empty, the default hook function is invoked for any future action.
func (f *BundleClientPackageInformationFunc) PushHook(hook func(context.Context, string, string) (bundles.PackageInformationData, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *BundleClientPackageInformationFunc) SetDefaultReturn(r0 bundles.PackageInformationData, r1 error) {
	f.SetDefaultHook(func(context.Context, string, string) (bundles.PackageInformationData, error) {
		return r0, r1
	})
}

// PushReturn calls PushHook with a function that returns the given values.
func (f *BundleClientPackageInformationFunc) PushReturn(r0 bundles.PackageInformationData, r1 error) {
	f.PushHook(func(context.Context, string, string) (bundles.PackageInformationData, error) {
		return r0, r1
	})
}

// nextHook returns the hook at the front of the queue, or the default hook
// if the queue is empty.
func (f *BundleClientPackageInformationFunc) nextHook() func(context.Context, string, string) (bundles.PackageInformationData, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation of the mocked method in the history.
func (f *BundleClientPackageInformationFunc) appendCall(r0 BundleClientPackageInformationFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleClientPackageInformationFuncCall
// objects describing the invocations of this function.
func (f *BundleClientPackageInformationFunc) History() []BundleClientPackageInformationFuncCall {
	f.mutex.Lock()
	history := make([]BundleClientPackageInformationFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleClientPackageInformationFuncCall is an object that describes an
// invocation of method PackageInformation on an instance of
// MockBundleClient.
type BundleClientPackageInformationFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 string
	// Arg2 is the value of the 3rd argument passed to this method
	// invocation.
	Arg2 string
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 bundles.PackageInformationData
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleClientPackageInformationFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1, c.Arg2}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleClientPackageInformationFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}
// BundleClientReferencesFunc describes the behavior when the References
// method of the parent MockBundleClient instance is invoked.
type BundleClientReferencesFunc struct {
	defaultHook func(context.Context, string, int, int) ([]bundles.Location, error)
	hooks       []func(context.Context, string, int, int) ([]bundles.Location, error)
	history     []BundleClientReferencesFuncCall
	mutex       sync.Mutex
}

// References delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockBundleClient) References(v0 context.Context, v1 string, v2 int, v3 int) ([]bundles.Location, error) {
	r0, r1 := m.ReferencesFunc.nextHook()(v0, v1, v2, v3)
	m.ReferencesFunc.appendCall(BundleClientReferencesFuncCall{v0, v1, v2, v3, r0, r1})
	return r0, r1
}

// SetDefaultHook sets the function that is called when the References
// method of the parent MockBundleClient instance is invoked and the hook
// queue is empty.
func (f *BundleClientReferencesFunc) SetDefaultHook(hook func(context.Context, string, int, int) ([]bundles.Location, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// References method of the parent MockBundleClient instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *BundleClientReferencesFunc) PushHook(hook func(context.Context, string, int, int) ([]bundles.Location, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *BundleClientReferencesFunc) SetDefaultReturn(r0 []bundles.Location, r1 error) {
	f.SetDefaultHook(func(context.Context, string, int, int) ([]bundles.Location, error) {
		return r0, r1
	})
}

// PushReturn calls PushHook with a function that returns the given values.
func (f *BundleClientReferencesFunc) PushReturn(r0 []bundles.Location, r1 error) {
	f.PushHook(func(context.Context, string, int, int) ([]bundles.Location, error) {
		return r0, r1
	})
}

// nextHook returns the hook at the front of the queue, or the default hook
// if the queue is empty.
func (f *BundleClientReferencesFunc) nextHook() func(context.Context, string, int, int) ([]bundles.Location, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation of the mocked method in the history.
func (f *BundleClientReferencesFunc) appendCall(r0 BundleClientReferencesFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleClientReferencesFuncCall objects
// describing the invocations of this function.
func (f *BundleClientReferencesFunc) History() []BundleClientReferencesFuncCall {
	f.mutex.Lock()
	history := make([]BundleClientReferencesFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleClientReferencesFuncCall is an object that describes an invocation
// of method References on an instance of MockBundleClient.
type BundleClientReferencesFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 string
	// Arg2 is the value of the 3rd argument passed to this method
	// invocation.
	Arg2 int
	// Arg3 is the value of the 4th argument passed to this method
	// invocation.
	Arg3 int
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 []bundles.Location
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleClientReferencesFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleClientReferencesFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}

View File

@ -0,0 +1,272 @@
// Code generated by github.com/efritz/go-mockgen 0.1.0; DO NOT EDIT.
package mocks
import (
"context"
bundles "github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"io"
"sync"
)
// MockBundleManagerClient is a mock implementation of the
// BundleManagerClient interface (from the package
// github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles)
// used for unit testing.
type MockBundleManagerClient struct {
	// BundleClientFunc is an instance of a mock function object controlling
	// the behavior of the method BundleClient.
	BundleClientFunc *BundleManagerClientBundleClientFunc
	// SendUploadFunc is an instance of a mock function object controlling
	// the behavior of the method SendUpload.
	SendUploadFunc *BundleManagerClientSendUploadFunc
}
// NewMockBundleManagerClient creates a new mock of the BundleManagerClient
// interface. All methods return zero values for all results, unless
// overwritten.
func NewMockBundleManagerClient() *MockBundleManagerClient {
	return &MockBundleManagerClient{
		// Each default hook returns the zero value for its result types.
		BundleClientFunc: &BundleManagerClientBundleClientFunc{
			defaultHook: func(int) bundles.BundleClient {
				return nil
			},
		},
		SendUploadFunc: &BundleManagerClientSendUploadFunc{
			defaultHook: func(context.Context, int, io.Reader) error {
				return nil
			},
		},
	}
}
// NewMockBundleManagerClientFrom creates a new mock of the
// MockBundleManagerClient interface. All methods delegate to the given
// implementation, unless overwritten.
func NewMockBundleManagerClientFrom(i bundles.BundleManagerClient) *MockBundleManagerClient {
	return &MockBundleManagerClient{
		// Each default hook forwards directly to the wrapped implementation.
		BundleClientFunc: &BundleManagerClientBundleClientFunc{
			defaultHook: i.BundleClient,
		},
		SendUploadFunc: &BundleManagerClientSendUploadFunc{
			defaultHook: i.SendUpload,
		},
	}
}
// BundleManagerClientBundleClientFunc describes the behavior when the
// BundleClient method of the parent MockBundleManagerClient instance is
// invoked.
type BundleManagerClientBundleClientFunc struct {
	defaultHook func(int) bundles.BundleClient
	hooks       []func(int) bundles.BundleClient
	history     []BundleManagerClientBundleClientFuncCall
	mutex       sync.Mutex
}

// BundleClient delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockBundleManagerClient) BundleClient(v0 int) bundles.BundleClient {
	r0 := m.BundleClientFunc.nextHook()(v0)
	m.BundleClientFunc.appendCall(BundleManagerClientBundleClientFuncCall{v0, r0})
	return r0
}

// SetDefaultHook sets the function that is called when the BundleClient
// method of the parent MockBundleManagerClient instance is invoked and the
// hook queue is empty.
func (f *BundleManagerClientBundleClientFunc) SetDefaultHook(hook func(int) bundles.BundleClient) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// BundleClient method of the parent MockBundleManagerClient instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *BundleManagerClientBundleClientFunc) PushHook(hook func(int) bundles.BundleClient) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *BundleManagerClientBundleClientFunc) SetDefaultReturn(r0 bundles.BundleClient) {
	f.SetDefaultHook(func(int) bundles.BundleClient {
		return r0
	})
}

// PushReturn calls PushHook with a function that returns the given values.
func (f *BundleManagerClientBundleClientFunc) PushReturn(r0 bundles.BundleClient) {
	f.PushHook(func(int) bundles.BundleClient {
		return r0
	})
}

// nextHook returns the hook at the front of the queue, or the default hook
// if the queue is empty.
func (f *BundleManagerClientBundleClientFunc) nextHook() func(int) bundles.BundleClient {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation of the mocked method in the history.
func (f *BundleManagerClientBundleClientFunc) appendCall(r0 BundleManagerClientBundleClientFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleManagerClientBundleClientFuncCall
// objects describing the invocations of this function.
func (f *BundleManagerClientBundleClientFunc) History() []BundleManagerClientBundleClientFuncCall {
	f.mutex.Lock()
	history := make([]BundleManagerClientBundleClientFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleManagerClientBundleClientFuncCall is an object that describes an
// invocation of method BundleClient on an instance of
// MockBundleManagerClient.
type BundleManagerClientBundleClientFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 int
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 bundles.BundleClient
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleManagerClientBundleClientFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleManagerClientBundleClientFuncCall) Results() []interface{} {
	return []interface{}{c.Result0}
}
// BundleManagerClientSendUploadFunc describes the behavior when the
// SendUpload method of the parent MockBundleManagerClient instance is
// invoked.
type BundleManagerClientSendUploadFunc struct {
	defaultHook func(context.Context, int, io.Reader) error
	hooks       []func(context.Context, int, io.Reader) error
	history     []BundleManagerClientSendUploadFuncCall
	mutex       sync.Mutex
}

// SendUpload delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockBundleManagerClient) SendUpload(v0 context.Context, v1 int, v2 io.Reader) error {
	r0 := m.SendUploadFunc.nextHook()(v0, v1, v2)
	m.SendUploadFunc.appendCall(BundleManagerClientSendUploadFuncCall{v0, v1, v2, r0})
	return r0
}

// SetDefaultHook sets the function that is called when the SendUpload
// method of the parent MockBundleManagerClient instance is invoked and the
// hook queue is empty.
func (f *BundleManagerClientSendUploadFunc) SetDefaultHook(hook func(context.Context, int, io.Reader) error) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// SendUpload method of the parent MockBundleManagerClient instance invokes
// the hook at the front of the queue and discards it. After the queue is
// empty, the default hook function is invoked for any future action.
func (f *BundleManagerClientSendUploadFunc) PushHook(hook func(context.Context, int, io.Reader) error) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *BundleManagerClientSendUploadFunc) SetDefaultReturn(r0 error) {
	f.SetDefaultHook(func(context.Context, int, io.Reader) error {
		return r0
	})
}

// PushReturn calls PushHook with a function that returns the given values.
func (f *BundleManagerClientSendUploadFunc) PushReturn(r0 error) {
	f.PushHook(func(context.Context, int, io.Reader) error {
		return r0
	})
}

// nextHook returns the hook at the front of the queue, or the default hook
// if the queue is empty.
func (f *BundleManagerClientSendUploadFunc) nextHook() func(context.Context, int, io.Reader) error {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records an invocation of the mocked method in the history.
func (f *BundleManagerClientSendUploadFunc) appendCall(r0 BundleManagerClientSendUploadFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of BundleManagerClientSendUploadFuncCall
// objects describing the invocations of this function.
func (f *BundleManagerClientSendUploadFunc) History() []BundleManagerClientSendUploadFuncCall {
	f.mutex.Lock()
	history := make([]BundleManagerClientSendUploadFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// BundleManagerClientSendUploadFuncCall is an object that describes an
// invocation of method SendUpload on an instance of
// MockBundleManagerClient.
type BundleManagerClientSendUploadFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 context.Context
	// Arg1 is the value of the 2nd argument passed to this method
	// invocation.
	Arg1 int
	// Arg2 is the value of the 3rd argument passed to this method
	// invocation.
	Arg2 io.Reader
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c BundleManagerClientSendUploadFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0, c.Arg1, c.Arg2}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c BundleManagerClientSendUploadFuncCall) Results() []interface{} {
	return []interface{}{c.Result0}
}

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,262 @@
// Code generated by github.com/efritz/go-mockgen 0.1.0; DO NOT EDIT.
package mocks
import (
db "github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"sync"
)
// MockReferencePager is a mock implementation of the ReferencePager
// interface (from the package
// github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db)
// used for unit testing.
type MockReferencePager struct {
	// CloseTxFunc is an instance of a mock function object controlling the
	// behavior of the method CloseTx.
	CloseTxFunc *ReferencePagerCloseTxFunc
	// PageFromOffsetFunc is an instance of a mock function object
	// controlling the behavior of the method PageFromOffset.
	PageFromOffsetFunc *ReferencePagerPageFromOffsetFunc
}

// NewMockReferencePager creates a new mock of the ReferencePager interface.
// All methods return zero values for all results, unless overwritten.
func NewMockReferencePager() *MockReferencePager {
	return &MockReferencePager{
		CloseTxFunc: &ReferencePagerCloseTxFunc{
			defaultHook: func(error) error {
				return nil
			},
		},
		PageFromOffsetFunc: &ReferencePagerPageFromOffsetFunc{
			defaultHook: func(int) ([]db.Reference, error) {
				return nil, nil
			},
		},
	}
}

// NewMockReferencePagerFrom creates a new mock of the MockReferencePager
// interface. All methods delegate to the given implementation, unless
// overwritten.
func NewMockReferencePagerFrom(i db.ReferencePager) *MockReferencePager {
	return &MockReferencePager{
		CloseTxFunc: &ReferencePagerCloseTxFunc{
			defaultHook: i.CloseTx,
		},
		PageFromOffsetFunc: &ReferencePagerPageFromOffsetFunc{
			defaultHook: i.PageFromOffset,
		},
	}
}
// ReferencePagerCloseTxFunc describes the behavior when the CloseTx method
// of the parent MockReferencePager instance is invoked.
type ReferencePagerCloseTxFunc struct {
	defaultHook func(error) error
	hooks       []func(error) error
	history     []ReferencePagerCloseTxFuncCall
	mutex       sync.Mutex
}

// CloseTx delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockReferencePager) CloseTx(v0 error) error {
	r0 := m.CloseTxFunc.nextHook()(v0)
	m.CloseTxFunc.appendCall(ReferencePagerCloseTxFuncCall{v0, r0})
	return r0
}

// SetDefaultHook sets function that is called when the CloseTx method of
// the parent MockReferencePager instance is invoked and the hook queue is
// empty.
func (f *ReferencePagerCloseTxFunc) SetDefaultHook(hook func(error) error) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// CloseTx method of the parent MockReferencePager instance invokes the hook
// at the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *ReferencePagerCloseTxFunc) PushHook(hook func(error) error) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns
// the given values.
func (f *ReferencePagerCloseTxFunc) SetDefaultReturn(r0 error) {
	f.SetDefaultHook(func(error) error {
		return r0
	})
}

// PushReturn calls PushHook with a function that returns the given
// values.
func (f *ReferencePagerCloseTxFunc) PushReturn(r0 error) {
	f.PushHook(func(error) error {
		return r0
	})
}

// nextHook returns the hook at the front of the queue, consuming it, or
// the default hook when the queue is empty.
func (f *ReferencePagerCloseTxFunc) nextHook() func(error) error {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records a single invocation for later inspection via History.
func (f *ReferencePagerCloseTxFunc) appendCall(r0 ReferencePagerCloseTxFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of ReferencePagerCloseTxFuncCall objects
// describing the invocations of this function. The returned slice is a
// copy and is safe for the caller to retain.
func (f *ReferencePagerCloseTxFunc) History() []ReferencePagerCloseTxFuncCall {
	f.mutex.Lock()
	history := make([]ReferencePagerCloseTxFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// ReferencePagerCloseTxFuncCall is an object that describes an invocation
// of method CloseTx on an instance of MockReferencePager.
type ReferencePagerCloseTxFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 error
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c ReferencePagerCloseTxFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c ReferencePagerCloseTxFuncCall) Results() []interface{} {
	return []interface{}{c.Result0}
}
// ReferencePagerPageFromOffsetFunc describes the behavior when the
// PageFromOffset method of the parent MockReferencePager instance is
// invoked.
type ReferencePagerPageFromOffsetFunc struct {
	defaultHook func(int) ([]db.Reference, error)
	hooks       []func(int) ([]db.Reference, error)
	history     []ReferencePagerPageFromOffsetFuncCall
	mutex       sync.Mutex
}

// PageFromOffset delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockReferencePager) PageFromOffset(v0 int) ([]db.Reference, error) {
	r0, r1 := m.PageFromOffsetFunc.nextHook()(v0)
	m.PageFromOffsetFunc.appendCall(ReferencePagerPageFromOffsetFuncCall{v0, r0, r1})
	return r0, r1
}

// SetDefaultHook sets function that is called when the PageFromOffset
// method of the parent MockReferencePager instance is invoked and the hook
// queue is empty.
func (f *ReferencePagerPageFromOffsetFunc) SetDefaultHook(hook func(int) ([]db.Reference, error)) {
	f.defaultHook = hook
}

// PushHook adds a function to the end of hook queue. Each invocation of the
// PageFromOffset method of the parent MockReferencePager instance invokes
// the hook at the front of the queue and discards it. After the queue is
// empty, the default hook function is invoked for any future action.
func (f *ReferencePagerPageFromOffsetFunc) PushHook(hook func(int) ([]db.Reference, error)) {
	f.mutex.Lock()
	f.hooks = append(f.hooks, hook)
	f.mutex.Unlock()
}

// SetDefaultReturn calls SetDefaultHook with a function that returns
// the given values.
func (f *ReferencePagerPageFromOffsetFunc) SetDefaultReturn(r0 []db.Reference, r1 error) {
	f.SetDefaultHook(func(int) ([]db.Reference, error) {
		return r0, r1
	})
}

// PushReturn calls PushHook with a function that returns the given
// values.
func (f *ReferencePagerPageFromOffsetFunc) PushReturn(r0 []db.Reference, r1 error) {
	f.PushHook(func(int) ([]db.Reference, error) {
		return r0, r1
	})
}

// nextHook returns the hook at the front of the queue, consuming it, or
// the default hook when the queue is empty.
func (f *ReferencePagerPageFromOffsetFunc) nextHook() func(int) ([]db.Reference, error) {
	f.mutex.Lock()
	defer f.mutex.Unlock()

	if len(f.hooks) == 0 {
		return f.defaultHook
	}

	hook := f.hooks[0]
	f.hooks = f.hooks[1:]
	return hook
}

// appendCall records a single invocation for later inspection via History.
func (f *ReferencePagerPageFromOffsetFunc) appendCall(r0 ReferencePagerPageFromOffsetFuncCall) {
	f.mutex.Lock()
	f.history = append(f.history, r0)
	f.mutex.Unlock()
}

// History returns a sequence of ReferencePagerPageFromOffsetFuncCall
// objects describing the invocations of this function. The returned slice
// is a copy and is safe for the caller to retain.
func (f *ReferencePagerPageFromOffsetFunc) History() []ReferencePagerPageFromOffsetFuncCall {
	f.mutex.Lock()
	history := make([]ReferencePagerPageFromOffsetFuncCall, len(f.history))
	copy(history, f.history)
	f.mutex.Unlock()

	return history
}

// ReferencePagerPageFromOffsetFuncCall is an object that describes an
// invocation of method PageFromOffset on an instance of MockReferencePager.
type ReferencePagerPageFromOffsetFuncCall struct {
	// Arg0 is the value of the 1st argument passed to this method
	// invocation.
	Arg0 int
	// Result0 is the value of the 1st result returned from this method
	// invocation.
	Result0 []db.Reference
	// Result1 is the value of the 2nd result returned from this method
	// invocation.
	Result1 error
}

// Args returns an interface slice containing the arguments of this
// invocation.
func (c ReferencePagerPageFromOffsetFuncCall) Args() []interface{} {
	return []interface{}{c.Arg0}
}

// Results returns an interface slice containing the results of this
// invocation.
func (c ReferencePagerPageFromOffsetFuncCall) Results() []interface{} {
	return []interface{}{c.Result0, c.Result1}
}

View File

@ -0,0 +1,25 @@
package server
import (
"bytes"
"context"
"github.com/sourcegraph/sourcegraph/cmd/frontend/db"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
)
// getTipCommit resolves the current HEAD commit of the repository with the
// given identifier by asking gitserver for `git rev-parse HEAD`.
func getTipCommit(repositoryID int) (string, error) {
	ctx := context.Background()

	repo, err := db.Repos.Get(ctx, api.RepoID(repositoryID))
	if err != nil {
		return "", err
	}

	cmd := gitserver.DefaultClient.Command("git", "rev-parse", "HEAD")
	cmd.Repo = gitserver.Repo{Name: repo.Name}

	output, err := cmd.CombinedOutput(ctx)
	if err != nil {
		return "", err
	}

	return string(bytes.TrimSpace(output)), nil
}

View File

@ -0,0 +1,311 @@
package server
import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"os"
"github.com/gorilla/mux"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/api"
)
// DefaultUploadPageSize is the upload page size used when the request does
// not supply an explicit limit.
const DefaultUploadPageSize = 50

// handler constructs the HTTP router serving the precise code intel API.
func (s *Server) handler() http.Handler {
	// Named "router" so it does not shadow the imported gorilla/mux package.
	router := mux.NewRouter()
	router.Path("/uploads/{id:[0-9]+}").Methods("GET").HandlerFunc(s.handleGetUploadByID)
	router.Path("/uploads/{id:[0-9]+}").Methods("DELETE").HandlerFunc(s.handleDeleteUploadByID)
	router.Path("/uploads/repository/{id:[0-9]+}").Methods("GET").HandlerFunc(s.handleGetUploadsByRepo)
	router.Path("/upload").Methods("POST").HandlerFunc(s.handleEnqueue)
	router.Path("/exists").Methods("GET").HandlerFunc(s.handleExists)
	router.Path("/definitions").Methods("GET").HandlerFunc(s.handleDefinitions)
	router.Path("/references").Methods("GET").HandlerFunc(s.handleReferences)
	router.Path("/hover").Methods("GET").HandlerFunc(s.handleHover)
	router.Path("/uploads").Methods("POST").HandlerFunc(s.handleUploads)
	router.Path("/prune").Methods("POST").HandlerFunc(s.handlePrune)
	router.HandleFunc("/healthz", func(w http.ResponseWriter, _ *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	return router
}
// GET /uploads/{id:[0-9]+}
func (s *Server) handleGetUploadByID(w http.ResponseWriter, r *http.Request) {
upload, exists, err := s.db.GetUploadByID(r.Context(), int(idFromRequest(r)))
if err != nil {
log15.Error("Failed to retrieve upload", "error", err)
http.Error(w, fmt.Sprintf("failed to retrieve upload: %s", err.Error()), http.StatusInternalServerError)
return
}
if !exists {
http.Error(w, "upload not found", http.StatusNotFound)
return
}
writeJSON(w, upload)
}
// DELETE /uploads/{id:[0-9]+}
func (s *Server) handleDeleteUploadByID(w http.ResponseWriter, r *http.Request) {
exists, err := s.db.DeleteUploadByID(r.Context(), int(idFromRequest(r)), getTipCommit)
if err != nil {
log15.Error("Failed to delete upload", "error", err)
http.Error(w, fmt.Sprintf("failed to delete upload: %s", err.Error()), http.StatusInternalServerError)
return
}
if !exists {
http.Error(w, "upload not found", http.StatusNotFound)
return
}
w.WriteHeader(http.StatusNoContent)
}
// GET /uploads/repository/{id:[0-9]+}
func (s *Server) handleGetUploadsByRepo(w http.ResponseWriter, r *http.Request) {
id := int(idFromRequest(r))
limit := getQueryIntDefault(r, "limit", DefaultUploadPageSize)
offset := getQueryInt(r, "offset")
uploads, totalCount, err := s.db.GetUploadsByRepo(
r.Context(),
id,
getQuery(r, "state"),
getQuery(r, "query"),
getQueryBool(r, "visibleAtTip"),
limit,
offset,
)
if err != nil {
log15.Error("Failed to list uploads", "error", err)
http.Error(w, fmt.Sprintf("failed to list uploads: %s", err.Error()), http.StatusInternalServerError)
return
}
if offset+len(uploads) < totalCount {
w.Header().Set("Link", makeNextLink(r.URL, map[string]interface{}{
"limit": limit,
"offset": offset + len(uploads),
}))
}
writeJSON(w, map[string]interface{}{"uploads": uploads, "totalCount": totalCount})
}
// POST /upload
func (s *Server) handleEnqueue(w http.ResponseWriter, r *http.Request) {
f, err := ioutil.TempFile("", "upload-")
if err != nil {
log15.Error("Failed to open target file", "error", err)
http.Error(w, fmt.Sprintf("failed to open target file: %s", err.Error()), http.StatusInternalServerError)
return
}
defer os.Remove(f.Name())
defer f.Close()
if _, err := io.Copy(f, r.Body); err != nil {
log15.Error("Failed to write payload", "error", err)
http.Error(w, fmt.Sprintf("failed to write payload: %s", err.Error()), http.StatusInternalServerError)
return
}
indexerName := getQuery(r, "indexerName")
if indexerName == "" {
if indexerName, err = readIndexerNameFromFile(f); err != nil {
log15.Error("Failed to read indexer name from upload", "error", err)
http.Error(w, fmt.Sprintf("failed to read indexer name from upload: %s", err.Error()), http.StatusInternalServerError)
return
}
}
id, closer, err := s.db.Enqueue(
r.Context(),
getQuery(r, "commit"),
sanitizeRoot(getQuery(r, "root")),
"{}", // TODO(efritz) - write tracing code
getQueryInt(r, "repositoryId"),
indexerName,
)
if err == nil {
err = closer.CloseTx(s.bundleManagerClient.SendUpload(r.Context(), id, f))
}
if err != nil {
log15.Error("Failed to enqueue payload", "error", err)
http.Error(w, fmt.Sprintf("failed to enqueue payload: %s", err.Error()), http.StatusInternalServerError)
return
}
w.WriteHeader(http.StatusAccepted)
writeJSON(w, map[string]interface{}{"id": id})
}
// GET /exists
func (s *Server) handleExists(w http.ResponseWriter, r *http.Request) {
dumps, err := s.api.FindClosestDumps(
r.Context(),
getQueryInt(r, "repositoryId"),
getQuery(r, "commit"),
getQuery(r, "path"),
)
if err != nil {
log15.Error("Failed to handle exists request", "error", err)
http.Error(w, fmt.Sprintf("failed to handle exists request: %s", err.Error()), http.StatusInternalServerError)
return
}
writeJSON(w, map[string]interface{}{"uploads": dumps})
}
// GET /definitions
func (s *Server) handleDefinitions(w http.ResponseWriter, r *http.Request) {
defs, err := s.api.Definitions(
r.Context(),
getQuery(r, "path"),
getQueryInt(r, "line"),
getQueryInt(r, "character"),
getQueryInt(r, "uploadId"),
)
if err != nil {
if err == api.ErrMissingDump {
http.Error(w, "no such dump", http.StatusNotFound)
return
}
log15.Error("Failed to handle definitions request", "error", err)
http.Error(w, fmt.Sprintf("failed to handle definitions request: %s", err.Error()), http.StatusInternalServerError)
return
}
outers, err := serializeLocations(defs)
if err != nil {
log15.Error("Failed to resolve locations", "error", err)
http.Error(w, fmt.Sprintf("failed to resolve locations: %s", err.Error()), http.StatusInternalServerError)
return
}
writeJSON(w, map[string]interface{}{"locations": outers})
}
// GET /references
func (s *Server) handleReferences(w http.ResponseWriter, r *http.Request) {
cursor, err := api.DecodeOrCreateCursor(
getQuery(r, "path"),
getQueryInt(r, "line"),
getQueryInt(r, "character"),
getQueryInt(r, "uploadId"),
getQuery(r, "rawCursor"),
s.db,
s.bundleManagerClient,
)
if err != nil {
if err == api.ErrMissingDump {
http.Error(w, "no such dump", http.StatusNotFound)
return
}
log15.Error("Failed to prepare cursor", "error", err)
http.Error(w, fmt.Sprintf("failed to prepare cursor: %s", err.Error()), http.StatusInternalServerError)
return
}
locations, newCursor, hasNewCursor, err := s.api.References(
r.Context(),
getQueryInt(r, "repositoryId"),
getQuery(r, "commit"),
getQueryInt(r, "limit"),
cursor,
)
if err != nil {
log15.Error("Failed to handle references request", "error", err)
http.Error(w, fmt.Sprintf("failed to handle references request: %s", err.Error()), http.StatusInternalServerError)
return
}
outers, err := serializeLocations(locations)
if err != nil {
log15.Error("Failed to resolve locations", "error", err)
http.Error(w, fmt.Sprintf("failed to resolve locations: %s", err.Error()), http.StatusInternalServerError)
return
}
if hasNewCursor {
w.Header().Set("Link", makeNextLink(r.URL, map[string]interface{}{
"cursor": api.EncodeCursor(newCursor),
}))
}
writeJSON(w, map[string]interface{}{"locations": outers})
}
// GET /hover
func (s *Server) handleHover(w http.ResponseWriter, r *http.Request) {
text, rn, exists, err := s.api.Hover(
r.Context(),
getQuery(r, "path"),
getQueryInt(r, "line"),
getQueryInt(r, "character"),
getQueryInt(r, "uploadId"),
)
if err != nil {
if err == api.ErrMissingDump {
http.Error(w, "no such dump", http.StatusNotFound)
return
}
log15.Error("Failed to handle hover request", "error", err)
http.Error(w, fmt.Sprintf("failed to handle hover request: %s", err.Error()), http.StatusInternalServerError)
return
}
if !exists {
writeJSON(w, nil)
} else {
writeJSON(w, map[string]interface{}{"text": text, "range": rn})
}
}
// POST /uploads
func (s *Server) handleUploads(w http.ResponseWriter, r *http.Request) {
payload := struct {
IDs []int `json:"ids"`
}{}
if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
log15.Error("Failed to read request body", "error", err)
http.Error(w, fmt.Sprintf("failed to read request body: %s", err.Error()), http.StatusInternalServerError)
return
}
states, err := s.db.GetStates(r.Context(), payload.IDs)
if err != nil {
log15.Error("Failed to retrieve upload states", "error", err)
http.Error(w, fmt.Sprintf("failed to retrieve upload states: %s", err.Error()), http.StatusInternalServerError)
return
}
pairs := []interface{}{}
for k, v := range states {
pairs = append(pairs, []interface{}{k, v})
}
writeJSON(w, map[string]interface{}{"type": "map", "value": pairs})
}
// POST /prune
func (s *Server) handlePrune(w http.ResponseWriter, r *http.Request) {
id, prunable, err := s.db.DeleteOldestDump(r.Context())
if err != nil {
log15.Error("Failed to prune upload", "error", err)
http.Error(w, fmt.Sprintf("failed to prune upload: %s", err.Error()), http.StatusInternalServerError)
return
}
if !prunable {
writeJSON(w, nil)
} else {
writeJSON(w, map[string]interface{}{"id": id})
}
}

View File

@ -0,0 +1,69 @@
package server
import (
"bufio"
"compress/gzip"
"encoding/json"
"errors"
"io"
"os"
)
type metaDataVertex struct {
Label string `json:"label"`
ToolInfo toolInfo `json:"toolInfo"`
}
type toolInfo struct {
Name string `json:"name"`
}
var ErrInvalidMetaDataVertex = errors.New("invalid metadata vertex")
// readIndexerNameFromFile returns the name of the tool that generated
// the given index file. This function reads only the first line of the
// file, where the metadata vertex is assumed to be in all valid dumps.
// This function also resets the offset of the file to the beginning of
// the file before and after reading.
func readIndexerNameFromFile(f *os.File) (string, error) {
_, err1 := f.Seek(0, 0)
name, err2 := readIndexerName(f)
_, err3 := f.Seek(0, 0)
for _, err := range []error{err1, err2, err3} {
if err != nil {
return "", err
}
}
return name, nil
}
// readIndexerName returns the name of the tool that generated the given
// index contents. This function reads only the first line of the file,
// where the metadata vertex is assumed to be in all valid dumps.
func readIndexerName(r io.Reader) (string, error) {
gzipReader, err := gzip.NewReader(r)
if err != nil {
return "", err
}
line, isPrefix, err := bufio.NewReader(gzipReader).ReadLine()
if err != nil {
return "", err
}
if isPrefix {
return "", errors.New("metaData vertex exceeds buffer")
}
meta := metaDataVertex{}
if err := json.Unmarshal(line, &meta); err != nil {
return "", ErrInvalidMetaDataVertex
}
if meta.Label != "metaData" || meta.ToolInfo.Name == "" {
return "", ErrInvalidMetaDataVertex
}
return meta.ToolInfo.Name, nil
}

View File

@ -0,0 +1,90 @@
package server
import (
"bufio"
"bytes"
"compress/gzip"
"io"
"io/ioutil"
"os"
"strings"
"testing"
)
// testMetaDataVertex is a well-formed metadata vertex naming "test" as the
// indexer; testVertex is arbitrary filler content for subsequent lines.
const testMetaDataVertex = `{"label": "metaData", "toolInfo": {"name": "test"}}`
const testVertex = `{"id": "a", "type": "edge", "label": "textDocument/references", "outV": "b", "inV": "c"}`

// TestReadIndexerName asserts the indexer name is extracted from a valid
// metadata vertex on the first line of a gzipped index.
func TestReadIndexerName(t *testing.T) {
	name, err := readIndexerName(generateTestIndex(testMetaDataVertex))
	if err != nil {
		t.Fatalf("unexpected error reading indexer name: %s", err)
	}
	if name != "test" {
		t.Errorf("unexpected indexer name. want=%s have=%s", "test", name)
	}
}

// TestReadIndexerNameMalformed asserts ErrInvalidMetaDataVertex is returned
// for first lines that are not valid metadata vertices.
func TestReadIndexerNameMalformed(t *testing.T) {
	for _, metaDataVertex := range []string{`invalid json`, `{"label": "textDocument/references"}`} {
		if _, err := readIndexerName(generateTestIndex(metaDataVertex)); err != ErrInvalidMetaDataVertex {
			t.Fatalf("unexpected error reading indexer name. want=%q have=%q", ErrInvalidMetaDataVertex, err)
		}
	}
}

// TestReadIndexerNameFromFile asserts the name is read from a file and that
// the file offset is reset to the beginning afterwards.
func TestReadIndexerNameFromFile(t *testing.T) {
	tempFile, err := ioutil.TempFile("", "")
	if err != nil {
		t.Fatalf("unexpected error creating temp file: %s", err)
	}
	defer os.Remove(tempFile.Name())
	_, _ = io.Copy(tempFile, generateTestIndex(testMetaDataVertex))

	name, err := readIndexerNameFromFile(tempFile)
	if err != nil {
		t.Fatalf("unexpected error reading indexer name: %s", err)
	}
	if name != "test" {
		t.Errorf("unexpected indexer name. want=%s have=%s", "test", name)
	}

	// Ensure reader is reset to beginning
	firstLine, err := testReadFirstLine(tempFile)
	if err != nil {
		t.Fatalf("unexpected error reading from file %s", err)
	}
	if firstLine != testMetaDataVertex {
		t.Errorf("unexpected buffer location. want=%q have=%q", testMetaDataVertex, firstLine)
	}
}
// generateTestIndex produces a gzipped reader whose first line is the given
// metadata vertex, followed by a large number of filler vertices.
func generateTestIndex(metaDataVertex string) io.Reader {
	var payload strings.Builder
	payload.WriteString(metaDataVertex)
	payload.WriteString("\n")
	for i := 0; i < 20000; i++ {
		payload.WriteString(testVertex)
		payload.WriteString("\n")
	}

	var buf bytes.Buffer
	gzipWriter := gzip.NewWriter(&buf)
	_, _ = io.Copy(gzipWriter, strings.NewReader(payload.String()))
	gzipWriter.Close()

	return bytes.NewReader(buf.Bytes())
}

// testReadFirstLine decompresses r and returns its first line of text.
func testReadFirstLine(r io.Reader) (string, error) {
	gzipReader, err := gzip.NewReader(r)
	if err != nil {
		return "", err
	}

	line, _, err := bufio.NewReader(gzipReader).ReadLine()
	if err != nil {
		return "", err
	}

	return string(line), nil
}

View File

@ -0,0 +1,27 @@
package server
import (
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/api"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
)
// APILocation is the serializable shape of a resolved location in API
// responses.
type APILocation struct {
	RepositoryID int           `json:"repositoryId"`
	Commit       string        `json:"commit"`
	Path         string        `json:"path"`
	Range        bundles.Range `json:"range"`
}

// serializeLocations converts resolved locations into their API
// representation. The error result is currently always nil but is kept so
// the signature can report resolution failures in the future.
func serializeLocations(resolvedLocations []api.ResolvedLocation) ([]APILocation, error) {
	// Preserve the nil result for empty input (serialized as JSON null,
	// matching previous behavior); otherwise preallocate to avoid repeated
	// append growth.
	if len(resolvedLocations) == 0 {
		return nil, nil
	}

	apiLocations := make([]APILocation, 0, len(resolvedLocations))
	for _, res := range resolvedLocations {
		apiLocations = append(apiLocations, APILocation{
			RepositoryID: res.Dump.RepositoryID,
			Commit:       res.Dump.Commit,
			Path:         res.Path,
			Range:        res.Range,
		})
	}

	return apiLocations, nil
}

View File

@ -0,0 +1,50 @@
package server
import (
"net"
"net/http"
"os"
"strconv"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/api"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/internal/trace/ot"
)
// Server is the precise code intel API server. It serves upload CRUD and
// code intel query endpoints backed by the database, the bundle manager,
// and the code intel API.
type Server struct {
	host                string
	port                int
	db                  db.DB
	bundleManagerClient bundles.BundleManagerClient
	api                 api.CodeIntelAPI
}

// ServerOpts bundles the dependencies required to construct a Server.
type ServerOpts struct {
	// Host is the interface the HTTP server binds to.
	Host string
	// Port is the TCP port the HTTP server listens on.
	Port int
	// DB provides access to upload and dump records.
	DB db.DB
	// BundleManagerClient communicates with the bundle manager service.
	BundleManagerClient bundles.BundleManagerClient
}

// New constructs a Server from the given options, wiring a code intel API
// over the same database and bundle manager client.
func New(opts ServerOpts) *Server {
	return &Server{
		host:                opts.Host,
		port:                opts.Port,
		db:                  opts.DB,
		bundleManagerClient: opts.BundleManagerClient,
		api:                 api.New(opts.DB, opts.BundleManagerClient),
	}
}
// Start runs the HTTP server and blocks until it stops. Any listen error
// other than http.ErrServerClosed terminates the process.
func (s *Server) Start() {
	// strconv.Itoa is the idiomatic int-to-string conversion; no need for
	// FormatInt plus an int64 cast.
	addr := net.JoinHostPort(s.host, strconv.Itoa(s.port))
	handler := ot.Middleware(s.handler())
	server := &http.Server{Addr: addr, Handler: handler}

	if err := server.ListenAndServe(); err != http.ErrServerClosed {
		log15.Error("Failed to start server", "error", err)
		os.Exit(1)
	}
}

View File

@ -0,0 +1,90 @@
package server
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"net/url"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/inconshreveable/log15"
"github.com/tomnomnom/linkheader"
)
func getQuery(r *http.Request, name string) string {
return r.URL.Query().Get(name)
}
func getQueryInt(r *http.Request, name string) int {
value, _ := strconv.Atoi(r.URL.Query().Get(name))
return value
}
func getQueryIntDefault(r *http.Request, name string, defaultValue int) int {
value, err := strconv.Atoi(r.URL.Query().Get(name))
if err != nil {
value = defaultValue
}
return value
}
func getQueryBool(r *http.Request, name string) bool {
value, _ := strconv.ParseBool(r.URL.Query().Get(name))
return value
}
// makeNextLink returns a Link header value with rel="next" pointing at the
// same URL with the given query parameters overridden. The input URL is
// not modified.
func makeNextLink(u *url.URL, newQueryValues map[string]interface{}) string {
	// Work on a shallow copy: the caller usually passes r.URL, and the
	// original code mutated it (RawQuery) as a hidden side effect. The
	// parameter is also renamed so it no longer shadows the net/url package.
	next := *u

	q := next.Query()
	for k, v := range newQueryValues {
		q.Set(k, fmt.Sprintf("%v", v))
	}
	next.RawQuery = q.Encode()

	header := linkheader.Link{
		URL: next.String(),
		Rel: "next",
	}
	return header.String()
}
// idFromRequest returns the database id from the request URL's path. This method
// must only be called from routes containing the `id:[0-9]+` pattern, as the error
// return from ParseInt is not checked.
func idFromRequest(r *http.Request) int64 {
	value, _ := strconv.ParseInt(mux.Vars(r)["id"], 10, 64)
	return value
}
// copyAll writes the contents of r to w and logs on write failure.
func copyAll(w http.ResponseWriter, r io.Reader) {
	_, err := io.Copy(w, r)
	if err != nil {
		log15.Error("Failed to write payload to client", "error", err)
	}
}
// writeJSON writes the JSON-encoded payload to w and logs on write failure.
// If there is an encoding error, then a 500-level status is written to w.
func writeJSON(w http.ResponseWriter, payload interface{}) {
	encoded, err := json.Marshal(payload)
	if err != nil {
		log15.Error("Failed to serialize result", "error", err)
		http.Error(w, fmt.Sprintf("failed to serialize result: %s", err.Error()), http.StatusInternalServerError)
		return
	}

	copyAll(w, bytes.NewReader(encoded))
}
// sanitizeRoot normalizes an upload root: the empty string and "/" become
// "", and every other value is returned with a trailing slash.
func sanitizeRoot(s string) string {
	switch {
	case s == "", s == "/":
		return ""
	case strings.HasSuffix(s, "/"):
		return s
	default:
		return s + "/"
	}
}

View File

@ -0,0 +1,79 @@
package main
import (
"log"
"os"
"os/signal"
"syscall"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/bundles"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/db"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/janitor"
"github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server/internal/server"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/debugserver"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/tracer"
)
// main wires together and starts the API server, the janitor, and the
// debug server, then blocks waiting for termination signals.
func main() {
	env.Lock()
	env.HandleHelpFlag()
	tracer.Init()

	// rawJanitorInterval and rawBundleManagerURL are presumably env.Get
	// declarations elsewhere in this package — confirm there.
	var (
		janitorInterval  = mustParseInterval(rawJanitorInterval, "PRECISE_CODE_INTEL_JANITOR_INTERVAL")
		bundleManagerURL = mustGet(rawBundleManagerURL, "PRECISE_CODE_INTEL_BUNDLE_MANAGER_URL")
	)

	db := mustInitializeDatabase()

	// Bind only to loopback in insecure dev mode.
	host := ""
	if env.InsecureDev {
		host = "127.0.0.1"
	}

	serverInst := server.New(server.ServerOpts{
		Host:                host,
		Port:                3186,
		DB:                  db,
		BundleManagerClient: bundles.New(bundleManagerURL),
	})

	janitorInst := janitor.NewJanitor(janitor.JanitorOpts{
		DB:              db,
		JanitorInterval: janitorInterval,
	})

	go serverInst.Start()
	go janitorInst.Start()
	go debugserver.Start()
	waitForSignal()
}
// mustInitializeDatabase connects to Postgres using the configured DSN and
// exits the process on failure. It also watches for DSN changes and
// restarts the process when one is detected, since the connection cannot
// be re-established in place.
func mustInitializeDatabase() db.DB {
	postgresDSN := conf.Get().ServiceConnections.PostgresDSN
	conf.Watch(func() {
		if newDSN := conf.Get().ServiceConnections.PostgresDSN; postgresDSN != newDSN {
			log.Fatalf("Detected repository DSN change, restarting to take effect: %s", newDSN)
		}
	})

	db, err := db.New(postgresDSN)
	if err != nil {
		log.Fatalf("failed to initialize db store: %s", err)
	}

	return db
}
// waitForSignal blocks until two termination signals have been received,
// then exits the process with status 0.
func waitForSignal() {
	signals := make(chan os.Signal, 2)
	// Also handle SIGTERM: it is the signal container runtimes send on
	// shutdown (and the one tini forwards); previously it was not caught,
	// so the default action would kill the process immediately.
	signal.Notify(signals, syscall.SIGINT, syscall.SIGHUP, syscall.SIGTERM)

	for i := 0; i < 2; i++ {
		<-signals
	}

	os.Exit(0)
}

View File

@ -0,0 +1 @@
1f8b0800000000000013d5974d4ec3301085ef9275903c7f9e310740dca1620115120811166d5615776f365569e5b66e12c746df2a1b67e679dee465d774fdf7f3ebe6e3a9efd6dbcf9f6ed33c826f9bb77efdf5be1d1e56ae9d1386c0e4b83532f3ce663dfb2fe88282e0e473c655089e8c33f6c6c7b208d5e793f16629c5de7c0a1023a862aa1227125e824884994af518b7081038ad45f63ab8a6c6b0cb4a9777e00181958d7cc2e81d485993e88d495872749ae493a588af3a48367dbd9cf720e44d5d805c3e1764b432b2790537cc74cad731cd3006a1c675887ea87e5a06f8ff731d679c63179c5921a520e8961445b299fd9cb9a2717e2607308c7c14ef8d8a995bbc18df535d02c57b98973131e9481db71a237ed31505ac1cc6b8e7acfc3f5a2fbf7be990244eef100000

View File

@ -0,0 +1 @@
1f8b0800000000000013ed97410ac2301045efd27585643233193d80780771a14550c4ba68bb2adeddec14a1306ab053286f95f2493e3ff969db177577ddec9bd3baababf67cab9b62e5b92c0e5d7539b669b075655e2488b093ccb3667538ba83210244b6ebce1ae0d06e58f66b30a363011e234a60c3a7cd781966b4102088dd8d9c4617bec7738c11d20792429a0a4751a364a2a0d1a15b6a64402a7b2ad1d073786e2fb06020542e3a627c04e99d8b0ae11f027c59ec83f866def101c1a72365f9b6f1a6ddd9c26e52b67fbda6d0839fd8dd1f632fe760aa100000

View File

@ -0,0 +1 @@
1f8b0800000000000013ed974d6ac3301085efe2750a9aff510f507287d2451b0a09a5ee22f12ae4ee1581800d216952c9964bf95602db7a7abcd18cf74ddb7d2e5fb7eba7ae5ded365fedb679045d346fddeae37d9716cf613117304403c151f6e251cfc57ef5213508ee3aa6ae1f6957042e270a8811ccf0ba3f67df0e589b5fc3c341b071e23c176a70235d8ff93ee6948a36dc17df3ea8ce242c39b5e500d2dd25f61b557067715f86439cd22a21750b116ac8733d38c439187263932d11df3ea94990317875bd8c2132854baa0419bdb43fe700252f6b97fcd7760f221166fa7b96109a4e11e02130b982130f69dc3776d2e32f0ba8a5e13cebc0940395d47f27573104ab1bdd6ea17c65bf1cbe0125fb0eb6ab100000

View File

@ -0,0 +1 @@
1f8b0800000000000013ed97cd6ac3300c80df25e716ac1f4bea1ea0ec1dc60e5b186c94658736a7b2779f7b2a29f5d2ac4ee541f94e0127fe2c5996b36fbafef3f165fbbeeebb76f7f1d56d9b079045f3dab79bb75d7a780a8bb2189949b0c25f2d6a78c11876312354c9d989424891f511cb8382c0fe521878183808e86f75a4feaab873209d8d63439669c32b1b09df329d28c614398efb9dbc775a18e5813445d4a96273c361559b520922325abde7884f71cc41659d16829232985b5703cc659461c514fcc375a63440c8fcc5ee4c07881154b39bae06f22531cb6ceeeb1dd888a6ec5c705d1a3277c02446fa7bdbffedffeb3a70f2c5ed1cffa128aee2f9fb070af8abf9b1100000

View File

@ -0,0 +1 @@
1f8b0800000000000013dd97cf0e82300cc6df853326eb9f75c50730be83f1a0c44463c4037032bebb249e5043200c36ccefb4cbdaef4bbbae8fa4a86fdb4379ded4455e5dee4599ac41d2e458e7d753d51c7626f50943c664d8eb9d6dd0640e2c4e18a10b10d2e9d4a1610d24ac9546f00cde00318273e8cf13226b99291e81605c2cc9c441971bcdc3153abde128a98ae92e611465b26c97a8af3f844ebe7d008fed1d8a4f0d96449dc9e0bf5a5b1c98a696c78e3f852c465f5010789cb6e517b23f2c32ea3c8684e8363b5bb4b0ffdd218cfe5c61b403f0f7e8ea0f0457e09755f3563a569228f69529896425f3cd104dd3af4bfbe70b54fbb4fda8100000

View File

@ -0,0 +1 @@
1f8b0800000000000013d557cb6ac3400cfc179f53583d56d2e6034aff21e49086424b8973487c0afdf7f8103001a7f143b637ccc118e3d57876465e5d8ab23a7cec4edfef55b93fff1ccb53b10659159fd5fef7eb5cdf6cc2ca17466612cc79d506189242c4f13c6f57084aca60c89351eefb81dca847a8329d96c3b00c27204650c5ae95ef649c0b5d2bb68704a836e37867e786f11de135347943606523e9613ca31424caec5c17494777404bcc518c29f20262792292988604be969658afdbd44046cb777bbbe5c420796a0479d826875d01d1fa3ffa40108fe3555b7a9f6346d70e2e1507e776f89bf9627c2f238a91995c83def39c382d40c8f8c1a34c28ce8d67edffff392c5fcda6198b181253f09a4d970e876fd243fbccbefdbb027ceba5bff6100000

View File

@ -0,0 +1 @@
1f8b0800000000000013dd974d4ec3400c85ef927591c67f63870320ee8058940a0984088b362bc4dd995d5229a33aed0426d1b74a6465ec273f8ff3dd74fde7e3fef8f6d07787d3fb57776cee21ee9a97fef0f17a4a0f4f61571623b318acf0570730b40a82b7e7b95886d7d6c5434a841a6b4bf0ef01620455f42a7126616d4cfb020882dedecc5b621d6adc21b0b2519cd1724b8fc61c55fb2279c06df0b520144d430b4bb5b220a3d52b9acf1b06ed3aac3e05444d1753daa432b58d4393fb44739163a2a4c671c471683d61289ea86c0d6741b9f723eb62342661e7a123f91cdd5c5e43c1348dd91158504501ba7cdc0c15b701910833959c057317c7850bcc2ef3dbbbf97c5cb33c0dd4ab59ddbf6dffed8aa2169fe4f9e7173e984c74f0100000

View File

@ -0,0 +1 @@
1f8b0800000000000013dd973d0ec2300c85efd2b948f14f6c870320ee80180021811065804e88bbd3858d4aa88d885bbd2943ace72f76123faba6bdae77f7d3aa6d0e8ff3adb9574b90badab787cbf1d12d36a19e8a30248588d9e241505206432e9eda2741b6e226fa44a8f27f77408ca08a7eb90c7506d4d55fbe62f622233309638e6b6e4c8c529028c57d78127c696814638a3c715291c43424c85bc512bbb8c5731b278394130af8e0e1e16102d1ee89ec0192fbd7f4ab22329a073a3df60677e8f09d7e35fed6228a9199e68706848c4b9bf0a405022b1b49cfb0e2659a2a2f86c414a6ce236b4f43f83e6d6f5f6f54320597aa100000

View File

@ -0,0 +1 @@
1f8b0800000000000013dd57cb4ec33010fc979c8be47d7877cd0720fe0171800a0984088736a78a7fc74242552aa7711abb09684e91fc981defec6e0e4ddb7ddc3fed5eefba76bb7ffb6c77cd2dc8a679eeb6ef2ffbf8f1e03665616426ce0a9f7a04baa0e0713ecf8c355c2d8a545c7ca444a8324450145c94f89adcb2f80b02d72305c408aa989b583d397f0e70b826c9d2360102a7f373fb3f610d6ac48a39b6e42666bfb291707ee92b552a518cc9b31f6779b2efd423e501f10aaf5389f54ec8b6fc14b00b7348cd85273175016a25b74746abfdb69723cf2c06610de61fc3c44e5cdf714aca608b75bb61c3320426778ed715f336711508595dd57c35c7ff451079cf4c2525993a26560e70708cafd3d50655595c895ff42b3f88c6b7ca98aefaa82d9df8d89f2fde7deedf6d1e70f28497c2d21629eaf7241ebfbe01fcefbb05f7100000

View File

@ -0,0 +1 @@
1f8b0800000000000013d5964d0ac2301085efd27585cc4f26a90710ef202eb4088a5817b52bf1ee16c15dc158924ccab7eaa67d6f3a6f669e5537dcb687febc19baf671b9777db506a9abe3d05e4f8ff16167ea9830344c866b4fde8bf151df1d9779da40c8730675844ef4aa876a5f9ea9d7f0ef621159cb4c5adeca4f44d98c534b5bc29715023bf624015d17068a67b26c53780c0a472e74e75a3a00f3b8b2c8e8750a280ecc38c242b65ffc80e40265949e63c12f8f5c2d3e074b8e1a8b465b470ad0340eecd24eb23f3d4e6c3e204670aee0ae0bcf04a82bd5a7dcff387d94147437a5884229de3eec5f6fb32efbf9a9100000

View File

@ -0,0 +1 @@
1f8b0800000000000013ed974d0ac2400c85efd2b5c2e46f927a00f10ee2428ba08875a15d897777967531b652451be45b0d3c48f2482633d7a26e8e8bf579376feaeab23fd5e7620671526c9aeab0bda4c3324cc602865241f0eb79bcbf2eb6af27f13b0031822afaf3040882faebdf217873c3c82c067fad3b047037ca42d13494e0ad7b8761508ed710889a764e7a1a656a6b4bd3be16cd29db44499dd243c7a1ec2343e9a3cad6f020ea0e168d49b867c89679828cf67ce23d382840dde15ef0d0074422cc34de7b205b186af4b6c68630456065a3f8ffbb38e7f3c3bcbadd01cd909fc0a7100000

View File

@ -0,0 +1 @@
1f8b0800000000000013ed57cb0ac23010fc979e15b28f64b77e80f80fe2414550c47a687b12ffdd1e9482545113eda23287904b76329909cc212beadd645eaec775b1ac36fba2cc461006d9a25e6e5755b399ba415a28a906a7894f4dcaf0bc8213120645ee9dd23508255893d022a716e8f8f3e41e9d683f1369f02bf71c22b0b05278c273bd18b497a15184833279f6a177267610bca7560f8f8c6af7515f89461ce0ef950b208808de10045d2ee0316e00da75decbc1f01027ca37a1f969545c6e44112046681c6dc374404d63e816c6722e3e8ffb7dcaae52ef69380c39934bd5316d4522f22eaebb7acf8e270cc7c14ab7100000

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
1f8b0800000000000013755849ae25c70dbc4bafeb034ce6ec0318be83a085d41060c3506b21f54af0dd15413287fa9021e8f77baf72e0108c20ebcf2fdfbefffaaf9f7efff73fbf7ffbfac77f7efbf6fb977f8ce7cbcfdfbffef7973ff0f907adeda9293f35b7d16596f2b4da441e79b48d926be1732d3ac6935aefbd747db2f63604df1feee6da56b1ffc9b9d652b23e49b13a174dbdf3133eb79c0a8e18798c26b6378ff20cd1917802be4ee5bff8cbc3b508eef94852248deecfb46982791ff8dbcbc815fb7935ae2baa38eb51993d55e58a842d53ed689526a526ac4ed2732f75e6e391280f693d09ec82eb53611c3e9887cf15055bcced696879dee1a1f9a368af6a0fc403c5dbed7cdbc77b1517655c25d759f52969d6ace35931a131537be6ee326adfc68e871723fa3997b20c6885c1e4c22161f528b5a452991e4b6aa2c3cb6284567987e25a5c360b0cb240462822c8bce79863b1292ac8b69d939b64958837dcc259254b7b46da3e9adf65b4ceb8db41c483963ca5449496dd7c8478343bc6a30b8f01243e9fc4e24aec36ee4a19f6aac1d57d0560361c65ff879b3b018573cb2422539f881cd1b3206bdb27f63fadb99bef14477d2478db98c0709ad6ac4fe67e20b5c02643aae2cf6c1bb5bc158f5bc0c66233f6092be3adbc36b0a218d100c2f1700586394122e6db50e139f83a053f252f15b3f17309218659e7e5912544c39c5db5c74c841018935609f29a6ca55c69553704ff7bf8c52c1b6635c18fc367b604eb4243f8f68ae53a501b805180b93bf1bc07060087cb3cf291787c0913fb8706bc4a5c9354bf7a5733af3603517852d3a42f66b29d66366e736a97a235070a17a3c0888e98a50613e8637fa5cf565511673a1ed76a67d90059a5bc8865c3f1debf82605e798d6bd2c95b339803c666986406e2693f89d89bcc9b341ba374407d87d24a74e76a0ee600ec724aa445400ba961972209b3695fa9aa451af5a1b69a36b8c0012cd24844a67c8c08f78ede3a3d283324a0d517c2e562a36d54d3998284abcc59a6a566ac1a249e5cd4582f2957053f91019c939ce28281caa5598852051118e4567e4cbf2694cf20437356ad953bd8a88c36376bafc25cc133c3bb940379f338cfcc5adafe39a627b695bb1252886c43559bf87ea28905fd05dc1755de845636875c8f61f84bbefcd152c2372401f23cab53af97c2d0849858c48c12faaccdcfbd6836ea9b97e036479bd50172ec5fec4fd4cc801c3f9bdc8dd4e8da2ebd555472419800b3aa007a8caac5297a100e2e4ca43b646683c2c84ec56006a6acbd9473a283c3a889fcf940b39d11976f0438804f4c11d70e915085e4c232ba13cc622af10c7dcacec5a0bcd99919d29546745919b2e332578237b7221c
218f26c00f67a0edbc31071308f99d85315b96bcf5cf22cce41d793ddc5452d24b23ac3218b9d908440d6752976a542c66838718b7e16431e178739bd797b7011fd76517ee9728661866713432c69fbcc47fad58590eb6a63558ef218db06ca9606ae17c97914ea7e56ecf8d89dbd2d1aa788be7bff6680cac29f2bae461c3e2c7b01fea823648460eaee8d5eb5695717c905b1433626e7df3ca079e024c46ccb7186f34b109940620582776755a0cad4e396b4f8659ed14b41daed54ee2c9ccc3c83de59cea25c821a25761476b51023e6d8e46beb496e8b46da8eeadcfc8b5beaa3bbe4517b9f4daa8ccf13fae9f5d2dd8dc8fe7b460602598ca9e31198954a81335139e2d96d78d1fd6fcd01df90bcce59123f9d4543a461fe99c46dafb665b87ff110f20432bc93150971af7795f6ece4477e1f860332e7ed988ba0a9eb6ada19e577316d285f627f99cc076c60a8e74b9ba0a6e887edd42182d9d1bc9de31b4a0dfd0e3229455d1968d0b5a0c03126d0a6631d367db0b9e43c0d52610343f971e9a06a025c16f6b0ab15faf02bf7aad53592a66596debc0483c49a48cb4753b235ed39b71acecb5e784bc23e79fdba615e3b106d5c8eb9954d7a4e20188c1435ec0f5823665b1d1f0668cce1b8c3808b6285c6399d42eb9ade006b3e09e4c924f7d1c93bcb0470c5833f50afba2a2769d0b1475b6c5d5798e30b4dd75e8e2dbca5d9a1184ca4b4ad67bf18e48d56191c5a43a7a6e1b676bad782e101f96ca1e5c720d9e59f21b604393cce1ed2d717f2336abbb8a4adcd8726e3d0ac2864bdf5030ee2a6bca5e4eda1dd3daff43fb1f57b5bcba20c67637cc98e101a65d9d65ba9dea4ce1c7c31364a0f448e2a9f95b14484d4cd96e08d115e7bbb9dcca20ffafe34ff968f48edf1db6edaf27ecd3eb89fcaab5bdb648873fabc71873bd17f0212180165d11b42cf7e3a55a86ee9a8a5c7f605ba3482d2d8193bbb5bd5af24540e447d04bbd636a08c8d6f10cb5b20c34b118ade6d0cac5e6332ece88ff2079eca695f0b7a74676a5cf71680fdb8f16ab64a3e89500874a04db3500db08cd60a1a37fabc836944644295b9334dee3dfaaf7ad839752007d92e61e95364acd0c7b5571f705632bafb53f54a6800975c94ad3e6f2e9f39611a915875fb4db84782b02eac8f5c502d8332b39990e60566f65be5a920f0403dd33a6f11179914f9858c4e671b4369c728c281379b287ecc151f4b5cf697635b27f03dfb0826f34b249dc7a4b97fca5598a7752cdfb83353b3846380b2df898e00c8e99818cc938c65b4d7f2d73b7bdfe72ebe8e5624c1f76373ffb2c6904bd589e095823c80668406d4699eee9618322eafec7fffd05e9125d86bd150000

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
1f8b0800000000000013755949ae24c70dbd4baf5340cc830e20f80e8617b660c082e0f642ea95e1bb9bc19991f5f181eecaac080687f74806ebbfdfbefff8f75ffefec7bf7ef9f1fdd73f7ffbcff73fbefdbc9e6ffff8f1ebeffffc133eff353de9e975ac99766bcf792a63b5da5b1fe733fc3b3a7cffe4810f09ff6aedbdb55ae063aeade439cb7af2d3f26e35b567cc9cd61a0d36d5d59e95caca672b3c6edc03fff22967514eb3ec21d27f2ab9cdb6ea688b4f6b698f67d1571924ec82d24a1aa9f5bc8ed6bdb4b2408554ce939e0f9a963996376b89546f269998dc5f19a046733625279796b058704645cff1e1b7b473ce5805becbf558fa80bc495af30e3eebc8228781ec8c8e2a69cfdc8bfcbf9e4222d35e790e12de5349d98ecba9adce3e49de336d8f6412757959020058f2d69e8e8350e43c37854483cfb0c9c54cf74e05b1f91c641eb44fc5425d4aeeecdf4418017565255a334d1db6c6e9563b0042161c3461d0c98183c4c62092c8f52c3867249225df776f8b9e99c55d0844f1d122471cd6842d228b432820d86db554e54d4ea317888f28418016cc8110da9553afade99e5967cb8b308331b173fb4cadb8c500d53c93b2f74daa4337055efc23ced45a5622c58c446ae4f104e3d9e2a6873b0885885906b170cf05728ada0ca7a293d9d7aa6276d02d21c9d4e2beb27464d1d8e6425612356f3e779d178310b4ea1e0bc162008fdb2f5a121066ba1351ddb5e4b7c118e03b201a4f9f675e91491f94ba5c0c3b4b06ff30d4f25ca54f452283534c412737a06b032b14dd08b4be0a93231ee860ed430e1c1332c540978471192bf7d95a08021c4de9670c9538211cc745e3a000cf5a4101632d7aec938308048589f295d32c0bbb7dac1da9e38adc15129f4e92af5f186d5e4a42d88578707945fd4e6ff25cc199358199ecedc390e49787e492422ec3f41701b6464fb3df6f492d5039181a70a2cfe14b459c2b006bcd5631499c906a6c2ef8bb14c64b5cabc0a16718e23355eb5cab2f596c7bd09a5237083b18b3f38ae40f38b6048da83e5850b30fd7e92ca46928790a2b7a1a87d5d08494d17cf603a6f8e07850f9cf4044cb672da42acab0c77531946396bab726534a939a332d4ac4a93ca0aee432abf149d79cfffb107ea0eaa680956db34216b5322ac69823c699ca02a97b25f7d1464fb963b59c9d76ea799f1c144d856df9547c6e1dfac168e6a08526317fa866f6e75bd3c02e864d7739a0725f0a8a1bd845abd8cadc5dd1913d1ad5afde28218d323a53569b00e9ad28862d2a68ba7409e1c54514c67ebda3eda8a360038db9edb1e47cc2dc4ffbf7bc12993012007c353e642fb95198a99bb4f1a1344fe0bd8382eabab601798de0f1c8888986644cf4ba34
1de0dd31eb135b5775533cd515205e4cf2c7eb7df0b3c441c9c9571e871f2c29c87393d48ed12957e0b56517f6153ec04b85163bba220e80377c74e5709d73b2762dbe8b5091474b2df53e7970e0c861a1fac0aa13dc5713821136ea4681f814b0ae5fdd65140fdb4a52c46b65459a3261594a2eb169649c8cdafd9b022d5ba6d0429b3dda3665c8df037a7e344079f2bad6205a0137a0209093ebaf5b446f2cc2f29ddc201ddd8f91a00fed0188685f74ea1edb5eb1909aa3d005aedc64da71a05c1cbfe498c323b5126409f651acb4441620c95027e7e68ad2468ac8915d578b2f078aded20505dc0a8fe49cc83196389a1a5ed62b0284f6be5dce1cfbd0c735b5e13e23610b6a98ed4884b3fe087585dbc9e0c8b9c656a2aba4b183ae82b7843d64f23e61f3799b6190430799ec0b52c6321e94cc7cb8d8e26d90f0589abbf661d96623c2c5966f24eecacc95c845d4dc70934ac955cac4628c2e70d9bbe91d39143623387950f020ed261c7ec81c3181856f3c2e6b804a7e410dc5854e8c788989059e76aa4393edab6dae9004f4a358ae33832b5185e148413d6181bee32e34368eaf39c3156ac551f29c782d72f03c12adb1758deeab9917ee94a44d5b96d73d5132477792236d54a2677d511d1d7a08ed160358dc2b2542a7c9550f8d342f3fa02d9cf78902a4c329327259b66efca482615589d362eda3e7ce4d8b3a47ef1ecebe23955c704e04443a4dd476df2e6956598260adf1507a80966569c334de93ac90a9b8a2878ea183f7ac3acb4cea0522759ba419b847c4d8a3f6e378505aabf790c634b3d0b88034eab4afbab13e4e57dfd7622e6bc62ad72869b5a5b14120c117099daa194e923446d2115052a7938a86e3ee0ee415388de701e03b811e3347d2a952c10d26be202eb90896398455e099b48221765c1b485233378133eb4ebb1d28db65c69c2a4d86baa565e934d93836c112277a41cb4359ba5ae6ba3f85ab97c2e755ebb013f3ee66376440a6b7ca5d3c665ad9aae6e7e2a283e9265b8798e9d2492c89341873a2f90aa9374dab0da0c59a67d4e147edf730e2347e70cf5dcdcd78dd66bdda94e51b052af36f68f842542c17b32317fd56a1147479e9137dc2504a0fc7fde3f1b65077d10afec4e07d4b073ee9fe59441583b04d9a2611c0dca84d2650901c4adf34ffe7bb229098c601740be394dc292786bc124882f3681d38393fb0a18b5b84ec5dc3d3aaa31c6e935b165ee9b169753f1cf899b6fe0e92ce08230e8b19a529efad4305ca83f315ca655b6580261083b076fd9dcb2556b987092ae152726a12367d5d3623bb3b8ed59d624252c5d102fd96f8026cd9108baef39350ed0fc035c3ba01779c6d5275f537d6389c009b2686fe467790f3e1779bf412
b63cf8d3d5473569c55de3f0228249d39c0105889a8b7cbce52f5b7156edeeb4e7973eea3964fe7f0df6cdf9922d956ed13e353b60ed7e76f9de3bd152cfa961ecae2bdce7335f4aa5ea4e9b85b96bbae4518c8a0e28c299944a6b8979a65cd6e3ecb0cdbd50dc0417e275036efaa5dd487165daf9cd264f72f2a7999a63882b0261ca786571fbc56a5d979c00913e8491fc7a7a46c99f70575a7da1e197b71dcbf8cbf5acd033ef131a1a3c9ec6ecd3e8fcd5bc98c25a28afb0bf41298376fc71cdb8173a009a4705ce919db17b71009060b93e367b07a5bffdefff44a5c57fe71f0000

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1 @@
1f8b08000000000000137d5a49aee4b811bd4badf3036470940f60f80e8617dd0d03360c9717ddb5327c777388995421173f5312c988172f46fdff7efbfee3df7ff9e5f77ffcf9c7f7dffef8e77fbefffeed4ffdf3edd71fbffdebef7f8cef7f85523f013f507b4e25ab2bf449a9949c13ac1ff11343eea5edc70a64e81d9f8b0f7c7aeabd866ed7c32787a77ee6d52f88313df0409c1bb4d8a1dd8e5c8bce8bf489a1a596c7e2ac2ea22c747689c05bb5daa7e0f55352ed2d3c786baa5c3b8c5d6a8b61082edb418598e5678600f9d97242785a2cd03f393e398579712fefef40c63444c643c78d01e153036f850fb110ea690fcad2441960a3a170100d270011859cf7e51b6c057358f6c80d209671637dcf3dd5dcb5feb10e9bf26a7f3803c6aa402ac3c0e71335db05042d6ebcc40d591b196ec41828b53c45376adf59b245154ec2b8326f78c0e78fcd9b45d473ab3eb95dcb00d7a0e478b81f10a4d44dbdca318c909b000c2f2be940171f5db2ed33c2c95862c7e0de84763bedb8af0ca39e358efbea5c73f9c2cb1295034285276aa22f6e4367da6431bbe2749e9e085e145472ff31d476bee85d82ec8ac655c0dc3cf162e0030b091d7c0219752d777be0cf6b505a8b6b565b2e95180d3267f626cdb478514fb1d05eb984dbdb0794562fa68f970d6949d4c186b53c64161f107597c09f2fb50c6a1605249c4466ce6162937fced8c8d2c563e919adb69390d8f8141f1c2d37d0ad45c765eab5699f0fb7367cba7faed98aadc4cbd7154366fc412490cfc5bb279493e26899d4624ae399a534a9a93782506efa13020743d04b6c86ecb9e42921e252db481a62bcadc2f2b18a70ccbc2e89c641bacf587e8a66ec9e0229431cbbb03dd6b79260eaad93e800ae3a3ac6f084129f99421864724d4d1a96532ccc36836b2da24c45104e9f1e40ae0837777f89a5bc5a8270855ae6c39c26b6ca03d474267da63e6d3023e7c4cff8c495394b41c81359c7eabd01b38057983d7799023a37ae3b063ded25c4514412e3880a95477cdc39b1ad00ad4969938dcd9cf7187109bd947ec247573e097ac06b368643886918e6a178c74a327520b03b5a7fa5f2568bb304994170065bf17a4f6f1b8c5eb863b3d15948e099bcb5f5e02d8f4401027704a344158387e388b103c09a8a75fb9fe464c7c885d685370eae83c1ea096006c2126c09be2d658dbf53c04faa9bb185e69ac0a6331f42b842730d42a919ef1bb0469f335c44281d0cf68a87d682c55cd7a417a148b76c746284c6a20758be5bb963a9ba6b0f9d8a294ec6670459bbbbe54d46f4fa2eb8f8284a86fe2866d170bdd1e6656fe78d8bb3f5faa2d32419c5a3489265e71df2d15ee0259378c04ce7d8b7d63d637fb5ca02fbd22290
a332c52b9670d73ee574d2045b73efe028a24eaa676313af7e2f82712424d9ed433663dfb8b49b40d64e88b37e2ec15d10560ebf33bec0505ac8a3cfa1fe52f17112ea632a5c3ee925e3614b7cb7c86b03a0196182451dfe54c3b1dd50a60f43c844617ac28252b1c19eb69476fc76419060c77832226c6ddb0b85ef120ba4d25adc428cfc4463a1c52a495459f0b56563becbec2722610b45c2ee235b09f70ef6f659aab29ab6ccb63b78ba8e27210e34a8ceafcfb8807661dbf3970dd0eec636960ab3bb27d818c2767bcb7caa2ce58539ae605b25f2b3aadbf99745231c1230b0afb9fea7f3aeedc46862652f1bb528da7105a81dd24e7f8c33755bd1ef68720622751fa14939b4dec040c79119cfc3cd8dde948037b03c28588f2c63e8a37b2da12d0a3bfe4454a4851eabcb036aeac51241e80ef213bdd87bcb09ab03e50d29d491c3d61028ebf5537b15232ceb15758c631d78cbc7b4f72896a978317674b693a1c62aece3a8f9413aacbd5b3aac7e679739657e5953a90fba89adcc511be741285387a0bb1cbeadb9c29aea9610ed6a392d65bf1ae61572c378b6ae69b4a9fde918e63084ca84e0544461639b668ab316207c0443a98d735d6502a2ba2f41904634b3de671c036a23568960828032dede25cd69444b12b2cef44769dc90331c7d8990d16f81e31dca2022eeea3873acbaf9d1befefe82e11eade77b8279d44ba361267db71d90e52644159b25ad55a090d773ab3e59e9828e79df318b2e4622da548118bb486f0a008dba0c8dc135e26f4d56bddbf7e5e94d5af24df622f1c03d9af9491149b0f69738c08673f7d4bb9151dd8c1f81da1a48a91a2653ab70b4f3db4e8a4a65cb50a5b2ba30f8bc060e323197290875fb66742b31c977aeb12c4369a7b862b096004167ef6cbdac3bc63d86465ff335481c9efcc0b9bd1d80f0e899de40f02c47f7b8164e0d812b48d54f379a880a2f3f3a217265ada5bee4f29f2cda53fa54430243a01e1b4dd47be859e7ce8f1459c61d0d4c1ba4dbac88261912e37f9214f148356e3f6efa224977e9e7547785aa270c1fb3bc82d2b6dcea25c9d19229628f08ac1041ef987a3c8a292c8aa9374471fda28443c8116ec59da56df0eee90a9ac58f366af914918394df02f6785ea14d36cdd9f1bd1d6adb29aeaee699c1ca63c034afc325ca286668e8821c7603769cd24d31f53868efc42f20c4a7f9ad95ab0ea6a0cc45418bf674e3a9d872b0cd9aa28c38e0611abc5758537ec2184412ddd17cbcbd71396a200d8671344f0532cb51e3a6a4eb2fd58d14ca012e46f054bd3b01e81015688e199bf797d433d90e6442313e67be9379d5e6a32db507a31f7e073cdfa19b83fc1c78e7d4f5c7a48057a0f5ef0521e378eb57f56ee79b1e
5c8ab04a274a24331e6a729e9ad408537d40f2958d9ae2902135bddc4cd90d26d62b8e79c8ae7247e5ae90bd0cdcac6ece551443b5d0d9af36421bd2ce9f5b20ac8b467c5623af73687c7ba7a65117d6c61a472f0954a6efe34632d649ee98f970241b34cecfec42e36578765483d3e973bfcf06c96792bf84d647b3cc7ae77d10a0e33ad2a55c8af5fd127c4dd2c55585bcc7b41ed337327acedeca8e9096f026f40c51ce2694074546e097016e4e2f15ad69446f2ff9ed87a82dc446937c19db233975c6abf83f2acf54403632ef147c513b901965f5f966c9563536eeb084bb613cfe8fe59e8a105a15676e45f71e05ef3ab25d9f61b5f4a1ebbf7fd05444a1754d149d6fd2ee67b2e79b7fd3221147a341035daf10a08029c708a6bad3ffffa4668262c075a6cc6654bedb6a943c1e2ddcea6d0fb9bee3e3a5ee4d2739d5d77c3739a4ec739e9ccf41e16a342afa17d95d5eb68b21d53f65e8ab263444d21e824f0c3288336157875854859b596917a8189e523d6e744afcd79e1efef6bfff038d9186fabe280000

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -0,0 +1,59 @@
agile
approaches
bring
capitalize
cloud
collaborative
content
corporate
disruptive
diversity
domination
empowerment
ensure
evolved
forward
foster
frameworks
fruit
further
generated
generation
going
grow
hanging
heading
holistic
innovation
iterative
level
leverage
multiple
normal
offshoring
organically
overall
overviews
proactive
proposition
provide
real-time
robust
runway
solution
strategies
strategy
streamlined
survival
synopsis
table
thinking
touchpoints
user
value
via
view
will
win-win
workplace
world

View File

@ -0,0 +1,106 @@
☹️
👹
👺
👻
👽
👿
💀
💩
🤐
🤑
🤒
🤓
🤔
🤕
🤗
🤠
🤡
🤢
🤣
🤤
🤥
🤧
🤨
🤩
🤪
🤫
🤬
🤭
🤮
🤯
🥰
🥳
🥴
🥵
🥶
🥺
🧐
😀
😁
😂
😃
😄
😅
😆
😇
😈
😉
😊
😋
😌
😍
😎
😏
😐
😑
😒
😓
😔
😕
😖
😗
😘
😙
😚
😛
😜
😝
😞
😟
😠
😡
😢
😣
😤
😥
😦
😧
😨
😩
😪
😫
😬
😭
😮
😯
😰
😱
😲
😳
😴
😵
😶
😷
😸
😹
😺
😻
😼
😽
😾
😿
🙀
🙁
🙂
🙃
🙄

View File

@ -0,0 +1,145 @@
a
ac
ad
adipiscing
aenean
aliquam
aliquet
amet
ante
aptent
arcu
at
augue
bibendum
blandit
class
commodo
condimentum
consectetur
consequat
conubia
convallis
cras
curabitur
diam
dictum
dictumst
dolor
donec
dui
ed
efficitur
egestas
eget
elit
enim
erat
eros
est
et
eu
euismod
ex
facilisis
faucibus
felis
fermentum
feugiat
gravida
habitasse
hac
hendrerit
himenaeos
iaculis
id
imperdiet
in
inceptos
integer
ipsum
justo
lacus
laoreet
lectus
leo
libero
ligula
litora
lobortis
lorem
luctus
maecenas
magna
massa
mattis
mauris
maximus
metus
mi
molestie
mollis
morbi
nam
nec
neque
nibh
nisi
nisl
non
nostra
nulla
nullam
nunc
odio
orci
pellentesque
per
pharetra
placerat
platea
porta
porttitor
posuere
praesent
pretium
proin
pulvinar
purus
quam
quis
rhoncus
risus
sagittis
sapien
scelerisque
sed
sem
semper
sit
sociosqu
sodales
sollicitudin
suscipit
suspendisse
taciti
tellus
tempor
tincidunt
torquent
tristique
turpis
ullamcorper
ultrices
ultricies
urna
ut
varius
vel
velit
vestibulum
vitae
vivamus
viverra
volutpat
vulputate

View File

@ -7,7 +7,7 @@ echo "--- go dbconn import"
set -euf -o pipefail
allowed='^github.com/sourcegraph/sourcegraph/cmd/frontend|github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend|github.com/sourcegraph/sourcegraph/enterprise/cmd/repo-updater'
allowed='^github.com/sourcegraph/sourcegraph/cmd/frontend|github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend|github.com/sourcegraph/sourcegraph/enterprise/cmd/repo-updater|github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-api-server'
# shellcheck disable=SC2016
template='{{with $pkg := .}}{{ range $pkg.Deps }}{{ printf "%s imports %s\n" $pkg.ImportPath .}}{{end}}{{end}}'

View File

@ -11,6 +11,9 @@ import (
// go-bindata is used in lots of our gen.go files
_ "github.com/kevinburke/go-bindata/go-bindata"
// go-mockgen is used to codegen mockable interfaces, used in precise code intel tests
_ "github.com/efritz/go-mockgen"
// vfsgendev is used for packing static assets into .go files.
_ "github.com/shurcooL/vfsgen/cmd/vfsgendev"

3
go.mod
View File

@ -23,6 +23,7 @@ require (
github.com/dgraph-io/ristretto v0.0.2
github.com/dnaeon/go-vcr v1.0.1
github.com/docker/docker v1.4.2-0.20200213202729-31a86c4ab209
github.com/efritz/go-mockgen v0.0.0-20200420163638-0338f3dfc81c
github.com/emersion/go-imap v1.0.4
github.com/ericchiang/k8s v1.2.0
github.com/fatih/astrewrite v0.0.0-20191207154002-9094e544fcef
@ -143,7 +144,7 @@ require (
golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a
golang.org/x/sys v0.0.0-20200331124033-c3d80250170d
golang.org/x/time v0.0.0-20191024005414-555d28b269f0
golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b
golang.org/x/tools v0.0.0-20200420001825-978e26b7c37c
google.golang.org/api v0.21.0 // indirect
google.golang.org/genproto v0.0.0-20200403120447-c50568487044 // indirect
gopkg.in/jpoehls/gophermail.v0 v0.0.0-20160410235621-62941eab772c

31
go.sum
View File

@ -76,13 +76,24 @@ github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrU
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d h1:G0m3OIz70MZUWq3EgK3CesDbo8upS2Vm9/P3FtgI+Jk=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs=
github.com/alecthomas/kingpin v2.2.6+incompatible h1:5svnBTFgJjZvGKyYBtMB0+m5wvrbUHiqye8wRJMlnYI=
github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d h1:UQZhZ2O0vMHr2cI+DC1Mbh0TJxzA3RcLoMsFw+aXw7E=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/andygrunwald/go-gerrit v0.0.0-20191101112536-3f5e365ccf57/go.mod h1:0iuRQp6WJ44ts+iihy5E/WlPqfg5RNeQxOmzRkxCdtk=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ=
github.com/aphistic/sweet v0.0.0-20180618201346-68e18ab55a67/go.mod h1:iggGz3Cujwru5rGKuOi4u1rfI+38suzhVVJj8Ey7Q3M=
github.com/aphistic/sweet v0.2.0 h1:I4z+fAUqvKfvZV/CHi5dV0QuwbmIvYYFDjG0Ss5QpAs=
github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys=
github.com/aphistic/sweet-junit v0.0.0-20190314030539-8d7e248096c2 h1:qDCG/a4+mCcRqj+QHTc1RNncar6rpg0oGz9ynH4IRME=
github.com/aphistic/sweet-junit v0.0.0-20190314030539-8d7e248096c2/go.mod h1:+eL69RqmiKF2Jm3poefxF/ZyVNGXFdSsPq3ScBFtX9s=
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/avelino/slugify v0.0.0-20180501145920-855f152bd774 h1:HrMVYtly2IVqg9EBooHsakQ256ueojP7QuG32K71X/U=
@ -147,6 +158,8 @@ github.com/crewjam/httperr v0.0.0-20190612203328-a946449404da/go.mod h1:+rmNIXRv
github.com/crewjam/saml v0.4.0 h1:gvSlboe4BO1APaU2eDdsbql3itRat310Q5qs2Seim2k=
github.com/crewjam/saml v0.4.0/go.mod h1:geQUbAAwmTKNJFDzoXaTssZHY26O89PHIm3K3YWjWnI=
github.com/cznic/mathutil v0.0.0-20180504122225-ca4c9f2c1369/go.mod h1:e6NPNENfs9mPDVNRekM7lKScauxd5kXTr1Mfyig6TDM=
github.com/dave/jennifer v1.4.0 h1:tNJFJmLDVTLu+v05mVZ88RINa3vQqnyyWkTKWYz0CwE=
github.com/dave/jennifer v1.4.0/go.mod h1:fIb+770HOpJ2fmN9EPPKOqm1vMGhB+TwXKMZhrIygKg=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -176,10 +189,16 @@ github.com/docker/go-units v0.3.3 h1:Xk8S3Xj5sLGlG5g67hJmYMmUgXv5N4PhkjJHHqrwnTk
github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw=
github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo=
github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk=
github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs=
github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU=
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I=
github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/efritz/go-genlib v0.0.0-20200420163202-9c3914df9594 h1:wAEx9E0qHx43LRLQqP5VFTijgLnvvNM53FqLu7G+VQI=
github.com/efritz/go-genlib v0.0.0-20200420163202-9c3914df9594/go.mod h1:aMnaM7yzn10TOdj5sGpElY90l/XT3eL+Sj7z7D1V8q8=
github.com/efritz/go-mockgen v0.0.0-20200420163638-0338f3dfc81c h1:MPakRO9Z5EFrMOnZm1we5ngCCxlJuMEiGCRbesXLVls=
github.com/efritz/go-mockgen v0.0.0-20200420163638-0338f3dfc81c/go.mod h1:K2AZyo7TmaAzRhr4EvUaEHcpumiT0UEAARFnSBid7yE=
github.com/emersion/go-imap v1.0.4 h1:uiCAIHM6Z5Jwkma1zdNDWWXxSCqb+/xHBkHflD7XBro=
github.com/emersion/go-imap v1.0.4/go.mod h1:yKASt+C3ZiDAiCSssxg9caIckWF/JG7ZQTO7GAmvicU=
github.com/emersion/go-message v0.11.1/go.mod h1:C4jnca5HOTo4bGN9YdqNQM9sITuT3Y0K6bSUw9RklvY=
@ -647,11 +666,14 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2 h1:YocNLcTBdEdvY3iDK6jfWXvEaM5OCKkjxPKoJRdB3Gg=
github.com/mcuadros/go-version v0.0.0-20190830083331-035f6764e8d2/go.mod h1:76rfSfYPWj01Z85hUf/ituArm797mNKcvINh1OlsZKo=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b h1:j7+1HpAFS1zy5+Q4qx1fWh90gTKwiN4QCGoY9TWyyO4=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/microcosm-cc/bluemonday v1.0.2 h1:5lPfLTTAvAbtS0VqT+94yOtFnGfUWYyx0+iToC3Os3s=
github.com/microcosm-cc/bluemonday v1.0.2/go.mod h1:iVP4YcDBq+n/5fb23BhYFvIMq/leAFZyRl6bYmGDlGc=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-ps v0.0.0-20190716172923-621e5597135b/go.mod h1:r1VsdOzOPt1ZSrGZWFoNhsAedKnEd6r9Np1+5blZCWk=
github.com/mitchellh/go-wordwrap v1.0.0 h1:6GlHJ/LTGMrIJbwgdqdl2eEH8o+Exx/0m8ir9Gns0u4=
github.com/mitchellh/go-wordwrap v1.0.0/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
github.com/mitchellh/mapstructure v0.0.0-20170523030023-d0303fe80992/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE=
@ -685,11 +707,13 @@ github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLA
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.8.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw=
github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.0 h1:Iw5WCbBcaAAd0fpRb1c9r5YCylv4XDoCSigm1zLevwU=
github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.8.1 h1:C5Dqfs/LeauYDX0jJXIe2SWmwCbGzx9yF8C8xy3Lh34=
github.com/onsi/gomega v1.8.1/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA=
@ -1005,8 +1029,10 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf
golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190313024323-a1f597ede03a/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@ -1212,6 +1238,7 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn
golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191224055732-dd894d0a8a40/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200102140908-9497f49d5709/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28=
@ -1230,8 +1257,8 @@ golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjs
golang.org/x/tools v0.0.0-20200311090712-aafaee8bce8c/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw=
golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8=
golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b h1:AFZdJUT7jJYXQEC29hYH/WZkoV7+KhwxQGmdZ19yYoY=
golang.org/x/tools v0.0.0-20200403190813-44a64ad78b9b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200420001825-978e26b7c37c h1:JzwTM5XxGxiCwZEIZQPG46csyhWQxQlu2uSi3bEza34=
golang.org/x/tools v0.0.0-20200420001825-978e26b7c37c/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7 h1:9zdDQZ7Thm29KFXgAX/+yaf3eVbP7djjWp/dXAppNCc=