chore: Rename Index -> AutoIndexJob (#63955)

Also renames a bunch of related types such as
- config.IndexJob -> config.AutoIndexJobSpec
- IndexLoader -> AutoIndexJobLoader

and so on.
This commit is contained in:
Varun Gandhi 2024-07-22 22:18:40 +08:00 committed by GitHub
parent 5fdaa583e0
commit a6b6844678
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
81 changed files with 5126 additions and 5076 deletions

View File

@ -53,7 +53,7 @@ func Init(
siteAdminChecker := sharedresolvers.NewSiteAdminChecker(db)
locationResolverFactory := gitresolvers.NewCachedLocationResolverFactory(repoStore, codeIntelServices.GitserverClient)
uploadLoaderFactory := uploadgraphql.NewUploadLoaderFactory(codeIntelServices.UploadsService)
indexLoaderFactory := uploadgraphql.NewIndexLoaderFactory(codeIntelServices.UploadsService)
autoIndexJobLoaderFactory := uploadgraphql.NewAutoIndexJobLoaderFactory(codeIntelServices.UploadsService)
preciseIndexResolverFactory := uploadgraphql.NewPreciseIndexResolverFactory(
codeIntelServices.UploadsService,
codeIntelServices.PoliciesService,
@ -67,7 +67,7 @@ func Init(
codeIntelServices.AutoIndexingService,
siteAdminChecker,
uploadLoaderFactory,
indexLoaderFactory,
autoIndexJobLoaderFactory,
locationResolverFactory,
preciseIndexResolverFactory,
)
@ -80,7 +80,7 @@ func Init(
siteAdminChecker,
repoStore,
uploadLoaderFactory,
indexLoaderFactory,
autoIndexJobLoaderFactory,
preciseIndexResolverFactory,
locationResolverFactory,
ConfigInst.HunkCacheSize,
@ -103,7 +103,7 @@ func Init(
codeIntelServices.AutoIndexingService,
siteAdminChecker,
uploadLoaderFactory,
indexLoaderFactory,
autoIndexJobLoaderFactory,
locationResolverFactory,
preciseIndexResolverFactory,
)

View File

@ -34,7 +34,7 @@ type MultiHandler struct {
executorStore database.ExecutorStore
jobTokenStore executorstore.JobTokenStore
metricsStore metricsstore.DistributedStore
CodeIntelQueueHandler QueueHandler[uploadsshared.Index]
AutoIndexQueueHandler QueueHandler[uploadsshared.AutoIndexJob]
BatchesQueueHandler QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob]
DequeueCache *rcache.Cache
dequeueCacheConfig *schema.DequeueCacheConfig
@ -46,7 +46,7 @@ func NewMultiHandler(
executorStore database.ExecutorStore,
jobTokenStore executorstore.JobTokenStore,
metricsStore metricsstore.DistributedStore,
codeIntelQueueHandler QueueHandler[uploadsshared.Index],
autoIndexQueueHandler QueueHandler[uploadsshared.AutoIndexJob],
batchesQueueHandler QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob],
) MultiHandler {
siteConfig := conf.Get().SiteConfiguration
@ -59,7 +59,7 @@ func NewMultiHandler(
executorStore: executorStore,
jobTokenStore: jobTokenStore,
metricsStore: metricsStore,
CodeIntelQueueHandler: codeIntelQueueHandler,
AutoIndexQueueHandler: autoIndexQueueHandler,
BatchesQueueHandler: batchesQueueHandler,
DequeueCache: dequeueCache,
dequeueCacheConfig: dequeueCacheConfig,
@ -167,8 +167,8 @@ func (m *MultiHandler) dequeue(ctx context.Context, req executortypes.DequeueReq
logger.Error("Failed to transform record", log.String("queue", selectedQueue), log.Error(err))
return executortypes.Job{}, false, err
}
case m.CodeIntelQueueHandler.Name:
record, dequeued, err := m.CodeIntelQueueHandler.Store.Dequeue(ctx, req.ExecutorName, nil)
case m.AutoIndexQueueHandler.Name:
record, dequeued, err := m.AutoIndexQueueHandler.Store.Dequeue(ctx, req.ExecutorName, nil)
if err != nil {
err = errors.Wrapf(err, "dbworkerstore.Dequeue %s", selectedQueue)
logger.Error("Failed to dequeue", log.String("queue", selectedQueue), log.Error(err))
@ -180,9 +180,9 @@ func (m *MultiHandler) dequeue(ctx context.Context, req executortypes.DequeueReq
return executortypes.Job{}, false, nil
}
job, err = m.CodeIntelQueueHandler.RecordTransformer(ctx, req.Version, record, resourceMetadata)
job, err = m.AutoIndexQueueHandler.RecordTransformer(ctx, req.Version, record, resourceMetadata)
if err != nil {
markErr := markRecordAsFailed(ctx, m.CodeIntelQueueHandler.Store, record.RecordID(), err, logger)
markErr := markRecordAsFailed(ctx, m.AutoIndexQueueHandler.Store, record.RecordID(), err, logger)
err = errors.Wrapf(errors.Append(err, markErr), "RecordTransformer %s", selectedQueue)
logger.Error("Failed to transform record", log.String("queue", selectedQueue), log.Error(err))
return executortypes.Job{}, false, err
@ -262,7 +262,7 @@ func (m *MultiHandler) SelectEligibleQueues(queues []string) ([]string, error) {
switch queue {
case m.BatchesQueueHandler.Name:
limit = m.dequeueCacheConfig.Batches.Limit
case m.CodeIntelQueueHandler.Name:
case m.AutoIndexQueueHandler.Name:
limit = m.dequeueCacheConfig.Codeintel.Limit
}
if len(dequeues) < limit {
@ -286,8 +286,8 @@ func (m *MultiHandler) SelectNonEmptyQueues(ctx context.Context, queueNames []st
switch queue {
case m.BatchesQueueHandler.Name:
count, err = m.BatchesQueueHandler.Store.QueuedCount(ctx, false)
case m.CodeIntelQueueHandler.Name:
count, err = m.CodeIntelQueueHandler.Store.QueuedCount(ctx, false)
case m.AutoIndexQueueHandler.Name:
count, err = m.AutoIndexQueueHandler.Store.QueuedCount(ctx, false)
}
if err != nil {
m.logger.Error("fetching queue size", log.Error(err), log.String("queue", queue))
@ -381,8 +381,8 @@ func (m *MultiHandler) heartbeat(ctx context.Context, executor types.Executor, i
switch queue.QueueName {
case m.BatchesQueueHandler.Name:
known, cancel, err = m.BatchesQueueHandler.Store.Heartbeat(ctx, queue.JobIDs, heartbeatOptions)
case m.CodeIntelQueueHandler.Name:
known, cancel, err = m.CodeIntelQueueHandler.Store.Heartbeat(ctx, queue.JobIDs, heartbeatOptions)
case m.AutoIndexQueueHandler.Name:
known, cancel, err = m.AutoIndexQueueHandler.Store.Heartbeat(ctx, queue.JobIDs, heartbeatOptions)
}
if err != nil {

View File

@ -49,11 +49,11 @@ type dequeueTestCase struct {
// only valid status code for this field is http.StatusNoContent
expectedStatusCode int
mockFunc func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore)
assertionFunc func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore)
mockFunc func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore)
assertionFunc func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore)
dequeueEvents []dequeueEvent
codeintelTransformerFunc handler.TransformerFunc[uploadsshared.Index]
codeintelTransformerFunc handler.TransformerFunc[uploadsshared.AutoIndexJob]
batchesTransformerFunc handler.TransformerFunc[*btypes.BatchSpecWorkspaceExecutionJob]
}
@ -62,7 +62,7 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Dequeue one record for each queue",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel", "batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
// QueuedCount gets called for each queue in queues on every invocation of HandleDequeue to filter empty queues,
// so two calls are mocked for two dequeue events. Functionally it doesn't really matter what these return, but
// for the sake of accuracy, the codeintel store returns 1 less. The batches store returns the same value because
@ -72,12 +72,12 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
batchesMockStore.QueuedCountFunc.PushReturn(2, nil)
batchesMockStore.QueuedCountFunc.PushReturn(2, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
jobTokenStore.CreateFunc.PushReturn("token1", nil)
batchesMockStore.DequeueFunc.PushReturn(&btypes.BatchSpecWorkspaceExecutionJob{ID: 2}, true, nil)
jobTokenStore.CreateFunc.PushReturn("token2", nil)
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.CreateFunc.History(), 2)
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 2)
@ -110,16 +110,16 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Dequeue only codeintel record when requesting codeintel queue and batches record exists",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
// On the second event, the queue will be empty and return an empty job
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
// Mock a non-empty queue that will never be reached because it's not requested in the dequeue body
batchesMockStore.QueuedCountFunc.PushReturn(1, nil)
batchesMockStore.DequeueFunc.PushReturn(&btypes.BatchSpecWorkspaceExecutionJob{ID: 2}, true, nil)
jobTokenStore.CreateFunc.PushReturn("token1", nil)
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.CreateFunc.History(), 1)
// The queue will be empty after the first dequeue event, so no second dequeue happens
@ -147,14 +147,14 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Dequeue only codeintel record when requesting both queues and batches record doesn't exists",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel", "batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.QueuedCountFunc.PushReturn(0, nil)
batchesMockStore.QueuedCountFunc.PushReturn(0, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
jobTokenStore.CreateFunc.PushReturn("token1", nil)
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.CreateFunc.History(), 1)
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 2)
@ -182,11 +182,11 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Nothing to dequeue",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel","batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{}, false, nil)
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{}, false, nil)
batchesMockStore.DequeueFunc.PushReturn(&btypes.BatchSpecWorkspaceExecutionJob{}, false, nil)
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.DequeueFunc.History(), 1)
require.Len(t, batchesMockStore.DequeueFunc.History(), 1)
require.Len(t, jobTokenStore.CreateFunc.History(), 0)
@ -196,7 +196,7 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "No queue names provided",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": []}`,
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 0)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 0)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 0)
@ -208,7 +208,7 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Invalid queue name",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["invalidqueue"]}`,
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 0)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 0)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 0)
@ -225,7 +225,7 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Invalid version",
body: `{"executorName": "test-executor", "version":"\n1.2", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel","batches"]}`,
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 0)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 0)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 0)
@ -242,11 +242,11 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Dequeue error codeintel",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{}, false, errors.New("failed to dequeue"))
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{}, false, errors.New("failed to dequeue"))
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 1)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 0)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 1)
@ -264,11 +264,11 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Dequeue error batches",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
batchesMockStore.QueuedCountFunc.PushReturn(1, nil)
batchesMockStore.DequeueFunc.PushReturn(&btypes.BatchSpecWorkspaceExecutionJob{}, false, errors.New("failed to dequeue"))
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 0)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 1)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 0)
@ -286,12 +286,12 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Failed to transform record codeintel",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
codeintelMockStore.MarkFailedFunc.PushReturn(true, nil)
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 1)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 0)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 1)
@ -309,19 +309,19 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
expectedResponseBody: `{"error":"RecordTransformer codeintel: failed"}`,
},
},
codeintelTransformerFunc: func(ctx context.Context, version string, record uploadsshared.Index, resourceMetadata handler.ResourceMetadata) (executortypes.Job, error) {
codeintelTransformerFunc: func(ctx context.Context, version string, record uploadsshared.AutoIndexJob, resourceMetadata handler.ResourceMetadata) (executortypes.Job, error) {
return executortypes.Job{}, errors.New("failed")
},
},
{
name: "Failed to transform record batches",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
batchesMockStore.QueuedCountFunc.PushReturn(1, nil)
batchesMockStore.DequeueFunc.PushReturn(&btypes.BatchSpecWorkspaceExecutionJob{ID: 1}, true, nil)
batchesMockStore.MarkFailedFunc.PushReturn(true, nil)
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 0)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 1)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 0)
@ -346,12 +346,12 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Failed to mark record as failed codeintel",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
codeintelMockStore.MarkFailedFunc.PushReturn(true, errors.New("failed to mark"))
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 1)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 0)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 1)
@ -369,19 +369,19 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
expectedResponseBody: `{"error":"RecordTransformer codeintel: 2 errors occurred:\n\t* failed\n\t* failed to mark"}`,
},
},
codeintelTransformerFunc: func(ctx context.Context, version string, record uploadsshared.Index, resourceMetadata handler.ResourceMetadata) (executortypes.Job, error) {
codeintelTransformerFunc: func(ctx context.Context, version string, record uploadsshared.AutoIndexJob, resourceMetadata handler.ResourceMetadata) (executortypes.Job, error) {
return executortypes.Job{}, errors.New("failed")
},
},
{
name: "Failed to mark record as failed batches",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
batchesMockStore.QueuedCountFunc.PushReturn(1, nil)
batchesMockStore.DequeueFunc.PushReturn(&btypes.BatchSpecWorkspaceExecutionJob{ID: 1}, true, nil)
batchesMockStore.MarkFailedFunc.PushReturn(true, errors.New("failed to mark"))
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 0)
require.Len(t, batchesMockStore.QueuedCountFunc.History(), 1)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 0)
@ -405,12 +405,12 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Failed to create job token",
body: `{"executorName": "test-executor", "numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel","batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
jobTokenStore.CreateFunc.PushReturn("", errors.New("failed to create token"))
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 1)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 1)
require.Len(t, jobTokenStore.CreateFunc.History(), 1)
@ -427,13 +427,13 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Job token already exists",
body: `{"executorName": "test-executor","numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel","batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
jobTokenStore.CreateFunc.PushReturn("", executorstore.ErrJobTokenAlreadyCreated)
jobTokenStore.RegenerateFunc.PushReturn("somenewtoken", nil)
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 1)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 1)
require.Len(t, jobTokenStore.CreateFunc.History(), 1)
@ -452,13 +452,13 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
{
name: "Failed to regenerate token",
body: `{"executorName": "test-executor","numCPUs": 1, "memory": "1GB", "diskSpace": "10GB","queues": ["codeintel","batches"]}`,
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
codeintelMockStore.QueuedCountFunc.PushReturn(1, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.Index{ID: 1}, true, nil)
codeintelMockStore.DequeueFunc.PushReturn(uploadsshared.AutoIndexJob{ID: 1}, true, nil)
jobTokenStore.CreateFunc.PushReturn("", executorstore.ErrJobTokenAlreadyCreated)
jobTokenStore.RegenerateFunc.PushReturn("", errors.New("failed to regen token"))
},
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
assertionFunc: func(t *testing.T, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob], jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, codeintelMockStore.QueuedCountFunc.History(), 1)
require.Len(t, codeintelMockStore.DequeueFunc.History(), 1)
require.Len(t, jobTokenStore.CreateFunc.History(), 1)
@ -481,14 +481,14 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
rcache.SetupForTest(t)
jobTokenStore := executorstore.NewMockJobTokenStore()
codeIntelMockStore := dbworkerstoremocks.NewMockStore[uploadsshared.Index]()
codeIntelMockStore := dbworkerstoremocks.NewMockStore[uploadsshared.AutoIndexJob]()
batchesMockStore := dbworkerstoremocks.NewMockStore[*btypes.BatchSpecWorkspaceExecutionJob]()
mh := handler.NewMultiHandler(
dbmocks.NewMockExecutorStore(),
jobTokenStore,
metricsstore.NewMockDistributedStore(),
handler.QueueHandler[uploadsshared.Index]{Name: "codeintel", Store: codeIntelMockStore, RecordTransformer: transformerFunc[uploadsshared.Index]},
handler.QueueHandler[uploadsshared.AutoIndexJob]{Name: "codeintel", Store: codeIntelMockStore, RecordTransformer: transformerFunc[uploadsshared.AutoIndexJob]},
handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob]{Name: "batches", Store: batchesMockStore, RecordTransformer: transformerFunc[*btypes.BatchSpecWorkspaceExecutionJob]},
)
@ -504,7 +504,7 @@ func TestMultiHandler_HandleDequeue(t *testing.T) {
} else {
for _, event := range test.dequeueEvents {
if test.codeintelTransformerFunc != nil {
mh.CodeIntelQueueHandler.RecordTransformer = test.codeintelTransformerFunc
mh.AutoIndexQueueHandler.RecordTransformer = test.codeintelTransformerFunc
}
if test.batchesTransformerFunc != nil {
mh.BatchesQueueHandler.RecordTransformer = test.batchesTransformerFunc
@ -530,22 +530,22 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
tests := []struct {
name string
body string
mockFunc func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob])
mockFunc func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob])
expectedStatusCode int
expectedResponseBody string
assertionFunc func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob])
assertionFunc func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob])
}{
{
name: "Heartbeat for multiple queues",
body: `{"executorName": "test-executor", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [{"queueName": "codeintel", "jobIds": ["42", "7"]}, {"queueName": "batches", "jobIds": ["43", "8"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
executorStore.UpsertHeartbeatFunc.PushReturn(nil)
codeintelMockStore.HeartbeatFunc.PushReturn([]string{"42", "7"}, nil, nil)
batchesMockStore.HeartbeatFunc.PushReturn([]string{"43", "8"}, nil, nil)
},
expectedStatusCode: http.StatusOK,
expectedResponseBody: `{"knownIds":["42-codeintel", "7-codeintel", "43-batches", "8-batches"],"cancelIds":null}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 1)
assert.Equal(
@ -575,13 +575,13 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
{
name: "Heartbeat for single queue",
body: `{"executorName": "test-executor", "queueNames": ["codeintel"], "jobIdsByQueue": [{"queueName": "codeintel", "jobIds": ["42", "7"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
executorStore.UpsertHeartbeatFunc.PushReturn(nil)
codeintelMockStore.HeartbeatFunc.PushReturn([]string{"42", "7"}, nil, nil)
},
expectedStatusCode: http.StatusOK,
expectedResponseBody: `{"knownIds":["42-codeintel", "7-codeintel"],"cancelIds":null}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 1)
assert.Equal(
@ -609,12 +609,12 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
{
name: "No running jobs",
body: `{"executorName": "test-executor", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
executorStore.UpsertHeartbeatFunc.PushReturn(nil)
},
expectedStatusCode: http.StatusOK,
expectedResponseBody: `{"knownIds":null,"cancelIds":null}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 1)
assert.Equal(
@ -639,14 +639,14 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
{
name: "Known and canceled IDs",
body: `{"executorName": "test-executor", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [{"queueName": "codeintel", "jobIds": ["42", "7"]}, {"queueName": "batches", "jobIds": ["43", "8"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
executorStore.UpsertHeartbeatFunc.PushReturn(nil)
codeintelMockStore.HeartbeatFunc.PushReturn([]string{"42"}, []string{"7"}, nil)
batchesMockStore.HeartbeatFunc.PushReturn([]string{"43"}, []string{"8"}, nil)
},
expectedStatusCode: http.StatusOK,
expectedResponseBody: `{"knownIds":["42-codeintel", "43-batches"],"cancelIds":["7-codeintel", "8-batches"]}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 1)
assert.Equal(
@ -678,7 +678,7 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
body: `{"executorName": "", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [{"queueName": "codeintel", "jobIds": ["42", "7"]}, {"queueName": "batches", "jobIds": ["43", "8"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
expectedStatusCode: http.StatusInternalServerError,
expectedResponseBody: `{"error":"worker hostname cannot be empty"}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 0)
require.Len(t, codeintelMockStore.HeartbeatFunc.History(), 0)
require.Len(t, batchesMockStore.HeartbeatFunc.History(), 0)
@ -689,7 +689,7 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
body: `{"executorName": "test-executor", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [{"queueName": "foo", "jobIds": ["42"]}, {"queueName": "bar", "jobIds": ["43"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
expectedStatusCode: http.StatusInternalServerError,
expectedResponseBody: `{"error":"unsupported queue name(s) 'foo, bar' submitted in queueJobIds, executor is configured for queues 'codeintel, batches'"}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 0)
require.Len(t, codeintelMockStore.HeartbeatFunc.History(), 0)
require.Len(t, batchesMockStore.HeartbeatFunc.History(), 0)
@ -700,7 +700,7 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
body: `{"executorName": "test-executor", "jobIdsByQueue": [{"queueName": "codeintel", "jobIds": ["42"]}, {"queueName": "batches", "jobIds": ["43"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
expectedStatusCode: http.StatusInternalServerError,
expectedResponseBody: `{"error":"queueNames must be set for multi-queue heartbeats"}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 0)
require.Len(t, codeintelMockStore.HeartbeatFunc.History(), 0)
require.Len(t, batchesMockStore.HeartbeatFunc.History(), 0)
@ -709,14 +709,14 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
{
name: "Failed to upsert heartbeat",
body: `{"executorName": "test-executor", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [{"queueName": "codeintel", "jobIds": ["42", "7"]}, {"queueName": "batches", "jobIds": ["43", "8"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
executorStore.UpsertHeartbeatFunc.PushReturn(errors.Newf("failed"))
codeintelMockStore.HeartbeatFunc.PushReturn([]string{"42", "7"}, nil, nil)
batchesMockStore.HeartbeatFunc.PushReturn([]string{"43", "8"}, nil, nil)
},
expectedStatusCode: http.StatusOK,
expectedResponseBody: `{"knownIds":["42-codeintel", "7-codeintel", "43-batches", "8-batches"],"cancelIds":null}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 1)
assert.Equal(
t,
@ -745,14 +745,14 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
{
name: "Failed to heartbeat first queue, second is ignored",
body: `{"executorName": "test-executor", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [{"queueName": "batches", "jobIds": ["43", "8"]}, {"queueName": "codeintel", "jobIds": ["42", "7"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
executorStore.UpsertHeartbeatFunc.PushReturn(nil)
codeintelMockStore.HeartbeatFunc.PushReturn([]string{"42", "7"}, nil, nil)
batchesMockStore.HeartbeatFunc.PushReturn(nil, nil, errors.New("failed"))
},
expectedStatusCode: http.StatusInternalServerError,
expectedResponseBody: `{"error":"multiqueue.UpsertHeartbeat: failed"}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 1)
assert.Equal(
t,
@ -780,14 +780,14 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
{
name: "First queue successful heartbeat, failed to heartbeat second queue",
body: `{"executorName": "test-executor", "queueNames": ["codeintel", "batches"], "jobIdsByQueue": [{"queueName": "codeintel", "jobIds": ["42", "7"]}, {"queueName": "batches", "jobIds": ["43", "8"]}], "os": "test-os", "architecture": "test-arch", "dockerVersion": "1.0", "executorVersion": "2.0", "gitVersion": "3.0", "igniteVersion": "4.0", "srcCliVersion": "5.0", "prometheusMetrics": ""}`,
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
executorStore.UpsertHeartbeatFunc.PushReturn(nil)
codeintelMockStore.HeartbeatFunc.PushReturn([]string{"42", "7"}, nil, nil)
batchesMockStore.HeartbeatFunc.PushReturn(nil, nil, errors.New("failed"))
},
expectedStatusCode: http.StatusInternalServerError,
expectedResponseBody: `{"error":"multiqueue.UpsertHeartbeat: failed"}`,
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
assertionFunc: func(t *testing.T, metricsStore *metricsstore.MockDistributedStore, executorStore *dbmocks.MockExecutorStore, codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
require.Len(t, executorStore.UpsertHeartbeatFunc.History(), 1)
assert.Equal(
t,
@ -819,14 +819,14 @@ func TestMultiHandler_HandleHeartbeat(t *testing.T) {
t.Run(test.name, func(t *testing.T) {
executorStore := dbmocks.NewMockExecutorStore()
metricsStore := metricsstore.NewMockDistributedStore()
codeIntelMockStore := dbworkerstoremocks.NewMockStore[uploadsshared.Index]()
codeIntelMockStore := dbworkerstoremocks.NewMockStore[uploadsshared.AutoIndexJob]()
batchesMockStore := dbworkerstoremocks.NewMockStore[*btypes.BatchSpecWorkspaceExecutionJob]()
mh := handler.NewMultiHandler(
executorStore,
executorstore.NewMockJobTokenStore(),
metricsStore,
handler.QueueHandler[uploadsshared.Index]{Name: "codeintel", Store: codeIntelMockStore},
handler.QueueHandler[uploadsshared.AutoIndexJob]{Name: "codeintel", Store: codeIntelMockStore},
handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob]{Name: "batches", Store: batchesMockStore},
)
@ -923,7 +923,7 @@ func TestMultiHandler_SelectQueueForDequeueing(t *testing.T) {
nil,
nil,
nil,
handler.QueueHandler[uploadsshared.Index]{Name: "codeintel"},
handler.QueueHandler[uploadsshared.AutoIndexJob]{Name: "codeintel"},
handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob]{Name: "batches"},
)
@ -1015,7 +1015,7 @@ func TestMultiHandler_SelectEligibleQueues(t *testing.T) {
nil,
nil,
nil,
handler.QueueHandler[uploadsshared.Index]{Name: "codeintel"},
handler.QueueHandler[uploadsshared.AutoIndexJob]{Name: "codeintel"},
handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob]{Name: "batches"},
)
@ -1062,13 +1062,13 @@ func TestMultiHandler_SelectNonEmptyQueues(t *testing.T) {
tests := []struct {
name string
queueNames []string
mockFunc func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob])
mockFunc func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob])
expectedQueues []string
}{
{
name: "Both contain jobs",
queueNames: []string{"batches", "codeintel"},
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
codeintelMockStore.QueuedCountFunc.PushReturn(5, nil)
batchesMockStore.QueuedCountFunc.PushReturn(5, nil)
},
@ -1077,7 +1077,7 @@ func TestMultiHandler_SelectNonEmptyQueues(t *testing.T) {
{
name: "Only batches contains jobs",
queueNames: []string{"batches", "codeintel"},
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
codeintelMockStore.QueuedCountFunc.PushReturn(0, nil)
batchesMockStore.QueuedCountFunc.PushReturn(5, nil)
},
@ -1086,7 +1086,7 @@ func TestMultiHandler_SelectNonEmptyQueues(t *testing.T) {
{
name: "None contain jobs",
queueNames: []string{"batches", "codeintel"},
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.Index], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
mockFunc: func(codeintelMockStore *dbworkerstoremocks.MockStore[uploadsshared.AutoIndexJob], batchesMockStore *dbworkerstoremocks.MockStore[*btypes.BatchSpecWorkspaceExecutionJob]) {
codeintelMockStore.QueuedCountFunc.PushReturn(0, nil)
batchesMockStore.QueuedCountFunc.PushReturn(0, nil)
},
@ -1096,10 +1096,10 @@ func TestMultiHandler_SelectNonEmptyQueues(t *testing.T) {
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
ctx := context.Background()
codeIntelMockStore := dbworkerstoremocks.NewMockStore[uploadsshared.Index]()
codeIntelMockStore := dbworkerstoremocks.NewMockStore[uploadsshared.AutoIndexJob]()
batchesMockStore := dbworkerstoremocks.NewMockStore[*btypes.BatchSpecWorkspaceExecutionJob]()
m := &handler.MultiHandler{
CodeIntelQueueHandler: handler.QueueHandler[uploadsshared.Index]{Name: "codeintel", Store: codeIntelMockStore},
AutoIndexQueueHandler: handler.QueueHandler[uploadsshared.AutoIndexJob]{Name: "codeintel", Store: codeIntelMockStore},
BatchesQueueHandler: handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob]{Name: "batches", Store: batchesMockStore},
}

View File

@ -41,14 +41,14 @@ func newExecutorQueuesHandler(
// in the worker.
//
// Note: In order register a new queue type please change the validate() check code in cmd/executor/config.go
codeIntelQueueHandler := codeintelqueue.QueueHandler(observationCtx, db, accessToken)
autoIndexQueueHandler := codeintelqueue.QueueHandler(observationCtx, db, accessToken)
batchesQueueHandler := batches.QueueHandler(observationCtx, db, accessToken)
codeintelHandler := handler.NewHandler(executorStore, jobTokenStore, metricsStore, codeIntelQueueHandler)
codeintelHandler := handler.NewHandler(executorStore, jobTokenStore, metricsStore, autoIndexQueueHandler)
batchesHandler := handler.NewHandler(executorStore, jobTokenStore, metricsStore, batchesQueueHandler)
handlers := []handler.ExecutorHandler{codeintelHandler, batchesHandler}
multiHandler := handler.NewMultiHandler(executorStore, jobTokenStore, metricsStore, codeIntelQueueHandler, batchesQueueHandler)
multiHandler := handler.NewMultiHandler(executorStore, jobTokenStore, metricsStore, autoIndexQueueHandler, batchesQueueHandler)
// Auth middleware
executorAuth := executorAuthMiddleware(logger, accessToken)

View File

@ -12,14 +12,14 @@ import (
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
)
func QueueHandler(observationCtx *observation.Context, db database.DB, accessToken func() string) handler.QueueHandler[uploadsshared.Index] {
recordTransformer := func(ctx context.Context, _ string, record uploadsshared.Index, resourceMetadata handler.ResourceMetadata) (apiclient.Job, error) {
func QueueHandler(observationCtx *observation.Context, db database.DB, accessToken func() string) handler.QueueHandler[uploadsshared.AutoIndexJob] {
recordTransformer := func(ctx context.Context, _ string, record uploadsshared.AutoIndexJob, resourceMetadata handler.ResourceMetadata) (apiclient.Job, error) {
return transformRecord(ctx, db, record, resourceMetadata, accessToken())
}
store := dbworkerstore.New(observationCtx, db.Handle(), autoindexing.IndexWorkerStoreOptions)
return handler.QueueHandler[uploadsshared.Index]{
return handler.QueueHandler[uploadsshared.AutoIndexJob]{
Name: "codeintel",
Store: store,
RecordTransformer: recordTransformer,

View File

@ -38,16 +38,16 @@ func (e *accessLogTransformer) Create(ctx context.Context, log *database.Executo
return e.ExecutorSecretAccessLogCreator.Create(ctx, log)
}
func transformRecord(ctx context.Context, db database.DB, index uploadsshared.Index, resourceMetadata handler.ResourceMetadata, accessToken string) (apiclient.Job, error) {
func transformRecord(ctx context.Context, db database.DB, autoIndexJob uploadsshared.AutoIndexJob, resourceMetadata handler.ResourceMetadata, accessToken string) (apiclient.Job, error) {
resourceEnvironment := makeResourceEnvironment(resourceMetadata)
var secrets []*database.ExecutorSecret
var err error
if len(index.RequestedEnvVars) > 0 {
if len(autoIndexJob.RequestedEnvVars) > 0 {
secretsStore := db.ExecutorSecrets(keyring.Default().ExecutorSecretKey)
secrets, _, err = secretsStore.List(ctx, database.ExecutorSecretScopeCodeIntel, database.ExecutorSecretsListOpts{
// Note: No namespace set, codeintel secrets are only available in the global namespace for now.
Keys: index.RequestedEnvVars,
Keys: autoIndexJob.RequestedEnvVars,
})
if err != nil {
return apiclient.Job{}, err
@ -73,8 +73,8 @@ func transformRecord(ctx context.Context, db database.DB, index uploadsshared.In
envVars := append(resourceEnvironment, secretEnvVars...)
dockerSteps := make([]apiclient.DockerStep, 0, len(index.DockerSteps)+2)
for i, dockerStep := range index.DockerSteps {
dockerSteps := make([]apiclient.DockerStep, 0, len(autoIndexJob.DockerSteps)+2)
for i, dockerStep := range autoIndexJob.DockerSteps {
dockerSteps = append(dockerSteps, apiclient.DockerStep{
Key: fmt.Sprintf("pre-index.%d", i),
Image: dockerStep.Image,
@ -84,12 +84,12 @@ func transformRecord(ctx context.Context, db database.DB, index uploadsshared.In
})
}
if index.Indexer != "" {
if autoIndexJob.Indexer != "" {
dockerSteps = append(dockerSteps, apiclient.DockerStep{
Key: "indexer",
Image: index.Indexer,
Commands: append(index.LocalSteps, shellquote.Join(index.IndexerArgs...)),
Dir: index.Root,
Image: autoIndexJob.Indexer,
Commands: append(autoIndexJob.LocalSteps, shellquote.Join(autoIndexJob.IndexerArgs...)),
Dir: autoIndexJob.Root,
Env: envVars,
})
}
@ -99,18 +99,18 @@ func transformRecord(ctx context.Context, db database.DB, index uploadsshared.In
redactedAuthorizationHeader := makeAuthHeaderValue("REDACTED")
srcCliImage := fmt.Sprintf("%s:%s", conf.ExecutorsSrcCLIImage(), conf.ExecutorsSrcCLIImageTag())
root := index.Root
root := autoIndexJob.Root
if root == "" {
root = "."
}
outfile := index.Outfile
outfile := autoIndexJob.Outfile
if outfile == "" {
outfile = defaultOutfile
}
// TODO: Temporary workaround. LSIF-go needs tags, but they make git fetching slower.
fetchTags := strings.HasPrefix(index.Indexer, conf.ExecutorsLsifGoImage())
fetchTags := strings.HasPrefix(autoIndexJob.Indexer, conf.ExecutorsLsifGoImage())
dockerSteps = append(dockerSteps, apiclient.DockerStep{
Key: "upload",
@ -121,15 +121,15 @@ func transformRecord(ctx context.Context, db database.DB, index uploadsshared.In
"code-intel",
"upload",
"-no-progress",
"-repo", index.RepositoryName,
"-commit", index.Commit,
"-repo", autoIndexJob.RepositoryName,
"-commit", autoIndexJob.Commit,
"-root", root,
"-upload-route", uploadRoute,
"-file", outfile,
"-associated-index-id", strconv.Itoa(index.ID),
"-associated-index-id", strconv.Itoa(autoIndexJob.ID),
),
},
Dir: index.Root,
Dir: autoIndexJob.Root,
Env: []string{
fmt.Sprintf("SRC_ENDPOINT=%s", frontendURL),
fmt.Sprintf("SRC_HEADER_AUTHORIZATION=%s", authorizationHeader),
@ -151,9 +151,9 @@ func transformRecord(ctx context.Context, db database.DB, index uploadsshared.In
maps.Copy(allRedactedValues, redactedEnvVars)
aj := apiclient.Job{
ID: index.ID,
Commit: index.Commit,
RepositoryName: index.RepositoryName,
ID: autoIndexJob.ID,
Commit: autoIndexJob.Commit,
RepositoryName: autoIndexJob.RepositoryName,
ShallowClone: true,
FetchTags: fetchTags,
DockerSteps: dockerSteps,

View File

@ -55,7 +55,7 @@ func TestTransformRecord(t *testing.T) {
},
} {
t.Run(testCase.name, func(t *testing.T) {
index := uploadsshared.Index{
index := uploadsshared.AutoIndexJob{
ID: 42,
Commit: "deadbeef",
RepositoryName: "linux",
@ -151,7 +151,7 @@ func TestTransformRecordWithoutIndexer(t *testing.T) {
db := dbmocks.NewMockDB()
db.ExecutorSecretsFunc.SetDefaultReturn(dbmocks.NewMockExecutorSecretStore())
index := uploadsshared.Index{
index := uploadsshared.AutoIndexJob{
ID: 42,
Commit: "deadbeef",
RepositoryName: "linux",
@ -281,7 +281,7 @@ func TestTransformRecordWithSecrets(t *testing.T) {
},
} {
t.Run(testCase.name, func(t *testing.T) {
index := uploadsshared.Index{
index := uploadsshared.AutoIndexJob{
ID: 42,
Commit: "deadbeef",
RepositoryName: "linux",
@ -392,7 +392,7 @@ func TestTransformRecordDockerAuthConfig(t *testing.T) {
}, 0, nil)
db.ExecutorSecretAccessLogsFunc.SetDefaultReturn(dbmocks.NewMockExecutorSecretAccessLogStore())
job, err := transformRecord(context.Background(), db, uploadsshared.Index{ID: 42}, handler.ResourceMetadata{}, "hunter2")
job, err := transformRecord(context.Background(), db, uploadsshared.AutoIndexJob{ID: 42}, handler.ResourceMetadata{}, "hunter2")
if err != nil {
t.Fatal(err)
}

View File

@ -17,7 +17,7 @@ type externalEmitter[T workerutil.Record] struct {
allocation QueueAllocation
}
var _ goroutine.Handler = &externalEmitter[uploadsshared.Index]{}
var _ goroutine.Handler = &externalEmitter[uploadsshared.AutoIndexJob]{}
type reporter interface {
ReportCount(ctx context.Context, queueName string, count int)

View File

@ -15,7 +15,7 @@ If no explicit configuration exists, the steps are [inferred from the repository
- [Go](https://sourcegraph.com/search?q=context:global+repo:%5Egithub%5C.com/sourcegraph/sourcegraph%24%40main+file:%5Elib/codeintel/autoindex/inference/go%5C.go+func+InferGoIndexJobs%28&patternType=literal)
- [TypeScript](https://sourcegraph.com/search?q=context:global+repo:%5Egithub%5C.com/sourcegraph/sourcegraph%24%40main+file:%5Elib/codeintel/autoindex/inference/typescript%5C.go+func+InferTypeScriptIndexJobs%28&patternType=literal)
The steps to index the repository are serialized into an index record and [inserted into a task queue](https://sourcegraph.com/search?q=context:global+repo:%5Egithub%5C.com/sourcegraph/sourcegraph%24%40main+file:%5Einternal/codeintel/stores/dbstore/indexes%5C.go+func+%28s+*Store%29+InsertIndexes%28&patternType=literal) to be processed asynchronously by a pool of task executors.
The steps to index the repository are serialized into an index record and [inserted into a task queue](https://sourcegraph.com/search?q=context:global+repo:%5Egithub%5C.com/sourcegraph/sourcegraph%24+%22func+%28s+*store%29+InsertJobs%22&patternType=keyword&sm=0) to be processed asynchronously by a pool of task executors.
## Processing

View File

@ -65,6 +65,7 @@ go_test(
"//internal/types",
"//lib/codeintel/autoindex/config",
"@com_github_google_go_cmp//cmp",
"@com_github_google_go_cmp//cmp/cmpopts",
],
)

View File

@ -30,8 +30,8 @@ type ReposStore interface {
}
type IndexEnqueuer interface {
QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) (_ []uploadsshared.Index, err error)
QueueIndexesForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) (err error)
QueueAutoIndexJobs(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) (_ []uploadsshared.AutoIndexJob, err error)
QueueAutoIndexJobsForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) (err error)
}
type UploadService interface {

View File

@ -33,18 +33,18 @@ func Test_AutoIndexingManualEnqueuedDequeueOrder(t *testing.T) {
workerstore := store.New(observation.TestContextTB(t), db.Handle(), opts)
for i, test := range []struct {
indexes []shared.Index
nextID int
jobs []shared.AutoIndexJob
nextID int
}{
{
indexes: []shared.Index{
jobs: []shared.AutoIndexJob{
{ID: 1, RepositoryID: 1, EnqueuerUserID: 51234},
{ID: 2, RepositoryID: 4},
},
nextID: 1,
},
{
indexes: []shared.Index{
jobs: []shared.AutoIndexJob{
{ID: 1, RepositoryID: 1, EnqueuerUserID: 50, State: "completed", FinishedAt: dbutil.NullTimeColumn(clock.Now().Add(-time.Hour * 3))},
{ID: 2, RepositoryID: 2},
{ID: 3, RepositoryID: 1, EnqueuerUserID: 1},
@ -56,7 +56,7 @@ func Test_AutoIndexingManualEnqueuedDequeueOrder(t *testing.T) {
if _, err := db.ExecContext(context.Background(), "TRUNCATE lsif_indexes RESTART IDENTITY CASCADE"); err != nil {
t.Fatal(err)
}
insertIndexes(t, db, test.indexes...)
insertAutoIndexJobs(t, db, test.jobs...)
job, _, err := workerstore.Dequeue(context.Background(), "borgir", nil)
if err != nil {
t.Fatal(err)
@ -69,29 +69,29 @@ func Test_AutoIndexingManualEnqueuedDequeueOrder(t *testing.T) {
}
}
func insertIndexes(t testing.TB, db database.DB, indexes ...shared.Index) {
for _, index := range indexes {
if index.Commit == "" {
index.Commit = fmt.Sprintf("%040d", index.ID)
func insertAutoIndexJobs(t testing.TB, db database.DB, jobs ...shared.AutoIndexJob) {
for _, job := range jobs {
if job.Commit == "" {
job.Commit = fmt.Sprintf("%040d", job.ID)
}
if index.State == "" {
index.State = "queued"
if job.State == "" {
job.State = "queued"
}
if index.RepositoryID == 0 {
index.RepositoryID = 50
if job.RepositoryID == 0 {
job.RepositoryID = 50
}
if index.DockerSteps == nil {
index.DockerSteps = []shared.DockerStep{}
if job.DockerSteps == nil {
job.DockerSteps = []shared.DockerStep{}
}
if index.IndexerArgs == nil {
index.IndexerArgs = []string{}
if job.IndexerArgs == nil {
job.IndexerArgs = []string{}
}
if index.LocalSteps == nil {
index.LocalSteps = []string{}
if job.LocalSteps == nil {
job.LocalSteps = []string{}
}
// Ensure we have a repo for the inner join in select queries
insertRepo(t, db, index.RepositoryID, index.RepositoryName)
insertRepo(t, db, job.RepositoryID, job.RepositoryName)
query := sqlf.Sprintf(`
INSERT INTO lsif_indexes (
@ -117,26 +117,26 @@ func insertIndexes(t testing.TB, db database.DB, indexes ...shared.Index) {
enqueuer_user_id
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
`,
index.ID,
index.Commit,
index.QueuedAt,
index.State,
index.FailureMessage,
index.StartedAt,
index.FinishedAt,
index.ProcessAfter,
index.NumResets,
index.NumFailures,
index.RepositoryID,
pq.Array(index.DockerSteps),
index.Root,
index.Indexer,
pq.Array(index.IndexerArgs),
index.Outfile,
pq.Array(index.ExecutionLogs),
pq.Array(index.LocalSteps),
index.ShouldReindex,
index.EnqueuerUserID,
job.ID,
job.Commit,
job.QueuedAt,
job.State,
job.FailureMessage,
job.StartedAt,
job.FinishedAt,
job.ProcessAfter,
job.NumResets,
job.NumFailures,
job.RepositoryID,
pq.Array(job.DockerSteps),
job.Root,
job.Indexer,
pq.Array(job.IndexerArgs),
job.Outfile,
pq.Array(job.ExecutionLogs),
pq.Array(job.LocalSteps),
job.ShouldReindex,
job.EnqueuerUserID,
)
if _, err := db.ExecContext(context.Background(), query.Query(sqlf.PostgresBindVar), query.Args()...); err != nil {

View File

@ -204,8 +204,8 @@ func (h *dependencyIndexingSchedulerHandler) Handle(ctx context.Context, logger
var errs []error
for _, pkgs := range repoToPackages {
for _, pkg := range pkgs {
if err := h.indexEnqueuer.QueueIndexesForPackage(ctx, pkg); err != nil {
errs = append(errs, errors.Wrap(err, "enqueuer.QueueIndexesForPackage"))
if err := h.indexEnqueuer.QueueAutoIndexJobsForPackage(ctx, pkg); err != nil {
errs = append(errs, errors.Wrap(err, "enqueuer.QueueAutoIndexJobsForPackage"))
}
}
}

View File

@ -77,11 +77,11 @@ func TestDependencyIndexingSchedulerHandler(t *testing.T) {
t.Errorf("unexpected number of calls to extsvcStore.List. want=%d have=%d", 0, len(mockExtSvcStore.ListFunc.History()))
}
if len(indexEnqueuer.QueueIndexesForPackageFunc.History()) != 6 {
t.Errorf("unexpected number of calls to QueueIndexesForPackage. want=%d have=%d", 6, len(indexEnqueuer.QueueIndexesForPackageFunc.History()))
if len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()) != 6 {
t.Errorf("unexpected number of calls to QueueAutoIndexJobsForPackage. want=%d have=%d", 6, len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()))
} else {
var packages []dependencies.MinimialVersionedPackageRepo
for _, call := range indexEnqueuer.QueueIndexesForPackageFunc.History() {
for _, call := range indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History() {
packages = append(packages, call.Arg1)
}
sort.Slice(packages, func(i, j int) bool {
@ -169,8 +169,8 @@ func TestDependencyIndexingSchedulerHandlerRequeueNotCloned(t *testing.T) {
t.Errorf("unexpected number of calls to extsvcStore.List. want=%d have=%d", 0, len(mockExtSvcStore.ListFunc.History()))
}
if len(indexEnqueuer.QueueIndexesForPackageFunc.History()) != 0 {
t.Errorf("unexpected number of calls to QueueIndexesForPackage. want=%d have=%d", 0, len(indexEnqueuer.QueueIndexesForPackageFunc.History()))
if len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()) != 0 {
t.Errorf("unexpected number of calls to QueueAutoIndexJobsForPackage. want=%d have=%d", 0, len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()))
}
}
@ -206,8 +206,8 @@ func TestDependencyIndexingSchedulerHandlerShouldSkipRepository(t *testing.T) {
t.Fatalf("unexpected error performing update: %s", err)
}
if len(indexEnqueuer.QueueIndexesForPackageFunc.History()) != 0 {
t.Errorf("unexpected number of calls to QueueIndexesForPackage. want=%d have=%d", 0, len(indexEnqueuer.QueueIndexesForPackageFunc.History()))
if len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()) != 0 {
t.Errorf("unexpected number of calls to QueueAutoIndexJobsForPackage. want=%d have=%d", 0, len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()))
}
}
@ -254,7 +254,7 @@ func TestDependencyIndexingSchedulerHandlerNoExtsvc(t *testing.T) {
t.Fatalf("unexpected error performing update: %s", err)
}
if len(indexEnqueuer.QueueIndexesForPackageFunc.History()) != 0 {
t.Errorf("unexpected number of calls to QueueIndexesForPackage. want=%d have=%d", 0, len(indexEnqueuer.QueueIndexesForPackageFunc.History()))
if len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()) != 0 {
t.Errorf("unexpected number of calls to QueueAutoIndexJobsForPackage. want=%d have=%d", 0, len(indexEnqueuer.QueueAutoIndexJobsForPackageFunc.History()))
}
}

View File

@ -13,7 +13,7 @@ import (
// NewIndexResetter returns a background routine that periodically resets index
// records that are marked as being processed but are no longer being processed
// by a worker.
func NewIndexResetter(logger log.Logger, interval time.Duration, store dbworkerstore.Store[uploadsshared.Index], metrics *resetterMetrics) *dbworker.Resetter[uploadsshared.Index] {
func NewIndexResetter(logger log.Logger, interval time.Duration, store dbworkerstore.Store[uploadsshared.AutoIndexJob], metrics *resetterMetrics) *dbworker.Resetter[uploadsshared.AutoIndexJob] {
return dbworker.NewResetter(logger.Scoped("indexResetter"), store, dbworker.ResetterOptions{
Name: "precise_code_intel_index_worker_resetter",
Interval: interval,

View File

@ -770,24 +770,25 @@ func (c GitserverRepoStoreGetByNamesFuncCall) Results() []interface{} {
// github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing/internal/background/dependencies)
// used for unit testing.
type MockIndexEnqueuer struct {
// QueueIndexesFunc is an instance of a mock function object controlling
// the behavior of the method QueueIndexes.
QueueIndexesFunc *IndexEnqueuerQueueIndexesFunc
// QueueIndexesForPackageFunc is an instance of a mock function object
// controlling the behavior of the method QueueIndexesForPackage.
QueueIndexesForPackageFunc *IndexEnqueuerQueueIndexesForPackageFunc
// QueueAutoIndexJobsFunc is an instance of a mock function object
// controlling the behavior of the method QueueAutoIndexJobs.
QueueAutoIndexJobsFunc *IndexEnqueuerQueueAutoIndexJobsFunc
// QueueAutoIndexJobsForPackageFunc is an instance of a mock function
// object controlling the behavior of the method
// QueueAutoIndexJobsForPackage.
QueueAutoIndexJobsForPackageFunc *IndexEnqueuerQueueAutoIndexJobsForPackageFunc
}
// NewMockIndexEnqueuer creates a new mock of the IndexEnqueuer interface.
// All methods return zero values for all results, unless overwritten.
func NewMockIndexEnqueuer() *MockIndexEnqueuer {
return &MockIndexEnqueuer{
QueueIndexesFunc: &IndexEnqueuerQueueIndexesFunc{
defaultHook: func(context.Context, int, string, string, bool, bool) (r0 []shared1.Index, r1 error) {
QueueAutoIndexJobsFunc: &IndexEnqueuerQueueAutoIndexJobsFunc{
defaultHook: func(context.Context, int, string, string, bool, bool) (r0 []shared1.AutoIndexJob, r1 error) {
return
},
},
QueueIndexesForPackageFunc: &IndexEnqueuerQueueIndexesForPackageFunc{
QueueAutoIndexJobsForPackageFunc: &IndexEnqueuerQueueAutoIndexJobsForPackageFunc{
defaultHook: func(context.Context, shared.MinimialVersionedPackageRepo) (r0 error) {
return
},
@ -799,14 +800,14 @@ func NewMockIndexEnqueuer() *MockIndexEnqueuer {
// interface. All methods panic on invocation, unless overwritten.
func NewStrictMockIndexEnqueuer() *MockIndexEnqueuer {
return &MockIndexEnqueuer{
QueueIndexesFunc: &IndexEnqueuerQueueIndexesFunc{
defaultHook: func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error) {
panic("unexpected invocation of MockIndexEnqueuer.QueueIndexes")
QueueAutoIndexJobsFunc: &IndexEnqueuerQueueAutoIndexJobsFunc{
defaultHook: func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error) {
panic("unexpected invocation of MockIndexEnqueuer.QueueAutoIndexJobs")
},
},
QueueIndexesForPackageFunc: &IndexEnqueuerQueueIndexesForPackageFunc{
QueueAutoIndexJobsForPackageFunc: &IndexEnqueuerQueueAutoIndexJobsForPackageFunc{
defaultHook: func(context.Context, shared.MinimialVersionedPackageRepo) error {
panic("unexpected invocation of MockIndexEnqueuer.QueueIndexesForPackage")
panic("unexpected invocation of MockIndexEnqueuer.QueueAutoIndexJobsForPackage")
},
},
}
@ -817,44 +818,46 @@ func NewStrictMockIndexEnqueuer() *MockIndexEnqueuer {
// overwritten.
func NewMockIndexEnqueuerFrom(i IndexEnqueuer) *MockIndexEnqueuer {
return &MockIndexEnqueuer{
QueueIndexesFunc: &IndexEnqueuerQueueIndexesFunc{
defaultHook: i.QueueIndexes,
QueueAutoIndexJobsFunc: &IndexEnqueuerQueueAutoIndexJobsFunc{
defaultHook: i.QueueAutoIndexJobs,
},
QueueIndexesForPackageFunc: &IndexEnqueuerQueueIndexesForPackageFunc{
defaultHook: i.QueueIndexesForPackage,
QueueAutoIndexJobsForPackageFunc: &IndexEnqueuerQueueAutoIndexJobsForPackageFunc{
defaultHook: i.QueueAutoIndexJobsForPackage,
},
}
}
// IndexEnqueuerQueueIndexesFunc describes the behavior when the
// QueueIndexes method of the parent MockIndexEnqueuer instance is invoked.
type IndexEnqueuerQueueIndexesFunc struct {
defaultHook func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error)
hooks []func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error)
history []IndexEnqueuerQueueIndexesFuncCall
// IndexEnqueuerQueueAutoIndexJobsFunc describes the behavior when the
// QueueAutoIndexJobs method of the parent MockIndexEnqueuer instance is
// invoked.
type IndexEnqueuerQueueAutoIndexJobsFunc struct {
defaultHook func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error)
hooks []func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error)
history []IndexEnqueuerQueueAutoIndexJobsFuncCall
mutex sync.Mutex
}
// QueueIndexes delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockIndexEnqueuer) QueueIndexes(v0 context.Context, v1 int, v2 string, v3 string, v4 bool, v5 bool) ([]shared1.Index, error) {
r0, r1 := m.QueueIndexesFunc.nextHook()(v0, v1, v2, v3, v4, v5)
m.QueueIndexesFunc.appendCall(IndexEnqueuerQueueIndexesFuncCall{v0, v1, v2, v3, v4, v5, r0, r1})
// QueueAutoIndexJobs delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockIndexEnqueuer) QueueAutoIndexJobs(v0 context.Context, v1 int, v2 string, v3 string, v4 bool, v5 bool) ([]shared1.AutoIndexJob, error) {
r0, r1 := m.QueueAutoIndexJobsFunc.nextHook()(v0, v1, v2, v3, v4, v5)
m.QueueAutoIndexJobsFunc.appendCall(IndexEnqueuerQueueAutoIndexJobsFuncCall{v0, v1, v2, v3, v4, v5, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the QueueIndexes method
// of the parent MockIndexEnqueuer instance is invoked and the hook queue is
// empty.
func (f *IndexEnqueuerQueueIndexesFunc) SetDefaultHook(hook func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error)) {
// SetDefaultHook sets function that is called when the QueueAutoIndexJobs
// method of the parent MockIndexEnqueuer instance is invoked and the hook
// queue is empty.
func (f *IndexEnqueuerQueueAutoIndexJobsFunc) SetDefaultHook(hook func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// QueueIndexes method of the parent MockIndexEnqueuer instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *IndexEnqueuerQueueIndexesFunc) PushHook(hook func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error)) {
// QueueAutoIndexJobs method of the parent MockIndexEnqueuer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *IndexEnqueuerQueueAutoIndexJobsFunc) PushHook(hook func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -862,20 +865,20 @@ func (f *IndexEnqueuerQueueIndexesFunc) PushHook(hook func(context.Context, int,
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *IndexEnqueuerQueueIndexesFunc) SetDefaultReturn(r0 []shared1.Index, r1 error) {
f.SetDefaultHook(func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error) {
func (f *IndexEnqueuerQueueAutoIndexJobsFunc) SetDefaultReturn(r0 []shared1.AutoIndexJob, r1 error) {
f.SetDefaultHook(func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *IndexEnqueuerQueueIndexesFunc) PushReturn(r0 []shared1.Index, r1 error) {
f.PushHook(func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error) {
func (f *IndexEnqueuerQueueAutoIndexJobsFunc) PushReturn(r0 []shared1.AutoIndexJob, r1 error) {
f.PushHook(func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error) {
return r0, r1
})
}
func (f *IndexEnqueuerQueueIndexesFunc) nextHook() func(context.Context, int, string, string, bool, bool) ([]shared1.Index, error) {
func (f *IndexEnqueuerQueueAutoIndexJobsFunc) nextHook() func(context.Context, int, string, string, bool, bool) ([]shared1.AutoIndexJob, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -888,26 +891,27 @@ func (f *IndexEnqueuerQueueIndexesFunc) nextHook() func(context.Context, int, st
return hook
}
func (f *IndexEnqueuerQueueIndexesFunc) appendCall(r0 IndexEnqueuerQueueIndexesFuncCall) {
func (f *IndexEnqueuerQueueAutoIndexJobsFunc) appendCall(r0 IndexEnqueuerQueueAutoIndexJobsFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of IndexEnqueuerQueueIndexesFuncCall objects
// describing the invocations of this function.
func (f *IndexEnqueuerQueueIndexesFunc) History() []IndexEnqueuerQueueIndexesFuncCall {
// History returns a sequence of IndexEnqueuerQueueAutoIndexJobsFuncCall
// objects describing the invocations of this function.
func (f *IndexEnqueuerQueueAutoIndexJobsFunc) History() []IndexEnqueuerQueueAutoIndexJobsFuncCall {
f.mutex.Lock()
history := make([]IndexEnqueuerQueueIndexesFuncCall, len(f.history))
history := make([]IndexEnqueuerQueueAutoIndexJobsFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// IndexEnqueuerQueueIndexesFuncCall is an object that describes an
// invocation of method QueueIndexes on an instance of MockIndexEnqueuer.
type IndexEnqueuerQueueIndexesFuncCall struct {
// IndexEnqueuerQueueAutoIndexJobsFuncCall is an object that describes an
// invocation of method QueueAutoIndexJobs on an instance of
// MockIndexEnqueuer.
type IndexEnqueuerQueueAutoIndexJobsFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
@ -928,7 +932,7 @@ type IndexEnqueuerQueueIndexesFuncCall struct {
Arg5 bool
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 []shared1.Index
Result0 []shared1.AutoIndexJob
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
@ -936,47 +940,47 @@ type IndexEnqueuerQueueIndexesFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c IndexEnqueuerQueueIndexesFuncCall) Args() []interface{} {
func (c IndexEnqueuerQueueAutoIndexJobsFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3, c.Arg4, c.Arg5}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c IndexEnqueuerQueueIndexesFuncCall) Results() []interface{} {
func (c IndexEnqueuerQueueAutoIndexJobsFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// IndexEnqueuerQueueIndexesForPackageFunc describes the behavior when the
// QueueIndexesForPackage method of the parent MockIndexEnqueuer instance is
// invoked.
type IndexEnqueuerQueueIndexesForPackageFunc struct {
// IndexEnqueuerQueueAutoIndexJobsForPackageFunc describes the behavior when
// the QueueAutoIndexJobsForPackage method of the parent MockIndexEnqueuer
// instance is invoked.
type IndexEnqueuerQueueAutoIndexJobsForPackageFunc struct {
defaultHook func(context.Context, shared.MinimialVersionedPackageRepo) error
hooks []func(context.Context, shared.MinimialVersionedPackageRepo) error
history []IndexEnqueuerQueueIndexesForPackageFuncCall
history []IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall
mutex sync.Mutex
}
// QueueIndexesForPackage delegates to the next hook function in the queue
// and stores the parameter and result values of this invocation.
func (m *MockIndexEnqueuer) QueueIndexesForPackage(v0 context.Context, v1 shared.MinimialVersionedPackageRepo) error {
r0 := m.QueueIndexesForPackageFunc.nextHook()(v0, v1)
m.QueueIndexesForPackageFunc.appendCall(IndexEnqueuerQueueIndexesForPackageFuncCall{v0, v1, r0})
// QueueAutoIndexJobsForPackage delegates to the next hook function in the
// queue and stores the parameter and result values of this invocation.
func (m *MockIndexEnqueuer) QueueAutoIndexJobsForPackage(v0 context.Context, v1 shared.MinimialVersionedPackageRepo) error {
r0 := m.QueueAutoIndexJobsForPackageFunc.nextHook()(v0, v1)
m.QueueAutoIndexJobsForPackageFunc.appendCall(IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall{v0, v1, r0})
return r0
}
// SetDefaultHook sets function that is called when the
// QueueIndexesForPackage method of the parent MockIndexEnqueuer instance is
// invoked and the hook queue is empty.
func (f *IndexEnqueuerQueueIndexesForPackageFunc) SetDefaultHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
// QueueAutoIndexJobsForPackage method of the parent MockIndexEnqueuer
// instance is invoked and the hook queue is empty.
func (f *IndexEnqueuerQueueAutoIndexJobsForPackageFunc) SetDefaultHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// QueueIndexesForPackage method of the parent MockIndexEnqueuer instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *IndexEnqueuerQueueIndexesForPackageFunc) PushHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
// QueueAutoIndexJobsForPackage method of the parent MockIndexEnqueuer
// instance invokes the hook at the front of the queue and discards it.
// After the queue is empty, the default hook function is invoked for any
// future action.
func (f *IndexEnqueuerQueueAutoIndexJobsForPackageFunc) PushHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -984,20 +988,20 @@ func (f *IndexEnqueuerQueueIndexesForPackageFunc) PushHook(hook func(context.Con
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *IndexEnqueuerQueueIndexesForPackageFunc) SetDefaultReturn(r0 error) {
func (f *IndexEnqueuerQueueAutoIndexJobsForPackageFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(context.Context, shared.MinimialVersionedPackageRepo) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *IndexEnqueuerQueueIndexesForPackageFunc) PushReturn(r0 error) {
func (f *IndexEnqueuerQueueAutoIndexJobsForPackageFunc) PushReturn(r0 error) {
f.PushHook(func(context.Context, shared.MinimialVersionedPackageRepo) error {
return r0
})
}
func (f *IndexEnqueuerQueueIndexesForPackageFunc) nextHook() func(context.Context, shared.MinimialVersionedPackageRepo) error {
func (f *IndexEnqueuerQueueAutoIndexJobsForPackageFunc) nextHook() func(context.Context, shared.MinimialVersionedPackageRepo) error {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -1010,27 +1014,28 @@ func (f *IndexEnqueuerQueueIndexesForPackageFunc) nextHook() func(context.Contex
return hook
}
func (f *IndexEnqueuerQueueIndexesForPackageFunc) appendCall(r0 IndexEnqueuerQueueIndexesForPackageFuncCall) {
func (f *IndexEnqueuerQueueAutoIndexJobsForPackageFunc) appendCall(r0 IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of IndexEnqueuerQueueIndexesForPackageFuncCall
// objects describing the invocations of this function.
func (f *IndexEnqueuerQueueIndexesForPackageFunc) History() []IndexEnqueuerQueueIndexesForPackageFuncCall {
// History returns a sequence of
// IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall objects describing the
// invocations of this function.
func (f *IndexEnqueuerQueueAutoIndexJobsForPackageFunc) History() []IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall {
f.mutex.Lock()
history := make([]IndexEnqueuerQueueIndexesForPackageFuncCall, len(f.history))
history := make([]IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// IndexEnqueuerQueueIndexesForPackageFuncCall is an object that describes
// an invocation of method QueueIndexesForPackage on an instance of
// MockIndexEnqueuer.
type IndexEnqueuerQueueIndexesForPackageFuncCall struct {
// IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall is an object that
// describes an invocation of method QueueAutoIndexJobsForPackage on an
// instance of MockIndexEnqueuer.
type IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
@ -1044,13 +1049,13 @@ type IndexEnqueuerQueueIndexesForPackageFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c IndexEnqueuerQueueIndexesForPackageFuncCall) Args() []interface{} {
func (c IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c IndexEnqueuerQueueIndexesForPackageFuncCall) Results() []interface{} {
func (c IndexEnqueuerQueueAutoIndexJobsForPackageFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
@ -1779,9 +1784,9 @@ type MockStore struct {
// object controlling the behavior of the method
// InsertDependencyIndexingJob.
InsertDependencyIndexingJobFunc *StoreInsertDependencyIndexingJobFunc
// InsertIndexesFunc is an instance of a mock function object
// controlling the behavior of the method InsertIndexes.
InsertIndexesFunc *StoreInsertIndexesFunc
// InsertJobsFunc is an instance of a mock function object controlling
// the behavior of the method InsertJobs.
InsertJobsFunc *StoreInsertJobsFunc
// IsQueuedFunc is an instance of a mock function object controlling the
// behavior of the method IsQueued.
IsQueuedFunc *StoreIsQueuedFunc
@ -1856,8 +1861,8 @@ func NewMockStore() *MockStore {
return
},
},
InsertIndexesFunc: &StoreInsertIndexesFunc{
defaultHook: func(context.Context, []shared1.Index) (r0 []shared1.Index, r1 error) {
InsertJobsFunc: &StoreInsertJobsFunc{
defaultHook: func(context.Context, []shared1.AutoIndexJob) (r0 []shared1.AutoIndexJob, r1 error) {
return
},
},
@ -1958,9 +1963,9 @@ func NewStrictMockStore() *MockStore {
panic("unexpected invocation of MockStore.InsertDependencyIndexingJob")
},
},
InsertIndexesFunc: &StoreInsertIndexesFunc{
defaultHook: func(context.Context, []shared1.Index) ([]shared1.Index, error) {
panic("unexpected invocation of MockStore.InsertIndexes")
InsertJobsFunc: &StoreInsertJobsFunc{
defaultHook: func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
panic("unexpected invocation of MockStore.InsertJobs")
},
},
IsQueuedFunc: &StoreIsQueuedFunc{
@ -2050,8 +2055,8 @@ func NewMockStoreFrom(i store.Store) *MockStore {
InsertDependencyIndexingJobFunc: &StoreInsertDependencyIndexingJobFunc{
defaultHook: i.InsertDependencyIndexingJob,
},
InsertIndexesFunc: &StoreInsertIndexesFunc{
defaultHook: i.InsertIndexes,
InsertJobsFunc: &StoreInsertJobsFunc{
defaultHook: i.InsertJobs,
},
IsQueuedFunc: &StoreIsQueuedFunc{
defaultHook: i.IsQueued,
@ -2652,34 +2657,34 @@ func (c StoreInsertDependencyIndexingJobFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// StoreInsertIndexesFunc describes the behavior when the InsertIndexes
// method of the parent MockStore instance is invoked.
type StoreInsertIndexesFunc struct {
defaultHook func(context.Context, []shared1.Index) ([]shared1.Index, error)
hooks []func(context.Context, []shared1.Index) ([]shared1.Index, error)
history []StoreInsertIndexesFuncCall
// StoreInsertJobsFunc describes the behavior when the InsertJobs method of
// the parent MockStore instance is invoked.
type StoreInsertJobsFunc struct {
defaultHook func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)
hooks []func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)
history []StoreInsertJobsFuncCall
mutex sync.Mutex
}
// InsertIndexes delegates to the next hook function in the queue and stores
// InsertJobs delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockStore) InsertIndexes(v0 context.Context, v1 []shared1.Index) ([]shared1.Index, error) {
r0, r1 := m.InsertIndexesFunc.nextHook()(v0, v1)
m.InsertIndexesFunc.appendCall(StoreInsertIndexesFuncCall{v0, v1, r0, r1})
func (m *MockStore) InsertJobs(v0 context.Context, v1 []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
r0, r1 := m.InsertJobsFunc.nextHook()(v0, v1)
m.InsertJobsFunc.appendCall(StoreInsertJobsFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the InsertIndexes method
// of the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreInsertIndexesFunc) SetDefaultHook(hook func(context.Context, []shared1.Index) ([]shared1.Index, error)) {
// SetDefaultHook sets function that is called when the InsertJobs method of
// the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreInsertJobsFunc) SetDefaultHook(hook func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// InsertIndexes method of the parent MockStore instance invokes the hook at
// InsertJobs method of the parent MockStore instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *StoreInsertIndexesFunc) PushHook(hook func(context.Context, []shared1.Index) ([]shared1.Index, error)) {
func (f *StoreInsertJobsFunc) PushHook(hook func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -2687,20 +2692,20 @@ func (f *StoreInsertIndexesFunc) PushHook(hook func(context.Context, []shared1.I
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *StoreInsertIndexesFunc) SetDefaultReturn(r0 []shared1.Index, r1 error) {
f.SetDefaultHook(func(context.Context, []shared1.Index) ([]shared1.Index, error) {
func (f *StoreInsertJobsFunc) SetDefaultReturn(r0 []shared1.AutoIndexJob, r1 error) {
f.SetDefaultHook(func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *StoreInsertIndexesFunc) PushReturn(r0 []shared1.Index, r1 error) {
f.PushHook(func(context.Context, []shared1.Index) ([]shared1.Index, error) {
func (f *StoreInsertJobsFunc) PushReturn(r0 []shared1.AutoIndexJob, r1 error) {
f.PushHook(func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
return r0, r1
})
}
func (f *StoreInsertIndexesFunc) nextHook() func(context.Context, []shared1.Index) ([]shared1.Index, error) {
func (f *StoreInsertJobsFunc) nextHook() func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -2713,35 +2718,35 @@ func (f *StoreInsertIndexesFunc) nextHook() func(context.Context, []shared1.Inde
return hook
}
func (f *StoreInsertIndexesFunc) appendCall(r0 StoreInsertIndexesFuncCall) {
func (f *StoreInsertJobsFunc) appendCall(r0 StoreInsertJobsFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of StoreInsertIndexesFuncCall objects
// describing the invocations of this function.
func (f *StoreInsertIndexesFunc) History() []StoreInsertIndexesFuncCall {
// History returns a sequence of StoreInsertJobsFuncCall objects describing
// the invocations of this function.
func (f *StoreInsertJobsFunc) History() []StoreInsertJobsFuncCall {
f.mutex.Lock()
history := make([]StoreInsertIndexesFuncCall, len(f.history))
history := make([]StoreInsertJobsFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// StoreInsertIndexesFuncCall is an object that describes an invocation of
// method InsertIndexes on an instance of MockStore.
type StoreInsertIndexesFuncCall struct {
// StoreInsertJobsFuncCall is an object that describes an invocation of
// method InsertJobs on an instance of MockStore.
type StoreInsertJobsFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 []shared1.Index
Arg1 []shared1.AutoIndexJob
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 []shared1.Index
Result0 []shared1.AutoIndexJob
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
@ -2749,13 +2754,13 @@ type StoreInsertIndexesFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c StoreInsertIndexesFuncCall) Args() []interface{} {
func (c StoreInsertJobsFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c StoreInsertIndexesFuncCall) Results() []interface{} {
func (c StoreInsertJobsFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}

View File

@ -23,12 +23,12 @@ const stalledIndexMaxAge = time.Second * 25
// "queued" on its next reset.
const indexMaxNumResets = 3
var IndexWorkerStoreOptions = dbworkerstore.Options[uploadsshared.Index]{
var IndexWorkerStoreOptions = dbworkerstore.Options[uploadsshared.AutoIndexJob]{
Name: "codeintel_index",
TableName: "lsif_indexes",
ViewName: "lsif_indexes_with_repository_name u",
ColumnExpressions: indexColumnsWithNullRank,
Scan: dbworkerstore.BuildWorkerScan(scanIndex),
Scan: dbworkerstore.BuildWorkerScan(scanJob),
OrderByExpression: sqlf.Sprintf("(u.enqueuer_user_id > 0) DESC, u.queued_at, u.id"),
StalledMaxAge: stalledIndexMaxAge,
MaxNumResets: indexMaxNumResets,
@ -61,40 +61,40 @@ var indexColumnsWithNullRank = []*sqlf.Query{
sqlf.Sprintf(`u.enqueuer_user_id`),
}
func scanIndex(s dbutil.Scanner) (index uploadsshared.Index, err error) {
func scanJob(s dbutil.Scanner) (job uploadsshared.AutoIndexJob, err error) {
var executionLogs []executor.ExecutionLogEntry
if err := s.Scan(
&index.ID,
&index.Commit,
&index.QueuedAt,
&index.State,
&index.FailureMessage,
&index.StartedAt,
&index.FinishedAt,
&index.ProcessAfter,
&index.NumResets,
&index.NumFailures,
&index.RepositoryID,
&index.RepositoryName,
pq.Array(&index.DockerSteps),
&index.Root,
&index.Indexer,
pq.Array(&index.IndexerArgs),
&index.Outfile,
&job.ID,
&job.Commit,
&job.QueuedAt,
&job.State,
&job.FailureMessage,
&job.StartedAt,
&job.FinishedAt,
&job.ProcessAfter,
&job.NumResets,
&job.NumFailures,
&job.RepositoryID,
&job.RepositoryName,
pq.Array(&job.DockerSteps),
&job.Root,
&job.Indexer,
pq.Array(&job.IndexerArgs),
&job.Outfile,
pq.Array(&executionLogs),
&index.Rank,
pq.Array(&index.LocalSteps),
&index.AssociatedUploadID,
&index.ShouldReindex,
pq.Array(&index.RequestedEnvVars),
&index.EnqueuerUserID,
&job.Rank,
pq.Array(&job.LocalSteps),
&job.AssociatedUploadID,
&job.ShouldReindex,
pq.Array(&job.RequestedEnvVars),
&job.EnqueuerUserID,
); err != nil {
return index, err
return job, err
}
index.ExecutionLogs = append(index.ExecutionLogs, executionLogs...)
job.ExecutionLogs = append(job.ExecutionLogs, executionLogs...)
return index, nil
return job, nil
}
// stalledDependencySyncingJobMaxAge is the maximum allowable duration between updating

View File

@ -20,6 +20,6 @@ type PoliciesService interface {
}
type IndexEnqueuer interface {
QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) (_ []uploadsshared.Index, err error)
QueueIndexesForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) (err error)
QueueAutoIndexJobs(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) (_ []uploadsshared.AutoIndexJob, err error)
QueueAutoIndexJobsForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) (err error)
}

View File

@ -190,12 +190,12 @@ func (b indexSchedulerJob) handleRepository(ctx context.Context, repositoryID, p
}
// Attempt to queue an index if one does not exist for each of the matching commits
if _, err := b.indexEnqueuer.QueueIndexes(ctx, repositoryID, commit, "", false, false); err != nil {
if _, err := b.indexEnqueuer.QueueAutoIndexJobs(ctx, repositoryID, commit, "", false, false); err != nil {
if errors.HasType[*gitdomain.RevisionNotFoundError](err) {
continue
}
return errors.Wrap(err, "indexEnqueuer.QueueIndexes")
return errors.Wrap(err, "indexEnqueuer.QueueAutoIndexJobs")
}
}
@ -221,7 +221,7 @@ func NewOnDemandScheduler(s store.Store, indexEnqueuer IndexEnqueuer, config *Co
ids := make([]int, 0, len(repoRevs))
for _, repoRev := range repoRevs {
if _, err := indexEnqueuer.QueueIndexes(ctx, repoRev.RepositoryID, repoRev.Rev, "", false, false); err != nil {
if _, err := indexEnqueuer.QueueAutoIndexJobs(ctx, repoRev.RepositoryID, repoRev.Rev, "", false, false); err != nil {
return err
}

View File

@ -9,5 +9,5 @@ import (
type UploadService interface {
GetRecentUploadsSummary(ctx context.Context, repositoryID int) (upload []shared.UploadsWithRepositoryNamespace, err error)
GetRecentIndexesSummary(ctx context.Context, repositoryID int) ([]uploadsshared.IndexesWithRepositoryNamespace, error)
GetRecentAutoIndexJobsSummary(ctx context.Context, repositoryID int) ([]uploadsshared.GroupedAutoIndexJobs, error)
}

View File

@ -39,7 +39,7 @@ func NewSummaryBuilder(
if err != nil {
return err
}
recentIndexes, err := uploadSvc.GetRecentIndexesSummary(ctx, repositoryWithCount.RepositoryID)
recentIndexes, err := uploadSvc.GetRecentAutoIndexJobsSummary(ctx, repositoryWithCount.RepositoryID)
if err != nil {
return err
}

View File

@ -42,7 +42,7 @@ func NewIndexEnqueuer(
}
}
// QueueIndexes enqueues a set of index jobs for the following repository and commit. If a non-empty
// QueueAutoIndexJobs enqueues a set of index jobs for the following repository and commit. If a non-empty
// configuration is given, it will be used to determine the set of jobs to enqueue. Otherwise, it will
// the configuration will be determined based on the regular index scheduling rules: first read any
// in-repo configuration (e.g., sourcegraph.yaml), then look for any existing in-database configuration,
@ -52,7 +52,7 @@ func NewIndexEnqueuer(
// If the force flag is false, then the presence of an upload or index record for this given repository and commit
// will cause this method to no-op. Note that this is NOT a guarantee that there will never be any duplicate records
// when the flag is false.
func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) (_ []uploadsshared.Index, err error) {
func (s *IndexEnqueuer) QueueAutoIndexJobs(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) (_ []uploadsshared.AutoIndexJob, err error) {
ctx, trace, endObservation := s.operations.queueIndex.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
attribute.String("rev", rev),
@ -71,12 +71,12 @@ func (s *IndexEnqueuer) QueueIndexes(ctx context.Context, repositoryID int, rev,
commit := string(commitID)
trace.AddEvent("ResolveRevision", attribute.String("commit", commit))
return s.queueIndexForRepositoryAndCommit(ctx, repositoryID, commit, configuration, force, bypassLimit)
return s.queueJobsForRepoAndCommit(ctx, repositoryID, commit, configuration, force, bypassLimit)
}
// QueueIndexesForPackage enqueues index jobs for a dependency of a recently-processed precise code
// QueueAutoIndexJobsForPackage enqueues index jobs for a dependency of a recently-processed precise code
// intelligence index.
func (s *IndexEnqueuer) QueueIndexesForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) (err error) {
func (s *IndexEnqueuer) QueueAutoIndexJobsForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) (err error) {
ctx, trace, endObservation := s.operations.queueIndexForPackage.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.String("scheme", pkg.Scheme),
attribute.String("name", string(pkg.Name)),
@ -107,16 +107,16 @@ func (s *IndexEnqueuer) QueueIndexesForPackage(ctx context.Context, pkg dependen
return errors.Wrap(err, "gitserverClient.ResolveRevision")
}
_, err = s.queueIndexForRepositoryAndCommit(ctx, repoID, string(commit), "", false, false)
_, err = s.queueJobsForRepoAndCommit(ctx, repoID, string(commit), "", false, false)
return err
}
// queueIndexForRepositoryAndCommit determines a set of index jobs to enqueue for the given repository and commit.
// queueJobsForRepoAndCommit determines a set of index jobs to enqueue for the given repository and commit.
//
// If the force flag is false, then the presence of an upload or index record for this given repository and commit
// will cause this method to no-op. Note that this is NOT a guarantee that there will never be any duplicate records
// when the flag is false.
func (s *IndexEnqueuer) queueIndexForRepositoryAndCommit(ctx context.Context, repositoryID int, commit, configuration string, force, bypassLimit bool) ([]uploadsshared.Index, error) {
func (s *IndexEnqueuer) queueJobsForRepoAndCommit(ctx context.Context, repositoryID int, commit, configuration string, force, bypassLimit bool) ([]uploadsshared.AutoIndexJob, error) {
if !force {
isQueued, err := s.store.IsQueued(ctx, repositoryID, commit)
if err != nil {
@ -127,7 +127,7 @@ func (s *IndexEnqueuer) queueIndexForRepositoryAndCommit(ctx context.Context, re
}
}
indexes, err := s.jobSelector.GetIndexRecords(ctx, repositoryID, commit, configuration, bypassLimit)
indexes, err := s.jobSelector.GetJobs(ctx, repositoryID, commit, configuration, bypassLimit)
if err != nil {
return nil, err
}
@ -135,19 +135,19 @@ func (s *IndexEnqueuer) queueIndexForRepositoryAndCommit(ctx context.Context, re
return nil, nil
}
indexesToInsert := indexes
jobsToInsert := indexes
if !force {
indexesToInsert = []uploadsshared.Index{}
jobsToInsert = []uploadsshared.AutoIndexJob{}
for _, index := range indexes {
isQueued, err := s.store.IsQueuedRootIndexer(ctx, repositoryID, commit, index.Root, index.Indexer)
if err != nil {
return nil, errors.Wrap(err, "dbstore.IsQueuedRootIndexer")
}
if !isQueued {
indexesToInsert = append(indexesToInsert, index)
jobsToInsert = append(jobsToInsert, index)
}
}
}
return s.store.InsertIndexes(ctx, indexesToInsert)
return s.store.InsertJobs(ctx, jobsToInsert)
}

View File

@ -9,13 +9,13 @@ import (
)
// IndexJobFromTable decodes a single Lua table value into an index job instance.
func IndexJobFromTable(value lua.LValue) (config.IndexJob, error) {
func IndexJobFromTable(value lua.LValue) (config.AutoIndexJobSpec, error) {
table, ok := value.(*lua.LTable)
if !ok {
return config.IndexJob{}, util.NewTypeError("table", value)
return config.AutoIndexJobSpec{}, util.NewTypeError("table", value)
}
job := config.IndexJob{}
job := config.AutoIndexJobSpec{}
if err := util.DecodeTable(table, map[string]func(lua.LValue) error{
"steps": setDockerSteps(&job.Steps),
"local_steps": util.SetStrings(&job.LocalSteps),
@ -25,11 +25,11 @@ func IndexJobFromTable(value lua.LValue) (config.IndexJob, error) {
"outfile": util.SetString(&job.Outfile),
"requested_envvars": util.SetStrings(&job.RequestedEnvVars),
}); err != nil {
return config.IndexJob{}, err
return config.AutoIndexJobSpec{}, err
}
if job.Indexer == "" {
return config.IndexJob{}, errors.Newf("no indexer supplied")
return config.AutoIndexJobSpec{}, errors.Newf("no indexer supplied")
}
return job, nil

View File

@ -45,7 +45,7 @@ type invocationContext struct {
type invocationFunctionTable struct {
linearize func(recognizer *luatypes.Recognizer) []*luatypes.Recognizer
callback func(recognizer *luatypes.Recognizer) *baselua.LFunction
scanLuaValue func(value baselua.LValue) ([]config.IndexJob, error)
scanLuaValue func(value baselua.LValue) ([]config.AutoIndexJobSpec, error)
}
type LimitError struct {
@ -88,7 +88,7 @@ func (s *Service) InferIndexJobs(ctx context.Context, repo api.RepoName, commit,
functionTable := invocationFunctionTable{
linearize: luatypes.LinearizeGenerator,
callback: func(recognizer *luatypes.Recognizer) *baselua.LFunction { return recognizer.Generator() },
scanLuaValue: func(value baselua.LValue) ([]config.IndexJob, error) {
scanLuaValue: func(value baselua.LValue) ([]config.AutoIndexJobSpec, error) {
return util.MapSliceOrSingleton(value, luatypes.IndexJobFromTable)
},
}
@ -115,7 +115,7 @@ func (s *Service) inferIndexJobs(
commit string,
overrideScript string,
invocationContextMethods invocationFunctionTable,
) (_ []config.IndexJob, logs string, _ error) {
) (_ []config.AutoIndexJobSpec, logs string, _ error) {
sandbox, err := s.createSandbox(ctx)
if err != nil {
return nil, "", err
@ -227,7 +227,7 @@ func (s *Service) invokeRecognizers(
ctx context.Context,
invocationContext invocationContext,
recognizers []*luatypes.Recognizer,
) (_ []config.IndexJob, err error) {
) (_ []config.AutoIndexJobSpec, err error) {
ctx, _, endObservation := s.operations.invokeRecognizers.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})
@ -410,7 +410,7 @@ func (s *Service) invokeRecognizerChains(
recognizers []*luatypes.Recognizer,
paths []string,
contentsByPath map[string]string,
) (jobs []config.IndexJob, _ error) {
) (jobs []config.AutoIndexJobSpec, _ error) {
registrationAPI := &registrationAPI{}
// Invoke the recognizers and gather the resulting jobs or hints
@ -456,7 +456,7 @@ func (s *Service) invokeRecognizerChainUntilResults(
registrationAPI *registrationAPI,
paths []string,
contentsByPath map[string]string,
) ([]config.IndexJob, error) {
) ([]config.AutoIndexJobSpec, error) {
for _, recognizer := range invocationContext.linearize(recognizer) {
if jobs, err := s.invokeLinearizedRecognizer(
ctx,
@ -481,7 +481,7 @@ func (s *Service) invokeLinearizedRecognizer(
registrationAPI *registrationAPI,
paths []string,
contentsByPath map[string]string,
) (_ []config.IndexJob, err error) {
) (_ []config.AutoIndexJobSpec, err error) {
ctx, _, endObservation := s.operations.invokeLinearizedRecognizer.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

View File

@ -3,8 +3,6 @@ package inference
import (
"context"
"flag"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/require"
"io"
"os"
"path/filepath"
@ -12,6 +10,9 @@ import (
"strings"
"testing"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/require"
"github.com/google/go-cmp/cmp"
"gopkg.in/yaml.v3"
@ -162,7 +163,7 @@ func testGenerator(t *testing.T, testCase generatorTestCase) {
require.NoError(t, err)
bytes, err := io.ReadAll(file)
require.NoError(t, err)
var expected []config.IndexJob
var expected []config.AutoIndexJobSpec
require.NoError(t, yaml.Unmarshal(bytes, &expected))
if diff := cmp.Diff(expected, result.IndexJobs, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("unexpected index jobs (-want +got):\n%s", diff)
@ -170,7 +171,7 @@ func testGenerator(t *testing.T, testCase generatorTestCase) {
})
}
func sortIndexJobs(s []config.IndexJob) []config.IndexJob {
func sortIndexJobs(s []config.AutoIndexJobSpec) []config.AutoIndexJobSpec {
sort.Slice(s, func(i, j int) bool {
return s[i].Indexer < s[j].Indexer || (s[i].Indexer == s[j].Indexer && s[i].Root < s[j].Root)
})

View File

@ -19,6 +19,7 @@ go_library(
"//internal/gitserver",
"//lib/codeintel/autoindex/config",
"//lib/errors",
"@com_github_life4_genesis//slices",
"@com_github_sourcegraph_log//:log",
],
)

View File

@ -5,6 +5,7 @@ import (
"io"
"os"
genslices "github.com/life4/genesis/slices"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/api"
@ -85,16 +86,16 @@ func (s *JobSelector) InferIndexJobsFromRepositoryStructure(ctx context.Context,
return result, nil
}
type configurationFactoryFunc func(ctx context.Context, repositoryID int, commit string, bypassLimit bool) ([]uploadsshared.Index, bool, error)
type configurationFactoryFunc func(ctx context.Context, repositoryID int, commit string, bypassLimit bool) ([]uploadsshared.AutoIndexJob, bool, error)
// GetIndexRecords determines the set of index records that should be enqueued for the given commit.
// GetJobs determines the set of index records that should be enqueued for the given commit.
// For each repository, we look for index configuration in the following order:
//
// - supplied explicitly via parameter
// - in the database
// - committed to `sourcegraph.yaml` in the repository
// - inferred from the repository structure
func (s *JobSelector) GetIndexRecords(ctx context.Context, repositoryID int, commit, configuration string, bypassLimit bool) ([]uploadsshared.Index, error) {
func (s *JobSelector) GetJobs(ctx context.Context, repositoryID int, commit, configuration string, bypassLimit bool) ([]uploadsshared.AutoIndexJob, error) {
if canSchedule, _, err := s.store.RepositoryExceptions(ctx, repositoryID); err != nil {
return nil, err
} else if !canSchedule {
@ -104,9 +105,9 @@ func (s *JobSelector) GetIndexRecords(ctx context.Context, repositoryID int, com
fns := []configurationFactoryFunc{
makeExplicitConfigurationFactory(configuration),
s.getIndexRecordsFromConfigurationInDatabase,
s.getIndexRecordsFromConfigurationInRepository,
s.inferIndexRecordsFromRepositoryStructure,
s.getJobsFromConfigInDatabase,
s.getJobsFromConfigInRepo,
s.inferJobsFromRepoContents,
}
for _, fn := range fns {
@ -125,7 +126,7 @@ func (s *JobSelector) GetIndexRecords(ctx context.Context, repositoryID int, com
// flag is returned.
func makeExplicitConfigurationFactory(configuration string) configurationFactoryFunc {
logger := log.Scoped("explicitConfigurationFactory")
return func(ctx context.Context, repositoryID int, commit string, _ bool) ([]uploadsshared.Index, bool, error) {
return func(ctx context.Context, repositoryID int, commit string, _ bool) ([]uploadsshared.AutoIndexJob, bool, error) {
if configuration == "" {
return nil, false, nil
}
@ -139,13 +140,13 @@ func makeExplicitConfigurationFactory(configuration string) configurationFactory
return nil, true, nil
}
return convertIndexConfiguration(repositoryID, commit, indexConfiguration), true, nil
return makeQueuedJobs(indexConfiguration.JobSpecs, repositoryID, commit), true, nil
}
}
// getIndexRecordsFromConfigurationInDatabase returns a set of index jobs configured via the UI for
// getJobsFromConfigInDatabase returns a set of index jobs configured via the UI for
// the given repository. If no jobs are configured via the UI then a false valued flag is returned.
func (s *JobSelector) getIndexRecordsFromConfigurationInDatabase(ctx context.Context, repositoryID int, commit string, _ bool) ([]uploadsshared.Index, bool, error) {
func (s *JobSelector) getJobsFromConfigInDatabase(ctx context.Context, repositoryID int, commit string, _ bool) ([]uploadsshared.AutoIndexJob, bool, error) {
indexConfigurationRecord, ok, err := s.store.GetIndexConfigurationByRepositoryID(ctx, repositoryID)
if err != nil {
return nil, false, errors.Wrap(err, "dbstore.GetIndexConfigurationByRepositoryID")
@ -163,13 +164,13 @@ func (s *JobSelector) getIndexRecordsFromConfigurationInDatabase(ctx context.Con
return nil, true, nil
}
return convertIndexConfiguration(repositoryID, commit, indexConfiguration), true, nil
return makeQueuedJobs(indexConfiguration.JobSpecs, repositoryID, commit), true, nil
}
// getIndexRecordsFromConfigurationInRepository returns a set of index jobs configured via a committed
// getJobsFromConfigInRepo returns a set of index jobs configured via a committed
// configuration file at the given commit. If no jobs are configured within the repository then a false
// valued flag is returned.
func (s *JobSelector) getIndexRecordsFromConfigurationInRepository(ctx context.Context, repositoryID int, commit string, _ bool) ([]uploadsshared.Index, bool, error) {
func (s *JobSelector) getJobsFromConfigInRepo(ctx context.Context, repositoryID int, commit string, _ bool) ([]uploadsshared.AutoIndexJob, bool, error) {
repo, err := s.repoStore.Get(ctx, api.RepoID(repositoryID))
if err != nil {
return nil, false, err
@ -199,77 +200,24 @@ func (s *JobSelector) getIndexRecordsFromConfigurationInRepository(ctx context.C
return nil, true, nil
}
return convertIndexConfiguration(repositoryID, commit, indexConfiguration), true, nil
return makeQueuedJobs(indexConfiguration.JobSpecs, repositoryID, commit), true, nil
}
// inferIndexRecordsFromRepositoryStructure looks at the repository contents at the given commit and
// inferJobsFromRepoContents looks at the repository contents at the given commit and
// determines a set of index jobs that are likely to succeed. If no jobs could be inferred then a
// false valued flag is returned.
func (s *JobSelector) inferIndexRecordsFromRepositoryStructure(ctx context.Context, repositoryID int, commit string, bypassLimit bool) ([]uploadsshared.Index, bool, error) {
func (s *JobSelector) inferJobsFromRepoContents(ctx context.Context, repositoryID int, commit string, bypassLimit bool) ([]uploadsshared.AutoIndexJob, bool, error) {
result, err := s.InferIndexJobsFromRepositoryStructure(ctx, repositoryID, commit, "", bypassLimit)
if err != nil || len(result.IndexJobs) == 0 {
return nil, false, err
}
return convertInferredConfiguration(repositoryID, commit, result.IndexJobs), true, nil
return makeQueuedJobs(result.IndexJobs, repositoryID, commit), true, nil
}
// convertIndexConfiguration converts an index configuration object into a set of index records to be
// inserted into the database.
func convertIndexConfiguration(repositoryID int, commit string, indexConfiguration config.IndexConfiguration) (indexes []uploadsshared.Index) {
for _, indexJob := range indexConfiguration.IndexJobs {
var dockerSteps []uploadsshared.DockerStep
for _, dockerStep := range indexJob.Steps {
dockerSteps = append(dockerSteps, uploadsshared.DockerStep{
Root: dockerStep.Root,
Image: dockerStep.Image,
Commands: dockerStep.Commands,
})
}
indexes = append(indexes, uploadsshared.Index{
Commit: commit,
RepositoryID: repositoryID,
State: "queued",
DockerSteps: dockerSteps,
LocalSteps: indexJob.LocalSteps,
Root: indexJob.Root,
Indexer: indexJob.Indexer,
IndexerArgs: indexJob.IndexerArgs,
Outfile: indexJob.Outfile,
RequestedEnvVars: indexJob.RequestedEnvVars,
})
}
return indexes
}
// convertInferredConfiguration converts a set of index jobs into a set of index records to be inserted
// into the database.
func convertInferredConfiguration(repositoryID int, commit string, indexJobs []config.IndexJob) (indexes []uploadsshared.Index) {
for _, indexJob := range indexJobs {
var dockerSteps []uploadsshared.DockerStep
for _, dockerStep := range indexJob.Steps {
dockerSteps = append(dockerSteps, uploadsshared.DockerStep{
Root: dockerStep.Root,
Image: dockerStep.Image,
Commands: dockerStep.Commands,
})
}
indexes = append(indexes, uploadsshared.Index{
RepositoryID: repositoryID,
Commit: commit,
State: "queued",
DockerSteps: dockerSteps,
LocalSteps: indexJob.LocalSteps,
Root: indexJob.Root,
Indexer: indexJob.Indexer,
IndexerArgs: indexJob.IndexerArgs,
Outfile: indexJob.Outfile,
RequestedEnvVars: indexJob.RequestedEnvVars,
})
}
return indexes
// TODO: Push api.RepoID and api.CommitID much further up.
func makeQueuedJobs(indexJobs []config.AutoIndexJobSpec, repoID int, commit string) []uploadsshared.AutoIndexJob {
return genslices.Map(indexJobs, func(job config.AutoIndexJobSpec) uploadsshared.AutoIndexJob {
return uploadsshared.NewAutoIndexJob(job, api.RepoID(repoID), api.CommitID(commit), uploadsshared.JobStateQueued)
})
}

View File

@ -102,7 +102,7 @@ SET data = %s
//
//
func scanIndexConfiguration(s dbutil.Scanner) (indexConfiguration shared.IndexConfiguration, err error) {
func scanJobConfiguration(s dbutil.Scanner) (indexConfiguration shared.IndexConfiguration, err error) {
return indexConfiguration, s.Scan(
&indexConfiguration.ID,
&indexConfiguration.RepositoryID,
@ -110,4 +110,4 @@ func scanIndexConfiguration(s dbutil.Scanner) (indexConfiguration shared.IndexCo
)
}
var scanFirstIndexConfiguration = basestore.NewFirstScanner(scanIndexConfiguration)
var scanFirstIndexConfiguration = basestore.NewFirstScanner(scanJobConfiguration)

View File

@ -110,50 +110,50 @@ LIMIT 1
// - canonization methods
// - share code with uploads store (should own this?)
func (s *store) InsertIndexes(ctx context.Context, indexes []shared.Index) (_ []shared.Index, err error) {
ctx, _, endObservation := s.operations.insertIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIndexes", len(indexes)),
func (s *store) InsertJobs(ctx context.Context, autoIndexJobs []shared.AutoIndexJob) (_ []shared.AutoIndexJob, err error) {
ctx, _, endObservation := s.operations.insertAutoIndexJobs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("numIndexes", len(autoIndexJobs)),
}})
endObservation(1, observation.Args{})
if len(indexes) == 0 {
if len(autoIndexJobs) == 0 {
return nil, nil
}
actor := actor.FromContext(ctx)
values := make([]*sqlf.Query, 0, len(indexes))
for _, index := range indexes {
if index.DockerSteps == nil {
index.DockerSteps = []shared.DockerStep{}
values := make([]*sqlf.Query, 0, len(autoIndexJobs))
for _, job := range autoIndexJobs {
if job.DockerSteps == nil {
job.DockerSteps = []shared.DockerStep{}
}
if index.LocalSteps == nil {
index.LocalSteps = []string{}
if job.LocalSteps == nil {
job.LocalSteps = []string{}
}
if index.IndexerArgs == nil {
index.IndexerArgs = []string{}
if job.IndexerArgs == nil {
job.IndexerArgs = []string{}
}
values = append(values, sqlf.Sprintf(
"(%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)",
index.State,
index.Commit,
index.RepositoryID,
pq.Array(index.DockerSteps),
pq.Array(index.LocalSteps),
index.Root,
index.Indexer,
pq.Array(index.IndexerArgs),
index.Outfile,
pq.Array(index.ExecutionLogs),
pq.Array(index.RequestedEnvVars),
job.State,
job.Commit,
job.RepositoryID,
pq.Array(job.DockerSteps),
pq.Array(job.LocalSteps),
job.Root,
job.Indexer,
pq.Array(job.IndexerArgs),
job.Outfile,
pq.Array(job.ExecutionLogs),
pq.Array(job.RequestedEnvVars),
actor.UID,
))
}
indexes = []shared.Index{}
autoIndexJobs = []shared.AutoIndexJob{}
err = s.withTransaction(ctx, func(tx *store) error {
ids, err := basestore.ScanInts(tx.db.Query(ctx, sqlf.Sprintf(insertIndexQuery, sqlf.Join(values, ","))))
ids, err := basestore.ScanInts(tx.db.Query(ctx, sqlf.Sprintf(insertAutoIndexJobQuery, sqlf.Join(values, ","))))
if err != nil {
return err
}
@ -170,14 +170,14 @@ func (s *store) InsertIndexes(ctx context.Context, indexes []shared.Index) (_ []
queries = append(queries, sqlf.Sprintf("%d", id))
}
indexes, err = scanIndexes(tx.db.Query(ctx, sqlf.Sprintf(getIndexesByIDsQuery, sqlf.Join(queries, ", "), authzConds)))
autoIndexJobs, err = scanJobs(tx.db.Query(ctx, sqlf.Sprintf(getAutoIndexJobsByIDsQuery, sqlf.Join(queries, ", "), authzConds)))
return err
})
return indexes, err
return autoIndexJobs, err
}
const insertIndexQuery = `
const insertAutoIndexJobQuery = `
INSERT INTO lsif_indexes (
state,
commit,
@ -196,7 +196,7 @@ VALUES %s
RETURNING id
`
const getIndexesByIDsQuery = `
const getAutoIndexJobsByIDsQuery = `
SELECT
u.id,
u.commit,
@ -239,7 +239,7 @@ ORDER BY u.id
//
//
func scanIndex(s dbutil.Scanner) (index shared.Index, err error) {
func scanJob(s dbutil.Scanner) (index shared.AutoIndexJob, err error) {
var executionLogs []executor.ExecutionLogEntry
if err := s.Scan(
&index.ID,
@ -274,4 +274,4 @@ func scanIndex(s dbutil.Scanner) (index shared.Index, err error) {
return index, nil
}
var scanIndexes = basestore.NewSliceScanner(scanIndex)
var scanJobs = basestore.NewSliceScanner(scanJob)

View File

@ -22,10 +22,10 @@ func TestIsQueued(t *testing.T) {
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
insertIndexes(t, db, uploadsshared.Index{ID: 1, RepositoryID: 1, Commit: makeCommit(1)})
insertIndexes(t, db, uploadsshared.Index{ID: 2, RepositoryID: 1, Commit: makeCommit(1), ShouldReindex: true})
insertIndexes(t, db, uploadsshared.Index{ID: 3, RepositoryID: 4, Commit: makeCommit(1), ShouldReindex: true})
insertIndexes(t, db, uploadsshared.Index{ID: 4, RepositoryID: 5, Commit: makeCommit(4), ShouldReindex: true})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1, RepositoryID: 1, Commit: makeCommit(1)})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 2, RepositoryID: 1, Commit: makeCommit(1), ShouldReindex: true})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 3, RepositoryID: 4, Commit: makeCommit(1), ShouldReindex: true})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 4, RepositoryID: 5, Commit: makeCommit(4), ShouldReindex: true})
insertUploads(t, db, upload{ID: 2, RepositoryID: 2, Commit: makeCommit(2)})
insertUploads(t, db, upload{ID: 3, RepositoryID: 3, Commit: makeCommit(3), State: "deleted"})
insertUploads(t, db, upload{ID: 4, RepositoryID: 5, Commit: makeCommit(4), ShouldReindex: true})
@ -67,12 +67,12 @@ func TestIsQueuedRootIndexer(t *testing.T) {
store := New(observation.TestContextTB(t), db)
now := time.Now()
insertIndexes(t, db, uploadsshared.Index{ID: 1, RepositoryID: 1, Commit: makeCommit(1), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 1)})
insertIndexes(t, db, uploadsshared.Index{ID: 2, RepositoryID: 1, Commit: makeCommit(1), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 2)})
insertIndexes(t, db, uploadsshared.Index{ID: 3, RepositoryID: 2, Commit: makeCommit(2), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 1), ShouldReindex: true})
insertIndexes(t, db, uploadsshared.Index{ID: 4, RepositoryID: 2, Commit: makeCommit(2), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 2)})
insertIndexes(t, db, uploadsshared.Index{ID: 5, RepositoryID: 3, Commit: makeCommit(3), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 1)})
insertIndexes(t, db, uploadsshared.Index{ID: 6, RepositoryID: 3, Commit: makeCommit(3), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 2), ShouldReindex: true})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1, RepositoryID: 1, Commit: makeCommit(1), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 1)})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 2, RepositoryID: 1, Commit: makeCommit(1), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 2)})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 3, RepositoryID: 2, Commit: makeCommit(2), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 1), ShouldReindex: true})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 4, RepositoryID: 2, Commit: makeCommit(2), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 2)})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 5, RepositoryID: 3, Commit: makeCommit(3), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 1)})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 6, RepositoryID: 3, Commit: makeCommit(3), Root: "/foo", Indexer: "i1", QueuedAt: now.Add(-time.Hour * 2), ShouldReindex: true})
testCases := []struct {
repositoryID int
@ -102,7 +102,7 @@ func TestIsQueuedRootIndexer(t *testing.T) {
}
}
func TestInsertIndexes(t *testing.T) {
func TestInsertJobs(t *testing.T) {
ctx := context.Background()
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
@ -110,7 +110,7 @@ func TestInsertIndexes(t *testing.T) {
insertRepo(t, db, 50, "")
indexes, err := store.InsertIndexes(ctx, []uploadsshared.Index{
indexes, err := store.InsertJobs(ctx, []uploadsshared.AutoIndexJob{
{
State: "queued",
Commit: makeCommit(1),
@ -161,7 +161,7 @@ func TestInsertIndexes(t *testing.T) {
rank1 := 1
rank2 := 2
expected := []uploadsshared.Index{
expected := []uploadsshared.AutoIndexJob{
{
ID: 1,
Commit: makeCommit(1),
@ -240,7 +240,7 @@ func TestInsertIndexWithActor(t *testing.T) {
actor.WithInternalActor(context.Background()),
context.Background(),
} {
indexes, err := store.InsertIndexes(ctx, []uploadsshared.Index{
indexes, err := store.InsertJobs(ctx, []uploadsshared.AutoIndexJob{
{ID: i, RepositoryID: 50, Commit: makeCommit(i), State: "queued"},
})
if err != nil {

View File

@ -27,7 +27,7 @@ type operations struct {
markRepoRevsAsProcessed *observation.Operation
isQueued *observation.Operation
isQueuedRootIndexer *observation.Operation
insertIndexes *observation.Operation
insertAutoIndexJobs *observation.Operation
insertDependencyIndexingJob *observation.Operation
queueRepoRev *observation.Operation
@ -83,7 +83,7 @@ func newOperations(observationCtx *observation.Context) *operations {
markRepoRevsAsProcessed: op("MarkRepoRevsAsProcessed"),
isQueued: op("IsQueued"),
isQueuedRootIndexer: op("IsQueuedRootIndexer"),
insertIndexes: op("InsertIndexes"),
insertAutoIndexJobs: op("InsertJobs"),
insertDependencyIndexingJob: op("InsertDependencyIndexingJob"),
queueRepoRev: op("QueueRepoRev"),

View File

@ -40,7 +40,7 @@ type Store interface {
// Enqueuer
IsQueued(ctx context.Context, repositoryID int, commit string) (bool, error)
IsQueuedRootIndexer(ctx context.Context, repositoryID int, commit string, root string, indexer string) (bool, error)
InsertIndexes(ctx context.Context, indexes []uploadsshared.Index) ([]uploadsshared.Index, error)
InsertJobs(context.Context, []uploadsshared.AutoIndexJob) ([]uploadsshared.AutoIndexJob, error)
// Dependency indexing
InsertDependencyIndexingJob(ctx context.Context, uploadID int, externalServiceKind string, syncTime time.Time) (int, error)

View File

@ -114,29 +114,29 @@ func insertUploads(t testing.TB, db database.DB, uploads ...upload) {
}
}
func insertIndexes(t testing.TB, db database.DB, indexes ...uploadsshared.Index) {
for _, index := range indexes {
if index.Commit == "" {
index.Commit = makeCommit(index.ID)
func insertAutoIndexJobs(t testing.TB, db database.DB, jobs ...uploadsshared.AutoIndexJob) {
for _, job := range jobs {
if job.Commit == "" {
job.Commit = makeCommit(job.ID)
}
if index.State == "" {
index.State = "completed"
if job.State == "" {
job.State = "completed"
}
if index.RepositoryID == 0 {
index.RepositoryID = 50
if job.RepositoryID == 0 {
job.RepositoryID = 50
}
if index.DockerSteps == nil {
index.DockerSteps = []uploadsshared.DockerStep{}
if job.DockerSteps == nil {
job.DockerSteps = []uploadsshared.DockerStep{}
}
if index.IndexerArgs == nil {
index.IndexerArgs = []string{}
if job.IndexerArgs == nil {
job.IndexerArgs = []string{}
}
if index.LocalSteps == nil {
index.LocalSteps = []string{}
if job.LocalSteps == nil {
job.LocalSteps = []string{}
}
// Ensure we have a repo for the inner join in select queries
insertRepo(t, db, index.RepositoryID, index.RepositoryName)
insertRepo(t, db, job.RepositoryID, job.RepositoryName)
query := sqlf.Sprintf(`
INSERT INTO lsif_indexes (
@ -161,25 +161,25 @@ func insertIndexes(t testing.TB, db database.DB, indexes ...uploadsshared.Index)
should_reindex
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
`,
index.ID,
index.Commit,
index.QueuedAt,
index.State,
index.FailureMessage,
index.StartedAt,
index.FinishedAt,
index.ProcessAfter,
index.NumResets,
index.NumFailures,
index.RepositoryID,
pq.Array(index.DockerSteps),
index.Root,
index.Indexer,
pq.Array(index.IndexerArgs),
index.Outfile,
pq.Array(index.ExecutionLogs),
pq.Array(index.LocalSteps),
index.ShouldReindex,
job.ID,
job.Commit,
job.QueuedAt,
job.State,
job.FailureMessage,
job.StartedAt,
job.FinishedAt,
job.ProcessAfter,
job.NumResets,
job.NumFailures,
job.RepositoryID,
pq.Array(job.DockerSteps),
job.Root,
job.Indexer,
pq.Array(job.IndexerArgs),
job.Outfile,
pq.Array(job.ExecutionLogs),
pq.Array(job.LocalSteps),
job.ShouldReindex,
)
if _, err := db.ExecContext(context.Background(), query.Query(sqlf.PostgresBindVar), query.Args()...); err != nil {

View File

@ -40,9 +40,9 @@ type MockStore struct {
// object controlling the behavior of the method
// InsertDependencyIndexingJob.
InsertDependencyIndexingJobFunc *StoreInsertDependencyIndexingJobFunc
// InsertIndexesFunc is an instance of a mock function object
// controlling the behavior of the method InsertIndexes.
InsertIndexesFunc *StoreInsertIndexesFunc
// InsertJobsFunc is an instance of a mock function object controlling
// the behavior of the method InsertJobs.
InsertJobsFunc *StoreInsertJobsFunc
// IsQueuedFunc is an instance of a mock function object controlling the
// behavior of the method IsQueued.
IsQueuedFunc *StoreIsQueuedFunc
@ -117,8 +117,8 @@ func NewMockStore() *MockStore {
return
},
},
InsertIndexesFunc: &StoreInsertIndexesFunc{
defaultHook: func(context.Context, []shared1.Index) (r0 []shared1.Index, r1 error) {
InsertJobsFunc: &StoreInsertJobsFunc{
defaultHook: func(context.Context, []shared1.AutoIndexJob) (r0 []shared1.AutoIndexJob, r1 error) {
return
},
},
@ -219,9 +219,9 @@ func NewStrictMockStore() *MockStore {
panic("unexpected invocation of MockStore.InsertDependencyIndexingJob")
},
},
InsertIndexesFunc: &StoreInsertIndexesFunc{
defaultHook: func(context.Context, []shared1.Index) ([]shared1.Index, error) {
panic("unexpected invocation of MockStore.InsertIndexes")
InsertJobsFunc: &StoreInsertJobsFunc{
defaultHook: func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
panic("unexpected invocation of MockStore.InsertJobs")
},
},
IsQueuedFunc: &StoreIsQueuedFunc{
@ -311,8 +311,8 @@ func NewMockStoreFrom(i store.Store) *MockStore {
InsertDependencyIndexingJobFunc: &StoreInsertDependencyIndexingJobFunc{
defaultHook: i.InsertDependencyIndexingJob,
},
InsertIndexesFunc: &StoreInsertIndexesFunc{
defaultHook: i.InsertIndexes,
InsertJobsFunc: &StoreInsertJobsFunc{
defaultHook: i.InsertJobs,
},
IsQueuedFunc: &StoreIsQueuedFunc{
defaultHook: i.IsQueued,
@ -913,34 +913,34 @@ func (c StoreInsertDependencyIndexingJobFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// StoreInsertIndexesFunc describes the behavior when the InsertIndexes
// method of the parent MockStore instance is invoked.
type StoreInsertIndexesFunc struct {
defaultHook func(context.Context, []shared1.Index) ([]shared1.Index, error)
hooks []func(context.Context, []shared1.Index) ([]shared1.Index, error)
history []StoreInsertIndexesFuncCall
// StoreInsertJobsFunc describes the behavior when the InsertJobs method of
// the parent MockStore instance is invoked.
type StoreInsertJobsFunc struct {
defaultHook func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)
hooks []func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)
history []StoreInsertJobsFuncCall
mutex sync.Mutex
}
// InsertIndexes delegates to the next hook function in the queue and stores
// InsertJobs delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockStore) InsertIndexes(v0 context.Context, v1 []shared1.Index) ([]shared1.Index, error) {
r0, r1 := m.InsertIndexesFunc.nextHook()(v0, v1)
m.InsertIndexesFunc.appendCall(StoreInsertIndexesFuncCall{v0, v1, r0, r1})
func (m *MockStore) InsertJobs(v0 context.Context, v1 []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
r0, r1 := m.InsertJobsFunc.nextHook()(v0, v1)
m.InsertJobsFunc.appendCall(StoreInsertJobsFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the InsertIndexes method
// of the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreInsertIndexesFunc) SetDefaultHook(hook func(context.Context, []shared1.Index) ([]shared1.Index, error)) {
// SetDefaultHook sets function that is called when the InsertJobs method of
// the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreInsertJobsFunc) SetDefaultHook(hook func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// InsertIndexes method of the parent MockStore instance invokes the hook at
// InsertJobs method of the parent MockStore instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *StoreInsertIndexesFunc) PushHook(hook func(context.Context, []shared1.Index) ([]shared1.Index, error)) {
func (f *StoreInsertJobsFunc) PushHook(hook func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -948,20 +948,20 @@ func (f *StoreInsertIndexesFunc) PushHook(hook func(context.Context, []shared1.I
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *StoreInsertIndexesFunc) SetDefaultReturn(r0 []shared1.Index, r1 error) {
f.SetDefaultHook(func(context.Context, []shared1.Index) ([]shared1.Index, error) {
func (f *StoreInsertJobsFunc) SetDefaultReturn(r0 []shared1.AutoIndexJob, r1 error) {
f.SetDefaultHook(func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *StoreInsertIndexesFunc) PushReturn(r0 []shared1.Index, r1 error) {
f.PushHook(func(context.Context, []shared1.Index) ([]shared1.Index, error) {
func (f *StoreInsertJobsFunc) PushReturn(r0 []shared1.AutoIndexJob, r1 error) {
f.PushHook(func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
return r0, r1
})
}
func (f *StoreInsertIndexesFunc) nextHook() func(context.Context, []shared1.Index) ([]shared1.Index, error) {
func (f *StoreInsertJobsFunc) nextHook() func(context.Context, []shared1.AutoIndexJob) ([]shared1.AutoIndexJob, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -974,35 +974,35 @@ func (f *StoreInsertIndexesFunc) nextHook() func(context.Context, []shared1.Inde
return hook
}
func (f *StoreInsertIndexesFunc) appendCall(r0 StoreInsertIndexesFuncCall) {
func (f *StoreInsertJobsFunc) appendCall(r0 StoreInsertJobsFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of StoreInsertIndexesFuncCall objects
// describing the invocations of this function.
func (f *StoreInsertIndexesFunc) History() []StoreInsertIndexesFuncCall {
// History returns a sequence of StoreInsertJobsFuncCall objects describing
// the invocations of this function.
func (f *StoreInsertJobsFunc) History() []StoreInsertJobsFuncCall {
f.mutex.Lock()
history := make([]StoreInsertIndexesFuncCall, len(f.history))
history := make([]StoreInsertJobsFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// StoreInsertIndexesFuncCall is an object that describes an invocation of
// method InsertIndexes on an instance of MockStore.
type StoreInsertIndexesFuncCall struct {
// StoreInsertJobsFuncCall is an object that describes an invocation of
// method InsertJobs on an instance of MockStore.
type StoreInsertJobsFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 []shared1.Index
Arg1 []shared1.AutoIndexJob
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 []shared1.Index
Result0 []shared1.AutoIndexJob
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
@ -1010,13 +1010,13 @@ type StoreInsertIndexesFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c StoreInsertIndexesFuncCall) Args() []interface{} {
func (c StoreInsertJobsFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c StoreInsertIndexesFuncCall) Results() []interface{} {
func (c StoreInsertJobsFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
@ -2621,9 +2621,10 @@ func (c InferenceServiceInferIndexJobsFuncCall) Results() []interface{} {
// github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing) used
// for unit testing.
type MockUploadService struct {
// GetRecentIndexesSummaryFunc is an instance of a mock function object
// controlling the behavior of the method GetRecentIndexesSummary.
GetRecentIndexesSummaryFunc *UploadServiceGetRecentIndexesSummaryFunc
// GetRecentAutoIndexJobsSummaryFunc is an instance of a mock function
// object controlling the behavior of the method
// GetRecentAutoIndexJobsSummary.
GetRecentAutoIndexJobsSummaryFunc *UploadServiceGetRecentAutoIndexJobsSummaryFunc
// GetRecentUploadsSummaryFunc is an instance of a mock function object
// controlling the behavior of the method GetRecentUploadsSummary.
GetRecentUploadsSummaryFunc *UploadServiceGetRecentUploadsSummaryFunc
@ -2639,8 +2640,8 @@ type MockUploadService struct {
// All methods return zero values for all results, unless overwritten.
func NewMockUploadService() *MockUploadService {
return &MockUploadService{
GetRecentIndexesSummaryFunc: &UploadServiceGetRecentIndexesSummaryFunc{
defaultHook: func(context.Context, int) (r0 []shared1.IndexesWithRepositoryNamespace, r1 error) {
GetRecentAutoIndexJobsSummaryFunc: &UploadServiceGetRecentAutoIndexJobsSummaryFunc{
defaultHook: func(context.Context, int) (r0 []shared1.GroupedAutoIndexJobs, r1 error) {
return
},
},
@ -2666,9 +2667,9 @@ func NewMockUploadService() *MockUploadService {
// interface. All methods panic on invocation, unless overwritten.
func NewStrictMockUploadService() *MockUploadService {
return &MockUploadService{
GetRecentIndexesSummaryFunc: &UploadServiceGetRecentIndexesSummaryFunc{
defaultHook: func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error) {
panic("unexpected invocation of MockUploadService.GetRecentIndexesSummary")
GetRecentAutoIndexJobsSummaryFunc: &UploadServiceGetRecentAutoIndexJobsSummaryFunc{
defaultHook: func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error) {
panic("unexpected invocation of MockUploadService.GetRecentAutoIndexJobsSummary")
},
},
GetRecentUploadsSummaryFunc: &UploadServiceGetRecentUploadsSummaryFunc{
@ -2694,8 +2695,8 @@ func NewStrictMockUploadService() *MockUploadService {
// overwritten.
func NewMockUploadServiceFrom(i UploadService) *MockUploadService {
return &MockUploadService{
GetRecentIndexesSummaryFunc: &UploadServiceGetRecentIndexesSummaryFunc{
defaultHook: i.GetRecentIndexesSummary,
GetRecentAutoIndexJobsSummaryFunc: &UploadServiceGetRecentAutoIndexJobsSummaryFunc{
defaultHook: i.GetRecentAutoIndexJobsSummary,
},
GetRecentUploadsSummaryFunc: &UploadServiceGetRecentUploadsSummaryFunc{
defaultHook: i.GetRecentUploadsSummary,
@ -2709,37 +2710,37 @@ func NewMockUploadServiceFrom(i UploadService) *MockUploadService {
}
}
// UploadServiceGetRecentIndexesSummaryFunc describes the behavior when the
// GetRecentIndexesSummary method of the parent MockUploadService instance
// is invoked.
type UploadServiceGetRecentIndexesSummaryFunc struct {
defaultHook func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error)
hooks []func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error)
history []UploadServiceGetRecentIndexesSummaryFuncCall
// UploadServiceGetRecentAutoIndexJobsSummaryFunc describes the behavior
// when the GetRecentAutoIndexJobsSummary method of the parent
// MockUploadService instance is invoked.
type UploadServiceGetRecentAutoIndexJobsSummaryFunc struct {
defaultHook func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error)
hooks []func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error)
history []UploadServiceGetRecentAutoIndexJobsSummaryFuncCall
mutex sync.Mutex
}
// GetRecentIndexesSummary delegates to the next hook function in the queue
// and stores the parameter and result values of this invocation.
func (m *MockUploadService) GetRecentIndexesSummary(v0 context.Context, v1 int) ([]shared1.IndexesWithRepositoryNamespace, error) {
r0, r1 := m.GetRecentIndexesSummaryFunc.nextHook()(v0, v1)
m.GetRecentIndexesSummaryFunc.appendCall(UploadServiceGetRecentIndexesSummaryFuncCall{v0, v1, r0, r1})
// GetRecentAutoIndexJobsSummary delegates to the next hook function in the
// queue and stores the parameter and result values of this invocation.
func (m *MockUploadService) GetRecentAutoIndexJobsSummary(v0 context.Context, v1 int) ([]shared1.GroupedAutoIndexJobs, error) {
r0, r1 := m.GetRecentAutoIndexJobsSummaryFunc.nextHook()(v0, v1)
m.GetRecentAutoIndexJobsSummaryFunc.appendCall(UploadServiceGetRecentAutoIndexJobsSummaryFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the
// GetRecentIndexesSummary method of the parent MockUploadService instance
// is invoked and the hook queue is empty.
func (f *UploadServiceGetRecentIndexesSummaryFunc) SetDefaultHook(hook func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error)) {
// GetRecentAutoIndexJobsSummary method of the parent MockUploadService
// instance is invoked and the hook queue is empty.
func (f *UploadServiceGetRecentAutoIndexJobsSummaryFunc) SetDefaultHook(hook func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// GetRecentIndexesSummary method of the parent MockUploadService instance
// invokes the hook at the front of the queue and discards it. After the
// queue is empty, the default hook function is invoked for any future
// action.
func (f *UploadServiceGetRecentIndexesSummaryFunc) PushHook(hook func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error)) {
// GetRecentAutoIndexJobsSummary method of the parent MockUploadService
// instance invokes the hook at the front of the queue and discards it.
// After the queue is empty, the default hook function is invoked for any
// future action.
func (f *UploadServiceGetRecentAutoIndexJobsSummaryFunc) PushHook(hook func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -2747,20 +2748,20 @@ func (f *UploadServiceGetRecentIndexesSummaryFunc) PushHook(hook func(context.Co
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *UploadServiceGetRecentIndexesSummaryFunc) SetDefaultReturn(r0 []shared1.IndexesWithRepositoryNamespace, r1 error) {
f.SetDefaultHook(func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error) {
func (f *UploadServiceGetRecentAutoIndexJobsSummaryFunc) SetDefaultReturn(r0 []shared1.GroupedAutoIndexJobs, r1 error) {
f.SetDefaultHook(func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *UploadServiceGetRecentIndexesSummaryFunc) PushReturn(r0 []shared1.IndexesWithRepositoryNamespace, r1 error) {
f.PushHook(func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error) {
func (f *UploadServiceGetRecentAutoIndexJobsSummaryFunc) PushReturn(r0 []shared1.GroupedAutoIndexJobs, r1 error) {
f.PushHook(func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error) {
return r0, r1
})
}
func (f *UploadServiceGetRecentIndexesSummaryFunc) nextHook() func(context.Context, int) ([]shared1.IndexesWithRepositoryNamespace, error) {
func (f *UploadServiceGetRecentAutoIndexJobsSummaryFunc) nextHook() func(context.Context, int) ([]shared1.GroupedAutoIndexJobs, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -2773,28 +2774,28 @@ func (f *UploadServiceGetRecentIndexesSummaryFunc) nextHook() func(context.Conte
return hook
}
func (f *UploadServiceGetRecentIndexesSummaryFunc) appendCall(r0 UploadServiceGetRecentIndexesSummaryFuncCall) {
func (f *UploadServiceGetRecentAutoIndexJobsSummaryFunc) appendCall(r0 UploadServiceGetRecentAutoIndexJobsSummaryFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of
// UploadServiceGetRecentIndexesSummaryFuncCall objects describing the
// UploadServiceGetRecentAutoIndexJobsSummaryFuncCall objects describing the
// invocations of this function.
func (f *UploadServiceGetRecentIndexesSummaryFunc) History() []UploadServiceGetRecentIndexesSummaryFuncCall {
func (f *UploadServiceGetRecentAutoIndexJobsSummaryFunc) History() []UploadServiceGetRecentAutoIndexJobsSummaryFuncCall {
f.mutex.Lock()
history := make([]UploadServiceGetRecentIndexesSummaryFuncCall, len(f.history))
history := make([]UploadServiceGetRecentAutoIndexJobsSummaryFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// UploadServiceGetRecentIndexesSummaryFuncCall is an object that describes
// an invocation of method GetRecentIndexesSummary on an instance of
// MockUploadService.
type UploadServiceGetRecentIndexesSummaryFuncCall struct {
// UploadServiceGetRecentAutoIndexJobsSummaryFuncCall is an object that
// describes an invocation of method GetRecentAutoIndexJobsSummary on an
// instance of MockUploadService.
type UploadServiceGetRecentAutoIndexJobsSummaryFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
@ -2803,7 +2804,7 @@ type UploadServiceGetRecentIndexesSummaryFuncCall struct {
Arg1 int
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 []shared1.IndexesWithRepositoryNamespace
Result0 []shared1.GroupedAutoIndexJobs
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
@ -2811,13 +2812,13 @@ type UploadServiceGetRecentIndexesSummaryFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c UploadServiceGetRecentIndexesSummaryFuncCall) Args() []interface{} {
func (c UploadServiceGetRecentAutoIndexJobsSummaryFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c UploadServiceGetRecentIndexesSummaryFuncCall) Results() []interface{} {
func (c UploadServiceGetRecentAutoIndexJobsSummaryFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}

View File

@ -129,12 +129,12 @@ func (s *Service) GetInferenceScript(ctx context.Context) (string, error) {
return s.store.GetInferenceScript(ctx)
}
func (s *Service) QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) ([]uploadsshared.Index, error) {
return s.indexEnqueuer.QueueIndexes(ctx, repositoryID, rev, configuration, force, bypassLimit)
func (s *Service) QueueAutoIndexJobs(ctx context.Context, repositoryID int, rev, configuration string, force, bypassLimit bool) ([]uploadsshared.AutoIndexJob, error) {
return s.indexEnqueuer.QueueAutoIndexJobs(ctx, repositoryID, rev, configuration, force, bypassLimit)
}
func (s *Service) QueueIndexesForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) error {
return s.indexEnqueuer.QueueIndexesForPackage(ctx, pkg)
func (s *Service) QueueAutoIndexJobsForPackage(ctx context.Context, pkg dependencies.MinimialVersionedPackageRepo) error {
return s.indexEnqueuer.QueueAutoIndexJobsForPackage(ctx, pkg)
}
func (s *Service) InferIndexJobsFromRepositoryStructure(ctx context.Context, repositoryID int, commit string, localOverrideScript string, bypassLimit bool) (*shared.InferenceResult, error) {

View File

@ -10,6 +10,7 @@ import (
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/autoindexing/internal/jobselector"
@ -27,7 +28,7 @@ func init() {
jobselector.MaximumIndexJobsPerInferredConfiguration = 50
}
func TestQueueIndexesExplicit(t *testing.T) {
func TestQueueAutoIndexJobsExplicit(t *testing.T) {
conf := `{
"index_jobs": [
{
@ -51,8 +52,8 @@ func TestQueueIndexesExplicit(t *testing.T) {
}`
mockDBStore := NewMockStore()
mockDBStore.InsertIndexesFunc.SetDefaultHook(func(ctx context.Context, indexes []uploadsshared.Index) ([]uploadsshared.Index, error) {
return indexes, nil
mockDBStore.InsertJobsFunc.SetDefaultHook(func(ctx context.Context, jobs []uploadsshared.AutoIndexJob) ([]uploadsshared.AutoIndexJob, error) {
return jobs, nil
})
mockDBStore.RepositoryExceptionsFunc.SetDefaultReturn(true, true, nil)
@ -70,7 +71,7 @@ func TestQueueIndexesExplicit(t *testing.T) {
defaultMockRepoStore(), // repoStore
mockGitserverClient,
)
_, _ = service.QueueIndexes(context.Background(), 42, "HEAD", conf, false, false)
_, _ = service.QueueAutoIndexJobs(context.Background(), 42, "HEAD", conf, false, false)
if len(mockDBStore.IsQueuedFunc.History()) != 1 {
t.Errorf("unexpected number of calls to IsQueued. want=%d have=%d", 1, len(mockDBStore.IsQueuedFunc.History()))
@ -86,12 +87,12 @@ func TestQueueIndexesExplicit(t *testing.T) {
}
}
var indexes []uploadsshared.Index
for _, call := range mockDBStore.InsertIndexesFunc.History() {
indexes = append(indexes, call.Result0...)
var jobs []uploadsshared.AutoIndexJob
for _, call := range mockDBStore.InsertJobsFunc.History() {
jobs = append(jobs, call.Result0...)
}
expectedIndexes := []uploadsshared.Index{
expectedIndexes := []uploadsshared.AutoIndexJob{
{
RepositoryID: 42,
Commit: "cr42",
@ -116,12 +117,12 @@ func TestQueueIndexesExplicit(t *testing.T) {
Outfile: "lsif.dump",
},
}
if diff := cmp.Diff(expectedIndexes, indexes); diff != "" {
if diff := cmp.Diff(expectedIndexes, jobs, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("unexpected indexes (-want +got):\n%s", diff)
}
}
func TestQueueIndexesInDatabase(t *testing.T) {
func TestQueueAutoIndexJobsInDatabase(t *testing.T) {
indexConfiguration := shared.IndexConfiguration{
ID: 1,
RepositoryID: 42,
@ -149,8 +150,8 @@ func TestQueueIndexesInDatabase(t *testing.T) {
}
mockDBStore := NewMockStore()
mockDBStore.InsertIndexesFunc.SetDefaultHook(func(ctx context.Context, indexes []uploadsshared.Index) ([]uploadsshared.Index, error) {
return indexes, nil
mockDBStore.InsertJobsFunc.SetDefaultHook(func(ctx context.Context, jobs []uploadsshared.AutoIndexJob) ([]uploadsshared.AutoIndexJob, error) {
return jobs, nil
})
mockDBStore.GetIndexConfigurationByRepositoryIDFunc.SetDefaultReturn(indexConfiguration, true, nil)
mockDBStore.RepositoryExceptionsFunc.SetDefaultReturn(true, true, nil)
@ -168,7 +169,7 @@ func TestQueueIndexesInDatabase(t *testing.T) {
defaultMockRepoStore(), // repoStore
mockGitserverClient,
)
_, _ = service.QueueIndexes(context.Background(), 42, "HEAD", "", false, false)
_, _ = service.QueueAutoIndexJobs(context.Background(), 42, "HEAD", "", false, false)
if len(mockDBStore.GetIndexConfigurationByRepositoryIDFunc.History()) != 1 {
t.Errorf("unexpected number of calls to GetIndexConfigurationByRepositoryID. want=%d have=%d", 1, len(mockDBStore.GetIndexConfigurationByRepositoryIDFunc.History()))
@ -198,12 +199,12 @@ func TestQueueIndexesInDatabase(t *testing.T) {
}
}
var indexes []uploadsshared.Index
for _, call := range mockDBStore.InsertIndexesFunc.History() {
indexes = append(indexes, call.Result0...)
var jobs []uploadsshared.AutoIndexJob
for _, call := range mockDBStore.InsertJobsFunc.History() {
jobs = append(jobs, call.Result0...)
}
expectedIndexes := []uploadsshared.Index{
expectedIndexes := []uploadsshared.AutoIndexJob{
{
RepositoryID: 42,
Commit: "cr42",
@ -228,7 +229,7 @@ func TestQueueIndexesInDatabase(t *testing.T) {
Outfile: "lsif.dump",
},
}
if diff := cmp.Diff(expectedIndexes, indexes); diff != "" {
if diff := cmp.Diff(expectedIndexes, jobs, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("unexpected indexes (-want +got):\n%s", diff)
}
}
@ -250,10 +251,10 @@ index_jobs:
outfile: lsif.dump
`)
func TestQueueIndexesInRepository(t *testing.T) {
func TestQueueAutoIndexJobsInRepository(t *testing.T) {
mockDBStore := NewMockStore()
mockDBStore.InsertIndexesFunc.SetDefaultHook(func(ctx context.Context, indexes []uploadsshared.Index) ([]uploadsshared.Index, error) {
return indexes, nil
mockDBStore.InsertJobsFunc.SetDefaultHook(func(ctx context.Context, jobs []uploadsshared.AutoIndexJob) ([]uploadsshared.AutoIndexJob, error) {
return jobs, nil
})
mockDBStore.RepositoryExceptionsFunc.SetDefaultReturn(true, true, nil)
@ -272,7 +273,7 @@ func TestQueueIndexesInRepository(t *testing.T) {
gitserverClient,
)
if _, err := service.QueueIndexes(context.Background(), 42, "HEAD", "", false, false); err != nil {
if _, err := service.QueueAutoIndexJobs(context.Background(), 42, "HEAD", "", false, false); err != nil {
t.Fatalf("unexpected error performing update: %s", err)
}
@ -290,12 +291,12 @@ func TestQueueIndexesInRepository(t *testing.T) {
}
}
var indexes []uploadsshared.Index
for _, call := range mockDBStore.InsertIndexesFunc.History() {
indexes = append(indexes, call.Result0...)
var jobs []uploadsshared.AutoIndexJob
for _, call := range mockDBStore.InsertJobsFunc.History() {
jobs = append(jobs, call.Result0...)
}
expectedIndexes := []uploadsshared.Index{
expectedIndexes := []uploadsshared.AutoIndexJob{
{
RepositoryID: 42,
Commit: "cr42",
@ -320,15 +321,15 @@ func TestQueueIndexesInRepository(t *testing.T) {
Outfile: "lsif.dump",
},
}
if diff := cmp.Diff(expectedIndexes, indexes); diff != "" {
if diff := cmp.Diff(expectedIndexes, jobs, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("unexpected indexes (-want +got):\n%s", diff)
}
}
func TestQueueIndexesInferred(t *testing.T) {
func TestQueueAutoIndexJobsInferred(t *testing.T) {
mockDBStore := NewMockStore()
mockDBStore.InsertIndexesFunc.SetDefaultHook(func(ctx context.Context, indexes []uploadsshared.Index) ([]uploadsshared.Index, error) {
return indexes, nil
mockDBStore.InsertJobsFunc.SetDefaultHook(func(ctx context.Context, jobs []uploadsshared.AutoIndexJob) ([]uploadsshared.AutoIndexJob, error) {
return jobs, nil
})
mockDBStore.RepositoryExceptionsFunc.SetDefaultReturn(true, true, nil)
@ -342,9 +343,9 @@ func TestQueueIndexesInferred(t *testing.T) {
inferenceService.InferIndexJobsFunc.SetDefaultHook(func(ctx context.Context, rn api.RepoName, s1, s2 string) (*shared.InferenceResult, error) {
switch string(rn) {
case "r42":
return &shared.InferenceResult{IndexJobs: []config.IndexJob{{Root: ""}}}, nil
return &shared.InferenceResult{IndexJobs: []config.AutoIndexJobSpec{{Root: ""}}}, nil
case "r44":
return &shared.InferenceResult{IndexJobs: []config.IndexJob{{Root: "a"}, {Root: "b"}}}, nil
return &shared.InferenceResult{IndexJobs: []config.AutoIndexJobSpec{{Root: "a"}, {Root: "b"}}}, nil
default:
return &shared.InferenceResult{IndexJobs: nil}, nil
}
@ -359,13 +360,13 @@ func TestQueueIndexesInferred(t *testing.T) {
)
for _, id := range []int{41, 42, 43, 44} {
if _, err := service.QueueIndexes(context.Background(), id, "HEAD", "", false, false); err != nil {
if _, err := service.QueueAutoIndexJobs(context.Background(), id, "HEAD", "", false, false); err != nil {
t.Fatalf("unexpected error performing update: %s", err)
}
}
indexRoots := map[int][]string{}
for _, call := range mockDBStore.InsertIndexesFunc.History() {
for _, call := range mockDBStore.InsertJobsFunc.History() {
for _, index := range call.Result0 {
indexRoots[index.RepositoryID] = append(indexRoots[index.RepositoryID], index.Root)
}
@ -394,10 +395,10 @@ func TestQueueIndexesInferred(t *testing.T) {
}
}
func TestQueueIndexesForPackage(t *testing.T) {
func TestQueueAutoIndexJobsForPackage(t *testing.T) {
mockDBStore := NewMockStore()
mockDBStore.InsertIndexesFunc.SetDefaultHook(func(ctx context.Context, indexes []uploadsshared.Index) ([]uploadsshared.Index, error) {
return indexes, nil
mockDBStore.InsertJobsFunc.SetDefaultHook(func(ctx context.Context, jobs []uploadsshared.AutoIndexJob) ([]uploadsshared.AutoIndexJob, error) {
return jobs, nil
})
mockDBStore.IsQueuedFunc.SetDefaultReturn(false, nil)
mockDBStore.RepositoryExceptionsFunc.SetDefaultReturn(true, true, nil)
@ -414,7 +415,7 @@ func TestQueueIndexesForPackage(t *testing.T) {
inferenceService := NewMockInferenceService()
inferenceService.InferIndexJobsFunc.SetDefaultHook(func(ctx context.Context, rn api.RepoName, s1, s2 string) (*shared.InferenceResult, error) {
return &shared.InferenceResult{
IndexJobs: []config.IndexJob{
IndexJobs: []config.AutoIndexJobSpec{
{
Root: "",
Steps: []config.DockerStep{
@ -446,7 +447,7 @@ func TestQueueIndexesForPackage(t *testing.T) {
gitserverClient,
)
_ = service.QueueIndexesForPackage(context.Background(), dependencies.MinimialVersionedPackageRepo{
_ = service.QueueAutoIndexJobsForPackage(context.Background(), dependencies.MinimialVersionedPackageRepo{
Scheme: "gomod",
Name: "https://github.com/sourcegraph/sourcegraph",
Version: "v3.26.0-4e7eeb0f8a96",
@ -466,15 +467,15 @@ func TestQueueIndexesForPackage(t *testing.T) {
}
}
if len(mockDBStore.InsertIndexesFunc.History()) != 1 {
t.Errorf("unexpected number of calls to InsertIndexes. want=%d have=%d", 1, len(mockDBStore.InsertIndexesFunc.History()))
if len(mockDBStore.InsertJobsFunc.History()) != 1 {
t.Errorf("unexpected number of calls to InsertJobs. want=%d have=%d", 1, len(mockDBStore.InsertJobsFunc.History()))
} else {
var indexes []uploadsshared.Index
for _, call := range mockDBStore.InsertIndexesFunc.History() {
indexes = append(indexes, call.Result0...)
var jobs []uploadsshared.AutoIndexJob
for _, call := range mockDBStore.InsertJobsFunc.History() {
jobs = append(jobs, call.Result0...)
}
expectedIndexes := []uploadsshared.Index{
expectedIndexes := []uploadsshared.AutoIndexJob{
{
RepositoryID: 42,
Commit: "c42",
@ -489,7 +490,7 @@ func TestQueueIndexesForPackage(t *testing.T) {
IndexerArgs: []string{"lsif-go", "--no-animation"},
},
}
if diff := cmp.Diff(expectedIndexes, indexes); diff != "" {
if diff := cmp.Diff(expectedIndexes, jobs, cmpopts.EquateEmpty()); diff != "" {
t.Errorf("unexpected indexes (-want +got):\n%s", diff)
}
}

View File

@ -10,6 +10,6 @@ type IndexConfiguration struct {
}
type InferenceResult struct {
IndexJobs []config.IndexJob
IndexJobs []config.AutoIndexJobSpec
InferenceOutput string
}

View File

@ -17,7 +17,7 @@ type AutoIndexingService interface {
UpdateIndexConfigurationByRepositoryID(ctx context.Context, repositoryID int, data []byte) error
// Inference
QueueIndexes(ctx context.Context, repositoryID int, rev, configuration string, force bool, bypassLimit bool) ([]uploadsshared.Index, error)
QueueAutoIndexJobs(ctx context.Context, repositoryID int, rev, configuration string, force bool, bypassLimit bool) ([]uploadsshared.AutoIndexJob, error)
InferIndexConfiguration(ctx context.Context, repositoryID int, commit string, localOverrideScript string, bypassLimit bool) (*shared.InferenceResult, error)
InferIndexJobsFromRepositoryStructure(ctx context.Context, repositoryID int, commit string, localOverrideScript string, bypassLimit bool) (*shared.InferenceResult, error)
}

View File

@ -14,7 +14,7 @@ type rootResolver struct {
autoindexSvc AutoIndexingService
siteAdminChecker sharedresolvers.SiteAdminChecker
uploadLoaderFactory graphql.UploadLoaderFactory
indexLoaderFactory graphql.IndexLoaderFactory
autoIndexJobLoaderFactory graphql.AutoIndexJobLoaderFactory
locationResolverFactory *gitresolvers.CachedLocationResolverFactory
preciseIndexResolverFactory *graphql.PreciseIndexResolverFactory
operations *operations
@ -25,7 +25,7 @@ func NewRootResolver(
autoindexSvc AutoIndexingService,
siteAdminChecker sharedresolvers.SiteAdminChecker,
uploadLoaderFactory graphql.UploadLoaderFactory,
indexLoaderFactory graphql.IndexLoaderFactory,
autoIndexJobLoaderFactory graphql.AutoIndexJobLoaderFactory,
locationResolverFactory *gitresolvers.CachedLocationResolverFactory,
preciseIndexResolverFactory *graphql.PreciseIndexResolverFactory,
) resolverstubs.AutoindexingServiceResolver {
@ -33,7 +33,7 @@ func NewRootResolver(
autoindexSvc: autoindexSvc,
siteAdminChecker: siteAdminChecker,
uploadLoaderFactory: uploadLoaderFactory,
indexLoaderFactory: indexLoaderFactory,
autoIndexJobLoaderFactory: autoIndexJobLoaderFactory,
locationResolverFactory: locationResolverFactory,
preciseIndexResolverFactory: preciseIndexResolverFactory,
operations: newOperations(observationCtx),

View File

@ -113,7 +113,7 @@ func (r *indexConfigurationResolver) InferredConfiguration(ctx context.Context)
return resolver, err
}
marshaled, err := config.MarshalJSON(config.IndexConfiguration{IndexJobs: result.IndexJobs})
marshaled, err := config.MarshalJSON(config.AutoIndexJobSpecList{JobSpecs: result.IndexJobs})
if err != nil {
return resolver, err
}

View File

@ -54,7 +54,7 @@ func (r *rootResolver) InferAutoIndexJobsForRepo(ctx context.Context, args *reso
return nil, err
}
jobResolvers, err := newDescriptionResolvers(r.siteAdminChecker, &config.IndexConfiguration{IndexJobs: result.IndexJobs})
jobResolvers, err := newDescriptionResolvers(r.siteAdminChecker, &config.AutoIndexJobSpecList{JobSpecs: result.IndexJobs})
if err != nil {
return nil, err
}
@ -96,13 +96,13 @@ func (r *rootResolver) QueueAutoIndexJobsForRepo(ctx context.Context, args *reso
configuration = *args.Configuration
}
indexes, err := r.autoindexSvc.QueueIndexes(ctx, int(repositoryID), rev, configuration, true, true)
indexes, err := r.autoindexSvc.QueueAutoIndexJobs(ctx, int(repositoryID), rev, configuration, true, true)
if err != nil {
return nil, err
}
// Create index loader with data we already have
indexLoader := r.indexLoaderFactory.CreateWithInitialData(indexes)
// Create job loader with data we already have
autoIndexJobLoader := r.autoIndexJobLoaderFactory.CreateWithInitialData(indexes)
// Pre-submit associated upload ids for subsequent loading
uploadLoader := r.uploadLoaderFactory.Create()
@ -114,7 +114,7 @@ func (r *rootResolver) QueueAutoIndexJobsForRepo(ctx context.Context, args *reso
resolvers := make([]resolverstubs.PreciseIndexResolver, 0, len(indexes))
for _, index := range indexes {
index := index
resolver, err := r.preciseIndexResolverFactory.Create(ctx, uploadLoader, indexLoader, locationResolver, traceErrs, nil, &index)
resolver, err := r.preciseIndexResolverFactory.Create(ctx, uploadLoader, autoIndexJobLoader, locationResolver, traceErrs, nil, &index)
if err != nil {
return nil, err
}
@ -146,13 +146,13 @@ func (r *inferAutoIndexJobsResultResolver) InferenceOutput() string {
type autoIndexJobDescriptionResolver struct {
siteAdminChecker sharedresolvers.SiteAdminChecker
indexJob config.IndexJob
indexJob config.AutoIndexJobSpec
steps []uploadsshared.DockerStep
}
func newDescriptionResolvers(siteAdminChecker sharedresolvers.SiteAdminChecker, indexConfiguration *config.IndexConfiguration) ([]resolverstubs.AutoIndexJobDescriptionResolver, error) {
func newDescriptionResolvers(siteAdminChecker sharedresolvers.SiteAdminChecker, indexConfiguration *config.AutoIndexJobSpecList) ([]resolverstubs.AutoIndexJobDescriptionResolver, error) {
var resolvers []resolverstubs.AutoIndexJobDescriptionResolver
for _, indexJob := range indexConfiguration.IndexJobs {
for _, indexJob := range indexConfiguration.JobSpecs {
var steps []uploadsshared.DockerStep
for _, step := range indexJob.Steps {
steps = append(steps, uploadsshared.DockerStep{
@ -184,8 +184,8 @@ func (r *autoIndexJobDescriptionResolver) ComparisonKey() string {
return comparisonKey(r.indexJob.Root, r.Indexer().Name())
}
func (r *autoIndexJobDescriptionResolver) Steps() resolverstubs.IndexStepsResolver {
return uploadsgraphql.NewIndexStepsResolver(r.siteAdminChecker, uploadsshared.Index{
func (r *autoIndexJobDescriptionResolver) Steps() resolverstubs.AutoIndexJobStepsResolver {
return uploadsgraphql.NewAutoIndexJobStepsResolver(r.siteAdminChecker, uploadsshared.AutoIndexJob{
DockerSteps: r.steps,
LocalSteps: r.indexJob.LocalSteps,
Root: r.indexJob.Root,

View File

@ -38,7 +38,7 @@ type rootResolver struct {
siteAdminChecker sharedresolvers.SiteAdminChecker
repoStore database.RepoStore
uploadLoaderFactory uploadsgraphql.UploadLoaderFactory
indexLoaderFactory uploadsgraphql.IndexLoaderFactory
autoIndexJobLoaderFactory uploadsgraphql.AutoIndexJobLoaderFactory
locationResolverFactory *gitresolvers.CachedLocationResolverFactory
hunkCache codenav.HunkCache
indexResolverFactory *uploadsgraphql.PreciseIndexResolverFactory
@ -54,7 +54,7 @@ func NewRootResolver(
siteAdminChecker sharedresolvers.SiteAdminChecker,
repoStore database.RepoStore,
uploadLoaderFactory uploadsgraphql.UploadLoaderFactory,
indexLoaderFactory uploadsgraphql.IndexLoaderFactory,
autoIndexJobLoaderFactory uploadsgraphql.AutoIndexJobLoaderFactory,
indexResolverFactory *uploadsgraphql.PreciseIndexResolverFactory,
locationResolverFactory *gitresolvers.CachedLocationResolverFactory,
maxIndexSearch int,
@ -72,7 +72,7 @@ func NewRootResolver(
siteAdminChecker: siteAdminChecker,
repoStore: repoStore,
uploadLoaderFactory: uploadLoaderFactory,
indexLoaderFactory: indexLoaderFactory,
autoIndexJobLoaderFactory: autoIndexJobLoaderFactory,
indexResolverFactory: indexResolverFactory,
locationResolverFactory: locationResolverFactory,
hunkCache: hunkCache,
@ -110,7 +110,7 @@ func (r *rootResolver) GitBlobLSIFData(ctx context.Context, args *resolverstubs.
r.indexResolverFactory,
reqState,
r.uploadLoaderFactory.Create(),
r.indexLoaderFactory.Create(),
r.autoIndexJobLoaderFactory.Create(),
r.locationResolverFactory.Create(),
r.operations,
), nil
@ -332,7 +332,7 @@ type gitBlobLSIFDataResolver struct {
indexResolverFactory *uploadsgraphql.PreciseIndexResolverFactory
requestState codenav.RequestState
uploadLoader uploadsgraphql.UploadLoader
indexLoader uploadsgraphql.IndexLoader
autoIndexJobLoader uploadsgraphql.AutoIndexJobLoader
locationResolver *gitresolvers.CachedLocationResolver
operations *operations
}
@ -345,14 +345,14 @@ func newGitBlobLSIFDataResolver(
indexResolverFactory *uploadsgraphql.PreciseIndexResolverFactory,
requestState codenav.RequestState,
uploadLoader uploadsgraphql.UploadLoader,
indexLoader uploadsgraphql.IndexLoader,
autoIndexJobLoader uploadsgraphql.AutoIndexJobLoader,
locationResolver *gitresolvers.CachedLocationResolver,
operations *operations,
) resolverstubs.GitBlobLSIFDataResolver {
return &gitBlobLSIFDataResolver{
codeNavSvc: codeNavSvc,
uploadLoader: uploadLoader,
indexLoader: indexLoader,
autoIndexJobLoader: autoIndexJobLoader,
indexResolverFactory: indexResolverFactory,
requestState: requestState,
locationResolver: locationResolver,
@ -383,7 +383,7 @@ func (r *gitBlobLSIFDataResolver) VisibleIndexes(ctx context.Context) (_ *[]reso
resolver, err := r.indexResolverFactory.Create(
ctx,
r.uploadLoader,
r.indexLoader,
r.autoIndexJobLoader,
r.locationResolver,
traceErrs,
&upload,

View File

@ -15,7 +15,7 @@ type ExternalServiceStore interface {
}
type AutoIndexingService interface {
QueueIndexesForPackage(ctx context.Context, pkg shared.MinimialVersionedPackageRepo) (err error)
QueueAutoIndexJobsForPackage(ctx context.Context, pkg shared.MinimialVersionedPackageRepo) (err error)
}
type DependenciesService interface {

View File

@ -20,9 +20,10 @@ import (
// github.com/sourcegraph/sourcegraph/internal/codeintel/dependencies/internal/background)
// used for unit testing.
type MockAutoIndexingService struct {
// QueueIndexesForPackageFunc is an instance of a mock function object
// controlling the behavior of the method QueueIndexesForPackage.
QueueIndexesForPackageFunc *AutoIndexingServiceQueueIndexesForPackageFunc
// QueueAutoIndexJobsForPackageFunc is an instance of a mock function
// object controlling the behavior of the method
// QueueAutoIndexJobsForPackage.
QueueAutoIndexJobsForPackageFunc *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc
}
// NewMockAutoIndexingService creates a new mock of the AutoIndexingService
@ -30,7 +31,7 @@ type MockAutoIndexingService struct {
// overwritten.
func NewMockAutoIndexingService() *MockAutoIndexingService {
return &MockAutoIndexingService{
QueueIndexesForPackageFunc: &AutoIndexingServiceQueueIndexesForPackageFunc{
QueueAutoIndexJobsForPackageFunc: &AutoIndexingServiceQueueAutoIndexJobsForPackageFunc{
defaultHook: func(context.Context, shared.MinimialVersionedPackageRepo) (r0 error) {
return
},
@ -43,9 +44,9 @@ func NewMockAutoIndexingService() *MockAutoIndexingService {
// overwritten.
func NewStrictMockAutoIndexingService() *MockAutoIndexingService {
return &MockAutoIndexingService{
QueueIndexesForPackageFunc: &AutoIndexingServiceQueueIndexesForPackageFunc{
QueueAutoIndexJobsForPackageFunc: &AutoIndexingServiceQueueAutoIndexJobsForPackageFunc{
defaultHook: func(context.Context, shared.MinimialVersionedPackageRepo) error {
panic("unexpected invocation of MockAutoIndexingService.QueueIndexesForPackage")
panic("unexpected invocation of MockAutoIndexingService.QueueAutoIndexJobsForPackage")
},
},
}
@ -56,43 +57,43 @@ func NewStrictMockAutoIndexingService() *MockAutoIndexingService {
// implementation, unless overwritten.
func NewMockAutoIndexingServiceFrom(i AutoIndexingService) *MockAutoIndexingService {
return &MockAutoIndexingService{
QueueIndexesForPackageFunc: &AutoIndexingServiceQueueIndexesForPackageFunc{
defaultHook: i.QueueIndexesForPackage,
QueueAutoIndexJobsForPackageFunc: &AutoIndexingServiceQueueAutoIndexJobsForPackageFunc{
defaultHook: i.QueueAutoIndexJobsForPackage,
},
}
}
// AutoIndexingServiceQueueIndexesForPackageFunc describes the behavior when
// the QueueIndexesForPackage method of the parent MockAutoIndexingService
// instance is invoked.
type AutoIndexingServiceQueueIndexesForPackageFunc struct {
// AutoIndexingServiceQueueAutoIndexJobsForPackageFunc describes the
// behavior when the QueueAutoIndexJobsForPackage method of the parent
// MockAutoIndexingService instance is invoked.
type AutoIndexingServiceQueueAutoIndexJobsForPackageFunc struct {
defaultHook func(context.Context, shared.MinimialVersionedPackageRepo) error
hooks []func(context.Context, shared.MinimialVersionedPackageRepo) error
history []AutoIndexingServiceQueueIndexesForPackageFuncCall
history []AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall
mutex sync.Mutex
}
// QueueIndexesForPackage delegates to the next hook function in the queue
// and stores the parameter and result values of this invocation.
func (m *MockAutoIndexingService) QueueIndexesForPackage(v0 context.Context, v1 shared.MinimialVersionedPackageRepo) error {
r0 := m.QueueIndexesForPackageFunc.nextHook()(v0, v1)
m.QueueIndexesForPackageFunc.appendCall(AutoIndexingServiceQueueIndexesForPackageFuncCall{v0, v1, r0})
// QueueAutoIndexJobsForPackage delegates to the next hook function in the
// queue and stores the parameter and result values of this invocation.
func (m *MockAutoIndexingService) QueueAutoIndexJobsForPackage(v0 context.Context, v1 shared.MinimialVersionedPackageRepo) error {
r0 := m.QueueAutoIndexJobsForPackageFunc.nextHook()(v0, v1)
m.QueueAutoIndexJobsForPackageFunc.appendCall(AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall{v0, v1, r0})
return r0
}
// SetDefaultHook sets function that is called when the
// QueueIndexesForPackage method of the parent MockAutoIndexingService
// QueueAutoIndexJobsForPackage method of the parent MockAutoIndexingService
// instance is invoked and the hook queue is empty.
func (f *AutoIndexingServiceQueueIndexesForPackageFunc) SetDefaultHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
func (f *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc) SetDefaultHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// QueueIndexesForPackage method of the parent MockAutoIndexingService
// QueueAutoIndexJobsForPackage method of the parent MockAutoIndexingService
// instance invokes the hook at the front of the queue and discards it.
// After the queue is empty, the default hook function is invoked for any
// future action.
func (f *AutoIndexingServiceQueueIndexesForPackageFunc) PushHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
func (f *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc) PushHook(hook func(context.Context, shared.MinimialVersionedPackageRepo) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -100,20 +101,20 @@ func (f *AutoIndexingServiceQueueIndexesForPackageFunc) PushHook(hook func(conte
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *AutoIndexingServiceQueueIndexesForPackageFunc) SetDefaultReturn(r0 error) {
func (f *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(context.Context, shared.MinimialVersionedPackageRepo) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *AutoIndexingServiceQueueIndexesForPackageFunc) PushReturn(r0 error) {
func (f *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc) PushReturn(r0 error) {
f.PushHook(func(context.Context, shared.MinimialVersionedPackageRepo) error {
return r0
})
}
func (f *AutoIndexingServiceQueueIndexesForPackageFunc) nextHook() func(context.Context, shared.MinimialVersionedPackageRepo) error {
func (f *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc) nextHook() func(context.Context, shared.MinimialVersionedPackageRepo) error {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -126,28 +127,28 @@ func (f *AutoIndexingServiceQueueIndexesForPackageFunc) nextHook() func(context.
return hook
}
func (f *AutoIndexingServiceQueueIndexesForPackageFunc) appendCall(r0 AutoIndexingServiceQueueIndexesForPackageFuncCall) {
func (f *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc) appendCall(r0 AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of
// AutoIndexingServiceQueueIndexesForPackageFuncCall objects describing the
// invocations of this function.
func (f *AutoIndexingServiceQueueIndexesForPackageFunc) History() []AutoIndexingServiceQueueIndexesForPackageFuncCall {
// AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall objects
// describing the invocations of this function.
func (f *AutoIndexingServiceQueueAutoIndexJobsForPackageFunc) History() []AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall {
f.mutex.Lock()
history := make([]AutoIndexingServiceQueueIndexesForPackageFuncCall, len(f.history))
history := make([]AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// AutoIndexingServiceQueueIndexesForPackageFuncCall is an object that
// describes an invocation of method QueueIndexesForPackage on an instance
// of MockAutoIndexingService.
type AutoIndexingServiceQueueIndexesForPackageFuncCall struct {
// AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall is an object that
// describes an invocation of method QueueAutoIndexJobsForPackage on an
// instance of MockAutoIndexingService.
type AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
@ -161,13 +162,13 @@ type AutoIndexingServiceQueueIndexesForPackageFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c AutoIndexingServiceQueueIndexesForPackageFuncCall) Args() []interface{} {
func (c AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c AutoIndexingServiceQueueIndexesForPackageFuncCall) Results() []interface{} {
func (c AutoIndexingServiceQueueAutoIndexJobsForPackageFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}

View File

@ -83,7 +83,7 @@ type PreciseIndexResolver interface {
ProcessingStartedAt() *gqlutil.DateTime
IndexingFinishedAt() *gqlutil.DateTime
ProcessingFinishedAt() *gqlutil.DateTime
Steps() IndexStepsResolver
Steps() AutoIndexJobStepsResolver
Failure() *string
PlaceInQueue() *int32
ShouldReindex(ctx context.Context) bool
@ -129,7 +129,7 @@ type AutoIndexJobDescriptionResolver interface {
Root() string
Indexer() CodeIntelIndexerResolver
ComparisonKey() string
Steps() IndexStepsResolver
Steps() AutoIndexJobStepsResolver
}
type CodeIntelIndexerResolver interface {
@ -139,7 +139,7 @@ type CodeIntelIndexerResolver interface {
ImageName() *string
}
type IndexStepsResolver interface {
type AutoIndexJobStepsResolver interface {
Setup() []ExecutionLogEntryResolver
PreIndex() []PreIndexStepResolver
Index() IndexStepResolver

View File

@ -17,7 +17,7 @@ import (
func TestSyntacticIndexingEnqueuer(t *testing.T) {
/*
The purpose of this test is to verify that methods InsertIndexes and IsQueued
The purpose of this test is to verify that methods InsertJobs and IsQueued
correctly interact with each other, and that the records inserted using those methods
are valid from the point of view of the DB worker interface
*/

View File

@ -8,6 +8,7 @@ import (
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database"
@ -113,7 +114,7 @@ func (s *syntacticIndexingJobStoreImpl) InsertIndexingJobs(ctx context.Context,
for _, id := range insertedJobIds {
jobLookupQueries = append(jobLookupQueries, sqlf.Sprintf("%d", id))
}
indexingJobs, err = scanIndexes(tx.Query(ctx, sqlf.Sprintf(getIndexesByIDsQuery, sqlf.Join(jobLookupQueries, ", "), authzConds)))
indexingJobs, err = scanJobs(tx.Query(ctx, sqlf.Sprintf(getIndexesByIDsQuery, sqlf.Join(jobLookupQueries, ", "), authzConds)))
return err
})
@ -173,7 +174,7 @@ WHERE u.id IN (%s) and %s
ORDER BY u.id
`
func scanIndex(s dbutil.Scanner) (index SyntacticIndexingJob, err error) {
func scanJob(s dbutil.Scanner) (index SyntacticIndexingJob, err error) {
if err := s.Scan(
&index.ID,
&index.Commit,
@ -196,4 +197,4 @@ func scanIndex(s dbutil.Scanner) (index SyntacticIndexingJob, err error) {
return index, nil
}
var scanIndexes = basestore.NewSliceScanner(scanIndex)
var scanJobs = basestore.NewSliceScanner(scanJob)

View File

@ -5,13 +5,14 @@ import (
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/syntactic_indexing/jobstore"
testutils "github.com/sourcegraph/sourcegraph/internal/codeintel/syntactic_indexing/testkit"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/stretchr/testify/require"
)
func TestSyntacticIndexingStoreDequeue(t *testing.T) {
@ -150,7 +151,7 @@ func TestSyntacticIndexingStoreEnqueue(t *testing.T) {
},
})
// Assertions below verify the interactions between InsertIndexes and IsQueued
// Assertions below verify the interactions between InsertJobs and IsQueued
tacosIsQueued, err := jobStore.IsQueued(ctx, tacosRepoId, tacosCommit)
require.NoError(t, err)
require.True(t, tacosIsQueued)
@ -163,7 +164,7 @@ func TestSyntacticIndexingStoreEnqueue(t *testing.T) {
require.NoError(t, err)
require.True(t, mangosIsQueued)
// Assertions below verify that records inserted by InsertIndexes are
// Assertions below verify that records inserted by InsertJobs are
// still visible by DB Worker interface
afterCount, _ := store.QueuedCount(ctx, true)

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -24,7 +24,7 @@ func NewUnknownRepositoryJanitor(
Interval: config.Interval,
Metrics: background.NewJanitorMetrics(observationCtx, name),
CleanupFunc: func(ctx context.Context) (numRecordsScanned, numRecordsAltered int, _ error) {
return store.DeleteIndexesWithoutRepository(ctx, time.Now())
return store.DeleteAutoIndexJobsWithoutRepository(ctx, time.Now())
},
})
}

File diff suppressed because it is too large Load Diff

View File

@ -337,10 +337,10 @@ delete_indexes AS (
SELECT COUNT(*) FROM delete_indexes
`
// DeleteIndexesWithoutRepository deletes indexes associated with repositories that were deleted at least
// DeleteAutoIndexJobsWithoutRepository deletes indexes associated with repositories that were deleted at least
// DeletedRepositoryGracePeriod ago. This returns the repository identifier mapped to the number of indexes
// that were removed for that repository.
func (s *store) DeleteIndexesWithoutRepository(ctx context.Context, now time.Time) (totalCount int, deletedCount int, err error) {
func (s *store) DeleteAutoIndexJobsWithoutRepository(ctx context.Context, now time.Time) (totalCount int, deletedCount int, err error) {
ctx, trace, endObservation := s.operations.deleteIndexesWithoutRepository.With(ctx, &err, observation.Args{})
defer endObservation(1, observation.Args{})

View File

@ -231,12 +231,12 @@ func TestProcessStaleSourcedCommits(t *testing.T) {
ctx := context.Background()
now := time.Unix(1587396557, 0).UTC()
insertIndexes(t, db,
uploadsshared.Index{ID: 1, RepositoryID: 50, Commit: makeCommit(1)},
uploadsshared.Index{ID: 2, RepositoryID: 50, Commit: makeCommit(2)},
uploadsshared.Index{ID: 3, RepositoryID: 50, Commit: makeCommit(3)},
uploadsshared.Index{ID: 4, RepositoryID: 51, Commit: makeCommit(6)},
uploadsshared.Index{ID: 5, RepositoryID: 52, Commit: makeCommit(7)},
insertAutoIndexJobs(t, db,
uploadsshared.AutoIndexJob{ID: 1, RepositoryID: 50, Commit: makeCommit(1)},
uploadsshared.AutoIndexJob{ID: 2, RepositoryID: 50, Commit: makeCommit(2)},
uploadsshared.AutoIndexJob{ID: 3, RepositoryID: 50, Commit: makeCommit(3)},
uploadsshared.AutoIndexJob{ID: 4, RepositoryID: 51, Commit: makeCommit(6)},
uploadsshared.AutoIndexJob{ID: 5, RepositoryID: 52, Commit: makeCommit(7)},
)
const (
@ -526,18 +526,18 @@ func TestDeleteSourcedCommits(t *testing.T) {
}
}
func TestDeleteIndexesWithoutRepository(t *testing.T) {
func TestDeleteAutoIndexJobsWithoutRepository(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
var indexes []uploadsshared.Index
var jobs []uploadsshared.AutoIndexJob
for i := range 25 {
for range 10 + i {
indexes = append(indexes, uploadsshared.Index{ID: len(indexes) + 1, RepositoryID: 50 + i})
jobs = append(jobs, uploadsshared.AutoIndexJob{ID: len(jobs) + 1, RepositoryID: 50 + i})
}
}
insertIndexes(t, db, indexes...)
insertAutoIndexJobs(t, db, jobs...)
t1 := time.Unix(1587396557, 0).UTC()
t2 := t1.Add(-deletedRepositoryGracePeriod + time.Minute)
@ -556,7 +556,7 @@ func TestDeleteIndexesWithoutRepository(t *testing.T) {
}
}
_, count, err := store.DeleteIndexesWithoutRepository(context.Background(), t1)
_, count, err := store.DeleteAutoIndexJobsWithoutRepository(context.Background(), t1)
if err != nil {
t.Fatalf("unexpected error deleting indexes: %s", err)
}
@ -573,26 +573,26 @@ func TestExpireFailedRecords(t *testing.T) {
ctx := context.Background()
now := time.Unix(1587396557, 0).UTC()
insertIndexes(t, db,
insertAutoIndexJobs(t, db,
// young failures (none removed)
uploadsshared.Index{ID: 1, RepositoryID: 50, Commit: makeCommit(1), FinishedAt: pointers.Ptr(now.Add(-time.Minute * 10)), State: "failed"},
uploadsshared.Index{ID: 2, RepositoryID: 50, Commit: makeCommit(2), FinishedAt: pointers.Ptr(now.Add(-time.Minute * 20)), State: "failed"},
uploadsshared.Index{ID: 3, RepositoryID: 50, Commit: makeCommit(3), FinishedAt: pointers.Ptr(now.Add(-time.Minute * 20)), State: "failed"},
uploadsshared.AutoIndexJob{ID: 1, RepositoryID: 50, Commit: makeCommit(1), FinishedAt: pointers.Ptr(now.Add(-time.Minute * 10)), State: "failed"},
uploadsshared.AutoIndexJob{ID: 2, RepositoryID: 50, Commit: makeCommit(2), FinishedAt: pointers.Ptr(now.Add(-time.Minute * 20)), State: "failed"},
uploadsshared.AutoIndexJob{ID: 3, RepositoryID: 50, Commit: makeCommit(3), FinishedAt: pointers.Ptr(now.Add(-time.Minute * 20)), State: "failed"},
// failures prior to a success (both removed)
uploadsshared.Index{ID: 4, RepositoryID: 50, Commit: makeCommit(4), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 10)), Root: "foo", State: "completed"},
uploadsshared.Index{ID: 5, RepositoryID: 50, Commit: makeCommit(5), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 12)), Root: "foo", State: "failed"},
uploadsshared.Index{ID: 6, RepositoryID: 50, Commit: makeCommit(6), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 14)), Root: "foo", State: "failed"},
uploadsshared.AutoIndexJob{ID: 4, RepositoryID: 50, Commit: makeCommit(4), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 10)), Root: "foo", State: "completed"},
uploadsshared.AutoIndexJob{ID: 5, RepositoryID: 50, Commit: makeCommit(5), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 12)), Root: "foo", State: "failed"},
uploadsshared.AutoIndexJob{ID: 6, RepositoryID: 50, Commit: makeCommit(6), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 14)), Root: "foo", State: "failed"},
// old failures (one is left for debugging)
uploadsshared.Index{ID: 7, RepositoryID: 51, Commit: makeCommit(7), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 3)), State: "failed"},
uploadsshared.Index{ID: 8, RepositoryID: 51, Commit: makeCommit(8), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 4)), State: "failed"},
uploadsshared.Index{ID: 9, RepositoryID: 51, Commit: makeCommit(9), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 5)), State: "failed"},
uploadsshared.AutoIndexJob{ID: 7, RepositoryID: 51, Commit: makeCommit(7), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 3)), State: "failed"},
uploadsshared.AutoIndexJob{ID: 8, RepositoryID: 51, Commit: makeCommit(8), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 4)), State: "failed"},
uploadsshared.AutoIndexJob{ID: 9, RepositoryID: 51, Commit: makeCommit(9), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 5)), State: "failed"},
// failures prior to queued uploads (one removed; queued does not reset failures)
uploadsshared.Index{ID: 10, RepositoryID: 52, Commit: makeCommit(10), Root: "foo", State: "queued"},
uploadsshared.Index{ID: 11, RepositoryID: 52, Commit: makeCommit(11), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 12)), Root: "foo", State: "failed"},
uploadsshared.Index{ID: 12, RepositoryID: 52, Commit: makeCommit(12), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 14)), Root: "foo", State: "failed"},
uploadsshared.AutoIndexJob{ID: 10, RepositoryID: 52, Commit: makeCommit(10), Root: "foo", State: "queued"},
uploadsshared.AutoIndexJob{ID: 11, RepositoryID: 52, Commit: makeCommit(11), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 12)), Root: "foo", State: "failed"},
uploadsshared.AutoIndexJob{ID: 12, RepositoryID: 52, Commit: makeCommit(12), FinishedAt: pointers.Ptr(now.Add(-time.Hour * 14)), Root: "foo", State: "failed"},
)
if _, _, err := store.ExpireFailedRecords(ctx, 100, time.Hour, now); err != nil {

View File

@ -16,9 +16,9 @@ import (
"github.com/sourcegraph/sourcegraph/internal/observation"
)
// GetIndexes returns a list of indexes and the total count of records matching the given conditions.
func (s *store) GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) (_ []shared.Index, _ int, err error) {
ctx, trace, endObservation := s.operations.getIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
// GetAutoIndexJobs returns a list of indexes and the total count of records matching the given conditions.
func (s *store) GetAutoIndexJobs(ctx context.Context, opts shared.GetAutoIndexJobsOptions) (_ []shared.AutoIndexJob, _ int, err error) {
ctx, trace, endObservation := s.operations.getAutoIndexJobs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.String("state", opts.State),
attribute.String("term", opts.Term),
@ -53,7 +53,7 @@ func (s *store) GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) (
conds = append(conds, sqlf.Sprintf("(%s)", sqlf.Join(indexerConds, " OR ")))
}
var a []shared.Index
var a []shared.AutoIndexJob
var b int
err = s.withTransaction(ctx, func(tx *store) error {
authzConds, err := database.AuthzQueryConds(ctx, database.NewDBWith(s.logger, tx.db))
@ -62,7 +62,7 @@ func (s *store) GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) (
}
conds = append(conds, authzConds)
indexes, err := scanIndexes(tx.db.Query(ctx, sqlf.Sprintf(
indexes, err := scanJobs(tx.db.Query(ctx, sqlf.Sprintf(
getIndexesSelectQuery,
sqlf.Join(conds, " AND "),
opts.Limit,
@ -71,7 +71,7 @@ func (s *store) GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) (
if err != nil {
return err
}
trace.AddEvent("scanIndexesWithCount",
trace.AddEvent("scanJobsWithCount",
attribute.Int("numIndexes", len(indexes)))
totalCount, _, err := basestore.ScanFirstInt(tx.db.Query(ctx, sqlf.Sprintf(
@ -81,7 +81,7 @@ func (s *store) GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) (
if err != nil {
return err
}
trace.AddEvent("scanIndexesWithCount",
trace.AddEvent("scanJobsWithCount",
attribute.Int("totalCount", totalCount),
)
@ -141,13 +141,13 @@ WHERE
%s
`
// scanIndexes scans a slice of indexes from the return value of `*Store.query`.
var scanIndexes = basestore.NewSliceScanner(scanIndex)
// scanJobs scans a slice of indexes from the return value of `*Store.query`.
var scanJobs = basestore.NewSliceScanner(scanJob)
// scanFirstIndex scans a slice of indexes from the return value of `*Store.query` and returns the first.
var scanFirstIndex = basestore.NewFirstScanner(scanIndex)
var scanFirstIndex = basestore.NewFirstScanner(scanJob)
func scanIndex(s dbutil.Scanner) (index shared.Index, err error) {
func scanJob(s dbutil.Scanner) (index shared.AutoIndexJob, err error) {
var executionLogs []executor.ExecutionLogEntry
if err := s.Scan(
&index.ID,
@ -183,16 +183,16 @@ func scanIndex(s dbutil.Scanner) (index shared.Index, err error) {
return index, nil
}
// GetIndexByID returns an index by its identifier and boolean flag indicating its existence.
func (s *store) GetIndexByID(ctx context.Context, id int) (_ shared.Index, _ bool, err error) {
ctx, _, endObservation := s.operations.getIndexByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
// GetAutoIndexJobByID returns an index by its identifier and boolean flag indicating its existence.
func (s *store) GetAutoIndexJobByID(ctx context.Context, id int) (_ shared.AutoIndexJob, _ bool, err error) {
ctx, _, endObservation := s.operations.getAutoIndexJobByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
authzConds, err := database.AuthzQueryConds(ctx, database.NewDBWith(s.logger, s.db))
if err != nil {
return shared.Index{}, false, err
return shared.AutoIndexJob{}, false, err
}
return scanFirstIndex(s.db.Query(ctx, sqlf.Sprintf(getIndexByIDQuery, id, authzConds)))
@ -231,10 +231,10 @@ JOIN repo ON repo.id = u.repository_id
WHERE repo.deleted_at IS NULL AND u.id = %s AND %s
`
// GetIndexesByIDs returns an index for each of the given identifiers. Not all given ids will necessarily
// GetAutoIndexJobsByIDs returns an index for each of the given identifiers. Not all given ids will necessarily
// have a corresponding element in the returned list.
func (s *store) GetIndexesByIDs(ctx context.Context, ids ...int) (_ []shared.Index, err error) {
ctx, _, endObservation := s.operations.getIndexesByIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
func (s *store) GetAutoIndexJobsByIDs(ctx context.Context, ids ...int) (_ []shared.AutoIndexJob, err error) {
ctx, _, endObservation := s.operations.getAutoIndexJobsByIDs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.IntSlice("ids", ids),
}})
defer endObservation(1, observation.Args{})
@ -253,7 +253,7 @@ func (s *store) GetIndexesByIDs(ctx context.Context, ids ...int) (_ []shared.Ind
queries = append(queries, sqlf.Sprintf("%d", id))
}
return scanIndexes(s.db.Query(ctx, sqlf.Sprintf(getIndexesByIDsQuery, sqlf.Join(queries, ", "), authzConds)))
return scanJobs(s.db.Query(ctx, sqlf.Sprintf(getIndexesByIDsQuery, sqlf.Join(queries, ", "), authzConds)))
}
const getIndexesByIDsQuery = `
@ -290,9 +290,9 @@ WHERE repo.deleted_at IS NULL AND u.id IN (%s) AND %s
ORDER BY u.id
`
// DeleteIndexByID deletes an index by its identifier.
func (s *store) DeleteIndexByID(ctx context.Context, id int) (_ bool, err error) {
ctx, _, endObservation := s.operations.deleteIndexByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
// DeleteAutoIndexJobByID deletes an index by its identifier.
func (s *store) DeleteAutoIndexJobByID(ctx context.Context, id int) (_ bool, err error) {
ctx, _, endObservation := s.operations.deleteAutoIndexJobByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
@ -305,9 +305,9 @@ const deleteIndexByIDQuery = `
DELETE FROM lsif_indexes WHERE id = %s RETURNING repository_id
`
// DeleteIndexes deletes indexes matching the given filter criteria.
func (s *store) DeleteIndexes(ctx context.Context, opts shared.DeleteIndexesOptions) (err error) {
ctx, _, endObservation := s.operations.deleteIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
// DeleteAutoIndexJobs deletes indexes matching the given filter criteria.
func (s *store) DeleteAutoIndexJobs(ctx context.Context, opts shared.DeleteAutoIndexJobsOptions) (err error) {
ctx, _, endObservation := s.operations.deleteAutoIndexJobs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.StringSlice("states", opts.States),
attribute.String("term", opts.Term),
@ -357,25 +357,25 @@ USING repo
WHERE u.repository_id = repo.id AND %s
`
// ReindexIndexByID reindexes an index by its identifier.
func (s *store) ReindexIndexByID(ctx context.Context, id int) (err error) {
ctx, _, endObservation := s.operations.reindexIndexByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
// SetRerunAutoIndexJobByID reindexes an index by its identifier.
func (s *store) SetRerunAutoIndexJobByID(ctx context.Context, id int) (err error) {
ctx, _, endObservation := s.operations.setRerunAutoIndexJobByID.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("id", id),
}})
defer endObservation(1, observation.Args{})
return s.db.Exec(ctx, sqlf.Sprintf(reindexIndexByIDQuery, id))
return s.db.Exec(ctx, sqlf.Sprintf(setRerunAutoIndexJobByIDQuery, id))
}
const reindexIndexByIDQuery = `
const setRerunAutoIndexJobByIDQuery = `
UPDATE lsif_indexes u
SET should_reindex = true
WHERE id = %s
`
// ReindexIndexes reindexes indexes matching the given filter criteria.
func (s *store) ReindexIndexes(ctx context.Context, opts shared.ReindexIndexesOptions) (err error) {
ctx, _, endObservation := s.operations.reindexIndexes.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
// SetRerunAutoIndexJobs reindexes indexes matching the given filter criteria.
func (s *store) SetRerunAutoIndexJobs(ctx context.Context, opts shared.SetRerunAutoIndexJobsOptions) (err error) {
ctx, _, endObservation := s.operations.setRerunAutoIndexJobs.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", opts.RepositoryID),
attribute.StringSlice("states", opts.States),
attribute.String("term", opts.Term),
@ -415,11 +415,11 @@ func (s *store) ReindexIndexes(ctx context.Context, opts shared.ReindexIndexesOp
unset, _ := tx.db.SetLocal(ctx, "codeintel.lsif_indexes_audit.reason", "direct reindex by filter criteria request")
defer unset(ctx)
return tx.db.Exec(ctx, sqlf.Sprintf(reindexIndexesQuery, sqlf.Join(conds, " AND ")))
return tx.db.Exec(ctx, sqlf.Sprintf(setRerunAutoIndexJobsByIDsQuery, sqlf.Join(conds, " AND ")))
})
}
const reindexIndexesQuery = `
const setRerunAutoIndexJobsByIDsQuery = `
WITH candidates AS (
SELECT u.id
FROM lsif_indexes u

View File

@ -42,17 +42,17 @@ func TestGetIndexes(t *testing.T) {
indexID1, indexID2, indexID3, indexID4 := 1, 3, 5, 5 // note the duplication
uploadID1, uploadID2, uploadID3, uploadID4 := 10, 11, 12, 13
insertIndexes(t, db,
uploadsshared.Index{ID: 1, Commit: makeCommit(3331), QueuedAt: t1, State: "queued", AssociatedUploadID: &uploadID1},
uploadsshared.Index{ID: 2, QueuedAt: t2, State: "errored", FailureMessage: &failureMessage},
uploadsshared.Index{ID: 3, Commit: makeCommit(3333), QueuedAt: t3, State: "queued", AssociatedUploadID: &uploadID1},
uploadsshared.Index{ID: 4, QueuedAt: t4, State: "queued", RepositoryID: 51, RepositoryName: "foo bar x"},
uploadsshared.Index{ID: 5, Commit: makeCommit(3333), QueuedAt: t5, State: "processing", AssociatedUploadID: &uploadID1},
uploadsshared.Index{ID: 6, QueuedAt: t6, State: "processing", RepositoryID: 52, RepositoryName: "foo bar y"},
uploadsshared.Index{ID: 7, QueuedAt: t7, Indexer: "lsif-typescript"},
uploadsshared.Index{ID: 8, QueuedAt: t8, Indexer: "scip-ocaml"},
uploadsshared.Index{ID: 9, QueuedAt: t9, State: "queued"},
uploadsshared.Index{ID: 10, QueuedAt: t10},
insertAutoIndexJobs(t, db,
uploadsshared.AutoIndexJob{ID: 1, Commit: makeCommit(3331), QueuedAt: t1, State: "queued", AssociatedUploadID: &uploadID1},
uploadsshared.AutoIndexJob{ID: 2, QueuedAt: t2, State: "errored", FailureMessage: &failureMessage},
uploadsshared.AutoIndexJob{ID: 3, Commit: makeCommit(3333), QueuedAt: t3, State: "queued", AssociatedUploadID: &uploadID1},
uploadsshared.AutoIndexJob{ID: 4, QueuedAt: t4, State: "queued", RepositoryID: 51, RepositoryName: "foo bar x"},
uploadsshared.AutoIndexJob{ID: 5, Commit: makeCommit(3333), QueuedAt: t5, State: "processing", AssociatedUploadID: &uploadID1},
uploadsshared.AutoIndexJob{ID: 6, QueuedAt: t6, State: "processing", RepositoryID: 52, RepositoryName: "foo bar y"},
uploadsshared.AutoIndexJob{ID: 7, QueuedAt: t7, Indexer: "lsif-typescript"},
uploadsshared.AutoIndexJob{ID: 8, QueuedAt: t8, Indexer: "scip-ocaml"},
uploadsshared.AutoIndexJob{ID: 9, QueuedAt: t9, State: "queued"},
uploadsshared.AutoIndexJob{ID: 10, QueuedAt: t10},
)
insertUploads(t, db,
shared.Upload{ID: uploadID1, AssociatedIndexID: &indexID1},
@ -102,7 +102,7 @@ func TestGetIndexes(t *testing.T) {
)
t.Run(name, func(t *testing.T) {
indexes, totalCount, err := store.GetIndexes(ctx, shared.GetIndexesOptions{
indexes, totalCount, err := store.GetAutoIndexJobs(ctx, shared.GetAutoIndexJobsOptions{
RepositoryID: testCase.repositoryID,
State: testCase.state,
States: testCase.states,
@ -145,8 +145,8 @@ func TestGetIndexes(t *testing.T) {
})
t.Cleanup(func() { conf.Mock(nil) })
indexes, totalCount, err := store.GetIndexes(ctx,
shared.GetIndexesOptions{
indexes, totalCount, err := store.GetAutoIndexJobs(ctx,
shared.GetAutoIndexJobsOptions{
Limit: 1,
},
)
@ -159,14 +159,14 @@ func TestGetIndexes(t *testing.T) {
})
}
func TestGetIndexByID(t *testing.T) {
func TestGetAutoIndexJobByID(t *testing.T) {
ctx := context.Background()
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
// Index does not exist initially
if _, exists, err := store.GetIndexByID(ctx, 1); err != nil {
// AutoIndexJob does not exist initially
if _, exists, err := store.GetAutoIndexJobByID(ctx, 1); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if exists {
t.Fatal("unexpected record")
@ -175,7 +175,7 @@ func TestGetIndexByID(t *testing.T) {
uploadID := 5
queuedAt := time.Unix(1587396557, 0).UTC()
startedAt := queuedAt.Add(time.Minute)
expected := uploadsshared.Index{
expected := uploadsshared.AutoIndexJob{
ID: 1,
Commit: makeCommit(1),
QueuedAt: queuedAt,
@ -204,10 +204,10 @@ func TestGetIndexByID(t *testing.T) {
AssociatedUploadID: &uploadID,
}
insertIndexes(t, db, expected)
insertAutoIndexJobs(t, db, expected)
insertUploads(t, db, shared.Upload{ID: uploadID, AssociatedIndexID: &expected.ID})
if index, exists, err := store.GetIndexByID(ctx, 1); err != nil {
if index, exists, err := store.GetAutoIndexJobByID(ctx, 1); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("expected record to exist")
@ -229,7 +229,7 @@ func TestGetIndexByID(t *testing.T) {
})
t.Cleanup(func() { conf.Mock(nil) })
_, exists, err := store.GetIndexByID(ctx, 1)
_, exists, err := store.GetAutoIndexJobByID(ctx, 1)
if err != nil {
t.Fatal(err)
}
@ -252,44 +252,44 @@ func TestGetQueuedIndexRank(t *testing.T) {
t6 := t1.Add(+time.Minute * 2)
t7 := t1.Add(+time.Minute * 5)
insertIndexes(t, db,
uploadsshared.Index{ID: 1, QueuedAt: t1, State: "queued"},
uploadsshared.Index{ID: 2, QueuedAt: t2, State: "queued"},
uploadsshared.Index{ID: 3, QueuedAt: t3, State: "queued"},
uploadsshared.Index{ID: 4, QueuedAt: t4, State: "queued"},
uploadsshared.Index{ID: 5, QueuedAt: t5, State: "queued"},
uploadsshared.Index{ID: 6, QueuedAt: t6, State: "processing"},
uploadsshared.Index{ID: 7, QueuedAt: t1, State: "queued", ProcessAfter: &t7},
insertAutoIndexJobs(t, db,
uploadsshared.AutoIndexJob{ID: 1, QueuedAt: t1, State: "queued"},
uploadsshared.AutoIndexJob{ID: 2, QueuedAt: t2, State: "queued"},
uploadsshared.AutoIndexJob{ID: 3, QueuedAt: t3, State: "queued"},
uploadsshared.AutoIndexJob{ID: 4, QueuedAt: t4, State: "queued"},
uploadsshared.AutoIndexJob{ID: 5, QueuedAt: t5, State: "queued"},
uploadsshared.AutoIndexJob{ID: 6, QueuedAt: t6, State: "processing"},
uploadsshared.AutoIndexJob{ID: 7, QueuedAt: t1, State: "queued", ProcessAfter: &t7},
)
if index, _, _ := store.GetIndexByID(context.Background(), 1); index.Rank == nil || *index.Rank != 1 {
if index, _, _ := store.GetAutoIndexJobByID(context.Background(), 1); index.Rank == nil || *index.Rank != 1 {
t.Errorf("unexpected rank. want=%d have=%s", 1, printableRank{index.Rank})
}
if index, _, _ := store.GetIndexByID(context.Background(), 2); index.Rank == nil || *index.Rank != 6 {
if index, _, _ := store.GetAutoIndexJobByID(context.Background(), 2); index.Rank == nil || *index.Rank != 6 {
t.Errorf("unexpected rank. want=%d have=%s", 5, printableRank{index.Rank})
}
if index, _, _ := store.GetIndexByID(context.Background(), 3); index.Rank == nil || *index.Rank != 3 {
if index, _, _ := store.GetAutoIndexJobByID(context.Background(), 3); index.Rank == nil || *index.Rank != 3 {
t.Errorf("unexpected rank. want=%d have=%s", 3, printableRank{index.Rank})
}
if index, _, _ := store.GetIndexByID(context.Background(), 4); index.Rank == nil || *index.Rank != 2 {
if index, _, _ := store.GetAutoIndexJobByID(context.Background(), 4); index.Rank == nil || *index.Rank != 2 {
t.Errorf("unexpected rank. want=%d have=%s", 2, printableRank{index.Rank})
}
if index, _, _ := store.GetIndexByID(context.Background(), 5); index.Rank == nil || *index.Rank != 4 {
if index, _, _ := store.GetAutoIndexJobByID(context.Background(), 5); index.Rank == nil || *index.Rank != 4 {
t.Errorf("unexpected rank. want=%d have=%s", 4, printableRank{index.Rank})
}
// Only considers queued indexes to determine rank
if index, _, _ := store.GetIndexByID(context.Background(), 6); index.Rank != nil {
if index, _, _ := store.GetAutoIndexJobByID(context.Background(), 6); index.Rank != nil {
t.Errorf("unexpected rank. want=%s have=%s", "nil", printableRank{index.Rank})
}
// Process after takes priority over upload time
if upload, _, _ := store.GetIndexByID(context.Background(), 7); upload.Rank == nil || *upload.Rank != 5 {
if upload, _, _ := store.GetAutoIndexJobByID(context.Background(), 7); upload.Rank == nil || *upload.Rank != 5 {
t.Errorf("unexpected rank. want=%d have=%s", 4, printableRank{upload.Rank})
}
}
func TestGetIndexesByIDs(t *testing.T) {
func TestGetAutoIndexJobsByIDs(t *testing.T) {
ctx := context.Background()
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
@ -298,17 +298,17 @@ func TestGetIndexesByIDs(t *testing.T) {
indexID1, indexID2, indexID3, indexID4 := 1, 3, 5, 5 // note the duplication
uploadID1, uploadID2, uploadID3, uploadID4 := 10, 11, 12, 13
insertIndexes(t, db,
uploadsshared.Index{ID: 1, AssociatedUploadID: &uploadID1},
uploadsshared.Index{ID: 2},
uploadsshared.Index{ID: 3, AssociatedUploadID: &uploadID1},
uploadsshared.Index{ID: 4},
uploadsshared.Index{ID: 5, AssociatedUploadID: &uploadID1},
uploadsshared.Index{ID: 6},
uploadsshared.Index{ID: 7},
uploadsshared.Index{ID: 8},
uploadsshared.Index{ID: 9},
uploadsshared.Index{ID: 10},
insertAutoIndexJobs(t, db,
uploadsshared.AutoIndexJob{ID: 1, AssociatedUploadID: &uploadID1},
uploadsshared.AutoIndexJob{ID: 2},
uploadsshared.AutoIndexJob{ID: 3, AssociatedUploadID: &uploadID1},
uploadsshared.AutoIndexJob{ID: 4},
uploadsshared.AutoIndexJob{ID: 5, AssociatedUploadID: &uploadID1},
uploadsshared.AutoIndexJob{ID: 6},
uploadsshared.AutoIndexJob{ID: 7},
uploadsshared.AutoIndexJob{ID: 8},
uploadsshared.AutoIndexJob{ID: 9},
uploadsshared.AutoIndexJob{ID: 10},
)
insertUploads(t, db,
shared.Upload{ID: uploadID1, AssociatedIndexID: &indexID1},
@ -318,7 +318,7 @@ func TestGetIndexesByIDs(t *testing.T) {
)
t.Run("fetch", func(t *testing.T) {
indexes, err := store.GetIndexesByIDs(ctx, 2, 4, 6, 8, 12)
indexes, err := store.GetAutoIndexJobsByIDs(ctx, 2, 4, 6, 8, 12)
if err != nil {
t.Fatalf("unexpected error getting indexes for repo: %s", err)
}
@ -348,7 +348,7 @@ func TestGetIndexesByIDs(t *testing.T) {
})
t.Cleanup(func() { conf.Mock(nil) })
indexes, err := store.GetIndexesByIDs(ctx, 1, 2, 3, 4)
indexes, err := store.GetAutoIndexJobsByIDs(ctx, 1, 2, 3, 4)
if err != nil {
t.Fatal(err)
}
@ -358,36 +358,36 @@ func TestGetIndexesByIDs(t *testing.T) {
})
}
func TestDeleteIndexByID(t *testing.T) {
func TestDeleteAutoIndexJobByID(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
insertIndexes(t, db, uploadsshared.Index{ID: 1})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1})
if found, err := store.DeleteIndexByID(context.Background(), 1); err != nil {
if found, err := store.DeleteAutoIndexJobByID(context.Background(), 1); err != nil {
t.Fatalf("unexpected error deleting index: %s", err)
} else if !found {
t.Fatalf("expected record to exist")
}
// Index no longer exists
if _, exists, err := store.GetIndexByID(context.Background(), 1); err != nil {
// AutoIndexJob no longer exists
if _, exists, err := store.GetAutoIndexJobByID(context.Background(), 1); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if exists {
t.Fatal("unexpected record")
}
}
func TestDeleteIndexes(t *testing.T) {
func TestDeleteAutoIndexJobs(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
insertIndexes(t, db, uploadsshared.Index{ID: 1, State: "completed"})
insertIndexes(t, db, uploadsshared.Index{ID: 2, State: "errored"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1, State: "completed"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 2, State: "errored"})
if err := store.DeleteIndexes(context.Background(), shared.DeleteIndexesOptions{
if err := store.DeleteAutoIndexJobs(context.Background(), shared.DeleteAutoIndexJobsOptions{
States: []string{"errored"},
Term: "",
RepositoryID: 0,
@ -395,25 +395,25 @@ func TestDeleteIndexes(t *testing.T) {
t.Fatalf("unexpected error deleting indexes: %s", err)
}
// Index no longer exists
if _, exists, err := store.GetIndexByID(context.Background(), 2); err != nil {
// AutoIndexJob no longer exists
if _, exists, err := store.GetAutoIndexJobByID(context.Background(), 2); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if exists {
t.Fatal("unexpected record")
}
}
func TestDeleteIndexesWithIndexerKey(t *testing.T) {
func TestDeleteAutoIndexJobsWithIndexerKey(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
insertIndexes(t, db, uploadsshared.Index{ID: 1, Indexer: "sourcegraph/scip-go@sha256:123456"})
insertIndexes(t, db, uploadsshared.Index{ID: 2, Indexer: "sourcegraph/scip-go"})
insertIndexes(t, db, uploadsshared.Index{ID: 3, Indexer: "sourcegraph/scip-typescript"})
insertIndexes(t, db, uploadsshared.Index{ID: 4, Indexer: "sourcegraph/scip-typescript"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1, Indexer: "sourcegraph/scip-go@sha256:123456"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 2, Indexer: "sourcegraph/scip-go"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 3, Indexer: "sourcegraph/scip-typescript"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 4, Indexer: "sourcegraph/scip-typescript"})
if err := store.DeleteIndexes(context.Background(), shared.DeleteIndexesOptions{
if err := store.DeleteAutoIndexJobs(context.Background(), shared.DeleteAutoIndexJobsOptions{
IndexerNames: []string{"scip-go"},
}); err != nil {
t.Fatalf("unexpected error deleting indexes: %s", err)
@ -421,7 +421,7 @@ func TestDeleteIndexesWithIndexerKey(t *testing.T) {
// Target indexes no longer exist
for _, id := range []int{1, 2} {
if _, exists, err := store.GetIndexByID(context.Background(), id); err != nil {
if _, exists, err := store.GetAutoIndexJobByID(context.Background(), id); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if exists {
t.Fatal("unexpected record")
@ -430,7 +430,7 @@ func TestDeleteIndexesWithIndexerKey(t *testing.T) {
// Unmatched indexes remain
for _, id := range []int{3, 4} {
if _, exists, err := store.GetIndexByID(context.Background(), id); err != nil {
if _, exists, err := store.GetAutoIndexJobByID(context.Background(), id); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("expected record, got none")
@ -438,20 +438,20 @@ func TestDeleteIndexesWithIndexerKey(t *testing.T) {
}
}
func TestReindexIndexByID(t *testing.T) {
func TestSetRerunAutoIndexJobByID(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
insertIndexes(t, db, uploadsshared.Index{ID: 1, State: "completed"})
insertIndexes(t, db, uploadsshared.Index{ID: 2, State: "errored"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1, State: "completed"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 2, State: "errored"})
if err := store.ReindexIndexByID(context.Background(), 2); err != nil {
if err := store.SetRerunAutoIndexJobByID(context.Background(), 2); err != nil {
t.Fatalf("unexpected error deleting indexes: %s", err)
}
// Index has been marked for reindexing
if index, exists, err := store.GetIndexByID(context.Background(), 2); err != nil {
// AutoIndexJob has been marked for reindexing
if index, exists, err := store.GetAutoIndexJobByID(context.Background(), 2); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("index missing")
@ -460,15 +460,15 @@ func TestReindexIndexByID(t *testing.T) {
}
}
func TestReindexIndexes(t *testing.T) {
func TestSetRerunAutoIndexJobs(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
insertIndexes(t, db, uploadsshared.Index{ID: 1, State: "completed"})
insertIndexes(t, db, uploadsshared.Index{ID: 2, State: "errored"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1, State: "completed"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 2, State: "errored"})
if err := store.ReindexIndexes(context.Background(), shared.ReindexIndexesOptions{
if err := store.SetRerunAutoIndexJobs(context.Background(), shared.SetRerunAutoIndexJobsOptions{
States: []string{"errored"},
Term: "",
RepositoryID: 0,
@ -476,8 +476,8 @@ func TestReindexIndexes(t *testing.T) {
t.Fatalf("unexpected error deleting indexes: %s", err)
}
// Index has been marked for reindexing
if index, exists, err := store.GetIndexByID(context.Background(), 2); err != nil {
// AutoIndexJob has been marked for reindexing
if index, exists, err := store.GetAutoIndexJobByID(context.Background(), 2); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("index missing")
@ -486,17 +486,17 @@ func TestReindexIndexes(t *testing.T) {
}
}
func TestReindexIndexesWithIndexerKey(t *testing.T) {
func TestSetRerunAutoIndexJobsWithIndexerKey(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
insertIndexes(t, db, uploadsshared.Index{ID: 1, Indexer: "sourcegraph/scip-go@sha256:123456"})
insertIndexes(t, db, uploadsshared.Index{ID: 2, Indexer: "sourcegraph/scip-go"})
insertIndexes(t, db, uploadsshared.Index{ID: 3, Indexer: "sourcegraph/scip-typescript"})
insertIndexes(t, db, uploadsshared.Index{ID: 4, Indexer: "sourcegraph/scip-typescript"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 1, Indexer: "sourcegraph/scip-go@sha256:123456"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 2, Indexer: "sourcegraph/scip-go"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 3, Indexer: "sourcegraph/scip-typescript"})
insertAutoIndexJobs(t, db, uploadsshared.AutoIndexJob{ID: 4, Indexer: "sourcegraph/scip-typescript"})
if err := store.ReindexIndexes(context.Background(), shared.ReindexIndexesOptions{
if err := store.SetRerunAutoIndexJobs(context.Background(), shared.SetRerunAutoIndexJobsOptions{
IndexerNames: []string{"scip-go"},
Term: "",
RepositoryID: 0,
@ -509,7 +509,7 @@ func TestReindexIndexesWithIndexerKey(t *testing.T) {
1: true, 2: true,
3: false, 4: false,
} {
if index, exists, err := store.GetIndexByID(context.Background(), id); err != nil {
if index, exists, err := store.GetAutoIndexJobByID(context.Background(), id); err != nil {
t.Fatalf("unexpected error getting index: %s", err)
} else if !exists {
t.Fatal("index missing")
@ -519,12 +519,12 @@ func TestReindexIndexesWithIndexerKey(t *testing.T) {
}
}
func TestDeleteIndexByIDMissingRow(t *testing.T) {
func TestDeleteAutoIndexJobByIDMissingRow(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(t))
store := New(observation.TestContextTB(t), db)
if found, err := store.DeleteIndexByID(context.Background(), 1); err != nil {
if found, err := store.DeleteAutoIndexJobByID(context.Background(), 1); err != nil {
t.Fatalf("unexpected error deleting index: %s", err)
} else if found {
t.Fatalf("unexpected record")

View File

@ -21,7 +21,7 @@ type operations struct {
hasCommit *observation.Operation
repositoryIDsWithErrors *observation.Operation
numRepositoriesWithCodeIntelligence *observation.Operation
getRecentIndexesSummary *observation.Operation
getRecentAutoIndexJobsSummary *observation.Operation
// Repositories
getRepositoriesForIndexScan *observation.Operation
@ -85,13 +85,13 @@ type operations struct {
reindexUploadByID *observation.Operation
deleteIndexesWithoutRepository *observation.Operation
getIndexes *observation.Operation
getIndexByID *observation.Operation
getIndexesByIDs *observation.Operation
deleteIndexByID *observation.Operation
deleteIndexes *observation.Operation
reindexIndexByID *observation.Operation
reindexIndexes *observation.Operation
getAutoIndexJobs *observation.Operation
getAutoIndexJobByID *observation.Operation
getAutoIndexJobsByIDs *observation.Operation
deleteAutoIndexJobByID *observation.Operation
deleteAutoIndexJobs *observation.Operation
setRerunAutoIndexJobByID *observation.Operation
setRerunAutoIndexJobs *observation.Operation
processStaleSourcedCommits *observation.Operation
expireFailedRecords *observation.Operation
}
@ -190,19 +190,19 @@ func newOperations(observationCtx *observation.Context) *operations {
reindexUploads: op("ReindexUploads"),
reindexUploadByID: op("ReindexUploadByID"),
deleteIndexesWithoutRepository: op("DeleteIndexesWithoutRepository"),
deleteIndexesWithoutRepository: op("DeleteAutoIndexJobsWithoutRepository"),
getIndexes: op("GetIndexes"),
getIndexByID: op("GetIndexByID"),
getIndexesByIDs: op("GetIndexesByIDs"),
deleteIndexByID: op("DeleteIndexByID"),
deleteIndexes: op("DeleteIndexes"),
reindexIndexByID: op("ReindexIndexByID"),
reindexIndexes: op("ReindexIndexes"),
getAutoIndexJobs: op("GetAutoIndexJobs"),
getAutoIndexJobByID: op("GetAutoIndexJobByID"),
getAutoIndexJobsByIDs: op("GetAutoIndexJobsByIDs"),
deleteAutoIndexJobByID: op("DeleteAutoIndexJobByID"),
deleteAutoIndexJobs: op("DeleteAutoIndexJobs"),
setRerunAutoIndexJobByID: op("SetRerunAutoIndexJobByID"),
setRerunAutoIndexJobs: op("SetRerunAutoIndexJobs"),
processStaleSourcedCommits: op("ProcessStaleSourcedCommits"),
expireFailedRecords: op("ExpireFailedRecords"),
repositoryIDsWithErrors: op("RepositoryIDsWithErrors"),
numRepositoriesWithCodeIntelligence: op("NumRepositoriesWithCodeIntelligence"),
getRecentIndexesSummary: op("GetRecentIndexesSummary"),
getRecentAutoIndexJobsSummary: op("GetRecentAutoIndexJobsSummary"),
}
}

View File

@ -37,14 +37,14 @@ type Store interface {
ReindexUploads(ctx context.Context, opts shared.ReindexUploadsOptions) error
ReindexUploadByID(ctx context.Context, id int) error
// Index records
GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) ([]shared.Index, int, error)
GetIndexByID(ctx context.Context, id int) (shared.Index, bool, error)
GetIndexesByIDs(ctx context.Context, ids ...int) ([]shared.Index, error)
DeleteIndexByID(ctx context.Context, id int) (bool, error)
DeleteIndexes(ctx context.Context, opts shared.DeleteIndexesOptions) error
ReindexIndexByID(ctx context.Context, id int) error
ReindexIndexes(ctx context.Context, opts shared.ReindexIndexesOptions) error
// AutoIndexJob records
GetAutoIndexJobs(ctx context.Context, opts shared.GetAutoIndexJobsOptions) ([]shared.AutoIndexJob, int, error)
GetAutoIndexJobByID(ctx context.Context, id int) (shared.AutoIndexJob, bool, error)
GetAutoIndexJobsByIDs(ctx context.Context, ids ...int) ([]shared.AutoIndexJob, error)
DeleteAutoIndexJobByID(ctx context.Context, id int) (bool, error)
DeleteAutoIndexJobs(ctx context.Context, opts shared.DeleteAutoIndexJobsOptions) error
SetRerunAutoIndexJobByID(ctx context.Context, id int) error
SetRerunAutoIndexJobs(ctx context.Context, opts shared.SetRerunAutoIndexJobsOptions) error
// Upload record insertion + processing
InsertUpload(ctx context.Context, upload shared.Upload) (int, error)
@ -62,7 +62,7 @@ type Store interface {
// Summary
GetIndexers(ctx context.Context, opts shared.GetIndexersOptions) ([]string, error)
GetRecentUploadsSummary(ctx context.Context, repositoryID int) ([]shared.UploadsWithRepositoryNamespace, error)
GetRecentIndexesSummary(ctx context.Context, repositoryID int) ([]shared.IndexesWithRepositoryNamespace, error)
GetRecentAutoIndexJobsSummary(ctx context.Context, repositoryID int) ([]shared.GroupedAutoIndexJobs, error)
RepositoryIDsWithErrors(ctx context.Context, offset, limit int) ([]shared.RepositoryWithCount, int, error)
NumRepositoriesWithCodeIntelligence(ctx context.Context) (int, error)
@ -99,7 +99,7 @@ type Store interface {
DeleteOldAuditLogs(ctx context.Context, maxAge time.Duration, now time.Time) (numRecordsScanned, numRecordsAltered int, _ error)
ReconcileCandidates(ctx context.Context, batchSize int) ([]int, error)
ProcessStaleSourcedCommits(ctx context.Context, minimumTimeSinceLastCheck time.Duration, commitResolverBatchSize int, commitResolverMaximumCommitLag time.Duration, shouldDelete func(ctx context.Context, repositoryID int, repositoryName, commit string) (bool, error)) (int, int, error)
DeleteIndexesWithoutRepository(ctx context.Context, now time.Time) (int, int, error)
DeleteAutoIndexJobsWithoutRepository(ctx context.Context, now time.Time) (int, int, error)
ExpireFailedRecords(ctx context.Context, batchSize int, failedIndexMaxAge time.Duration, now time.Time) (int, int, error)
ProcessSourcedCommits(ctx context.Context, minimumTimeSinceLastCheck time.Duration, commitResolverMaximumCommitLag time.Duration, limit int, f func(ctx context.Context, repositoryID int, repositoryName, commit string) (bool, error), now time.Time) (int, int, error)

View File

@ -95,30 +95,30 @@ func insertPackageReferences(t testing.TB, store Store, packageReferences []shar
}
}
// insertIndexes populates the lsif_indexes table with the given index models.
func insertIndexes(t testing.TB, db database.DB, indexes ...uploadsshared.Index) {
for _, index := range indexes {
if index.Commit == "" {
index.Commit = makeCommit(index.ID)
// insertAutoIndexJobs populates the lsif_indexes table with the given auto-index job models.
func insertAutoIndexJobs(t testing.TB, db database.DB, jobs ...uploadsshared.AutoIndexJob) {
for _, job := range jobs {
if job.Commit == "" {
job.Commit = makeCommit(job.ID)
}
if index.State == "" {
index.State = "completed"
if job.State == "" {
job.State = "completed"
}
if index.RepositoryID == 0 {
index.RepositoryID = 50
if job.RepositoryID == 0 {
job.RepositoryID = 50
}
if index.DockerSteps == nil {
index.DockerSteps = []uploadsshared.DockerStep{}
if job.DockerSteps == nil {
job.DockerSteps = []uploadsshared.DockerStep{}
}
if index.IndexerArgs == nil {
index.IndexerArgs = []string{}
if job.IndexerArgs == nil {
job.IndexerArgs = []string{}
}
if index.LocalSteps == nil {
index.LocalSteps = []string{}
if job.LocalSteps == nil {
job.LocalSteps = []string{}
}
// Ensure we have a repo for the inner join in select queries
insertRepo(t, db, index.RepositoryID, index.RepositoryName, true)
insertRepo(t, db, job.RepositoryID, job.RepositoryName, true)
query := sqlf.Sprintf(`
INSERT INTO lsif_indexes (
@ -143,25 +143,25 @@ func insertIndexes(t testing.TB, db database.DB, indexes ...uploadsshared.Index)
should_reindex
) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)
`,
index.ID,
index.Commit,
index.QueuedAt,
index.State,
index.FailureMessage,
index.StartedAt,
index.FinishedAt,
index.ProcessAfter,
index.NumResets,
index.NumFailures,
index.RepositoryID,
pq.Array(index.DockerSteps),
index.Root,
index.Indexer,
pq.Array(index.IndexerArgs),
index.Outfile,
pq.Array(index.ExecutionLogs),
pq.Array(index.LocalSteps),
index.ShouldReindex,
job.ID,
job.Commit,
job.QueuedAt,
job.State,
job.FailureMessage,
job.StartedAt,
job.FinishedAt,
job.ProcessAfter,
job.NumResets,
job.NumFailures,
job.RepositoryID,
pq.Array(job.DockerSteps),
job.Root,
job.Indexer,
pq.Array(job.IndexerArgs),
job.Outfile,
pq.Array(job.ExecutionLogs),
pq.Array(job.LocalSteps),
job.ShouldReindex,
)
if _, err := db.ExecContext(context.Background(), query.Query(sqlf.PostgresBindVar), query.Args()...); err != nil {

View File

@ -187,26 +187,26 @@ const sanitizedIndexerExpression = `
)
`
// GetRecentIndexesSummary returns the set of "interesting" indexes for the repository with the given identifier.
// GetRecentAutoIndexJobsSummary returns the set of "interesting" indexes for the repository with the given identifier.
// The return value is a list of indexes grouped by root and indexer. In each group, the set of indexes should
// include the set of unprocessed records as well as the latest finished record. These values allow users to
// quickly determine if a particular root/indexer pair is up-to-date or is having issues processing.
func (s *store) GetRecentIndexesSummary(ctx context.Context, repositoryID int) (summaries []uploadsshared.IndexesWithRepositoryNamespace, err error) {
ctx, logger, endObservation := s.operations.getRecentIndexesSummary.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
func (s *store) GetRecentAutoIndexJobsSummary(ctx context.Context, repositoryID int) (summaries []uploadsshared.GroupedAutoIndexJobs, err error) {
ctx, logger, endObservation := s.operations.getRecentAutoIndexJobsSummary.With(ctx, &err, observation.Args{Attrs: []attribute.KeyValue{
attribute.Int("repositoryID", repositoryID),
}})
defer endObservation(1, observation.Args{})
indexes, err := scanIndexes(s.db.Query(ctx, sqlf.Sprintf(recentIndexesSummaryQuery, repositoryID, repositoryID)))
indexes, err := scanJobs(s.db.Query(ctx, sqlf.Sprintf(recentIndexesSummaryQuery, repositoryID, repositoryID)))
if err != nil {
return nil, err
}
logger.AddEvent("scanIndexes", attribute.Int("numIndexes", len(indexes)))
logger.AddEvent("scanJobs", attribute.Int("numIndexes", len(indexes)))
groupedIndexes := make([]uploadsshared.IndexesWithRepositoryNamespace, 1, len(indexes)+1)
groupedIndexes := make([]uploadsshared.GroupedAutoIndexJobs, 1, len(indexes)+1)
for _, index := range indexes {
if last := groupedIndexes[len(groupedIndexes)-1]; last.Root != index.Root || last.Indexer != index.Indexer {
groupedIndexes = append(groupedIndexes, uploadsshared.IndexesWithRepositoryNamespace{
groupedIndexes = append(groupedIndexes, uploadsshared.GroupedAutoIndexJobs{
Root: index.Root,
Indexer: index.Indexer,
})

View File

@ -139,40 +139,40 @@ func TestRecentIndexesSummary(t *testing.T) {
r1 := 1
r2 := 2
addDefaults := func(index uploadsshared.Index) uploadsshared.Index {
index.Commit = makeCommit(index.ID)
index.RepositoryID = 50
index.RepositoryName = "n-50"
index.DockerSteps = []uploadsshared.DockerStep{}
index.IndexerArgs = []string{}
index.LocalSteps = []string{}
return index
addDefaults := func(job uploadsshared.AutoIndexJob) uploadsshared.AutoIndexJob {
job.Commit = makeCommit(job.ID)
job.RepositoryID = 50
job.RepositoryName = "n-50"
job.DockerSteps = []uploadsshared.DockerStep{}
job.IndexerArgs = []string{}
job.LocalSteps = []string{}
return job
}
indexes := []uploadsshared.Index{
addDefaults(uploadsshared.Index{ID: 150, QueuedAt: t0, Root: "r1", Indexer: "i1", State: "queued", Rank: &r2}), // visible (group 1)
addDefaults(uploadsshared.Index{ID: 151, QueuedAt: t1, Root: "r1", Indexer: "i1", State: "queued", Rank: &r1}), // visible (group 1)
addDefaults(uploadsshared.Index{ID: 152, FinishedAt: &t2, Root: "r1", Indexer: "i1", State: "errored"}), // visible (group 1)
addDefaults(uploadsshared.Index{ID: 153, FinishedAt: &t3, Root: "r1", Indexer: "i2", State: "completed"}), // visible (group 2)
addDefaults(uploadsshared.Index{ID: 154, FinishedAt: &t4, Root: "r2", Indexer: "i1", State: "completed"}), // visible (group 3)
addDefaults(uploadsshared.Index{ID: 155, FinishedAt: &t5, Root: "r2", Indexer: "i1", State: "errored"}), // shadowed
addDefaults(uploadsshared.Index{ID: 156, FinishedAt: &t6, Root: "r2", Indexer: "i2", State: "completed"}), // visible (group 4)
addDefaults(uploadsshared.Index{ID: 157, FinishedAt: &t7, Root: "r2", Indexer: "i2", State: "errored"}), // shadowed
addDefaults(uploadsshared.Index{ID: 158, FinishedAt: &t8, Root: "r2", Indexer: "i2", State: "errored"}), // shadowed
addDefaults(uploadsshared.Index{ID: 159, FinishedAt: &t9, Root: "r2", Indexer: "i2", State: "errored"}), // shadowed
jobs := []uploadsshared.AutoIndexJob{
addDefaults(uploadsshared.AutoIndexJob{ID: 150, QueuedAt: t0, Root: "r1", Indexer: "i1", State: "queued", Rank: &r2}), // visible (group 1)
addDefaults(uploadsshared.AutoIndexJob{ID: 151, QueuedAt: t1, Root: "r1", Indexer: "i1", State: "queued", Rank: &r1}), // visible (group 1)
addDefaults(uploadsshared.AutoIndexJob{ID: 152, FinishedAt: &t2, Root: "r1", Indexer: "i1", State: "errored"}), // visible (group 1)
addDefaults(uploadsshared.AutoIndexJob{ID: 153, FinishedAt: &t3, Root: "r1", Indexer: "i2", State: "completed"}), // visible (group 2)
addDefaults(uploadsshared.AutoIndexJob{ID: 154, FinishedAt: &t4, Root: "r2", Indexer: "i1", State: "completed"}), // visible (group 3)
addDefaults(uploadsshared.AutoIndexJob{ID: 155, FinishedAt: &t5, Root: "r2", Indexer: "i1", State: "errored"}), // shadowed
addDefaults(uploadsshared.AutoIndexJob{ID: 156, FinishedAt: &t6, Root: "r2", Indexer: "i2", State: "completed"}), // visible (group 4)
addDefaults(uploadsshared.AutoIndexJob{ID: 157, FinishedAt: &t7, Root: "r2", Indexer: "i2", State: "errored"}), // shadowed
addDefaults(uploadsshared.AutoIndexJob{ID: 158, FinishedAt: &t8, Root: "r2", Indexer: "i2", State: "errored"}), // shadowed
addDefaults(uploadsshared.AutoIndexJob{ID: 159, FinishedAt: &t9, Root: "r2", Indexer: "i2", State: "errored"}), // shadowed
}
insertIndexes(t, db, indexes...)
insertAutoIndexJobs(t, db, jobs...)
summary, err := store.GetRecentIndexesSummary(ctx, 50)
summary, err := store.GetRecentAutoIndexJobsSummary(ctx, 50)
if err != nil {
t.Fatalf("unexpected error querying recent index summary: %s", err)
}
expected := []uploadsshared.IndexesWithRepositoryNamespace{
{Root: "r1", Indexer: "i1", Indexes: []uploadsshared.Index{indexes[0], indexes[1], indexes[2]}},
{Root: "r1", Indexer: "i2", Indexes: []uploadsshared.Index{indexes[3]}},
{Root: "r2", Indexer: "i1", Indexes: []uploadsshared.Index{indexes[4]}},
{Root: "r2", Indexer: "i2", Indexes: []uploadsshared.Index{indexes[6]}},
expected := []uploadsshared.GroupedAutoIndexJobs{
{Root: "r1", Indexer: "i1", Indexes: []uploadsshared.AutoIndexJob{jobs[0], jobs[1], jobs[2]}},
{Root: "r1", Indexer: "i2", Indexes: []uploadsshared.AutoIndexJob{jobs[3]}},
{Root: "r2", Indexer: "i1", Indexes: []uploadsshared.AutoIndexJob{jobs[4]}},
{Root: "r2", Indexer: "i2", Indexes: []uploadsshared.AutoIndexJob{jobs[6]}},
}
if diff := cmp.Diff(expected, summary); diff != "" {
t.Errorf("unexpected index summary (-want +got):\n%s", diff)
@ -211,20 +211,20 @@ func TestRepositoryIDsWithErrors(t *testing.T) {
shared.Upload{ID: 171, RepositoryID: 58, State: "failed", FinishedAt: &t2},
shared.Upload{ID: 172, RepositoryID: 58, State: "failed", FinishedAt: &t3},
)
insertIndexes(t, db,
uploadsshared.Index{ID: 201, RepositoryID: 51}, // Repo 51 = success
uploadsshared.Index{ID: 202, RepositoryID: 52, State: "failed"}, // Repo 52 = failing index
uploadsshared.Index{ID: 203, RepositoryID: 53}, // Repo 53 = success (+ failing upload)
insertAutoIndexJobs(t, db,
uploadsshared.AutoIndexJob{ID: 201, RepositoryID: 51}, // Repo 51 = success
uploadsshared.AutoIndexJob{ID: 202, RepositoryID: 52, State: "failed"}, // Repo 52 = failing index
uploadsshared.AutoIndexJob{ID: 203, RepositoryID: 53}, // Repo 53 = success (+ failing upload)
// Repo 56 = multiple failures for same project
uploadsshared.Index{ID: 250, RepositoryID: 56, State: "failed", FinishedAt: &t1},
uploadsshared.Index{ID: 251, RepositoryID: 56, State: "failed", FinishedAt: &t2},
uploadsshared.Index{ID: 252, RepositoryID: 56, State: "failed", FinishedAt: &t3},
uploadsshared.AutoIndexJob{ID: 250, RepositoryID: 56, State: "failed", FinishedAt: &t1},
uploadsshared.AutoIndexJob{ID: 251, RepositoryID: 56, State: "failed", FinishedAt: &t2},
uploadsshared.AutoIndexJob{ID: 252, RepositoryID: 56, State: "failed", FinishedAt: &t3},
// Repo 57 = multiple failures for different projects
uploadsshared.Index{ID: 260, RepositoryID: 57, State: "failed", FinishedAt: &t1, Root: "proj1"},
uploadsshared.Index{ID: 261, RepositoryID: 57, State: "failed", FinishedAt: &t2, Root: "proj2"},
uploadsshared.Index{ID: 262, RepositoryID: 57, State: "failed", FinishedAt: &t3, Root: "proj3"},
uploadsshared.AutoIndexJob{ID: 260, RepositoryID: 57, State: "failed", FinishedAt: &t1, Root: "proj1"},
uploadsshared.AutoIndexJob{ID: 261, RepositoryID: 57, State: "failed", FinishedAt: &t2, Root: "proj2"},
uploadsshared.AutoIndexJob{ID: 262, RepositoryID: 57, State: "failed", FinishedAt: &t3, Root: "proj3"},
)
// Query page 1

File diff suppressed because it is too large Load Diff

View File

@ -200,36 +200,36 @@ func (s *Service) ReindexUploadByID(ctx context.Context, id int) error {
return s.store.ReindexUploadByID(ctx, id)
}
func (s *Service) GetIndexes(ctx context.Context, opts shared.GetIndexesOptions) ([]uploadsshared.Index, int, error) {
return s.store.GetIndexes(ctx, opts)
func (s *Service) GetAutoIndexJobs(ctx context.Context, opts shared.GetAutoIndexJobsOptions) ([]uploadsshared.AutoIndexJob, int, error) {
return s.store.GetAutoIndexJobs(ctx, opts)
}
func (s *Service) GetIndexByID(ctx context.Context, id int) (uploadsshared.Index, bool, error) {
return s.store.GetIndexByID(ctx, id)
func (s *Service) GetAutoIndexJobByID(ctx context.Context, id int) (uploadsshared.AutoIndexJob, bool, error) {
return s.store.GetAutoIndexJobByID(ctx, id)
}
func (s *Service) GetIndexesByIDs(ctx context.Context, ids ...int) ([]uploadsshared.Index, error) {
return s.store.GetIndexesByIDs(ctx, ids...)
func (s *Service) GetAutoIndexJobsByIDs(ctx context.Context, ids ...int) ([]uploadsshared.AutoIndexJob, error) {
return s.store.GetAutoIndexJobsByIDs(ctx, ids...)
}
func (s *Service) DeleteIndexByID(ctx context.Context, id int) (bool, error) {
return s.store.DeleteIndexByID(ctx, id)
func (s *Service) DeleteAutoIndexJobByID(ctx context.Context, id int) (bool, error) {
return s.store.DeleteAutoIndexJobByID(ctx, id)
}
func (s *Service) DeleteIndexes(ctx context.Context, opts shared.DeleteIndexesOptions) error {
return s.store.DeleteIndexes(ctx, opts)
func (s *Service) DeleteAutoIndexJobs(ctx context.Context, opts shared.DeleteAutoIndexJobsOptions) error {
return s.store.DeleteAutoIndexJobs(ctx, opts)
}
func (s *Service) ReindexIndexByID(ctx context.Context, id int) error {
return s.store.ReindexIndexByID(ctx, id)
func (s *Service) SetRerunAutoIndexJobByID(ctx context.Context, id int) error {
return s.store.SetRerunAutoIndexJobByID(ctx, id)
}
func (s *Service) ReindexIndexes(ctx context.Context, opts shared.ReindexIndexesOptions) error {
return s.store.ReindexIndexes(ctx, opts)
func (s *Service) SetRerunAutoIndexJobs(ctx context.Context, opts shared.SetRerunAutoIndexJobsOptions) error {
return s.store.SetRerunAutoIndexJobs(ctx, opts)
}
func (s *Service) GetRecentIndexesSummary(ctx context.Context, repositoryID int) ([]uploadsshared.IndexesWithRepositoryNamespace, error) {
return s.store.GetRecentIndexesSummary(ctx, repositoryID)
func (s *Service) GetRecentAutoIndexJobsSummary(ctx context.Context, repositoryID int) ([]uploadsshared.GroupedAutoIndexJobs, error) {
return s.store.GetRecentAutoIndexJobsSummary(ctx, repositoryID)
}
func (s *Service) NumRepositoriesWithCodeIntelligence(ctx context.Context) (int, error) {

View File

@ -19,6 +19,7 @@ go_library(
"//internal/executor",
"//lib/codeintel/autoindex/config",
"//lib/errors",
"@com_github_life4_genesis//slices",
"@com_github_sourcegraph_scip//bindings/go/scip",
"@io_opentelemetry_go_otel//attribute",
],

View File

@ -12,7 +12,7 @@ type AvailableIndexer struct {
Indexer CodeIntelIndexer
}
func PopulateInferredAvailableIndexers(indexJobs []config.IndexJob, blocklist map[string]struct{}, inferredAvailableIndexers map[string]AvailableIndexer) map[string]AvailableIndexer {
func PopulateInferredAvailableIndexers(indexJobs []config.AutoIndexJobSpec, blocklist map[string]struct{}, inferredAvailableIndexers map[string]AvailableIndexer) map[string]AvailableIndexer {
for _, job := range indexJobs {
indexer := job.GetIndexerName()
key := GetKeyForLookup(indexer, job.GetRoot())

View File

@ -6,11 +6,13 @@ import (
"strconv"
"time"
genslices "github.com/life4/genesis/slices"
"go.opentelemetry.io/otel/attribute"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/codeintel/core"
"github.com/sourcegraph/sourcegraph/internal/executor"
"github.com/sourcegraph/sourcegraph/lib/codeintel/autoindex/config"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -154,22 +156,23 @@ type UploadLog struct {
Operation string
}
type IndexState UploadState
// AutoIndexJobState describes the lifecycle state of an auto-indexing job.
// It reuses the state values defined for uploads (queued, processing,
// failed, errored, completed).
type AutoIndexJobState UploadState
const (
IndexStateQueued = IndexState(StateQueued)
IndexStateProcessing = IndexState(StateProcessing)
IndexStateFailed = IndexState(StateFailed)
IndexStateErrored = IndexState(StateErrored)
IndexStateCompleted = IndexState(StateCompleted)
JobStateQueued = AutoIndexJobState(StateQueued)
JobStateProcessing = AutoIndexJobState(StateProcessing)
JobStateFailed = AutoIndexJobState(StateFailed)
JobStateErrored = AutoIndexJobState(StateErrored)
JobStateCompleted = AutoIndexJobState(StateCompleted)
)
type Index struct {
// AutoIndexJob represents an auto-indexing job as represented in lsif_indexes.
type AutoIndexJob struct {
ID int `json:"id"`
Commit string `json:"commit"`
QueuedAt time.Time `json:"queuedAt"`
// TODO(id: state-refactoring) Use IndexState type here.
// IMPORTANT: IndexState must transitively wrap 'string' for back-compat
// TODO(id: state-refactoring) Use AutoIndexJobState type here.
// IMPORTANT: AutoIndexJobState must transitively wrap 'string' for back-compat
State string `json:"state"`
FailureMessage *string `json:"failureMessage"`
StartedAt *time.Time `json:"startedAt"`
@ -193,11 +196,34 @@ type Index struct {
EnqueuerUserID int32 `json:"enqueuerUserID"`
}
func (i Index) RecordID() int {
func NewAutoIndexJob(job config.AutoIndexJobSpec, repositoryID api.RepoID, commit api.CommitID, state AutoIndexJobState) AutoIndexJob {
dockerSteps := genslices.Map(job.Steps, func(step config.DockerStep) DockerStep {
return DockerStep{
Root: step.Root,
Image: step.Image,
Commands: step.Commands,
}
})
return AutoIndexJob{
Commit: string(commit),
RepositoryID: int(repositoryID),
State: string(state),
DockerSteps: dockerSteps,
LocalSteps: job.LocalSteps,
Root: job.Root,
Indexer: job.Indexer,
IndexerArgs: job.IndexerArgs,
Outfile: job.Outfile,
RequestedEnvVars: job.RequestedEnvVars,
}
}
// RecordID returns the job's unique database identifier.
func (job AutoIndexJob) RecordID() int {
	return job.ID
}
func (i Index) RecordUID() string {
func (i AutoIndexJob) RecordUID() string {
return strconv.Itoa(i.ID)
}
@ -299,7 +325,7 @@ type PackageReferenceScanner interface {
Close() error
}
type GetIndexesOptions struct {
type GetAutoIndexJobsOptions struct {
RepositoryID int
State string
States []string
@ -310,7 +336,7 @@ type GetIndexesOptions struct {
Offset int
}
type DeleteIndexesOptions struct {
type DeleteAutoIndexJobsOptions struct {
States []string
IndexerNames []string
Term string
@ -318,7 +344,7 @@ type DeleteIndexesOptions struct {
WithoutUpload bool
}
type ReindexIndexesOptions struct {
type SetRerunAutoIndexJobsOptions struct {
States []string
IndexerNames []string
Term string
@ -334,10 +360,10 @@ type ExportedUpload struct {
Root string
}
type IndexesWithRepositoryNamespace struct {
type GroupedAutoIndexJobs struct {
Root string
Indexer string
Indexes []Index
Indexes []AutoIndexJob
}
type RepositoryWithCount struct {

View File

@ -6,32 +6,32 @@ import (
)
type (
UploadLoaderFactory = *dataloader.LoaderFactory[int, shared.Upload]
IndexLoaderFactory = *dataloader.LoaderFactory[int, shared.Index]
UploadLoader = *dataloader.Loader[int, shared.Upload]
IndexLoader = *dataloader.Loader[int, shared.Index]
UploadLoaderFactory = *dataloader.LoaderFactory[int, shared.Upload]
AutoIndexJobLoaderFactory = *dataloader.LoaderFactory[int, shared.AutoIndexJob]
UploadLoader = *dataloader.Loader[int, shared.Upload]
AutoIndexJobLoader = *dataloader.Loader[int, shared.AutoIndexJob]
)
// NewUploadLoaderFactory returns a loader factory backed by the upload
// service's GetUploadsByIDs method.
func NewUploadLoaderFactory(uploadService UploadsService) UploadLoaderFactory {
	backingService := dataloader.BackingServiceFunc[int, shared.Upload](uploadService.GetUploadsByIDs)
	return dataloader.NewLoaderFactory[int, shared.Upload](backingService)
}
func NewIndexLoaderFactory(uploadService UploadsService) IndexLoaderFactory {
return dataloader.NewLoaderFactory[int, shared.Index](dataloader.BackingServiceFunc[int, shared.Index](uploadService.GetIndexesByIDs))
func NewAutoIndexJobLoaderFactory(uploadService UploadsService) AutoIndexJobLoaderFactory {
return dataloader.NewLoaderFactory[int, shared.AutoIndexJob](dataloader.BackingServiceFunc[int, shared.AutoIndexJob](uploadService.GetAutoIndexJobsByIDs))
}
func PresubmitAssociatedIndexes(indexLoader IndexLoader, uploads ...shared.Upload) {
// PresubmitAssociatedAutoIndexJobs primes the given job loader with the
// associated auto-index job IDs of the given uploads.
func PresubmitAssociatedAutoIndexJobs(autoIndexJobLoader AutoIndexJobLoader, uploads ...shared.Upload) {
	for _, upload := range uploads {
		if upload.AssociatedIndexID != nil {
			autoIndexJobLoader.Presubmit(*upload.AssociatedIndexID)
		}
	}
}
func PresubmitAssociatedUploads(uploadLoader UploadLoader, indexes ...shared.Index) {
for _, index := range indexes {
if index.AssociatedUploadID != nil {
uploadLoader.Presubmit(*index.AssociatedUploadID)
func PresubmitAssociatedUploads(uploadLoader UploadLoader, jobs ...shared.AutoIndexJob) {
for _, job := range jobs {
if job.AssociatedUploadID != nil {
uploadLoader.Presubmit(*job.AssociatedUploadID)
}
}
}

View File

@ -12,16 +12,16 @@ import (
)
type UploadsService interface {
GetIndexesByIDs(ctx context.Context, ids ...int) (_ []shared.Index, err error)
GetAutoIndexJobsByIDs(ctx context.Context, ids ...int) (_ []shared.AutoIndexJob, err error)
GetUploadsByIDs(ctx context.Context, ids ...int) (_ []shared.Upload, err error)
GetIndexes(ctx context.Context, opts uploadshared.GetIndexesOptions) (_ []uploadsshared.Index, _ int, err error)
GetAutoIndexJobs(ctx context.Context, opts uploadshared.GetAutoIndexJobsOptions) (_ []uploadsshared.AutoIndexJob, _ int, err error)
GetUploads(ctx context.Context, opts uploadshared.GetUploadsOptions) (uploads []shared.Upload, totalCount int, err error)
GetAuditLogsForUpload(ctx context.Context, uploadID int) (_ []shared.UploadLog, err error)
GetIndexByID(ctx context.Context, id int) (_ uploadsshared.Index, _ bool, err error)
DeleteIndexByID(ctx context.Context, id int) (_ bool, err error)
DeleteIndexes(ctx context.Context, opts uploadshared.DeleteIndexesOptions) (err error)
ReindexIndexByID(ctx context.Context, id int) (err error)
ReindexIndexes(ctx context.Context, opts uploadshared.ReindexIndexesOptions) (err error)
GetAutoIndexJobByID(ctx context.Context, id int) (_ uploadsshared.AutoIndexJob, _ bool, err error)
DeleteAutoIndexJobByID(ctx context.Context, id int) (_ bool, err error)
DeleteAutoIndexJobs(ctx context.Context, opts uploadshared.DeleteAutoIndexJobsOptions) (err error)
SetRerunAutoIndexJobByID(ctx context.Context, id int) (err error)
SetRerunAutoIndexJobs(ctx context.Context, opts uploadshared.SetRerunAutoIndexJobsOptions) (err error)
GetIndexers(ctx context.Context, opts uploadshared.GetIndexersOptions) ([]string, error)
GetUploadByID(ctx context.Context, id int) (_ shared.Upload, _ bool, err error)
DeleteUploadByID(ctx context.Context, id int) (_ bool, err error)
@ -31,7 +31,7 @@ type UploadsService interface {
GetCommitGraphMetadata(ctx context.Context, repositoryID int) (stale bool, updatedAt *time.Time, err error)
GetRecentUploadsSummary(ctx context.Context, repositoryID int) ([]uploadshared.UploadsWithRepositoryNamespace, error)
GetLastUploadRetentionScanForRepository(ctx context.Context, repositoryID int) (*time.Time, error)
GetRecentIndexesSummary(ctx context.Context, repositoryID int) ([]uploadshared.IndexesWithRepositoryNamespace, error)
GetRecentAutoIndexJobsSummary(ctx context.Context, repositoryID int) ([]uploadshared.GroupedAutoIndexJobs, error)
NumRepositoriesWithCodeIntelligence(ctx context.Context) (int, error)
RepositoryIDsWithErrors(ctx context.Context, offset, limit int) (_ []uploadshared.RepositoryWithCount, totalCount int, err error)
}

View File

@ -15,32 +15,32 @@ import (
"github.com/sourcegraph/sourcegraph/lib/pointers"
)
// indexStepsResolver resolves the steps of an index record.
// autoIndexJobStepsResolver resolves the steps of an auto-indexing job.
//
// Index jobs are broken into three parts:
// Jobs are broken into three parts:
// - pre-index steps; all but the last docker step
// - index step; the last docker step
// - upload step; the only src-cli step
//
// The setup and teardown steps match the executor setup and teardown.
type indexStepsResolver struct {
type autoIndexJobStepsResolver struct {
siteAdminChecker sharedresolvers.SiteAdminChecker
index uploadsshared.Index
job uploadsshared.AutoIndexJob
}
func NewIndexStepsResolver(siteAdminChecker sharedresolvers.SiteAdminChecker, index uploadsshared.Index) resolverstubs.IndexStepsResolver {
return &indexStepsResolver{siteAdminChecker: siteAdminChecker, index: index}
func NewAutoIndexJobStepsResolver(siteAdminChecker sharedresolvers.SiteAdminChecker, job uploadsshared.AutoIndexJob) resolverstubs.AutoIndexJobStepsResolver {
return &autoIndexJobStepsResolver{siteAdminChecker: siteAdminChecker, job: job}
}
func (r *indexStepsResolver) Setup() []resolverstubs.ExecutionLogEntryResolver {
func (r *autoIndexJobStepsResolver) Setup() []resolverstubs.ExecutionLogEntryResolver {
return r.executionLogEntryResolversWithPrefix(logKeyPrefixSetup)
}
var logKeyPrefixSetup = regexp.MustCompile("^setup\\.")
func (r *indexStepsResolver) PreIndex() []resolverstubs.PreIndexStepResolver {
func (r *autoIndexJobStepsResolver) PreIndex() []resolverstubs.PreIndexStepResolver {
var resolvers []resolverstubs.PreIndexStepResolver
for i, step := range r.index.DockerSteps {
for i, step := range r.job.DockerSteps {
logKeyPreIndex := regexp.MustCompile(fmt.Sprintf("step\\.(docker|kubernetes)\\.pre-index\\.%d", i))
if entry, ok := r.findExecutionLogEntry(logKeyPreIndex); ok {
resolvers = append(resolvers, newPreIndexStepResolver(r.siteAdminChecker, step, &entry))
@ -56,24 +56,24 @@ func (r *indexStepsResolver) PreIndex() []resolverstubs.PreIndexStepResolver {
return resolvers
}
func (r *indexStepsResolver) Index() resolverstubs.IndexStepResolver {
func (r *autoIndexJobStepsResolver) Index() resolverstubs.IndexStepResolver {
if entry, ok := r.findExecutionLogEntry(logKeyPrefixIndexer); ok {
return newIndexStepResolver(r.siteAdminChecker, r.index, &entry)
return newIndexStepResolver(r.siteAdminChecker, r.job, &entry)
}
// This is here for backwards compatibility for records that were created before
// named keys for steps existed.
logKeyRegex := regexp.MustCompile(fmt.Sprintf("^step\\.(docker|kubernetes)\\.%d", len(r.index.DockerSteps)))
logKeyRegex := regexp.MustCompile(fmt.Sprintf("^step\\.(docker|kubernetes)\\.%d", len(r.job.DockerSteps)))
if entry, ok := r.findExecutionLogEntry(logKeyRegex); ok {
return newIndexStepResolver(r.siteAdminChecker, r.index, &entry)
return newIndexStepResolver(r.siteAdminChecker, r.job, &entry)
}
return newIndexStepResolver(r.siteAdminChecker, r.index, nil)
return newIndexStepResolver(r.siteAdminChecker, r.job, nil)
}
var logKeyPrefixIndexer = regexp.MustCompile("^step\\.(docker|kubernetes)\\.indexer")
func (r *indexStepsResolver) Upload() resolverstubs.ExecutionLogEntryResolver {
func (r *autoIndexJobStepsResolver) Upload() resolverstubs.ExecutionLogEntryResolver {
if entry, ok := r.findExecutionLogEntry(logKeyPrefixUpload); ok {
return newExecutionLogEntryResolver(r.siteAdminChecker, entry)
}
@ -92,14 +92,14 @@ var (
logKeyPrefixSrcFirstStep = regexp.MustCompile("^step\\.src\\.0")
)
func (r *indexStepsResolver) Teardown() []resolverstubs.ExecutionLogEntryResolver {
func (r *autoIndexJobStepsResolver) Teardown() []resolverstubs.ExecutionLogEntryResolver {
return r.executionLogEntryResolversWithPrefix(logKeyPrefixTeardown)
}
var logKeyPrefixTeardown = regexp.MustCompile("^teardown\\.")
func (r *indexStepsResolver) findExecutionLogEntry(key *regexp.Regexp) (executor.ExecutionLogEntry, bool) {
for _, entry := range r.index.ExecutionLogs {
func (r *autoIndexJobStepsResolver) findExecutionLogEntry(key *regexp.Regexp) (executor.ExecutionLogEntry, bool) {
for _, entry := range r.job.ExecutionLogs {
if key.MatchString(entry.Key) {
return entry, true
}
@ -108,9 +108,9 @@ func (r *indexStepsResolver) findExecutionLogEntry(key *regexp.Regexp) (executor
return executor.ExecutionLogEntry{}, false
}
func (r *indexStepsResolver) executionLogEntryResolversWithPrefix(prefix *regexp.Regexp) []resolverstubs.ExecutionLogEntryResolver {
func (r *autoIndexJobStepsResolver) executionLogEntryResolversWithPrefix(prefix *regexp.Regexp) []resolverstubs.ExecutionLogEntryResolver {
var resolvers []resolverstubs.ExecutionLogEntryResolver
for _, entry := range r.index.ExecutionLogs {
for _, entry := range r.job.ExecutionLogs {
if prefix.MatchString(entry.Key) {
res := newExecutionLogEntryResolver(r.siteAdminChecker, entry)
resolvers = append(resolvers, res)
@ -152,29 +152,31 @@ func (r *preIndexStepResolver) LogEntry() resolverstubs.ExecutionLogEntryResolve
//
//
// indexStepResolver represents only the 'index' phase of an auto-indexing job.
// See autoIndexJobStepsResolver for details.
type indexStepResolver struct {
siteAdminChecker sharedresolvers.SiteAdminChecker
index uploadsshared.Index
job uploadsshared.AutoIndexJob
entry *executor.ExecutionLogEntry
}
func newIndexStepResolver(siteAdminChecker sharedresolvers.SiteAdminChecker, index uploadsshared.Index, entry *executor.ExecutionLogEntry) resolverstubs.IndexStepResolver {
func newIndexStepResolver(siteAdminChecker sharedresolvers.SiteAdminChecker, job uploadsshared.AutoIndexJob, entry *executor.ExecutionLogEntry) resolverstubs.IndexStepResolver {
return &indexStepResolver{
siteAdminChecker: siteAdminChecker,
index: index,
job: job,
entry: entry,
}
}
func (r *indexStepResolver) Commands() []string { return r.index.LocalSteps }
func (r *indexStepResolver) IndexerArgs() []string { return r.index.IndexerArgs }
func (r *indexStepResolver) Outfile() *string { return pointers.NonZeroPtr(r.index.Outfile) }
func (r *indexStepResolver) Commands() []string { return r.job.LocalSteps }
func (r *indexStepResolver) IndexerArgs() []string { return r.job.IndexerArgs }
func (r *indexStepResolver) Outfile() *string { return pointers.NonZeroPtr(r.job.Outfile) }
func (r *indexStepResolver) RequestedEnvVars() *[]string {
if len(r.index.RequestedEnvVars) == 0 {
if len(r.job.RequestedEnvVars) == 0 {
return nil
}
return &r.index.RequestedEnvVars
return &r.job.RequestedEnvVars
}
func (r *indexStepResolver) LogEntry() resolverstubs.ExecutionLogEntryResolver {

File diff suppressed because it is too large Load Diff

View File

@ -32,7 +32,7 @@ type preciseIndexResolver struct {
locationResolver *gitresolvers.CachedLocationResolver
traceErrs *observation.ErrCollector
upload *shared.Upload
index *uploadsshared.Index
index *uploadsshared.AutoIndexJob
}
func newPreciseIndexResolver(
@ -41,13 +41,13 @@ func newPreciseIndexResolver(
policySvc PolicyService,
gitserverClient gitserver.Client,
uploadLoader UploadLoader,
indexLoader IndexLoader,
autoIndexJobLoader AutoIndexJobLoader,
siteAdminChecker sharedresolvers.SiteAdminChecker,
repoStore database.RepoStore,
locationResolver *gitresolvers.CachedLocationResolver,
traceErrs *observation.ErrCollector,
upload *shared.Upload,
index *uploadsshared.Index,
index *uploadsshared.AutoIndexJob,
) (resolverstubs.PreciseIndexResolver, error) {
if index != nil && index.AssociatedUploadID != nil && upload == nil {
v, ok, err := uploadLoader.GetByID(ctx, *index.AssociatedUploadID)
@ -61,7 +61,7 @@ func newPreciseIndexResolver(
if upload != nil {
if upload.AssociatedIndexID != nil {
v, ok, err := indexLoader.GetByID(ctx, *upload.AssociatedIndexID)
v, ok, err := autoIndexJobLoader.GetByID(ctx, *upload.AssociatedIndexID)
if err != nil {
return nil, err
}
@ -157,9 +157,9 @@ func (r *preciseIndexResolver) ProcessingFinishedAt() *gqlutil.DateTime {
return nil
}
func (r *preciseIndexResolver) Steps() resolverstubs.IndexStepsResolver {
func (r *preciseIndexResolver) Steps() resolverstubs.AutoIndexJobStepsResolver {
if r.index != nil {
return NewIndexStepsResolver(r.siteAdminChecker, *r.index)
return NewAutoIndexJobStepsResolver(r.siteAdminChecker, *r.index)
}
return nil
@ -276,19 +276,19 @@ func (r *preciseIndexResolver) State() string {
}
}
switch shared.IndexState(strings.ToLower(r.index.State)) {
case shared.IndexStateQueued:
switch shared.AutoIndexJobState(strings.ToLower(r.index.State)) {
case shared.JobStateQueued:
return "QUEUED_FOR_INDEXING"
case shared.IndexStateProcessing:
case shared.JobStateProcessing:
return "INDEXING"
case shared.IndexStateFailed:
case shared.JobStateFailed:
fallthrough
case shared.IndexStateErrored:
case shared.JobStateErrored:
return "INDEXING_ERRORED"
case shared.IndexStateCompleted:
case shared.JobStateCompleted:
// Should not actually occur in practice (where did upload go?)
return "INDEXING_COMPLETED"

View File

@ -40,11 +40,11 @@ func NewPreciseIndexResolverFactory(
func (f *PreciseIndexResolverFactory) Create(
ctx context.Context,
uploadLoader UploadLoader,
indexLoader IndexLoader,
autoIndexJobLoader AutoIndexJobLoader,
locationResolver *gitresolvers.CachedLocationResolver,
traceErrs *observation.ErrCollector,
upload *shared.Upload,
index *uploadsshared.Index,
index *uploadsshared.AutoIndexJob,
) (resolverstubs.PreciseIndexResolver, error) {
return newPreciseIndexResolver(
ctx,
@ -52,7 +52,7 @@ func (f *PreciseIndexResolverFactory) Create(
f.policySvc,
f.gitserverClient,
uploadLoader,
indexLoader,
autoIndexJobLoader,
f.siteAdminChecker,
f.repoStore,
locationResolver,

View File

@ -12,7 +12,7 @@ type rootResolver struct {
autoindexSvc AutoIndexingService
siteAdminChecker sharedresolvers.SiteAdminChecker
uploadLoaderFactory UploadLoaderFactory
indexLoaderFactory IndexLoaderFactory
autoIndexJobLoaderFactory AutoIndexJobLoaderFactory
locationResolverFactory *gitresolvers.CachedLocationResolverFactory
preciseIndexResolverFactory *PreciseIndexResolverFactory
operations *operations
@ -24,7 +24,7 @@ func NewRootResolver(
autoindexSvc AutoIndexingService,
siteAdminChecker sharedresolvers.SiteAdminChecker,
uploadLoaderFactory UploadLoaderFactory,
indexLoaderFactory IndexLoaderFactory,
autoIndexJobLoaderFactory AutoIndexJobLoaderFactory,
locationResolverFactory *gitresolvers.CachedLocationResolverFactory,
preciseIndexResolverFactory *PreciseIndexResolverFactory,
) resolverstubs.UploadsServiceResolver {
@ -33,7 +33,7 @@ func NewRootResolver(
autoindexSvc: autoindexSvc,
siteAdminChecker: siteAdminChecker,
uploadLoaderFactory: uploadLoaderFactory,
indexLoaderFactory: indexLoaderFactory,
autoIndexJobLoaderFactory: autoIndexJobLoaderFactory,
locationResolverFactory: locationResolverFactory,
preciseIndexResolverFactory: preciseIndexResolverFactory,
operations: newOperations(observationCtx),

View File

@ -65,7 +65,7 @@ func (r *rootResolver) RepositorySummary(ctx context.Context, repoID graphql.ID)
return nil, err
}
recentIndexes, err := r.uploadSvc.GetRecentIndexesSummary(ctx, id)
recentIndexes, err := r.uploadSvc.GetRecentAutoIndexJobsSummary(ctx, id)
if err != nil {
return nil, err
}
@ -131,7 +131,7 @@ func (r *rootResolver) RepositorySummary(ctx context.Context, repoID graphql.ID)
allUploads = append(allUploads, recentUpload.Uploads...)
}
var allIndexes []shared.Index
var allIndexes []shared.AutoIndexJob
for _, recentIndex := range recentIndexes {
allIndexes = append(allIndexes, recentIndex.Indexes...)
}
@ -140,9 +140,9 @@ func (r *rootResolver) RepositorySummary(ctx context.Context, repoID graphql.ID)
uploadLoader := r.uploadLoaderFactory.CreateWithInitialData(allUploads)
PresubmitAssociatedUploads(uploadLoader, allIndexes...)
// Create index loader with data we already have, and pre-submit associated indexes from upload records
indexLoader := r.indexLoaderFactory.CreateWithInitialData(allIndexes)
PresubmitAssociatedIndexes(indexLoader, allUploads...)
// Create job loader with data we already have, and pre-submit associated indexes from upload records
autoIndexJobLoader := r.autoIndexJobLoaderFactory.CreateWithInitialData(allIndexes)
PresubmitAssociatedAutoIndexJobs(autoIndexJobLoader, allUploads...)
// No data to load for git data (yet)
locationResolver := r.locationResolverFactory.Create()
@ -153,7 +153,7 @@ func (r *rootResolver) RepositorySummary(ctx context.Context, repoID graphql.ID)
inferredAvailableIndexersResolver,
limitErr,
uploadLoader,
indexLoader,
autoIndexJobLoader,
errTracer,
r.preciseIndexResolverFactory,
), nil
@ -295,7 +295,7 @@ func (r *indexerWithCountResolver) Count() int32
type RepositorySummary struct {
RecentUploads []uploadsShared.UploadsWithRepositoryNamespace
RecentIndexes []uploadsShared.IndexesWithRepositoryNamespace
RecentIndexes []uploadsShared.GroupedAutoIndexJobs
LastUploadRetentionScan *time.Time
LastIndexScan *time.Time
}
@ -324,7 +324,7 @@ type repositorySummaryResolver struct {
availableIndexers []inferredAvailableIndexers2
limitErr error
uploadLoader UploadLoader
indexLoader IndexLoader
autoIndexJobLoader AutoIndexJobLoader
locationResolver *gitresolvers.CachedLocationResolver
errTracer *observation.ErrCollector
preciseIndexResolverFactory *PreciseIndexResolverFactory
@ -341,7 +341,7 @@ func newRepositorySummaryResolver(
availableIndexers []inferredAvailableIndexers2,
limitErr error,
uploadLoader UploadLoader,
indexLoader IndexLoader,
autoIndexJobLoader AutoIndexJobLoader,
errTracer *observation.ErrCollector,
preciseIndexResolverFactory *PreciseIndexResolverFactory,
) resolverstubs.CodeIntelRepositorySummaryResolver {
@ -350,7 +350,7 @@ func newRepositorySummaryResolver(
availableIndexers: availableIndexers,
limitErr: limitErr,
uploadLoader: uploadLoader,
indexLoader: indexLoader,
autoIndexJobLoader: autoIndexJobLoader,
locationResolver: locationResolver,
errTracer: errTracer,
preciseIndexResolverFactory: preciseIndexResolverFactory,
@ -372,7 +372,7 @@ func (r *repositorySummaryResolver) RecentActivity(ctx context.Context) ([]resol
for _, upload := range recentUploads.Uploads {
upload := upload
resolver, err := r.preciseIndexResolverFactory.Create(ctx, r.uploadLoader, r.indexLoader, r.locationResolver, r.errTracer, &upload, nil)
resolver, err := r.preciseIndexResolverFactory.Create(ctx, r.uploadLoader, r.autoIndexJobLoader, r.locationResolver, r.errTracer, &upload, nil)
if err != nil {
return nil, err
}
@ -391,7 +391,7 @@ func (r *repositorySummaryResolver) RecentActivity(ctx context.Context) ([]resol
}
}
resolver, err := r.preciseIndexResolverFactory.Create(ctx, r.uploadLoader, r.indexLoader, r.locationResolver, r.errTracer, nil, &index)
resolver, err := r.preciseIndexResolverFactory.Create(ctx, r.uploadLoader, r.autoIndexJobLoader, r.locationResolver, r.errTracer, nil, &index)
if err != nil {
return nil, err
}

View File

@ -29,7 +29,7 @@ func (r *rootResolver) DeletePreciseIndex(ctx context.Context, args *struct{ ID
return nil, err
}
} else if indexID != 0 {
if _, err := r.uploadSvc.DeleteIndexByID(ctx, indexID); err != nil {
if _, err := r.uploadSvc.DeleteAutoIndexJobByID(ctx, indexID); err != nil {
return nil, err
}
}
@ -88,7 +88,7 @@ func (r *rootResolver) DeletePreciseIndexes(ctx context.Context, args *resolvers
}
}
if !skipIndexes {
if err := r.uploadSvc.DeleteIndexes(ctx, uploadsshared.DeleteIndexesOptions{
if err := r.uploadSvc.DeleteAutoIndexJobs(ctx, uploadsshared.DeleteAutoIndexJobsOptions{
RepositoryID: repositoryID,
States: indexStates,
IndexerNames: indexerNames,
@ -120,7 +120,7 @@ func (r *rootResolver) ReindexPreciseIndex(ctx context.Context, args *struct{ ID
return nil, err
}
} else if indexID != 0 {
if err := r.uploadSvc.ReindexIndexByID(ctx, indexID); err != nil {
if err := r.uploadSvc.SetRerunAutoIndexJobByID(ctx, indexID); err != nil {
return nil, err
}
}
@ -179,7 +179,7 @@ func (r *rootResolver) ReindexPreciseIndexes(ctx context.Context, args *resolver
}
}
if !skipIndexes {
if err := r.uploadSvc.ReindexIndexes(ctx, uploadsshared.ReindexIndexesOptions{
if err := r.uploadSvc.SetRerunAutoIndexJobs(ctx, uploadsshared.SetRerunAutoIndexJobsOptions{
States: indexStates,
IndexerNames: indexerNames,
Term: term,

View File

@ -130,10 +130,10 @@ func (r *rootResolver) PreciseIndexes(ctx context.Context, args *resolverstubs.P
}
}
var indexes []uploadsshared.Index
totalIndexCount := 0
var autoIndexJobs []uploadsshared.AutoIndexJob
totalJobsCount := 0
if !skipIndexes {
if indexes, totalIndexCount, err = r.uploadSvc.GetIndexes(ctx, uploadsshared.GetIndexesOptions{
if autoIndexJobs, totalJobsCount, err = r.uploadSvc.GetAutoIndexJobs(ctx, uploadsshared.GetAutoIndexJobsOptions{
RepositoryID: repositoryID,
States: indexStates,
Term: term,
@ -148,20 +148,20 @@ func (r *rootResolver) PreciseIndexes(ctx context.Context, args *resolverstubs.P
type pair struct {
upload *shared.Upload
index *uploadsshared.Index
job *uploadsshared.AutoIndexJob
}
pairs := make([]pair, 0, pageSize)
addUpload := func(upload shared.Upload) { pairs = append(pairs, pair{&upload, nil}) }
addIndex := func(index uploadsshared.Index) { pairs = append(pairs, pair{nil, &index}) }
addJob := func(job uploadsshared.AutoIndexJob) { pairs = append(pairs, pair{nil, &job}) }
uIdx := 0
iIdx := 0
for uIdx < len(uploads) && iIdx < len(indexes) && (uIdx+iIdx) < pageSize {
if uploads[uIdx].UploadedAt.After(indexes[iIdx].QueuedAt) {
for uIdx < len(uploads) && iIdx < len(autoIndexJobs) && (uIdx+iIdx) < pageSize {
if uploads[uIdx].UploadedAt.After(autoIndexJobs[iIdx].QueuedAt) {
addUpload(uploads[uIdx])
uIdx++
} else {
addIndex(indexes[iIdx])
addJob(autoIndexJobs[iIdx])
iIdx++
}
}
@ -169,8 +169,8 @@ func (r *rootResolver) PreciseIndexes(ctx context.Context, args *resolverstubs.P
addUpload(uploads[uIdx])
uIdx++
}
for iIdx < len(indexes) && (uIdx+iIdx) < pageSize {
addIndex(indexes[iIdx])
for iIdx < len(autoIndexJobs) && (uIdx+iIdx) < pageSize {
addJob(autoIndexJobs[iIdx])
iIdx++
}
@ -179,7 +179,7 @@ func (r *rootResolver) PreciseIndexes(ctx context.Context, args *resolverstubs.P
cursor += strconv.Itoa(newUploadOffset)
}
cursor += ":"
if newIndexOffset := indexOffset + iIdx; newIndexOffset < totalIndexCount {
if newIndexOffset := indexOffset + iIdx; newIndexOffset < totalJobsCount {
cursor += strconv.Itoa(newIndexOffset)
}
if cursor == ":" {
@ -188,18 +188,18 @@ func (r *rootResolver) PreciseIndexes(ctx context.Context, args *resolverstubs.P
// Create upload loader with data we already have, and pre-submit associated uploads from index records
uploadLoader := r.uploadLoaderFactory.CreateWithInitialData(uploads)
PresubmitAssociatedUploads(uploadLoader, indexes...)
PresubmitAssociatedUploads(uploadLoader, autoIndexJobs...)
// Create index loader with data we already have, and pre-submit associated indexes from upload records
indexLoader := r.indexLoaderFactory.CreateWithInitialData(indexes)
PresubmitAssociatedIndexes(indexLoader, uploads...)
// Create job loader with data we already have, and pre-submit associated indexes from upload records
autoIndexJobLoader := r.autoIndexJobLoaderFactory.CreateWithInitialData(autoIndexJobs)
PresubmitAssociatedAutoIndexJobs(autoIndexJobLoader, uploads...)
// No data to load for git data (yet)
locationResolver := r.locationResolverFactory.Create()
resolvers := make([]resolverstubs.PreciseIndexResolver, 0, len(pairs))
for _, pair := range pairs {
resolver, err := r.preciseIndexResolverFactory.Create(ctx, uploadLoader, indexLoader, locationResolver, errTracer, pair.upload, pair.index)
resolver, err := r.preciseIndexResolverFactory.Create(ctx, uploadLoader, autoIndexJobLoader, locationResolver, errTracer, pair.upload, pair.job)
if err != nil {
return nil, err
}
@ -207,7 +207,7 @@ func (r *rootResolver) PreciseIndexes(ctx context.Context, args *resolverstubs.P
resolvers = append(resolvers, resolver)
}
return resolverstubs.NewCursorWithTotalCountConnectionResolver(resolvers, cursor, int32(totalUploadCount+totalIndexCount)), nil
return resolverstubs.NewCursorWithTotalCountConnectionResolver(resolvers, cursor, int32(totalUploadCount+totalJobsCount)), nil
}
func (r *rootResolver) PreciseIndexByID(ctx context.Context, id graphql.ID) (_ resolverstubs.PreciseIndexResolver, err error) {
@ -231,22 +231,22 @@ func (r *rootResolver) PreciseIndexByID(ctx context.Context, id graphql.ID) (_ r
uploadLoader := r.uploadLoaderFactory.CreateWithInitialData([]shared.Upload{upload})
// Pre-submit associated index id for subsequent loading
indexLoader := r.indexLoaderFactory.Create()
PresubmitAssociatedIndexes(indexLoader, upload)
autoIndexJobLoader := r.autoIndexJobLoaderFactory.Create()
PresubmitAssociatedAutoIndexJobs(autoIndexJobLoader, upload)
// No data to load for git data (yet)
locationResolverFactory := r.locationResolverFactory.Create()
return r.preciseIndexResolverFactory.Create(ctx, uploadLoader, indexLoader, locationResolverFactory, errTracer, &upload, nil)
return r.preciseIndexResolverFactory.Create(ctx, uploadLoader, autoIndexJobLoader, locationResolverFactory, errTracer, &upload, nil)
}
if indexID != 0 {
index, ok, err := r.uploadSvc.GetIndexByID(ctx, indexID)
index, ok, err := r.uploadSvc.GetAutoIndexJobByID(ctx, indexID)
if err != nil || !ok {
return nil, err
}
// Create index loader with data we already have
indexLoader := r.indexLoaderFactory.CreateWithInitialData([]shared.Index{index})
// Create job loader with data we already have
autoIndexJobLoader := r.autoIndexJobLoaderFactory.CreateWithInitialData([]shared.AutoIndexJob{index})
// Pre-submit associated upload id for subsequent loading
uploadLoader := r.uploadLoaderFactory.Create()
@ -255,7 +255,7 @@ func (r *rootResolver) PreciseIndexByID(ctx context.Context, id graphql.ID) (_ r
// No data to load for git data (yet)
locationResolverFactory := r.locationResolverFactory.Create()
return r.preciseIndexResolverFactory.Create(ctx, uploadLoader, indexLoader, locationResolverFactory, errTracer, nil, &index)
return r.preciseIndexResolverFactory.Create(ctx, uploadLoader, autoIndexJobLoader, locationResolverFactory, errTracer, nil, &index)
}
return nil, errors.New("invalid identifier")

View File

@ -11,24 +11,24 @@ import (
// default json behaviour is to render nil slices as "null", so we manually
// set all nil slices in the struct to empty slice
func MarshalJSON(config IndexConfiguration) ([]byte, error) {
func MarshalJSON(config AutoIndexJobSpecList) ([]byte, error) {
nonNil := config
if nonNil.IndexJobs == nil {
nonNil.IndexJobs = []IndexJob{}
if nonNil.JobSpecs == nil {
nonNil.JobSpecs = []AutoIndexJobSpec{}
}
for idx := range nonNil.IndexJobs {
if nonNil.IndexJobs[idx].IndexerArgs == nil {
nonNil.IndexJobs[idx].IndexerArgs = []string{}
for idx := range nonNil.JobSpecs {
if nonNil.JobSpecs[idx].IndexerArgs == nil {
nonNil.JobSpecs[idx].IndexerArgs = []string{}
}
if nonNil.IndexJobs[idx].LocalSteps == nil {
nonNil.IndexJobs[idx].LocalSteps = []string{}
if nonNil.JobSpecs[idx].LocalSteps == nil {
nonNil.JobSpecs[idx].LocalSteps = []string{}
}
if nonNil.IndexJobs[idx].Steps == nil {
nonNil.IndexJobs[idx].Steps = []DockerStep{}
if nonNil.JobSpecs[idx].Steps == nil {
nonNil.JobSpecs[idx].Steps = []DockerStep{}
}
for stepIdx := range nonNil.IndexJobs[idx].Steps {
if nonNil.IndexJobs[idx].Steps[stepIdx].Commands == nil {
nonNil.IndexJobs[idx].Steps[stepIdx].Commands = []string{}
for stepIdx := range nonNil.JobSpecs[idx].Steps {
if nonNil.JobSpecs[idx].Steps[stepIdx].Commands == nil {
nonNil.JobSpecs[idx].Steps[stepIdx].Commands = []string{}
}
}
}
@ -36,10 +36,10 @@ func MarshalJSON(config IndexConfiguration) ([]byte, error) {
return json.MarshalIndent(nonNil, "", " ")
}
func UnmarshalJSON(data []byte) (IndexConfiguration, error) {
configuration := IndexConfiguration{}
func UnmarshalJSON(data []byte) (AutoIndexJobSpecList, error) {
configuration := AutoIndexJobSpecList{}
if err := jsonUnmarshal(string(data), &configuration); err != nil {
return IndexConfiguration{}, errors.Errorf("invalid JSON: %v", err)
return AutoIndexJobSpecList{}, errors.Errorf("invalid JSON: %v", err)
}
return configuration, nil
}

View File

@ -36,8 +36,8 @@ func TestUnmarshalJSON(t *testing.T) {
t.Fatalf("unexpected error: %s", err)
}
expected := IndexConfiguration{
IndexJobs: []IndexJob{
expected := AutoIndexJobSpecList{
JobSpecs: []AutoIndexJobSpec{
{
Steps: []DockerStep{
{

View File

@ -2,11 +2,11 @@ package config
import "strings"
type IndexConfiguration struct {
IndexJobs []IndexJob `json:"index_jobs" yaml:"index_jobs"`
type AutoIndexJobSpecList struct {
JobSpecs []AutoIndexJobSpec `json:"index_jobs" yaml:"index_jobs"`
}
type IndexJob struct {
type AutoIndexJobSpec struct {
Steps []DockerStep `json:"steps" yaml:"steps"`
LocalSteps []string `json:"local_steps" yaml:"local_steps"`
Root string `json:"root" yaml:"root"`
@ -16,7 +16,7 @@ type IndexJob struct {
RequestedEnvVars []string `json:"requestedEnvVars" yaml:"requestedEnvVars"`
}
func (j IndexJob) GetRoot() string {
func (j AutoIndexJobSpec) GetRoot() string {
return j.Root
}
@ -24,7 +24,7 @@ func (j IndexJob) GetRoot() string {
// from the indexer name.
// Example:
// sourcegraph/lsif-go@sha256:... => lsif-go
func (j IndexJob) GetIndexerName() string {
func (j AutoIndexJobSpec) GetIndexerName() string {
return extractIndexerName(j.Indexer)
}

View File

@ -1,14 +1,15 @@
package config
import (
"github.com/sourcegraph/sourcegraph/lib/errors"
"gopkg.in/yaml.v2"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func UnmarshalYAML(data []byte) (IndexConfiguration, error) {
configuration := IndexConfiguration{}
func UnmarshalYAML(data []byte) (AutoIndexJobSpecList, error) {
configuration := AutoIndexJobSpecList{}
if err := yaml.Unmarshal(data, &configuration); err != nil {
return IndexConfiguration{}, errors.Errorf("invalid YAML: %v", err)
return AutoIndexJobSpecList{}, errors.Errorf("invalid YAML: %v", err)
}
return configuration, nil

View File

@ -29,8 +29,8 @@ func TestUnmarshalYAML(t *testing.T) {
t.Fatalf("unexpected error: %s", err)
}
expected := IndexConfiguration{
IndexJobs: []IndexJob{
expected := AutoIndexJobSpecList{
JobSpecs: []AutoIndexJobSpec{
{
Steps: []DockerStep{
{