Executor Job Specific Tokens (#46792)

Randell Callahan 2023-02-28 11:40:22 -07:00 committed by GitHub
parent 0e64682973
commit a82a602b67
58 changed files with 4697 additions and 1322 deletions


@ -9,6 +9,7 @@ import (
"net/http"
"net/url"
"path"
"strconv"
"github.com/inconshreveable/log15"
"golang.org/x/net/context/ctxhttp"
@ -19,6 +20,9 @@ import (
// schemeExecutorToken is the special type of token to communicate with the executor endpoints.
const schemeExecutorToken = "token-executor"
// schemeJobToken is the special type of token to communicate with the job endpoints.
const schemeJobToken = "Bearer"
// BaseClient is an abstract HTTP API-backed data access layer. Instances of this
// struct should not be used directly, but should be used compositionally by other
// stores that implement logic specific to a domain.
@ -51,6 +55,9 @@ type BaseClient struct {
}
type BaseClientOptions struct {
// ExecutorName is the name of the executor host.
ExecutorName string
// UserAgent specifies the user agent string to supply on requests.
UserAgent string
@ -156,7 +163,7 @@ func NewRequest(method string, baseURL, urlPath string, payload any) (*http.Requ
}
// NewRequest creates a new http.Request where only the Authorization HTTP header is set.
func (c *BaseClient) NewRequest(method, path string, payload io.Reader) (*http.Request, error) {
func (c *BaseClient) NewRequest(jobId int, token, method, path string, payload io.Reader) (*http.Request, error) {
u := c.newRelativeURL(path)
r, err := http.NewRequest(method, u.String(), payload)
@ -164,7 +171,7 @@ func (c *BaseClient) NewRequest(method, path string, payload io.Reader) (*http.R
return nil, err
}
r.Header.Add("Authorization", fmt.Sprintf("%s %s", schemeExecutorToken, c.options.EndpointOptions.Token))
c.addHeaders(jobId, token, r)
return r, nil
}
@ -182,6 +189,20 @@ func (c *BaseClient) NewJSONRequest(method, path string, payload any) (*http.Req
return r, nil
}
// NewJSONJobRequest creates a new http.Request where the Content-Type is set to 'application/json' and the Authorization
// HTTP header is set.
func (c *BaseClient) NewJSONJobRequest(jobId int, method, path string, token string, payload any) (*http.Request, error) {
u := c.newRelativeURL(path)
r, err := newJSONRequest(method, u, payload)
if err != nil {
return nil, err
}
c.addHeaders(jobId, token, r)
return r, nil
}
// newRelativeURL builds the relative URL on the provided base URL and adds any additional paths.
func (c *BaseClient) newRelativeURL(endpointPath string) *url.URL {
// Create a shallow clone
@ -206,3 +227,14 @@ func newJSONRequest(method string, url *url.URL, payload any) (*http.Request, er
req.Header.Set("Content-Type", "application/json")
return req, nil
}
func (c *BaseClient) addHeaders(jobId int, token string, r *http.Request) {
// If no job token is set, we may be talking to an older version of Sourcegraph, so fall back to the general executor access token.
if len(token) > 0 {
r.Header.Add("Authorization", fmt.Sprintf("%s %s", schemeJobToken, token))
} else {
r.Header.Add("Authorization", fmt.Sprintf("%s %s", schemeExecutorToken, c.options.EndpointOptions.Token))
}
r.Header.Add("X-Sourcegraph-Job-ID", strconv.Itoa(jobId))
r.Header.Add("X-Sourcegraph-Executor-Name", c.options.ExecutorName)
}
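
For illustration only, not part of the commit: a minimal, runnable sketch of the header selection addHeaders performs, using the scheme constants and header names from the diff above. The function name and sample values are placeholders; the token and executor values are borrowed from the tests later in this commit.

package main

import (
	"fmt"
	"net/http"
	"strconv"
)

// illustrateHeaders mirrors the addHeaders logic above: prefer the job-scoped
// Bearer token when one is present, otherwise fall back to the general
// executor access token, and always attach the job/executor metadata headers.
func illustrateHeaders(jobID int, jobToken, executorToken, executorName string) http.Header {
	r, _ := http.NewRequest(http.MethodGet, "https://example.invalid", nil)
	if len(jobToken) > 0 {
		r.Header.Add("Authorization", fmt.Sprintf("Bearer %s", jobToken))
	} else {
		r.Header.Add("Authorization", fmt.Sprintf("token-executor %s", executorToken))
	}
	r.Header.Add("X-Sourcegraph-Job-ID", strconv.Itoa(jobID))
	r.Header.Add("X-Sourcegraph-Executor-Name", executorName)
	return r.Header
}

func main() {
	// With a job token: Authorization: Bearer sometoken
	fmt.Println(illustrateHeaders(42, "sometoken", "hunter2", "test-executor"))
	// Without one: Authorization: token-executor hunter2
	fmt.Println(illustrateHeaders(42, "", "hunter2", "test-executor"))
}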


@ -11,6 +11,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -38,14 +39,14 @@ func New(observationCtx *observation.Context, options apiclient.BaseClientOption
}, nil
}
func (c *Client) Exists(ctx context.Context, bucket string, key string) (exists bool, err error) {
func (c *Client) Exists(ctx context.Context, job types.Job, bucket string, key string) (exists bool, err error) {
ctx, _, endObservation := c.operations.exists.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("bucket", bucket),
otlog.String("key", key),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewRequest(http.MethodHead, fmt.Sprintf("%s/%s", bucket, key), nil)
req, err := c.client.NewRequest(job.ID, job.Token, http.MethodHead, fmt.Sprintf("%s/%s", bucket, key), nil)
if err != nil {
return false, err
}
@ -62,14 +63,14 @@ func (c *Client) Exists(ctx context.Context, bucket string, key string) (exists
return true, nil
}
func (c *Client) Get(ctx context.Context, bucket string, key string) (content io.ReadCloser, err error) {
func (c *Client) Get(ctx context.Context, job types.Job, bucket string, key string) (content io.ReadCloser, err error) {
ctx, _, endObservation := c.operations.get.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("bucket", bucket),
otlog.String("key", key),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewRequest(http.MethodGet, fmt.Sprintf("%s/%s", bucket, key), nil)
req, err := c.client.NewRequest(job.ID, job.Token, http.MethodGet, fmt.Sprintf("%s/%s", bucket, key), nil)
if err != nil {
return nil, err
}
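
A hedged, self-contained sketch of the consumer side of the new Exists/Get signatures: the job now travels with every call so the client can attach the job-scoped token. The store interface, fake store, and helper below are assumptions made for this sketch; only the parameter order and the Job ID/Token fields come from the diff.

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
)

// job mirrors the two fields of types.Job that matter here.
type job struct {
	ID    int
	Token string
}

// filesStore is a hypothetical consumer-side interface matching the new signatures.
type filesStore interface {
	Exists(ctx context.Context, j job, bucket, key string) (bool, error)
	Get(ctx context.Context, j job, bucket, key string) (io.ReadCloser, error)
}

// fakeStore stands in for the HTTP-backed files client in this sketch.
type fakeStore struct{ files map[string][]byte }

func (s fakeStore) Exists(_ context.Context, _ job, bucket, key string) (bool, error) {
	_, ok := s.files[bucket+"/"+key]
	return ok, nil
}

func (s fakeStore) Get(_ context.Context, _ job, bucket, key string) (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewReader(s.files[bucket+"/"+key])), nil
}

// fetchIfPresent shows the call pattern: the same job is threaded through both calls.
func fetchIfPresent(ctx context.Context, store filesStore, j job, bucket, key string) error {
	exists, err := store.Exists(ctx, j, bucket, key)
	if err != nil || !exists {
		return err
	}
	rc, err := store.Get(ctx, j, bucket, key)
	if err != nil {
		return err
	}
	defer rc.Close()
	content, err := io.ReadAll(rc)
	if err != nil {
		return err
	}
	fmt.Printf("fetched %d bytes for job %d\n", len(content), j.ID)
	return nil
}

func main() {
	store := fakeStore{files: map[string][]byte{"some-bucket/foo/bar": []byte("hello world!")}}
	_ = fetchIfPresent(context.Background(), store, job{ID: 42, Token: "sometoken"}, "some-bucket", "foo/bar")
}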


@ -12,6 +12,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient/files"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -61,6 +62,7 @@ func TestClient_Exists(t *testing.T) {
name string
handler func(t *testing.T) http.Handler
job types.Job
expectedValue bool
expectedErr error
@ -72,9 +74,27 @@ func TestClient_Exists(t *testing.T) {
assert.Equal(t, http.MethodHead, r.Method)
assert.Contains(t, r.URL.Path, "some-bucket/foo/bar")
assert.Equal(t, r.Header.Get("Authorization"), "token-executor hunter2")
assert.Equal(t, "42", r.Header.Get("X-Sourcegraph-Job-ID"))
assert.Equal(t, "test-executor", r.Header.Get("X-Sourcegraph-Executor-Name"))
w.WriteHeader(http.StatusOK)
})
},
job: types.Job{ID: 42},
expectedValue: true,
},
{
name: "File exists with job token",
handler: func(t *testing.T) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, http.MethodHead, r.Method)
assert.Contains(t, r.URL.Path, "some-bucket/foo/bar")
assert.Equal(t, r.Header.Get("Authorization"), "Bearer sometoken")
assert.Equal(t, "42", r.Header.Get("X-Sourcegraph-Job-ID"))
assert.Equal(t, "test-executor", r.Header.Get("X-Sourcegraph-Executor-Name"))
w.WriteHeader(http.StatusOK)
})
},
job: types.Job{ID: 42, Token: "sometoken"},
expectedValue: true,
},
{
@ -84,6 +104,7 @@ func TestClient_Exists(t *testing.T) {
w.WriteHeader(http.StatusNotFound)
})
},
job: types.Job{ID: 42},
expectedValue: false,
},
{
@ -93,6 +114,7 @@ func TestClient_Exists(t *testing.T) {
w.WriteHeader(http.StatusInternalServerError)
})
},
job: types.Job{ID: 42},
expectedValue: false,
expectedErr: errors.New("unexpected status code 500"),
},
@ -102,6 +124,7 @@ func TestClient_Exists(t *testing.T) {
srv := httptest.NewServer(test.handler(t))
defer srv.Close()
options := apiclient.BaseClientOptions{
ExecutorName: "test-executor",
EndpointOptions: apiclient.EndpointOptions{
URL: srv.URL,
PathPrefix: "/.executors/files",
@ -112,7 +135,7 @@ func TestClient_Exists(t *testing.T) {
client, err := files.New(observationContext, options)
require.NoError(t, err)
exists, err := client.Exists(context.Background(), "some-bucket", "foo/bar")
exists, err := client.Exists(context.Background(), test.job, "some-bucket", "foo/bar")
if test.expectedErr != nil {
assert.Error(t, err)
@ -134,6 +157,8 @@ func TestClient_Get(t *testing.T) {
handler func(t *testing.T) http.Handler
job types.Job
expectedValue string
expectedErr error
}{
@ -144,10 +169,29 @@ func TestClient_Get(t *testing.T) {
assert.Equal(t, http.MethodGet, r.Method)
assert.Contains(t, r.URL.Path, "some-bucket/foo/bar")
assert.Equal(t, r.Header.Get("Authorization"), "token-executor hunter2")
assert.Equal(t, "42", r.Header.Get("X-Sourcegraph-Job-ID"))
assert.Equal(t, "test-executor", r.Header.Get("X-Sourcegraph-Executor-Name"))
w.WriteHeader(http.StatusOK)
w.Write([]byte("hello world!"))
})
},
job: types.Job{ID: 42},
expectedValue: "hello world!",
},
{
name: "Get content with job token",
handler: func(t *testing.T) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
assert.Equal(t, http.MethodGet, r.Method)
assert.Contains(t, r.URL.Path, "some-bucket/foo/bar")
assert.Equal(t, r.Header.Get("Authorization"), "Bearer sometoken")
assert.Equal(t, "42", r.Header.Get("X-Sourcegraph-Job-ID"))
assert.Equal(t, "test-executor", r.Header.Get("X-Sourcegraph-Executor-Name"))
w.WriteHeader(http.StatusOK)
w.Write([]byte("hello world!"))
})
},
job: types.Job{ID: 42, Token: "sometoken"},
expectedValue: "hello world!",
},
{
@ -157,9 +201,12 @@ func TestClient_Get(t *testing.T) {
assert.Equal(t, http.MethodGet, r.Method)
assert.Contains(t, r.URL.Path, "some-bucket/foo/bar")
assert.Equal(t, r.Header.Get("Authorization"), "token-executor hunter2")
assert.Equal(t, "42", r.Header.Get("X-Sourcegraph-Job-ID"))
assert.Equal(t, "test-executor", r.Header.Get("X-Sourcegraph-Executor-Name"))
w.WriteHeader(http.StatusInternalServerError)
})
},
job: types.Job{ID: 42},
expectedErr: errors.New("unexpected status code 500"),
},
}
@ -168,6 +215,7 @@ func TestClient_Get(t *testing.T) {
srv := httptest.NewServer(test.handler(t))
defer srv.Close()
options := apiclient.BaseClientOptions{
ExecutorName: "test-executor",
EndpointOptions: apiclient.EndpointOptions{
URL: srv.URL,
PathPrefix: "/.executors/files",
@ -178,7 +226,7 @@ func TestClient_Get(t *testing.T) {
client, err := files.New(observationContext, options)
require.NoError(t, err)
content, err := client.Get(context.Background(), "some-bucket", "foo/bar")
content, err := client.Get(context.Background(), test.job, "some-bucket", "foo/bar")
if test.expectedErr != nil {
assert.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())


@ -19,7 +19,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
internalexecutor "github.com/sourcegraph/sourcegraph/internal/executor"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/version"
@ -37,7 +37,7 @@ type Client struct {
}
// Compile time validation.
var _ workerutil.Store[executor.Job] = &Client{}
var _ workerutil.Store[types.Job] = &Client{}
var _ command.ExecutionLogEntryStore = &Client{}
func New(observationCtx *observation.Context, options Options, metricsGatherer prometheus.Gatherer) (*Client, error) {
@ -58,13 +58,13 @@ func (c *Client) QueuedCount(ctx context.Context) (int, error) {
return 0, errors.New("unimplemented")
}
func (c *Client) Dequeue(ctx context.Context, workerHostname string, extraArguments any) (job executor.Job, _ bool, err error) {
func (c *Client) Dequeue(ctx context.Context, workerHostname string, extraArguments any) (job types.Job, _ bool, err error) {
ctx, _, endObservation := c.operations.dequeue.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/dequeue", c.options.QueueName), executor.DequeueRequest{
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/dequeue", c.options.QueueName), types.DequeueRequest{
Version: version.Version(),
ExecutorName: c.options.ExecutorName,
NumCPUs: c.options.ResourceOptions.NumCPUs,
@ -79,16 +79,18 @@ func (c *Client) Dequeue(ctx context.Context, workerHostname string, extraArgume
return job, decoded, err
}
func (c *Client) MarkComplete(ctx context.Context, id int) (_ bool, err error) {
func (c *Client) MarkComplete(ctx context.Context, job types.Job) (_ bool, err error) {
ctx, _, endObservation := c.operations.markComplete.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", id),
otlog.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/markComplete", c.options.QueueName), executor.MarkCompleteRequest{
ExecutorName: c.options.ExecutorName,
JobID: id,
req, err := c.client.NewJSONJobRequest(job.ID, http.MethodPost, fmt.Sprintf("%s/markComplete", c.options.QueueName), job.Token, types.MarkCompleteRequest{
JobOperationRequest: types.JobOperationRequest{
ExecutorName: c.options.ExecutorName,
JobID: job.ID,
},
})
if err != nil {
return false, err
@ -100,16 +102,18 @@ func (c *Client) MarkComplete(ctx context.Context, id int) (_ bool, err error) {
return true, nil
}
func (c *Client) MarkErrored(ctx context.Context, id int, failureMessage string) (_ bool, err error) {
func (c *Client) MarkErrored(ctx context.Context, job types.Job, failureMessage string) (_ bool, err error) {
ctx, _, endObservation := c.operations.markErrored.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", id),
otlog.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/markErrored", c.options.QueueName), executor.MarkErroredRequest{
ExecutorName: c.options.ExecutorName,
JobID: id,
req, err := c.client.NewJSONJobRequest(job.ID, http.MethodPost, fmt.Sprintf("%s/markErrored", c.options.QueueName), job.Token, types.MarkErroredRequest{
JobOperationRequest: types.JobOperationRequest{
ExecutorName: c.options.ExecutorName,
JobID: job.ID,
},
ErrorMessage: failureMessage,
})
if err != nil {
@ -122,16 +126,18 @@ func (c *Client) MarkErrored(ctx context.Context, id int, failureMessage string)
return true, nil
}
func (c *Client) MarkFailed(ctx context.Context, id int, failureMessage string) (_ bool, err error) {
func (c *Client) MarkFailed(ctx context.Context, job types.Job, failureMessage string) (_ bool, err error) {
ctx, _, endObservation := c.operations.markFailed.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", id),
otlog.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/markFailed", c.options.QueueName), executor.MarkErroredRequest{
ExecutorName: c.options.ExecutorName,
JobID: id,
req, err := c.client.NewJSONJobRequest(job.ID, http.MethodPost, fmt.Sprintf("%s/markFailed", c.options.QueueName), job.Token, types.MarkErroredRequest{
JobOperationRequest: types.JobOperationRequest{
ExecutorName: c.options.ExecutorName,
JobID: job.ID,
},
ErrorMessage: failureMessage,
})
if err != nil {
@ -157,9 +163,9 @@ func (c *Client) Heartbeat(ctx context.Context, jobIDs []int) (knownIDs, cancelI
// Continue; metric errors should not prevent heartbeats.
}
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/heartbeat", c.options.QueueName), executor.HeartbeatRequest{
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/heartbeat", c.options.QueueName), types.HeartbeatRequest{
// Request the new-fashioned payload.
Version: executor.ExecutorAPIVersion2,
Version: types.ExecutorAPIVersion2,
ExecutorName: c.options.ExecutorName,
JobIDs: jobIDs,
@ -193,7 +199,7 @@ func (c *Client) Heartbeat(ctx context.Context, jobIDs []int) (knownIDs, cancelI
}
// First, try to unmarshal the response into a V2 response object.
var respV2 executor.HeartbeatResponse
var respV2 types.HeartbeatResponse
if err := json.Unmarshal(bodyBytes, &respV2); err == nil {
// If that works, we can return the data.
return respV2.KnownIDs, respV2.CancelIDs, nil
@ -209,7 +215,7 @@ func (c *Client) Heartbeat(ctx context.Context, jobIDs []int) (knownIDs, cancelI
// are talking to a pre-4.3 Sourcegraph API and that doesn't return canceled
// jobs as part of heartbeats.
cancelIDs, err = c.CanceledJobs(ctx, c.options.QueueName, jobIDs)
cancelIDs, err = c.CanceledJobs(ctx, jobIDs)
if err != nil {
return nil, nil, err
}
@ -243,7 +249,7 @@ func gatherMetrics(logger log.Logger, gatherer prometheus.Gatherer) (string, err
var buf bytes.Buffer
enc := expfmt.NewEncoder(&buf, expfmt.FmtText)
for _, mf := range mfs {
if err := enc.Encode(mf); err != nil {
if err = enc.Encode(mf); err != nil {
return "", errors.Wrap(err, "encoding metric family")
}
}
@ -251,8 +257,8 @@ func gatherMetrics(logger log.Logger, gatherer prometheus.Gatherer) (string, err
}
// TODO: Remove this in Sourcegraph 4.4.
func (c *Client) CanceledJobs(ctx context.Context, queueName string, knownIDs []int) (canceledIDs []int, err error) {
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/canceledJobs", c.options.QueueName), executor.CanceledJobsRequest{
func (c *Client) CanceledJobs(ctx context.Context, knownIDs []int) (canceledIDs []int, err error) {
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/canceledJobs", c.options.QueueName), types.CanceledJobsRequest{
KnownJobIDs: knownIDs,
ExecutorName: c.options.ExecutorName,
})
@ -267,8 +273,8 @@ func (c *Client) CanceledJobs(ctx context.Context, queueName string, knownIDs []
return canceledIDs, nil
}
func (c *Client) Ping(ctx context.Context, queueName string, jobIDs []int) (err error) {
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/heartbeat", c.options.QueueName), executor.HeartbeatRequest{
func (c *Client) Ping(ctx context.Context) (err error) {
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/heartbeat", c.options.QueueName), types.HeartbeatRequest{
ExecutorName: c.options.ExecutorName,
})
if err != nil {
@ -278,16 +284,18 @@ func (c *Client) Ping(ctx context.Context, queueName string, jobIDs []int) (err
return c.client.DoAndDrop(ctx, req)
}
func (c *Client) AddExecutionLogEntry(ctx context.Context, jobID int, entry internalexecutor.ExecutionLogEntry) (entryID int, err error) {
func (c *Client) AddExecutionLogEntry(ctx context.Context, job types.Job, entry internalexecutor.ExecutionLogEntry) (entryID int, err error) {
ctx, _, endObservation := c.operations.addExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", jobID),
otlog.Int("jobID", job.ID),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/addExecutionLogEntry", c.options.QueueName), executor.AddExecutionLogEntryRequest{
ExecutorName: c.options.ExecutorName,
JobID: jobID,
req, err := c.client.NewJSONJobRequest(job.ID, http.MethodPost, fmt.Sprintf("%s/addExecutionLogEntry", c.options.QueueName), job.Token, types.AddExecutionLogEntryRequest{
JobOperationRequest: types.JobOperationRequest{
ExecutorName: c.options.ExecutorName,
JobID: job.ID,
},
ExecutionLogEntry: entry,
})
if err != nil {
@ -298,17 +306,19 @@ func (c *Client) AddExecutionLogEntry(ctx context.Context, jobID int, entry inte
return entryID, err
}
func (c *Client) UpdateExecutionLogEntry(ctx context.Context, jobID, entryID int, entry internalexecutor.ExecutionLogEntry) (err error) {
func (c *Client) UpdateExecutionLogEntry(ctx context.Context, job types.Job, entryID int, entry internalexecutor.ExecutionLogEntry) (err error) {
ctx, _, endObservation := c.operations.updateExecutionLogEntry.With(ctx, &err, observation.Args{LogFields: []otlog.Field{
otlog.String("queueName", c.options.QueueName),
otlog.Int("jobID", jobID),
otlog.Int("jobID", job.ID),
otlog.Int("entryID", entryID),
}})
defer endObservation(1, observation.Args{})
req, err := c.client.NewJSONRequest(http.MethodPost, fmt.Sprintf("%s/updateExecutionLogEntry", c.options.QueueName), executor.UpdateExecutionLogEntryRequest{
ExecutorName: c.options.ExecutorName,
JobID: jobID,
req, err := c.client.NewJSONJobRequest(job.ID, http.MethodPost, fmt.Sprintf("%s/updateExecutionLogEntry", c.options.QueueName), job.Token, types.UpdateExecutionLogEntryRequest{
JobOperationRequest: types.JobOperationRequest{
ExecutorName: c.options.ExecutorName,
JobID: job.ID,
},
EntryID: entryID,
ExecutionLogEntry: entry,
})
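
An aside, not from the commit: the expected payloads in the tests below remain flat JSON objects because JobOperationRequest is embedded in each request type. A minimal sketch, assuming json tags of executorName/jobId/errorMessage on these request types, shows the embedded fields being promoted into the outer object.

package main

import (
	"encoding/json"
	"fmt"
)

// Stand-in types mirroring the shape implied by the diff and test payloads;
// the field names and json tags are assumptions made for this sketch.
type jobOperationRequest struct {
	ExecutorName string `json:"executorName"`
	JobID        int    `json:"jobId"`
}

type markErroredRequest struct {
	jobOperationRequest
	ErrorMessage string `json:"errorMessage"`
}

func main() {
	b, _ := json.Marshal(markErroredRequest{
		jobOperationRequest: jobOperationRequest{ExecutorName: "deadbeef", JobID: 42},
		ErrorMessage:        "OH NO",
	})
	// Prints {"executorName":"deadbeef","jobId":42,"errorMessage":"OH NO"},
	// matching the expectedPayload strings in the tests below.
	fmt.Println(string(b))
}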


@ -13,177 +13,292 @@ import (
"github.com/google/go-cmp/cmp"
"github.com/prometheus/client_golang/prometheus"
dto "github.com/prometheus/client_model/go"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient/queue"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
internalexecutor "github.com/sourcegraph/sourcegraph/internal/executor"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func TestDequeue(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/dequeue",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "version": "0.0.0+dev"}`,
responseStatus: http.StatusOK,
responsePayload: `{"id": 42}`,
func TestClient_Dequeue(t *testing.T) {
tests := []struct {
name string
spec routeSpec
expectedJob types.Job
expectedErr error
isDequeued bool
}{
{
name: "Success",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/dequeue",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "version": "0.0.0+dev"}`,
responseStatus: http.StatusOK,
responsePayload: `{"id": 42}`,
},
expectedJob: types.Job{ID: 42, VirtualMachineFiles: map[string]types.VirtualMachineFile{}},
isDequeued: true,
},
{
name: "No record",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/dequeue",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "version": "0.0.0+dev"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
},
},
{
name: "Bad Response",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/dequeue",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "version": "0.0.0+dev"}`,
responseStatus: http.StatusInternalServerError,
responsePayload: ``,
},
expectedErr: errors.New("unexpected status code 500"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testRoute(t, test.spec, func(client *queue.Client) {
job, dequeued, err := client.Dequeue(context.Background(), "worker", "foo")
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
assert.Zero(t, job.ID)
assert.False(t, dequeued)
} else {
require.NoError(t, err)
assert.Equal(t, test.expectedJob, job)
assert.Equal(t, test.isDequeued, dequeued)
}
})
})
}
testRoute(t, spec, func(client *queue.Client) {
job, dequeued, err := client.Dequeue(context.Background(), "worker", nil)
if err != nil {
t.Fatalf("unexpected error dequeueing record: %s", err)
}
if !dequeued {
t.Fatalf("expected record to be dequeued")
}
if job.ID != 42 {
t.Errorf("unexpected id. want=%d have=%d", 42, job.ID)
}
})
}
func TestDequeueNoRecord(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/dequeue",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "version": "0.0.0+dev"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
func TestClient_MarkComplete(t *testing.T) {
tests := []struct {
name string
spec routeSpec
job types.Job
expectedErr error
}{
{
name: "Success",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markComplete",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
},
job: types.Job{ID: 42, Token: "job-token"},
},
{
name: "Success general access token",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markComplete",
expectedUsername: "test",
expectedToken: "hunter2",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
},
job: types.Job{ID: 42},
},
{
name: "Bad Response",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markComplete",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42}`,
responseStatus: http.StatusInternalServerError,
responsePayload: ``,
},
job: types.Job{ID: 42, Token: "job-token"},
expectedErr: errors.New("unexpected status code 500"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testRoute(t, test.spec, func(client *queue.Client) {
marked, err := client.MarkComplete(context.Background(), test.job)
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
assert.False(t, marked)
} else {
assert.True(t, marked)
}
})
})
}
testRoute(t, spec, func(client *queue.Client) {
_, dequeued, err := client.Dequeue(context.Background(), "worker", nil)
if err != nil {
t.Fatalf("unexpected error dequeueing record: %s", err)
}
if dequeued {
t.Fatalf("did not expect a record to be dequeued")
}
})
}
func TestDequeueBadResponse(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/dequeue",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "version": "0.0.0+dev"}`,
responseStatus: http.StatusInternalServerError,
responsePayload: ``,
func TestClient_MarkErrored(t *testing.T) {
tests := []struct {
name string
spec routeSpec
job types.Job
expectedErr error
}{
{
name: "Success",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markErrored",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
},
job: types.Job{ID: 42, Token: "job-token"},
},
{
name: "Success general access token",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markErrored",
expectedUsername: "test",
expectedToken: "hunter2",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
},
job: types.Job{ID: 42},
},
{
name: "Bad Response",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markErrored",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusInternalServerError,
responsePayload: ``,
},
job: types.Job{ID: 42, Token: "job-token"},
expectedErr: errors.New("unexpected status code 500"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testRoute(t, test.spec, func(client *queue.Client) {
marked, err := client.MarkErrored(context.Background(), test.job, "OH NO")
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
assert.False(t, marked)
} else {
assert.True(t, marked)
}
})
})
}
testRoute(t, spec, func(client *queue.Client) {
if _, _, err := client.Dequeue(context.Background(), "test_queue", nil); err == nil {
t.Fatalf("expected an error")
}
})
}
func TestMarkComplete(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markComplete",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
func TestClient_MarkFailed(t *testing.T) {
tests := []struct {
name string
spec routeSpec
job types.Job
expectedErr error
}{
{
name: "Success",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markFailed",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
},
job: types.Job{ID: 42, Token: "job-token"},
},
{
name: "Success general access token",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markFailed",
expectedUsername: "test",
expectedToken: "hunter2",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
},
job: types.Job{ID: 42},
},
{
name: "Bad Response",
spec: routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markFailed",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusInternalServerError,
responsePayload: ``,
},
job: types.Job{ID: 42, Token: "job-token"},
expectedErr: errors.New("unexpected status code 500"),
},
}
testRoute(t, spec, func(client *queue.Client) {
if marked, err := client.MarkComplete(context.Background(), 42); err != nil {
t.Fatalf("unexpected error completing job: %s", err)
} else if !marked {
t.Fatalf("expecting job to be marked")
}
})
}
func TestMarkCompleteBadResponse(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markComplete",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42}`,
responseStatus: http.StatusInternalServerError,
responsePayload: ``,
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
testRoute(t, test.spec, func(client *queue.Client) {
marked, err := client.MarkFailed(context.Background(), test.job, "OH NO")
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
assert.False(t, marked)
} else {
assert.True(t, marked)
}
})
})
}
testRoute(t, spec, func(client *queue.Client) {
if marked, err := client.MarkComplete(context.Background(), 42); err == nil {
t.Fatalf("expected an error")
} else if marked {
t.Fatalf("expecting job to not be marked")
}
})
}
func TestMarkErrored(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markErrored",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
}
testRoute(t, spec, func(client *queue.Client) {
if marked, err := client.MarkErrored(context.Background(), 42, "OH NO"); err != nil {
t.Fatalf("unexpected error completing job: %s", err)
} else if !marked {
t.Fatalf("expecting job to be marked")
}
})
}
func TestMarkErroredBadResponse(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markErrored",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusInternalServerError,
responsePayload: ``,
}
testRoute(t, spec, func(client *queue.Client) {
if marked, err := client.MarkErrored(context.Background(), 42, "OH NO"); err == nil {
t.Fatalf("expected an error")
} else if marked {
t.Fatalf("expecting job to not be marked")
}
})
}
func TestMarkFailed(t *testing.T) {
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/markFailed",
expectedUsername: "test",
expectedToken: "hunter2",
expectedPayload: `{"executorName": "deadbeef", "jobId": 42, "errorMessage": "OH NO"}`,
responseStatus: http.StatusNoContent,
responsePayload: ``,
}
testRoute(t, spec, func(client *queue.Client) {
if marked, err := client.MarkFailed(context.Background(), 42, "OH NO"); err != nil {
t.Fatalf("unexpected error completing job: %s", err)
} else if !marked {
t.Fatalf("expecting job to be marked")
}
})
}
func TestCanceledJobs(t *testing.T) {
@ -198,7 +313,7 @@ func TestCanceledJobs(t *testing.T) {
}
testRoute(t, spec, func(client *queue.Client) {
if ids, err := client.CanceledJobs(context.Background(), "test_queue", []int{1}); err != nil {
if ids, err := client.CanceledJobs(context.Background(), []int{1}); err != nil {
t.Fatalf("unexpected error completing job: %s", err)
} else if diff := cmp.Diff(ids, []int{1}); diff != "" {
t.Fatalf("unexpected set of IDs returned: %s", diff)
@ -290,10 +405,12 @@ func TestAddExecutionLogEntry(t *testing.T) {
}
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/addExecutionLogEntry",
expectedUsername: "test",
expectedToken: "hunter2",
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/addExecutionLogEntry",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{
"executorName": "deadbeef",
"jobId": 42,
@ -309,7 +426,7 @@ func TestAddExecutionLogEntry(t *testing.T) {
}
testRoute(t, spec, func(client *queue.Client) {
entryID, err := client.AddExecutionLogEntry(context.Background(), 42, entry)
entryID, err := client.AddExecutionLogEntry(context.Background(), types.Job{ID: 42, Token: "job-token"}, entry)
if err != nil {
t.Fatalf("unexpected error updating log contents: %s", err)
}
@ -330,10 +447,12 @@ func TestAddExecutionLogEntryBadResponse(t *testing.T) {
}
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/addExecutionLogEntry",
expectedUsername: "test",
expectedToken: "hunter2",
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/addExecutionLogEntry",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{
"executorName": "deadbeef",
"jobId": 42,
@ -349,7 +468,7 @@ func TestAddExecutionLogEntryBadResponse(t *testing.T) {
}
testRoute(t, spec, func(client *queue.Client) {
if _, err := client.AddExecutionLogEntry(context.Background(), 42, entry); err == nil {
if _, err := client.AddExecutionLogEntry(context.Background(), types.Job{ID: 42, Token: "job-token"}, entry); err == nil {
t.Fatalf("expected an error")
}
})
@ -366,10 +485,12 @@ func TestUpdateExecutionLogEntry(t *testing.T) {
}
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/updateExecutionLogEntry",
expectedUsername: "test",
expectedToken: "hunter2",
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/updateExecutionLogEntry",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{
"executorName": "deadbeef",
"jobId": 42,
@ -386,7 +507,7 @@ func TestUpdateExecutionLogEntry(t *testing.T) {
}
testRoute(t, spec, func(client *queue.Client) {
if err := client.UpdateExecutionLogEntry(context.Background(), 42, 99, entry); err != nil {
if err := client.UpdateExecutionLogEntry(context.Background(), types.Job{ID: 42, Token: "job-token"}, 99, entry); err != nil {
t.Fatalf("unexpected error updating log contents: %s", err)
}
})
@ -403,10 +524,12 @@ func TestUpdateExecutionLogEntryBadResponse(t *testing.T) {
}
spec := routeSpec{
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/updateExecutionLogEntry",
expectedUsername: "test",
expectedToken: "hunter2",
expectedMethod: "POST",
expectedPath: "/.executors/queue/test_queue/updateExecutionLogEntry",
expectedUsername: "test",
expectedToken: "job-token",
expectedJobID: "42",
expectedExecutorName: "deadbeef",
expectedPayload: `{
"executorName": "deadbeef",
"jobId": 42,
@ -423,20 +546,22 @@ func TestUpdateExecutionLogEntryBadResponse(t *testing.T) {
}
testRoute(t, spec, func(client *queue.Client) {
if err := client.UpdateExecutionLogEntry(context.Background(), 42, 99, entry); err == nil {
if err := client.UpdateExecutionLogEntry(context.Background(), types.Job{ID: 42, Token: "job-token"}, 99, entry); err == nil {
t.Fatalf("expected an error")
}
})
}
type routeSpec struct {
expectedMethod string
expectedPath string
expectedUsername string
expectedToken string
expectedPayload string
responseStatus int
responsePayload string
expectedMethod string
expectedPath string
expectedUsername string
expectedToken string
expectedJobID string
expectedExecutorName string
expectedPayload string
responseStatus int
responsePayload string
}
func testRoute(t *testing.T, spec routeSpec, f func(client *queue.Client)) {
@ -447,6 +572,7 @@ func testRoute(t *testing.T, spec routeSpec, f func(client *queue.Client)) {
ExecutorName: "deadbeef",
QueueName: "test_queue",
BaseClientOptions: apiclient.BaseClientOptions{
ExecutorName: "deadbeef",
EndpointOptions: apiclient.EndpointOptions{
URL: ts.URL,
PathPrefix: "/.executors/queue",
@ -464,42 +590,39 @@ func testRoute(t *testing.T, spec routeSpec, f func(client *queue.Client)) {
},
}
client, err := queue.New(&observation.TestContext, options, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }))
client, err := newQueueClient(options)
require.NoError(t, err)
f(client)
}
func testServer(t *testing.T, spec routeSpec) *httptest.Server {
handler := func(w http.ResponseWriter, r *http.Request) {
if r.Method != spec.expectedMethod {
t.Errorf("unexpected method. want=%s have=%s", spec.expectedMethod, r.Method)
}
if r.URL.Path != spec.expectedPath {
t.Errorf("unexpected method. want=%s have=%s", spec.expectedPath, r.URL.Path)
}
assert.Equal(t, spec.expectedMethod, r.Method)
assert.Equal(t, spec.expectedPath, r.URL.Path)
parts := strings.Split(r.Header.Get("Authorization"), " ")
if len(parts) != 2 || parts[0] != "token-executor" {
if parts[1] != spec.expectedToken {
t.Errorf("unexpected token`. want=%s have=%s", spec.expectedToken, parts[1])
}
}
assert.Len(t, parts, 2)
assert.Equal(t, spec.expectedToken, parts[1])
assert.Equal(t, spec.expectedJobID, r.Header.Get("X-Sourcegraph-Job-ID"))
assert.Equal(t, spec.expectedExecutorName, r.Header.Get("X-Sourcegraph-Executor-Name"))
content, err := io.ReadAll(r.Body)
if err != nil {
t.Fatalf("unexpected error reading payload: %s", err)
}
if diff := cmp.Diff(normalizeJSON([]byte(spec.expectedPayload)), normalizeJSON(content)); diff != "" {
t.Errorf("unexpected request payload (-want +got):\n%s", diff)
}
require.NoError(t, err)
assert.JSONEq(t, normalizeJSON([]byte(spec.expectedPayload)), normalizeJSON(content))
w.WriteHeader(spec.responseStatus)
w.Write([]byte(spec.responsePayload))
_, err = w.Write([]byte(spec.responsePayload))
require.NoError(t, err)
}
return httptest.NewServer(http.HandlerFunc(handler))
}
func newQueueClient(options queue.Options) (*queue.Client, error) {
return queue.New(&observation.TestContext, options, prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { return nil, nil }))
}
func normalizeJSON(v []byte) string {
temp := map[string]any{}
_ = json.Unmarshal(v, &temp)


@ -11,7 +11,7 @@ import (
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
internalexecutor "github.com/sourcegraph/sourcegraph/internal/executor"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -92,8 +92,7 @@ type logger struct {
done chan struct{}
handles chan *entryHandle
job executor.Job
recordID int
job types.Job
replacer *strings.Replacer
@ -103,8 +102,8 @@ type logger struct {
// ExecutionLogEntryStore handles interactions with executor.Job logs.
type ExecutionLogEntryStore interface {
AddExecutionLogEntry(ctx context.Context, id int, entry internalexecutor.ExecutionLogEntry) (int, error)
UpdateExecutionLogEntry(ctx context.Context, id, entryID int, entry internalexecutor.ExecutionLogEntry) error
AddExecutionLogEntry(ctx context.Context, job types.Job, entry internalexecutor.ExecutionLogEntry) (int, error)
UpdateExecutionLogEntry(ctx context.Context, job types.Job, entryID int, entry internalexecutor.ExecutionLogEntry) error
}
// logEntryBufSize is the maximum number of log entries that are logged by the
@ -117,7 +116,7 @@ const logEntryBufsize = 50
// replace with a non-sensitive value.
// Each log message is written to the store in a goroutine. The Flush method
// must be called to ensure all entries are written.
func NewLogger(store ExecutionLogEntryStore, job executor.Job, recordID int, replacements map[string]string) Logger {
func NewLogger(store ExecutionLogEntryStore, job types.Job, recordID int, replacements map[string]string) Logger {
oldnew := make([]string, 0, len(replacements)*2)
for k, v := range replacements {
oldnew = append(oldnew, k, v)
@ -126,7 +125,6 @@ func NewLogger(store ExecutionLogEntryStore, job executor.Job, recordID int, rep
l := &logger{
store: store,
job: job,
recordID: recordID,
done: make(chan struct{}),
handles: make(chan *entryHandle, logEntryBufsize),
replacer: strings.NewReplacer(oldnew...),
@ -170,12 +168,12 @@ func (l *logger) writeEntries() {
var wg sync.WaitGroup
for handle := range l.handles {
initialLogEntry := handle.CurrentLogEntry()
entryID, err := l.store.AddExecutionLogEntry(context.Background(), l.recordID, initialLogEntry)
entryID, err := l.store.AddExecutionLogEntry(context.Background(), l.job, initialLogEntry)
if err != nil {
// If there is a timeout or cancellation error we don't want to skip
// writing these logs as users will often want to see how far something
// progressed prior to a timeout.
log15.Warn("Failed to upload executor log entry for job", "id", l.recordID, "repositoryName", l.job.RepositoryName, "commit", l.job.Commit, "error", err)
log15.Warn("Failed to upload executor log entry for job", "id", l.job.ID, "repositoryName", l.job.RepositoryName, "commit", l.job.Commit, "error", err)
l.appendError(err)
@ -230,7 +228,7 @@ func (l *logger) syncLogEntry(handle *entryHandle, entryID int, old internalexec
log15.Debug("Updating executor log entry", logArgs...)
if err := l.store.UpdateExecutionLogEntry(context.Background(), l.recordID, entryID, current); err != nil {
if err := l.store.UpdateExecutionLogEntry(context.Background(), l.job, entryID, current); err != nil {
logMethod := log15.Warn
if lastWrite {
logMethod = log15.Error


@ -4,7 +4,7 @@ import (
"context"
"testing"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
internalexecutor "github.com/sourcegraph/sourcegraph/internal/executor"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -13,12 +13,12 @@ func TestLogger(t *testing.T) {
s := NewMockExecutionLogEntryStore()
doneAdding := make(chan struct{})
s.AddExecutionLogEntryFunc.SetDefaultHook(func(_ context.Context, _ int, _ internalexecutor.ExecutionLogEntry) (int, error) {
s.AddExecutionLogEntryFunc.SetDefaultHook(func(_ context.Context, _ types.Job, _ internalexecutor.ExecutionLogEntry) (int, error) {
doneAdding <- struct{}{}
return 1, nil
})
job := executor.Job{}
job := types.Job{}
l := NewLogger(s, job, 1, map[string]string{})
e := l.Log("the_key", []string{"cmd", "arg1"})
@ -55,7 +55,7 @@ func TestLogger(t *testing.T) {
func TestLogger_Failure(t *testing.T) {
s := NewMockExecutionLogEntryStore()
doneAdding := make(chan struct{})
s.AddExecutionLogEntryFunc.SetDefaultHook(func(_ context.Context, _ int, _ internalexecutor.ExecutionLogEntry) (int, error) {
s.AddExecutionLogEntryFunc.SetDefaultHook(func(_ context.Context, _ types.Job, _ internalexecutor.ExecutionLogEntry) (int, error) {
doneAdding <- struct{}{}
return 1, nil
})
@ -63,7 +63,7 @@ func TestLogger_Failure(t *testing.T) {
// Update should fail.
s.UpdateExecutionLogEntryFunc.SetDefaultReturn(errors.New("failure!!"))
job := executor.Job{}
job := types.Job{}
l := NewLogger(s, job, 1, map[string]string{})
e := l.Log("the_key", []string{"cmd", "arg1"})


@ -10,6 +10,7 @@ import (
"context"
"sync"
types "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
executor "github.com/sourcegraph/sourcegraph/internal/executor"
)
@ -32,12 +33,12 @@ type MockExecutionLogEntryStore struct {
func NewMockExecutionLogEntryStore() *MockExecutionLogEntryStore {
return &MockExecutionLogEntryStore{
AddExecutionLogEntryFunc: &ExecutionLogEntryStoreAddExecutionLogEntryFunc{
defaultHook: func(context.Context, int, executor.ExecutionLogEntry) (r0 int, r1 error) {
defaultHook: func(context.Context, types.Job, executor.ExecutionLogEntry) (r0 int, r1 error) {
return
},
},
UpdateExecutionLogEntryFunc: &ExecutionLogEntryStoreUpdateExecutionLogEntryFunc{
defaultHook: func(context.Context, int, int, executor.ExecutionLogEntry) (r0 error) {
defaultHook: func(context.Context, types.Job, int, executor.ExecutionLogEntry) (r0 error) {
return
},
},
@ -50,12 +51,12 @@ func NewMockExecutionLogEntryStore() *MockExecutionLogEntryStore {
func NewStrictMockExecutionLogEntryStore() *MockExecutionLogEntryStore {
return &MockExecutionLogEntryStore{
AddExecutionLogEntryFunc: &ExecutionLogEntryStoreAddExecutionLogEntryFunc{
defaultHook: func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
defaultHook: func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
panic("unexpected invocation of MockExecutionLogEntryStore.AddExecutionLogEntry")
},
},
UpdateExecutionLogEntryFunc: &ExecutionLogEntryStoreUpdateExecutionLogEntryFunc{
defaultHook: func(context.Context, int, int, executor.ExecutionLogEntry) error {
defaultHook: func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
panic("unexpected invocation of MockExecutionLogEntryStore.UpdateExecutionLogEntry")
},
},
@ -80,15 +81,15 @@ func NewMockExecutionLogEntryStoreFrom(i ExecutionLogEntryStore) *MockExecutionL
// when the AddExecutionLogEntry method of the parent
// MockExecutionLogEntryStore instance is invoked.
type ExecutionLogEntryStoreAddExecutionLogEntryFunc struct {
defaultHook func(context.Context, int, executor.ExecutionLogEntry) (int, error)
hooks []func(context.Context, int, executor.ExecutionLogEntry) (int, error)
defaultHook func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)
hooks []func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)
history []ExecutionLogEntryStoreAddExecutionLogEntryFuncCall
mutex sync.Mutex
}
// AddExecutionLogEntry delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockExecutionLogEntryStore) AddExecutionLogEntry(v0 context.Context, v1 int, v2 executor.ExecutionLogEntry) (int, error) {
func (m *MockExecutionLogEntryStore) AddExecutionLogEntry(v0 context.Context, v1 types.Job, v2 executor.ExecutionLogEntry) (int, error) {
r0, r1 := m.AddExecutionLogEntryFunc.nextHook()(v0, v1, v2)
m.AddExecutionLogEntryFunc.appendCall(ExecutionLogEntryStoreAddExecutionLogEntryFuncCall{v0, v1, v2, r0, r1})
return r0, r1
@ -97,7 +98,7 @@ func (m *MockExecutionLogEntryStore) AddExecutionLogEntry(v0 context.Context, v1
// SetDefaultHook sets function that is called when the AddExecutionLogEntry
// method of the parent MockExecutionLogEntryStore instance is invoked and
// the hook queue is empty.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, int, executor.ExecutionLogEntry) (int, error)) {
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)) {
f.defaultHook = hook
}
@ -106,7 +107,7 @@ func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultHook(hook fun
// instance invokes the hook at the front of the queue and discards it.
// After the queue is empty, the default hook function is invoked for any
// future action.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushHook(hook func(context.Context, int, executor.ExecutionLogEntry) (int, error)) {
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushHook(hook func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -115,19 +116,19 @@ func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushHook(hook func(cont
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultReturn(r0 int, r1 error) {
f.SetDefaultHook(func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
f.SetDefaultHook(func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushReturn(r0 int, r1 error) {
f.PushHook(func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
f.PushHook(func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
return r0, r1
})
}
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) nextHook() func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) nextHook() func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -167,7 +168,7 @@ type ExecutionLogEntryStoreAddExecutionLogEntryFuncCall struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
Arg1 types.Job
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 executor.ExecutionLogEntry
@ -195,15 +196,15 @@ func (c ExecutionLogEntryStoreAddExecutionLogEntryFuncCall) Results() []interfac
// when the UpdateExecutionLogEntry method of the parent
// MockExecutionLogEntryStore instance is invoked.
type ExecutionLogEntryStoreUpdateExecutionLogEntryFunc struct {
defaultHook func(context.Context, int, int, executor.ExecutionLogEntry) error
hooks []func(context.Context, int, int, executor.ExecutionLogEntry) error
defaultHook func(context.Context, types.Job, int, executor.ExecutionLogEntry) error
hooks []func(context.Context, types.Job, int, executor.ExecutionLogEntry) error
history []ExecutionLogEntryStoreUpdateExecutionLogEntryFuncCall
mutex sync.Mutex
}
// UpdateExecutionLogEntry delegates to the next hook function in the queue
// and stores the parameter and result values of this invocation.
func (m *MockExecutionLogEntryStore) UpdateExecutionLogEntry(v0 context.Context, v1 int, v2 int, v3 executor.ExecutionLogEntry) error {
func (m *MockExecutionLogEntryStore) UpdateExecutionLogEntry(v0 context.Context, v1 types.Job, v2 int, v3 executor.ExecutionLogEntry) error {
r0 := m.UpdateExecutionLogEntryFunc.nextHook()(v0, v1, v2, v3)
m.UpdateExecutionLogEntryFunc.appendCall(ExecutionLogEntryStoreUpdateExecutionLogEntryFuncCall{v0, v1, v2, v3, r0})
return r0
@ -212,7 +213,7 @@ func (m *MockExecutionLogEntryStore) UpdateExecutionLogEntry(v0 context.Context,
// SetDefaultHook sets function that is called when the
// UpdateExecutionLogEntry method of the parent MockExecutionLogEntryStore
// instance is invoked and the hook queue is empty.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, int, int, executor.ExecutionLogEntry) error) {
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, types.Job, int, executor.ExecutionLogEntry) error) {
f.defaultHook = hook
}
@ -221,7 +222,7 @@ func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultHook(hook
// instance invokes the hook at the front of the queue and discards it.
// After the queue is empty, the default hook function is invoked for any
// future action.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushHook(hook func(context.Context, int, int, executor.ExecutionLogEntry) error) {
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushHook(hook func(context.Context, types.Job, int, executor.ExecutionLogEntry) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -230,19 +231,19 @@ func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushHook(hook func(c
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(context.Context, int, int, executor.ExecutionLogEntry) error {
f.SetDefaultHook(func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushReturn(r0 error) {
f.PushHook(func(context.Context, int, int, executor.ExecutionLogEntry) error {
f.PushHook(func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
return r0
})
}
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) nextHook() func(context.Context, int, int, executor.ExecutionLogEntry) error {
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) nextHook() func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -282,7 +283,7 @@ type ExecutionLogEntryStoreUpdateExecutionLogEntryFuncCall struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
Arg1 types.Job
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 int


@ -8,7 +8,7 @@ import (
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -59,7 +59,7 @@ type Options struct {
type DockerOptions struct {
// DockerAuthConfig, if set, will be used to configure the docker CLI to authenticate to
// registries.
DockerAuthConfig executor.DockerAuthConfig
DockerAuthConfig types.DockerAuthConfig
// AddHostGateway, if set, will add a host entry and route to the daemon host to the
// container. This can be useful to add host.docker.internal as an endpoint inside
// the container.


@ -9,7 +9,7 @@ import (
"github.com/c2h5oh/datasize"
"github.com/google/uuid"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/conf/confdefaults"
"github.com/sourcegraph/sourcegraph/internal/conf/deploy"
"github.com/sourcegraph/sourcegraph/internal/env"
@ -46,7 +46,7 @@ type Config struct {
DockerRegistryNodeExporterURL string
WorkerHostname string
DockerRegistryMirrorURL string
DockerAuthConfig executor.DockerAuthConfig
DockerAuthConfig types.DockerAuthConfig
dockerAuthConfigStr string
dockerAuthConfigUnmarshalError error
}


@ -13,7 +13,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/config"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -70,7 +70,7 @@ func createVM(ctx context.Context, config *config.Config, repositoryName, revisi
// No need for files store in the test.
nil,
// Just enough to spin up a VM.
executor.Job{
types.Job{
RepositoryName: repositoryName,
Commit: revision,
},


@ -186,12 +186,14 @@ func queueOptions(c *config.Config, telemetryOptions queue.TelemetryOptions) que
func filesOptions(c *config.Config) apiclient.BaseClientOptions {
return apiclient.BaseClientOptions{
ExecutorName: c.WorkerHostname,
EndpointOptions: endpointOptions(c, "/.executors/files"),
}
}
func baseClientOptions(c *config.Config, pathPrefix string) apiclient.BaseClientOptions {
return apiclient.BaseClientOptions{
ExecutorName: c.WorkerHostname,
EndpointOptions: endpointOptions(c, pathPrefix),
}
}

View File

@ -13,7 +13,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/ignite"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/janitor"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/honey"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
"github.com/sourcegraph/sourcegraph/lib/errors"
@ -29,8 +29,8 @@ type handler struct {
}
var (
_ workerutil.Handler[executor.Job] = &handler{}
_ workerutil.WithPreDequeue = &handler{}
_ workerutil.Handler[types.Job] = &handler{}
_ workerutil.WithPreDequeue = &handler{}
)
// PreDequeue determines if the number of VMs with the current instance's VM Prefix is less than
@ -61,7 +61,7 @@ func (h *handler) PreDequeue(ctx context.Context, logger log.Logger) (dequeueabl
// Handle clones the target code into a temporary directory, invokes the target indexer in a
// fresh docker container, and uploads the results to the external frontend API.
func (h *handler) Handle(ctx context.Context, logger log.Logger, job executor.Job) (err error) {
func (h *handler) Handle(ctx context.Context, logger log.Logger, job types.Job) (err error) {
logger = logger.With(
log.Int("jobID", job.ID),
log.String("repositoryName", job.RepositoryName),
@ -207,7 +207,7 @@ func union(a, b map[string]string) map[string]string {
return c
}
func createHoneyEvent(_ context.Context, job executor.Job, err error, duration time.Duration) honey.Event {
func createHoneyEvent(_ context.Context, job types.Job, err error, duration time.Duration) honey.Event {
fields := map[string]any{
"duration_ms": duration.Milliseconds(),
"recordID": job.RecordID(),

View File

@ -17,7 +17,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/janitor"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
@ -34,14 +34,14 @@ func TestHandle(t *testing.T) {
runner := NewMockRunner()
job := executor.Job{
job := types.Job{
ID: 42,
Commit: "deadbeef",
RepositoryName: "linux",
VirtualMachineFiles: map[string]executor.VirtualMachineFile{
VirtualMachineFiles: map[string]types.VirtualMachineFile{
"test.txt": {Content: []byte("<file payload>")},
},
DockerSteps: []executor.DockerStep{
DockerSteps: []types.DockerStep{
{
Image: "go",
Commands: []string{"go", "mod", "install"},
@ -55,7 +55,7 @@ func TestHandle(t *testing.T) {
Env: []string{},
},
},
CliSteps: []executor.CliStep{
CliSteps: []types.CliStep{
{
Commands: []string{"batch", "help"},
Dir: "",
@ -143,15 +143,16 @@ func TestHandle_WorkspaceFile(t *testing.T) {
virtualFileModifiedAt := time.Now()
job := executor.Job{
job := types.Job{
ID: 42,
Token: "sometoken",
Commit: "deadbeef",
RepositoryName: "linux",
VirtualMachineFiles: map[string]executor.VirtualMachineFile{
VirtualMachineFiles: map[string]types.VirtualMachineFile{
"test.txt": {Content: []byte("<file payload>")},
"script.sh": {Bucket: "batch-changes", Key: "123/abc", ModifiedAt: virtualFileModifiedAt},
},
DockerSteps: []executor.DockerStep{
DockerSteps: []types.DockerStep{
{
Image: "go",
Commands: []string{"go", "mod", "install"},
@ -165,7 +166,7 @@ func TestHandle_WorkspaceFile(t *testing.T) {
Env: []string{},
},
},
CliSteps: []executor.CliStep{
CliSteps: []types.CliStep{
{
Commands: []string{"batch", "help"},
Dir: "",
@ -234,8 +235,10 @@ func TestHandle_WorkspaceFile(t *testing.T) {
// Ensure the files store was called properly
getHistory := filesStore.GetFunc.History()
assert.Len(t, getHistory, 1)
assert.Equal(t, "batch-changes", getHistory[0].Arg1)
assert.Equal(t, "123/abc", getHistory[0].Arg2)
assert.Equal(t, 42, getHistory[0].Arg1.ID)
assert.Equal(t, "sometoken", getHistory[0].Arg1.Token)
assert.Equal(t, "batch-changes", getHistory[0].Arg2)
assert.Equal(t, "123/abc", getHistory[0].Arg3)
expectedCommands := [][]string{
{"/bin/sh", "42.0_linux@deadbeef.sh"},

View File

@ -13,6 +13,7 @@ import (
command "github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
workspace "github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
types "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
executor "github.com/sourcegraph/sourcegraph/internal/executor"
)
@ -35,12 +36,12 @@ type MockExecutionLogEntryStore struct {
func NewMockExecutionLogEntryStore() *MockExecutionLogEntryStore {
return &MockExecutionLogEntryStore{
AddExecutionLogEntryFunc: &ExecutionLogEntryStoreAddExecutionLogEntryFunc{
defaultHook: func(context.Context, int, executor.ExecutionLogEntry) (r0 int, r1 error) {
defaultHook: func(context.Context, types.Job, executor.ExecutionLogEntry) (r0 int, r1 error) {
return
},
},
UpdateExecutionLogEntryFunc: &ExecutionLogEntryStoreUpdateExecutionLogEntryFunc{
defaultHook: func(context.Context, int, int, executor.ExecutionLogEntry) (r0 error) {
defaultHook: func(context.Context, types.Job, int, executor.ExecutionLogEntry) (r0 error) {
return
},
},
@ -53,12 +54,12 @@ func NewMockExecutionLogEntryStore() *MockExecutionLogEntryStore {
func NewStrictMockExecutionLogEntryStore() *MockExecutionLogEntryStore {
return &MockExecutionLogEntryStore{
AddExecutionLogEntryFunc: &ExecutionLogEntryStoreAddExecutionLogEntryFunc{
defaultHook: func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
defaultHook: func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
panic("unexpected invocation of MockExecutionLogEntryStore.AddExecutionLogEntry")
},
},
UpdateExecutionLogEntryFunc: &ExecutionLogEntryStoreUpdateExecutionLogEntryFunc{
defaultHook: func(context.Context, int, int, executor.ExecutionLogEntry) error {
defaultHook: func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
panic("unexpected invocation of MockExecutionLogEntryStore.UpdateExecutionLogEntry")
},
},
@ -83,15 +84,15 @@ func NewMockExecutionLogEntryStoreFrom(i command.ExecutionLogEntryStore) *MockEx
// when the AddExecutionLogEntry method of the parent
// MockExecutionLogEntryStore instance is invoked.
type ExecutionLogEntryStoreAddExecutionLogEntryFunc struct {
defaultHook func(context.Context, int, executor.ExecutionLogEntry) (int, error)
hooks []func(context.Context, int, executor.ExecutionLogEntry) (int, error)
defaultHook func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)
hooks []func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)
history []ExecutionLogEntryStoreAddExecutionLogEntryFuncCall
mutex sync.Mutex
}
// AddExecutionLogEntry delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockExecutionLogEntryStore) AddExecutionLogEntry(v0 context.Context, v1 int, v2 executor.ExecutionLogEntry) (int, error) {
func (m *MockExecutionLogEntryStore) AddExecutionLogEntry(v0 context.Context, v1 types.Job, v2 executor.ExecutionLogEntry) (int, error) {
r0, r1 := m.AddExecutionLogEntryFunc.nextHook()(v0, v1, v2)
m.AddExecutionLogEntryFunc.appendCall(ExecutionLogEntryStoreAddExecutionLogEntryFuncCall{v0, v1, v2, r0, r1})
return r0, r1
@ -100,7 +101,7 @@ func (m *MockExecutionLogEntryStore) AddExecutionLogEntry(v0 context.Context, v1
// SetDefaultHook sets function that is called when the AddExecutionLogEntry
// method of the parent MockExecutionLogEntryStore instance is invoked and
// the hook queue is empty.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, int, executor.ExecutionLogEntry) (int, error)) {
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)) {
f.defaultHook = hook
}
@ -109,7 +110,7 @@ func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultHook(hook fun
// instance invokes the hook at the front of the queue and discards it.
// After the queue is empty, the default hook function is invoked for any
// future action.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushHook(hook func(context.Context, int, executor.ExecutionLogEntry) (int, error)) {
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushHook(hook func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -118,19 +119,19 @@ func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushHook(hook func(cont
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) SetDefaultReturn(r0 int, r1 error) {
f.SetDefaultHook(func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
f.SetDefaultHook(func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) PushReturn(r0 int, r1 error) {
f.PushHook(func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
f.PushHook(func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
return r0, r1
})
}
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) nextHook() func(context.Context, int, executor.ExecutionLogEntry) (int, error) {
func (f *ExecutionLogEntryStoreAddExecutionLogEntryFunc) nextHook() func(context.Context, types.Job, executor.ExecutionLogEntry) (int, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -170,7 +171,7 @@ type ExecutionLogEntryStoreAddExecutionLogEntryFuncCall struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
Arg1 types.Job
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 executor.ExecutionLogEntry
@ -198,15 +199,15 @@ func (c ExecutionLogEntryStoreAddExecutionLogEntryFuncCall) Results() []interfac
// when the UpdateExecutionLogEntry method of the parent
// MockExecutionLogEntryStore instance is invoked.
type ExecutionLogEntryStoreUpdateExecutionLogEntryFunc struct {
defaultHook func(context.Context, int, int, executor.ExecutionLogEntry) error
hooks []func(context.Context, int, int, executor.ExecutionLogEntry) error
defaultHook func(context.Context, types.Job, int, executor.ExecutionLogEntry) error
hooks []func(context.Context, types.Job, int, executor.ExecutionLogEntry) error
history []ExecutionLogEntryStoreUpdateExecutionLogEntryFuncCall
mutex sync.Mutex
}
// UpdateExecutionLogEntry delegates to the next hook function in the queue
// and stores the parameter and result values of this invocation.
func (m *MockExecutionLogEntryStore) UpdateExecutionLogEntry(v0 context.Context, v1 int, v2 int, v3 executor.ExecutionLogEntry) error {
func (m *MockExecutionLogEntryStore) UpdateExecutionLogEntry(v0 context.Context, v1 types.Job, v2 int, v3 executor.ExecutionLogEntry) error {
r0 := m.UpdateExecutionLogEntryFunc.nextHook()(v0, v1, v2, v3)
m.UpdateExecutionLogEntryFunc.appendCall(ExecutionLogEntryStoreUpdateExecutionLogEntryFuncCall{v0, v1, v2, v3, r0})
return r0
@ -215,7 +216,7 @@ func (m *MockExecutionLogEntryStore) UpdateExecutionLogEntry(v0 context.Context,
// SetDefaultHook sets function that is called when the
// UpdateExecutionLogEntry method of the parent MockExecutionLogEntryStore
// instance is invoked and the hook queue is empty.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, int, int, executor.ExecutionLogEntry) error) {
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultHook(hook func(context.Context, types.Job, int, executor.ExecutionLogEntry) error) {
f.defaultHook = hook
}
@ -224,7 +225,7 @@ func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultHook(hook
// instance invokes the hook at the front of the queue and discards it.
// After the queue is empty, the default hook function is invoked for any
// future action.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushHook(hook func(context.Context, int, int, executor.ExecutionLogEntry) error) {
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushHook(hook func(context.Context, types.Job, int, executor.ExecutionLogEntry) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -233,19 +234,19 @@ func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushHook(hook func(c
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(context.Context, int, int, executor.ExecutionLogEntry) error {
f.SetDefaultHook(func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) PushReturn(r0 error) {
f.PushHook(func(context.Context, int, int, executor.ExecutionLogEntry) error {
f.PushHook(func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
return r0
})
}
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) nextHook() func(context.Context, int, int, executor.ExecutionLogEntry) error {
func (f *ExecutionLogEntryStoreUpdateExecutionLogEntryFunc) nextHook() func(context.Context, types.Job, int, executor.ExecutionLogEntry) error {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -285,7 +286,7 @@ type ExecutionLogEntryStoreUpdateExecutionLogEntryFuncCall struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
Arg1 types.Job
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 int
@ -709,12 +710,12 @@ type MockFilesStore struct {
func NewMockFilesStore() *MockFilesStore {
return &MockFilesStore{
ExistsFunc: &FilesStoreExistsFunc{
defaultHook: func(context.Context, string, string) (r0 bool, r1 error) {
defaultHook: func(context.Context, types.Job, string, string) (r0 bool, r1 error) {
return
},
},
GetFunc: &FilesStoreGetFunc{
defaultHook: func(context.Context, string, string) (r0 io.ReadCloser, r1 error) {
defaultHook: func(context.Context, types.Job, string, string) (r0 io.ReadCloser, r1 error) {
return
},
},
@ -726,12 +727,12 @@ func NewMockFilesStore() *MockFilesStore {
func NewStrictMockFilesStore() *MockFilesStore {
return &MockFilesStore{
ExistsFunc: &FilesStoreExistsFunc{
defaultHook: func(context.Context, string, string) (bool, error) {
defaultHook: func(context.Context, types.Job, string, string) (bool, error) {
panic("unexpected invocation of MockFilesStore.Exists")
},
},
GetFunc: &FilesStoreGetFunc{
defaultHook: func(context.Context, string, string) (io.ReadCloser, error) {
defaultHook: func(context.Context, types.Job, string, string) (io.ReadCloser, error) {
panic("unexpected invocation of MockFilesStore.Get")
},
},
@ -754,23 +755,23 @@ func NewMockFilesStoreFrom(i workspace.FilesStore) *MockFilesStore {
// FilesStoreExistsFunc describes the behavior when the Exists method of the
// parent MockFilesStore instance is invoked.
type FilesStoreExistsFunc struct {
defaultHook func(context.Context, string, string) (bool, error)
hooks []func(context.Context, string, string) (bool, error)
defaultHook func(context.Context, types.Job, string, string) (bool, error)
hooks []func(context.Context, types.Job, string, string) (bool, error)
history []FilesStoreExistsFuncCall
mutex sync.Mutex
}
// Exists delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockFilesStore) Exists(v0 context.Context, v1 string, v2 string) (bool, error) {
r0, r1 := m.ExistsFunc.nextHook()(v0, v1, v2)
m.ExistsFunc.appendCall(FilesStoreExistsFuncCall{v0, v1, v2, r0, r1})
func (m *MockFilesStore) Exists(v0 context.Context, v1 types.Job, v2 string, v3 string) (bool, error) {
r0, r1 := m.ExistsFunc.nextHook()(v0, v1, v2, v3)
m.ExistsFunc.appendCall(FilesStoreExistsFuncCall{v0, v1, v2, v3, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the Exists method of the
// parent MockFilesStore instance is invoked and the hook queue is empty.
func (f *FilesStoreExistsFunc) SetDefaultHook(hook func(context.Context, string, string) (bool, error)) {
func (f *FilesStoreExistsFunc) SetDefaultHook(hook func(context.Context, types.Job, string, string) (bool, error)) {
f.defaultHook = hook
}
@ -778,7 +779,7 @@ func (f *FilesStoreExistsFunc) SetDefaultHook(hook func(context.Context, string,
// Exists method of the parent MockFilesStore instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *FilesStoreExistsFunc) PushHook(hook func(context.Context, string, string) (bool, error)) {
func (f *FilesStoreExistsFunc) PushHook(hook func(context.Context, types.Job, string, string) (bool, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -787,19 +788,19 @@ func (f *FilesStoreExistsFunc) PushHook(hook func(context.Context, string, strin
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *FilesStoreExistsFunc) SetDefaultReturn(r0 bool, r1 error) {
f.SetDefaultHook(func(context.Context, string, string) (bool, error) {
f.SetDefaultHook(func(context.Context, types.Job, string, string) (bool, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *FilesStoreExistsFunc) PushReturn(r0 bool, r1 error) {
f.PushHook(func(context.Context, string, string) (bool, error) {
f.PushHook(func(context.Context, types.Job, string, string) (bool, error) {
return r0, r1
})
}
func (f *FilesStoreExistsFunc) nextHook() func(context.Context, string, string) (bool, error) {
func (f *FilesStoreExistsFunc) nextHook() func(context.Context, types.Job, string, string) (bool, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -837,10 +838,13 @@ type FilesStoreExistsFuncCall struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 string
Arg1 types.Job
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
// Arg3 is the value of the 4th argument passed to this method
// invocation.
Arg3 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 bool
@ -852,7 +856,7 @@ type FilesStoreExistsFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c FilesStoreExistsFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2}
return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}
}
// Results returns an interface slice containing the results of this
@ -864,23 +868,23 @@ func (c FilesStoreExistsFuncCall) Results() []interface{} {
// FilesStoreGetFunc describes the behavior when the Get method of the
// parent MockFilesStore instance is invoked.
type FilesStoreGetFunc struct {
defaultHook func(context.Context, string, string) (io.ReadCloser, error)
hooks []func(context.Context, string, string) (io.ReadCloser, error)
defaultHook func(context.Context, types.Job, string, string) (io.ReadCloser, error)
hooks []func(context.Context, types.Job, string, string) (io.ReadCloser, error)
history []FilesStoreGetFuncCall
mutex sync.Mutex
}
// Get delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockFilesStore) Get(v0 context.Context, v1 string, v2 string) (io.ReadCloser, error) {
r0, r1 := m.GetFunc.nextHook()(v0, v1, v2)
m.GetFunc.appendCall(FilesStoreGetFuncCall{v0, v1, v2, r0, r1})
func (m *MockFilesStore) Get(v0 context.Context, v1 types.Job, v2 string, v3 string) (io.ReadCloser, error) {
r0, r1 := m.GetFunc.nextHook()(v0, v1, v2, v3)
m.GetFunc.appendCall(FilesStoreGetFuncCall{v0, v1, v2, v3, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the Get method of the
// parent MockFilesStore instance is invoked and the hook queue is empty.
func (f *FilesStoreGetFunc) SetDefaultHook(hook func(context.Context, string, string) (io.ReadCloser, error)) {
func (f *FilesStoreGetFunc) SetDefaultHook(hook func(context.Context, types.Job, string, string) (io.ReadCloser, error)) {
f.defaultHook = hook
}
@ -888,7 +892,7 @@ func (f *FilesStoreGetFunc) SetDefaultHook(hook func(context.Context, string, st
// Get method of the parent MockFilesStore instance invokes the hook at the
// front of the queue and discards it. After the queue is empty, the default
// hook function is invoked for any future action.
func (f *FilesStoreGetFunc) PushHook(hook func(context.Context, string, string) (io.ReadCloser, error)) {
func (f *FilesStoreGetFunc) PushHook(hook func(context.Context, types.Job, string, string) (io.ReadCloser, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -897,19 +901,19 @@ func (f *FilesStoreGetFunc) PushHook(hook func(context.Context, string, string)
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *FilesStoreGetFunc) SetDefaultReturn(r0 io.ReadCloser, r1 error) {
f.SetDefaultHook(func(context.Context, string, string) (io.ReadCloser, error) {
f.SetDefaultHook(func(context.Context, types.Job, string, string) (io.ReadCloser, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *FilesStoreGetFunc) PushReturn(r0 io.ReadCloser, r1 error) {
f.PushHook(func(context.Context, string, string) (io.ReadCloser, error) {
f.PushHook(func(context.Context, types.Job, string, string) (io.ReadCloser, error) {
return r0, r1
})
}
func (f *FilesStoreGetFunc) nextHook() func(context.Context, string, string) (io.ReadCloser, error) {
func (f *FilesStoreGetFunc) nextHook() func(context.Context, types.Job, string, string) (io.ReadCloser, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -947,10 +951,13 @@ type FilesStoreGetFuncCall struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 string
Arg1 types.Job
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
// Arg3 is the value of the 4th argument passed to this method
// invocation.
Arg3 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 io.ReadCloser
@ -962,7 +969,7 @@ type FilesStoreGetFuncCall struct {
// Args returns an interface slice containing the arguments of this
// invocation.
func (c FilesStoreGetFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2}
return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}
}
// Results returns an interface slice containing the results of this

View File

@ -17,7 +17,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/janitor"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/metrics"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
@ -108,7 +108,7 @@ func NewWorker(observationCtx *observation.Context, nameSet *janitor.NameSet, op
ctx := context.Background()
return workerutil.NewWorker[executor.Job](ctx, queueClient, h, options.WorkerOptions), nil
return workerutil.NewWorker[types.Job](ctx, queueClient, h, options.WorkerOptions), nil
}
// connectToFrontend will ping the configured Sourcegraph instance until it receives a 200 response.
@ -127,7 +127,7 @@ func connectToFrontend(logger log.Logger, queueClient *queue.Client, options Opt
defer signal.Stop(signals)
for {
err := queueClient.Ping(context.Background(), options.QueueName, nil)
err := queueClient.Ping(context.Background())
if err == nil {
logger.Debug("Connected to Sourcegraph instance")
return true

View File

@ -5,7 +5,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
)
// prepareWorkspace creates and returns a temporary directory that acts as the workspace
@ -15,7 +15,7 @@ import (
func (h *handler) prepareWorkspace(
ctx context.Context,
commandRunner command.Runner,
job executor.Job,
job types.Job,
commandLogger command.Logger,
) (workspace.Workspace, error) {
if h.options.FirecrackerOptions.Enabled {
@ -28,6 +28,7 @@ func (h *handler) prepareWorkspace(
commandRunner,
commandLogger,
workspace.CloneOptions{
ExecutorName: h.options.WorkerOptions.WorkerHostname,
EndpointURL: h.options.QueueOptions.BaseClientOptions.EndpointOptions.URL,
GitServicePath: h.options.GitServicePath,
ExecutorToken: h.options.QueueOptions.BaseClientOptions.EndpointOptions.Token,
@ -43,6 +44,7 @@ func (h *handler) prepareWorkspace(
commandRunner,
commandLogger,
workspace.CloneOptions{
ExecutorName: h.options.WorkerOptions.WorkerHostname,
EndpointURL: h.options.QueueOptions.BaseClientOptions.EndpointOptions.URL,
GitServicePath: h.options.GitServicePath,
ExecutorToken: h.options.QueueOptions.BaseClientOptions.EndpointOptions.Token,

View File

@ -9,10 +9,11 @@ import (
"net/url"
"os"
"path/filepath"
"strconv"
"strings"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -29,7 +30,7 @@ var gitStdEnv = []string{
func cloneRepo(
ctx context.Context,
workspaceDir string,
job executor.Job,
job types.Job,
commandRunner command.Runner,
options CloneOptions,
operations *command.Operations,
@ -47,7 +48,7 @@ func cloneRepo(
}
}
proxyURL, cleanup, err := newGitProxyServer(options.EndpointURL, options.GitServicePath, job.RepositoryName, options.ExecutorToken)
proxyURL, cleanup, err := newGitProxyServer(options, job)
defer func() {
err = errors.Append(err, cleanup())
}()
@ -178,9 +179,9 @@ func cloneRepo(
// This is used so that we never have to tell git about the credentials used here.
//
// In the future, this will be used to provide different access tokens per job,
// so that we can tell _which_ job misused the token and also scope it's access
// so that we can tell _which_ job misused the token and also scope its access
// to the particular repo in question.
func newGitProxyServer(endpointURL, gitServicePath, repositoryName, accessToken string) (string, func() error, error) {
func newGitProxyServer(options CloneOptions, job types.Job) (string, func() error, error) {
// Get new random free port.
listener, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
@ -189,40 +190,26 @@ func newGitProxyServer(endpointURL, gitServicePath, repositoryName, accessToken
cleanupListener := func() error { return listener.Close() }
upstream, err := makeRelativeURL(
endpointURL,
gitServicePath,
options.EndpointURL,
options.GitServicePath,
)
if err != nil {
return "", cleanupListener, err
}
d := httputil.NewSingleHostReverseProxy(upstream).Director
proxy := &httputil.ReverseProxy{
Director: func(req *http.Request) {
d(req)
req.Host = upstream.Host
// Add authentication. We don't add this in the git clone URL directly
// to never tell git about the clone secret.
req.Header.Set("Authorization", fmt.Sprintf("%s %s", SchemeExecutorToken, accessToken))
req.Header.Set("X-Sourcegraph-Actor-UID", "internal")
req.URL.User = url.User("executor")
},
}
proxy := newReverseProxy(upstream, options.ExecutorToken, job.Token, options.ExecutorName, job.ID)
go http.Serve(listener, http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Prevent queries for repos other than this job's repo.
// This is _not_ a security measure; that should be handled by additional
// clone tokens. This is mostly a gate to find out when we accidentally
// access another repo.
if !strings.HasPrefix(r.URL.Path, "/"+repositoryName+"/") {
if !strings.HasPrefix(r.URL.Path, "/"+job.RepositoryName+"/") {
w.WriteHeader(http.StatusForbidden)
return
}
// TODO: We might want to limit throughput here to the same level we limit
// it _inside_ the firecracker VM.
// TODO: We might want to limit throughput here to the same level we limit it _inside_ the firecracker VM.
proxy.ServeHTTP(w, r)
}))
@ -242,3 +229,29 @@ func makeRelativeURL(base string, path ...string) (*url.URL, error) {
return urlx, nil
}
func newReverseProxy(upstream *url.URL, accessToken string, jobToken string, executorName string, jobId int) *httputil.ReverseProxy {
proxy := httputil.NewSingleHostReverseProxy(upstream)
superDirector := proxy.Director
proxy.Director = func(req *http.Request) {
superDirector(req)
req.Host = upstream.Host
// Add authentication. We don't add this in the git clone URL directly
// to never tell git about the clone secret.
// If there is no token set, we may be talking with a version of Sourcegraph that is behind.
if len(jobToken) > 0 {
req.Header.Set("Authorization", fmt.Sprintf("%s %s", "Bearer", jobToken))
} else {
req.Header.Set("Authorization", fmt.Sprintf("%s %s", SchemeExecutorToken, accessToken))
}
req.Header.Set("X-Sourcegraph-Actor-UID", "internal")
req.Header.Set("X-Sourcegraph-Job-ID", strconv.Itoa(jobId))
// When using the reverse proxy, setting the username on req.URL.User is not respected. If a username must be set,
// you have to use .SetBasicAuth(). However, this will set the Authorization using the username + password.
// So to avoid confusion, set the executor name in a specific HTTP header.
req.Header.Set("X-Sourcegraph-Executor-Name", executorName)
}
return proxy
}
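
As a side note, a minimal in-package test sketch of the header behavior above. The test name, the fake upstream, and the secret values are assumptions for illustration (not part of this change), and it presumes net/http, net/http/httptest, net/url, and testing imports.

func TestNewReverseProxyHeaders(t *testing.T) {
    // Fake upstream that records the headers the proxy forwards.
    var gotAuth, gotJobID, gotExecutor string
    upstream := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        gotAuth = r.Header.Get("Authorization")
        gotJobID = r.Header.Get("X-Sourcegraph-Job-ID")
        gotExecutor = r.Header.Get("X-Sourcegraph-Executor-Name")
    }))
    defer upstream.Close()

    upstreamURL, err := url.Parse(upstream.URL)
    if err != nil {
        t.Fatal(err)
    }

    // With a job token present, the proxy should prefer the Bearer job token.
    proxy := newReverseProxy(upstreamURL, "executor-secret", "job-secret", "executor-1", 42)
    front := httptest.NewServer(proxy)
    defer front.Close()

    if _, err := http.Get(front.URL + "/torvalds/linux/info/refs"); err != nil {
        t.Fatal(err)
    }
    if gotAuth != "Bearer job-secret" || gotJobID != "42" || gotExecutor != "executor-1" {
        t.Fatalf("unexpected headers: auth=%q jobID=%q executor=%q", gotAuth, gotJobID, gotExecutor)
    }
}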

View File

@ -7,7 +7,7 @@ import (
"strconv"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
)
// NewDockerWorkspace creates a new workspace for docker-based execution. A path on
@ -15,7 +15,7 @@ import (
func NewDockerWorkspace(
ctx context.Context,
filesStore FilesStore,
job executor.Job,
job types.Job,
commandRunner command.Runner,
logger command.Logger,
cloneOpts CloneOptions,

View File

@ -11,22 +11,22 @@ import (
"time"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// FilesStore handles interactions with the file store.
type FilesStore interface {
// Exists determines if the file exists.
Exists(ctx context.Context, bucket string, key string) (bool, error)
Exists(ctx context.Context, job types.Job, bucket string, key string) (bool, error)
// Get retrieves the file.
Get(ctx context.Context, bucket string, key string) (io.ReadCloser, error)
Get(ctx context.Context, job types.Job, bucket string, key string) (io.ReadCloser, error)
}
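
For context, a tiny in-memory FilesStore sketch that satisfies the new job-aware interface; the type is hypothetical (only the real HTTP-backed store needs job.Token) and it assumes bytes, context, io, and the types imports used in this package.

type mapFilesStore struct{ files map[string][]byte }

func (s mapFilesStore) Exists(_ context.Context, _ types.Job, bucket, key string) (bool, error) {
    _, ok := s.files[bucket+"/"+key]
    return ok, nil
}

func (s mapFilesStore) Get(_ context.Context, _ types.Job, bucket, key string) (io.ReadCloser, error) {
    // Missing keys yield an empty reader, which is good enough for a sketch.
    return io.NopCloser(bytes.NewReader(s.files[bucket+"/"+key])), nil
}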
func prepareScripts(
ctx context.Context,
filesStore FilesStore,
job executor.Job,
job types.Job,
workspaceDir string,
commandLogger command.Logger,
) ([]string, error) {
@ -66,7 +66,7 @@ func prepareScripts(
workspaceFilesByPath[path] = buildScript(dockerStep)
}
if err := writeFiles(ctx, filesStore, workspaceFilesByPath, commandLogger); err != nil {
if err := writeFiles(ctx, filesStore, job, workspaceFilesByPath, commandLogger); err != nil {
return nil, errors.Wrap(err, "failed to write virtual machine files")
}
@ -111,16 +111,16 @@ set +e
set -x
`
func buildScript(dockerStep executor.DockerStep) workspaceFile {
func buildScript(dockerStep types.DockerStep) workspaceFile {
return workspaceFile{content: []byte(strings.Join(append([]string{ScriptPreamble, ""}, dockerStep.Commands...), "\n") + "\n")}
}
func scriptNameFromJobStep(job executor.Job, i int) string {
func scriptNameFromJobStep(job types.Job, i int) string {
return fmt.Sprintf("%d.%d_%s@%s.sh", job.ID, i, strings.ReplaceAll(job.RepositoryName, "/", "_"), job.Commit)
}
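
For reference, a hedged illustration of the script name format this produces, matching the format the worker tests expect:

// With job.ID = 42, RepositoryName = "torvalds/linux", Commit = "deadbeef" and step index 0:
name := scriptNameFromJobStep(types.Job{ID: 42, RepositoryName: "torvalds/linux", Commit: "deadbeef"}, 0)
// name == "42.0_torvalds_linux@deadbeef.sh" (slashes in the repository name become underscores)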
// writeFiles writes the content in the given map to the filesystem.
func writeFiles(ctx context.Context, store FilesStore, workspaceFileContentsByPath map[string]workspaceFile, logger command.Logger) (err error) {
func writeFiles(ctx context.Context, store FilesStore, job types.Job, workspaceFileContentsByPath map[string]workspaceFile, logger command.Logger) (err error) {
// Bail out early if there is nothing to do; we don't need to spawn an empty log group.
if len(workspaceFileContentsByPath) == 0 {
return nil
@ -148,7 +148,7 @@ func writeFiles(ctx context.Context, store FilesStore, workspaceFileContentsByPa
// Log how long it takes to write the files
start := time.Now()
if store != nil && wf.bucket != "" && wf.key != "" {
src, err = store.Get(ctx, wf.bucket, wf.key)
src, err = store.Get(ctx, job, wf.bucket, wf.key)
if err != nil {
return err
}

View File

@ -12,7 +12,7 @@ import (
"github.com/c2h5oh/datasize"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@ -24,7 +24,7 @@ import (
func NewFirecrackerWorkspace(
ctx context.Context,
filesStore FilesStore,
job executor.Job,
job types.Job,
diskSpace string,
keepWorkspace bool,
commandRunner command.Runner,

View File

@ -3,6 +3,7 @@ package workspace
import "context"
type CloneOptions struct {
ExecutorName string
EndpointURL string
GitServicePath string
ExecutorToken string

View File

@ -14,7 +14,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/apiclient/queue"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/command"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/executor/internal/worker/workspace"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
@ -46,7 +46,7 @@ func TestPrepareWorkspace_Clone(t *testing.T) {
operations: command.NewOperations(&observation.TestContext),
}
workspace, err := handler.prepareWorkspace(context.Background(), runner, executor.Job{
workspace, err := handler.prepareWorkspace(context.Background(), runner, types.Job{
RepositoryName: "torvalds/linux",
Commit: "deadbeef",
FetchTags: true,
@ -102,7 +102,7 @@ func TestPrepareWorkspace_Clone_Subdirectory(t *testing.T) {
operations: command.NewOperations(&observation.TestContext),
}
workspace, err := handler.prepareWorkspace(context.Background(), runner, executor.Job{
workspace, err := handler.prepareWorkspace(context.Background(), runner, types.Job{
RepositoryName: "torvalds/linux",
RepositoryDirectory: "subdirectory",
Commit: "deadbeef",
@ -160,7 +160,7 @@ func TestPrepareWorkspace_ShallowClone(t *testing.T) {
operations: command.NewOperations(&observation.TestContext),
}
workspace, err := handler.prepareWorkspace(context.Background(), runner, executor.Job{
workspace, err := handler.prepareWorkspace(context.Background(), runner, types.Job{
RepositoryName: "torvalds/linux",
Commit: "deadbeef",
ShallowClone: true,
@ -216,7 +216,7 @@ func TestPrepareWorkspace_SparseCheckout(t *testing.T) {
operations: command.NewOperations(&observation.TestContext),
}
workspace, err := handler.prepareWorkspace(context.Background(), runner, executor.Job{
workspace, err := handler.prepareWorkspace(context.Background(), runner, types.Job{
RepositoryName: "torvalds/linux",
Commit: "deadbeef",
ShallowClone: true,
@ -265,7 +265,7 @@ func TestPrepareWorkspace_NoRepository(t *testing.T) {
operations: command.NewOperations(&observation.TestContext),
}
workspace, err := handler.prepareWorkspace(context.Background(), runner, executor.Job{}, nil)
workspace, err := handler.prepareWorkspace(context.Background(), runner, types.Job{}, nil)
if err != nil {
t.Fatalf("unexpected error preparing workspace: %s", err)
}

View File

@ -1,15 +1,23 @@
package handler
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
"github.com/gorilla/mux"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/sourcegraph/log"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
executorstore "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store"
executortypes "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/executor"
internalexecutor "github.com/sourcegraph/sourcegraph/internal/executor"
metricsstore "github.com/sourcegraph/sourcegraph/internal/metrics/store"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
@ -18,100 +26,132 @@ import (
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// ExecutorHandler handles the HTTP requests of an executor.
type ExecutorHandler interface {
// Name is the name of the queue the handler processes.
Name() string
handleDequeue(w http.ResponseWriter, r *http.Request)
handleAddExecutionLogEntry(w http.ResponseWriter, r *http.Request)
handleUpdateExecutionLogEntry(w http.ResponseWriter, r *http.Request)
handleMarkComplete(w http.ResponseWriter, r *http.Request)
handleMarkErrored(w http.ResponseWriter, r *http.Request)
handleMarkFailed(w http.ResponseWriter, r *http.Request)
handleHeartbeat(w http.ResponseWriter, r *http.Request)
handleCanceledJobs(w http.ResponseWriter, r *http.Request)
// HandleDequeue retrieves the next executor.Job to be processed in the queue.
HandleDequeue(w http.ResponseWriter, r *http.Request)
// HandleAddExecutionLogEntry adds the log entry for the executor.Job.
HandleAddExecutionLogEntry(w http.ResponseWriter, r *http.Request)
// HandleUpdateExecutionLogEntry updates the log entry for the executor.Job.
HandleUpdateExecutionLogEntry(w http.ResponseWriter, r *http.Request)
// HandleMarkComplete updates the executor.Job to have a completed status.
HandleMarkComplete(w http.ResponseWriter, r *http.Request)
// HandleMarkErrored updates the executor.Job to have an errored status.
HandleMarkErrored(w http.ResponseWriter, r *http.Request)
// HandleMarkFailed updates the executor.Job to have a failed status.
HandleMarkFailed(w http.ResponseWriter, r *http.Request)
// HandleHeartbeat handles the heartbeat of an executor.
HandleHeartbeat(w http.ResponseWriter, r *http.Request)
// HandleCanceledJobs cancels the specified executor.Jobs.
HandleCanceledJobs(w http.ResponseWriter, r *http.Request)
}
var _ ExecutorHandler = &handler[workerutil.Record]{}
type handler[T workerutil.Record] struct {
QueueOptions[T]
queueHandler QueueHandler[T]
executorStore database.ExecutorStore
jobTokenStore executorstore.JobTokenStore
metricsStore metricsstore.DistributedStore
logger log.Logger
}
type QueueOptions[T workerutil.Record] struct {
// QueueHandler the specific logic for handling a queue.
type QueueHandler[T workerutil.Record] struct {
// Name signifies the type of work the queue serves to executors.
Name string
// Store is a required dbworker store store for each registered queue.
// Store is a required dbworker store.
Store store.Store[T]
// RecordTransformer is a required hook for each registered queue that transforms a generic
// record from that queue into the job to be given to an executor.
RecordTransformer func(ctx context.Context, version string, record T, resourceMetadata ResourceMetadata) (apiclient.Job, error)
RecordTransformer TransformerFunc[T]
}
func NewHandler[T workerutil.Record](executorStore database.ExecutorStore, metricsStore metricsstore.DistributedStore, queueOptions QueueOptions[T]) *handler[T] {
// TransformerFunc is the function to transform a workerutil.Record into an executor.Job.
type TransformerFunc[T workerutil.Record] func(ctx context.Context, version string, record T, resourceMetadata ResourceMetadata) (executortypes.Job, error)
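
As a hedged illustration of the new shape (not code from this change), a queue could be wired up roughly as below; the record type, queue name, and transform body are made up, and the snippet assumes this file's imports.

// exampleRecord is a hypothetical workerutil.Record used only for this sketch.
type exampleRecord struct{ ID int }

func (r exampleRecord) RecordID() int { return r.ID }

func newExampleHandler(
    executorStore database.ExecutorStore,
    jobTokenStore executorstore.JobTokenStore,
    metricsStore metricsstore.DistributedStore,
    recordStore store.Store[exampleRecord],
) ExecutorHandler {
    return NewHandler(executorStore, jobTokenStore, metricsStore, QueueHandler[exampleRecord]{
        Name:  "example",
        Store: recordStore,
        RecordTransformer: func(ctx context.Context, version string, record exampleRecord, _ ResourceMetadata) (executortypes.Job, error) {
            // Translate the queue-specific record into the generic payload handed to executors.
            return executortypes.Job{ID: record.RecordID()}, nil
        },
    })
}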
// NewHandler creates a new ExecutorHandler.
func NewHandler[T workerutil.Record](
executorStore database.ExecutorStore,
jobTokenStore executorstore.JobTokenStore,
metricsStore metricsstore.DistributedStore,
queueHandler QueueHandler[T],
) ExecutorHandler {
return &handler[T]{
executorStore: executorStore,
jobTokenStore: jobTokenStore,
metricsStore: metricsStore,
logger: log.Scoped("executor-queue-handler", "The route handler for all executor dbworker API tunnel endpoints"),
QueueOptions: queueOptions,
logger: log.Scoped(
fmt.Sprintf("executor-queue-handler-%s", queueHandler.Name),
fmt.Sprintf("The route handler for all executor %s dbworker API tunnel endpoints", queueHandler.Name),
),
queueHandler: queueHandler,
}
}
var ErrUnknownJob = errors.New("unknown job")
type ResourceMetadata struct {
NumCPUs int
Memory string
DiskSpace string
func (h *handler[T]) Name() string {
return h.queueHandler.Name
}
type executorMetadata struct {
Name string
Version string
Resources ResourceMetadata
}
func (h *handler[T]) HandleDequeue(w http.ResponseWriter, r *http.Request) {
var payload executortypes.DequeueRequest
func (h *handler[T]) Name() string { return h.QueueOptions.Name }
h.wrapHandler(w, r, &payload, func() (int, any, error) {
job, dequeued, err := h.dequeue(r.Context(), mux.Vars(r)["queueName"], executorMetadata{
name: payload.ExecutorName,
version: payload.Version,
resources: ResourceMetadata{
NumCPUs: payload.NumCPUs,
Memory: payload.Memory,
DiskSpace: payload.DiskSpace,
},
})
if !dequeued {
return http.StatusNoContent, nil, err
}
return http.StatusOK, job, err
})
}
// dequeue selects a job record from the database and stashes metadata including
// the job record and the locking transaction. If no job is available for processing,
// a false-valued flag is returned.
func (h *handler[T]) dequeue(ctx context.Context, metadata executorMetadata) (_ apiclient.Job, dequeued bool, _ error) {
if err := validateWorkerHostname(metadata.Name); err != nil {
return apiclient.Job{}, false, err
func (h *handler[T]) dequeue(ctx context.Context, queueName string, metadata executorMetadata) (executortypes.Job, bool, error) {
if err := validateWorkerHostname(metadata.name); err != nil {
return executortypes.Job{}, false, err
}
version2Supported := false
if metadata.Version != "" {
if metadata.version != "" {
var err error
version2Supported, err = api.CheckSourcegraphVersion(metadata.Version, "4.3.0-0", "2022-11-24")
version2Supported, err = api.CheckSourcegraphVersion(metadata.version, "4.3.0-0", "2022-11-24")
if err != nil {
return apiclient.Job{}, false, err
return executortypes.Job{}, false, err
}
}
// executorName is supposed to be unique.
record, dequeued, err := h.Store.Dequeue(ctx, metadata.Name, nil)
record, dequeued, err := h.queueHandler.Store.Dequeue(ctx, metadata.name, nil)
if err != nil {
return apiclient.Job{}, false, errors.Wrap(err, "dbworkerstore.Dequeue")
return executortypes.Job{}, false, errors.Wrap(err, "dbworkerstore.Dequeue")
}
if !dequeued {
return apiclient.Job{}, false, nil
return executortypes.Job{}, false, nil
}
logger := log.Scoped("dequeue", "Select a job record from the database.")
job, err := h.RecordTransformer(ctx, metadata.Version, record, metadata.Resources)
job, err := h.queueHandler.RecordTransformer(ctx, metadata.version, record, metadata.resources)
if err != nil {
if _, err := h.Store.MarkFailed(ctx, record.RecordID(), fmt.Sprintf("failed to transform record: %s", err), store.MarkFinalOptions{}); err != nil {
if _, err := h.queueHandler.Store.MarkFailed(ctx, record.RecordID(), fmt.Sprintf("failed to transform record: %s", err), store.MarkFinalOptions{}); err != nil {
logger.Error("Failed to mark record as failed",
log.Int("recordID", record.RecordID()),
log.Error(err))
}
return apiclient.Job{}, false, errors.Wrap(err, "RecordTransformer")
return executortypes.Job{}, false, errors.Wrap(err, "RecordTransformer")
}
// If this executor supports v2, return a v2 payload. Based on this field,
@ -120,16 +160,47 @@ func (h *handler[T]) dequeue(ctx context.Context, metadata executorMetadata) (_
job.Version = 2
}
token, err := h.jobTokenStore.Create(ctx, job.ID, queueName, job.RepositoryName)
if err != nil {
if errors.Is(err, executorstore.ErrJobTokenAlreadyCreated) {
// Token has already been created, regen it.
token, err = h.jobTokenStore.Regenerate(ctx, job.ID, queueName)
if err != nil {
return executortypes.Job{}, false, errors.Wrap(err, "RegenerateToken")
}
} else {
return executortypes.Job{}, false, errors.Wrap(err, "CreateToken")
}
}
job.Token = token
return job, true, nil
}
// addExecutionLogEntry calls AddExecutionLogEntry for the given job.
func (h *handler[T]) addExecutionLogEntry(ctx context.Context, executorName string, jobID int, entry executor.ExecutionLogEntry) (entryID int, err error) {
if err := validateWorkerHostname(executorName); err != nil {
return 0, err
}
type executorMetadata struct {
name string
version string
resources ResourceMetadata
}
entryID, err = h.Store.AddExecutionLogEntry(ctx, jobID, entry, store.ExecutionLogEntryOptions{
// ResourceMetadata is the specific resource data for an executor instance.
type ResourceMetadata struct {
NumCPUs int
Memory string
DiskSpace string
}
func (h *handler[T]) HandleAddExecutionLogEntry(w http.ResponseWriter, r *http.Request) {
var payload executortypes.AddExecutionLogEntryRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
id, err := h.addExecutionLogEntry(r.Context(), payload.ExecutorName, payload.JobID, payload.ExecutionLogEntry)
return http.StatusOK, id, err
})
}
func (h *handler[T]) addExecutionLogEntry(ctx context.Context, executorName string, jobID int, entry internalexecutor.ExecutionLogEntry) (int, error) {
entryID, err := h.queueHandler.Store.AddExecutionLogEntry(ctx, jobID, entry, store.ExecutionLogEntryOptions{
// We pass the WorkerHostname, so the store enforces the record to be owned by this executor. When
// the previous executor didn't report heartbeats anymore, but is still alive and reporting logs,
// both executors that ever got the job would be writing to the same record. This prevents it.
@ -143,13 +214,17 @@ func (h *handler[T]) addExecutionLogEntry(ctx context.Context, executorName stri
return entryID, errors.Wrap(err, "dbworkerstore.AddExecutionLogEntry")
}
// updateExecutionLogEntry calls UpdateExecutionLogEntry for the given job and entry.
func (h *handler[T]) updateExecutionLogEntry(ctx context.Context, executorName string, jobID int, entryID int, entry executor.ExecutionLogEntry) error {
if err := validateWorkerHostname(executorName); err != nil {
return err
}
func (h *handler[T]) HandleUpdateExecutionLogEntry(w http.ResponseWriter, r *http.Request) {
var payload executortypes.UpdateExecutionLogEntryRequest
err := h.Store.UpdateExecutionLogEntry(ctx, jobID, entryID, entry, store.ExecutionLogEntryOptions{
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.updateExecutionLogEntry(r.Context(), payload.ExecutorName, payload.JobID, payload.EntryID, payload.ExecutionLogEntry)
return http.StatusNoContent, nil, err
})
}
func (h *handler[T]) updateExecutionLogEntry(ctx context.Context, executorName string, jobID int, entryID int, entry internalexecutor.ExecutionLogEntry) error {
err := h.queueHandler.Store.UpdateExecutionLogEntry(ctx, jobID, entryID, entry, store.ExecutionLogEntryOptions{
// We pass the WorkerHostname, so the store enforces the record to be owned by this executor. When
// the previous executor didn't report heartbeats anymore, but is still alive and reporting logs,
// both executors that ever got the job would be writing to the same record. This prevents it.
@ -163,13 +238,21 @@ func (h *handler[T]) updateExecutionLogEntry(ctx context.Context, executorName s
return errors.Wrap(err, "dbworkerstore.UpdateExecutionLogEntry")
}
// markComplete calls MarkComplete for the given job.
func (h *handler[T]) markComplete(ctx context.Context, executorName string, jobID int) error {
if err := validateWorkerHostname(executorName); err != nil {
return err
}
func (h *handler[T]) HandleMarkComplete(w http.ResponseWriter, r *http.Request) {
var payload executortypes.MarkCompleteRequest
ok, err := h.Store.MarkComplete(ctx, jobID, store.MarkFinalOptions{
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.markComplete(r.Context(), mux.Vars(r)["queueName"], payload.ExecutorName, payload.JobID)
if err == ErrUnknownJob {
return http.StatusNotFound, nil, nil
}
return http.StatusNoContent, nil, err
})
}
func (h *handler[T]) markComplete(ctx context.Context, queueName string, executorName string, jobID int) error {
ok, err := h.queueHandler.Store.MarkComplete(ctx, jobID, store.MarkFinalOptions{
// We pass the WorkerHostname, so the store enforces the record to be owned by this executor. When
// the previous executor didn't report heartbeats anymore, but is still alive and reporting state,
// both executors that ever got the job would be writing to the same record. This prevents it.
@ -181,16 +264,29 @@ func (h *handler[T]) markComplete(ctx context.Context, executorName string, jobI
if !ok {
return ErrUnknownJob
}
if err = h.jobTokenStore.Delete(ctx, jobID, queueName); err != nil {
return errors.Wrap(err, "jobTokenStore.Delete")
}
return nil
}
// markErrored calls MarkErrored for the given job.
func (h *handler[T]) markErrored(ctx context.Context, executorName string, jobID int, errorMessage string) error {
if err := validateWorkerHostname(executorName); err != nil {
return err
}
func (h *handler[T]) HandleMarkErrored(w http.ResponseWriter, r *http.Request) {
var payload executortypes.MarkErroredRequest
ok, err := h.Store.MarkErrored(ctx, jobID, errorMessage, store.MarkFinalOptions{
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.markErrored(r.Context(), mux.Vars(r)["queueName"], payload.ExecutorName, payload.JobID, payload.ErrorMessage)
if err == ErrUnknownJob {
return http.StatusNotFound, nil, nil
}
return http.StatusNoContent, nil, err
})
}
func (h *handler[T]) markErrored(ctx context.Context, queueName string, executorName string, jobID int, errorMessage string) error {
ok, err := h.queueHandler.Store.MarkErrored(ctx, jobID, errorMessage, store.MarkFinalOptions{
// We pass the WorkerHostname, so the store enforces the record to be owned by this executor. When
// the previous executor didn't report heartbeats anymore, but is still alive and reporting state,
// both executors that ever got the job would be writing to the same record. This prevents it.
@ -202,16 +298,32 @@ func (h *handler[T]) markErrored(ctx context.Context, executorName string, jobID
if !ok {
return ErrUnknownJob
}
if err = h.jobTokenStore.Delete(ctx, jobID, queueName); err != nil {
return errors.Wrap(err, "jobTokenStore.Delete")
}
return nil
}
// markFailed calls MarkFailed for the given job.
func (h *handler[T]) markFailed(ctx context.Context, executorName string, jobID int, errorMessage string) error {
if err := validateWorkerHostname(executorName); err != nil {
return err
}
func (h *handler[T]) HandleMarkFailed(w http.ResponseWriter, r *http.Request) {
var payload executortypes.MarkErroredRequest
ok, err := h.Store.MarkFailed(ctx, jobID, errorMessage, store.MarkFinalOptions{
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.markFailed(r.Context(), mux.Vars(r)["queueName"], payload.ExecutorName, payload.JobID, payload.ErrorMessage)
if err == ErrUnknownJob {
return http.StatusNotFound, nil, nil
}
return http.StatusNoContent, nil, err
})
}
// ErrUnknownJob error when the job does not exist.
var ErrUnknownJob = errors.New("unknown job")
func (h *handler[T]) markFailed(ctx context.Context, queueName string, executorName string, jobID int, errorMessage string) error {
ok, err := h.queueHandler.Store.MarkFailed(ctx, jobID, errorMessage, store.MarkFinalOptions{
// We pass the WorkerHostname, so the store enforces the record to be owned by this executor. When
// the previous executor didn't report heartbeats anymore, but is still alive and reporting state,
// both executors that ever got the job would be writing to the same record. This prevents it.
@ -223,11 +335,58 @@ func (h *handler[T]) markFailed(ctx context.Context, executorName string, jobID
if !ok {
return ErrUnknownJob
}
if err = h.jobTokenStore.Delete(ctx, jobID, queueName); err != nil {
return errors.Wrap(err, "jobTokenStore.Delete")
}
return nil
}
// heartbeat calls Heartbeat for the given jobs.
func (h *handler[T]) heartbeat(ctx context.Context, executor types.Executor, ids []int) (knownIDs, cancelIDs []int, err error) {
func (h *handler[T]) HandleHeartbeat(w http.ResponseWriter, r *http.Request) {
var payload executortypes.HeartbeatRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
e := types.Executor{
Hostname: payload.ExecutorName,
QueueName: mux.Vars(r)["queueName"],
OS: payload.OS,
Architecture: payload.Architecture,
DockerVersion: payload.DockerVersion,
ExecutorVersion: payload.ExecutorVersion,
GitVersion: payload.GitVersion,
IgniteVersion: payload.IgniteVersion,
SrcCliVersion: payload.SrcCliVersion,
}
// Handle metrics in the background; this should not delay the heartbeat response being
// delivered. It is critical for keeping jobs alive.
go func() {
metrics, err := decodeAndLabelMetrics(payload.PrometheusMetrics, payload.ExecutorName)
if err != nil {
// Just log the error but don't panic. The heartbeat is more important.
h.logger.Error("failed to decode metrics and apply labels for executor heartbeat", log.Error(err))
return
}
if err = h.metricsStore.Ingest(payload.ExecutorName, metrics); err != nil {
// Just log the error but don't panic. The heartbeat is more important.
h.logger.Error("failed to ingest metrics for executor heartbeat", log.Error(err))
}
}()
knownIDs, cancelIDs, err := h.heartbeat(r.Context(), e, payload.JobIDs)
if payload.Version == executortypes.ExecutorAPIVersion2 {
return http.StatusOK, executortypes.HeartbeatResponse{KnownIDs: knownIDs, CancelIDs: cancelIDs}, err
}
// TODO: Remove in Sourcegraph 4.4.
return http.StatusOK, knownIDs, err
})
}
func (h *handler[T]) heartbeat(ctx context.Context, executor types.Executor, ids []int) ([]int, []int, error) {
if err := validateWorkerHostname(executor.Hostname); err != nil {
return nil, nil, err
}
@ -239,7 +398,7 @@ func (h *handler[T]) heartbeat(ctx context.Context, executor types.Executor, ids
logger.Error("Failed to upsert executor heartbeat", log.Error(err))
}
knownIDs, cancelIDs, err = h.Store.Heartbeat(ctx, ids, store.HeartbeatOptions{
knownIDs, cancelIDs, err := h.queueHandler.Store.Heartbeat(ctx, ids, store.HeartbeatOptions{
// We pass the WorkerHostname, so the store enforces the record to be owned by this executor. When
// the previous executor didn't report heartbeats anymore, but is still alive and reporting state,
// both executors that ever got the job would be writing to the same record. This prevents it.
@ -248,16 +407,112 @@ func (h *handler[T]) heartbeat(ctx context.Context, executor types.Executor, ids
return knownIDs, cancelIDs, errors.Wrap(err, "dbworkerstore.UpsertHeartbeat")
}
// canceled reaches to the queueOptions.FetchCanceled to determine jobs that need
// to be canceled.
// TODO: This handler can be removed in Sourcegraph 4.4.
func (h *handler[T]) HandleCanceledJobs(w http.ResponseWriter, r *http.Request) {
var payload executortypes.CanceledJobsRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
canceledIDs, err := h.cancelJobs(r.Context(), payload.ExecutorName, payload.KnownJobIDs)
return http.StatusOK, canceledIDs, err
})
}
// wrapHandler decodes the request body into the given payload pointer, then calls the given
// handler function. If the body cannot be decoded, a 400 BadRequest is returned and the handler
// function is not called. If the handler function returns an error, a 500 Internal Server Error
// is returned. Otherwise, the response status will match the status code value returned from the
// handler, and the payload value returned from the handler is encoded and written to the
// response body.
func (h *handler[T]) wrapHandler(w http.ResponseWriter, r *http.Request, payload any, handler func() (int, any, error)) {
if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
http.Error(w, fmt.Sprintf("Failed to unmarshal payload: %s", err.Error()), http.StatusBadRequest)
return
}
status, payload, err := handler()
if err != nil {
h.logger.Error("Handler returned an error", log.Error(err))
status = http.StatusInternalServerError
payload = errorResponse{Error: err.Error()}
}
data, err := json.Marshal(payload)
if err != nil {
h.logger.Error("Failed to serialize payload", log.Error(err))
http.Error(w, fmt.Sprintf("Failed to serialize payload: %s", err), http.StatusInternalServerError)
return
}
w.WriteHeader(status)
if status != http.StatusNoContent {
_, _ = io.Copy(w, bytes.NewReader(data))
}
}
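// Illustrative sketch (not part of this change): how a hypothetical extra endpoint would plug
// into wrapHandler. The pingRequest type and HandlePing method are made up; the behavior shown
// (400 on undecodable JSON, 500 plus an errorResponse body when the callback errors, otherwise
// the returned status and payload) is what wrapHandler above implements.
type pingRequest struct {
	ExecutorName string `json:"executorName"`
}

func (h *handler[T]) HandlePing(w http.ResponseWriter, r *http.Request) {
	var payload pingRequest
	h.wrapHandler(w, r, &payload, func() (int, any, error) {
		// Returning a non-nil error causes wrapHandler to log it and respond with a 500.
		if payload.ExecutorName == "" {
			return 0, nil, errors.New("executorName must be provided")
		}
		return http.StatusOK, map[string]string{"status": "ok"}, nil
	})
}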
// decodeAndLabelMetrics decodes the text-serialized Prometheus metrics dump and then
// applies common labels.
func decodeAndLabelMetrics(encodedMetrics, instanceName string) ([]*dto.MetricFamily, error) {
var data []*dto.MetricFamily
dec := expfmt.NewDecoder(strings.NewReader(encodedMetrics), expfmt.FmtText)
for {
var mf dto.MetricFamily
if err := dec.Decode(&mf); err != nil {
if err == io.EOF {
break
}
return nil, errors.Wrap(err, "decoding metric family")
}
// Attach the extra labels.
metricLabelInstance := "sg_instance"
metricLabelJob := "sg_job"
executorJob := "sourcegraph-executors"
registryJob := "sourcegraph-executors-registry"
for _, m := range mf.Metric {
var metricLabelInstanceValue string
for _, l := range m.Label {
if *l.Name == metricLabelInstance {
metricLabelInstanceValue = l.GetValue()
break
}
}
// If sg_instance is not set, set it to the executor name sent in the heartbeat.
// This is done for the executor's own metrics and its node_exporter metrics; executors
// set sg_instance for metrics scraped from the registry and the registry's node_exporter.
if metricLabelInstanceValue == "" {
m.Label = append(m.Label, &dto.LabelPair{Name: &metricLabelInstance, Value: &instanceName})
}
if metricLabelInstanceValue == "docker-registry" {
m.Label = append(m.Label, &dto.LabelPair{Name: &metricLabelJob, Value: &registryJob})
} else {
m.Label = append(m.Label, &dto.LabelPair{Name: &metricLabelJob, Value: &executorJob})
}
}
data = append(data, &mf)
}
return data, nil
}
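// Illustrative sketch (not part of this change): a minimal in-package example of how
// decodeAndLabelMetrics labels a small text-format dump. Samples without an sg_instance
// label get the executor's name plus sg_job="sourcegraph-executors"; samples that report
// sg_instance="docker-registry" get sg_job="sourcegraph-executors-registry" instead.
func exampleDecodeAndLabelMetrics() error {
	raw := `# TYPE executor_example_total counter
executor_example_total 3
executor_example_total{sg_instance="docker-registry"} 7
`
	families, err := decodeAndLabelMetrics(raw, "example-executor")
	if err != nil {
		return err
	}
	for _, mf := range families {
		for _, m := range mf.Metric {
			for _, l := range m.Label {
				fmt.Printf("%s: %s=%s\n", mf.GetName(), l.GetName(), l.GetValue())
			}
		}
	}
	return nil
}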
type errorResponse struct {
Error string `json:"error"`
}
// cancelJobs reaches into the queue handler's Store to determine which of the known jobs need to be canceled.
// This endpoint is deprecated and should be removed in Sourcegraph 4.4.
func (h *handler[T]) canceled(ctx context.Context, executorName string, knownIDs []int) (canceledIDs []int, err error) {
func (h *handler[T]) cancelJobs(ctx context.Context, executorName string, knownIDs []int) ([]int, error) {
if err := validateWorkerHostname(executorName); err != nil {
return nil, err
}
// The Heartbeat method now handles both heartbeats and cancellation. For backcompat,
// we fall back to this method.
_, canceledIDs, err = h.Store.Heartbeat(ctx, knownIDs, store.HeartbeatOptions{
_, canceledIDs, err := h.queueHandler.Store.Heartbeat(ctx, knownIDs, store.HeartbeatOptions{
// We pass the WorkerHostname, so the store enforces the record to be owned by this executor. When
// the previous executor didn't report heartbeats anymore, but is still alive and reporting state,
// both executors that ever got the job would be writing to the same record. This prevents it.

View File

@ -1,273 +1,29 @@
package handler
import (
"bytes"
"encoding/json"
"fmt"
"io"
"net/http"
"strings"
dto "github.com/prometheus/client_model/go"
"github.com/prometheus/common/expfmt"
"github.com/gorilla/mux"
"github.com/grafana/regexp"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/log"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
"github.com/sourcegraph/sourcegraph/internal/database"
metricsstore "github.com/sourcegraph/sourcegraph/internal/metrics/store"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// SetupRoutes registers all route handlers required for all configured executor
// queues with the given router.
func SetupRoutes(executorStore database.ExecutorStore, metricsStore metricsstore.DistributedStore, handlers []ExecutorHandler, router *mux.Router) {
for _, h := range handlers {
subRouter := router.PathPrefix(fmt.Sprintf("/{queueName:(?:%s)}/", regexp.QuoteMeta(h.Name()))).Subrouter()
routes := map[string]func(w http.ResponseWriter, r *http.Request){
"dequeue": h.handleDequeue,
"addExecutionLogEntry": h.handleAddExecutionLogEntry,
"updateExecutionLogEntry": h.handleUpdateExecutionLogEntry,
"markComplete": h.handleMarkComplete,
"markErrored": h.handleMarkErrored,
"markFailed": h.handleMarkFailed,
"heartbeat": h.handleHeartbeat,
// TODO: This endpoint can be removed in Sourcegraph 4.4.
"canceledJobs": h.handleCanceledJobs,
}
for path, handler := range routes {
subRouter.Path(fmt.Sprintf("/%s", path)).Methods("POST").HandlerFunc(handler)
}
}
func SetupRoutes(handler ExecutorHandler, router *mux.Router) {
subRouter := router.PathPrefix(fmt.Sprintf("/{queueName:(?:%s)}", regexp.QuoteMeta(handler.Name()))).Subrouter()
subRouter.Path("/dequeue").Methods(http.MethodPost).HandlerFunc(handler.HandleDequeue)
subRouter.Path("/heartbeat").Methods(http.MethodPost).HandlerFunc(handler.HandleHeartbeat)
subRouter.Path("/canceledJobs").Methods(http.MethodPost).HandlerFunc(handler.HandleCanceledJobs)
}
// POST /{queueName}/dequeue
func (h *handler[T]) handleDequeue(w http.ResponseWriter, r *http.Request) {
var payload apiclient.DequeueRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
job, dequeued, err := h.dequeue(r.Context(), executorMetadata{
Name: payload.ExecutorName,
Version: payload.Version,
Resources: ResourceMetadata{
NumCPUs: payload.NumCPUs,
Memory: payload.Memory,
DiskSpace: payload.DiskSpace,
},
})
if !dequeued {
return http.StatusNoContent, nil, err
}
return http.StatusOK, job, err
})
}
// POST /{queueName}/addExecutionLogEntry
func (h *handler[T]) handleAddExecutionLogEntry(w http.ResponseWriter, r *http.Request) {
var payload apiclient.AddExecutionLogEntryRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
id, err := h.addExecutionLogEntry(r.Context(), payload.ExecutorName, payload.JobID, payload.ExecutionLogEntry)
return http.StatusOK, id, err
})
}
// POST /{queueName}/updateExecutionLogEntry
func (h *handler[T]) handleUpdateExecutionLogEntry(w http.ResponseWriter, r *http.Request) {
var payload apiclient.UpdateExecutionLogEntryRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.updateExecutionLogEntry(r.Context(), payload.ExecutorName, payload.JobID, payload.EntryID, payload.ExecutionLogEntry)
return http.StatusNoContent, nil, err
})
}
// POST /{queueName}/markComplete
func (h *handler[T]) handleMarkComplete(w http.ResponseWriter, r *http.Request) {
var payload apiclient.MarkCompleteRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.markComplete(r.Context(), payload.ExecutorName, payload.JobID)
if err == ErrUnknownJob {
return http.StatusNotFound, nil, nil
}
return http.StatusNoContent, nil, err
})
}
// POST /{queueName}/markErrored
func (h *handler[T]) handleMarkErrored(w http.ResponseWriter, r *http.Request) {
var payload apiclient.MarkErroredRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.markErrored(r.Context(), payload.ExecutorName, payload.JobID, payload.ErrorMessage)
if err == ErrUnknownJob {
return http.StatusNotFound, nil, nil
}
return http.StatusNoContent, nil, err
})
}
// POST /{queueName}/markFailed
func (h *handler[T]) handleMarkFailed(w http.ResponseWriter, r *http.Request) {
var payload apiclient.MarkErroredRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
err := h.markFailed(r.Context(), payload.ExecutorName, payload.JobID, payload.ErrorMessage)
if err == ErrUnknownJob {
return http.StatusNotFound, nil, nil
}
return http.StatusNoContent, nil, err
})
}
// POST /{queueName}/heartbeat
func (h *handler[T]) handleHeartbeat(w http.ResponseWriter, r *http.Request) {
var payload apiclient.HeartbeatRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
executor := types.Executor{
Hostname: payload.ExecutorName,
QueueName: h.QueueOptions.Name,
OS: payload.OS,
Architecture: payload.Architecture,
DockerVersion: payload.DockerVersion,
ExecutorVersion: payload.ExecutorVersion,
GitVersion: payload.GitVersion,
IgniteVersion: payload.IgniteVersion,
SrcCliVersion: payload.SrcCliVersion,
}
// Handle metrics in the background; this should not delay the heartbeat response being
// delivered, which is critical for keeping jobs alive.
go func() {
metrics, err := decodeAndLabelMetrics(payload.PrometheusMetrics, payload.ExecutorName)
if err != nil {
// Just log the error but don't panic. The heartbeat is more important.
h.logger.Error("failed to decode metrics and apply labels for executor heartbeat", log.Error(err))
return
}
if err := h.metricsStore.Ingest(payload.ExecutorName, metrics); err != nil {
// Just log the error but don't panic. The heartbeat is more important.
h.logger.Error("failed to ingest metrics for executor heartbeat", log.Error(err))
}
}()
knownIDs, cancelIDs, err := h.heartbeat(r.Context(), executor, payload.JobIDs)
if payload.Version == apiclient.ExecutorAPIVersion2 {
return http.StatusOK, apiclient.HeartbeatResponse{KnownIDs: knownIDs, CancelIDs: cancelIDs}, err
}
// TODO: Remove in Sourcegraph 4.4.
return http.StatusOK, knownIDs, err
})
}
// POST /{queueName}/canceledJobs
// TODO: This handler can be removed in Sourcegraph 4.4.
func (h *handler[T]) handleCanceledJobs(w http.ResponseWriter, r *http.Request) {
var payload apiclient.CanceledJobsRequest
h.wrapHandler(w, r, &payload, func() (int, any, error) {
canceledIDs, err := h.canceled(r.Context(), payload.ExecutorName, payload.KnownJobIDs)
return http.StatusOK, canceledIDs, err
})
}
type errorResponse struct {
Error string `json:"error"`
}
// wrapHandler decodes the request body into the given payload pointer, then calls the given
// handler function. If the body cannot be decoded, a 400 Bad Request is returned and the handler
// function is not called. If the handler function returns an error, a 500 Internal Server Error
// is returned. Otherwise, the response status will match the status code value returned from the
// handler, and the payload value returned from the handler is encoded and written to the
// response body.
func (h *handler[T]) wrapHandler(w http.ResponseWriter, r *http.Request, payload any, handler func() (int, any, error)) {
if err := json.NewDecoder(r.Body).Decode(&payload); err != nil {
http.Error(w, fmt.Sprintf("Failed to unmarshal payload: %s", err.Error()), http.StatusBadRequest)
return
}
status, payload, err := handler()
if err != nil {
log15.Error("Handler returned an error", "err", err)
status = http.StatusInternalServerError
payload = errorResponse{Error: err.Error()}
}
data, err := json.Marshal(payload)
if err != nil {
log15.Error("Failed to serialize payload", "err", err)
http.Error(w, fmt.Sprintf("Failed to serialize payload: %s", err), http.StatusInternalServerError)
return
}
w.WriteHeader(status)
if status != http.StatusNoContent {
_, _ = io.Copy(w, bytes.NewReader(data))
}
}
// decodeAndLabelMetrics decodes the text-serialized Prometheus metrics dump and then
// applies common labels.
func decodeAndLabelMetrics(encodedMetrics, instanceName string) ([]*dto.MetricFamily, error) {
data := []*dto.MetricFamily{}
dec := expfmt.NewDecoder(strings.NewReader(encodedMetrics), expfmt.FmtText)
for {
var mf dto.MetricFamily
if err := dec.Decode(&mf); err != nil {
if err == io.EOF {
break
}
return nil, errors.Wrap(err, "decoding metric family")
}
// Attach the extra labels.
metricLabelInstance := "sg_instance"
metricLabelJob := "sg_job"
executorJob := "sourcegraph-executors"
registryJob := "sourcegraph-executors-registry"
for _, m := range mf.Metric {
var metricLabelInstanceValue string
for _, l := range m.Label {
if *l.Name == metricLabelInstance {
metricLabelInstanceValue = l.GetValue()
break
}
}
// If sg_instance is not set, set it to the executor name sent in the heartbeat.
// This is done for the executor's own metrics and its node_exporter metrics; executors
// set sg_instance for metrics scraped from the registry and the registry's node_exporter.
if metricLabelInstanceValue == "" {
m.Label = append(m.Label, &dto.LabelPair{Name: &metricLabelInstance, Value: &instanceName})
}
if metricLabelInstanceValue == "docker-registry" {
m.Label = append(m.Label, &dto.LabelPair{Name: &metricLabelJob, Value: &registryJob})
} else {
m.Label = append(m.Label, &dto.LabelPair{Name: &metricLabelJob, Value: &executorJob})
}
}
data = append(data, &mf)
}
return data, nil
// SetupJobRoutes registers the route handlers that operate on individual jobs for the
// given executor queue handler with the given router.
func SetupJobRoutes(handler ExecutorHandler, router *mux.Router) {
subRouter := router.PathPrefix(fmt.Sprintf("/{queueName:(?:%s)}", regexp.QuoteMeta(handler.Name()))).Subrouter()
subRouter.Path("/addExecutionLogEntry").Methods(http.MethodPost).HandlerFunc(handler.HandleAddExecutionLogEntry)
subRouter.Path("/updateExecutionLogEntry").Methods(http.MethodPost).HandlerFunc(handler.HandleUpdateExecutionLogEntry)
subRouter.Path("/markComplete").Methods(http.MethodPost).HandlerFunc(handler.HandleMarkComplete)
subRouter.Path("/markErrored").Methods(http.MethodPost).HandlerFunc(handler.HandleMarkErrored)
subRouter.Path("/markFailed").Methods(http.MethodPost).HandlerFunc(handler.HandleMarkFailed)
}

View File

@ -0,0 +1,212 @@
package handler_test
import (
"net/http"
"net/http/httptest"
"testing"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/mock"
"github.com/stretchr/testify/require"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/handler"
)
func TestSetupRoutes(t *testing.T) {
tests := []struct {
name string
method string
path string
expectedStatusCode int
expectationsFunc func(h *testExecutorHandler)
}{
{
name: "Dequeue",
method: http.MethodPost,
path: "/test/dequeue",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleDequeue").Once()
},
},
{
name: "Heartbeat",
method: http.MethodPost,
path: "/test/heartbeat",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleHeartbeat").Once()
},
},
{
name: "CanceledJobs",
method: http.MethodPost,
path: "/test/canceledJobs",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleCanceledJobs").Once()
},
},
{
name: "Invalid root",
method: http.MethodPost,
path: "/test1/dequeue",
expectedStatusCode: http.StatusNotFound,
},
{
name: "Invalid path",
method: http.MethodPost,
path: "/test/foo",
expectedStatusCode: http.StatusNotFound,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
router := mux.NewRouter()
h := new(testExecutorHandler)
handler.SetupRoutes(h, router)
req, err := http.NewRequest(test.method, test.path, nil)
require.NoError(t, err)
responseRecorder := httptest.NewRecorder()
if test.expectationsFunc != nil {
test.expectationsFunc(h)
}
router.ServeHTTP(responseRecorder, req)
assert.Equal(t, test.expectedStatusCode, responseRecorder.Code)
h.AssertExpectations(t)
})
}
}
func TestSetupJobRoutes(t *testing.T) {
tests := []struct {
name string
method string
path string
expectedStatusCode int
expectationsFunc func(h *testExecutorHandler)
}{
{
name: "AddExecutionLogEntry",
method: http.MethodPost,
path: "/test/addExecutionLogEntry",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleAddExecutionLogEntry").Once()
},
},
{
name: "UpdateExecutionLogEntry",
method: http.MethodPost,
path: "/test/updateExecutionLogEntry",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleUpdateExecutionLogEntry").Once()
},
},
{
name: "MarkComplete",
method: http.MethodPost,
path: "/test/markComplete",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleMarkComplete").Once()
},
},
{
name: "MarkErrored",
method: http.MethodPost,
path: "/test/markErrored",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleMarkErrored").Once()
},
},
{
name: "MarkFailed",
method: http.MethodPost,
path: "/test/markFailed",
expectedStatusCode: http.StatusOK,
expectationsFunc: func(h *testExecutorHandler) {
h.On("HandleMarkFailed").Once()
},
},
{
name: "Invalid root",
method: http.MethodPost,
path: "/test1/addExecutionLogEntry",
expectedStatusCode: http.StatusNotFound,
},
{
name: "Invalid path",
method: http.MethodPost,
path: "/test/foo",
expectedStatusCode: http.StatusNotFound,
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
router := mux.NewRouter()
h := new(testExecutorHandler)
handler.SetupJobRoutes(h, router)
req, err := http.NewRequest(test.method, test.path, nil)
require.NoError(t, err)
responseRecorder := httptest.NewRecorder()
if test.expectationsFunc != nil {
test.expectationsFunc(h)
}
router.ServeHTTP(responseRecorder, req)
assert.Equal(t, test.expectedStatusCode, responseRecorder.Code)
h.AssertExpectations(t)
})
}
}
type testExecutorHandler struct {
mock.Mock
}
func (t *testExecutorHandler) Name() string {
return "test"
}
func (t *testExecutorHandler) HandleDequeue(w http.ResponseWriter, r *http.Request) {
t.Called()
}
func (t *testExecutorHandler) HandleAddExecutionLogEntry(w http.ResponseWriter, r *http.Request) {
t.Called()
}
func (t *testExecutorHandler) HandleUpdateExecutionLogEntry(w http.ResponseWriter, r *http.Request) {
t.Called()
}
func (t *testExecutorHandler) HandleMarkComplete(w http.ResponseWriter, r *http.Request) {
t.Called()
}
func (t *testExecutorHandler) HandleMarkErrored(w http.ResponseWriter, r *http.Request) {
t.Called()
}
func (t *testExecutorHandler) HandleMarkFailed(w http.ResponseWriter, r *http.Request) {
t.Called()
}
func (t *testExecutorHandler) HandleHeartbeat(w http.ResponseWriter, r *http.Request) {
t.Called()
}
func (t *testExecutorHandler) HandleCanceledJobs(w http.ResponseWriter, r *http.Request) {
t.Called()
}

View File

@ -1,26 +1,19 @@
package executorqueue
import (
"context"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/conf/confdefaults"
"github.com/sourcegraph/sourcegraph/internal/conf/conftypes"
"github.com/sourcegraph/sourcegraph/internal/conf/deploy"
"github.com/sourcegraph/sourcegraph/internal/database"
metricsstore "github.com/sourcegraph/sourcegraph/internal/metrics/store"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/cmd/frontend/enterprise"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/handler"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/queues/batches"
codeintelqueue "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/queues/codeintel"
)
// Init initializes the executor endpoints required for use with the executor service.
func Init(
ctx context.Context,
observationCtx *observation.Context,
db database.DB,
conf conftypes.UnifiedWatchable,
@ -29,6 +22,7 @@ func Init(
codeintelUploadHandler := enterpriseServices.NewCodeIntelUploadHandler(false)
batchesWorkspaceFileGetHandler := enterpriseServices.BatchesChangesFileGetHandler
batchesWorkspaceFileExistsHandler := enterpriseServices.BatchesChangesFileGetHandler
accessToken := func() string {
isSingleProgram := deploy.IsDeployTypeSingleProgram(deploy.Type())
if isSingleProgram {
@ -36,24 +30,13 @@ func Init(
}
return conf.SiteConfig().ExecutorsAccessToken
}
logger := log.Scoped("executorqueue", "")
metricsStore := metricsstore.NewDistributedStore("executors:")
executorStore := db.Executors()
// Register queues. If this set changes, be sure to also update the list of valid
// queue names in ./metrics/queue_allocation.go, and register a metrics exporter
// in the worker.
//
// Note: In order to register a new queue type, please change the validate() check code in enterprise/cmd/executor/config.go
codeintelHandler := handler.NewHandler(executorStore, metricsStore, codeintelqueue.QueueOptions(observationCtx, db, accessToken))
batchesHandler := handler.NewHandler(executorStore, metricsStore, batches.QueueOptions(observationCtx, db, accessToken))
queueOptions := []handler.ExecutorHandler{codeintelHandler, batchesHandler}
queueHandler := newExecutorQueueHandler(
logger,
queueHandler := newExecutorQueuesHandler(
observationCtx,
db,
queueOptions,
logger,
accessToken,
codeintelUploadHandler,
batchesWorkspaceFileGetHandler,

View File

@ -1,74 +1,159 @@
package executorqueue
import (
"crypto/subtle"
"fmt"
"net/http"
"strconv"
"strings"
"github.com/gorilla/mux"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/handler"
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/queues/batches"
codeintelqueue "github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/queues/codeintel"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
metricsstore "github.com/sourcegraph/sourcegraph/internal/metrics/store"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func newExecutorQueueHandler(logger log.Logger, db database.DB, queueHandlers []handler.ExecutorHandler, accessToken func() string, uploadHandler http.Handler, batchesWorkspaceFileGetHandler http.Handler, batchesWorkspaceFileExistsHandler http.Handler) func() http.Handler {
func newExecutorQueuesHandler(
observationCtx *observation.Context,
db database.DB,
logger log.Logger,
accessToken func() string,
uploadHandler http.Handler,
batchesWorkspaceFileGetHandler http.Handler,
batchesWorkspaceFileExistsHandler http.Handler,
) func() http.Handler {
metricsStore := metricsstore.NewDistributedStore("executors:")
executorStore := db.Executors()
jobTokenStore := store.NewJobTokenStore(observationCtx, db)
// Register queues. If this set changes, be sure to also update the list of valid
// queue names in ./metrics/queue_allocation.go, and register a metrics exporter
// in the worker.
//
// Note: In order to register a new queue type, please change the validate() check code in enterprise/cmd/executor/config.go.
// A sketch of the registration shape is included after this function.
codeintelHandler := handler.NewHandler(executorStore, jobTokenStore, metricsStore, codeintelqueue.QueueHandler(observationCtx, db, accessToken))
batchesHandler := handler.NewHandler(executorStore, jobTokenStore, metricsStore, batches.QueueHandler(observationCtx, db, accessToken))
handlers := []handler.ExecutorHandler{codeintelHandler, batchesHandler}
gitserverClient := gitserver.NewClient()
// Auth middleware
executorAuth := executorAuthMiddleware(logger, accessToken)
factory := func() http.Handler {
// 🚨 SECURITY: These routes are secured by checking a token shared between services.
base := mux.NewRouter().PathPrefix("/.executors/").Subrouter()
base.StrictSlash(true)
// Proxy /info/refs and /git-upload-pack to gitservice for git clone/fetch.
base.Path("/git/{RepoName:.*}/info/refs").Handler(gitserverProxy(logger, gitserverClient, "/info/refs"))
base.Path("/git/{RepoName:.*}/git-upload-pack").Handler(gitserverProxy(logger, gitserverClient, "/git-upload-pack"))
// TODO: this is to make some integration tests happy (dev/authtest/code_intel_test.go). Add a meaningful route here.
// Previously, this function (newExecutorQueuesHandler) wrapped the entire base router in a middleware.
// This allowed code_intel_test.go to call `/.executors/` and get 500s and 401s because it would interact with
// the middleware first before reaching the actual handler (which did not exist).
// With the changes to support different middleware (executor token vs job token), the base route is no longer
// wrapped in a middleware. This means the integration tests get a 404 because the route (/.executors/) does not
// exist (since there is no handler for that route).
// Adding a handler to that specific route makes those tests happy. In the future, add a meaningful route and
// update the integration tests to use that route.
testRouter := base.PathPrefix("/").Subrouter()
testRouter.Path("/").Methods(http.MethodGet).HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
})
testRouter.Use(withInternalActor, executorAuth)
// Serve the executor queue API.
handler.SetupRoutes(executorStore, metricsStore, queueHandlers, base.PathPrefix("/queue/").Subrouter())
// Proxy /info/refs and /git-upload-pack to gitservice for git clone/fetch.
gitRouter := base.PathPrefix("/git").Subrouter()
gitRouter.Path("/{RepoName:.*}/info/refs").Handler(gitserverProxy(logger, gitserverClient, "/info/refs"))
gitRouter.Path("/{RepoName:.*}/git-upload-pack").Handler(gitserverProxy(logger, gitserverClient, "/git-upload-pack"))
// The git routes are treated as an internal actor. Additionally, each job comes with a short-lived token that is
// checked by jobAuthMiddleware.
gitRouter.Use(withInternalActor, jobAuthMiddleware(logger, routeGit, jobTokenStore, executorStore))
// Serve the executor queue APIs.
queueRouter := base.PathPrefix("/queue").Subrouter()
// The queue routes are treated as an internal actor and require the executor access token to authenticate.
queueRouter.Use(withInternalActor, executorAuth)
jobRouter := base.PathPrefix("/queue").Subrouter()
// The job routes are treated as an internal actor. Additionally, each job comes with a short-lived token that is
// checked by jobAuthMiddleware.
jobRouter.Use(withInternalActor, jobAuthMiddleware(logger, routeQueue, jobTokenStore, executorStore))
for _, h := range handlers {
handler.SetupRoutes(h, queueRouter)
handler.SetupJobRoutes(h, jobRouter)
}
// Upload LSIF indexes without a sudo access token or github tokens.
base.Path("/scip/upload").Methods("POST").Handler(uploadHandler)
base.Path("/lsif/upload").Methods("POST").Handler(uploadHandler)
base.Path("/scip/upload").Methods("HEAD").Handler(noopHandler)
lsifRouter := base.PathPrefix("/lsif").Name("executor-lsif").Subrouter()
lsifRouter.Path("/upload").Methods("POST").Handler(uploadHandler)
// The lsif routes are treated as an internal actor and require the executor access token to authenticate.
lsifRouter.Use(withInternalActor, executorAuth)
base.Path("/files/batch-changes/{spec}/{file}").Methods("GET").Handler(batchesWorkspaceFileGetHandler)
base.Path("/files/batch-changes/{spec}/{file}").Methods("HEAD").Handler(batchesWorkspaceFileExistsHandler)
// Upload SCIP indexes without a sudo access token or github tokens.
scipRouter := base.PathPrefix("/scip").Name("executor-scip").Subrouter()
scipRouter.Path("/upload").Methods("POST").Handler(uploadHandler)
scipRouter.Path("/upload").Methods("HEAD").Handler(noopHandler)
// The scip routes are treated as an internal actor and require the executor access token to authenticate.
scipRouter.Use(withInternalActor, executorAuth)
// Make sure requests to these endpoints are treated as an internal actor.
// We treat executors as internal and the executor secret is an internal actor
// access token.
// Also ensure that proper executor authentication is provided.
return authMiddleware(accessToken, withInternalActor(base))
filesRouter := base.PathPrefix("/files").Name("executor-files").Subrouter()
batchChangesRouter := filesRouter.PathPrefix("/batch-changes").Subrouter()
batchChangesRouter.Path("/{spec}/{file}").Methods(http.MethodGet).Handler(batchesWorkspaceFileGetHandler)
batchChangesRouter.Path("/{spec}/{file}").Methods(http.MethodHead).Handler(batchesWorkspaceFileExistsHandler)
// The files routes are treated as an internal actor. Additionally, each job comes with a short-lived token that is checked by jobAuthMiddleware.
filesRouter.Use(withInternalActor, jobAuthMiddleware(logger, routeFiles, jobTokenStore, executorStore))
return base
}
return factory
}
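// Illustrative sketch (not part of this change): the shape a hypothetical additional queue
// registration would take, mirroring the codeintel and batches registrations above. The
// examplequeue package and its QueueHandler constructor are made up; the real steps are the
// same: build a QueueHandler, wrap it with handler.NewHandler, and include it in the
// handlers slice so it is mounted on both the queue router and the job router.
func registerExampleQueue(
	observationCtx *observation.Context,
	db database.DB,
	executorStore database.ExecutorStore,
	jobTokenStore store.JobTokenStore,
	metricsStore metricsstore.DistributedStore,
	accessToken func() string,
	handlers []handler.ExecutorHandler,
) []handler.ExecutorHandler {
	queueHandler := examplequeue.QueueHandler(observationCtx, db, accessToken) // hypothetical queue package
	return append(handlers, handler.NewHandler(executorStore, jobTokenStore, metricsStore, queueHandler))
}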
// authMiddleware rejects requests that do not have an Authorization header set
type routeName string
const (
routeFiles = "files"
routeGit = "git"
routeQueue = "queue"
)
// withInternalActor ensures that the request handling is running as an internal actor.
func withInternalActor(next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
ctx := req.Context()
next.ServeHTTP(rw, req.WithContext(actor.WithInternalActor(ctx)))
})
}
// executorAuthMiddleware rejects requests that do not have an Authorization header set
// with the correct "token-executor <token>" value. This should only be used
// for internal _services_, not users, in which a shared key exchange can be
// done so safely.
func authMiddleware(accessToken func() string, next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if validateExecutorToken(w, r, accessToken()) {
next.ServeHTTP(w, r)
}
})
func executorAuthMiddleware(logger log.Logger, accessToken func() string) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if validateExecutorToken(w, r, logger, accessToken()) {
next.ServeHTTP(w, r)
}
})
}
}
const SchemeExecutorToken = "token-executor"
func validateExecutorToken(w http.ResponseWriter, r *http.Request, expectedAccessToken string) bool {
func validateExecutorToken(w http.ResponseWriter, r *http.Request, logger log.Logger, expectedAccessToken string) bool {
if expectedAccessToken == "" {
log15.Error("executors.accessToken not configured in site config")
logger.Error("executors.accessToken not configured in site config")
http.Error(w, "Executors are not configured on this instance", http.StatusInternalServerError)
return false
}
@ -92,7 +177,10 @@ func validateExecutorToken(w http.ResponseWriter, r *http.Request, expectedAcces
return false
}
if token != expectedAccessToken {
// 🚨 SECURITY: Use constant-time comparisons to avoid leaking the verification
// code via timing attack. It is not important to avoid leaking the *length* of
// the code, because the length of verification codes is constant.
if subtle.ConstantTimeCompare([]byte(token), []byte(expectedAccessToken)) == 0 {
w.WriteHeader(http.StatusForbidden)
return false
}
@ -100,13 +188,153 @@ func validateExecutorToken(w http.ResponseWriter, r *http.Request, expectedAcces
return true
}
// withInternalActor ensures that the request handling is running as an internal actor.
func withInternalActor(next http.Handler) http.Handler {
return http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) {
ctx := req.Context()
func jobAuthMiddleware(
logger log.Logger,
routeName routeName,
tokenStore store.JobTokenStore,
executorStore database.ExecutorStore,
) mux.MiddlewareFunc {
return func(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if validateJobRequest(w, r, logger, routeName, tokenStore, executorStore) {
next.ServeHTTP(w, r)
}
})
}
}
next.ServeHTTP(rw, req.WithContext(actor.WithInternalActor(ctx)))
})
func validateJobRequest(
w http.ResponseWriter,
r *http.Request,
logger log.Logger,
routeName routeName,
tokenStore store.JobTokenStore,
executorStore database.ExecutorStore,
) bool {
// Get the auth token from the Authorization header.
var tokenType string
var authToken string
if headerValue := r.Header.Get("Authorization"); headerValue != "" {
parts := strings.Split(headerValue, " ")
if len(parts) != 2 {
http.Error(w, fmt.Sprintf(`HTTP Authorization request header value must be of the following form: '%s "TOKEN"' or '%s TOKEN'`, "Bearer", "token-executor"), http.StatusUnauthorized)
return false
}
// Check what the token type is. For backwards compatibility's sake, we should also accept the general executor
// access token.
tokenType = parts[0]
if tokenType != "Bearer" && tokenType != "token-executor" {
http.Error(w, fmt.Sprintf("unrecognized HTTP Authorization request header scheme (supported values: %q, %q)", "Bearer", "token-executor"), http.StatusUnauthorized)
return false
}
authToken = parts[1]
}
if authToken == "" {
http.Error(w, "no token value in the HTTP Authorization request header", http.StatusUnauthorized)
return false
}
// If the general executor access token was provided, simply check the value.
if tokenType == "token-executor" {
// 🚨 SECURITY: Use constant-time comparisons to avoid leaking the verification
// code via timing attack. It is not important to avoid leaking the *length* of
// the code, because the length of verification codes is constant.
if subtle.ConstantTimeCompare([]byte(authToken), []byte(conf.SiteConfig().ExecutorsAccessToken)) == 1 {
return true
} else {
w.WriteHeader(http.StatusForbidden)
return false
}
}
var executorName string
var jobId int64
var queue string
var repo string
var err error
// Each route is "special". Set additional information based on the route that is being worked with.
switch routeName {
case routeFiles:
queue = "batches"
case routeGit:
repo = mux.Vars(r)["RepoName"]
case routeQueue:
queue = mux.Vars(r)["queueName"]
default:
logger.Error("unsupported route", log.String("route", string(routeName)))
http.Error(w, "unsupported route", http.StatusBadRequest)
return false
}
jobId, err = parseJobIdHeader(r)
if err != nil {
logger.Error("failed to parse jobId", log.Error(err))
http.Error(w, err.Error(), http.StatusBadRequest)
return false
}
// When the requester sets a User with a username, r.URL.User.Username() always returns a blank value; the only
// way to read it would be via BasicAuth(). Instead, the executor name is passed explicitly in the
// X-Sourcegraph-Executor-Name header and read from there.
executorName = r.Header.Get("X-Sourcegraph-Executor-Name")
// Ensure the worker hostname provided in the header is valid before doing any lookups.
if len(executorName) == 0 {
http.Error(w, "worker hostname cannot be empty", http.StatusBadRequest)
return false
}
jobToken, err := tokenStore.GetByToken(r.Context(), authToken)
if err != nil {
logger.Error("failed to retrieve token", log.Error(err))
http.Error(w, "invalid token", http.StatusUnauthorized)
return false
}
// Ensure the token was generated for the correct job.
if jobToken.JobID != jobId {
logger.Error("job ID does not match")
http.Error(w, "invalid token", http.StatusForbidden)
return false
}
// Check if the token is associated with the correct queue or repo.
if len(repo) > 0 {
if jobToken.Repo != repo {
logger.Error("repo does not match")
http.Error(w, "invalid token", http.StatusForbidden)
return false
}
} else {
// Ensure the token was generated for the correct queue.
if jobToken.Queue != queue {
logger.Error("queue name does not match")
http.Error(w, "invalid token", http.StatusForbidden)
return false
}
}
// Ensure the token came from a legit executor instance.
if _, _, err = executorStore.GetByHostname(r.Context(), executorName); err != nil {
logger.Error("failed to lookup executor by hostname", log.Error(err))
http.Error(w, "invalid token", http.StatusUnauthorized)
return false
}
return true
}
func parseJobIdHeader(r *http.Request) (int64, error) {
jobIdHeader := r.Header.Get("X-Sourcegraph-Job-ID")
if len(jobIdHeader) == 0 {
return 0, errors.New("job ID not provided in header 'X-Sourcegraph-Job-ID'")
}
id, err := strconv.Atoi(jobIdHeader)
if err != nil {
return 0, errors.Wrapf(err, "failed to parse Job ID")
}
return int64(id), nil
}
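// Illustrative sketch (not part of this change): the headers validateJobRequest expects on a
// job-scoped request. The URL, token value, job ID, and executor name below are placeholders;
// the header names match the ones read above. Older executors may instead send
// "Authorization: token-executor <executor access token>", which is still accepted.
func exampleJobScopedRequest() (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPost,
		"https://sourcegraph.example.com/.executors/queue/batches/markComplete", nil)
	if err != nil {
		return nil, err
	}
	req.Header.Set("Authorization", "Bearer <job token>")
	req.Header.Set("X-Sourcegraph-Job-ID", "42")
	req.Header.Set("X-Sourcegraph-Executor-Name", "example-executor")
	return req, nil
}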
var noopHandler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {

View File

@ -1,66 +1,432 @@
package executorqueue
import (
"fmt"
"io"
"net/http"
"net/http/httptest"
"strings"
"testing"
"github.com/gorilla/mux"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
executorstore "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/schema"
)
func TestInternalProxyAuthTokenMiddleware(t *testing.T) {
func TestAuthMiddleware(t *testing.T) {
logger := logtest.Scoped(t)
accessToken := "hunter2"
ts := httptest.NewServer(authMiddleware(
func() string { return accessToken },
http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusTeapot)
}),
))
defer ts.Close()
accessTokenFunc := func() string { return accessToken }
req, err := http.NewRequest("GET", ts.URL, nil)
if err != nil {
t.Fatalf("unexpected error creating request: %s", err)
}
router := mux.NewRouter()
router.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusTeapot)
})
router.Use(executorAuthMiddleware(logger, accessTokenFunc))
// no auth
req.Header.Del("Authorization")
resp, err := http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("unexpected error performing request: %s", err)
}
if resp.StatusCode != http.StatusUnauthorized {
t.Errorf("unexpected status code. want=%d have=%d", http.StatusUnauthorized, resp.StatusCode)
tests := []struct {
name string
headers http.Header
expectedStatusCode int
expectedResponseBody string
}{
{
name: "Authorized",
headers: http.Header{"Authorization": {"token-executor hunter2"}},
expectedStatusCode: http.StatusTeapot,
},
{
name: "Missing Authorization header",
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: "no token value in the HTTP Authorization request header (recommended) or basic auth (deprecated)\n",
},
{
name: "Wrong token",
headers: http.Header{"Authorization": {"token-executor foobar"}},
expectedStatusCode: http.StatusForbidden,
},
{
name: "Invalid prefix",
headers: http.Header{"Authorization": {"foo hunter2"}},
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: "unrecognized HTTP Authorization request header scheme (supported values: \"token-executor\")\n",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
req, err := http.NewRequest("GET", "/", nil)
require.NoError(t, err)
req.Header = test.headers
// malformed token
req.Header.Set("Authorization", fmt.Sprintf("token-unknown %s", strings.ToUpper(accessToken)))
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("unexpected error performing request: %s", err)
}
if resp.StatusCode != http.StatusUnauthorized {
t.Errorf("unexpected status code. want=%d have=%d", http.StatusUnauthorized, resp.StatusCode)
}
rw := httptest.NewRecorder()
// wrong token
req.Header.Set("Authorization", fmt.Sprintf("token-executor %s", strings.ToUpper(accessToken)))
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("unexpected error performing request: %s", err)
}
if resp.StatusCode != http.StatusForbidden {
t.Errorf("unexpected status code. want=%d have=%d", http.StatusForbidden, resp.StatusCode)
}
router.ServeHTTP(rw, req)
// correct token
req.Header.Set("Authorization", fmt.Sprintf("token-executor %s", accessToken))
resp, err = http.DefaultClient.Do(req)
if err != nil {
t.Fatalf("unexpected error performing request: %s", err)
}
if resp.StatusCode != http.StatusTeapot {
t.Errorf("unexpected status code. want=%d have=%d", http.StatusTeapot, resp.StatusCode)
assert.Equal(t, test.expectedStatusCode, rw.Code)
b, err := io.ReadAll(rw.Body)
require.NoError(t, err)
assert.Equal(t, test.expectedResponseBody, string(b))
})
}
}
func TestJobAuthMiddleware(t *testing.T) {
logger := logtest.Scoped(t)
conf.Mock(&conf.Unified{SiteConfiguration: schema.SiteConfiguration{ExecutorsAccessToken: "hunter2"}})
tests := []struct {
name string
routeName routeName
header map[string]string
mockFunc func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore)
expectedStatusCode int
expectedResponseBody string
assertionFunc func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore)
}{
{
name: "Queue Authorized",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{JobID: 42, Queue: "test"}, nil)
executorStore.GetByHostnameFunc.PushReturn(types.Executor{}, true, nil)
},
expectedStatusCode: http.StatusTeapot,
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 1)
assert.Equal(t, executorStore.GetByHostnameFunc.History()[0].Arg1, "test-executor")
},
},
{
name: "Queue Authorized general access token",
routeName: routeQueue,
header: map[string]string{
"Authorization": "token-executor hunter2",
},
expectedStatusCode: http.StatusTeapot,
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Git Authorized",
routeName: routeGit,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{JobID: 42, Repo: "test"}, nil)
executorStore.GetByHostnameFunc.PushReturn(types.Executor{}, true, nil)
},
expectedStatusCode: http.StatusTeapot,
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 1)
assert.Equal(t, executorStore.GetByHostnameFunc.History()[0].Arg1, "test-executor")
},
},
{
name: "Git Authorized general access token",
routeName: routeGit,
header: map[string]string{
"Authorization": "token-executor hunter2",
},
expectedStatusCode: http.StatusTeapot,
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Files Authorized",
routeName: routeFiles,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{JobID: 42, Queue: "batches"}, nil)
executorStore.GetByHostnameFunc.PushReturn(types.Executor{}, true, nil)
},
expectedStatusCode: http.StatusTeapot,
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 1)
assert.Equal(t, executorStore.GetByHostnameFunc.History()[0].Arg1, "test-executor")
},
},
{
name: "Files Authorized general access token",
routeName: routeFiles,
header: map[string]string{
"Authorization": "token-executor hunter2",
},
expectedStatusCode: http.StatusTeapot,
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "No worker hostname provided",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
},
expectedStatusCode: http.StatusBadRequest,
expectedResponseBody: "worker hostname cannot be empty\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "No job id header",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Executor-Name": "test-executor",
},
expectedStatusCode: http.StatusBadRequest,
expectedResponseBody: "job ID not provided in header 'X-Sourcegraph-Job-ID'\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Invalid job id header",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Executor-Name": "test-executor",
"X-Sourcegraph-Job-ID": "abc",
},
expectedStatusCode: http.StatusBadRequest,
expectedResponseBody: "failed to parse Job ID: strconv.Atoi: parsing \"abc\": invalid syntax\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "No Authorized header",
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: "no token value in the HTTP Authorization request header\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Invalid Authorized header parts",
header: map[string]string{
"Authorization": "somejobtoken",
},
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: "HTTP Authorization request header value must be of the following form: 'Bearer \"TOKEN\"' or 'token-executor TOKEN'\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Invalid Authorized header prefix",
header: map[string]string{
"Authorization": "Foo bar",
},
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: "unrecognized HTTP Authorization request header scheme (supported values: \"Bearer\", \"token-executor\")\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Invalid general access token",
routeName: routeQueue,
header: map[string]string{
"Authorization": "token-executor hunter3",
},
expectedStatusCode: http.StatusForbidden,
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Unsupported route",
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
expectedStatusCode: http.StatusBadRequest,
expectedResponseBody: "unsupported route\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 0)
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Failed to retrieve job token",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{}, errors.New("failed to find job token"))
},
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: "invalid token\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Job ID does not match",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{JobID: 7, Queue: "test"}, nil)
},
expectedStatusCode: http.StatusForbidden,
expectedResponseBody: "invalid token\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Queue does not match",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{JobID: 42, Queue: "test1"}, nil)
},
expectedStatusCode: http.StatusForbidden,
expectedResponseBody: "invalid token\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
{
name: "Executor host does not exist",
routeName: routeQueue,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{JobID: 42, Queue: "test"}, nil)
executorStore.GetByHostnameFunc.PushReturn(types.Executor{}, false, errors.New("executor does not exist"))
},
expectedStatusCode: http.StatusUnauthorized,
expectedResponseBody: "invalid token\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 1)
assert.Equal(t, executorStore.GetByHostnameFunc.History()[0].Arg1, "test-executor")
},
},
{
name: "Repo does not exist",
routeName: routeGit,
header: map[string]string{
"Authorization": "Bearer somejobtoken",
"X-Sourcegraph-Job-ID": "42",
"X-Sourcegraph-Executor-Name": "test-executor",
},
mockFunc: func(executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
jobTokenStore.GetByTokenFunc.PushReturn(executorstore.JobToken{JobID: 42, Repo: "test1"}, nil)
},
expectedStatusCode: http.StatusForbidden,
expectedResponseBody: "invalid token\n",
assertionFunc: func(t *testing.T, executorStore *database.MockExecutorStore, jobTokenStore *executorstore.MockJobTokenStore) {
require.Len(t, jobTokenStore.GetByTokenFunc.History(), 1)
assert.Equal(t, jobTokenStore.GetByTokenFunc.History()[0].Arg1, "somejobtoken")
require.Len(t, executorStore.GetByHostnameFunc.History(), 0)
},
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
executorStore := database.NewMockExecutorStore()
jobTokenStore := executorstore.NewMockJobTokenStore()
router := mux.NewRouter()
if test.routeName == routeGit {
router.HandleFunc("/{RepoName}", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusTeapot)
})
} else {
router.HandleFunc("/{queueName}", func(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusTeapot)
})
}
router.Use(jobAuthMiddleware(logger, test.routeName, jobTokenStore, executorStore))
req, err := http.NewRequest("GET", "/test", nil)
require.NoError(t, err)
for k, v := range test.header {
req.Header.Add(k, v)
}
rw := httptest.NewRecorder()
if test.mockFunc != nil {
test.mockFunc(executorStore, jobTokenStore)
}
router.ServeHTTP(rw, req)
assert.Equal(t, test.expectedStatusCode, rw.Code)
b, err := io.ReadAll(rw.Body)
require.NoError(t, err)
assert.Equal(t, test.expectedResponseBody, string(b))
if test.assertionFunc != nil {
test.assertionFunc(t, executorStore, jobTokenStore)
}
})
}
}

View File

@ -8,12 +8,12 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/handler"
bstore "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/store"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
func QueueOptions(observationCtx *observation.Context, db database.DB, _ func() string) handler.QueueOptions[*btypes.BatchSpecWorkspaceExecutionJob] {
func QueueHandler(observationCtx *observation.Context, db database.DB, _ func() string) handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob] {
logger := log.Scoped("executor-queue.batches", "The executor queue handlers for the batches queue")
recordTransformer := func(ctx context.Context, version string, record *btypes.BatchSpecWorkspaceExecutionJob, _ handler.ResourceMetadata) (apiclient.Job, error) {
batchesStore := bstore.New(db, observationCtx, nil)
@ -21,7 +21,7 @@ func QueueOptions(observationCtx *observation.Context, db database.DB, _ func()
}
store := bstore.NewBatchSpecWorkspaceExecutionWorkerStore(observationCtx, db.Handle())
return handler.QueueOptions[*btypes.BatchSpecWorkspaceExecutionJob]{
return handler.QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob]{
Name: "batches",
Store: store,
RecordTransformer: recordTransformer,

View File

@ -14,7 +14,7 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
"github.com/sourcegraph/sourcegraph/enterprise/internal/batches/store"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"

View File

@ -14,7 +14,7 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/frontend/graphqlbackend"
btypes "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/types"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"

View File

@ -6,20 +6,20 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/handler"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/autoindexing"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/shared/types"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/observation"
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
)
func QueueOptions(observationCtx *observation.Context, db database.DB, accessToken func() string) handler.QueueOptions[types.Index] {
func QueueHandler(observationCtx *observation.Context, db database.DB, accessToken func() string) handler.QueueHandler[types.Index] {
recordTransformer := func(ctx context.Context, _ string, record types.Index, resourceMetadata handler.ResourceMetadata) (apiclient.Job, error) {
return transformRecord(ctx, db, record, resourceMetadata, accessToken())
}
store := dbworkerstore.New(observationCtx, db.Handle(), autoindexing.IndexWorkerStoreOptions)
return handler.QueueOptions[types.Index]{
return handler.QueueHandler[types.Index]{
Name: "codeintel",
Store: store,
RecordTransformer: recordTransformer,

View File

@ -14,7 +14,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/handler"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/shared/types"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/encryption/keyring"

View File

@ -10,7 +10,7 @@ import (
"github.com/sourcegraph/sourcegraph/enterprise/cmd/frontend/internal/executorqueue/handler"
"github.com/sourcegraph/sourcegraph/enterprise/internal/codeintel/shared/types"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor"
apiclient "github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/database"
srccli "github.com/sourcegraph/sourcegraph/internal/src-cli"

View File

@ -91,7 +91,7 @@ func EnterpriseSetupHook(db database.DB, conf conftypes.UnifiedWatchable) enterp
// Initialize executor last, as we require code intel and batch changes services to be
// already populated on the enterpriseServices object.
if err := executor.Init(ctx, observationCtx, db, conf, &enterpriseServices); err != nil {
if err := executor.Init(observationCtx, db, conf, &enterpriseServices); err != nil {
logger.Fatal("failed to initialize executor", log.Error(err))
}

View File

@ -0,0 +1,796 @@
// Code generated by go-mockgen 1.3.7; DO NOT EDIT.
//
// This file was generated by running `sg generate` (or `go-mockgen`) at the root of
// this repository. To add additional mocks to this or another package, add a new entry
// to the mockgen.yaml file in the root of this repository.
package store
import (
"context"
"sync"
)
// MockJobTokenStore is a mock implementation of the JobTokenStore interface
// (from the package
// github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store)
// used for unit testing.
type MockJobTokenStore struct {
// CreateFunc is an instance of a mock function object controlling the
// behavior of the method Create.
CreateFunc *JobTokenStoreCreateFunc
// DeleteFunc is an instance of a mock function object controlling the
// behavior of the method Delete.
DeleteFunc *JobTokenStoreDeleteFunc
// ExistsFunc is an instance of a mock function object controlling the
// behavior of the method Exists.
ExistsFunc *JobTokenStoreExistsFunc
// GetFunc is an instance of a mock function object controlling the
// behavior of the method Get.
GetFunc *JobTokenStoreGetFunc
// GetByTokenFunc is an instance of a mock function object controlling
// the behavior of the method GetByToken.
GetByTokenFunc *JobTokenStoreGetByTokenFunc
// RegenerateFunc is an instance of a mock function object controlling
// the behavior of the method Regenerate.
RegenerateFunc *JobTokenStoreRegenerateFunc
}
// NewMockJobTokenStore creates a new mock of the JobTokenStore interface.
// All methods return zero values for all results, unless overwritten.
func NewMockJobTokenStore() *MockJobTokenStore {
return &MockJobTokenStore{
CreateFunc: &JobTokenStoreCreateFunc{
defaultHook: func(context.Context, int, string, string) (r0 string, r1 error) {
return
},
},
DeleteFunc: &JobTokenStoreDeleteFunc{
defaultHook: func(context.Context, int, string) (r0 error) {
return
},
},
ExistsFunc: &JobTokenStoreExistsFunc{
defaultHook: func(context.Context, int, string) (r0 bool, r1 error) {
return
},
},
GetFunc: &JobTokenStoreGetFunc{
defaultHook: func(context.Context, int, string) (r0 JobToken, r1 error) {
return
},
},
GetByTokenFunc: &JobTokenStoreGetByTokenFunc{
defaultHook: func(context.Context, string) (r0 JobToken, r1 error) {
return
},
},
RegenerateFunc: &JobTokenStoreRegenerateFunc{
defaultHook: func(context.Context, int, string) (r0 string, r1 error) {
return
},
},
}
}
// NewStrictMockJobTokenStore creates a new mock of the JobTokenStore
// interface. All methods panic on invocation, unless overwritten.
func NewStrictMockJobTokenStore() *MockJobTokenStore {
return &MockJobTokenStore{
CreateFunc: &JobTokenStoreCreateFunc{
defaultHook: func(context.Context, int, string, string) (string, error) {
panic("unexpected invocation of MockJobTokenStore.Create")
},
},
DeleteFunc: &JobTokenStoreDeleteFunc{
defaultHook: func(context.Context, int, string) error {
panic("unexpected invocation of MockJobTokenStore.Delete")
},
},
ExistsFunc: &JobTokenStoreExistsFunc{
defaultHook: func(context.Context, int, string) (bool, error) {
panic("unexpected invocation of MockJobTokenStore.Exists")
},
},
GetFunc: &JobTokenStoreGetFunc{
defaultHook: func(context.Context, int, string) (JobToken, error) {
panic("unexpected invocation of MockJobTokenStore.Get")
},
},
GetByTokenFunc: &JobTokenStoreGetByTokenFunc{
defaultHook: func(context.Context, string) (JobToken, error) {
panic("unexpected invocation of MockJobTokenStore.GetByToken")
},
},
RegenerateFunc: &JobTokenStoreRegenerateFunc{
defaultHook: func(context.Context, int, string) (string, error) {
panic("unexpected invocation of MockJobTokenStore.Regenerate")
},
},
}
}
// NewMockJobTokenStoreFrom creates a new mock of the MockJobTokenStore
// interface. All methods delegate to the given implementation, unless
// overwritten.
func NewMockJobTokenStoreFrom(i JobTokenStore) *MockJobTokenStore {
return &MockJobTokenStore{
CreateFunc: &JobTokenStoreCreateFunc{
defaultHook: i.Create,
},
DeleteFunc: &JobTokenStoreDeleteFunc{
defaultHook: i.Delete,
},
ExistsFunc: &JobTokenStoreExistsFunc{
defaultHook: i.Exists,
},
GetFunc: &JobTokenStoreGetFunc{
defaultHook: i.Get,
},
GetByTokenFunc: &JobTokenStoreGetByTokenFunc{
defaultHook: i.GetByToken,
},
RegenerateFunc: &JobTokenStoreRegenerateFunc{
defaultHook: i.Regenerate,
},
}
}
// JobTokenStoreCreateFunc describes the behavior when the Create method of
// the parent MockJobTokenStore instance is invoked.
type JobTokenStoreCreateFunc struct {
defaultHook func(context.Context, int, string, string) (string, error)
hooks []func(context.Context, int, string, string) (string, error)
history []JobTokenStoreCreateFuncCall
mutex sync.Mutex
}
// Create delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockJobTokenStore) Create(v0 context.Context, v1 int, v2 string, v3 string) (string, error) {
r0, r1 := m.CreateFunc.nextHook()(v0, v1, v2, v3)
m.CreateFunc.appendCall(JobTokenStoreCreateFuncCall{v0, v1, v2, v3, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the Create method of the
// parent MockJobTokenStore instance is invoked and the hook queue is empty.
func (f *JobTokenStoreCreateFunc) SetDefaultHook(hook func(context.Context, int, string, string) (string, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// Create method of the parent MockJobTokenStore instance invokes the hook
// at the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *JobTokenStoreCreateFunc) PushHook(hook func(context.Context, int, string, string) (string, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *JobTokenStoreCreateFunc) SetDefaultReturn(r0 string, r1 error) {
f.SetDefaultHook(func(context.Context, int, string, string) (string, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *JobTokenStoreCreateFunc) PushReturn(r0 string, r1 error) {
f.PushHook(func(context.Context, int, string, string) (string, error) {
return r0, r1
})
}
func (f *JobTokenStoreCreateFunc) nextHook() func(context.Context, int, string, string) (string, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *JobTokenStoreCreateFunc) appendCall(r0 JobTokenStoreCreateFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of JobTokenStoreCreateFuncCall objects
// describing the invocations of this function.
func (f *JobTokenStoreCreateFunc) History() []JobTokenStoreCreateFuncCall {
f.mutex.Lock()
history := make([]JobTokenStoreCreateFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// JobTokenStoreCreateFuncCall is an object that describes an invocation of
// method Create on an instance of MockJobTokenStore.
type JobTokenStoreCreateFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
// Arg3 is the value of the 4th argument passed to this method
// invocation.
Arg3 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 string
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c JobTokenStoreCreateFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2, c.Arg3}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c JobTokenStoreCreateFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// JobTokenStoreDeleteFunc describes the behavior when the Delete method of
// the parent MockJobTokenStore instance is invoked.
type JobTokenStoreDeleteFunc struct {
defaultHook func(context.Context, int, string) error
hooks []func(context.Context, int, string) error
history []JobTokenStoreDeleteFuncCall
mutex sync.Mutex
}
// Delete delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockJobTokenStore) Delete(v0 context.Context, v1 int, v2 string) error {
r0 := m.DeleteFunc.nextHook()(v0, v1, v2)
m.DeleteFunc.appendCall(JobTokenStoreDeleteFuncCall{v0, v1, v2, r0})
return r0
}
// SetDefaultHook sets function that is called when the Delete method of the
// parent MockJobTokenStore instance is invoked and the hook queue is empty.
func (f *JobTokenStoreDeleteFunc) SetDefaultHook(hook func(context.Context, int, string) error) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// Delete method of the parent MockJobTokenStore instance invokes the hook
// at the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *JobTokenStoreDeleteFunc) PushHook(hook func(context.Context, int, string) error) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *JobTokenStoreDeleteFunc) SetDefaultReturn(r0 error) {
f.SetDefaultHook(func(context.Context, int, string) error {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *JobTokenStoreDeleteFunc) PushReturn(r0 error) {
f.PushHook(func(context.Context, int, string) error {
return r0
})
}
func (f *JobTokenStoreDeleteFunc) nextHook() func(context.Context, int, string) error {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *JobTokenStoreDeleteFunc) appendCall(r0 JobTokenStoreDeleteFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of JobTokenStoreDeleteFuncCall objects
// describing the invocations of this function.
func (f *JobTokenStoreDeleteFunc) History() []JobTokenStoreDeleteFuncCall {
f.mutex.Lock()
history := make([]JobTokenStoreDeleteFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// JobTokenStoreDeleteFuncCall is an object that describes an invocation of
// method Delete on an instance of MockJobTokenStore.
type JobTokenStoreDeleteFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c JobTokenStoreDeleteFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c JobTokenStoreDeleteFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// JobTokenStoreExistsFunc describes the behavior when the Exists method of
// the parent MockJobTokenStore instance is invoked.
type JobTokenStoreExistsFunc struct {
defaultHook func(context.Context, int, string) (bool, error)
hooks []func(context.Context, int, string) (bool, error)
history []JobTokenStoreExistsFuncCall
mutex sync.Mutex
}
// Exists delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockJobTokenStore) Exists(v0 context.Context, v1 int, v2 string) (bool, error) {
r0, r1 := m.ExistsFunc.nextHook()(v0, v1, v2)
m.ExistsFunc.appendCall(JobTokenStoreExistsFuncCall{v0, v1, v2, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the Exists method of the
// parent MockJobTokenStore instance is invoked and the hook queue is empty.
func (f *JobTokenStoreExistsFunc) SetDefaultHook(hook func(context.Context, int, string) (bool, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// Exists method of the parent MockJobTokenStore instance invokes the hook
// at the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *JobTokenStoreExistsFunc) PushHook(hook func(context.Context, int, string) (bool, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *JobTokenStoreExistsFunc) SetDefaultReturn(r0 bool, r1 error) {
f.SetDefaultHook(func(context.Context, int, string) (bool, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *JobTokenStoreExistsFunc) PushReturn(r0 bool, r1 error) {
f.PushHook(func(context.Context, int, string) (bool, error) {
return r0, r1
})
}
func (f *JobTokenStoreExistsFunc) nextHook() func(context.Context, int, string) (bool, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *JobTokenStoreExistsFunc) appendCall(r0 JobTokenStoreExistsFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of JobTokenStoreExistsFuncCall objects
// describing the invocations of this function.
func (f *JobTokenStoreExistsFunc) History() []JobTokenStoreExistsFuncCall {
f.mutex.Lock()
history := make([]JobTokenStoreExistsFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// JobTokenStoreExistsFuncCall is an object that describes an invocation of
// method Exists on an instance of MockJobTokenStore.
type JobTokenStoreExistsFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 bool
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c JobTokenStoreExistsFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c JobTokenStoreExistsFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// JobTokenStoreGetFunc describes the behavior when the Get method of the
// parent MockJobTokenStore instance is invoked.
type JobTokenStoreGetFunc struct {
defaultHook func(context.Context, int, string) (JobToken, error)
hooks []func(context.Context, int, string) (JobToken, error)
history []JobTokenStoreGetFuncCall
mutex sync.Mutex
}
// Get delegates to the next hook function in the queue and stores the
// parameter and result values of this invocation.
func (m *MockJobTokenStore) Get(v0 context.Context, v1 int, v2 string) (JobToken, error) {
r0, r1 := m.GetFunc.nextHook()(v0, v1, v2)
m.GetFunc.appendCall(JobTokenStoreGetFuncCall{v0, v1, v2, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the Get method of the
// parent MockJobTokenStore instance is invoked and the hook queue is empty.
func (f *JobTokenStoreGetFunc) SetDefaultHook(hook func(context.Context, int, string) (JobToken, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// Get method of the parent MockJobTokenStore instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *JobTokenStoreGetFunc) PushHook(hook func(context.Context, int, string) (JobToken, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *JobTokenStoreGetFunc) SetDefaultReturn(r0 JobToken, r1 error) {
f.SetDefaultHook(func(context.Context, int, string) (JobToken, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *JobTokenStoreGetFunc) PushReturn(r0 JobToken, r1 error) {
f.PushHook(func(context.Context, int, string) (JobToken, error) {
return r0, r1
})
}
func (f *JobTokenStoreGetFunc) nextHook() func(context.Context, int, string) (JobToken, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *JobTokenStoreGetFunc) appendCall(r0 JobTokenStoreGetFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of JobTokenStoreGetFuncCall objects describing
// the invocations of this function.
func (f *JobTokenStoreGetFunc) History() []JobTokenStoreGetFuncCall {
f.mutex.Lock()
history := make([]JobTokenStoreGetFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// JobTokenStoreGetFuncCall is an object that describes an invocation of
// method Get on an instance of MockJobTokenStore.
type JobTokenStoreGetFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 JobToken
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c JobTokenStoreGetFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c JobTokenStoreGetFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// JobTokenStoreGetByTokenFunc describes the behavior when the GetByToken
// method of the parent MockJobTokenStore instance is invoked.
type JobTokenStoreGetByTokenFunc struct {
defaultHook func(context.Context, string) (JobToken, error)
hooks []func(context.Context, string) (JobToken, error)
history []JobTokenStoreGetByTokenFuncCall
mutex sync.Mutex
}
// GetByToken delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockJobTokenStore) GetByToken(v0 context.Context, v1 string) (JobToken, error) {
r0, r1 := m.GetByTokenFunc.nextHook()(v0, v1)
m.GetByTokenFunc.appendCall(JobTokenStoreGetByTokenFuncCall{v0, v1, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the GetByToken method of
// the parent MockJobTokenStore instance is invoked and the hook queue is
// empty.
func (f *JobTokenStoreGetByTokenFunc) SetDefaultHook(hook func(context.Context, string) (JobToken, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// GetByToken method of the parent MockJobTokenStore instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *JobTokenStoreGetByTokenFunc) PushHook(hook func(context.Context, string) (JobToken, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *JobTokenStoreGetByTokenFunc) SetDefaultReturn(r0 JobToken, r1 error) {
f.SetDefaultHook(func(context.Context, string) (JobToken, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *JobTokenStoreGetByTokenFunc) PushReturn(r0 JobToken, r1 error) {
f.PushHook(func(context.Context, string) (JobToken, error) {
return r0, r1
})
}
func (f *JobTokenStoreGetByTokenFunc) nextHook() func(context.Context, string) (JobToken, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *JobTokenStoreGetByTokenFunc) appendCall(r0 JobTokenStoreGetByTokenFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of JobTokenStoreGetByTokenFuncCall objects
// describing the invocations of this function.
func (f *JobTokenStoreGetByTokenFunc) History() []JobTokenStoreGetByTokenFuncCall {
f.mutex.Lock()
history := make([]JobTokenStoreGetByTokenFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// JobTokenStoreGetByTokenFuncCall is an object that describes an invocation
// of method GetByToken on an instance of MockJobTokenStore.
type JobTokenStoreGetByTokenFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 JobToken
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c JobTokenStoreGetByTokenFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c JobTokenStoreGetByTokenFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
// JobTokenStoreRegenerateFunc describes the behavior when the Regenerate
// method of the parent MockJobTokenStore instance is invoked.
type JobTokenStoreRegenerateFunc struct {
defaultHook func(context.Context, int, string) (string, error)
hooks []func(context.Context, int, string) (string, error)
history []JobTokenStoreRegenerateFuncCall
mutex sync.Mutex
}
// Regenerate delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockJobTokenStore) Regenerate(v0 context.Context, v1 int, v2 string) (string, error) {
r0, r1 := m.RegenerateFunc.nextHook()(v0, v1, v2)
m.RegenerateFunc.appendCall(JobTokenStoreRegenerateFuncCall{v0, v1, v2, r0, r1})
return r0, r1
}
// SetDefaultHook sets function that is called when the Regenerate method of
// the parent MockJobTokenStore instance is invoked and the hook queue is
// empty.
func (f *JobTokenStoreRegenerateFunc) SetDefaultHook(hook func(context.Context, int, string) (string, error)) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// Regenerate method of the parent MockJobTokenStore instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *JobTokenStoreRegenerateFunc) PushHook(hook func(context.Context, int, string) (string, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *JobTokenStoreRegenerateFunc) SetDefaultReturn(r0 string, r1 error) {
f.SetDefaultHook(func(context.Context, int, string) (string, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *JobTokenStoreRegenerateFunc) PushReturn(r0 string, r1 error) {
f.PushHook(func(context.Context, int, string) (string, error) {
return r0, r1
})
}
func (f *JobTokenStoreRegenerateFunc) nextHook() func(context.Context, int, string) (string, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *JobTokenStoreRegenerateFunc) appendCall(r0 JobTokenStoreRegenerateFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of JobTokenStoreRegenerateFuncCall objects
// describing the invocations of this function.
func (f *JobTokenStoreRegenerateFunc) History() []JobTokenStoreRegenerateFuncCall {
f.mutex.Lock()
history := make([]JobTokenStoreRegenerateFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// JobTokenStoreRegenerateFuncCall is an object that describes an invocation
// of method Regenerate on an instance of MockJobTokenStore.
type JobTokenStoreRegenerateFuncCall struct {
// Arg0 is the value of the 1st argument passed to this method
// invocation.
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 string
// Result1 is the value of the 2nd result returned from this method
// invocation.
Result1 error
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c JobTokenStoreRegenerateFuncCall) Args() []interface{} {
return []interface{}{c.Arg0, c.Arg1, c.Arg2}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c JobTokenStoreRegenerateFuncCall) Results() []interface{} {
return []interface{}{c.Result0, c.Result1}
}
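
The generated mock above follows the usual go-mockgen hook pattern: SetDefaultReturn and PushReturn queue canned results, and History records every invocation for later assertions. A minimal sketch of how a consumer test might drive MockJobTokenStore (the package name, job ID, and token value are illustrative only, not taken from this change):

```go
package auth_test

import (
	"context"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

	"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store"
)

func TestJobTokenLookup(t *testing.T) {
	jobTokenStore := store.NewMockJobTokenStore()
	// Queue a canned result for the next GetByToken call.
	jobTokenStore.GetByTokenFunc.PushReturn(store.JobToken{JobID: 42, Queue: "batches"}, nil)

	// The code under test would normally receive the store as a dependency;
	// it is called directly here to keep the sketch self-contained.
	jobToken, err := jobTokenStore.GetByToken(context.Background(), "deadbeef")
	require.NoError(t, err)
	assert.Equal(t, int64(42), jobToken.JobID)

	// Every invocation is recorded, so arguments can be asserted after the fact.
	history := jobTokenStore.GetByTokenFunc.History()
	require.Len(t, history, 1)
	assert.Equal(t, "deadbeef", history[0].Arg1)
}
```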

View File

@ -0,0 +1,45 @@
package store
import (
"fmt"
"github.com/sourcegraph/sourcegraph/internal/metrics"
"github.com/sourcegraph/sourcegraph/internal/observation"
)
type operations struct {
dequeue *observation.Operation
markComplete *observation.Operation
markErrored *observation.Operation
markFailed *observation.Operation
heartbeat *observation.Operation
addExecutionLogEntry *observation.Operation
updateExecutionLogEntry *observation.Operation
}
func newOperations(observationCtx *observation.Context) *operations {
m := metrics.NewREDMetrics(
observationCtx.Registerer,
"apiworker_apiclient_queue",
metrics.WithLabels("op"),
metrics.WithCountHelp("Total number of method invocations."),
)
op := func(name string) *observation.Operation {
return observationCtx.Operation(observation.Op{
Name: fmt.Sprintf("apiworker.apiclient.queue.worker.%s", name),
MetricLabelValues: []string{name},
Metrics: m,
})
}
return &operations{
dequeue: op("Dequeue"),
markComplete: op("MarkComplete"),
markErrored: op("MarkErrored"),
markFailed: op("MarkFailed"),
heartbeat: op("Heartbeat"),
addExecutionLogEntry: op("AddExecutionLogEntry"),
updateExecutionLogEntry: op("UpdateExecutionLogEntry"),
}
}

View File

@ -0,0 +1,203 @@
package store
import (
"context"
"crypto/rand"
"database/sql"
"encoding/hex"
"github.com/jackc/pgconn"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/hashutil"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
// JobTokenStore is the store for interacting with the executor_job_tokens table.
type JobTokenStore interface {
// Create creates a new JobToken and returns its hex-encoded plaintext value;
// only the SHA-256 hash of the token is persisted.
Create(ctx context.Context, jobId int, queue string, repo string) (string, error)
// Regenerate creates a new value for the matching JobToken.
Regenerate(ctx context.Context, jobId int, queue string) (string, error)
// Exists checks if the JobToken exists.
Exists(ctx context.Context, jobId int, queue string) (bool, error)
// Get retrieves the JobToken matching the specified values.
Get(ctx context.Context, jobId int, queue string) (JobToken, error)
// GetByToken retrieves the JobToken whose stored hash matches the given
// hex-encoded token value.
GetByToken(ctx context.Context, tokenHexEncoded string) (JobToken, error)
// Delete deletes the matching JobToken.
Delete(ctx context.Context, jobId int, queue string) error
}
// JobToken is the authentication token record for a specific job.
type JobToken struct {
Id int64
Value []byte
JobID int64
Queue string
RepoID int64
Repo string
}
type jobTokenStore struct {
*basestore.Store
logger log.Logger
operations *operations
observationCtx *observation.Context
}
// NewJobTokenStore creates a new JobTokenStore.
func NewJobTokenStore(observationCtx *observation.Context, db database.DB) JobTokenStore {
return &jobTokenStore{
Store: basestore.NewWithHandle(db.Handle()),
logger: observationCtx.Logger,
operations: newOperations(observationCtx),
observationCtx: observationCtx,
}
}
func (s *jobTokenStore) Create(ctx context.Context, jobId int, queue string, repo string) (string, error) {
if jobId == 0 {
return "", errors.New("missing jobId")
}
if len(queue) == 0 {
return "", errors.New("missing queue")
}
if len(repo) == 0 {
return "", errors.New("missing repo")
}
var b [20]byte
if _, err := rand.Read(b[:]); err != nil {
return "", err
}
err := s.Exec(
ctx,
sqlf.Sprintf(
createExecutorJobTokenFmtstr,
hashutil.ToSHA256Bytes(b[:]), jobId, queue, repo,
),
)
if err != nil {
if isUniqueConstraintViolation(err, "executor_job_tokens_job_id_queue_repo_id_key") {
return "", ErrJobTokenAlreadyCreated
}
return "", err
}
return hex.EncodeToString(b[:]), nil
}
const createExecutorJobTokenFmtstr = `
INSERT INTO executor_job_tokens (value_sha256, job_id, queue, repo_id)
SELECT %s, %s, %s, id FROM repo r WHERE r.name = %s;
`
func isUniqueConstraintViolation(err error, constraintName string) bool {
var e *pgconn.PgError
return errors.As(err, &e) && e.Code == "23505" && e.ConstraintName == constraintName
}
// ErrJobTokenAlreadyCreated is returned when a token has already been created for a Job.
var ErrJobTokenAlreadyCreated = errors.New("job token already exists")
func (s *jobTokenStore) Regenerate(ctx context.Context, jobId int, queue string) (string, error) {
var b [20]byte
if _, err := rand.Read(b[:]); err != nil {
return "", err
}
err := s.Exec(
ctx,
sqlf.Sprintf(
regenerateExecutorJobTokenFmtstr,
hashutil.ToSHA256Bytes(b[:]), jobId, queue,
),
)
if err != nil {
return "", err
}
return hex.EncodeToString(b[:]), nil
}
const regenerateExecutorJobTokenFmtstr = `
UPDATE executor_job_tokens SET value_sha256 = %s, updated_at = NOW()
WHERE job_id = %s AND queue = %s
`
func (s *jobTokenStore) Exists(ctx context.Context, jobId int, queue string) (bool, error) {
exists, _, err := basestore.ScanFirstBool(s.Query(ctx, sqlf.Sprintf(existsExecutorJobTokenFmtstr, jobId, queue)))
return exists, err
}
const existsExecutorJobTokenFmtstr = `
SELECT EXISTS(SELECT 1 FROM executor_job_tokens WHERE job_id=%s AND queue=%s)
`
func (s *jobTokenStore) Get(ctx context.Context, jobId int, queue string) (JobToken, error) {
row := s.QueryRow(
ctx,
sqlf.Sprintf(
getExecutorJobTokenFmtstr,
jobId, queue,
),
)
return scanJobToken(row)
}
const getExecutorJobTokenFmtstr = `
SELECT id, value_sha256, job_id, queue, repo_id, (SELECT name FROM repo WHERE id = t.repo_id) AS repo
FROM executor_job_tokens t
WHERE job_id = %s AND queue = %s
`
func (s *jobTokenStore) GetByToken(ctx context.Context, tokenHexEncoded string) (JobToken, error) {
token, err := hex.DecodeString(tokenHexEncoded)
if err != nil {
return JobToken{}, errors.New("invalid token")
}
row := s.QueryRow(
ctx,
sqlf.Sprintf(
getByTokenExecutorJobTokenFmtstr,
hashutil.ToSHA256Bytes(token),
),
)
return scanJobToken(row)
}
const getByTokenExecutorJobTokenFmtstr = `
SELECT id, value_sha256, job_id, queue, repo_id, (SELECT name FROM repo WHERE id = t.repo_id) AS repo
FROM executor_job_tokens t
WHERE value_sha256 = %s
`
func scanJobToken(row *sql.Row) (JobToken, error) {
jobToken := JobToken{}
err := row.Scan(
&jobToken.Id,
&jobToken.Value,
&jobToken.JobID,
&jobToken.Queue,
&jobToken.RepoID,
&jobToken.Repo,
)
if err != nil {
return jobToken, err
}
return jobToken, nil
}
func (s *jobTokenStore) Delete(ctx context.Context, jobId int, queue string) error {
return s.Store.Exec(ctx, sqlf.Sprintf(deleteExecutorJobTokenFmtstr, jobId, queue))
}
const deleteExecutorJobTokenFmtstr = `
DELETE FROM executor_job_tokens WHERE job_id = %s AND queue = %s
`
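
Taken together, the store implements a hash-at-rest scheme: Create and Regenerate return the hex-encoded plaintext while only its SHA-256 digest lands in executor_job_tokens, and GetByToken re-hashes whatever the caller presents before comparing. A rough end-to-end sketch of the intended lifecycle, written as an editor's illustration (the job ID, queue, and repository name are invented, and error handling is compressed):

```go
package example

import (
	"context"

	"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store"
	"github.com/sourcegraph/sourcegraph/internal/database"
	"github.com/sourcegraph/sourcegraph/internal/observation"
	"github.com/sourcegraph/sourcegraph/lib/errors"
)

func issueAndVerify(ctx context.Context, observationCtx *observation.Context, db database.DB) error {
	tokenStore := store.NewJobTokenStore(observationCtx, db)

	// Mint a plaintext token for job 42 on the "batches" queue; only its SHA-256
	// hash is written to executor_job_tokens.
	plaintext, err := tokenStore.Create(ctx, 42, "batches", "github.com/sourcegraph/sourcegraph")
	if errors.Is(err, store.ErrJobTokenAlreadyCreated) {
		// A token already exists for this job/queue/repo; rotate it instead.
		plaintext, err = tokenStore.Regenerate(ctx, 42, "batches")
	}
	if err != nil {
		return err
	}

	// Later, a caller presents the hex-encoded token; it is re-hashed and matched
	// against value_sha256.
	jobToken, err := tokenStore.GetByToken(ctx, plaintext)
	if err != nil {
		return err
	}

	// Clean up once the token is no longer needed.
	return tokenStore.Delete(ctx, int(jobToken.JobID), jobToken.Queue)
}
```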

View File

@ -0,0 +1,384 @@
package store_test
import (
"context"
"testing"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
bt "github.com/sourcegraph/sourcegraph/enterprise/internal/batches/testing"
"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/extsvc"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
func TestJobTokenStore_Create(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(logger, t))
tokenStore := store.NewJobTokenStore(&observation.TestContext, db)
repoStore := database.ReposWith(logger, db)
esStore := database.ExternalServicesWith(logger, db)
repo := bt.TestRepo(t, esStore, extsvc.KindGitHub)
ctx := context.Background()
err := repoStore.Create(ctx, repo)
require.NoError(t, err)
defer repoStore.Delete(ctx, repo.ID)
tests := []struct {
name string
jobId int
queue string
repo string
expectedErr error
}{
{
name: "Token created",
jobId: 10,
queue: "test",
repo: string(repo.Name),
},
{
name: "No jobId",
queue: "test",
repo: string(repo.Name),
expectedErr: errors.New("missing jobId"),
},
{
name: "No queue",
jobId: 10,
repo: string(repo.Name),
expectedErr: errors.New("missing queue"),
},
{
name: "No repo",
jobId: 10,
queue: "test",
expectedErr: errors.New("missing repo"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
token, err := tokenStore.Create(context.Background(), test.jobId, test.queue, test.repo)
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
} else {
require.NoError(t, err)
assert.NotEmpty(t, token)
}
})
}
}
func TestJobTokenStore_Create_Duplicate(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(logger, t))
tokenStore := store.NewJobTokenStore(&observation.TestContext, db)
repoStore := database.ReposWith(logger, db)
esStore := database.ExternalServicesWith(logger, db)
repo := bt.TestRepo(t, esStore, extsvc.KindGitHub)
ctx := context.Background()
err := repoStore.Create(ctx, repo)
require.NoError(t, err)
defer repoStore.Delete(ctx, repo.ID)
_, err = tokenStore.Create(context.Background(), 10, "test", string(repo.Name))
require.NoError(t, err)
_, err = tokenStore.Create(context.Background(), 10, "test", string(repo.Name))
require.Error(t, err)
assert.True(t, errors.Is(err, store.ErrJobTokenAlreadyCreated))
}
func TestJobTokenStore_Regenerate(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(logger, t))
tokenStore := store.NewJobTokenStore(&observation.TestContext, db)
repoStore := database.ReposWith(logger, db)
esStore := database.ExternalServicesWith(logger, db)
repo := bt.TestRepo(t, esStore, extsvc.KindGitHub)
ctx := context.Background()
err := repoStore.Create(ctx, repo)
require.NoError(t, err)
defer repoStore.Delete(ctx, repo.ID)
// Create an existing token to test against
_, err = tokenStore.Create(context.Background(), 10, "test", string(repo.Name))
require.NoError(t, err)
tests := []struct {
name string
jobId int
queue string
expectedErr error
}{
{
name: "Regenerate Token",
jobId: 10,
queue: "test",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
token, err := tokenStore.Regenerate(context.Background(), test.jobId, test.queue)
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
} else {
require.NoError(t, err)
assert.NotEmpty(t, token)
}
})
}
}
func TestJobTokenStore_Exists(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(logger, t))
tokenStore := store.NewJobTokenStore(&observation.TestContext, db)
repoStore := database.ReposWith(logger, db)
esStore := database.ExternalServicesWith(logger, db)
repo := bt.TestRepo(t, esStore, extsvc.KindGitHub)
ctx := context.Background()
err := repoStore.Create(ctx, repo)
require.NoError(t, err)
defer repoStore.Delete(ctx, repo.ID)
// Create an existing token to test against
_, err = tokenStore.Create(context.Background(), 10, "test", string(repo.Name))
require.NoError(t, err)
tests := []struct {
name string
jobId int
queue string
expectedExists bool
expectedErr error
}{
{
name: "Token exists",
jobId: 10,
queue: "test",
expectedExists: true,
},
{
name: "Token does not exist",
jobId: 100,
queue: "test1",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
exists, err := tokenStore.Exists(context.Background(), test.jobId, test.queue)
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
assert.False(t, exists)
} else {
require.NoError(t, err)
assert.Equal(t, test.expectedExists, exists)
}
})
}
}
func TestJobTokenStore_Get(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(logger, t))
tokenStore := store.NewJobTokenStore(&observation.TestContext, db)
repoStore := database.ReposWith(logger, db)
esStore := database.ExternalServicesWith(logger, db)
repo := bt.TestRepo(t, esStore, extsvc.KindGitHub)
ctx := context.Background()
err := repoStore.Create(ctx, repo)
require.NoError(t, err)
defer repoStore.Delete(ctx, repo.ID)
// Create an existing token to test against
_, err = tokenStore.Create(context.Background(), 10, "test", string(repo.Name))
require.NoError(t, err)
tests := []struct {
name string
jobId int
queue string
expectedJobToken store.JobToken
expectedErr error
}{
{
name: "Retrieve token",
jobId: 10,
queue: "test",
expectedJobToken: store.JobToken{
Id: 1,
JobID: 10,
Queue: "test",
Repo: string(repo.Name),
},
},
{
name: "Token does not exist",
jobId: 100,
queue: "test1",
expectedErr: errors.New("sql: no rows in result set"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
jobToken, err := tokenStore.Get(context.Background(), test.jobId, test.queue)
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
assert.Zero(t, jobToken.Id)
assert.Empty(t, jobToken.Value)
assert.Zero(t, jobToken.JobID)
assert.Empty(t, jobToken.Queue)
assert.Empty(t, jobToken.Repo)
} else {
require.NoError(t, err)
assert.Equal(t, test.expectedJobToken.Id, jobToken.Id)
assert.Equal(t, test.expectedJobToken.JobID, jobToken.JobID)
assert.Equal(t, test.expectedJobToken.Queue, jobToken.Queue)
assert.Equal(t, test.expectedJobToken.Repo, jobToken.Repo)
assert.NotEmpty(t, jobToken.Value)
}
})
}
}
func TestJobTokenStore_GetByToken(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(logger, t))
tokenStore := store.NewJobTokenStore(&observation.TestContext, db)
repoStore := database.ReposWith(logger, db)
esStore := database.ExternalServicesWith(logger, db)
repo := bt.TestRepo(t, esStore, extsvc.KindGitHub)
ctx := context.Background()
err := repoStore.Create(ctx, repo)
require.NoError(t, err)
defer repoStore.Delete(ctx, repo.ID)
// Create an existing token to test against
token, err := tokenStore.Create(context.Background(), 10, "test", string(repo.Name))
require.NoError(t, err)
require.NotEmpty(t, token)
tests := []struct {
name string
token string
expectedJobToken store.JobToken
expectedErr error
}{
{
name: "Retrieve token",
token: token,
expectedJobToken: store.JobToken{
Id: 1,
JobID: 10,
Queue: "test",
Repo: string(repo.Name),
},
},
{
name: "Token does not exist",
token: "666f6f626172", // foobar
expectedErr: errors.New("sql: no rows in result set"),
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
jobToken, err := tokenStore.GetByToken(context.Background(), test.token)
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
assert.Zero(t, jobToken.Id)
assert.Empty(t, jobToken.Value)
assert.Zero(t, jobToken.JobID)
assert.Empty(t, jobToken.Queue)
assert.Empty(t, jobToken.Repo)
} else {
require.NoError(t, err)
assert.Equal(t, test.expectedJobToken.Id, jobToken.Id)
assert.Equal(t, test.expectedJobToken.JobID, jobToken.JobID)
assert.Equal(t, test.expectedJobToken.Queue, jobToken.Queue)
assert.Equal(t, test.expectedJobToken.Repo, jobToken.Repo)
assert.NotEmpty(t, jobToken.Value)
}
})
}
}
func TestJobTokenStore_Delete(t *testing.T) {
logger := logtest.Scoped(t)
db := database.NewDB(logger, dbtest.NewDB(logger, t))
tokenStore := store.NewJobTokenStore(&observation.TestContext, db)
repoStore := database.ReposWith(logger, db)
esStore := database.ExternalServicesWith(logger, db)
repo := bt.TestRepo(t, esStore, extsvc.KindGitHub)
ctx := context.Background()
err := repoStore.Create(ctx, repo)
require.NoError(t, err)
defer repoStore.Delete(ctx, repo.ID)
// Create an existing token to test against
_, err = tokenStore.Create(context.Background(), 10, "test", string(repo.Name))
require.NoError(t, err)
tests := []struct {
name string
jobId int
queue string
expectedErr error
}{
{
name: "Token deleted",
jobId: 10,
queue: "test",
},
{
name: "Token does not exist",
jobId: 100,
queue: "test1",
},
}
for _, test := range tests {
t.Run(test.name, func(t *testing.T) {
err := tokenStore.Delete(context.Background(), test.jobId, test.queue)
if test.expectedErr != nil {
require.Error(t, err)
assert.Equal(t, test.expectedErr.Error(), err.Error())
} else {
require.NoError(t, err)
// Double-check the token has been deleted
exists, err := tokenStore.Exists(context.Background(), test.jobId, test.queue)
require.NoError(t, err)
assert.False(t, exists)
}
})
}
}

View File

@ -0,0 +1,75 @@
package types
import (
"github.com/sourcegraph/sourcegraph/internal/executor"
)
type DequeueRequest struct {
ExecutorName string `json:"executorName"`
Version string `json:"version"`
NumCPUs int `json:"numCPUs,omitempty"`
Memory string `json:"memory,omitempty"`
DiskSpace string `json:"diskSpace,omitempty"`
}
type JobOperationRequest struct {
ExecutorName string `json:"executorName"`
JobID int `json:"jobId"`
}
type AddExecutionLogEntryRequest struct {
JobOperationRequest
executor.ExecutionLogEntry
}
type UpdateExecutionLogEntryRequest struct {
JobOperationRequest
EntryID int `json:"entryId"`
executor.ExecutionLogEntry
}
type MarkCompleteRequest struct {
JobOperationRequest
}
type MarkErroredRequest struct {
JobOperationRequest
ErrorMessage string `json:"errorMessage"`
}
type HeartbeatRequest struct {
// TODO: This field is set to become unnecessary in Sourcegraph 4.4.
Version ExecutorAPIVersion `json:"version"`
ExecutorName string `json:"executorName"`
JobIDs []int `json:"jobIds"`
// Telemetry data.
OS string `json:"os"`
Architecture string `json:"architecture"`
DockerVersion string `json:"dockerVersion"`
ExecutorVersion string `json:"executorVersion"`
GitVersion string `json:"gitVersion"`
IgniteVersion string `json:"igniteVersion"`
SrcCliVersion string `json:"srcCliVersion"`
PrometheusMetrics string `json:"prometheusMetrics"`
}
type ExecutorAPIVersion string
const (
ExecutorAPIVersion2 ExecutorAPIVersion = "V2"
)
type HeartbeatResponse struct {
KnownIDs []int `json:"knownIds"`
CancelIDs []int `json:"cancelIds"`
}
// TODO: Deprecated. Can be removed in Sourcegraph 4.4.
type CanceledJobsRequest struct {
KnownJobIDs []int `json:"knownJobIds"`
ExecutorName string `json:"executorName"`
}
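
These request types are marshalled as plain JSON by the API client. A quick, self-contained sketch of what a heartbeat payload looks like on the wire, assuming the new enterprise/internal/executor/types import path seen in the rewritten imports above (the executor name, job IDs, and telemetry values are invented):

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	"github.com/sourcegraph/sourcegraph/enterprise/internal/executor/types"
)

func main() {
	req := types.HeartbeatRequest{
		Version:       types.ExecutorAPIVersion2,
		ExecutorName:  "executor-test-1",
		JobIDs:        []int{42, 43},
		OS:            "linux",
		Architecture:  "amd64",
		DockerVersion: "23.0.1",
	}
	b, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		log.Fatal(err)
	}
	// Telemetry fields left at their zero value still appear in the output,
	// since none of them carry an omitempty tag.
	fmt.Println(string(b))
}
```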

View File

@ -1,10 +1,8 @@
package executor
package types
import (
"encoding/json"
"time"
"github.com/sourcegraph/sourcegraph/internal/executor"
)
// Job describes a series of steps to perform within an executor.
@ -18,6 +16,9 @@ type Job struct {
// that different queues can share identifiers.
ID int `json:"id"`
// Token is the authentication token for the specific Job.
Token string `json:"token"`
// RepositoryName is the name of the repository to be cloned into the
// workspace prior to job execution.
RepositoryName string `json:"repositoryName"`
@ -73,6 +74,7 @@ func (j Job) MarshalJSON() ([]byte, error) {
v2 := v2Job{
Version: j.Version,
ID: j.ID,
Token: j.Token,
RepositoryName: j.RepositoryName,
RepositoryDirectory: j.RepositoryDirectory,
Commit: j.Commit,
@ -92,6 +94,7 @@ func (j Job) MarshalJSON() ([]byte, error) {
}
v1 := v1Job{
ID: j.ID,
Token: j.Token,
RepositoryName: j.RepositoryName,
RepositoryDirectory: j.RepositoryDirectory,
Commit: j.Commit,
@ -126,6 +129,7 @@ func (j *Job) UnmarshalJSON(data []byte) error {
}
j.Version = v2.Version
j.ID = v2.ID
j.Token = v2.Token
j.RepositoryName = v2.RepositoryName
j.RepositoryDirectory = v2.RepositoryDirectory
j.Commit = v2.Commit
@ -147,6 +151,7 @@ func (j *Job) UnmarshalJSON(data []byte) error {
return err
}
j.ID = v1.ID
j.Token = v1.Token
j.RepositoryName = v1.RepositoryName
j.RepositoryDirectory = v1.RepositoryDirectory
j.Commit = v1.Commit
@ -175,6 +180,7 @@ type versionJob struct {
type v2Job struct {
Version int `json:"version,omitempty"`
ID int `json:"id"`
Token string `json:"token"`
RepositoryName string `json:"repositoryName"`
RepositoryDirectory string `json:"repositoryDirectory"`
Commit string `json:"commit"`
@ -190,6 +196,7 @@ type v2Job struct {
type v1Job struct {
ID int `json:"id"`
Token string `json:"token"`
RepositoryName string `json:"repositoryName"`
RepositoryDirectory string `json:"repositoryDirectory"`
Commit string `json:"commit"`
@ -271,75 +278,6 @@ type CliStep struct {
Env []string `json:"env"`
}
type DequeueRequest struct {
ExecutorName string `json:"executorName"`
Version string `json:"version"`
NumCPUs int `json:"numCPUs,omitempty"`
Memory string `json:"memory,omitempty"`
DiskSpace string `json:"diskSpace,omitempty"`
}
type AddExecutionLogEntryRequest struct {
ExecutorName string `json:"executorName"`
JobID int `json:"jobId"`
executor.ExecutionLogEntry
}
type UpdateExecutionLogEntryRequest struct {
ExecutorName string `json:"executorName"`
JobID int `json:"jobId"`
EntryID int `json:"entryId"`
executor.ExecutionLogEntry
}
type MarkCompleteRequest struct {
ExecutorName string `json:"executorName"`
JobID int `json:"jobId"`
}
type MarkErroredRequest struct {
ExecutorName string `json:"executorName"`
JobID int `json:"jobId"`
ErrorMessage string `json:"errorMessage"`
}
type ExecutorAPIVersion string
const (
ExecutorAPIVersion2 ExecutorAPIVersion = "V2"
)
type HeartbeatRequest struct {
// TODO: This field is set to become unnecessary in Sourcegraph 4.4.
Version ExecutorAPIVersion `json:"version"`
ExecutorName string `json:"executorName"`
JobIDs []int `json:"jobIds"`
// Telemetry data.
OS string `json:"os"`
Architecture string `json:"architecture"`
DockerVersion string `json:"dockerVersion"`
ExecutorVersion string `json:"executorVersion"`
GitVersion string `json:"gitVersion"`
IgniteVersion string `json:"igniteVersion"`
SrcCliVersion string `json:"srcCliVersion"`
PrometheusMetrics string `json:"prometheusMetrics"`
}
type HeartbeatResponse struct {
KnownIDs []int `json:"knownIds"`
CancelIDs []int `json:"cancelIds"`
}
// TODO: Deprecated. Can be removed in Sourcegraph 4.4.
type CanceledJobsRequest struct {
KnownJobIDs []int `json:"knownJobIds"`
ExecutorName string `json:"executorName"`
}
// DockerAuthConfig represents a subset of the docker cli config with the necessary
// fields to make authentication work.
type DockerAuthConfig struct {

View File

@ -1,4 +1,4 @@
package executor
package types
import (
"encoding/json"
@ -61,6 +61,7 @@ func TestJob_MarshalJSON(t *testing.T) {
expected: `{
"version": 2,
"id": 1,
"token": "",
"repositoryName": "my-repo",
"repositoryDirectory": "foo/bar",
"commit": "xyz",
@ -131,6 +132,7 @@ func TestJob_MarshalJSON(t *testing.T) {
},
expected: `{
"id": 1,
"token": "",
"repositoryName": "my-repo",
"repositoryDirectory": "foo/bar",
"commit": "xyz",

View File

@ -44,7 +44,7 @@ type ExecutorStore interface {
// the Sourcegraph instance in at least the given duration.
DeleteInactiveHeartbeats(ctx context.Context, minAge time.Duration) error
// ExecutorByHostname returns an executor resolver for the given hostname, or
// GetByHostname returns an executor resolver for the given hostname, or
// nil when there is no executor record matching the given hostname.
//
// 🚨 SECURITY: This always returns nil for non-site admins.

View File

@ -601,6 +601,15 @@
"Increment": 1,
"CycleOption": "NO"
},
{
"Name": "executor_job_tokens_id_seq",
"TypeName": "integer",
"StartValue": 1,
"MinimumValue": 1,
"MaximumValue": 2147483647,
"Increment": 1,
"CycleOption": "NO"
},
{
"Name": "executor_secret_access_logs_id_seq",
"TypeName": "integer",
@ -9630,6 +9639,137 @@
"Constraints": null,
"Triggers": []
},
{
"Name": "executor_job_tokens",
"Comment": "",
"Columns": [
{
"Name": "created_at",
"Index": 6,
"TypeName": "timestamp with time zone",
"IsNullable": false,
"Default": "now()",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": ""
},
{
"Name": "id",
"Index": 1,
"TypeName": "integer",
"IsNullable": false,
"Default": "nextval('executor_job_tokens_id_seq'::regclass)",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": ""
},
{
"Name": "job_id",
"Index": 3,
"TypeName": "bigint",
"IsNullable": false,
"Default": "",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": ""
},
{
"Name": "queue",
"Index": 4,
"TypeName": "text",
"IsNullable": false,
"Default": "",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": ""
},
{
"Name": "repo_id",
"Index": 5,
"TypeName": "bigint",
"IsNullable": false,
"Default": "",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": ""
},
{
"Name": "updated_at",
"Index": 7,
"TypeName": "timestamp with time zone",
"IsNullable": false,
"Default": "now()",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": ""
},
{
"Name": "value_sha256",
"Index": 2,
"TypeName": "bytea",
"IsNullable": false,
"Default": "",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": ""
}
],
"Indexes": [
{
"Name": "executor_job_tokens_job_id_queue_repo_id_key",
"IsPrimaryKey": false,
"IsUnique": true,
"IsExclusion": false,
"IsDeferrable": false,
"IndexDefinition": "CREATE UNIQUE INDEX executor_job_tokens_job_id_queue_repo_id_key ON executor_job_tokens USING btree (job_id, queue, repo_id)",
"ConstraintType": "u",
"ConstraintDefinition": "UNIQUE (job_id, queue, repo_id)"
},
{
"Name": "executor_job_tokens_pkey",
"IsPrimaryKey": true,
"IsUnique": true,
"IsExclusion": false,
"IsDeferrable": false,
"IndexDefinition": "CREATE UNIQUE INDEX executor_job_tokens_pkey ON executor_job_tokens USING btree (id)",
"ConstraintType": "p",
"ConstraintDefinition": "PRIMARY KEY (id)"
},
{
"Name": "executor_job_tokens_value_sha256_key",
"IsPrimaryKey": false,
"IsUnique": true,
"IsExclusion": false,
"IsDeferrable": false,
"IndexDefinition": "CREATE UNIQUE INDEX executor_job_tokens_value_sha256_key ON executor_job_tokens USING btree (value_sha256)",
"ConstraintType": "u",
"ConstraintDefinition": "UNIQUE (value_sha256)"
}
],
"Constraints": null,
"Triggers": []
},
{
"Name": "executor_secret_access_logs",
"Comment": "",

View File

@ -1309,6 +1309,24 @@ Tracks the most recent activity of executors attached to this Sourcegraph instan
**src_cli_version**: The version of src-cli used by the executor.
# Table "public.executor_job_tokens"
```
Column | Type | Collation | Nullable | Default
--------------+--------------------------+-----------+----------+-------------------------------------------------
id | integer | | not null | nextval('executor_job_tokens_id_seq'::regclass)
value_sha256 | bytea | | not null |
job_id | bigint | | not null |
queue | text | | not null |
repo_id | bigint | | not null |
created_at | timestamp with time zone | | not null | now()
updated_at | timestamp with time zone | | not null | now()
Indexes:
"executor_job_tokens_pkey" PRIMARY KEY, btree (id)
"executor_job_tokens_job_id_queue_repo_id_key" UNIQUE CONSTRAINT, btree (job_id, queue, repo_id)
"executor_job_tokens_value_sha256_key" UNIQUE CONSTRAINT, btree (value_sha256)
```
# Table "public.executor_secret_access_logs"
```
Column | Type | Collation | Nullable | Default

View File

@ -45,16 +45,16 @@ func (s *storeShim[T]) Heartbeat(ctx context.Context, ids []int) (knownIDs, canc
return s.Store.Heartbeat(ctx, ids, store.HeartbeatOptions{})
}
func (s *storeShim[T]) MarkComplete(ctx context.Context, id int) (bool, error) {
return s.Store.MarkComplete(ctx, id, store.MarkFinalOptions{})
func (s *storeShim[T]) MarkComplete(ctx context.Context, rec T) (bool, error) {
return s.Store.MarkComplete(ctx, rec.RecordID(), store.MarkFinalOptions{})
}
func (s *storeShim[T]) MarkFailed(ctx context.Context, id int, failureMessage string) (bool, error) {
return s.Store.MarkFailed(ctx, id, failureMessage, store.MarkFinalOptions{})
func (s *storeShim[T]) MarkFailed(ctx context.Context, rec T, failureMessage string) (bool, error) {
return s.Store.MarkFailed(ctx, rec.RecordID(), failureMessage, store.MarkFinalOptions{})
}
func (s *storeShim[T]) MarkErrored(ctx context.Context, id int, errorMessage string) (bool, error) {
return s.Store.MarkErrored(ctx, id, errorMessage, store.MarkFinalOptions{})
func (s *storeShim[T]) MarkErrored(ctx context.Context, rec T, errorMessage string) (bool, error) {
return s.Store.MarkErrored(ctx, rec.RecordID(), errorMessage, store.MarkFinalOptions{})
}
// ErrNotConditions occurs when a PreDequeue handler returns non-sql query extra arguments.

View File

@ -202,17 +202,17 @@ func NewMockStore[T Record]() *MockStore[T] {
},
},
MarkCompleteFunc: &StoreMarkCompleteFunc[T]{
defaultHook: func(context.Context, int) (r0 bool, r1 error) {
defaultHook: func(context.Context, T) (r0 bool, r1 error) {
return
},
},
MarkErroredFunc: &StoreMarkErroredFunc[T]{
defaultHook: func(context.Context, int, string) (r0 bool, r1 error) {
defaultHook: func(context.Context, T, string) (r0 bool, r1 error) {
return
},
},
MarkFailedFunc: &StoreMarkFailedFunc[T]{
defaultHook: func(context.Context, int, string) (r0 bool, r1 error) {
defaultHook: func(context.Context, T, string) (r0 bool, r1 error) {
return
},
},
@ -239,17 +239,17 @@ func NewStrictMockStore[T Record]() *MockStore[T] {
},
},
MarkCompleteFunc: &StoreMarkCompleteFunc[T]{
defaultHook: func(context.Context, int) (bool, error) {
defaultHook: func(context.Context, T) (bool, error) {
panic("unexpected invocation of MockStore.MarkComplete")
},
},
MarkErroredFunc: &StoreMarkErroredFunc[T]{
defaultHook: func(context.Context, int, string) (bool, error) {
defaultHook: func(context.Context, T, string) (bool, error) {
panic("unexpected invocation of MockStore.MarkErrored")
},
},
MarkFailedFunc: &StoreMarkFailedFunc[T]{
defaultHook: func(context.Context, int, string) (bool, error) {
defaultHook: func(context.Context, T, string) (bool, error) {
panic("unexpected invocation of MockStore.MarkFailed")
},
},
@ -512,15 +512,15 @@ func (c StoreHeartbeatFuncCall[T]) Results() []interface{} {
// StoreMarkCompleteFunc describes the behavior when the MarkComplete method
// of the parent MockStore instance is invoked.
type StoreMarkCompleteFunc[T Record] struct {
defaultHook func(context.Context, int) (bool, error)
hooks []func(context.Context, int) (bool, error)
defaultHook func(context.Context, T) (bool, error)
hooks []func(context.Context, T) (bool, error)
history []StoreMarkCompleteFuncCall[T]
mutex sync.Mutex
}
// MarkComplete delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockStore[T]) MarkComplete(v0 context.Context, v1 int) (bool, error) {
func (m *MockStore[T]) MarkComplete(v0 context.Context, v1 T) (bool, error) {
r0, r1 := m.MarkCompleteFunc.nextHook()(v0, v1)
m.MarkCompleteFunc.appendCall(StoreMarkCompleteFuncCall[T]{v0, v1, r0, r1})
return r0, r1
@ -528,7 +528,7 @@ func (m *MockStore[T]) MarkComplete(v0 context.Context, v1 int) (bool, error) {
// SetDefaultHook sets function that is called when the MarkComplete method
// of the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreMarkCompleteFunc[T]) SetDefaultHook(hook func(context.Context, int) (bool, error)) {
func (f *StoreMarkCompleteFunc[T]) SetDefaultHook(hook func(context.Context, T) (bool, error)) {
f.defaultHook = hook
}
@ -536,7 +536,7 @@ func (f *StoreMarkCompleteFunc[T]) SetDefaultHook(hook func(context.Context, int
// MarkComplete method of the parent MockStore instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *StoreMarkCompleteFunc[T]) PushHook(hook func(context.Context, int) (bool, error)) {
func (f *StoreMarkCompleteFunc[T]) PushHook(hook func(context.Context, T) (bool, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -545,19 +545,19 @@ func (f *StoreMarkCompleteFunc[T]) PushHook(hook func(context.Context, int) (boo
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *StoreMarkCompleteFunc[T]) SetDefaultReturn(r0 bool, r1 error) {
f.SetDefaultHook(func(context.Context, int) (bool, error) {
f.SetDefaultHook(func(context.Context, T) (bool, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *StoreMarkCompleteFunc[T]) PushReturn(r0 bool, r1 error) {
f.PushHook(func(context.Context, int) (bool, error) {
f.PushHook(func(context.Context, T) (bool, error) {
return r0, r1
})
}
func (f *StoreMarkCompleteFunc[T]) nextHook() func(context.Context, int) (bool, error) {
func (f *StoreMarkCompleteFunc[T]) nextHook() func(context.Context, T) (bool, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -595,7 +595,7 @@ type StoreMarkCompleteFuncCall[T Record] struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
Arg1 T
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 bool
@ -619,15 +619,15 @@ func (c StoreMarkCompleteFuncCall[T]) Results() []interface{} {
// StoreMarkErroredFunc describes the behavior when the MarkErrored method
// of the parent MockStore instance is invoked.
type StoreMarkErroredFunc[T Record] struct {
defaultHook func(context.Context, int, string) (bool, error)
hooks []func(context.Context, int, string) (bool, error)
defaultHook func(context.Context, T, string) (bool, error)
hooks []func(context.Context, T, string) (bool, error)
history []StoreMarkErroredFuncCall[T]
mutex sync.Mutex
}
// MarkErrored delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockStore[T]) MarkErrored(v0 context.Context, v1 int, v2 string) (bool, error) {
func (m *MockStore[T]) MarkErrored(v0 context.Context, v1 T, v2 string) (bool, error) {
r0, r1 := m.MarkErroredFunc.nextHook()(v0, v1, v2)
m.MarkErroredFunc.appendCall(StoreMarkErroredFuncCall[T]{v0, v1, v2, r0, r1})
return r0, r1
@ -635,7 +635,7 @@ func (m *MockStore[T]) MarkErrored(v0 context.Context, v1 int, v2 string) (bool,
// SetDefaultHook sets function that is called when the MarkErrored method
// of the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreMarkErroredFunc[T]) SetDefaultHook(hook func(context.Context, int, string) (bool, error)) {
func (f *StoreMarkErroredFunc[T]) SetDefaultHook(hook func(context.Context, T, string) (bool, error)) {
f.defaultHook = hook
}
@ -643,7 +643,7 @@ func (f *StoreMarkErroredFunc[T]) SetDefaultHook(hook func(context.Context, int,
// MarkErrored method of the parent MockStore instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *StoreMarkErroredFunc[T]) PushHook(hook func(context.Context, int, string) (bool, error)) {
func (f *StoreMarkErroredFunc[T]) PushHook(hook func(context.Context, T, string) (bool, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -652,19 +652,19 @@ func (f *StoreMarkErroredFunc[T]) PushHook(hook func(context.Context, int, strin
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *StoreMarkErroredFunc[T]) SetDefaultReturn(r0 bool, r1 error) {
f.SetDefaultHook(func(context.Context, int, string) (bool, error) {
f.SetDefaultHook(func(context.Context, T, string) (bool, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *StoreMarkErroredFunc[T]) PushReturn(r0 bool, r1 error) {
f.PushHook(func(context.Context, int, string) (bool, error) {
f.PushHook(func(context.Context, T, string) (bool, error) {
return r0, r1
})
}
func (f *StoreMarkErroredFunc[T]) nextHook() func(context.Context, int, string) (bool, error) {
func (f *StoreMarkErroredFunc[T]) nextHook() func(context.Context, T, string) (bool, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -702,7 +702,7 @@ type StoreMarkErroredFuncCall[T Record] struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
Arg1 T
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
@ -729,15 +729,15 @@ func (c StoreMarkErroredFuncCall[T]) Results() []interface{} {
// StoreMarkFailedFunc describes the behavior when the MarkFailed method of
// the parent MockStore instance is invoked.
type StoreMarkFailedFunc[T Record] struct {
defaultHook func(context.Context, int, string) (bool, error)
hooks []func(context.Context, int, string) (bool, error)
defaultHook func(context.Context, T, string) (bool, error)
hooks []func(context.Context, T, string) (bool, error)
history []StoreMarkFailedFuncCall[T]
mutex sync.Mutex
}
// MarkFailed delegates to the next hook function in the queue and stores
// the parameter and result values of this invocation.
func (m *MockStore[T]) MarkFailed(v0 context.Context, v1 int, v2 string) (bool, error) {
func (m *MockStore[T]) MarkFailed(v0 context.Context, v1 T, v2 string) (bool, error) {
r0, r1 := m.MarkFailedFunc.nextHook()(v0, v1, v2)
m.MarkFailedFunc.appendCall(StoreMarkFailedFuncCall[T]{v0, v1, v2, r0, r1})
return r0, r1
@ -745,7 +745,7 @@ func (m *MockStore[T]) MarkFailed(v0 context.Context, v1 int, v2 string) (bool,
// SetDefaultHook sets function that is called when the MarkFailed method of
// the parent MockStore instance is invoked and the hook queue is empty.
func (f *StoreMarkFailedFunc[T]) SetDefaultHook(hook func(context.Context, int, string) (bool, error)) {
func (f *StoreMarkFailedFunc[T]) SetDefaultHook(hook func(context.Context, T, string) (bool, error)) {
f.defaultHook = hook
}
@ -753,7 +753,7 @@ func (f *StoreMarkFailedFunc[T]) SetDefaultHook(hook func(context.Context, int,
// MarkFailed method of the parent MockStore instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *StoreMarkFailedFunc[T]) PushHook(hook func(context.Context, int, string) (bool, error)) {
func (f *StoreMarkFailedFunc[T]) PushHook(hook func(context.Context, T, string) (bool, error)) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
@ -762,19 +762,19 @@ func (f *StoreMarkFailedFunc[T]) PushHook(hook func(context.Context, int, string
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *StoreMarkFailedFunc[T]) SetDefaultReturn(r0 bool, r1 error) {
f.SetDefaultHook(func(context.Context, int, string) (bool, error) {
f.SetDefaultHook(func(context.Context, T, string) (bool, error) {
return r0, r1
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *StoreMarkFailedFunc[T]) PushReturn(r0 bool, r1 error) {
f.PushHook(func(context.Context, int, string) (bool, error) {
f.PushHook(func(context.Context, T, string) (bool, error) {
return r0, r1
})
}
func (f *StoreMarkFailedFunc[T]) nextHook() func(context.Context, int, string) (bool, error) {
func (f *StoreMarkFailedFunc[T]) nextHook() func(context.Context, T, string) (bool, error) {
f.mutex.Lock()
defer f.mutex.Unlock()
@ -812,7 +812,7 @@ type StoreMarkFailedFuncCall[T Record] struct {
Arg0 context.Context
// Arg1 is the value of the 2nd argument passed to this method
// invocation.
Arg1 int
Arg1 T
// Arg2 is the value of the 3rd argument passed to this method
// invocation.
Arg2 string
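With the generic parameter now flowing through the mark hooks, test setup passes the record itself instead of a bare ID. An illustrative fragment (it assumes the MockStore and TestRecord helpers shown elsewhere in this diff, plus a `context` import, so it is a sketch rather than a drop-in test):

```go
// newStoreForTest wires default hooks using the new record-based signatures.
func newStoreForTest() *MockStore[*TestRecord] {
	store := NewMockStore[*TestRecord]()
	store.MarkCompleteFunc.SetDefaultHook(func(ctx context.Context, rec *TestRecord) (bool, error) {
		// The hook now receives the record; its ID is still available via RecordID().
		return rec.RecordID() == 42, nil
	})
	store.MarkErroredFunc.PushReturn(true, nil)
	return store
}
```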

View File

@ -26,13 +26,13 @@ type Store[T Record] interface {
// MarkComplete attempts to update the state of the record to complete. This method returns a boolean flag indicating
// if the record was updated.
MarkComplete(ctx context.Context, id int) (bool, error)
MarkComplete(ctx context.Context, rec T) (bool, error)
// MarkErrored attempts to update the state of the record to errored. This method returns a boolean flag indicating
// if the record was updated.
MarkErrored(ctx context.Context, id int, failureMessage string) (bool, error)
MarkErrored(ctx context.Context, rec T, failureMessage string) (bool, error)
// MarkFailed attempts to update the state of the record to failed. This method returns a boolean flag indicating
// if the record was updated.
MarkFailed(ctx context.Context, id int, failureMessage string) (bool, error)
MarkFailed(ctx context.Context, rec T, failureMessage string) (bool, error)
}
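The finalization methods now receive the full record rather than its bare ID; implementations can still recover the numeric ID through `RecordID()`. A minimal, self-contained sketch of a conforming store (illustrative only, not upstream code):

```go
package example

import "context"

// Record mirrors the worker's record contract for this sketch: only the
// numeric ID is needed here.
type Record interface {
	RecordID() int
}

// memStore is an illustrative in-memory implementation of the three
// finalization methods above.
type memStore[T Record] struct {
	states map[int]string // record ID -> final state
}

func (s *memStore[T]) MarkComplete(ctx context.Context, rec T) (bool, error) {
	s.states[rec.RecordID()] = "completed"
	return true, nil
}

func (s *memStore[T]) MarkErrored(ctx context.Context, rec T, failureMessage string) (bool, error) {
	s.states[rec.RecordID()] = "errored: " + failureMessage
	return true, nil
}

func (s *memStore[T]) MarkFailed(ctx context.Context, rec T, failureMessage string) (bool, error) {
	s.states[rec.RecordID()] = "failed: " + failureMessage
	return true, nil
}
```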

View File

@ -9,6 +9,7 @@ import (
"github.com/derision-test/glock"
otlog "github.com/opentracing/opentracing-go/log"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/internal/errcode"
"github.com/sourcegraph/sourcegraph/internal/goroutine/recorder"
"github.com/sourcegraph/sourcegraph/internal/hostname"
@ -399,19 +400,19 @@ func (w *Worker[T]) handle(ctx, workerContext context.Context, record T) (err er
}
if errcode.IsNonRetryable(handleErr) || handleErr != nil && w.isJobCanceled(record.RecordID(), handleErr, ctx.Err()) {
if marked, markErr := w.store.MarkFailed(workerContext, record.RecordID(), handleErr.Error()); markErr != nil {
if marked, markErr := w.store.MarkFailed(workerContext, record, handleErr.Error()); markErr != nil {
return errors.Wrap(markErr, "store.MarkFailed")
} else if marked {
handleLog.Warn("Marked record as failed", log.Error(handleErr))
}
} else if handleErr != nil {
if marked, markErr := w.store.MarkErrored(workerContext, record.RecordID(), handleErr.Error()); markErr != nil {
if marked, markErr := w.store.MarkErrored(workerContext, record, handleErr.Error()); markErr != nil {
return errors.Wrap(markErr, "store.MarkErrored")
} else if marked {
handleLog.Warn("Marked record as errored", log.Error(handleErr))
}
} else {
if marked, markErr := w.store.MarkComplete(workerContext, record.RecordID()); markErr != nil {
if marked, markErr := w.store.MarkComplete(workerContext, record); markErr != nil {
return errors.Wrap(markErr, "store.MarkComplete")
} else if marked {
handleLog.Debug("Marked record as complete")
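Condensed, the branch above applies a simple policy, handing the record itself (not its ID) to the store. A self-contained sketch of that policy (logging, error wrapping, and the errcode/cancellation checks of the real worker are collapsed into boolean inputs here):

```go
package example

import "context"

// finalizer captures just the three mark methods used below; it mirrors the
// Store interface shown earlier in this diff.
type finalizer[T interface{ RecordID() int }] interface {
	MarkComplete(ctx context.Context, rec T) (bool, error)
	MarkErrored(ctx context.Context, rec T, failureMessage string) (bool, error)
	MarkFailed(ctx context.Context, rec T, failureMessage string) (bool, error)
}

// finalize marks non-retryable or canceled errors as failed, other errors as
// errored, and a nil error as complete.
func finalize[T interface{ RecordID() int }](ctx context.Context, s finalizer[T], rec T, handleErr error, nonRetryable, canceled bool) error {
	switch {
	case handleErr != nil && (nonRetryable || canceled):
		_, err := s.MarkFailed(ctx, rec, handleErr.Error())
		return err
	case handleErr != nil:
		_, err := s.MarkErrored(ctx, rec, handleErr.Error())
		return err
	default:
		_, err := s.MarkComplete(ctx, rec)
		return err
	}
}
```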

View File

@ -56,7 +56,7 @@ func TestWorkerHandlerSuccess(t *testing.T) {
if callCount := len(store.MarkCompleteFunc.History()); callCount != 1 {
t.Errorf("unexpected mark complete call count. want=%d have=%d", 1, callCount)
} else if id := store.MarkCompleteFunc.History()[0].Arg1; id != 42 {
} else if id := store.MarkCompleteFunc.History()[0].Arg1.RecordID(); id != 42 {
t.Errorf("unexpected id argument to mark complete. want=%v have=%v", 42, id)
}
}
@ -93,7 +93,7 @@ func TestWorkerHandlerFailure(t *testing.T) {
if callCount := len(store.MarkErroredFunc.History()); callCount != 1 {
t.Errorf("unexpected mark errored call count. want=%d have=%d", 1, callCount)
} else if id := store.MarkErroredFunc.History()[0].Arg1; id != 42 {
} else if id := store.MarkErroredFunc.History()[0].Arg1.RecordID(); id != 42 {
t.Errorf("unexpected id argument to mark errored. want=%v have=%v", 42, id)
} else if failureMessage := store.MarkErroredFunc.History()[0].Arg2; failureMessage != "oops" {
t.Errorf("unexpected failure message argument to mark errored. want=%q have=%q", "oops", failureMessage)
@ -139,7 +139,7 @@ func TestWorkerHandlerNonRetryableFailure(t *testing.T) {
if callCount := len(store.MarkFailedFunc.History()); callCount != 1 {
t.Errorf("unexpected mark failed call count. want=%d have=%d", 1, callCount)
} else if id := store.MarkFailedFunc.History()[0].Arg1; id != 42 {
} else if id := store.MarkFailedFunc.History()[0].Arg1.RecordID(); id != 42 {
t.Errorf("unexpected id argument to mark failed. want=%v have=%v", 42, id)
} else if failureMessage := store.MarkFailedFunc.History()[0].Arg2; failureMessage != testErr.Error() {
t.Errorf("unexpected failure message argument to mark failed. want=%q have=%q", testErr.Error(), failureMessage)
@ -496,7 +496,7 @@ func TestWorkerCancelJobs(t *testing.T) {
// Record when markFailed is called.
markedFailedCalled := make(chan struct{})
store.MarkFailedFunc.SetDefaultHook(func(c context.Context, i int, s string) (bool, error) {
store.MarkFailedFunc.SetDefaultHook(func(c context.Context, record *TestRecord, s string) (bool, error) {
close(markedFailedCalled)
return true, nil
})
@ -571,7 +571,7 @@ func TestWorkerDeadline(t *testing.T) {
// Record when markErrored is called.
markedErroredCalled := make(chan struct{})
store.MarkErroredFunc.SetDefaultHook(func(c context.Context, i int, s string) (bool, error) {
store.MarkErroredFunc.SetDefaultHook(func(c context.Context, record *TestRecord, s string) (bool, error) {
if !strings.Contains(s, "job exceeded maximum execution time of 10ms") {
t.Fatal("incorrect error message")
}

View File

@ -6,6 +6,8 @@ import (
"github.com/Masterminds/semver"
)
var buildDate = regexp.MustCompile(`\d+_(\d{4}-\d{2}-\d{2})_(\d+\.\d+-)?[a-z0-9]{7,}(_patch)?$`)
// NOTE: A version with a prerelease suffix (e.g. the "-rc.3" of "3.35.1-rc.3") is not
// considered by semver to satisfy a constraint without a prerelease suffix, regardless of
// whether or not the major/minor/patch version is greater than or equal to that of the
@ -26,7 +28,6 @@ func CheckSourcegraphVersion(version, constraint, minDate string) (bool, error)
// version string, we match on 7 or more characters. Currently, the Sourcegraph version
// is expected to return 12:
// https://sourcegraph.com/github.com/sourcegraph/sourcegraph/-/blob/enterprise/dev/ci/internal/ci/config.go?L96.
buildDate := regexp.MustCompile(`\d+_(\d{4}-\d{2}-\d{2})_(\d+\.\d+-)?[a-z0-9]{7,}(_patch)?$`)
matches := buildDate.FindStringSubmatch(version)
if len(matches) > 1 {
return matches[1] >= minDate, nil
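When the version string carries an embedded build date, the function compares that date against `minDate` rather than applying the semver constraint. A small self-contained example of that comparison, using the same pattern as the package-level `buildDate` variable (the version string below is made up for illustration):

```go
package main

import (
	"fmt"
	"regexp"
)

// buildDate mirrors the package-level pattern shown above.
var buildDate = regexp.MustCompile(`\d+_(\d{4}-\d{2}-\d{2})_(\d+\.\d+-)?[a-z0-9]{7,}(_patch)?$`)

func main() {
	version := "169135_2023-02-28_4.5-abcdef123456" // hypothetical build string
	minDate := "2023-01-01"

	if matches := buildDate.FindStringSubmatch(version); len(matches) > 1 {
		// ISO dates compare correctly as plain strings, as in CheckSourcegraphVersion.
		fmt.Printf("build date %s, satisfies minDate: %v\n", matches[1], matches[1] >= minDate)
	}
}
```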

View File

@ -0,0 +1 @@
DROP TABLE IF EXISTS executor_job_tokens;

View File

@ -0,0 +1,2 @@
name: executor job tokens
parents: [1674669794]

View File

@ -0,0 +1,20 @@
CREATE TABLE IF NOT EXISTS executor_job_tokens
(
id SERIAL PRIMARY KEY,
value_sha256 bytea NOT NULL,
job_id BIGINT NOT NULL,
queue TEXT NOT NULL,
repo_id BIGINT NOT NULL,
created_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL,
updated_at TIMESTAMP WITH TIME ZONE DEFAULT now() NOT NULL
);
ALTER TABLE executor_job_tokens
DROP CONSTRAINT IF EXISTS executor_job_tokens_value_sha256_key;
ALTER TABLE ONLY executor_job_tokens
ADD CONSTRAINT executor_job_tokens_value_sha256_key UNIQUE (value_sha256);
ALTER TABLE executor_job_tokens
DROP CONSTRAINT IF EXISTS executor_job_tokens_job_id_queue_repo_id_key;
ALTER TABLE ONLY executor_job_tokens
ADD CONSTRAINT executor_job_tokens_job_id_queue_repo_id_key UNIQUE (job_id, queue, repo_id);
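The composite `(job_id, queue, repo_id)` key means at most one token row exists per job, which also makes regeneration straightforward. As an illustration only (this statement is an assumption about how the table could be used, not code from this change):

```go
package example

// upsertJobToken leans on the (job_id, queue, repo_id) unique constraint from
// the migration above so that regenerating a token for the same job replaces
// the stored digest instead of adding a second row.
const upsertJobToken = `
INSERT INTO executor_job_tokens (job_id, queue, repo_id, value_sha256)
VALUES ($1, $2, $3, $4)
ON CONFLICT (job_id, queue, repo_id)
DO UPDATE SET value_sha256 = EXCLUDED.value_sha256, updated_at = now()
`
```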

View File

@ -2078,6 +2078,26 @@ CREATE SEQUENCE executor_heartbeats_id_seq
ALTER SEQUENCE executor_heartbeats_id_seq OWNED BY executor_heartbeats.id;
CREATE TABLE executor_job_tokens (
id integer NOT NULL,
value_sha256 bytea NOT NULL,
job_id bigint NOT NULL,
queue text NOT NULL,
repo_id bigint NOT NULL,
created_at timestamp with time zone DEFAULT now() NOT NULL,
updated_at timestamp with time zone DEFAULT now() NOT NULL
);
CREATE SEQUENCE executor_job_tokens_id_seq
AS integer
START WITH 1
INCREMENT BY 1
NO MINVALUE
NO MAXVALUE
CACHE 1;
ALTER SEQUENCE executor_job_tokens_id_seq OWNED BY executor_job_tokens.id;
CREATE TABLE executor_secret_access_logs (
id integer NOT NULL,
executor_secret_id integer NOT NULL,
@ -4305,6 +4325,8 @@ ALTER TABLE ONLY event_logs_scrape_state ALTER COLUMN id SET DEFAULT nextval('ev
ALTER TABLE ONLY executor_heartbeats ALTER COLUMN id SET DEFAULT nextval('executor_heartbeats_id_seq'::regclass);
ALTER TABLE ONLY executor_job_tokens ALTER COLUMN id SET DEFAULT nextval('executor_job_tokens_id_seq'::regclass);
ALTER TABLE ONLY executor_secret_access_logs ALTER COLUMN id SET DEFAULT nextval('executor_secret_access_logs_id_seq'::regclass);
ALTER TABLE ONLY executor_secrets ALTER COLUMN id SET DEFAULT nextval('executor_secrets_id_seq'::regclass);
@ -4574,6 +4596,15 @@ ALTER TABLE ONLY executor_heartbeats
ALTER TABLE ONLY executor_heartbeats
ADD CONSTRAINT executor_heartbeats_pkey PRIMARY KEY (id);
ALTER TABLE ONLY executor_job_tokens
ADD CONSTRAINT executor_job_tokens_job_id_queue_repo_id_key UNIQUE (job_id, queue, repo_id);
ALTER TABLE ONLY executor_job_tokens
ADD CONSTRAINT executor_job_tokens_pkey PRIMARY KEY (id);
ALTER TABLE ONLY executor_job_tokens
ADD CONSTRAINT executor_job_tokens_value_sha256_key UNIQUE (value_sha256);
ALTER TABLE ONLY executor_secret_access_logs
ADD CONSTRAINT executor_secret_access_logs_pkey PRIMARY KEY (id);

View File

@ -115,3 +115,7 @@
interfaces:
- UserEmailsService
- ReposService
- filename: enterprise/internal/executor/store/mocks_temp.go
path: github.com/sourcegraph/sourcegraph/enterprise/internal/executor/store
interfaces:
- JobTokenStore