# Documentation for how to override sg configuration for local development:
# https://github.com/sourcegraph/sourcegraph/blob/main/doc/dev/background-information/sg/index.md#configuration
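#
# A minimal override file, for instance, could look like this
# (illustrative values only):
#
#   env:
#     SRC_LOG_LEVEL: debug
#     PGPORT: 5433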
env:
  PGPORT: 5432
  PGHOST: localhost
  PGUSER: sourcegraph
  PGPASSWORD: sourcegraph
  PGDATABASE: sourcegraph
  PGSSLMODE: disable
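  # The PG* variables follow the standard libpq conventions, so the same
  # database is reachable with e.g. `psql -h localhost -p 5432 -U sourcegraph sourcegraph`.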
  SG_DEV_MIGRATE_ON_APPLICATION_STARTUP: "true"
  INSECURE_DEV: true
  SRC_REPOS_DIR: $HOME/.sourcegraph/repos
  SRC_LOG_LEVEL: info
  SRC_LOG_FORMAT: condensed
  SRC_TRACE_LOG: false
  # Set this to true to show an iTerm link to the file:line where the log message came from
  SRC_LOG_SOURCE_LINK: false
  # Use two gitserver instances in local dev
  SRC_GIT_SERVER_1: 127.0.0.1:3501
  SRC_GIT_SERVER_2: 127.0.0.1:3502
  SRC_GIT_SERVERS: 127.0.0.1:3501 127.0.0.1:3502
  # Enable sharded indexed search mode:
  INDEXED_SEARCH_SERVERS: localhost:3070 localhost:3071
  GO111MODULE: "on"
  DEPLOY_TYPE: dev
  SRC_HTTP_ADDR: ":3082"
  # I don't think we even need to set these?
  SEARCHER_URL: http://127.0.0.1:3181
  REPO_UPDATER_URL: http://127.0.0.1:3182
  REDIS_ENDPOINT: 127.0.0.1:6379
  SYMBOLS_URL: http://localhost:3184
embeddings: searcher and indexer (#48017)
# High-level architecture overview
<img width="2231" alt="Screenshot 2023-02-24 at 15 13 59"
src="https://user-images.githubusercontent.com/6417322/221200130-53c1ff25-4c47-4532-885f-5c4f9dadb05e.png">
# Embeddings
Really quickly: embeddings are a semantic representation of text.
Embeddings are usually floating-point vectors with 256+ elements. The
neat thing about embeddings is that they allow us to search over textual
information using a semantic correlation between the query and the text,
not just syntactic (matching keywords).
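
For intuition, "similarity" between two embeddings is usually measured with cosine similarity (or a dot product over normalized vectors). A minimal, self-contained Go sketch (not this PR's actual code):

```go
package main

import (
	"fmt"
	"math"
)

// cosineSimilarity measures how close two embedding vectors point in the same
// direction: ~1 means semantically similar, ~0 means unrelated.
// It assumes both vectors have the same length.
func cosineSimilarity(a, b []float32) float64 {
	var dot, normA, normB float64
	for i := range a {
		dot += float64(a[i]) * float64(b[i])
		normA += float64(a[i]) * float64(a[i])
		normB += float64(b[i]) * float64(b[i])
	}
	return dot / (math.Sqrt(normA) * math.Sqrt(normB))
}

func main() {
	// Toy 3-dimensional "embeddings"; real ones have 256+ dimensions.
	query := []float32{0.9, 0.1, 0.3}
	doc := []float32{0.8, 0.2, 0.4}
	fmt.Printf("similarity: %.3f\n", cosineSimilarity(query, doc))
}
```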
In this PR, we implemented an embedding service that will allow us to do
semantic code search over repositories in Sourcegraph. So, for example,
you'll be able to ask, "how do access tokens work in Sourcegraph", and
it will give you a list of the closest matching code files.
Additionally, we built a context detection service powered by
embeddings. In chat applications, it is important to know whether the
user's message requires additional context. We have to differentiate
between two cases: the user asks a general question about the codebase,
or the user references something in the existing conversation. In the
latter case, including the context would ruin the flow of the
conversation, and the chatbot would most likely return a confusing
answer. We determine whether a query _does not_ require additional
context using two approaches:
1. We check if the query contains well-known phrases that indicate the
user is referencing the existing conversation (e.g., "translate
previous", "change that").
2. We have a static dataset of messages that require context and a
dataset of messages that do not. We embed both datasets, and then, using
embedding similarity, we check which set is more similar to the query
(see the sketch after this list).
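
A minimal sketch of the second approach, assuming the datasets are already embedded and reusing `cosineSimilarity` from the sketch above (all names hypothetical, not the PR's actual API):

```go
// isContextRequired sketches approach 2. requiresContext/noContext hold the
// pre-embedded example messages.
func isContextRequired(query []float32, requiresContext, noContext [][]float32) bool {
	maxSim := func(set [][]float32) float64 {
		best := -1.0
		for _, v := range set {
			if s := cosineSimilarity(query, v); s > best {
				best = s
			}
		}
		return best
	}
	// The query needs extra context if it is closer to the "requires
	// context" examples than to the "no context" ones.
	return maxSim(requiresContext) >= maxSim(noContext)
}
```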
## GraphQL API
We add four new resolvers to the GraphQL API:
```graphql
extend type Query {
embeddingsSearch(repo: ID!, query: String!, codeResultsCount: Int!, textResultsCount: Int!): EmbeddingsSearchResults!
isContextRequiredForQuery(query: String!): Boolean!
}
extend type Mutation {
scheduleRepositoriesForEmbedding(repoNames: [String!]!): EmptyResponse!
scheduleContextDetectionForEmbedding: EmptyResponse!
}
```
- `embeddingsSearch` performs embeddings search over the repo embeddings
and returns the specified number of results
- `isContextRequiredForQuery` determines whether the given query
requires additional context
- `scheduleRepositoriesForEmbedding` schedules a repo embedding
background job
- `scheduleContextDetectionForEmbedding` schedules a context detection
embedding background job that embeds a static dataset of messages.
## Repo embedding background job
Embedding a repository is implemented as a background job. The
background job handler receives the repository and the revision that
should be embedded. The handler then gathers a list of files from
gitserver and excludes files >1MB in size. The list of files is split
into code and text files (.md, .txt), and we build a separate embedding
index for both. We split them because in a combined index, the text
files always tended to feature as top results and didn't leave any room
for code files. Once we have the list of files, the procedure is as
follows (a Go sketch follows the list):
- For each file:
  - Get file contents from gitserver
  - Check if the file is embeddable (is not autogenerated, is large
    enough, does not have long lines)
  - Split the file into embeddable chunks
  - Embed the file chunks using an external embedding service (defined in
    site config)
  - Add embedded file chunks and metadata to the index
    - Metadata contains the file name, the start line, and the end line of
      the chunk
- Once all files are processed, the index is marshaled into JSON and
  stored in Cloud storage (GCS, S3)
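
A condensed sketch of this loop, with hypothetical types and callbacks standing in for the real gitserver client, filtering, and embedding API (extends the sketch file started above):

```go
import "strings"

type Chunk struct {
	FileName           string
	StartLine, EndLine int
}

type Index struct {
	Vectors [][]float32 // one embedding per chunk
	Meta    []Chunk     // file name and line range per chunk
}

func embedRepo(
	files []string,
	fetch func(name string) ([]string, error), // file lines, from gitserver
	embeddable func(lines []string) bool, // filters autogenerated/tiny/long-line files
	embed func(texts []string) ([][]float32, error), // external embedding service
	chunkLines int, // chunk size in lines
) (*Index, error) {
	idx := &Index{}
	for _, name := range files {
		lines, err := fetch(name)
		if err != nil {
			return nil, err
		}
		if !embeddable(lines) {
			continue
		}
		// Split into fixed-size line windows; the real chunker is smarter.
		for start := 0; start < len(lines); start += chunkLines {
			end := start + chunkLines
			if end > len(lines) {
				end = len(lines)
			}
			vecs, err := embed([]string{strings.Join(lines[start:end], "\n")})
			if err != nil {
				return nil, err
			}
			idx.Vectors = append(idx.Vectors, vecs...)
			idx.Meta = append(idx.Meta, Chunk{FileName: name, StartLine: start, EndLine: end})
		}
	}
	return idx, nil
}
```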
### Site config changes
As mentioned, we use a configurable external embedding API that does the
actual text -> vector embedding part. Ideally, this allows us to swap
embedding providers in the future.
```json
"embeddings": {
"description": "Configuration for embeddings service.",
"type": "object",
"required": ["enabled", "dimensions", "model", "accessToken", "url"],
"properties": {
"enabled": {
"description": "Toggles whether embedding service is enabled.",
"type": "boolean",
"default": false
},
"dimensions": {
"description": "The dimensionality of the embedding vectors.",
"type": "integer",
"minimum": 0
},
"model": {
"description": "The model used for embedding.",
"type": "string"
},
"accessToken": {
"description": "The access token used to authenticate with the external embedding API service.",
"type": "string"
},
"url": {
"description": "The url to the external embedding API service.",
"type": "string",
"format": "uri"
}
}
}
```
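
For illustration, a filled-in `embeddings` object could then look like the following; the model, dimensions, and URL are made-up example values for an OpenAI-style provider, not defaults shipped by this PR:

```json
"embeddings": {
  "enabled": true,
  "dimensions": 1536,
  "model": "text-embedding-ada-002",
  "accessToken": "<REDACTED>",
  "url": "https://api.openai.com/v1/embeddings"
}
```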
## Repo embeddings search
The repo embeddings search is implemented in its own service. When a
user queries a repo using embeddings search, the following happens
(sketched in Go below):
- Download the repo embedding index from blob storage and cache it in
  memory
  - We cache up to 5 embedding indexes in memory
- Embed the query and use the embedded query vector to find similar code
  and text file metadata in the embedding index
- Query gitserver for the actual file contents
- Return the results
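
The ranking step then reduces to scoring every cached chunk vector against the query vector and keeping the top k; a sketch reusing the hypothetical `Index`/`Chunk` types and `cosineSimilarity` from above:

```go
import "sort"

func searchIndex(query []float32, idx *Index, k int) []Chunk {
	scores := make([]float64, len(idx.Vectors))
	order := make([]int, len(idx.Vectors))
	for i, v := range idx.Vectors {
		scores[i] = cosineSimilarity(query, v)
		order[i] = i
	}
	// Highest similarity first.
	sort.Slice(order, func(a, b int) bool { return scores[order[a]] > scores[order[b]] })
	if k > len(order) {
		k = len(order)
	}
	results := make([]Chunk, 0, k)
	for _, i := range order[:k] {
		results = append(results, idx.Meta[i]) // actual file contents come from gitserver
	}
	return results
}
```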
## Interesting files
- [Similarity
search](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-102cc83520004eb0e2795e49bc435c5142ca555189b1db3a52bbf1ffb82fa3c6)
- [Repo embedding job
handler](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-c345f373f426398beb4b9cd5852ba862a2718687882db2a8b2d9c7fbb5f1dc52)
- [External embedding api
client](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-ad1e7956f518e4bcaee17dd9e7ac04a5e090c00d970fcd273919e887e1d2cf8f)
- [Embedding a
repo](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-1f35118727128095b7816791b6f0a2e0e060cddee43d25102859b8159465585c)
- [Embeddings searcher
service](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-5b20f3e7ef87041daeeaef98b58ebf7388519cedcdfc359dc5e6d4e0b021472e)
- [Embeddings
search](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-79f95b9cc3f1ef39c1a0b88015bd9cd6c19c30a8d4c147409f1b8e8cd9462ea1)
- [Repo embedding index cache
management](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-8a41f7dec31054889dbf86e97c52223d5636b4d408c6b375bcfc09160a8b70f8)
- [GraphQL
resolvers](https://github.com/sourcegraph/sourcegraph/pull/48017/files#diff-9b30a0b5efcb63e2f4611b99ab137fbe09629a769a4f30d10a1b2da41a01d21f)
## Test plan
- Start by filling out the `embeddings` object in the site config (let
me know if you need an API key)
- Start the embeddings service using `sg start embeddings`
- Go to the `/api/console` page and schedule a repo embedding job and a
context detection embedding job:
```graphql
mutation {
scheduleRepositoriesForEmbedding(repoNames: ["github.com/sourcegraph/handbook"]) {
__typename
}
scheduleContextDetectionForEmbedding {
__typename
}
}
```
- Once both are finished, you should be able to query the repo embedding
index and determine whether context is needed for a given query:
```graphql
query {
isContextRequiredForQuery(query: "how do access tokens work")
embeddingsSearch(
repo: "UmVwb3NpdG9yeToy", # github.com/sourcegraph/handbook GQL ID
query: "how do access tokens work",
codeResultsCount: 5,
textResultsCount: 5) {
codeResults {
fileName
content
}
textResults {
fileName
content
}
}
}
```

  EMBEDDINGS_URL: http://localhost:9991
  SRC_SYNTECT_SERVER: http://localhost:9238
  SRC_FRONTEND_INTERNAL: localhost:3090
  GRAFANA_SERVER_URL: http://localhost:3370
  PROMETHEUS_URL: http://localhost:9090
  JAEGER_SERVER_URL: http://localhost:16686
  SRC_GRPC_ENABLE_CONF: "true"
  SRC_DEVELOPMENT: "true"
  SRC_PROF_HTTP: ""
  SRC_PROF_SERVICES: |
    [
      { "Name": "frontend", "Host": "127.0.0.1:6063" },
      { "Name": "gitserver-0", "Host": "127.0.0.1:3551" },
      { "Name": "gitserver-1", "Host": "127.0.0.1:3552" },
      { "Name": "searcher", "Host": "127.0.0.1:6069" },
      { "Name": "symbols", "Host": "127.0.0.1:6071" },
      { "Name": "repo-updater", "Host": "127.0.0.1:6074" },
      { "Name": "codeintel-worker", "Host": "127.0.0.1:6088" },
      { "Name": "worker", "Host": "127.0.0.1:6089" },
      { "Name": "worker-executors", "Host": "127.0.0.1:6996" },
      { "Name": "embeddings", "Host": "127.0.0.1:6099" },
      { "Name": "zoekt-index-0", "Host": "127.0.0.1:6072" },
      { "Name": "zoekt-index-1", "Host": "127.0.0.1:6073" },
      { "Name": "zoekt-web-0", "Host": "127.0.0.1:3070", "DefaultPath": "/debug/requests/" },
      { "Name": "zoekt-web-1", "Host": "127.0.0.1:3071", "DefaultPath": "/debug/requests/" }
    ]
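  # Assumption: these are the services' debugserver addresses used for
  # profiling/metrics in local dev; the Go services expose standard
  # net/http/pprof handlers there (e.g. /debug/pprof/).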
  # Settings/config
  SITE_CONFIG_FILE: ./dev/site-config.json
  SITE_CONFIG_ALLOW_EDITS: true
  GLOBAL_SETTINGS_FILE: ./dev/global-settings.json
  GLOBAL_SETTINGS_ALLOW_EDITS: true
  # Point codeintel to the `frontend` database in development
  CODEINTEL_PGPORT: $PGPORT
  CODEINTEL_PGHOST: $PGHOST
  CODEINTEL_PGUSER: $PGUSER
  CODEINTEL_PGPASSWORD: $PGPASSWORD
  CODEINTEL_PGDATABASE: $PGDATABASE
  CODEINTEL_PGSSLMODE: $PGSSLMODE
  CODEINTEL_PGDATASOURCE: $PGDATASOURCE
  CODEINTEL_PG_ALLOW_SINGLE_DB: true
  # Required for `frontend` and `web` commands
  SOURCEGRAPH_HTTPS_DOMAIN: sourcegraph.test
  SOURCEGRAPH_HTTPS_PORT: 3443
  # Required for `web` commands
  NODE_OPTIONS: "--max_old_space_size=8192"
  # Default `NODE_ENV` to `development`
  NODE_ENV: development
  # Required for codeintel uploadstore
  PRECISE_CODE_INTEL_UPLOAD_AWS_ENDPOINT: http://localhost:9000
  PRECISE_CODE_INTEL_UPLOAD_BACKEND: blobstore
  # Required for embeddings job upload
  EMBEDDINGS_UPLOAD_AWS_ENDPOINT: http://localhost:9000
  # Required for upload of search job results
  SEARCH_JOBS_UPLOAD_AWS_ENDPOINT: http://localhost:9000
  # Disable auto-indexing the CNCF repo group (this only works in Cloud)
  # This setting will be going away soon
  DISABLE_CNCF: notonmybox
  # Point code insights to the `frontend` database in development
  CODEINSIGHTS_PGPORT: $PGPORT
  CODEINSIGHTS_PGHOST: $PGHOST
  CODEINSIGHTS_PGUSER: $PGUSER
  CODEINSIGHTS_PGPASSWORD: $PGPASSWORD
  CODEINSIGHTS_PGDATABASE: $PGDATABASE
  CODEINSIGHTS_PGSSLMODE: $PGSSLMODE
  CODEINSIGHTS_PGDATASOURCE: $PGDATASOURCE
  # Disable code insights by default
  DB_STARTUP_TIMEOUT: 120s # codeinsights-db needs more time to start in some instances.
  DISABLE_CODE_INSIGHTS_HISTORICAL: true
  DISABLE_CODE_INSIGHTS: true
  # OpenTelemetry in dev - use single http/json endpoint
  # OTEL_EXPORTER_OTLP_ENDPOINT: http://127.0.0.1:4318
  # OTEL_EXPORTER_OTLP_PROTOCOL: http/json
  # Enable gRPC Web UI for debugging
  GRPC_WEB_UI_ENABLED: "true"
  # Enable full protobuf message logging when an internal error occurs
  SRC_GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_ENABLED: "true"
  SRC_GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_JSON_TRUNCATION_SIZE_BYTES: "1KB"
  SRC_GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_HANDLING_MAX_MESSAGE_SIZE_BYTES: "100MB"
  ## zoekt-specific message logging
  GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_ENABLED: "true"
  GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_JSON_TRUNCATION_SIZE_BYTES: "1KB"
  GRPC_INTERNAL_ERROR_LOGGING_LOG_PROTOBUF_MESSAGES_HANDLING_MAX_MESSAGE_SIZE_BYTES: "100MB"
telemetrygateway: add exporter and service (#56699)
This change adds:
- telemetry export background jobs: flagged behind `TELEMETRY_GATEWAY_EXPORTER_EXPORT_ADDR`, default empty => disabled
- telemetry redaction: configured in package `internal/telemetry/sensitivemetadataallowlist`
- telemetry-gateway service receiving events and forwarding them to a pub/sub topic (or just logging them, as configured in local dev)
- utilities for easily creating an event recorder: `internal/telemetry/telemetryrecorder`
Notes:
- All changes are feature-flagged to some degree and off by default, so the merge should be fairly low-risk.
- We decided that transmitting the full license key continues to be the way to go. We transmit it once per stream and attach it to all events in the telemetry-gateway. There is no auth mechanism at the moment.
- GraphQL return type `EventLog.Source` is now a plain string instead of a string enum. This should not be a breaking change in our clients, but it is needed so that our generated V2 events do not break requests for event logs.
Stacked on https://github.com/sourcegraph/sourcegraph/pull/56520
Closes https://github.com/sourcegraph/sourcegraph/issues/56289
Closes https://github.com/sourcegraph/sourcegraph/issues/56287
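
Conceptually, the export background job is a periodic batch loop; a rough Go sketch with hypothetical names (`Event`, `dequeue`, `send`), not the actual implementation:

```go
package main

import (
	"context"
	"log"
	"time"
)

// Event is a hypothetical stand-in for a queued telemetry event.
type Event struct{ Feature, Action string }

// exportLoop sketches the export background job: on every tick, drain up to
// maxBatchSize queued events and send them to the Telemetry Gateway. Failed
// batches stay queued and are retried on the next tick.
func exportLoop(ctx context.Context, interval time.Duration, maxBatchSize int,
	dequeue func(max int) []Event, send func(context.Context, []Event) error) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			batch := dequeue(maxBatchSize)
			if len(batch) == 0 {
				continue
			}
			if err := send(ctx, batch); err != nil {
				log.Printf("exporting events failed: %v", err)
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()
	queue := []Event{{Feature: "foobar", Action: "view"}}
	exportLoop(ctx, 10*time.Second, 10000,
		func(max int) []Event { b := queue; queue = nil; return b },
		func(_ context.Context, batch []Event) error {
			log.Printf("exported %d event(s)", len(batch))
			return nil
		})
}
```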
## Test plan
Add an override to make the export super frequent:
```
env:
TELEMETRY_GATEWAY_EXPORTER_EXPORT_INTERVAL: "10s"
TELEMETRY_GATEWAY_EXPORTER_EXPORTED_EVENTS_RETENTION: "5m"
```
Start sourcegraph:
```
sg start
```
Enable the `telemetry-export` feature flag (from https://github.com/sourcegraph/sourcegraph/pull/56520)
Emit some events in GraphQL:
```gql
mutation {
telemetry {
recordEvents(events:[{
feature:"foobar"
action:"view"
source:{
client:"WEB"
}
parameters:{
version:0
}
}]) {
alwaysNil
}
}
```
See series of log events:
```
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/telemetrygatewayexporter.go:61 Telemetry Gateway export enabled - initializing background routines
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:99 exporting events {"maxBatchSize": 10000, "count": 1}
[telemetry-g...y] INFO telemetry-gateway.pubsub pubsub/topic.go:115 Publish {"TraceId": "7852903434f0d2f647d397ee83b4009b", "SpanId": "8d945234bccf319b", "message": "{\"event\":{\"id\":\"dc96ae84-4ac4-4760-968f-0a0307b8bb3d\",\"timestamp\":\"2023-09-19T01:57:13.590266Z\",\"feature\":\"foobar\", ....
```
Build:
```
export VERSION="insiders"
bazel run //cmd/telemetry-gateway:candidate_push --config darwin-docker --stamp --workspace_status_command=./dev/bazel_stamp_vars.sh -- --tag $VERSION --repository us.gcr.io/sourcegraph-dev/telemetry-gateway
```
Deploy: https://github.com/sourcegraph/managed-services/pull/7
Add override:
```yaml
env:
# Port required. TODO: What's the best way to provide gRPC addresses, such that a
# localhost address is also possible?
TELEMETRY_GATEWAY_EXPORTER_EXPORT_ADDR: "https://telemetry-gateway.sgdev.org:443"
```
Repeat the above (`sg start` and emit some events):
```
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:94 exporting events {"maxBatchSize": 10000, "count": 6}
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:113 events exported {"maxBatchSize": 10000, "succeeded": 6}
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:94 exporting events {"maxBatchSize": 10000, "count": 1}
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:113 events exported {"maxBatchSize": 10000, "succeeded": 1}
```

  TELEMETRY_GATEWAY_EXPORTER_EXPORT_ADDR: "http://127.0.0.1:10080"

commands:
  server:
    description: Run an all-in-one sourcegraph/server image
    cmd: ./dev/run-server-image.sh
    env:
      TAG: insiders
      CLEAN: "true"
      DATA: "/tmp/sourcegraph-data"
      URL: "http://localhost:7080"

  frontend:
    description: Frontend
    cmd: |
      # TODO: This should be fixed
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)

      # If EXTSVC_CONFIG_FILE is *unset*, set a default.
      export EXTSVC_CONFIG_FILE=${EXTSVC_CONFIG_FILE-'../dev-private/enterprise/dev/external-services-config.json'}

      .bin/frontend
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/frontend github.com/sourcegraph/sourcegraph/cmd/frontend
    checkBinary: .bin/frontend
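    # Note: `all=-N -l` disables compiler optimizations (-N) and inlining (-l)
    # so Delve can map execution back to source accurately; when DELVE is
    # unset, $GCFLAGS is empty and a normal optimized build is produced.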
    env:
      CONFIGURATION_MODE: server
      USE_ENHANCED_LANGUAGE_DETECTION: false
      SITE_CONFIG_FILE: "../dev-private/enterprise/dev/site-config.json"
      SITE_CONFIG_ESCAPE_HATCH_PATH: "$HOME/.sourcegraph/site-config.json"
      # frontend processes need this so that the paths to the assets are rendered correctly
      WEBPACK_DEV_SERVER: 1
    watch:
      - lib
      - internal
      - cmd/frontend

  gitserver-template: &gitserver_template
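    # `&gitserver_template` defines a YAML anchor: gitserver-0/gitserver-1
    # below merge it via `<<: *gitserver_template` and override only the
    # per-instance env vars.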
    cmd: .bin/gitserver
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/gitserver github.com/sourcegraph/sourcegraph/cmd/gitserver
    checkBinary: .bin/gitserver
    env: &gitserverenv
      HOSTNAME: 127.0.0.1:3178
    watch:
      - lib
      - internal
      - cmd/gitserver

  # This is only here to stay backwards-compatible with people's custom
  # `sg.config.overwrite.yaml` files
  gitserver:
    <<: *gitserver_template

  gitserver-0:
    <<: *gitserver_template
    env:
      <<: *gitserverenv
      GITSERVER_EXTERNAL_ADDR: 127.0.0.1:3501
      GITSERVER_ADDR: 127.0.0.1:3501
      SRC_REPOS_DIR: $HOME/.sourcegraph/repos_1
      SRC_PROF_HTTP: 127.0.0.1:3551

  gitserver-1:
    <<: *gitserver_template
    env:
      <<: *gitserverenv
      GITSERVER_EXTERNAL_ADDR: 127.0.0.1:3502
      GITSERVER_ADDR: 127.0.0.1:3502
      SRC_REPOS_DIR: $HOME/.sourcegraph/repos_2
      SRC_PROF_HTTP: 127.0.0.1:3552

  repo-updater:
    cmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)

      .bin/repo-updater
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/repo-updater github.com/sourcegraph/sourcegraph/cmd/repo-updater
    checkBinary: .bin/repo-updater
    watch:
      - lib
      - internal
      - cmd/repo-updater

  symbols:
    cmd: .bin/symbols
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/symbols github.com/sourcegraph/sourcegraph/enterprise/cmd/symbols
    checkBinary: .bin/symbols
    env:
      CTAGS_COMMAND: dev/universal-ctags-dev
      SCIP_CTAGS_COMMAND: dev/scip-ctags-dev
      CTAGS_PROCESSES: 2
      USE_ROCKSKIP: "false"
    watch:
      - lib
      - internal
      - cmd/symbols
      - enterprise/cmd/symbols
      - internal/rockskip

  embeddings:
    cmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)

      .bin/embeddings
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/embeddings github.com/sourcegraph/sourcegraph/enterprise/cmd/embeddings
    checkBinary: .bin/embeddings
    watch:
      - lib
      - internal
      - enterprise/cmd/embeddings
      - internal/embeddings

  qdrant:
    cmd: |
      docker run -p 6333:6333 -p 6334:6334 \
        -v $HOME/.sourcegraph-dev/data/qdrant_data:/data \
        -e QDRANT__SERVICE__GRPC_PORT="6334" \
        -e QDRANT__LOG_LEVEL=INFO \
        -e QDRANT__STORAGE__STORAGE_PATH=/data \
        -e QDRANT__STORAGE__SNAPSHOTS_PATH=/data \
        -e QDRANT_INIT_FILE_PATH=/data/.qdrant-initialized \
        --entrypoint /usr/local/bin/qdrant \
        sourcegraph/qdrant:insiders
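    # 6333 is Qdrant's HTTP/REST port and 6334 its gRPC port; data persists
    # across restarts under ~/.sourcegraph-dev/data/qdrant_data.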

  worker:
    cmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)

      .bin/worker
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/worker github.com/sourcegraph/sourcegraph/cmd/worker
    watch:
      - lib
      - internal
      - cmd/worker

  cody-gateway:
    cmd: |
      .bin/cody-gateway
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/cody-gateway github.com/sourcegraph/sourcegraph/cmd/cody-gateway
    checkBinary: .bin/cody-gateway
    env:
      CODY_GATEWAY_ANTHROPIC_ACCESS_TOKEN: foobar
      # Set in override if you want to test local Cody Gateway: https://docs.sourcegraph.com/dev/how-to/cody_gateway
      CODY_GATEWAY_DOTCOM_ACCESS_TOKEN: ""
      CODY_GATEWAY_DOTCOM_API_URL: https://sourcegraph.test:3443/.api/graphql
      CODY_GATEWAY_ALLOW_ANONYMOUS: true
      CODY_GATEWAY_DIAGNOSTICS_SECRET: sekret
      SRC_LOG_LEVEL: info
      # Enables metrics in dev via debugserver
      SRC_PROF_HTTP: "127.0.0.1:6098"
    watch:
      - lib
      - internal
      - cmd/cody-gateway
telemetrygateway: add exporter and service (#56699)
This change adds:
- telemetry export background jobs: flagged behind `TELEMETRY_GATEWAY_EXPORTER_EXPORT_ADDR`, default empty => disabled
- telemetry redaction: configured in package `internal/telemetry/sensitivemetadataallowlist`
- telemetry-gateway service receiving events and forwarding them to a pub/sub topic (or just logging them, as configured in local dev)
- utilities for easily creating an event recorder: `internal/telemetry/telemetryrecorder`
Notes:
- All changes are feature-flagged to some degree and off by default, so the merge should be fairly low-risk.
- We decided that transmitting the full license key continues to be the way to go. We transmit it once per stream and attach it on all events in the telemetry-gateway. There is no auth mechanism at the moment.
- GraphQL return type `EventLog.Source` is now a plain string instead of a string enum. This should not be a breaking change in our clients, but must be made so that our generated V2 events do not break requesting of event logs.
Stacked on https://github.com/sourcegraph/sourcegraph/pull/56520
Closes https://github.com/sourcegraph/sourcegraph/issues/56289
Closes https://github.com/sourcegraph/sourcegraph/issues/56287
## Test plan
Add an override to make the export super frequent:
```yaml
env:
  TELEMETRY_GATEWAY_EXPORTER_EXPORT_INTERVAL: "10s"
  TELEMETRY_GATEWAY_EXPORTER_EXPORTED_EVENTS_RETENTION: "5m"
```
Start sourcegraph:
```bash
sg start
```
Enable the `telemetry-export` feature flag (from https://github.com/sourcegraph/sourcegraph/pull/56520)
Emit some events in GraphQL:
```graphql
mutation {
  telemetry {
    recordEvents(events:[{
      feature:"foobar"
      action:"view"
      source:{
        client:"WEB"
      }
      parameters:{
        version:0
      }
    }]) {
      alwaysNil
    }
  }
}
```
See the series of log events:
```
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/telemetrygatewayexporter.go:61 Telemetry Gateway export enabled - initializing background routines
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:99 exporting events {"maxBatchSize": 10000, "count": 1}
[telemetry-g...y] INFO telemetry-gateway.pubsub pubsub/topic.go:115 Publish {"TraceId": "7852903434f0d2f647d397ee83b4009b", "SpanId": "8d945234bccf319b", "message": "{\"event\":{\"id\":\"dc96ae84-4ac4-4760-968f-0a0307b8bb3d\",\"timestamp\":\"2023-09-19T01:57:13.590266Z\",\"feature\":\"foobar\", ....
```
Build:
```bash
export VERSION="insiders"
bazel run //cmd/telemetry-gateway:candidate_push --config darwin-docker --stamp --workspace_status_command=./dev/bazel_stamp_vars.sh -- --tag $VERSION --repository us.gcr.io/sourcegraph-dev/telemetry-gateway
```
Deploy: https://github.com/sourcegraph/managed-services/pull/7
Add override:
```yaml
env:
  # Port required. TODO: What's the best way to provide gRPC addresses, such that a
  # localhost address is also possible?
  TELEMETRY_GATEWAY_EXPORTER_EXPORT_ADDR: "https://telemetry-gateway.sgdev.org:443"
```
Repeat the above (`sg start` and emit some events):
```
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:94 exporting events {"maxBatchSize": 10000, "count": 6}
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:113 events exported {"maxBatchSize": 10000, "succeeded": 6}
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:94 exporting events {"maxBatchSize": 10000, "count": 1}
[ worker] INFO worker.telemetrygateway-exporter telemetrygatewayexporter/exporter.go:113 events exported {"maxBatchSize": 10000, "succeeded": 1}
```

  telemetry-gateway:
    cmd: |
      # Telemetry Gateway needs this to parse and validate incoming license keys.
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
      .bin/telemetry-gateway
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi

      go build -gcflags="$GCFLAGS" -o .bin/telemetry-gateway github.com/sourcegraph/sourcegraph/cmd/telemetry-gateway
    checkBinary: .bin/telemetry-gateway
    env:
      PORT: "10080"
      DIAGNOSTICS_SECRET: sekret
      TELEMETRY_GATEWAY_EVENTS_PUBSUB_ENABLED: false
      SRC_LOG_LEVEL: info
      # Enables metrics in dev via debugserver
      SRC_PROF_HTTP: "127.0.0.1:6080"
      GRPC_WEB_UI_ENABLED: true
    watch:
      - lib
      - internal
      - cmd/telemetry-gateway
      - internal/telemetrygateway
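  # With GRPC_WEB_UI_ENABLED the service can expose a web UI for poking at the
  # gRPC API by hand. As an alternative sketch (assuming the service listens on
  # PORT above and has gRPC reflection enabled - unverified), grpcurl can list
  # the exposed services:
  #
  #   grpcurl -plaintext localhost:10080 list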
  pings:
    cmd: |
      .bin/pings
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi

      go build -gcflags="$GCFLAGS" -o .bin/pings github.com/sourcegraph/sourcegraph/cmd/pings
    checkBinary: .bin/pings
    env:
      SRC_LOG_LEVEL: info
      DIAGNOSTICS_SECRET: 'lifeisgood'
      PINGS_PUBSUB_PROJECT_ID: 'telligentsourcegraph'
      PINGS_PUBSUB_TOPIC_ID: 'server-update-checks-test'
      HUBSPOT_ACCESS_TOKEN: ''
      # Enables metrics in dev via debugserver
      SRC_PROF_HTTP: "127.0.0.1:7011"
    watch:
      - lib
      - internal
      - cmd/pings
  searcher:
    cmd: .bin/searcher
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi

      go build -gcflags="$GCFLAGS" -o .bin/searcher github.com/sourcegraph/sourcegraph/cmd/searcher
    checkBinary: .bin/searcher
    watch:
      - lib
      - internal
      - cmd/searcher
  caddy:
    ignoreStdout: true
    ignoreStderr: true
    cmd: .bin/caddy_${CADDY_VERSION} run --watch --config=dev/Caddyfile
    install_func: installCaddy
    env:
      CADDY_VERSION: 2.7.3
  web:
    description: Enterprise version of the web app
    cmd: ./node_modules/.bin/gulp --color dev
    install: pnpm install
    env:
      ENABLE_OPEN_TELEMETRY: true

  web-standalone-http:
    description: Standalone web frontend (dev) with API proxy to a configurable URL
    cmd: pnpm --filter @sourcegraph/web serve:dev --color
    install: |
      pnpm install
      pnpm generate
    env:
      WEBPACK_SERVE_INDEX: true
      SOURCEGRAPH_API_URL: https://k8s.sgdev.org

  web-standalone-http-prod:
    description: Standalone web frontend (production) with API proxy to a configurable URL
    cmd: pnpm --filter @sourcegraph/web serve:prod
    install: pnpm --filter @sourcegraph/web run build
    env:
      NODE_ENV: production
      WEBPACK_SERVE_INDEX: true
      SOURCEGRAPH_API_URL: https://k8s.sgdev.org

  web-integration-build:
    description: Build development web application for integration tests
    cmd: pnpm --filter @sourcegraph/web run build
    env:
      INTEGRATION_TESTS: true

  web-integration-build-prod:
    description: Build production web application for integration tests
    cmd: pnpm --filter @sourcegraph/web run build
    env:
      INTEGRATION_TESTS: true
      NODE_ENV: production
  docsite:
    description: Docsite instance serving the docs
    cmd: bazel run --noshow_progress --noshow_loading_progress //doc:serve
  syntax-highlighter:
    ignoreStdout: true
    ignoreStderr: true
    cmd: |
      docker run --name=syntax-highlighter --rm -p9238:9238 \
        -e WORKERS=1 -e ROCKET_ADDRESS=0.0.0.0 \
        sourcegraph/syntax-highlighter:insiders
    install: |
      # Remove containers by the old name, too.
      docker inspect syntect_server >/dev/null 2>&1 && docker rm -f syntect_server || true
      docker inspect syntax-highlighter >/dev/null 2>&1 && docker rm -f syntax-highlighter || true
      # Pull the latest syntax-highlighter insiders image, only during install, but
      # skip if OFFLINE=true is set.
      if [[ "$OFFLINE" != "true" ]]; then
        docker pull -q sourcegraph/syntax-highlighter:insiders
      fi
  zoekt-indexserver-template: &zoekt_indexserver_template
    cmd: |
      env PATH="${PWD}/.bin:$PATH" .bin/zoekt-sourcegraph-indexserver \
        -sourcegraph_url 'http://localhost:3090' \
        -index "$HOME/.sourcegraph/zoekt/index-$ZOEKT_NUM" \
        -hostname "localhost:$ZOEKT_HOSTNAME_PORT" \
        -interval 1m \
        -listen "127.0.0.1:$ZOEKT_LISTEN_PORT" \
        -cpu_fraction 0.25
    install: |
      mkdir -p .bin
      export GOBIN="${PWD}/.bin"
      go install github.com/sourcegraph/zoekt/cmd/zoekt-archive-index
      go install github.com/sourcegraph/zoekt/cmd/zoekt-git-index
      go install github.com/sourcegraph/zoekt/cmd/zoekt-sourcegraph-indexserver
    checkBinary: .bin/zoekt-sourcegraph-indexserver
    env: &zoektenv
      CTAGS_COMMAND: dev/universal-ctags-dev
      SCIP_CTAGS_COMMAND: dev/scip-ctags-dev
      GRPC_ENABLED: true

  zoekt-index-0:
    <<: *zoekt_indexserver_template
    env:
      <<: *zoektenv
      ZOEKT_NUM: 0
      ZOEKT_HOSTNAME_PORT: 3070
      ZOEKT_LISTEN_PORT: 6072

  zoekt-index-1:
    <<: *zoekt_indexserver_template
    env:
      <<: *zoektenv
      ZOEKT_NUM: 1
      ZOEKT_HOSTNAME_PORT: 3071
      ZOEKT_LISTEN_PORT: 6073
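  # The zoekt-index-* entries above share one definition via YAML anchors (&name)
  # and merge keys (<<:). A minimal sketch of the pattern with hypothetical keys,
  # just to illustrate the merge semantics:
  #
  #   template: &template
  #     cmd: .bin/tool
  #     env: &templateenv
  #       LOG: info
  #   variant:
  #     <<: *template        # copies cmd from the template
  #     env:
  #       <<: *templateenv   # copies LOG, then adds/overrides the keys below
  #       PORT: 1234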
  zoekt-web-template: &zoekt_webserver_template
    install: |
      mkdir -p .bin
      env GOBIN="${PWD}/.bin" go install github.com/sourcegraph/zoekt/cmd/zoekt-webserver
    checkBinary: .bin/zoekt-webserver
    env:
      JAEGER_DISABLED: true
      OPENTELEMETRY_DISABLED: false
      GOGC: 25

  zoekt-web-0:
    <<: *zoekt_webserver_template
    cmd: env PATH="${PWD}/.bin:$PATH" .bin/zoekt-webserver -index "$HOME/.sourcegraph/zoekt/index-0" -pprof -rpc -indexserver_proxy -listen "127.0.0.1:3070"

  zoekt-web-1:
    <<: *zoekt_webserver_template
    cmd: env PATH="${PWD}/.bin:$PATH" .bin/zoekt-webserver -index "$HOME/.sourcegraph/zoekt/index-1" -pprof -rpc -indexserver_proxy -listen "127.0.0.1:3071"
  codeintel-worker:
    cmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
      .bin/codeintel-worker
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/codeintel-worker github.com/sourcegraph/sourcegraph/cmd/precise-code-intel-worker
    checkBinary: .bin/codeintel-worker
    watch:
      - lib
      - internal
      - cmd/precise-code-intel-worker
      - lib/codeintel
  executor-template:
    &executor_template # TMPDIR is set here so it's not set in the `install` process, which would trip up `go build`.
    cmd: |
      env TMPDIR="$HOME/.sourcegraph/executor-temp" .bin/executor
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/executor github.com/sourcegraph/sourcegraph/cmd/executor
    checkBinary: .bin/executor
    env:
      # Required for frontend and executor to communicate
      EXECUTOR_FRONTEND_URL: http://localhost:3080
      # Must match the secret defined in the site config.
      EXECUTOR_FRONTEND_PASSWORD: hunter2hunter2hunter2
      # Disable firecracker inside executor in dev
      EXECUTOR_USE_FIRECRACKER: false
      EXECUTOR_QUEUE_NAME: TEMPLATE
    watch:
      - lib
      - internal
      - cmd/executor
  executor-kubernetes-template: &executor_kubernetes_template
    cmd: |
      cd $MANIFEST_PATH
      cleanup() {
        kubectl delete jobs --all
        kubectl delete -f .
      }
      kubectl delete -f . --ignore-not-found
      kubectl apply -f .
      trap cleanup EXIT SIGINT
      while true; do
        sleep 1
      done
    install: |
      if [[ $(uname) == "Linux" ]]; then
        bazel build //cmd/executor-kubernetes:image_tarball
        docker load --input $(bazel cquery //cmd/executor-kubernetes:image_tarball --output=files)
      else
        bazel build //cmd/executor-kubernetes:image_tarball --config darwin-docker
        docker load --input $(bazel cquery //cmd/executor-kubernetes:image_tarball --config darwin-docker --output=files)
      fi
    env:
      IMAGE: executor-kubernetes:candidate
      # TODO: This is required but should only be set on M1 Macs.
      PLATFORM: linux/arm64
    watch:
      - lib
      - internal
      - cmd/executor
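  # The cmd above applies the manifests in $MANIFEST_PATH and then parks in a
  # sleep loop so sg keeps the process alive; the trap runs cleanup() when the
  # process exits or is interrupted, removing the Jobs and manifests again. A
  # condensed standalone sketch of the same trap pattern:
  #
  #   trap 'kubectl delete jobs --all; kubectl delete -f .' EXIT SIGINT
  #   kubectl apply -f .
  #   while true; do sleep 1; done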
  codeintel-executor:
    <<: *executor_template
    cmd: |
      env TMPDIR="$HOME/.sourcegraph/indexer-temp" .bin/executor
    env:
      EXECUTOR_QUEUE_NAME: codeintel
  # If you want to use this, either start it with `sg run codeintel-executor-firecracker` or
  # modify the relevant command set in your local `sg.config.overwrite.yaml`
  codeintel-executor-firecracker:
    <<: *executor_template
    cmd: |
      env TMPDIR="$HOME/.sourcegraph/codeintel-executor-temp" \
        sudo --preserve-env=TMPDIR,EXECUTOR_QUEUE_NAME,EXECUTOR_FRONTEND_URL,EXECUTOR_FRONTEND_PASSWORD,EXECUTOR_USE_FIRECRACKER \
        .bin/executor
    env:
      EXECUTOR_USE_FIRECRACKER: true
      EXECUTOR_QUEUE_NAME: codeintel
  codeintel-executor-kubernetes:
    <<: *executor_kubernetes_template
    env:
      MANIFEST_PATH: ./cmd/executor/kubernetes/codeintel
  batches-executor:
    <<: *executor_template
    cmd: |
      env TMPDIR="$HOME/.sourcegraph/batches-executor-temp" .bin/executor
    env:
      EXECUTOR_QUEUE_NAME: batches
      EXECUTOR_MAXIMUM_NUM_JOBS: 8
  # If you want to use this, either start it with `sg run batches-executor-firecracker` or
  # modify the `commandsets.batches` in your local `sg.config.overwrite.yaml`
  batches-executor-firecracker:
    <<: *executor_template
    cmd: |
      env TMPDIR="$HOME/.sourcegraph/batches-executor-temp" \
        sudo --preserve-env=TMPDIR,EXECUTOR_QUEUE_NAME,EXECUTOR_FRONTEND_URL,EXECUTOR_FRONTEND_PASSWORD,EXECUTOR_USE_FIRECRACKER \
        .bin/executor
    env:
      EXECUTOR_USE_FIRECRACKER: true
      EXECUTOR_QUEUE_NAME: batches
  batches-executor-kubernetes:
    <<: *executor_kubernetes_template
    env:
      MANIFEST_PATH: ./cmd/executor/kubernetes/batches
Experiment: Natively run SSBC in docker (#44034)
This adds an experimental code path that I will use to test a docker-only execution mode for server-side batch changes. This code path is never executed for customers until we make the switch when we deem it ready. This will allow me to dogfood this while it's not available to customer instances yet.
Ultimately, the goal of this is to make executors simply be "the job runner platform through a generic interface". Today, this depends on src-cli to do a good bunch of the work. This is a blocker for going full docker-based with executors, which will ultimately be a requirement on the road to k8s-based executors.
As this removes the dependency on src-cli, nothing but the job interface and API endpoints tie executor and Sourcegraph instance together. Ultimately, this will allow us to support larger version spans between the two (pending executors going GA and being feature-complete).
Known issues/limitations:
- Steps skipped in between steps that run don't work yet
- Skipping steps dynamically is inefficient: we cannot tell the executor to skip a step IF X, so we replace the script with `exit 0`
- It is unclear if all variants of file mounts still work. Basic cases do work. Files used to be read-only in src-cli; they aren't now, but content is still reset in between steps.
- The assumption that everything operates in /work is broken here, because we need to use what executors give us to persist out-of-repo state in between containers (like the step result from the previous step)
- It is unclear if workspace mounts work
- Cache keys are not correctly computed if using workspace mounts - the metadata retriever is nil
- We still use log outputs to transfer the AfterStepResults to the Sourcegraph instance; this should finally become an artifact instead. Then we don't have to rely on the `execution_log_entries` anymore and can theoretically prune those after some time. This column is currently growing indefinitely.
- It depends on `tee` being available in the docker images to capture the cmd.stdout/cmd.stderr properly for template variable rendering
- Env vars are not rendered in their evaluated form post-execution
- File permissions are unclear and might be similarly broken to how they are now - or even worse
Disclaimer: It's not feature complete today! But it is also not hitting any default code paths either. As development on this goes on, we can eventually remove the feature flag and run the new job format on all instances. This PR handles fallback of rendering old records correctly in the UI already.

  # This tool rebuilds the batcheshelper image every time the source of it is changed.
  batcheshelper-builder:
    # Nothing to run for this, we just want to re-run the install script every time.
    cmd: exit 0
    install: |
      if [[ $(uname) == "Linux" ]]; then
        bazel build //cmd/batcheshelper:image_tarball
        docker load --input $(bazel cquery //cmd/batcheshelper:image_tarball --output=files)
      else
        bazel build //cmd/batcheshelper:image_tarball --config darwin-docker
        docker load --input $(bazel cquery //cmd/batcheshelper:image_tarball --config darwin-docker --output=files)
      fi
    env:
      IMAGE: batcheshelper:candidate
      # TODO: This is required but should only be set on M1 Macs.
      PLATFORM: linux/arm64
    watch:
      - cmd/batcheshelper
      - lib/batches
    continueWatchOnExit: true
  multiqueue-executor:
    <<: *executor_template
    cmd: |
      env TMPDIR="$HOME/.sourcegraph/multiqueue-executor-temp" .bin/executor
    env:
      EXECUTOR_QUEUE_NAME: ""
      EXECUTOR_QUEUE_NAMES: "codeintel,batches"
      EXECUTOR_MAXIMUM_NUM_JOBS: 8
  blobstore:
    cmd: .bin/blobstore
    install: |
      # Ensure the old blobstore Docker container is not running
      docker rm -f blobstore
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -o .bin/blobstore github.com/sourcegraph/sourcegraph/cmd/blobstore
    checkBinary: .bin/blobstore
    watch:
      - lib
      - internal
      - cmd/blobstore
    env:
      BLOBSTORE_DATA_DIR: $HOME/.sourcegraph-dev/data/blobstore-go
  redis-postgres:
    # Add the following overwrites to your sg.config.overwrite.yaml to use the docker-compose
    # database:
    #
    # env:
    #   PGHOST: localhost
    #   PGPASSWORD: sourcegraph
    #   PGUSER: sourcegraph
    #
    # You could also add an overwrite to add `redis-postgres` to the relevant command set(s).
    description: Dockerized version of redis and postgres
    cmd: docker-compose -f dev/redis-postgres.yml up $COMPOSE_ARGS
    env:
      COMPOSE_ARGS: --force-recreate
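  # For example, a minimal sg.config.overwrite.yaml that points local dev at the
  # dockerized database might look like this (a sketch; see the configuration doc
  # linked at the top of this file for the exact overwrite semantics):
  #
  #   env:
  #     PGHOST: localhost
  #     PGPASSWORD: sourcegraph
  #     PGUSER: sourcegraph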
  jaeger:
    cmd: |
      echo "Jaeger will be available on http://localhost:16686/-/debug/jaeger/search"
      .bin/jaeger-all-in-one-${JAEGER_VERSION} --log-level ${JAEGER_LOG_LEVEL}
    install_func: installJaeger
    env:
      JAEGER_VERSION: 1.45.0
      JAEGER_DISK: $HOME/.sourcegraph-dev/data/jaeger
      JAEGER_LOG_LEVEL: error
      QUERY_BASE_PATH: /-/debug/jaeger
  grafana:
    cmd: |
      if [[ $(uname) == "Linux" ]]; then
        # Linux needs an extra arg to support host.docker.internal, which is how grafana connects
        # to the prometheus backend.
        ADD_HOST_FLAG="--add-host=host.docker.internal:host-gateway"

        # Docker users on Linux will generally be using direct user mapping, which
        # means that they'll want the data in the volume mount to be owned by the
        # same user as is running this script. Fortunately, the Grafana container
        # doesn't really care what user it runs as, so long as it can write to
        # /var/lib/grafana.
        DOCKER_USER="--user=$UID"
      fi

      echo "Grafana: serving on http://localhost:${PORT}"
      echo "Grafana: note that logs are piped to ${GRAFANA_LOG_FILE}"
      docker run --rm ${DOCKER_USER} \
        --name=${CONTAINER} \
        --cpus=1 \
        --memory=1g \
        -p 0.0.0.0:3370:3370 ${ADD_HOST_FLAG} \
        -v "${GRAFANA_DISK}":/var/lib/grafana \
        -v "$(pwd)"/dev/grafana/all:/sg_config_grafana/provisioning/datasources \
        grafana:candidate >"${GRAFANA_LOG_FILE}" 2>&1
    install: |
      mkdir -p "${GRAFANA_DISK}"
      mkdir -p "$(dirname ${GRAFANA_LOG_FILE})"
      docker inspect $CONTAINER >/dev/null 2>&1 && docker rm -f $CONTAINER
      bazel build //docker-images/grafana:image_tarball
      docker load --input $(bazel cquery //docker-images/grafana:image_tarball --output=files)
    env:
      GRAFANA_DISK: $HOME/.sourcegraph-dev/data/grafana
      # Log file location: since we log outside of the Docker container, we should
      # log somewhere that's _not_ ~/.sourcegraph-dev/data/grafana, since that gets
      # volume mounted into the container and therefore has its own ownership
      # semantics.
      # Now for the actual logging. Grafana's output gets sent to stdout and stderr.
      # We want to capture that output, but because it's fairly noisy, don't want to
      # display it in the normal case.
      GRAFANA_LOG_FILE: $HOME/.sourcegraph-dev/logs/grafana/grafana.log
      IMAGE: grafana:candidate
      CONTAINER: grafana
      PORT: 3370
      # docker containers must access things via docker host on non-linux platforms
      DOCKER_USER: ""
      ADD_HOST_FLAG: ""
      CACHE: false
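  # A quick one-off way to sanity-check the Linux host-gateway mapping used above
  # (illustrative only, not part of this command):
  #
  #   docker run --rm --add-host=host.docker.internal:host-gateway \
  #     alpine ping -c1 host.docker.internal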
  prometheus:
    cmd: |
      if [[ $(uname) == "Linux" ]]; then
        DOCKER_USER="--user=$UID"

        # Frontend generally runs outside of Docker, so to access it we need to be
        # able to access ports on the host. --net=host is a very dirty way of
        # enabling this.
        DOCKER_NET="--net=host"
        SRC_FRONTEND_INTERNAL="localhost:3090"
      fi

      echo "Prometheus: serving on http://localhost:${PORT}"
      echo "Prometheus: note that logs are piped to ${PROMETHEUS_LOG_FILE}"
      docker run --rm ${DOCKER_NET} ${DOCKER_USER} \
        --name=${CONTAINER} \
        --cpus=1 \
        --memory=4g \
        -p 0.0.0.0:9090:9090 \
        -v "${PROMETHEUS_DISK}":/prometheus \
        -v "$(pwd)/${CONFIG_DIR}":/sg_prometheus_add_ons \
        -e SRC_FRONTEND_INTERNAL="${SRC_FRONTEND_INTERNAL}" \
        -e DISABLE_SOURCEGRAPH_CONFIG="${DISABLE_SOURCEGRAPH_CONFIG:-""}" \
        -e DISABLE_ALERTMANAGER="${DISABLE_ALERTMANAGER:-""}" \
        -e PROMETHEUS_ADDITIONAL_FLAGS="--web.enable-lifecycle --web.enable-admin-api" \
        ${IMAGE} >"${PROMETHEUS_LOG_FILE}" 2>&1
    install: |
      mkdir -p "${PROMETHEUS_DISK}"
      mkdir -p "$(dirname ${PROMETHEUS_LOG_FILE})"

      docker inspect $CONTAINER >/dev/null 2>&1 && docker rm -f $CONTAINER

      if [[ $(uname) == "Linux" ]]; then
        PROM_TARGETS="dev/prometheus/linux/prometheus_targets.yml"
      fi

      cp ${PROM_TARGETS} "${CONFIG_DIR}"/prometheus_targets.yml

      if [[ $(uname) == "Linux" ]]; then
        bazel build //docker-images/prometheus:image_tarball
        docker load --input $(bazel cquery //docker-images/prometheus:image_tarball --output=files)
      else
        bazel build //docker-images/prometheus:image_tarball --config darwin-docker
        docker load --input $(bazel cquery //docker-images/prometheus:image_tarball --config darwin-docker --output=files)
      fi
    env:
      PROMETHEUS_DISK: $HOME/.sourcegraph-dev/data/prometheus
      # See comment above for `grafana`
      PROMETHEUS_LOG_FILE: $HOME/.sourcegraph-dev/logs/prometheus/prometheus.log
      IMAGE: prometheus:candidate
      CONTAINER: prometheus
      PORT: 9090
      CONFIG_DIR: docker-images/prometheus/config
      DOCKER_USER: ""
      DOCKER_NET: ""
      PROM_TARGETS: dev/prometheus/all/prometheus_targets.yml
      SRC_FRONTEND_INTERNAL: host.docker.internal:3090
      ADD_HOST_FLAG: ""
      DISABLE_SOURCEGRAPH_CONFIG: false
  postgres_exporter:
    cmd: |
      if [[ $(uname) == "Linux" ]]; then
        # Linux needs an extra arg to support host.docker.internal, which is how
        # the exporter reaches the Postgres instance running on the host.
        ADD_HOST_FLAG="--add-host=host.docker.internal:host-gateway"
      fi

      # Use psql to read the effective values for PG* env vars (instead of, e.g., hardcoding the default
      # values).
      get_pg_env() { psql -c '\set' | grep "$1" | cut -f 2 -d "'"; }
      PGHOST=${PGHOST-$(get_pg_env HOST)}
      PGUSER=${PGUSER-$(get_pg_env USER)}
      PGPORT=${PGPORT-$(get_pg_env PORT)}
      # we need to be able to query the migration_logs table
      PGDATABASE=${PGDATABASE-$(get_pg_env DBNAME)}

      ADJUSTED_HOST=${PGHOST:-127.0.0.1}
      if [[ ("$ADJUSTED_HOST" == "localhost" || "$ADJUSTED_HOST" == "127.0.0.1" || -f "$ADJUSTED_HOST") && "$OSTYPE" != "linux-gnu" ]]; then
        ADJUSTED_HOST="host.docker.internal"
      fi

      NET_ARG=""
      DATA_SOURCE_NAME="postgresql://${PGUSER}:${PGPASSWORD}@${ADJUSTED_HOST}:${PGPORT}/${PGDATABASE}?sslmode=${PGSSLMODE:-disable}"

      if [[ "$OSTYPE" == "linux-gnu" ]]; then
        NET_ARG="--net=host"
        DATA_SOURCE_NAME="postgresql://${PGUSER}:${PGPASSWORD}@${ADJUSTED_HOST}:${PGPORT}/${PGDATABASE}?sslmode=${PGSSLMODE:-disable}"
      fi

      echo "postgres_exporter: serving on http://localhost:${PORT}"
      docker run --rm ${DOCKER_USER} \
        --name=${CONTAINER} \
        -e DATA_SOURCE_NAME="${DATA_SOURCE_NAME}" \
        --cpus=1 \
        --memory=1g \
        -p 0.0.0.0:9187:9187 ${ADD_HOST_FLAG} \
        "${IMAGE}"
    install: |
      docker inspect $CONTAINER >/dev/null 2>&1 && docker rm -f $CONTAINER
      bazel build //docker-images/postgres_exporter:image_tarball
      docker load --input $(bazel cquery //docker-images/postgres_exporter:image_tarball --output=files)
    env:
      IMAGE: postgres-exporter:candidate
      CONTAINER: postgres_exporter
      # docker containers must access things via docker host on non-linux platforms
      DOCKER_USER: ""
      ADD_HOST_FLAG: ""
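  # For reference, `psql -c '\set'` prints the effective connection settings as
  # lines like HOST = '127.0.0.1', which is what get_pg_env slices apart. An
  # illustrative session (values depend on your environment):
  #
  #   get_pg_env HOST   # -> 127.0.0.1
  #   get_pg_env PORT   # -> 5432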

  monitoring-generator:
    cmd: echo "monitoring-generator is deprecated, please run 'sg generate go' or 'bazel run //dev:write_all_generated' instead"
    env:
  loki:
    cmd: |
      echo "Loki: serving on http://localhost:3100"
      echo "Loki: note that logs are piped to ${LOKI_LOG_FILE}"
      docker run --rm --name=loki \
        -p 3100:3100 -v $LOKI_DISK:/loki \
        index.docker.io/grafana/loki:$LOKI_VERSION >"${LOKI_LOG_FILE}" 2>&1
    install: |
      mkdir -p "${LOKI_DISK}"
      mkdir -p "$(dirname ${LOKI_LOG_FILE})"
      docker pull index.docker.io/grafana/loki:$LOKI_VERSION
    env:
      LOKI_DISK: $HOME/.sourcegraph-dev/data/loki
      LOKI_VERSION: "2.3.0"
      LOKI_LOG_FILE: $HOME/.sourcegraph-dev/logs/loki/loki.log
  otel-collector:
    install: |
      if [[ $(uname) == "Linux" ]]; then
        bazel build //docker-images/opentelemetry-collector:image_tarball
        docker load --input $(bazel cquery //docker-images/opentelemetry-collector:image_tarball --output=files)
      else
        bazel build //docker-images/opentelemetry-collector:image_tarball --config darwin-docker
        docker load --input $(bazel cquery //docker-images/opentelemetry-collector:image_tarball --config darwin-docker --output=files)
      fi
    description: OpenTelemetry collector
    cmd: |
      JAEGER_HOST='host.docker.internal'
      if [[ $(uname) == "Linux" ]]; then
        # Jaeger generally runs outside of Docker, so to access it we need to be
        # able to access ports on the host, because the Docker host only exists on
        # MacOS. --net=host is a very dirty way of enabling this.
        DOCKER_NET="--net=host"
        JAEGER_HOST="localhost"
      fi

      docker container rm -f otel-collector
      docker run --rm --name=otel-collector $DOCKER_NET $DOCKER_ARGS \
        -p 4317:4317 -p 4318:4318 -p 55679:55679 -p 55670:55670 \
        -p 8888:8888 \
        -e JAEGER_HOST=$JAEGER_HOST \
        -e HONEYCOMB_API_KEY=$HONEYCOMB_API_KEY \
        -e HONEYCOMB_DATASET=$HONEYCOMB_DATASET \
        $IMAGE --config "/etc/otel-collector/$CONFIGURATION_FILE"
    env:
      IMAGE: opentelemetry-collector:candidate
      # Overwrite the following in sg.config.overwrite.yaml, based on which collector
      # config you are using - see docker-images/opentelemetry-collector for more details.
      CONFIGURATION_FILE: "configs/jaeger.yaml"
      # HONEYCOMB_API_KEY: ''
      # HONEYCOMB_DATASET: ''
  storybook:
    cmd: pnpm storybook
    install: pnpm install

  # This will execute `env`, a utility to print the process environment. Can
  # be used to debug which global vars `sg` uses.
  debug-env:
    description: Debug env vars
    cmd: env

  bext:
    cmd: pnpm --filter @sourcegraph/browser dev
    install: pnpm install
  sourcegraph: &sourcegraph_command
    description: Single-program distribution
    cmd: |
      unset SRC_GIT_SERVERS INDEXED_SEARCH_SERVERS REDIS_ENDPOINT

      # TODO: This should be fixed
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
      # If EXTSVC_CONFIG_FILE is *unset*, set a default.
      export EXTSVC_CONFIG_FILE=${EXTSVC_CONFIG_FILE-'../dev-private/enterprise/dev/external-services-config.json'}

      .bin/sourcegraph
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -ldflags="-X github.com/sourcegraph/sourcegraph/internal/conf/deploy.forceType=single-program" -o .bin/sourcegraph github.com/sourcegraph/sourcegraph/cmd/sourcegraph
    checkBinary: .bin/sourcegraph
    env:
      SITE_CONFIG_FILE: "../dev-private/enterprise/dev/site-config.json"
      SITE_CONFIG_ESCAPE_HATCH_PATH: "$HOME/.sourcegraph/site-config.json"
      WEBPACK_DEV_SERVER: 1
    watch:
      - cmd
      - enterprise
      - internal
      - lib
      - schema
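  # The -ldflags="-X ..." trick above bakes a value into a package-level string
  # variable at link time; `sourcegraph` and `cody-app` below differ only in the
  # deploy.forceType value they link in. The generic form of the flag is:
  #
  #   go build -ldflags="-X <import/path>.<StringVar>=<value>" <package>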
  cody-app:
    <<: *sourcegraph_command
    description: Cody App
    install: |
      if [ -n "$DELVE" ]; then
        export GCFLAGS='all=-N -l'
      fi
      go build -gcflags="$GCFLAGS" -ldflags="-X github.com/sourcegraph/sourcegraph/internal/conf/deploy.forceType=app" -o .bin/sourcegraph github.com/sourcegraph/sourcegraph/cmd/sourcegraph
  tauri:
    description: App shell (Tauri)
    cmd: pnpm tauri dev --config src-tauri/tauri.dev.conf.json

bazelCommands:
  blobstore:
    target: //cmd/blobstore:blobstore
  searcher:
    target: //cmd/searcher
  syntax-highlighter:
    target: //docker-images/syntax-highlighter:syntect_server
    ignoreStdout: true
    ignoreStderr: true
    env:
      # Environment copied from Dockerfile
      WORKERS: "1"
      ROCKET_ENV: "production"
      ROCKET_LIMITS: "{json=10485760}"
      ROCKET_SECRET_KEY: "SeerutKeyIsI7releuantAndknvsuZPluaseIgnorYA="
      ROCKET_KEEP_ALIVE: "0"
      ROCKET_PORT: "9238"
      QUIET: "true"
  frontend:
    description: Enterprise frontend
    target: //cmd/frontend
    precmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
      # If EXTSVC_CONFIG_FILE is *unset*, set a default.
      export EXTSVC_CONFIG_FILE=${EXTSVC_CONFIG_FILE-'../dev-private/enterprise/dev/external-services-config.json'}
    env:
      CONFIGURATION_MODE: server
      USE_ENHANCED_LANGUAGE_DETECTION: false
      SITE_CONFIG_FILE: "../dev-private/enterprise/dev/site-config.json"
      SITE_CONFIG_ESCAPE_HATCH_PATH: "$HOME/.sourcegraph/site-config.json"
      # frontend processes need this to be set so that the paths to the assets are rendered correctly
      WEBPACK_DEV_SERVER: 1
  worker:
    target: //cmd/worker
    precmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
  repo-updater:
    target: //cmd/repo-updater
    precmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
  symbols:
    target: //enterprise/cmd/symbols
    checkBinary: .bin/symbols
    env:
      CTAGS_COMMAND: dev/universal-ctags-dev
      SCIP_CTAGS_COMMAND: dev/scip-ctags-dev
      CTAGS_PROCESSES: 2
      USE_ROCKSKIP: "false"
  gitserver-template: &gitserver_bazel_template
    target: //cmd/gitserver
    env: &gitserverenv
      HOSTNAME: 127.0.0.1:3178
  # This is only here to stay backwards-compatible with people's custom
  # `sg.config.overwrite.yaml` files
  gitserver:
    <<: *gitserver_bazel_template
  gitserver-0:
    <<: *gitserver_bazel_template
    env:
      <<: *gitserverenv
      GITSERVER_EXTERNAL_ADDR: 127.0.0.1:3501
      GITSERVER_ADDR: 127.0.0.1:3501
      SRC_REPOS_DIR: $HOME/.sourcegraph/repos_1
      SRC_PROF_HTTP: 127.0.0.1:3551
  gitserver-1:
    <<: *gitserver_bazel_template
    env:
      <<: *gitserverenv
      GITSERVER_EXTERNAL_ADDR: 127.0.0.1:3502
      GITSERVER_ADDR: 127.0.0.1:3502
      SRC_REPOS_DIR: $HOME/.sourcegraph/repos_2
      SRC_PROF_HTTP: 127.0.0.1:3552
  codeintel-worker:
    precmd: |
      export SOURCEGRAPH_LICENSE_GENERATION_KEY=$(cat ../dev-private/enterprise/dev/test-license-generation-key.pem)
    target: //cmd/precise-code-intel-worker
  executor-template: &executor_template_bazel
    target: //cmd/executor
    env:
      EXECUTOR_QUEUE_NAME: TEMPLATE
      TMPDIR: $HOME/.sourcegraph/executor-temp
      # Required for frontend and executor to communicate
      EXECUTOR_FRONTEND_URL: http://localhost:3080
      # Must match the secret defined in the site config.
      EXECUTOR_FRONTEND_PASSWORD: hunter2hunter2hunter2
      # Disable firecracker inside executor in dev
      EXECUTOR_USE_FIRECRACKER: false
  codeintel-executor:
    <<: *executor_template_bazel
    env:
      EXECUTOR_QUEUE_NAME: codeintel
      TMPDIR: $HOME/.sourcegraph/indexer-temp
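  # The entries above map sg commands onto Bazel targets. The same targets can be
  # built or run directly with standard Bazel invocations, e.g.:
  #
  #   bazel build //cmd/searcher
  #   bazel run //cmd/gitserver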

#
# CommandSets ################################################################
#
defaultCommandset: enterprise

commandsets:
  enterprise-bazel: &enterprise_bazel_set
    requiresDevPrivate: true
    checks:
      - redis
      - postgres
      - git
      - bazelisk
      - ibazel
    bazelCommands:
      - blobstore
      - frontend
      - worker
      - repo-updater
      - gitserver-0
      - gitserver-1
      - searcher
      - symbols
      - syntax-highlighter
    commands:
      - web
      - docsite
      - zoekt-index-0
      - zoekt-index-1
      - zoekt-web-0
      - zoekt-web-1
      - caddy

  # If you modify this command set, please consider also updating the dotcom runset.
  enterprise: &enterprise_set
    requiresDevPrivate: true
    checks:
      - docker
      - redis
      - postgres
      - git
    commands:
      - frontend
      - worker
      - repo-updater
      - web
      - gitserver-0
      - gitserver-1
      - searcher
      - caddy
      - symbols
      - docsite
      - syntax-highlighter
      - zoekt-index-0
      - zoekt-index-1
      - zoekt-web-0
      - zoekt-web-1
      - blobstore
      - embeddings
      - telemetry-gateway
    env:
      DISABLE_CODE_INSIGHTS_HISTORICAL: false
      DISABLE_CODE_INSIGHTS: false
  enterprise-e2e:
    <<: *enterprise_set
    env:
      # EXTSVC_CONFIG_FILE being set prevents the e2e test suite from adding
      # additional connections.
      EXTSVC_CONFIG_FILE: ""
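  # To run a trimmed-down environment, you can also define your own command set in
  # sg.config.overwrite.yaml and start it with `sg start <name>` (a sketch only;
  # overwrite merge semantics are described in the configuration doc linked at the
  # top of this file):
  #
  #   commandsets:
  #     minimal:
  #       commands:
  #         - frontend
  #         - web
  #         - gitserver-0
  #         - caddy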
  dotcom:
    # This is 95% the enterprise runset, with the addition of Cody Gateway.
    requiresDevPrivate: true
    checks:
      - docker
      - redis
      - postgres
      - git
    commands:
      - frontend
      - worker
      - repo-updater
      - web
      - gitserver-0
      - gitserver-1
      - searcher
      - symbols
      - caddy
      - docsite
      - syntax-highlighter
      - zoekt-index-0
      - zoekt-index-1
      - zoekt-web-0
      - zoekt-web-1
      - blobstore
      - cody-gateway
      - embeddings
telemetrygateway: add exporter and service (#56699)
2023-09-20 05:20:15 +00:00
|
|
|
- telemetry-gateway
|
2021-09-08 08:41:06 +00:00
|
|
|
env:
|
|
|
|
|
SOURCEGRAPHDOTCOM_MODE: true
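# A hypothetical refactor sketch: `enterprise-e2e` above reuses the whole
# enterprise set via `<<: *enterprise_set`. A YAML merge key replaces list
# values wholesale rather than appending to them, so a merged dotcom set would
# still have to restate the full `commands` list, which is presumably why the
# list is written out explicitly here:
#
#   dotcom:
#     <<: *enterprise_set  # hypothetical; assumes the enterprise set defines this anchor
#     commands:            # lists are replaced, not merged, so restate in full
#       - frontend
#       # ... remaining shared commands ...
#       - cody-gateway
#       - telemetry-gateway
#     env:
#       SOURCEGRAPHDOTCOM_MODE: true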
|
|
|
|
|
|
2023-03-02 10:31:51 +00:00
|
|
|
codeintel-bazel: &codeintel_bazel_set
|
|
|
|
|
requiresDevPrivate: true
|
|
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
|
|
|
|
- git
|
2023-03-16 09:54:01 +00:00
|
|
|
- bazelisk
|
2023-03-03 13:58:50 +00:00
|
|
|
- ibazel
|
2023-03-02 10:31:51 +00:00
|
|
|
bazelCommands:
|
2023-05-17 12:34:53 +00:00
|
|
|
- blobstore
|
2023-03-02 10:31:51 +00:00
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
|
|
|
|
- searcher
|
|
|
|
|
- symbols
|
2023-05-17 11:52:42 +00:00
|
|
|
- syntax-highlighter
|
2023-03-02 10:31:51 +00:00
|
|
|
- codeintel-worker
|
|
|
|
|
- codeintel-executor
|
|
|
|
|
commands:
|
|
|
|
|
- web
|
|
|
|
|
- docsite
|
|
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
|
|
|
|
- caddy
|
|
|
|
|
- jaeger
|
|
|
|
|
- grafana
|
|
|
|
|
- prometheus
|
|
|
|
|
|
2023-06-06 20:45:04 +00:00
|
|
|
codeintel:
|
2021-09-29 14:22:16 +00:00
|
|
|
requiresDevPrivate: true
|
2021-07-21 15:37:24 +00:00
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
2022-02-12 18:36:28 +00:00
|
|
|
- git
|
2023-06-06 20:45:04 +00:00
|
|
|
commands:
|
|
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
|
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
|
|
|
|
- searcher
|
|
|
|
|
- symbols
|
|
|
|
|
- caddy
|
|
|
|
|
- docsite
|
|
|
|
|
- syntax-highlighter
|
|
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
|
|
|
|
- blobstore
|
|
|
|
|
- codeintel-worker
|
|
|
|
|
- codeintel-executor
|
|
|
|
|
# - otel-collector
|
|
|
|
|
- jaeger
|
|
|
|
|
- grafana
|
|
|
|
|
- prometheus
|
Executors: enable dequeueing and heartbeating for multiple queues (#52016)
- Closes https://github.com/sourcegraph/sourcegraph/issues/50614
Depends on the following issues:
- https://github.com/sourcegraph/sourcegraph/issues/50616
- Merged in https://github.com/sourcegraph/sourcegraph/pull/52020
- https://github.com/sourcegraph/sourcegraph/issues/51656
- Merged in https://github.com/sourcegraph/sourcegraph/pull/52381
- https://github.com/sourcegraph/sourcegraph/issues/51658
- Merged in https://github.com/sourcegraph/sourcegraph/pull/52525
Due to the dependency on the work listed above, all PRs were ultimately
merged into this branch, so the client-side implementation got a little
messy to review, for which I apologize. The initial intent of this PR,
closing #50614, was reviewed before the merges occurred.
## Demos
### Scenario 1: old Sourcegraph version, new executor version
#### batches, single queue
https://github.com/sourcegraph/sourcegraph/assets/2979513/6d325326-d4f5-4e6f-a5e0-0918dbde07bf
#### codeintel, single queue
https://github.com/sourcegraph/sourcegraph/assets/2979513/ff3a87db-ef07-4221-a159-73b93a6ae55b
### Scenario 2: new Sourcegraph version, old executor version
#### batches, single queue
https://github.com/sourcegraph/sourcegraph/assets/2979513/c66a9b9f-b745-4abc-bf87-9077dd5f2959
#### codeintel, single queue
https://github.com/sourcegraph/sourcegraph/assets/2979513/785d7f34-630f-4c2a-abb6-82ca3b896813
### Scenario 3: new Sourcegraph version, new executor version
#### batches, single queue
https://github.com/sourcegraph/sourcegraph/assets/2979513/69c5c06b-ea1c-47f7-9196-e3f108794493
#### codeintel, single queue
https://github.com/sourcegraph/sourcegraph/assets/2979513/4e791d98-06cd-4fa7-93dd-81be0a73c744
#### batches + codeintel in parallel, multi queue
https://github.com/sourcegraph/sourcegraph/assets/2979513/004608dd-f98c-4305-aeb3-a01444c12224
## Initial PR description (enable client to dequeue from multiple queues)
This PR updates the client's `Dequeue` method to dequeue from multiple
queues. It's backwards compatible with single-queue configurations. The
field `Job.Queue` is only set when dequeuing from the general `/dequeue`
endpoint, so the job `MarkXxx` methods default to single-queue behaviour
if the job does not specify a queue name.
Unrelated to the linked issue, the multi-handler will log and return no
content early in the event of an empty queue list in a dequeue request
(although this should never occur).
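As a hedged illustration of what multi-queue configuration might look like on the executor side (the variable name `EXECUTOR_QUEUE_NAMES` is my assumption here, not taken from this PR; check the executor docs):
```yaml
env:
  # Single queue (existing behaviour):
  # EXECUTOR_QUEUE_NAME: "batches"
  # Multiple queues (assumed variable): dequeue batches and codeintel jobs
  # over one dequeue/heartbeat connection
  EXECUTOR_QUEUE_NAMES: "batches,codeintel"
```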
## Test plan
- [x] Unit tests
- [x] Local testing
- [x] Dogfood testing
- [x] Demo
<!-- All pull requests REQUIRE a test plan:
https://docs.sourcegraph.com/dev/background-information/testing_principles
-->
2023-06-04 13:25:05 +00:00
|
|
|
|
|
|
|
|
codeintel-kubernetes:
|
2023-06-06 20:45:04 +00:00
|
|
|
requiresDevPrivate: true
|
|
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
|
|
|
|
- git
|
2021-07-21 15:37:24 +00:00
|
|
|
commands:
|
2022-06-20 17:57:54 +00:00
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
2022-10-10 13:26:58 +00:00
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
2021-07-21 15:37:24 +00:00
|
|
|
- searcher
|
2022-06-20 17:57:54 +00:00
|
|
|
- symbols
|
2021-07-21 15:37:24 +00:00
|
|
|
- caddy
|
|
|
|
|
- docsite
|
2021-09-28 04:18:12 +00:00
|
|
|
- syntax-highlighter
|
2022-06-20 17:57:54 +00:00
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
2022-11-30 23:04:48 +00:00
|
|
|
- blobstore
|
2022-06-20 17:57:54 +00:00
|
|
|
- codeintel-worker
|
Executors: enable dequeueing and heartbeating for multiple queues (#52016)
2023-06-04 13:25:05 +00:00
|
|
|
- codeintel-executor-kubernetes
|
2022-12-05 18:10:39 +00:00
|
|
|
# - otel-collector
|
2021-11-30 17:56:45 +00:00
|
|
|
- jaeger
|
|
|
|
|
- grafana
|
|
|
|
|
- prometheus
|
2021-05-21 16:01:11 +00:00
|
|
|
|
Executors: enable dequeueing and heartbeating for multiple queues (#52016)
2023-06-04 13:25:05 +00:00
|
|
|
enterprise-codeintel:
|
2023-06-06 20:45:04 +00:00
|
|
|
requiresDevPrivate: true
|
|
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
|
|
|
|
- git
|
2023-06-01 16:28:13 +00:00
|
|
|
commands:
|
|
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
|
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
|
|
|
|
- searcher
|
|
|
|
|
- symbols
|
|
|
|
|
- caddy
|
|
|
|
|
- docsite
|
|
|
|
|
- syntax-highlighter
|
|
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
|
|
|
|
- blobstore
|
|
|
|
|
- codeintel-worker
|
Executors: enable dequeueing and heartbeating for multiple queues (#52016)
2023-06-04 13:25:05 +00:00
|
|
|
- codeintel-executor
|
2023-06-01 16:28:13 +00:00
|
|
|
# - otel-collector
|
|
|
|
|
- jaeger
|
|
|
|
|
- grafana
|
|
|
|
|
- prometheus
|
Executors: enable dequeueing and heartbeating for multiple queues (#52016)
2023-06-04 13:25:05 +00:00
|
|
|
enterprise-codeintel-multi-queue-executor:
|
2023-06-06 20:45:04 +00:00
|
|
|
requiresDevPrivate: true
|
|
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
|
|
|
|
- git
|
Executors: enable dequeueing and heartbeating for multiple queues (#52016)
2023-06-04 13:25:05 +00:00
|
|
|
commands:
|
|
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
|
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
|
|
|
|
- searcher
|
|
|
|
|
- symbols
|
|
|
|
|
- caddy
|
|
|
|
|
- docsite
|
|
|
|
|
- syntax-highlighter
|
|
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
|
|
|
|
- blobstore
|
|
|
|
|
- codeintel-worker
|
|
|
|
|
- multiqueue-executor
|
|
|
|
|
# - otel-collector
|
|
|
|
|
- jaeger
|
|
|
|
|
- grafana
|
|
|
|
|
- prometheus
|
|
|
|
|
|
2023-03-02 10:31:51 +00:00
|
|
|
enterprise-codeintel-bazel:
|
|
|
|
|
<<: *codeintel_bazel_set
|
2022-03-11 15:06:49 +00:00
|
|
|
|
2021-05-21 16:01:11 +00:00
|
|
|
enterprise-codeinsights:
|
2021-09-29 14:22:16 +00:00
|
|
|
requiresDevPrivate: true
|
2021-07-21 15:37:24 +00:00
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
2022-02-12 18:36:28 +00:00
|
|
|
- git
|
2021-07-21 15:37:24 +00:00
|
|
|
commands:
|
2022-06-20 17:57:54 +00:00
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
2022-10-10 13:26:58 +00:00
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
2021-07-21 15:37:24 +00:00
|
|
|
- searcher
|
|
|
|
|
- symbols
|
|
|
|
|
- caddy
|
|
|
|
|
- docsite
|
2021-09-28 04:18:12 +00:00
|
|
|
- syntax-highlighter
|
2022-06-20 17:57:54 +00:00
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
2023-01-11 13:51:36 +00:00
|
|
|
- blobstore
|
2021-09-08 08:41:06 +00:00
|
|
|
env:
|
|
|
|
|
DISABLE_CODE_INSIGHTS_HISTORICAL: false
|
|
|
|
|
DISABLE_CODE_INSIGHTS: false
|
2021-05-21 16:01:11 +00:00
|
|
|
|
2021-05-25 08:33:48 +00:00
|
|
|
api-only:
|
2021-09-29 14:22:16 +00:00
|
|
|
requiresDevPrivate: true
|
2021-07-21 15:37:24 +00:00
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
2022-02-12 18:36:28 +00:00
|
|
|
- git
|
2021-07-21 15:37:24 +00:00
|
|
|
commands:
|
2022-06-20 17:57:54 +00:00
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
2022-10-10 13:26:58 +00:00
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
2021-07-21 15:37:24 +00:00
|
|
|
- searcher
|
|
|
|
|
- symbols
|
2022-06-20 17:57:54 +00:00
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
2023-01-11 13:51:36 +00:00
|
|
|
- blobstore
|
2021-05-25 08:33:48 +00:00
|
|
|
|
2021-07-05 11:06:52 +00:00
|
|
|
batches:
|
2021-09-29 14:22:16 +00:00
|
|
|
requiresDevPrivate: true
|
2021-07-21 15:37:24 +00:00
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
2022-02-12 18:36:28 +00:00
|
|
|
- git
|
2021-07-21 15:37:24 +00:00
|
|
|
commands:
|
2022-06-20 17:57:54 +00:00
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
2022-10-10 13:26:58 +00:00
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
2021-07-21 15:37:24 +00:00
|
|
|
- searcher
|
2022-06-20 17:57:54 +00:00
|
|
|
- symbols
|
2021-07-21 15:37:24 +00:00
|
|
|
- caddy
|
|
|
|
|
- docsite
|
2021-09-28 04:18:12 +00:00
|
|
|
- syntax-highlighter
|
2022-06-20 17:57:54 +00:00
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
2023-01-11 13:51:36 +00:00
|
|
|
- blobstore
|
2021-07-21 15:37:24 +00:00
|
|
|
- batches-executor
|
Experiment: Natively run SSBC in docker (#44034)
This adds an experimental code path that I will use to test a docker-only execution mode for server-side batch changes. This code path is never executed for customers until we make the switch when we deem it ready. This will allow me to dogfood this while it's not available to customer instances yet.
Ultimately, the goal of this is to make executors simply be "the job runner platform through a generic interface". Today, this depends on src-cli to do a good bunch of the work. This is a blocker for going full docker-based with executors, which will ultimately be a requirement on the road to k8s-based executors.
As this removes the dependency on src-cli, nothing but the job interface and API endpoints tie executor and Sourcegraph instance together. Ultimately, this will allow us to support larger version spans between the two (pending executors going GA and being feature-complete).
Known issues/limitations:
- Steps skipped in between steps that run don't work yet.
- Skipping steps dynamically is inefficient: we cannot tell the executor to skip a step if some condition holds, so we replace the step's script with `exit 0`.
- It is unclear if all variants of file mounts still work. Basic cases do work. Files used to be read-only in src-cli; they aren't now, but content is still reset between steps.
- The assumption that everything operates in /work is broken here, because we need to use what executors give us to persist out-of-repo state between containers (like the step result from the previous step).
- It is unclear if workspace mounts work.
- Cache keys are not correctly computed when using workspace mounts, because the metadata retriever is nil.
- We still use log outputs to transfer the AfterStepResults to the Sourcegraph instance; this should finally become an artifact instead. Then we would no longer have to rely on the execution log entries and could theoretically prune them after some time. That column is currently growing indefinitely.
- It depends on `tee` being available in the Docker images to capture cmd.stdout/cmd.stderr properly for template variable rendering.
- Env vars are not rendered in their evaluated form post-execution.
- File permissions are unclear and might be similarly broken to how they are now, or even worse.
Disclaimer: it's not feature-complete today! But it is also not hitting any default code paths either. As development on this goes on, we can eventually remove the feature flag and run the new job format on all instances. This PR already handles rendering old records correctly in the UI.
2022-11-09 23:20:43 +00:00
|
|
|
- batcheshelper-builder
|
2021-07-05 11:06:52 +00:00
|
|
|
|
2023-06-01 16:28:13 +00:00
|
|
|
batches-kubernetes:
|
|
|
|
|
requiresDevPrivate: true
|
|
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
|
|
|
|
- git
|
|
|
|
|
commands:
|
|
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
|
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
|
|
|
|
- searcher
|
|
|
|
|
- symbols
|
|
|
|
|
- caddy
|
|
|
|
|
- docsite
|
|
|
|
|
- syntax-highlighter
|
|
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
|
|
|
|
- blobstore
|
|
|
|
|
- batches-executor-kubernetes
|
|
|
|
|
- batcheshelper-builder
|
|
|
|
|
|
2022-06-10 06:28:27 +00:00
|
|
|
iam:
|
2021-09-29 14:22:16 +00:00
|
|
|
requiresDevPrivate: true
|
2021-07-23 07:59:43 +00:00
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
2022-02-12 18:36:28 +00:00
|
|
|
- git
|
2021-07-23 07:59:43 +00:00
|
|
|
commands:
|
2022-06-20 17:57:54 +00:00
|
|
|
- frontend
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
2022-10-10 13:26:58 +00:00
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
2021-07-23 07:59:43 +00:00
|
|
|
- caddy
|
|
|
|
|
|
2021-08-02 08:25:39 +00:00
|
|
|
monitoring:
|
2021-08-09 10:30:54 +00:00
|
|
|
checks:
|
|
|
|
|
- docker
|
2021-08-02 08:25:39 +00:00
|
|
|
commands:
|
|
|
|
|
- jaeger
|
2022-08-29 16:42:36 +00:00
|
|
|
- otel-collector
|
2021-08-02 08:25:39 +00:00
|
|
|
- prometheus
|
|
|
|
|
- grafana
|
|
|
|
|
- postgres_exporter
|
|
|
|
|
|
2021-08-09 10:30:54 +00:00
|
|
|
monitoring-alerts:
|
|
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
- redis
|
|
|
|
|
- postgres
|
|
|
|
|
commands:
|
|
|
|
|
- prometheus
|
|
|
|
|
- grafana
|
|
|
|
|
# For generated alerts docs
|
|
|
|
|
- docsite
|
|
|
|
|
# For the alerting integration with frontend
|
2022-06-20 17:57:54 +00:00
|
|
|
- frontend
|
|
|
|
|
- web
|
2021-08-09 10:30:54 +00:00
|
|
|
- caddy
|
|
|
|
|
|
2021-10-12 10:44:39 +00:00
|
|
|
web-standalone:
|
|
|
|
|
commands:
|
2021-10-22 10:30:29 +00:00
|
|
|
- web-standalone-http
|
2021-10-12 10:44:39 +00:00
|
|
|
- caddy
|
|
|
|
|
|
|
|
|
|
web-standalone-prod:
|
|
|
|
|
commands:
|
2021-10-22 10:30:29 +00:00
|
|
|
- web-standalone-http-prod
|
2021-10-12 10:44:39 +00:00
|
|
|
- caddy
|
|
|
|
|
|
2022-07-26 14:51:04 +00:00
|
|
|
# For testing our OpenTelemetry stack
|
2022-07-05 18:28:15 +00:00
|
|
|
otel:
|
|
|
|
|
checks:
|
|
|
|
|
- docker
|
|
|
|
|
commands:
|
|
|
|
|
- otel-collector
|
|
|
|
|
- jaeger
|
|
|
|
|
|
2023-09-30 03:55:26 +00:00
|
|
|
single-program:
|
2023-01-20 00:35:39 +00:00
|
|
|
requiresDevPrivate: true
|
|
|
|
|
checks:
|
|
|
|
|
- git
|
|
|
|
|
commands:
|
|
|
|
|
- sourcegraph
|
2023-09-30 03:55:26 +00:00
|
|
|
- web
|
|
|
|
|
- caddy
|
|
|
|
|
env:
|
|
|
|
|
DISABLE_CODE_INSIGHTS: false
|
|
|
|
|
PRECISE_CODE_INTEL_UPLOAD_AWS_ENDPOINT: http://localhost:49000
|
|
|
|
|
EMBEDDINGS_UPLOAD_AWS_ENDPOINT: http://localhost:49000
|
|
|
|
|
USE_EMBEDDED_POSTGRESQL: false
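# A minimal local-override sketch, assuming the standard sg.config.overwrite.yaml
# file merges over this config (the enclosing commandsets key and the variable's
# semantics are assumptions; verify against the sg docs):
#
#   commandsets:
#     single-program:
#       env:
#         # Assumed: the single binary manages its own Postgres when true.
#         USE_EMBEDDED_POSTGRESQL: true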
|
|
|
|
|
|
|
|
|
|
app:
|
|
|
|
|
requiresDevPrivate: true
|
|
|
|
|
checks:
|
|
|
|
|
- git
|
|
|
|
|
commands:
|
|
|
|
|
- cody-app
|
2023-01-20 00:35:39 +00:00
|
|
|
- docsite
|
|
|
|
|
- web
|
|
|
|
|
- caddy
|
2023-04-24 17:26:18 +00:00
|
|
|
- tauri
|
2023-03-14 16:22:09 +00:00
|
|
|
env:
|
2023-06-26 11:17:01 +00:00
|
|
|
DISABLE_CODE_INSIGHTS: true
|
update code references to "Sourcegraph App" -> "Cody App" (#56747)
This updates variable names, property names, env var names, etc., to call it "Cody App".
The entire diff was created by running the following commands:
```
fastmod -e go SourcegraphAppMode CodyAppMode
fastmod -e go,ts,tsx sourcegraphAppMode codyAppMode
fastmod -e ts,tsx isSourcegraphApp isCodyApp
fastmod -e ts,tsx,go,yaml,sh,js SOURCEGRAPH_APP CODY_APP
fastmod -e ts,tsx,go,json,mod,graphql,md,js 'Sourcegraph App\b' 'Cody App'
fastmod -e ts,tsx,go,json,mod,graphql,md,js 'Sourcegraph app\b' 'Cody app' # with a few changes skipped
```
2023-09-19 22:31:12 +00:00
|
|
|
CODY_APP: 1
|
2023-06-19 18:25:00 +00:00
|
|
|
EXTSVC_CONFIG_ALLOW_EDITS: true
|
2023-07-05 12:36:30 +00:00
|
|
|
PRECISE_CODE_INTEL_UPLOAD_AWS_ENDPOINT: http://localhost:49000
|
|
|
|
|
EMBEDDINGS_UPLOAD_AWS_ENDPOINT: http://localhost:49000
|
2023-01-20 00:35:39 +00:00
|
|
|
|
2023-05-30 15:44:27 +00:00
|
|
|
cody-gateway:
|
2023-05-03 23:53:56 +00:00
|
|
|
checks:
|
|
|
|
|
- redis
|
|
|
|
|
commands:
|
2023-05-30 15:44:27 +00:00
|
|
|
- cody-gateway
|
2023-05-03 23:53:56 +00:00
|
|
|
|
2023-08-08 15:42:32 +00:00
|
|
|
qdrant:
|
|
|
|
|
commands:
|
|
|
|
|
- qdrant
|
|
|
|
|
- frontend
|
|
|
|
|
- worker
|
|
|
|
|
- repo-updater
|
|
|
|
|
- web
|
|
|
|
|
- gitserver-0
|
|
|
|
|
- gitserver-1
|
|
|
|
|
- searcher
|
|
|
|
|
- caddy
|
|
|
|
|
- symbols
|
|
|
|
|
- docsite
|
|
|
|
|
- syntax-highlighter
|
|
|
|
|
- zoekt-index-0
|
|
|
|
|
- zoekt-index-1
|
|
|
|
|
- zoekt-web-0
|
|
|
|
|
- zoekt-web-1
|
|
|
|
|
- blobstore
|
|
|
|
|
- embeddings
|
|
|
|
|
env:
|
|
|
|
|
QDRANT_ENDPOINT: 'localhost:6334'
|
|
|
|
|
|
|
|
|
|
|
2021-03-29 08:14:53 +00:00
|
|
|
tests:
|
|
|
|
|
# These can be run with `sg test [name]`
|
|
|
|
|
backend:
|
2021-05-10 08:28:45 +00:00
|
|
|
cmd: go test
|
|
|
|
|
defaultArgs: ./...
|
|
|
|
|
|
2021-03-29 08:14:53 +00:00
|
|
|
backend-integration:
|
|
|
|
|
cmd: cd dev/gqltest && go test -long -base-url $BASE_URL -email $EMAIL -username $USERNAME -password $PASSWORD ./gqltest
|
|
|
|
|
env:
|
|
|
|
|
# These are defaults. They can be overwritten by setting the env vars when
|
|
|
|
|
# running the command.
|
2023-05-25 15:51:15 +00:00
|
|
|
BASE_URL: "http://localhost:3080"
|
|
|
|
|
EMAIL: "joe@sourcegraph.com"
|
|
|
|
|
PASSWORD: "12345"
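# Per the comment above, the quickest override is inline at invocation time,
# e.g. `EMAIL=dev@example.com PASSWORD=changeme sg test backend-integration`.
# Alternatively, assuming sg.config.overwrite.yaml also merges over the tests
# section, a persistent local override might look like (values hypothetical):
#
#   tests:
#     backend-integration:
#       env:
#         BASE_URL: "http://localhost:3080"
#         EMAIL: "dev@example.com"
#         PASSWORD: "local-dev-password"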
|
2021-05-10 08:28:45 +00:00
|
|
|
|
2021-11-10 09:26:15 +00:00
|
|
|
bext:
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm --filter @sourcegraph/browser test
|
2021-11-10 09:26:15 +00:00
|
|
|
|
2021-11-25 12:46:36 +00:00
|
|
|
bext-build:
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: EXTENSION_PERMISSIONS_ALL_URLS=true pnpm --filter @sourcegraph/browser build
|
2021-11-25 12:46:36 +00:00
|
|
|
|
2021-11-10 09:26:15 +00:00
|
|
|
bext-integration:
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm --filter @sourcegraph/browser test-integration
|
2021-11-10 09:26:15 +00:00
|
|
|
|
|
|
|
|
bext-e2e:
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm --filter @sourcegraph/browser mocha ./src/end-to-end/github.test.ts ./src/end-to-end/gitlab.test.ts
|
2021-11-10 09:26:15 +00:00
|
|
|
env:
|
|
|
|
|
SOURCEGRAPH_BASE_URL: https://sourcegraph.com
|
|
|
|
|
|
2022-08-24 11:41:51 +00:00
|
|
|
client:
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm jest --testPathIgnorePatterns end-to-end regression integration storybook
|
2021-05-10 08:28:45 +00:00
|
|
|
|
2022-09-09 09:36:10 +00:00
|
|
|
docsite:
|
|
|
|
|
cmd: .bin/docsite_${DOCSITE_VERSION} check ./doc
|
|
|
|
|
env:
|
2023-08-18 18:40:44 +00:00
|
|
|
DOCSITE_VERSION: v1.9.4 # When bumping, update DOCSITE_VERSION in all places (including outside this repo)
|
2022-09-09 09:36:10 +00:00
|
|
|
|
|
|
|
|
web-e2e:
|
2022-05-04 07:12:30 +00:00
|
|
|
preamble: |
|
2022-07-06 21:38:47 +00:00
|
|
|
A Sourcegraph instance must already be running for these tests to work, most
|
2022-08-24 11:41:51 +00:00
|
|
|
commonly with: `sg start enterprise-e2e`
|
2022-05-04 07:12:30 +00:00
|
|
|
|
2022-08-24 11:41:51 +00:00
|
|
|
See more details: https://docs.sourcegraph.com/dev/how-to/testing#running-end-to-end-tests
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm test-e2e
|
2022-05-04 07:12:30 +00:00
|
|
|
env:
|
|
|
|
|
TEST_USER_EMAIL: test@sourcegraph.com
|
|
|
|
|
TEST_USER_PASSWORD: supersecurepassword
|
|
|
|
|
SOURCEGRAPH_BASE_URL: https://sourcegraph.test:3443
|
|
|
|
|
BROWSER: chrome
|
2022-05-05 16:17:18 +00:00
|
|
|
external_secrets:
|
2022-05-04 07:12:30 +00:00
|
|
|
GH_TOKEN:
|
2023-05-25 15:51:15 +00:00
|
|
|
project: "sourcegraph-ci"
|
|
|
|
|
name: "BUILDKITE_GITHUBDOTCOM_TOKEN"
|
2021-08-05 08:19:45 +00:00
|
|
|
|
2022-09-09 09:36:10 +00:00
|
|
|
web-regression:
|
2022-08-24 11:41:51 +00:00
|
|
|
preamble: |
|
|
|
|
|
A Sourcegraph instance must already be running for these tests to work, most
|
|
|
|
|
commonly with: `sg start enterprise-e2e`
|
|
|
|
|
|
|
|
|
|
See more details: https://docs.sourcegraph.com/dev/how-to/testing#running-regression-tests
|
|
|
|
|
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm test-regression
|
2022-08-24 11:41:51 +00:00
|
|
|
env:
|
|
|
|
|
SOURCEGRAPH_SUDO_USER: test
|
|
|
|
|
SOURCEGRAPH_BASE_URL: https://sourcegraph.test:3443
|
|
|
|
|
TEST_USER_PASSWORD: supersecurepassword
|
|
|
|
|
BROWSER: chrome
|
|
|
|
|
|
2022-09-09 09:36:10 +00:00
|
|
|
web-integration:
|
|
|
|
|
preamble: |
|
|
|
|
|
The web application must already be built for these tests to work, most
|
2023-01-30 06:51:24 +00:00
|
|
|
commonly with: `sg run web-integration-build` or `sg run web-integration-build-prod` for production build.
|
2022-09-09 09:36:10 +00:00
|
|
|
|
|
|
|
|
See more details: https://docs.sourcegraph.com/dev/how-to/testing#running-integration-tests
|
|
|
|
|
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm test-integration
|
2022-09-09 09:36:10 +00:00
|
|
|
|
|
|
|
|
web-integration:debug:
|
|
|
|
|
preamble: |
|
|
|
|
|
A Sourcegraph instance must already be running for these tests to work, most
|
|
|
|
|
commonly with: `sg start web-standalone`
|
|
|
|
|
|
|
|
|
|
See more details: https://docs.sourcegraph.com/dev/how-to/testing#running-integration-tests
|
|
|
|
|
|
2023-01-12 03:50:09 +00:00
|
|
|
cmd: pnpm test-integration:debug
|