Revert "ci: use aspect workflow agents (#60317)" (#60856)

This reverts commit 67b2c665f1.
This commit is contained in:
William Bezuidenhout 2024-03-05 13:13:05 +02:00 committed by GitHub
parent 67b2c665f1
commit 5dee69a56a
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
34 changed files with 385 additions and 163 deletions

View File

@ -3,18 +3,11 @@
# shellcheck disable=SC1090,SC1091
source "$HOME/.profile"
# due to how the repository is cloned with --depth=100 (see aspect terraform docs), we need to fetch the tags manually
# we don't want to fetch all the tags, so here:
# 1. Get a list of tags from the remote
# 2. Sort them
# 3. Get the last $COUNT tags
# 4. Parse them so that we only get the tag
# 5. Then loop through every tag and fetch it
COUNT=10
echo "~~~ :git: Fetching last $COUNT tags"
git ls-remote --tags origin | sort -t '/' -k 3 -V | tail -n $COUNT | awk -F'/' '{gsub(/\^\{\}$/, "", $3); print $3}' | uniq | while read -r tag; do
git fetch -v origin tag "$tag"
done
# Fetch the latest origin/main to accurately determine the set of changed
# files on this branch.
echo "Running git fetch..."
git fetch
echo "Running git fetch... done"
# Link command wrapper scripts so we can have more readable steps in the buildkite UI
ln -s "$(pwd)/dev/ci/scripts/annotated-command.sh" an 2>/dev/null || true

View File

@ -11,7 +11,6 @@ fi
# asdf setup
# ----------
# TODO(burmudar): Once the bazel agents are deprecated we need to remove this
if [[ "$BUILDKITE_AGENT_META_DATA_QUEUE" != "bazel" ]]; then
echo "~~~ Preparing asdf dependencies"

View File

@ -8,7 +8,18 @@
# https://sourcegraph.com/github.com/sourcegraph/infrastructure/-/blob/buildkite/kubernetes/buildkite-agent/buildkite-agent.Deployment.yaml
steps:
- group: "Pipeline setup"
if: pipeline.slug == "sourcegraph"
steps:
- label: ':bazel::pipeline: Generate pipeline'
key: 'pipeline-gen'
agents: { queue: bazel }
# Prioritize generating pipelines so that jobs can get generated and queued up as soon
# as possible, so as to better assess pipeline load e.g. to scale the Buildkite fleet.
priority: 10
command: './dev/ci/gen-pipeline.sh'
- group: ":aspect: Aspect Workflows "
if: pipeline.slug == "aspect-experimental"
steps:
- key: aspect-workflows-upload
label: ":aspect: Setup Aspect Workflows"
@ -17,7 +28,10 @@ steps:
agents:
queue: aspect-small
- label: ":pipeline: Generate pipeline"
if: build.branch !~ /^main(-dry-run\/)?.*/
commands:
- "./dev/ci/gen-pipeline.sh"
agents:
queue: aspect-default
queue: bazel
env:
ASPECT_WORKFLOWS_BUILD: "1"

View File

@ -15,8 +15,7 @@ cp "${base}/docker-mirror.pkr.hcl" workdir/
cp "${base}/aws_regions.json" workdir/
cp "${base}/install.sh" workdir/
GCP_PROJECT="aspect-dev"
"$gcloud" secrets versions access latest --secret=e2e-builder-sa-key --quiet --project="$GCP_PROJECT" >"workdir/builder-sa-key.json"
"$gcloud" secrets versions access latest --secret=e2e-builder-sa-key --quiet --project=sourcegraph-ci >"workdir/builder-sa-key.json"
## Setting up packer
export PKR_VAR_name

View File

@ -1,6 +1,7 @@
#!/usr/bin/env bash
set -eu
## Setting up inputs/data
gcloud="$(pwd)/$1" # used in workdir folder, so need an absolute path
packer="$(pwd)/$2"
@ -30,8 +31,7 @@ cp "$srccli" workdir/
docker tag executor-vm:candidate "sourcegraph/executor-vm:$VERSION"
docker save --output workdir/executor-vm.tar "sourcegraph/executor-vm:$VERSION"
GCP_PROJECT="aspect-dev"
"$gcloud" secrets versions access latest --secret=e2e-builder-sa-key --quiet --project="$GCP_PROJECT" >"workdir/builder-sa-key.json"
"$gcloud" secrets versions access latest --secret=e2e-builder-sa-key --quiet --project=sourcegraph-ci >"workdir/builder-sa-key.json"
export PKR_VAR_name
PKR_VAR_name="${IMAGE_FAMILY}-${BUILDKITE_BUILD_NUMBER}"

View File

@ -2,9 +2,7 @@
set -o errexit -o nounset -o pipefail
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
bazelrc=(--bazelrc="$aspectRC")
bazelrc=(--bazelrc=.bazelrc --bazelrc=.aspect/bazelrc/ci.bazelrc --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc)
echo "--- :bazel: Build sg cli"
bazel "${bazelrc[@]}" build //dev/sg:sg

View File

@ -8,7 +8,6 @@ export PATH
cd "${BUILD_WORKSPACE_DIRECTORY}"
# This fails using rosetta binary, so we just use our normal bazelrc's
bazel \
--bazelrc=.bazelrc \
--bazelrc=.aspect/bazelrc/ci.bazelrc \

View File

@ -3,19 +3,21 @@
set -eu
EXIT_CODE=0
cd "$(dirname "${BASH_SOURCE[0]}")/../.."
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
# go mod tidy gets run in different subdirectories
# so the bazelrc files are looked up relative to that,
# but we need to check from root
root=$(pwd)
runGoModTidy() {
local dir
dir=$1
cd "$dir"
echo "--- :bazel: Running go mod tidy in $dir"
bazel --bazelrc="$aspectRC" run @go_sdk//:bin/go -- mod tidy
bazel \
--bazelrc="$root/.bazelrc" \
--bazelrc="$root/.aspect/bazelrc/ci.bazelrc" \
--bazelrc="$root/.aspect/bazelrc/ci.sourcegraph.bazelrc" \
run @go_sdk//:bin/go -- mod tidy
cd -
}

View File

@ -3,9 +3,7 @@
set -eu
EXIT_CODE=0
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
bazelrc=(--bazelrc="$aspectRC")
bazelrc=(--bazelrc=.bazelrc --bazelrc=.aspect/bazelrc/ci.bazelrc --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc)
#shellcheck disable=SC2317
# generates and uploads all bazel diffs checked by this script

View File

@ -1,15 +1,14 @@
#!/usr/bin/env bash
if [[ "${CI:-false}" == "true" ]]; then
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
bazelrc=(--bazelrc="$aspectRC")
if [[ "$1" == "build" || "$1" == "test" || "$1" == "run" ]]; then
# shellcheck disable=SC2145
echo "--- :bazel: bazel $@"
fi
bazel "${bazelrc[@]}" \
bazel \
--bazelrc=.bazelrc \
--bazelrc=.aspect/bazelrc/ci.bazelrc \
--bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc \
"$@" \
--stamp \
--workspace_status_command=./dev/bazel_stamp_vars.sh \

View File

@ -2,9 +2,7 @@
set -e
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
bazelrc=(--bazelrc="$aspectRC")
bazelrc=(--bazelrc=.bazelrc --bazelrc=.aspect/bazelrc/ci.bazelrc --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc)
echo "--- :books: Annotating build with Glossary"
buildkite-agent annotate --style info <./dev/ci/glossary.md

View File

@ -61,8 +61,8 @@ services:
# Note: Must match right-hand side of scratch volume mount
- TMPDIR=/scratch
# Run as root (required for docker daemon control)
- UID=${EXECUTOR_UID}
- GID=${EXECUTOR_GID}
- UID=1000
- GID=1000
# Use the dind container to run docker commands within the executor
- DOCKER_HOST=${DOCKER_HOST}
volumes:

View File

@ -14,22 +14,13 @@ mkdir "${DATA}"
mkdir "${DATA}/data"
mkdir "${DATA}/config"
# we get the ID / GID here that the container should map *its ID/GID* for root, so that when it writes files as root to a mapped in volume
# the file permissions will have the correct ID/GID set so that the current running user still have permissions to alter the files
EXECUTOR_UID="$(id -u)"
EXECUTOR_GID="$(id -g)"
export EXECUTOR_UID
export EXECUTOR_GID
cleanup() {
pushd "$root_dir"/dev/ci/integration/executors/ 1>/dev/null
docker-compose logs >"${root_dir}/docker-compose.log"
# We have to remove the directory here since the container creates files in that directory as root, and
# we can't remove outside of the container
docker-compose exec server /bin/sh -c "rm -rf /var/opt/sourcegraph/*"
docker-compose down --volumes --timeout 30 # seconds
docker volume rm executors-e2e || true
popd 1>/dev/null
rm -rf "${TMP_WORK_DIR}"
}
trap cleanup EXIT
@ -44,14 +35,6 @@ if [ -n "${DOCKER_GATEWAY_HOST}" ]; then
DOCKER_HOST="tcp://${DOCKER_GATEWAY_HOST:-host.docker.internal}:2375"
export DOCKER_HOST
fi
# Executor docker compose maps this explicitly because Non-aspect agents use Docker in Docker (DIND) and have this env var explicitly set.
#
# On Aspect this var is NOT set because we use "normal" docker (aka non-DinD) which uses a unix socket, but this var still needs to be mapped in
# so we explicitly set it here.
if [ -z "${DOCKER_HOST}" ]; then
DOCKER_HOST="unix:///var/run/docker.sock"
export DOCKER_HOST
fi
# Need to pull this image pre-execution as the docker executor doesn't have a
# credential to pull this image.

View File

@ -7,8 +7,8 @@ set -e
export SOURCEGRAPH_BASE_URL="${1:-"http://localhost:7080"}"
export SRC_LOG_LEVEL=dbug
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
echo "--- run tests"
bazel --bazelrc="$aspectRC" run //dev/ci/integration/executors/tester:tester
bazel \
--bazelrc=.bazelrc \
--bazelrc=.aspect/bazelrc/ci.bazelrc \
--bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc \
run //dev/ci/integration/executors/tester:tester

View File

@ -4,7 +4,6 @@ load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "ci",
srcs = [
"aspect_workflows.go",
"bazel_helpers.go",
"bazel_operations.go",
"cache_helpers.go",

View File

@ -1,12 +0,0 @@
package ci
var AspectWorkflows = struct {
TestStepKey string
QueueDefault string
QueueSmall string
}{
TestStepKey: "__main__::test",
QueueDefault: "aspect-default",
QueueSmall: "aspect-small",
}

View File

@ -13,7 +13,7 @@ import (
func bazelBuild(targets ...string) func(*bk.Pipeline) {
cmds := []bk.StepOpt{
bk.Key("bazel_build"),
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
}
cmd := bazelStampedCmd(fmt.Sprintf("build %s", strings.Join(targets, " ")))
cmds = append(
@ -29,23 +29,72 @@ func bazelBuild(targets ...string) func(*bk.Pipeline) {
}
}
func bazelTest(targets ...string) func(*bk.Pipeline) {
cmds := []bk.StepOpt{
bk.DependsOn("bazel-prechecks"),
bk.AllowDependencyFailure(),
bk.Agent("queue", "bazel"),
bk.Key("bazel-tests"),
bk.ArtifactPaths("./bazel-testlogs/cmd/embeddings/shared/shared_test/*.log", "./command.profile.gz"),
bk.AutomaticRetry(1), // TODO @jhchabran flaky stuff are breaking builds
}
// Test commands
bazelTestCmds := []bk.StepOpt{}
cmds = append(cmds, bazelApplyPrecheckChanges())
for _, target := range targets {
cmd := bazelCmd(fmt.Sprintf("test %s", target))
bazelTestCmds = append(bazelTestCmds,
bazelAnnouncef("bazel test %s", target),
bk.Cmd(cmd))
}
cmds = append(cmds, bazelTestCmds...)
return func(pipeline *bk.Pipeline) {
pipeline.AddStep(":bazel: Tests",
cmds...,
)
}
}
func bazelTestWithDepends(optional bool, dependsOn string, targets ...string) func(*bk.Pipeline) {
cmds := []bk.StepOpt{
bk.Agent("queue", "bazel"),
}
bazelCmd := bazelCmd(fmt.Sprintf("test %s", strings.Join(targets, " ")))
cmds = append(cmds, bk.Cmd(bazelCmd))
cmds = append(cmds, bk.DependsOn(dependsOn))
return func(pipeline *bk.Pipeline) {
if optional {
cmds = append(cmds, bk.SoftFail())
}
pipeline.AddStep(":bazel: Tests",
cmds...,
)
}
}
func bazelCmd(args ...string) string {
genBazelRC, bazelrc := aspectBazelRC()
pre := []string{
genBazelRC,
"bazel",
fmt.Sprintf("--bazelrc=%s", bazelrc),
"--bazelrc=.bazelrc",
"--bazelrc=.aspect/bazelrc/ci.bazelrc",
"--bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc",
}
Cmd := append(pre, args...)
return strings.Join(Cmd, " ")
}
func bazelStampedCmd(args ...string) string {
genBazelRC, bazelrc := aspectBazelRC()
pre := []string{
genBazelRC,
"bazel",
fmt.Sprintf("--bazelrc=%s", bazelrc),
"--bazelrc=.bazelrc",
"--bazelrc=.aspect/bazelrc/ci.bazelrc",
"--bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc",
}
post := []string{
"--stamp",
@ -57,20 +106,12 @@ func bazelStampedCmd(args ...string) string {
return strings.Join(cmd, " ")
}
func aspectBazelRC() (string, string) {
path := "/tmp/aspect-generated.bazelrc"
cmd := fmt.Sprintf("rosetta bazelrc > %s;", path)
return cmd, path
}
// TODO(burmudar): do we remove this?
func bazelPrechecks() func(*bk.Pipeline) {
cmds := []bk.StepOpt{
bk.Key("bazel-prechecks"),
bk.SoftFail(100),
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.ArtifactPaths("./sg"),
bk.Agent("queue", "bazel"),
bk.ArtifactPaths("./bazel-configure.diff", "./sg"),
bk.AnnotatedCmd("dev/ci/bazel-prechecks.sh", bk.AnnotatedCmdOpts{
Annotations: &bk.AnnotationOpts{
Type: bk.AnnotationTypeError,
@ -81,8 +122,6 @@ func bazelPrechecks() func(*bk.Pipeline) {
// of its own pipeline step. After pre-checks have passed seems
// the most natural, as we then know that the bazel files are
// up-to-date for building sg.
// TODO(burmudar): maybe move this to be part of gen pipeline?
bk.Cmd("dev/ci/bazel-build-sg.sh"),
}
@ -98,6 +137,10 @@ func bazelAnnouncef(format string, args ...any) bk.StepOpt {
return bk.Cmd(fmt.Sprintf(`echo "--- :bazel: %s"`, msg))
}
func bazelApplyPrecheckChanges() bk.StepOpt {
return bk.Cmd("dev/ci/bazel-prechecks-apply.sh")
}
var allowedBazelFlags = map[string]struct{}{
"--runs_per_test": {},
"--nobuild": {},
@ -115,11 +158,29 @@ var bazelFlagsRe = regexp.MustCompile(`--\w+`)
func verifyBazelCommand(command string) error {
// check for shell escape mechanisms.
bannedChars := []string{"`", "$", "(", ")", ";", "&", "|", "<", ">"}
for _, c := range bannedChars {
if strings.Contains(command, c) {
return errors.Newf("unauthorized input for bazel command: %q", c)
}
if strings.Contains(command, ";") {
return errors.New("unauthorized input for bazel command: ';'")
}
if strings.Contains(command, "&") {
return errors.New("unauthorized input for bazel command: '&'")
}
if strings.Contains(command, "|") {
return errors.New("unauthorized input for bazel command: '|'")
}
if strings.Contains(command, "$") {
return errors.New("unauthorized input for bazel command: '$'")
}
if strings.Contains(command, "`") {
return errors.New("unauthorized input for bazel command: '`'")
}
if strings.Contains(command, ">") {
return errors.New("unauthorized input for bazel command: '>'")
}
if strings.Contains(command, "<") {
return errors.New("unauthorized input for bazel command: '<'")
}
if strings.Contains(command, "(") {
return errors.New("unauthorized input for bazel command: '('")
}
// check for command and targets

View File

@ -7,6 +7,14 @@ import (
func BazelOperations(buildOpts bk.BuildOptions, opts CoreTestOperationsOptions) []operations.Operation {
ops := []operations.Operation{bazelPrechecks()}
ops = append(ops, triggerBackCompatTest(buildOpts), bazelGoModTidy())
if !opts.AspectWorkflows {
if opts.IsMainBranch {
ops = append(ops, bazelTest("//...", "//client/web:test", "//testing:codeintel_integration_test"))
} else {
ops = append(ops, bazelTest("//...", "//client/web:test"))
}
}
ops = append(ops, triggerBackCompatTest(buildOpts, opts.AspectWorkflows), bazelGoModTidy())
return ops
}

View File

@ -14,7 +14,7 @@ func bazelBuildExecutorVM(c Config, alwaysRebuild bool) operations.Operation {
return func(pipeline *bk.Pipeline) {
imageFamily := executorImageFamilyForConfig(c)
stepOpts := []bk.StepOpt{
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.Key(candidateImageStepKey("executor.vm-image")),
bk.Env("VERSION", c.Version),
bk.Env("IMAGE_FAMILY", imageFamily),
@ -38,7 +38,7 @@ func bazelPublishExecutorVM(c Config, alwaysRebuild bool) operations.Operation {
return func(pipeline *bk.Pipeline) {
imageFamily := executorImageFamilyForConfig(c)
stepOpts := []bk.StepOpt{
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.DependsOn(candidateImageStepKey("executor.vm-image")),
bk.Env("VERSION", c.Version),
bk.Env("IMAGE_FAMILY", imageFamily),
@ -56,7 +56,7 @@ func bazelPublishExecutorVM(c Config, alwaysRebuild bool) operations.Operation {
stepOpts = append(stepOpts, bk.Cmd(cmd))
pipeline.AddStep(":bazel::packer: :white_check_mark: Publish executor image", stepOpts...)
pipeline.AddStep(":bazel::packer: :construction: Build executor image", stepOpts...)
}
}
@ -64,7 +64,7 @@ func bazelBuildExecutorDockerMirror(c Config) operations.Operation {
return func(pipeline *bk.Pipeline) {
imageFamily := executorDockerMirrorImageFamilyForConfig(c)
stepOpts := []bk.StepOpt{
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.Key(candidateImageStepKey("executor-docker-miror.vm-image")),
bk.Env("VERSION", c.Version),
bk.Env("IMAGE_FAMILY", imageFamily),
@ -80,21 +80,21 @@ func bazelPublishExecutorDockerMirror(c Config) operations.Operation {
candidateBuildStep := candidateImageStepKey("executor-docker-miror.vm-image")
imageFamily := executorDockerMirrorImageFamilyForConfig(c)
stepOpts := []bk.StepOpt{
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.DependsOn(candidateBuildStep),
bk.Env("VERSION", c.Version),
bk.Env("IMAGE_FAMILY", imageFamily),
bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease))),
bk.Cmd(bazelStampedCmd("run //cmd/executor/docker-mirror:ami.push")),
}
pipeline.AddStep(":bazel::packer: :white_check_mark: Publish docker registry mirror image", stepOpts...)
pipeline.AddStep(":packer: :white_check_mark: Publish docker registry mirror image", stepOpts...)
}
}
func bazelPublishExecutorBinary(c Config) operations.Operation {
return func(pipeline *bk.Pipeline) {
stepOpts := []bk.StepOpt{
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.Env("VERSION", c.Version),
bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease))),
bk.Cmd(bazelStampedCmd(`run //cmd/executor:binary.push`)),
@ -138,12 +138,16 @@ func executorsE2E(candidateTag string) operations.Operation {
p.AddStep(":bazel::docker::packer: Executors E2E",
// Run tests against the candidate server image
bk.DependsOn("bazel-push-images-candidate"),
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.Env("CANDIDATE_VERSION", candidateTag),
bk.Env("SOURCEGRAPH_BASE_URL", "http://127.0.0.1:7080"),
bk.Env("SOURCEGRAPH_SUDO_USER", "admin"),
bk.Env("TEST_USER_EMAIL", "test@sourcegraph.com"),
bk.Env("TEST_USER_PASSWORD", "supersecurepassword"),
// See dev/ci/integration/executors/docker-compose.yaml
// This enables the executor to reach the dind container
// for docker commands.
bk.Env("DOCKER_GATEWAY_HOST", "172.17.0.1"),
bk.Cmd("dev/ci/integration/executors/run.sh"),
bk.ArtifactPaths("./*.log"),
)

View File

@ -73,8 +73,12 @@ func bazelPushImagesCandidates(version string) func(*bk.Pipeline) {
}
// Used in default run type
func bazelPushImagesFinal(version string) func(*bk.Pipeline) {
return bazelPushImagesCmd(version, false, bk.DependsOn(AspectWorkflows.TestStepKey))
func bazelPushImagesFinal(version string, isAspectBuild bool) func(*bk.Pipeline) {
depKey := "bazel-tests"
if isAspectBuild {
depKey = "__main__::test"
}
return bazelPushImagesCmd(version, false, bk.DependsOn(depKey))
}
// Used in CandidateNoTest run type
@ -96,13 +100,68 @@ func bazelPushImagesCmd(version string, isCandidate bool, opts ...bk.StepOpt) fu
return func(pipeline *bk.Pipeline) {
pipeline.AddStep(stepName,
append(opts,
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.Key(stepKey),
bk.Env("PUSH_VERSION", version),
bk.Env("CANDIDATE_ONLY", candidate),
bazelApplyPrecheckChanges(),
bk.Cmd(bazelStampedCmd(`build $$(bazel query 'kind("oci_push rule", //...)')`)),
bk.Cmd("./dev/ci/push_all.sh"),
)...,
)
}
}
// Tag and push final Docker image for the service defined by `app`
// after the e2e tests pass.
//
// It requires Config as an argument because published images require a lot of metadata.
func bazelPublishFinalDockerImage(c Config, apps []string) operations.Operation {
return func(pipeline *bk.Pipeline) {
cmds := []bk.StepOpt{}
cmds = append(cmds, bk.Agent("queue", "bazel"))
for _, app := range apps {
devImage := images.DevRegistryImage(app, "")
publishImage := images.PublishedRegistryImage(app, "")
var imgs []string
for _, image := range []string{publishImage, devImage} {
if app != "server" || c.RunType.Is(runtype.TaggedRelease, runtype.ImagePatch, runtype.ImagePatchNoTest) {
imgs = append(imgs, fmt.Sprintf("%s:%s", image, c.Version))
}
if app == "server" && c.RunType.Is(runtype.ReleaseBranch) {
imgs = append(imgs, fmt.Sprintf("%s:%s-insiders", image, c.Branch))
}
if c.RunType.Is(runtype.MainBranch) {
imgs = append(imgs, fmt.Sprintf("%s:insiders", image))
}
}
// these tags are pushed to our dev registry, and are only
// used internally
for _, tag := range []string{
c.Version,
c.Commit,
c.shortCommit(),
fmt.Sprintf("%s_%s_%d", c.shortCommit(), c.Time.Format("2006-01-02"), c.BuildNumber),
fmt.Sprintf("%s_%d", c.shortCommit(), c.BuildNumber),
fmt.Sprintf("%s_%d", c.Commit, c.BuildNumber),
strconv.Itoa(c.BuildNumber),
} {
internalImage := fmt.Sprintf("%s:%s", devImage, tag)
imgs = append(imgs, internalImage)
}
candidateImage := fmt.Sprintf("%s:%s", devImage, c.candidateImageTag())
cmds = append(cmds, bk.Cmd(fmt.Sprintf("./dev/ci/docker-publish.sh %s %s", candidateImage, strings.Join(imgs, " "))))
}
pipeline.AddStep(":docker: :truck: Publish images", cmds...)
// This step just pulls a prebuild image and pushes it to some registries. The
// only possible failure here is a registry flake, so we retry a few times.
bk.AutomaticRetry(3)
}
}

View File

@ -22,7 +22,7 @@ func legacyBuildCandidateDockerImages(apps []string, version string, tag string,
bk.Env("DOCKER_BAZEL", "true"),
bk.Env("DOCKER_BUILDKIT", "1"),
bk.Env("VERSION", version),
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
)
// Allow all build scripts to emit info annotations
@ -105,7 +105,7 @@ func legacyBuildCandidateDockerImage(app string, version string, tag string, rt
cmds = append(cmds,
bk.Key(candidateImageStepKey(app)),
bk.Env("VERSION", version),
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
)
// Allow all build scripts to emit info annotations

View File

@ -8,7 +8,10 @@ import (
"github.com/sourcegraph/sourcegraph/dev/ci/runtype"
)
func triggerBackCompatTest(buildOpts bk.BuildOptions) func(*bk.Pipeline) {
func triggerBackCompatTest(buildOpts bk.BuildOptions, isAspectWorkflows bool) func(*bk.Pipeline) {
if isAspectWorkflows {
buildOpts.Message += " (Aspect)"
}
return func(pipeline *bk.Pipeline) {
steps := []bk.StepOpt{
bk.Async(false),
@ -17,13 +20,16 @@ func triggerBackCompatTest(buildOpts bk.BuildOptions) func(*bk.Pipeline) {
bk.Build(buildOpts),
}
if !isAspectWorkflows {
steps = append(steps, bk.DependsOn("bazel-prechecks"))
}
pipeline.AddTrigger(":bazel::hourglass_flowing_sand: BackCompat Tests", "sourcegraph-backcompat", steps...)
}
}
func bazelGoModTidy() func(*bk.Pipeline) {
cmds := []bk.StepOpt{
bk.Agent("queue", AspectWorkflows.QueueSmall),
bk.Agent("queue", "bazel"),
bk.Key("bazel-go-mod"),
bk.Cmd("./dev/ci/bazel-gomodtidy.sh"),
}

View File

@ -1,6 +1,11 @@
package ci
import (
"fmt"
"time"
"github.com/Masterminds/semver"
bk "github.com/sourcegraph/sourcegraph/dev/ci/internal/buildkite"
"github.com/sourcegraph/sourcegraph/dev/ci/internal/ci/changed"
"github.com/sourcegraph/sourcegraph/dev/ci/internal/ci/operations"
@ -18,6 +23,9 @@ type CoreTestOperationsOptions struct {
CreateBundleSizeDiff bool // for addWebAppEnterpriseBuild
IsMainBranch bool
// AspectWorkflows is set to true when we generate steps as part of the Aspect Workflows pipeline
AspectWorkflows bool
}
// CoreTestOperations is a core set of tests that should be run in most CI cases. More
@ -33,11 +41,9 @@ type CoreTestOperationsOptions struct {
func CoreTestOperations(buildOpts bk.BuildOptions, diff changed.Diff, opts CoreTestOperationsOptions) *operations.Set {
// Base set
ops := operations.NewSet()
ops.Append(
bazelPrechecks(),
triggerBackCompatTest(buildOpts),
bazelGoModTidy(),
)
// Simple, fast-ish linter checks
ops.Append(BazelOperations(buildOpts, opts)...)
linterOps := operations.NewNamedSet("Linters and static analysis")
if targets := changed.GetLinterTargets(diff); len(targets) > 0 {
linterOps.Append(addSgLints(targets))
@ -70,3 +76,33 @@ func addJetBrainsUnitTests(pipeline *bk.Pipeline) {
func wait(pipeline *bk.Pipeline) {
pipeline.AddWait()
}
func triggerReleaseBranchHealthchecks(minimumUpgradeableVersion string) operations.Operation {
return func(pipeline *bk.Pipeline) {
version := semver.MustParse(minimumUpgradeableVersion)
// HACK: we can't just subtract a single minor version once we roll over to 4.0,
// so hard-code the previous minor version.
previousMinorVersion := fmt.Sprintf("%d.%d", version.Major(), version.Minor()-1)
if version.Major() == 4 && version.Minor() == 0 {
previousMinorVersion = "3.43"
} else if version.Major() == 5 && version.Minor() == 0 {
previousMinorVersion = "4.5"
}
for _, branch := range []string{
// Most recent major.minor
fmt.Sprintf("%d.%d", version.Major(), version.Minor()),
previousMinorVersion,
} {
name := fmt.Sprintf(":stethoscope: Trigger %s release branch healthcheck build", branch)
pipeline.AddTrigger(name, "sourcegraph",
bk.Async(false),
bk.Build(bk.BuildOptions{
Branch: branch,
Message: time.Now().Format(time.RFC1123) + " healthcheck build",
}),
)
}
}
}

View File

@ -89,6 +89,10 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
Env: env,
}
// We generate the pipeline slightly differently when running as part of the Aspect Workflows pipeline.
// Primarily, we don't run any `bazel test` since Aspect has got that covered
isAspectWorkflowBuild := os.Getenv("ASPECT_WORKFLOWS_BUILD") == "1"
// Test upgrades from mininum upgradeable Sourcegraph version - updated by release tool
const minimumUpgradeableVersion = "5.3.0"
@ -116,7 +120,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
ops.Append(func(pipeline *bk.Pipeline) {
pipeline.AddStep(":bazel::desktop_computer: bazel "+bzlCmd,
bk.Key("bazel-do"),
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.Agent("queue", "bazel"),
bk.Cmd(bazelCmd(bzlCmd)),
)
})
@ -138,12 +142,15 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
MinimumUpgradeableVersion: minimumUpgradeableVersion,
ForceReadyForReview: c.MessageFlags.ForceReadyForReview,
CreateBundleSizeDiff: true,
AspectWorkflows: isAspectWorkflowBuild,
}))
securityOps := operations.NewNamedSet("Security Scanning")
securityOps.Append(semgrepScan())
securityOps.Append(sonarcloudScan())
ops.Merge(securityOps)
if !isAspectWorkflowBuild {
securityOps := operations.NewNamedSet("Security Scanning")
securityOps.Append(semgrepScan())
securityOps.Append(sonarcloudScan())
ops.Merge(securityOps)
}
// Wolfi package and base images
packageOps, baseImageOps := addWolfiOps(c)
@ -160,12 +167,16 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
))
}
case runtype.ReleaseNightly:
ops.Append(triggerReleaseBranchHealthchecks(minimumUpgradeableVersion))
case runtype.BextReleaseBranch:
// If this is a browser extension release branch, run the browser-extension tests and
// builds.
ops = BazelOpsSet(buildOptions,
CoreTestOperationsOptions{
IsMainBranch: buildOptions.Branch == "main",
IsMainBranch: buildOptions.Branch == "main",
AspectWorkflows: isAspectWorkflowBuild,
},
addBrowserExtensionIntegrationTests(0), // we pass 0 here as we don't have other pipeline steps to contribute to the resulting Percy build
wait,
@ -176,7 +187,8 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
// e2e tests.
ops = BazelOpsSet(buildOptions,
CoreTestOperationsOptions{
IsMainBranch: buildOptions.Branch == "main",
IsMainBranch: buildOptions.Branch == "main",
AspectWorkflows: isAspectWorkflowBuild,
},
recordBrowserExtensionIntegrationTests,
wait,
@ -226,6 +238,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
// Test images
ops.Merge(CoreTestOperations(buildOptions, changed.All, CoreTestOperationsOptions{
MinimumUpgradeableVersion: minimumUpgradeableVersion,
AspectWorkflows: isAspectWorkflowBuild,
}))
// Publish images after everything is done
ops.Append(
@ -249,10 +262,13 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
case runtype.ExecutorPatchNoTest:
executorVMImage := "executor-vm"
ops = operations.NewSet(
bazelBuildExecutorVM(c, true),
// TODO(burmudar): This should use the bazel target
legacyBuildCandidateDockerImage(executorVMImage, c.Version, c.candidateImageTag(), c.RunType),
trivyScanCandidateImage(executorVMImage, c.candidateImageTag()),
bazelBuildExecutorVM(c, true),
bazelBuildExecutorDockerMirror(c),
wait,
publishFinalDockerImage(c, executorVMImage),
bazelPublishExecutorVM(c, true),
bazelPublishExecutorDockerMirror(c),
bazelPublishExecutorBinary(c),
@ -279,14 +295,17 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
ForceReadyForReview: c.MessageFlags.ForceReadyForReview,
CacheBundleSize: c.RunType.Is(runtype.MainBranch, runtype.MainDryRun),
IsMainBranch: true,
AspectWorkflows: isAspectWorkflowBuild,
}))
// Security scanning - sonarcloud & semgrep scan
// Sonarcloud scan will soon be phased out after semgrep scan is fully enabled
securityOps := operations.NewNamedSet("Security Scanning")
securityOps.Append(semgrepScan())
securityOps.Append(sonarcloudScan())
ops.Merge(securityOps)
if isAspectWorkflowBuild {
securityOps := operations.NewNamedSet("Security Scanning")
securityOps.Append(semgrepScan())
securityOps.Append(sonarcloudScan())
ops.Merge(securityOps)
}
// Publish candidate images to dev registry
publishOpsDev := operations.NewNamedSet("Publish candidate images")
@ -323,7 +342,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
}
// Final Bazel images
publishOps.Append(bazelPushImagesFinal(c.Version))
publishOps.Append(bazelPushImagesFinal(c.Version, isAspectWorkflowBuild))
ops.Merge(publishOps)
}

View File

@ -63,6 +63,8 @@ func trivyScanCandidateImage(app, tag string) operations.Operation {
return func(pipeline *bk.Pipeline) {
pipeline.AddStep(fmt.Sprintf(":trivy: :docker: :mag: Scan %s", app),
// These are the first images in the arrays we use to build images
bk.DependsOn(candidateImageStepKey("alpine-3.14")),
bk.DependsOn(candidateImageStepKey("batcheshelper")),
bk.DependsOn(dependsOnImage),
bk.Cmd(fmt.Sprintf("docker pull %s", image)),

View File

@ -77,8 +77,7 @@ func buildPackage(target string) (func(*bk.Pipeline), string) {
pipeline.AddStep(fmt.Sprintf(":package: Package dependency '%s'", target),
bk.Cmd(fmt.Sprintf("./dev/ci/scripts/wolfi/build-package.sh %s", target)),
// We want to run on the bazel queue, so we have a pretty minimal agent.
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.DependsOn(AspectWorkflows.TestStepKey),
bk.Agent("queue", "bazel"),
bk.Key(stepKey),
bk.SoftFail(222),
)
@ -90,7 +89,7 @@ func buildRepoIndex(packageKeys []string) func(*bk.Pipeline) {
pipeline.AddStep(":card_index_dividers: Build and sign repository index",
bk.Cmd("./dev/ci/scripts/wolfi/build-repo-index.sh"),
// We want to run on the bazel queue, so we have a pretty minimal agent.
bk.Agent("queue", AspectWorkflows.QueueSmall),
bk.Agent("queue", "bazel"),
// Depend on all previous package building steps
bk.DependsOn(packageKeys...),
bk.Key("buildRepoIndex"),
@ -113,8 +112,7 @@ func buildWolfiBaseImage(target string, tag string, dependOnPackages bool) (func
},
}),
// We want to run on the bazel queue, so we have a pretty minimal agent.
bk.Agent("queue", AspectWorkflows.QueueDefault),
bk.DependsOn(AspectWorkflows.TestStepKey),
bk.Agent("queue", "bazel"),
bk.Key(stepKey),
bk.SoftFail(222),
}
@ -136,7 +134,7 @@ func allBaseImagesBuilt(baseImageKeys []string) func(*bk.Pipeline) {
pipeline.AddStep(":octopus: All base images built",
bk.Cmd("echo 'All base images built'"),
// We want to run on the bazel queue, so we have a pretty minimal agent.
bk.Agent("queue", AspectWorkflows.QueueSmall),
bk.Agent("queue", "bazel"),
// Depend on all previous package building steps
bk.DependsOn(baseImageKeys...),
bk.Key("buildAllBaseImages"),
@ -343,7 +341,7 @@ func wolfiGenerateBaseImagePR() *operations.Set {
func(pipeline *bk.Pipeline) {
pipeline.AddStep(":whale::hash: Update Base Image Hashes",
bk.Cmd("./dev/ci/scripts/wolfi/update-base-image-hashes.sh"),
bk.Agent("queue", AspectWorkflows.QueueSmall),
bk.Agent("queue", "bazel"),
bk.DependsOn("buildAllBaseImages"),
bk.Key("updateBaseImageHashes"),
)

View File

@ -2,9 +2,7 @@
set -eu
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
bazelrc=(--bazelrc="$aspectRC")
bazelrc=(--bazelrc=.bazelrc --bazelrc=.aspect/bazelrc/ci.bazelrc --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc)
function preview_tags() {
IFS=' ' read -r -a registries <<<"$1"

View File

@ -16,8 +16,13 @@ const (
PullRequest RunType = iota // pull request build
ManuallyTriggered // build that is manually triggred - typically used to start CI for external contributions
// Nightly builds - must be first because they take precedence
ReleaseNightly // release branch nightly healthcheck builds
BextNightly // browser extension nightly build
BextManualNightly // browser extension nightly build, triggered with a branch pattern
AppRelease // app release build
AppInsiders // app insiders build
WolfiBaseRebuild // wolfi base image build
// Release branches
@ -38,7 +43,10 @@ const (
ExecutorPatchNoTest // build executor image without testing
CandidatesNoTest // build one or all candidate images without testing
BazelDo // run a specific bazel command
// Special test branches
BackendIntegrationTests // run backend tests that are used on main
BazelDo // run a specific bazel command
// None is a no-op, add all run types above this type.
None
@ -77,6 +85,12 @@ func (t RunType) Is(oneOfTypes ...RunType) bool {
// Matcher returns the requirements for a build to be considered of this RunType.
func (t RunType) Matcher() *RunTypeMatcher {
switch t {
case ReleaseNightly:
return &RunTypeMatcher{
EnvIncludes: map[string]string{
"RELEASE_NIGHTLY": "true",
},
}
case BextNightly:
return &RunTypeMatcher{
EnvIncludes: map[string]string{
@ -93,6 +107,18 @@ func (t RunType) Matcher() *RunTypeMatcher {
"WOLFI_BASE_REBUILD": "true",
},
}
case AppRelease:
return &RunTypeMatcher{
Branch: "app/release",
BranchExact: true,
}
case AppInsiders:
return &RunTypeMatcher{
Branch: "app/insiders",
BranchExact: true,
}
case TaggedRelease:
return &RunTypeMatcher{
TagPrefix: "v",
@ -136,6 +162,10 @@ func (t RunType) Matcher() *RunTypeMatcher {
Branch: "executor-patch-notest/",
}
case BackendIntegrationTests:
return &RunTypeMatcher{
Branch: "backend-integration/",
}
case CandidatesNoTest:
return &RunTypeMatcher{
Branch: "docker-images-candidates-notest/",
@ -155,12 +185,18 @@ func (t RunType) String() string {
return "Pull request"
case ManuallyTriggered:
return "Manually Triggered External Build"
case ReleaseNightly:
return "Release branch nightly healthcheck build"
case BextNightly:
return "Browser extension nightly release build"
case BextManualNightly:
return "Manually triggered browser extension nightly release build"
case WolfiBaseRebuild:
return "Wolfi base images rebuild"
case AppRelease:
return "App release build"
case AppInsiders:
return "App insiders build"
case TaggedRelease:
return "Tagged release"
case ReleaseBranch:
@ -179,10 +215,12 @@ func (t RunType) String() string {
return "Build all candidates without testing"
case ExecutorPatchNoTest:
return "Build executor without testing"
case BackendIntegrationTests:
return "Backend integration tests"
case BazelDo:
return "Bazel command"
}
return "None"
return ""
}
// RunTypeMatcher defines the requirements for any given build to be considered a build of

View File

@ -60,8 +60,28 @@ func TestComputeRunType(t *testing.T) {
},
},
want: WolfiBaseRebuild,
},
}
}, {
name: "app release",
args: args{
branch: "app/release",
},
want: AppRelease,
}, {
name: "app release insiders",
args: args{
branch: "app/insiders",
},
want: AppInsiders,
}, {
name: "release nightly",
args: args{
branch: "main",
env: map[string]string{
"RELEASE_NIGHTLY": "true",
},
},
want: ReleaseNightly,
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got := Compute(tt.args.tag, tt.args.branch, tt.args.env)

View File

@ -7,9 +7,6 @@
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
set -e
# TODO(burmudar: remove me this should be created by other scripts
mkdir -p ./annotations
print_usage() {
printf "Usage:"
printf " echo \"your annotation\" | annotate.sh -s my-section"

View File

@ -130,7 +130,7 @@ docker load <"$tarball"
local_image_name="$image_name:latest-amd64"
# Push to internal dev repo
echo "* Pushing image to internal dev repo..."
echo " * Pushing image to internal dev repo..."
docker tag "$local_image_name" "us.gcr.io/sourcegraph-dev/wolfi-${name}-base:$tag"
docker push "us.gcr.io/sourcegraph-dev/wolfi-${name}-base:$tag"
docker tag "$local_image_name" "us.gcr.io/sourcegraph-dev/wolfi-${name}-base:latest"
@ -138,7 +138,7 @@ docker push "us.gcr.io/sourcegraph-dev/wolfi-${name}-base:latest"
# Push to Dockerhub only on main branch
if [[ "$IS_MAIN" == "true" ]]; then
echo "* Pushing image to prod repo..."
echo " * Pushing image to prod repo..."
docker tag "$local_image_name" "sourcegraph/wolfi-${name}-base:$tag"
docker push "sourcegraph/wolfi-${name}-base:$tag"
docker tag "$local_image_name" "sourcegraph/wolfi-${name}-base:latest"

View File

@ -4,7 +4,8 @@ set -eu -o pipefail
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
KEYS_DIR="/etc/sourcegraph/keys/"
# TODO: Manage these variables properly
GCP_PROJECT="sourcegraph-ci"
GCS_BUCKET="package-repository"
TARGET_ARCH="x86_64"
MAIN_BRANCH="main"
@ -42,7 +43,7 @@ apkindex_build_dir=$(mktemp -d -t apkindex-build.XXXXXXXX)
pushd "$apkindex_build_dir"
# Fetch all APKINDEX fragments from bucket
gsutil -m cp "gs://$GCS_BUCKET/$BRANCH_PATH/$TARGET_ARCH/*.APKINDEX.fragment" ./
gsutil -u "$GCP_PROJECT" -m cp "gs://$GCS_BUCKET/$BRANCH_PATH/$TARGET_ARCH/*.APKINDEX.fragment" ./
# Concat all fragments into a single APKINDEX and tar.gz it
touch placeholder.APKINDEX.fragment
@ -52,9 +53,9 @@ tar zcf APKINDEX.tar.gz APKINDEX DESCRIPTION
# Sign index, using separate keys from GCS for staging and prod repos
if [[ "$IS_MAIN" == "true" ]]; then
key_path="$KEYS_DIR/sourcegraph-melange-prod.rsa"
key_path="/keys/sourcegraph-melange-prod.rsa"
else
key_path="$KEYS_DIR/sourcegraph-melange-dev.rsa"
key_path="/keys/sourcegraph-melange-dev.rsa"
fi
melange sign-index --signing-key "$key_path" APKINDEX.tar.gz

View File

@ -5,9 +5,11 @@ set -eu -o pipefail
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
# Update hashes for all base images
aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
bazel --bazelrc="$aspectRC" run //dev/sg -- wolfi update-hashes
bazel \
--bazelrc=.bazelrc \
--bazelrc=.aspect/bazelrc/ci.bazelrc \
--bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc \
run //dev/sg -- wolfi update-hashes
# Print diff
git diff dev/oci_deps.bzl

View File

@ -4,7 +4,8 @@ set -eu -o pipefail
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
GCP_PROJECT="aspect-dev"
# TODO: Manage these variables properly
GCP_PROJECT="sourcegraph-ci"
GCS_BUCKET="package-repository"
TARGET_ARCH="x86_64"
MAIN_BRANCH="main"
@ -20,6 +21,9 @@ fi
cd wolfi-packages/packages/$TARGET_ARCH
# Check that this exact package does not already exist in the repo - fail if so
echo " * Uploading package to repository"
# List all .apk files under wolfi-packages/packages/$TARGET_ARCH/
error="false"
package_usage_list=""