mirror of https://github.com/sourcegraph/sourcegraph.git
synced 2026-02-06 17:11:49 +00:00

rfc795: new release process foundations (#60962)

Co-authored-by: William Bezuidenhout <william.bezuidenhout@sourcegraph.com>

parent 91c154c705
commit 9f10c1cb3d
@@ -13,8 +13,8 @@ steps:
  - key: aspect-workflows-upload
    if: build.env("DISABLE_ASPECT_WORKFLOWS") != "true"
    label: ":aspect: Setup Aspect Workflows"
    commands:
      - "rosetta steps | buildkite-agent pipeline upload"
    commands: |
      rosetta steps | buildkite-agent pipeline upload
    agents:
      queue: aspect-small
  - label: ":pipeline: Generate pipeline"
@@ -413,10 +413,6 @@ load("//dev:tool_deps.bzl", "tool_deps")

tool_deps()

load("//tools/release:schema_deps.bzl", "schema_deps")

schema_deps()

# Buildifier
load("@buildifier_prebuilt//:deps.bzl", "buildifier_prebuilt_deps")
@@ -42,7 +42,15 @@ if [ "${EXECUTOR_IS_TAGGED_RELEASE}" = "true" ]; then
  # Without it, the tag will be empty and the gsutil rm -rf below will
  # drop the entire folder.
  if [ -z "$BUILDKITE_TAG" ] || [ "$BUILDKITE_TAG" = "" ]; then
    exit 1
    # But if a version is set and matches the release numbering scheme, we can
    # still use it instead.
    if [[ "$VERSION" =~ ^[0-9]+\.[0-9]+\.[0-9]+$ ]]; then
      echo ":warning: inferring \$BUILDKITE_TAG from \$VERSION"
      export BUILDKITE_TAG="v${VERSION}"
    else
      echo ":red_circle: cannot infer \$BUILDKITE_TAG from \$VERSION"
      exit 1
    fi
  fi

  echo "Uploading binaries for the ${BUILDKITE_TAG} tag"
@@ -18,7 +18,7 @@ trap 'rm -Rf "$workdir_abs"' EXIT
cp "${base}/executor.pkr.hcl" workdir/
cp "${base}/aws_regions.json" workdir/
cp "${base}/install.sh" workdir/
cp "$executor" workdir
cp "$executor" workdir/

# Copy src-cli, see //dev/tools:src-cli
cp "$srccli" workdir/
@@ -56,7 +56,12 @@ oci_image(
        "CODEINTEL_PGSSLMODE": "disable",
        "CODEINTEL_PGUSER": "sg",
    },
    tars = [":tar_frontend"],
    tars = [
        ":tar_frontend",
        # Needed so the "Upgrade readiness" page in the site-admin
        # can find the current schema locally.
        "//cmd/migrator:tar_current_schema_descriptions",
    ],
    user = "sourcegraph",
)
@@ -28,14 +28,36 @@ go_binary(
    },
)

# See tools/release/README.md for details.
# This rule builds a tarball of the database schema descriptions that only contains the current database schema
# description. If the build is not stamped with a proper VERSION env var, this will end up as "dev-...".
genrule(
    name = "tar_schema_descriptions",
    srcs = ["@schemas_archive//file"],
    outs = ["schema_descriptions.tar"],
    name = "tar_current_schema_descriptions",
    srcs = [
        "//internal/database:schema.json",
        "//internal/database:schema.codeinsights.json",
        "//internal/database:schema.codeintel.json",
    ],
    outs = ["current_schema_description.tar"],
    cmd = """\
if grep -q "STABLE_VERSION" bazel-out/stable-status.txt; then
    # When we're stamping, we can find the current version in the stable-status.
    # But we do stamp main builds with a specific format, which is irrelevant for the migrator, so we override this to be dev.
    stable_version="$$(cat bazel-out/stable-status.txt | grep STABLE_VERSION | cut -d' ' -f2)"
    if ! [[ $$stable_version =~ ^[0-9]\\.[0-9]+\\.[0-9]+ ]]; then
        echo "🟠 (//cmd/migrator:tar_current_schema_descriptions) Stamping with dev version, will use "dev" as the current version"
        echo $$stable_version
        version="dev"
    else
        version="v$$stable_version"
    fi
else
    # When not stamping, usually during local development, we just use dev instead.
    version="dev"
fi
mkdir -p schema-descriptions/
tar zxf $(location @schemas_archive//file:file) --no-same-owner -C schema-descriptions/
cp $(location //internal/database:schema.json) schema-descriptions/$${version}-internal_database_schema.json
cp $(location //internal/database:schema.codeinsights.json) schema-descriptions/$${version}-internal_database_schema.codeinsights.json
cp $(location //internal/database:schema.codeintel.json) schema-descriptions/$${version}-internal_database_schema.codeintel.json

if tar --version | grep -q bsdtar; then
    tar -cf $@ --uid=0 --gid=0 --numeric-owner schema-descriptions/
@@ -43,6 +65,8 @@ genrule(
    tar -cf $@ --owner=:0 --group=:0 --numeric-owner schema-descriptions/
fi
""",
    stamp = 1,
    visibility = ["//visibility:public"],
)

pkg_tar(
@@ -60,7 +84,8 @@ oci_image(
    ],
    tars = [
        ":tar_migrator",
        ":tar_schema_descriptions",
        ":tar_current_schema_descriptions",
        "//cmd/migrator/airgappedgen:tar_schema_descriptions",
    ],
    user = "sourcegraph",
)
76 cmd/migrator/airgappedgen/BUILD.bazel Normal file
@@ -0,0 +1,76 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")

go_library(
    name = "airgappedgen_lib",
    srcs = [
        "main.go",
        "schema_description.go",
    ],
    embedsrcs = ["gcs_versions.json"],
    importpath = "github.com/sourcegraph/sourcegraph/cmd/migrator/airgappedgen",
    visibility = ["//visibility:private"],
    deps = [
        "//lib/errors",
        "@com_github_google_go_github_v55//github",
        "@com_github_grafana_regexp//:regexp",
        "@com_github_masterminds_semver//:semver",
        "@com_github_sourcegraph_conc//pool",
        "@org_golang_x_oauth2//:oauth2",
    ],
)

go_binary(
    name = "airgappedgen",
    embed = [":airgappedgen_lib"],
    visibility = ["//visibility:public"],
)

genrule(
    name = "tar_schema_descriptions",
    srcs = [],
    outs = ["schema_descriptions.tar"],
    cmd = """\
if grep -q "STABLE_VERSION" bazel-out/stable-status.txt; then
    # When we're stamping, we can find the current version in the stable-status.
    stable_version="$$(cat bazel-out/stable-status.txt | grep STABLE_VERSION | cut -d' ' -f2)"
    # Don't miss the not operator!
    if ! [[ $$stable_version =~ ^[0-9]\\.[0-9]+\\.[0-9]+ ]]; then
        echo "🟠 (//cmd/migrator/airgappedgen:tar_schema_descriptions) Stamping with dev version, will use "dev" as the current version"
        version="dev"
    else
        version="v$$stable_version"
    fi
else
    # When not stamping, usually during local development, we just use dev instead.
    version="dev"
fi
# Create the folder that we'll tar for the OCI container.
mkdir -p schema-descriptions/
# If we're building this without stamping, we don't want to generate anything.
# We leave an explanation to help debugging if something goes wrong and this ends up
# in a tagged release.
if [ $$version = "dev" ]; then
    echo "This is a dev version of the airgapped migrator; if you see this in production, please reach out to support" > schema-descriptions/README.md
else
    set +u
    if [[ -z "$$GH_TOKEN" ]]; then
        echo "🔴 GH_TOKEN is not set. Please ensure it's set and accessible to Bazel."
        echo "🔴 If you're running this locally, append --action_env=GH_TOKEN to make it visible to Bazel."
        echo "🔴 If you see this in CI, please reach out on #discuss-dev-infra."
        exit 1
    fi
    set -u
    # Fetch all the database schema descriptions up to the version we're releasing right now.
    $(execpath :airgappedgen) $$version schema-descriptions/
fi
if tar --version | grep -q bsdtar; then
    tar -cf $@ --uid=0 --gid=0 --numeric-owner schema-descriptions/
else
    tar -cf $@ --owner=:0 --group=:0 --numeric-owner schema-descriptions/
fi
""",
    stamp = 1,
    tags = ["requires-network"],
    tools = [":airgappedgen"],
    visibility = ["//cmd/migrator:__subpackages__"],
)
132 cmd/migrator/airgappedgen/README.md Normal file
@@ -0,0 +1,132 @@
# Airgapped Migrator Generator

For customers operating airgapped instances, we want the migrator to contain all database schema descriptions available at the
time of the release, as they won't be able to fetch them from the outside.

`airgappedgen` simply fetches all of them up to the given version and outputs them in a designated folder, which can then be used
to bundle them into the OCI container of the airgapped migrator.

Unless you're working on this little command, you'll never need to run it manually, as it'll be called as part of the build process
for the airgapped migrator OCI container.

See the [FAQ](#FAQ) at the end of this document.

## Usage

All the instructions below apply to local development. If you're looking into running it manually:

### Running the tool itself

```
bazel run //cmd/migrator/airgappedgen -- <version> <output-folder>
```

### Inspecting the final tarball that gets injected in the OCI container for the airgapped migrator

If you want to inspect the tarball it creates to be added as a layer in the OCI container, you need a few additional things:

- We need to stamp the build and have the right status variables:
  - Set the `VERSION` env var to the version you want to pretend you're building the tarball for.
    - `export VERSION=v5.2.5` for example.
  - Add `--stamp` and `--workspace_status_command=dev/bazel_stamp_vars.sh` to the bazel command.
- Make sure you have a GitHub token set in `$GH_TOKEN`:
  - The script makes a single request to the GitHub API, to list all tags, in order to get the releases it needs to fetch the schemas
    from. If done unauthenticated, this API call might get throttled, which can take a very long time depending on the quota
    associated with your current IP. It can take as long as 1h in some cases. The tool manually sets a short timeout, but the same
    reasoning applies.
  - Append `--action_env=GH_TOKEN` to your Bazel command to make it visible to Bazel.

So if we compile all the above:

```
export GH_TOKEN=<yourtoken>
export VERSION=5.2.5
bazel build //cmd/migrator/airgappedgen:tar_schema_descriptions --stamp --workspace_status_command=dev/bazel_stamp_vars.sh --action_env=GH_TOKEN
INFO: Invocation ID: a59368d9-40a9-4da8-a4a6-f0554ab9397a
WARNING: Build option --action_env has changed, discarding analysis cache (this can be expensive, see https://bazel.build/advanced/performance/iteration-speed).
INFO: Analyzed target //cmd/migrator/airgappedgen:tar_schema_descriptions (346 packages loaded, 12864 targets configured).
INFO: From GoLink cmd/migrator/airgappedgen/airgappedgen_/airgappedgen [for tool]:
ld: warning: ignoring duplicate libraries: '-lm'
INFO: Found 1 target...
Target //cmd/migrator/airgappedgen:tar_schema_descriptions up-to-date:
  bazel-bin/cmd/migrator/airgappedgen/schema_descriptions.tar
Aspect @@rules_rust//rust/private:clippy.bzl%rust_clippy_aspect of //cmd/migrator/airgappedgen:tar_schema_descriptions up-to-date (nothing to build)
INFO: Elapsed time: 27.318s, Critical Path: 25.26s
INFO: 4 processes: 1 internal, 3 darwin-sandbox.
INFO: Build completed successfully, 4 total actions
```

And we can inspect the tarball with:

```
# We got the path to the tarball from the command above; alternatively, we could use a cquery.
tar tvf bazel-bin/cmd/migrator/airgappedgen/schema_descriptions.tar
drwxr-xr-x 0 0 0      0 Jan 19 15:46 schema-descriptions/
-rw-r--r-- 0 0 0  21371 Jan 19 15:46 schema-descriptions/v3.29.0-internal_database_schema.codeinsights.json
-rw-r--r-- 0 0 0  41304 Jan 19 15:46 schema-descriptions/v3.30.2-internal_database_schema.codeintel.json
-rw-r--r-- 0 0 0 396651 Jan 19 15:46 schema-descriptions/v3.30.0-internal_database_schema.json
# (...)
```

If you're building the tarball without stamping with the `VERSION` env var, it will still work, but it will produce a tarball
that only contains a README indicating that this is a dev version. If you ever stumble across this in a production deployment,
it means that something went wrong.

```
# Build the tarball
bazel build //cmd/migrator/airgappedgen:tar_schema_descriptions
```

```
# Grab the tarball output and show what's inside.
# (we throw away stderr for clarity, so we just see the content of the tarball and not the logs
# from building the tarball).
$ tar tvf $(bazel cquery //cmd/migrator/airgappedgen:tar_schema_descriptions --output=files 2>/dev/null)
drwxr-xr-x 0 0 0   0 Jan 19 14:32 schema-descriptions/
-rw-r--r-- 0 0 0 109 Jan 19 14:32 schema-descriptions/README.md
```

# FAQ

## Why do we need this?

Airgapped customers run migrations in a fully isolated environment and can't reach the internet for security reasons,
meaning that we need to provide a migrator variant that comes with everything baked in.

## Why are we fetching database schemas prior to `v3.42.0` on GCS?

For versions prior to `v3.42.0`, the repository didn't have the `*schema.json` files committed, so they're stored in a GCS bucket
instead. See `gcs_versions.json` in this folder for the file that manually lists them.

## Why do we have to list the GCS versions in `gcs_versions.json`?

We could have used `gsutil` and simply downloaded the entire folder content in one go instead of having to specify the versions.
But having to deal with gcloud authentication here would have been a bit more complicated, when all we're doing is a bunch of
HTTP GET requests.
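
For illustration, each fetch is just an unauthenticated GET against the bucket's public URL. A minimal sketch (the URL layout is the one used by `downloadGCSVersions` in `main.go`; the version picked is an arbitrary example):

```
// fetch_one_schema.go — sketch of the unauthenticated GET the tool performs per file.
package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Same bucket layout as downloadGCSVersions in main.go.
	url := "https://storage.googleapis.com/sourcegraph-assets/migrations/drift/v3.30.0-internal_database_schema.json"
	resp, err := http.Get(url)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	b, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Printf("fetched %d bytes (status %s)\n", len(b), resp.Status)
}
```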

## Why don't we fail the build if unstamped, instead of silently creating an invalid tarball?

This would prevent CI builds that are not release builds from succeeding, which would be really annoying to deal with.

TODO: add a release test to ensure the airgapped migrator ships with the schemas: https://github.com/sourcegraph/sourcegraph/issues/59721

## Why not write a dumb shell script for this?

We need to list all the tags available for the `sourcegraph/sourcegraph` repository, which can be done with a `curl`, but that's a lot
of fragile parsing and scripting that may fail in unexpected ways. Given the stakes of this build step, it's better to have something robust.
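
For comparison, here's a minimal sketch of the typed listing that replaces the curl-and-grep dance (it mirrors `listRemoteTaggedVersions` in `main.go`; the unauthenticated client is a simplification, the real tool prefers `$GH_TOKEN`):

```
// list_release_tags.go — sketch: paginate tags with go-github instead of parsing curl output.
package main

import (
	"context"
	"fmt"
	"regexp"

	"github.com/google/go-github/v55/github"
)

var releaseTag = regexp.MustCompile(`^v\d+\.\d+\.\d+$`)

func main() {
	ctx := context.Background()
	ghc := github.NewClient(nil) // unauthenticated; may get throttled
	page := 0
	for {
		tags, resp, err := ghc.Repositories.ListTags(ctx, "sourcegraph", "sourcegraph", &github.ListOptions{Page: page})
		if err != nil {
			panic(err)
		}
		for _, t := range tags {
			// Skip tags that aren't Sourcegraph releases, e.g. old App tags.
			if releaseTag.MatchString(t.GetName()) {
				fmt.Println(t.GetName())
			}
		}
		if resp.NextPage == 0 {
			break
		}
		page = resp.NextPage
	}
}
```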

## Why not get the schemas through `git` commands?

Bazel actions are unaware of the Git repo they're executed in; that's the price to pay for hermeticity. If we really wanted
to use git commands, we would have to clone the repo during build time, which is really slow due to the size of our monorepo.

So instead, we handle everything through HTTP GET requests and one single authenticated call to the GitHub API.

## Are the tarballs stable, i.e. idempotent?

Yes. As long as the content didn't change in the GCS bucket and nobody re-tagged a previous release (which should never happen), the tarballs are stable; that's why you see the timestamps set at unix time (epoch) 0.
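
If you want to verify that yourself, a quick check (a sketch, assuming you rebuild the target between runs) is to hash the tarball after each build; a stable output produces identical digests:

```
// hash_tarball.go — sketch: compare SHA-256 digests of the tarball across builds.
package main

import (
	"crypto/sha256"
	"fmt"
	"io"
	"os"
)

func main() {
	f, err := os.Open("bazel-bin/cmd/migrator/airgappedgen/schema_descriptions.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	h := sha256.New()
	if _, err := io.Copy(h, f); err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", h.Sum(nil)) // run after each build; the digests should match
}
```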

## The tarball target has no inputs, i.e. `srcs = []`, how do we know when to rebuild it?

The `genrule` that creates the tarball has the `stamp = 1` attribute, so Bazel will inspect the stable status variables (the `STABLE_VERSION` line the workspace status command writes into `bazel-out/stable-status.txt`) and will
rebuild the target if they change. And `VERSION` is a stable status variable, so the tarball gets rebuilt every time it changes, i.e. on each release.
61 cmd/migrator/airgappedgen/gcs_versions.json Normal file
@@ -0,0 +1,61 @@
[
  "v3.20.0",
  "v3.20.1",
  "v3.21.0",
  "v3.21.1",
  "v3.21.2",
  "v3.22.0",
  "v3.22.1",
  "v3.23.0",
  "v3.24.0",
  "v3.24.1",
  "v3.25.0",
  "v3.25.1",
  "v3.25.2",
  "v3.26.0",
  "v3.26.1",
  "v3.26.2",
  "v3.26.3",
  "v3.27.0",
  "v3.27.1",
  "v3.27.2",
  "v3.27.3",
  "v3.27.4",
  "v3.27.5",
  "v3.28.0",
  "v3.29.0",
  "v3.29.1",
  "v3.30.0",
  "v3.30.1",
  "v3.30.2",
  "v3.30.3",
  "v3.30.4",
  "v3.31.0",
  "v3.31.1",
  "v3.31.2",
  "v3.32.0",
  "v3.32.1",
  "v3.33.0",
  "v3.33.1",
  "v3.33.2",
  "v3.34.0",
  "v3.34.1",
  "v3.34.2",
  "v3.35.0",
  "v3.35.1",
  "v3.35.2",
  "v3.36.0",
  "v3.36.1",
  "v3.36.2",
  "v3.36.3",
  "v3.37.0",
  "v3.38.0",
  "v3.38.1",
  "v3.39.0",
  "v3.39.1",
  "v3.40.0",
  "v3.40.1",
  "v3.40.2",
  "v3.41.0",
  "v3.41.1"
]
255 cmd/migrator/airgappedgen/main.go Normal file
@@ -0,0 +1,255 @@
package main

import (
	"context"
	_ "embed"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"time"

	"github.com/Masterminds/semver"
	"github.com/google/go-github/v55/github"
	"github.com/grafana/regexp"
	"github.com/sourcegraph/conc/pool"
	"github.com/sourcegraph/sourcegraph/lib/errors"
	"golang.org/x/oauth2"
)

//go:embed gcs_versions.json
var gcsVersionsRaw []byte

var gcsFilenames = []string{
	"internal_database_schema.json",
	"internal_database_schema.codeintel.json",
	"internal_database_schema.codeinsights.json",
}
var githubFilenames = []string{
	"schema.json",
	"schema.codeintel.json",
	"schema.codeinsights.json",
}

var gcsVersions []semver.Version

func init() {
	if err := json.Unmarshal(gcsVersionsRaw, &gcsVersions); err != nil {
		panic("invalid JSON for gcs_versions.json: " + err.Error())
	}
}

func main() {
	ctx := context.Background()

	usage := func() {
		fmt.Println("Current version argument is required.")
		fmt.Println("usage: airgappedgen vX.Y.Z <path to folder>")
		os.Exit(1)
	}

	if len(os.Args) < 3 {
		usage()
	}
	currentVersionRaw := os.Args[1]
	if currentVersionRaw == "" {
		usage()
	}

	currentVersion, err := semver.NewVersion(currentVersionRaw)
	if err != nil {
		panic(err)
	}

	exportPath := os.Args[2]
	if exportPath == "" {
		usage()
	}

	schemasGCS, err := downloadGCSVersions(ctx, gcsVersions)
	if err != nil {
		panic(err)
	}

	githubVersions, err := listRemoteTaggedVersions(ctx, currentVersion)
	if err != nil {
		panic(err)
	}

	schemasGitHub, err := downloadRemoteTaggedVersions(ctx, githubVersions)
	if err != nil {
		panic(err)
	}

	for _, sd := range append(schemasGCS, schemasGitHub...) {
		if err := sd.Export(exportPath); err != nil {
			panic(err)
		}
	}
}

func downloadRemoteTaggedVersions(_ context.Context, versions []semver.Version) ([]*schemaDescription, error) {
	urlFmt := "https://raw.githubusercontent.com/sourcegraph/sourcegraph/v%s/internal/database/%s"

	p := pool.NewWithResults[*schemaDescription]().WithMaxGoroutines(5).WithErrors()

	for _, version := range versions {
		version := version
		p.Go(func() (*schemaDescription, error) {
			sd := schemaDescription{
				version: version,
				files:   map[string][]byte{},
			}

			for _, filename := range githubFilenames {
				url := fmt.Sprintf(urlFmt, version.String(), filename)
				resp, err := http.Get(url)
				if err != nil {
					return nil, errors.Wrapf(err, "failed to download remote schema %q from GitHub", version.String())
				}
				defer resp.Body.Close()

				if resp.StatusCode >= 500 {
					return nil, fmt.Errorf("server error, remote schema %q: %s", url, resp.Status)
				}
				if resp.StatusCode == 404 {
					return nil, fmt.Errorf("remote schema %q not found: %s", url, resp.Status)
				}

				b, err := io.ReadAll(resp.Body)
				if err != nil {
					return nil, errors.Wrapf(err, "failed to read response body")
				}
				sd.files[fmt.Sprintf("internal_database_%s", filename)] = b
			}
			return &sd, nil
		})
	}
	return p.Wait()
}

func downloadGCSVersions(_ context.Context, versions []semver.Version) ([]*schemaDescription, error) {
	urlFmt := "https://storage.googleapis.com/sourcegraph-assets/migrations/drift/v%s-%s"

	p := pool.NewWithResults[*schemaDescription]().WithMaxGoroutines(5).WithErrors()

	for _, version := range versions {
		version := version
		p.Go(func() (*schemaDescription, error) {
			sd := schemaDescription{
				version: version,
				files:   map[string][]byte{},
			}

			for _, filename := range gcsFilenames {
				url := fmt.Sprintf(urlFmt, version.String(), filename)
				resp, err := http.Get(url)
				if err != nil {
					return nil, errors.Wrapf(err, "failed to download remote schema %q from GCS", version.String())
				}
				defer resp.Body.Close()

				if resp.StatusCode >= 500 {
					return nil, fmt.Errorf("server error downloading remote schema %q: %s", url, resp.Status)
				}
				if resp.StatusCode == 404 {
					// The oldest versions don't have all schemas, just the frontend one, so we're fine skipping them.
					continue
				}

				b, err := io.ReadAll(resp.Body)
				if err != nil {
					return nil, errors.Wrapf(err, "failed to read response body")
				}
				sd.files[filename] = b
			}
			return &sd, nil
		})
	}
	return p.Wait()
}

func listRemoteTaggedVersions(ctx context.Context, currentVersion *semver.Version) ([]semver.Version, error) {
	var ghc *github.Client
	if tok := os.Getenv("GH_TOKEN"); tok != "" {
		ghc = github.NewClient(oauth2.NewClient(ctx, oauth2.StaticTokenSource(
			&oauth2.Token{AccessToken: os.Getenv("GH_TOKEN")},
		)))
	} else {
		ghc = github.NewClient(http.DefaultClient)
	}

	// Unauthenticated requests can take a very long time if we get throttled.
	ctx, cancel := context.WithTimeout(ctx, 3*time.Minute)
	defer cancel()

	allTags := []string{}
	page := 0
	for {
		tags, resp, err := ghc.Repositories.ListTags(ctx, "sourcegraph", "sourcegraph", &github.ListOptions{Page: page})
		if err != nil {
			return nil, errors.Wrap(err, "failed to list tags from GitHub")
		}

		for _, tag := range tags {
			// If the tag is not a Sourcegraph release, like an old tag for App, we skip it.
			if !isTagSourcegraphRelease(tag.GetName()) {
				continue
			}

			// Now that we're sure it's a proper tag, let's parse it.
			versionTag, err := semver.NewVersion(tag.GetName())
			if err != nil {
				return nil, errors.Wrapf(err, "list remote release tags: invalid tag %q", tag.GetName())
			}

			// If the tag is relevant to the version we're releasing, include it.
			if isTagAfterGCS(versionTag) && isTagPriorToCurrentRelease(versionTag, currentVersion) {
				allTags = append(allTags, tag.GetName())
			}
		}

		if resp.NextPage == 0 {
			break
		}
		page = resp.NextPage
	}

	allVersions := []semver.Version{}
	for _, tag := range allTags {
		v, err := semver.NewVersion(tag)
		if err != nil {
			return nil, errors.Wrapf(err, "invalid semver tag: %s", tag)
		}
		allVersions = append(allVersions, *v)
	}
	return allVersions, nil
}

var versionRegexp = regexp.MustCompile(`^v\d+\.\d+\.\d+$`)

// isTagSourcegraphRelease returns true if the tag we're looking at is a Sourcegraph release.
func isTagSourcegraphRelease(tag string) bool {
	return versionRegexp.MatchString(tag)
}

// isTagAfterGCS returns true if the tag we're looking at was released after we started
// committing all schema descriptions to Git. The versions mentioned in ./gcs_versions.json were not;
// their tags are still there, but we can't fetch those schemas from GitHub as they were missing at that
// point in time.
func isTagAfterGCS(versionFromTag *semver.Version) bool {
	return versionFromTag.GreaterThan(&gcsVersions[len(gcsVersions)-1])
}

// isTagPriorToCurrentRelease returns true if the tag we're looking at was released prior to the
// current version we're releasing. This is to avoid embedding 5.3.0 schemas into a 5.2.X patch release
// that gets released AFTER 5.3.0, typically to ship a bug fix to customers still running 5.2.X-1.
func isTagPriorToCurrentRelease(versionFromTag *semver.Version, currentVersion *semver.Version) bool {
	// We include versions that are:
	// - released after the latest GCS version
	// - before the current version we're releasing.
	// Basically, if we release 5.2.X after 5.3.0 is out, we don't want to include the schemas for 5.3.0
	// because from the POV of the migrator, they don't exist yet.
	return versionFromTag.LessThan(currentVersion)
}
28 cmd/migrator/airgappedgen/schema_description.go Normal file
@@ -0,0 +1,28 @@
package main

import (
	"fmt"
	"os"
	"path/filepath"

	"github.com/Masterminds/semver"
)

type schemaDescription struct {
	version semver.Version
	files   map[string][]byte
}

func (sd *schemaDescription) Export(path string) error {
	for filename, b := range sd.files {
		f, err := os.Create(filepath.Join(path, fmt.Sprintf("v%s-%s", sd.version.String(), filename)))
		if err != nil {
			return err
		}
		defer f.Close()
		if _, err := f.Write(b); err != nil {
			return err
		}
	}
	return nil
}
@@ -13,50 +13,17 @@ commandTests:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
  - name: "has current database schema descriptions"
    # Because the version prefix is generated on the fly, we can't write this
    # as a file existence test. So instead, we just count the files in that folder
    # to make sure we have exactly three JSON files.
    command: "sh"
    args: ["-c", "[ $(ls -al /schema-descriptions/*-internal_database_schema*.json | wc -l) -eq 3 ]"]

fileExistenceTests:
  # The following files are fetched through GCS
  - name: '/schema-descriptions 3.20.0 schema'
    path: '/schema-descriptions/v3.20.0-internal_database_schema.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 3.20.0 schema.codeintel does not exist'
    path: '/schema-descriptions/v3.20.0-internal_database_schema.codeintel.json'
    shouldExist: false
    uid: 0
    gid: 0

  - name: '/schema-descriptions 3.21.0 schema'
    path: '/schema-descriptions/v3.21.0-internal_database_schema.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 3.21.0 schema.codeintel'
    path: '/schema-descriptions/v3.21.0-internal_database_schema.codeintel.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 3.21.0 schema.codeinsights does not exist'
    # We don't have codeinsights for that version, so there should not be a file
    path: '/schema-descriptions/v3.21.0-internal_database_schema.codeinsights.json'
    shouldExist: false
    uid: 0
    gid: 0

  # The following files are fetched through GitHub raw HTTP requests
  - name: '/schema-descriptions 5.0.1 schema'
    path: '/schema-descriptions/v5.0.1-internal_database_schema.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 5.0.1 schema.codeintel'
    path: '/schema-descriptions/v5.0.1-internal_database_schema.codeintel.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 5.0.1 schema.codeinsights'
    path: '/schema-descriptions/v5.0.1-internal_database_schema.codeinsights.json'
    shouldExist: true
    uid: 0
    gid: 0
  # Unless built with stamping + VERSION, we won't end up with the schema descriptions here.
  # Instead we should test that at release time to still guarantee that our customers are getting
  # the correct schemas.
  # TODO https://github.com/sourcegraph/sourcegraph/issues/59721
  - name: '/schema-descriptions README'
    path: '/schema-descriptions/README.md'
@@ -15,7 +15,12 @@ const (
	// SourcegraphDockerDevRegistry is a private registry for dev images, and requires authentication to pull from.
	SourcegraphDockerDevRegistry = "us.gcr.io/sourcegraph-dev"
	// SourcegraphDockerPublishRegistry is a public registry for final images, and does not require authentication to pull from.
	SourcegraphDockerPublishRegistry = "index.docker.io/sourcegraph"
	// TODO RFC795: safeguard
	SourcegraphDockerPublishRegistry = "BROKENindex.docker.io/sourcegraph"
	// SourcegraphInternalReleaseRegistry is a private registry storing internal releases.
	SourcegraphInternalReleaseRegistry = "us-central1-docker.pkg.dev/sourcegraph-ci/rfc795-internal"
	// SourcegraphPublicReleaseRegistry is a currently private registry for storing public releases.
	SourcegraphPublicReleaseRegistry = "us-central1-docker.pkg.dev/sourcegraph-ci/rfc795-public"
)

// DevRegistryImage returns the name of the image for the given app and tag on the
@@ -25,6 +30,13 @@ func DevRegistryImage(app, tag string) string {
	return maybeTaggedImage(root, tag)
}

// InternalReleaseRegistry returns the name of the image for the given app and tag on the
// internal releases private registry.
func InternalReleaseRegistry(app, tag string) string {
	root := fmt.Sprintf("%s/%s", SourcegraphInternalReleaseRegistry, app)
	return maybeTaggedImage(root, tag)
}

// PublishedRegistryImage returns the name of the image for the given app and tag on the
// publish registry.
func PublishedRegistryImage(app, tag string) string {
@@ -33,9 +33,10 @@ cleanup() {
}
trap cleanup EXIT

export POSTGRES_IMAGE="us.gcr.io/sourcegraph-dev/postgres-12-alpine:${CANDIDATE_VERSION}"
export SERVER_IMAGE="us.gcr.io/sourcegraph-dev/server:${CANDIDATE_VERSION}"
export EXECUTOR_IMAGE="us.gcr.io/sourcegraph-dev/executor:${CANDIDATE_VERSION}"
registry="us.gcr.io/sourcegraph-dev"
export POSTGRES_IMAGE="${registry}/postgres-12-alpine:${CANDIDATE_VERSION}"
export SERVER_IMAGE="${registry}/server:${CANDIDATE_VERSION}"
export EXECUTOR_IMAGE="${registry}/executor:${CANDIDATE_VERSION}"
export EXECUTOR_FRONTEND_PASSWORD="hunter2hunter2hunter2"
export SOURCEGRAPH_LICENSE_GENERATION_KEY="${SOURCEGRAPH_LICENSE_GENERATION_KEY:-""}"
export TMP_DIR
@@ -55,7 +56,7 @@ fi

# Need to pull this image pre-execution as the docker executor doesn't have a
# credential to pull this image.
BATCHESHELPER_IMAGE="us.gcr.io/sourcegraph-dev/batcheshelper:${CANDIDATE_VERSION}"
BATCHESHELPER_IMAGE="${registry}/batcheshelper:${CANDIDATE_VERSION}"
docker pull "${BATCHESHELPER_IMAGE}"

echo "--- :terminal: Start server with executor"
@@ -16,6 +16,7 @@ go_library(
        "misc_operations.go",
        "operations.go",
        "pipeline.go",
        "release_operations.go",
        "security_operations.go",
        "web-integration-workloads.go",
        "wolfi_operations.go",
@@ -46,6 +46,7 @@ func bazelStampedCmd(args ...string) string {
		genBazelRC,
		"bazel",
		fmt.Sprintf("--bazelrc=%s", bazelrc),
		fmt.Sprintf("--bazelrc=%s", ".aspect/bazelrc/ci.sourcegraph.bazelrc"),
	}
	post := []string{
		"--stamp",
@@ -69,6 +69,8 @@ func NewConfig(now time.Time) Config {
		"RELEASE_NIGHTLY":    os.Getenv("RELEASE_NIGHTLY"),
		"VSCE_NIGHTLY":       os.Getenv("VSCE_NIGHTLY"),
		"WOLFI_BASE_REBUILD": os.Getenv("WOLFI_BASE_REBUILD"),
		"RELEASE_INTERNAL":   os.Getenv("RELEASE_INTERNAL"),
		"RELEASE_PUBLIC":     os.Getenv("RELEASE_PUBLIC"),
	})
	// defaults to 0
	buildNumber, _ = strconv.Atoi(os.Getenv("BUILDKITE_BUILD_NUMBER"))
@@ -117,7 +119,7 @@ func NewConfig(now time.Time) Config {

		Time:              now,
		Branch:            branch,
		Version:           versionFromTag(runType, tag, commit, buildNumber, branch, now),
		Version:           inferVersion(runType, tag, commit, buildNumber, branch, now),
		Commit:            commit,
		MustIncludeCommit: mustIncludeCommits,
		Diff:              diff,
@@ -134,8 +136,14 @@ func NewConfig(now time.Time) Config {
	}
}

// versionFromTag constructs the Sourcegraph version from the given build state.
func versionFromTag(runType runtype.RunType, tag string, commit string, buildNumber int, branch string, now time.Time) string {
// inferVersion constructs the Sourcegraph version from the given build state.
func inferVersion(runType runtype.RunType, tag string, commit string, buildNumber int, branch string, now time.Time) string {
	// If we're building a release, use the version that is being released regardless of
	// all other build attributes, such as tag, commit, build number, etc ...
	if runType.Is(runtype.InternalRelease, runtype.PromoteRelease) {
		return os.Getenv("VERSION")
	}

	if runType.Is(runtype.TaggedRelease) {
		// This tag is used for publishing versioned releases.
		//
@@ -18,7 +18,7 @@ func bazelBuildExecutorVM(c Config, alwaysRebuild bool) operations.Operation {
		bk.Key(candidateImageStepKey("executor.vm-image")),
		bk.Env("VERSION", c.Version),
		bk.Env("IMAGE_FAMILY", imageFamily),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease))),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease, runtype.InternalRelease))),
	}

	cmd := bazelStampedCmd("run //cmd/executor/vm-image:ami.build")
@@ -42,7 +42,7 @@ func bazelPublishExecutorVM(c Config, alwaysRebuild bool) operations.Operation {
		bk.DependsOn(candidateImageStepKey("executor.vm-image")),
		bk.Env("VERSION", c.Version),
		bk.Env("IMAGE_FAMILY", imageFamily),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease))),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease, runtype.InternalRelease))),
	}

	cmd := bazelStampedCmd("run //cmd/executor/vm-image:ami.push")
@@ -68,7 +68,7 @@ func bazelBuildExecutorDockerMirror(c Config) operations.Operation {
		bk.Key(candidateImageStepKey("executor-docker-miror.vm-image")),
		bk.Env("VERSION", c.Version),
		bk.Env("IMAGE_FAMILY", imageFamily),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease))),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease, runtype.InternalRelease))),
		bk.Cmd(bazelStampedCmd("run //cmd/executor/docker-mirror:ami.build")),
	}
	pipeline.AddStep(":bazel::packer: :construction: Build docker registry mirror image", stepOpts...)
@@ -84,7 +84,7 @@ func bazelPublishExecutorDockerMirror(c Config) operations.Operation {
		bk.DependsOn(candidateBuildStep),
		bk.Env("VERSION", c.Version),
		bk.Env("IMAGE_FAMILY", imageFamily),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease))),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease, runtype.InternalRelease))),
		bk.Cmd(bazelStampedCmd("run //cmd/executor/docker-mirror:ami.push")),
	}
	pipeline.AddStep(":bazel::packer: :white_check_mark: Publish docker registry mirror image", stepOpts...)
@@ -96,7 +96,7 @@ func bazelPublishExecutorBinary(c Config) operations.Operation {
	stepOpts := []bk.StepOpt{
		bk.Agent("queue", AspectWorkflows.QueueDefault),
		bk.Env("VERSION", c.Version),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease))),
		bk.Env("EXECUTOR_IS_TAGGED_RELEASE", strconv.FormatBool(c.RunType.Is(runtype.TaggedRelease, runtype.InternalRelease))),
		bk.Cmd(bazelStampedCmd(`run //cmd/executor:binary.push`)),
	}
	pipeline.AddStep(":bazel::arrow_heading_up: Publish executor binary", stepOpts...)
@@ -115,6 +115,14 @@ func executorDockerMirrorImageFamilyForConfig(c Config) string {
		}
		imageFamily = fmt.Sprintf("sourcegraph-executors-docker-mirror-%d-%d", ver.Major(), ver.Minor())
	}

	if c.RunType.Is(runtype.InternalRelease) {
		ver, err := semver.NewVersion(c.Version)
		if err != nil {
			panic("cannot parse version")
		}
		imageFamily = fmt.Sprintf("sourcegraph-executors-docker-mirror-%d-%d-%d", ver.Major(), ver.Minor(), ver.Patch())
	}
	return imageFamily
}

@@ -130,6 +138,13 @@ func executorImageFamilyForConfig(c Config) string {
		}
		imageFamily = fmt.Sprintf("sourcegraph-executors-%d-%d", ver.Major(), ver.Minor())
	}
	if c.RunType.Is(runtype.InternalRelease) {
		ver, err := semver.NewVersion(c.Version)
		if err != nil {
			panic("cannot parse version")
		}
		imageFamily = fmt.Sprintf("sourcegraph-executors-internal-%d-%d-%d", ver.Major(), ver.Minor(), ver.Patch())
	}
	return imageFamily
}

@@ -28,7 +28,7 @@ func publishFinalDockerImage(c Config, app string) operations.Operation {

	var imgs []string
	for _, image := range []string{publishImage, devImage} {
		if app != "server" || c.RunType.Is(runtype.TaggedRelease, runtype.ImagePatch, runtype.ImagePatchNoTest) {
		if app != "server" || c.RunType.Is(runtype.TaggedRelease, runtype.InternalRelease, runtype.ImagePatch, runtype.ImagePatchNoTest) {
			imgs = append(imgs, fmt.Sprintf("%s:%s", image, c.Version))
		}

@@ -68,39 +68,53 @@ func publishFinalDockerImage(c Config, app string) operations.Operation {
}

// Used in default run type
func bazelPushImagesCandidates(version string) func(*bk.Pipeline) {
	return bazelPushImagesCmd(version, true)
func bazelPushImagesCandidates(c Config) func(*bk.Pipeline) {
	return bazelPushImagesCmd(c, true)
}

// Used in default run type
func bazelPushImagesFinal(version string) func(*bk.Pipeline) {
	return bazelPushImagesCmd(version, false, bk.DependsOn(AspectWorkflows.TestStepKey, AspectWorkflows.IntegrationTestStepKey))
func bazelPushImagesFinal(c Config) func(*bk.Pipeline) {
	return bazelPushImagesCmd(c, false, bk.DependsOn(AspectWorkflows.TestStepKey, AspectWorkflows.IntegrationTestStepKey))
}

// Used in CandidateNoTest run type
func bazelPushImagesNoTest(version string) func(*bk.Pipeline) {
	return bazelPushImagesCmd(version, false)
func bazelPushImagesNoTest(c Config) func(*bk.Pipeline) {
	return bazelPushImagesCmd(c, false)
}

func bazelPushImagesCmd(version string, isCandidate bool, opts ...bk.StepOpt) func(*bk.Pipeline) {
func bazelPushImagesCmd(c Config, isCandidate bool, opts ...bk.StepOpt) func(*bk.Pipeline) {
	stepName := ":bazel::docker: Push final images"
	stepKey := "bazel-push-images"
	candidate := ""

	// Default registries.
	devRegistry := images.SourcegraphDockerDevRegistry
	prodRegistry := images.SourcegraphDockerPublishRegistry

	if isCandidate {
		stepName = ":bazel::docker: Push candidate Images"
		stepKey = stepKey + "-candidate"
		candidate = "true"
	}

	// If we're building an internal release, we push the final images to that specific registry instead.
	// See also: release_operations.go
	if c.RunType.Is(runtype.InternalRelease) {
		prodRegistry = images.SourcegraphInternalReleaseRegistry
	}

	_, bazelRC := aspectBazelRC()

	return func(pipeline *bk.Pipeline) {
		pipeline.AddStep(stepName,
			append(opts,
				bk.Agent("queue", AspectWorkflows.QueueDefault),
				bk.Key(stepKey),
				bk.Env("PUSH_VERSION", version),
				bk.Env("PUSH_VERSION", c.Version),
				bk.Env("CANDIDATE_ONLY", candidate),
				bk.Cmd(bazelStampedCmd(`build $$(bazel query 'kind("oci_push rule", //...)')`)),
				bk.Env("DEV_REGISTRY", devRegistry),
				bk.Env("PROD_REGISTRY", prodRegistry),
				bk.Cmd(bazelStampedCmd(fmt.Sprintf(`build $$(bazel --bazelrc=%s --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc query 'kind("oci_push rule", //...)')`, bazelRC))),
				bk.AnnotatedCmd(
					"./dev/ci/push_all.sh",
					bk.AnnotatedCmdOpts{
@@ -109,8 +123,7 @@ func bazelPushImagesCmd(version string, isCandidate bool, opts ...bk.StepOpt) fu
						IncludeNames: false,
					},
				},
			),
			)...,
			))...,
		)
	}
}
@@ -71,7 +71,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
	bk.FeatureFlags.ApplyEnv(env)

	// On release branches Percy must compare to the previous commit of the release branch, not main.
	if c.RunType.Is(runtype.ReleaseBranch, runtype.TaggedRelease) {
	if c.RunType.Is(runtype.ReleaseBranch, runtype.TaggedRelease, runtype.InternalRelease) {
		env["PERCY_TARGET_BRANCH"] = c.Branch
		// When we are building a release, we do not want to cache the client bundle.
		//
@@ -200,7 +200,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {

		// Add final artifacts
		publishOps := operations.NewNamedSet("Publish images")
		publishOps.Append(bazelPushImagesNoTest(c.Version))
		publishOps.Append(bazelPushImagesNoTest(c))

		for _, dockerImage := range legacyDockerImages {
			publishOps.Append(publishFinalDockerImage(c, dockerImage))
@@ -256,14 +256,17 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
			bazelPublishExecutorDockerMirror(c),
			bazelPublishExecutorBinary(c),
		)

	case runtype.PromoteRelease:
		ops = operations.NewSet(
			releasePromoteImages(c),
		)
	default:
		// Executor VM image
		alwaysRebuild := c.MessageFlags.SkipHashCompare || c.RunType.Is(runtype.ReleaseBranch, runtype.TaggedRelease) || c.Diff.Has(changed.ExecutorVMImage)
		alwaysRebuild := c.MessageFlags.SkipHashCompare || c.RunType.Is(runtype.ReleaseBranch, runtype.TaggedRelease, runtype.InternalRelease) || c.Diff.Has(changed.ExecutorVMImage)
		// Slow image builds
		imageBuildOps := operations.NewNamedSet("Image builds")

		if c.RunType.Is(runtype.MainDryRun, runtype.MainBranch, runtype.ReleaseBranch, runtype.TaggedRelease) {
		if c.RunType.Is(runtype.MainDryRun, runtype.MainBranch, runtype.ReleaseBranch, runtype.TaggedRelease, runtype.InternalRelease) {
			imageBuildOps.Append(bazelBuildExecutorVM(c, alwaysRebuild))
			if c.RunType.Is(runtype.ReleaseBranch, runtype.TaggedRelease) || c.Diff.Has(changed.ExecutorDockerRegistryMirror) {
				imageBuildOps.Append(bazelBuildExecutorDockerMirror(c))
@@ -273,7 +276,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {

		// Core tests
		ops.Merge(CoreTestOperations(buildOptions, changed.All, CoreTestOperationsOptions{
			ChromaticShouldAutoAccept: c.RunType.Is(runtype.MainBranch, runtype.ReleaseBranch, runtype.TaggedRelease),
			ChromaticShouldAutoAccept: c.RunType.Is(runtype.MainBranch, runtype.ReleaseBranch, runtype.TaggedRelease, runtype.InternalRelease),
			MinimumUpgradeableVersion: minimumUpgradeableVersion,
			ForceReadyForReview:       c.MessageFlags.ForceReadyForReview,
			CacheBundleSize:           c.RunType.Is(runtype.MainBranch, runtype.MainDryRun),
@@ -287,7 +290,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {

		// Publish candidate images to dev registry
		publishOpsDev := operations.NewNamedSet("Publish candidate images")
		publishOpsDev.Append(bazelPushImagesCandidates(c.Version))
		publishOpsDev.Append(bazelPushImagesCandidates(c))
		ops.Merge(publishOpsDev)

		// End-to-end tests
@@ -311,7 +314,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
		// Add final artifacts
		publishOps := operations.NewNamedSet("Publish images")
		// Executor VM image
		if c.RunType.Is(runtype.MainBranch, runtype.TaggedRelease) {
		if c.RunType.Is(runtype.MainBranch, runtype.TaggedRelease, runtype.InternalRelease) {
			publishOps.Append(bazelPublishExecutorVM(c, alwaysRebuild))
			publishOps.Append(bazelPublishExecutorBinary(c))
			if c.RunType.Is(runtype.TaggedRelease) || c.Diff.Has(changed.ExecutorDockerRegistryMirror) {
@@ -320,7 +323,7 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
		}

		// Final Bazel images
		publishOps.Append(bazelPushImagesFinal(c.Version))
		publishOps.Append(bazelPushImagesFinal(c))
		ops.Merge(publishOps)
	}

34 dev/ci/internal/ci/release_operations.go Normal file
@@ -0,0 +1,34 @@
package ci

import (
	"fmt"
	"strings"

	bk "github.com/sourcegraph/sourcegraph/dev/ci/internal/buildkite"

	"github.com/sourcegraph/sourcegraph/dev/ci/images"
	"github.com/sourcegraph/sourcegraph/dev/ci/internal/ci/operations"
)

// releasePromoteImages runs a script that iterates through all defined images we're producing that have been uploaded
// to the internal registry with a given version, and retags them to the public registry.
func releasePromoteImages(c Config) operations.Operation {
	image_args := strings.Join(images.SourcegraphDockerImages, " ")
	return func(pipeline *bk.Pipeline) {
		pipeline.AddStep("Promote release to public",
			bk.Agent("queue", AspectWorkflows.QueueDefault),
			bk.Env("VERSION", c.Version),
			bk.Env("INTERNAL_REGISTRY", images.SourcegraphInternalReleaseRegistry),
			bk.Env("PUBLIC_REGISTRY", images.SourcegraphPublicReleaseRegistry),
			bk.AnnotatedCmd(
				fmt.Sprintf("./tools/release/promote_images.sh %s", image_args),
				bk.AnnotatedCmdOpts{
					Annotations: &bk.AnnotationOpts{
						Type:         bk.AnnotationTypeInfo,
						IncludeNames: false,
					},
				},
			),
		)
	}
}
@@ -4,7 +4,7 @@ set -eu

aspectRC="/tmp/aspect-generated.bazelrc"
rosetta bazelrc > "$aspectRC"
bazelrc=(--bazelrc="$aspectRC")
bazelrc=(--bazelrc="$aspectRC" --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc)

function preview_tags() {
  IFS=' ' read -r -a registries <<<"$1"
@@ -78,10 +78,11 @@ function create_push_command() {
}

dev_registries=(
  "us.gcr.io/sourcegraph-dev"
  "$DEV_REGISTRY"
)

prod_registries=(
  "index.docker.io/sourcegraph"
  "$PROD_REGISTRY"
)

date_fragment="$(date +%Y-%m-%d)"
@@ -99,11 +100,7 @@ CANDIDATE_ONLY=${CANDIDATE_ONLY:-""}

push_prod=false

# ok: main
# ok: main-dry-run
# ok: main-dry-run-123
# no: main-foo
if [[ "$BUILDKITE_BRANCH" =~ ^main$ ]] || [[ "$BUILDKITE_BRANCH" =~ ^docker-images-candidates-notest/.* ]]; then
if [[ "$BUILDKITE_BRANCH" =~ ^docker-images-candidates-notest/.* ]]; then
  dev_tags+=("insiders")
  prod_tags+=("insiders")
  push_prod=true
@@ -116,6 +113,12 @@ if [[ "$BUILDKITE_BRANCH" =~ ^main-dry-run/.* ]]; then
  push_prod=false
fi

# If we're doing an internal release, we need to push to the prod registry too.
# TODO(rfc795): this should be more granular; we're abusing the idea of the prod registry a bit here.
if [ "${RELEASE_INTERNAL:-}" == "true" ]; then
  push_prod=true
fi

# All release branch builds must be published to prod tags to support the
# format introduced by https://github.com/sourcegraph/sourcegraph/pull/48050
# by release branch deployments.
@@ -26,6 +26,9 @@ const (
	ReleaseBranch     // release branch build
	BextReleaseBranch // browser extension release build

	InternalRelease // Internal release
	PromoteRelease  // Public release

	// Main branches

	MainBranch // main branch build
@@ -77,6 +80,18 @@ func (t RunType) Is(oneOfTypes ...RunType) bool {
// Matcher returns the requirements for a build to be considered of this RunType.
func (t RunType) Matcher() *RunTypeMatcher {
	switch t {
	case PromoteRelease:
		return &RunTypeMatcher{
			EnvIncludes: map[string]string{
				"RELEASE_PUBLIC": "true",
			},
		}
	case InternalRelease:
		return &RunTypeMatcher{
			EnvIncludes: map[string]string{
				"RELEASE_INTERNAL": "true",
			},
		}
	case BextNightly:
		return &RunTypeMatcher{
			EnvIncludes: map[string]string{
@@ -115,7 +130,8 @@ func (t RunType) Matcher() *RunTypeMatcher {
		}
	case MainDryRun:
		return &RunTypeMatcher{
			Branch: "main-dry-run/",
			Branch:       "(?:main-dry-run/)",
			BranchRegexp: true,
		}
	case ManuallyTriggered:
		return &RunTypeMatcher{
@@ -144,6 +160,7 @@ func (t RunType) Matcher() *RunTypeMatcher {
		return &RunTypeMatcher{
			Branch: "bazel-do/",
		}

	}

	return nil
@@ -181,6 +198,10 @@ func (t RunType) String() string {
		return "Build executor without testing"
	case BazelDo:
		return "Bazel command"
	case InternalRelease:
		return "Internal release"
	case PromoteRelease:
		return "Public release"
	}
	return "None"
}
|
||||
@ -32,8 +32,9 @@ func TestComputeRunType(t *testing.T) {
|
||||
}, {
|
||||
name: "tagged release",
|
||||
args: args{
|
||||
branch: "1.3",
|
||||
tag: "v1.2.3",
|
||||
// TODO(@jh) RFC795 check about this?
|
||||
// branch: "1.3",
|
||||
tag: "v1.2.3",
|
||||
},
|
||||
want: TaggedRelease,
|
||||
}, {
|
||||
@ -51,6 +52,22 @@ func TestComputeRunType(t *testing.T) {
|
||||
},
|
||||
},
|
||||
want: BextNightly,
|
||||
}, {
|
||||
name: "internal release",
|
||||
args: args{
|
||||
env: map[string]string{
|
||||
"RELEASE_INTERNAL": "true",
|
||||
},
|
||||
},
|
||||
want: InternalRelease,
|
||||
}, {
|
||||
name: "public release",
|
||||
args: args{
|
||||
env: map[string]string{
|
||||
"RELEASE_PUBLIC": "true",
|
||||
},
|
||||
},
|
||||
want: PromoteRelease,
|
||||
}, {
|
||||
name: "wolfi base image rebuild",
|
||||
args: args{
|
||||
|
||||
@@ -30,7 +30,6 @@ go_library(
        "sg_monitoring.go",
        "sg_ops.go",
        "sg_page.go",
        "sg_release.go",
        "sg_rfc.go",
        "sg_run.go",
        "sg_secret.go",
@@ -133,7 +133,7 @@ func (r *DockerHub) fetchDigest(repo string, tag string) (digest.Digest, error)
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		data, _ := io.ReadAll(resp.Body)
		return "", errors.Newf("fetchDigest (%s) %s:%s, got %v: %s", r.host, repo, tag, resp.Status, string(data))
		return "", errors.Newf("DockerHub fetchDigest (%s) %s:%s, got %v: %s", r.host, repo, tag, resp.Status, string(data))
	}

	d := resp.Header.Get("Docker-Content-Digest")
@@ -68,7 +68,7 @@ func (r *GCR) fetchDigest(repo string, tag string) (digest.Digest, error) {
	defer resp.Body.Close()
	if resp.StatusCode != 200 {
		data, _ := io.ReadAll(resp.Body)
		return "", errors.Newf("fetchDigest (%s) %s:%s, got %v: %s", r.host, repo, tag, resp.Status, string(data))
		return "", errors.Newf("GCR fetchDigest (%s) %s:%s, got %v: %s", r.host, repo, tag, resp.Status, string(data))
	}
	d := resp.Header.Get("Docker-Content-Digest")
	g, err := digest.Parse(d)
@@ -3,15 +3,23 @@ load("//dev:go_defs.bzl", "go_test")

go_library(
    name = "release",
    srcs = ["cve.go"],
    srcs = [
        "config.go",
        "cve.go",
        "release.go",
    ],
    importpath = "github.com/sourcegraph/sourcegraph/dev/sg/internal/release",
    visibility = ["//dev/sg:__subpackages__"],
    deps = [
        "//dev/sg/internal/bk",
        "//dev/sg/internal/category",
        "//dev/sg/internal/std",
        "//lib/errors",
        "//lib/output",
        "@com_github_grafana_regexp//:regexp",
        "@com_github_sourcegraph_run//:run",
        "@com_github_urfave_cli_v2//:cli",
        "@in_gopkg_yaml_v3//:yaml_v3",
    ],
)

368 dev/sg/internal/release/config.go Normal file
@ -0,0 +1,368 @@
package release

import (
	"context"
	"encoding/json"
	"fmt"
	"os"
	"strings"

	"github.com/sourcegraph/run"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
	"github.com/sourcegraph/sourcegraph/lib/errors"
	"github.com/sourcegraph/sourcegraph/lib/output"
	"gopkg.in/yaml.v3"
)

// TODO sg release scaffold ...
// TODO add PR body
type ReleaseManifest struct {
	// Meta defines information about the product being released, so we can
	// track who is in charge of releasing this, what kind of artifacts it is producing,
	// etc ...
	Meta struct {
		ProductName string   `yaml:"productName"`
		Owners      []string `yaml:"owners"`
		Repository  string   `yaml:"repository"`
		Artifacts   []string `yaml:"artifacts"`
		README      string   `yaml:"README"`
	} `yaml:"meta"`
	// Requirements is a list of commands that must exit without errors for the manifest to be
	// considered to be valid. Alternatively, instead of defining Cmd, Env can be set to
	// ensure an environment variable is defined.
	Requirements []struct {
		Name            string `yaml:"name"`
		Cmd             string `yaml:"cmd"`
		Env             string `yaml:"env"`
		FixInstructions string `yaml:"fixInstructions"`
	} `yaml:"requirements"`
	// Inputs defines a list of k=v strings, defining the required inputs for that release manifest.
	// Typically, this is either empty or server=vX.Y.Z to build a release that uses that particular
	// server version.
	Inputs []input `yaml:"inputs"`
	// Internal defines the steps to create an internal release.
	Internal struct {
		// Create defines the steps to create the release build. This is where we define changes that need
		// to be applied on the code for the release to exist. Typically, that means updating images,
		// fetching new container tags, etc ...
		Create struct {
			Steps struct {
				Patch []cmdManifest `yaml:"patch"`
				Minor []cmdManifest `yaml:"minor"`
				Major []cmdManifest `yaml:"major"`
			} `yaml:"steps"`
		} `yaml:"create"`
		// Finalize defines the steps to execute once the internal release build and test phases have been successfully completed.
		// Typically, this is where one would define commands to open a PR on a documentation repo to take note of this
		// new release.
		Finalize struct {
			Steps []cmdManifest `yaml:"steps"`
		} `yaml:"finalize"`
	} `yaml:"internal"`
	// Test defines the steps to test the release build. These are not meant to be "normal tests", but instead
	// extended testing to ensure the release is correct. These tests are to be executed during both the
	// create and promote-to-public phases.
	Test struct {
		Steps []cmdManifest `yaml:"steps"`
	} `yaml:"test"`
	// PromoteToPublic defines steps to execute when promoting the release to a public one. Typically that's where
	// one would move release artifacts from a private place to one that is publicly accessible.
	PromoteToPublic struct {
		Create struct {
			Steps []cmdManifest `yaml:"steps"`
		} `yaml:"create"`
		Finalize struct {
			Steps []cmdManifest `yaml:"steps"`
		} `yaml:"finalize"`
	} `yaml:"promoteToPublic"`
}

type cmdManifest struct {
	Name string `yaml:"name"`
	Cmd  string `yaml:"cmd"`
}

type input struct {
	ReleaseID string `yaml:"releaseId"`
}

type releaseRunner struct {
	vars    map[string]string
	inputs  map[string]string
	m       *ReleaseManifest
	version string
	pretend bool
	typ     string
}

// releaseConfig is a serializable structure holding the configuration
// for the release tooling, that can be passed around easily.
type releaseConfig struct {
	Version string `json:"version"`
	Inputs  string `json:"inputs"`
	Type    string `json:"type"`
}

func parseReleaseConfig(configRaw string) (*releaseConfig, error) {
	rc := releaseConfig{}
	if err := json.Unmarshal([]byte(configRaw), &rc); err != nil {
		return nil, err
	}
	return &rc, nil
}
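Editor's note (not part of the diff): the serialized `releaseConfig` is what later gets embedded as the `{{config}}` variable and re-read by `--config-from-commit` in `newReleaseRunnerFromCliContext` further down, which expects it as the last line of the release commit message. A minimal, self-contained sketch of what that JSON line looks like (all values illustrative):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors the releaseConfig struct above, for illustration only.
type releaseConfig struct {
	Version string `json:"version"`
	Inputs  string `json:"inputs"`
	Type    string `json:"type"`
}

func main() {
	cfg := releaseConfig{Version: "v5.3.0", Inputs: "server=v5.3.0", Type: "patch"}
	b, err := json.Marshal(cfg)
	if err != nil {
		panic(err)
	}
	// This one-liner is what parseReleaseConfig expects to find:
	// {"version":"v5.3.0","inputs":"server=v5.3.0","type":"patch"}
	fmt.Println(string(b))
}
```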
func NewReleaseRunner(ctx context.Context, workdir string, version string, inputsArg string, typ string, gitBranch string, pretend bool) (*releaseRunner, error) {
	announce2("setup", "Finding release manifest in %q", workdir)

	inputs, err := parseInputs(inputsArg)
	if err != nil {
		return nil, err
	}

	config := releaseConfig{
		Version: version,
		Inputs:  inputsArg,
		Type:    typ,
	}

	configBytes, err := json.Marshal(config)
	if err != nil {
		return nil, err
	}

	if gitBranch == "" {
		cmd := run.Cmd(ctx, "git rev-parse --abbrev-ref HEAD")
		cmd.Dir(workdir)
		out, err := cmd.Run().String()
		if err != nil {
			return nil, err
		}
		gitBranch = out
		sayWarn("setup", "No explicit branch name was provided, assuming current branch is the target: %s", gitBranch)
	}

	vars := map[string]string{
		"version":    version,
		"tag":        strings.TrimPrefix(version, "v"),
		"config":     string(configBytes),
		"git.branch": gitBranch,
	}
	for k, v := range inputs {
		// TODO sanitize input format
		vars[fmt.Sprintf("inputs.%s.version", k)] = v
		vars[fmt.Sprintf("inputs.%s.tag", k)] = strings.TrimPrefix(v, "v")
	}

	if err := os.Chdir(workdir); err != nil {
		return nil, err
	}

	f, err := os.Open("release.yaml")
	if err != nil {
		say("setup", "failed to find release manifest")
		return nil, err
	}
	defer f.Close()

	var m ReleaseManifest
	dec := yaml.NewDecoder(f)
	if err := dec.Decode(&m); err != nil {
		say("setup", "failed to decode manifest")
		return nil, err
	}
	saySuccess("setup", "Found manifest for %q (%s)", m.Meta.ProductName, m.Meta.Repository)

	say("meta", "Owners: %s", strings.Join(m.Meta.Owners, ", "))
	say("meta", "Repository: %s", m.Meta.Repository)

	for _, in := range m.Inputs {
		var found bool
		for k := range inputs {
			if k == in.ReleaseID {
				found = true
			}
		}
		if !found {
			sayFail("inputs", "Couldn't find input %q, required by manifest, but --inputs=%s=... flag is missing", in.ReleaseID, in.ReleaseID)
			return nil, errors.New("missing input")
		}
	}

	announce2("vars", "Variables")
	for k, v := range vars {
		say("vars", "%s=%q", k, v)
	}

	r := &releaseRunner{
		version: version,
		pretend: pretend,
		inputs:  inputs,
		typ:     typ,
		m:       &m,
		vars:    vars,
	}

	return r, nil
}

func parseInputs(str string) (map[string]string, error) {
	if str == "" {
		return nil, nil
	}
	m := map[string]string{}
	parts := strings.Split(str, ",")
	for _, part := range parts {
		subparts := strings.Split(part, "=")
		if len(subparts) != 2 {
			return nil, errors.New("invalid inputs")
		}
		m[subparts[0]] = subparts[1]
	}
	return m, nil
}
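A quick sketch of the `--inputs` format accepted above, using the same split logic as `parseInputs` (key/value pairs illustrative only):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Comma-separated k=v pairs, as in --inputs=server=v5.2.404040,foobar=ffefe.
	in := "server=v5.2.404040,foobar=ffefe"
	m := map[string]string{}
	for _, part := range strings.Split(in, ",") {
		kv := strings.Split(part, "=")
		if len(kv) != 2 {
			panic("invalid inputs") // parseInputs returns an error here instead
		}
		m[kv[0]] = kv[1]
	}
	fmt.Println(m["server"], m["foobar"]) // v5.2.404040 ffefe
}
```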
func (r *releaseRunner) checkDeps(ctx context.Context) error {
	announce2("reqs", "Checking requirements...")
	var failed bool
	for _, req := range r.m.Requirements {
		if req.Env != "" && req.Cmd != "" {
			return errors.Newf("requirement %q can't have both env and cmd defined", req.Name)
		}
		if req.Env != "" {
			if _, ok := os.LookupEnv(req.Env); !ok {
				failed = true
				sayFail("reqs", "FAIL %s, $%s is not defined.", req.Name, req.Env)
				continue
			}
			saySuccess("reqs", "OK %s", req.Name)
			continue
		}

		lines, err := run.Cmd(ctx, req.Cmd).Run().Lines()
		if err != nil {
			failed = true
			sayFail("reqs", "FAIL %s", req.Name)
			sayFail("reqs", "  Error: %s", err.Error())
			for _, line := range lines {
				sayFail("reqs", "  "+line)
			}
		} else {
			saySuccess("reqs", "OK %s", req.Name)
		}
	}
	if failed {
		announce2("reqs", "Requirement checks failed, aborting.")
		return errors.New("failed requirements")
	}
	return nil
}

func (r *releaseRunner) InternalFinalize(ctx context.Context) error {
	// TODO skip check deps
	if len(r.m.Internal.Finalize.Steps) == 0 {
		announce2("finalize", "Skipping internal release finalization, none defined")
		return nil
	}
	announce2("finalize", "Running finalize steps for %s", r.version)
	return r.runSteps(ctx, r.m.Internal.Finalize.Steps)
}
func (r *releaseRunner) PromoteFinalize(ctx context.Context) error {
	// TODO skip check deps
	if len(r.m.PromoteToPublic.Finalize.Steps) == 0 {
		announce2("finalize", "Skipping public release finalization, none defined")
		return nil
	}
	announce2("finalize", "Running promote finalize steps for %s", r.version)
	return r.runSteps(ctx, r.m.PromoteToPublic.Finalize.Steps)
}

func (r *releaseRunner) Test(ctx context.Context) error {
	if len(r.m.Test.Steps) == 0 {
		announce2("test", "Skipping release tests, none defined")
		return nil
	}
	announce2("test", "Running testing steps for %s", r.version)
	return r.runSteps(ctx, r.m.Test.Steps)
}

func (r *releaseRunner) CreateRelease(ctx context.Context) error {
	if err := r.checkDeps(ctx); err != nil {
		// Propagate the failure; returning nil here would silently skip the release.
		return err
	}

	var steps []cmdManifest
	switch r.typ {
	case "patch":
		steps = r.m.Internal.Create.Steps.Patch
	case "minor":
		steps = r.m.Internal.Create.Steps.Minor
	case "major":
		steps = r.m.Internal.Create.Steps.Major
	}

	announce2("create", "Will create a %s release %q", r.typ, r.version)
	return r.runSteps(ctx, steps)
}

func (r *releaseRunner) Promote(ctx context.Context) error {
	if err := r.checkDeps(ctx); err != nil {
		// Propagate the failure; returning nil here would silently skip the promotion.
		return err
	}
	announce2("promote", "Will promote %q to a public release", r.version)
	return r.runSteps(ctx, r.m.PromoteToPublic.Create.Steps)
}

func (r *releaseRunner) runSteps(ctx context.Context, steps []cmdManifest) error {
	for _, step := range steps {
		cmd := interpolate(step.Cmd, r.vars)
		if r.pretend {
			announce2("step", "Pretending to run step %q", step.Name)
			for _, line := range strings.Split(cmd, "\n") {
				say(step.Name, line)
			}
			continue
		}
		announce2("step", "Running step %q", step.Name)
		err := run.Bash(ctx, cmd).Run().StreamLines(func(line string) {
			say(step.Name, line)
		})
		if err != nil {
			sayFail(step.Name, "Step failed: %v", err)
			return err
		} else {
			saySuccess("step", "Step %q succeeded", step.Name)
		}
	}
	return nil
}

func interpolate(s string, m map[string]string) string {
	for k, v := range m {
		s = strings.ReplaceAll(s, fmt.Sprintf("{{%s}}", k), v)
	}
	return s
}
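And a small self-contained sketch of how `interpolate` expands the `{{...}}` placeholders used throughout `release.yaml`, given a `vars` map like the one built in `NewReleaseRunner` (values illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// Same substitution logic as interpolate above.
func interpolate(s string, m map[string]string) string {
	for k, v := range m {
		s = strings.ReplaceAll(s, fmt.Sprintf("{{%s}}", k), v)
	}
	return s
}

func main() {
	vars := map[string]string{"version": "v5.3.0", "tag": "5.3.0", "git.branch": "5.3"}
	cmd := `echo "Triggering build with VERSION={{version}} on branch {{git.branch}}"`
	fmt.Println(interpolate(cmd, vars))
	// echo "Triggering build with VERSION=v5.3.0 on branch 5.3"
}
```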
func announce2(section string, format string, a ...any) {
	std.Out.WriteLine(output.Linef("👉", output.StyleBold, fmt.Sprintf("[%10s] %s", section, format), a...))
}

func say(section string, format string, a ...any) {
	sayKind(output.StyleReset, section, format, a...)
}

func sayWarn(section string, format string, a ...any) {
	sayKind(output.StyleOrange, section, format, a...)
}

func sayFail(section string, format string, a ...any) {
	sayKind(output.StyleRed, section, format, a...)
}

func saySuccess(section string, format string, a ...any) {
	sayKind(output.StyleGreen, section, format, a...)
}

func sayKind(style output.Style, section string, format string, a ...any) {
	std.Out.WriteLine(output.Linef("  ", style, fmt.Sprintf("[%10s] %s", section, format), a...))
}
@ -8,6 +8,7 @@ import (
	"strings"

	"github.com/grafana/regexp"
	"github.com/urfave/cli/v2"

	"github.com/sourcegraph/sourcegraph/dev/sg/internal/bk"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
@ -17,6 +18,20 @@ import (

var cvePattern = regexp.MustCompile(`<\w+>(CVE-\d+-\d+)<\/\w+>`)

var buildNumberFlag = cli.StringFlag{
	Name:     "buildNumber",
	Usage:    "The buildkite build number to check for CVEs",
	Required: true,
	Aliases:  []string{"b"},
}

var referenceUriFlag = cli.StringFlag{
	Name:     "uri",
	Usage:    "A reference url that contains approved CVEs. Often a link to a handbook page eg: https://handbook.sourcegraph.com/departments/security/tooling/trivy/4-2-0/.",
	Required: true,
	Aliases:  []string{"u"},
}

func findUnapprovedCVEs(all []string, referenceDocument string) []string {
	var unapproved []string
	for _, cve := range all {
@ -51,6 +66,14 @@ func downloadUrl(uri string, w io.Writer) (err error) {
	}
	return nil
}
func cveCheck(cmd *cli.Context) error {
	std.Out.WriteLine(output.Styledf(output.StylePending, "Checking release for approved CVEs..."))

	referenceUrl := referenceUriFlag.Get(cmd)
	buildNumber := buildNumberFlag.Get(cmd)

	return CveCheck(cmd.Context, buildNumber, referenceUrl, false) // TODO(@jhchabran)
}

func CveCheck(ctx context.Context, buildNumber, referenceUrl string, verbose bool) error {
	client, err := bk.NewClient(ctx, std.Out)
dev/sg/internal/release/release.go (new file, 185 lines)
@ -0,0 +1,185 @@
package release

import (
	"fmt"
	"strings"

	"github.com/sourcegraph/run"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/category"
	"github.com/sourcegraph/sourcegraph/lib/errors"
	"github.com/urfave/cli/v2"
)

var releaseRunFlags = []cli.Flag{
	&cli.StringFlag{
		Name:  "workdir",
		Value: ".",
		Usage: "Set the working directory to load release scripts from",
	},
	&cli.StringFlag{
		Name:  "type",
		Value: "patch",
		Usage: "Select release type: major, minor, patch",
	},
	&cli.StringFlag{
		Name:  "branch",
		Usage: "Branch to create release from, usually `main` or `5.3` if you're cutting a patch release",
	},
	&cli.BoolFlag{
		Name:  "pretend",
		Value: false,
		Usage: "Preview all the commands that would be performed",
	},
	&cli.StringFlag{
		Name:  "version",
		Value: "v6.6.666",
		Usage: "Force version",
	},
	&cli.StringFlag{
		Name:  "inputs",
		Usage: "Set inputs to use for a given release, ex: --inputs=server=v5.2.404040,foobar=ffefe",
	},
	&cli.BoolFlag{
		Name:  "config-from-commit",
		Value: false,
		Usage: "Infer run configuration from last commit instead of flags.",
	},
}

var Command = &cli.Command{
	Name:     "release",
	Usage:    "Sourcegraph release utilities",
	Category: category.Util,
	Subcommands: []*cli.Command{
		{
			Name:     "cve-check",
			Usage:    "Check all CVEs found in a buildkite build against a set of preapproved CVEs for a release",
			Category: category.Util,
			Action:   cveCheck,
			Flags: []cli.Flag{
				&buildNumberFlag,
				&referenceUriFlag,
			},
			UsageText: `sg release cve-check -u https://handbook.sourcegraph.com/departments/security/tooling/trivy/4-2-0/ -b 184191`,
		},
		{
			Name:     "run",
			Usage:    "Run steps defined in release manifest. Those are meant to be run in CI",
			Category: category.Util,
			Subcommands: []*cli.Command{
				{
					Name:  "test",
					Flags: releaseRunFlags,
					Usage: "Run test steps as defined in the release manifest",
					Action: func(cctx *cli.Context) error {
						r, err := newReleaseRunnerFromCliContext(cctx)
						if err != nil {
							return err
						}
						return r.Test(cctx.Context)
					},
				},
				{
					Name:  "internal",
					Usage: "todo",
					Subcommands: []*cli.Command{
						{
							Name:  "finalize",
							Usage: "Run internal release finalize steps",
							Flags: releaseRunFlags,
							Action: func(cctx *cli.Context) error {
								r, err := newReleaseRunnerFromCliContext(cctx)
								if err != nil {
									return err
								}
								return r.InternalFinalize(cctx.Context)
							},
						},
					},
				},
				{
					Name:  "promote-to-public",
					Usage: "TODO",
					Subcommands: []*cli.Command{
						{
							Name:  "finalize",
							Usage: "Run public release finalize steps",
							Flags: releaseRunFlags,
							Action: func(cctx *cli.Context) error {
								r, err := newReleaseRunnerFromCliContext(cctx)
								if err != nil {
									return err
								}
								return r.PromoteFinalize(cctx.Context)
							},
						},
					},
				},
			},
		},
		{
			Name:      "create",
			Usage:     "Create a release for a given product",
			UsageText: "sg release create --workdir [path] --type patch",
			Category:  category.Util,
			Flags:     releaseRunFlags,
			Action: func(cctx *cli.Context) error {
				r, err := newReleaseRunnerFromCliContext(cctx)
				if err != nil {
					return err
				}
				return r.CreateRelease(cctx.Context)
			},
		},
		{
			Name:      "promote-to-public",
			Usage:     "Promote an existing release to the public",
			UsageText: "sg release promote-to-public --workdir [path] --type patch",
			Category:  category.Util,
			Flags:     releaseRunFlags,
			Action: func(cctx *cli.Context) error {
				r, err := newReleaseRunnerFromCliContext(cctx)
				if err != nil {
					return err
				}
				return r.Promote(cctx.Context)
			},
		},
	},
}

func newReleaseRunnerFromCliContext(cctx *cli.Context) (*releaseRunner, error) {
	workdir := cctx.String("workdir")
	pretend := cctx.Bool("pretend")
	// Normalize the version string, to prevent issues where this was given with the wrong convention
	// which requires a full rebuild.
	version := fmt.Sprintf("v%s", strings.TrimPrefix(cctx.String("version"), "v"))
	typ := cctx.String("type")
	inputs := cctx.String("inputs")
	branch := cctx.String("branch")

	if cctx.Bool("config-from-commit") {
		cmd := run.Cmd(cctx.Context, "git", "log", "-1")
		cmd.Dir(workdir)
		lines, err := cmd.Run().Lines()
		if err != nil {
			return nil, err
		}

		// config dump is always the last line.
		configRaw := lines[len(lines)-1]
		if !strings.HasPrefix(strings.TrimSpace(configRaw), "{") {
			return nil, errors.New("Trying to infer config from last commit, but did not find serialized config")
		}
		rc, err := parseReleaseConfig(configRaw)
		if err != nil {
			return nil, err
		}

		version = rc.Version
		typ = rc.Type
		inputs = rc.Inputs
	}

	return NewReleaseRunner(cctx.Context, workdir, version, inputs, typ, branch, pretend)
}
@ -15,6 +15,7 @@ import (
	"github.com/sourcegraph/sourcegraph/dev/sg/ci"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/analytics"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/background"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/release"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/secrets"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/sgconf"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
@ -299,7 +300,7 @@ var sg = &cli.App{
		funkyLogoCommand,
		helpCommand,
		installCommand,
		releaseCommand,
		release.Command,
		updateCommand,
		versionCommand,
	},
@ -1,50 +0,0 @@
package main

import (
	"github.com/urfave/cli/v2"

	"github.com/sourcegraph/sourcegraph/dev/sg/internal/category"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/release"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
	"github.com/sourcegraph/sourcegraph/lib/output"
)

var releaseCommand = &cli.Command{
	Name:     "release",
	Usage:    "Sourcegraph release utilities",
	Category: category.Util,
	Subcommands: []*cli.Command{{
		Name:     "cve-check",
		Usage:    "Check all CVEs found in a buildkite build against a set of preapproved CVEs for a release",
		Category: category.Util,
		Action:   cveCheck,
		Flags: []cli.Flag{
			&buildNumberFlag,
			&referenceUriFlag,
		},
		UsageText: `sg release cve-check -u https://handbook.sourcegraph.com/departments/security/tooling/trivy/4-2-0/ -b 184191`,
	}},
}

var buildNumberFlag = cli.StringFlag{
	Name:     "buildNumber",
	Usage:    "The buildkite build number to check for CVEs",
	Required: true,
	Aliases:  []string{"b"},
}

var referenceUriFlag = cli.StringFlag{
	Name:     "uri",
	Usage:    "A reference url that contains approved CVEs. Often a link to a handbook page eg: https://handbook.sourcegraph.com/departments/security/tooling/trivy/4-2-0/.",
	Required: true,
	Aliases:  []string{"u"},
}

func cveCheck(cmd *cli.Context) error {
	std.Out.WriteLine(output.Styledf(output.StylePending, "Checking release for approved CVEs..."))

	referenceUrl := referenceUriFlag.Get(cmd)
	buildNumber := buildNumberFlag.Get(cmd)

	return release.CveCheck(cmd.Context, buildNumber, referenceUrl, verbose)
}
@ -223,7 +223,7 @@ func checkForMigratorUpdate(ctx context.Context) (latest string, hasUpdate bool,
		return "", false, errors.Newf("last section in path is an invalid format: %s", latest)
	}

	isMigratorOutOfDate := oobmigration.CompareVersions(latestVersion, migratorVersion) == oobmigration.VersionOrderBefore || (latestPatch > migratorPatch)
	isMigratorOutOfDate := oobmigration.CompareVersions(latestVersion, migratorVersion) == oobmigration.VersionOrderBefore || (latestVersion.Minor == migratorVersion.Minor && latestPatch > migratorPatch)

	return latest, isMigratorOutOfDate, nil
}
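Editor's note on the fix above: a patch comparison is only meaningful within the same minor line. A minimal sketch of the failure mode (version values illustrative, plain ints standing in for `oobmigration.Version` fields):

```go
package main

import "fmt"

func main() {
	// Suppose the latest published migrator is 5.2.9 while ours is 5.3.1.
	latestMinor, latestPatch := 2, 9
	migratorMinor, migratorPatch := 3, 1

	// Old logic: comparing patches across different minors falsely
	// reports our newer 5.3.1 migrator as out of date.
	old := latestPatch > migratorPatch
	// New logic: only compare patches within the same minor line.
	fixed := latestMinor == migratorMinor && latestPatch > migratorPatch

	fmt.Println(old, fixed) // true false
}
```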
@ -11,6 +11,7 @@ go_library(
    deps = [
        "//internal/database/migration/schemas",
        "//internal/database/migration/shared",
        "//internal/database/migration/shared/data/cmd/generator/version",
        "//internal/database/migration/stitch",
        "//internal/oobmigration",
        "@com_github_sourcegraph_log//:log",
@ -24,5 +25,6 @@ go_binary(
    x_defs = {
        "github.com/sourcegraph/sourcegraph/internal/version.version": "{STABLE_VERSION}",
        "github.com/sourcegraph/sourcegraph/internal/version.timestamp": "{VERSION_TIMESTAMP}",
        "github.com/sourcegraph/sourcegraph/internal/database/migration/shared/data/cmd/generator/version.FinalVersionString": "{STABLE_VERSION}",
    },
)

@ -3,6 +3,7 @@ package main
import (
	"fmt"

	"github.com/sourcegraph/sourcegraph/internal/database/migration/shared/data/cmd/generator/version"
	"github.com/sourcegraph/sourcegraph/internal/oobmigration"
)

@ -13,11 +14,15 @@ const maxVersionString = "5.3.0"

// MaxVersion is the highest known released version at the time the migrator was built.
var MaxVersion = func() oobmigration.Version {
	if version, ok := oobmigration.NewVersionFromString(maxVersionString); ok {
		return version
	ver := maxVersionString
	if version.FinalVersionString != "dev" {
		ver = version.FinalVersionString
	}
	if oobVersion, ok := oobmigration.NewVersionFromString(ver); ok {
		return oobVersion
	}

	panic(fmt.Sprintf("malformed maxVersionString %q", maxVersionString))
	panic(fmt.Sprintf("malformed maxVersionString %q", ver))
}()
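Editor's note: the net effect of the stamped `x_defs` above is that `version.FinalVersionString` stays `"dev"` in unstamped builds (falling back to `maxVersionString`) and becomes the real release version on stamped release builds. A tiny sketch of that fallback, with a local variable standing in for the linker-overridable one (versions illustrative):

```go
package main

import "fmt"

// FinalVersionString stands in for the generator's version package variable;
// Bazel's x_defs replaces it with {STABLE_VERSION} on stamped release builds.
var FinalVersionString = "dev"

const maxVersionString = "5.3.0"

func main() {
	ver := maxVersionString
	if FinalVersionString != "dev" {
		ver = FinalVersionString
	}
	fmt.Println(ver) // "5.3.0" unstamped; e.g. "5.3.1" when stamped with that version
}
```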

// MinVersion is the minimum version a migrator can support upgrading to a newer version of

@ -0,0 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")

go_library(
    name = "version",
    srcs = ["version.go"],
    importpath = "github.com/sourcegraph/sourcegraph/internal/database/migration/shared/data/cmd/generator/version",
    visibility = ["//:__subpackages__"],
)
@ -0,0 +1,4 @@
package version

// TODO(JH): move this elsewhere once we're done POC'ing
var FinalVersionString = "dev"
@ -62,4 +62,6 @@ var (
	StyleGrey   = Fg256Color(8)
	StyleYellow = Fg256Color(220)
	StyleOrange = Fg256Color(202)
	StyleRed    = Fg256Color(196)
	StyleGreen  = Fg256Color(2)
)
release.yaml (new file, 87 lines)
@ -0,0 +1,87 @@
meta:
  productName: sourcegraph
  repository: "github.com/sourcegraph/sourcegraph"
  owners:
    - "@sourcegraph/release"

requirements:
  - name: "curl"
    cmd: "curl --help"
  - name: "Buildkite access token"
    env: BUILDKITE_ACCESS_TOKEN

internal:
  create:
    steps:
      patch:
        - name: "buildkite"
          cmd: |
            echo "Triggering build on sourcegraph/sourcegraph with VERSION={{version}} on branch {{git.branch}}"
            body=$(curl -s --fail-with-body -X POST "https://api.buildkite.com/v2/organizations/sourcegraph/pipelines/sourcegraph/builds" -H "Content-Type: application/json" -H "Authorization: Bearer $BUILDKITE_ACCESS_TOKEN" -d '{
              "commit": "HEAD",
              "branch": "{{git.branch}}",
              "message": "Internal release build for {{version}}",
              "env": {
                "RELEASE_INTERNAL": "true",
                "VERSION": "{{tag}}"
              }
            }')
            exit_code=$?

            if [ $exit_code != 0 ]; then
              echo "❌ Failed to create build on Buildkite, got:"
              echo "--- raw body ---"
              echo $body
              echo "--- raw body ---"
              exit $exit_code
            else
              echo "Build created, see:"
              echo $body | jq .web_url
            fi
  finalize:
    steps:
      - name: "Register on release registry"
        cmd: |
          echo "pretending to call release registry api"

test:
  steps:
    - name: "placeholder"
      cmd: |
        echo "-- pretending to test release ..."

promoteToPublic:
  create:
    steps:
      - name: "buildkite"
        cmd: |
          # We set DISABLE_ASPECT_WORKFLOWS to true, because the promotion is purely about retagging images
          # and we don't rely on AW at all.
          echo "Triggering build on sourcegraph/sourcegraph with VERSION={{version}} on branch {{git.branch}}"
          body=$(curl -s --fail-with-body -X POST "https://api.buildkite.com/v2/organizations/sourcegraph/pipelines/sourcegraph/builds" -H "Content-Type: application/json" -H "Authorization: Bearer $BUILDKITE_ACCESS_TOKEN" -d '{
            "commit": "HEAD",
            "branch": "{{git.branch}}",
            "message": "Promoting internal release {{version}} to public",
            "env": {
              "DISABLE_ASPECT_WORKFLOWS": "true",
              "RELEASE_PUBLIC": "true",
              "VERSION": "{{tag}}"
            }
          }')
          exit_code=$?

          if [ $exit_code != 0 ]; then
            echo "❌ Failed to create build on Buildkite, got:"
            echo "--- raw body ---"
            echo $body
            echo "--- raw body ---"
            exit $exit_code
          else
            echo "Build created, see:"
            echo $body | jq .web_url
          fi
  finalize:
    steps:
      - name: "Register on release registry"
        cmd: |
          echo "pretending to call release registry api"
@ -1,19 +0,0 @@
sh_binary(
    name = "generate_schemas_archive",
    srcs = ["generate_schemas_archive.sh"],
    data = [
        "//internal/database:schema.codeinsights.json",
        "//internal/database:schema.codeintel.json",
        "//internal/database:schema.json",
    ],
)

sh_binary(
    name = "upload_current_schemas",
    srcs = ["upload_current_schemas.sh"],
    data = [
        "//internal/database:schema.codeinsights.json",
        "//internal/database:schema.codeintel.json",
        "//internal/database:schema.json",
    ],
)
@ -1,198 +1,7 @@
# Release tooling

## Usage
## Images promotion

### Generating a database schemas tarball
When a build is triggered with `RELEASE_PUBLIC=true`, the pipeline will consist of a single step that runs `promote_images.sh`.

Generating a database schemas tarball is achieved by downloading all known schemas plus the current schema. There are two options for the current version database schemas: either we are cutting a new release and we need to inject the current one, or we are regenerating the tarball to fix a problem.

To control which approach we take, we use the second parameter of the following command:

```
bazel run //tools/release:generate_schemas_archive -- vX.Y.Z [ACTION] $HOME/[PATH-TO-YOUR-REPO]
```
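For instance, a hypothetical invocation regenerating the v5.2.3 tarball while injecting the current schemas would be `bazel run //tools/release:generate_schemas_archive -- v5.2.3 inject-current-schemas $HOME/sourcegraph` (version and checkout path illustrative only).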

If run with `fetch-current-schemas`, the script will ensure that the schemas archive in the bucket correctly
contains the given version database schemas. It will also prompt the user for confirmation if the associated
tarball with that version exists in the bucket.

If run with `inject-current-schemas`, the script will ensure that the schemas archive in the bucket doesn't
contain the schemas for the new version and will instead create them by injecting the `internal/database/schemas*.json` schemas into the tarball, properly renamed to the expected convention.

Finally, in both cases, the tarball will be uploaded to the bucket, and the third-party dependency, located in
`tools/release/schema_deps.bzl`, will be updated accordingly, allowing builds past that point to use those schemas.

### Uploading the current database schemas

Once a release is considered to be correct (upcoming in RFC 795), the release tooling runs another command
to store the current database schemas in the bucket, under the `schemas` folder, to capture how the database
looks at that point.

This enables building migrator binaries that can use that particular release as a migration point.

```
bazel run //tools/release:upload_current_schemas -- vX.Y.Z
```

The script will ensure that there are no existing database schemas for that version before uploading anything. This way
we prevent accidentally breaking previously generated database schemas.

## Database schemas

Database schemas are necessary for Multi-Version Upgrades, so we need to populate
them when building and cutting new releases.

The following diagram provides an overview of how it works.

```
┌─────────────────────────────────────────┐
│ GCS Bucket                              │
│                                         │
│  ┌───────────────────────────────────┐  │
│  │ schemas/                          │  │
│  │   v3.1.4-(...).schema.json        │  │
│  │   ...                             │  │
│  │   v5.2.1234-(...).schema.json ◄───┼──┼─────── Uploaded on a successful
│  │                                   │  │        new release build
│  │                                   │  │
│  │                                   │  │
│  └───────────────────────────────────┘  │
│                                         │
│  ┌───────────────────────────────────┐  │
│  │ dist/                             │  │
│  │   schemas-v5.2.1093.tar.gz        │  │
│  │   schemas-v5.2.1234.tar.gz ◄──────┼──┼────── Uploaded at the beginning
│  │             ▲                     │  │       of a new release build.
│  │             │                     │  │
│  │             │                     │  │       Release build automatically
│  │             │                     │  │       updates the Bazel reference
│  │             │                     │  │       to that file.
│  └─────────────┼─────────────────────┘  │
│                │                        │       Gets deleted if the release
│                │                        │       build fails.
│                │                        │
└────────────────┼────────────────────────┘
                 │
                 │
                 │
       referenced by Bazel and used to
       populate the schemas when building
       the cmd/migrator Docker container.
```

There are two distinct scenarios:

1. Normal builds
2. Release builds

When doing a normal build, we simply use the schemas tarball that has been previously
set by the last release build. It contains all known schema descriptions that existed
at that time.

Now when doing a release build, we need to refresh the schema descriptions, because patch releases
might have been publicly released, meaning those schemas now exist in the wild, on customer deployments
or cloud.

Let's use a concrete example:

1. t=0 5.1.0 has been released publicly
   - `main` branch is now considered to be 5.2.0
   - `5.1` branch is the target for PRs for backports and bug fixes.
1. t=10 5.1.2222 has been released publicly
   - `5.1` branch is from where this release was cut.
1. t=20 5.2.0 has been released publicly
   - `main` branch is now considered to be 5.3.0
   - `5.2` branch is the target for PRs for backports and bug fixes.
1. t=30 5.1.3333 has been released publicly
   - `5.1` branch is from where this release was cut.

So with that scenario, when 5.1.3333 was released, we introduced a new version that the _migrator_ must be aware of, on both `main` and the `5.1` branch. Previously, this required us to open a PR porting the `5.1` branch references
to the new 5.1.3333 schemas back to `main`. See [this PR for a real example](https://github.com/sourcegraph/sourcegraph/pull/56405/files#diff-38f26d6e9cb950b24ced060cd86effd4363b313d880d1afad1850887eabaf238R79).

Failing to do this would mean the _migrator_ we're going to ship with the next 5.2 release will not cover the migration path from 5.1.3333 when doing multi-version upgrades.

Ultimately, this means that whenever a release is cut, you need to be aware of all previously released
versions, even those that were released on a previous minor release. Instead of having to remember to enact those changes,
we can take a different approach.

The GCS bucket has two folders: `schemas/` and `dist/`. `schemas/` is the source of truth for all known schemas up until now, regardless of the current version. Whenever a new release is cut, the new schemas are added to that folder. Therefore, when doing the next release cut, we will use that folder to populate all the schemas that _migrator_ needs to be aware of, without having to make any manual change in the code.

Now, when building the _migrator_, we can't directly use the GCS bucket. Bazel wants a deterministic set of inputs and "all content from the bucket" is not deterministic.
To satisfy Bazel, we need a fixed, checksummed input to guarantee that the build is stable. So when we're creating a new release, we simply regenerate that
tarball based on the schemas we find in the bucket, under `schemas/`, and upload it under `dist/`.
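Concretely, that pinned input is the `http_file` repository rule in `tools/release/schema_deps.bzl` (removed by this commit; its old content is shown near the end of this diff): a fixed tarball URL under `dist/` plus its sha256, which is exactly the deterministic, checksummed input Bazel needs.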

Step-by-step process (work-in-progress):

1. We want to create a new release, which is materialized by a pull-request automatically created by `sg`
1. `sg release create ...` runs `bazel run //tools/release:generate_schemas_archive -- v5.3.4444`
1. it fetches all schemas whose versions are below 5.3.4444
1. it copies the current `schema.*.json` files to `v5.3.4444-internal_database.schema.*.json`, to match the convention of the other schemas.
1. it creates a tarball named `schemas-v5.3.4444.tar.gz`
1. it uploads it under the `dist/` folder in the bucket.
1. it updates `tools/release/schema_deps.bzl` with the new tarball URL and its checksum.
1. CI builds the new release.
1. At the end of the build:

   - If green
     - the schemas `v5.3.4444-internal_database.schema.*.json` are uploaded to the `schemas/` folder.
   - If red
     - the schemas `v5.3.4444-internal_database.schema.*.json` are _NOT_ uploaded to the `schemas/` folder.
       - that's because if the release build failed, it means that it never existed, so there is no need to capture its existence as nobody will migrate from that version number.
     - the `schemas-v5.3.4444.tar.gz` tarball is removed from the `dist/` folder in the bucket. This is perfectly fine, as no revision apart from the current PR references it.

1. PR driving the release build is merged back into the base branch

   - the updated buildfiles will use that uploaded `schemas-v5.3.4444.tar.gz` tarball from now on, eliminating the need to fetch anything from GCS apart from the tarball (until it's cached by Bazel).

## Q&A

> What happens if two release builds are built at the same time?

If two builds are going on at the same time, they won't interfere with each other, because the only artifacts that can be removed without notice are the schemas tarballs, which are
only referenced by each individual release build. As for the schemas, the only time they get created is when the internal release is finally green and ready to be merged. If one of the two builds ends
up referencing the schemas from the other, it means they didn't happen at the same time, but instead that they happened sequentially. That's because GCS guarantees us that file uploads are
transactional, i.e. it's not possible to list a file until it's fully uploaded.

> What happens if a release build fails? Can it interfere with subsequent release builds?

It cannot, because the only time the schemas are finally added to `schemas/` is when the release build succeeds. This is why, when we're regenerating the tarball, we are fetching
all the known schemas _and_ adding the new one from the source tree at that point. Had we uploaded the new schemas at the beginning of the build instead, to then fetch everything to
build the tarball, including the new one, we would have had that problem.

> How do we ensure that the `schema.*.json` files in the source, at the revision we're cutting the release from, are correct?

This is covered by Bazel. These files are generated through `bazel run //dev:write_all_generated`, which comes with automatically generated `diff_test` rules that compare
the files on disk with the files it would generate. Therefore, if someone pushes code without updating the current schemas in the code, Bazel will fail the build. And if we were to try to cut a release on that
precise commit, that same exact test would run again and fail.

Therefore, we can safely use the current schemas when cutting a release.

> What happens if the _migrator_ is built with newer schemas, like a 5.1.3333 build that contains schemas for 5.2.4444?

In that case, when regenerating the tarball, the script that populates the schemas would exclude all schemas above 5.1.X, so it won't happen.

> How does this work until we revamp the release process to match RFC 795?

The initial state has been created manually on the bucket, and there won't be any issues until we create a new release, which is, at the time of writing this doc,
a periodic event, manually driven by the release captain. We can keep building the patch releases for 5.2.X with the old method; we just have to upload the
new schemas to the bucket to ensure that the next release from `main`, i.e. 5.3.X, will be correct.

> How is that better than the previous flow?

- Before
  - Cutting a new release
    - Required manually porting the new schemas to `main` on each release.
    - Required Bazel to perform 280 individual HTTP requests sequentially to GitHub and GCS to fetch the schemas.
  - When building normally
    - Schemas are fully cached if the Bazel cache is warm. Otherwise, we go back to the 280 requests.
- After
  - Cutting a new release
    - Schemas are always up to date when cutting a new release. No need to port changes to other release branches or `main`.
    - Schemas are downloaded concurrently - it only takes a few seconds to grab all of them.
  - When building normally
    - Schemas are cached if the Bazel cache is warm. Otherwise, we download a single tarball of a few MBs.

> How do I see which schemas were used to build the _migrator_ container?

`bazel build //cmd/migrator:tar_schema_descriptions && tar tf $(bazel cquery //cmd/migrator:tar_schema_descriptions --output=files)` will show the content of the container layer used
to inject the schemas in the final _migrator_ container image.
See TODO for the reference documentation for the new release process.
@ -1,192 +0,0 @@
#!/usr/bin/env bash

set -e

version="$1"
major=""
minor=""
action="$2"
repository_root="$3"

set -u

if [ "$#" -ne 3 ]; then
  echo "usage: [script] vX.Y.Z [inject-current-schemas|fetch-current-schemas] /absolute/path/to/repository/root"
  exit 1
fi

if [[ "$action" != "inject-current-schemas" && "$action" != "fetch-current-schemas" ]]; then
  echo "usage: [script] vX.Y.Z [inject-current-schemas|fetch-current-schemas] /absolute/path/to/repository/root"
  exit 1
fi

if ! [[ $version =~ ^v[0-9]\.[0-9]+\.[0-9]+ ]]; then
  echo "version format is incorrect, usage: [script] vX.Y.Z"
  exit 1
fi

# To avoid breaking previous builds by accident, we want the tarballs we're creating to be idempotent, i.e.
# if we recreate one with the same inputs, we get the same exact tarball at the end.
#
# usage: idempotent_tarball "foo" to produce foo.tar.gz containing files from ./*
#
# This is a bit tricky, as we have to manually eliminate anything that could change the result.
# - Explicitly sort files in the archive so the ordering stays stable.
# - Set the locale to C, so the sorting always has the same output.
# - Set ownership to root:root
# - Set the modified time to the beginning of Unix time
# - Use GNU tar regardless of whether we're on Linux or macOS. BSDTar doesn't come with the flags we need to produce the
#   same binaries, even if the implementations are supposedly similar.
# - GZip the tar file ourselves, using -n to not store the filename and more importantly the timestamp in the
#   metadata.
function idempotent_tarball {
  local base="$1"
  local tarbin="tar"
  if tar --version | grep -q bsdtar; then
    echo "⚠️ BSDTar detected, using gtar to produce idempotent tarball."
    tarbin="gtar"
  fi

  # Produces ${base}.tar
  LC_ALL=c "$tarbin" cf "${base}.tar" --owner=root:0 --group=root:0 --numeric-owner --mtime='UTC 1970-01-01' ./*

  # Produces ${base}.tar.gz
  gzip -n "${base}.tar"
}
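In other words, running `idempotent_tarball` twice over the same set of schema files should produce byte-identical `.tar.gz` archives, which is what keeps the sha256 recorded in `schema_deps.bzl` stable across regenerations.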

bucket='gs://schemas-migrations'

if [[ $version =~ ^v([0-9]+)\.([0-9]+).[0-9]+$ ]]; then
  major=${BASH_REMATCH[1]}
  minor=${BASH_REMATCH[2]}
else
  echo "Usage: [...] vX.Y.Z where X is the major version and Y the minor version"
  exit 1
fi

echo "Generating an archive of all released database schemas for v$major.$minor"
tmp_dir=$(mktemp -d)
# shellcheck disable=SC2064
trap "rm -Rf $tmp_dir" EXIT

# Downloading everything at once is much faster and simpler than fetching individual files,
# even if done concurrently.
echo "--- Downloading all schemas from ${bucket}/schemas"
gsutil -m -q cp "${bucket}/schemas/*" "$tmp_dir"

pushd "$tmp_dir"
echo "--- Filtering out migrations after ${major}.${minor}"
for file in *; do
  if [[ $file =~ ^v([0-9])\.([0-9]+) ]]; then
    found_major=${BASH_REMATCH[1]}
    found_minor=${BASH_REMATCH[2]}

    # If the major version we're targeting is strictly greater than the one we're looking at,
    # we don't bother looking at the minor version and we keep it.
    if [ "$major" -gt "$found_major" ]; then
      continue
    else
      # If the major version is the same, we need to inspect the minor versions to know
      # if we need to keep it or not.
      if [[ "$major" -eq "$found_major" && "$minor" -ge "$found_minor" ]]; then
        continue
      fi
    fi

    # What's left has to be excluded.
    echo "Rejecting $file"
    rm "$file"
  fi
done
popd

if [[ $action == "fetch-current-schemas" ]]; then
  echo "--- Skipping current schema"
  must_exist_schemas=(
    "${tmp_dir}/${version}-internal_database_schema.json"
    "${tmp_dir}/${version}-internal_database_schema.codeintel.json"
    "${tmp_dir}/${version}-internal_database_schema.codeinsights.json"
  )

  for f in "${must_exist_schemas[@]}"; do
    if [ -f "$f" ]; then
      echo "✅ Found $f database schema for ${version}"
    else
      echo "❌ Missing $f database schema for ${version}"
      echo "⚠️ Either this command was accidentally run with fetch-current-schemas while intending to create a release"
      echo "⚠️ or the currently archived database schemas are missing the current version, which indicates"
      echo "⚠️ a botched release."
      exit 1
    fi
  done
else
  echo "--- Injecting current schemas"
  must_not_exist_schemas=(
    "${tmp_dir}/${version}-internal_database_schema.json"
    "${tmp_dir}/${version}-internal_database_schema.codeintel.json"
    "${tmp_dir}/${version}-internal_database_schema.codeinsights.json"
  )

  for f in "${must_not_exist_schemas[@]}"; do
    if [ -f "$f" ]; then
      echo "❌ Prior database schemas exist for ${version}"
      echo "⚠️ Either this command was accidentally run with fetch-current-schemas while intending to create"
      echo "⚠️ a release or a release was botched."
      exit 1
    else
      echo "✅ No prior database schemas exist for ${version}"
    fi
  done

  cp internal/database/schema.json "${tmp_dir}/${version}-internal_database_schema.json"
  cp internal/database/schema.codeintel.json "${tmp_dir}/${version}-internal_database_schema.codeintel.json"
  cp internal/database/schema.codeinsights.json "${tmp_dir}/${version}-internal_database_schema.codeinsights.json"
fi

output_base_path="${PWD}/schemas-${version}"
output_path="${output_base_path}.tar.gz"
output_basename="$(basename "$output_path")"
trap 'rm $output_path' EXIT

echo "--- Creating tarball '$output_path'"
pushd "$tmp_dir"
idempotent_tarball "$output_base_path"
popd

checksum=$(sha256sum "$output_path" | cut -d ' ' -f1)
echo "Checksum: $checksum"
echo "--- Uploading tarball to ${bucket}/dist"

# Tarballs are reproducible, but the only reason for which the user would want to overwrite the existing one
# is to fix a problem. We don't want anyone to run this by accident, so we explicitly ask for confirmation.
if gsutil -q ls "${bucket}/dist/${output_basename}"; then
  echo "--- ⚠️ A database schemas tarball already exists for this version"
  echo "Type OVERWRITE followed by ENTER to confirm you want to overwrite it. Anything else will abort."
  read -p "Are you sure? " -r
  echo
  if [[ "$REPLY" != "OVERWRITE" ]]; then
    echo "Aborting, tarball left intact on the bucket."
    exit 1
  fi
fi

gsutil -q cp "$output_path" "${bucket}/dist/"

echo "--- Updating buildfiles"
# Starlark is practically the same as Python, so we use that matcher.
comby -matcher .py \
  -in-place \
  'urls = [":[1]"],' \
  "urls = [\"https://storage.googleapis.com/schemas-migrations/dist/$output_basename\"]," \
  "${repository_root}/tools/release/schema_deps.bzl"

comby -matcher .py \
  -in-place \
  'sha256 = ":[1]",' \
  "sha256 = \"$checksum\"," \
  "${repository_root}/tools/release/schema_deps.bzl"

echo "--- Summary"
tar tvf "$output_path"
echo "Uploaded ${bucket}/dist/${output_basename} sha256:${checksum}"

tools/release/promote_images.sh (new executable file, 25 lines)
@ -0,0 +1,25 @@
#!/usr/bin/env bash

set -eu

if [ "$VERSION" = "" ]; then
  echo "❌ Need \$VERSION to be set to promote images"
  exit 1
fi

if [ "$#" -lt 1 ]; then
  echo "❌ Usage: $0 gitserver blobstore <image-name-without-registry...> ..."
  exit 1
fi

echo -e "## Release: image promotions" > ./annotations/image_promotions.md
echo -e "\n| Name | From | To |\n|---|---|---|" >> ./annotations/image_promotions.md
for name in "${@:1}"; do
  echo "--- Copying ${name} from private registry to public registry"

  docker pull "${INTERNAL_REGISTRY}/${name}:${VERSION}"
  docker tag "${INTERNAL_REGISTRY}/${name}:${VERSION}" "${PUBLIC_REGISTRY}/${name}:${VERSION}"
  docker push "${PUBLIC_REGISTRY}/${name}:${VERSION}"

  echo -e "| ${name} | \`${INTERNAL_REGISTRY}/${name}:${VERSION}\` | \`${PUBLIC_REGISTRY}/${name}:${VERSION}\` |" >>./annotations/image_promotions.md
done
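As a reading aid: the script assumes CI provides `VERSION`, `INTERNAL_REGISTRY` and `PUBLIC_REGISTRY` (plus an existing `./annotations` directory for the Buildkite annotation), so a hypothetical invocation would look like `VERSION=5.3.0 INTERNAL_REGISTRY=... PUBLIC_REGISTRY=... ./promote_images.sh gitserver blobstore` - registry values illustrative only.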
@ -1,13 +0,0 @@
"""
This module defines the third party dependency containing all database schemas that the
migrator uses to handle migrations. See the README.md in this folder for reference.
"""

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_file")

def schema_deps():
    http_file(
        name = "schemas_archive",
        urls = ["https://storage.googleapis.com/schemas-migrations/dist/schemas-v5.2.3.tar.gz"],
        sha256 = "c5aec72d528c0b3803070ccba58049c42f9b2618c9dba367dffe106d30f8f6fe",
    )
@ -1,38 +0,0 @@
#!/usr/bin/env bash

set -eu

version="$1"

if [ "$#" -ne 1 ]; then
  echo "usage: [script] vX.Y.Z"
fi

if ! [[ $version =~ ^v[0-9]\.[0-9]+\.[0-9]+ ]]; then
  echo "version format is incorrect, usage: [script] vX.Y.Z"
  exit 1
fi

bucket='gs://schemas-migrations'

tmp_dir=$(mktemp -d)
trap 'rm -Rf $tmp_dir' EXIT

echo "--- Ensuring that database schemas do not exist for this version"
if gsutil -q ls "${bucket}/schemas/${version}-internal_database_schema*.json"; then
  echo "⚠️ Found the above schemas in the bucket."
  echo "--- ❌ Database schemas for version ${version} already exist: aborting."
  exit 1
fi

echo "--- Copying internal/database/schemas*.json to ${version}-internal_database_schema*.json"
cp internal/database/schema.json "${tmp_dir}/${version}-internal_database_schema.json"
cp internal/database/schema.codeintel.json "${tmp_dir}/${version}-internal_database_schema.codeintel.json"
cp internal/database/schema.codeinsights.json "${tmp_dir}/${version}-internal_database_schema.codeinsights.json"

echo "--- Uploading to GCS Bucket '${bucket}/schemas'"
pushd "$tmp_dir"
gsutil cp ./*.json "${bucket}/schemas/"
popd

echo "--- ✅ Schemas for ${version} are now available for other releases"