This PR ships our freshly rewritten container images built with `rules_oci` and Wolfi, which for now will only be used on S2.

**What is this about**

This work brings together [hardening container images](https://github.com/orgs/sourcegraph/projects/302?pane=issue&itemId=25019223) and fully building our container images with Bazel.

* All base images are now distroless and based on Wolfi, meaning we fully control every package version and are no longer at the mercy of, for example, Alpine maintainers dropping a Postgres version.
* Container images are now built with `rules_oci`, meaning we no longer have Dockerfiles; images are instead defined through [Bazel rules](https://sourcegraph.sourcegraph.com/github.com/sourcegraph/sourcegraph@bzl/oci_wolfi/-/blob/enterprise/cmd/gitserver/BUILD.bazel) (a rough sketch of what these rules look like follows the PR description below). Don't be scared: while this will look a bit strange at first, it's much saner and simpler than our Dockerfiles and their muddy shell scripts calling each other in cascade.

:spiral_note_pad: **Plan**:

**1/ (NOW) We merge our branch into `main` today. Here is what changes for you 👇**

* On `main`:
  * A new job, _Bazel Push_, will push the new images to our registries with all tags prefixed by `bazel-`.
  * These new images will be picked up by S2 and S2 only.
  * The existing jobs that build and push Docker images will stay in place until we have QA'ed the new ones enough to be confident rolling them out on Dotcom.
  * Because we'll be building both sets of images, there will be more jobs running on `main`, but this should not affect wall-clock time.
* On all branches (so your PRs and `main`):
  * The _Bazel Test_ job will now run the Backend Integration Tests, E2E Tests and CodeIntel QA.
  * This will increase the duration of the test jobs in your PRs, but since we haven't removed the `sg lint` step yet, it should not affect the overall wall-clock time of your PRs much.
  * It will also increase your confidence in your changes, as coverage is vastly increased compared to before.
* If you have in-flight branches that affect the Docker images (e.g. adding a new binary, like the recent `scip-tags`), reach out to us on #job-fair-bazel so we can help you port your changes. It's much simpler than before, but it will be unfamiliar at first.
* If something goes awfully wrong, we'll roll back and update this thread.

**2/ (EOW / early next week) Once we're confident in what we see on S2, we'll roll the new images out on Dotcom.**

* After the first successful deploy and a few sanity checks, we will drop the old image-building jobs.
* At that point, we'll reach out to all TLs to ask for their help exercising all features of the product so we catch any potential breakage.

## Test plan

<!-- All pull requests REQUIRE a test plan: https://docs.sourcegraph.com/dev/background-information/testing_principles -->

* We tested the new images on `scale-testing` and they worked.
* The new container build rules come with _container tests_, which ensure that the produced images contain, and are configured with, exactly what should be in there: [example](https://sourcegraph.sourcegraph.com/github.com/sourcegraph/sourcegraph@bzl/oci_wolfi/-/blob/enterprise/cmd/gitserver/image_test.yaml).

---------

Co-authored-by: Dave Try <davetry@gmail.com>
Co-authored-by: Will Dollman <will.dollman@sourcegraph.com>
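For anyone who hasn't touched `rules_oci` yet, here is a minimal sketch of the rough shape these `BUILD.bazel` definitions take. It is illustrative only: the target names, base image label, and registry path are made up for this example, and the real definitions (and their container tests) live in the linked `BUILD.bazel` and `image_test.yaml` files.

```starlark
# Illustrative sketch only -- target names, labels, and paths below are
# placeholders, not the actual Sourcegraph targets.
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push")
load("@rules_pkg//pkg:tar.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")

# Package the service binary into a layer.
pkg_tar(
    name = "tar_myservice",
    srcs = [":myservice"],  # hypothetical go_binary target
)

# Assemble the image on top of a Wolfi-based distroless base.
oci_image(
    name = "image",
    base = "@wolfi_base",  # hypothetical Wolfi base image repository
    entrypoint = ["/myservice"],
    tars = [":tar_myservice"],
)

# Container test: assert that the produced image contains and is configured
# with what we expect (files, commands, env, ...).
container_structure_test(
    name = "image_test",
    configs = ["image_test.yaml"],
    image = ":image",
)

# Push the image to a registry -- roughly what the Bazel Push job drives.
oci_push(
    name = "candidate_push",
    image = ":image",
    repository = "us.gcr.io/example/myservice",  # placeholder registry path
)
```

With a setup along these lines, building the `:image` target and running the `:image_test` target with Bazel is enough to build and validate an image change locally, with no Dockerfile involved.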
package main

import (
	"compress/gzip"
	"context"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"cloud.google.com/go/storage"
	"github.com/sourcegraph/conc/pool"
	"google.golang.org/api/iterator"

	"github.com/sourcegraph/sourcegraph/dev/codeintel-qa/internal"
	"github.com/sourcegraph/sourcegraph/dev/sg/root"
	"github.com/sourcegraph/sourcegraph/lib/errors"
)

func main() {
	if err := mainErr(context.Background()); err != nil {
		fmt.Printf("%s error: %s\n", internal.EmojiFailure, err.Error())
		os.Exit(1)
	}
}

const (
	bucketName         = "codeintel-qa-indexes"
	relativeIndexesDir = "dev/codeintel-qa/testdata/indexes"
)

func mainErr(ctx context.Context) error {
	client, err := storage.NewClient(ctx)
	if err != nil {
		return err
	}
	bucket := client.Bucket(bucketName)

	paths, err := getPaths(ctx, bucket)
	if err != nil {
		return err
	}

	if err := downloadAll(ctx, bucket, paths); err != nil {
		return err
	}

	return nil
}

// getPaths lists the names of all objects in the given GCS bucket.
func getPaths(ctx context.Context, bucket *storage.BucketHandle) (paths []string, _ error) {
	objects := bucket.Objects(ctx, &storage.Query{})
	for {
		attrs, err := objects.Next()
		if err != nil {
			if err == iterator.Done {
				break
			}

			return nil, err
		}

		paths = append(paths, attrs.Name)
	}

	return paths, nil
}

// downloadAll downloads the given objects concurrently into the local indexes directory.
func downloadAll(ctx context.Context, bucket *storage.BucketHandle, paths []string) error {
	repoRoot, err := root.RepositoryRoot()
	if err != nil {
		if err == root.ErrNotInsideSourcegraph && os.Getenv("BAZEL_TEST") != "" {
			// If we're running inside Bazel, we do not have access to the repo root.
			// In that case, we simply use CWD instead.
			repoRoot = "."
		} else {
			return err
		}
	}
	indexesDir := filepath.Join(repoRoot, relativeIndexesDir)

	if err := os.MkdirAll(indexesDir, os.ModePerm); err != nil {
		return err
	}

	p := pool.New().WithErrors()

	for _, path := range paths {
		path := path
		p.Go(func() error { return downloadIndex(ctx, bucket, indexesDir, path) })
	}

	return p.Wait()
}

// downloadIndex fetches a single gzip-compressed index and decompresses it to disk,
// skipping any index that has already been downloaded.
func downloadIndex(ctx context.Context, bucket *storage.BucketHandle, indexesDir, name string) (err error) {
	targetFile := filepath.Join(indexesDir, strings.TrimSuffix(name, ".gz"))

	if ok, err := internal.FileExists(targetFile); err != nil {
		return err
	} else if ok {
		fmt.Printf("Index %q already downloaded\n", name)
		return nil
	}
	fmt.Printf("Downloading %q\n", name)

	f, err := os.OpenFile(targetFile, os.O_WRONLY|os.O_CREATE, os.ModePerm)
	if err != nil {
		return err
	}
	defer func() { err = errors.Append(err, f.Close()) }()

	r, err := bucket.Object(name).NewReader(ctx)
	if err != nil {
		return err
	}
	defer func() { err = errors.Append(err, r.Close()) }()

	gzipReader, err := gzip.NewReader(r)
	if err != nil {
		return err
	}
	defer func() { err = errors.Append(err, gzipReader.Close()) }()

	if _, err := io.Copy(f, gzipReader); err != nil {
		return err
	}

	fmt.Printf("Finished downloading %q\n", name)
	return nil
}