Update s3proxy to latest custom release (v2) (#53921)

Update s3proxy to the latest release of our fork, which I've just updated. This fixes ~44 vulnerabilities!

The [previous PR](https://github.com/sourcegraph/sourcegraph/pull/53872)
caused issues that we needed to investigate.

The issue was that the updated version of s3proxy has several mandatory
environment variables and doesn't handle the case where they aren't set.
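
The fix (visible in the `maybeBlobstore` diff below) is to give those variables placeholder defaults before starting s3proxy. A minimal sketch of that defaulting pattern, assuming a helper that only applies when the operator hasn't already set a value — the `setDefaultEnv` shown here is a hypothetical stand-in for the `SetDefaultEnv` used in the actual change:

```go
package main

import (
	"fmt"
	"os"
)

// setDefaultEnv sets an environment variable only if it is currently unset,
// so values supplied by the operator always take precedence.
// (Hypothetical stand-in for the SetDefaultEnv helper used in the diff below.)
func setDefaultEnv(key, value string) {
	if _, ok := os.LookupEnv(key); !ok {
		os.Setenv(key, value)
	}
}

func main() {
	// s3proxy requires these to be set even though we never use the secure
	// endpoint, so give them placeholder defaults.
	setDefaultEnv("S3PROXY_SECURE_ENDPOINT", "https://0.0.0.0:9443")
	setDefaultEnv("S3PROXY_KEYSTORE_PATH", "/opt/s3proxy/test-classes/keystore.jks")
	setDefaultEnv("S3PROXY_KEYSTORE_PASSWORD", "password")

	fmt.Println("S3PROXY_SECURE_ENDPOINT =", os.Getenv("S3PROXY_SECURE_ENDPOINT"))
}
```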

This PR also removes some of the Wolfi final-image build steps, as they're
no longer necessary and the recent PR that removed old Dockerfiles/build
scripts breaks them.

## Test plan

<!-- All pull requests REQUIRE a test plan:
https://docs.sourcegraph.com/dev/background-information/testing_principles
-->

- s3proxy tests pass
- manual check of the final image
- green Wolfi CI run:
  https://buildkite.com/sourcegraph/sourcegraph/builds/229728#0188e2dd-cbb4-4180-91ef-8f9170cce1b1
- green main-dry-run for bazel-tests:
  https://buildkite.com/sourcegraph/sourcegraph/builds/229729
Will Dollman 2023-06-22 13:03:51 +01:00 committed by GitHub
parent 1878b8ce63
commit 122cb76558
9 changed files with 18 additions and 171 deletions

View File

@ -50,6 +50,10 @@ func maybeBlobstore(logger sglog.Logger) []string {
SetDefaultEnv("JCLOUDS_KEYSTONE_SCOPE", "")
SetDefaultEnv("JCLOUDS_KEYSTONE_PROJECT_DOMAIN_NAME", "")
// SetDefaultEnv("JCLOUDS_FILESYSTEM_BASEDIR", dataDir) // overridden above; here for equality with Dockerfile
// We don't use the secure endpoint, but these values must be set
SetDefaultEnv("S3PROXY_SECURE_ENDPOINT", "https://0.0.0.0:9443")
SetDefaultEnv("S3PROXY_KEYSTORE_PATH", "/opt/s3proxy/test-classes/keystore.jks")
SetDefaultEnv("S3PROXY_KEYSTORE_PASSWORD", "password")
// Configure blobstore service
blobstoreDataDir := os.Getenv("JCLOUDS_FILESYSTEM_BASEDIR")

View File

@ -36,7 +36,7 @@ def oci_deps():
oci_pull(
name = "wolfi_server_base",
digest = "sha256:b4f5d5927d4e53937fdaf8fc9b54414ee87c2e2f288e566cc25bb98771e88008",
digest = "sha256:d1c7262de00d6d87a001e208c23c9551c67ec722f1eeac2a016b77ca6d539c2d",
image = "us.gcr.io/sourcegraph-dev/wolfi-server-base",
)
@ -168,6 +168,6 @@ def oci_deps():
oci_pull(
name = "wolfi_s3proxy_base",
digest = "sha256:4299634c0e403059a5a2aeda323181feb8189648c23fd69d0b5d057e0e7966eb",
digest = "sha256:92614e804e5a5cb5316f8d9235286b659d8957c557170ddefda2973053ca5e4d",
image = "us.gcr.io/sourcegraph-dev/wolfi-blobstore-base",
)

View File

@ -132,7 +132,6 @@ sg ci build wolfi
Base pipeline (more steps might be included based on branch changes):
- **Metadata**: Pipeline metadata
- **Wolfi image builds**: Build Wolfi-based batcheshelper, Build Wolfi-based blobstore, Build Wolfi-based bundled-executor, Build Wolfi-based cadvisor, Build Wolfi-based embeddings, Build Wolfi-based executor, Build Wolfi-based executor-kubernetes, Build Wolfi-based frontend, Build Wolfi-based github-proxy, Build Wolfi-based gitserver, Build Wolfi-based indexed-searcher, Build Wolfi-based jaeger-agent, Build Wolfi-based jaeger-all-in-one, Build Wolfi-based cody-gateway, Build Wolfi-based loadtest, Build Wolfi-based migrator, Build Wolfi-based node-exporter, Build Wolfi-based opentelemetry-collector, Build Wolfi-based postgres_exporter, Build Wolfi-based precise-code-intel-worker, Build Wolfi-based prometheus, Build Wolfi-based prometheus-gcp, Build Wolfi-based redis-cache, Build Wolfi-based redis-store, Build Wolfi-based redis_exporter, Build Wolfi-based repo-updater, Build Wolfi-based search-indexer, Build Wolfi-based searcher, Build Wolfi-based server, Build Wolfi-based sg, Build Wolfi-based symbols, Build Wolfi-based syntax-highlighter, Build Wolfi-based worker
### Release branch nightly healthcheck build

View File

@ -1,6 +1,5 @@
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//dev:oci_defs.bzl", "image_repository")
oci_image(
@ -37,6 +36,10 @@ oci_image(
"JCLOUDS_KEYSTONE_SCOPE": "",
"JCLOUDS_KEYSTONE_PROJECT_DOMAIN_NAME": "",
"JCLOUDS_FILESYSTEM_BASEDIR": "/data",
# We don't use the secure endpoint, but these values must be set
"S3PROXY_SECURE_ENDPOINT": "https://0.0.0.0:9443",
"S3PROXY_KEYSTORE_PATH": "test-classes/keystore.jks",
"S3PROXY_KEYSTORE_PASSWORD": "password",
},
user = "sourcegraph",
)

View File

@ -107,7 +107,6 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
updateBaseImages := c.Diff.Has(changed.WolfiBaseImages) || updatePackages
var numUpdatedPackages int
var numUpdatedBaseImages int
if updatePackages {
var packageOps *operations.Set
@ -116,59 +115,13 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
}
if updateBaseImages {
var baseImageOps *operations.Set
baseImageOps, numUpdatedBaseImages = WolfiBaseImagesOperations(
baseImageOps, _ = WolfiBaseImagesOperations(
c.ChangedFiles[changed.WolfiBaseImages], // TODO: If packages have changed need to update all base images. Requires a list of all base images
c.Version,
(numUpdatedPackages > 0),
)
ops.Merge(baseImageOps)
}
// Always rebuild Wolfi images
// Rebuild all images seems reasonable. We need a list somewhere! Maybe we can just use the standard image list though? But not all are wolfi-ified
ops.Merge(
// TODO: Just hardcode specific images initially
WolfiImagesOperations([]string{
"batcheshelper",
"blobstore",
"bundled-executor",
"cadvisor",
// "codeinsights-db",
// "codeintel-db",
"embeddings",
"executor",
"executor-kubernetes",
"frontend",
"github-proxy",
"gitserver",
"indexed-searcher",
"jaeger-agent",
"jaeger-all-in-one",
"cody-gateway",
"loadtest",
"migrator",
"node-exporter",
"opentelemetry-collector",
// "postgres-12-alpine",
"postgres_exporter",
"precise-code-intel-worker",
"prometheus",
"prometheus-gcp",
"redis-cache",
"redis-store",
"redis_exporter",
"repo-updater",
"search-indexer",
"searcher",
"server",
"sg",
"symbols",
"syntax-highlighter",
"worker",
}, c.Version,
c.candidateImageTag(),
(numUpdatedBaseImages > 0),
),
)
case runtype.PullRequest:
// First, we set up core test operations that apply both to PRs and to other run

View File

@ -2,13 +2,9 @@ package ci
import (
"fmt"
"os"
"path/filepath"
"strings"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/enterprise/dev/ci/images"
bk "github.com/sourcegraph/sourcegraph/enterprise/dev/ci/internal/buildkite"
"github.com/sourcegraph/sourcegraph/enterprise/dev/ci/internal/ci/operations"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
@ -60,27 +56,6 @@ func WolfiBaseImagesOperations(changedFiles []string, tag string, packagesChange
return ops, len(buildStepKeys)
}
// WolfiImagesOperations builds the specified docker images, or all images if none are provided
func WolfiImagesOperations(buildImages []string, version string, tag string, baseImagesChanged bool) *operations.Set {
// If buildImages is not specified, rebuild all images
// TODO: Maintain a list of Wolfi-based images?
if len(buildImages) == 0 {
buildImages = images.SourcegraphDockerImages
}
wolfiImageBuildOps := operations.NewNamedSet("Wolfi image builds")
for _, dockerImage := range buildImages {
// Don't upload sourcemaps
// wolfiImageBuildOps.Append(buildCandidateDockerImage(dockerImage, version, tag, false))
wolfiImageBuildOps.Append(
buildCandidateWolfiDockerImage(dockerImage, version, tag, false, baseImagesChanged),
)
}
return wolfiImageBuildOps
}
// Dependency tree between steps:
// (buildPackage[1], buildPackage[2], ...) <-- buildRepoIndex <-- (buildWolfi[1], buildWolfi[2], ...)
@ -148,93 +123,6 @@ func allBaseImagesBuilt(baseImageKeys []string) func(*bk.Pipeline) {
}
}
// Build a candidate Wolfi docker image
func buildCandidateWolfiDockerImage(app, version, tag string, uploadSourcemaps bool, hasDependency bool) operations.Operation {
return func(pipeline *bk.Pipeline) {
image := strings.ReplaceAll(app, "/", "-")
localImage := "sourcegraph/wolfi-" + image + ":" + version
cmds := []bk.StepOpt{
bk.Key(candidateImageStepKey(app)),
bk.Cmd(fmt.Sprintf(`echo "Building Wolfi %s image..."`, app)),
bk.Env("DOCKER_BUILDKIT", "1"),
bk.Env("DOCKER_BAZEL", "true"),
bk.Env("IMAGE", localImage),
bk.Env("VERSION", version),
bk.Agent("queue", "bazel"),
}
if hasDependency {
cmds = append(cmds, bk.DependsOn("buildAllBaseImages"))
}
// Add Sentry environment variables if we are building off main branch
// to enable building the webapp with source maps enabled
if uploadSourcemaps {
cmds = append(cmds,
bk.Env("SENTRY_UPLOAD_SOURCE_MAPS", "1"),
bk.Env("SENTRY_ORGANIZATION", "sourcegraph"),
bk.Env("SENTRY_PROJECT", "sourcegraph-dot-com"),
)
}
// Allow all build scripts to emit info annotations
buildAnnotationOptions := bk.AnnotatedCmdOpts{
Annotations: &bk.AnnotationOpts{
Type: bk.AnnotationTypeInfo,
IncludeNames: true,
},
}
if _, err := os.Stat(filepath.Join("docker-images", app)); err == nil {
// Building Docker image located under $REPO_ROOT/docker-images/
cmds = append(cmds,
bk.Cmd("ls -lah "+filepath.Join("docker-images", app, "build-wolfi.sh")),
bk.Cmd(filepath.Join("docker-images", app, "build-wolfi.sh")))
} else {
// Building Docker images located under $REPO_ROOT/cmd/
cmdDir := func() string {
folder := app
if app == "blobstore2" {
// experiment: cmd/blobstore is a Go rewrite of docker-images/blobstore. While
// it is incomplete, we do not want cmd/blobstore/Dockerfile to get published
// under the same name.
// https://github.com/sourcegraph/sourcegraph/issues/45594
// TODO(blobstore): remove this when making Go blobstore the default
folder = "blobstore"
}
// If /enterprise/cmd/... does not exist, build just /cmd/... instead.
if _, err := os.Stat(filepath.Join("enterprise/cmd", folder)); err != nil {
return "cmd/" + folder
}
return "enterprise/cmd/" + folder
}()
preBuildScript := cmdDir + "/pre-build.sh"
if _, err := os.Stat(preBuildScript); err == nil {
// Allow all
cmds = append(cmds, bk.AnnotatedCmd(preBuildScript, buildAnnotationOptions))
}
cmds = append(cmds, bk.AnnotatedCmd(cmdDir+"/build-wolfi.sh", buildAnnotationOptions))
}
// Add "wolfi" to image name so we don't overwrite Alpine dev images
wolfiApp := fmt.Sprintf("wolfi-%s", app)
devImage := images.DevRegistryImage(wolfiApp, tag)
cmds = append(cmds,
// Retag the local image for dev registry
bk.Cmd(fmt.Sprintf("docker tag %s %s", localImage, devImage)),
// Publish tagged image
bk.Cmd(fmt.Sprintf("docker push %s || exit 10", devImage)),
// Retry in case of flakes when pushing
bk.AutomaticRetryStatus(3, 10),
// Retry in case of flakes when pushing
bk.AutomaticRetryStatus(3, 222),
)
pipeline.AddStep(fmt.Sprintf(":octopus: :docker: :construction: Build Wolfi-based %s", app), cmds...)
}
}
var reStepKeySanitizer = lazyregexp.New(`[^a-zA-Z0-9_-]+`)
// sanitizeStepKey sanitizes BuildKite StepKeys by removing any invalid characters

View File

@ -18,4 +18,4 @@ paths:
work-dir: /opt/s3proxy
# MANUAL REBUILD: Wed Jun 14 15:27:52 BST 2023
# MANUAL REBUILD: Wed Jun 21 16:29:51 BST 2023

View File

@ -78,4 +78,4 @@ paths:
type: directory
permissions: 0o755
# MANUAL REBUILD: Wed Jun 14 15:27:52 BST 2023
# MANUAL REBUILD: Wed Jun 21 16:30:05 BST 2023

View File

@ -1,7 +1,7 @@
package:
name: s3proxy
version: 2.0.0
epoch: 1
epoch: 2
description: "Access other storage backends via the S3 API"
target-architecture:
- x86_64
@ -12,7 +12,7 @@ package:
license: 'Apache License 2.0'
dependencies:
runtime:
- openjdk-11
- openjdk-11=11.0.20.4-r0 # TODO(will): Temporarily pinned to avoid bad signature
- openjdk-11-default-jvm # Set Java 11 as default JVM
environment:
@ -26,14 +26,14 @@ environment:
- busybox
- ca-certificates-bundle
- maven
- openjdk-11
- openjdk-11=11.0.20.4-r0 # TODO(will): Temporarily pinned to avoid bad signature
- openjdk-11-default-jvm
pipeline:
- uses: fetch
with:
uri: https://github.com/sourcegraph/s3proxy/archive/refs/tags/s3proxy-${{package.version}}.tar.gz
expected-sha256: e2d3f8f217d67ab8cc074490f27b4d649f4ec73f5bf540aa9da1ad4dda818d0b
uri: https://github.com/sourcegraph/s3proxy/archive/refs/tags/s3proxy-${{package.version}}-${{package.epoch}}.tar.gz
expected-sha256: efeda0b7e2d87dbfb053510d706b109f8dcbf83fb7833e9d43b2231a2beaf247
extract: true
- runs: |
JAVA_HOME=/usr/lib/jvm/java-11-openjdk/ mvn package -DskipTests