Switch to OCI/Wolfi based image (#52693)
This PR ships our freshly rewritten container images, built with rules_oci and Wolfi. For now they will only be used on S2.

## What is this about

This work is the conjunction of [hardening container images](https://github.com/orgs/sourcegraph/projects/302?pane=issue&itemId=25019223) and fully building our container images with Bazel.

* All base images are now distroless and based on Wolfi, meaning we fully control every little package version and are no longer subject to, for example, Alpine maintainers dropping a Postgres version.
* Container images are now built with `rules_oci`: we no longer have Dockerfiles, and images are instead created through [Bazel rules](https://sourcegraph.sourcegraph.com/github.com/sourcegraph/sourcegraph@bzl/oci_wolfi/-/blob/enterprise/cmd/gitserver/BUILD.bazel). Don't be scared: while this will look a bit strange at first, it's much saner and simpler than our Dockerfiles and their muddy shell scripts calling one another in cascade.

## Plan

**1/ (NOW) We merge our branch into `main` today. Here is what changes for you:**

* On `main`:
  * A new job, _Bazel Push_, will push the new images to our registries, with all tags prefixed by `bazel-`.
  * These new images will be picked up by S2, and S2 only.
  * The existing jobs that build and push Docker images stay in place until we have QA'ed the new images enough to confidently roll them out on Dotcom.
  * Because we'll be building both sets of images, more jobs will run on `main`, but this should not affect the wall-clock time.
* On all branches (so your PRs and `main`):
  * The _Bazel Test_ job will now run the Backend Integration Tests, E2E Tests, and CodeIntel QA.
  * This will increase the duration of the test jobs in your PRs, but since we haven't yet removed the `sg lint` step, it should not affect the wall-clock time of your PRs too much.
  * It will also increase your confidence in your changes, as coverage is vastly increased compared to before.
* If you have ongoing branches that affect the Docker images (for example adding a new binary, like the recent `scip-tags`), reach out to us in #job-fair-bazel so we can help you port your changes. It's much, much simpler than before, but it will be unfamiliar at first.
* If something goes awfully wrong, we'll roll back and update this thread.

**2/ (EOW / early next week) Once we're confident enough in what we see on S2, we'll roll the new images out on Dotcom.**

* After the first successful deploy and a few sanity checks, we will drop the old image-building jobs.
* At that point, we'll reach out to all TLs and ask for their help exercising all features of the product, to ensure we catch any potential breakage.

## Test plan

* We tested the new images on `scale-testing` and they worked.
* The new container-building rules come with _container tests_, which ensure that the produced images contain what they should and are configured correctly: [example](https://sourcegraph.sourcegraph.com/github.com/sourcegraph/sourcegraph@bzl/oci_wolfi/-/blob/enterprise/cmd/gitserver/image_test.yaml).

---------

Co-authored-by: Dave Try <davetry@gmail.com>
Co-authored-by: Will Dollman <will.dollman@sourcegraph.com>
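For orientation, here is the recurring pattern the diff below applies to every service, excerpted and lightly condensed from `cmd/blobstore/BUILD.bazel`: `pkg_tar` packs the binary into a layer, `oci_image` assembles it onto a Wolfi base, `oci_tarball` makes it loadable into a local Docker daemon, and `container_structure_test` validates the result.

```starlark
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")

# Pack the Go binary into a tarball that becomes an image layer.
pkg_tar(
    name = "tar_blobstore",
    srcs = [":blobstore"],
)

# Assemble the layer onto the distroless Wolfi base image.
oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = ["/sbin/tini", "--", "/blobstore"],
    tars = [":tar_blobstore"],
)

# `bazel run //cmd/blobstore:image_tarball` loads blobstore:candidate
# into the local Docker daemon.
oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["blobstore:candidate"],
)

# `bazel test //cmd/blobstore:image_test` runs the assertions from
# image_test.yaml against the built image.
container_structure_test(
    name = "image_test",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
)
```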
parent 4a7033d667
commit 58da6780d7
@@ -39,3 +39,6 @@ build --workspace_status_command=./dev/bazel_buildkite_stamp_vars.sh

# temp
build --test_env=INCLUDE_ADMIN_ONBOARDING=false

# Used for container_structure_tests
build --test_env=DOCKER_HOST
5 .bazelrc
@@ -30,3 +30,8 @@ try-import %workspace%/user.bazelrc

# Some special sauce for the special NixOS users in your life :) set by dev-shell shell-hook
try-import %workspace%/.bazelrc-nix

# Used to locally cross compile, when targeting docker images
build:darwin-docker --incompatible_enable_cc_toolchain_resolution
build:darwin-docker --platforms @zig_sdk//platform:linux_amd64
build:darwin-docker --extra_toolchains @zig_sdk//toolchain:linux_amd64_gnu.2.31
6 BUILD.bazel generated
@@ -9,6 +9,7 @@ load("@io_bazel_rules_go//proto/wkt:well_known_types.bzl", "WELL_KNOWN_TYPES_API
load("@npm//:defs.bzl", "npm_link_all_packages")
load("//dev/linters/staticcheck:analyzers.bzl", "STATIC_CHECK_ANALYZERS")
load("@npm//:eslint/package_json.bzl", eslint_bin = "bin")
load("//:stamp_tags.bzl", "stamp_tags")
load("//dev:eslint.bzl", "eslint_test_with_types")

# Gazelle config
@@ -323,3 +324,8 @@ exports_files([
    # under certain conditions. See //ui/assets/...
    "CHANGELOG.md",
])

stamp_tags(
    name = "tags",
    remote_tags = ["""($stamp.STABLE_VERSION // "0.0.0")"""],
)
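A note on the `remote_tags` expression above: it is a jq filter evaluated against Bazel's stamp variables, so pushed images get tagged with `STABLE_VERSION`, falling back to `0.0.0` in unstamped builds. The `stamp_tags` macro itself lives in `//:stamp_tags.bzl`, which is not part of this diff; here is a minimal sketch of how such a macro is commonly written with `aspect_bazel_lib`'s `jq` rule (an assumption, not necessarily the actual implementation):

```starlark
load("@aspect_bazel_lib//lib:jq.bzl", "jq")

def stamp_tags(name, remote_tags):
    # Sketch only: evaluate the jq expressions against the stamp variables
    # ($stamp is populated from stable-status.txt when building with --stamp)
    # and emit one tag per line; oci_push consumes the file via remote_tags.
    jq(
        name = name,
        srcs = [],
        out = "_{}.tags.txt".format(name),
        args = ["--raw-output"],
        filter = "|".join([
            "$ARGS.named.STAMP as $stamp",
            ",".join(remote_tags),
        ]),
    )
```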
87 WORKSPACE
@@ -74,6 +74,56 @@ http_archive(
    urls = ["https://github.com/bazelbuild/rules_rust/releases/download/0.19.0/rules_rust-v0.19.0.tar.gz"],
)

# Container rules
http_archive(
    name = "rules_oci",
    sha256 = "db57efd706f01eb3ce771468366baa1614b5b25f4cce99757e2b8d942155b8ec",
    strip_prefix = "rules_oci-1.0.0",
    url = "https://github.com/bazel-contrib/rules_oci/releases/download/v1.0.0/rules_oci-v1.0.0.tar.gz",
)

http_archive(
    name = "rules_pkg",
    sha256 = "8c20f74bca25d2d442b327ae26768c02cf3c99e93fad0381f32be9aab1967675",
    urls = [
        "https://mirror.bazel.build/github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
        "https://github.com/bazelbuild/rules_pkg/releases/download/0.8.1/rules_pkg-0.8.1.tar.gz",
    ],
)

SRC_CLI_VERSION = "5.0.3"

http_archive(
    name = "src-cli-linux-amd64",
    sha256 = "d125d732ad4c47ae6977c49574b01cc1b3c943b2a2108142267438e829538aa3",
    url = "https://github.com/sourcegraph/src-cli/releases/download/{0}/src-cli_{0}_linux_amd64.tar.gz".format(SRC_CLI_VERSION),
    build_file_content = """
filegroup(
    name = "src-cli-linux-amd64",
    srcs = ["src"],
    visibility = ["//visibility:public"],
)
""",
)

http_archive(
    name = "container_structure_test",
    sha256 = "42edb647b51710cb917b5850380cc18a6c925ad195986f16e3b716887267a2d7",
    strip_prefix = "container-structure-test-104a53ede5f78fff72172639781ac52df9f5b18f",
    urls = ["https://github.com/GoogleContainerTools/container-structure-test/archive/104a53ede5f78fff72172639781ac52df9f5b18f.zip"],
)

# hermetic_cc_toolchain setup ================================
HERMETIC_CC_TOOLCHAIN_VERSION = "v2.0.0-rc2"

http_archive(
    name = "hermetic_cc_toolchain",
    sha256 = "40dff82816735e631e8bd51ede3af1c4ed1ad4646928ffb6a0e53e228e55738c",
    urls = [
        "https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
        "https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(HERMETIC_CC_TOOLCHAIN_VERSION),
    ],
)

# rules_js setup ================================
load("@aspect_rules_js//js:repositories.bzl", "rules_js_dependencies")

@@ -292,17 +342,6 @@ load("@crate_index//:defs.bzl", "crate_repositories")

crate_repositories()

BAZEL_ZIG_CC_VERSION = "v2.0.0-rc2"

http_archive(
    name = "hermetic_cc_toolchain",
    sha256 = "40dff82816735e631e8bd51ede3af1c4ed1ad4646928ffb6a0e53e228e55738c",
    urls = [
        "https://mirror.bazel.build/github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(BAZEL_ZIG_CC_VERSION),
        "https://github.com/uber/hermetic_cc_toolchain/releases/download/{0}/hermetic_cc_toolchain-{0}.tar.gz".format(BAZEL_ZIG_CC_VERSION),
    ],
)

load("@hermetic_cc_toolchain//toolchain:defs.bzl", zig_toolchains = "toolchains")

zig_toolchains()
@@ -311,6 +350,32 @@ load("//dev/backcompat:defs.bzl", "back_compat_defs")

back_compat_defs()

# containers setup ===============================
load("@rules_oci//oci:dependencies.bzl", "rules_oci_dependencies")

rules_oci_dependencies()

load("@rules_oci//oci:repositories.bzl", "LATEST_CRANE_VERSION", "LATEST_ZOT_VERSION", "oci_register_toolchains")

oci_register_toolchains(
    name = "oci",
    crane_version = LATEST_CRANE_VERSION,
    # Uncommenting the zot toolchain will cause it to be used instead of crane for some tasks.
    # Note that it does not support docker-format images.
    # zot_version = LATEST_ZOT_VERSION,
)

# Optional, for oci_tarball rule
load("@rules_pkg//:deps.bzl", "rules_pkg_dependencies")

rules_pkg_dependencies()

load("//dev:oci_deps.bzl", "oci_deps")

oci_deps()

load("//enterprise/cmd/embeddings/shared:assets.bzl", "embbedings_assets_deps")

embbedings_assets_deps()

load("@container_structure_test//:repositories.bzl", "container_structure_test_register_toolchain")

container_structure_test_register_toolchain(name = "cst")
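The `@wolfi_base`, `@wolfi_gitserver_base`, and similar repositories referenced by the `oci_image` targets below are declared by `oci_deps()` in `//dev:oci_deps.bzl`, which this diff does not include. With rules_oci, base images are typically pinned by digest through `oci_pull`; a hypothetical sketch (the `wolfi_base` repository name is real, the image coordinates and digest are illustrative):

```starlark
load("@rules_oci//oci:pull.bzl", "oci_pull")

def oci_deps():
    # Hypothetical coordinates: the actual image path and digest live in
    # //dev:oci_deps.bzl, which is not shown in this diff.
    oci_pull(
        name = "wolfi_base",
        digest = "sha256:0000000000000000000000000000000000000000000000000000000000000000",
        image = "index.docker.io/sourcegraph/wolfi-sourcegraph-base",
    )
```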
9 client/web/src/end-to-end/BUILD.bazel generated
@@ -63,5 +63,14 @@ mocha_test(
        "requires-network",
    ],
    tests = [test.replace(".ts", ".js") for test in glob(["**/*.test.ts"])],
    visibility = ["//testing:__pkg__"],
    deps = [":end-to-end_tests"],
)

# For some reason, we can't explicitly set a visibility on the target itself,
# it seems the esbuild rule doesn't pass along the visibility attribute properly.
alias(
    name = "testing_e2e_bundle",
    actual = ":e2e_bundle",
    visibility = ["//testing:__pkg__"],
)
1 client/web/src/integration/BUILD.bazel generated
@@ -115,6 +115,7 @@ mocha_test(
    flaky = True,
    is_percy_enabled = True,
    tags = [
        "manual",
        "no-sandbox",
        "requires-network",
    ],
96 cmd/blobstore/BUILD.bazel generated
@@ -1,4 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//dev:oci_defs.bzl", "image_repository")

go_library(
    name = "blobstore_lib",
@@ -21,3 +25,95 @@ go_binary(
    embed = [":blobstore_lib"],
    visibility = ["//visibility:public"],
)

pkg_tar(
    name = "tar_blobstore",
    srcs = [":blobstore"],
)

oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/blobstore",
    ],
    tars = [":tar_blobstore"],
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["blobstore:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

# The rules below cover the old blobstore, which is based on s3 proxy. We don't push the newer one, as it's still considered experimental.
oci_image(
    name = "s3_proxy_image",
    base = "@wolfi_s3proxy_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/opt/s3proxy/run-docker-container.sh",
    ],
    env = {
        "LOG_LEVEL": "info",
        "S3PROXY_AUTHORIZATION": "none",
        "S3PROXY_ENDPOINT": "http://0.0.0.0:9000",
        "S3PROXY_IDENTITY": "local-identity",
        "S3PROXY_CREDENTIAL": "local-credential",
        "S3PROXY_VIRTUALHOST": "",
        "S3PROXY_CORS_ALLOW_ALL": "false",
        "S3PROXY_CORS_ALLOW_ORIGINS": "",
        "S3PROXY_CORS_ALLOW_METHODS": "",
        "S3PROXY_CORS_ALLOW_HEADERS": "",
        "S3PROXY_IGNORE_UNKNOWN_HEADERS": "false",
        "S3PROXY_ENCRYPTED_BLOBSTORE": "",
        "S3PROXY_ENCRYPTED_BLOBSTORE_PASSWORD": "",
        "S3PROXY_ENCRYPTED_BLOBSTORE_SALT": "",
        "S3PROXY_V4_MAX_NON_CHUNKED_REQ_SIZE": "33554432",
        "JCLOUDS_PROVIDER": "filesystem",
        "JCLOUDS_ENDPOINT": "",
        "JCLOUDS_REGION": "",
        "JCLOUDS_REGIONS": "us-east-1",
        "JCLOUDS_IDENTITY": "remote-identity",
        "JCLOUDS_CREDENTIAL": "remote-credential",
        "JCLOUDS_KEYSTONE_VERSION": "",
        "JCLOUDS_KEYSTONE_SCOPE": "",
        "JCLOUDS_KEYSTONE_PROJECT_DOMAIN_NAME": "",
        "JCLOUDS_FILESYSTEM_BASEDIR": "/data",
    },
    user = "sourcegraph",
)

container_structure_test(
    name = "s3_proxy_image_test",
    timeout = "short",
    configs = ["s3_proxy_image_test.yaml"],
    driver = "docker",
    image = ":s3_proxy_image",
    tags = ["requires-network"],
)

oci_tarball(
    name = "s3_proxy_image_tarball",
    image = ":s3_proxy_image",
    repo_tags = ["blobstore:candidate"],
)

oci_push(
    name = "s3_proxy_candidate_push",
    image = ":s3_proxy_image",
    remote_tags = "//:tags",
    repository = image_repository("blobstore"),
)
16 cmd/blobstore/image_test.yaml Normal file
@@ -0,0 +1,16 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/blobstore"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"

# TODO(security): This container should not be running as root
# - name: "not running as root"
#   command: "/usr/bin/id"
#   args:
#     - -u
#   excludedOutput: ["^0"]
#   exitCode: 0
16 cmd/blobstore/s3_proxy_image_test.yaml Normal file
@@ -0,0 +1,16 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

fileExistenceTests:
  - name: 'Test for run-docker-container.sh'
    path: '/opt/s3proxy/run-docker-container.sh'
    shouldExist: true
    uid: 0
    gid: 0
49 cmd/frontend/BUILD.bazel generated
@@ -1,4 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")

go_library(
    name = "frontend_lib",
@@ -23,3 +26,49 @@ go_binary(
        "github.com/sourcegraph/sourcegraph/internal/version.timestamp": "{VERSION_TIMESTAMP}",
    },
)

pkg_tar(
    name = "tar_frontend",
    srcs = [":frontend"],
)

oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/frontend",
    ],
    env = {
        "CONFIGURATION_MODE": "server",
        "PGDATABASE": "sg",
        "PGHOST": "pgsql",
        "PGPORT": "5432",
        "PGSSLMODE": "disable",
        "PGUSER": "sg",
        "CODEINTEL_PGDATABASE": "sg",
        "CODEINTEL_PGHOST": "codeintel-db",
        "CODEINTEL_PGPORT": "5432",
        "CODEINTEL_PGSSLMODE": "disable",
        "CODEINTEL_PGUSER": "sg",
        "PUBLIC_REPO_REDIRECTS": "true",
    },
    tars = [":tar_frontend"],
    user = "sourcegraph",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["frontend:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)
28 cmd/frontend/image_test.yaml Normal file
@@ -0,0 +1,28 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/frontend"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

fileExistenceTests:
  - name: '/mnt/cache/frontend'
    path: '/mnt/cache/frontend'
    shouldExist: true
    uid: 100
    gid: 101

metadataTest:
  envVars:
    - key: PGHOST
      value: .+
      isRegex: true
46 cmd/github-proxy/BUILD.bazel generated
@@ -1,4 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

go_library(
    name = "github-proxy_lib",
@@ -21,3 +25,45 @@ go_binary(
    embed = [":github-proxy_lib"],
    visibility = ["//visibility:public"],
)

pkg_tar(
    name = "tar_github-proxy",
    srcs = [":github-proxy"],
)

oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/github-proxy",
    ],
    env = {
        "LOG_REQUEST": "true",
    },
    tars = [":tar_github-proxy"],
    user = "sourcegraph",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["github-proxy:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("github-proxy"),
)
20 cmd/github-proxy/image_test.yaml Normal file
@@ -0,0 +1,20 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/github-proxy"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

metadataTest:
  envVars:
    - key: LOG_REQUEST
      value: true
52 cmd/gitserver/BUILD.bazel generated
@@ -1,4 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")

go_library(
    name = "gitserver_lib",
@@ -21,3 +24,52 @@ go_binary(
    embed = [":gitserver_lib"],
    visibility = ["//visibility:public"],
)

pkg_tar(
    name = "tar_gitserver",
    srcs = [":gitserver"],
)

pkg_tar(
    name = "tar_p4_fusion_wrappers",
    srcs = [
        "p4-fusion-wrapper-detect-kill.sh",
        "process-stats-watcher.sh",
    ],
    package_dir = "/usr/local/bin",
    remap_paths = {
        "/p4-fusion-wrapper-detect-kill.sh": "/p4-fusion",
    },
    visibility = ["//visibility:public"],
)

oci_image(
    name = "image",
    base = "@wolfi_gitserver_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/gitserver",
    ],
    tars = [
        ":tar_gitserver",
        ":tar_p4_fusion_wrappers",
    ],
    user = "sourcegraph",
    workdir = "/",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["gitserver:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)
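Note on the gitserver layers above: `tar_p4_fusion_wrappers` installs `p4-fusion-wrapper-detect-kill.sh` as `/usr/local/bin/p4-fusion`, so callers invoke the wrapper, which presumably delegates to the `p4-fusion-binary` that the image test below checks is runnable.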
68 cmd/gitserver/image_test.yaml Normal file
@@ -0,0 +1,68 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/gitserver"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"
  - name: "git is runnable"
    command: "git"
    args:
      - version
  - name: "git-lfs is runnable"
    command: "git-lfs"
    args:
      - version
  - name: "git p4 is runnable"
    command: "git"
    args:
      - p4
    expectedOutput: ["valid commands: submit"]
    exitCode: 2
  - name: "ssh is runnable"
    command: "ssh"
    exitCode: 255
  - name: "python3 is runnable"
    command: "python3"
    args:
      - --version
  - name: "bash is runnable"
    command: "bash"
    args:
      - --version
  - name: "p4 is runnable"
    command: "p4"
    args:
      - -h
  - name: "coursier is runnable"
    command: "coursier"
  - name: "p4-fusion is runnable"
    command: "p4-fusion-binary"

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

fileExistenceTests:
  - name: '/data/repos'
    path: '/data/repos'
    shouldExist: true
    uid: 100
    gid: 101
  # p4-fusion wrappers
  - name: '/usr/local/bin/p4-fusion'
    path: '/usr/local/bin/p4-fusion'
    shouldExist: true
    uid: 0
    gid: 0
    permissions: '-r-xr-xr-x'
  - name: '/usr/local/bin/process-stats-watcher.sh'
    path: '/usr/local/bin/process-stats-watcher.sh'
    shouldExist: true
    uid: 0
    gid: 0
    permissions: '-r-xr-xr-x'
42 cmd/loadtest/BUILD.bazel generated
@@ -1,4 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

go_library(
    name = "loadtest_lib",
@@ -22,3 +26,41 @@ go_binary(
    embed = [":loadtest_lib"],
    visibility = ["//visibility:public"],
)

pkg_tar(
    name = "tar_loadtest",
    srcs = [":loadtest"],
)

oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/loadtest",
    ],
    tars = [":tar_loadtest"],
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["loadtest:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("loadtest"),
)
16 cmd/loadtest/image_test.yaml Normal file
@@ -0,0 +1,16 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/loadtest"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"

# TODO(security): This container should not be running as root
# - name: "not running as root"
#   command: "/usr/bin/id"
#   args:
#     - -u
#   excludedOutput: ["^0"]
#   exitCode: 0
315 cmd/migrator/BUILD.bazel generated
@@ -1,4 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")

go_library(
    name = "migrator_lib",
@@ -23,3 +26,315 @@ go_binary(
    embed = [":migrator_lib"],
    visibility = ["//visibility:public"],
)

genrule(
    name = "schema_descriptions",
    srcs = ["generate.sh"],
    outs = [
        "schema-descriptions/v3.20.0-internal_database_schema.json",
        "schema-descriptions/v3.20.1-internal_database_schema.json",
        "schema-descriptions/v3.21.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.21.0-internal_database_schema.json",
        "schema-descriptions/v3.21.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.21.1-internal_database_schema.json",
        "schema-descriptions/v3.21.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.21.2-internal_database_schema.json",
        "schema-descriptions/v3.22.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.22.0-internal_database_schema.json",
        "schema-descriptions/v3.22.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.22.1-internal_database_schema.json",
        "schema-descriptions/v3.23.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.23.0-internal_database_schema.json",
        "schema-descriptions/v3.24.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.24.0-internal_database_schema.json",
        "schema-descriptions/v3.24.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.24.1-internal_database_schema.json",
        "schema-descriptions/v3.25.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.25.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.25.0-internal_database_schema.json",
        "schema-descriptions/v3.25.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.25.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.25.1-internal_database_schema.json",
        "schema-descriptions/v3.25.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.25.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.25.2-internal_database_schema.json",
        "schema-descriptions/v3.26.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.26.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.26.0-internal_database_schema.json",
        "schema-descriptions/v3.26.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.26.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.26.1-internal_database_schema.json",
        "schema-descriptions/v3.26.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.26.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.26.2-internal_database_schema.json",
        "schema-descriptions/v3.26.3-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.26.3-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.26.3-internal_database_schema.json",
        "schema-descriptions/v3.27.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.27.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.27.0-internal_database_schema.json",
        "schema-descriptions/v3.27.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.27.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.27.1-internal_database_schema.json",
        "schema-descriptions/v3.27.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.27.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.27.2-internal_database_schema.json",
        "schema-descriptions/v3.27.3-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.27.3-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.27.3-internal_database_schema.json",
        "schema-descriptions/v3.27.4-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.27.4-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.27.4-internal_database_schema.json",
        "schema-descriptions/v3.27.5-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.27.5-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.27.5-internal_database_schema.json",
        "schema-descriptions/v3.28.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.28.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.28.0-internal_database_schema.json",
        "schema-descriptions/v3.29.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.29.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.29.0-internal_database_schema.json",
        "schema-descriptions/v3.29.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.29.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.29.1-internal_database_schema.json",
        "schema-descriptions/v3.30.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.30.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.30.0-internal_database_schema.json",
        "schema-descriptions/v3.30.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.30.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.30.1-internal_database_schema.json",
        "schema-descriptions/v3.30.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.30.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.30.2-internal_database_schema.json",
        "schema-descriptions/v3.30.3-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.30.3-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.30.3-internal_database_schema.json",
        "schema-descriptions/v3.30.4-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.30.4-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.30.4-internal_database_schema.json",
        "schema-descriptions/v3.31.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.31.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.31.0-internal_database_schema.json",
        "schema-descriptions/v3.31.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.31.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.31.1-internal_database_schema.json",
        "schema-descriptions/v3.31.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.31.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.31.2-internal_database_schema.json",
        "schema-descriptions/v3.32.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.32.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.32.0-internal_database_schema.json",
        "schema-descriptions/v3.32.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.32.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.32.1-internal_database_schema.json",
        "schema-descriptions/v3.33.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.33.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.33.0-internal_database_schema.json",
        "schema-descriptions/v3.33.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.33.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.33.1-internal_database_schema.json",
        "schema-descriptions/v3.33.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.33.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.33.2-internal_database_schema.json",
        "schema-descriptions/v3.34.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.34.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.34.0-internal_database_schema.json",
        "schema-descriptions/v3.34.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.34.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.34.1-internal_database_schema.json",
        "schema-descriptions/v3.34.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.34.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.34.2-internal_database_schema.json",
        "schema-descriptions/v3.35.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.35.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.35.0-internal_database_schema.json",
        "schema-descriptions/v3.35.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.35.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.35.1-internal_database_schema.json",
        "schema-descriptions/v3.35.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.35.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.35.2-internal_database_schema.json",
        "schema-descriptions/v3.36.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.36.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.36.0-internal_database_schema.json",
        "schema-descriptions/v3.36.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.36.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.36.1-internal_database_schema.json",
        "schema-descriptions/v3.36.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.36.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.36.2-internal_database_schema.json",
        "schema-descriptions/v3.36.3-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.36.3-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.36.3-internal_database_schema.json",
        "schema-descriptions/v3.37.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.37.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.37.0-internal_database_schema.json",
        "schema-descriptions/v3.38.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.38.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.38.0-internal_database_schema.json",
        "schema-descriptions/v3.38.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.38.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.38.1-internal_database_schema.json",
        "schema-descriptions/v3.39.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.39.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.39.0-internal_database_schema.json",
        "schema-descriptions/v3.39.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.39.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.39.1-internal_database_schema.json",
        "schema-descriptions/v3.40.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.40.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.40.0-internal_database_schema.json",
        "schema-descriptions/v3.40.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.40.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.40.1-internal_database_schema.json",
        "schema-descriptions/v3.40.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.40.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.40.2-internal_database_schema.json",
        "schema-descriptions/v3.41.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.41.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.41.0-internal_database_schema.json",
        "schema-descriptions/v3.41.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.41.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.41.1-internal_database_schema.json",
        "schema-descriptions/v3.42.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.42.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.42.0-internal_database_schema.json",
        "schema-descriptions/v3.42.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.42.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.42.1-internal_database_schema.json",
        "schema-descriptions/v3.42.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.42.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.42.2-internal_database_schema.json",
        "schema-descriptions/v3.43.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.43.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.43.0-internal_database_schema.json",
        "schema-descriptions/v3.43.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.43.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.43.1-internal_database_schema.json",
        "schema-descriptions/v3.43.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v3.43.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v3.43.2-internal_database_schema.json",
        "schema-descriptions/v4.0.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.0.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.0.0-internal_database_schema.json",
        "schema-descriptions/v4.0.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.0.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.0.1-internal_database_schema.json",
        "schema-descriptions/v4.1.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.1.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.1.0-internal_database_schema.json",
        "schema-descriptions/v4.1.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.1.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.1.1-internal_database_schema.json",
        "schema-descriptions/v4.1.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.1.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.1.2-internal_database_schema.json",
        "schema-descriptions/v4.1.3-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.1.3-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.1.3-internal_database_schema.json",
        "schema-descriptions/v4.2.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.2.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.2.0-internal_database_schema.json",
        "schema-descriptions/v4.2.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.2.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.2.1-internal_database_schema.json",
        "schema-descriptions/v4.3.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.3.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.3.0-internal_database_schema.json",
        "schema-descriptions/v4.3.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.3.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.3.1-internal_database_schema.json",
        "schema-descriptions/v4.4.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.4.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.4.0-internal_database_schema.json",
        "schema-descriptions/v4.4.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.4.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.4.1-internal_database_schema.json",
        "schema-descriptions/v4.4.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.4.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.4.2-internal_database_schema.json",
        "schema-descriptions/v4.5.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.5.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.5.0-internal_database_schema.json",
        "schema-descriptions/v4.5.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v4.5.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v4.5.1-internal_database_schema.json",
        "schema-descriptions/v5.0.0-internal_database_schema.codeinsights.json",
        "schema-descriptions/v5.0.0-internal_database_schema.codeintel.json",
        "schema-descriptions/v5.0.0-internal_database_schema.json",
        "schema-descriptions/v5.0.1-internal_database_schema.codeinsights.json",
        "schema-descriptions/v5.0.1-internal_database_schema.codeintel.json",
        "schema-descriptions/v5.0.1-internal_database_schema.json",
        "schema-descriptions/v5.0.2-internal_database_schema.codeinsights.json",
        "schema-descriptions/v5.0.2-internal_database_schema.codeintel.json",
        "schema-descriptions/v5.0.2-internal_database_schema.json",
        "schema-descriptions/v5.0.3-internal_database_schema.codeinsights.json",
        "schema-descriptions/v5.0.3-internal_database_schema.codeintel.json",
        "schema-descriptions/v5.0.3-internal_database_schema.json",
        "schema-descriptions/v5.0.4-internal_database_schema.codeinsights.json",
        "schema-descriptions/v5.0.4-internal_database_schema.codeintel.json",
        "schema-descriptions/v5.0.4-internal_database_schema.json",
    ],
    cmd = "$(location generate.sh) $(@D)",
    tags = ["requires-network"],
    visibility = ["//visibility:public"],
)

sh_test(
    name = "schema_descriptions_test",
    size = "small",
    srcs = [
        "schema_descriptions_test.sh",
    ],
    args = [
        "$(location generate.sh)",
        "$(locations :schema_descriptions)",
    ],
    data = [
        "generate.sh",
        ":schema_descriptions",
    ],
    tags = ["requires-network"],
)

pkg_tar(
    name = "tar_schema_descriptions",
    srcs = [":schema_descriptions"],
    package_dir = "schema-descriptions",
    visibility = ["//enterprise/cmd/migrator:__pkg__"],
)

pkg_tar(
    name = "tar_migrator",
    srcs = [":migrator"],
)

oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/migrator",
    ],
    tars = [
        ":tar_migrator",
        ":tar_schema_descriptions",
    ],
    user = "sourcegraph",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["migrator:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)
@@ -47,6 +47,7 @@ gcs_filenames=(

function download_gcs() {
  outfile="${OUTPUT}/schema-descriptions/${1}-${2}"
  echo "${outfile}"
  if ! curl -fsSL "https://storage.googleapis.com/sourcegraph-assets/migrations/drift/${1}-${2}" 2>/dev/null >"${outfile}"; then
    rm "${outfile}"
  fi
86 cmd/migrator/generate.sh Executable file
@@ -0,0 +1,86 @@
#!/usr/bin/env bash

# This script generates all the schema-descriptions files.

cd "$(dirname "${BASH_SOURCE[0]}")/../.."
set -eu

OUTPUT="$1"

echo "Compiling schema descriptions ..."
mkdir -p "${OUTPUT}/schema-descriptions"

# See internal/database/migration/cliutil/drift-schemas/generate-all.sh
gcs_versions=(
  v3.20.0 v3.20.1
  v3.21.0 v3.21.1 v3.21.2
  v3.22.0 v3.22.1
  v3.23.0
  v3.24.0 v3.24.1
  v3.25.0 v3.25.1 v3.25.2
  v3.26.0 v3.26.1 v3.26.2 v3.26.3
  v3.27.0 v3.27.1 v3.27.2 v3.27.3 v3.27.4 v3.27.5
  v3.28.0
  v3.29.0 v3.29.1
  v3.30.0 v3.30.1 v3.30.2 v3.30.3 v3.30.4
  v3.31.0 v3.31.1 v3.31.2
  v3.32.0 v3.32.1
  v3.33.0 v3.33.1 v3.33.2
  v3.34.0 v3.34.1 v3.34.2
  v3.35.0 v3.35.1 v3.35.2
  v3.36.0 v3.36.1 v3.36.2 v3.36.3
  v3.37.0
  v3.38.0 v3.38.1
  v3.39.0 v3.39.1
  v3.40.0 v3.40.1 v3.40.2
  v3.41.0 v3.41.1
)
gcs_filenames=(
  internal_database_schema.json
  internal_database_schema.codeintel.json
  internal_database_schema.codeinsights.json
)

function download_gcs() {
  outfile="${OUTPUT}/schema-descriptions/${1}-${2}"
  # 3.20.0 is missing the codeintel and codeinsights schemas.
  if ! curl -fsSL "https://storage.googleapis.com/sourcegraph-assets/migrations/drift/${1}-${2}" >"${outfile}"; then
    rm "${outfile}"
  fi
}

for version in "${gcs_versions[@]}"; do
  echo "Persisting schemas for ${version} from GCS..."
  for filename in "${gcs_filenames[@]}"; do
    download_gcs "${version}" "${filename}"
  done
done

function download_github() {
  local version
  version="$1"
  local github_url
  github_url="https://raw.githubusercontent.com/sourcegraph/sourcegraph/${version}/internal/database"

  curl -fsSL "$github_url/schema.json" >"${OUTPUT}/schema-descriptions/${version}-internal_database_schema.json"
  curl -fsSL "$github_url/schema.codeintel.json" >"${OUTPUT}/schema-descriptions/${version}-internal_database_schema.codeintel.json"
  curl -fsSL "$github_url/schema.codeinsights.json" >"${OUTPUT}/schema-descriptions/${version}-internal_database_schema.codeinsights.json"
}

git_versions=(
  v3.42.0 v3.42.1 v3.42.2
  v3.43.0 v3.43.1 v3.43.2
  v4.0.0 v4.0.1
  v4.1.0 v4.1.1 v4.1.2 v4.1.3
  v4.2.0 v4.2.1
  v4.3.0 v4.3.1
  v4.4.0 v4.4.1 v4.4.2
  v4.5.0 v4.5.1
  v5.0.0 v5.0.1 v5.0.2 v5.0.3 v5.0.4
)

for version in "${git_versions[@]}"; do
  echo "Persisting schemas for ${version} from GitHub..."
  download_github "${version}"
done
62 cmd/migrator/image_test.yaml Normal file
@@ -0,0 +1,62 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/migrator"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

fileExistenceTests:
  # The following files are fetched through GCS
  - name: '/schema-descriptions 3.20.0 schema'
    path: '/schema-descriptions/v3.20.0-internal_database_schema.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 3.20.0 schema.codeintel does not exist'
    path: '/schema-descriptions/v3.20.0-internal_database_schema.codeintel.json'
    shouldExist: false
    uid: 0
    gid: 0

  - name: '/schema-descriptions 3.21.0 schema'
    path: '/schema-descriptions/v3.21.0-internal_database_schema.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 3.21.0 schema.codeintel'
    path: '/schema-descriptions/v3.21.0-internal_database_schema.codeintel.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 3.21.0 schema.codeinsights does not exist'
    # We don't have codeinsights for that version, so there should not be a file
    path: '/schema-descriptions/v3.21.0-internal_database_schema.codeinsights.json'
    shouldExist: false
    uid: 0
    gid: 0

  # The following files are fetched through GitHub raw HTTP requests
  - name: '/schema-descriptions 5.0.1 schema'
    path: '/schema-descriptions/v5.0.1-internal_database_schema.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 5.0.1 schema.codeintel'
    path: '/schema-descriptions/v5.0.1-internal_database_schema.codeintel.json'
    shouldExist: true
    uid: 0
    gid: 0
  - name: '/schema-descriptions 5.0.1 schema.codeinsights'
    path: '/schema-descriptions/v5.0.1-internal_database_schema.codeinsights.json'
    shouldExist: true
    uid: 0
    gid: 0
40 cmd/migrator/schema_descriptions_test.sh Executable file
@@ -0,0 +1,40 @@
#!/bin/bash

# Path to the generate.sh script
generate_bin="$1"

# Array of paths for each of the outputs from the :schema_descriptions target.
# shellcheck disable=SC2124
got_files="${@:2}"

# Manually run the script again, so we have a list of all the files
# we expect the :schema_descriptions target to output.
#
# We put them in the ./expected folder.
"$generate_bin" expected/

# Loop over all of them and check if we can find each of them in the
# outputs from the :schema_descriptions target.
for file in expected/**/*; do
  # Trim the "expected" part of the path
  want="${file##expected}"
  found="false"

  # Loop over all files we got.
  # shellcheck disable=SC2068
  for got in ${got_files[@]}; do
    # Trim the "cmd/migrator" prefix from the path
    # and test it against the expected file we're currently iterating with.
    if [[ "${got##cmd/migrator}" == "$want" ]]; then
      found="true"
      break
    fi
  done

  # If we didn't find it, return an error.
  if [[ $found == "false" ]]; then
    echo "Couldn't find expected output $want, perhaps it's missing from the 'srcs' attribute?"
    exit 1
  fi
done
43 cmd/repo-updater/BUILD.bazel generated
@@ -1,4 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

go_library(
    name = "repo-updater_lib",
@@ -21,3 +25,42 @@ go_binary(
        "github.com/sourcegraph/sourcegraph/internal/version.timestamp": "{VERSION_TIMESTAMP}",
    },
)

pkg_tar(
    name = "tar_repo-updater",
    srcs = [":repo-updater"],
)

oci_image(
    name = "image",
    base = "@wolfi_repo_updater_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/repo-updater",
    ],
    tars = [":tar_repo-updater"],
    user = "sourcegraph",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["repo-updater:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("repo-updater"),
)
21 cmd/repo-updater/image_test.yaml Normal file
@@ -0,0 +1,21 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/repo-updater"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"
  - name: "p4 is runnable"
    command: "p4"
    args:
      - -h
  - name: "coursier is runnable"
    command: "coursier"

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
46 cmd/searcher/BUILD.bazel generated
@@ -1,4 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

go_library(
    name = "searcher_lib",
@@ -21,3 +25,45 @@ go_binary(
        "github.com/sourcegraph/sourcegraph/internal/version.timestamp": "{VERSION_TIMESTAMP}",
    },
)

pkg_tar(
    name = "tar_searcher",
    srcs = [":searcher"],
)

oci_image(
    name = "image",
    base = "@wolfi_searcher_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/searcher",
    ],
    env = {
        "CACHE_DIR": "/mnt/cache/searcher",
    },
    tars = [":tar_searcher"],
    user = "sourcegraph",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["searcher:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("searcher"),
)
@@ -15,7 +15,6 @@ LABEL org.opencontainers.image.version=${VERSION}
LABEL com.sourcegraph.github.url=https://github.com/sourcegraph/sourcegraph/commit/${COMMIT_SHA}

ENV CACHE_DIR=/mnt/cache/searcher
RUN mkdir -p ${CACHE_DIR} && chown -R sourcegraph:sourcegraph ${CACHE_DIR}
USER sourcegraph

ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/searcher"]
30 cmd/searcher/image_test.yaml Normal file
@@ -0,0 +1,30 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/searcher"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"
  - name: "pcre is runnable"
    command: "pcregrep"
    args:
      - --help
  - name: "comby is runnable"
    command: "comby"
    args:
      - -h

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

fileExistenceTests:
  - name: '/mnt/cache/searcher'
    path: '/mnt/cache/searcher'
    shouldExist: true
    uid: 100
    gid: 101
134 cmd/server/BUILD.bazel generated
@ -1,4 +1,8 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_tarball")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load(":macro.bzl", "container_dependencies", "dependencies_tars")

go_library(
    name = "server_lib",
@ -17,3 +21,133 @@ go_binary(
    embed = [":server_lib"],
    visibility = ["//visibility:public"],
)

pkg_tar(
    name = "tar_server",
    srcs = [":server"],
)

exports_files(["postgres_exporter.yaml"])

pkg_tar(
    name = "tar_postgres_exporter_config",
    srcs = ["postgres_exporter.yaml"],
)

pkg_tar(
    name = "tar_monitoring_config",
    srcs = [
        "//dev/prometheus:prometheus_targets_linux",
        "//docker-images/grafana/config",
        "//docker-images/prometheus:startup_scripts",
        "//docker-images/prometheus/config:base_config",
        "//monitoring:generate_config",
    ],
    remap_paths = {
        "monitoring/outputs/docs": "/sg_config_docs",
        "monitoring/outputs/prometheus": "/sg_config_prometheus",
        "monitoring/outputs/grafana": "/sg_config_grafana/provisioning/dashboards/sourcegraph",
        "docker-images/grafana/config": "/sg_config_grafana",
        "docker-images/prometheus/config": "/sg_config_prometheus",
        "docker-images/prometheus": "",
        "dev/prometheus/linux": "/sg_prometheus_add_ons",
    },
    strip_prefix = ".",
)

DEPS = [
    "//cmd/frontend",
    "//cmd/github-proxy",
    "//cmd/gitserver",
    "//cmd/migrator",
    "//cmd/repo-updater",
    "//cmd/searcher",
    "//cmd/symbols",
    "//cmd/worker",
]

ZOEKT_DEPS = [
    "@com_github_sourcegraph_zoekt//cmd/zoekt-archive-index",
    "@com_github_sourcegraph_zoekt//cmd/zoekt-git-index",
    "@com_github_sourcegraph_zoekt//cmd/zoekt-sourcegraph-indexserver",
    "@com_github_sourcegraph_zoekt//cmd/zoekt-webserver",
]

# Declares rules for building go_cross_binary + pkg_tar for each dep in DEPS
container_dependencies(DEPS)

container_dependencies(ZOEKT_DEPS)

# This one is a special case because inside server images, the procfile expects to find it
# under syntax-highlighter instead of syntect_server.
pkg_tar(
    name = "tar_syntax-highlighter",
    srcs = ["//docker-images/syntax-highlighter:syntect_server"],
    remap_paths = {"/syntect_server": "/usr/local/bin/syntax_highlighter"},
)

pkg_tar(
    name = "tar_scip-ctags",
    srcs = ["//docker-images/syntax-highlighter:scip-ctags"],
    package_dir = "/usr/local/bin",
)

pkg_tar(
    name = "tar_postgres_optimize",
    srcs = ["//cmd/server/rootfs:postgres-optimize.sh"],
)

# TODO(@jhchabran) prom-wrapper has to be in /bin while we're still
# building the old and new images, because this path is fed to the procfile
# by the server, and cannot be placed at two different places.
pkg_tar(
    name = "tar_prom-wrapper",
    srcs = ["//docker-images/prometheus/cmd/prom-wrapper"],
    package_dir = "/bin",
)

# Tip: to view exactly what gets built here, you can run:
# bazel cquery '//cmd/server:image' --output build
oci_image(
    name = "image",
    base = "@wolfi_server_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/server",
    ],
    env = {
        "GO111MODULES": "on",
        "LANG": "en_US.utf8",
        "LC_ALL": "en_US.utf8",
        # "PGHOST": "/var/run/postgresql",
    },
    tars = [
        ":tar_server",
        ":tar_postgres_exporter_config",
        ":tar_monitoring_config",
        ":tar_syntax-highlighter",
        ":tar_scip-ctags",
        ":tar_postgres_optimize",
        ":tar_prom-wrapper",
        "//cmd/gitserver:tar_p4_fusion_wrappers",
    ] + dependencies_tars(DEPS) + dependencies_tars(ZOEKT_DEPS),
    workdir = "/",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["server:candidate"],
)

container_structure_test(
    name = "image_test",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image_tarball",
    tags = [
        "manual",  # this test is broken, an init function is checking for the config before reaching the main func.
        "requires-network",
    ],
)
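To make the `] + dependencies_tars(DEPS) + dependencies_tars(ZOEKT_DEPS)` splice above concrete, here is a sketch of the value it contributes, derived from the macro defined in cmd/server/macro.bzl further down in this diff; the constant name is hypothetical and only for illustration:

```starlark
# Hypothetical illustration: given DEPS above, dependencies_tars(DEPS) returns
# one pkg_tar label per binary, named after the last label segment.
SERVER_DEP_TARS = [
    ":tar_frontend",
    ":tar_github-proxy",
    ":tar_gitserver",
    ":tar_migrator",
    ":tar_repo-updater",
    ":tar_searcher",
    ":tar_symbols",
    ":tar_worker",
]
```

`dependencies_tars(ZOEKT_DEPS)` resolves the external Zoekt labels the same way, so the server image is layered from one tar per bundled binary on top of the Wolfi base.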
@ -61,10 +61,7 @@ RUN env SANITY_CHECK=true /usr/local/bin/symbols

WORKDIR /

# TODO: Nginx expects these directories but doesn't create them by default, figure out why
RUN mkdir /var/lib/nginx/tmp /var/run

# TODO: Check all paths in script still line up
ENV GO111MODULES=on
# ENV LANG=en_US.utf8 # TODO: Not setting this seems to fix a postgres startup issue
ENV LANG=en_US.utf8
ENV PGHOST=/var/run/postgresql
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/server"]
20 cmd/server/image_test.yaml Normal file
@ -0,0 +1,20 @@
commandTests:
  - name: "sanity check"
    command: "/server"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"
  - name: "scip-ctags is runnable"
    command: "/usr/local/bin/scip-ctags"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"


# TODO(security): This container should not be running as root
# - name: "not running as root"
#   command: "/usr/bin/id"
#   args:
#     - -u
#   excludedOutput: ["^0"]
#   exitCode: 0
30 cmd/server/macro.bzl Normal file
@ -0,0 +1,30 @@
load("@rules_pkg//:pkg.bzl", "pkg_tar")

def get_last_segment(path):
    segments = path.split("/")
    last_segment = segments[-1]

    s = last_segment.split(":")
    if len(s) == 1:
        return last_segment
    else:
        return s[-1]

def container_dependencies(targets):
    for target in targets:
        name = get_last_segment(target)

        pkg_tar(
            name = "tar_{}".format(name),
            srcs = [target],
            remap_paths = {"/{}".format(name): "/usr/local/bin/{}".format(name)},
        )

def dependencies_tars(targets):
    tars = []
    for target in targets:
        name = get_last_segment(target)
        tars.append(":tar_{}".format(name))

    return tars
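As an illustration of the loop above, here is a sketch of the rule that `container_dependencies(["//cmd/frontend"])` declares, with the target name derived by `get_last_segment`; the go_cross_binary wrapping mentioned in the cmd/server/BUILD.bazel comment is not part of this hunk, so it is omitted:

```starlark
# Sketch: container_dependencies(["//cmd/frontend"]) declares the equivalent of:
pkg_tar(
    name = "tar_frontend",
    srcs = ["//cmd/frontend"],
    # The binary lands at /frontend inside the tar; remap it to the path
    # the server procfile expects.
    remap_paths = {"/frontend": "/usr/local/bin/frontend"},
)
```

`dependencies_tars(["//cmd/frontend"])` then returns `[":tar_frontend"]`, ready to be spliced into an `oci_image`'s `tars` attribute.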
1 cmd/server/postgres_exporter.yaml Normal file
@ -0,0 +1 @@
auth_modules:
3 cmd/server/rootfs/BUILD.bazel generated Normal file
@ -0,0 +1,3 @@
exports_files([
    "postgres-optimize.sh",
])
@ -149,7 +149,7 @@ func Main() {
		log.Fatal("Failed to setup nginx:", err)
	}

	postgresExporterLine := fmt.Sprintf(`postgres_exporter: env DATA_SOURCE_NAME="%s" postgres_exporter --log.level=%s`, postgresdsn.New("", "postgres", os.Getenv), convertLogLevel(os.Getenv("SRC_LOG_LEVEL")))
	postgresExporterLine := fmt.Sprintf(`postgres_exporter: env DATA_SOURCE_NAME="%s" postgres_exporter --config.file="/postgres_exporter.yaml" --log.level=%s`, postgresdsn.New("", "postgres", os.Getenv), convertLogLevel(os.Getenv("SRC_LOG_LEVEL")))

	// TODO: This should be fixed properly.
	// Tell `gitserver` that its `hostname` is what the others think of as gitserver hostnames.
46 cmd/symbols/BUILD.bazel generated
@ -1,4 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")

go_library(
    name = "symbols_lib",
@ -21,3 +24,46 @@ go_binary(
        "github.com/sourcegraph/sourcegraph/internal/version.timestamp": "{VERSION_TIMESTAMP}",
    },
)

pkg_tar(
    name = "tar_symbols",
    srcs = [":symbols"],
)

pkg_tar(
    name = "tar_scip-ctags",
    srcs = ["//docker-images/syntax-highlighter:scip-ctags"],
    package_dir = "/usr/local/bin",
)

oci_image(
    name = "image",
    base = "@wolfi_symbols_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/symbols",
    ],
    env = {
        "CACHE_DIR": "/mnt/cache/symbols",
    },
    tars = [
        ":tar_symbols",
        ":tar_scip-ctags",
    ],
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["symbols:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)
@ -63,6 +63,5 @@ COPY --from=symbols-build /symbols /usr/local/bin/symbols
RUN env SANITY_CHECK=true /usr/local/bin/symbols

ENV CACHE_DIR=/mnt/cache/symbols
RUN mkdir -p ${CACHE_DIR}
EXPOSE 3184
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/symbols"]
36 cmd/symbols/image_test.yaml Normal file
@ -0,0 +1,36 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/symbols"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"
  - name: "ctags is runnable"
    command: "universal-ctags"
    args:
      - --version
  - name: "scip-ctags is runnable"
    command: "/usr/local/bin/scip-ctags"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"

# TODO(security): This container should not be running as root
# - name: "not running as root"
#   command: "/usr/bin/id"
#   args:
#     - -u
#   excludedOutput: ["^0"]
#   exitCode: 0

fileExistenceTests:
  - name: '/mnt/cache/symbols'
    path: '/mnt/cache/symbols'
    shouldExist: true
    uid: 100
    gid: 101
    permissions: 'drwxr-xr-x'
  - name: 'jansson package'
    path: '/usr/lib/libjansson.la'
    shouldExist: true
34 cmd/worker/BUILD.bazel generated
@ -1,4 +1,7 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_library")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")

go_library(
    name = "worker_lib",
@ -21,3 +24,34 @@ go_binary(
        "github.com/sourcegraph/sourcegraph/internal/version.timestamp": "{VERSION_TIMESTAMP}",
    },
)

pkg_tar(
    name = "tar_worker",
    srcs = [":worker"],
)

oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = [
        "/sbin/tini",
        "--",
        "/worker",
    ],
    tars = [":tar_worker"],
    user = "sourcegraph",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["worker:candidate"],
)

container_structure_test(
    name = "image_test",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)
15 cmd/worker/image_test.yaml Normal file
@ -0,0 +1,15 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "binary is runnable"
    command: "/worker"
    envVars:
      - key: "SANITY_CHECK"
        value: "true"

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
@ -34,6 +34,13 @@ func currentVersion(logger log.Logger) (oobmigration.Version, error) {
		return version, nil
	}

	// TODO: @jhchabran
	// The infer mechanism doesn't work in CI, because we weren't expecting to run a container
	// with a 0.0.0+dev version. This fixes it. We should come back to this.
	if version.IsDev(version.Version()) && os.Getenv("BAZEL_SKIP_OOB_INFER_VERSION") != "" {
		return oobmigration.NewVersion(5, 99), nil
	}

	version, err := inferNextReleaseVersion()
	if err != nil {
		return oobmigration.Version{}, err
1 dev/authtest/BUILD.bazel generated
@ -10,6 +10,7 @@ go_test(
        "repository_test.go",
        "site_admin_test.go",
    ],
    visibility = ["//testing:__subpackages__"],
    deps = [
        "//internal/auth",
        "//internal/extsvc",
@ -22,6 +22,6 @@ FLAKES = {
                "path": "enterprise/internal/codeintel/ranking/internal/store",
                "prefix": "Test",
                "reason": "Shifting constraints on table; ranking is experimental"
            }
        },
    ]
}
@ -1,6 +1,16 @@
#!/usr/bin/env bash

stamp_version="${VERSION:-$(git rev-parse HEAD)}"
build_number="${BUILDKITE_BUILD_NUMBER:-000000}"
date_fragment="$(date +%y-%m-%d)"
latest_tag="5.0"
stamp_version="${build_number}_${date_fragment}_${latest_tag}-$(git rev-parse --short HEAD)-bazel-qa"

echo STABLE_VERSION "$stamp_version"
echo VERSION_TIMESTAMP "$(date +%s)"

# Unstable Buildkite env vars
echo "BUILDKITE $BUILDKITE"
echo "BUILDKITE_COMMIT $BUILDKITE_COMMIT"
echo "BUILDKITE_BRANCH $BUILDKITE_BRANCH"
echo "BUILDKITE_PULL_REQUEST_REPO $BUILDKITE_PULL_REQUEST_REPO"
echo "BUILDKITE_PULL_REQUEST $BUILDKITE_PULL_REQUEST"
1 dev/ci/integration/code-intel/BUILD.bazel generated Normal file
@ -0,0 +1 @@
exports_files(["repos.json"])
@ -3,12 +3,18 @@
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
set -e

URL="${1:-"http://localhost:7080"}"
# URL="${1:-"http://localhost:7080"}"

echo "--- pnpm run test-e2e"
env SOURCEGRAPH_BASE_URL="$URL" pnpm run cover-e2e
echo "--- bazel test e2e"
bazel \
  --bazelrc=.bazelrc \
  --bazelrc=.aspect/bazelrc/ci.bazelrc \
  --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc \
  test \
  //client/web/src/end-to-end:e2e
# env SOURCEGRAPH_BASE_URL="$URL" pnpm run cover-e2e

echo "--- coverage"
pnpm nyc report -r json
# Upload the coverage under the "e2e" flag (toggleable in the CodeCov UI)
./dev/ci/codecov.sh -F e2e
# echo "--- coverage"
# pnpm nyc report -r json
# # Upload the coverage under the "e2e" flag (toggleable in the CodeCov UI)
# ./dev/ci/codecov.sh -F e2e
@ -2,8 +2,5 @@

set -euo pipefail

pnpm install --frozen-lockfile
pnpm generate

curl -L https://sourcegraph.com/.api/src-cli/src_linux_amd64 -o /usr/local/bin/src
chmod +x /usr/local/bin/src
@ -7,6 +7,10 @@ root_dir=$(pwd)

set -ex

# Install dependencies for upgrade test script
pnpm install --frozen-lockfile
pnpm generate

dev/ci/integration/setup-deps.sh
dev/ci/integration/setup-display.sh
@ -70,7 +70,13 @@ func getPaths(ctx context.Context, bucket *storage.BucketHandle) (paths []string
func downloadAll(ctx context.Context, bucket *storage.BucketHandle, paths []string) error {
	repoRoot, err := root.RepositoryRoot()
	if err != nil {
		return err
		if err == root.ErrNotInsideSourcegraph && os.Getenv("BAZEL_TEST") != "" {
			// If we're running inside Bazel, we do not have access to the repo root.
			// In that case, we simply use CWD instead.
			repoRoot = "."
		} else {
			return err
		}
	}
	indexesDir := filepath.Join(repoRoot, relativeIndexesDir)
@ -17,6 +17,7 @@ var (
	verbose      bool
	pollInterval time.Duration
	timeout      time.Duration
	srcPath      string

	start = time.Now()
)
@ -28,6 +29,7 @@ func init() {
	flag.BoolVar(&verbose, "verbose", false, "Display full state from graphql")
	flag.DurationVar(&pollInterval, "poll-interval", time.Second*5, "The time to wait between graphql requests")
	flag.DurationVar(&timeout, "timeout", 0, "The time it should take to upload and process all targets")
	flag.StringVar(&srcPath, "src-path", "src", "Path to src-cli binary")
}

func main() {
@ -127,7 +127,7 @@ func upload(ctx context.Context, repoName, commit, file string) (string, error)
		return "", err
	}

	cmd := exec.CommandContext(ctx, "src", append([]string{"lsif", "upload", "-json"}, args...)...)
	cmd := exec.CommandContext(ctx, srcPath, append([]string{"lsif", "upload", "-json"}, args...)...)
	cmd.Dir = tempDir
	cmd.Env = os.Environ()
	cmd.Env = append(cmd.Env, fmt.Sprintf("SRC_ENDPOINT=%s", internal.SourcegraphEndpoint))
4 dev/gqltest/BUILD.bazel generated
@ -18,6 +18,10 @@ go_test(
        "site_config_test.go",
        "sub_repo_permissions_test.go",
    ],
    visibility = [
        "//testing:__pkg__",
        "//testing:__subpackages__",
    ],
    deps = [
        "//internal/extsvc",
        "//internal/gqltestutil",
6 dev/oci_defs.bzl Normal file
@ -0,0 +1,6 @@
REGISTRY_REPOSITORY_PREFIX = "europe-west1-docker.pkg.dev/sourcegraph-security-logging/rules-oci-test/{}"
# REGISTRY_REPOSITORY_PREFIX = "us.gcr.io/sourcegraph-dev/{}"

def image_repository(image):
    return REGISTRY_REPOSITORY_PREFIX.format(image)
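For reference, a sketch of how `image_repository` is consumed by the `candidate_push` rules earlier in this diff (mirroring cmd/searcher/BUILD.bazel); the expanded string assumes the rules-oci-test prefix that is currently active above:

```starlark
load("//dev:oci_defs.bzl", "image_repository")

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    # image_repository("searcher") expands the prefix, yielding
    # "europe-west1-docker.pkg.dev/sourcegraph-security-logging/rules-oci-test/searcher".
    repository = image_repository("searcher"),
)
```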
173 dev/oci_deps.bzl Normal file
@ -0,0 +1,173 @@
load("@rules_oci//oci:pull.bzl", "oci_pull")

# Quick script to get the latest tags for each of the base images:
#
# grep 'image = ' ./dev/oci_deps.bzl | while read -r str ; do
#   str_no_spaces="${str#"${str%%[![:space:]]*}"}"  # remove leading spaces
#   url="${str_no_spaces#*\"}"                      # remove prefix until first quote
#   url="${url%%\"*}"                               # remove suffix from first quote
#
#   IMAGE_DETAILS=$(gcloud container images list-tags $url --limit=1 --sort-by=~timestamp --format=json)
#   TAG=$(echo $IMAGE_DETAILS | jq -r '.[0].tags[0]')
#   DIGEST=$(echo $IMAGE_DETAILS | jq -r '.[0].digest')
#
#   echo $url
#   echo $DIGEST
# done

def oci_deps():
    oci_pull(
        name = "wolfi_base",
        digest = "sha256:a236182c8e16e23aafc2d96a0baea17414626064373e38b9abbd5056c3c4d990",
        image = "us.gcr.io/sourcegraph-dev/wolfi-sourcegraph-base",
    )

    oci_pull(
        name = "wolfi_cadvisor_base",
        digest = "sha256:11ab7cbe533a01b31d8b8803ec8e6e52074f6d5ba20a96166f7f765db4e8819b",
        image = "us.gcr.io/sourcegraph-dev/wolfi-cadvisor-base",
    )

    oci_pull(
        name = "wolfi_symbols_base",
        digest = "sha256:c1b84bd2c2840bbed25a65fa6ade38755be180f4ca0b60e45b461339073a5dc6",
        image = "us.gcr.io/sourcegraph-dev/wolfi-symbols-base",
    )

    oci_pull(
        name = "wolfi_server_base",
        digest = "sha256:0cded0aabc7509d0ff02cc16f76374730e834d8aadd875f47bb4dc381d4105a4",
        image = "us.gcr.io/sourcegraph-dev/wolfi-server-base",
    )

    oci_pull(
        name = "wolfi_gitserver_base",
        digest = "sha256:ac49e90f580bd7f729c78116e384feb25e8c2e1dc7f1ed69395901e68cfd82d1",
        image = "us.gcr.io/sourcegraph-dev/wolfi-gitserver-base",
    )

    oci_pull(
        name = "wolfi_grafana_base",
        digest = "sha256:ec1049f35ff7e4ab6ff7b4cc6790996ad74d196b8dcee8ea5283fca759156637",
        image = "us.gcr.io/sourcegraph-dev/wolfi-grafana",
    )

    oci_pull(
        name = "wolfi_postgres_exporter_base",
        digest = "sha256:544d4f8a44cd03c7110855654d17490d3d485d69198a8789a2bfa25029a66e09",
        image = "us.gcr.io/sourcegraph-dev/wolfi-postgres-exporter-base",
    )

    oci_pull(
        name = "wolfi_jaeger_all_in_one_base",
        digest = "sha256:f8e416626dcdc7d14894876e0751f2bfa0653169b14c4a929b3834d30cea087d",
        image = "us.gcr.io/sourcegraph-dev/wolfi-jaeger-all-in-one-base",
    )

    oci_pull(
        name = "wolfi_jaeger_agent_base",
        digest = "sha256:aa6ee947778196115d3027eab91bf0d0e0cc91a03d01f5c2854cd6ecf97b089f",
        image = "us.gcr.io/sourcegraph-dev/wolfi-jaeger-agent-base",
    )

    oci_pull(
        name = "wolfi_redis_base",
        digest = "sha256:4945cd8307f1d835b9b9607a1168ecfb84cdc5a5c14eb7c4ba84c08c50741b7b",
        image = "us.gcr.io/sourcegraph-dev/wolfi-redis-base",
    )

    oci_pull(
        name = "wolfi_redis_exporter_base",
        digest = "sha256:d1a3d302a4be9447f2b0863587cb042e69c5cceb4eaac5294c9632b58f285a64",
        image = "us.gcr.io/sourcegraph-dev/wolfi-redis-exporter-base",
    )

    oci_pull(
        name = "wolfi_syntax_highlighter_base",
        digest = "sha256:0c777bb76c4e586702f5367f54e62881f2f0fa5a96a1bd519ebaff1e982d1ef1",
        image = "us.gcr.io/sourcegraph-dev/wolfi-syntax-highlighter-base",
    )

    oci_pull(
        name = "wolfi_search_indexer_base",
        digest = "sha256:991f696b62c4afa2ced41b1071b15e44094a2c541d973a831b34d4a4db4c2131",
        image = "us.gcr.io/sourcegraph-dev/wolfi-search-indexer-base",
    )

    oci_pull(
        name = "wolfi_repo_updater_base",
        digest = "sha256:74e478195b750c5547d6d240bc5d9e94b10d470d6bf2ef855dcd912f83550cdf",
        image = "us.gcr.io/sourcegraph-dev/wolfi-repo-updater-base",
    )

    oci_pull(
        name = "wolfi_searcher_base",
        digest = "sha256:fba8f4cce1306463b03c0388eb830bac63f02f4b8941c4df8f9fde99390da32e",
        image = "us.gcr.io/sourcegraph-dev/wolfi-searcher-base",
    )

    oci_pull(
        name = "wolfi_executor_base",
        digest = "sha256:0ab096b0ffae9054fa18fa8121b105acfda767c5f74fd8530f72f8fe87ef20c2",
        image = "us.gcr.io/sourcegraph-dev/wolfi-executor-base",
    )

    oci_pull(
        name = "wolfi_bundled_executor_base",
        digest = "sha256:8941bfcf8db44c462c4fee057b4d2a128cd268d6fb385989086af4381a05137e",
        image = "us.gcr.io/sourcegraph-dev/wolfi-bundled-executor-base",
    )

    oci_pull(
        name = "wolfi_executor_kubernetes_base",
        digest = "sha256:c2053b17cb8904a09773552049929b73215af24520ca22613cb5b16f96d8bcfa",
        image = "us.gcr.io/sourcegraph-dev/wolfi-executor-kubernetes-base",
    )

    oci_pull(
        name = "wolfi_batcheshelper_base",
        digest = "sha256:2abe940a2d9e13a998d07e4faf072c7ba6e17243a0b9c56a3adf9878d9332f6a",
        image = "us.gcr.io/sourcegraph-dev/wolfi-batcheshelper-base",
    )

    oci_pull(
        name = "wolfi_prometheus_base",
        digest = "sha256:11a84d7ae6a7f3a8954306f224391a2301e5518fbe2d6e8dfee4abe12ca91180",
        image = "us.gcr.io/sourcegraph-dev/wolfi-prometheus-base",
    )

    oci_pull(
        name = "wolfi_postgresql-12_base",
        digest = "sha256:7c8bfb96038fb6b3980fb6b12f692a54ff8f1d1cebbd72d9706578be8d278cae",
        image = "us.gcr.io/sourcegraph-dev/wolfi-postgresql-12-base",
    )

    oci_pull(
        name = "wolfi_postgresql-12-codeinsights_base",
        digest = "sha256:4d85ed245a6d3f22a42cf2fdf840a8e14590fb5216f597a19697e7f3025a0a26",
        image = "us.gcr.io/sourcegraph-dev/wolfi-postgresql-12-codeinsights-base",
    )

    oci_pull(
        name = "wolfi_node_exporter_base",
        digest = "sha256:0a42810eafc6c81f95cb3295003de966d3a857d37dd79ef45a3be54c3b4c8e7c",
        image = "us.gcr.io/sourcegraph-dev/wolfi-node-exporter-base",
    )

    oci_pull(
        name = "wolfi_opentelemetry_collector_base",
        digest = "sha256:d5d55fb77056422eea328b709795a38cf599e42f2a90787c9dd32c2f1a3654f3",
        image = "us.gcr.io/sourcegraph-dev/wolfi-opentelemetry-collector-base",
    )

    oci_pull(
        name = "wolfi_searcher_base",
        digest = "sha256:7ae7d14bc055f5dbcdc727261025e9527558513a61093e956cb39dae7dbc0dcf",
        image = "us.gcr.io/sourcegraph-dev/wolfi-searcher-base",
    )

    oci_pull(
        name = "wolfi_s3proxy_base",
        digest = "sha256:4076564aaa3bfc17a8e50822e9937f240f155a69f9cffcb039a49f892446d28c",
        image = "us.gcr.io/sourcegraph-dev/wolfi-blobstore-base",
    )
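The WORKSPACE call site is not part of this diff, so the following is only an assumed sketch of how `oci_deps()` gets wired up:

```starlark
# Assumed WORKSPACE wiring (not shown in this diff):
load("//dev:oci_deps.bzl", "oci_deps")

# Instantiates every @wolfi_*_base repository pinned by digest above, so that
# oci_image rules can reference bases such as "@wolfi_searcher_base".
oci_deps()
```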
15 dev/prometheus/BUILD.bazel generated Normal file
@ -0,0 +1,15 @@
filegroup(
    name = "prometheus_targets_linux",
    srcs = [
        "linux/prometheus_targets.yml",
    ],
    visibility = ["//visibility:public"],
)

filegroup(
    name = "prometheus_targets_all",
    srcs = [
        "all/prometheus_targets.yml",
    ],
    visibility = ["//visibility:public"],
)
@ -9,6 +9,7 @@ URL="http://localhost:$PORT"
DATA=${DATA:-"/tmp/sourcegraph-data"}
SOURCEGRAPH_LICENSE_GENERATION_KEY=${SOURCEGRAPH_LICENSE_GENERATION_KEY:-""}
SG_FEATURE_FLAG_GRPC=${SG_FEATURE_FLAG_GRPC:-"false"}
DB_STARTUP_TIMEOUT="10s"

echo "--- Checking for existing Sourcegraph instance at $URL"
if curl --output /dev/null --silent --head --fail "$URL"; then
@ -19,16 +20,16 @@ fi

# shellcheck disable=SC2153
case "$CLEAN" in
  "true")
    clean=y
    ;;
  "false")
    clean=n
    ;;
  *)
    echo -n "Do you want to delete $DATA and start clean? [Y/n] "
    read -r clean
    ;;
esac

if [ "$clean" != "n" ] && [ "$clean" != "N" ]; then
@ -36,14 +37,19 @@ if [ "$clean" != "n" ] && [ "$clean" != "N" ]; then
  rm -rf "$DATA"
fi

# WIP WIP
# -e DISABLE_BLOBSTORE=true \
# -e DISABLE_OBSERVABILITY=true \
# -it \
# --entrypoint sh \

echo "--- Starting server ${IMAGE} on port ${PORT}"
docker run "$@" \
  --publish "$PORT":7080 \
  -e SRC_LOG_LEVEL=dbug \
  -e DEBUG=t \
  -e ALLOW_SINGLE_DOCKER_CODE_INSIGHTS=t \
  -e SOURCEGRAPH_LICENSE_GENERATION_KEY="$SOURCEGRAPH_LICENSE_GENERATION_KEY" \
  -e SG_FEATURE_FLAG_GRPC="$SG_FEATURE_FLAG_GRPC" \
  -e DB_STARTUP_TIMEOUT="$DB_STARTUP_TIMEOUT" \
  --volume "$DATA/config:/etc/sourcegraph" \
  --volume "$DATA/data:/var/opt/sourcegraph" \
  "$IMAGE"
@ -214,7 +214,7 @@ Base pipeline (more steps might be included based on branch changes):
- **Client checks**: Upload Storybook to Chromatic, Enterprise build, Build (client/jetbrains), Tests for VS Code extension, Unit, integration, and E2E tests for the Cody VS Code extension, Stylelint (all)
- **Integration tests**: Backend integration tests (gRPC), Backend integration tests, Code Intel QA
- **End-to-end tests**: Executors E2E, Sourcegraph E2E, Sourcegraph Upgrade
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack, Publish executor image, Publish executor binary, Publish docker registry mirror image
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack, Publish executor image, Publish executor binary, Publish docker registry mirror image, Push OCI/Wolfi

### Release branch

@ -233,7 +233,7 @@ Base pipeline (more steps might be included based on branch changes):
- **Client checks**: Upload Storybook to Chromatic, Enterprise build, Build (client/jetbrains), Tests for VS Code extension, Unit, integration, and E2E tests for the Cody VS Code extension, Stylelint (all)
- **Integration tests**: Backend integration tests (gRPC), Backend integration tests, Code Intel QA
- **End-to-end tests**: Executors E2E, Sourcegraph E2E, Sourcegraph Upgrade
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack, Push OCI/Wolfi

### Browser extension release build

@ -293,7 +293,7 @@ Base pipeline (more steps might be included based on branch changes):
- **Client checks**: Upload Storybook to Chromatic, Enterprise build, Build (client/jetbrains), Tests for VS Code extension, Unit, integration, and E2E tests for the Cody VS Code extension, Stylelint (all)
- **Integration tests**: Backend integration tests (gRPC), Backend integration tests, Code Intel QA
- **End-to-end tests**: Executors E2E, Sourcegraph E2E, Sourcegraph Upgrade
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack, Publish executor image, Publish executor binary
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack, Publish executor image, Publish executor binary, Push OCI/Wolfi

### Main dry run

@ -317,7 +317,7 @@ Base pipeline (more steps might be included based on branch changes):
- **Client checks**: Upload Storybook to Chromatic, Enterprise build, Build (client/jetbrains), Tests for VS Code extension, Unit, integration, and E2E tests for the Cody VS Code extension, Stylelint (all)
- **Integration tests**: Backend integration tests (gRPC), Backend integration tests, Code Intel QA
- **End-to-end tests**: Executors E2E, Sourcegraph E2E, Sourcegraph Upgrade
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack
- **Publish images**: server, executor, alpine-3.14, postgres-12-alpine, blobstore, cadvisor, codeinsights-db, codeintel-db, frontend, github-proxy, gitserver, grafana, indexed-searcher, migrator, node-exporter, opentelemetry-collector, postgres_exporter, precise-code-intel-worker, prometheus, prometheus-gcp, redis-cache, redis-store, redis_exporter, repo-updater, search-indexer, searcher, syntax-highlighter, worker, symbols, batcheshelper, blobstore2, bundled-executor, dind, embeddings, executor-kubernetes, executor-vm, jaeger-agent, jaeger-all-in-one, cody-gateway, sg, cody-slack, Push OCI/Wolfi

### Patch image
44 docker-images/cadvisor/BUILD.bazel generated Normal file
@ -0,0 +1,44 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

oci_image(
    name = "image",
    base = "@wolfi_cadvisor_base",
    entrypoint = [
        "/usr/bin/cadvisor",
        "-logtostderr",
        "-port=48080",
        "-enable_metrics=cpu,diskIO,memory,network",
        "-docker_only",
        "-housekeeping_interval=10s",
        "-max_housekeeping_interval=15s",
        "-event_storage_event_limit=default=0",
        "-v=3",
        "-event_storage_age_limit=default=0",
        "-containerd=/var/run/containerd/containerd.sock",
    ],
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["cadvisor:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("cadvisor"),
)
15 docker-images/cadvisor/image_test.yaml Normal file
@ -0,0 +1,15 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "cadvisor is runnable"
    command: "cadvisor"
    args:
      - --version

# TODO(security): Image runs as root
# - name: "not running as root"
#   command: "/usr/bin/id"
#   args:
#     - -u
#   excludedOutput: ["^0"]
#   exitCode: 0
61 docker-images/codeinsights-db/BUILD.bazel generated Normal file
@ -0,0 +1,61 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//dev:oci_defs.bzl", "image_repository")

filegroup(
    name = "config",
    srcs = glob([
        "rootfs/*",
        "config/*",
    ]),
)

pkg_tar(
    name = "config_tar",
    srcs = [
        ":config",
    ],
    remap_paths = {
        "/rootfs": "/",
        "/config": "/usr/share/postgresql",
    },
)

oci_image(
    name = "image",
    base = "@wolfi_postgresql-12-codeinsights_base",
    entrypoint = ["/postgres-wolfi.sh"],
    env = {
        "POSTGRES_PASSWORD": "",
        "POSTGRES_USER": "sg",
        "POSTGRES_DB": "sg",
        "PGDATA": "/var/lib/postgresql/pgdata",
        "LANG": "en_US.utf8",
        "PGHOST": "/var/run/postgresql",
    },
    tars = [":config_tar"],
    user = "postgres",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["codeinsights-db:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("codeinsights-db"),
)
753 docker-images/codeinsights-db/config/postgresql.conf.sample Normal file
@ -0,0 +1,753 @@
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
# name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes Time units: us = microseconds
# kB = kilobytes ms = milliseconds
# MB = megabytes s = seconds
# GB = gigabytes min = minutes
# TB = terabytes h = hours
# d = days


#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------

# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.

#data_directory = 'ConfigDir' # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf' # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf' # ident configuration file
# (change requires restart)

# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = '' # write an extra PID file
# (change requires restart)


#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------

# - Connection Settings -

# Sourcegraph: Listen on all interfaces
listen_addresses = '*' # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432 # (change requires restart)
max_connections = 100 # (change requires restart)
#superuser_reserved_connections = 3 # (change requires restart)
unix_socket_directories = '/var/run/postgresql' # comma-separated list of directories
# (change requires restart)
#unix_socket_group = '' # (change requires restart)
#unix_socket_permissions = 0777 # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off # advertise server via Bonjour
# (change requires restart)
#bonjour_name = '' # defaults to the computer name
# (change requires restart)

# - TCP settings -
# see "man 7 tcp" for details

#tcp_keepalives_idle = 0 # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0 # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0 # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0 # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default

# - Authentication -

#authentication_timeout = 1min # 1s-600s
#password_encryption = md5 # md5 or scram-sha-256
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off

# - SSL -

#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL' # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off


#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------

# - Memory -

shared_buffers = 128MB # min 128kB
# (change requires restart)
#huge_pages = try # on, off, or try
# (change requires restart)
#temp_buffers = 8MB # min 800kB
#max_prepared_transactions = 0 # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB # min 64kB
#maintenance_work_mem = 64MB # min 1MB
#autovacuum_work_mem = -1 # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB # min 100kB
#shared_memory_type = mmap # the default is the first option
# supported by the operating system:
# mmap
# sysv
# windows
# (change requires restart)
dynamic_shared_memory_type = posix # the default is the first option
# supported by the operating system:
# posix
# sysv
# windows
# mmap
# (change requires restart)

# - Disk -

#temp_file_limit = -1 # limits per-process temp file space
# in kB, or -1 for no limit

# - Kernel Resources -

#max_files_per_process = 1000 # min 25
# (change requires restart)

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0 # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1 # 0-10000 credits
#vacuum_cost_page_miss = 10 # 0-10000 credits
#vacuum_cost_page_dirty = 20 # 0-10000 credits
#vacuum_cost_limit = 200 # 1-10000 credits

# - Background Writer -

#bgwriter_delay = 200ms # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100 # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0 # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB # measured in pages, 0 disables

# - Asynchronous Behavior -

#effective_io_concurrency = 1 # 1-1000; 0 disables prefetching
#max_worker_processes = 8 # (change requires restart)
#max_parallel_maintenance_workers = 2 # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2 # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8 # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1 # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0 # measured in pages, 0 disables


#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------

# - Settings -

#wal_level = replica # minimal, replica, or logical
# (change requires restart)
#fsync = on # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync # the default is the first option
# supported by the operating system:
# open_datasync
# fdatasync (default on Linux and FreeBSD)
# fsync
# fsync_writethrough
# open_sync
#full_page_writes = on # recover from partial page writes
#wal_compression = off # enable compression of full-page writes
#wal_log_hints = off # also do full page writes of non-critical updates
# (change requires restart)
#wal_init_zero = on # zero-fill new WAL files
#wal_recycle = on # recycle WAL files
#wal_buffers = -1 # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms # 1-10000 milliseconds
#wal_writer_flush_after = 1MB # measured in pages, 0 disables

#commit_delay = 0 # range 0-100000, in microseconds
#commit_siblings = 5 # range 1-1000

# - Checkpoints -

#checkpoint_timeout = 5min # range 30s-1d
max_wal_size = 1GB
min_wal_size = 80MB
#checkpoint_completion_target = 0.5 # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB # measured in pages, 0 disables
#checkpoint_warning = 30s # 0 disables

# - Archiving -

#archive_mode = off # enables archiving; off, on, or always
# (change requires restart)
#archive_command = '' # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
# %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0 # force a logfile segment switch after this
# number of seconds; 0 disables

# - Archive Recovery -

# These are only used in recovery mode.

#restore_command = '' # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
# %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = '' # command to execute at every restartpoint
#recovery_end_command = '' # command to execute at completion of recovery

# - Recovery Target -

# Set these only when performing a targeted recovery.

#recovery_target = '' # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = '' # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = '' # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = '' # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = '' # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest' # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause' # 'pause', 'promote', 'shutdown'
# (change requires restart)


#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------

# - Sending Servers -

# Set these on the master and on any standby that will send replication data.

#max_wal_senders = 10 # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0 # in logfile segments; 0 disables
#wal_sender_timeout = 60s # in milliseconds; 0 disables

#max_replication_slots = 10 # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off # collect timestamp of transaction commit
# (change requires restart)

# - Master Server -

# These settings are ignored on a standby server.

#synchronous_standby_names = '' # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0 # number of xacts by which cleanup is delayed

# - Standby Servers -

# These settings are ignored on a master server.

#primary_conninfo = '' # connection string to sending server
# (change requires restart)
#primary_slot_name = '' # replication slot on sending server
# (change requires restart)
#promote_trigger_file = '' # file name whose presence ends recovery
#hot_standby = on # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s # send replies at least this often
# 0 disables
#hot_standby_feedback = off # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0 # minimum delay for applying changes during recovery

# - Subscribers -

# These settings are ignored on a publisher.

#max_logical_replication_workers = 4 # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2 # taken from max_logical_replication_workers


#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------

# - Planner Method Configuration -

#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on

# - Planner Cost Constants -

#seq_page_cost = 1.0 # measured on an arbitrary scale
#random_page_cost = 4.0 # same scale as above
#cpu_tuple_cost = 0.01 # same scale as above
#cpu_index_tuple_cost = 0.005 # same scale as above
#cpu_operator_cost = 0.0025 # same scale as above
#parallel_tuple_cost = 0.1 # same scale as above
#parallel_setup_cost = 1000.0 # same scale as above

#jit_above_cost = 100000 # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000 # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000 # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables

#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB

# - Genetic Query Optimizer -

#geqo = on
#geqo_threshold = 12
#geqo_effort = 5 # range 1-10
#geqo_pool_size = 0 # selects default based on effort
#geqo_generations = 0 # selects default based on effort
#geqo_selection_bias = 2.0 # range 1.5-2.0
#geqo_seed = 0.0 # range 0.0-1.0

# - Other Planner Options -

#default_statistics_target = 100 # range 1-10000
#constraint_exclusion = partition # on, off, or partition
#cursor_tuple_fraction = 0.1 # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8 # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on # allow JIT compilation
#plan_cache_mode = auto # auto, force_generic_plan or
# force_custom_plan


#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -

#log_destination = 'stderr' # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.

# This is used when logging to stderr:
#logging_collector = off # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)

# These are only used if logging_collector is on:
#log_directory = 'log' # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log' # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600 # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.

# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on

# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'

# - When to Log -

#log_min_messages = warning # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic

#log_min_error_statement = error # values in order of decreasing detail:
# debug5
# debug4
# debug3
# debug2
# debug1
# info
# notice
# warning
# error
# log
# fatal
# panic (effectively off)

#log_min_duration_statement = -1 # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds

#log_transaction_sample_rate = 0.0 # Fraction of transactions whose statements
# are logged regardless of their duration. 1.0 logs all
# statements from all transactions, 0.0 never logs.

# - What to Log -

#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = '%m [%p] ' # special values:
# %a = application name
|
||||
# %u = user name
|
||||
# %d = database name
|
||||
# %r = remote host and port
|
||||
# %h = remote host
|
||||
# %p = process ID
|
||||
# %t = timestamp without milliseconds
|
||||
# %m = timestamp with milliseconds
|
||||
# %n = timestamp with milliseconds (as a Unix epoch)
|
||||
# %i = command tag
|
||||
# %e = SQL state
|
||||
# %c = session ID
|
||||
# %l = session line number
|
||||
# %s = session start timestamp
|
||||
# %v = virtual transaction ID
|
||||
# %x = transaction ID (0 if none)
|
||||
# %q = stop here in non-session
|
||||
# processes
|
||||
# %% = '%'
|
||||
# e.g. '<%u%%%d> '
|
||||
#log_lock_waits = off # log lock waits >= deadlock_timeout
|
||||
#log_statement = 'none' # none, ddl, mod, all
|
||||
#log_replication_commands = off
|
||||
#log_temp_files = -1 # log temporary files equal or larger
|
||||
# than the specified size in kilobytes;
|
||||
# -1 disables, 0 logs all temp files
|
||||
log_timezone = 'UTC'
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# PROCESS TITLE
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#cluster_name = '' # added to process titles if nonempty
|
||||
# (change requires restart)
|
||||
#update_process_title = on
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# STATISTICS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Query and Index Statistics Collector -
|
||||
|
||||
#track_activities = on
|
||||
#track_counts = on
|
||||
#track_io_timing = off
|
||||
#track_functions = none # none, pl, all
|
||||
#track_activity_query_size = 1024 # (change requires restart)
|
||||
#stats_temp_directory = 'pg_stat_tmp'
|
||||
|
||||
|
||||
# - Monitoring -
|
||||
|
||||
#log_parser_stats = off
|
||||
#log_planner_stats = off
|
||||
#log_executor_stats = off
|
||||
#log_statement_stats = off
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# AUTOVACUUM
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#autovacuum = on # Enable autovacuum subprocess? 'on'
|
||||
# requires track_counts to also be on.
|
||||
#log_autovacuum_min_duration = -1 # -1 disables, 0 logs all actions and
|
||||
# their durations, > 0 logs only
|
||||
# actions running at least this number
|
||||
# of milliseconds.
|
||||
#autovacuum_max_workers = 3 # max number of autovacuum subprocesses
|
||||
# (change requires restart)
|
||||
#autovacuum_naptime = 1min # time between autovacuum runs
|
||||
#autovacuum_vacuum_threshold = 50 # min number of row updates before
|
||||
# vacuum
|
||||
#autovacuum_analyze_threshold = 50 # min number of row updates before
|
||||
# analyze
|
||||
#autovacuum_vacuum_scale_factor = 0.2 # fraction of table size before vacuum
|
||||
#autovacuum_analyze_scale_factor = 0.1 # fraction of table size before analyze
|
||||
#autovacuum_freeze_max_age = 200000000 # maximum XID age before forced vacuum
|
||||
# (change requires restart)
|
||||
#autovacuum_multixact_freeze_max_age = 400000000 # maximum multixact age
|
||||
# before forced vacuum
|
||||
# (change requires restart)
|
||||
#autovacuum_vacuum_cost_delay = 2ms # default vacuum cost delay for
|
||||
# autovacuum, in milliseconds;
|
||||
# -1 means use vacuum_cost_delay
|
||||
#autovacuum_vacuum_cost_limit = -1 # default vacuum cost limit for
|
||||
# autovacuum, -1 means use
|
||||
# vacuum_cost_limit
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CLIENT CONNECTION DEFAULTS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Statement Behavior -
|
||||
|
||||
#client_min_messages = notice # values in order of decreasing detail:
|
||||
# debug5
|
||||
# debug4
|
||||
# debug3
|
||||
# debug2
|
||||
# debug1
|
||||
# log
|
||||
# notice
|
||||
# warning
|
||||
# error
|
||||
#search_path = '"$user", public' # schema names
|
||||
#row_security = on
|
||||
#default_tablespace = '' # a tablespace name, '' uses the default
|
||||
#temp_tablespaces = '' # a list of tablespace names, '' uses
|
||||
# only default tablespace
|
||||
#default_table_access_method = 'heap'
|
||||
#check_function_bodies = on
|
||||
#default_transaction_isolation = 'read committed'
|
||||
#default_transaction_read_only = off
|
||||
#default_transaction_deferrable = off
|
||||
#session_replication_role = 'origin'
|
||||
#statement_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#lock_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#idle_in_transaction_session_timeout = 0 # in milliseconds, 0 is disabled
|
||||
#vacuum_freeze_min_age = 50000000
|
||||
#vacuum_freeze_table_age = 150000000
|
||||
#vacuum_multixact_freeze_min_age = 5000000
|
||||
#vacuum_multixact_freeze_table_age = 150000000
|
||||
#vacuum_cleanup_index_scale_factor = 0.1 # fraction of total number of tuples
|
||||
# before index cleanup, 0 always performs
|
||||
# index cleanup
|
||||
#bytea_output = 'hex' # hex, escape
|
||||
#xmlbinary = 'base64'
|
||||
#xmloption = 'content'
|
||||
#gin_fuzzy_search_limit = 0
|
||||
#gin_pending_list_limit = 4MB
|
||||
|
||||
# - Locale and Formatting -
|
||||
|
||||
datestyle = 'iso, mdy'
|
||||
#intervalstyle = 'postgres'
|
||||
timezone = 'UTC'
|
||||
#timezone_abbreviations = 'Default' # Select the set of available time zone
|
||||
# abbreviations. Currently, there are
|
||||
# Default
|
||||
# Australia (historical usage)
|
||||
# India
|
||||
# You can create your own file in
|
||||
# share/timezonesets/.
|
||||
#extra_float_digits = 1 # min -15, max 3; any value >0 actually
|
||||
# selects precise output mode
|
||||
#client_encoding = sql_ascii # actually, defaults to database
|
||||
# encoding
|
||||
|
||||
# Sourcegraph: Use 'en_US.utf8' locale
|
||||
# These settings are initialized by initdb, but they can be changed.
|
||||
lc_messages = 'en_US.utf8' # locale for system error message
|
||||
# strings
|
||||
lc_monetary = 'en_US.utf8' # locale for monetary formatting
|
||||
lc_numeric = 'en_US.utf8' # locale for number formatting
|
||||
lc_time = 'en_US.utf8' # locale for time formatting
|
||||
|
||||
# default configuration for text search
|
||||
default_text_search_config = 'pg_catalog.english'
|
||||
|
||||
# - Shared Library Preloading -
|
||||
|
||||
#shared_preload_libraries = '' # (change requires restart)
|
||||
#local_preload_libraries = ''
|
||||
#session_preload_libraries = ''
|
||||
#jit_provider = 'llvmjit' # JIT library to use
|
||||
|
||||
# - Other Defaults -
|
||||
|
||||
#dynamic_library_path = '$libdir'
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# LOCK MANAGEMENT
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#deadlock_timeout = 1s
|
||||
#max_locks_per_transaction = 64 # min 10
|
||||
# (change requires restart)
|
||||
#max_pred_locks_per_transaction = 64 # min 10
|
||||
# (change requires restart)
|
||||
#max_pred_locks_per_relation = -2 # negative values mean
|
||||
# (max_pred_locks_per_transaction
|
||||
# / -max_pred_locks_per_relation) - 1
|
||||
#max_pred_locks_per_page = 2 # min 0
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# VERSION AND PLATFORM COMPATIBILITY
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# - Previous PostgreSQL Versions -
|
||||
|
||||
#array_nulls = on
|
||||
#backslash_quote = safe_encoding # on, off, or safe_encoding
|
||||
#escape_string_warning = on
|
||||
#lo_compat_privileges = off
|
||||
#operator_precedence_warning = off
|
||||
#quote_all_identifiers = off
|
||||
#standard_conforming_strings = on
|
||||
#synchronize_seqscans = on
|
||||
|
||||
# - Other Platforms and Clients -
|
||||
|
||||
#transform_null_equals = off
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# ERROR HANDLING
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
#exit_on_error = off # terminate session on any error?
|
||||
#restart_after_crash = on # reinitialize after backend crash?
|
||||
#data_sync_retry = off # retry or panic on failure to fsync
|
||||
# data?
|
||||
# (change requires restart)
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CONFIG FILE INCLUDES
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# These options allow settings to be loaded from files other than the
|
||||
# default postgresql.conf. Note that these are directives, not variable
|
||||
# assignments, so they can usefully be given more than once.
|
||||
|
||||
#include_dir = '...' # include files ending in '.conf' from
|
||||
# a directory, e.g., 'conf.d'
|
||||
#include_if_exists = '...' # include file only if it exists
|
||||
#include = '...' # include file
|
||||
|
||||
|
||||
#------------------------------------------------------------------------------
|
||||
# CUSTOMIZED OPTIONS
|
||||
#------------------------------------------------------------------------------
|
||||
|
||||
# Add settings for extensions here
|
||||
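The CONFIG FILE INCLUDES directives above are the supported way to layer extra settings on top of this file. A minimal sketch, assuming $PGDATA points at the initialized data directory; the file name and setting are illustrative only:

    mkdir -p "$PGDATA/conf.d"
    echo "include_dir = 'conf.d'" >> "$PGDATA/postgresql.conf"
    echo "log_min_duration_statement = 250" > "$PGDATA/conf.d/10-slow-queries.conf"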
43 docker-images/codeinsights-db/image_test.yaml Normal file
@@ -0,0 +1,43 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "postgres is runnable"
    command: "postgres"
    args:
      - --version

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
  - name: "postgres user has correct uid"
    command: "/usr/bin/id"
    args:
      - -u
    expectedOutput: ["^70\n"]
    exitCode: 0
  - name: "postgres user has correct gid"
    command: "/usr/bin/id"
    args:
      - -g
    expectedOutput: ["^70\n"]
    exitCode: 0

fileExistenceTests:
  - name: '/data/pgdata-12'
    path: '/data/pgdata-12'
    shouldExist: true
    uid: 70
    gid: 70

metadataTest:
  envVars:
    - key: PGDATA
      value: .+
      isRegex: true
    - key: LANG
      value: 'en_US.utf8'
    - key: PGHOST
      value: '/var/run/postgresql'
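This file uses the container-structure-test schema consumed by the container_structure_test rules in the new BUILD files. The same checks can also be run outside Bazel against a locally loaded image; the tag below follows the repo_tags convention used elsewhere in this change and is illustrative:

    container-structure-test test \
      --image codeinsights-db:candidate \
      --config docker-images/codeinsights-db/image_test.yaml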
15 docker-images/codeinsights-db/rootfs/conf.sh Executable file
@@ -0,0 +1,15 @@
#!/usr/bin/env bash

set -x
# Copies postgresql.conf over from /conf/postgresql.conf if it exists

if [ ! -d "/conf" ] || [ ! -f "/conf/postgresql.conf" ]; then
  exit 0
fi

cp /conf/postgresql.conf "$PGDATA/postgresql.conf"

# allow the container to be started with `--user`
if [ "$(id -u)" = '0' ]; then
  chown postgres:postgres "$PGDATA/postgresql.conf"
fi
3 docker-images/codeinsights-db/rootfs/env.sh Executable file
@@ -0,0 +1,3 @@
#!/usr/bin/env bash

export REINDEX_COMPLETED_FILE="${PGDATA}/3.31-reindex.completed"
186 docker-images/codeinsights-db/rootfs/initdb.sh Executable file
@@ -0,0 +1,186 @@
#!/usr/bin/env bash

# Adapted from https://github.com/docker-library/postgres/blob/master/11/docker-entrypoint.sh
# to support running this separately from starting postgres. See postgres.sh for usage.

set -Eexo pipefail
# TODO swap to -Eeuo pipefail above (after handling all potentially-unset variables)

# usage: file_env VAR [DEFAULT]
# ie: file_env 'XYZ_DB_PASSWORD' 'example'
# (will allow for "$XYZ_DB_PASSWORD_FILE" to fill in the value of
# "$XYZ_DB_PASSWORD" from a file, especially for Docker's secrets feature)
file_env() {
  local var="$1"
  local fileVar="${var}_FILE"
  local def="${2:-}"
  if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
    echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
    exit 1
  fi
  local val="$def"
  if [ "${!var:-}" ]; then
    val="${!var}"
  elif [ "${!fileVar:-}" ]; then
    val="$(<"${!fileVar}")"
  fi
  export "$var"="$val"
  unset "$fileVar"
}

# allow the container to be started with `--user`
if [ "$(id -u)" = '0' ]; then
  mkdir -p "$PGDATA"
  chown -R postgres "$PGDATA"
  chmod 700 "$PGDATA"

  mkdir -p /var/run/postgresql
  chown -R postgres /var/run/postgresql
  chmod 775 /var/run/postgresql

  # Create the transaction log directory before initdb is run (below) so the directory is owned by the correct user
  if [ "$POSTGRES_INITDB_WALDIR" ]; then
    mkdir -p "$POSTGRES_INITDB_WALDIR"
    chown -R postgres "$POSTGRES_INITDB_WALDIR"
    chmod 700 "$POSTGRES_INITDB_WALDIR"
  fi

  # TODO@davejrt is this fix what you meant?
  su-exec postgres "${BASH_SOURCE[0]}" "$@"
fi

mkdir -p "$PGDATA"
chown -R "$(id -u)" "$PGDATA" 2>/dev/null || :
chmod 700 "$PGDATA" 2>/dev/null || :

# look specifically for PG_VERSION, as it is expected in the DB dir
if [ ! -s "$PGDATA/PG_VERSION" ]; then
  # "initdb" is particular about the current user existing in "/etc/passwd", so we use "nss_wrapper" to fake that if necessary
  # see https://github.com/docker-library/postgres/pull/253, https://github.com/docker-library/postgres/issues/359, https://cwrap.org/nss_wrapper.html
  if ! getent passwd "$(id -u)" &>/dev/null && [ -e /usr/lib/libnss_wrapper.so ]; then
    export LD_PRELOAD='/usr/lib/libnss_wrapper.so'

    NSS_WRAPPER_PASSWD="$(mktemp)"
    export NSS_WRAPPER_PASSWD

    NSS_WRAPPER_GROUP="$(mktemp)"
    export NSS_WRAPPER_GROUP

    echo "postgres:x:$(id -u):$(id -g):PostgreSQL:$PGDATA:/bin/false" >"$NSS_WRAPPER_PASSWD"
    echo "postgres:x:$(id -g):" >"$NSS_WRAPPER_GROUP"
  fi

  file_env 'POSTGRES_USER' 'postgres'
  file_env 'POSTGRES_PASSWORD'

  file_env 'POSTGRES_INITDB_ARGS'
  if [ "$POSTGRES_INITDB_WALDIR" ]; then
    export POSTGRES_INITDB_ARGS="$POSTGRES_INITDB_ARGS --waldir $POSTGRES_INITDB_WALDIR"
  fi
  eval 'initdb --username="$POSTGRES_USER" --pwfile=<(echo "$POSTGRES_PASSWORD") '"$POSTGRES_INITDB_ARGS"

  # unset/cleanup "nss_wrapper" bits
  if [ "${LD_PRELOAD:-}" = '/usr/lib/libnss_wrapper.so' ]; then
    rm -f "$NSS_WRAPPER_PASSWD" "$NSS_WRAPPER_GROUP"
    unset LD_PRELOAD NSS_WRAPPER_PASSWD NSS_WRAPPER_GROUP
  fi

  # check password first so we can output the warning before postgres
  # messes it up
  if [ -n "$POSTGRES_PASSWORD" ]; then
    authMethod=md5

    if [ "${#POSTGRES_PASSWORD}" -ge 100 ]; then
      cat >&2 <<-'EOWARN'
WARNING: The supplied POSTGRES_PASSWORD is 100+ characters.

This will not work if used via PGPASSWORD with "psql".

https://www.postgresql.org/message-id/flat/E1Rqxp2-0004Qt-PL%40wrigleys.postgresql.org (BUG #6412)
https://github.com/docker-library/postgres/issues/507

EOWARN
    fi
  else
    # The - option suppresses leading tabs but *not* spaces. :)
    cat >&2 <<-'EOWARN'
****************************************************
WARNING: No password has been set for the database.
This will allow anyone with access to the
Postgres port to access your database. In
Docker's default configuration, this is
effectively any other container on the same
system.

Use "-e POSTGRES_PASSWORD=password" to set
it in "docker run".
****************************************************
EOWARN

    authMethod=trust
  fi

  {
    echo
    echo "host all all all $authMethod"
  } >>"$PGDATA/pg_hba.conf"

  # internal start of server in order to allow set-up using psql-client
  # does not listen on external TCP/IP and waits until start finishes
  PGUSER="${PGUSER:-$POSTGRES_USER}" \
    pg_ctl -D "$PGDATA" \
    -o "-c listen_addresses='' -c unix_socket_directories=/var/run/postgresql" \
    -w start

  file_env 'POSTGRES_DB' "$POSTGRES_USER"

  export PGPASSWORD="${PGPASSWORD:-$POSTGRES_PASSWORD}"
  psql=(psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" --no-password)

  if [ "$POSTGRES_DB" != 'postgres' ]; then
    "${psql[@]}" --dbname postgres --set db="$POSTGRES_DB" <<-'EOSQL'
CREATE DATABASE :"db" ;
EOSQL
    echo
  fi
  psql+=(--dbname "$POSTGRES_DB")

  echo
  for f in /docker-entrypoint-initdb.d/*; do
    case "$f" in
      *.sh)
        # https://github.com/docker-library/postgres/issues/450#issuecomment-393167936
        # https://github.com/docker-library/postgres/pull/452
        if [ -x "$f" ]; then
          echo "$0: running $f"
          "$f"
        else
          echo "$0: sourcing $f"
          # shellcheck source=/dev/null
          . "$f"
        fi
        ;;
      *.sql)
        echo "$0: running $f"
        "${psql[@]}" -f "$f"
        echo
        ;;
      *.sql.gz)
        echo "$0: running $f"
        gunzip -c "$f" | "${psql[@]}"
        echo
        ;;
      *) echo "$0: ignoring $f" ;;
    esac
    echo
  done

  PGUSER="${PGUSER:-$POSTGRES_USER}" \
    pg_ctl -D "$PGDATA" -m fast -w stop

  unset PGPASSWORD

  echo
  echo 'PostgreSQL init process complete; ready for start up.'
  echo
fi
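The file_env helper above is what lets any POSTGRES_* variable be supplied via a *_FILE indirection, for example with Docker secrets. A sketch of how it is typically exercised; the secret path and image tag are illustrative:

    docker run -d \
      -v /srv/secrets/pg_password:/run/secrets/pg_password:ro \
      -e POSTGRES_PASSWORD_FILE=/run/secrets/pg_password \
      codeinsights-db:candidate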
19 docker-images/codeinsights-db/rootfs/liveness.sh Executable file
@@ -0,0 +1,19 @@
#!/usr/bin/env bash

set -euxo pipefail
# shellcheck source=./env.sh
source /env.sh

# This script checks to see if postgres is alive. It uses the ready check, but
# additionally ignores upgrades to give the container enough time to
# re-compute indexes. It is expected to be used by a Kubernetes liveness probe.

# Ensure we are in the same dir as ready.sh
cd "$(dirname "${BASH_SOURCE[0]}")"

if [ -s "${PGDATA}/PG_VERSION" ] && [ ! -s "${REINDEX_COMPLETED_FILE}" ]; then
  echo "[INFO] Postgres is re-indexing..."
  exit 0
fi

./ready.sh
18 docker-images/codeinsights-db/rootfs/patch-conf.sh Executable file
@@ -0,0 +1,18 @@
#!/bin/bash

# In Wolfi, unix_socket_directories defaults to /tmp. In previous Alpine images, this defaulted to /var/run/postgresql.
# /tmp may not be writable, so any existing postgresql.conf configs that predate the Wolfi migration should be patched to update this setting.

CONFIG_DIR=${PGDATA:-/data/pgdata-12}

conf_file="$CONFIG_DIR/postgresql.conf"
new_socket_dir="/var/run/postgresql"

# Check if the parameter already exists in the file
if grep -q "^\s*unix_socket_directories" "$conf_file"; then
  echo "unix_socket_directories already exists in $conf_file"
else
  # Append the setting to the end of the file
  echo "unix_socket_directories = '$new_socket_dir'" >>"$conf_file"
  echo "Updated unix_socket_directories in $conf_file"
fi
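A quick way to confirm the patched socket directory took effect, assuming the defaults from the postgres-12-alpine image definition further down (POSTGRES_USER=sg, POSTGRES_DB=sg); illustrative only:

    psql -h /var/run/postgresql -U sg -d sg -c 'select 1;'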
31 docker-images/codeinsights-db/rootfs/postgres-wolfi.sh Executable file
@@ -0,0 +1,31 @@
#!/usr/bin/env bash

set -euxo pipefail
cd /

# shellcheck source=./env.sh
source /env.sh

# Allow the container to be started with root in Kubernetes and change permissions
# of the parent volume directory to be owned entirely by the postgres user.
if [ "$(id -u)" = '0' ]; then
  mkdir -p "$PGDATA"
  chown -R postgres:postgres "$(dirname "$PGDATA")"
  chmod 750 "$(dirname "$PGDATA")" "$PGDATA"
  su-exec postgres "${BASH_SOURCE[0]}" "$@"
fi

if [ ! -s "$PGDATA/PG_VERSION" ]; then
  echo "[INFO] Initializing Postgres database '$POSTGRES_DB' from scratch in $PGDATA"
  /initdb.sh
fi

/conf.sh
/patch-conf.sh

if [ ! -s "${REINDEX_COMPLETED_FILE}" ]; then
  echo "[INFO] Re-creating all indexes for database '$POSTGRES_DB'"
  /reindex.sh
fi

exec postgres
30 docker-images/codeinsights-db/rootfs/postgres.sh Executable file
@@ -0,0 +1,30 @@
#!/usr/bin/env bash

set -euxo pipefail
cd /var/lib/postgresql

# shellcheck source=./env.sh
source /env.sh

# Allow the container to be started with root in Kubernetes and change permissions
# of the parent volume directory to be owned entirely by the postgres user.
if [ "$(id -u)" = '0' ]; then
  mkdir -p "$PGDATA"
  chown -R postgres:postgres "$(dirname "$PGDATA")"
  chmod 750 "$(dirname "$PGDATA")" "$PGDATA"
  su-exec postgres "${BASH_SOURCE[0]}" "$@"
fi

if [ ! -s "$PGDATA/PG_VERSION" ]; then
  echo "[INFO] Initializing Postgres database '$POSTGRES_DB' from scratch in $PGDATA"
  /initdb.sh
fi

/conf.sh

if [ ! -s "${REINDEX_COMPLETED_FILE}" ]; then
  echo "[INFO] Re-creating all indexes for database '$POSTGRES_DB'"
  /reindex.sh
fi

exec postgres
22 docker-images/codeinsights-db/rootfs/ready.sh Executable file
@@ -0,0 +1,22 @@
#!/usr/bin/env bash

set -euxo pipefail

# This script checks to see if postgres is alive. It is expected to be used by
# a Kubernetes ready probe.

# We check if the TCP port is available since that is how clients will
# connect. While re-indexing only the unix port will be available, so we
# specifically want to avoid reporting ready in that case.

if [ -n "$POSTGRES_PASSWORD" ]; then
  export PGPASSWORD="$POSTGRES_PASSWORD"
fi

export PGUSER="$POSTGRES_USER"
export PGDATABASE="$POSTGRES_DB"
export PGCONNECT_TIMEOUT=10

# Check if we can run a simple query. If it fails the reason will be printed
# to stderr and we will have a non-zero exit code.
psql --no-password --tuples-only --no-align -c 'select 1;' >/dev/null
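liveness.sh and ready.sh are meant to back the two Kubernetes probes. Roughly equivalent manual checks, assuming a deployment named codeinsights-db (the name is illustrative, the manifests live outside this diff):

    kubectl exec deploy/codeinsights-db -- /liveness.sh   # tolerates the re-indexing window
    kubectl exec deploy/codeinsights-db -- /ready.sh      # requires postgres to answer queries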
125 docker-images/codeinsights-db/rootfs/reindex.sh Executable file
@@ -0,0 +1,125 @@
#!/usr/bin/env bash

cd "$(dirname "${BASH_SOURCE[0]}")"
set -Eexo pipefail

# shellcheck source=./env.sh
source /env.sh

file_env() {
  local var="$1"
  local fileVar="${var}_FILE"
  local def="${2:-}"
  if [ "${!var:-}" ] && [ "${!fileVar:-}" ]; then
    echo >&2 "error: both $var and $fileVar are set (but are exclusive)"
    exit 1
  fi
  local val="$def"
  if [ "${!var:-}" ]; then
    val="${!var}"
  elif [ "${!fileVar:-}" ]; then
    val="$(<"${!fileVar}")"
  fi
  export "$var"="$val"
  unset "$fileVar"
}

prepare_env() {
  file_env 'POSTGRES_USER' 'postgres'
  file_env 'POSTGRES_PASSWORD'
  file_env 'POSTGRES_DB'
  export PGUSER="${PGUSER:-$POSTGRES_USER}"
  export PGPASSWORD="${PGPASSWORD:-$POSTGRES_PASSWORD}"
  export PGDATABASE="${PGDATABASE:-$POSTGRES_DB}"
}

unset_env() {
  unset PGPASSWORD
}

postgres_is_running() {
  local pid="$1"

  local proc_entry="/proc/$pid/comm"
  [[ -s "$proc_entry" ]] && grep -q '^postgres$' "$proc_entry"
}

postgres_stop_cleanly() {
  pg_ctl -D "$PGDATA" -m fast -w stop
}

postgres_stop() {
  # This logic handles the case where we've restored a snapshot
  # that was taken from a still running or improperly shutdown
  # postgres instance. We'll need to check to see if postgres is
  # actually still running under the pid specified in the postmaster.pid
  # file. If it is, we shut it down properly. If it isn't, we
  # delete the pid file so that we can start up properly.
  local postmaster_file="$PGDATA/postmaster.pid"

  if ! [[ -s "$postmaster_file" ]]; then
    # postgres isn't running - nothing to do
    return 0
  fi

  local pid
  pid="$(head -1 "$postmaster_file")"

  if postgres_is_running "$pid"; then
    # postgres is currently running in the container - shut it down cleanly
    postgres_stop_cleanly
    return 0
  fi

  # we have a postmaster file, but a postgres process isn't running anymore.
  # remove the postmaster file - we can't do any better here
  rm "$postmaster_file" || true
}

postgres_start() {
  # internal start of server in order to allow set-up using psql-client
  # - does not listen on external TCP/IP and waits until start finishes
  # - "-P" prevents Postgres from using indexes for system catalog lookups -
  #   see https://www.postgresql.org/docs/12/sql-reindex.html

  pg_ctl -D "$PGDATA" \
    -o "-c listen_addresses=''" \
    -o "-P" \
    -w restart
}

cleanup() {
  postgres_stop_cleanly
  unset_env
}

reindex() {
  reindexdb --no-password --verbose --echo "$@"
}

# allow the container to be started with `--user`
if [ "$(id -u)" = '0' ]; then
  su-exec postgres "${BASH_SOURCE[0]}" "$@"
fi

# look specifically for REINDEX_COMPLETED_FILE, as it is expected in the DB dir
if [ ! -s "${REINDEX_COMPLETED_FILE}" ]; then
  prepare_env
  postgres_stop
  postgres_start
  trap cleanup EXIT

  echo
  echo 'PostgreSQL must rebuild its indexes. This process can take up to a few hours on systems with a large dataset.'
  echo

  reindex --system
  reindex --all

  # mark reindexing process as done
  echo "Re-indexing for 3.31 release completed successfully at $(date)" >"${REINDEX_COMPLETED_FILE}"

  echo
  echo 'PostgreSQL reindexing process complete - ready for start up.'
  echo
fi
30 docker-images/codeintel-db/BUILD.bazel generated Normal file
@@ -0,0 +1,30 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

oci_image(
    name = "image",
    base = "//docker-images/postgres-12-alpine:image",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["codeintel-db:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("codeintel-db"),
)
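BUILD files like this one all follow a single rules_oci pattern: oci_image assembles the layers, oci_tarball loads a local candidate image, container_structure_test validates it, and oci_push publishes it. A sketch of exercising the targets locally, assuming a working Bazel setup:

    bazel run //docker-images/codeintel-db:image_tarball    # load codeintel-db:candidate into Docker
    bazel test //docker-images/codeintel-db:image_test      # run image_test.yaml against the image
    bazel run //docker-images/codeintel-db:candidate_push   # push to the repository configured via image_repository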
43 docker-images/codeintel-db/image_test.yaml Normal file
@@ -0,0 +1,43 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "postgres is runnable"
    command: "postgres"
    args:
      - --version

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
  - name: "postgres user has correct uid"
    command: "/usr/bin/id"
    args:
      - -u
    expectedOutput: ["^999\n"]
    exitCode: 0
  - name: "postgres user has correct gid"
    command: "/usr/bin/id"
    args:
      - -g
    expectedOutput: ["^999\n"]
    exitCode: 0

fileExistenceTests:
  - name: '/data/pgdata-12'
    path: '/data/pgdata-12'
    shouldExist: true
    uid: 999
    gid: 999

metadataTest:
  envVars:
    - key: PGDATA
      value: .+
      isRegex: true
    - key: LANG
      value: 'en_US.utf8'
    - key: PGHOST
      value: '/var/run/postgresql'
53 docker-images/grafana/BUILD.bazel generated Normal file
@@ -0,0 +1,53 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

filegroup(
    name = "config_files",
    srcs = glob(["config/*"]) + ["entry-bazel.sh"],
)

pkg_tar(
    name = "config_tar",
    srcs = [
        ":config_files",
        "//docker-images/grafana/config",
        "//monitoring:generate_config_zip",
    ],
    remap_paths = {
        "docker-images/grafana/config": "/sg_config_grafana",
        "monitoring/outputs/grafana": "/sg_config_grafana/provisioning/dashboards/sourcegraph",
        "/entry-bazel.sh": "/entry.sh",
    },
)

oci_image(
    name = "image",
    base = "@wolfi_grafana_base",
    entrypoint = ["/entry.sh"],
    tars = [":config_tar"],
    user = "grafana",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["grafana:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("grafana"),
)
@@ -11,8 +11,8 @@ cleanup() {
 }
 trap cleanup EXIT

-./dev/ci/bazel.sh build //monitoring:generate_config
-monitoring_cfg=$(./dev/ci/bazel.sh cquery //monitoring:generate_config --output=files)
+./dev/ci/bazel.sh build //monitoring:generate_config_zip
+monitoring_cfg=$(./dev/ci/bazel.sh cquery //monitoring:generate_config_zip --output=files)

 cp "$monitoring_cfg" "$TMP"
 pushd "$TMP"
8 docker-images/grafana/config/BUILD.bazel generated Normal file
@@ -0,0 +1,8 @@
filegroup(
    name = "config",
    srcs = glob([
        "grafana*.ini",
        "provisioning/**/*.yaml",
    ]),
    visibility = ["//visibility:public"],
)
16 docker-images/grafana/entry-bazel.sh Executable file
@@ -0,0 +1,16 @@
#!/usr/bin/env bash
set -e

export GF_PATHS_PROVISIONING=/sg_config_grafana/provisioning
export GF_PATHS_CONFIG=/sg_config_grafana/grafana.ini

exec grafana-server \
  --homepath="$GF_PATHS_HOME" \
  --config="$GF_PATHS_CONFIG" \
  --packaging=docker \
  "$@" \
  cfg:default.log.mode="console" \
  cfg:default.paths.data="$GF_PATHS_DATA" \
  cfg:default.paths.logs="$GF_PATHS_LOGS" \
  cfg:default.paths.plugins="$GF_PATHS_PLUGINS" \
  cfg:default.paths.provisioning="$GF_PATHS_PROVISIONING"
28 docker-images/grafana/image_test.yaml Normal file
@@ -0,0 +1,28 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "grafana-server is runnable"
    command: "grafana-server"
    args:
      - -v

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

fileExistenceTests:
  - name: '/opt/grafana/entry.sh'
    path: '/opt/grafana/entry.sh'
    shouldExist: true
    uid: 0
    gid: 0
    permissions: '-rwxr-xr-x'
  - name: 'Dashboard config'
    path: '/sg_config_grafana/provisioning/dashboards/sourcegraph/gitserver.json'
    shouldExist: true
    uid: 0
    gid: 0
    permissions: '-rwxr-xr-x'
56 docker-images/indexed-searcher/BUILD.bazel generated Normal file
@@ -0,0 +1,56 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("//cmd/server:macro.bzl", "container_dependencies", "dependencies_tars")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

DEPS = ["@com_github_sourcegraph_zoekt//cmd/zoekt-webserver"]

container_dependencies(DEPS)

filegroup(
    name = "entrypoint",
    srcs = ["entry.sh"],
)

pkg_tar(
    name = "entry_tar",
    srcs = [":entrypoint"],
)

oci_image(
    name = "image",
    base = "@wolfi_base",
    entrypoint = [
        "/entry.sh",
    ],
    env = {
        "DATA_DIR": "/data/index",
        "GOGC": "25",
    },
    tars = dependencies_tars(DEPS) + [":entry_tar"],
    user = "sourcegraph",
    workdir = "/home/sourcegraph",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["zoekt-webserver:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("indexed-searcher"),
)
@@ -15,8 +15,6 @@ LABEL org.opencontainers.image.created=${DATE}
LABEL org.opencontainers.image.version=${VERSION}

ENV DATA_DIR /data/index
RUN mkdir -p ${DATA_DIR}
RUN chown -R sourcegraph:sourcegraph /data

USER sourcegraph
WORKDIR /home/sourcegraph
3 docker-images/indexed-searcher/entry.sh Normal file
@@ -0,0 +1,3 @@
#!/bin/sh

/sbin/tini -s -- zoekt-webserver -index "$DATA_DIR" -pprof -rpc -indexserver_proxy
29 docker-images/indexed-searcher/image_test.yaml Normal file
@@ -0,0 +1,29 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "zoekt-webserver is runnable"
    command: "zoekt-webserver"
    args:
      - --version

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0

fileExistenceTests:
  - name: '/data/index'
    path: '/data/index'
    shouldExist: true
    uid: 100
    gid: 101
    permissions: 'drwxr-xr-x'

metadataTest:
  envVars:
    - key: DATA_DIR
      value: '/data/index'
    - key: GOGC
      value: 25
31 docker-images/jaeger-agent/BUILD.bazel generated Normal file
@@ -0,0 +1,31 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("//dev:oci_defs.bzl", "image_repository")
load("@container_structure_test//:defs.bzl", "container_structure_test")

oci_image(
    name = "image",
    base = "@wolfi_jaeger_agent_base",
    entrypoint = ["/usr/local/bin/jaeger-agent"],
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["jaeger-agent:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("jaeger-agent"),
)
20 docker-images/jaeger-agent/image_test.yaml Normal file
@@ -0,0 +1,20 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "jaeger-agent is runnable"
    command: "jaeger-agent"
    args:
      - --help

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
  - name: "running as jaeger"
    command: "/usr/bin/id"
    args:
      - -u
    expectedOutput: ["^10001"]
    exitCode: 0
52 docker-images/jaeger-all-in-one/BUILD.bazel generated Normal file
@@ -0,0 +1,52 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//dev:oci_defs.bzl", "image_repository")
load("@container_structure_test//:defs.bzl", "container_structure_test")

filegroup(
    name = "config",
    srcs = glob(["config/*"]),
)

pkg_tar(
    name = "config_tar",
    srcs = [":config"],
    remap_paths = {"": "/etc/jaeger/"},
)

oci_image(
    name = "image",
    base = "@wolfi_jaeger_all_in_one_base",
    cmd = ["--sampling.strategies-file=/etc/jaeger/sampling_strategies.json"],
    entrypoint = ["/usr/local/bin/jaeger-all-in-one"],
    env = {
        # Used in order to reverse proxy the Jaeger UI
        "QUERY_BASE_PATH": "/-/debug/jaeger",
        # Default configuration file for setting sampling strategies, we override the command in docker-compose
        "SAMPLING_STRATEGIES_FILE": "/etc/jaeger/sampling_strategies.json",
    },
    tars = [":config_tar"],
    user = "jaeger",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["jaeger-all-in-one:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("jaeger-all-in-one"),
)
41 docker-images/jaeger-all-in-one/image_test.yaml Normal file
@@ -0,0 +1,41 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "jaeger-all-in-one is runnable"
    command: "jaeger-all-in-one"
    args:
      - --help

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
  - name: "running as jaeger"
    command: "/usr/bin/id"
    args:
      - -u
    expectedOutput: ["^10001"]
    exitCode: 0

fileExistenceTests:
  - name: '/etc/jaeger/sampling_strategies.json'
    path: '/etc/jaeger/sampling_strategies.json'
    shouldExist: true
    uid: 0
    gid: 0
    permissions: '-r-xr-xr-x'
  - name: '/tmp'
    path: '/tmp'
    shouldExist: true
    uid: 10001
    gid: 0
    permissions: 'drwxrwxrwx'

metadataTest:
  envVars:
    - key: QUERY_BASE_PATH
      value: '/-/debug/jaeger'
    - key: SAMPLING_STRATEGIES_FILE
      value: '/etc/jaeger/sampling_strategies.json'
32 docker-images/node-exporter/BUILD.bazel generated Normal file
@@ -0,0 +1,32 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

oci_image(
    name = "image",
    base = "@wolfi_node_exporter_base",
    entrypoint = ["/usr/bin/node_exporter"],
    user = "nobody",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["node-exporter:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("node-exporter"),
)
14 docker-images/node-exporter/image_test.yaml Normal file
@@ -0,0 +1,14 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "node_exporter is runnable"
    command: "node_exporter"
    args:
      - --version

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
44 docker-images/opentelemetry-collector/BUILD.bazel generated Normal file
@@ -0,0 +1,44 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//dev:oci_defs.bzl", "image_repository")
load("@container_structure_test//:defs.bzl", "container_structure_test")

filegroup(
    name = "config",
    srcs = glob(["configs/*"]),
)

pkg_tar(
    name = "config_tar",
    srcs = [":config"],
    remap_paths = {"": "/etc/otel-collector/configs/"},
)

oci_image(
    name = "image",
    base = "@wolfi_opentelemetry_collector_base",
    entrypoint = ["/bin/otelcol-sourcegraph"],
    tars = [":config_tar"],
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["opentelemetry-collector:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("opentelemetry-collector"),
)
29 docker-images/opentelemetry-collector/image_test.yaml Normal file
@@ -0,0 +1,29 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "otelcol-sourcegraph is runnable"
    command: "otelcol-sourcegraph"
    args:
      - --version

  # TODO(security): This container should not be running as root
  # - name: "not running as root"
  #   command: "/usr/bin/id"
  #   args:
  #     - -u
  #   excludedOutput: ["^0"]
  #   exitCode: 0

fileExistenceTests:
  - name: '/otel-collector'
    path: '/otel-collector'
    shouldExist: true
    uid: 0
    gid: 0
    permissions: 'drwxr-xr-x'
  - name: 'Opentelemetry Configs'
    path: '/etc/otel-collector/configs/jaeger.yaml'
    shouldExist: true
    uid: 0
    gid: 0
    permissions: '-r-xr-xr-x'
65 docker-images/postgres-12-alpine/BUILD.bazel generated Normal file
@@ -0,0 +1,65 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("@container_structure_test//:defs.bzl", "container_structure_test")
load("//dev:oci_defs.bzl", "image_repository")

# TODO move this to a different folder

filegroup(
    name = "config",
    srcs = glob(
        ["rootfs/*"],
    ) + ["config/postgresql.conf.sample"],
)

pkg_tar(
    name = "config_tar",
    srcs = [
        ":config",
    ],
    remap_paths = {
        "/rootfs": "/",
        "/postgresql.conf.sample": "/usr/share/postgresql/postgresql.conf.sample",
    },
)

oci_image(
    name = "image",
    base = "@wolfi_postgresql-12_base",
    entrypoint = ["/postgres-wolfi.sh"],
    env = {
        "POSTGRES_PASSWORD": "",
        "POSTGRES_USER": "sg",
        "POSTGRES_DB": "sg",
        "PGDATA": "/data/pgdata-12",
        "LANG": "en_US.utf8",
        "PGHOST": "/var/run/postgresql",
    },
    tars = [":config_tar"],
    user = "postgres",
    visibility = [
        "//docker-images/codeintel-db:__pkg__",
    ],
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["postgres-12:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("postgres-12-alpine"),  # TODO careful, this is not an alpine
)
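To boot the resulting database image locally with the defaults baked into the env block above; the password value is illustrative, and bazel run on the oci_tarball target loads the candidate tag into the local Docker daemon:

    bazel run //docker-images/postgres-12-alpine:image_tarball
    docker run --rm -e POSTGRES_PASSWORD=dev -p 5432:5432 postgres-12:candidate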
@@ -13,17 +13,6 @@ FROM us.gcr.io/sourcegraph-dev/wolfi-postgresql-12-base:latest
# To retain compatibility with codeinsights-db and codeintel-db, user and group
# IDs are set here, rather than in the base image

ARG PING_UID=99
ARG POSTGRES_UID=999

# We modify the postgres user/group to reconcile with our previous debian based images
# and avoid issues with customers migrating.
RUN addgroup -g $PING_UID ping &&\
    adduser -D -u $POSTGRES_UID postgres postgres &&\
    mkdir -p /data/pgdata-12 && chown -R postgres:postgres /data &&\
    mkdir -p /var/lib/postgresql && chown -R postgres:postgres /var/lib/postgresql &&\
    mkdir -p /var/run/postgresql && chown -R postgres:postgres /var/run/postgresql

COPY rootfs /
# Overwrite default postgresql.conf.sample
COPY config/postgresql.conf.sample /usr/share/postgresql/postgresql.conf.sample
43 docker-images/postgres-12-alpine/image_test.yaml Normal file
@@ -0,0 +1,43 @@
schemaVersion: "2.0.0"

commandTests:
  - name: "postgres is runnable"
    command: "postgres"
    args:
      - --version

  - name: "not running as root"
    command: "/usr/bin/id"
    args:
      - -u
    excludedOutput: ["^0"]
    exitCode: 0
  - name: "postgres user has correct uid"
    command: "/usr/bin/id"
    args:
      - -u
    expectedOutput: ["^999\n"]
    exitCode: 0
  - name: "postgres user has correct gid"
    command: "/usr/bin/id"
    args:
      - -g
    expectedOutput: ["^999\n"]
    exitCode: 0

fileExistenceTests:
  - name: '/data/pgdata-12'
    path: '/data/pgdata-12'
    shouldExist: true
    uid: 999
    gid: 999

metadataTest:
  envVars:
    - key: PGDATA
      value: .+
      isRegex: true
    - key: LANG
      value: 'en_US.utf8'
    - key: PGHOST
      value: '/var/run/postgresql'
@@ -148,29 +148,29 @@ if [ ! -s "$PGDATA/PG_VERSION" ]; then
  echo
  for f in /docker-entrypoint-initdb.d/*; do
    case "$f" in
      *.sh)
        # https://github.com/docker-library/postgres/issues/450#issuecomment-393167936
        # https://github.com/docker-library/postgres/pull/452
        if [ -x "$f" ]; then
          echo "$0: running $f"
          "$f"
        else
          echo "$0: sourcing $f"
          # shellcheck source=/dev/null
          . "$f"
        fi
        ;;
      *.sql)
        echo "$0: running $f"
        "${psql[@]}" -f "$f"
        echo
        ;;
      *.sql.gz)
        echo "$0: running $f"
        gunzip -c "$f" | "${psql[@]}"
        echo
        ;;
      *) echo "$0: ignoring $f" ;;
    esac
    echo
  done
50 docker-images/postgres_exporter/BUILD.bazel generated Normal file
@@ -0,0 +1,50 @@
load("@rules_oci//oci:defs.bzl", "oci_image", "oci_push", "oci_tarball")
load("@rules_pkg//:pkg.bzl", "pkg_tar")
load("//dev:oci_defs.bzl", "image_repository")
load("@container_structure_test//:defs.bzl", "container_structure_test")

filegroup(
    name = "config_files",
    srcs = glob(["config/*"]),
)

pkg_tar(
    name = "config_tar",
    srcs = [":config_files"],
    remap_paths = {
        "/": "/config/",
    },
)

oci_image(
    name = "image",
    base = "@wolfi_postgres_exporter_base",
    entrypoint = ["/usr/bin/postgres_exporter"],
    env = {
        "PG_EXPORTER_EXTEND_QUERY_PATH": "/config/queries.yaml",
    },
    tars = [":config_tar"],
    user = "postgres_exporter",
)

oci_tarball(
    name = "image_tarball",
    image = ":image",
    repo_tags = ["postgres-exporter:candidate"],
)

container_structure_test(
    name = "image_test",
    timeout = "short",
    configs = ["image_test.yaml"],
    driver = "docker",
    image = ":image",
    tags = ["requires-network"],
)

oci_push(
    name = "candidate_push",
    image = ":image",
    remote_tags = "//:tags",
    repository = image_repository("postgres_exporter"),
)
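PG_EXPORTER_EXTEND_QUERY_PATH above points the exporter at the merged queries.yaml shipped in the config layer; the per-query files deleted below appear to be folded into it. A sketch of running the image against a database, using postgres_exporter's standard DATA_SOURCE_NAME variable; the connection string is illustrative:

    docker run --rm \
      -e DATA_SOURCE_NAME='postgresql://sg@localhost:5432/sg?sslmode=disable' \
      postgres-exporter:candidate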
@@ -1,27 +0,0 @@
# In this file:
#
# | name                                    | type    | description                                               |
# | --------------------------------------- | ------- | -------------------------------------------------------- |
# | pg_invalid_index_count{datname,relname} | COUNTER | Non-zero value used to tag existence of an invalid index |

pg_invalid_index:
  query: |
    SELECT
      current_database() AS datname,
      pc.relname AS relname,
      1 AS count
    FROM pg_class pc
    JOIN pg_index pi ON pi.indexrelid = pc.oid
    WHERE
      NOT indisvalid AND
      NOT EXISTS (SELECT 1 FROM pg_stat_progress_create_index pci WHERE pci.index_relid = pi.indexrelid)
  metrics:
    - datname:
        usage: "LABEL"
        description: "Name of current database"
    - relname:
        usage: "LABEL"
        description: "Name of the index"
    - count:
        usage: "COUNTER"
        description: "Non-zero value used to tag existence of an invalid index"
@@ -1,44 +0,0 @@
# In this file:
#
# | name                                      | type  | description                                                     |
# | ----------------------------------------- | ----- | --------------------------------------------------------------- |
# | pg_table_size_bytes{datname,relname}      | GAUGE | Total size of this table (including toast, index, toast index)  |
# | pg_table_size_indexsize{datname,relname}  | GAUGE | Size of all related indexes                                      |
# | pg_table_size_relsize{datname,relname}    | GAUGE | Size of this table itself (main, vm, fsm)                        |
# | pg_table_size_toastsize{datname,relname}  | GAUGE | Size of corresponding toast tables                               |
#
# Contents of this file are loosely based off of:
# https://github.com/Vonng/pg_exporter/blob/f682b06630db8e4585aa52df150d0a653bbde07e/conf/810-pg_table_size.yaml

pg_table_size:
  query: |
    SELECT
      CURRENT_CATALOG AS datname,
      r.relname AS relname,
      pg_total_relation_size(r.oid) AS bytes,
      pg_relation_size(r.oid) AS relsize,
      pg_indexes_size(r.oid) AS indexsize,
      pg_total_relation_size(r.reltoastrelid) AS toastsize
    FROM pg_class r
    JOIN pg_namespace n ON n.oid = r.relnamespace
    WHERE
      r.relkind = 'r' AND n.nspname NOT IN ('pg_catalog', 'information_schema');
  metrics:
    - datname:
        usage: 'LABEL'
        description: 'Name of current database'
    - relname:
        usage: 'LABEL'
        description: 'Name of the table'
    - bytes:
        usage: 'GAUGE'
        description: 'Total size of this table (including toast, index, toast index)'
    - indexsize:
        usage: 'GAUGE'
        description: 'Size of all related indexes'
    - relsize:
        usage: 'GAUGE'
        description: 'Size of this table itself (main, vm, fsm)'
    - toastsize:
        usage: 'GAUGE'
        description: 'Size of corresponding toast tables'
@@ -1,27 +0,0 @@
# In this file:
#
# | name                   | type  | description                                 |
# | ---------------------- | ----- | ------------------------------------------- |
# | pg_sg_migration_status | GAUGE | Whether the migration applied successfully  |
# |                        |       | This only applies to the frontend db.       |

pg_sg_migration:
  query: |
    WITH ranked_migration_logs AS (
      SELECT
        migration_logs.*,
        ROW_NUMBER() OVER (PARTITION BY version ORDER BY finished_at DESC) AS row_number
      FROM migration_logs
      WHERE schema = 'schema_migrations'
    )
    SELECT EXISTS (
      SELECT 1
      FROM ranked_migration_logs
      WHERE row_number = 1
      AND NOT success
    )::int;
  master: true
  metrics:
    - status:
        usage: "GAUGE"
        description: Whether the migration applied successfully
@@ -1,3 +1,76 @@
# In this file:
#
# | name                                    | type    | description                                               |
# | --------------------------------------- | ------- | --------------------------------------------------------- |
# | pg_invalid_index_count{datname,relname} | COUNTER | Non-zero value used to tag existence of an invalid index  |

pg_invalid_index:
  query: |
    SELECT
      current_database() AS datname,
      pc.relname AS relname,
      1 AS count
    FROM pg_class pc
    JOIN pg_index pi ON pi.indexrelid = pc.oid
    WHERE
      NOT indisvalid AND
      NOT EXISTS (SELECT 1 FROM pg_stat_progress_create_index pci WHERE pci.index_relid = pi.indexrelid)
  metrics:
    - datname:
        usage: "LABEL"
        description: "Name of current database"
    - relname:
        usage: "LABEL"
        description: "Name of the index"
    - count:
        usage: "COUNTER"
        description: "Non-zero value used to tag existence of an invalid index"
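One way to watch this query fire, sketched against a scratch database with hypothetical object names: a failed CREATE INDEX CONCURRENTLY is the textbook way an invalid index comes to exist, since PostgreSQL leaves the half-built index behind on failure.

# A unique index built CONCURRENTLY over duplicate values fails and
# leaves an invalid index, which pg_invalid_index then reports:
psql -X -c "CREATE TABLE demo (x int); INSERT INTO demo VALUES (1), (1);"
psql -X -c "CREATE UNIQUE INDEX CONCURRENTLY demo_x ON demo (x);" || true
psql -X -c "SELECT c.relname, i.indisvalid FROM pg_index i JOIN pg_class c ON c.oid = i.indexrelid WHERE c.relname = 'demo_x';"
# Expect indisvalid = f, so pg_invalid_index_count{relname="demo_x"} reports 1.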
# In this file:
#
# | name                                      | type  | description                                                     |
# | ----------------------------------------- | ----- | --------------------------------------------------------------- |
# | pg_table_size_bytes{datname,relname}      | GAUGE | Total size of this table (including toast, index, toast index)  |
# | pg_table_size_indexsize{datname,relname}  | GAUGE | Size of all related indexes                                      |
# | pg_table_size_relsize{datname,relname}    | GAUGE | Size of this table itself (main, vm, fsm)                        |
# | pg_table_size_toastsize{datname,relname}  | GAUGE | Size of corresponding toast tables                               |
#
# Contents of this file are loosely based on:
# https://github.com/Vonng/pg_exporter/blob/f682b06630db8e4585aa52df150d0a653bbde07e/conf/810-pg_table_size.yaml

pg_table_size:
  query: |
    SELECT
      CURRENT_CATALOG AS datname,
      r.relname AS relname,
      pg_total_relation_size(r.oid) AS bytes,
      pg_relation_size(r.oid) AS relsize,
      pg_indexes_size(r.oid) AS indexsize,
      pg_total_relation_size(r.reltoastrelid) AS toastsize
    FROM pg_class r
    JOIN pg_namespace n ON n.oid = r.relnamespace
    WHERE
      r.relkind = 'r' AND n.nspname NOT IN ('pg_catalog', 'information_schema');
  metrics:
    - datname:
        usage: 'LABEL'
        description: 'Name of current database'
    - relname:
        usage: 'LABEL'
        description: 'Name of the table'
    - bytes:
        usage: 'GAUGE'
        description: 'Total size of this table (including toast, index, toast index)'
    - indexsize:
        usage: 'GAUGE'
        description: 'Size of all related indexes'
    - relsize:
        usage: 'GAUGE'
        description: 'Size of this table itself (main, vm, fsm)'
    - toastsize:
        usage: 'GAUGE'
        description: 'Size of corresponding toast tables'
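With the exporter running against this queries file, the new series can be spot-checked over HTTP. A sketch, assuming postgres_exporter's customary listen port 9187; label values and numbers below are illustrative:

curl -s localhost:9187/metrics | grep '^pg_table_size_'
# pg_table_size_bytes{datname="sg",relname="repo"} 1.234567e+08
# pg_table_size_indexsize{datname="sg",relname="repo"} 4.2e+07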
# In this file:
#
# | name | type | description |
@@ -123,3 +196,31 @@ pg_table_bloat:
    - ratio:
        usage: 'GAUGE'
        description: 'Estimated bloat ratio of this table, 0~1'
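Under the exporter's query_metric naming convention this gauge should surface as pg_table_bloat_ratio; a sketch of a threshold check, where 0.4 is a hypothetical cutoff and 9187 the customary port:

# Print only tables whose estimated bloat ratio exceeds 0.4:
curl -s localhost:9187/metrics | grep '^pg_table_bloat_ratio' | awk '$2 > 0.4'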
# In this file:
#
# | name                   | type  | description                                 |
# | ---------------------- | ----- | ------------------------------------------- |
# | pg_sg_migration_status | GAUGE | Whether the migration applied successfully  |
# |                        |       | This only applies to the frontend db.       |

pg_sg_migration:
  query: |
    WITH ranked_migration_logs AS (
      SELECT
        migration_logs.*,
        ROW_NUMBER() OVER (PARTITION BY version ORDER BY finished_at DESC) AS row_number
      FROM migration_logs
      WHERE schema = 'schema_migrations'
    )
    SELECT EXISTS (
      SELECT 1
      FROM ranked_migration_logs
      WHERE row_number = 1
      AND NOT success
    )::int;
  master: true
  metrics:
    - status:
        usage: "GAUGE"
        description: Whether the migration applied successfully
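The same query can be run by hand against the frontend database to cross-check the gauge; a sketch assuming psql connectivity to that database:

psql -X -c "
WITH ranked_migration_logs AS (
  SELECT migration_logs.*,
         ROW_NUMBER() OVER (PARTITION BY version ORDER BY finished_at DESC) AS row_number
  FROM migration_logs
  WHERE schema = 'schema_migrations'
)
SELECT EXISTS (
  SELECT 1 FROM ranked_migration_logs
  WHERE row_number = 1 AND NOT success
)::int AS status;"
# status = 1 when the latest attempt of some migration failed.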
Some files were not shown because too many files have changed in this diff.