Apply Wolfi Cloud Instance Fixes (#51263)
This PR contains all the fixes and tweaks I've made while getting the wolfi-test cloud instance working. There's a lot going on in this PR:

Packages:

* Build comby from source - the published binary isn't compatible with Arch/Wolfi's libpcre library path
* Build cadvisor from source - may not be necessary, but it was done as a debugging step and gives us more arch flexibility
* Package s3proxy
* Update p4-fusion dependency SHA
* Bump all packages to trigger a rebuild with the latest version of melange. The old version didn't seem to pick up on dylibs properly
* Fix opentelemetry-collector binary path

Base images:

* Fix cadvisor base image
* Fix redis UID+GIDs
* Add missing p4cli package to repo-updater image
* Remove nodejs from server image (no longer required by code-intel)

Other fixes:

* Update build-wolfi.sh scripts to match the current Bazelified build.sh scripts
* Fix `server` image builds
* Fix zoekt-webserver CMD
* Fix postgres locale + default config + add a patch for `unix_socket_directories`
* Fix `symbols` go-build-wolfi scripts

## Things to do before merging

- [ ] Validate the new postgres patch-conf.sh script with delivery - will do post-merge
- [x] Remove all DO NOT MERGE comments

## Test plan

<!-- All pull requests REQUIRE a test plan: https://docs.sourcegraph.com/dev/background-information/testing_principles -->

- [x] Manual testing on wolfi-test cloud instance
- [x] Manual testing on scaletesting instance
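A pattern that recurs throughout the build-script changes below is replacing bare `"$DOCKER_BAZEL"` tests with `"${DOCKER_BAZEL:-false}"`. A minimal sketch of why the default expansion matters — the `set -u` connection is my assumption, but these scripts commonly run with `set -euo pipefail`, under which referencing an unset variable aborts the script:

```bash
#!/usr/bin/env bash
set -euo pipefail

# Without a default, this line aborts under `set -u` when DOCKER_BAZEL is unset:
#   if [[ "$DOCKER_BAZEL" == "true" ]]; then ...
# The :- expansion substitutes "false" when the variable is unset or empty,
# so the guard degrades gracefully to the non-Bazel build path:
if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
  echo "building via Bazel"
else
  echo "building via go build"
fi
```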
Commit b95719f528 (parent 97910a6944)
```diff
@@ -9,6 +9,18 @@ cleanup() {
   rm -rf "$OUTPUT"
 }
 trap cleanup EXIT
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+  ./dev/ci/bazel.sh build //cmd/frontend
+  out=$(./dev/ci/bazel.sh cquery //cmd/frontend --output=files)
+  cp "$out" "$OUTPUT"
+
+  docker build -f cmd/frontend/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+  exit $?
+fi
+
 # Environment for building linux binaries
 export GO111MODULE=on
```
```diff
@@ -9,7 +9,7 @@ cleanup() {
   rm -rf "$OUTPUT"
 }
 trap cleanup EXIT
-if [[ "$DOCKER_BAZEL" == "true" ]]; then
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
   ./dev/ci/bazel.sh build //cmd/frontend
   out=$(./dev/ci/bazel.sh cquery //cmd/frontend --output=files)
   cp "$out" "$OUTPUT"
```
```diff
@@ -11,13 +11,12 @@ cleanup() {
 trap cleanup EXIT
 
 if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
-
-  bazel build //cmd/github-proxy \
+  ./dev/ci/bazel.sh build //cmd/github-proxy \
     --stamp \
     --workspace_status_command=./dev/bazel_stamp_vars.sh \
     --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64
 
-  out=$(bazel cquery //cmd/github-proxy --output=files)
+  out=$(./dev/ci/bazel.sh cquery //cmd/github-proxy --output=files)
   cp "$out" "$OUTPUT"
 
   docker build -f cmd/github-proxy/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
```
```diff
@@ -13,12 +13,8 @@ cleanup() {
 trap cleanup EXIT
 
 if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
-  bazel build //cmd/gitserver \
-    --stamp \
-    --workspace_status_command=./dev/bazel_stamp_vars.sh \
-    --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64
-
-  out=$(bazel cquery //cmd/gitserver --output=files)
+  ./dev/ci/bazel.sh build //cmd/gitserver
+  out=$(./dev/ci/bazel.sh cquery //cmd/gitserver --output=files)
   cp "$out" "$OUTPUT"
 
   docker build -f cmd/gitserver/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
```
```diff
@@ -11,13 +11,8 @@ cleanup() {
 trap cleanup EXIT
 
 if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
-
-  bazel build //cmd/loadtest \
-    --stamp \
-    --workspace_status_command=./dev/bazel_stamp_vars.sh \
-    --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64
-
-  out=$(bazel cquery //cmd/loadtest --output=files)
+  ./dev/ci/bazel.sh build //cmd/loadtest
+  out=$(./dev/ci/bazel.sh cquery //cmd/loadtest --output=files)
   cp "$out" "$OUTPUT"
 
   docker build -f cmd/loadtest/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
```
```diff
@@ -11,18 +11,6 @@ cleanup() {
 }
 trap cleanup EXIT
 
-# Environment for building linux binaries
-export GO111MODULE=on
-export GOARCH=amd64
-export GOOS=linux
-export CGO_ENABLED=0
-
-echo "--- go build"
-pkg=${1:-"github.com/sourcegraph/sourcegraph/cmd/migrator"}
-output="$OUTPUT/$(basename "$pkg")"
-# shellcheck disable=SC2153
-go build -trimpath -ldflags "-X github.com/sourcegraph/sourcegraph/internal/version.version=$VERSION -X github.com/sourcegraph/sourcegraph/internal/version.timestamp=$(date +%s)" -buildmode exe -tags dist -o "$output" "$pkg"
-
 echo "--- compile schema descriptions"
 mkdir -p "${OUTPUT}/schema-descriptions"
```
```diff
@@ -80,8 +68,8 @@ git_versions=(
   v4.3.0 v4.3.1
   v4.4.0 v4.4.1 v4.4.2
   v4.5.0 v4.5.1
-  v5.0.0
-)
+  v5.0.0 v5.0.1 v5.0.2 v5.0.3)
 
 for version in "${git_versions[@]}"; do
   echo "Persisting schemas for ${version} from Git..."
   git show "${version}:internal/database/schema.json" >"${OUTPUT}/schema-descriptions/${version}-internal_database_schema.json"
```
```diff
@@ -94,7 +82,7 @@ if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
   out=$(./dev/ci/bazel.sh cquery //cmd/migrator --output=files)
   cp "$out" "$OUTPUT"
 
-  docker build -f cmd/migrator/Dockerfile -t "$IMAGE" "$OUTPUT" \
+  docker build -f cmd/migrator/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
     --progress=plain \
     --build-arg COMMIT_SHA \
     --build-arg DATE \
```
```diff
@@ -102,6 +90,18 @@ if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
   exit $?
 fi
 
+# Environment for building linux binaries
+export GO111MODULE=on
+export GOARCH=amd64
+export GOOS=linux
+export CGO_ENABLED=0
+
+echo "--- go build"
+pkg=${1:-"github.com/sourcegraph/sourcegraph/cmd/migrator"}
+output="$OUTPUT/$(basename "$pkg")"
+# shellcheck disable=SC2153
+go build -trimpath -ldflags "-X github.com/sourcegraph/sourcegraph/internal/version.version=$VERSION -X github.com/sourcegraph/sourcegraph/internal/version.timestamp=$(date +%s)" -buildmode exe -tags dist -o "$output" "$pkg"
+
 echo "--- docker build"
 docker build -f cmd/migrator/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
   --progress=plain \
```
```diff
@@ -13,12 +13,8 @@ cleanup() {
 trap cleanup EXIT
 
 if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
-  bazel build //cmd/repo-updater \
-    --stamp \
-    --workspace_status_command=./dev/bazel_stamp_vars.sh \
-    --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64
-
-  out=$(bazel cquery //cmd/repo-updater --output=files)
+  ./dev/ci/bazel.sh build //cmd/repo-updater
+  out=$(./dev/ci/bazel.sh cquery //cmd/repo-updater --output=files)
   cp "$out" "$OUTPUT"
 
   docker build -f cmd/repo-updater/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
```
```diff
@@ -11,12 +11,8 @@ cleanup() {
 trap cleanup EXIT
 
 if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
-  bazel build //cmd/searcher \
-    --stamp \
-    --workspace_status_command=./dev/bazel_stamp_vars.sh \
-    --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64
-
-  out=$(bazel cquery //cmd/searcher --output=files)
+  ./dev/ci/bazel.sh build //cmd/searcher
+  out=$(./dev/ci/bazel.sh cquery //cmd/searcher --output=files)
   cp "$out" "$OUTPUT"
 
   docker build -f cmd/searcher/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
```
```diff
@@ -48,9 +48,9 @@ go_build() {
   local package="$1"
 
   if [[ "${CI_DEBUG_PROFILE:-"false"}" == "true" ]]; then
-    env time -v ./cmd/server/go-build.sh "$package"
+    env time -v ./cmd/server/go-build-wolfi.sh "$package"
   else
-    ./cmd/server/go-build.sh "$package"
+    ./cmd/server/go-build-wolfi.sh "$package"
   fi
 }
 export -f go_build
```
**cmd/server/go-build-wolfi.sh** (new executable file, 20 lines)
```bash
#!/usr/bin/env bash

set -euxo pipefail

PACKAGE="$1"
RELATIVE_PACKAGE="${PACKAGE#github.com/sourcegraph/sourcegraph/}"
BASENAME="$(basename "$PACKAGE")"

if [[ "$BASENAME" != "server" ]] && [[ -f "$RELATIVE_PACKAGE/go-build-wolfi.sh" ]]; then
  # Application builds itself (e.g. requires CGO)
  bash "$RELATIVE_PACKAGE/go-build-wolfi.sh" "$BINDIR"
else
  go build \
    -trimpath \
    -ldflags "-X github.com/sourcegraph/sourcegraph/internal/version.version=$VERSION -X github.com/sourcegraph/sourcegraph/internal/version.timestamp=$(date +%s)" \
    -buildmode exe \
    -installsuffix netgo \
    -tags "dist netgo" \
    -o "$BINDIR/$BASENAME" "$PACKAGE"
fi
```
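A hedged usage sketch: `cmd/server/build-wolfi.sh` invokes this helper through the exported `go_build` function shown above, with `BINDIR` and `VERSION` already in the environment. The values here are illustrative assumptions only:

```bash
# Illustrative standalone invocation (BINDIR/VERSION values are assumed):
export BINDIR=/tmp/sourcegraph-bin VERSION=0.0.0-dev
mkdir -p "$BINDIR"
./cmd/server/go-build-wolfi.sh github.com/sourcegraph/sourcegraph/cmd/github-proxy
ls "$BINDIR"   # expect a github-proxy binary
```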
**cmd/symbols/go-build-wolfi.sh** (mode changed: normal file → executable file)
```diff
@@ -12,13 +12,8 @@ cleanup() {
 trap cleanup EXIT
 
 if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
-
-  bazel build //cmd/worker \
-    --stamp \
-    --workspace_status_command=./dev/bazel_stamp_vars.sh \
-    --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64
-
-  out=$(bazel cquery //cmd/worker --output=files)
+  ./dev/ci/bazel.sh build //cmd/worker
+  out=$(./dev/ci/bazel.sh cquery //cmd/worker --output=files)
   cp "$out" "$OUTPUT"
 
   docker build -f cmd/worker/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
```
```diff
@@ -132,7 +132,7 @@ sg ci build wolfi
 Base pipeline (more steps might be included based on branch changes):
 
 - **Metadata**: Pipeline metadata
-- **Wolfi image builds**: Build Wolfi-based batcheshelper, Build Wolfi-based embeddings, Build Wolfi-based executor-kubernetes, Build Wolfi-based frontend, Build Wolfi-based github-proxy, Build Wolfi-based gitserver, Build Wolfi-based llm-proxy, Build Wolfi-based loadtest, Build Wolfi-based migrator, Build Wolfi-based precise-code-intel-worker, Build Wolfi-based repo-updater, Build Wolfi-based searcher, Build Wolfi-based symbols, Build Wolfi-based worker, Build Wolfi-based blobstore, Build Wolfi-based cadvisor, Build Wolfi-based codeinsights-db, Build Wolfi-based codeintel-db, Build Wolfi-based indexed-searcher, Build Wolfi-based jaeger-agent, Build Wolfi-based jaeger-all-in-one, Build Wolfi-based node-exporter, Build Wolfi-based opentelemetry-collector, Build Wolfi-based postgres-12-alpine, Build Wolfi-based postgres_exporter, Build Wolfi-based prometheus, Build Wolfi-based prometheus-gcp, Build Wolfi-based redis-cache, Build Wolfi-based redis-store, Build Wolfi-based redis_exporter, Build Wolfi-based search-indexer, Build Wolfi-based sg, Build Wolfi-based syntax-highlighter
+- **Wolfi image builds**: Build Wolfi-based batcheshelper, Build Wolfi-based blobstore, Build Wolfi-based cadvisor, Build Wolfi-based codeinsights-db, Build Wolfi-based codeintel-db, Build Wolfi-based embeddings, Build Wolfi-based executor-kubernetes, Build Wolfi-based frontend, Build Wolfi-based github-proxy, Build Wolfi-based gitserver, Build Wolfi-based indexed-searcher, Build Wolfi-based jaeger-agent, Build Wolfi-based jaeger-all-in-one, Build Wolfi-based llm-proxy, Build Wolfi-based loadtest, Build Wolfi-based migrator, Build Wolfi-based node-exporter, Build Wolfi-based opentelemetry-collector, Build Wolfi-based postgres-12-alpine, Build Wolfi-based postgres_exporter, Build Wolfi-based precise-code-intel-worker, Build Wolfi-based prometheus, Build Wolfi-based prometheus-gcp, Build Wolfi-based redis-cache, Build Wolfi-based redis-store, Build Wolfi-based redis_exporter, Build Wolfi-based repo-updater, Build Wolfi-based search-indexer, Build Wolfi-based searcher, Build Wolfi-based server, Build Wolfi-based sg, Build Wolfi-based symbols, Build Wolfi-based syntax-highlighter, Build Wolfi-based worker
 
 ### Release branch nightly healthcheck build
```
```diff
@@ -1,17 +1,3 @@
-# Build s3proxy from source
-# hadolint ignore=DL3022
-FROM maven:3.8.6-openjdk-11-slim AS builder
-
-# hadolint ignore=DL3008,DL3009
-RUN apt-get update && \
-    apt-get install -y --no-install-recommends git
-
-RUN git clone https://github.com/sourcegraph/s3proxy /build
-WORKDIR /build
-RUN mvn package -DskipTests && \
-    mv target/ /opt/s3proxy && \
-    cp src/main/resources/run-docker-container.sh /opt/s3proxy
-
 # Build our final Wolfi-based image
 # hadolint ignore=DL3007
 FROM us.gcr.io/sourcegraph-dev/wolfi-blobstore-base:latest
@@ -25,8 +11,6 @@ LABEL org.opencontainers.image.created=${DATE}
 LABEL org.opencontainers.image.version=${VERSION}
 LABEL com.sourcegraph.github.url=https://github.com/sourcegraph/sourcegraph/commit/${COMMIT_SHA}
 
-COPY --from=builder /opt/s3proxy /opt/s3proxy
-
 ENV \
     LOG_LEVEL="info" \
     S3PROXY_AUTHORIZATION="none" \
```
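This removal pairs with the `s3proxy@sourcegraph` package added to the base image configuration further down: s3proxy now arrives as a prebuilt apk from the Sourcegraph package repository instead of being compiled in a Maven builder stage. A hedged spot check — the image tag and the entrypoint override are assumptions:

```bash
# Verify the apk-packaged s3proxy is present in the built image:
docker run --rm --entrypoint apk "${IMAGE:-sourcegraph/blobstore}" info s3proxy
```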
```diff
@@ -2,14 +2,8 @@
 cd "$(dirname "${BASH_SOURCE[0]}")"
 set -ex
 
-# Enable image build caching via CACHE=true
-BUILD_CACHE="--no-cache"
-if [[ "$CACHE" == "true" ]]; then
-  BUILD_CACHE=""
-fi
-
-# shellcheck disable=SC2086
-docker build ${BUILD_CACHE} -f Dockerfile.wolfi -t "${IMAGE:-"sourcegraph/blobstore"}" . \
+docker build -f Dockerfile.wolfi -t "${IMAGE:-"sourcegraph/blobstore"}" . \
   --platform linux/amd64 \
   --progress=plain \
   --build-arg COMMIT_SHA \
   --build-arg DATE \
```
```diff
@@ -2,7 +2,7 @@
 
 # NOTE: Check the README before updating
 # hadolint ignore=DL3007
-FROM us.gcr.io/sourcegraph-dev/wolfi-sourcegraph-base:latest
+FROM us.gcr.io/sourcegraph-dev/wolfi-cadvisor-base:latest
 LABEL com.sourcegraph.cadvisor.version=v0.47.0
 
 ARG COMMIT_SHA="unknown"
@@ -28,4 +28,6 @@ ENTRYPOINT ["/usr/bin/cadvisor", "-logtostderr", \
     "-housekeeping_interval=10s", \
    "-max_housekeeping_interval=15s", \
    "-event_storage_event_limit=default=0", \
-   "-event_storage_age_limit=default=0"]
+   "-v=3", \
+   "-event_storage_age_limit=default=0", \
+   "-containerd=/var/run/containerd/containerd.sock"]
```
```diff
@@ -2,7 +2,7 @@
 cd "$(dirname "${BASH_SOURCE[0]}")"
 set -ex
 
-docker build --no-cache -f Dockerfile.wolfi -t "${IMAGE:-"sourcegraph/cadvisor"}" . \
+docker build -f Dockerfile.wolfi -t "${IMAGE:-"sourcegraph/cadvisor"}" . \
   --progress=plain \
   --build-arg COMMIT_SHA \
   --build-arg DATE \
```
```diff
@@ -30,4 +30,5 @@ COPY --from=zoekt_upstream /usr/local/bin/zoekt-webserver /usr/local/bin/
 ENV GOGC=25
 
 ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["zoekt-webserver", "-index $DATA_DIR", "-pprof", "-rpc", "-indexserver_proxy"]
+# hadolint ignore=DL3025
+CMD zoekt-webserver -index $DATA_DIR -pprof -rpc -indexserver_proxy
```
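The CMD fix above matters because exec-form CMD arguments are passed verbatim to the binary with no shell involved, so `"-index $DATA_DIR"` previously reached zoekt-webserver as a literal string; shell-form CMD runs through `/bin/sh -c`, which expands the variable first (hence the `DL3025` suppression, since hadolint prefers exec form). A quick demonstration of the difference outside Docker:

```bash
export DATA_DIR=/data/index   # assumed value for illustration

# Exec form: no shell, so the argument keeps the literal text "$DATA_DIR".
printf 'exec form passes: %s\n' '-index $DATA_DIR'

# Shell form: sh -c expands the variable before the program sees it.
sh -c 'printf "shell form passes: -index %s\n" "$DATA_DIR"'
```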
```diff
@@ -18,4 +18,4 @@ WORKDIR /otel-collector
 # Set up bundled configuration - see README
 COPY ./configs /etc/otel-collector/configs
 
-ENTRYPOINT [ "/usr/bin/otelcol-sourcegraph" ]
+ENTRYPOINT [ "/bin/otelcol-sourcegraph" ]
```
```diff
@@ -20,9 +20,23 @@ ARG POSTGRES_UID=999
 # and avoid issues with customers migrating.
 RUN addgroup -g $PING_UID ping &&\
     adduser -D -u $POSTGRES_UID postgres postgres &&\
-    mkdir -p /data/pgdata-12 && chown -R postgres:postgres /data
+    mkdir -p /data/pgdata-12 && chown -R postgres:postgres /data &&\
+    mkdir -p /var/lib/postgresql && chown -R postgres:postgres /var/lib/postgresql &&\
+    mkdir -p /var/run/postgresql && chown -R postgres:postgres /var/run/postgresql
 
 COPY rootfs /
+# Overwrite default postgresql.conf.sample
+COPY config/postgresql.conf.sample /usr/share/postgresql/postgresql.conf.sample
+
+# PGDATA: Set default Postgres data storage directory
+# LANG: Set locale used by Postgresql during initdb (defaults to LANG=C)
+# PGHOST: Set socket path for psql (defaults to /tmp)
+ENV POSTGRES_PASSWORD='' \
+    POSTGRES_USER=sg \
+    POSTGRES_DB=sg \
+    PGDATA=/data/pgdata-12 \
+    LANG=en_US.utf8 \
+    PGHOST=/var/run/postgresql
 
 USER postgres
 ENTRYPOINT ["/postgres-wolfi.sh"]
```
**docker-images/postgres-12-alpine/config/postgresql.conf.sample** (new file, 753 lines)
```
# -----------------------------
# PostgreSQL configuration file
# -----------------------------
#
# This file consists of lines of the form:
#
#   name = value
#
# (The "=" is optional.) Whitespace may be used. Comments are introduced with
# "#" anywhere on a line. The complete list of parameter names and allowed
# values can be found in the PostgreSQL documentation.
#
# The commented-out settings shown in this file represent the default values.
# Re-commenting a setting is NOT sufficient to revert it to the default value;
# you need to reload the server.
#
# This file is read on server startup and when the server receives a SIGHUP
# signal. If you edit the file on a running system, you have to SIGHUP the
# server for the changes to take effect, run "pg_ctl reload", or execute
# "SELECT pg_reload_conf()". Some parameters, which are marked below,
# require a server shutdown and restart to take effect.
#
# Any parameter can also be given as a command-line option to the server, e.g.,
# "postgres -c log_connections=on". Some parameters can be changed at run time
# with the "SET" SQL command.
#
# Memory units: B = bytes            Time units: us = microseconds
#               kB = kilobytes                    ms = milliseconds
#               MB = megabytes                    s = seconds
#               GB = gigabytes                    min = minutes
#               TB = terabytes                    h = hours
#                                                 d = days


#------------------------------------------------------------------------------
# FILE LOCATIONS
#------------------------------------------------------------------------------

# The default values of these variables are driven from the -D command-line
# option or PGDATA environment variable, represented here as ConfigDir.

#data_directory = 'ConfigDir'  # use data in another directory
# (change requires restart)
#hba_file = 'ConfigDir/pg_hba.conf'  # host-based authentication file
# (change requires restart)
#ident_file = 'ConfigDir/pg_ident.conf'  # ident configuration file
# (change requires restart)

# If external_pid_file is not explicitly set, no extra PID file is written.
#external_pid_file = ''  # write an extra PID file
# (change requires restart)


#------------------------------------------------------------------------------
# CONNECTIONS AND AUTHENTICATION
#------------------------------------------------------------------------------

# - Connection Settings -

# Sourcegraph: Listen on all interfaces
listen_addresses = '*'  # what IP address(es) to listen on;
# comma-separated list of addresses;
# defaults to 'localhost'; use '*' for all
# (change requires restart)
#port = 5432  # (change requires restart)
max_connections = 100  # (change requires restart)
#superuser_reserved_connections = 3  # (change requires restart)
unix_socket_directories = '/var/run/postgresql'  # comma-separated list of directories
# (change requires restart)
#unix_socket_group = ''  # (change requires restart)
#unix_socket_permissions = 0777  # begin with 0 to use octal notation
# (change requires restart)
#bonjour = off  # advertise server via Bonjour
# (change requires restart)
#bonjour_name = ''  # defaults to the computer name
# (change requires restart)

# - TCP settings -
# see "man 7 tcp" for details

#tcp_keepalives_idle = 0  # TCP_KEEPIDLE, in seconds;
# 0 selects the system default
#tcp_keepalives_interval = 0  # TCP_KEEPINTVL, in seconds;
# 0 selects the system default
#tcp_keepalives_count = 0  # TCP_KEEPCNT;
# 0 selects the system default
#tcp_user_timeout = 0  # TCP_USER_TIMEOUT, in milliseconds;
# 0 selects the system default

# - Authentication -

#authentication_timeout = 1min  # 1s-600s
#password_encryption = md5  # md5 or scram-sha-256
#db_user_namespace = off

# GSSAPI using Kerberos
#krb_server_keyfile = 'FILE:${sysconfdir}/krb5.keytab'
#krb_caseins_users = off

# - SSL -

#ssl = off
#ssl_ca_file = ''
#ssl_cert_file = 'server.crt'
#ssl_crl_file = ''
#ssl_key_file = 'server.key'
#ssl_ciphers = 'HIGH:MEDIUM:+3DES:!aNULL'  # allowed SSL ciphers
#ssl_prefer_server_ciphers = on
#ssl_ecdh_curve = 'prime256v1'
#ssl_min_protocol_version = 'TLSv1'
#ssl_max_protocol_version = ''
#ssl_dh_params_file = ''
#ssl_passphrase_command = ''
#ssl_passphrase_command_supports_reload = off


#------------------------------------------------------------------------------
# RESOURCE USAGE (except WAL)
#------------------------------------------------------------------------------

# - Memory -

shared_buffers = 128MB  # min 128kB
# (change requires restart)
#huge_pages = try  # on, off, or try
# (change requires restart)
#temp_buffers = 8MB  # min 800kB
#max_prepared_transactions = 0  # zero disables the feature
# (change requires restart)
# Caution: it is not advisable to set max_prepared_transactions nonzero unless
# you actively intend to use prepared transactions.
#work_mem = 4MB  # min 64kB
#maintenance_work_mem = 64MB  # min 1MB
#autovacuum_work_mem = -1  # min 1MB, or -1 to use maintenance_work_mem
#max_stack_depth = 2MB  # min 100kB
#shared_memory_type = mmap  # the default is the first option
# supported by the operating system:
#   mmap
#   sysv
#   windows
# (change requires restart)
dynamic_shared_memory_type = posix  # the default is the first option
# supported by the operating system:
#   posix
#   sysv
#   windows
#   mmap
# (change requires restart)

# - Disk -

#temp_file_limit = -1  # limits per-process temp file space
# in kB, or -1 for no limit

# - Kernel Resources -

#max_files_per_process = 1000  # min 25
# (change requires restart)

# - Cost-Based Vacuum Delay -

#vacuum_cost_delay = 0  # 0-100 milliseconds (0 disables)
#vacuum_cost_page_hit = 1  # 0-10000 credits
#vacuum_cost_page_miss = 10  # 0-10000 credits
#vacuum_cost_page_dirty = 20  # 0-10000 credits
#vacuum_cost_limit = 200  # 1-10000 credits

# - Background Writer -

#bgwriter_delay = 200ms  # 10-10000ms between rounds
#bgwriter_lru_maxpages = 100  # max buffers written/round, 0 disables
#bgwriter_lru_multiplier = 2.0  # 0-10.0 multiplier on buffers scanned/round
#bgwriter_flush_after = 512kB  # measured in pages, 0 disables

# - Asynchronous Behavior -

#effective_io_concurrency = 1  # 1-1000; 0 disables prefetching
#max_worker_processes = 8  # (change requires restart)
#max_parallel_maintenance_workers = 2  # taken from max_parallel_workers
#max_parallel_workers_per_gather = 2  # taken from max_parallel_workers
#parallel_leader_participation = on
#max_parallel_workers = 8  # maximum number of max_worker_processes that
# can be used in parallel operations
#old_snapshot_threshold = -1  # 1min-60d; -1 disables; 0 is immediate
# (change requires restart)
#backend_flush_after = 0  # measured in pages, 0 disables


#------------------------------------------------------------------------------
# WRITE-AHEAD LOG
#------------------------------------------------------------------------------

# - Settings -

#wal_level = replica  # minimal, replica, or logical
# (change requires restart)
#fsync = on  # flush data to disk for crash safety
# (turning this off can cause
# unrecoverable data corruption)
#synchronous_commit = on  # synchronization level;
# off, local, remote_write, remote_apply, or on
#wal_sync_method = fsync  # the default is the first option
# supported by the operating system:
#   open_datasync
#   fdatasync (default on Linux and FreeBSD)
#   fsync
#   fsync_writethrough
#   open_sync
#full_page_writes = on  # recover from partial page writes
#wal_compression = off  # enable compression of full-page writes
#wal_log_hints = off  # also do full page writes of non-critical updates
# (change requires restart)
#wal_init_zero = on  # zero-fill new WAL files
#wal_recycle = on  # recycle WAL files
#wal_buffers = -1  # min 32kB, -1 sets based on shared_buffers
# (change requires restart)
#wal_writer_delay = 200ms  # 1-10000 milliseconds
#wal_writer_flush_after = 1MB  # measured in pages, 0 disables

#commit_delay = 0  # range 0-100000, in microseconds
#commit_siblings = 5  # range 1-1000

# - Checkpoints -

#checkpoint_timeout = 5min  # range 30s-1d
max_wal_size = 1GB
min_wal_size = 80MB
#checkpoint_completion_target = 0.5  # checkpoint target duration, 0.0 - 1.0
#checkpoint_flush_after = 256kB  # measured in pages, 0 disables
#checkpoint_warning = 30s  # 0 disables

# - Archiving -

#archive_mode = off  # enables archiving; off, on, or always
# (change requires restart)
#archive_command = ''  # command to use to archive a logfile segment
# placeholders: %p = path of file to archive
#               %f = file name only
# e.g. 'test ! -f /mnt/server/archivedir/%f && cp %p /mnt/server/archivedir/%f'
#archive_timeout = 0  # force a logfile segment switch after this
# number of seconds; 0 disables

# - Archive Recovery -

# These are only used in recovery mode.

#restore_command = ''  # command to use to restore an archived logfile segment
# placeholders: %p = path of file to restore
#               %f = file name only
# e.g. 'cp /mnt/server/archivedir/%f %p'
# (change requires restart)
#archive_cleanup_command = ''  # command to execute at every restartpoint
#recovery_end_command = ''  # command to execute at completion of recovery

# - Recovery Target -

# Set these only when performing a targeted recovery.

#recovery_target = ''  # 'immediate' to end recovery as soon as a
# consistent state is reached
# (change requires restart)
#recovery_target_name = ''  # the named restore point to which recovery will proceed
# (change requires restart)
#recovery_target_time = ''  # the time stamp up to which recovery will proceed
# (change requires restart)
#recovery_target_xid = ''  # the transaction ID up to which recovery will proceed
# (change requires restart)
#recovery_target_lsn = ''  # the WAL LSN up to which recovery will proceed
# (change requires restart)
#recovery_target_inclusive = on  # Specifies whether to stop:
# just after the specified recovery target (on)
# just before the recovery target (off)
# (change requires restart)
#recovery_target_timeline = 'latest'  # 'current', 'latest', or timeline ID
# (change requires restart)
#recovery_target_action = 'pause'  # 'pause', 'promote', 'shutdown'
# (change requires restart)


#------------------------------------------------------------------------------
# REPLICATION
#------------------------------------------------------------------------------

# - Sending Servers -

# Set these on the master and on any standby that will send replication data.

#max_wal_senders = 10  # max number of walsender processes
# (change requires restart)
#wal_keep_segments = 0  # in logfile segments; 0 disables
#wal_sender_timeout = 60s  # in milliseconds; 0 disables

#max_replication_slots = 10  # max number of replication slots
# (change requires restart)
#track_commit_timestamp = off  # collect timestamp of transaction commit
# (change requires restart)

# - Master Server -

# These settings are ignored on a standby server.

#synchronous_standby_names = ''  # standby servers that provide sync rep
# method to choose sync standbys, number of sync standbys,
# and comma-separated list of application_name
# from standby(s); '*' = all
#vacuum_defer_cleanup_age = 0  # number of xacts by which cleanup is delayed

# - Standby Servers -

# These settings are ignored on a master server.

#primary_conninfo = ''  # connection string to sending server
# (change requires restart)
#primary_slot_name = ''  # replication slot on sending server
# (change requires restart)
#promote_trigger_file = ''  # file name whose presence ends recovery
#hot_standby = on  # "off" disallows queries during recovery
# (change requires restart)
#max_standby_archive_delay = 30s  # max delay before canceling queries
# when reading WAL from archive;
# -1 allows indefinite delay
#max_standby_streaming_delay = 30s  # max delay before canceling queries
# when reading streaming WAL;
# -1 allows indefinite delay
#wal_receiver_status_interval = 10s  # send replies at least this often
# 0 disables
#hot_standby_feedback = off  # send info from standby to prevent
# query conflicts
#wal_receiver_timeout = 60s  # time that receiver waits for
# communication from master
# in milliseconds; 0 disables
#wal_retrieve_retry_interval = 5s  # time to wait before retrying to
# retrieve WAL after a failed attempt
#recovery_min_apply_delay = 0  # minimum delay for applying changes during recovery

# - Subscribers -

# These settings are ignored on a publisher.

#max_logical_replication_workers = 4  # taken from max_worker_processes
# (change requires restart)
#max_sync_workers_per_subscription = 2  # taken from max_logical_replication_workers


#------------------------------------------------------------------------------
# QUERY TUNING
#------------------------------------------------------------------------------

# - Planner Method Configuration -

#enable_bitmapscan = on
#enable_hashagg = on
#enable_hashjoin = on
#enable_indexscan = on
#enable_indexonlyscan = on
#enable_material = on
#enable_mergejoin = on
#enable_nestloop = on
#enable_parallel_append = on
#enable_seqscan = on
#enable_sort = on
#enable_tidscan = on
#enable_partitionwise_join = off
#enable_partitionwise_aggregate = off
#enable_parallel_hash = on
#enable_partition_pruning = on

# - Planner Cost Constants -

#seq_page_cost = 1.0  # measured on an arbitrary scale
#random_page_cost = 4.0  # same scale as above
#cpu_tuple_cost = 0.01  # same scale as above
#cpu_index_tuple_cost = 0.005  # same scale as above
#cpu_operator_cost = 0.0025  # same scale as above
#parallel_tuple_cost = 0.1  # same scale as above
#parallel_setup_cost = 1000.0  # same scale as above

#jit_above_cost = 100000  # perform JIT compilation if available
# and query more expensive than this;
# -1 disables
#jit_inline_above_cost = 500000  # inline small functions if query is
# more expensive than this; -1 disables
#jit_optimize_above_cost = 500000  # use expensive JIT optimizations if
# query is more expensive than this;
# -1 disables

#min_parallel_table_scan_size = 8MB
#min_parallel_index_scan_size = 512kB
#effective_cache_size = 4GB

# - Genetic Query Optimizer -

#geqo = on
#geqo_threshold = 12
#geqo_effort = 5  # range 1-10
#geqo_pool_size = 0  # selects default based on effort
#geqo_generations = 0  # selects default based on effort
#geqo_selection_bias = 2.0  # range 1.5-2.0
#geqo_seed = 0.0  # range 0.0-1.0

# - Other Planner Options -

#default_statistics_target = 100  # range 1-10000
#constraint_exclusion = partition  # on, off, or partition
#cursor_tuple_fraction = 0.1  # range 0.0-1.0
#from_collapse_limit = 8
#join_collapse_limit = 8  # 1 disables collapsing of explicit
# JOIN clauses
#force_parallel_mode = off
#jit = on  # allow JIT compilation
#plan_cache_mode = auto  # auto, force_generic_plan or
# force_custom_plan


#------------------------------------------------------------------------------
# REPORTING AND LOGGING
#------------------------------------------------------------------------------

# - Where to Log -

#log_destination = 'stderr'  # Valid values are combinations of
# stderr, csvlog, syslog, and eventlog,
# depending on platform. csvlog
# requires logging_collector to be on.

# This is used when logging to stderr:
#logging_collector = off  # Enable capturing of stderr and csvlog
# into log files. Required to be on for
# csvlogs.
# (change requires restart)

# These are only used if logging_collector is on:
#log_directory = 'log'  # directory where log files are written,
# can be absolute or relative to PGDATA
#log_filename = 'postgresql-%Y-%m-%d_%H%M%S.log'  # log file name pattern,
# can include strftime() escapes
#log_file_mode = 0600  # creation mode for log files,
# begin with 0 to use octal notation
#log_truncate_on_rotation = off  # If on, an existing log file with the
# same name as the new log file will be
# truncated rather than appended to.
# But such truncation only occurs on
# time-driven rotation, not on restarts
# or size-driven rotation. Default is
# off, meaning append to existing files
# in all cases.
#log_rotation_age = 1d  # Automatic rotation of logfiles will
# happen after that time. 0 disables.
#log_rotation_size = 10MB  # Automatic rotation of logfiles will
# happen after that much log output.
# 0 disables.

# These are relevant when logging to syslog:
#syslog_facility = 'LOCAL0'
#syslog_ident = 'postgres'
#syslog_sequence_numbers = on
#syslog_split_messages = on

# This is only relevant when logging to eventlog (win32):
# (change requires restart)
#event_source = 'PostgreSQL'

# - When to Log -

#log_min_messages = warning  # values in order of decreasing detail:
#   debug5
#   debug4
#   debug3
#   debug2
#   debug1
#   info
#   notice
#   warning
#   error
#   log
#   fatal
#   panic

#log_min_error_statement = error  # values in order of decreasing detail:
#   debug5
#   debug4
#   debug3
#   debug2
#   debug1
#   info
#   notice
#   warning
#   error
#   log
#   fatal
#   panic (effectively off)

#log_min_duration_statement = -1  # -1 is disabled, 0 logs all statements
# and their durations, > 0 logs only
# statements running at least this number
# of milliseconds

#log_transaction_sample_rate = 0.0  # Fraction of transactions whose statements
# are logged regardless of their duration. 1.0 logs all
# statements from all transactions, 0.0 never logs.

# - What to Log -

#debug_print_parse = off
#debug_print_rewritten = off
#debug_print_plan = off
#debug_pretty_print = on
#log_checkpoints = off
#log_connections = off
#log_disconnections = off
#log_duration = off
#log_error_verbosity = default  # terse, default, or verbose messages
#log_hostname = off
#log_line_prefix = '%m [%p] '  # special values:
#   %a = application name
#   %u = user name
#   %d = database name
#   %r = remote host and port
#   %h = remote host
#   %p = process ID
#   %t = timestamp without milliseconds
#   %m = timestamp with milliseconds
#   %n = timestamp with milliseconds (as a Unix epoch)
#   %i = command tag
#   %e = SQL state
#   %c = session ID
#   %l = session line number
#   %s = session start timestamp
#   %v = virtual transaction ID
#   %x = transaction ID (0 if none)
#   %q = stop here in non-session
#        processes
#   %% = '%'
# e.g. '<%u%%%d> '
#log_lock_waits = off  # log lock waits >= deadlock_timeout
#log_statement = 'none'  # none, ddl, mod, all
#log_replication_commands = off
#log_temp_files = -1  # log temporary files equal or larger
# than the specified size in kilobytes;
# -1 disables, 0 logs all temp files
log_timezone = 'UTC'

#------------------------------------------------------------------------------
# PROCESS TITLE
#------------------------------------------------------------------------------

#cluster_name = ''  # added to process titles if nonempty
# (change requires restart)
#update_process_title = on


#------------------------------------------------------------------------------
# STATISTICS
#------------------------------------------------------------------------------

# - Query and Index Statistics Collector -

#track_activities = on
#track_counts = on
#track_io_timing = off
#track_functions = none  # none, pl, all
#track_activity_query_size = 1024  # (change requires restart)
#stats_temp_directory = 'pg_stat_tmp'


# - Monitoring -

#log_parser_stats = off
#log_planner_stats = off
#log_executor_stats = off
#log_statement_stats = off


#------------------------------------------------------------------------------
# AUTOVACUUM
#------------------------------------------------------------------------------

#autovacuum = on  # Enable autovacuum subprocess? 'on'
# requires track_counts to also be on.
#log_autovacuum_min_duration = -1  # -1 disables, 0 logs all actions and
# their durations, > 0 logs only
# actions running at least this number
# of milliseconds.
#autovacuum_max_workers = 3  # max number of autovacuum subprocesses
# (change requires restart)
#autovacuum_naptime = 1min  # time between autovacuum runs
#autovacuum_vacuum_threshold = 50  # min number of row updates before
# vacuum
#autovacuum_analyze_threshold = 50  # min number of row updates before
# analyze
#autovacuum_vacuum_scale_factor = 0.2  # fraction of table size before vacuum
#autovacuum_analyze_scale_factor = 0.1  # fraction of table size before analyze
#autovacuum_freeze_max_age = 200000000  # maximum XID age before forced vacuum
# (change requires restart)
#autovacuum_multixact_freeze_max_age = 400000000  # maximum multixact age
# before forced vacuum
# (change requires restart)
#autovacuum_vacuum_cost_delay = 2ms  # default vacuum cost delay for
# autovacuum, in milliseconds;
# -1 means use vacuum_cost_delay
#autovacuum_vacuum_cost_limit = -1  # default vacuum cost limit for
# autovacuum, -1 means use
# vacuum_cost_limit


#------------------------------------------------------------------------------
# CLIENT CONNECTION DEFAULTS
#------------------------------------------------------------------------------

# - Statement Behavior -

#client_min_messages = notice  # values in order of decreasing detail:
#   debug5
#   debug4
#   debug3
#   debug2
#   debug1
#   log
#   notice
#   warning
#   error
#search_path = '"$user", public'  # schema names
#row_security = on
#default_tablespace = ''  # a tablespace name, '' uses the default
#temp_tablespaces = ''  # a list of tablespace names, '' uses
# only default tablespace
#default_table_access_method = 'heap'
#check_function_bodies = on
#default_transaction_isolation = 'read committed'
#default_transaction_read_only = off
#default_transaction_deferrable = off
#session_replication_role = 'origin'
#statement_timeout = 0  # in milliseconds, 0 is disabled
#lock_timeout = 0  # in milliseconds, 0 is disabled
#idle_in_transaction_session_timeout = 0  # in milliseconds, 0 is disabled
#vacuum_freeze_min_age = 50000000
#vacuum_freeze_table_age = 150000000
#vacuum_multixact_freeze_min_age = 5000000
#vacuum_multixact_freeze_table_age = 150000000
#vacuum_cleanup_index_scale_factor = 0.1  # fraction of total number of tuples
# before index cleanup, 0 always performs
# index cleanup
#bytea_output = 'hex'  # hex, escape
#xmlbinary = 'base64'
#xmloption = 'content'
#gin_fuzzy_search_limit = 0
#gin_pending_list_limit = 4MB

# - Locale and Formatting -

datestyle = 'iso, mdy'
#intervalstyle = 'postgres'
timezone = 'UTC'
#timezone_abbreviations = 'Default'  # Select the set of available time zone
# abbreviations. Currently, there are
#   Default
#   Australia (historical usage)
#   India
# You can create your own file in
# share/timezonesets/.
#extra_float_digits = 1  # min -15, max 3; any value >0 actually
# selects precise output mode
#client_encoding = sql_ascii  # actually, defaults to database
# encoding

# Sourcegraph: Use 'en_US.utf8' locale
# These settings are initialized by initdb, but they can be changed.
lc_messages = 'en_US.utf8'  # locale for system error message
# strings
lc_monetary = 'en_US.utf8'  # locale for monetary formatting
lc_numeric = 'en_US.utf8'  # locale for number formatting
lc_time = 'en_US.utf8'  # locale for time formatting

# default configuration for text search
default_text_search_config = 'pg_catalog.english'

# - Shared Library Preloading -

#shared_preload_libraries = ''  # (change requires restart)
#local_preload_libraries = ''
#session_preload_libraries = ''
#jit_provider = 'llvmjit'  # JIT library to use

# - Other Defaults -

#dynamic_library_path = '$libdir'


#------------------------------------------------------------------------------
# LOCK MANAGEMENT
#------------------------------------------------------------------------------

#deadlock_timeout = 1s
#max_locks_per_transaction = 64  # min 10
# (change requires restart)
#max_pred_locks_per_transaction = 64  # min 10
# (change requires restart)
#max_pred_locks_per_relation = -2  # negative values mean
# (max_pred_locks_per_transaction
# / -max_pred_locks_per_relation) - 1
#max_pred_locks_per_page = 2  # min 0


#------------------------------------------------------------------------------
# VERSION AND PLATFORM COMPATIBILITY
#------------------------------------------------------------------------------

# - Previous PostgreSQL Versions -

#array_nulls = on
#backslash_quote = safe_encoding  # on, off, or safe_encoding
#escape_string_warning = on
#lo_compat_privileges = off
#operator_precedence_warning = off
#quote_all_identifiers = off
#standard_conforming_strings = on
#synchronize_seqscans = on

# - Other Platforms and Clients -

#transform_null_equals = off


#------------------------------------------------------------------------------
# ERROR HANDLING
#------------------------------------------------------------------------------

#exit_on_error = off  # terminate session on any error?
#restart_after_crash = on  # reinitialize after backend crash?
#data_sync_retry = off  # retry or panic on failure to fsync
# data?
# (change requires restart)


#------------------------------------------------------------------------------
# CONFIG FILE INCLUDES
#------------------------------------------------------------------------------

# These options allow settings to be loaded from files other than the
# default postgresql.conf. Note that these are directives, not variable
# assignments, so they can usefully be given more than once.

#include_dir = '...'  # include files ending in '.conf' from
# a directory, e.g., 'conf.d'
#include_if_exists = '...'  # include file only if it exists
#include = '...'  # include file


#------------------------------------------------------------------------------
# CUSTOMIZED OPTIONS
#------------------------------------------------------------------------------

# Add settings for extensions here
```
**docker-images/postgres-12-alpine/rootfs/patch-conf.sh** (new executable file, 18 lines)
```bash
#!/bin/bash

# In Wolfi, unix_socket_directories defaults to /tmp. In previous Alpine images, this defaulted to /var/run/postgres.
# /tmp may not be writable, so any existing postgresql.conf configs that predate the Wolfi migration should be patched to update this setting.

CONFIG_DIR=${PGDATA:-/data/pgdata-12}

conf_file="$CONFIG_DIR/postgresql.conf"
new_socket_dir="/var/run/postgresql"

# Check if the parameter already exists in the file
if grep -q "^\s*unix_socket_directories" "$conf_file"; then
  echo "unix_socket_directories already exists in $conf_file"
else
  # Append the setting to the end of the file
  echo "unix_socket_directories = '$new_socket_dir'" >>"$conf_file"
  echo "Updated unix_socket_directories in $conf_file"
fi
```
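A hedged way to check the result inside a running container — the user and database names come from the image's `POSTGRES_USER=sg`/`POSTGRES_DB=sg` defaults above, and the paths assume the default `PGDATA`:

```bash
# The appended setting should now be present in the active config...
grep '^unix_socket_directories' /data/pgdata-12/postgresql.conf

# ...and psql can connect through the relocated socket directory
# (passing a directory path to -h selects a Unix-socket connection).
psql -h /var/run/postgresql -U sg -d sg -c 'SHOW unix_socket_directories;'
```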
```diff
@@ -21,6 +21,7 @@ if [ ! -s "$PGDATA/PG_VERSION" ]; then
 fi
 
 /conf.sh
+/patch-conf.sh
 
 if [ ! -s "${REINDEX_COMPLETED_FILE}" ]; then
   echo "[INFO] Re-creating all indexes for database '$POSTGRES_DB'"
```
```diff
@@ -6,7 +6,7 @@ cd "$(dirname "${BASH_SOURCE[0]}")"
 export BASE_IMAGE="gke.gcr.io/prometheus-engine/prometheus:v2.35.0-gmp.2-gke.0"
 export IMAGE="${IMAGE:-sourcegraph/prometheus-gcp}"
 
-if [[ "$DOCKER_BAZEL" == "true" ]]; then
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
   ../prometheus/build-bazel.sh
 else
   ../prometheus/build.sh
```
```diff
@@ -44,14 +44,7 @@ pushd "$BUILDDIR"
 # https://github.com/sourcegraph/sourcegraph/pull/11832#discussion_r451109637
 chmod -R 777 config
 
-# Enable image build caching via CACHE=true
-BUILD_CACHE="--no-cache"
-if [[ "$CACHE" == "true" ]]; then
-  BUILD_CACHE=""
-fi
-
-# shellcheck disable=SC2086
-docker build ${BUILD_CACHE} -f Dockerfile.wolfi -t "${IMAGE:-sourcegraph/prometheus}" . \
+docker build -f Dockerfile.wolfi -t "${IMAGE:-sourcegraph/prometheus}" . \
   --progress=plain \
   --build-arg BASE_IMAGE \
   --build-arg COMMIT_SHA \
```
```diff
@@ -10,6 +10,24 @@ cleanup() {
 }
 trap cleanup EXIT
 
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+  ./dev/ci/bazel.sh build //dev/sg
+
+  out=$(./dev/ci/bazel.sh cquery //dev/sg --output=files)
+
+  cp "$out" "$OUTPUT"
+
+  echo "--- docker build $IMAGE"
+  # TODO: Move to dev/sg/Dockerfile
+  docker build -f docker-images/sg/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+
+  exit $?
+fi
+
 # Environment for building linux binaries
 export GO111MODULE=on
 export GOARCH=amd64
```
```diff
@@ -21,6 +39,7 @@ pkg="github.com/sourcegraph/sourcegraph/dev/sg"
 go build -trimpath -ldflags "-X main.BuildCommit=$BUILD_COMMIT" -o "$OUTPUT/sg" -buildmode exe "$pkg"
 
 echo "--- docker build $IMAGE"
+# TODO: Move to dev/sg/Dockerfile
 docker build -f docker-images/sg/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
   --progress=plain \
   --build-arg COMMIT_SHA \
```
```diff
@@ -10,7 +10,7 @@ cleanup() {
 }
 trap cleanup EXIT
 
-if [[ "$DOCKER_BAZEL" == "true" ]]; then
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
   ./dev/ci/bazel.sh build //dev/sg
 
   out=$(./dev/ci/bazel.sh cquery //dev/sg --output=files)
```
```diff
@@ -15,6 +15,19 @@ export GOARCH=amd64
 export GOOS=linux
 export CGO_ENABLED=0
 
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+  ./dev/ci/bazel.sh build //enterprise/cmd/batcheshelper
+  out=$(./dev/ci/bazel.sh cquery //enterprise/cmd/batcheshelper --output=files)
+  cp "$out" "$OUTPUT"
+
+  docker build -f enterprise/cmd/batcheshelper/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+  exit $?
+fi
+
 pkg="github.com/sourcegraph/sourcegraph/enterprise/cmd/batcheshelper"
 go build -trimpath -ldflags "-X github.com/sourcegraph/sourcegraph/internal/version.version=$VERSION -X github.com/sourcegraph/sourcegraph/internal/version.timestamp=$(date +%s)" -buildmode exe -tags dist -o "$OUTPUT/$(basename $pkg)" "$pkg"
```
```diff
@@ -10,6 +10,20 @@ cleanup() {
 }
 trap cleanup EXIT
 
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+
+  ./dev/ci/bazel.sh build //enterprise/cmd/embeddings
+  out=$(./dev/ci/bazel.sh cquery //enterprise/cmd/embeddings --output=files)
+  cp "$out" "$OUTPUT"
+
+  docker build -f enterprise/cmd/embeddings/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+  exit $?
+fi
+
 # Environment for building linux binaries
 export GO111MODULE=on
 export GOARCH=amd64
```
```diff
@@ -38,4 +38,3 @@ docker build -f enterprise/cmd/embeddings/Dockerfile -t "$IMAGE" "$OUTPUT" \
   --build-arg COMMIT_SHA \
   --build-arg DATE \
   --build-arg VERSION
```
```diff
@@ -10,6 +10,20 @@ cleanup() {
 }
 trap cleanup EXIT
 
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+  ./dev/ci/bazel.sh build //enterprise/cmd/executor
+  out=$(./dev/ci/bazel.sh cquery //enterprise/cmd/executor --output=files)
+  cp "$out" "$OUTPUT"
+
+  docker build -f enterprise/cmd/executor-kubernetes/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+
+  exit $?
+fi
+
 # Environment for building linux binaries
 export GO111MODULE=on
 export GOARCH=amd64
```
```diff
@@ -12,12 +12,8 @@ cleanup() {
 trap cleanup EXIT
 
 if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
-  bazel build //enterprise/cmd/frontend \
-    --stamp \
-    --workspace_status_command=./dev/bazel_stamp_vars.sh \
-    --platforms=@io_bazel_rules_go//go/toolchain:linux_amd64
-
-  out=$(bazel cquery //enterprise/cmd/frontend --output=files)
+  ./dev/ci/bazel.sh build //enterprise/cmd/frontend
+  out=$(./dev/ci/bazel.sh cquery //enterprise/cmd/frontend --output=files)
   cp "$out" "$OUTPUT"
 
   docker build -f enterprise/cmd/frontend/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
```
```diff
@@ -12,6 +12,19 @@ cleanup() {
 
 trap cleanup EXIT
 
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+  ./dev/ci/bazel.sh build //enterprise/cmd/gitserver
+  out=$(./dev/ci/bazel.sh cquery //enterprise/cmd/gitserver --output=files)
+  cp "$out" "$OUTPUT"
+
+  docker build -f enterprise/cmd/gitserver/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+  exit $?
+fi
+
 # Environment for building linux binaries
 export GO111MODULE=on
 export GOARCH=amd64
```
```diff
@@ -11,6 +11,19 @@ cleanup() {
 }
 trap cleanup EXIT
 
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+  ./dev/ci/bazel.sh build //enterprise/cmd/precise-code-intel-worker
+  out=$(./dev/ci/bazel.sh cquery //enterprise/cmd/precise-code-intel-worker --output=files)
+  cp "$out" "$OUTPUT"
+
+  docker build -f enterprise/cmd/precise-code-intel-worker/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+  exit $?
+fi
+
 # Environment for building linux binaries
 export GO111MODULE=on
 export GOARCH=amd64
```
**enterprise/cmd/symbols/go-build-wolfi.sh** (new executable file, 8 lines)
```bash
#!/usr/bin/env bash

cd "$(dirname "${BASH_SOURCE[0]}")/../../.."
set -eu

env \
  PKG=github.com/sourcegraph/sourcegraph/enterprise/cmd/symbols \
  cmd/symbols/go-build-wolfi.sh "$@"
```
```diff
@@ -11,6 +11,20 @@ cleanup() {
 }
 trap cleanup EXIT
 
+if [[ "${DOCKER_BAZEL:-false}" == "true" ]]; then
+  ./dev/ci/bazel.sh build //enterprise/cmd/worker
+
+  out=$(./dev/ci/bazel.sh cquery //enterprise/cmd/worker --output=files)
+  cp "$out" "$OUTPUT"
+
+  docker build -f enterprise/cmd/worker/Dockerfile.wolfi -t "$IMAGE" "$OUTPUT" \
+    --progress=plain \
+    --build-arg COMMIT_SHA \
+    --build-arg DATE \
+    --build-arg VERSION
+  exit $?
+fi
+
 # Environment for building linux binaries
 export GO111MODULE=on
 export GOARCH=amd64
```
```diff
@@ -126,39 +126,39 @@ func GeneratePipeline(c Config) (*bk.Pipeline, error) {
 	// TODO: Just hardcode specific images initially
 	WolfiImagesOperations([]string{
 		"batcheshelper",
 		"blobstore",
 		"cadvisor",
 		"codeinsights-db",
 		"codeintel-db",
 		"embeddings",
 		"executor-kubernetes",
 		"frontend",
 		"github-proxy",
 		"gitserver",
 		"llm-proxy",
 		"loadtest",
 		"migrator",
 		"precise-code-intel-worker",
 		"repo-updater",
 		"searcher",
-		// "server",
 		"symbols",
 		"worker",
-		"blobstore",
-		"cadvisor",
-		"codeinsights-db",
-		"codeintel-db",
+		"indexed-searcher",
+		"jaeger-agent",
+		"jaeger-all-in-one",
+		"node-exporter",
+		"opentelemetry-collector",
+		"postgres-12-alpine",
+		"postgres_exporter",
+		"prometheus",
+		"prometheus-gcp",
+		"redis-cache",
+		"redis-store",
+		"redis_exporter",
+		"search-indexer",
+		"server",
+		"sg",
+		"syntax-highlighter",
 	}, c.Version,
 		c.candidateImageTag(),
 		(numUpdatedBaseImages > 0),
```
@ -49,4 +49,5 @@ tar zcf APKINDEX.tar.gz APKINDEX DESCRIPTION
melange sign-index --signing-key "$key_path/melange.rsa" APKINDEX.tar.gz

# Upload signed APKINDEX archive
gsutil -u "$GCP_PROJECT" cp APKINDEX.tar.gz "gs://$GCS_BUCKET/packages/$branch/$TARGET_ARCH/"
# Use no-cache to avoid index/packages getting out of sync
gsutil -u "$GCP_PROJECT" -h "Cache-Control:no-cache" cp APKINDEX.tar.gz "gs://$GCS_BUCKET/packages/$branch/$TARGET_ARCH/"

@ -45,6 +45,7 @@ for apk in "${apks[@]}"; do
  fi

  # TODO: Pass -n when on main to avoid accidental overwriting
  # no-cache to avoid index/packages getting out of sync
  echo " * Uploading package and index fragment to repo"
  gsutil -u "$GCP_PROJECT" cp "$apk" "$index_fragment" "$dest_path"
  gsutil -u "$GCP_PROJECT" -h "Cache-Control:no-cache" cp "$apk" "$index_fragment" "$dest_path"
done

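Uploading with Cache-Control:no-cache forces clients to revalidate against GCS, so the APKINDEX can't go stale relative to the packages uploaded beside it. One way to confirm the metadata took effect (bucket path and branch below are illustrative):

    gsutil stat "gs://$GCS_BUCKET/packages/main/x86_64/APKINDEX.tar.gz" | grep Cache-Control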
@ -1,8 +1,10 @@
contents:
  keyring:
    - https://packages.wolfi.dev/os/wolfi-signing.rsa.pub
    - https://storage.googleapis.com/package-repository/packages/melange.rsa.pub
  repositories:
    - https://packages.wolfi.dev/os
    - '@sourcegraph https://storage.googleapis.com/package-repository/packages/main'
  packages:
    ## Base set of packages included in sourcegraph/alpine base image
    - wolfi-baselayout
@ -17,7 +19,7 @@ contents:
    - bind-tools

    ## blobstore packages
    - openjdk-11
    - s3proxy@sourcegraph

accounts:
  groups:

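The `@sourcegraph` suffix is apk's tagged-repository syntax: a package spelled name@tag may only be resolved from the repository carrying that tag, which keeps Sourcegraph-built packages from shadowing upstream Wolfi ones. A rough hand-run equivalent, assuming an /etc/apk/repositories like the one this config implies:

    # /etc/apk/repositories (assumed):
    #   https://packages.wolfi.dev/os
    #   @sourcegraph https://storage.googleapis.com/package-repository/packages/main
    apk add s3proxy@sourcegraph   # resolves s3proxy only from the tagged repo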
@ -39,3 +39,5 @@ annotations:
  org.opencontainers.image.url: https://sourcegraph.com/
  org.opencontainers.image.source: https://github.com/sourcegraph/sourcegraph/
  org.opencontainers.image.documentation: https://docs.sourcegraph.com/

# MANUAL REBUILD: Tue May 17 15:17:00 BST 2023

@ -41,3 +41,5 @@ annotations:
  org.opencontainers.image.url: https://sourcegraph.com/
  org.opencontainers.image.source: https://github.com/sourcegraph/sourcegraph/
  org.opencontainers.image.documentation: https://docs.sourcegraph.com/

# MANUAL REBUILD: Fri Apr 28 17:05:52 BST 2023

@ -13,6 +13,7 @@ contents:
    - postgresql-12-oci-entrypoint
    - postgresql-12-contrib
    - libpq-12
    - glibc-locale-en

# TODO: Currently missing shadow package which would let us modify users and groups in the Dockerfile
# In the future, we can use accounts: and paths: directives to do that in this image

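Adding glibc-locale-en gives Postgres a real en_US.UTF-8 locale to initialize with. A hedged spot check against a built image, assuming the image tag below and that the image ships the locale utility:

    docker run --rm --entrypoint locale postgres-12-alpine:candidate -a | grep -i en_us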
@ -11,17 +11,18 @@ contents:
accounts:
  groups:
    - groupname: redis
      gid: 65532
      gid: 1000
  users:
    - username: redis
      uid: 65532
      uid: 999
      gid: 1000
  run-as: redis

paths:
  - path: /redis-data
    type: directory
    uid: 65532
    gid: 65532
    uid: 999
    gid: 1000
    permissions: 0o755

work-dir:

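The fix here pins redis to uid 999 / gid 1000, matching the IDs the pre-Wolfi Redis images used, so existing /redis-data volumes keep their ownership without a chown. A quick check against a built image (tag is a placeholder):

    docker run --rm redis-cache:candidate id
    # expected, given the accounts stanza above: uid=999(redis) gid=1000(redis)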
@ -20,6 +20,7 @@ contents:

    ## repo-updater packages
    - coursier@sourcegraph
    - p4cli@sourcegraph

accounts:
  groups:

@ -31,7 +31,6 @@ contents:
    - libev
    - libstdc++ # TODO: Is this still required?
    - nginx
    - nodejs-16 # TODO: Earliest version from Wolfi; test upgrade from 14.5.0 to 16.19.1
    - openjdk-11
    - openssh-client
    - pcre

@ -21,7 +21,7 @@ contents:

    ## symbols packages
    - ca-certificates
    - jansson # TODO: Need to add in ctags build environment
    - jansson
    - libstdc++ # TODO: For tree-sitter. Not an apk dependency
    - ctags@sourcegraph

@ -1,7 +1,10 @@
# Loosely based on the cadvisor Dockerfile, without support for ipmctl
# https://github.com/google/cadvisor/blob/master/deploy/Dockerfile
package:
  name: cadvisor
  # libpfm version may also need updating - check cadvisor Dockerfile
  version: 0.47.0
  epoch: 0
  epoch: 1
  description: "Analyzes resource usage and performance characteristics of running containers"
  target-architecture:
    - x86_64
@ -12,6 +15,10 @@ package:
  license: 'Apache License 2.0'
  dependencies:
    runtime:
      - device-mapper
      - findutils
      - ndctl
      - thin-provisioning-tools

environment:
  contents:
@ -23,14 +30,45 @@ environment:
      - wolfi-base
      - busybox
      - ca-certificates-bundle
      - build-base
      - cmake
      - device-mapper
      - findutils
      - git
      - go
      - linux-headers
      - ndctl-dev
      - thin-provisioning-tools

pipeline:
  # Fetch and build libpfm
  - uses: fetch
    with:
      uri: https://github.com/google/cadvisor/releases/download/v${{package.version}}/cadvisor-v${{package.version}}-linux-amd64
      expected-sha256: caf4491298e0702f9d0c6a1d1949767f5c6400f77e12cd3524d6d3fcc66abc2a
      extract: false
      uri: https://sourceforge.net/projects/perfmon2/files/libpfm4/libpfm-4.11.0.tar.gz
      expected-sha256: 5da5f8872bde14b3634c9688d980f68bda28b510268723cc12973eedbab9fecc
      strip-components: 0
  - runs: |
      pwd
      export DBG="-g -Wall"
      make -e -C libpfm-4.11.0
      make install -C libpfm-4.11.0

  # Check out cadvisor
  - uses: git-checkout
    with:
      repository: https://github.com/google/cadvisor
      tag: v${{package.version}}
      expected-commit: c7714a77f72d77936446cb45296a1ef305a06c11
      destination: /cadvisor
  # Build cadvisor
  - runs: |
      cd /cadvisor
      GO_FLAGS="-tags=libpfm,netgo" ./build/build.sh

  # Package libpfm and cadvisor
  - runs: |
      mkdir -p ${{targets.destdir}}/usr/bin/
      chmod +x cadvisor-v${{package.version}}-linux-amd64
      cp cadvisor-v${{package.version}}-linux-amd64 ${{targets.destdir}}/usr/bin/cadvisor
      mkdir -p ${{targets.destdir}}/usr/local/lib/
      cp /cadvisor/_output/cadvisor ${{targets.destdir}}/usr/bin/cadvisor
      cp /usr/local/lib/libpfm.so* ${{targets.destdir}}/usr/local/lib/

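Since cadvisor is now built with the libpfm tag against the locally compiled library, a useful smoke test in the final image is checking that the shared object actually resolves (assuming the package layout above):

    ldd /usr/bin/cadvisor | grep libpfm
    # should show /usr/local/lib/libpfm.so.4 rather than "not found"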
@ -1,7 +1,7 @@
package:
  name: comby
  version: 1.8.1
  epoch: 0
  epoch: 2
  description: "A code rewrite tool for structural search and replace that supports ~every language."
  target-architecture:
    - x86_64
@ -23,15 +23,44 @@ environment:
      - wolfi-base
      - busybox
      - ca-certificates-bundle
      - build-base
      - autoconf
      - pcre-dev
      - zlib-dev
      - m4
      - gmp-dev
      - libev-dev
      - sqlite-libs
      - sqlite-dev
      - wget
      # opam deps
      - posix-libc-utils
      - patch
      - bubblewrap

pipeline:
  - runs: |
      ls -al $HOME
  # Fetch Comby source
  - uses: fetch
    with:
      uri: https://github.com/comby-tools/comby/releases/download/${{package.version}}/comby-${{package.version}}-x86_64-linux.tar.gz
      expected-sha256: ec0ca6477822154d71033e0b0a724c23a0608b99028ecab492bc9876ae8c458a
      # TODO: Work out why we can't use fetch's extract: true
      uri: https://github.com/comby-tools/comby/archive/refs/tags/${{package.version}}.tar.gz
      expected-sha256: 04d51cf742bbbf5e5fda064a710be44537fac49bff598d0e9762a3a799d666e2
      strip-components: 0
  # Download and configure opam
  # Safe to use --disable-sandboxing as melange builds within a sandbox
  - runs: |
      tar zxvf comby-${{package.version}}-x86_64-linux.tar.gz
      wget https://github.com/ocaml/opam/releases/download/2.1.4/opam-2.1.4-x86_64-linux -O opam
      chmod +x opam
      mv opam /usr/bin
      opam init --disable-sandboxing
      opam switch create 4.11.0 4.11.0
      eval $(opam env --switch=4.11.0)
      cd comby-${{package.version}}/
      opam install . --deps-only --yes
      make
      make test
      make install
  - runs: |
      mkdir -p ${{targets.destdir}}/usr/bin/
      cp comby-${{package.version}}-x86_64-linux ${{targets.destdir}}/usr/bin/comby
      cp $HOME/.opam/4.11.0/bin/comby ${{targets.destdir}}/usr/bin/

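Building comby from source against pcre-dev is what fixes the libpcre path mismatch described in this PR: the binary now links against the pcre that Wolfi actually ships, where the published release expected a library path Wolfi doesn't provide. A quick confirmation inside the image:

    ldd /usr/bin/comby | grep pcre
    # every pcre entry should resolve to a Wolfi library path, none "not found"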
@ -4,7 +4,7 @@
package:
  name: coursier
  version: 2.0.13
  epoch: 0
  epoch: 1
  description: "Java dependency resolver"
  target-architecture:
    - x86_64

@ -3,7 +3,7 @@
package:
  name: ctags
  version: f95bb3497f53748c2b6afc7f298cff218103ab90
  epoch: 0
  epoch: 1
  description: "A maintained ctags implementation"
  target-architecture:
    - x86_64

@ -3,7 +3,7 @@
package:
  name: http-server-stabilizer
  version: 1.0.5
  epoch: 0
  epoch: 1
  description: "HTTP server stabilizer for unruly servers"
  target-architecture:
    - x86_64

@ -4,7 +4,7 @@
package:
  name: jaeger
  version: 1.42.0
  epoch: 0
  epoch: 1
  description: "Distributed Tracing Platform"
  target-architecture:
    - x86_64

@ -1,7 +1,7 @@
package:
  name: opentelemetry-collector
  version: 0.71.0 # Keep in sync with version in go.mod
  epoch: 0
  epoch: 1
  description: "Vendor-agnostic implementation on how to receive, process and export telemetry data"
  target-architecture:
    - x86_64
@ -41,6 +41,7 @@ pipeline:
        --config opentelemetry-collector/builder.yaml \
        --output-path=/tmp/otelcol-sourcegraph
  - name: Package collector
    # TODO: Change directory, but /bin is hardcoded into k8s manifests
    runs: |
      mkdir -p ${{targets.destdir}}/usr/bin/
      cp /tmp/otelcol-sourcegraph/otelcol-sourcegraph ${{targets.destdir}}/usr/bin/otelcol-sourcegraph
      mkdir -p ${{targets.destdir}}/bin/
      cp /tmp/otelcol-sourcegraph/otelcol-sourcegraph ${{targets.destdir}}/bin/otelcol-sourcegraph

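Moving the binary to /bin fixes the opentelemetry-collector path because the Kubernetes manifests invoke it by that absolute path. A hedged way to verify a candidate image, assuming the tag below and that the image keeps busybox:

    docker run --rm --entrypoint ls opentelemetry-collector:candidate -l /bin/otelcol-sourcegraph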
@ -3,7 +3,7 @@
package:
  name: p4-fusion
  version: 1.12
  epoch: 0
  epoch: 1
  description: "A fast Perforce to Git conversion tool"
  target-architecture:
    - x86_64
@ -56,7 +56,7 @@ pipeline:
  - uses: fetch
    with:
      uri: https://www.perforce.com/downloads/perforce/r22.1/bin.linux26x86_64/p4api.tgz
      expected-sha256: 82db09791758516ba2561d75c744f190a9562c8be26dc2cb96aea537e2a451d3
      expected-sha256: bca0ad96aac36a4d855aa49c278da7851d1b846e0cc82608c6f2f3a4d8af8ef3
      extract: false
  - runs: |
      mkdir -p p4-fusion-src/vendor/helix-core-api/linux

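The URL is unchanged but the hash moved, so Perforce evidently rebuilds p4api.tgz in place; the pinned checksum has to track the upstream file. The new expected-sha256 can be reproduced with:

    curl -fsSL https://www.perforce.com/downloads/perforce/r22.1/bin.linux26x86_64/p4api.tgz | sha256sum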
@ -1,7 +1,7 @@
package:
  name: p4cli
  version: 22.2
  epoch: 0
  epoch: 1
  description: "Command line interface for Perforce"
  target-architecture:
    - x86_64

@ -1,7 +1,7 @@
package:
  name: redis_exporter
  version: 1.35.0
  epoch: 0
  epoch: 1
  description: "Prometheus Exporter for Redis Metrics"
  target-architecture:
    - x86_64

wolfi-packages/s3proxy.yaml (new file, 43 lines)
@ -0,0 +1,43 @@
package:
  name: s3proxy
  version: 2.0.0
  epoch: 1
  description: "Access other storage backends via the S3 API"
  target-architecture:
    - x86_64
  copyright:
    - paths:
        - "*"
      attestation: ''
      license: 'Apache License 2.0'
  dependencies:
    runtime:
      - openjdk-11
      - openjdk-11-default-jvm # Set Java 11 as default JVM

environment:
  contents:
    repositories:
      - https://packages.wolfi.dev/os
    keyring:
      - https://packages.wolfi.dev/os/wolfi-signing.rsa.pub
    packages:
      - wolfi-base
      - busybox
      - ca-certificates-bundle
      - maven
      - openjdk-11
      - openjdk-11-default-jvm

pipeline:
  - uses: fetch
    with:
      uri: https://github.com/sourcegraph/s3proxy/archive/refs/tags/s3proxy-${{package.version}}.tar.gz
      expected-sha256: e2d3f8f217d67ab8cc074490f27b4d649f4ec73f5bf540aa9da1ad4dda818d0b
      extract: true
  - runs: |
      JAVA_HOME=/usr/lib/jvm/java-11-openjdk/ mvn package -DskipTests
  - runs: |
      mkdir -p ${{targets.destdir}}/opt/
      cp -r target/ ${{targets.destdir}}/opt/s3proxy
      cp src/main/resources/run-docker-container.sh ${{targets.destdir}}/opt/s3proxy
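A definition like this is built with melange and then uploaded by the scripts patched above. A minimal local sketch, assuming a signing key generated via melange keygen:

    melange keygen melange.rsa
    melange build wolfi-packages/s3proxy.yaml --arch x86_64 --signing-key melange.rsa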