Mirror of https://github.com/sourcegraph/sourcegraph.git (synced 2026-02-06 14:11:44 +00:00)

ci: remove outdated scripts (#59666)

This commit is contained in commit 551fae4cb3 (parent 53f57d0fe7).
@@ -1,13 +0,0 @@
#!/usr/bin/env bash

# This script runs the backend integration tests against a candidate server image.

cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)
set -ex

echo "--- test.sh"

# Backend integration tests require a GitHub Enterprise Token
GITHUB_TOKEN=$GHE_GITHUB_TOKEN
GITHUB_TOKEN=$GITHUB_TOKEN ./dev/ci/integration/run-integration.sh "${root_dir}/dev/ci/integration/backend/test.sh"
@@ -1,17 +0,0 @@
#!/usr/bin/env bash

# This script runs the backend integration tests against a running server.

cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
set -e

URL="${1:-"http://localhost:7080"}"

echo '--- integration test ./dev/gqltest -long'
bazel run //dev/gqltest:gqltest_test -- -long -base-url "$URL"

echo '--- sleep 5s to wait for site configuration to be restored from gqltest'
sleep 5

echo '--- integration test ./dev/authtest -long'
bazel run //dev/authtest:authtest_test -- -long -base-url "$URL" -email "gqltest@sourcegraph.com" -username "gqltest-admin"
@@ -1,4 +0,0 @@
#!/usr/bin/env bash

PID=$(pgrep ffmpeg)
kill "$PID"
@@ -1,15 +0,0 @@
#!/usr/bin/env bash
set -euxo pipefail

cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)
set -ex

echo "--- set up deploy-sourcegraph"
test_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)""
git clone --depth 1 \
  https://github.com/sourcegraph/deploy-sourcegraph.git \
  "$test_dir/deploy-sourcegraph"

echo "--- test.sh"
"${root_dir}"/dev/ci/integration/cluster/test.sh
@@ -1,13 +0,0 @@
# base/sourcegraph.StorageClass.yaml
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
  name: sourcegraph
  labels:
    deploy: sourcegraph
provisioner: pd.csi.storage.gke.io
parameters:
  type: pd-balanced
volumeBindingMode: WaitForFirstConsumer
reclaimPolicy: Delete
allowVolumeExpansion: true
@@ -1,128 +0,0 @@
#!/usr/bin/env bash
set -euo pipefail

# setup DIR for easier pathing test dir
test_dir=$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)""

# cd to repo root
root_dir="$(dirname "${BASH_SOURCE[0]}")/../../../.."
cd "$root_dir"
root_dir=$(pwd)

export NAMESPACE="cluster-ci-$BUILDKITE_BUILD_NUMBER-$BUILDKITE_JOB_ID"

# Capture information about the state of the test cluster
function cluster_capture_state() {
  # Get some more verbose information about what is running.
  set -x

  echo "--- dump diagnostics"
  # The reason we have the grep here and filter out the otel-agents in Pending state is due to how otel-agents
  # are scheduled. The otel agent is deployed using a DaemonSet, which means on every node, k8s will schedule an
  # otel-agent, even if the node isn't running anything else. For this QA scenario we don't want to run anything
  # more than what we want, so if you look in deploy-sourcegraph/overlays/otel-agent-patch.yaml you'll see
  # the otel-agent DaemonSet is patched with a podAffinity. PodAffinity ensures that a Pod will only be scheduled
  # on a node that matches a certain condition; if the Pod doesn't match, its status will be PENDING - hence we filter them
  # out
  # Get overview of all pods
  kubectl get pods | grep -v -e "otel-agent-.*Pending"

  # Get specifics of pods
  kubectl describe pods >"$root_dir/describe_pods.log" 2>&1

  # Get logs for some deployments
  IFS=' ' read -ra deployments <<< "$(kubectl get deployments -o=jsonpath='{.items[*].metadata.name}')"
  for dep in "${deployments[@]}"; do
    kubectl logs "deployment/$dep" --all-containers >"$root_dir/$dep.log" 2>&1
  done
  set +x
}

# Cleanup the cluster
function cluster_cleanup() {
  cluster_capture_state || true
  kubectl delete namespace "$NAMESPACE"
}

function cluster_setup() {
  gcloud container clusters get-credentials default-buildkite --zone=us-central1-c --project=sourcegraph-ci

  echo "--- create namespace"
  kubectl create ns "$NAMESPACE" -oyaml --dry-run=client | kubectl apply -f -
  trap cluster_cleanup exit

  echo "--- create storageclass"
  kubectl apply -f "$test_dir/storageClass.yaml"
  kubectl config set-context --current --namespace="$NAMESPACE"
  kubectl config current-context
  echo "--- wait for namespace to come up and check pods"
  sleep 15 # wait for namespace to come up
  kubectl get -n "$NAMESPACE" pods

  echo "--- rewrite manifests"
  pushd "$test_dir/deploy-sourcegraph"
  set +e
  set +o pipefail
  # See $DOCKER_CLUSTER_IMAGES_TXT in pipeline-steps.go for env var
  # replace all docker image tags with previously built candidate images
  while IFS= read -r line; do
    echo "$line"
    grep -lr './base/' -e "index.docker.io/sourcegraph/$line" --include \*.yaml | xargs sed -i -E "s#index.docker.io/sourcegraph/$line:.*#us.gcr.io/sourcegraph-dev/$line:$CANDIDATE_VERSION#g"
  done < <(printf '%s\n' "$DOCKER_CLUSTER_IMAGES_TXT")

  echo "--- create cluster"
  ./overlay-generate-cluster.sh low-resource generated-cluster
  kubectl apply -n "$NAMESPACE" --recursive --validate -f generated-cluster
  popd
  echo "--- wait for ready"
  sleep 15 # add in a small wait for all pods to be rolled out by the replication controller
  kubectl get pods -n "$NAMESPACE"
  time kubectl wait --for=condition=Ready -l app=sourcegraph-frontend pod --timeout=5m -n "$NAMESPACE"
  set -e
  set -o pipefail
}

function test_setup() {

  set +x +u
  # shellcheck disable=SC1091
  source /root/.profile

  dev/ci/integration/setup-deps.sh

  sleep 15
  export SOURCEGRAPH_BASE_URL="http://sourcegraph-frontend.$NAMESPACE.svc.cluster.local:30080"
  curl "$SOURCEGRAPH_BASE_URL"

  # setup admin users, etc
  pushd internal/cmd/init-sg
  go build
  ./init-sg initSG -baseurl="$SOURCEGRAPH_BASE_URL"
  popd

  # Load variables set up by init-server, disabling `-x` to avoid printing variables, setting +u to avoid blowing up on unbound ones
  set +x +u
  # shellcheck disable=SC1091
  source /root/.sg_envrc
  set -u

  echo "--- TEST: Checking Sourcegraph instance is accessible"

  curl --fail "$SOURCEGRAPH_BASE_URL"
  curl --fail "$SOURCEGRAPH_BASE_URL/healthz"
}

function e2e() {
  pushd client/web
  echo "$SOURCEGRAPH_BASE_URL"
  echo "--- TEST: Running tests"
  pnpm run test:regression:core
  popd
}

# main
cluster_setup
test_setup
set +o pipefail
# special exit code to capture e2e failures
e2e || exit 123
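The "rewrite manifests" loop above depends on $DOCKER_CLUSTER_IMAGES_TXT from pipeline-steps.go, which is not part of this diff. The sketch below is an illustration only: the image names, manifest line, and CANDIDATE_VERSION are made up, not real CI values; it just shows the shape of the tag substitution.

#!/usr/bin/env bash
# Illustration only: hypothetical image names and candidate tag, not real CI values.
set -euo pipefail

CANDIDATE_VERSION="123456_2024-01-01_abcdef0"
DOCKER_CLUSTER_IMAGES_TXT=$'frontend\ngitserver'

# A sample line as it might appear in a deploy-sourcegraph manifest.
manifest_line='image: index.docker.io/sourcegraph/frontend:insiders'

while IFS= read -r line; do
  # Same substitution shape as the loop above: swap the published tag for the
  # candidate image built earlier in this CI run.
  manifest_line=$(sed -E "s#index.docker.io/sourcegraph/$line:.*#us.gcr.io/sourcegraph-dev/$line:$CANDIDATE_VERSION#g" <<<"$manifest_line")
done < <(printf '%s\n' "$DOCKER_CLUSTER_IMAGES_TXT")

echo "$manifest_line" # image: us.gcr.io/sourcegraph-dev/frontend:123456_2024-01-01_abcdef0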
@@ -1,31 +0,0 @@
#!/usr/bin/env bash

# This script is called by test.sh to install an up-to-date
# version of src-cli as required by the codeintel-qa pipeline. The target binary
# is installed to {REPO_ROOT}/.bin/src.

set -eux
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir="$(pwd)"

# By default, use a version of src-cli that builds with 1.19.8
VERSION=${1:-'58b3f701691cbdbd10b54161d9bfca88b781480d'}

TEMP=$(mktemp -d -t sgdockerbuild_XXXXXXX)
cleanup() {
  rm -rf "${TEMP}"
}
trap cleanup EXIT

# TODO: migrate upstream to bazel
# bazel build @com_github_sourcegraph_src-cli//cmd/src:src
# out=$(bazel cquery @com_github_sourcegraph_src-cli//cmd/src:src --output=files)
# cp "$out" "$root_dir/.bin/src"

git clone git@github.com:sourcegraph/src-cli.git "${TEMP}" --depth 1
pushd "${TEMP}"
git fetch origin "${VERSION}" --depth 1
git checkout "${VERSION}"
mkdir -p "${root_dir}/.bin"
go build -o "${root_dir}/.bin" ./cmd/src
popd
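The script above drops the binary into {REPO_ROOT}/.bin/src but does not modify PATH itself. As an illustrative assumption (not part of this commit), a caller would typically make that directory win over any globally installed src along these lines:

# Hypothetical caller-side setup, assuming the current directory is the repo root.
export PATH="$(pwd)/.bin:$PATH"
which src   # should now resolve to the locally built binary
src version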
@@ -1,16 +0,0 @@
#!/usr/bin/env bash

# This script runs the codeintel-qa test utility against a candidate server image.

cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)
set -ex

echo "--- :terminal: Installing src-cli latest release"
curl -L https://sourcegraph.com/.api/src-cli/src_linux_amd64 -o /usr/local/bin/src
chmod +x /usr/local/bin/src
src version

echo "--- :spiral_note_pad: test.sh"
export IMAGE="us.gcr.io/sourcegraph-dev/server:${CANDIDATE_VERSION}"
./dev/ci/integration/run-integration.sh "${root_dir}/dev/ci/integration/code-intel/test.sh"
@@ -1,49 +0,0 @@
#!/usr/bin/env bash

# This script runs the codeintel-qa tests against a running server.
# This script is invoked by ./dev/ci/integration/run-integration.sh after running an instance.

set -eux
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)

SOURCEGRAPH_BASE_URL="${1:-"http://localhost:7080"}"
export SOURCEGRAPH_BASE_URL

echo '--- :go: Building init-sg'
bazel build //internal/cmd/init-sg
out=$(bazel cquery //internal/cmd/init-sg --output=files)
cp "$out" "$root_dir/"

echo '--- Initializing instance'
"$root_dir/init-sg" initSG

echo '--- Loading secrets'
set +x # Avoid printing secrets
# shellcheck disable=SC1091
source /root/.sg_envrc
set -x

echo '--- :horse: Running init-sg addRepos'
"${root_dir}/init-sg" addRepos -config ./dev/ci/integration/code-intel/repos.json

echo '--- Installing local src-cli'
./dev/ci/integration/code-intel/install-src.sh
which src
src version

echo '--- :brain: Running the test suite'
pushd dev/codeintel-qa

echo '--- :zero: downloading test data from GCS'
bazel run //dev/codeintel-qa/cmd/download

echo '--- :one: clearing existing state'
bazel run //dev/codeintel-qa/cmd/clear

echo '--- :two: integration test ./dev/codeintel-qa/cmd/upload'
bazel run //dev/codeintel-qa/cmd/upload -- --timeout=5m --index-dir="$root_dir/dev/codeintel-qa/testdata/indexes"

echo '--- :three: integration test ./dev/codeintel-qa/cmd/query'
bazel run //dev/codeintel-qa/cmd/query -- --index-dir="$root_dir/dev/codeintel-qa/testdata/indexes"
popd
@@ -1,24 +0,0 @@
#!/usr/bin/env bash

# shellcheck disable=SC1091
source /root/.profile
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)
set -ex

set -ex

dev/ci/integration/setup-deps.sh
dev/ci/integration/setup-display.sh

cleanup() {
  cd "$root_dir"
  dev/ci/integration/cleanup-display.sh
}
trap cleanup EXIT

# ==========================

echo "--- test.sh"
export IMAGE=${IMAGE:-"us.gcr.io/sourcegraph-dev/server:$CANDIDATE_VERSION"}
./dev/ci/integration/run-integration.sh "${root_dir}/dev/ci/integration/e2e/test.sh"
@@ -1,14 +0,0 @@
#!/usr/bin/env bash

cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
set -e

# URL="${1:-"http://localhost:7080"}"

echo "--- bazel test e2e"
bazel \
  --bazelrc=.bazelrc \
  --bazelrc=.aspect/bazelrc/ci.bazelrc \
  --bazelrc=.aspect/bazelrc/ci.sourcegraph.bazelrc \
  test \
  //client/web/src/end-to-end:e2e
@@ -1,27 +0,0 @@
#!/usr/bin/env bash

cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)
set -ex

dev/ci/integration/setup-deps.sh
dev/ci/integration/setup-display.sh

cleanup() {
  cd "$root_dir"
  dev/ci/integration/cleanup-display.sh
}
trap cleanup EXIT

# ==========================

echo "--- Running QA tests"

echo "--- test.sh"
export IMAGE=${IMAGE:-"us.gcr.io/sourcegraph-dev/server:$CANDIDATE_VERSION"}
set +x
# Hotfix (Owner: @jhchabran)
GITHUB_TOKEN="$(gcloud secrets versions access latest --secret=QA_GITHUB_TOKEN --quiet --project=sourcegraph-ci)"
export GITHUB_TOKEN
set -x
./dev/ci/integration/run-integration.sh "${root_dir}/dev/ci/integration/qa/test.sh"
@@ -1,30 +0,0 @@
#!/usr/bin/env bash

export SOURCEGRAPH_BASE_URL="${1:-"http://localhost:7080"}"

# shellcheck disable=SC1091
source /root/.profile
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."

set -e

echo "--- init sourcegraph"
pushd internal/cmd/init-sg
go build
./init-sg initSG
popd
# Load variables set up by init-server, disabling `-x` to avoid printing variables
set +x
# shellcheck disable=SC1091
source /root/.sg_envrc

echo "--- TEST: Checking Sourcegraph instance is accessible"
curl -f http://localhost:7080
curl -f http://localhost:7080/healthz
echo "--- TEST: Running tests"
# Run all tests, and error if one fails
test_status=0
pushd client/web
pnpm run test:regression || test_status=1
popd
exit $test_status
@@ -1,16 +0,0 @@
#!/usr/bin/env bash

# Use the shared volume in between dind and the agent to host the data, so we can delete it afterward.
export DATA="/mnt/tmp/sourcegraph-data"

echo Y | ./dev/run-server-image.sh -d --name sourcegraph

SOURCEGRAPH_BASE_URL="http://localhost:7080"
echo "--- Waiting for $SOURCEGRAPH_BASE_URL to be up"
set +e
timeout 120s bash -c "until curl --output /dev/null --silent --head --fail $SOURCEGRAPH_BASE_URL; do
  echo Waiting 5s for $SOURCEGRAPH_BASE_URL...
  sleep 5
done"

echo "--- Up, running tests..."
@@ -1,87 +0,0 @@
#!/usr/bin/env bash

# This script sets up a Sourcegraph instance for integration testing. This script expects to be
# passed a path to a bash script that runs the actual tests against a running instance. The passed
# script will be passed a single parameter: the target URL from which the instance is accessible.

cd "$(dirname "${BASH_SOURCE[0]}")/../../.."
root_dir=$(pwd)
set -ex

if [ -z "$IMAGE" ]; then
  echo "Must specify \$IMAGE."
  exit 1
fi

URL="http://localhost:7080"

# In CI, provide a directory and container name unique to this job
IDENT=${BUILDKITE_JOB_ID:-$(openssl rand -hex 12)}
export DATA="/tmp/sourcegraph-data-${IDENT}"
export CONTAINER="sourcegraph-${IDENT}"

function docker_cleanup() {
  echo "--- docker cleanup"
  if [[ $(docker ps -aq | wc -l) -gt 0 ]]; then
    # shellcheck disable=SC2046
    docker rm -f $(docker ps -aq)
  fi
  if [[ $(docker images -q | wc -l) -gt 0 ]]; then
    # shellcheck disable=SC2046
    docker rmi -f $(docker images -q)
  fi
  docker volume prune -f

  echo "--- Deleting $DATA"
  rm -rf "$DATA"
}

# Do a pre-run cleanup
docker_cleanup

function cleanup() {
  exit_status=$?
  if [ $exit_status -ne 0 ]; then
    # Expand the output if our run failed.
    echo "^^^ +++"
  fi

  echo "--- dump server logs"
  docker logs --timestamps "$CONTAINER" >"$root_dir/server.log" 2>&1

  echo "--- $CONTAINER cleanup"
  docker container rm -f "$CONTAINER"
  docker image rm -f "$IMAGE"

  docker_cleanup

  if [ $exit_status -ne 0 ]; then
    # This command will fail, so our last step will be expanded. We don't want
    # to expand "docker cleanup" so we add in a dummy section.
    echo "--- integration test failed"
    echo "See integration test section for test runner logs, and uploaded artifacts for server logs."
  fi
}
trap cleanup EXIT

echo "--- Running a daemonized $IMAGE as the test subject..."
CLEAN="true" ALLOW_SINGLE_DOCKER_CODE_INSIGHTS="true" "${root_dir}"/dev/run-server-image.sh -d --name "$CONTAINER"

echo "--- Waiting for $URL to be up"
set +e
timeout 120s bash -c "until curl --output /dev/null --silent --head --fail $URL; do
  echo Waiting 5s for $URL...
  sleep 5
done"
# shellcheck disable=SC2181
if [ $? -ne 0 ]; then
  echo "^^^ +++"
  echo "$URL was not accessible within 120s."
  docker inspect "$CONTAINER"
  exit 1
fi
set -e
echo "Waiting for $URL... done"

# Run tests against instance
"${1}" "${URL}"
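run-integration.sh invokes the script it is given with the instance URL as the only argument. A minimal sketch of a conforming test script (hypothetical, not one of the files in this commit):

#!/usr/bin/env bash
# Hypothetical example of the contract described above: $1 is the target URL,
# and a non-zero exit marks the integration step as failed.
set -euo pipefail

URL="${1:?usage: $0 <sourcegraph-url>}"

echo "--- smoke test against $URL"
curl --fail --silent --output /dev/null "$URL/healthz"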
@@ -1,6 +0,0 @@
#!/usr/bin/env bash

set -euo pipefail

curl -L https://sourcegraph.com/.api/src-cli/src_linux_amd64 -o /usr/local/bin/src
chmod +x /usr/local/bin/src
@@ -1,5 +0,0 @@
#!/usr/bin/env bash

Xvfb "$DISPLAY" -screen 0 1280x1024x24 &
x11vnc -display "$DISPLAY" -forever -rfbport 5900 >/x11vnc.log 2>&1 &
ffmpeg -y -f x11grab -video_size 1280x1024 -i "$DISPLAY" -pix_fmt yuv420p qatest.mp4 >ffmpeg.log 2>&1 &
@@ -1,37 +0,0 @@
#!/usr/bin/env bash

# shellcheck disable=SC1091
source /root/.profile
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)

set -ex

# Install dependencies for upgrade test script
pnpm install --frozen-lockfile
pnpm generate

dev/ci/integration/setup-deps.sh
dev/ci/integration/setup-display.sh

cleanup() {
  cd "$root_dir"
  dev/ci/integration/cleanup-display.sh

  # We don't use run-integration so make sure we stop/remove the containers
  echo "--- docker cleanup"
  if [[ $(docker ps -aq | wc -l) -gt 0 ]]; then
    # shellcheck disable=SC2046
    docker rm -f $(docker ps -aq)
  fi
  if [[ $(docker images -q | wc -l) -gt 0 ]]; then
    # shellcheck disable=SC2046
    docker rmi -f $(docker images -q)
  fi
}
trap cleanup EXIT

# ==========================

echo "--- test.sh"
"${root_dir}"/dev/ci/integration/upgrade/test.sh
@@ -1,95 +0,0 @@
#!/usr/bin/env bash

# shellcheck disable=SC1091
source /root/.profile
cd "$(dirname "${BASH_SOURCE[0]}")/../../../.."
root_dir=$(pwd)
set -e

URL="${1:-"http://localhost:7080"}"

# In CI, provide a directory and container name unique to this job
IDENT=${BUILDKITE_JOB_ID:-$(openssl rand -hex 12)}
export DATA="/tmp/sourcegraph-data-${IDENT}"

cleanup() {
  echo "--- dump server logs"
  docker logs --timestamps "$CONTAINER" >"$root_dir/$CONTAINER.log" 2>&1

  echo "--- Deleting $DATA"
  rm -rf "$DATA"
}

trap cleanup EXIT

# Run and initialize an old Sourcegraph release
echo "--- start sourcegraph $MINIMUM_UPGRADEABLE_VERSION"
CONTAINER="sourcegraph-old-${IDENT}"
IMAGE=sourcegraph/server:$MINIMUM_UPGRADEABLE_VERSION CLEAN="true" ./dev/run-server-image.sh -d --name "$CONTAINER"
sleep 15
pushd internal/cmd/init-sg
go build
./init-sg initSG
popd
# shellcheck disable=SC1091
source /root/.sg_envrc

SOURCEGRAPH_REPORTED_VERSION_OLD=$(curl -fs "$URL/__version")
echo
echo "--- Sourcegraph instance (before upgrade) is reporting version: '$SOURCEGRAPH_REPORTED_VERSION_OLD'"

# Stop old Sourcegraph release
docker container stop "$CONTAINER"
sleep 5

# Migrate DB if on version < 3.27.0
regex="3\.26\.[0-9]"
OLD=11
NEW=12
SRC_DIR=/tmp/sourcegraph
if [[ $MINIMUM_UPGRADEABLE_VERSION =~ $regex ]]; then
  docker run \
    -w /tmp/upgrade \
    -v "$SRC_DIR/data/postgres-$NEW-upgrade:/tmp/upgrade" \
    -v "$SRC_DIR/data/postgresql:/var/lib/postgresql/$OLD/data" \
    -v "$SRC_DIR/data/postgresql-$NEW:/var/lib/postgresql/$NEW/data" \
    "tianon/postgres-upgrade:$OLD-to-$NEW"

  mv "$SRC_DIR/data/"{postgresql,postgresql-$OLD}
  mv "$SRC_DIR/data/"{postgresql-$NEW,postgresql}

  curl -fsSL -o "$SRC_DIR/data/postgres-$NEW-upgrade/optimize.sh" https://raw.githubusercontent.com/sourcegraph/sourcegraph/master/cmd/server/rootfs/postgres-optimize.sh

  docker run \
    --entrypoint "/bin/bash" \
    -w /tmp/upgrade \
    -v "$SRC_DIR/data/postgres-$NEW-upgrade:/tmp/upgrade" \
    -v "$SRC_DIR/data/postgresql:/var/lib/postgresql/data" \
    "postgres:$NEW" \
    -c 'chown -R postgres $PGDATA . && gosu postgres bash ./optimize.sh $PGDATA'
fi

# Upgrade to current candidate image. Capture logs for the attempted upgrade.
echo "--- start candidate"
CONTAINER="sourcegraph-new-${IDENT}"
IMAGE=us.gcr.io/sourcegraph-dev/server:bazel-${CANDIDATE_VERSION} CLEAN="false" ./dev/run-server-image.sh -d --name "$CONTAINER"
sleep 15

# Run tests
echo "--- TEST: Checking Sourcegraph instance is accessible"
curl -f "$URL"
curl -f "$URL"/healthz

SOURCEGRAPH_REPORTED_VERSION_NEW=$(curl -fs "$URL/__version")
echo
echo "--- Sourcegraph instance (after upgrade) is reporting version: '$SOURCEGRAPH_REPORTED_VERSION_NEW'"

if [ "$SOURCEGRAPH_REPORTED_VERSION_NEW" == "$SOURCEGRAPH_REPORTED_VERSION_OLD" ]; then
  echo "Error: Instance version unchanged after upgrade" 1>&2
  exit 1
fi

echo "--- TEST: Running tests"
pushd client/web
pnpm run test:regression:core
popd
@@ -106,27 +106,3 @@ func triggerReleaseBranchHealthchecks(minimumUpgradeableVersion string) operatio
		}
	}
}

func codeIntelQA(candidateTag string) operations.Operation {
	return func(p *bk.Pipeline) {
		p.AddStep(":bazel::docker::brain: Code Intel QA",
			bk.SlackStepNotify(&bk.SlackStepNotifyConfigPayload{
				Message:     ":alert: :noemi-handwriting: Code Intel QA Flake detected <@Noah S-C>",
				ChannelName: "code-intel-buildkite",
				Conditions: bk.SlackStepNotifyPayloadConditions{
					Failed: true,
				},
			}),
			// Run tests against the candidate server image
			bk.DependsOn(candidateImageStepKey("server")),
			bk.Agent("queue", "bazel"),
			bk.Env("CANDIDATE_VERSION", candidateTag),
			bk.Env("SOURCEGRAPH_BASE_URL", "http://127.0.0.1:7080"),
			bk.Env("SOURCEGRAPH_SUDO_USER", "admin"),
			bk.Env("TEST_USER_EMAIL", "test@sourcegraph.com"),
			bk.Env("TEST_USER_PASSWORD", "supersecurepassword"),
			bk.Cmd("dev/ci/integration/code-intel/run.sh"),
			bk.ArtifactPaths("./*.log"),
			bk.SoftFail(1))
	}
}
@@ -83,17 +83,17 @@ server_integration_test(
        "$(location //dev/codeintel-qa/cmd/clear)",
        "$(location //dev/codeintel-qa/cmd/upload)",
        "$(location //dev/codeintel-qa/cmd/query)",
        "$(location //dev/ci/integration/code-intel:repos.json)",
        "$(location //testing/code-intel:repos.json)",
    ],
    data = [
        "//cmd/server:image_tarball",
        "//dev/ci/integration/code-intel:repos.json",
        "//dev/codeintel-qa/cmd/clear",
        "//dev/codeintel-qa/cmd/download",
        "//dev/codeintel-qa/cmd/query",
        "//dev/codeintel-qa/cmd/upload",
        "//dev/tools:src-cli",
        "//internal/cmd/init-sg",
        "//testing/code-intel:repos.json",
    ],
    env = {
        "TEST_USER_EMAIL": "test@sourcegraph.com",
@@ -145,7 +145,7 @@ server_integration_test(
            "$(location //dev/codeintel-qa/cmd/clear:clear-darwin-arm64)",
            "$(location //dev/codeintel-qa/cmd/upload:upload-darwin-arm64)",
            "$(location //dev/codeintel-qa/cmd/query:query-darwin-arm64)",
            "$(location //dev/ci/integration/code-intel:repos.json)",
            "$(location //testing/code-intel:repos.json)",
        ],
        "//conditions:default": [
            "$(location //internal/cmd/init-sg)",
@@ -154,29 +154,29 @@ server_integration_test(
            "$(location //dev/codeintel-qa/cmd/clear)",
            "$(location //dev/codeintel-qa/cmd/upload)",
            "$(location //dev/codeintel-qa/cmd/query)",
            "$(location //dev/ci/integration/code-intel:repos.json)",
            "$(location //testing/code-intel:repos.json)",
        ],
    }),
    data = select({
        "//:darwin_docker_e2e_go": [
            "//cmd/server:image_tarball",
            "//dev/ci/integration/code-intel:repos.json",
            "//dev/codeintel-qa/cmd/clear:clear-darwin-arm64",
            "//dev/codeintel-qa/cmd/download:download-darwin-arm64",
            "//dev/codeintel-qa/cmd/query:query-darwin-arm64",
            "//dev/codeintel-qa/cmd/upload:upload-darwin-arm64",
            "//internal/cmd/init-sg:init-sg-darwin-arm64",
            "//testing/code-intel:repos.json",
            "@src-cli-darwin-arm64//:src-cli-darwin-arm64",
        ],
        "//conditions:default": [
            "//cmd/server:image_tarball",
            "//dev/ci/integration/code-intel:repos.json",
            "//dev/codeintel-qa/cmd/clear",
            "//dev/codeintel-qa/cmd/download",
            "//dev/codeintel-qa/cmd/query",
            "//dev/codeintel-qa/cmd/upload",
            "//dev/tools:src-cli",
            "//internal/cmd/init-sg",
            "//testing/code-intel:repos.json",
        ],
    }),
    env = {