mirror of https://github.com/sourcegraph/sourcegraph.git (synced 2026-02-06 18:51:59 +00:00)

codeintel: RFC 235: Add migration infrastructure to codeintel database (#13903)

This commit is contained in:
parent 455462b088
commit 07444839f7
@@ -1,13 +1,19 @@
#!/bin/bash

cd "$(dirname "${BASH_SOURCE[0]}")"/../../migrations/frontend
cd "$(dirname "${BASH_SOURCE[0]}")"/../../migrations
set -e

if [ -z "$1" ]; then
echo "USAGE: $0 <name>"
if [ -z "$2" ]; then
echo "USAGE: $0 <db_name> <name>"
exit 1
fi

if [ ! -d "$1" ]; then
echo "Unknown database '$1'"
exit 1
fi
pushd "$1" >/dev/null || exit 1

# This simulates what "migrate create -ext sql -digits 10 -seq" does.
awkcmd=$(
cat <<-EOF
@@ -37,5 +43,5 @@ BEGIN;
COMMIT;
EOF

echo "Created migrations/$f"
echo "Created migrations/$1/$f"
done

@@ -2,7 +2,23 @@

set -eo pipefail

cd "$(dirname "${BASH_SOURCE[0]}")/../.."
cd "$(dirname "${BASH_SOURCE[0]}")/../../migrations"

if [ -z "$1" ]; then
echo "USAGE: $0 <db_name> [ <command> ]"
exit 1
fi

if [ ! -d "$1" ]; then
echo "Unknown database '$1'"
exit 1
fi
pushd "$1" >/dev/null || exit 1

migrations_table='schema_migrations'
if [ "$1" != "frontend" ]; then
migrations_table="$1_${migrations_table}"
fi

hash migrate 2>/dev/null || {
if [[ $(uname) == "Darwin" ]]; then
@@ -13,4 +29,5 @@ hash migrate 2>/dev/null || {
fi
}

migrate -database "postgres://${PGHOST}:${PGPORT}/${PGDATABASE}" -path ./migrations/frontend "$@"
shift # get rid of db name
migrate -database "postgres://${PGHOST}:${PGPORT}/${PGDATABASE}?x-migrations-table=${migrations_table}" -path . "$@"

@@ -2,7 +2,7 @@

set -eo pipefail

cd "$(dirname "${BASH_SOURCE[0]}")/../../migrations/frontend"
cd "$(dirname "${BASH_SOURCE[0]}")/../../migrations"

hash migrate 2>/dev/null || {
if [[ $(uname) == "Darwin" ]]; then
@@ -13,8 +13,8 @@ hash migrate 2>/dev/null || {
fi
}

if [ -z "$1" ]; then
echo "USAGE: $0 <tag>"
if [ -z "$2" ]; then
echo "USAGE: $0 <db_name> <tag>"
echo ""
echo "This tool will squash all migrations up to and including the last migration defined"
echo "in the given tag branch. The input to this tool should be three minor releases before"
@@ -24,8 +24,24 @@ if [ -z "$1" ]; then
exit 1
fi

if [ ! -d "$1" ]; then
echo "Unknown database '$1'"
exit 1
fi
pushd "$1" >/dev/null || exit 1

migrations_table='schema_migrations'
if [ "$1" != "frontend" ]; then
migrations_table="$1_${migrations_table}"
fi

target='./'
if [ -z "$(git ls-tree -r --name-only "$1" "./")" ]; then
if [ -z "$(git ls-tree -r --name-only "$2" "./")" ]; then
if [ "$1" != "frontend" ]; then
echo "database does not exist at this version - nothing to squash"
exit 0
fi

# If we're squashing migrations on a tagged version where the
# migrations/frontend directory does not exist, scan the files
# in the parent directory where they were located previously.
@@ -33,12 +49,14 @@ if [ -z "$(git ls-tree -r --name-only "$1" "./")" ]; then
fi

# Find the last migration defined in the given tag
VERSION=$(git ls-tree -r --name-only "$1" "${target}" |
cut -d'_' -f1 | # Keep only prefix
cut -d'/' -f2 | # Remove any leading ../
grep -v "[^0-9]" | # Remove non-numeric remainders
sort | # Sort by id prefix
tail -n1) # Get latest migration
VERSION=$(
git ls-tree -r --name-only "$2" "${target}" |
cut -d'_' -f1 | # Keep only prefix
cut -d'/' -f2 | # Remove any leading ../
grep -v "[^0-9]" | # Remove non-numeric remainders
sort | # Sort by id prefix
tail -n1 # Get latest migration
)

if [ -z "${VERSION}" ]; then
echo "failed to retrieve migration version"
@@ -68,16 +86,16 @@ if [ "${SERVER_VERSION}" != 9.6 ]; then
fi

# First, apply migrations up to the version we want to squash
migrate -database "postgres://${PGHOST}:${PGPORT}/${PGDATABASE}?sslmode=disable" -path . goto "${VERSION}"
migrate -database "postgres://${PGHOST}:${PGPORT}/${PGDATABASE}?sslmode=disable&x-migrations-table=${migrations_table}" -path . goto "${VERSION}"

# Dump the database into a temporary file that we need to post-process
pg_dump -s --no-owner --no-comments --clean --if-exists -f tmp_squashed.sql
pg_dump --schema-only --no-owner --no-comments --exclude-table='*schema_migrations' -f tmp_squashed.sql

# Remove settings header from pg_dump output
sed -i '' -e 's/^SET .*$//g' tmp_squashed.sql
sed -i '' -e 's/^SELECT pg_catalog.set_config.*$//g' tmp_squashed.sql

# Do not drop extensions if they already exists. This causes some
# Do not drop extensions if they already exist. This causes some
# weird problems with the back-compat tests as the extensions are
# not dropped in the correct order to honor dependencies.
sed -i '' -e 's/^DROP EXTENSION .*$//g' tmp_squashed.sql
@@ -113,15 +131,11 @@ cat tmp_squashed.sql >>"./${VERSION}_squashed_migrations.up.sql"
printf "\nCOMMIT;\n" >>"./${VERSION}_squashed_migrations.up.sql"
rm tmp_squashed.sql

# Create down migration. This needs to drop everything, so we just drop the
# schema and recreate it. This happens to also drop the schema_migrations
# table, which blows up the migrate tool if we don't put it back.

cat >"./${VERSION}_squashed_migrations.down.sql" <<EOL
DROP SCHEMA IF EXISTS public CASCADE;
CREATE SCHEMA public;

CREATE TABLE IF NOT EXISTS schema_migrations (
CREATE TABLE IF NOT EXISTS ${migrations_table} (
version bigint NOT NULL PRIMARY KEY,
dirty boolean NOT NULL
);

@@ -72,8 +72,8 @@ func New(dataSource, dbNameSuffix string) (*sql.DB, error) {
return db, nil
}

func MigrateDB(db *sql.DB, dataSource string) error {
m, err := dbutil.NewMigrate(db, dataSource)
func MigrateDB(db *sql.DB, databaseName string) error {
m, err := dbutil.NewMigrate(db, databaseName)
if err != nil {
return err
}

@@ -17,16 +17,35 @@ func TestMigrations(t *testing.T) {
// Setup a global test database
dbtesting.SetupGlobalTestDB(t)

m, err := dbutil.NewMigrate(dbconn.Global, "")
if err != nil {
t.Errorf("error constructing migrations: %s", err)
migrate := func() {
for _, databaseName := range dbutil.DatabaseNames {
if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil {
t.Errorf("error running initial migrations: %s", err)
}
}
}
// Run all down migrations then up migrations again to ensure there are no SQL errors.
if err := m.Down(); err != nil {
t.Errorf("error running down migrations: %s", err)
}
if err := dbutil.DoMigrate(m); err != nil {
t.Errorf("error running up migrations: %s", err)

for _, databaseName := range dbutil.DatabaseNames {
t.Run(databaseName, func(t *testing.T) {
// Dropping a squash schema _all_ the way down just drops the entire public
// schema. Because we have a "combined" database that runs migrations for
// multiple disjoint schemas in development environments, migrating all the
// way down will drop all tables from all schemas. This loop runs such down
// migrations, so we prep our tests by re-migrating up on each iteration.
migrate()

m, err := dbutil.NewMigrate(dbconn.Global, databaseName)
if err != nil {
t.Errorf("error constructing migrations: %s", err)
}
// Run all down migrations then up migrations again to ensure there are no SQL errors.
if err := m.Down(); err != nil {
t.Errorf("error running down migrations: %s", err)
}
if err := dbutil.DoMigrate(m); err != nil {
t.Errorf("error running up migrations: %s", err)
}
})
}
}

@@ -64,12 +64,14 @@ func NewDB(t testing.TB, dsn string) *sql.DB {
config.Path = "/" + dbname
testDB := dbConn(t, config)

m, err := dbutil.NewMigrate(testDB, dsn)
if err != nil {
t.Fatalf("failed to construct migrations: %s", err)
}
if err = dbutil.DoMigrate(m); err != nil {
t.Fatalf("failed to apply migrations: %s", err)
for _, databaseName := range dbutil.DatabaseNames {
m, err := dbutil.NewMigrate(testDB, databaseName)
if err != nil {
t.Fatalf("failed to construct migrations: %s", err)
}
if err = dbutil.DoMigrate(m); err != nil {
t.Fatalf("failed to apply migrations: %s", err)
}
}

t.Cleanup(func() {

@@ -3,6 +3,7 @@ package dbtesting

import (
"database/sql"
"fmt"
"hash/fnv"
"io"
"log"
@@ -15,6 +16,7 @@ import (

"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbutil"
)

// MockHashPassword if non-nil is used instead of db.hashPassword. This is useful
@@ -87,7 +89,12 @@ func emptyDBPreserveSchema(t testing.TB, d *sql.DB) {
t.Fatalf("Table schema_migrations not found: %v", err)
}

rows, err := d.Query("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' AND table_name != 'schema_migrations'")
var conds []string
for _, migrationTable := range dbutil.MigrationTables {
conds = append(conds, fmt.Sprintf("table_name != '%s'", migrationTable))
}

rows, err := d.Query("SELECT table_name FROM information_schema.tables WHERE table_schema='public' AND table_type='BASE TABLE' AND " + strings.Join(conds, " AND "))
if err != nil {
t.Fatal(err)
}
@@ -145,5 +152,11 @@ func initTest(nameSuffix string) error {
return err
}

return dbconn.MigrateDB(dbconn.Global, "dbname="+dbname)
for _, databaseName := range dbutil.DatabaseNames {
if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil {
return err
}
}

return nil
}

@@ -22,7 +22,8 @@ import (
"github.com/opentracing/opentracing-go/ext"
"github.com/pkg/errors"
"github.com/sourcegraph/sourcegraph/internal/trace/ot"
migrations "github.com/sourcegraph/sourcegraph/migrations/frontend"
codeintelMigrations "github.com/sourcegraph/sourcegraph/migrations/codeintel"
frontendMigrations "github.com/sourcegraph/sourcegraph/migrations/frontend"
)

// Transaction calls f within a transaction, rolling back if any error is
@@ -112,18 +113,60 @@ func NewDB(dsn, app string) (*sql.DB, error) {
return db, nil
}

func NewMigrationSourceLoader(dataSource string) *bindata.AssetSource {
return bindata.Resource(migrations.AssetNames(), migrations.Asset)
// databases configures the migrations we want based on a database name. This
// configuration includes the name of the migration version table as well as
// the raw migration assets to run to migrate the target schema to a new version.
var databases = map[string]struct {
MigrationsTable string
Resource *bindata.AssetSource
}{
"frontend": {
MigrationsTable: "schema_migrations",
Resource: bindata.Resource(frontendMigrations.AssetNames(), frontendMigrations.Asset),
},
"codeintel": {
MigrationsTable: "codeintel_schema_migrations",
Resource: bindata.Resource(codeintelMigrations.AssetNames(), codeintelMigrations.Asset),
},
}

func NewMigrate(db *sql.DB, dataSource string) (*migrate.Migrate, error) {
var cfg postgres.Config
driver, err := postgres.WithInstance(db, &cfg)
// DatabaseNames returns the list of database names (configured via `dbutil.databases`).
var DatabaseNames = func() []string {
var names []string
for databaseName := range databases {
names = append(names, databaseName)
}

return names
}()

// MigrationTables returns the list of migration table names (configured via `dbutil.databases`).
var MigrationTables = func() []string {
var migrationTables []string
for _, db := range databases {
migrationTables = append(migrationTables, db.MigrationsTable)
}

return migrationTables
}()

// NewMigrate returns a new configured migration object for the given database name. This database
// name must be present in the `dbutil.databases` map. This migration can be subsequently run by
// invoking `dbutil.DoMigrate`.
func NewMigrate(db *sql.DB, databaseName string) (*migrate.Migrate, error) {
schemaData, ok := databases[databaseName]
if !ok {
return nil, fmt.Errorf("unknown database '%s'", databaseName)
}

driver, err := postgres.WithInstance(db, &postgres.Config{
MigrationsTable: schemaData.MigrationsTable,
})
if err != nil {
return nil, err
}

d, err := bindata.WithInstance(NewMigrationSourceLoader(dataSource))
d, err := bindata.WithInstance(schemaData.Resource)
if err != nil {
return nil, err
}

@@ -251,6 +251,17 @@ Triggers:

```

# Table "public.codeintel_schema_migrations"
```
 Column  |  Type   | Modifiers
---------+---------+-----------
 version | bigint  | not null
 dirty   | boolean | not null
Indexes:
    "codeintel_schema_migrations_pkey" PRIMARY KEY, btree (version)

```

# Table "public.critical_and_site_config"
```
 Column | Type | Modifiers
@@ -477,6 +488,57 @@ Indexes:

```

# Table "public.lsif_data_definitions"
```
   Column   |  Type   | Modifiers
------------+---------+-----------
 dump_id    | integer | not null
 scheme     | text    | not null
 identifier | text    | not null
 data       | bytea   |

```

# Table "public.lsif_data_documents"
```
 Column  |  Type   | Modifiers
---------+---------+-----------
 dump_id | integer | not null
 path    | text    | not null
 data    | bytea   |

```

# Table "public.lsif_data_metadata"
```
      Column       |  Type   | Modifiers
-------------------+---------+-----------
 dump_id           | integer | not null
 num_result_chunks | integer |

```

# Table "public.lsif_data_references"
```
   Column   |  Type   | Modifiers
------------+---------+-----------
 dump_id    | integer | not null
 scheme     | text    | not null
 identifier | text    | not null
 data       | bytea   |

```

# Table "public.lsif_data_result_chunks"
```
 Column  |  Type   | Modifiers
---------+---------+-----------
 dump_id | integer | not null
 idx     | integer | not null
 data    | bytea   |

```

# Table "public.lsif_dirty_repositories"
```
 Column | Type | Modifiers

@@ -12,6 +12,7 @@ import (
"time"

"github.com/sourcegraph/sourcegraph/internal/db/dbconn"
"github.com/sourcegraph/sourcegraph/internal/db/dbutil"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"

_ "github.com/lib/pq"
@@ -98,8 +99,12 @@ func generate(log *log.Logger) (string, error) {
return "", fmt.Errorf("SetupGlobalConnection: %w", err)
}

if err := dbconn.MigrateDB(dbconn.Global, dataSource); err != nil {
return "", fmt.Errorf("MigrateDB: %w", err)
// Migrate the codeintel db on top of the frontend one so we capture
// the schema of both databases.
for _, databaseName := range dbutil.DatabaseNames {
if err := dbconn.MigrateDB(dbconn.Global, databaseName); err != nil {
return "", fmt.Errorf("MigrateDB: %w", err)
}
}

db, err := dbconn.Open(dataSource)

migrations/README.md (new file, 110 lines)
@@ -0,0 +1,110 @@
# Postgres Migrations

The children of this directory contain migrations for each Postgres database instance:

- `frontend` is the main database (things should go here unless there is a good reason)
- `codeintel` is a database containing only processed LSIF data (which can become extremely large)

The migration path for each database instance is the same and is described below. Each of the database instances described here is deployed separately, but they are designed to be _overlayable_ to reduce friction during development. That is, we assume that the names in each database do not overlap so that the same connection parameters can be used for both database instances. Each database also has a uniquely named schema versions table:

| database    | schema version table name     |
| ----------- | ----------------------------- |
| `frontend`  | `schema_migrations`           |
| `codeintel` | `codeintel_schema_migrations` |

Migrations are handled by the [migrate](https://github.com/golang-migrate/migrate/tree/master/cmd/migrate#installation) tool. Migrations get applied automatically at application startup. The CLI tool can also be used to manually test migrations.
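Because the schemas are overlayable, both sets of tables can live in a single development database while each instance tracks its migration state independently. A rough illustration (assuming the version table names above and a shared local dev database):

```
-- Hypothetical sanity check against a single local dev database hosting both schemas:
-- each database instance consults only its own schema version table.
SELECT version, dirty FROM schema_migrations;            -- frontend
SELECT version, dirty FROM codeintel_schema_migrations;  -- codeintel
```
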
## Migrating up and down

Up migrations happen automatically on server start-up after running the generate scripts. They can also be run manually using the migrate CLI:

- run `./dev/db/migrate.sh <db_name> -h` for a list of options
- run `./dev/db/migrate.sh <db_name> up` to move forward to the latest migration
- run `./dev/db/migrate.sh <db_name> down 1` to roll back the previous migration

If a migration fails and you need to revert to a previous state, `./dev/db/migrate.sh <db_name> force` may be helpful. Alternatively, use the `dropdb` and `createdb` commands to wipe your local database and start from a clean state.

**Note:** if you find that you need to run a down migration, that almost certainly means the migration was not backward-compatible, and you should fix this before merging the migration into `main`.

## Adding a migration

**IMPORTANT:** All migrations must be backward-compatible, meaning that an _existing_ running instance must be able to run against the _new_ (post-migration) version of the schema. This is because frontend pods are updated in a rolling fashion. During the rolling update, there will be both old and new frontend pods. The first updated pod will migrate the schema atomically, but the remaining old ones may continue to write before they are terminated.

Run the following:

```
./dev/db/add_migration.sh <db_name> my_migration_name
```

There will be up/down `.sql` migration files created in the instance's migrations directory. Add SQL statements to these files that will perform the desired migration.

**NOTE**: the migration runner does not use transactions. Use the explicit transaction blocks added to the migration script template.

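For illustration only (not the literal generated template), a migration pair produced by the script above might be filled in like this, with every statement inside an explicit transaction block (the file names and `example_table` are hypothetical):

```
-- <timestamp>_my_migration_name.up.sql (hypothetical example)
BEGIN;

CREATE TABLE example_table (
    id bigserial PRIMARY KEY,
    name text NOT NULL
);

COMMIT;

-- <timestamp>_my_migration_name.down.sql (hypothetical example)
BEGIN;

DROP TABLE IF EXISTS example_table;

COMMIT;
```
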
After adding SQL statements to those files, embed them into the Go code and update the schema doc:

```
go generate ./migrations/...
go generate ./internal/db/
```

Alternatively, regenerate everything in the repository via `./dev/generate.sh`.

Verify that the migration is backward-compatible. We currently have no automated testing for this. You need to ensure that an old version of Sourcegraph, like what is currently deployed on Sourcegraph.com, can continue to use the DB during a rolling upgrade from the old version to your version.

Some migrations are difficult to do in a single step. For instance, renaming a column, table, or view, or adding a column with a non-nullable constraint, will break existing code that accesses that table or view. In order to make such changes you may need to break your changes into several parts separated by a deployment.

For example, a non-nullable column can be added to an existing table with the following steps (a SQL sketch follows this list):

- Add a nullable column to the table
- Update the code to always populate this row on writes
- Deploy to Sourcegraph.com
- Add a non-nullable constraint to the table
- Deploy to Sourcegraph.com

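A rough sketch of those two deployments in SQL, assuming a hypothetical `users` table and `display_name` column:

```
-- First deployment: add the column as nullable so old code keeps working.
BEGIN;
ALTER TABLE users ADD COLUMN display_name text;
COMMIT;

-- Later deployment, once all writers populate the column: backfill and tighten.
BEGIN;
UPDATE users SET display_name = '' WHERE display_name IS NULL;
ALTER TABLE users ALTER COLUMN display_name SET NOT NULL;
COMMIT;
```
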
We have a hard requirement (enforced by CI) that rolling upgrades are always possible on Sourcegraph.com. When possible, this same standard should be kept between minor release versions to ensure a smooth upgrade process for private instances (although there will be exceptions due to feature velocity and a monthly release cadence).

## Customer rollbacks

Running down migrations in a rollback **should NOT** be necessary if all migrations are backward-compatible. In case the customer must run a down migration, they will need to perform the following steps.

- Roll back Sourcegraph to the previous version. On startup, the frontend pods will log a migration warning stating that the schema has been migrated to a newer version. This warning should **NOT** indicate that the database is dirty.

- Determine if a database is dirty by running the following commands.

  **frontend database**:

  ```
  kubectl exec $(kubectl get pod -l app=pgsql -o jsonpath='{.items[0].metadata.name}') -- psql -U sg -c 'SELECT * FROM schema_migrations'
  ```

  **codeintel database**:

  ```
  kubectl exec $(kubectl get pod -l app=pgsql-codeintel -o jsonpath='{.items[0].metadata.name}') -- psql -U sg -c 'SELECT * FROM codeintel_schema_migrations'
  ```

  For each dirty database, follow the steps in the _Dirty schema_ section below.

- For each database `<db_name>` with the schema version table `<schema_version_table_name>`, do the following:
  - Determine the two commits that correspond to the previous and new versions of Sourcegraph. Check out each commit and run `ls -1` in the `migrations/<db_name>` directory. The order of the migrations is the same as the alphabetical order of the migration scripts, so take the diff between the two list outputs to determine which migrations should be run.
  - Apply the down migration scripts in **reverse chronological order**. Wrap each down migration in a transaction block. If there are any errors, stop and resolve the issue before proceeding with the next down migration.
  - After all down migrations have been applied, run
    ```
    update <schema_version_table_name> set version=$VERSION;
    ```
    where `$VERSION` is the numerical prefix of the migration script corresponding to the first migration you _didn't_ just apply. In other words, it is the numerical prefix of the last migration script as of the rolled-back-to commit.
- Restart the frontend pods. On restart, they should spin up successfully.

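As a concrete (hypothetical) worked example, manually rolling back a single codeintel migration and resetting the version table might look like this; the migration body and version number are illustrative only:

```
-- Apply the down migration inside an explicit transaction block.
BEGIN;
DROP TABLE IF EXISTS example_table;  -- body of the hypothetical down migration
COMMIT;

-- Point the schema version table at the last migration that is still applied.
update codeintel_schema_migrations set version=1000000001;
```
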
## Troubleshooting

### Dirty schema

If the schema for a database is dirty, that means the current migration (as indicated in the schema version table, regardless of migration direction) failed midway through. This should almost never happen. If it does happen, it probably means up/down migrations were applied out of order or other manual changes were made to the DB that conflict with the current migration stage.

If the schema for database `<db_name>` with the schema version table `<schema_version_table_name>` is dirty, do the following:

- Figure out what change was made to cause the migration to fail midway through.
- Let `$VERSION` be the numerical prefix of the last migration script run to produce this version of the schema.
- Run the necessary SQL commands to make the schema consistent with version `$VERSION` of the schema.
- Run `update <schema_version_table_name> set version=$VERSION, dirty=false;`.
- Restart frontend pods.

migrations/codeintel/1000000000_init.down.sql (new file, 1 line)
@@ -0,0 +1 @@
-- empty migration

migrations/codeintel/1000000000_init.up.sql (new file, 1 line)
@@ -0,0 +1 @@
-- empty migration

migrations/codeintel/1000000001_init.down.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
BEGIN;

DROP TABLE IF EXISTS lsif_data_metadata;
DROP TABLE IF EXISTS lsif_data_documents;
DROP TABLE IF EXISTS lsif_data_result_chunks;
DROP TABLE IF EXISTS lsif_data_definitions;
DROP TABLE IF EXISTS lsif_data_references;

COMMIT;

migrations/codeintel/1000000001_init.up.sql (new file, 9 lines)
@@ -0,0 +1,9 @@
BEGIN;

CREATE TABLE lsif_data_metadata (dump_id integer NOT NULL, num_result_chunks integer);
CREATE TABLE lsif_data_documents (dump_id integer NOT NULL, path text NOT NULL, data bytea);
CREATE TABLE lsif_data_result_chunks (dump_id integer NOT NULL, idx integer NOT NULL, data bytea);
CREATE TABLE lsif_data_definitions (dump_id integer NOT NULL, scheme text NOT NULL, identifier text NOT NULL, data bytea);
CREATE TABLE lsif_data_references (dump_id integer NOT NULL, scheme text NOT NULL, identifier text NOT NULL, data bytea);

COMMIT;

migrations/codeintel/bindata.go (generated, new file, 343 lines)
@@ -0,0 +1,343 @@
// Code generated by go-bindata. DO NOT EDIT.
// sources:
// 1000000000_init.down.sql (19B)
// 1000000000_init.up.sql (19B)
// 1000000001_init.down.sql (233B)
// 1000000001_init.up.sql (541B)

package migrations

import (
"bytes"
"compress/gzip"
"crypto/sha256"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"time"
)

func bindataRead(data []byte, name string) ([]byte, error) {
gz, err := gzip.NewReader(bytes.NewBuffer(data))
if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}

var buf bytes.Buffer
_, err = io.Copy(&buf, gz)
clErr := gz.Close()

if err != nil {
return nil, fmt.Errorf("read %q: %w", name, err)
}
if clErr != nil {
return nil, err
}

return buf.Bytes(), nil
}

type asset struct {
bytes []byte
info os.FileInfo
digest [sha256.Size]byte
}

type bindataFileInfo struct {
name string
size int64
mode os.FileMode
modTime time.Time
}

func (fi bindataFileInfo) Name() string {
return fi.name
}
func (fi bindataFileInfo) Size() int64 {
return fi.size
}
func (fi bindataFileInfo) Mode() os.FileMode {
return fi.mode
}
func (fi bindataFileInfo) ModTime() time.Time {
return fi.modTime
}
func (fi bindataFileInfo) IsDir() bool {
return false
}
func (fi bindataFileInfo) Sys() interface{} {
return nil
}

var __1000000000_initDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\x48\xcd\x2d\x28\xa9\x54\xc8\xcd\x4c\x2f\x4a\x2c\xc9\xcc\xcf\xe3\x02\x04\x00\x00\xff\xff\x32\x4d\x68\xbd\x13\x00\x00\x00")

func _1000000000_initDownSqlBytes() ([]byte, error) {
return bindataRead(
__1000000000_initDownSql,
"1000000000_init.down.sql",
)
}

func _1000000000_initDownSql() (*asset, error) {
bytes, err := _1000000000_initDownSqlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "1000000000_init.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0x46, 0xd1, 0x31, 0xb9, 0x68, 0x19, 0xcc, 0x70, 0xb6, 0x7, 0x20, 0x2e, 0x6a, 0x4d, 0xf1, 0xce, 0xd0, 0xc8, 0xda, 0x50, 0xce, 0x8c, 0xee, 0x52, 0x36, 0x80, 0xd0, 0x5a, 0xd2, 0x7a, 0x82}}
return a, nil
}

var __1000000000_initUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xd2\xd5\x55\x48\xcd\x2d\x28\xa9\x54\xc8\xcd\x4c\x2f\x4a\x2c\xc9\xcc\xcf\xe3\x02\x04\x00\x00\xff\xff\x32\x4d\x68\xbd\x13\x00\x00\x00")

func _1000000000_initUpSqlBytes() ([]byte, error) {
return bindataRead(
__1000000000_initUpSql,
"1000000000_init.up.sql",
)
}

func _1000000000_initUpSql() (*asset, error) {
bytes, err := _1000000000_initUpSqlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "1000000000_init.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x9c, 0x46, 0xd1, 0x31, 0xb9, 0x68, 0x19, 0xcc, 0x70, 0xb6, 0x7, 0x20, 0x2e, 0x6a, 0x4d, 0xf1, 0xce, 0xd0, 0xc8, 0xda, 0x50, 0xce, 0x8c, 0xee, 0x52, 0x36, 0x80, 0xd0, 0x5a, 0xd2, 0x7a, 0x82}}
return a, nil
}

var __1000000001_initDownSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x72\x72\x75\xf7\xf4\xb3\xe6\xe2\x72\x09\xf2\x0f\x50\x08\x71\x74\xf2\x71\x55\xf0\x74\x53\x70\x8d\xf0\x0c\x0e\x09\x56\xc8\x29\xce\x4c\x8b\x4f\x49\x2c\x49\x8c\xcf\x4d\x2d\x49\x04\x31\xac\x09\x29\x4c\xc9\x4f\x2e\xcd\x4d\xcd\x2b\x29\x26\xa8\xb2\x28\xb5\xb8\x34\xa7\x24\x3e\x39\xa3\x34\x2f\x9b\xb0\xea\x94\xd4\xb4\xcc\xbc\xcc\x92\xcc\xfc\x3c\x62\x4c\x4e\x4b\x2d\x4a\xcd\x4b\x4e\x2d\xb6\xe6\xe2\x72\xf6\xf7\xf5\xf5\x0c\xb1\xe6\x02\x04\x00\x00\xff\xff\xdc\x1f\x48\x24\xe9\x00\x00\x00")

func _1000000001_initDownSqlBytes() ([]byte, error) {
return bindataRead(
__1000000001_initDownSql,
"1000000001_init.down.sql",
)
}

func _1000000001_initDownSql() (*asset, error) {
bytes, err := _1000000001_initDownSqlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "1000000001_init.down.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0xe5, 0xa1, 0x76, 0x54, 0xd0, 0xb0, 0xb9, 0xdc, 0x93, 0x84, 0xd, 0xb5, 0xa3, 0x55, 0x91, 0xef, 0xc0, 0xe4, 0x89, 0x62, 0x20, 0x2, 0x6e, 0x92, 0x58, 0x73, 0xb3, 0x9b, 0xcd, 0xde, 0xa, 0x86}}
return a, nil
}

var __1000000001_initUpSql = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xb4\x8f\xcd\x4a\xc5\x30\x10\x85\xf7\x79\x8a\x59\x2a\xf8\x06\x5d\xdd\x7b\x09\x52\xe8\x0f\x48\x5c\x87\x98\x4c\xec\x60\x33\x2d\xc9\x04\xea\xdb\x8b\x82\xa0\x62\x8b\x08\x77\x37\x1c\x86\xef\x3b\xe7\xac\xef\xdb\xa1\x51\xea\xf2\xa0\x4f\x46\x83\x39\x9d\x3b\x0d\x73\xa1\x68\x83\x13\x67\x13\x8a\x7b\x3f\xe0\x26\xd4\xb4\x5a\x0a\x40\x2c\xf8\x8c\x19\x86\xd1\xc0\xf0\xd8\x75\x77\xc0\x35\xd9\x8c\xa5\xce\x62\xfd\x54\xf9\xa5\x7c\xfe\xdc\x36\x7b\xd8\xb0\xf8\x9a\x90\xa5\x1c\x71\x57\x27\x13\x08\x6e\xf2\x25\xfb\xe8\xf2\xf4\x2a\xe8\xf6\xe1\xdf\xbb\x1c\x08\x28\x6c\xbf\xa4\x7f\x51\x04\x8c\xc4\x24\xb4\xf0\xa1\xa0\xf8\x09\x13\xfe\xdc\x40\x01\x59\x28\x12\xe6\xff\xad\x8b\x98\x91\x3d\x5e\xcf\xac\x2e\x63\xdf\xb7\xa6\x51\x6f\x01\x00\x00\xff\xff\x9f\x15\x82\x58\x1d\x02\x00\x00")

func _1000000001_initUpSqlBytes() ([]byte, error) {
return bindataRead(
__1000000001_initUpSql,
"1000000001_init.up.sql",
)
}

func _1000000001_initUpSql() (*asset, error) {
bytes, err := _1000000001_initUpSqlBytes()
if err != nil {
return nil, err
}

info := bindataFileInfo{name: "1000000001_init.up.sql", size: 0, mode: os.FileMode(0), modTime: time.Unix(0, 0)}
a := &asset{bytes: bytes, info: info, digest: [32]uint8{0x76, 0x54, 0x17, 0xdc, 0xf, 0xca, 0x65, 0xea, 0xdb, 0x29, 0x60, 0xf3, 0x52, 0x53, 0x66, 0xf1, 0x8c, 0xbb, 0xe4, 0xe9, 0xb9, 0xe5, 0xca, 0xe7, 0xd, 0xe1, 0xeb, 0x55, 0xd3, 0x5b, 0x73, 0xd1}}
return a, nil
}

// Asset loads and returns the asset for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func Asset(name string) ([]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err)
}
return a.bytes, nil
}
return nil, fmt.Errorf("Asset %s not found", name)
}

// AssetString returns the asset contents as a string (instead of a []byte).
func AssetString(name string) (string, error) {
data, err := Asset(name)
return string(data), err
}

// MustAsset is like Asset but panics when Asset would return an error.
// It simplifies safe initialization of global variables.
func MustAsset(name string) []byte {
a, err := Asset(name)
if err != nil {
panic("asset: Asset(" + name + "): " + err.Error())
}

return a
}

// MustAssetString is like AssetString but panics when Asset would return an
// error. It simplifies safe initialization of global variables.
func MustAssetString(name string) string {
return string(MustAsset(name))
}

// AssetInfo loads and returns the asset info for the given name.
// It returns an error if the asset could not be found or
// could not be loaded.
func AssetInfo(name string) (os.FileInfo, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err)
}
return a.info, nil
}
return nil, fmt.Errorf("AssetInfo %s not found", name)
}

// AssetDigest returns the digest of the file with the given name. It returns an
// error if the asset could not be found or the digest could not be loaded.
func AssetDigest(name string) ([sha256.Size]byte, error) {
canonicalName := strings.Replace(name, "\\", "/", -1)
if f, ok := _bindata[canonicalName]; ok {
a, err := f()
if err != nil {
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s can't read by error: %v", name, err)
}
return a.digest, nil
}
return [sha256.Size]byte{}, fmt.Errorf("AssetDigest %s not found", name)
}

// Digests returns a map of all known files and their checksums.
func Digests() (map[string][sha256.Size]byte, error) {
mp := make(map[string][sha256.Size]byte, len(_bindata))
for name := range _bindata {
a, err := _bindata[name]()
if err != nil {
return nil, err
}
mp[name] = a.digest
}
return mp, nil
}

// AssetNames returns the names of the assets.
func AssetNames() []string {
names := make([]string, 0, len(_bindata))
for name := range _bindata {
names = append(names, name)
}
return names
}

// _bindata is a table, holding each asset generator, mapped to its name.
var _bindata = map[string]func() (*asset, error){
"1000000000_init.down.sql": _1000000000_initDownSql,
"1000000000_init.up.sql": _1000000000_initUpSql,
"1000000001_init.down.sql": _1000000001_initDownSql,
"1000000001_init.up.sql": _1000000001_initUpSql,
}

// AssetDebug is true if the assets were built with the debug flag enabled.
const AssetDebug = false

// AssetDir returns the file names below a certain
// directory embedded in the file by go-bindata.
// For example if you run go-bindata on data/... and data contains the
// following hierarchy:
// data/
// foo.txt
// img/
// a.png
// b.png
// then AssetDir("data") would return []string{"foo.txt", "img"},
// AssetDir("data/img") would return []string{"a.png", "b.png"},
// AssetDir("foo.txt") and AssetDir("notexist") would return an error, and
// AssetDir("") will return []string{"data"}.
func AssetDir(name string) ([]string, error) {
node := _bintree
if len(name) != 0 {
canonicalName := strings.Replace(name, "\\", "/", -1)
pathList := strings.Split(canonicalName, "/")
for _, p := range pathList {
node = node.Children[p]
if node == nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
}
}
if node.Func != nil {
return nil, fmt.Errorf("Asset %s not found", name)
}
rv := make([]string, 0, len(node.Children))
for childName := range node.Children {
rv = append(rv, childName)
}
return rv, nil
}

type bintree struct {
Func func() (*asset, error)
Children map[string]*bintree
}

var _bintree = &bintree{nil, map[string]*bintree{
"1000000000_init.down.sql": {_1000000000_initDownSql, map[string]*bintree{}},
"1000000000_init.up.sql": {_1000000000_initUpSql, map[string]*bintree{}},
"1000000001_init.down.sql": {_1000000001_initDownSql, map[string]*bintree{}},
"1000000001_init.up.sql": {_1000000001_initUpSql, map[string]*bintree{}},
}}

// RestoreAsset restores an asset under the given directory.
func RestoreAsset(dir, name string) error {
data, err := Asset(name)
if err != nil {
return err
}
info, err := AssetInfo(name)
if err != nil {
return err
}
err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755))
if err != nil {
return err
}
err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode())
if err != nil {
return err
}
return os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime())
}

// RestoreAssets restores an asset under the given directory recursively.
func RestoreAssets(dir, name string) error {
children, err := AssetDir(name)
// File
if err != nil {
return RestoreAsset(dir, name)
}
// Dir
for _, child := range children {
err = RestoreAssets(dir, filepath.Join(name, child))
if err != nil {
return err
}
}
return nil
}

func _filePath(dir, name string) string {
canonicalName := strings.Replace(name, "\\", "/", -1)
return filepath.Join(append([]string{dir}, strings.Split(canonicalName, "/")...)...)
}

migrations/codeintel/gen.go (new file, 6 lines)
@@ -0,0 +1,6 @@
// Package migrations contains the migration scripts for the DB.
package migrations

//go:generate env GOBIN=$PWD/.bin GO111MODULE=on go install github.com/kevinburke/go-bindata/go-bindata
//go:generate $PWD/.bin/go-bindata -nometadata -pkg migrations -ignore README.md -ignore .*\.go .
//go:generate gofmt -s -w bindata.go

migrations/codeintel/migrations_test.go (new file, 53 lines)
@@ -0,0 +1,53 @@
package migrations_test

import (
"path/filepath"
"reflect"
"sort"
"strconv"
"strings"
"testing"

migrations "github.com/sourcegraph/sourcegraph/migrations/codeintel"
)

const FirstMigration = 1000000000

func TestIDConstraints(t *testing.T) {
ups, err := filepath.Glob("*.up.sql")
if err != nil {
t.Fatal(err)
}

byID := map[int][]string{}
for _, name := range ups {
id, err := strconv.Atoi(name[:strings.IndexByte(name, '_')])
if err != nil {
t.Fatalf("failed to parse name %q: %v", name, err)
}
byID[id] = append(byID[id], name)
}

for id, names := range byID {
// Check if we are using sequential migrations from a certain point.
if _, hasPrev := byID[id-1]; id > FirstMigration && !hasPrev {
t.Errorf("migration with ID %d exists, but previous one (%d) does not", id, id-1)
}
if len(names) > 1 {
t.Errorf("multiple migrations with ID %d: %s", id, strings.Join(names, " "))
}
}
}

func TestNeedsGenerate(t *testing.T) {
want, err := filepath.Glob("*.sql")
if err != nil {
t.Fatal(err)
}
got := migrations.AssetNames()
sort.Strings(want)
sort.Strings(got)
if !reflect.DeepEqual(got, want) {
t.Fatal("bindata out of date. Please run:\n go generate github.com/sourcegraph/sourcegraph/migrations/...")
}
}