Approach 2: add repo_statistics and gitserver_repos_statistics tables (#40577)

This is the second approach to implementing these statistics tables, after we discovered that the original approach in https://github.com/sourcegraph/sourcegraph/pull/39660 led to contention around the `repo_statistics` table.

90% of the code in here is the same as in #39660; what changes is that we now have **multiple rows** in the `repo_statistics` table:

* Every time a `repo` row (and, in certain cases, a `gitserver_repo` row) is updated/inserted/deleted, we **append** (!) a row to `repo_statistics` containing the diff of the total counts before/after the change. Example: if a `repo` is deleted, we append a row with `total = -1` to the `repo_statistics` table.
* At query time we use `SELECT SUM(total), SUM(soft_deleted), SUM(cloned), ...` to get the current total counts.
* A worker periodically (right now: every 30 minutes) compacts the table by (1) getting the current counts, (2) updating the first row's columns to reflect the total counts, and (3) deleting all other rows. The sketch below shows the resulting read/compact cycle.
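
To make the cycle concrete, here is a minimal sketch using the `RepoStatisticsStore` added in this PR; the `printAndCompact` helper is hypothetical and assumes a `database.DB` handle named `db`:

```go
package example

import (
	"context"
	"fmt"

	"github.com/sourcegraph/sourcegraph/internal/database"
)

// printAndCompact is a hypothetical helper: it reads the current counts and
// then rolls all appended diff rows back up into a single row.
func printAndCompact(ctx context.Context, db database.DB) error {
	// Reads sum over all appended diff rows:
	// SELECT SUM(total), SUM(soft_deleted), ... FROM repo_statistics
	stats, err := db.RepoStatistics().GetRepoStatistics(ctx)
	if err != nil {
		return err
	}
	fmt.Printf("total=%d cloned=%d failed_fetch=%d\n", stats.Total, stats.Cloned, stats.FailedFetch)

	// The repo-statistics-compactor worker runs this every 30 minutes; the
	// summed counts stay the same, only the number of rows shrinks.
	return db.RepoStatistics().CompactRepoStatistics(ctx)
}
```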
Thorsten Ball 2022-08-19 14:09:44 +02:00 committed by GitHub
parent bcd9afb373
commit 5e1b6ed100
15 changed files with 1645 additions and 0 deletions

View File

@ -0,0 +1,62 @@
package repostatistics
import (
"context"
"time"
"github.com/sourcegraph/log"
"github.com/sourcegraph/sourcegraph/cmd/worker/job"
workerdb "github.com/sourcegraph/sourcegraph/cmd/worker/shared/init/db"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/goroutine"
)
// compactor is a worker responsible for compacting rows in the repo_statistics table.
type compactor struct{}
var _ job.Job = &compactor{}
func NewCompactor() job.Job {
return &compactor{}
}
func (j *compactor) Description() string {
return ""
}
func (j *compactor) Config() []env.Config {
return nil
}
func (j *compactor) Routines(ctx context.Context, logger log.Logger) ([]goroutine.BackgroundRoutine, error) {
db, err := workerdb.Init()
if err != nil {
return nil, err
}
return []goroutine.BackgroundRoutine{
goroutine.NewPeriodicGoroutine(context.Background(), 30*time.Minute, &handler{
store: database.NewDB(logger, db).RepoStatistics(),
logger: logger,
}),
}, nil
}
type handler struct {
store database.RepoStatisticsStore
logger log.Logger
}
var _ goroutine.Handler = &handler{}
var _ goroutine.ErrorHandler = &handler{}
func (h *handler) Handle(ctx context.Context) error {
return h.store.CompactRepoStatistics(ctx)
}
func (h *handler) HandleError(err error) {
h.logger.Error("error compacting repo statistics rows", log.Error(err))
}

View File

@ -16,6 +16,7 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/worker/internal/encryption"
"github.com/sourcegraph/sourcegraph/cmd/worker/internal/gitserver"
workermigrations "github.com/sourcegraph/sourcegraph/cmd/worker/internal/migrations"
"github.com/sourcegraph/sourcegraph/cmd/worker/internal/repostatistics"
"github.com/sourcegraph/sourcegraph/cmd/worker/internal/webhooks"
"github.com/sourcegraph/sourcegraph/cmd/worker/job"
"github.com/sourcegraph/sourcegraph/internal/conf"
@ -47,6 +48,7 @@ func Start(logger log.Logger, additionalJobs map[string]job.Job, registerEnterpr
"codeintel-policies-repository-matcher": codeintel.NewPoliciesRepositoryMatcherJob(),
"gitserver-metrics": gitserver.NewMetricsJob(),
"record-encrypter": encryption.NewRecordEncrypterJob(),
"repo-statistics-compactor": repostatistics.NewCompactor(),
}
jobs := map[string]job.Job{}

View File

@ -38,5 +38,15 @@
"path": "lib/group",
"prefix": "ResultErrorGroup",
"reason": "A flaky test that was made unflaky after the release cut"
},
{
"path": "internal/database",
"prefix": "TestDBTransactions",
"reason": "This test was fixed in 3d51294 but the fix not make it into 3.42.0. It causes the whole test suite to deadlock and that deadlocks shows up reliably in this test."
},
{
"path": "internal/database/basestore",
"prefix": "TestConcurrentTransactions",
"reason": "Related test (see above) was fixed in 3d51294 but did not make it into 3.42.0. It causes the whole test suite to deadlock and that deadlocks shows up reliably in this test."
}
]

View File

@ -107,6 +107,10 @@ This job runs the workspace resolutions for batch specs. Used for batch changes
This job runs queries against the database to generate `gitserver` metrics. These queries are generally expensive to run and do not need to be run per instance of `gitserver`, so the worker allows them to be run only once per scrape.
#### `repo-statistics-compactor`
This job periodically cleans up the `repo_statistics` table by rolling up all rows into a single row.
#### `record-encrypter`
This job bulk encrypts existing data in the database when an encryption key is introduced, and decrypts it when instructed to do so. See [encryption](./config/encryption.md) for additional details.

View File

@ -6886,6 +6886,9 @@ type MockEnterpriseDB struct {
// RepoKVPsFunc is an instance of a mock function object controlling the
// behavior of the method RepoKVPs.
RepoKVPsFunc *EnterpriseDBRepoKVPsFunc
// RepoStatisticsFunc is an instance of a mock function object
// controlling the behavior of the method RepoStatistics.
RepoStatisticsFunc *EnterpriseDBRepoStatisticsFunc
// ReposFunc is an instance of a mock function object controlling the
// behavior of the method Repos.
ReposFunc *EnterpriseDBReposFunc
@ -7054,6 +7057,11 @@ func NewMockEnterpriseDB() *MockEnterpriseDB {
return
},
},
RepoStatisticsFunc: &EnterpriseDBRepoStatisticsFunc{
defaultHook: func() (r0 database.RepoStatisticsStore) {
return
},
},
ReposFunc: &EnterpriseDBReposFunc{
defaultHook: func() (r0 database.RepoStore) {
return
@ -7251,6 +7259,11 @@ func NewStrictMockEnterpriseDB() *MockEnterpriseDB {
panic("unexpected invocation of MockEnterpriseDB.RepoKVPs")
},
},
RepoStatisticsFunc: &EnterpriseDBRepoStatisticsFunc{
defaultHook: func() database.RepoStatisticsStore {
panic("unexpected invocation of MockEnterpriseDB.RepoStatistics")
},
},
ReposFunc: &EnterpriseDBReposFunc{
defaultHook: func() database.RepoStore {
panic("unexpected invocation of MockEnterpriseDB.Repos")
@ -7401,6 +7414,9 @@ func NewMockEnterpriseDBFrom(i EnterpriseDB) *MockEnterpriseDB {
RepoKVPsFunc: &EnterpriseDBRepoKVPsFunc{
defaultHook: i.RepoKVPs,
},
RepoStatisticsFunc: &EnterpriseDBRepoStatisticsFunc{
defaultHook: i.RepoStatistics,
},
ReposFunc: &EnterpriseDBReposFunc{
defaultHook: i.Repos,
},
@ -9882,6 +9898,105 @@ func (c EnterpriseDBRepoKVPsFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// EnterpriseDBRepoStatisticsFunc describes the behavior when the
// RepoStatistics method of the parent MockEnterpriseDB instance is invoked.
type EnterpriseDBRepoStatisticsFunc struct {
defaultHook func() database.RepoStatisticsStore
hooks []func() database.RepoStatisticsStore
history []EnterpriseDBRepoStatisticsFuncCall
mutex sync.Mutex
}
// RepoStatistics delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockEnterpriseDB) RepoStatistics() database.RepoStatisticsStore {
r0 := m.RepoStatisticsFunc.nextHook()()
m.RepoStatisticsFunc.appendCall(EnterpriseDBRepoStatisticsFuncCall{r0})
return r0
}
// SetDefaultHook sets function that is called when the RepoStatistics
// method of the parent MockEnterpriseDB instance is invoked and the hook
// queue is empty.
func (f *EnterpriseDBRepoStatisticsFunc) SetDefaultHook(hook func() database.RepoStatisticsStore) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// RepoStatistics method of the parent MockEnterpriseDB instance invokes the
// hook at the front of the queue and discards it. After the queue is empty,
// the default hook function is invoked for any future action.
func (f *EnterpriseDBRepoStatisticsFunc) PushHook(hook func() database.RepoStatisticsStore) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *EnterpriseDBRepoStatisticsFunc) SetDefaultReturn(r0 database.RepoStatisticsStore) {
f.SetDefaultHook(func() database.RepoStatisticsStore {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *EnterpriseDBRepoStatisticsFunc) PushReturn(r0 database.RepoStatisticsStore) {
f.PushHook(func() database.RepoStatisticsStore {
return r0
})
}
func (f *EnterpriseDBRepoStatisticsFunc) nextHook() func() database.RepoStatisticsStore {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *EnterpriseDBRepoStatisticsFunc) appendCall(r0 EnterpriseDBRepoStatisticsFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of EnterpriseDBRepoStatisticsFuncCall objects
// describing the invocations of this function.
func (f *EnterpriseDBRepoStatisticsFunc) History() []EnterpriseDBRepoStatisticsFuncCall {
f.mutex.Lock()
history := make([]EnterpriseDBRepoStatisticsFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// EnterpriseDBRepoStatisticsFuncCall is an object that describes an
// invocation of method RepoStatistics on an instance of MockEnterpriseDB.
type EnterpriseDBRepoStatisticsFuncCall struct {
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 database.RepoStatisticsStore
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c EnterpriseDBRepoStatisticsFuncCall) Args() []interface{} {
return []interface{}{}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c EnterpriseDBRepoStatisticsFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// EnterpriseDBReposFunc describes the behavior when the Repos method of the
// parent MockEnterpriseDB instance is invoked.
type EnterpriseDBReposFunc struct {

View File

@ -49,6 +49,7 @@ type DB interface {
UserPublicRepos() UserPublicRepoStore
Users() UserStore
WebhookLogs(encryption.Key) WebhookLogStore
RepoStatistics() RepoStatisticsStore
Transact(context.Context) (DB, error)
Done(error) error
@ -214,3 +215,7 @@ func (d *db) Users() UserStore {
func (d *db) WebhookLogs(key encryption.Key) WebhookLogStore {
return WebhookLogsWith(d.Store, key)
}
func (d *db) RepoStatistics() RepoStatisticsStore {
return RepoStatisticsWith(d.Store)
}

View File

@ -3682,6 +3682,9 @@ type MockDB struct {
// RepoKVPsFunc is an instance of a mock function object controlling the
// behavior of the method RepoKVPs.
RepoKVPsFunc *DBRepoKVPsFunc
// RepoStatisticsFunc is an instance of a mock function object
// controlling the behavior of the method RepoStatistics.
RepoStatisticsFunc *DBRepoStatisticsFunc
// ReposFunc is an instance of a mock function object controlling the
// behavior of the method Repos.
ReposFunc *DBReposFunc
@ -3840,6 +3843,11 @@ func NewMockDB() *MockDB {
return
},
},
RepoStatisticsFunc: &DBRepoStatisticsFunc{
defaultHook: func() (r0 RepoStatisticsStore) {
return
},
},
ReposFunc: &DBReposFunc{
defaultHook: func() (r0 RepoStore) {
return
@ -4027,6 +4035,11 @@ func NewStrictMockDB() *MockDB {
panic("unexpected invocation of MockDB.RepoKVPs")
},
},
RepoStatisticsFunc: &DBRepoStatisticsFunc{
defaultHook: func() RepoStatisticsStore {
panic("unexpected invocation of MockDB.RepoStatistics")
},
},
ReposFunc: &DBReposFunc{
defaultHook: func() RepoStore {
panic("unexpected invocation of MockDB.Repos")
@ -4170,6 +4183,9 @@ func NewMockDBFrom(i DB) *MockDB {
RepoKVPsFunc: &DBRepoKVPsFunc{
defaultHook: i.RepoKVPs,
},
RepoStatisticsFunc: &DBRepoStatisticsFunc{
defaultHook: i.RepoStatistics,
},
ReposFunc: &DBReposFunc{
defaultHook: i.Repos,
},
@ -6436,6 +6452,105 @@ func (c DBRepoKVPsFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// DBRepoStatisticsFunc describes the behavior when the RepoStatistics
// method of the parent MockDB instance is invoked.
type DBRepoStatisticsFunc struct {
defaultHook func() RepoStatisticsStore
hooks []func() RepoStatisticsStore
history []DBRepoStatisticsFuncCall
mutex sync.Mutex
}
// RepoStatistics delegates to the next hook function in the queue and
// stores the parameter and result values of this invocation.
func (m *MockDB) RepoStatistics() RepoStatisticsStore {
r0 := m.RepoStatisticsFunc.nextHook()()
m.RepoStatisticsFunc.appendCall(DBRepoStatisticsFuncCall{r0})
return r0
}
// SetDefaultHook sets function that is called when the RepoStatistics
// method of the parent MockDB instance is invoked and the hook queue is
// empty.
func (f *DBRepoStatisticsFunc) SetDefaultHook(hook func() RepoStatisticsStore) {
f.defaultHook = hook
}
// PushHook adds a function to the end of hook queue. Each invocation of the
// RepoStatistics method of the parent MockDB instance invokes the hook at
// the front of the queue and discards it. After the queue is empty, the
// default hook function is invoked for any future action.
func (f *DBRepoStatisticsFunc) PushHook(hook func() RepoStatisticsStore) {
f.mutex.Lock()
f.hooks = append(f.hooks, hook)
f.mutex.Unlock()
}
// SetDefaultReturn calls SetDefaultHook with a function that returns the
// given values.
func (f *DBRepoStatisticsFunc) SetDefaultReturn(r0 RepoStatisticsStore) {
f.SetDefaultHook(func() RepoStatisticsStore {
return r0
})
}
// PushReturn calls PushHook with a function that returns the given values.
func (f *DBRepoStatisticsFunc) PushReturn(r0 RepoStatisticsStore) {
f.PushHook(func() RepoStatisticsStore {
return r0
})
}
func (f *DBRepoStatisticsFunc) nextHook() func() RepoStatisticsStore {
f.mutex.Lock()
defer f.mutex.Unlock()
if len(f.hooks) == 0 {
return f.defaultHook
}
hook := f.hooks[0]
f.hooks = f.hooks[1:]
return hook
}
func (f *DBRepoStatisticsFunc) appendCall(r0 DBRepoStatisticsFuncCall) {
f.mutex.Lock()
f.history = append(f.history, r0)
f.mutex.Unlock()
}
// History returns a sequence of DBRepoStatisticsFuncCall objects describing
// the invocations of this function.
func (f *DBRepoStatisticsFunc) History() []DBRepoStatisticsFuncCall {
f.mutex.Lock()
history := make([]DBRepoStatisticsFuncCall, len(f.history))
copy(history, f.history)
f.mutex.Unlock()
return history
}
// DBRepoStatisticsFuncCall is an object that describes an invocation of
// method RepoStatistics on an instance of MockDB.
type DBRepoStatisticsFuncCall struct {
// Result0 is the value of the 1st result returned from this method
// invocation.
Result0 RepoStatisticsStore
}
// Args returns an interface slice containing the arguments of this
// invocation.
func (c DBRepoStatisticsFuncCall) Args() []interface{} {
return []interface{}{}
}
// Results returns an interface slice containing the results of this
// invocation.
func (c DBRepoStatisticsFuncCall) Results() []interface{} {
return []interface{}{c.Result0}
}
// DBReposFunc describes the behavior when the Repos method of the parent
// MockDB instance is invoked.
type DBReposFunc struct {

View File

@ -0,0 +1,149 @@
package database
import (
"context"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
)
// RepoStatistics represents the summed contents of the rows in the
// repo_statistics table.
type RepoStatistics struct {
Total int
SoftDeleted int
NotCloned int
Cloning int
Cloned int
FailedFetch int
}
// GitserverReposStatistic represents a row of the
// gitserver_repos_statistics table, where each gitserver shard should have a
// separate row and gitserver_repos that haven't been assigned a shard yet are
// counted under an empty ShardID.
type GitserverReposStatistic struct {
ShardID string
Total int
NotCloned int
Cloning int
Cloned int
FailedFetch int
}
type RepoStatisticsStore interface {
Transact(context.Context) (RepoStatisticsStore, error)
With(basestore.ShareableStore) RepoStatisticsStore
GetRepoStatistics(ctx context.Context) (RepoStatistics, error)
CompactRepoStatistics(ctx context.Context) error
GetGitserverReposStatistics(ctx context.Context) ([]GitserverReposStatistic, error)
}
// repoStatisticsStore is responsible for data stored in the repo_statistics
// and the gitserver_repos_statistics tables.
type repoStatisticsStore struct {
*basestore.Store
}
// RepoStatisticsWith instantiates and returns a new repoStatisticsStore using
// the other store handle.
func RepoStatisticsWith(other basestore.ShareableStore) RepoStatisticsStore {
return &repoStatisticsStore{Store: basestore.NewWithHandle(other.Handle())}
}
func (s *repoStatisticsStore) With(other basestore.ShareableStore) RepoStatisticsStore {
return &repoStatisticsStore{Store: s.Store.With(other)}
}
func (s *repoStatisticsStore) Transact(ctx context.Context) (RepoStatisticsStore, error) {
txBase, err := s.Store.Transact(ctx)
return &repoStatisticsStore{Store: txBase}, err
}
func (s *repoStatisticsStore) GetRepoStatistics(ctx context.Context) (RepoStatistics, error) {
var rs RepoStatistics
row := s.QueryRow(ctx, sqlf.Sprintf(getRepoStatisticsQueryFmtstr))
err := row.Scan(&rs.Total, &rs.SoftDeleted, &rs.NotCloned, &rs.Cloning, &rs.Cloned, &rs.FailedFetch)
if err != nil {
return rs, err
}
return rs, nil
}
const getRepoStatisticsQueryFmtstr = `
-- source: internal/database/repo_statistics.go:repoStatisticsStore.GetRepoStatistics
SELECT
SUM(total),
SUM(soft_deleted),
SUM(not_cloned),
SUM(cloning),
SUM(cloned),
SUM(failed_fetch)
FROM repo_statistics
`
func (s *repoStatisticsStore) CompactRepoStatistics(ctx context.Context) error {
return s.Exec(ctx, sqlf.Sprintf(compactRepoStatisticsQueryFmtstr))
}
const compactRepoStatisticsQueryFmtstr = `
-- source: internal/database/repo_statistics.go:repoStatisticsStore.CompactRepoStatistics
WITH deleted AS (
DELETE FROM repo_statistics
RETURNING
total,
soft_deleted,
not_cloned,
cloning,
cloned,
failed_fetch
)
INSERT INTO repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)
SELECT
SUM(total),
SUM(soft_deleted),
SUM(not_cloned),
SUM(cloning),
SUM(cloned),
SUM(failed_fetch)
FROM deleted;
`
func (s *repoStatisticsStore) GetGitserverReposStatistics(ctx context.Context) ([]GitserverReposStatistic, error) {
rows, err := s.Query(ctx, sqlf.Sprintf(getGitserverReposStatisticsQueryFmtStr))
return scanGitserverReposStatistics(rows, err)
}
const getGitserverReposStatisticsQueryFmtStr = `
-- source: internal/database/repo_statistics.go:repoStatisticsStore.GetGitserverReposStatistics
SELECT
shard_id,
total,
not_cloned,
cloning,
cloned,
failed_fetch
FROM gitserver_repos_statistics
`
var scanGitserverReposStatistics = basestore.NewSliceScanner(scanGitserverReposStatistic)
func scanGitserverReposStatistic(s dbutil.Scanner) (GitserverReposStatistic, error) {
var gs = GitserverReposStatistic{}
err := s.Scan(&gs.ShardID, &gs.Total, &gs.NotCloned, &gs.Cloning, &gs.Cloned, &gs.FailedFetch)
if err != nil {
return gs, err
}
return gs, nil
}
func scanRepoStatistics(s dbutil.Scanner) (RepoStatistics, error) {
var rs RepoStatistics
err := s.Scan(&rs.Total, &rs.SoftDeleted, &rs.NotCloned, &rs.Cloning, &rs.Cloned, &rs.FailedFetch)
if err != nil {
return rs, err
}
return rs, nil
}
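
For the per-shard counts, a minimal usage sketch of `GetGitserverReposStatistics` (the `logShardStatistics` caller is hypothetical; repositories without an assigned shard are reported under the empty `ShardID`):

```go
package example

import (
	"context"
	"fmt"

	"github.com/sourcegraph/sourcegraph/internal/database"
)

// logShardStatistics is a hypothetical caller of GetGitserverReposStatistics;
// it prints one line per gitserver shard.
func logShardStatistics(ctx context.Context, store database.RepoStatisticsStore) error {
	shardStats, err := store.GetGitserverReposStatistics(ctx)
	if err != nil {
		return err
	}
	for _, s := range shardStats {
		// ShardID == "" groups gitserver_repos rows that haven't been assigned a shard yet.
		fmt.Printf("shard=%q total=%d not_cloned=%d cloning=%d cloned=%d failed_fetch=%d\n",
			s.ShardID, s.Total, s.NotCloned, s.Cloning, s.Cloned, s.FailedFetch)
	}
	return nil
}
```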

View File

@ -0,0 +1,279 @@
package database
import (
"context"
"sort"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/keegancsmith/sqlf"
"github.com/sourcegraph/log/logtest"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/dbtest"
"github.com/sourcegraph/sourcegraph/internal/types"
)
func TestRepoStatistics(t *testing.T) {
if testing.Short() {
t.Skip()
}
logger := logtest.Scoped(t)
db := NewDB(logger, dbtest.NewDB(logger, t))
ctx := context.Background()
s := &repoStatisticsStore{Store: basestore.NewWithHandle(db.Handle())}
shards := []string{
"shard-1",
"shard-2",
"shard-3",
}
repos := types.Repos{
&types.Repo{Name: "repo1"},
&types.Repo{Name: "repo2"},
&types.Repo{Name: "repo3"},
&types.Repo{Name: "repo4"},
&types.Repo{Name: "repo5"},
&types.Repo{Name: "repo6"},
}
createTestRepos(ctx, t, db, repos)
assertRepoStatistics(t, ctx, s, RepoStatistics{
Total: 6, NotCloned: 6, SoftDeleted: 0,
}, []GitserverReposStatistic{
{ShardID: "", Total: 6, NotCloned: 6},
})
// Move two repos to shards[0] as cloning
setCloneStatus(t, db, repos[0].Name, shards[0], types.CloneStatusCloning)
setCloneStatus(t, db, repos[1].Name, shards[0], types.CloneStatusCloning)
assertRepoStatistics(t, ctx, s, RepoStatistics{
Total: 6, SoftDeleted: 0, NotCloned: 4, Cloning: 2,
}, []GitserverReposStatistic{
{ShardID: "", Total: 4, NotCloned: 4},
{ShardID: shards[0], Total: 2, Cloning: 2},
})
// Move two repos to shards[1] as cloning
setCloneStatus(t, db, repos[2].Name, shards[1], types.CloneStatusCloning)
setCloneStatus(t, db, repos[3].Name, shards[1], types.CloneStatusCloning)
// Move two repos to shards[2] as cloning
setCloneStatus(t, db, repos[4].Name, shards[2], types.CloneStatusCloning)
setCloneStatus(t, db, repos[5].Name, shards[2], types.CloneStatusCloning)
assertRepoStatistics(t, ctx, s, RepoStatistics{
Total: 6, SoftDeleted: 0, Cloning: 6,
}, []GitserverReposStatistic{
{ShardID: ""},
{ShardID: shards[0], Total: 2, Cloning: 2},
{ShardID: shards[1], Total: 2, Cloning: 2},
{ShardID: shards[2], Total: 2, Cloning: 2},
})
// Move a repo from shards[1] to shards[2] and change its status
setCloneStatus(t, db, repos[2].Name, shards[2], types.CloneStatusCloned)
assertRepoStatistics(t, ctx, s, RepoStatistics{
Total: 6, SoftDeleted: 0, Cloning: 5, Cloned: 1,
}, []GitserverReposStatistic{
{ShardID: ""},
{ShardID: shards[0], Total: 2, Cloning: 2},
{ShardID: shards[1], Total: 1, Cloning: 1},
{ShardID: shards[2], Total: 3, Cloning: 2, Cloned: 1},
})
// Soft delete repos
if err := db.Repos().Delete(ctx, repos[2].ID); err != nil {
t.Fatal(err)
}
deletedRepoName := queryRepoName(t, ctx, s, repos[2].ID)
// Deletion is reflected in repoStatistics
assertRepoStatistics(t, ctx, s, RepoStatistics{
Total: 5, SoftDeleted: 1, Cloning: 5,
}, []GitserverReposStatistic{
// But gitserverReposStatistics is unchanged
{ShardID: ""},
{ShardID: shards[0], Total: 2, Cloning: 2},
{ShardID: shards[1], Total: 1, Cloning: 1},
{ShardID: shards[2], Total: 3, Cloning: 2, Cloned: 1},
})
// Until we remove it from disk in gitserver, which causes the clone status
// to be set to not_cloned:
setCloneStatus(t, db, deletedRepoName, shards[2], types.CloneStatusNotCloned)
assertRepoStatistics(t, ctx, s, RepoStatistics{
// Global stats are unchanged
Total: 5, SoftDeleted: 1, Cloning: 5,
}, []GitserverReposStatistic{
{ShardID: ""},
{ShardID: shards[0], Total: 2, Cloning: 2},
{ShardID: shards[1], Total: 1, Cloning: 1},
// But now it's reflected as NotCloned
{ShardID: shards[2], Total: 3, Cloning: 2, NotCloned: 1},
})
// Now we set errors on 2 non-deleted repositories
setLastError(t, db, repos[0].Name, shards[0], "internet broke repo-1")
setLastError(t, db, repos[4].Name, shards[2], "internet broke repo-3")
assertRepoStatistics(t, ctx, s, RepoStatistics{
// Only FailedFetch changed
Total: 5, SoftDeleted: 1, Cloning: 5, FailedFetch: 2,
}, []GitserverReposStatistic{
{ShardID: ""},
{ShardID: shards[0], Total: 2, Cloning: 2, FailedFetch: 1},
{ShardID: shards[1], Total: 1, Cloning: 1, FailedFetch: 1},
{ShardID: shards[2], Total: 3, Cloning: 2, NotCloned: 1},
})
// Now we move a repo and set an error
setLastError(t, db, repos[1].Name, shards[1], "internet broke repo-2")
assertRepoStatistics(t, ctx, s, RepoStatistics{
// Only FailedFetch changed
Total: 5, SoftDeleted: 1, Cloning: 5, FailedFetch: 3,
}, []GitserverReposStatistic{
{ShardID: ""},
{ShardID: shards[0], Total: 1, Cloning: 1, FailedFetch: 1},
{ShardID: shards[1], Total: 2, Cloning: 2, FailedFetch: 2},
{ShardID: shards[2], Total: 3, Cloning: 2, NotCloned: 1},
})
}
func TestRepoStatistics_Compaction(t *testing.T) {
if testing.Short() {
t.Skip()
}
logger := logtest.Scoped(t)
db := NewDB(logger, dbtest.NewDB(logger, t))
ctx := context.Background()
s := &repoStatisticsStore{Store: basestore.NewWithHandle(db.Handle())}
shards := []string{
"shard-1",
"shard-2",
"shard-3",
}
repos := types.Repos{
&types.Repo{Name: "repo1"},
&types.Repo{Name: "repo2"},
&types.Repo{Name: "repo3"},
&types.Repo{Name: "repo4"},
&types.Repo{Name: "repo5"},
&types.Repo{Name: "repo6"},
}
// Trigger 9 insertions into repo_statistics table:
createTestRepos(ctx, t, db, repos)
setCloneStatus(t, db, repos[0].Name, shards[0], types.CloneStatusCloning)
setCloneStatus(t, db, repos[1].Name, shards[0], types.CloneStatusCloning)
setCloneStatus(t, db, repos[2].Name, shards[1], types.CloneStatusCloning)
setCloneStatus(t, db, repos[3].Name, shards[1], types.CloneStatusCloning)
setCloneStatus(t, db, repos[4].Name, shards[2], types.CloneStatusCloning)
setCloneStatus(t, db, repos[5].Name, shards[2], types.CloneStatusCloning)
setLastError(t, db, repos[0].Name, shards[0], "internet broke repo-1")
setLastError(t, db, repos[4].Name, shards[2], "internet broke repo-3")
// Safety check that the counts are right:
wantRepoStatistics := RepoStatistics{
Total: 6, Cloning: 6, FailedFetch: 2,
}
wantGitserverReposStatistics := []GitserverReposStatistic{
{ShardID: ""},
{ShardID: shards[0], Total: 2, Cloning: 2, FailedFetch: 1},
{ShardID: shards[1], Total: 2, Cloning: 2},
{ShardID: shards[2], Total: 2, Cloning: 2, FailedFetch: 1},
}
assertRepoStatistics(t, ctx, s, wantRepoStatistics, wantGitserverReposStatistics)
// The initial insert in the migration also added a row, which means we want:
wantCount := 10
count := queryRepoStatisticsCount(t, ctx, s)
if count != wantCount {
t.Fatalf("wrong statistics count. have=%d, want=%d", count, wantCount)
}
// Now we compact the rows into a single row:
if err := s.CompactRepoStatistics(ctx); err != nil {
t.Fatalf("GetRepoStatistics failed: %s", err)
}
// We should be left with 1 row
wantCount = 1
count = queryRepoStatisticsCount(t, ctx, s)
if count != wantCount {
t.Fatalf("wrong statistics count. have=%d, want=%d", count, wantCount)
}
// And counts should still be the same
assertRepoStatistics(t, ctx, s, wantRepoStatistics, wantGitserverReposStatistics)
// Safety check: add another event and make sure row count goes up again
setCloneStatus(t, db, repos[5].Name, shards[2], types.CloneStatusCloned)
wantCount = 2
count = queryRepoStatisticsCount(t, ctx, s)
if count != wantCount {
t.Fatalf("wrong statistics count. have=%d, want=%d", count, wantCount)
}
}
func queryRepoName(t *testing.T, ctx context.Context, s *repoStatisticsStore, repoID api.RepoID) api.RepoName {
t.Helper()
var name api.RepoName
err := s.QueryRow(ctx, sqlf.Sprintf("SELECT name FROM repo WHERE id = %s", repoID)).Scan(&name)
if err != nil {
t.Fatalf("failed to query repo name: %s", err)
}
return name
}
func queryRepoStatisticsCount(t *testing.T, ctx context.Context, s *repoStatisticsStore) int {
t.Helper()
var count int
err := s.QueryRow(ctx, sqlf.Sprintf("SELECT COUNT(*) FROM repo_statistics;")).Scan(&count)
if err != nil {
t.Fatalf("failed to query repo name: %s", err)
}
return count
}
func setCloneStatus(t *testing.T, db DB, repoName api.RepoName, shard string, status types.CloneStatus) {
t.Helper()
if err := db.GitserverRepos().SetCloneStatus(context.Background(), repoName, status, shard); err != nil {
t.Fatalf("failed to set clone status for repo %s: %s", repoName, err)
}
}
func setLastError(t *testing.T, db DB, repoName api.RepoName, shard string, msg string) {
t.Helper()
if err := db.GitserverRepos().SetLastError(context.Background(), repoName, msg, shard); err != nil {
t.Fatalf("failed to set clone status for repo %s: %s", repoName, err)
}
}
func assertRepoStatistics(t *testing.T, ctx context.Context, s RepoStatisticsStore, wantRepoStats RepoStatistics, wantGitserverStats []GitserverReposStatistic) {
t.Helper()
haveRepoStats, err := s.GetRepoStatistics(ctx)
if err != nil {
t.Fatalf("GetRepoStatistics failed: %s", err)
}
if diff := cmp.Diff(haveRepoStats, wantRepoStats); diff != "" {
t.Fatalf("repoStatistics differ: %s", diff)
}
haveGitserverStats, err := s.GetGitserverReposStatistics(ctx)
if err != nil {
t.Fatalf("GetRepoStatistics failed: %s", err)
}
sort.Slice(haveGitserverStats, func(i, j int) bool { return haveGitserverStats[i].ShardID < haveGitserverStats[j].ShardID })
sort.Slice(wantGitserverStats, func(i, j int) bool { return wantGitserverStats[i].ShardID < wantGitserverStats[j].ShardID })
if diff := cmp.Diff(haveGitserverStats, wantGitserverStats); diff != "" {
t.Fatalf("gitserverReposStatistics differ: %s", diff)
}
}

View File

@ -144,6 +144,30 @@
"Name": "merge_audit_log_transitions",
"Definition": "CREATE OR REPLACE FUNCTION public.merge_audit_log_transitions(internal hstore, arrayhstore hstore[])\n RETURNS hstore\n LANGUAGE plpgsql\n IMMUTABLE\nAS $function$\n DECLARE\n trans hstore;\n BEGIN\n FOREACH trans IN ARRAY arrayhstore\n LOOP\n internal := internal || hstore(trans-\u003e'column', trans-\u003e'new');\n END LOOP;\n\n RETURN internal;\n END;\n$function$\n"
},
{
"Name": "recalc_gitserver_repos_statistics_on_delete",
"Definition": "CREATE OR REPLACE FUNCTION public.recalc_gitserver_repos_statistics_on_delete()\n RETURNS trigger\n LANGUAGE plpgsql\nAS $function$ BEGIN\n UPDATE gitserver_repos_statistics grs\n SET\n total = grs.total - (SELECT COUNT(*) FROM oldtab WHERE oldtab.shard_id = grs.shard_id),\n not_cloned = grs.not_cloned - (SELECT COUNT(*) FILTER(WHERE clone_status = 'not_cloned') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),\n cloning = grs.cloning - (SELECT COUNT(*) FILTER(WHERE clone_status = 'cloning') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),\n cloned = grs.cloned - (SELECT COUNT(*) FILTER(WHERE clone_status = 'cloned') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),\n failed_fetch = grs.cloned - (SELECT COUNT(*) FILTER(WHERE last_error IS NOT NULL) FROM oldtab WHERE oldtab.shard_id = grs.shard_id)\n ;\n\n RETURN NULL;\n END\n$function$\n"
},
{
"Name": "recalc_gitserver_repos_statistics_on_insert",
"Definition": "CREATE OR REPLACE FUNCTION public.recalc_gitserver_repos_statistics_on_insert()\n RETURNS trigger\n LANGUAGE plpgsql\nAS $function$ BEGIN\n INSERT INTO gitserver_repos_statistics AS grs (shard_id, total, not_cloned, cloning, cloned, failed_fetch)\n SELECT\n shard_id,\n COUNT(*) AS total,\n COUNT(*) FILTER(WHERE clone_status = 'not_cloned') AS not_cloned,\n COUNT(*) FILTER(WHERE clone_status = 'cloning') AS cloning,\n COUNT(*) FILTER(WHERE clone_status = 'cloned') AS cloned,\n COUNT(*) FILTER(WHERE last_error IS NOT NULL) AS failed_fetch\n FROM\n newtab\n GROUP BY shard_id\n ON CONFLICT(shard_id)\n DO UPDATE\n SET\n total = grs.total + excluded.total,\n not_cloned = grs.not_cloned + excluded.not_cloned,\n cloning = grs.cloning + excluded.cloning,\n cloned = grs.cloned + excluded.cloned,\n failed_fetch = grs.failed_fetch + excluded.failed_fetch\n ;\n\n RETURN NULL;\n END\n$function$\n"
},
{
"Name": "recalc_gitserver_repos_statistics_on_update",
"Definition": "CREATE OR REPLACE FUNCTION public.recalc_gitserver_repos_statistics_on_update()\n RETURNS trigger\n LANGUAGE plpgsql\nAS $function$ BEGIN\n INSERT INTO gitserver_repos_statistics AS grs (shard_id, total, not_cloned, cloning, cloned, failed_fetch)\n SELECT\n newtab.shard_id AS shard_id,\n COUNT(*) AS total,\n COUNT(*) FILTER(WHERE clone_status = 'not_cloned') AS not_cloned,\n COUNT(*) FILTER(WHERE clone_status = 'cloning') AS cloning,\n COUNT(*) FILTER(WHERE clone_status = 'cloned') AS cloned,\n COUNT(*) FILTER(WHERE last_error IS NOT NULL) AS failed_fetch\n FROM\n newtab\n GROUP BY newtab.shard_id\n ON CONFLICT(shard_id) DO\n UPDATE\n SET\n total = grs.total + (excluded.total - (SELECT COUNT(*) FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),\n not_cloned = grs.not_cloned + (excluded.not_cloned - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'not_cloned') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),\n cloning = grs.cloning + (excluded.cloning - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'cloning') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),\n cloned = grs.cloned + (excluded.cloned - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'cloned') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),\n failed_fetch = grs.failed_fetch + (excluded.failed_fetch - (SELECT COUNT(*) FILTER(WHERE ot.last_error IS NOT NULL) FROM oldtab ot WHERE ot.shard_id = excluded.shard_id))\n ;\n\n WITH moved AS (\n SELECT\n oldtab.shard_id AS shard_id,\n COUNT(*) AS total,\n COUNT(*) FILTER(WHERE oldtab.clone_status = 'not_cloned') AS not_cloned,\n COUNT(*) FILTER(WHERE oldtab.clone_status = 'cloning') AS cloning,\n COUNT(*) FILTER(WHERE oldtab.clone_status = 'cloned') AS cloned,\n COUNT(*) FILTER(WHERE oldtab.last_error IS NOT NULL) AS failed_fetch\n FROM\n oldtab\n JOIN newtab ON newtab.repo_id = oldtab.repo_id\n WHERE\n oldtab.shard_id != newtab.shard_id\n GROUP BY oldtab.shard_id\n )\n UPDATE gitserver_repos_statistics grs\n SET\n total = grs.total - moved.total,\n not_cloned = grs.not_cloned - moved.not_cloned,\n cloning = grs.cloning - moved.cloning,\n cloned = grs.cloned - moved.cloned,\n failed_fetch = grs.failed_fetch - moved.failed_fetch\n FROM moved\n WHERE moved.shard_id = grs.shard_id;\n\n INSERT INTO repo_statistics (not_cloned, cloning, cloned, failed_fetch)\n VALUES (\n (\n (SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'not_cloned')\n -\n (SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'not_cloned')\n ),\n (\n (SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'cloning')\n -\n (SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'cloning')\n ),\n (\n (SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'cloned')\n -\n (SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'cloned')\n ),\n (\n (SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.last_error IS NOT NULL)\n -\n (SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE 
r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.last_error IS NOT NULL)\n )\n );\n\n RETURN NULL;\n END\n$function$\n"
},
{
"Name": "recalc_repo_statistics_on_repo_delete",
"Definition": "CREATE OR REPLACE FUNCTION public.recalc_repo_statistics_on_repo_delete()\n RETURNS trigger\n LANGUAGE plpgsql\nAS $function$ BEGIN\n INSERT INTO\n repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)\n VALUES (\n -- Insert negative counts\n (SELECT -COUNT(*) FROM oldtab WHERE deleted_at IS NULL AND blocked IS NULL),\n (SELECT -COUNT(*) FROM oldtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),\n (SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'not_cloned'),\n (SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloning'),\n (SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloned'),\n (SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.last_error IS NOT NULL)\n );\n RETURN NULL;\n END\n$function$\n"
},
{
"Name": "recalc_repo_statistics_on_repo_insert",
"Definition": "CREATE OR REPLACE FUNCTION public.recalc_repo_statistics_on_repo_insert()\n RETURNS trigger\n LANGUAGE plpgsql\nAS $function$ BEGIN\n INSERT INTO\n repo_statistics (total, soft_deleted, not_cloned)\n VALUES (\n (SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL),\n (SELECT COUNT(*) FROM newtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),\n -- New repositories are always not_cloned by default, so we can count them as not cloned here\n (SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL)\n -- New repositories never have last_error set, so we can also ignore those here\n );\n RETURN NULL;\n END\n$function$\n"
},
{
"Name": "recalc_repo_statistics_on_repo_update",
"Definition": "CREATE OR REPLACE FUNCTION public.recalc_repo_statistics_on_repo_update()\n RETURNS trigger\n LANGUAGE plpgsql\nAS $function$ BEGIN\n -- Insert diff of changes\n INSERT INTO\n repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)\n VALUES (\n (SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL) - (SELECT COUNT(*) FROM oldtab WHERE deleted_at IS NULL AND blocked IS NULL),\n (SELECT COUNT(*) FROM newtab WHERE deleted_at IS NOT NULL AND blocked IS NULL) - (SELECT COUNT(*) FROM oldtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),\n (\n (SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'not_cloned')\n -\n (SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'not_cloned')\n ),\n (\n (SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'cloning')\n -\n (SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloning')\n ),\n (\n (SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'cloned')\n -\n (SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloned')\n ),\n (\n (SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.last_error IS NOT NULL)\n -\n (SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.last_error IS NOT NULL)\n )\n )\n ;\n RETURN NULL;\n END\n$function$\n"
},
{
"Name": "repo_block",
"Definition": "CREATE OR REPLACE FUNCTION public.repo_block(reason text, at timestamp with time zone)\n RETURNS jsonb\n LANGUAGE sql\n IMMUTABLE STRICT\nAS $function$\nSELECT jsonb_build_object(\n 'reason', reason,\n 'at', extract(epoch from timezone('utc', at))::bigint\n);\n$function$\n"
@ -9565,6 +9589,117 @@
"ConstraintDefinition": "FOREIGN KEY (repo_id) REFERENCES repo(id) ON DELETE CASCADE"
}
],
"Triggers": [
{
"Name": "trig_recalc_gitserver_repos_statistics_on_delete",
"Definition": "CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_delete AFTER DELETE ON gitserver_repos REFERENCING OLD TABLE AS oldtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_delete()"
},
{
"Name": "trig_recalc_gitserver_repos_statistics_on_insert",
"Definition": "CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_insert AFTER INSERT ON gitserver_repos REFERENCING NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_insert()"
},
{
"Name": "trig_recalc_gitserver_repos_statistics_on_update",
"Definition": "CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_update AFTER UPDATE ON gitserver_repos REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_update()"
}
]
},
{
"Name": "gitserver_repos_statistics",
"Comment": "",
"Columns": [
{
"Name": "cloned",
"Index": 5,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories in gitserver_repos table on this shard that are cloned"
},
{
"Name": "cloning",
"Index": 4,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories in gitserver_repos table on this shard that cloning"
},
{
"Name": "failed_fetch",
"Index": 6,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories in gitserver_repos table on this shard where last_error is set"
},
{
"Name": "not_cloned",
"Index": 3,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories in gitserver_repos table on this shard that are not cloned yet"
},
{
"Name": "shard_id",
"Index": 1,
"TypeName": "text",
"IsNullable": false,
"Default": "",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "ID of this gitserver shard. If an empty string then the repositories havent been assigned a shard."
},
{
"Name": "total",
"Index": 2,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories in gitserver_repos table on this shard"
}
],
"Indexes": [
{
"Name": "gitserver_repos_statistics_pkey",
"IsPrimaryKey": true,
"IsUnique": true,
"IsExclusion": false,
"IsDeferrable": false,
"IndexDefinition": "CREATE UNIQUE INDEX gitserver_repos_statistics_pkey ON gitserver_repos_statistics USING btree (shard_id)",
"ConstraintType": "p",
"ConstraintDefinition": "PRIMARY KEY (shard_id)"
}
],
"Constraints": null,
"Triggers": []
},
{
@ -15732,6 +15867,18 @@
"Name": "trig_delete_repo_ref_on_external_service_repos",
"Definition": "CREATE TRIGGER trig_delete_repo_ref_on_external_service_repos AFTER UPDATE OF deleted_at ON repo FOR EACH ROW EXECUTE FUNCTION delete_repo_ref_on_external_service_repos()"
},
{
"Name": "trig_recalc_repo_statistics_on_repo_delete",
"Definition": "CREATE TRIGGER trig_recalc_repo_statistics_on_repo_delete AFTER DELETE ON repo REFERENCING OLD TABLE AS oldtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_delete()"
},
{
"Name": "trig_recalc_repo_statistics_on_repo_insert",
"Definition": "CREATE TRIGGER trig_recalc_repo_statistics_on_repo_insert AFTER INSERT ON repo REFERENCING NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_insert()"
},
{
"Name": "trig_recalc_repo_statistics_on_repo_update",
"Definition": "CREATE TRIGGER trig_recalc_repo_statistics_on_repo_update AFTER UPDATE ON repo REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_update()"
},
{
"Name": "trigger_gitserver_repo_insert",
"Definition": "CREATE TRIGGER trigger_gitserver_repo_insert AFTER INSERT ON repo FOR EACH ROW EXECUTE FUNCTION func_insert_gitserver_repo()"
@ -15985,6 +16132,93 @@
"Constraints": null,
"Triggers": []
},
{
"Name": "repo_statistics",
"Comment": "",
"Columns": [
{
"Name": "cloned",
"Index": 5,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories that are NOT soft-deleted and not blocked and cloned by gitserver"
},
{
"Name": "cloning",
"Index": 4,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories that are NOT soft-deleted and not blocked and currently being cloned by gitserver"
},
{
"Name": "failed_fetch",
"Index": 6,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories that are NOT soft-deleted and not blocked and have last_error set in gitserver_repos table"
},
{
"Name": "not_cloned",
"Index": 3,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories that are NOT soft-deleted and not blocked and not cloned by gitserver"
},
{
"Name": "soft_deleted",
"Index": 2,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories that are soft-deleted and not blocked"
},
{
"Name": "total",
"Index": 1,
"TypeName": "bigint",
"IsNullable": false,
"Default": "0",
"CharacterMaximumLength": 0,
"IsIdentity": false,
"IdentityGeneration": "",
"IsGenerated": "NEVER",
"GenerationExpression": "",
"Comment": "Number of repositories that are not soft-deleted and not blocked"
}
],
"Indexes": [],
"Constraints": null,
"Triggers": []
},
{
"Name": "saved_searches",
"Comment": "",

View File

@ -1284,9 +1284,40 @@ Indexes:
"gitserver_repos_shard_id" btree (shard_id, repo_id)
Foreign-key constraints:
"gitserver_repos_repo_id_fkey" FOREIGN KEY (repo_id) REFERENCES repo(id) ON DELETE CASCADE
Triggers:
trig_recalc_gitserver_repos_statistics_on_delete AFTER DELETE ON gitserver_repos REFERENCING OLD TABLE AS oldtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_delete()
trig_recalc_gitserver_repos_statistics_on_insert AFTER INSERT ON gitserver_repos REFERENCING NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_insert()
trig_recalc_gitserver_repos_statistics_on_update AFTER UPDATE ON gitserver_repos REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_update()
```
# Table "public.gitserver_repos_statistics"
```
Column | Type | Collation | Nullable | Default
--------------+--------+-----------+----------+---------
shard_id | text | | not null |
total | bigint | | not null | 0
not_cloned | bigint | | not null | 0
cloning | bigint | | not null | 0
cloned | bigint | | not null | 0
failed_fetch | bigint | | not null | 0
Indexes:
"gitserver_repos_statistics_pkey" PRIMARY KEY, btree (shard_id)
```
**cloned**: Number of repositories in gitserver_repos table on this shard that are cloned
**cloning**: Number of repositories in gitserver_repos table on this shard that are cloning
**failed_fetch**: Number of repositories in gitserver_repos table on this shard where last_error is set
**not_cloned**: Number of repositories in gitserver_repos table on this shard that are not cloned yet
**shard_id**: ID of this gitserver shard. If an empty string then the repositories havent been assigned a shard.
**total**: Number of repositories in gitserver_repos table on this shard
# Table "public.global_state"
```
Column | Type | Collation | Nullable | Default
@ -2406,6 +2437,9 @@ Referenced by:
TABLE "user_public_repos" CONSTRAINT "user_public_repos_repo_id_fkey" FOREIGN KEY (repo_id) REFERENCES repo(id) ON DELETE CASCADE
Triggers:
trig_delete_repo_ref_on_external_service_repos AFTER UPDATE OF deleted_at ON repo FOR EACH ROW EXECUTE FUNCTION delete_repo_ref_on_external_service_repos()
trig_recalc_repo_statistics_on_repo_delete AFTER DELETE ON repo REFERENCING OLD TABLE AS oldtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_delete()
trig_recalc_repo_statistics_on_repo_insert AFTER INSERT ON repo REFERENCING NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_insert()
trig_recalc_repo_statistics_on_repo_update AFTER UPDATE ON repo REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_update()
trigger_gitserver_repo_insert AFTER INSERT ON repo FOR EACH ROW EXECUTE FUNCTION func_insert_gitserver_repo()
```
@ -2453,6 +2487,31 @@ Indexes:
```
# Table "public.repo_statistics"
```
Column | Type | Collation | Nullable | Default
--------------+--------+-----------+----------+---------
total | bigint | | not null | 0
soft_deleted | bigint | | not null | 0
not_cloned | bigint | | not null | 0
cloning | bigint | | not null | 0
cloned | bigint | | not null | 0
failed_fetch | bigint | | not null | 0
```
**cloned**: Number of repositories that are NOT soft-deleted and not blocked and cloned by gitserver
**cloning**: Number of repositories that are NOT soft-deleted and not blocked and currently being cloned by gitserver
**failed_fetch**: Number of repositories that are NOT soft-deleted and not blocked and have last_error set in gitserver_repos table
**not_cloned**: Number of repositories that are NOT soft-deleted and not blocked and not cloned by gitserver
**soft_deleted**: Number of repositories that are soft-deleted and not blocked
**total**: Number of repositories that are not soft-deleted and not blocked
# Table "public.saved_searches"
```
Column | Type | Collation | Nullable | Default

View File

@ -0,0 +1,18 @@
DROP TRIGGER IF EXISTS trig_recalc_repo_statistics_on_repo_update ON repo;
DROP TRIGGER IF EXISTS trig_recalc_repo_statistics_on_repo_insert ON repo;
DROP TRIGGER IF EXISTS trig_recalc_repo_statistics_on_repo_delete ON repo;
DROP FUNCTION IF EXISTS recalc_repo_statistics_on_repo_update();
DROP FUNCTION IF EXISTS recalc_repo_statistics_on_repo_insert();
DROP FUNCTION IF EXISTS recalc_repo_statistics_on_repo_delete();
DROP TRIGGER IF EXISTS trig_recalc_gitserver_repos_statistics_on_update ON gitserver_repos;
DROP TRIGGER IF EXISTS trig_recalc_gitserver_repos_statistics_on_insert ON gitserver_repos;
DROP TRIGGER IF EXISTS trig_recalc_gitserver_repos_statistics_on_delete ON gitserver_repos;
DROP FUNCTION IF EXISTS recalc_gitserver_repos_statistics_on_update();
DROP FUNCTION IF EXISTS recalc_gitserver_repos_statistics_on_insert();
DROP FUNCTION IF EXISTS recalc_gitserver_repos_statistics_on_delete();
DROP TABLE IF EXISTS gitserver_repos_statistics;
DROP TABLE IF EXISTS repo_statistics;

View File

@ -0,0 +1,2 @@
name: add_repo_stats_table
parents: [1658950366]

View File

@ -0,0 +1,344 @@
--------------------------------------------------------------------------------
-- repos table --
--------------------------------------------------------------------------------
-- repo_statistics holds statistics for the repo table (hence the singular
-- "repo" in the name)
CREATE TABLE IF NOT EXISTS repo_statistics (
total BIGINT NOT NULL DEFAULT 0,
soft_deleted BIGINT NOT NULL DEFAULT 0,
not_cloned BIGINT NOT NULL DEFAULT 0,
cloning BIGINT NOT NULL DEFAULT 0,
cloned BIGINT NOT NULL DEFAULT 0,
failed_fetch BIGINT NOT NULL DEFAULT 0
);
COMMENT ON COLUMN repo_statistics.total IS 'Number of repositories that are not soft-deleted and not blocked';
COMMENT ON COLUMN repo_statistics.soft_deleted IS 'Number of repositories that are soft-deleted and not blocked';
COMMENT ON COLUMN repo_statistics.not_cloned IS 'Number of repositories that are NOT soft-deleted and not blocked and not cloned by gitserver';
COMMENT ON COLUMN repo_statistics.cloning IS 'Number of repositories that are NOT soft-deleted and not blocked and currently being cloned by gitserver';
COMMENT ON COLUMN repo_statistics.cloned IS 'Number of repositories that are NOT soft-deleted and not blocked and cloned by gitserver';
COMMENT ON COLUMN repo_statistics.failed_fetch IS 'Number of repositories that are NOT soft-deleted and not blocked and have last_error set in gitserver_repos table';
-- Insert initial values into repo_statistics table
INSERT INTO repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)
VALUES (
(SELECT COUNT(*) FROM repo WHERE deleted_at is NULL AND blocked IS NULL),
(SELECT COUNT(*) FROM repo WHERE deleted_at is NOT NULL AND blocked IS NULL),
(
SELECT COUNT(*)
FROM repo
JOIN gitserver_repos gr ON gr.repo_id = repo.id
WHERE
repo.deleted_at is NULL
AND
repo.blocked IS NULL
AND
gr.clone_status = 'not_cloned'
),
(
SELECT COUNT(*)
FROM repo
JOIN gitserver_repos gr ON gr.repo_id = repo.id
WHERE
repo.deleted_at is NULL
AND
repo.blocked IS NULL
AND
gr.clone_status = 'cloning'
),
(
SELECT COUNT(*)
FROM repo
JOIN gitserver_repos gr ON gr.repo_id = repo.id
WHERE
repo.deleted_at is NULL
AND
repo.blocked IS NULL
AND
gr.clone_status = 'cloned'
),
(
SELECT COUNT(*)
FROM repo
JOIN gitserver_repos gr ON gr.repo_id = repo.id
WHERE
repo.deleted_at is NULL
AND
repo.blocked IS NULL
AND
gr.last_error IS NOT NULL
)
);
-- UPDATE
CREATE OR REPLACE FUNCTION recalc_repo_statistics_on_repo_update() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
-- Insert diff of changes
INSERT INTO
repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)
VALUES (
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL) - (SELECT COUNT(*) FROM oldtab WHERE deleted_at IS NULL AND blocked IS NULL),
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NOT NULL AND blocked IS NULL) - (SELECT COUNT(*) FROM oldtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'not_cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'not_cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'cloning')
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloning')
),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.last_error IS NOT NULL)
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.last_error IS NOT NULL)
)
)
;
RETURN NULL;
END
$$;
DROP TRIGGER IF EXISTS trig_recalc_repo_statistics_on_repo_update ON repo;
CREATE TRIGGER trig_recalc_repo_statistics_on_repo_update
AFTER UPDATE ON repo
REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab
FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_update();
-- INSERT
CREATE OR REPLACE FUNCTION recalc_repo_statistics_on_repo_insert() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO
repo_statistics (total, soft_deleted, not_cloned)
VALUES (
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL),
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),
-- New repositories are always not_cloned by default, so we can count them as not cloned here
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL)
-- New repositories never have last_error set, so we can also ignore those here
);
RETURN NULL;
END
$$;
DROP TRIGGER IF EXISTS trig_recalc_repo_statistics_on_repo_insert ON repo;
CREATE TRIGGER trig_recalc_repo_statistics_on_repo_insert
AFTER INSERT ON repo
REFERENCING NEW TABLE AS newtab
FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_insert();
-- DELETE
CREATE OR REPLACE FUNCTION recalc_repo_statistics_on_repo_delete() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO
repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)
VALUES (
-- Insert negative counts
(SELECT -COUNT(*) FROM oldtab WHERE deleted_at IS NULL AND blocked IS NULL),
(SELECT -COUNT(*) FROM oldtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'not_cloned'),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloning'),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloned'),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.last_error IS NOT NULL)
);
RETURN NULL;
END
$$;
DROP TRIGGER IF EXISTS trig_recalc_repo_statistics_on_repo_delete ON repo;
CREATE TRIGGER trig_recalc_repo_statistics_on_repo_delete
AFTER DELETE ON repo
REFERENCING OLD TABLE AS oldtab
FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_delete();
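-- Illustrative sketch (assumption, not part of the migration): hard-deleting a
-- repository that was never cloned appends a row with negative counts,
--
--   (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)
--   (   -1,            0,         -1,       0,      0,            0)
--
-- which cancels out the counts contributed by earlier INSERT/UPDATE diff rows.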
--------------------------------------------------------------------------------
-- gitserver_repos table --
--------------------------------------------------------------------------------
-- gitserver_repos_statistics holds statistics for the gitserver_repos table
CREATE TABLE IF NOT EXISTS gitserver_repos_statistics (
-- In this table we have one row per shard_id
shard_id text PRIMARY KEY,
total BIGINT NOT NULL DEFAULT 0,
not_cloned BIGINT NOT NULL DEFAULT 0,
cloning BIGINT NOT NULL DEFAULT 0,
cloned BIGINT NOT NULL DEFAULT 0,
failed_fetch BIGINT NOT NULL DEFAULT 0
);
COMMENT ON COLUMN gitserver_repos_statistics.shard_id IS 'ID of this gitserver shard. If an empty string then the repositories haven''t been assigned a shard.';
COMMENT ON COLUMN gitserver_repos_statistics.total IS 'Number of repositories in gitserver_repos table on this shard';
COMMENT ON COLUMN gitserver_repos_statistics.not_cloned IS 'Number of repositories in gitserver_repos table on this shard that are not cloned yet';
COMMENT ON COLUMN gitserver_repos_statistics.cloning IS 'Number of repositories in gitserver_repos table on this shard that are currently being cloned';
COMMENT ON COLUMN gitserver_repos_statistics.cloned IS 'Number of repositories in gitserver_repos table on this shard that are cloned';
COMMENT ON COLUMN gitserver_repos_statistics.failed_fetch IS 'Number of repositories in gitserver_repos table on this shard where last_error is set';
-- Insert initial values into gitserver_repos_statistics
INSERT INTO
gitserver_repos_statistics (shard_id, total, not_cloned, cloning, cloned, failed_fetch)
SELECT
shard_id,
COUNT(*) AS total,
COUNT(*) FILTER(WHERE clone_status = 'not_cloned') AS not_cloned,
COUNT(*) FILTER(WHERE clone_status = 'cloning') AS cloning,
COUNT(*) FILTER(WHERE clone_status = 'cloned') AS cloned,
COUNT(*) FILTER(WHERE last_error IS NOT NULL) AS failed_fetch
FROM
gitserver_repos
GROUP BY shard_id
ON CONFLICT(shard_id)
DO UPDATE
SET
total = gitserver_repos_statistics.total + excluded.total,
not_cloned = gitserver_repos_statistics.not_cloned + excluded.not_cloned,
cloning = gitserver_repos_statistics.cloning + excluded.cloning,
cloned = gitserver_repos_statistics.cloned + excluded.cloned,
failed_fetch = gitserver_repos_statistics.failed_fetch + excluded.failed_fetch
;
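-- Sanity-check sketch (assumption, not part of the migration): after this
-- backfill, the per-shard counters should agree with a live aggregation, e.g.
--
--   SELECT shard_id, COUNT(*) FILTER (WHERE clone_status = 'cloned') AS cloned
--   FROM gitserver_repos
--   GROUP BY shard_id;
--
-- should match the cloned column in gitserver_repos_statistics for each shard_id.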
-- UPDATE
CREATE OR REPLACE FUNCTION recalc_gitserver_repos_statistics_on_update() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO gitserver_repos_statistics AS grs (shard_id, total, not_cloned, cloning, cloned, failed_fetch)
SELECT
newtab.shard_id AS shard_id,
COUNT(*) AS total,
COUNT(*) FILTER(WHERE clone_status = 'not_cloned') AS not_cloned,
COUNT(*) FILTER(WHERE clone_status = 'cloning') AS cloning,
COUNT(*) FILTER(WHERE clone_status = 'cloned') AS cloned,
COUNT(*) FILTER(WHERE last_error IS NOT NULL) AS failed_fetch
FROM
newtab
GROUP BY newtab.shard_id
ON CONFLICT(shard_id) DO
UPDATE
SET
total = grs.total + (excluded.total - (SELECT COUNT(*) FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
not_cloned = grs.not_cloned + (excluded.not_cloned - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'not_cloned') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
cloning = grs.cloning + (excluded.cloning - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'cloning') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
cloned = grs.cloned + (excluded.cloned - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'cloned') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
failed_fetch = grs.failed_fetch + (excluded.failed_fetch - (SELECT COUNT(*) FILTER(WHERE ot.last_error IS NOT NULL) FROM oldtab ot WHERE ot.shard_id = excluded.shard_id))
;
WITH moved AS (
SELECT
oldtab.shard_id AS shard_id,
COUNT(*) AS total,
COUNT(*) FILTER(WHERE oldtab.clone_status = 'not_cloned') AS not_cloned,
COUNT(*) FILTER(WHERE oldtab.clone_status = 'cloning') AS cloning,
COUNT(*) FILTER(WHERE oldtab.clone_status = 'cloned') AS cloned,
COUNT(*) FILTER(WHERE oldtab.last_error IS NOT NULL) AS failed_fetch
FROM
oldtab
JOIN newtab ON newtab.repo_id = oldtab.repo_id
WHERE
oldtab.shard_id != newtab.shard_id
GROUP BY oldtab.shard_id
)
UPDATE gitserver_repos_statistics grs
SET
total = grs.total - moved.total,
not_cloned = grs.not_cloned - moved.not_cloned,
cloning = grs.cloning - moved.cloning,
cloned = grs.cloned - moved.cloned,
failed_fetch = grs.failed_fetch - moved.failed_fetch
FROM moved
WHERE moved.shard_id = grs.shard_id;
INSERT INTO repo_statistics (not_cloned, cloning, cloned, failed_fetch)
VALUES (
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'not_cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'not_cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'cloning')
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'cloning')
),
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.last_error IS NOT NULL)
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.last_error IS NOT NULL)
)
);
RETURN NULL;
END
$$;
DROP TRIGGER IF EXISTS trig_recalc_gitserver_repos_statistics_on_update ON gitserver_repos;
CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_update
AFTER UPDATE ON gitserver_repos
REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab
FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_update();
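-- Illustrative sketch (assumption, not part of the migration): marking a repo
-- on shard 'shard-1' as cloned,
--
--   UPDATE gitserver_repos SET clone_status = 'cloned' WHERE repo_id = 1;
--
-- updates the 'shard-1' row of gitserver_repos_statistics in place
-- (cloning - 1, cloned + 1, total unchanged) and appends a diff row
-- (not_cloned = 0, cloning = -1, cloned = +1, failed_fetch = 0) to
-- repo_statistics, assuming the repo is neither soft-deleted nor blocked.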
-- INSERT
CREATE OR REPLACE FUNCTION recalc_gitserver_repos_statistics_on_insert() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO gitserver_repos_statistics AS grs (shard_id, total, not_cloned, cloning, cloned, failed_fetch)
SELECT
shard_id,
COUNT(*) AS total,
COUNT(*) FILTER(WHERE clone_status = 'not_cloned') AS not_cloned,
COUNT(*) FILTER(WHERE clone_status = 'cloning') AS cloning,
COUNT(*) FILTER(WHERE clone_status = 'cloned') AS cloned,
COUNT(*) FILTER(WHERE last_error IS NOT NULL) AS failed_fetch
FROM
newtab
GROUP BY shard_id
ON CONFLICT(shard_id)
DO UPDATE
SET
total = grs.total + excluded.total,
not_cloned = grs.not_cloned + excluded.not_cloned,
cloning = grs.cloning + excluded.cloning,
cloned = grs.cloned + excluded.cloned,
failed_fetch = grs.failed_fetch + excluded.failed_fetch
;
RETURN NULL;
END
$$;
DROP TRIGGER IF EXISTS trig_recalc_gitserver_repos_statistics_on_insert ON gitserver_repos;
CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_insert
AFTER INSERT ON gitserver_repos
REFERENCING NEW TABLE AS newtab
FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_insert();
-- DELETE
CREATE OR REPLACE FUNCTION recalc_gitserver_repos_statistics_on_delete() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
UPDATE gitserver_repos_statistics grs
SET
total = grs.total - (SELECT COUNT(*) FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
not_cloned = grs.not_cloned - (SELECT COUNT(*) FILTER(WHERE clone_status = 'not_cloned') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
cloning = grs.cloning - (SELECT COUNT(*) FILTER(WHERE clone_status = 'cloning') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
cloned = grs.cloned - (SELECT COUNT(*) FILTER(WHERE clone_status = 'cloned') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
      failed_fetch = grs.failed_fetch - (SELECT COUNT(*) FILTER(WHERE last_error IS NOT NULL) FROM oldtab WHERE oldtab.shard_id = grs.shard_id)
;
RETURN NULL;
END
$$;
DROP TRIGGER IF EXISTS trig_recalc_gitserver_repos_statistics_on_delete ON gitserver_repos;
CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_delete
AFTER DELETE ON gitserver_repos REFERENCING
OLD TABLE AS oldtab
FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_delete();
@ -432,6 +432,196 @@ CREATE FUNCTION merge_audit_log_transitions(internal hstore, arrayhstore hstore[
END;
$$;
CREATE FUNCTION recalc_gitserver_repos_statistics_on_delete() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
UPDATE gitserver_repos_statistics grs
SET
total = grs.total - (SELECT COUNT(*) FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
not_cloned = grs.not_cloned - (SELECT COUNT(*) FILTER(WHERE clone_status = 'not_cloned') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
cloning = grs.cloning - (SELECT COUNT(*) FILTER(WHERE clone_status = 'cloning') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
cloned = grs.cloned - (SELECT COUNT(*) FILTER(WHERE clone_status = 'cloned') FROM oldtab WHERE oldtab.shard_id = grs.shard_id),
      failed_fetch = grs.failed_fetch - (SELECT COUNT(*) FILTER(WHERE last_error IS NOT NULL) FROM oldtab WHERE oldtab.shard_id = grs.shard_id)
;
RETURN NULL;
END
$$;
CREATE FUNCTION recalc_gitserver_repos_statistics_on_insert() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO gitserver_repos_statistics AS grs (shard_id, total, not_cloned, cloning, cloned, failed_fetch)
SELECT
shard_id,
COUNT(*) AS total,
COUNT(*) FILTER(WHERE clone_status = 'not_cloned') AS not_cloned,
COUNT(*) FILTER(WHERE clone_status = 'cloning') AS cloning,
COUNT(*) FILTER(WHERE clone_status = 'cloned') AS cloned,
COUNT(*) FILTER(WHERE last_error IS NOT NULL) AS failed_fetch
FROM
newtab
GROUP BY shard_id
ON CONFLICT(shard_id)
DO UPDATE
SET
total = grs.total + excluded.total,
not_cloned = grs.not_cloned + excluded.not_cloned,
cloning = grs.cloning + excluded.cloning,
cloned = grs.cloned + excluded.cloned,
failed_fetch = grs.failed_fetch + excluded.failed_fetch
;
RETURN NULL;
END
$$;
CREATE FUNCTION recalc_gitserver_repos_statistics_on_update() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO gitserver_repos_statistics AS grs (shard_id, total, not_cloned, cloning, cloned, failed_fetch)
SELECT
newtab.shard_id AS shard_id,
COUNT(*) AS total,
COUNT(*) FILTER(WHERE clone_status = 'not_cloned') AS not_cloned,
COUNT(*) FILTER(WHERE clone_status = 'cloning') AS cloning,
COUNT(*) FILTER(WHERE clone_status = 'cloned') AS cloned,
COUNT(*) FILTER(WHERE last_error IS NOT NULL) AS failed_fetch
FROM
newtab
GROUP BY newtab.shard_id
ON CONFLICT(shard_id) DO
UPDATE
SET
total = grs.total + (excluded.total - (SELECT COUNT(*) FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
not_cloned = grs.not_cloned + (excluded.not_cloned - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'not_cloned') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
cloning = grs.cloning + (excluded.cloning - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'cloning') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
cloned = grs.cloned + (excluded.cloned - (SELECT COUNT(*) FILTER(WHERE ot.clone_status = 'cloned') FROM oldtab ot WHERE ot.shard_id = excluded.shard_id)),
failed_fetch = grs.failed_fetch + (excluded.failed_fetch - (SELECT COUNT(*) FILTER(WHERE ot.last_error IS NOT NULL) FROM oldtab ot WHERE ot.shard_id = excluded.shard_id))
;
WITH moved AS (
SELECT
oldtab.shard_id AS shard_id,
COUNT(*) AS total,
COUNT(*) FILTER(WHERE oldtab.clone_status = 'not_cloned') AS not_cloned,
COUNT(*) FILTER(WHERE oldtab.clone_status = 'cloning') AS cloning,
COUNT(*) FILTER(WHERE oldtab.clone_status = 'cloned') AS cloned,
COUNT(*) FILTER(WHERE oldtab.last_error IS NOT NULL) AS failed_fetch
FROM
oldtab
JOIN newtab ON newtab.repo_id = oldtab.repo_id
WHERE
oldtab.shard_id != newtab.shard_id
GROUP BY oldtab.shard_id
)
UPDATE gitserver_repos_statistics grs
SET
total = grs.total - moved.total,
not_cloned = grs.not_cloned - moved.not_cloned,
cloning = grs.cloning - moved.cloning,
cloned = grs.cloned - moved.cloned,
failed_fetch = grs.failed_fetch - moved.failed_fetch
FROM moved
WHERE moved.shard_id = grs.shard_id;
INSERT INTO repo_statistics (not_cloned, cloning, cloned, failed_fetch)
VALUES (
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'not_cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'not_cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'cloning')
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'cloning')
),
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.clone_status = 'cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.clone_status = 'cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN repo r ON newtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND newtab.last_error IS NOT NULL)
-
(SELECT COUNT(*) FROM oldtab JOIN repo r ON oldtab.repo_id = r.id WHERE r.deleted_at is NULL AND r.blocked IS NULL AND oldtab.last_error IS NOT NULL)
)
);
RETURN NULL;
END
$$;
CREATE FUNCTION recalc_repo_statistics_on_repo_delete() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO
repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)
VALUES (
-- Insert negative counts
(SELECT -COUNT(*) FROM oldtab WHERE deleted_at IS NULL AND blocked IS NULL),
(SELECT -COUNT(*) FROM oldtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'not_cloned'),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloning'),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloned'),
(SELECT -COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.last_error IS NOT NULL)
);
RETURN NULL;
END
$$;
CREATE FUNCTION recalc_repo_statistics_on_repo_insert() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
INSERT INTO
repo_statistics (total, soft_deleted, not_cloned)
VALUES (
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL),
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),
-- New repositories are always not_cloned by default, so we can count them as not cloned here
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL)
-- New repositories never have last_error set, so we can also ignore those here
);
RETURN NULL;
END
$$;
CREATE FUNCTION recalc_repo_statistics_on_repo_update() RETURNS trigger
LANGUAGE plpgsql
AS $$ BEGIN
-- Insert diff of changes
INSERT INTO
repo_statistics (total, soft_deleted, not_cloned, cloning, cloned, failed_fetch)
VALUES (
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NULL AND blocked IS NULL) - (SELECT COUNT(*) FROM oldtab WHERE deleted_at IS NULL AND blocked IS NULL),
(SELECT COUNT(*) FROM newtab WHERE deleted_at IS NOT NULL AND blocked IS NULL) - (SELECT COUNT(*) FROM oldtab WHERE deleted_at IS NOT NULL AND blocked IS NULL),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'not_cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'not_cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'cloning')
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloning')
),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.clone_status = 'cloned')
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.clone_status = 'cloned')
),
(
(SELECT COUNT(*) FROM newtab JOIN gitserver_repos gr ON gr.repo_id = newtab.id WHERE newtab.deleted_at is NULL AND newtab.blocked IS NULL AND gr.last_error IS NOT NULL)
-
(SELECT COUNT(*) FROM oldtab JOIN gitserver_repos gr ON gr.repo_id = oldtab.id WHERE oldtab.deleted_at is NULL AND oldtab.blocked IS NULL AND gr.last_error IS NOT NULL)
)
)
;
RETURN NULL;
END
$$;
CREATE FUNCTION repo_block(reason text, at timestamp with time zone) RETURNS jsonb
LANGUAGE sql IMMUTABLE STRICT
AS $$
@ -1757,6 +1947,27 @@ CREATE TABLE gitserver_repos (
repo_size_bytes bigint
);
CREATE TABLE gitserver_repos_statistics (
shard_id text NOT NULL,
total bigint DEFAULT 0 NOT NULL,
not_cloned bigint DEFAULT 0 NOT NULL,
cloning bigint DEFAULT 0 NOT NULL,
cloned bigint DEFAULT 0 NOT NULL,
failed_fetch bigint DEFAULT 0 NOT NULL
);
COMMENT ON COLUMN gitserver_repos_statistics.shard_id IS 'ID of this gitserver shard. If an empty string then the repositories haven''t been assigned a shard.';
COMMENT ON COLUMN gitserver_repos_statistics.total IS 'Number of repositories in gitserver_repos table on this shard';
COMMENT ON COLUMN gitserver_repos_statistics.not_cloned IS 'Number of repositories in gitserver_repos table on this shard that are not cloned yet';
COMMENT ON COLUMN gitserver_repos_statistics.cloning IS 'Number of repositories in gitserver_repos table on this shard that are currently being cloned';
COMMENT ON COLUMN gitserver_repos_statistics.cloned IS 'Number of repositories in gitserver_repos table on this shard that are cloned';
COMMENT ON COLUMN gitserver_repos_statistics.failed_fetch IS 'Number of repositories in gitserver_repos table on this shard where last_error is set';
CREATE TABLE global_state (
site_id uuid NOT NULL,
initialized boolean DEFAULT false NOT NULL
@ -2858,6 +3069,27 @@ CREATE TABLE repo_permissions (
unrestricted boolean DEFAULT false NOT NULL
);
CREATE TABLE repo_statistics (
total bigint DEFAULT 0 NOT NULL,
soft_deleted bigint DEFAULT 0 NOT NULL,
not_cloned bigint DEFAULT 0 NOT NULL,
cloning bigint DEFAULT 0 NOT NULL,
cloned bigint DEFAULT 0 NOT NULL,
failed_fetch bigint DEFAULT 0 NOT NULL
);
COMMENT ON COLUMN repo_statistics.total IS 'Number of repositories that are not soft-deleted and not blocked';
COMMENT ON COLUMN repo_statistics.soft_deleted IS 'Number of repositories that are soft-deleted and not blocked';
COMMENT ON COLUMN repo_statistics.not_cloned IS 'Number of repositories that are NOT soft-deleted and not blocked and not cloned by gitserver';
COMMENT ON COLUMN repo_statistics.cloning IS 'Number of repositories that are NOT soft-deleted and not blocked and currently being cloned by gitserver';
COMMENT ON COLUMN repo_statistics.cloned IS 'Number of repositories that are NOT soft-deleted and not blocked and cloned by gitserver';
COMMENT ON COLUMN repo_statistics.failed_fetch IS 'Number of repositories that are NOT soft-deleted and not blocked and have last_error set in gitserver_repos table';
CREATE TABLE saved_searches (
id integer NOT NULL,
description text NOT NULL,
@ -3489,6 +3721,9 @@ ALTER TABLE ONLY gitserver_relocator_jobs
ALTER TABLE ONLY gitserver_repos
ADD CONSTRAINT gitserver_repos_pkey PRIMARY KEY (repo_id);
ALTER TABLE ONLY gitserver_repos_statistics
ADD CONSTRAINT gitserver_repos_statistics_pkey PRIMARY KEY (shard_id);
ALTER TABLE ONLY global_state
ADD CONSTRAINT global_state_pkey PRIMARY KEY (site_id);
@ -4013,6 +4248,18 @@ CREATE TRIGGER trig_delete_repo_ref_on_external_service_repos AFTER UPDATE OF de
CREATE TRIGGER trig_invalidate_session_on_password_change BEFORE UPDATE OF passwd ON users FOR EACH ROW EXECUTE FUNCTION invalidate_session_for_userid_on_password_change();
CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_delete AFTER DELETE ON gitserver_repos REFERENCING OLD TABLE AS oldtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_delete();
CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_insert AFTER INSERT ON gitserver_repos REFERENCING NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_insert();
CREATE TRIGGER trig_recalc_gitserver_repos_statistics_on_update AFTER UPDATE ON gitserver_repos REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_gitserver_repos_statistics_on_update();
CREATE TRIGGER trig_recalc_repo_statistics_on_repo_delete AFTER DELETE ON repo REFERENCING OLD TABLE AS oldtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_delete();
CREATE TRIGGER trig_recalc_repo_statistics_on_repo_insert AFTER INSERT ON repo REFERENCING NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_insert();
CREATE TRIGGER trig_recalc_repo_statistics_on_repo_update AFTER UPDATE ON repo REFERENCING OLD TABLE AS oldtab NEW TABLE AS newtab FOR EACH STATEMENT EXECUTE FUNCTION recalc_repo_statistics_on_repo_update();
CREATE TRIGGER trig_soft_delete_user_reference_on_external_service AFTER UPDATE OF deleted_at ON users FOR EACH ROW EXECUTE FUNCTION soft_delete_user_reference_on_external_service();
CREATE TRIGGER trigger_configuration_policies_delete AFTER DELETE ON lsif_configuration_policies REFERENCING OLD TABLE AS old FOR EACH STATEMENT EXECUTE FUNCTION func_configuration_policies_delete();