Mirror of https://github.com/sourcegraph/sourcegraph.git (synced 2026-02-06 16:31:47 +00:00)
rcache: Explicitly pass redis pool to use (#63644)
Recently, rcache was refactored to also allow using redispool.Store. However, that made it implicit where data was being written, so we now pass the pool instance down explicitly at instantiation. This also gives a slightly better overview of where redispool is actually required.
Test plan: CI passes.
This commit is contained in: parent 60dc37d1a5 · commit 169db11ce6
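Before the diff, a minimal sketch of a call site under the new API, assuming only what the hunks below show; the wrapper function, cache name, and TTL are made up for illustration:

package example

import (
	"github.com/sourcegraph/sourcegraph/internal/rcache"
	"github.com/sourcegraph/sourcegraph/internal/redispool"
)

// newExampleCache shows the new call shape: the backing Redis pool is an explicit
// first argument instead of being chosen implicitly inside rcache.
func newExampleCache() *rcache.Cache {
	// before this change: rcache.NewWithTTL("example-cache", 3600)
	return rcache.NewWithTTL(redispool.Cache, "example-cache", 3600)
}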
@@ -154,7 +154,7 @@ func Main(ctx context.Context, obctx *observation.Context, ready service.ReadyFu
sources.Add(
dotcomuser.NewSource(
obctx.Logger,
rcache.NewWithTTL(fmt.Sprintf("dotcom-users:%s", dotcomuser.SourceVersion), int(cfg.SourcesCacheTTL.Seconds())),
rcache.NewWithTTL(redispool.Cache, fmt.Sprintf("dotcom-users:%s", dotcomuser.SourceVersion), int(cfg.SourcesCacheTTL.Seconds())),
dotcomClient,
cfg.ActorConcurrencyLimit,
rs,
@@ -194,7 +194,7 @@ func Main(ctx context.Context, obctx *observation.Context, ready service.ReadyFu
sources.Add(
productsubscription.NewSource(
obctx.Logger,
rcache.NewWithTTL(fmt.Sprintf("product-subscriptions:%s", productsubscription.SourceVersion), int(cfg.SourcesCacheTTL.Seconds())),
rcache.NewWithTTL(redispool.Cache, fmt.Sprintf("product-subscriptions:%s", productsubscription.SourceVersion), int(cfg.SourcesCacheTTL.Seconds())),
codyaccessv1.NewCodyAccessServiceClient(conn),
cfg.ActorConcurrencyLimit,
),

@@ -53,6 +53,7 @@ go_library(
"//internal/observation",
"//internal/randstring",
"//internal/rcache",
"//internal/redispool",
"//internal/repos",
"//internal/repoupdater",
"//internal/trace",

@@ -17,13 +17,14 @@ import (
"github.com/sourcegraph/sourcegraph/internal/dotcom"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

var MockCountGoImporters func(ctx context.Context, repo api.RepoName) (int, error)

var (
goImportersCountCache = rcache.NewWithTTL("go-importers-count", 14400) // 4 hours
goImportersCountCache = rcache.NewWithTTL(redispool.Cache, "go-importers-count", 14400) // 4 hours
)

// CountGoImporters returns the number of Go importers for the repository's Go subpackages. This is

@@ -10,8 +10,6 @@ import (

"golang.org/x/sync/semaphore"

"github.com/sourcegraph/sourcegraph/internal/trace"

"github.com/sourcegraph/log"

"github.com/sourcegraph/sourcegraph/cmd/frontend/internal/inventory"
@@ -20,6 +18,8 @@ import (
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

@@ -27,7 +27,7 @@ import (
// filenames. Enabled by default.
var useEnhancedLanguageDetection, _ = strconv.ParseBool(env.Get("USE_ENHANCED_LANGUAGE_DETECTION", "true", "Enable more accurate but slower language detection that uses file contents"))

var inventoryCache = rcache.New(fmt.Sprintf("inv:v2:enhanced_%v", useEnhancedLanguageDetection))
var inventoryCache = rcache.New(redispool.Cache, fmt.Sprintf("inv:v2:enhanced_%v", useEnhancedLanguageDetection))

var gitServerConcurrency, _ = strconv.Atoi(env.Get("GET_INVENTORY_GIT_SERVER_CONCURRENCY", "4", "Changes the number of concurrent requests against the gitserver for getInventory requests."))

@@ -3,18 +3,13 @@ package graphqlbackend
import (
"context"
"encoding/base64"
"flag"
"fmt"
"io"
"log" //nolint:logging // TODO move all logging to sourcegraph/log
"os"
"reflect"
"testing"

"github.com/grafana/regexp"
"github.com/graph-gophers/graphql-go"
"github.com/inconshreveable/log15" //nolint:logging // TODO move all logging to sourcegraph/log
sglog "github.com/sourcegraph/log"
"github.com/sourcegraph/log/logtest"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
@@ -26,18 +21,6 @@ import (
"github.com/sourcegraph/sourcegraph/internal/types"
)

func TestMain(m *testing.M) {
flag.Parse()
if !testing.Verbose() {
log15.Root().SetHandler(log15.DiscardHandler())
log.SetOutput(io.Discard)
logtest.InitWithLevel(m, sglog.LevelNone)
} else {
logtest.Init(m)
}
os.Exit(m.Run())
}

func BenchmarkPrometheusFieldName(b *testing.B) {
tests := [][3]string{
{"Query", "settingsSubject", "settingsSubject"},

@@ -2,16 +2,39 @@ package graphqlbackend

import (
"context"
"flag"
"fmt"
"io"
"log" //nolint:logging // TODO move all logging to sourcegraph/log
"os"
"sync"
"testing"

"github.com/inconshreveable/log15" //nolint:logging // TODO move all logging to sourcegraph/log
"github.com/keegancsmith/sqlf"
sglog "github.com/sourcegraph/log"
"github.com/sourcegraph/log/logtest"

"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/txemail"
"github.com/sourcegraph/sourcegraph/internal/types"
)

func TestMain(m *testing.M) {
flag.Parse()
if !testing.Verbose() {
log15.Root().SetHandler(log15.DiscardHandler())
log.SetOutput(io.Discard)
logtest.InitWithLevel(m, sglog.LevelNone)
} else {
logtest.Init(m)
}

txemail.DisableSilently()

os.Exit(m.Run())
}

var createTestUser = func() func(*testing.T, database.DB, bool) *types.User {
var mu sync.Mutex
count := 0

@@ -9,6 +9,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/gqlutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/wrexec"
)

@@ -50,7 +51,7 @@ func (r *RepositoryResolver) RecordedCommands(ctx context.Context, args *Recorde
if recordingConf == nil {
return graphqlutil.NewSliceConnectionResolver([]RecordedCommandResolver{}, 0, currentEnd), nil
}
store := rcache.NewFIFOList(wrexec.GetFIFOListKey(r.Name()), recordingConf.Size)
store := rcache.NewFIFOList(redispool.Cache, wrexec.GetFIFOListKey(r.Name()), recordingConf.Size)
empty, err := store.IsEmpty()
if err != nil {
return nil, err

@@ -21,7 +21,7 @@ import (
)

func TestRecordedCommandsResolver(t *testing.T) {
rcache.SetupForTest(t)
kv := rcache.SetupForTest(t)

timeFormat := "2006-01-02T15:04:05Z"
startTime, err := time.Parse(timeFormat, "2023-07-20T15:04:05Z")
@@ -137,7 +137,7 @@ func TestRecordedCommandsResolver(t *testing.T) {
repos.GetFunc.SetDefaultReturn(&types.Repo{Name: api.RepoName(repoName)}, nil)
db.ReposFunc.SetDefaultReturn(repos)

r := rcache.NewFIFOList(wrexec.GetFIFOListKey(repoName), 3)
r := rcache.NewFIFOList(kv, wrexec.GetFIFOListKey(repoName), 3)
cmd1 := wrexec.RecordedCommand{
Start: startTime,
Duration: float64(100),
@@ -225,7 +225,7 @@ func TestRecordedCommandsResolver(t *testing.T) {
repos.GetFunc.SetDefaultReturn(&types.Repo{Name: api.RepoName(repoName)}, nil)
db.ReposFunc.SetDefaultReturn(repos)

r := rcache.NewFIFOList(wrexec.GetFIFOListKey(repoName), 3)
r := rcache.NewFIFOList(kv, wrexec.GetFIFOListKey(repoName), 3)

err = r.Insert(marshalCmd(t, cmd1))
require.NoError(t, err)
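The test hunks above converge on one pattern: rcache.SetupForTest now returns the redispool.KeyValue backing the test Redis, and FIFO lists are built against it explicitly. A small sketch of that pattern, assuming only the signatures shown above (test and key names are illustrative):

package example

import (
	"testing"

	"github.com/sourcegraph/sourcegraph/internal/rcache"
)

func TestExampleFIFOList(t *testing.T) {
	// SetupForTest returns the KeyValue wired to the local test Redis.
	kv := rcache.SetupForTest(t)

	// The list is constructed against that KeyValue rather than a package-level pool.
	list := rcache.NewFIFOList(kv, "example-list", 3)
	if err := list.Insert([]byte("entry")); err != nil {
		t.Fatal(err)
	}
}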
@@ -24,6 +24,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/honey"
searchhoney "github.com/sourcegraph/sourcegraph/internal/honey/search"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/search"
searchclient "github.com/sourcegraph/sourcegraph/internal/search/client"
"github.com/sourcegraph/sourcegraph/internal/search/job/jobutil"
@@ -434,7 +435,7 @@ func (srs *searchResultsStats) ApproximateResultCount() string { return srs.JApp
func (srs *searchResultsStats) Sparkline() []int32 { return srs.JSparkline }

var (
searchResultsStatsCache = rcache.NewWithTTL("search_results_stats", 3600) // 1h
searchResultsStatsCache = rcache.NewWithTTL(redispool.Cache, "search_results_stats", 3600) // 1h
searchResultsStatsCounter = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "src_graphql_search_results_stats_cache_hit",
Help: "Counts cache hits and misses for search results stats (e.g. sparklines).",

@@ -16,6 +16,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/gqlutil"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@@ -24,7 +25,7 @@ import (
const slowRequestRedisFIFOListPerPage = 50

// slowRequestRedisFIFOList is a FIFO redis cache to store the slow requests.
var slowRequestRedisFIFOList = rcache.NewFIFOListDynamic("slow-graphql-requests-list", func() int {
var slowRequestRedisFIFOList = rcache.NewFIFOListDynamic(redispool.Cache, "slow-graphql-requests-list", func() int {
return conf.Get().ObservabilityCaptureSlowGraphQLRequestsLimit
})

@@ -20,15 +20,10 @@ import (
"github.com/sourcegraph/sourcegraph/internal/database/dbmocks"
"github.com/sourcegraph/sourcegraph/internal/database/fakedb"
"github.com/sourcegraph/sourcegraph/internal/gitserver"
"github.com/sourcegraph/sourcegraph/internal/txemail"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

func init() {
txemail.DisableSilently()
}

func TestUserEmail_ViewerCanManuallyVerify(t *testing.T) {
t.Parallel()

@@ -21,6 +21,7 @@ go_library(
"//internal/executor/types",
"//internal/metrics/store",
"//internal/rcache",
"//internal/redispool",
"//internal/types",
"//internal/workerutil",
"//internal/workerutil/dbworker/store",

@@ -20,6 +20,7 @@ import (
executortypes "github.com/sourcegraph/sourcegraph/internal/executor/types"
metricsstore "github.com/sourcegraph/sourcegraph/internal/metrics/store"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/internal/workerutil"
dbworkerstore "github.com/sourcegraph/sourcegraph/internal/workerutil/dbworker/store"
@@ -49,7 +50,7 @@ func NewMultiHandler(
batchesQueueHandler QueueHandler[*btypes.BatchSpecWorkspaceExecutionJob],
) MultiHandler {
siteConfig := conf.Get().SiteConfiguration
dequeueCache := rcache.New(executortypes.DequeueCachePrefix)
dequeueCache := rcache.New(redispool.Cache, executortypes.DequeueCachePrefix)
dequeueCacheConfig := executortypes.DequeuePropertiesPerQueue
if siteConfig.ExecutorsMultiqueue != nil && siteConfig.ExecutorsMultiqueue.DequeueCacheConfig != nil {
dequeueCacheConfig = siteConfig.ExecutorsMultiqueue.DequeueCacheConfig

@@ -34,6 +34,7 @@ go_library(
"//internal/httpcli",
"//internal/observation",
"//internal/rcache",
"//internal/redispool",
"//internal/types",
"//lib/errors",
"//lib/pointers",
@@ -68,6 +69,7 @@ go_test(
"//internal/gitserver",
"//internal/httpcli",
"//internal/rcache",
"//internal/redispool",
"//internal/types",
"//lib/errors",
"@com_github_google_uuid//:uuid",

@@ -34,6 +34,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
"github.com/sourcegraph/sourcegraph/lib/pointers"
@@ -79,7 +80,7 @@ func (srv *gitHubAppServer) registerRoutes(router *mux.Router) {

// SetupGitHubAppRoutes registers the routes for the GitHub App setup API.
func SetupGitHubAppRoutes(router *mux.Router, db database.DB) {
ghAppState := rcache.NewWithTTL("github_app_state", cacheTTLSeconds)
ghAppState := rcache.NewWithTTL(redispool.Cache, "github_app_state", cacheTTLSeconds)
appServer := &gitHubAppServer{
cache: ghAppState,
db: db,

@@ -8,10 +8,8 @@ import (
"net/http/httptest"
"testing"

"github.com/gorilla/mux"
"github.com/sourcegraph/sourcegraph/lib/errors"

"github.com/google/uuid"
"github.com/gorilla/mux"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"

@@ -22,7 +20,9 @@ import (
ghtypes "github.com/sourcegraph/sourcegraph/internal/github_apps/types"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

func TestGenerateRedirectURL(t *testing.T) {
@@ -154,7 +154,7 @@ func TestGithubAppHTTPAPI(t *testing.T) {
db.GitHubAppsFunc.SetDefaultReturn(mockGitHubAppsStore)

rcache.SetupForTest(t)
cache := rcache.NewWithTTL("test_cache", 200)
cache := rcache.NewWithTTL(redispool.Cache, "test_cache", 200)

mux := mux.NewRouter()
subrouter := mux.PathPrefix("/githubapp/").Subrouter()

@@ -23,6 +23,7 @@ go_library(
"//internal/metrics/store",
"//internal/observation",
"//internal/rcache",
"//internal/redispool",
"//lib/errors",
"@com_github_gomodule_redigo//redis",
"@com_github_prometheus_client_golang//prometheus",
@@ -42,5 +43,6 @@ go_test(
deps = [
"//internal/executor/types",
"//internal/rcache",
"//internal/redispool",
],
)

@@ -10,6 +10,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

type janitorJob struct{}
@@ -32,7 +33,7 @@ func (j *janitorJob) Routines(_ context.Context, observationCtx *observation.Con
return nil, err
}

dequeueCache := rcache.New(executortypes.DequeueCachePrefix)
dequeueCache := rcache.New(redispool.Cache, executortypes.DequeueCachePrefix)

routines := []goroutine.BackgroundRoutine{
goroutine.NewPeriodicGoroutine(

@@ -8,6 +8,7 @@ import (

executortypes "github.com/sourcegraph/sourcegraph/internal/executor/types"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

var defaultTime = time.Date(2000, 1, 1, 1, 1, 1, 1, time.UTC)
@@ -77,7 +78,7 @@ func Test_multiqueueCacheCleaner_Handle(t *testing.T) {
t.Run(tt.name, func(t *testing.T) {
rcache.SetupForTest(t)
m := &multiqueueCacheCleaner{
cache: rcache.New(executortypes.DequeueCachePrefix),
cache: rcache.New(redispool.Cache, executortypes.DequeueCachePrefix),
windowSize: executortypes.DequeueTtl,
}
timeNow = func() time.Time {

@@ -41,10 +41,9 @@ go_test(
deps = [
"//internal/license",
"//internal/licensing",
"//internal/redispool",
"//internal/rcache",
"//lib/pointers",
"@com_github_derision_test_glock//:glock",
"@com_github_gomodule_redigo//redis",
"@com_github_sourcegraph_log//logtest",
"@com_github_stretchr_testify//require",
],

@@ -24,7 +24,6 @@ import (

var (
licenseCheckStarted = false
store = redispool.Store
baseUrl = env.Get("SOURCEGRAPH_API_URL", "https://sourcegraph.com", "Base URL for license check API")
)

@@ -38,18 +37,19 @@ type licenseChecker struct {
token string
doer httpcli.Doer
logger log.Logger
kv redispool.KeyValue
}

func (l *licenseChecker) Handle(ctx context.Context) error {
l.logger.Debug("starting license check", log.String("siteID", l.siteID))
if err := store.Set(lastCalledAtStoreKey, time.Now().Format(time.RFC3339)); err != nil {
if err := l.kv.Set(lastCalledAtStoreKey, time.Now().Format(time.RFC3339)); err != nil {
return err
}

// skip if has explicitly allowed air-gapped feature
if err := licensing.Check(licensing.FeatureAllowAirGapped); err == nil {
l.logger.Debug("license is air-gapped, skipping check", log.String("siteID", l.siteID))
if err := store.Set(licensing.LicenseValidityStoreKey, true); err != nil {
if err := l.kv.Set(licensing.LicenseValidityStoreKey, true); err != nil {
return err
}
return nil
@@ -61,7 +61,7 @@ func (l *licenseChecker) Handle(ctx context.Context) error {
}
if info.HasTag("dev") || info.HasTag("internal") || info.Plan().IsFreePlan() {
l.logger.Debug("internal, dev, or free license, skipping license verification check")
if err := store.Set(licensing.LicenseValidityStoreKey, true); err != nil {
if err := l.kv.Set(licensing.LicenseValidityStoreKey, true); err != nil {
return err
}
return nil
@@ -115,9 +115,9 @@ func (l *licenseChecker) Handle(ctx context.Context) error {
}

// best effort, ignore errors here
_ = store.Set(licensing.LicenseInvalidReason, body.Data.Reason)
_ = l.kv.Set(licensing.LicenseInvalidReason, body.Data.Reason)

if err := store.Set(licensing.LicenseValidityStoreKey, body.Data.IsValid); err != nil {
if err := l.kv.Set(licensing.LicenseValidityStoreKey, body.Data.IsValid); err != nil {
return err
}

@@ -128,8 +128,8 @@ func (l *licenseChecker) Handle(ctx context.Context) error {
// calcDurationSinceLastCalled calculates the duration to wait
// before running the next license check. It returns 0 if the
// license check should be run immediately.
func calcDurationSinceLastCalled(clock glock.Clock) (time.Duration, error) {
lastCalledAt, err := store.Get(lastCalledAtStoreKey).String()
func calcDurationSinceLastCalled(kv redispool.KeyValue, clock glock.Clock) (time.Duration, error) {
lastCalledAt, err := kv.Get(lastCalledAtStoreKey).String()
if err != nil {
return 0, err
}
@@ -153,7 +153,7 @@ func calcDurationSinceLastCalled(clock glock.Clock) (time.Duration, error) {
// StartLicenseCheck starts a goroutine that periodically checks
// license validity from dotcom and stores the result in redis.
// It re-runs the check if the license key changes.
func StartLicenseCheck(originalCtx context.Context, logger log.Logger, db database.DB) {
func StartLicenseCheck(originalCtx context.Context, logger log.Logger, db database.DB, kv redispool.KeyValue) {
if licenseCheckStarted {
logger.Info("license check already started")
return
@@ -171,15 +171,17 @@ func StartLicenseCheck(originalCtx context.Context, logger log.Logger, db databa
cancel()
ctxWithCancel, cancel = context.WithCancel(originalCtx)

prevLicenseToken, _ := store.Get(prevLicenseTokenKey).String()
prevLicenseToken, _ := kv.Get(prevLicenseTokenKey).String()
licenseToken := license.GenerateLicenseKeyBasedAccessToken(conf.Get().LicenseKey)
var initialWaitInterval time.Duration = 0
if prevLicenseToken == licenseToken {
initialWaitInterval, _ = calcDurationSinceLastCalled(glock.NewRealClock())
initialWaitInterval, _ = calcDurationSinceLastCalled(kv, glock.NewRealClock())
}

// continue running with new license key
store.Set(prevLicenseTokenKey, licenseToken)
if err := kv.Set(prevLicenseTokenKey, licenseToken); err != nil {
logger.Error("error storing license token in redis", log.Error(err))
}

// read site_id from global_state table if not done before
if siteID == "" {
@@ -193,7 +195,13 @@ func StartLicenseCheck(originalCtx context.Context, logger log.Logger, db databa

routine := goroutine.NewPeriodicGoroutine(
ctxWithCancel,
&licenseChecker{siteID: siteID, token: licenseToken, doer: httpcli.ExternalDoer, logger: logger.Scoped("licenseChecker")},
&licenseChecker{
siteID: siteID,
token: licenseToken,
doer: httpcli.ExternalDoer,
logger: logger.Scoped("licenseChecker"),
kv: kv,
},
goroutine.WithName("licensing.check-license-validity"),
goroutine.WithDescription("check if license is valid from sourcegraph.com"),
goroutine.WithInterval(licensing.LicenseCheckInterval),

@@ -11,27 +11,22 @@ import (
"time"

"github.com/derision-test/glock"
"github.com/gomodule/redigo/redis"
"github.com/stretchr/testify/require"

"github.com/sourcegraph/log/logtest"

"github.com/sourcegraph/sourcegraph/internal/license"
"github.com/sourcegraph/sourcegraph/internal/licensing"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/lib/pointers"
)

func Test_calcDurationToWaitForNextHandle(t *testing.T) {
// Connect to local redis for testing, this is the same URL used in rcache.SetupForTest
store = redispool.NewKeyValue("127.0.0.1:6379", &redis.Pool{
MaxIdle: 3,
IdleTimeout: 5 * time.Second,
})
kv := rcache.SetupForTest(t)

cleanupStore := func() {
_ = store.Del(licensing.LicenseValidityStoreKey)
_ = store.Del(lastCalledAtStoreKey)
_ = kv.Del(licensing.LicenseValidityStoreKey)
_ = kv.Del(lastCalledAtStoreKey)
}

now := time.Now().Round(time.Second)
@@ -78,10 +73,10 @@ func Test_calcDurationToWaitForNextHandle(t *testing.T) {
t.Run(name, func(t *testing.T) {
cleanupStore()
if test.lastCalledAt != "" {
_ = store.Set(lastCalledAtStoreKey, test.lastCalledAt)
_ = kv.Set(lastCalledAtStoreKey, test.lastCalledAt)
}

got, err := calcDurationSinceLastCalled(clock)
got, err := calcDurationSinceLastCalled(kv, clock)
if test.wantErr {
require.Error(t, err)
} else {
@@ -106,15 +101,11 @@ func mockDotcomURL(t *testing.T, u *string) {
}

func Test_licenseChecker(t *testing.T) {
// Connect to local redis for testing, this is the same URL used in rcache.SetupForTest
store = redispool.NewKeyValue("127.0.0.1:6379", &redis.Pool{
MaxIdle: 3,
IdleTimeout: 5 * time.Second,
})
kv := rcache.SetupForTest(t)

cleanupStore := func() {
_ = store.Del(licensing.LicenseValidityStoreKey)
_ = store.Del(lastCalledAtStoreKey)
_ = kv.Del(licensing.LicenseValidityStoreKey)
_ = kv.Del(lastCalledAtStoreKey)
}

siteID := "some-site-id"
@@ -159,6 +150,7 @@ func Test_licenseChecker(t *testing.T) {
token: token,
doer: doer,
logger: logtest.NoOp(t),
kv: kv,
}

err := handler.Handle(context.Background())
@@ -168,12 +160,12 @@ func Test_licenseChecker(t *testing.T) {
require.False(t, doer.DoCalled)

// check result was set to true
valid, err := store.Get(licensing.LicenseValidityStoreKey).Bool()
valid, err := kv.Get(licensing.LicenseValidityStoreKey).Bool()
require.NoError(t, err)
require.True(t, valid)

// check last called at was set
lastCalledAt, err := store.Get(lastCalledAtStoreKey).String()
lastCalledAt, err := kv.Get(lastCalledAtStoreKey).String()
require.NoError(t, err)
require.NotEmpty(t, lastCalledAt)
})
@@ -238,6 +230,7 @@ func Test_licenseChecker(t *testing.T) {
token: token,
doer: doer,
logger: logtest.NoOp(t),
kv: kv,
}

err := checker.Handle(context.Background())
@@ -245,25 +238,25 @@ func Test_licenseChecker(t *testing.T) {
require.Error(t, err)

// check result was NOT set
require.True(t, store.Get(licensing.LicenseValidityStoreKey).IsNil())
require.True(t, kv.Get(licensing.LicenseValidityStoreKey).IsNil())
} else {
require.NoError(t, err)

// check result was set
got, err := store.Get(licensing.LicenseValidityStoreKey).Bool()
got, err := kv.Get(licensing.LicenseValidityStoreKey).Bool()
require.NoError(t, err)
require.Equal(t, test.want, got)

// check result reason was set
if test.reason != nil {
got, err := store.Get(licensing.LicenseInvalidReason).String()
got, err := kv.Get(licensing.LicenseInvalidReason).String()
require.NoError(t, err)
require.Equal(t, *test.reason, got)
}
}

// check last called at was set
lastCalledAt, err := store.Get(lastCalledAtStoreKey).String()
lastCalledAt, err := kv.Get(lastCalledAtStoreKey).String()
require.NoError(t, err)
require.NotEmpty(t, lastCalledAt)

@@ -13,6 +13,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/goroutine"
"github.com/sourcegraph/sourcegraph/internal/licensing"
"github.com/sourcegraph/sourcegraph/internal/observation"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

type licenseWorker struct{}
@@ -61,7 +62,7 @@ func (l *licenseChecksWrapper) Start() {
})
})
if !dotcom.SourcegraphDotComMode() {
StartLicenseCheck(context.Background(), l.logger, l.db)
StartLicenseCheck(context.Background(), l.logger, l.db, redispool.Store)
}
}
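Read together, the licensing hunks replace the package-level `store = redispool.Store` variable with a `kv redispool.KeyValue` that the caller supplies (the production caller now passes redispool.Store into StartLicenseCheck) and that the checker carries as a field. A minimal self-contained sketch of that dependency-injection move; the type, method, and key below are made up for illustration:

package example

import (
	"time"

	"github.com/sourcegraph/sourcegraph/internal/redispool"
)

// checker mirrors the licenseChecker change above: instead of reading a
// package-level store, the component holds the KeyValue it was given.
type checker struct {
	kv redispool.KeyValue
}

// touch performs the same kind of write as l.kv.Set(lastCalledAtStoreKey, ...)
// in the hunks above, but against whatever pool the caller injected.
func (c *checker) touch(key string) error {
	return c.kv.Set(key, time.Now().Format(time.RFC3339))
}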
@@ -39,6 +39,7 @@ go_library(
"//internal/featureflag",
"//internal/lazyregexp",
"//internal/rcache",
"//internal/redispool",
"//internal/session",
"//internal/suspiciousnames",
"//internal/telemetry",

@@ -13,6 +13,7 @@ import (
"github.com/sourcegraph/sourcegraph/cmd/frontend/globals"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/txemail"
"github.com/sourcegraph/sourcegraph/internal/txemail/txtypes"
"github.com/sourcegraph/sourcegraph/lib/errors"
@@ -58,10 +59,10 @@ func NewLockoutStore(failedThreshold int, lockoutPeriod, consecutivePeriod time.

return &lockoutStore{
failedThreshold: failedThreshold,
lockouts: rcache.NewWithTTL("account_lockout", int(lockoutPeriod.Seconds())),
failedAttempts: rcache.NewWithTTL("account_failed_attempts", int(consecutivePeriod.Seconds())),
unlockToken: rcache.NewWithTTL("account_unlock_token", int(lockoutPeriod.Seconds())),
unlockEmailSent: rcache.NewWithTTL("account_lockout_email_sent", int(lockoutPeriod.Seconds())),
lockouts: rcache.NewWithTTL(redispool.Cache, "account_lockout", int(lockoutPeriod.Seconds())),
failedAttempts: rcache.NewWithTTL(redispool.Cache, "account_failed_attempts", int(consecutivePeriod.Seconds())),
unlockToken: rcache.NewWithTTL(redispool.Cache, "account_unlock_token", int(lockoutPeriod.Seconds())),
unlockEmailSent: rcache.NewWithTTL(redispool.Cache, "account_lockout_email_sent", int(lockoutPeriod.Seconds())),
sendEmail: sendEmailF,
}
}

@@ -26,6 +26,7 @@ go_library(
"//internal/licensing",
"//internal/oauthtoken",
"//internal/rcache",
"//internal/redispool",
"//internal/types",
"//lib/errors",
"//schema",

@@ -19,6 +19,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/oauthtoken"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@@ -70,6 +71,7 @@ func NewProvider(urn string, opts ProviderOptions) *Provider {
if opts.GroupsCacheTTL >= 0 {
cg = &cachedGroups{
cache: rcache.NewWithTTL(
redispool.Cache,
fmt.Sprintf("gh_groups_perms:%s:%s", codeHost.ServiceID, urn), int(opts.GroupsCacheTTL.Seconds()),
),
}

@@ -27,6 +27,7 @@ go_library(
"//internal/extsvc/github",
"//internal/observation",
"//internal/rcache",
"//internal/redispool",
"//internal/types",
"//lib/errors",
"@com_github_sourcegraph_log//:log",

@@ -6,6 +6,7 @@ import (
"encoding/json"

"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

type GitHubAuthCache struct {
@@ -13,7 +14,7 @@ type GitHubAuthCache struct {
}

var githubAuthCache = &GitHubAuthCache{
cache: rcache.NewWithTTL("codeintel.github-authz:", 60 /* seconds */),
cache: rcache.NewWithTTL(redispool.Cache, "codeintel.github-authz:", 60 /* seconds */),
}

func (c *GitHubAuthCache) Get(key string) (authorized bool, _ bool) {

@@ -19,6 +19,7 @@ go_library(
],
deps = [
"//internal/rcache",
"//internal/redispool",
"//lib/errors",
],
)
@@ -34,5 +35,6 @@ go_test(
deps = [
":tokenusage",
"//internal/rcache",
"//internal/redispool",
],
)

@@ -5,11 +5,12 @@ import (
"strings"

"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

type Manager struct {
Cache *rcache.Cache
cache *rcache.Cache
}

type ModelData struct {
@@ -19,7 +20,7 @@ type ModelData struct {

func NewManager() *Manager {
return &Manager{
Cache: rcache.NewWithRedisStore("LLMUsage"),
cache: rcache.New(redispool.Store, "LLMUsage"),
}
}

@@ -45,7 +46,7 @@ func (m *Manager) UpdateTokenCountsFromModelUsage(inputTokens, outputTokens int,
}

func (m *Manager) updateTokenCounts(key string, tokenCount int64) error {
if _, err := m.Cache.IncrByInt64(key, tokenCount); err != nil {
if _, err := m.cache.IncrByInt64(key, tokenCount); err != nil {
return errors.Newf("failed to increment token count for key %s: %w", key, err)
}
return nil
@@ -80,7 +81,7 @@ func (m *Manager) FetchTokenUsageDataForAnalysis() (map[string]float64, error) {

// fetchTokenUsageData retrieves token usage data, optionally decrementing token counts.
func (m *Manager) fetchTokenUsageData(decrement bool) (map[string]float64, error) {
allKeys := m.Cache.ListAllKeys()
allKeys := m.cache.ListAllKeys()
tokenUsageData := make(map[string]float64)

for _, key := range allKeys {
@@ -103,13 +104,13 @@ func (m *Manager) getModelNameAndValue(key string, decrement bool) (string, int6
}
modelName := parts[1]

value, found, err := m.Cache.GetInt64(modelName)
value, found, err := m.cache.GetInt64(modelName)
if err != nil || !found {
return "", 0, err // Skip keys that are not found or have conversion errors
}

if decrement {
if _, err := m.Cache.DecrByInt64(modelName, value); err != nil {
if _, err := m.cache.DecrByInt64(modelName, value); err != nil {
return "", 0, err
}
}

@@ -5,12 +5,13 @@ import (

"github.com/sourcegraph/sourcegraph/internal/completions/tokenusage"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

func TestGetAllTokenUsageData(t *testing.T) {
rcache.SetupForTest(t)
manager := tokenusage.NewManager()
cache := rcache.NewWithTTL("LLMUsage", 1800)
cache := rcache.NewWithTTL(redispool.Store, "LLMUsage", 1800)
cache.SetInt("LLMUsage:model1:feature1:stream:input", 10)
cache.SetInt("LLMUsage:model1:feature1:stream:output", 20)
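The tokenusage hunks are the one place that keeps writing to the persistent pool: `rcache.NewWithRedisStore` is gone, and the same behaviour is now spelled `rcache.New(redispool.Store, ...)`. A minimal sketch of that call; the wrapper function is made up, the call itself matches the hunk above:

package example

import (
	"github.com/sourcegraph/sourcegraph/internal/rcache"
	"github.com/sourcegraph/sourcegraph/internal/redispool"
)

// newLLMUsageCache shows the replacement for the removed rcache.NewWithRedisStore("LLMUsage"):
// the persistent redispool.Store pool is named explicitly at the call site, so it is
// visible that this data goes to the store instance rather than the evictable cache.
func newLLMUsageCache() *rcache.Cache {
	return rcache.New(redispool.Store, "LLMUsage")
}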
@@ -15,6 +15,7 @@ go_library(
"//internal/api",
"//internal/extsvc",
"//internal/rcache",
"//internal/redispool",
"//lib/errors",
"@com_github_aws_aws_sdk_go_v2//aws",
"@com_github_aws_aws_sdk_go_v2_service_codecommit//:codecommit",

@@ -10,6 +10,7 @@ import (
"github.com/aws/smithy-go"

"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

@@ -24,7 +25,7 @@ func NewClient(config aws.Config) *Client {
// Cache for repository metadata. The configuration-specific key prefix is not known
// synchronously, so cache consumers must call (*Client).cacheKeyPrefix to obtain the
// prefix value and prepend it explicitly.
repoCache := rcache.NewWithTTL("cc_repo:", 60 /* seconds */)
repoCache := rcache.NewWithTTL(redispool.Cache, "cc_repo:", 60 /* seconds */)

return &Client{
aws: config,

@@ -38,6 +38,7 @@ go_library(
"//internal/oauthutil",
"//internal/ratelimit",
"//internal/rcache",
"//internal/redispool",
"//internal/trace",
"//lib/errors",
"@com_github_masterminds_semver//:semver",
@@ -82,6 +83,7 @@ go_test(
"//internal/oauthutil",
"//internal/ratelimit",
"//internal/rcache",
"//internal/redispool",
"//lib/errors",
"//schema",
"@com_github_google_go_cmp//cmp",

@@ -25,6 +25,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/oauthutil"
"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/lib/errors"
)
@@ -178,7 +179,7 @@ func (p *ClientProvider) NewClient(a auth.Authenticator) *Client {
tokenHash = a.Hash()
key += tokenHash
}
projCache := rcache.NewWithTTL(key, int(cacheTTL/time.Second))
projCache := rcache.NewWithTTL(redispool.Cache, key, int(cacheTTL/time.Second))

rl := ratelimit.NewInstrumentedLimiter(p.urn, ratelimit.NewGlobalRateLimiter(log.Scoped("GitLabClient"), p.urn))
rlm := ratelimit.DefaultMonitorRegistry.GetOrSet(p.baseURL.String(), tokenHash, "rest", &ratelimit.Monitor{})

@@ -10,6 +10,7 @@ import (

"github.com/sourcegraph/sourcegraph/internal/ratelimit"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

type mockHTTPResponseBody struct {
@@ -51,6 +52,6 @@ func newTestClient(t *testing.T) *Client {
baseURL: &url.URL{Scheme: "https", Host: "example.com", Path: "/"},
httpClient: &http.Client{},
externalRateLimiter: &ratelimit.Monitor{},
projCache: rcache.NewWithTTL("__test__gl_proj", 1000),
projCache: rcache.NewWithTTL(redispool.Cache, "__test__gl_proj", 1000),
}
}

@@ -18,6 +18,7 @@ go_library(
"//internal/github_apps/store",
"//internal/httpcli",
"//internal/rcache",
"//internal/redispool",
"//internal/types",
"//lib/errors",
"@com_github_golang_jwt_jwt_v4//:jwt",

@@ -19,6 +19,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/extsvc/github"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

@@ -127,7 +128,7 @@ func NewInstallationAccessToken(
appAuthenticator auth.Authenticator,
encryptionKey encryption.Key, // Used to encrypt the token before caching it
) *InstallationAuthenticator {
cache := rcache.NewWithTTL("github_app_installation_token", 55*60)
cache := rcache.NewWithTTL(redispool.Cache, "github_app_installation_token", 55*60)
return &InstallationAuthenticator{
baseURL: baseURL,
installationID: installationID,

@@ -12,6 +12,7 @@ go_library(
visibility = ["//:__subpackages__"],
deps = [
"//internal/rcache",
"//internal/redispool",
"//lib/errors",
"@com_github_sourcegraph_log//:log",
],
@@ -28,6 +29,7 @@ go_test(
],
deps = [
"//internal/rcache",
"//internal/redispool",
"//lib/errors",
"@com_github_sourcegraph_log//:log",
"@com_github_stretchr_testify//assert",

@@ -5,6 +5,7 @@ import (
"time"

"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

// JobInfo contains information about a job, including all its routines.
@@ -74,7 +75,7 @@ const (
const ttlSeconds = 604800 // 7 days

func GetCache() *rcache.Cache {
return rcache.NewWithTTL(keyPrefix, ttlSeconds)
return rcache.NewWithTTL(redispool.Cache, keyPrefix, ttlSeconds)
}

// mergeStats returns the given stats updated with the given run data.

@@ -9,6 +9,7 @@ import (
"github.com/stretchr/testify/assert"

"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

@@ -17,7 +18,7 @@ func TestLoggerAndReaderHappyPaths(t *testing.T) {
rcache.SetupForTest(t)

// Create logger
c := rcache.NewWithTTL(keyPrefix, 1)
c := rcache.NewWithTTL(redispool.Cache, keyPrefix, 1)
recorder := New(log.NoOp(), "test", c)

// Create routines

@@ -24,6 +24,7 @@ go_library(
"//internal/lazyregexp",
"//internal/metrics",
"//internal/rcache",
"//internal/redispool",
"//internal/requestclient",
"//internal/requestinteraction",
"//internal/trace",

@@ -29,6 +29,7 @@ import (
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
"github.com/sourcegraph/sourcegraph/internal/metrics"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/requestclient"
"github.com/sourcegraph/sourcegraph/internal/requestinteraction"
"github.com/sourcegraph/sourcegraph/internal/trace"
@@ -83,7 +84,7 @@ type Factory struct {
// redisCache is an HTTP cache backed by Redis. The TTL of a week is a balance
// between caching values for a useful amount of time versus growing the cache
// too large.
var redisCache = rcache.NewWithTTL("http", 604800)
var redisCache = rcache.NewWithTTL(redispool.Cache, "http", 604800)

// CachedTransportOpt is the default transport cache - it will return values from
// the cache where possible (avoiding a network request) and will additionally add

@@ -16,14 +16,19 @@ import (

"github.com/sourcegraph/sourcegraph/internal/conf/deploy"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/internal/types"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

// outboundRequestsRedisFIFOList is a FIFO redis cache to store the requests.
var outboundRequestsRedisFIFOList = rcache.NewFIFOListDynamic("outbound-requests", func() int {
return int(outboundRequestLogLimit())
})
var outboundRequestsRedisFIFOList = rcache.NewFIFOListDynamic(
redispool.Cache,
"outbound-requests",
func() int {
return int(outboundRequestLogLimit())
},
)

const sourcegraphPrefix = "github.com/sourcegraph/sourcegraph/"

@@ -5,6 +5,7 @@ import (
"fmt"
"unicode/utf8"

"github.com/sourcegraph/sourcegraph/internal/redispool"
"github.com/sourcegraph/sourcegraph/lib/errors"
)

@@ -12,22 +13,25 @@ import (
type FIFOList struct {
key string
maxSize func() int
_kv redispool.KeyValue
}

// NewFIFOList returns a FIFOList, storing only a fixed amount of elements, discarding old ones if needed.
func NewFIFOList(key string, size int) *FIFOList {
func NewFIFOList(kv redispool.KeyValue, key string, size int) *FIFOList {
return &FIFOList{
key: key,
maxSize: func() int { return size },
_kv: kv,
}
}

// NewFIFOListDynamic is like NewFIFOList except size will be called each time
// we enforce list size invariants.
func NewFIFOListDynamic(key string, size func() int) *FIFOList {
func NewFIFOListDynamic(kv redispool.KeyValue, key string, size func() int) *FIFOList {
l := &FIFOList{
key: key,
maxSize: size,
_kv: kv,
}
return l
}
@@ -35,7 +39,7 @@ func NewFIFOListDynamic(key string, size func() int) *FIFOList {
// Insert b in the cache and drops the oldest inserted item if the size exceeds the configured limit.
func (l *FIFOList) Insert(b []byte) error {
if !utf8.Valid(b) {
errors.Newf("rcache: keys must be valid utf8", "key", b)
return errors.Newf("rcache: keys must be valid utf8", "key", b)
}
key := l.globalPrefixKey()

@@ -43,19 +47,19 @@ func (l *FIFOList) Insert(b []byte) error {
// disabling.
maxSize := l.MaxSize()
if maxSize == 0 {
if err := kv().LTrim(key, 0, 0); err != nil {
if err := l.kv().LTrim(key, 0, 0); err != nil {
return errors.Wrap(err, "failed to execute redis command LTRIM")
}
return nil
}

// O(1) because we're just adding a single element.
if err := kv().LPush(key, b); err != nil {
if err := l.kv().LPush(key, b); err != nil {
return errors.Wrap(err, "failed to execute redis command LPUSH")
}

// O(1) because the average case if just about dropping the last element.
if err := kv().LTrim(key, 0, maxSize-1); err != nil {
if err := l.kv().LTrim(key, 0, maxSize-1); err != nil {
return errors.Wrap(err, "failed to execute redis command LTRIM")
}
return nil
@@ -64,7 +68,7 @@ func (l *FIFOList) Insert(b []byte) error {
// Size returns the number of elements in the list.
func (l *FIFOList) Size() (int, error) {
key := l.globalPrefixKey()
n, err := kv().LLen(key)
n, err := l.kv().LLen(key)
if err != nil {
return 0, errors.Wrap(err, "failed to execute redis command LLEN")
}
@@ -106,7 +110,7 @@ func (l *FIFOList) Slice(ctx context.Context, from, to int) ([][]byte, error) {
}

key := l.globalPrefixKey()
bs, err := kv().WithContext(ctx).LRange(key, from, to).ByteSlices()
bs, err := l.kv().WithContext(ctx).LRange(key, from, to).ByteSlices()
if err != nil {
// Return ctx error if it expired
if ctx.Err() != nil {
@@ -123,3 +127,10 @@ func (l *FIFOList) Slice(ctx context.Context, from, to int) ([][]byte, error) {
func (l *FIFOList) globalPrefixKey() string {
return fmt.Sprintf("%s:%s", globalPrefix, l.key)
}

func (l *FIFOList) kv() redispool.KeyValue {
if kvMock != nil {
return kvMock
}
return l._kv
}
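As a usage-level illustration of the FIFOList API after these hunks (the function, list name, and sizes below are made up; the list now carries the KeyValue it was constructed with, so LPUSH/LTRIM/LRANGE all run against that pool):

package example

import (
	"context"

	"github.com/sourcegraph/sourcegraph/internal/rcache"
	"github.com/sourcegraph/sourcegraph/internal/redispool"
)

// lastEntries keeps a bounded list of recent entries in Redis; the backing pool
// is now an explicit argument rather than a package-level default.
func lastEntries(ctx context.Context) ([][]byte, error) {
	list := rcache.NewFIFOList(redispool.Cache, "example-events", 50)
	if err := list.Insert([]byte("event")); err != nil {
		return nil, err
	}
	return list.Slice(ctx, 0, 9)
}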
@@ -11,7 +11,7 @@ import (
)

func Test_FIFOList_All_OK(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

type testcase struct {
key string
@@ -54,7 +54,7 @@ func Test_FIFOList_All_OK(t *testing.T) {
}

for _, c := range cases {
r := NewFIFOList(c.key, c.size)
r := NewFIFOList(kv, c.key, c.size)
t.Run(fmt.Sprintf("size %d with %d entries", c.size, len(c.inserts)), func(t *testing.T) {
for _, b := range c.inserts {
if err := r.Insert(b); err != nil {
@@ -80,7 +80,7 @@ func Test_FIFOList_All_OK(t *testing.T) {
}

func Test_FIFOList_Slice_OK(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

type testcase struct {
key string
@@ -143,7 +143,7 @@ func Test_FIFOList_Slice_OK(t *testing.T) {
}

for _, c := range cases {
r := NewFIFOList(c.key, c.size)
r := NewFIFOList(kv, c.key, c.size)
t.Run(fmt.Sprintf("size %d with %d entries, [%d,%d]", c.size, len(c.inserts), c.from, c.to), func(t *testing.T) {
for _, b := range c.inserts {
if err := r.Insert(b); err != nil {
@@ -162,9 +162,9 @@ func Test_FIFOList_Slice_OK(t *testing.T) {
}

func Test_NewFIFOListDynamic(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)
maxSize := 3
r := NewFIFOListDynamic("a", func() int { return maxSize })
r := NewFIFOListDynamic(kv, "a", func() int { return maxSize })
for range 10 {
err := r.Insert([]byte("a"))
if err != nil {
@@ -198,8 +198,8 @@ func Test_NewFIFOListDynamic(t *testing.T) {
}

func Test_FIFOListContextCancellation(t *testing.T) {
SetupForTest(t)
r := NewFIFOList("a", 3)
kv := SetupForTest(t)
r := NewFIFOList(kv, "a", 3)
err := r.Insert([]byte("a"))
if err != nil {
t.Errorf("expected no error, got %q", err)
@@ -213,8 +213,8 @@ func Test_FIFOListContextCancellation(t *testing.T) {
}

func Test_FIFOListIsEmpty(t *testing.T) {
SetupForTest(t)
r := NewFIFOList("a", 3)
kv := SetupForTest(t)
r := NewFIFOList(kv, "a", 3)
empty, err := r.IsEmpty()
require.NoError(t, err)
assert.True(t, empty)

@@ -23,15 +23,6 @@ import (
const dataVersion = "v2"
const dataVersionToDelete = "v1"

// StoreType for selecting Redis store types.
type StoreType int

const (
// Define constants for each store type.
CacheStore StoreType = iota // Default Redis cache
RedisStore // Specific Redis store
)

// DeleteOldCacheData deletes the rcache data in the given Redis instance
// that's prefixed with dataVersionToDelete
func DeleteOldCacheData(c redis.Conn) error {
@@ -42,32 +33,24 @@ func DeleteOldCacheData(c redis.Conn) error {
type Cache struct {
keyPrefix string
ttlSeconds int
storeType StoreType // Updated field to use StoreType
_kv redispool.KeyValue
}

// New creates a redis backed Cache
func New(keyPrefix string) *Cache {
func New(kv redispool.KeyValue, keyPrefix string) *Cache {
return &Cache{
keyPrefix: keyPrefix,
storeType: CacheStore,
}
}

// New creates a redis backed Cache
func NewWithRedisStore(keyPrefix string) *Cache {
return &Cache{
keyPrefix: keyPrefix,
storeType: RedisStore,
_kv: kv,
}
}

// NewWithTTL creates a redis backed Cache which expires values after
// ttlSeconds.
func NewWithTTL(keyPrefix string, ttlSeconds int) *Cache {
func NewWithTTL(kv redispool.KeyValue, keyPrefix string, ttlSeconds int) *Cache {
return &Cache{
keyPrefix: keyPrefix,
ttlSeconds: ttlSeconds,
storeType: CacheStore,
_kv: kv,
}
}

@@ -208,7 +191,7 @@ func (r *Cache) ListAllKeys() []string {

// FIFOList returns a FIFOList namespaced in r.
func (r *Cache) FIFOList(key string, maxSize int) *FIFOList {
return NewFIFOList(r.rkeyPrefix()+key, maxSize)
return NewFIFOList(r.kv(), r.rkeyPrefix()+key, maxSize)
}

// SetHashItem sets a key in a HASH.

@@ -258,18 +241,20 @@ type TB interface {
Helper()
}

const TestAddr = "127.0.0.1:6379"
const testAddr = "127.0.0.1:6379"

// SetupForTest adjusts the globalPrefix and clears it out. You will have
// conflicts if you do `t.Parallel()`
func SetupForTest(t testing.TB) {
// conflicts if you do `t.Parallel()`. You should always use the returned KeyValue
// in tests. Ultimately, that will help us get rid of the global mock, and the conflicts
// from running tests in parallel.
func SetupForTest(t testing.TB) redispool.KeyValue {
t.Helper()

pool := &redis.Pool{
MaxIdle: 3,
IdleTimeout: 240 * time.Second,
Dial: func() (redis.Conn, error) {
return redis.Dial("tcp", TestAddr)
return redis.Dial("tcp", testAddr)
},
TestOnBorrow: func(c redis.Conn, t time.Time) error {
_, err := c.Do("PING")
@@ -277,6 +262,10 @@ func SetupForTest(t testing.TB) {
},
}
kvMock = redispool.RedisKeyValue(pool)
t.Cleanup(func() {
pool.Close()
kvMock = nil
})

globalPrefix = "__test__" + t.Name()
c := pool.Get()
@@ -294,27 +283,21 @@ func SetupForTest(t testing.TB) {
if err != nil {
log15.Error("Could not clear test prefix", "name", t.Name(), "globalPrefix", globalPrefix, "error", err)
}

return kvMock
}

var kvMock redispool.KeyValue

func (r *Cache) kv() redispool.KeyValue {
// TODO: We should refactor the SetupForTest method to return a KV, not mock
// a global thing.
// That can only work when all tests pass the redis connection directly to the
// tested methods though.
if kvMock != nil {
return kvMock
}
switch r.storeType {
case RedisStore:
return redispool.Store
default:
return redispool.Cache
}
}

func kv() redispool.KeyValue {
if kvMock != nil {
return kvMock
}
return redispool.Cache
return r._kv
}

var (
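With the StoreType enum gone, every Cache simply carries the KeyValue it was constructed with, and tests construct caches against the KeyValue returned by SetupForTest, as the rcache test hunks below do. A small sketch of that test-side shape, using only calls visible in those hunks (the test and prefix names are made up):

package example

import (
	"testing"

	"github.com/sourcegraph/sourcegraph/internal/rcache"
)

func TestExampleCache(t *testing.T) {
	// SetupForTest hands back the KeyValue wired to the local test Redis.
	kv := rcache.SetupForTest(t)

	c := rcache.New(kv, "example_prefix")
	c.Set("a", []byte("b"))
	if _, ok := c.Get("a"); !ok {
		t.Fatal("expected value for key a")
	}
}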
@@ -12,7 +12,7 @@ import (
)

func TestCache_namespace(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

type testcase struct {
prefix string
@@ -46,7 +46,7 @@ func TestCache_namespace(t *testing.T) {

caches := make([]*Cache, len(cases))
for i, test := range cases {
caches[i] = New(test.prefix)
caches[i] = New(kv, test.prefix)
for k, v := range test.entries {
caches[i].Set(k, []byte(v))
}
@@ -71,9 +71,9 @@ func TestCache_namespace(t *testing.T) {
}

func TestCache_simple(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

c := New("some_prefix")
c := New(kv, "some_prefix")
_, ok := c.Get("a")
if ok {
t.Fatal("Initial Get should find nothing")
@@ -96,9 +96,9 @@ func TestCache_simple(t *testing.T) {
}

func TestCache_deleteAllKeysWithPrefix(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

c := New("some_prefix")
c := New(kv, "some_prefix")
var aKeys, bKeys []string
var key string
for i := range 10 {
@@ -113,7 +113,7 @@ func TestCache_deleteAllKeysWithPrefix(t *testing.T) {
c.Set(key, []byte(strconv.Itoa(i)))
}

pool := kv().Pool()
pool := kv.Pool()

conn := pool.Get()
defer conn.Close()
@@ -145,9 +145,9 @@ func TestCache_deleteAllKeysWithPrefix(t *testing.T) {
}

func TestCache_Increase(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

c := NewWithTTL("some_prefix", 1)
c := NewWithTTL(kv, "some_prefix", 1)
c.Increase("a")

got, ok := c.Get("a")
@@ -164,9 +164,9 @@ func TestCache_Increase(t *testing.T) {
}

func TestCache_KeyTTL(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

c := NewWithTTL("some_prefix", 1)
c := NewWithTTL(kv, "some_prefix", 1)
c.Set("a", []byte("b"))

ttl, ok := c.KeyTTL("a")
@@ -187,22 +187,11 @@ func TestCache_KeyTTL(t *testing.T) {
t.Fatal("KeyTTL after setting invalid ttl should have found nothing")
}
}
func TestNewWithRedisStore(t *testing.T) {
SetupForTest(t)

// Create a Cache instance using NewWithRedisStore
c := NewWithRedisStore("test_prefix")

// Assert that the storeType field is RedisStore, indicating it uses the Redis store
if c.storeType != RedisStore {
t.Errorf("Expected storeType to be RedisStore, got %v", c.storeType)
}
}

func TestCache_SetWithTTL(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

c := NewWithTTL("some_prefix", 60)
c := NewWithTTL(kv, "some_prefix", 60)
c.SetWithTTL("a", []byte("b"), 30)
b, ok := c.Get("a")
if !ok {
@@ -233,10 +222,10 @@ func TestCache_SetWithTTL(t *testing.T) {
}

func TestCache_Hashes(t *testing.T) {
SetupForTest(t)
kv := SetupForTest(t)

// Test SetHashItem
c := NewWithTTL("simple_hash", 1)
c := NewWithTTL(kv, "simple_hash", 1)
err := c.SetHashItem("key", "hashKey1", "value1")
assert.NoError(t, err)
err = c.SetHashItem("key", "hashKey2", "value2")

@@ -12,6 +12,7 @@ go_library(
deps = [
"//internal/api",
"//internal/rcache",
"//internal/redispool",
"@com_github_sourcegraph_log//:log",
],
)

@@ -12,6 +12,7 @@ import (

"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/rcache"
"github.com/sourcegraph/sourcegraph/internal/redispool"
)

// KeyPrefix is the prefix that will be used to initialise the redis database with.
@@ -196,14 +197,14 @@ func (rf *RecordingCommandFactory) Disable() {

// Command returns a new RecordingCommand with the ShouldRecordFunc already set.
func (rf *RecordingCommandFactory) Command(ctx context.Context, logger log.Logger, repoName, cmdName string, args ...string) *RecordingCmd {
store := rcache.NewFIFOList(GetFIFOListKey(repoName), rf.maxSize)
store := rcache.NewFIFOList(redispool.Cache, GetFIFOListKey(repoName), rf.maxSize)
return RecordingCommand(ctx, logger, rf.shouldRecord, store, cmdName, args...)
}

// Wrap constructs a new RecordingCommand based of an existing os/exec.Cmd, while also setting up the ShouldRecordFunc
// currently set in the factory.
func (rf *RecordingCommandFactory) Wrap(ctx context.Context, logger log.Logger, cmd *exec.Cmd) *RecordingCmd {
store := rcache.NewFIFOList(KeyPrefix, rf.maxSize)
store := rcache.NewFIFOList(redispool.Cache, KeyPrefix, rf.maxSize)
return RecordingWrap(ctx, logger, rf.shouldRecord, store, cmd)
}

@@ -211,7 +212,7 @@ func (rf *RecordingCommandFactory) Wrap(ctx context.Context, logger log.Logger,
// os/exec.Cmd, while also setting up the ShouldRecordFunc currently set in the
// factory. It uses repoName to create a new Redis list using it.
func (rf *RecordingCommandFactory) WrapWithRepoName(ctx context.Context, logger log.Logger, repoName api.RepoName, cmd *exec.Cmd) *RecordingCmd {
store := rcache.NewFIFOList(GetFIFOListKey(string(repoName)), rf.maxSize)
store := rcache.NewFIFOList(redispool.Cache, GetFIFOListKey(string(repoName)), rf.maxSize)
return RecordingWrap(ctx, logger, rf.shouldRecord, store, cmd)
}

@@ -90,8 +90,8 @@ func getRecordingAt(t *testing.T, store *rcache.FIFOList, idx int) *wrexec.Recor
}

func TestRecordingCmd(t *testing.T) {
rcache.SetupForTest(t)
store := rcache.NewFIFOList(wrexec.KeyPrefix, 100)
kv := rcache.SetupForTest(t)
store := rcache.NewFIFOList(kv, wrexec.KeyPrefix, 100)
var recordAlways wrexec.ShouldRecordFunc = func(ctx context.Context, c *osexec.Cmd) bool {
return true
}
@@ -120,7 +120,7 @@ func TestRecordingCmd(t *testing.T) {
t.Fatalf("failed to execute recorded command: %v", err)
}

readingStore := rcache.NewFIFOList(wrexec.KeyPrefix, 100)
readingStore := rcache.NewFIFOList(kv, wrexec.KeyPrefix, 100)
recording := getFirst(t, readingStore)
if valid, err := isValidRecording(t, cmd, recording); !valid {
t.Error(err)