all: run gofmt -s -w from 1.19 (#41629)
gofmt in go1.19 does a lot of reformatting of godoc strings, mostly to make them more consistent around lists.

Test Plan: CI
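Almost all of the churn below is mechanical output of the new doc comment formatter: running `gofmt -s -w` with a Go 1.19 toolchain re-indents list items so godoc recognizes them as lists, rewrites `*` bullets as `-`, tab-indents example snippets so they render as preformatted blocks, and prefixes old-style section headings with `#`. A minimal before/after sketch of the effect (the `Frobnicate` names here are hypothetical, not taken from this commit):

```go
package frob

// FrobnicateOld carries a doc comment in the common pre-1.19 style:
// flush list items and a space-indented code sample.
//
// It does the following:
// 1. Parse the input.
// 2. Apply the transformation.
//
// Example:
//
//   out := FrobnicateOld("x")
func FrobnicateOld(s string) string { return s }

// FrobnicateNew shows roughly what gofmt from Go 1.19 rewrites such a
// comment into: list items gain a leading indent so godoc renders a real
// list, and the code sample gains a tab so it renders as a code block.
//
// It does the following:
//  1. Parse the input.
//  2. Apply the transformation.
//
// Example:
//
//	out := FrobnicateNew("x")
func FrobnicateNew(s string) string { return s }
```

Both forms compile identically; only the comment layout changes, which is why the diff touches so many files without changing any behavior.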
parent 4e103ac59d
commit 27569d1fc7
@@ -31,20 +31,20 @@ type GetAndSaveUserOp struct {
 // the necessary updates to the DB, and returns the user ID after the updates have been applied.
 //
 // At a high level, it does the following:
-// 1. Determine the identity of the user by applying the following rules in order:
-//    a. If ctx contains an authenticated Actor, the Actor's identity is the user identity.
-//    b. Look up the user by external account ID.
-//    c. If the email specified in op.UserProps is verified, Look up the user by verified email.
-//       If op.LookUpByUsername is true, look up by username instead of verified email.
-//       (Note: most clients should look up by email, as username is typically insecure.)
-//    d. If op.CreateIfNotExist is true, attempt to create a new user with the properties
-//       specified in op.UserProps. This may fail if the desired username is already taken.
-//    e. If a new user is successfully created, attempt to grant pending permissions.
-// 2. Ensure that the user is associated with the external account information. This means
-//    creating the external account if it does not already exist or updating it if it
-//    already does.
-// 3. Update any user props that have changed.
-// 4. Return the user ID.
+//  1. Determine the identity of the user by applying the following rules in order:
+//     a. If ctx contains an authenticated Actor, the Actor's identity is the user identity.
+//     b. Look up the user by external account ID.
+//     c. If the email specified in op.UserProps is verified, Look up the user by verified email.
+//     If op.LookUpByUsername is true, look up by username instead of verified email.
+//     (Note: most clients should look up by email, as username is typically insecure.)
+//     d. If op.CreateIfNotExist is true, attempt to create a new user with the properties
+//     specified in op.UserProps. This may fail if the desired username is already taken.
+//     e. If a new user is successfully created, attempt to grant pending permissions.
+//  2. Ensure that the user is associated with the external account information. This means
+//     creating the external account if it does not already exist or updating it if it
+//     already does.
+//  3. Update any user props that have changed.
+//  4. Return the user ID.
 //
 // 🚨 SECURITY: It is the caller's responsibility to ensure the veracity of the information that
 // op contains (e.g., by receiving it from the appropriate authentication mechanism). It must
@@ -15,8 +15,9 @@ var ErrNotAuthenticated = errors.New("not authenticated")
 // CheckOrgAccessOrSiteAdmin returns an error if:
 // (1) if we are on Cloud instance and the user is not a member of the organization
 // (2) if we are NOT on Cloud and
-//     (a) the user is not a member of the organization
-//     (b) the user is not a site admin
+//
+//	(a) the user is not a member of the organization
+//	(b) the user is not a site admin
 //
 // It is used when an action on an org can only be performed by the
 // organization's members, (or site-admins - not on Cloud).
@@ -331,8 +331,7 @@ var blocklistedPrometheusTypeNames = map[string]struct{}{
 // not worth tracking. You can find a complete list of the ones Prometheus is
 // currently tracking via:
 //
-//   sum by (type)(src_graphql_field_seconds_count)
-//
+//	sum by (type)(src_graphql_field_seconds_count)
 func prometheusTypeName(typeName string) string {
 	if _, ok := blocklistedPrometheusTypeNames[typeName]; ok {
 		return "other"
@@ -5,49 +5,61 @@ import (
 )
 
 // mainSchema is the main raw graqhql schema.
+//
 //go:embed schema.graphql
 var mainSchema string
 
 // batchesSchema is the Batch Changes raw graqhql schema.
+//
 //go:embed batches.graphql
 var batchesSchema string
 
 // codeIntelSchema is the Code Intel raw graqhql schema.
+//
 //go:embed codeintel.graphql
 var codeIntelSchema string
 
 // dotcomSchema is the Dotcom schema extension raw graqhql schema.
+//
 //go:embed dotcom.graphql
 var dotcomSchema string
 
 // licenseSchema is the Licensing raw graqhql schema.
+//
 //go:embed license.graphql
 var licenseSchema string
 
 // codeMonitorsSchema is the Code Monitoring raw graqhql schema.
+//
 //go:embed code_monitors.graphql
 var codeMonitorsSchema string
 
 // insightsSchema is the Code Insights raw graqhql schema.
+//
 //go:embed insights.graphql
 var insightsSchema string
 
 // authzSchema is the Authz raw graqhql schema.
+//
 //go:embed authz.graphql
 var authzSchema string
 
 // computeSchema is an experimental graphql endpoint for computing values from search results.
+//
 //go:embed compute.graphql
 var computeSchema string
 
 // searchContextsSchema is the Search Contexts raw graqhql schema.
+//
 //go:embed search_contexts.graphql
 var searchContextsSchema string
 
 // notebooksSchema is the Notebooks raw graqhql schema.
+//
 //go:embed notebooks.graphql
 var notebooksSchema string
 
 // insightsAggregationsSchema is the Code Insights Aggregations raw graqhql schema.
+//
 //go:embed insights_aggregations.graphql
 var insightsAggregationsSchema string
@@ -1,21 +1,20 @@
 // Package ui handles server-side rendering of the Sourcegraph web app.
 //
-// Development
+// # Development
 //
 // To develop, simply update the template files in cmd/frontend/internal/app/ui/...
 // and reload the page (the templates will be automatically reloaded).
 //
-// Testing the error page
+// # Testing the error page
 //
 // Testing out the layout/styling of the error page that is used to handle
 // internal server errors, 404s, etc. is very easy by visiting:
 //
-//   http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500
+//	http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500
 //
 // The parameters are as follows:
 //
-//   nodebug=true -- hides error messages (which is ALWAYS the case in production)
-//   error=theerror -- controls the error message text
-//   status=500 -- controls the status code
-//
+//	nodebug=true -- hides error messages (which is ALWAYS the case in production)
+//	error=theerror -- controls the error message text
+//	status=500 -- controls the status code
 package ui
@@ -106,6 +106,7 @@ type serveErrorHandler func(w http.ResponseWriter, r *http.Request, db database.
 // mockNewCommon is used in tests to mock newCommon (duh!).
+//
 // Ensure that the mock is reset at the end of every test by adding a call like the following:
 //
 //	defer func() {
 //		mockNewCommon = nil
 //	}()
@@ -116,13 +117,13 @@ var mockNewCommon func(w http.ResponseWriter, r *http.Request, title string, ser
 // In the event of the repository having been renamed, the request is handled
 // by newCommon and nil, nil is returned. Basic usage looks like:
 //
-//   common, err := newCommon(w, r, noIndex, serveError)
-//   if err != nil {
-//       return err
-//   }
-//   if common == nil {
-//       return nil // request was handled
-//   }
+//	common, err := newCommon(w, r, noIndex, serveError)
+//	if err != nil {
+//		return err
+//	}
+//	if common == nil {
+//		return nil // request was handled
+//	}
 //
 // In the case of a repository that is cloning, a Common data structure is
 // returned but it has an incomplete RevSpec.
@@ -378,9 +378,8 @@ func initRouter(db database.DB, router *mux.Router, codeIntelResolver graphqlbac
 // The scheme, host, and path in the specified url override ones in the incoming
 // request. For example:
 //
-//   staticRedirectHandler("http://google.com") serving "https://sourcegraph.com/foobar?q=foo" -> "http://google.com/foobar?q=foo"
-//   staticRedirectHandler("/foo") serving "https://sourcegraph.com/bar?q=foo" -> "https://sourcegraph.com/foo?q=foo"
-//
+//	staticRedirectHandler("http://google.com") serving "https://sourcegraph.com/foobar?q=foo" -> "http://google.com/foobar?q=foo"
+//	staticRedirectHandler("/foo") serving "https://sourcegraph.com/bar?q=foo" -> "https://sourcegraph.com/foo?q=foo"
 func staticRedirectHandler(u string, code int) http.Handler {
 	target, err := url.Parse(u)
 	if err != nil {
@@ -420,9 +419,8 @@ func limitString(s string, n int, ellipsis bool) string {
 // Clients that wish to return their own HTTP status code should use this from
 // their handler:
 //
-//   serveError(w, r, err, http.MyStatusCode)
-//   return nil
-//
+//	serveError(w, r, err, http.MyStatusCode)
+//	return nil
 func handler(db database.DB, f handlerFunc) http.Handler {
 	h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
 		defer func() {
@@ -544,7 +542,7 @@ func serveErrorNoDebug(w http.ResponseWriter, r *http.Request, db database.DB, e
 // serveErrorTest makes it easy to test styling/layout of the error template by
 // visiting:
 //
-//   http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500
+//	http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500
 //
 // The `nodebug=true` parameter hides error messages (which is ALWAYS the case
 // in production), `error` controls the error message text, and status controls
@@ -36,9 +36,9 @@ var goImportMetaTagTemplate = template.Must(template.New("").Parse(`<html><head>
 //
 // It implements the following mapping:
 //
-// 1. If the username (first path element) is "sourcegraph", consider it to be a vanity
-//    import path pointing to github.com/sourcegraph/<repo> as the clone URL.
-// 2. All other requests are served with 404 Not Found.
+//  1. If the username (first path element) is "sourcegraph", consider it to be a vanity
+//     import path pointing to github.com/sourcegraph/<repo> as the clone URL.
+//  2. All other requests are served with 404 Not Found.
 //
 // 🚨 SECURITY: This handler is served to all clients, even on private servers to clients who have
 // not authenticated. It must not reveal any sensitive information.
@@ -304,7 +304,7 @@ func (h *HighlightedCode) LinesForRanges(ranges []LineRange) ([][]string, error)
 	return lineRanges, nil
 }
 
-/// identifyError returns true + the problem code if err matches a known error.
+// identifyError returns true + the problem code if err matches a known error.
 func identifyError(err error) (bool, string) {
 	var problem string
 	if errors.Is(err, gosyntect.ErrRequestTooLarge) {
@@ -609,15 +609,15 @@ func CodeAsLines(ctx context.Context, p Params) ([]template.HTML, bool, error) {
 // normalizeFilepath ensures that the filepath p has a lowercase extension, i.e. it applies the
 // following transformations:
 //
-//   a/b/c/FOO.TXT → a/b/c/FOO.txt
-//   FOO.Sh → FOO.sh
+//	a/b/c/FOO.TXT → a/b/c/FOO.txt
+//	FOO.Sh → FOO.sh
 //
 // The following are left unmodified, as they already have lowercase extensions:
 //
-//   a/b/c/FOO.txt
-//   a/b/c/Makefile
-//   Makefile.am
-//   FOO.txt
+//	a/b/c/FOO.txt
+//	a/b/c/Makefile
+//	Makefile.am
+//	FOO.txt
 //
 // It expects the filepath uses forward slashes always.
 func normalizeFilepath(p string) string {
@@ -19,10 +19,10 @@ import (
 // handler implements a http.Handler that wraps a VersionCache to provide two
 // endpoints:
 //
-// - GET /.*: this looks up the given branch and returns the latest
-//   version, if any.
-// - POST /webhooks: this triggers an update of the version cache if given a
-//   valid GitHub webhook.
+//   - GET /.*: this looks up the given branch and returns the latest
+//     version, if any.
+//   - POST /webhooks: this triggers an update of the version cache if given a
+//     valid GitHub webhook.
 //
 // The routing relies on a previous handler having injected a gorilla.Mux
 // variable called "rest" that includes the path to route.
@@ -308,10 +308,10 @@ func CookieMiddleware(logger log.Logger, db database.DB, next http.Handler) http
 // CookieMiddlewareWithCSRFSafety is a middleware that authenticates HTTP requests using the
 // provided cookie (if any), *only if* one of the following is true.
 //
-// - The request originates from a trusted origin (the same origin, browser extension origin, or one
-//   in the site configuration corsOrigin allow list.)
-// - The request has the special X-Requested-With header present, which is only possible to send in
-//   browsers if the request passed the CORS preflight request (see the handleCORSRequest function.)
+//   - The request originates from a trusted origin (the same origin, browser extension origin, or one
+//     in the site configuration corsOrigin allow list.)
+//   - The request has the special X-Requested-With header present, which is only possible to send in
+//     browsers if the request passed the CORS preflight request (see the handleCORSRequest function.)
 //
 // If one of the above are not true, the request is still allowed to proceed but will be
 // unauthenticated unless some other authentication is provided, such as an access token.
@@ -1098,12 +1098,12 @@ repository should be recloned.`
 )
 
 // writeSGMLog writes a log file with the format
-// <header>
-//
-// <sgmLogPrefix>=<int>
-//
-// <error message>
+//
+//	<header>
+//
+//	<sgmLogPrefix>=<int>
+//
+//	<error message>
 func writeSGMLog(dir GitDir, m []byte) error {
 	return os.WriteFile(
 		dir.Path(sgmLog),
@@ -2542,7 +2542,7 @@ var (
 // tag called HEAD (case insensitive), most commands will output a warning
 // from git:
 //
-//   warning: refname 'HEAD' is ambiguous.
+//	warning: refname 'HEAD' is ambiguous.
 //
 // Instead we just remove this ref.
 func removeBadRefs(ctx context.Context, dir GitDir) {
@@ -30,7 +30,7 @@ import (
 // GitDir is an absolute path to a GIT_DIR.
 // They will all follow the form:
 //
-//   ${s.ReposDir}/${name}/.git
+//	${s.ReposDir}/${name}/.git
 type GitDir string
 
 // Path is a helper which returns filepath.Join(dir, elem...)
@@ -2,13 +2,13 @@
 // a specific commit.
 //
 // Architecture Notes:
-// * Archive is fetched from gitserver
-// * Simple HTTP API exposed
-// * Currently no concept of authorization
-// * On disk cache of fetched archives to reduce load on gitserver
-// * Run search on archive. Rely on OS file buffers
-// * Simple to scale up since stateless
-// * Use ingress with affinity to increase local cache hit ratio
+//   - Archive is fetched from gitserver
+//   - Simple HTTP API exposed
+//   - Currently no concept of authorization
+//   - On disk cache of fetched archives to reduce load on gitserver
+//   - Run search on archive. Rely on OS file buffers
+//   - Simple to scale up since stateless
+//   - Use ingress with affinity to increase local cache hit ratio
 package search
 
 import (
@@ -46,10 +46,10 @@ const maxFileSize = 2 << 20 // 2MB; match https://sourcegraph.com/search?q=repo:
 //
 // We use an LRU to do cache eviction:
 //
-// * When to evict is based on the total size of *.zip on disk.
-// * What to evict uses the LRU algorithm.
-// * We touch files when opening them, so can do LRU based on file
-//   modification times.
+//   - When to evict is based on the total size of *.zip on disk.
+//   - What to evict uses the LRU algorithm.
+//   - We touch files when opening them, so can do LRU based on file
+//     modification times.
 //
 // Note: The store fetches tarballs but stores zips. We want to be able to
 // filter which files we cache, so we need a format that supports streaming
@@ -28,8 +28,9 @@ type Breadcrumbs []Breadcrumb
 
 // Prints breadcrumbs like this:
 //
-//   v some breadcrumb
-//   vvv other breadcrumb
+//	v some breadcrumb
+//	vvv other breadcrumb
+//
 //	78 | func f(f Foo) {
 func (bs *Breadcrumbs) pretty(w *strings.Builder, readFile ReadFileFunc) {
 	// First collect all the breadcrumbs in a map (path -> line -> breadcrumb) for easier printing.
@@ -151,8 +151,9 @@ func (s *Server) handleGetBuild(w http.ResponseWriter, req *http.Request) {
 // handleEvent handles an event received from the http listener. A event is valid when:
 // - Has the correct headers from Buildkite
 // - On of the following events
-//   * job.finished
-//   * build.finished
+//   - job.finished
+//   - build.finished
+//
 // - Has valid JSON
 // Note that if we received an unwanted event ie. the event is not "job.finished" or "build.finished" we respond with a 200 OK regardless.
 // Once all the conditions are met, the event is processed in a go routine with `processEvent`
@@ -256,8 +256,8 @@ func (m *RunTypeMatcher) IsBranchPrefixMatcher() bool {
 // ExtractBranchArgument extracts the second segment, delimited by '/', of the branch as
 // an argument, for example:
 //
-//   prefix/{argument}
-//   prefix/{argument}/something-else
+//	prefix/{argument}
+//	prefix/{argument}/something-else
 //
 // If BranchArgumentRequired, an error is returned if no argument is found.
 //
@@ -20,11 +20,11 @@ import (
 // given repo, as well as the status of each given upload. When there is a change of
 // state for a repository, it is printed. The state changes that can occur are:
 //
-// - An upload fails to process (returns an error)
-// - An upload completes processing
-// - The last upload for a repository completes processing, but the
-//   containing repo has a stale commit graph
-// - A repository with no pending uploads has a fresh commit graph
+//   - An upload fails to process (returns an error)
+//   - An upload completes processing
+//   - The last upload for a repository completes processing, but the
+//     containing repo has a stale commit graph
+//   - A repository with no pending uploads has a fresh commit graph
 func monitor(ctx context.Context, repoNames []string, uploads []uploadMeta) error {
 	var oldState map[string]repoState
 	waitMessageDisplayed := make(map[string]struct{}, len(repoNames))
@@ -14,10 +14,10 @@ const spanCategoryKey attribute.Key = "sg.span_category"
 
 // StartSpan starts an OpenTelemetry span from context. Example:
 //
-//   ctx, span := analytics.StartSpan(ctx, spanName,
+//	ctx, span := analytics.StartSpan(ctx, spanName,
 //		trace.WithAttributes(...)
 //	defer span.End()
-//   // ... do your things
+//	// ... do your things
 //
 // Span provides convenience functions for setting the status of the span.
 func StartSpan(ctx context.Context, spanName string, category string, opts ...trace.SpanStartOption) (context.Context, *Span) {
@@ -513,8 +513,7 @@ func (i *imageRepository) checkLegacy(rawImage string) bool {
 
 // Effectively the same as:
 //
-//   $ curl -H "Authorization: Bearer $token" https://index.docker.io/v2/sourcegraph/server/tags/list
-//
+//	$ curl -H "Authorization: Bearer $token" https://index.docker.io/v2/sourcegraph/server/tags/list
 func (i *imageRepository) fetchDigest(tag string) (digest.Digest, error) {
 	req, err := http.NewRequest("GET", fmt.Sprintf("https://index.docker.io/v2/%s/manifests/%s", i.name, tag), nil)
 	if err != nil {
@@ -547,8 +546,7 @@ const dockerImageTagsURL = "https://index.docker.io/v2/%s/tags/list"
 
 // Effectively the same as:
 //
-//   $ export token=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:sourcegraph/server:pull" | jq -r .token)
-//
+//	$ export token=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:sourcegraph/server:pull" | jq -r .token)
 func (i *imageRepository) fetchAllTags() ([]string, error) {
 	if !i.isDockerRegistry {
 		return nil, ErrUnsupportedRegistry
@@ -489,8 +489,8 @@ var privilegedQueryPattern = lazyregexp.New(`(CREATE|COMMENT ON) EXTENSION .+;\n
 //
 // Currently, we consider the following query patterns as privileged from pg_dump output:
 //
-// - CREATE EXTENSION ...
-// - COMMENT ON EXTENSION ...
+//   - CREATE EXTENSION ...
+//   - COMMENT ON EXTENSION ...
 func splitPrivilegedMigrations(content string) (privilegedMigration string, unprivilegedMigration string) {
 	var privilegedQueries []string
 	unprivileged := privilegedQueryPattern.ReplaceAllStringFunc(content, func(s string) string {
@@ -114,15 +114,14 @@ func runCheck(name string, check check.CheckAction[*repo.State]) *linter {
 // yarnInstallFilter is a LineMap that filters out all the warning junk that yarn install
 // emits that seem inconsequential, for example:
 //
-//   warning "@storybook/addon-storyshots > react-test-renderer@16.14.0" has incorrect peer dependency "react@^16.14.0".
-//   warning "@storybook/addon-storyshots > @storybook/core > @storybook/core-server > @storybook/builder-webpack4 > webpack-filter-warnings-plugin@1.2.1" has incorrect peer dependency "webpack@^2.0.0 || ^3.0.0 || ^4.0.0".
-//   warning " > @storybook/react@6.5.9" has unmet peer dependency "require-from-string@^2.0.2".
-//   warning "@storybook/react > react-element-to-jsx-string@14.3.4" has incorrect peer dependency "react@^0.14.8 || ^15.0.1 || ^16.0.0 || ^17.0.1".
-//   warning " > @testing-library/react-hooks@8.0.0" has incorrect peer dependency "react@^16.9.0 || ^17.0.0".
-//   warning "storybook-addon-designs > @figspec/react@1.0.0" has incorrect peer dependency "react@^16.14.0 || ^17.0.0".
-//   warning Workspaces can only be enabled in private projects.
-//   warning Workspaces can only be enabled in private projects.
-//
+//	warning "@storybook/addon-storyshots > react-test-renderer@16.14.0" has incorrect peer dependency "react@^16.14.0".
+//	warning "@storybook/addon-storyshots > @storybook/core > @storybook/core-server > @storybook/builder-webpack4 > webpack-filter-warnings-plugin@1.2.1" has incorrect peer dependency "webpack@^2.0.0 || ^3.0.0 || ^4.0.0".
+//	warning " > @storybook/react@6.5.9" has unmet peer dependency "require-from-string@^2.0.2".
+//	warning "@storybook/react > react-element-to-jsx-string@14.3.4" has incorrect peer dependency "react@^0.14.8 || ^15.0.1 || ^16.0.0 || ^17.0.1".
+//	warning " > @testing-library/react-hooks@8.0.0" has incorrect peer dependency "react@^16.9.0 || ^17.0.0".
+//	warning "storybook-addon-designs > @figspec/react@1.0.0" has incorrect peer dependency "react@^16.14.0 || ^17.0.0".
+//	warning Workspaces can only be enabled in private projects.
+//	warning Workspaces can only be enabled in private projects.
 func yarnInstallFilter() run.LineMap {
 	return func(ctx context.Context, line []byte, dst io.Writer) (int, error) {
 		// We can't seem to do a simple prefix check, so let's just do something lazy for
@@ -22,24 +22,24 @@ import (
 // The following is a minimal example of decorating the base client, making the
 // actual logic of the decorated client extremely lean:
 //
-//   type SprocketClient struct {
-//       *httpcli.BaseClient
+//	type SprocketClient struct {
+//		*httpcli.BaseClient
 //
-//       baseURL *url.URL
-//   }
+//		baseURL *url.URL
+//	}
 //
-//   func (c *SprocketClient) Fabricate(ctx context.Context(), spec SprocketSpec) (Sprocket, error) {
-//       url := c.baseURL.ResolveReference(&url.URL{Path: "/new"})
+//	func (c *SprocketClient) Fabricate(ctx context.Context(), spec SprocketSpec) (Sprocket, error) {
+//		url := c.baseURL.ResolveReference(&url.URL{Path: "/new"})
 //
-//       req, err := httpcli.MakeJSONRequest("POST", url.String(), spec)
-//       if err != nil {
-//           return Sprocket{}, err
-//       }
+//		req, err := httpcli.MakeJSONRequest("POST", url.String(), spec)
+//		if err != nil {
+//			return Sprocket{}, err
+//		}
 //
-//       var s Sprocket
-//       err := c.client.DoAndDecode(ctx, req, &s)
-//       return s, err
-//   }
+//		var s Sprocket
+//		err := c.client.DoAndDecode(ctx, req, &s)
+//		return s, err
+//	}
 type BaseClient struct {
 	httpClient *http.Client
 	options    BaseClientOptions
@@ -249,11 +249,11 @@ func buildAuthURLRedirect(p *provider, relayState relayState) (string, error) {
 // login flows.
 //
 // SAML overloads the term "RelayState".
-// * In the SP-initiated login flow, it is an opaque value originated from the SP and reflected
-//   back in the AuthnResponse. The Sourcegraph SP uses the base64-encoded JSON of this struct as
-//   the RelayState.
-// * In the IdP-initiated login flow, the RelayState can be any arbitrary hint, but in practice
-//   is the desired post-login redirect URL in plain text.
+//   - In the SP-initiated login flow, it is an opaque value originated from the SP and reflected
+//     back in the AuthnResponse. The Sourcegraph SP uses the base64-encoded JSON of this struct as
+//     the RelayState.
+//   - In the IdP-initiated login flow, the RelayState can be any arbitrary hint, but in practice
+//     is the desired post-login redirect URL in plain text.
 type relayState struct {
 	ProviderID  string `json:"k"`
 	ReturnToURL string `json:"r"`
@@ -35,9 +35,10 @@ type repositoryConnectionResolver struct {
 // is the site admin because this method computes data from all available information in
 // the database.
 // This function takes returns a pagination of the repo IDs
-// r.ids - the full slice of sorted repo IDs
-// r.after - (optional) the repo ID to start the paging after (does not include the after ID itself)
-// r.first - the # of repo IDs to return
+//
+//	r.ids - the full slice of sorted repo IDs
+//	r.after - (optional) the repo ID to start the paging after (does not include the after ID itself)
+//	r.first - the # of repo IDs to return
 func (r *repositoryConnectionResolver) compute(ctx context.Context) ([]*types.Repo, *graphqlutil.PageInfo, error) {
 	r.once.Do(func() {
 		var idSubset []int32
@@ -34,9 +34,10 @@ type userConnectionResolver struct {
 // is the site admin because this method computes data from all available information in
 // the database.
 // This function takes returns a pagination of the user IDs
-// r.ids - the full slice of sorted user IDs
-// r.after - (optional) the user ID to start the paging after (does not include the after ID itself)
-// r.first - the # of user IDs to return
+//
+//	r.ids - the full slice of sorted user IDs
+//	r.after - (optional) the user ID to start the paging after (does not include the after ID itself)
+//	r.first - the # of user IDs to return
 func (r *userConnectionResolver) compute(ctx context.Context) ([]*types.User, *graphqlutil.PageInfo, error) {
 	r.once.Do(func() {
 		var idSubset []int32
@@ -690,9 +690,7 @@ func (r *Resolver) ownerForID64(ctx context.Context, monitorID int64) (graphql.I
 	return graphqlbackend.MarshalUserID(monitor.UserID), nil
 }
 
-//
 // MonitorConnection
-//
 type monitorConnection struct {
 	*Resolver
 	monitors []graphqlbackend.MonitorResolver
@@ -756,9 +754,7 @@ func unmarshalAfter(after *string) (*int, error) {
 	return &a, err
 }
 
-//
 // Monitor
-//
 type monitor struct {
 	*Resolver
 	*edb.Monitor
@@ -865,9 +861,7 @@ func (r *Resolver) actionConnectionResolverWithTriggerID(ctx context.Context, tr
 	return &monitorActionConnection{actions: actions, totalCount: int32(totalCount)}, nil
 }
 
-//
 // MonitorTrigger <<UNION>>
-//
 type monitorTrigger struct {
 	query graphqlbackend.MonitorQueryResolver
 }
@@ -876,9 +870,7 @@ func (t *monitorTrigger) ToMonitorQuery() (graphqlbackend.MonitorQueryResolver,
 	return t.query, t.query != nil
 }
 
-//
 // Query
-//
 type monitorQuery struct {
 	*Resolver
 	*edb.QueryTrigger
@@ -920,9 +912,7 @@ func (q *monitorQuery) Events(ctx context.Context, args *graphqlbackend.ListEven
 	return &monitorTriggerEventConnection{Resolver: q.Resolver, events: events, totalCount: totalCount}, nil
 }
 
-//
 // MonitorTriggerEventConnection
-//
 type monitorTriggerEventConnection struct {
 	*Resolver
 	events []graphqlbackend.MonitorTriggerEventResolver
@@ -944,9 +934,7 @@ func (a *monitorTriggerEventConnection) PageInfo() *graphqlutil.PageInfo {
 	return graphqlutil.NextPageCursor(string(a.events[len(a.events)-1].ID()))
 }
 
-//
 // MonitorTriggerEvent
-//
 type monitorTriggerEvent struct {
 	*Resolver
 	*edb.TriggerJob
@@ -1002,7 +990,6 @@ func (m *monitorTriggerEvent) Actions(ctx context.Context, args *graphqlbackend.
 }
 
 // ActionConnection
-//
 type monitorActionConnection struct {
 	actions    []graphqlbackend.MonitorAction
 	totalCount int32
@@ -1027,9 +1014,7 @@ func (a *monitorActionConnection) PageInfo() *graphqlutil.PageInfo {
 	panic("found non-email monitor action")
 }
 
-//
 // Action <<UNION>>
-//
 type action struct {
 	email        graphqlbackend.MonitorEmailResolver
 	webhook      graphqlbackend.MonitorWebhookResolver
@@ -1061,9 +1046,7 @@ func (a *action) ToMonitorSlackWebhook() (graphqlbackend.MonitorSlackWebhookReso
 	return a.slackWebhook, a.slackWebhook != nil
 }
 
-//
 // Email
-//
 type monitorEmail struct {
 	*Resolver
 	*edb.EmailAction
@@ -1287,9 +1270,7 @@ func intPtrToInt64Ptr(i *int) *int64 {
 	return &j
 }
 
-//
 // MonitorActionEmailRecipientConnection
-//
 type monitorActionEmailRecipientsConnection struct {
 	recipients     []graphqlbackend.NamespaceResolver
 	nextPageCursor string
@@ -1311,9 +1292,7 @@ func (a *monitorActionEmailRecipientsConnection) PageInfo() *graphqlutil.PageInf
 	return graphqlutil.NextPageCursor(a.nextPageCursor)
 }
 
-//
 // MonitorActionEventConnection
-//
 type monitorActionEventConnection struct {
 	events     []graphqlbackend.MonitorActionEventResolver
 	totalCount int32
@@ -1334,9 +1313,7 @@ func (a *monitorActionEventConnection) PageInfo() *graphqlutil.PageInfo {
 	return graphqlutil.NextPageCursor(string(a.events[len(a.events)-1].ID()))
 }
 
-//
 // MonitorEvent
-//
 type monitorActionEvent struct {
 	*Resolver
 	*edb.ActionJob
@@ -2,8 +2,8 @@
 //
 // Usage:
 //
-//   pipeline := buildkite.Pipeline{}
-//   pipeline.AddStep("check_mark", buildkite.Cmd("./dev/check/all.sh"))
+//	pipeline := buildkite.Pipeline{}
+//	pipeline.AddStep("check_mark", buildkite.Cmd("./dev/check/all.sh"))
 package buildkite
 
 import (
@@ -55,7 +55,7 @@ var topLevelGoDirs = []string{
 // ParseDiff identifies what has changed in files by generating a Diff that can be used
 // to check for specific changes, e.g.
 //
-//   if diff.Has(changed.Client | changed.GraphQL) { ... }
+//	if diff.Has(changed.Client | changed.GraphQL) { ... }
 //
 // To introduce a new type of Diff, add it a new Diff constant above and add a check in
 // this function to identify the Diff.
@@ -33,9 +33,9 @@ type CoreTestOperationsOptions struct {
 // notably, this is what is used to define operations that run on PRs. Please read the
 // following notes:
 //
-// - opts should be used ONLY to adjust the behaviour of specific steps, e.g. by adding
-//   flags and not as a condition for adding steps or commands.
-// - be careful not to add duplicate steps.
+//   - opts should be used ONLY to adjust the behaviour of specific steps, e.g. by adding
+//     flags and not as a condition for adding steps or commands.
+//   - be careful not to add duplicate steps.
 //
 // If the conditions for the addition of an operation cannot be expressed using the above
 // arguments, please add it to the switch case within `GeneratePipeline` instead.
@@ -59,14 +59,14 @@ type DeploymentTrace struct {
 //
 // The generated trace is structured as follows:
 //
-//   deploy/env ---------
-//     pr/1 -------------
-//       -------- service/1
-//       -------- service/2
-//     pr/2 ---------
-//       ---- service/1
-//       ---- service/2
-//   ...
+//	deploy/env ---------
+//	  pr/1 -------------
+//	    -------- service/1
+//	    -------- service/2
+//	  pr/2 ---------
+//	    ---- service/1
+//	    ---- service/2
+//	...
 //
 // The following fields are important in each event:
 //
@@ -16,8 +16,8 @@ import (
 )
 
 // p4ProtectLine is a parsed line from `p4 protects`. See:
-// - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protect.html#Usage_Notes_..364
-// - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protects.html#p4_protects
+//   - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protect.html#Usage_Notes_..364
+//   - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protects.html#p4_protects
 type p4ProtectLine struct {
 	level      string // e.g. read
 	entityType string // e.g. user
@@ -25,11 +25,11 @@ type parser struct {
 //
 // BNF-ish query syntax:
 //
-//   exprList := {exprSign} | exprSign (sep exprSign)*
-//   exprSign := {"-"} expr
-//   expr := fieldExpr | lit | quoted | pattern
-//   fieldExpr := lit ":" value
-//   value := lit | quoted
+//	exprList := {exprSign} | exprSign (sep exprSign)*
+//	exprSign := {"-"} expr
+//	expr := fieldExpr | lit | quoted | pattern
+//	fieldExpr := lit ":" value
+//	value := lit | quoted
 func Parse(input string) (ParseTree, error) {
 	tokens := Scan(input)
 	p := parser{tokens: tokens}
@@ -618,14 +618,14 @@ type GetRewirerMappingsOpts struct {
 // └───────────────────────────────────────┘ └───────────────────────────────┘
 //
 // We need to:
-// 1. Find out whether our new specs should _update_ an existing
-//    changeset (ChangesetSpec != 0, Changeset != 0), or whether we need to create a new one.
-// 2. Since we can have multiple changesets per repository, we need to match
-//    based on repo and external ID for imported changesets and on repo and head_ref for 'branch' changesets.
-// 3. If a changeset wasn't published yet, it doesn't have an external ID nor does it have an external head_ref.
-//    In that case, we need to check whether the branch on which we _might_
-//    push the commit (because the changeset might not be published
-//    yet) is the same or compare the external IDs in the current and new specs.
+//  1. Find out whether our new specs should _update_ an existing
+//     changeset (ChangesetSpec != 0, Changeset != 0), or whether we need to create a new one.
+//  2. Since we can have multiple changesets per repository, we need to match
+//     based on repo and external ID for imported changesets and on repo and head_ref for 'branch' changesets.
+//  3. If a changeset wasn't published yet, it doesn't have an external ID nor does it have an external head_ref.
+//     In that case, we need to check whether the branch on which we _might_
+//     push the commit (because the changeset might not be published
+//     yet) is the same or compare the external IDs in the current and new specs.
 //
 // What we want:
 //
@@ -12,8 +12,7 @@ import (
 
 // Toggles particularly slow tests. To enable, use `go test` with this flag, for example:
 //
-//   go test -timeout 360s -v -run ^TestIntegration_PermsStore$ github.com/sourcegraph/sourcegraph/enterprise/internal/database -slow-tests
-//
+//	go test -timeout 360s -v -run ^TestIntegration_PermsStore$ github.com/sourcegraph/sourcegraph/enterprise/internal/database -slow-tests
 var slowTests = flag.Bool("slow-tests", false, "Enable very slow tests")
 
 // postgresParameterLimitTest names tests that are focused on ensuring the default
@@ -114,9 +114,9 @@ func GetBackgroundQueryRunnerJob(ctx context.Context, logger log.Logger, mainApp
 
 // newWorkerMetrics returns a basic set of metrics to be used for a worker and its resetter:
 //
-// * WorkerMetrics records worker operations & number of jobs.
-// * ResetterMetrics records the number of jobs that got reset because workers timed out / took too
-//   long.
+//   - WorkerMetrics records worker operations & number of jobs.
+//   - ResetterMetrics records the number of jobs that got reset because workers timed out / took too
+//     long.
 //
 // Individual insights workers may then _also_ want to register their own metrics, if desired, in
 // their NewWorker functions.
@@ -315,28 +315,27 @@ type RepoStore interface {
 //
 // It works roughly like this:
 //
-// * For every repository on Sourcegraph (a subset on Sourcegraph.com):
-//   * Build a list of time frames that we should consider
-//   * Check the commit index to see if any timeframes can be discarded (if they didn't change)
-//   * For each frame:
-//     * Find the oldest commit in the repository.
-//     * For every unique search insight series (i.e. search query):
-//       * Consider yielding/sleeping.
-//       * If the series has data for this timeframe+repo already, nothing to do.
-//       * If the timeframe we're generating data for is before the oldest commit in the repo, record a zero value.
-//       * Else, locate the commit nearest to the point in time we're trying to get data for and
-//         enqueue a queryrunner job to search that repository commit - recording historical data
-//         for it.
+// For every repository on Sourcegraph (a subset on Sourcegraph.com):
+//  1. Build a list of time frames that we should consider
+//     - Check the commit index to see if any timeframes can be discarded (if they didn't change)
+//  2. For each frame
+//     - Find the oldest commit in the repository.
+//  3. For every unique pair of frame and search insight series (i.e. search query):
+//     - Consider yielding/sleeping.
+//     - If the series has data for this timeframe+repo already, nothing to do.
+//     - If the timeframe we're generating data for is before the oldest commit in the repo, record a zero value.
+//     - Else, locate the commit nearest to the point in time we're trying to get data for and
+//       enqueue a queryrunner job to search that repository commit - recording historical data
+//       for it.
 //
 // As you can no doubt see, there is much complexity and potential room for duplicative API calls
 // here (e.g. "for every timeframe we list every repository"). For this exact reason, we do two
 // things:
 //
-// 1. Cache duplicative calls to prevent performing heavy operations multiple times.
-// 2. Lift heavy operations to the layer/loop one level higher, when it is sane to do so.
-// 3. Ensure we perform work slowly, linearly, and with yielding/sleeping between any substantial
-//    work being performed.
-//
+//  1. Cache duplicative calls to prevent performing heavy operations multiple times.
+//  2. Lift heavy operations to the layer/loop one level higher, when it is sane to do so.
+//  3. Ensure we perform work slowly, linearly, and with yielding/sleeping between any substantial
+//     work being performed.
 type historicalEnqueuer struct {
 	// Required fields used for mocking in tests.
 	now func() time.Time
@@ -64,7 +64,6 @@ var testRealGlobalSettings = &api.Settings{ID: 1, Contents: `{
 // 1. Webhook insights are not enqueued (not yet supported.)
 // 2. Duplicate insights are deduplicated / do not submit multiple jobs.
 // 3. Jobs are scheduled not to all run at the same time.
-//
 func Test_discoverAndEnqueueInsights(t *testing.T) {
 	// Setup the setting store and job enqueuer mocks.
 	ctx := context.Background()
@@ -104,7 +104,6 @@ func (a *AllReposIterator) ForEach(ctx context.Context, forEach func(repoName st
 
 // cachedRepoStoreList calls a.repoStore.List to do a paginated list of repositories, and caches the
 // results in-memory for some time.
-//
 func (a *AllReposIterator) cachedRepoStoreList(ctx context.Context, page database.LimitOffset) ([]*types.Repo, error) {
 	if a.cachedPageRequests == nil {
 		a.cachedPageRequests = map[database.LimitOffset]cachedPageRequest{}
@@ -207,13 +207,13 @@ ORDER BY sub.series_id, sub.interval_time ASC
 // Note that the series_points table may contain duplicate points, or points recorded at irregular
 // intervals. In specific:
 //
-// 1. Multiple points recorded at the same time T for cardinality C will be considered part of the same vector.
-//    For example, series S and repos R1, R2 have a point at time T. The sum over R1,R2 at T will give the
-//    aggregated sum for that series at time T.
-// 2. Rarely, it may contain duplicate data points due to the at-least once semantics of query execution.
-//    This will cause some jitter in the aggregated series, and will skew the results slightly.
-// 3. Searches may not complete at the same exact time, so even in a perfect world if the interval
-//    should be 12h it may be off by a minute or so.
+//  1. Multiple points recorded at the same time T for cardinality C will be considered part of the same vector.
+//     For example, series S and repos R1, R2 have a point at time T. The sum over R1,R2 at T will give the
+//     aggregated sum for that series at time T.
+//  2. Rarely, it may contain duplicate data points due to the at-least once semantics of query execution.
+//     This will cause some jitter in the aggregated series, and will skew the results slightly.
+//  3. Searches may not complete at the same exact time, so even in a perfect world if the interval
+//     should be 12h it may be off by a minute or so.
 func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query {
 	preds := []*sqlf.Query{}
@@ -267,7 +267,7 @@ func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query {
 	)
 }
 
-//values constructs a SQL values statement out of an array of repository ids
+// values constructs a SQL values statement out of an array of repository ids
 func values(ids []api.RepoID) string {
 	if len(ids) == 0 {
 		return ""
@@ -3,7 +3,7 @@
 
 // Command generate-license generates a signed Sourcegraph license key.
 //
-// REQUIREMENTS
+// # REQUIREMENTS
 //
 // You must provide a private key to sign the license.
 //
@@ -12,11 +12,11 @@
 //
 // To create a test private key that will NOT generate valid licenses, use:
 //
-//   openssl genrsa -out /tmp/key.pem 2048
+//	openssl genrsa -out /tmp/key.pem 2048
 //
 // EXAMPLE
 //
-//   go run generate-license.go -private-key key.pem -tags=dev -users=100 -expires=8784h
+//	go run generate-license.go -private-key key.pem -tags=dev -users=100 -expires=8784h
 package main
 
 import (
@@ -6,7 +6,7 @@
 //
 // EXAMPLE
 //
-//   go run ./read-license.go < license-file
+//	go run ./read-license.go < license-file
 package main
 
 import (
@@ -21,16 +21,18 @@ import (
 //
 // We have the following assumptions about the schema (for a configured table T):
 //
-// 1. There is an index on T.dump_id
-// 2. For each distinct dump_id in table T, there is a corresponding row in table
-//    T_schema_version. This invariant is kept up to date via triggers on insert.
-// 3. Table T_schema_version has the following schema:
-//
-//   CREATE TABLE T_schema_versions (
-//       dump_id            integer PRIMARY KEY NOT NULL,
-//       min_schema_version integer,
-//       max_schema_version integer
-//   );
+//  1. There is an index on T.dump_id
+//
+//  2. For each distinct dump_id in table T, there is a corresponding row in table
+//     T_schema_version. This invariant is kept up to date via triggers on insert.
+//
+//  3. Table T_schema_version has the following schema:
+//
+//	CREATE TABLE T_schema_versions (
+//	    dump_id            integer PRIMARY KEY NOT NULL,
+//	    min_schema_version integer,
+//	    max_schema_version integer
+//	);
 //
 // When selecting a set of candidate records to migrate, we first use the each upload record's
 // schema version bounds to determine if there are still records associated with that upload
@@ -29,9 +29,9 @@ func IsUnrecognizedScheme(err error) bool {
 // Two forms of the Authorization header's "credentials" token are supported (see [RFC 7235,
 // Appendix C](https://tools.ietf.org/html/rfc7235#appendix-C):
 //
-// - With only an access token: "token" 1*SP token68
-// - With a token as params:
-//   "token" 1*SP "token" BWS "=" BWS quoted-string
+//   - With only an access token: "token" 1*SP token68
+//   - With a token as params:
+//     "token" 1*SP "token" BWS "=" BWS quoted-string
 //
 // The returned values are derived directly from user input and have not been validated or
 // authenticated.
@@ -15,28 +15,28 @@ import (
 //
 // Rules are expressed as Glob syntaxes:
 //
-//   pattern:
-//       { term }
+//	pattern:
+//	    { term }
 //
-//   term:
-//       `*`         matches any sequence of non-separator characters
-//       `**`        matches any sequence of characters
-//       `?`         matches any single non-separator character
-//       `[` [ `!` ] { character-range } `]`
-//                   character class (must be non-empty)
-//       `{` pattern-list `}`
-//                   pattern alternatives
-//       c           matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
-//       `\` c       matches character c
+//	term:
+//	    `*`         matches any sequence of non-separator characters
+//	    `**`        matches any sequence of characters
+//	    `?`         matches any single non-separator character
+//	    `[` [ `!` ] { character-range } `]`
+//	                character class (must be non-empty)
+//	    `{` pattern-list `}`
+//	                pattern alternatives
+//	    c           matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`)
+//	    `\` c       matches character c
 //
-//   character-range:
-//       c           matches character c (c != `\\`, `-`, `]`)
-//       `\` c       matches character c
-//       lo `-` hi   matches character c for lo <= c <= hi
+//	character-range:
+//	    c           matches character c (c != `\\`, `-`, `]`)
+//	    `\` c       matches character c
+//	    lo `-` hi   matches character c for lo <= c <= hi
 //
-//   pattern-list:
-//       pattern { `,` pattern }
-//                   comma-separated (without spaces) patterns
+//	pattern-list:
+//	    pattern { `,` pattern }
+//	                comma-separated (without spaces) patterns
 //
 // This Glob syntax is currently from github.com/gobwas/glob:
 // https://sourcegraph.com/github.com/gobwas/glob@e7a84e9525fe90abcda167b604e483cc959ad4aa/-/blob/glob.go?L39:6
@@ -131,17 +131,18 @@ func DefaultEndpointProvider(service string) []*url.URL {
 }
 
 // NewAggregateHealthCheckHandler returns a JSON with the high-level structure
-// {
-//   <service1>: {
-//      <address1>: {
-//        <check1-name> : <check1-data>,
-//        <check2-name> : <check2-data>
-//      },
-//      <address2>: ...
-//   },
-//   <service2>: ...
-//   ...
-// }
+//
+//	{
+//	  <service1>: {
+//	     <address1>: {
+//	       <check1-name> : <check1-data>,
+//	       <check2-name> : <check2-data>
+//	     },
+//	     <address2>: ...
+//	  },
+//	  <service2>: ...
+//	  ...
+//	}
 //
 // The handler should only be used in frontend.
 //
@@ -11,10 +11,10 @@ import (
 // scripts via UserData values. This struct can take one of two mutually
 // exclusive forms:
 //
-// (1) An applicable recognizer with patterns and a generate function.
-// (2) A fallback recognizer, which consists of a list of children.
-//     Execution of a fallback recognizer will invoke its children,
-//     in order and recursively, until the non-empty value is yielded.
+//	(1) An applicable recognizer with patterns and a generate function.
+//	(2) A fallback recognizer, which consists of a list of children.
+//	    Execution of a fallback recognizer will invoke its children,
+//	    in order and recursively, until the non-empty value is yielded.
 type Recognizer struct {
 	patterns           []*PathPattern
 	patternsForContent []*PathPattern
@@ -341,10 +341,10 @@ type configurationFactoryFunc func(ctx context.Context, repositoryID int, commit
 // getIndexRecords determines the set of index records that should be enqueued for the given commit.
 // For each repository, we look for index configuration in the following order:
 //
-// - supplied explicitly via parameter
-// - in the database
-// - committed to `sourcegraph.yaml` in the repository
-// - inferred from the repository structure
+//   - supplied explicitly via parameter
+//   - in the database
+//   - committed to `sourcegraph.yaml` in the repository
+//   - inferred from the repository structure
 func (s *Service) getIndexRecords(ctx context.Context, repositoryID int, commit, configuration string, bypassLimit bool) ([]shared.Index, error) {
 	fns := []configurationFactoryFunc{
 		makeExplicitConfigurationFactory(configuration),
@@ -136,9 +136,9 @@ func reverseGraph(graph map[string][]string) map[string][]string {
 // tokens to upload meta value. Select commits are any commits that satisfy one of the following
 // properties:
 //
-// 1. They define an upload,
-// 2. They have multiple parents, or
-// 3. They have a child with multiple parents.
+//  1. They define an upload,
+//  2. They have multiple parents, or
+//  3. They have a child with multiple parents.
 //
 // For all remaining commits, we can easily re-calculate the visible uploads without storing them.
 // All such commits have a single, unambiguous path to an ancestor that does store data. These
@@ -187,9 +187,9 @@ func populateUploadsByTraversal(graph map[string][]string, order []string, commi
 // populateUploadsForCommit populates the items stored in the given mapping for the given commit.
 // The uploads considered visible for a commit include:
 //
-// 1. the set of uploads defined on that commit, and
-// 2. the set of uploads visible from the ancestors with the minimum distance
-//    for equivalent root and indexer values.
+//  1. the set of uploads defined on that commit, and
+//  2. the set of uploads visible from the ancestors with the minimum distance
+//     for equivalent root and indexer values.
 //
 // If two ancestors have different uploads visible for the same root and indexer, the one with the
 // smaller distance to the source commit will shadow the other. Similarly, If an ancestor and the
@@ -11,7 +11,7 @@ import (
 
 // GetService creates or returns an already-initialized dependencies service. If the service is
 // new, it will use the given database handle and syncer instance. If the given syncer is nil,
-/// then ErrorSyncer will be used instead.
+// then ErrorSyncer will be used instead.
 func GetService(db database.DB) *dependencies.Service {
 	return dependencies.GetService(db)
 }
@@ -54,10 +54,10 @@ func mergeSourceCommits(usc []uploads.SourcedCommits, isc []autoindexing.Sourced
 	return sourceCommits
 }
 
-// func (j *janitor) HandleError(err error) {
-// 	j.metrics.numErrors.Inc()
-// 	log.Error("Failed to delete codeintel records with an unknown commit", "error", err)
-// }
+//	func (j *janitor) HandleError(err error) {
+//		j.metrics.numErrors.Inc()
+//		log.Error("Failed to delete codeintel records with an unknown commit", "error", err)
+//	}
 type SourcedCommits struct {
 	RepositoryID   int
 	RepositoryName string
@@ -456,7 +456,6 @@ const numAncestors = 100
 // the graph. This will not always produce the full set of visible commits - some responses may not contain
 // all results while a subsequent request made after the lsif_nearest_uploads has been updated to include
 // this commit will.
-//
 func (s *Service) InferClosestUploads(ctx context.Context, repositoryID int, commit, path string, exactPath bool, indexer string) (_ []shared.Dump, err error) {
 	ctx, _, endObservation := s.operations.inferClosestUploads.With(ctx, &err, observation.Args{
 		LogFields: []log.Field{log.Int("repositoryID", repositoryID), log.String("commit", commit), log.String("path", path), log.Bool("exactPath", exactPath), log.String("indexer", indexer)},
@@ -23,7 +23,6 @@ import (
 //
 // - The site configuration, from the database (from the site-admin panel).
 // - Service connections, from the frontend (e.g. which gitservers to talk to).
-//
 type Unified struct {
 	schema.SiteConfiguration
 	ServiceConnectionConfig conftypes.ServiceConnections
@ -16,19 +16,19 @@ import (
//
// The signature of this function allows scan methods to be written uniformly:
//
//	func ScanThings(rows *sql.Rows, queryErr error) (_ []Thing, err error) {
//	    if queryErr != nil {
//	        return nil, queryErr
//	    }
//	    defer func() { err = CloseRows(rows, err) }()
//	func ScanThings(rows *sql.Rows, queryErr error) (_ []Thing, err error) {
//	    if queryErr != nil {
//	        return nil, queryErr
//	    }
//	    defer func() { err = CloseRows(rows, err) }()
//
//	    // read things from rows
//	}
//	    // read things from rows
//	}
//
// Scan methods should be called directly with the results of `*store.Query` to
// ensure that the rows are always properly handled.
//
//	things, err := ScanThings(store.Query(ctx, query))
//	things, err := ScanThings(store.Query(ctx, query))
func CloseRows(rows *sql.Rows, err error) error {
	return errors.Append(err, rows.Close(), rows.Err())
}

@ -23,22 +23,22 @@ import (
// return a modified base store with no methods from the outer layer. All other methods
// of the base store are available on the outer layer without needing to be re-defined.
//
//	type SprocketStore struct {
//	    *basestore.Store
//	}
//	type SprocketStore struct {
//	    *basestore.Store
//	}
//
//	func NewWithDB(database dbutil.DB) *SprocketStore {
//	    return &SprocketStore{Store: basestore.NewWithDB(database, sql.TxOptions{})}
//	}
//	func NewWithDB(database dbutil.DB) *SprocketStore {
//	    return &SprocketStore{Store: basestore.NewWithDB(database, sql.TxOptions{})}
//	}
//
//	func (s *SprocketStore) With(other basestore.ShareableStore) *SprocketStore {
//	    return &SprocketStore{Store: s.Store.With(other)}
//	}
//	func (s *SprocketStore) With(other basestore.ShareableStore) *SprocketStore {
//	    return &SprocketStore{Store: s.Store.With(other)}
//	}
//
//	func (s *SprocketStore) Transact(ctx context.Context) (*SprocketStore, error) {
//	    txBase, err := s.Store.Transact(ctx)
//	    return &SprocketStore{Store: txBase}, err
//	}
//	func (s *SprocketStore) Transact(ctx context.Context) (*SprocketStore, error) {
//	    txBase, err := s.Store.Transact(ctx)
//	    return &SprocketStore{Store: txBase}, err
//	}
type Store struct {
	handle TransactableHandle
}
@ -67,12 +67,12 @@ func (s *Store) Handle() TransactableHandle {
// This method should be used when two distinct store instances need to perform an
// operation within the same shared transaction.
//
//	txn1 := store1.Transact(ctx) // Creates a transaction
//	txn2 := store2.With(txn1)    // References the same transaction
//	txn1 := store1.Transact(ctx) // Creates a transaction
//	txn2 := store2.With(txn1)    // References the same transaction
//
//	txn1.A(ctx) // Occurs within shared transaction
//	txn2.B(ctx) // Occurs within shared transaction
//	txn1.Done() // closes shared transaction
//	txn1.A(ctx) // Occurs within shared transaction
//	txn2.B(ctx) // Occurs within shared transaction
//	txn1.Done() // closes shared transaction
//
// Note that once a handle is shared between two stores, committing or rolling back
// a transaction will affect the handle of both stores. Most notably, two stores that

@ -88,27 +88,27 @@ func openDBWithStartupWait(cfg *pgx.ConnConfig) (db *sql.DB, err error) {
// For all mandatory methods the sqlHooks driver is used. For the optional methods namely Ping, ResetSession and CheckNamedValue
// (which the sqlHooks driver does not implement), extendedConn goes to the original default driver.
//
//	Ping()
//	ResetSession()
//	CheckNamedValue()
//	        ┌──────────────────────────────┐
//	        │                              │
//	        │                              │
//	        │                              │
//	┌───────┐   ┌──────┴─────┐   ┌────────┐     ┌─────▼───────┐
//	│       │   │            │   │        │     │             │
//	│otelsql├──►│extendedConn├──►│sqlhooks├────►│DefaultDriver│
//	│       │   │            │   │        │     │             │
//	└─┬─────┘   └─┬──────────┘   └─┬──────┘     └─┬───────────┘
//	  │           │                │              │
//	  │           │                │              │Implements all SQL driver methods
//	  │           │                │
//	  │           │                │Only implements mandatory ones
//	  │           │                │Ping(), ResetSession() and CheckNamedValue() are missing.
//	  │           │
//	  │           │Implement all SQL driver methods
//	  │
//	  │Expects all SQL driver methods
//	Ping()
//	ResetSession()
//	CheckNamedValue()
//	        ┌──────────────────────────────┐
//	        │                              │
//	        │                              │
//	        │                              │
//	┌───────┐   ┌──────┴─────┐   ┌────────┐     ┌─────▼───────┐
//	│       │   │            │   │        │     │             │
//	│otelsql├──►│extendedConn├──►│sqlhooks├────►│DefaultDriver│
//	│       │   │            │   │        │     │             │
//	└─┬─────┘   └─┬──────────┘   └─┬──────┘     └─┬───────────┘
//	  │           │                │              │
//	  │           │                │              │Implements all SQL driver methods
//	  │           │                │
//	  │           │                │Only implements mandatory ones
//	  │           │                │Ping(), ResetSession() and CheckNamedValue() are missing.
//	  │           │
//	  │           │Implement all SQL driver methods
//	  │
//	  │Expects all SQL driver methods
//
// A sqlhooks.Driver must be used as a Driver otherwise errors will be raised.
type extendedDriver struct {

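As a hedged illustration of why the optional methods matter, the sketch below shows the standard-library pattern for forwarding driver.Pinger to a wrapped connection. It is a minimal example of the technique, not this package's actual implementation; all names are invented.

	// pingForwardingConn satisfies driver.Pinger by delegating to the wrapped
	// connection when (and only when) that connection implements the optional
	// interface itself.
	type pingForwardingConn struct {
		driver.Conn // embedded: the mandatory methods come from the wrapped conn
	}

	func (c *pingForwardingConn) Ping(ctx context.Context) error {
		if p, ok := c.Conn.(driver.Pinger); ok {
			return p.Ping(ctx)
		}
		// database/sql treats a missing Pinger as "always healthy"; surfacing
		// an explicit error here is one possible alternative.
		return errors.New("underlying driver.Conn does not implement driver.Pinger")
	}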
@ -25,7 +25,9 @@ import (
// transaction if an error didn't occur.
//
// After opening this transaction, it executes the query
//	SET CONSTRAINTS ALL DEFERRED
//
//	SET CONSTRAINTS ALL DEFERRED
//
// which aids in testing.
func NewTx(t testing.TB, db *sql.DB) *sql.Tx {
	tx, err := db.Begin()

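A hedged usage sketch for the helper above; the dbtest package qualifier and the openTestDB setup helper are assumptions for illustration:

	func TestUserStore(t *testing.T) {
		db := openTestDB(t) // assumed helper returning a *sql.DB
		tx := dbtest.NewTx(t, db)

		// Deferred constraints let fixtures insert rows in any order; the
		// checks would only run at commit time.
		if _, err := tx.Exec(`INSERT INTO users (id, name) VALUES (1, 'alice')`); err != nil {
			t.Fatal(err)
		}
	}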
@ -122,11 +122,11 @@ func (ds *Definitions) Filter(ids []int) (*Definitions, error) {
// of all leaves. This gives us a nice clean single-entry, single-exit graph prefix
// that can be squashed into a single migration.
//
//	          +-- ... --+           +-- [ leaf 1 ]
//	          |         |           |
//	[ root ] -+         +- [ nca ] -+
//	          |         |           |
//	          +-- ... --+           +-- [ leaf 2 ]
//	          +-- ... --+           +-- [ leaf 1 ]
//	          |         |           |
//	[ root ] -+         +- [ nca ] -+
//	          |         |           |
//	          +-- ... --+           +-- [ leaf 2 ]
func (ds *Definitions) LeafDominator() (Definition, bool) {
	leaves := ds.Leaves()
	if len(leaves) == 0 {

@ -210,7 +210,7 @@ type NewUser struct {
// username/email and password. If no password is given, a non-builtin auth provider must be used to
// sign into the account.
//
// CREATION OF SITE ADMINS
// # CREATION OF SITE ADMINS
//
// The new user is made to be a site admin if the following are both true: (1) this user would be
// the first and only user on the server, and (2) the site has not yet been initialized. Otherwise,

@ -47,10 +47,9 @@ type endpoints struct {
//
// Example URL specifiers:
//
//	"k8s+http://searcher"
//	"k8s+rpc://indexed-searcher?kind=sts"
//	"http://searcher-0 http://searcher-1 http://searcher-2"
//
//	"k8s+http://searcher"
//	"k8s+rpc://indexed-searcher?kind=sts"
//	"http://searcher-0 http://searcher-1 http://searcher-2"
func New(urlspec string) *Map {
	if !strings.HasPrefix(urlspec, "k8s+") {
		return Static(strings.Fields(urlspec)...)

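A hedged usage sketch of the specifier formats listed above; the endpoint package qualifier is inferred from context, and everything beyond construction is assumed:

	// Static, non-Kubernetes endpoints: a space-separated list.
	searchers := endpoint.New("http://searcher-0 http://searcher-1 http://searcher-2")

	// Kubernetes service discovery: the "k8s+" prefix selects watch-based resolution.
	indexed := endpoint.New("k8s+rpc://indexed-searcher?kind=sts")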
internal/env/baseconfig.go
@ -23,31 +23,31 @@ type Config interface {
// BaseConfig is a base struct for configuration objects. The following is a minimal
// example of declaring, loading, and validating configuration from the environment.
//
//	type Config struct {
//	    env.BaseConfig
//	type Config struct {
//	    env.BaseConfig
//
//	    Name   string
//	    Weight int
//	    Age    time.Duration
//	}
//	    Name   string
//	    Weight int
//	    Age    time.Duration
//	}
//
//	func (c *Config) Load() {
//	    c.Name = c.Get("SRC_NAME", "test", "The service's name (wat).")
//	    c.Weight = c.GetInt("SRC_WEIGHT", "1m", "The service's weight (wat).")
//	    c.Age = c.GetInterval("SRC_AGE", "10s", "The service's age (wat).")
//	}
//	func (c *Config) Load() {
//	    c.Name = c.Get("SRC_NAME", "test", "The service's name (wat).")
//	    c.Weight = c.GetInt("SRC_WEIGHT", "1m", "The service's weight (wat).")
//	    c.Age = c.GetInterval("SRC_AGE", "10s", "The service's age (wat).")
//	}
//
//	func applicationInit() {
//	    config := &Config{}
//	    config.Load()
//	func applicationInit() {
//	    config := &Config{}
//	    config.Load()
//
//	    env.Lock()
//	    env.HandleHelpFlag()
//	    env.Lock()
//	    env.HandleHelpFlag()
//
//	    if err := config.Validate(); err != nil {
//	        // handle me
//	    }
//	}
//	    if err := config.Validate(); err != nil {
//	        // handle me
//	    }
//	}
type BaseConfig struct {
	errs []error

@ -19,7 +19,6 @@
// containing source code or a binary
//
// https://pypi.org/help/#packages
//
package pypi

import (

@ -1123,8 +1123,9 @@ func (c *clientImplementor) httpPostWithURI(ctx context.Context, repo api.RepoNa
	return c.do(ctx, repo, "POST", uri, b)
}

//nolint:unparam // unparam complains that `method` always has same value across call-sites, but that's OK
// do performs a request to a gitserver instance based on the address in the uri argument.
//
//nolint:unparam // unparam complains that `method` always has same value across call-sites, but that's OK
func (c *clientImplementor) do(ctx context.Context, repo api.RepoName, method, uri string, payload []byte) (resp *http.Response, err error) {
	parsedURL, err := url.ParseRequestURI(uri)
	if err != nil {

@ -238,12 +238,11 @@ func parseShortLog(out []byte) ([]*gitdomain.ContributorCount, error) {
// the following somewhat-common malformed syntax where a user has misconfigured
// their email address as their name:
//
//	foo@gmail.com <foo@gmail.com>
//	foo@gmail.com <foo@gmail.com>
//
// As a valid name, whereas mail.ParseAddress would return an error:
//
//	mail: expected single address, got "<foo@gmail.com>"
//
//	mail: expected single address, got "<foo@gmail.com>"
func lenientParseAddress(address string) (*mail.Address, error) {
	addr, err := mail.ParseAddress(address)
	if err != nil && strings.Contains(err.Error(), "expected single address") {
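The hunk cuts off mid-function; purely as a hedged sketch, the lenient fallback it documents could look like the following (illustrative, not the repository's verbatim code):

	func lenientParseAddress(address string) (*mail.Address, error) {
		addr, err := mail.ParseAddress(address)
		if err != nil && strings.Contains(err.Error(), "expected single address") {
			// Accept inputs like `foo@gmail.com <foo@gmail.com>` by treating
			// everything before the '<' as the display name.
			return &mail.Address{Name: strings.TrimSpace(strings.Split(address, "<")[0])}, nil
		}
		return addr, err
	}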
@ -1042,8 +1041,8 @@ func (c *clientImplementor) ListDirectoryChildren(
// cleanDirectoriesForLsTree sanitizes the input dirnames to a git ls-tree command. There are a
// few peculiarities handled here:
//
//  1. The root of the tree must be indicated with `.`, and
//  2. In order for git ls-tree to return a directory's contents, the name must end in a slash.
//  1. The root of the tree must be indicated with `.`, and
//  2. In order for git ls-tree to return a directory's contents, the name must end in a slash.
func cleanDirectoriesForLsTree(dirnames []string) []string {
	var args []string
	for _, dir := range dirnames {

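A hedged sketch of the loop body implied by the two rules above ("." for the root, trailing slash for directories); illustrative rather than the verbatim implementation:

	for _, dir := range dirnames {
		if dir == "" {
			// Rule 1: the tree root is addressed as "." in git ls-tree.
			args = append(args, ".")
			continue
		}
		// Rule 2: a trailing slash makes git ls-tree list the directory's contents.
		if !strings.HasSuffix(dir, "/") {
			dir += "/"
		}
		args = append(args, dir)
	}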
@ -11,10 +11,10 @@ import (

// HTTPMiddleware wraps the handler with the following:
//
// - If the HTTP header, X-Sourcegraph-Should-Trace, is set to a truthy value, set the
//   shouldTraceKey context.Context value to true
// - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp, which applies the
//   desired instrumentation.
//   - If the HTTP header, X-Sourcegraph-Should-Trace, is set to a truthy value, set the
//     shouldTraceKey context.Context value to true
//   - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp, which applies the
//     desired instrumentation.
//
// The provided operation name is used to add details to spans.
func HTTPMiddleware(operation string, h http.Handler, opts ...otelhttp.Option) http.Handler {

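A hedged usage sketch of the middleware; the trace package qualifier and the "frontend" operation name are assumptions for illustration:

	mux := http.NewServeMux()
	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusOK)
	})

	// Wrap the mux so every request carries the tracing instrumentation
	// described above.
	handler := trace.HTTPMiddleware("frontend", mux)
	_ = http.ListenAndServe(":8080", handler)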
@ -97,9 +97,9 @@ func (c *Monitor) Get() (remaining int, reset, retry time.Duration, known bool)
// want to perform a cost-500 operation. Only 4 more cost-500 operations are allowed in the next 30 minutes (per
// the rate limit):
//
//	               -500         -500         -500
//	Now |------------*------------*------------*------------| 30 min from now
//	Remaining      1500         1000          500           0 5000 (reset)
//	               -500         -500         -500
//	Now |------------*------------*------------*------------| 30 min from now
//	Remaining      1500         1000          500           0 5000 (reset)
//
// Assuming no other operations are being performed (that count against the rate limit), the recommended wait would
// be 7.5 minutes (30 minutes / 4), so that the operations are evenly spaced out.

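A minimal sketch of the even-spacing heuristic described above; the function name and parameters are illustrative, not this package's API:

	func recommendedWait(untilReset time.Duration, remaining, cost int) time.Duration {
		n := remaining / cost // how many such operations the budget still allows
		if n <= 0 {
			return untilReset // budget exhausted: wait for the reset
		}
		return untilReset / time.Duration(n)
	}

With the worked example from the comment, recommendedWait(30*time.Minute, 2000, 500) yields 30m/4 = 7m30s.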
@ -37,10 +37,11 @@ type MutexOptions struct {
// If, on release, we are unable to unlock the mutex it will continue to be locked until
// it is expired by Redis.
// The returned context will be cancelled if any of the following occur:
// * The parent context is cancelled
// * The release function is called
// * There is an error extending the lock expiry or the expiry can't be extended because
//   the key no longer exists in Redis
//   - The parent context is cancelled
//   - The release function is called
//   - There is an error extending the lock expiry or the expiry can't be extended because
//     the key no longer exists in Redis
//
// A caller can therefore assume that they are the sole holder of the lock as long as the
// context has not been cancelled.
func TryAcquireMutex(ctx context.Context, name string, options MutexOptions) (context.Context, func(), bool) {

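A hedged usage sketch; the redispool package qualifier is an assumption (the hunk does not show where TryAcquireMutex lives), and the lock name is illustrative:

	ctx, release, ok := redispool.TryAcquireMutex(ctx, "cleanup-job", redispool.MutexOptions{})
	if !ok {
		return // another process holds the lock
	}
	defer release()

	// The protected work is safe while ctx remains uncancelled; losing the
	// lock cancels ctx, so long-running work should watch ctx.Done().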
@ -56,9 +56,9 @@ func init() {
var schemeMatcher = lazyregexp.New(`^[A-Za-z][A-Za-z0-9\+\-\.]*://`)

// dialRedis dials Redis given the raw endpoint string. The string can have two formats:
// 1) If there is an HTTP scheme, it should either be "redis://" or "rediss://" and the URL
//    must be of the format specified in https://www.iana.org/assignments/uri-schemes/prov/redis.
// 2) Otherwise, it is assumed to be of the format $HOSTNAME:$PORT.
//  1. If there is an HTTP scheme, it should either be "redis://" or "rediss://" and the URL
//     must be of the format specified in https://www.iana.org/assignments/uri-schemes/prov/redis.
//  2. Otherwise, it is assumed to be of the format $HOSTNAME:$PORT.
func dialRedis(rawEndpoint string) (redis.Conn, error) {
	if schemeMatcher.MatchString(rawEndpoint) { // expect "redis://"
		return redis.DialURL(rawEndpoint)

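The hunk ends mid-function; a plausible, hedged completion for format (2), using redigo's plain dialer (an assumption about the client library in use):

	// No URL scheme: treat the endpoint as $HOSTNAME:$PORT.
	return redis.Dial("tcp", rawEndpoint)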
@ -287,13 +287,12 @@ var configuredLimiter = func() *mutablelimiter.Limiter {
// possible. We treat repos differently depending on which part of the
// diff they are:
//
//
//	Deleted    - remove from scheduler and queue.
//	Added      - new repo, enqueue for asap clone.
//	Modified   - likely new url or name. May also be a sign of new
//	             commits. Enqueue for asap clone (or fetch).
//	Unmodified - we likely already have this cloned. Just rely on
//	             the scheduler and do not enqueue.
//	Deleted    - remove from scheduler and queue.
//	Added      - new repo, enqueue for asap clone.
//	Modified   - likely new url or name. May also be a sign of new
//	             commits. Enqueue for asap clone (or fetch).
//	Unmodified - we likely already have this cloned. Just rely on
//	             the scheduler and do not enqueue.
func (s *UpdateScheduler) UpdateFromDiff(diff Diff) {
	for _, r := range diff.Deleted {
		s.remove(r)

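For the diff buckets the hunk does not show, a hedged sketch of what the listing above implies; the enqueue method name is illustrative, since only s.remove appears in the source:

	for _, r := range diff.Added {
		s.enqueueClone(r) // new repo: clone as soon as possible
	}
	for _, r := range diff.Modified {
		s.enqueueClone(r) // new URL/name or new commits: clone or fetch
	}
	// diff.Unmodified: nothing to enqueue; the scheduler already tracks it.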
@ -285,11 +285,11 @@ func (rm ReposModified) ReposModified(modified types.RepoModified) types.Repos {
//
// It works for repos from:
//
// 1. Public "cloud_default" code hosts since we don't sync them in the background
//    (which would delete lazy synced repos).
// 2. Any package hosts (e.g. npm, Maven, etc.) since callers are expected to store
//    repos in the `lsif_dependency_repos` table which is used as the source of truth
//    for the next full sync, so lazy added repos don't get wiped.
//  1. Public "cloud_default" code hosts since we don't sync them in the background
//     (which would delete lazy synced repos).
//  2. Any package hosts (e.g. npm, Maven, etc.) since callers are expected to store
//     repos in the `lsif_dependency_repos` table which is used as the source of truth
//     for the next full sync, so lazy added repos don't get wiped.
//
// The "background" boolean flag indicates that we should run this
// sync in the background vs block and call s.syncRepo synchronously.

@ -117,9 +117,9 @@ type OrJob struct {
}

// For OR queries, there are two phases:
//  1) Stream any results that are found in every subquery
//  2) Once all subqueries have completed, send the results we've found that
//     were returned by some subqueries, but not all subqueries.
//  1. Stream any results that are found in every subquery
//  2. Once all subqueries have completed, send the results we've found that
//     were returned by some subqueries, but not all subqueries.
//
// This means that the only time we would hit the streaming limit before we have
// results from all subqueries is if we hit the limit only with results from
@ -132,17 +132,17 @@ type OrJob struct {
// they will be from a random distribution of sub-queries.
//
// This solution has the following nice properties:
//   - Early cancellation is possible
//   - Results are streamed where possible, decreasing user-visible latency
//   - The only results that are streamed are "fair" results. They are "fair" because
//     they were returned from every subquery, so there can be no bias between subqueries
//   - The only time we cancel early is when streamed results hit the limit. Since the only
//     streamed results are "fair" results, there will be no bias against slow or low-volume subqueries
//   - Every result we stream is guaranteed to be "complete". By "complete", I mean if I search for "a or b",
//     the streamed result will highlight both "a" and "b" if they both exist in the document.
//   - The bias is towards documents that match all of our subqueries, so doesn't bias any individual subquery.
//     Additionally, a bias towards matching all subqueries is probably desirable, since it's more likely that
//     a document matching all subqueries is what the user is looking for than a document matching only one.
//   - Early cancellation is possible
//   - Results are streamed where possible, decreasing user-visible latency
//   - The only results that are streamed are "fair" results. They are "fair" because
//     they were returned from every subquery, so there can be no bias between subqueries
//   - The only time we cancel early is when streamed results hit the limit. Since the only
//     streamed results are "fair" results, there will be no bias against slow or low-volume subqueries
//   - Every result we stream is guaranteed to be "complete". By "complete", I mean if I search for "a or b",
//     the streamed result will highlight both "a" and "b" if they both exist in the document.
//   - The bias is towards documents that match all of our subqueries, so doesn't bias any individual subquery.
//     Additionally, a bias towards matching all subqueries is probably desirable, since it's more likely that
//     a document matching all subqueries is what the user is looking for than a document matching only one.
func (j *OrJob) Run(ctx context.Context, clients job.RuntimeClients, stream streaming.Sender) (alert *search.Alert, err error) {
	_, ctx, stream, finish := job.StartSpan(ctx, stream, j)
	defer func() { finish(alert, err) }()

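A hedged sketch of the two-phase bookkeeping the comment describes; the types are illustrative stand-ins for the real streaming machinery:

	// orMerger tracks, per result key, how many subqueries have returned it.
	type orMerger struct {
		numSubqueries int
		hits          map[string]int
	}

	func newOrMerger(n int) *orMerger {
		return &orMerger{numSubqueries: n, hits: make(map[string]int)}
	}

	// add records a result from one subquery and reports whether it is now
	// "fair" (returned by every subquery) and therefore safe to stream (phase 1).
	func (m *orMerger) add(key string) bool {
		m.hits[key]++
		return m.hits[key] == m.numSubqueries
	}

	// unsent returns the keys seen by some but not all subqueries; these are
	// flushed only after every subquery completes (phase 2).
	func (m *orMerger) unsent() []string {
		var keys []string
		for k, n := range m.hits {
			if n < m.numSubqueries {
				keys = append(keys, k)
			}
		}
		return keys
	}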
@ -515,13 +515,20 @@ func conjunction(left, right Basic) Basic {
// pattern node, just not in any of the parameters.
//
// For example, the query
//	repo:a (file:b OR file:c)
//
//	repo:a (file:b OR file:c)
//
// is transformed to
//	(repo:a file:b) OR (repo:a file:c)
//
//	(repo:a file:b) OR (repo:a file:c)
//
// but the query
//	(repo:a OR repo:b) (b OR c)
//
//	(repo:a OR repo:b) (b OR c)
//
// is transformed to
//	(repo:a (b OR c)) OR (repo:b (b OR c))
//
//	(repo:a (b OR c)) OR (repo:b (b OR c))
func BuildPlan(query []Node) Plan {
	return distribute([]Basic{}, query)
}

@ -203,10 +203,11 @@ func (p Plan) ToQ() Q {

// Basic represents a leaf expression to evaluate in our search engine. A basic
// query comprises:
// (1) a single search pattern expression, which may contain
//     'and' or 'or' operators; and
// (2) parameters that scope the evaluation of search
//     patterns (e.g., to repos, files, etc.).
//
//	(1) a single search pattern expression, which may contain
//	    'and' or 'or' operators; and
//	(2) parameters that scope the evaluation of search
//	    patterns (e.g., to repos, files, etc.).
type Basic struct {
	Parameters
	Pattern Node

@ -74,7 +74,7 @@ func (r *RepositoryRevisions) Equal(other *RepositoryRevisions) bool {
// ParseRepositoryRevisions parses strings that refer to a repository and 0
// or more revspecs. The format is:
//
//	repo@revs
//	repo@revs
//
// where repo is a repository regex and revs is a ':'-separated list of revspecs
// and/or ref globs. A ref glob is a revspec prefixed with '*' (which is not a
@ -83,13 +83,13 @@ func (r *RepositoryRevisions) Equal(other *RepositoryRevisions) bool {
//
// For example:
//
// - 'foo' refers to the 'foo' repo at the default branch
// - 'foo@bar' refers to the 'foo' repo and the 'bar' revspec.
// - 'foo@bar:baz:qux' refers to the 'foo' repo and 3 revspecs: 'bar', 'baz',
//   and 'qux'.
// - 'foo@*bar' refers to the 'foo' repo and all refs matching the glob 'bar/*',
//   because git interprets the ref glob 'bar' as being 'bar/*' (see `man git-log`
//   section on the --glob flag)
//   - 'foo' refers to the 'foo' repo at the default branch
//   - 'foo@bar' refers to the 'foo' repo and the 'bar' revspec.
//   - 'foo@bar:baz:qux' refers to the 'foo' repo and 3 revspecs: 'bar', 'baz',
//     and 'qux'.
//   - 'foo@*bar' refers to the 'foo' repo and all refs matching the glob 'bar/*',
//     because git interprets the ref glob 'bar' as being 'bar/*' (see `man git-log`
//     section on the --glob flag)
func ParseRepositoryRevisions(repoAndOptionalRev string) (string, []RevisionSpecifier) {
	i := strings.Index(repoAndOptionalRev, "@")
	if i == -1 {

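A hedged usage sketch; the package qualifier is an assumption, and the second return value's exact contents depend on RevisionSpecifier, which the hunk does not show:

	repo, revs := query.ParseRepositoryRevisions("github.com/foo/bar@dev:v1.2.0")
	// repo == "github.com/foo/bar"; revs describes the 'dev' and 'v1.2.0' revspecs.
	_ = revs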
@ -123,8 +123,8 @@ func (fm *FileMatch) AppendMatches(src *FileMatch) {
// Limit will mutate fm such that it only has limit results. limit is a number
// greater than 0.
//
//	if limit >= ResultCount then nothing is done and we return limit - ResultCount.
//	if limit < ResultCount then ResultCount becomes limit and we return 0.
//	if limit >= ResultCount then nothing is done and we return limit - ResultCount.
//	if limit < ResultCount then ResultCount becomes limit and we return 0.
func (fm *FileMatch) Limit(limit int) int {
	matchCount := fm.ChunkMatches.MatchCount()
	symbolCount := len(fm.Symbols)

@ -292,9 +292,10 @@ func HTTPMiddleware(logger log.Logger, next http.Handler, siteConfig conftypes.S

// Recoverer is a recovery handler to wrap the stdlib net/http Mux.
// Example:
//	mux := http.NewServeMux
//	...
//	http.Handle("/", sentry.Recoverer(mux))
//
//	mux := http.NewServeMux
//	...
//	http.Handle("/", sentry.Recoverer(mux))
func loggingRecoverer(logger log.Logger, handler http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		defer func() {

@ -8,11 +8,11 @@ package on
// This is essentially a generic type, with two parameters (albeit these are
// mostly exposed in OnResult):
//
// RepoID: An opaque identifier used to identify unique repositories. This
//         must be able to be used as a map key.
// Revision: An object that identifies the specific revision. There are no
//           requirements for this type, as it will be returned as-is in
//           Revisions().
//   - RepoID: An opaque identifier used to identify unique repositories. This
//     must be able to be used as a map key.
//   - Revision: An object that identifies the specific revision. There are no
//     requirements for this type, as it will be returned as-is in
//     Revisions().
type RepoRevisionAggregator struct {
	results []*RuleRevisions
}

@ -38,12 +38,12 @@ func NewDocumentationResultEdge(id, inV, outV uint64) DocumentationResultEdge {
//
// It allows one to represent hierarchical documentation like:
//
//	"project" (e.g. an HTTP library)
//	-> "documentationResult" (e.g. "HTTP library" library documentation)
//	  -> "documentationResult" (e.g. docs for the "Server" class in the HTTP library)
//	    -> "documentationResult" (e.g. docs for the "Listen" method on the "Server" class)
//	    -> "documentationResult" (e.g. docs for the "Shutdown" method on the "Server" class)
//	    -> ...
//	"project" (e.g. an HTTP library)
//	-> "documentationResult" (e.g. "HTTP library" library documentation)
//	  -> "documentationResult" (e.g. docs for the "Server" class in the HTTP library)
//	    -> "documentationResult" (e.g. docs for the "Listen" method on the "Server" class)
//	    -> "documentationResult" (e.g. docs for the "Shutdown" method on the "Server" class)
//	    -> ...
//
// Note: the "project" -> "documentationResult" attachment above is expressed via a
// "documentationResult" edge, since the parent is not a "documentationResult" vertex.
@ -100,10 +100,10 @@ func NewDocumentationResult(id uint64, result Documentation) DocumentationResult
//
// Attached to this vertex MUST be two "documentationString" vertices:
//
// 1. A "documentationString" vertex with `type: "label"`, which is a one-line label for this section
//    of documentation.
// 1. A "documentationString" vertex with `type: "detail"`, which is a multi-line detailed string
//    for this section of documentation.
//  1. A "documentationString" vertex with `type: "label"`, which is a one-line label for this section
//     of documentation.
//  1. A "documentationString" vertex with `type: "detail"`, which is a multi-line detailed string
//     for this section of documentation.
//
// Both are attached to the documentationResult via a "documentationString" edge.
//
@ -275,11 +275,11 @@ const (
// strings, which are "documentationString" vertices. The overall structure looks like the
// following roughly:
//
//	{id: 53, type:"vertex", label:"documentationResult", result:{identifier:"httpserver", ...}}
//	{id: 54, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A single-line label for an HTTPServer instance"}}
//	{id: 55, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A multi-line\n detailed\n explanation of an HTTPServer instance, what it does, etc."}}
//	{id: 54, type:"edge", label:"documentationString", inV: 54, outV: 53, kind:"label"}
//	{id: 54, type:"edge", label:"documentationString", inV: 55, outV: 53, kind:"detail"}
//	{id: 53, type:"vertex", label:"documentationResult", result:{identifier:"httpserver", ...}}
//	{id: 54, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A single-line label for an HTTPServer instance"}}
//	{id: 55, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A multi-line\n detailed\n explanation of an HTTPServer instance, what it does, etc."}}
//	{id: 54, type:"edge", label:"documentationString", inV: 54, outV: 53, kind:"label"}
//	{id: 54, type:"edge", label:"documentationString", inV: 55, outV: 53, kind:"detail"}
//
// Hover, definition, etc. results can then be attached to ranges within the "documentationString"
// vertices themselves (vertex 54 / 55), see the docs for DocumentationString for more details.
@ -332,13 +332,12 @@ func NewDocumentationStringEdge(id, inV, outV uint64, kind DocumentationStringKi
// in the documentation string's markup content itself) using a "contains" edge. This enables
// ranges within a documentation string to have:
//
// * "hoverResult"s (e.g. you can hover over a type signature in the documentation string and get info)
// * "definitionResult" and "referenceResults"
// * "documentationResult" itself - allowing a range of text in one documentation to link to another
//   documentation section (e.g. in the same way a hyperlink works in HTML.)
// * "moniker" to link to another project's hover/definition/documentation results, across
//   repositories.
//
//   - "hoverResult"s (e.g. you can hover over a type signature in the documentation string and get info)
//   - "definitionResult" and "referenceResults"
//   - "documentationResult" itself - allowing a range of text in one documentation to link to another
//     documentation section (e.g. in the same way a hyperlink works in HTML.)
//   - "moniker" to link to another project's hover/definition/documentation results, across
//     repositories.
type DocumentationString struct {
	Vertex
	Result MarkupContent `json:"result"`

@ -54,8 +54,8 @@ func LocalGitGetChildrenFunc(repoRoot string) GetChildrenFunc {
// cleanDirectoriesForLsTree sanitizes the input dirnames to a git ls-tree command. There are a
// few peculiarities handled here:
//
//  1. The root of the tree must be indicated with `.`, and
//  2. In order for git ls-tree to return a directory's contents, the name must end in a slash.
//  1. The root of the tree must be indicated with `.`, and
//  2. In order for git ls-tree to return a directory's contents, the name must end in a slash.
func cleanDirectoriesForLsTree(dirnames []string) []string {
	var args []string
	for _, dir := range dirnames {

@ -43,9 +43,8 @@ func fmtLine(line int, prefixWidth int, text string) string {
//	| |            ^^^^^^^^^^^^^^^^ actual
//	|6|   return;
//
//
// Only operates on locations with the same URI.
// It doesn't make sense to diff anything here when we don't have that.
// Only operates on locations with the same URI. It doesn't make sense to diff
// anything here when we don't have that.
func DrawLocations(contents string, expected, actual Location, context int) (string, error) {
	if expected.URI != actual.URI {
		return "", errors.New("Must pass in two locations with the same URI")

@ -43,15 +43,15 @@ var _ Warning = (*warning)(nil)
// Consumers of these errors should then use errors.As to check if the error is of a warning type
// and based on that, should just log it as a warning. For example:
//
//	var ref errors.Warning
//	err := someFunctionThatReturnsAWarningErrorOrACriticalError()
//	if err != nil && errors.As(err, &ref) {
//	    log.Warnf("failed to do X: %v", err)
//	}
//	var ref errors.Warning
//	err := someFunctionThatReturnsAWarningErrorOrACriticalError()
//	if err != nil && errors.As(err, &ref) {
//	    log.Warnf("failed to do X: %v", err)
//	}
//
//	if err != nil {
//	    return err
//	}
//	if err != nil {
//	    return err
//	}
func NewWarningError(err error) *warning {
	return &warning{
		error: err,

@ -6,7 +6,7 @@ import (
)

// This regex is taken from here:
// https://github.com/acarl005/stripansi/blob/5a71ef0e047df0427e87a79f27009029921f1f9b/stripansi.go
//	https://github.com/acarl005/stripansi/blob/5a71ef0e047df0427e87a79f27009029921f1f9b/stripansi.go
const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))"

var ansiRegex = regexp.MustCompile(ansi)

@ -94,10 +94,14 @@ func makeFilters(containerLabel, containerName string, filters ...string) string
	return strings.Join(filters, ",")
}

// makeBy returns the suffix of the aggregator expression (e.g., max by (queue)),
//                                                                   ^^^^^^^^^^
// as well as a prefix to be used as part of the legend consisting of placeholder
// values that will render to the value of the label/variable in the Grafana UI.
// makeBy returns the suffix of the aggregator expression.
//
//	e.g. max by (queue)
//	         ^^^^^^^^^^
//
// legendPrefix is a prefix to be used as part of the legend consisting of
// placeholder values that will render to the value of the label/variable in
// the Grafana UI.
func makeBy(labels ...string) (aggregateExprSuffix string, legendPrefix string) {
	if len(labels) == 0 {
		return "", ""

@ -5,19 +5,19 @@
// When editing this package or introducing any shared declarations, you should abide strictly by the
// following rules:
//
// 1. Do NOT declare a shared definition unless 5+ dashboards will use it. Sharing dashboard
//    declarations means the codebase becomes more complex and non-declarative which we want to avoid
//    so repeat yourself instead if it applies to less than 5 dashboards.
//  1. Do NOT declare a shared definition unless 5+ dashboards will use it. Sharing dashboard
//     declarations means the codebase becomes more complex and non-declarative which we want to avoid
//     so repeat yourself instead if it applies to less than 5 dashboards.
//
// 2. ONLY declare shared Observables. Introducing shared Rows or Groups prevents individual dashboard
//    maintainers from holistically considering both the layout of dashboards as well as the
//    metrics and alerts defined within them -- which we do not want.
//  2. ONLY declare shared Observables. Introducing shared Rows or Groups prevents individual dashboard
//     maintainers from holistically considering both the layout of dashboards as well as the
//     metrics and alerts defined within them -- which we do not want.
//
// 3. Use the sharedObservable type and do NOT parameterize more than just the container name. It may
//    be tempting to pass an alerting threshold as an argument, or parameterize whether a critical
//    alert is defined -- but this makes reasoning about alerts at a high level much more difficult.
//    If you have a need for this, it is a strong signal you should NOT be using the shared definition
//    anymore and should instead copy it and apply your modifications.
//  3. Use the sharedObservable type and do NOT parameterize more than just the container name. It may
//     be tempting to pass an alerting threshold as an argument, or parameterize whether a critical
//     alert is defined -- but this makes reasoning about alerts at a high level much more difficult.
//     If you have a need for this, it is a strong signal you should NOT be using the shared definition
//     anymore and should instead copy it and apply your modifications.
//
// Learn more about monitoring in https://handbook.sourcegraph.com/engineering/observability/monitoring_pillars
package shared

@ -101,7 +101,7 @@ func observablePanelID(groupIndex, rowIndex, observableIndex int) uint {
// primarily used in the URL, e.g. /-/debug/grafana/d/syntect-server/<UID> and allows us to have
// static URLs we can document like:
//
// Go to https://sourcegraph.example.com/-/debug/grafana/d/syntect-server/syntect-server
//	Go to https://sourcegraph.example.com/-/debug/grafana/d/syntect-server/syntect-server
//
// Instead of having to describe all the steps to navigate there because the UID is random.
func isValidGrafanaUID(s string) bool {

@ -11,9 +11,9 @@ import (
//
// You can make any customization you want to a graph panel by using `ObservablePanel.With`:
//
//	Panel: monitoring.Panel().With(func(o monitoring.Observable, p *sdk.Panel) {
//	    // modify 'p.GraphPanel' or 'p.HeatmapPanel' etc. with desired changes
//	}),
//	Panel: monitoring.Panel().With(func(o monitoring.Observable, p *sdk.Panel) {
//	    // modify 'p.GraphPanel' or 'p.HeatmapPanel' etc. with desired changes
//	}),
//
// When writing a custom `ObservablePanelOption`, keep in mind that:
//
@ -33,7 +33,7 @@ import (
// `panelOptionsLibrary` that returns a `ObservablePanelOption`. The function should be
// It can then be used with the `ObservablePanel.With`:
//
//	Panel: monitoring.Panel().With(monitoring.PanelOptions.MyCustomization),
//	Panel: monitoring.Panel().With(monitoring.PanelOptions.MyCustomization),
//
// Using a shared prefix helps with discoverability of available options.
type ObservablePanelOption func(Observable, *sdk.Panel)

@ -45,7 +45,6 @@ func (r *promRule) validate() error {
// see:
//
// https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/
//
type promRulesFile struct {
	Groups []promGroup
}

@ -3,81 +3,101 @@ package schema
import _ "embed"

// AWSCodeCommitSchemaJSON is the content of the file "aws_codecommit.schema.json".
//
//go:embed aws_codecommit.schema.json
var AWSCodeCommitSchemaJSON string

// BatchSpecSchemaJSON is the content of the file "batch_spec.schema.json".
//
//go:embed batch_spec.schema.json
var BatchSpecSchemaJSON string

// BitbucketCloudSchemaJSON is the content of the file "bitbucket_cloud.schema.json".
//
//go:embed bitbucket_cloud.schema.json
var BitbucketCloudSchemaJSON string

// BitbucketServerSchemaJSON is the content of the file "bitbucket_server.schema.json".
//
//go:embed bitbucket_server.schema.json
var BitbucketServerSchemaJSON string

// ChangesetSpecSchemaJSON is the content of the file "changeset_spec.schema.json".
//
//go:embed changeset_spec.schema.json
var ChangesetSpecSchemaJSON string

// GerritSchemaJSON is the content of the file "gerrit.schema.json".
//
//go:embed gerrit.schema.json
var GerritSchemaJSON string

// GitHubSchemaJSON is the content of the file "github.schema.json".
//
//go:embed github.schema.json
var GitHubSchemaJSON string

// GitLabSchemaJSON is the content of the file "gitlab.schema.json".
//
//go:embed gitlab.schema.json
var GitLabSchemaJSON string

// GitoliteSchemaJSON is the content of the file "gitolite.schema.json".
//
//go:embed gitolite.schema.json
var GitoliteSchemaJSON string

// GoModulesSchemaJSON is the content of the file "go-modules.schema.json".
//
//go:embed go-modules.schema.json
var GoModulesSchemaJSON string

// JVMPackagesSchemaJSON is the content of the file "jvm-packages.schema.json".
//
//go:embed jvm-packages.schema.json
var JVMPackagesSchemaJSON string

// NpmPackagesSchemaJSON is the content of the file "npm-packages.schema.json".
//
//go:embed npm-packages.schema.json
var NpmPackagesSchemaJSON string

// PythonPackagesSchemaJSON is the content of the file "python-packages.schema.json".
//
//go:embed python-packages.schema.json
var PythonPackagesSchemaJSON string

// RustPackagesSchemaJSON is the content of the file "rust-packages.schema.json".
//
//go:embed rust-packages.schema.json
var RustPackagesSchemaJSON string

// OtherExternalServiceSchemaJSON is the content of the file "other_external_service.schema.json".
//
//go:embed other_external_service.schema.json
var OtherExternalServiceSchemaJSON string

// PerforceSchemaJSON is the content of the file "perforce.schema.json".
//
//go:embed perforce.schema.json
var PerforceSchemaJSON string

// PhabricatorSchemaJSON is the content of the file "phabricator.schema.json".
//
//go:embed phabricator.schema.json
var PhabricatorSchemaJSON string

// PagureSchemaJSON is the content of the file "pagure.schema.json".
//
//go:embed pagure.schema.json
var PagureSchemaJSON string

// SettingsSchemaJSON is the content of the file "settings.schema.json".
//
//go:embed settings.schema.json
var SettingsSchemaJSON string

// SiteSchemaJSON is the content of the file "site.schema.json".
//
//go:embed site.schema.json
var SiteSchemaJSON string