From 27569d1fc74ea069a0331857819a9635ce40502f Mon Sep 17 00:00:00 2001 From: Keegan Carruthers-Smith Date: Tue, 13 Sep 2022 09:44:06 +0200 Subject: [PATCH] all: run gofmt -s -w from 1.19 (#41629) gofmt in go1.19 does a lot of reformating of godoc strings, mostly to make them more consistent around lists. Test Plan: CI --- cmd/frontend/auth/user.go | 28 ++++++------ cmd/frontend/backend/orgs.go | 5 ++- cmd/frontend/graphqlbackend/graphqlbackend.go | 3 +- cmd/frontend/graphqlbackend/schema.go | 12 ++++++ cmd/frontend/internal/app/ui/doc.go | 13 +++--- cmd/frontend/internal/app/ui/handlers.go | 15 ++++--- cmd/frontend/internal/app/ui/router.go | 12 +++--- .../internal/cli/middleware/goimportpath.go | 6 +-- cmd/frontend/internal/highlight/highlight.go | 14 +++--- .../internal/httpapi/releasecache/http.go | 8 ++-- cmd/frontend/internal/session/session.go | 8 ++-- cmd/gitserver/server/cleanup.go | 6 +-- cmd/gitserver/server/server.go | 2 +- cmd/gitserver/server/serverutil.go | 2 +- cmd/searcher/internal/search/search.go | 14 +++--- cmd/searcher/internal/search/store.go | 8 ++-- cmd/symbols/squirrel/breadcrumbs.go | 5 ++- dev/build-tracker/main.go | 5 ++- dev/ci/runtype/runtype.go | 4 +- dev/codeintel-qa/cmd/upload/state.go | 10 ++--- dev/sg/internal/analytics/tracer.go | 4 +- dev/sg/internal/images/images.go | 6 +-- dev/sg/internal/migration/squash.go | 4 +- dev/sg/linters/linters.go | 17 ++++---- .../executor/internal/apiclient/baseclient.go | 28 ++++++------ .../frontend/internal/auth/saml/middleware.go | 10 ++--- .../internal/authz/resolvers/repositories.go | 7 +-- .../internal/authz/resolvers/users.go | 7 +-- .../codemonitors/resolvers/resolvers.go | 23 ---------- .../dev/ci/internal/buildkite/buildkite.go | 4 +- enterprise/dev/ci/internal/ci/changed/diff.go | 2 +- enterprise/dev/ci/internal/ci/operations.go | 6 +-- enterprise/dev/deployment-notifier/trace.go | 16 +++---- .../internal/authz/perforce/protects.go | 4 +- .../internal/batches/search/syntax/parser.go | 10 
++--- .../internal/batches/store/changeset_specs.go | 16 +++---- .../internal/database/integration_test.go | 3 +- .../insights/background/background.go | 6 +-- .../background/historical_enqueuer.go | 33 +++++++------- .../background/insight_enqueuer_test.go | 1 - .../insights/discovery/all_repos_iterator.go | 1 - enterprise/internal/insights/store/store.go | 16 +++---- .../internal/license/generate-license.go | 6 +-- enterprise/internal/license/read-license.go | 2 +- .../migrations/codeintel/migrator.go | 20 +++++---- internal/authz/header.go | 6 +-- internal/authz/iface.go | 38 ++++++++-------- internal/check/check.go | 23 +++++----- .../inference/luatypes/recognizers.go | 8 ++-- internal/codeintel/autoindexing/service.go | 8 ++-- .../codeintel/commitgraph/commit_graph.go | 12 +++--- internal/codeintel/dependencies/live/init.go | 2 +- .../background/cleanup/unknown_commits.go | 8 ++-- internal/codeintel/uploads/service.go | 1 - internal/conf/conf.go | 1 - internal/database/basestore/rows.go | 16 +++---- internal/database/basestore/store.go | 36 ++++++++-------- internal/database/dbconn/open.go | 42 +++++++++--------- internal/database/dbtest/dbtest.go | 4 +- .../migration/definition/definition.go | 10 ++--- internal/database/users.go | 2 +- internal/endpoint/endpoint.go | 7 ++- internal/env/baseconfig.go | 40 ++++++++--------- internal/extsvc/pypi/client.go | 1 - internal/gitserver/client.go | 3 +- internal/gitserver/commands.go | 9 ++-- internal/instrumentation/http.go | 8 ++-- internal/ratelimit/monitor.go | 6 +-- internal/rcache/mutex.go | 9 ++-- internal/redispool/redispool.go | 6 +-- internal/repos/scheduler.go | 13 +++--- internal/repos/syncer.go | 10 ++--- internal/search/job/jobutil/expression_job.go | 28 ++++++------ internal/search/query/transformer.go | 15 +++++-- internal/search/query/types.go | 9 ++-- internal/search/repo_revs.go | 16 +++---- internal/search/result/file.go | 4 +- internal/trace/httptrace.go | 7 +-- lib/batches/on/aggregator.go | 10 
++--- lib/codeintel/lsif/protocol/documentation.go | 43 +++++++++---------- lib/codeintel/pathexistence/git.go | 4 +- .../tools/lsif-index-tester/range_differ.go | 5 +-- lib/errors/warning.go | 16 +++---- lib/output/visible_string_width.go | 2 +- monitoring/definitions/shared/constructor.go | 12 ++++-- monitoring/definitions/shared/shared.go | 22 +++++----- monitoring/monitoring/dashboards.go | 2 +- monitoring/monitoring/panel_options.go | 8 ++-- monitoring/monitoring/prometheus.go | 1 - schema/stringdata.go | 20 +++++++++ 90 files changed, 500 insertions(+), 485 deletions(-) diff --git a/cmd/frontend/auth/user.go b/cmd/frontend/auth/user.go index 314eca62f17..265b5204192 100644 --- a/cmd/frontend/auth/user.go +++ b/cmd/frontend/auth/user.go @@ -31,20 +31,20 @@ type GetAndSaveUserOp struct { // the necessary updates to the DB, and returns the user ID after the updates have been applied. // // At a high level, it does the following: -// 1. Determine the identity of the user by applying the following rules in order: -// a. If ctx contains an authenticated Actor, the Actor's identity is the user identity. -// b. Look up the user by external account ID. -// c. If the email specified in op.UserProps is verified, Look up the user by verified email. -// If op.LookUpByUsername is true, look up by username instead of verified email. -// (Note: most clients should look up by email, as username is typically insecure.) -// d. If op.CreateIfNotExist is true, attempt to create a new user with the properties -// specified in op.UserProps. This may fail if the desired username is already taken. -// e. If a new user is successfully created, attempt to grant pending permissions. -// 2. Ensure that the user is associated with the external account information. This means -// creating the external account if it does not already exist or updating it if it -// already does. -// 3. Update any user props that have changed. -// 4. Return the user ID. +// 1. 
Determine the identity of the user by applying the following rules in order: +// a. If ctx contains an authenticated Actor, the Actor's identity is the user identity. +// b. Look up the user by external account ID. +// c. If the email specified in op.UserProps is verified, Look up the user by verified email. +// If op.LookUpByUsername is true, look up by username instead of verified email. +// (Note: most clients should look up by email, as username is typically insecure.) +// d. If op.CreateIfNotExist is true, attempt to create a new user with the properties +// specified in op.UserProps. This may fail if the desired username is already taken. +// e. If a new user is successfully created, attempt to grant pending permissions. +// 2. Ensure that the user is associated with the external account information. This means +// creating the external account if it does not already exist or updating it if it +// already does. +// 3. Update any user props that have changed. +// 4. Return the user ID. // // 🚨 SECURITY: It is the caller's responsibility to ensure the veracity of the information that // op contains (e.g., by receiving it from the appropriate authentication mechanism). It must diff --git a/cmd/frontend/backend/orgs.go b/cmd/frontend/backend/orgs.go index ce77b017589..0b666bd0ffd 100644 --- a/cmd/frontend/backend/orgs.go +++ b/cmd/frontend/backend/orgs.go @@ -15,8 +15,9 @@ var ErrNotAuthenticated = errors.New("not authenticated") // CheckOrgAccessOrSiteAdmin returns an error if: // (1) if we are on Cloud instance and the user is not a member of the organization // (2) if we are NOT on Cloud and -// (a) the user is not a member of the organization -// (b) the user is not a site admin +// +// (a) the user is not a member of the organization +// (b) the user is not a site admin // // It is used when an action on an org can only be performed by the // organization's members, (or site-admins - not on Cloud). 
diff --git a/cmd/frontend/graphqlbackend/graphqlbackend.go b/cmd/frontend/graphqlbackend/graphqlbackend.go index f92cd2107c4..226bb9bf2aa 100644 --- a/cmd/frontend/graphqlbackend/graphqlbackend.go +++ b/cmd/frontend/graphqlbackend/graphqlbackend.go @@ -331,8 +331,7 @@ var blocklistedPrometheusTypeNames = map[string]struct{}{ // not worth tracking. You can find a complete list of the ones Prometheus is // currently tracking via: // -// sum by (type)(src_graphql_field_seconds_count) -// +// sum by (type)(src_graphql_field_seconds_count) func prometheusTypeName(typeName string) string { if _, ok := blocklistedPrometheusTypeNames[typeName]; ok { return "other" diff --git a/cmd/frontend/graphqlbackend/schema.go b/cmd/frontend/graphqlbackend/schema.go index f0eb73fe879..4d8e15877fa 100644 --- a/cmd/frontend/graphqlbackend/schema.go +++ b/cmd/frontend/graphqlbackend/schema.go @@ -5,49 +5,61 @@ import ( ) // mainSchema is the main raw graqhql schema. +// //go:embed schema.graphql var mainSchema string // batchesSchema is the Batch Changes raw graqhql schema. +// //go:embed batches.graphql var batchesSchema string // codeIntelSchema is the Code Intel raw graqhql schema. +// //go:embed codeintel.graphql var codeIntelSchema string // dotcomSchema is the Dotcom schema extension raw graqhql schema. +// //go:embed dotcom.graphql var dotcomSchema string // licenseSchema is the Licensing raw graqhql schema. +// //go:embed license.graphql var licenseSchema string // codeMonitorsSchema is the Code Monitoring raw graqhql schema. +// //go:embed code_monitors.graphql var codeMonitorsSchema string // insightsSchema is the Code Insights raw graqhql schema. +// //go:embed insights.graphql var insightsSchema string // authzSchema is the Authz raw graqhql schema. +// //go:embed authz.graphql var authzSchema string // computeSchema is an experimental graphql endpoint for computing values from search results. 
+// //go:embed compute.graphql var computeSchema string // searchContextsSchema is the Search Contexts raw graqhql schema. +// //go:embed search_contexts.graphql var searchContextsSchema string // notebooksSchema is the Notebooks raw graqhql schema. +// //go:embed notebooks.graphql var notebooksSchema string // insightsAggregationsSchema is the Code Insights Aggregations raw graqhql schema. +// //go:embed insights_aggregations.graphql var insightsAggregationsSchema string diff --git a/cmd/frontend/internal/app/ui/doc.go b/cmd/frontend/internal/app/ui/doc.go index 0ee43d0727b..649ac6f2128 100644 --- a/cmd/frontend/internal/app/ui/doc.go +++ b/cmd/frontend/internal/app/ui/doc.go @@ -1,21 +1,20 @@ // Package ui handles server-side rendering of the Sourcegraph web app. // -// Development +// # Development // // To develop, simply update the template files in cmd/frontend/internal/app/ui/... // and reload the page (the templates will be automatically reloaded). // -// Testing the error page +// # Testing the error page // // Testing out the layout/styling of the error page that is used to handle // internal server errors, 404s, etc. 
is very easy by visiting: // -// http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500 +// http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500 // // The parameters are as follows: // -// nodebug=true -- hides error messages (which is ALWAYS the case in production) -// error=theerror -- controls the error message text -// status=500 -- controls the status code -// +// nodebug=true -- hides error messages (which is ALWAYS the case in production) +// error=theerror -- controls the error message text +// status=500 -- controls the status code package ui diff --git a/cmd/frontend/internal/app/ui/handlers.go b/cmd/frontend/internal/app/ui/handlers.go index 2ab72b20ffa..90afd04ea40 100644 --- a/cmd/frontend/internal/app/ui/handlers.go +++ b/cmd/frontend/internal/app/ui/handlers.go @@ -106,6 +106,7 @@ type serveErrorHandler func(w http.ResponseWriter, r *http.Request, db database. // mockNewCommon is used in tests to mock newCommon (duh!). // // Ensure that the mock is reset at the end of every test by adding a call like the following: +// // defer func() { // mockNewCommon = nil // }() @@ -116,13 +117,13 @@ var mockNewCommon func(w http.ResponseWriter, r *http.Request, title string, ser // In the event of the repository having been renamed, the request is handled // by newCommon and nil, nil is returned. Basic usage looks like: // -// common, err := newCommon(w, r, noIndex, serveError) -// if err != nil { -// return err -// } -// if common == nil { -// return nil // request was handled -// } +// common, err := newCommon(w, r, noIndex, serveError) +// if err != nil { +// return err +// } +// if common == nil { +// return nil // request was handled +// } // // In the case of a repository that is cloning, a Common data structure is // returned but it has an incomplete RevSpec. 
diff --git a/cmd/frontend/internal/app/ui/router.go b/cmd/frontend/internal/app/ui/router.go index e8a18631cc5..53b8a78f341 100644 --- a/cmd/frontend/internal/app/ui/router.go +++ b/cmd/frontend/internal/app/ui/router.go @@ -378,9 +378,8 @@ func initRouter(db database.DB, router *mux.Router, codeIntelResolver graphqlbac // The scheme, host, and path in the specified url override ones in the incoming // request. For example: // -// staticRedirectHandler("http://google.com") serving "https://sourcegraph.com/foobar?q=foo" -> "http://google.com/foobar?q=foo" -// staticRedirectHandler("/foo") serving "https://sourcegraph.com/bar?q=foo" -> "https://sourcegraph.com/foo?q=foo" -// +// staticRedirectHandler("http://google.com") serving "https://sourcegraph.com/foobar?q=foo" -> "http://google.com/foobar?q=foo" +// staticRedirectHandler("/foo") serving "https://sourcegraph.com/bar?q=foo" -> "https://sourcegraph.com/foo?q=foo" func staticRedirectHandler(u string, code int) http.Handler { target, err := url.Parse(u) if err != nil { @@ -420,9 +419,8 @@ func limitString(s string, n int, ellipsis bool) string { // Clients that wish to return their own HTTP status code should use this from // their handler: // -// serveError(w, r, err, http.MyStatusCode) -// return nil -// +// serveError(w, r, err, http.MyStatusCode) +// return nil func handler(db database.DB, f handlerFunc) http.Handler { h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer func() { @@ -544,7 +542,7 @@ func serveErrorNoDebug(w http.ResponseWriter, r *http.Request, db database.DB, e // serveErrorTest makes it easy to test styling/layout of the error template by // visiting: // -// http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500 +// http://localhost:3080/__errorTest?nodebug=true&error=theerror&status=500 // // The `nodebug=true` parameter hides error messages (which is ALWAYS the case // in production), `error` controls the error message text, and status controls diff 
--git a/cmd/frontend/internal/cli/middleware/goimportpath.go b/cmd/frontend/internal/cli/middleware/goimportpath.go index 09912f8bc83..62039bc2492 100644 --- a/cmd/frontend/internal/cli/middleware/goimportpath.go +++ b/cmd/frontend/internal/cli/middleware/goimportpath.go @@ -36,9 +36,9 @@ var goImportMetaTagTemplate = template.Must(template.New("").Parse(` // // It implements the following mapping: // -// 1. If the username (first path element) is "sourcegraph", consider it to be a vanity -// import path pointing to github.com/sourcegraph/ as the clone URL. -// 2. All other requests are served with 404 Not Found. +// 1. If the username (first path element) is "sourcegraph", consider it to be a vanity +// import path pointing to github.com/sourcegraph/ as the clone URL. +// 2. All other requests are served with 404 Not Found. // // 🚨 SECURITY: This handler is served to all clients, even on private servers to clients who have // not authenticated. It must not reveal any sensitive information. diff --git a/cmd/frontend/internal/highlight/highlight.go b/cmd/frontend/internal/highlight/highlight.go index ad7e073b6b2..bfa49a07cb1 100644 --- a/cmd/frontend/internal/highlight/highlight.go +++ b/cmd/frontend/internal/highlight/highlight.go @@ -304,7 +304,7 @@ func (h *HighlightedCode) LinesForRanges(ranges []LineRange) ([][]string, error) return lineRanges, nil } -/// identifyError returns true + the problem code if err matches a known error. +// identifyError returns true + the problem code if err matches a known error. func identifyError(err error) (bool, string) { var problem string if errors.Is(err, gosyntect.ErrRequestTooLarge) { @@ -609,15 +609,15 @@ func CodeAsLines(ctx context.Context, p Params) ([]template.HTML, bool, error) { // normalizeFilepath ensures that the filepath p has a lowercase extension, i.e. 
it applies the // following transformations: // -// a/b/c/FOO.TXT β†’ a/b/c/FOO.txt -// FOO.Sh β†’ FOO.sh +// a/b/c/FOO.TXT β†’ a/b/c/FOO.txt +// FOO.Sh β†’ FOO.sh // // The following are left unmodified, as they already have lowercase extensions: // -// a/b/c/FOO.txt -// a/b/c/Makefile -// Makefile.am -// FOO.txt +// a/b/c/FOO.txt +// a/b/c/Makefile +// Makefile.am +// FOO.txt // // It expects the filepath uses forward slashes always. func normalizeFilepath(p string) string { diff --git a/cmd/frontend/internal/httpapi/releasecache/http.go b/cmd/frontend/internal/httpapi/releasecache/http.go index c10984b4b56..7ef3eb32955 100644 --- a/cmd/frontend/internal/httpapi/releasecache/http.go +++ b/cmd/frontend/internal/httpapi/releasecache/http.go @@ -19,10 +19,10 @@ import ( // handler implements a http.Handler that wraps a VersionCache to provide two // endpoints: // -// - GET /.*: this looks up the given branch and returns the latest -// version, if any. -// - POST /webhooks: this triggers an update of the version cache if given a -// valid GitHub webhook. +// - GET /.*: this looks up the given branch and returns the latest +// version, if any. +// - POST /webhooks: this triggers an update of the version cache if given a +// valid GitHub webhook. // // The routing relies on a previous handler having injected a gorilla.Mux // variable called "rest" that includes the path to route. diff --git a/cmd/frontend/internal/session/session.go b/cmd/frontend/internal/session/session.go index d4007010cf2..ecc48fe6dce 100644 --- a/cmd/frontend/internal/session/session.go +++ b/cmd/frontend/internal/session/session.go @@ -308,10 +308,10 @@ func CookieMiddleware(logger log.Logger, db database.DB, next http.Handler) http // CookieMiddlewareWithCSRFSafety is a middleware that authenticates HTTP requests using the // provided cookie (if any), *only if* one of the following is true. 
// -// - The request originates from a trusted origin (the same origin, browser extension origin, or one -// in the site configuration corsOrigin allow list.) -// - The request has the special X-Requested-With header present, which is only possible to send in -// browsers if the request passed the CORS preflight request (see the handleCORSRequest function.) +// - The request originates from a trusted origin (the same origin, browser extension origin, or one +// in the site configuration corsOrigin allow list.) +// - The request has the special X-Requested-With header present, which is only possible to send in +// browsers if the request passed the CORS preflight request (see the handleCORSRequest function.) // // If one of the above are not true, the request is still allowed to proceed but will be // unauthenticated unless some other authentication is provided, such as an access token. diff --git a/cmd/gitserver/server/cleanup.go b/cmd/gitserver/server/cleanup.go index 91d497c9816..33d73b58ba5 100644 --- a/cmd/gitserver/server/cleanup.go +++ b/cmd/gitserver/server/cleanup.go @@ -1098,12 +1098,12 @@ repository should be recloned.` ) // writeSGMLog writes a log file with the format -//
// -// = +//
// -// +// = // +// func writeSGMLog(dir GitDir, m []byte) error { return os.WriteFile( dir.Path(sgmLog), diff --git a/cmd/gitserver/server/server.go b/cmd/gitserver/server/server.go index e181d2c798f..2f6543af58a 100644 --- a/cmd/gitserver/server/server.go +++ b/cmd/gitserver/server/server.go @@ -2542,7 +2542,7 @@ var ( // tag called HEAD (case insensitive), most commands will output a warning // from git: // -// warning: refname 'HEAD' is ambiguous. +// warning: refname 'HEAD' is ambiguous. // // Instead we just remove this ref. func removeBadRefs(ctx context.Context, dir GitDir) { diff --git a/cmd/gitserver/server/serverutil.go b/cmd/gitserver/server/serverutil.go index 8449c7927e7..c00d87fe562 100644 --- a/cmd/gitserver/server/serverutil.go +++ b/cmd/gitserver/server/serverutil.go @@ -30,7 +30,7 @@ import ( // GitDir is an absolute path to a GIT_DIR. // They will all follow the form: // -// ${s.ReposDir}/${name}/.git +// ${s.ReposDir}/${name}/.git type GitDir string // Path is a helper which returns filepath.Join(dir, elem...) diff --git a/cmd/searcher/internal/search/search.go b/cmd/searcher/internal/search/search.go index 3dc17f84eb6..badfd6f76ee 100644 --- a/cmd/searcher/internal/search/search.go +++ b/cmd/searcher/internal/search/search.go @@ -2,13 +2,13 @@ // a specific commit. // // Architecture Notes: -// * Archive is fetched from gitserver -// * Simple HTTP API exposed -// * Currently no concept of authorization -// * On disk cache of fetched archives to reduce load on gitserver -// * Run search on archive. Rely on OS file buffers -// * Simple to scale up since stateless -// * Use ingress with affinity to increase local cache hit ratio +// - Archive is fetched from gitserver +// - Simple HTTP API exposed +// - Currently no concept of authorization +// - On disk cache of fetched archives to reduce load on gitserver +// - Run search on archive. 
Rely on OS file buffers +// - Simple to scale up since stateless +// - Use ingress with affinity to increase local cache hit ratio package search import ( diff --git a/cmd/searcher/internal/search/store.go b/cmd/searcher/internal/search/store.go index a2e3f9ec3f8..8cea4deb0fb 100644 --- a/cmd/searcher/internal/search/store.go +++ b/cmd/searcher/internal/search/store.go @@ -46,10 +46,10 @@ const maxFileSize = 2 << 20 // 2MB; match https://sourcegraph.com/search?q=repo: // // We use an LRU to do cache eviction: // -// * When to evict is based on the total size of *.zip on disk. -// * What to evict uses the LRU algorithm. -// * We touch files when opening them, so can do LRU based on file -// modification times. +// - When to evict is based on the total size of *.zip on disk. +// - What to evict uses the LRU algorithm. +// - We touch files when opening them, so can do LRU based on file +// modification times. // // Note: The store fetches tarballs but stores zips. We want to be able to // filter which files we cache, so we need a format that supports streaming diff --git a/cmd/symbols/squirrel/breadcrumbs.go b/cmd/symbols/squirrel/breadcrumbs.go index 4a585640548..6cad2919d5c 100644 --- a/cmd/symbols/squirrel/breadcrumbs.go +++ b/cmd/symbols/squirrel/breadcrumbs.go @@ -28,8 +28,9 @@ type Breadcrumbs []Breadcrumb // Prints breadcrumbs like this: // -// v some breadcrumb -// vvv other breadcrumb +// v some breadcrumb +// vvv other breadcrumb +// // 78 | func f(f Foo) { func (bs *Breadcrumbs) pretty(w *strings.Builder, readFile ReadFileFunc) { // First collect all the breadcrumbs in a map (path -> line -> breadcrumb) for easier printing. diff --git a/dev/build-tracker/main.go b/dev/build-tracker/main.go index 19bb1cbfa3a..f8c5607f87a 100644 --- a/dev/build-tracker/main.go +++ b/dev/build-tracker/main.go @@ -151,8 +151,9 @@ func (s *Server) handleGetBuild(w http.ResponseWriter, req *http.Request) { // handleEvent handles an event received from the http listener. 
A event is valid when: // - Has the correct headers from Buildkite // - On of the following events -// * job.finished -// * build.finished +// - job.finished +// - build.finished +// // - Has valid JSON // Note that if we received an unwanted event ie. the event is not "job.finished" or "build.finished" we respond with a 200 OK regardless. // Once all the conditions are met, the event is processed in a go routine with `processEvent` diff --git a/dev/ci/runtype/runtype.go b/dev/ci/runtype/runtype.go index 36fe086686f..836fb573c41 100644 --- a/dev/ci/runtype/runtype.go +++ b/dev/ci/runtype/runtype.go @@ -256,8 +256,8 @@ func (m *RunTypeMatcher) IsBranchPrefixMatcher() bool { // ExtractBranchArgument extracts the second segment, delimited by '/', of the branch as // an argument, for example: // -// prefix/{argument} -// prefix/{argument}/something-else +// prefix/{argument} +// prefix/{argument}/something-else // // If BranchArgumentRequired, an error is returned if no argument is found. // diff --git a/dev/codeintel-qa/cmd/upload/state.go b/dev/codeintel-qa/cmd/upload/state.go index 243f83a74f6..72dbbe64096 100644 --- a/dev/codeintel-qa/cmd/upload/state.go +++ b/dev/codeintel-qa/cmd/upload/state.go @@ -20,11 +20,11 @@ import ( // given repo, as well as the status of each given upload. When there is a change of // state for a repository, it is printed. 
The state changes that can occur are: // -// - An upload fails to process (returns an error) -// - An upload completes processing -// - The last upload for a repository completes processing, but the -// containing repo has a stale commit graph -// - A repository with no pending uploads has a fresh commit graph +// - An upload fails to process (returns an error) +// - An upload completes processing +// - The last upload for a repository completes processing, but the +// containing repo has a stale commit graph +// - A repository with no pending uploads has a fresh commit graph func monitor(ctx context.Context, repoNames []string, uploads []uploadMeta) error { var oldState map[string]repoState waitMessageDisplayed := make(map[string]struct{}, len(repoNames)) diff --git a/dev/sg/internal/analytics/tracer.go b/dev/sg/internal/analytics/tracer.go index 55d714ebd62..867fed79653 100644 --- a/dev/sg/internal/analytics/tracer.go +++ b/dev/sg/internal/analytics/tracer.go @@ -14,10 +14,10 @@ const spanCategoryKey attribute.Key = "sg.span_category" // StartSpan starts an OpenTelemetry span from context. Example: // -// ctx, span := analytics.StartSpan(ctx, spanName, +// ctx, span := analytics.StartSpan(ctx, spanName, // trace.WithAttributes(...) // defer span.End() -// // ... do your things +// // ... do your things // // Span provides convenience functions for setting the status of the span. 
func StartSpan(ctx context.Context, spanName string, category string, opts ...trace.SpanStartOption) (context.Context, *Span) { diff --git a/dev/sg/internal/images/images.go b/dev/sg/internal/images/images.go index 26d53ef4c16..f59d9673e17 100644 --- a/dev/sg/internal/images/images.go +++ b/dev/sg/internal/images/images.go @@ -513,8 +513,7 @@ func (i *imageRepository) checkLegacy(rawImage string) bool { // Effectively the same as: // -// $ curl -H "Authorization: Bearer $token" https://index.docker.io/v2/sourcegraph/server/tags/list -// +// $ curl -H "Authorization: Bearer $token" https://index.docker.io/v2/sourcegraph/server/tags/list func (i *imageRepository) fetchDigest(tag string) (digest.Digest, error) { req, err := http.NewRequest("GET", fmt.Sprintf("https://index.docker.io/v2/%s/manifests/%s", i.name, tag), nil) if err != nil { @@ -547,8 +546,7 @@ const dockerImageTagsURL = "https://index.docker.io/v2/%s/tags/list" // Effectively the same as: // -// $ export token=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:sourcegraph/server:pull" | jq -r .token) -// +// $ export token=$(curl -s "https://auth.docker.io/token?service=registry.docker.io&scope=repository:sourcegraph/server:pull" | jq -r .token) func (i *imageRepository) fetchAllTags() ([]string, error) { if !i.isDockerRegistry { return nil, ErrUnsupportedRegistry diff --git a/dev/sg/internal/migration/squash.go b/dev/sg/internal/migration/squash.go index 97380dfdf0b..c53d0dd3b5e 100644 --- a/dev/sg/internal/migration/squash.go +++ b/dev/sg/internal/migration/squash.go @@ -489,8 +489,8 @@ var privilegedQueryPattern = lazyregexp.New(`(CREATE|COMMENT ON) EXTENSION .+;\n // // Currently, we consider the following query patterns as privileged from pg_dump output: // -// - CREATE EXTENSION ... -// - COMMENT ON EXTENSION ... +// - CREATE EXTENSION ... +// - COMMENT ON EXTENSION ... 
func splitPrivilegedMigrations(content string) (privilegedMigration string, unprivilegedMigration string) { var privilegedQueries []string unprivileged := privilegedQueryPattern.ReplaceAllStringFunc(content, func(s string) string { diff --git a/dev/sg/linters/linters.go b/dev/sg/linters/linters.go index 346555a3200..de7e6478488 100644 --- a/dev/sg/linters/linters.go +++ b/dev/sg/linters/linters.go @@ -114,15 +114,14 @@ func runCheck(name string, check check.CheckAction[*repo.State]) *linter { // yarnInstallFilter is a LineMap that filters out all the warning junk that yarn install // emits that seem inconsequential, for example: // -// warning "@storybook/addon-storyshots > react-test-renderer@16.14.0" has incorrect peer dependency "react@^16.14.0". -// warning "@storybook/addon-storyshots > @storybook/core > @storybook/core-server > @storybook/builder-webpack4 > webpack-filter-warnings-plugin@1.2.1" has incorrect peer dependency "webpack@^2.0.0 || ^3.0.0 || ^4.0.0". -// warning " > @storybook/react@6.5.9" has unmet peer dependency "require-from-string@^2.0.2". -// warning "@storybook/react > react-element-to-jsx-string@14.3.4" has incorrect peer dependency "react@^0.14.8 || ^15.0.1 || ^16.0.0 || ^17.0.1". -// warning " > @testing-library/react-hooks@8.0.0" has incorrect peer dependency "react@^16.9.0 || ^17.0.0". -// warning "storybook-addon-designs > @figspec/react@1.0.0" has incorrect peer dependency "react@^16.14.0 || ^17.0.0". -// warning Workspaces can only be enabled in private projects. -// warning Workspaces can only be enabled in private projects. -// +// warning "@storybook/addon-storyshots > react-test-renderer@16.14.0" has incorrect peer dependency "react@^16.14.0". +// warning "@storybook/addon-storyshots > @storybook/core > @storybook/core-server > @storybook/builder-webpack4 > webpack-filter-warnings-plugin@1.2.1" has incorrect peer dependency "webpack@^2.0.0 || ^3.0.0 || ^4.0.0". 
+// warning " > @storybook/react@6.5.9" has unmet peer dependency "require-from-string@^2.0.2". +// warning "@storybook/react > react-element-to-jsx-string@14.3.4" has incorrect peer dependency "react@^0.14.8 || ^15.0.1 || ^16.0.0 || ^17.0.1". +// warning " > @testing-library/react-hooks@8.0.0" has incorrect peer dependency "react@^16.9.0 || ^17.0.0". +// warning "storybook-addon-designs > @figspec/react@1.0.0" has incorrect peer dependency "react@^16.14.0 || ^17.0.0". +// warning Workspaces can only be enabled in private projects. +// warning Workspaces can only be enabled in private projects. func yarnInstallFilter() run.LineMap { return func(ctx context.Context, line []byte, dst io.Writer) (int, error) { // We can't seem to do a simple prefix check, so let's just do something lazy for diff --git a/enterprise/cmd/executor/internal/apiclient/baseclient.go b/enterprise/cmd/executor/internal/apiclient/baseclient.go index 503676eb139..d2729ce839c 100644 --- a/enterprise/cmd/executor/internal/apiclient/baseclient.go +++ b/enterprise/cmd/executor/internal/apiclient/baseclient.go @@ -22,24 +22,24 @@ import ( // The following is a minimal example of decorating the base client, making the // actual logic of the decorated client extremely lean: // -// type SprocketClient struct { -// *httpcli.BaseClient +// type SprocketClient struct { +// *httpcli.BaseClient // -// baseURL *url.URL -// } +// baseURL *url.URL +// } // -// func (c *SprocketClient) Fabricate(ctx context.Context(), spec SprocketSpec) (Sprocket, error) { -// url := c.baseURL.ResolveReference(&url.URL{Path: "/new"}) +// func (c *SprocketClient) Fabricate(ctx context.Context(), spec SprocketSpec) (Sprocket, error) { +// url := c.baseURL.ResolveReference(&url.URL{Path: "/new"}) // -// req, err := httpcli.MakeJSONRequest("POST", url.String(), spec) -// if err != nil { -// return Sprocket{}, err -// } +// req, err := httpcli.MakeJSONRequest("POST", url.String(), spec) +// if err != nil { +// return Sprocket{}, err 
+// } // -// var s Sprocket -// err := c.client.DoAndDecode(ctx, req, &s) -// return s, err -// } +// var s Sprocket +// err := c.client.DoAndDecode(ctx, req, &s) +// return s, err +// } type BaseClient struct { httpClient *http.Client options BaseClientOptions diff --git a/enterprise/cmd/frontend/internal/auth/saml/middleware.go b/enterprise/cmd/frontend/internal/auth/saml/middleware.go index c25163e97e2..c0633a1b065 100644 --- a/enterprise/cmd/frontend/internal/auth/saml/middleware.go +++ b/enterprise/cmd/frontend/internal/auth/saml/middleware.go @@ -249,11 +249,11 @@ func buildAuthURLRedirect(p *provider, relayState relayState) (string, error) { // login flows. // // SAML overloads the term "RelayState". -// * In the SP-initiated login flow, it is an opaque value originated from the SP and reflected -// back in the AuthnResponse. The Sourcegraph SP uses the base64-encoded JSON of this struct as -// the RelayState. -// * In the IdP-initiated login flow, the RelayState can be any arbitrary hint, but in practice -// is the desired post-login redirect URL in plain text. +// - In the SP-initiated login flow, it is an opaque value originated from the SP and reflected +// back in the AuthnResponse. The Sourcegraph SP uses the base64-encoded JSON of this struct as +// the RelayState. +// - In the IdP-initiated login flow, the RelayState can be any arbitrary hint, but in practice +// is the desired post-login redirect URL in plain text. 
type relayState struct { ProviderID string `json:"k"` ReturnToURL string `json:"r"` diff --git a/enterprise/cmd/frontend/internal/authz/resolvers/repositories.go b/enterprise/cmd/frontend/internal/authz/resolvers/repositories.go index e650a58db70..5b4e24698ba 100644 --- a/enterprise/cmd/frontend/internal/authz/resolvers/repositories.go +++ b/enterprise/cmd/frontend/internal/authz/resolvers/repositories.go @@ -35,9 +35,10 @@ type repositoryConnectionResolver struct { // is the site admin because this method computes data from all available information in // the database. // This function takes returns a pagination of the repo IDs -// r.ids - the full slice of sorted repo IDs -// r.after - (optional) the repo ID to start the paging after (does not include the after ID itself) -// r.first - the # of repo IDs to return +// +// r.ids - the full slice of sorted repo IDs +// r.after - (optional) the repo ID to start the paging after (does not include the after ID itself) +// r.first - the # of repo IDs to return func (r *repositoryConnectionResolver) compute(ctx context.Context) ([]*types.Repo, *graphqlutil.PageInfo, error) { r.once.Do(func() { var idSubset []int32 diff --git a/enterprise/cmd/frontend/internal/authz/resolvers/users.go b/enterprise/cmd/frontend/internal/authz/resolvers/users.go index 9daaf5ad73e..53d475eea50 100644 --- a/enterprise/cmd/frontend/internal/authz/resolvers/users.go +++ b/enterprise/cmd/frontend/internal/authz/resolvers/users.go @@ -34,9 +34,10 @@ type userConnectionResolver struct { // is the site admin because this method computes data from all available information in // the database. 
// This function takes returns a pagination of the user IDs -// r.ids - the full slice of sorted user IDs -// r.after - (optional) the user ID to start the paging after (does not include the after ID itself) -// r.first - the # of user IDs to return +// +// r.ids - the full slice of sorted user IDs +// r.after - (optional) the user ID to start the paging after (does not include the after ID itself) +// r.first - the # of user IDs to return func (r *userConnectionResolver) compute(ctx context.Context) ([]*types.User, *graphqlutil.PageInfo, error) { r.once.Do(func() { var idSubset []int32 diff --git a/enterprise/cmd/frontend/internal/codemonitors/resolvers/resolvers.go b/enterprise/cmd/frontend/internal/codemonitors/resolvers/resolvers.go index 89c03ca8b58..4365557b34e 100644 --- a/enterprise/cmd/frontend/internal/codemonitors/resolvers/resolvers.go +++ b/enterprise/cmd/frontend/internal/codemonitors/resolvers/resolvers.go @@ -690,9 +690,7 @@ func (r *Resolver) ownerForID64(ctx context.Context, monitorID int64) (graphql.I return graphqlbackend.MarshalUserID(monitor.UserID), nil } -// // MonitorConnection -// type monitorConnection struct { *Resolver monitors []graphqlbackend.MonitorResolver @@ -756,9 +754,7 @@ func unmarshalAfter(after *string) (*int, error) { return &a, err } -// // Monitor -// type monitor struct { *Resolver *edb.Monitor @@ -865,9 +861,7 @@ func (r *Resolver) actionConnectionResolverWithTriggerID(ctx context.Context, tr return &monitorActionConnection{actions: actions, totalCount: int32(totalCount)}, nil } -// // MonitorTrigger <> -// type monitorTrigger struct { query graphqlbackend.MonitorQueryResolver } @@ -876,9 +870,7 @@ func (t *monitorTrigger) ToMonitorQuery() (graphqlbackend.MonitorQueryResolver, return t.query, t.query != nil } -// // Query -// type monitorQuery struct { *Resolver *edb.QueryTrigger @@ -920,9 +912,7 @@ func (q *monitorQuery) Events(ctx context.Context, args *graphqlbackend.ListEven return 
&monitorTriggerEventConnection{Resolver: q.Resolver, events: events, totalCount: totalCount}, nil } -// // MonitorTriggerEventConnection -// type monitorTriggerEventConnection struct { *Resolver events []graphqlbackend.MonitorTriggerEventResolver @@ -944,9 +934,7 @@ func (a *monitorTriggerEventConnection) PageInfo() *graphqlutil.PageInfo { return graphqlutil.NextPageCursor(string(a.events[len(a.events)-1].ID())) } -// // MonitorTriggerEvent -// type monitorTriggerEvent struct { *Resolver *edb.TriggerJob @@ -1002,7 +990,6 @@ func (m *monitorTriggerEvent) Actions(ctx context.Context, args *graphqlbackend. } // ActionConnection -// type monitorActionConnection struct { actions []graphqlbackend.MonitorAction totalCount int32 @@ -1027,9 +1014,7 @@ func (a *monitorActionConnection) PageInfo() *graphqlutil.PageInfo { panic("found non-email monitor action") } -// // Action <> -// type action struct { email graphqlbackend.MonitorEmailResolver webhook graphqlbackend.MonitorWebhookResolver @@ -1061,9 +1046,7 @@ func (a *action) ToMonitorSlackWebhook() (graphqlbackend.MonitorSlackWebhookReso return a.slackWebhook, a.slackWebhook != nil } -// // Email -// type monitorEmail struct { *Resolver *edb.EmailAction @@ -1287,9 +1270,7 @@ func intPtrToInt64Ptr(i *int) *int64 { return &j } -// // MonitorActionEmailRecipientConnection -// type monitorActionEmailRecipientsConnection struct { recipients []graphqlbackend.NamespaceResolver nextPageCursor string @@ -1311,9 +1292,7 @@ func (a *monitorActionEmailRecipientsConnection) PageInfo() *graphqlutil.PageInf return graphqlutil.NextPageCursor(a.nextPageCursor) } -// // MonitorActionEventConnection -// type monitorActionEventConnection struct { events []graphqlbackend.MonitorActionEventResolver totalCount int32 @@ -1334,9 +1313,7 @@ func (a *monitorActionEventConnection) PageInfo() *graphqlutil.PageInfo { return graphqlutil.NextPageCursor(string(a.events[len(a.events)-1].ID())) } -// // MonitorEvent -// type monitorActionEvent struct { 
*Resolver *edb.ActionJob diff --git a/enterprise/dev/ci/internal/buildkite/buildkite.go b/enterprise/dev/ci/internal/buildkite/buildkite.go index 2e07e0e72ee..d62d6066df0 100644 --- a/enterprise/dev/ci/internal/buildkite/buildkite.go +++ b/enterprise/dev/ci/internal/buildkite/buildkite.go @@ -2,8 +2,8 @@ // // Usage: // -// pipeline := buildkite.Pipeline{} -// pipeline.AddStep("check_mark", buildkite.Cmd("./dev/check/all.sh")) +// pipeline := buildkite.Pipeline{} +// pipeline.AddStep("check_mark", buildkite.Cmd("./dev/check/all.sh")) package buildkite import ( diff --git a/enterprise/dev/ci/internal/ci/changed/diff.go b/enterprise/dev/ci/internal/ci/changed/diff.go index b0a95688472..eb9178042e0 100644 --- a/enterprise/dev/ci/internal/ci/changed/diff.go +++ b/enterprise/dev/ci/internal/ci/changed/diff.go @@ -55,7 +55,7 @@ var topLevelGoDirs = []string{ // ParseDiff identifies what has changed in files by generating a Diff that can be used // to check for specific changes, e.g. // -// if diff.Has(changed.Client | changed.GraphQL) { ... } +// if diff.Has(changed.Client | changed.GraphQL) { ... } // // To introduce a new type of Diff, add it a new Diff constant above and add a check in // this function to identify the Diff. diff --git a/enterprise/dev/ci/internal/ci/operations.go b/enterprise/dev/ci/internal/ci/operations.go index 9c3472f77d0..1525d00c265 100644 --- a/enterprise/dev/ci/internal/ci/operations.go +++ b/enterprise/dev/ci/internal/ci/operations.go @@ -33,9 +33,9 @@ type CoreTestOperationsOptions struct { // notably, this is what is used to define operations that run on PRs. Please read the // following notes: // -// - opts should be used ONLY to adjust the behaviour of specific steps, e.g. by adding -// flags and not as a condition for adding steps or commands. -// - be careful not to add duplicate steps. +// - opts should be used ONLY to adjust the behaviour of specific steps, e.g. by adding +// flags and not as a condition for adding steps or commands. 
+// - be careful not to add duplicate steps. // // If the conditions for the addition of an operation cannot be expressed using the above // arguments, please add it to the switch case within `GeneratePipeline` instead. diff --git a/enterprise/dev/deployment-notifier/trace.go b/enterprise/dev/deployment-notifier/trace.go index 425967ffaef..44fb3f41940 100644 --- a/enterprise/dev/deployment-notifier/trace.go +++ b/enterprise/dev/deployment-notifier/trace.go @@ -59,14 +59,14 @@ type DeploymentTrace struct { // // The generated trace is structured as follows: // -// deploy/env --------- -// pr/1 ------------- -// -------- service/1 -// -------- service/2 -// pr/2 --------- -// ---- service/1 -// ---- service/2 -// ... +// deploy/env --------- +// pr/1 ------------- +// -------- service/1 +// -------- service/2 +// pr/2 --------- +// ---- service/1 +// ---- service/2 +// ... // // The following fields are important in each event: // diff --git a/enterprise/internal/authz/perforce/protects.go b/enterprise/internal/authz/perforce/protects.go index 1eaaaf731f8..1e754215bb0 100644 --- a/enterprise/internal/authz/perforce/protects.go +++ b/enterprise/internal/authz/perforce/protects.go @@ -16,8 +16,8 @@ import ( ) // p4ProtectLine is a parsed line from `p4 protects`. See: -// - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protect.html#Usage_Notes_..364 -// - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protects.html#p4_protects +// - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protect.html#Usage_Notes_..364 +// - https://www.perforce.com/manuals/cmdref/Content/CmdRef/p4_protects.html#p4_protects type p4ProtectLine struct { level string // e.g. read entityType string // e.g. 
user diff --git a/enterprise/internal/batches/search/syntax/parser.go b/enterprise/internal/batches/search/syntax/parser.go index c1c6cfd15c9..7754c4191c8 100644 --- a/enterprise/internal/batches/search/syntax/parser.go +++ b/enterprise/internal/batches/search/syntax/parser.go @@ -25,11 +25,11 @@ type parser struct { // // BNF-ish query syntax: // -// exprList := {exprSign} | exprSign (sep exprSign)* -// exprSign := {"-"} expr -// expr := fieldExpr | lit | quoted | pattern -// fieldExpr := lit ":" value -// value := lit | quoted +// exprList := {exprSign} | exprSign (sep exprSign)* +// exprSign := {"-"} expr +// expr := fieldExpr | lit | quoted | pattern +// fieldExpr := lit ":" value +// value := lit | quoted func Parse(input string) (ParseTree, error) { tokens := Scan(input) p := parser{tokens: tokens} diff --git a/enterprise/internal/batches/store/changeset_specs.go b/enterprise/internal/batches/store/changeset_specs.go index 3a0953dbed1..bed28a928a0 100644 --- a/enterprise/internal/batches/store/changeset_specs.go +++ b/enterprise/internal/batches/store/changeset_specs.go @@ -618,14 +618,14 @@ type GetRewirerMappingsOpts struct { // β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ // // We need to: -// 1. Find out whether our new specs should _update_ an existing -// changeset (ChangesetSpec != 0, Changeset != 0), or whether we need to create a new one. -// 2. Since we can have multiple changesets per repository, we need to match -// based on repo and external ID for imported changesets and on repo and head_ref for 'branch' changesets. -// 3. If a changeset wasn't published yet, it doesn't have an external ID nor does it have an external head_ref. 
-// In that case, we need to check whether the branch on which we _might_ -// push the commit (because the changeset might not be published -// yet) is the same or compare the external IDs in the current and new specs. +// 1. Find out whether our new specs should _update_ an existing +// changeset (ChangesetSpec != 0, Changeset != 0), or whether we need to create a new one. +// 2. Since we can have multiple changesets per repository, we need to match +// based on repo and external ID for imported changesets and on repo and head_ref for 'branch' changesets. +// 3. If a changeset wasn't published yet, it doesn't have an external ID nor does it have an external head_ref. +// In that case, we need to check whether the branch on which we _might_ +// push the commit (because the changeset might not be published +// yet) is the same or compare the external IDs in the current and new specs. // // What we want: // diff --git a/enterprise/internal/database/integration_test.go b/enterprise/internal/database/integration_test.go index 1acdbbce4b9..0f1f5a8438b 100644 --- a/enterprise/internal/database/integration_test.go +++ b/enterprise/internal/database/integration_test.go @@ -12,8 +12,7 @@ import ( // Toggles particularly slow tests. 
To enable, use `go test` with this flag, for example: // -// go test -timeout 360s -v -run ^TestIntegration_PermsStore$ github.com/sourcegraph/sourcegraph/enterprise/internal/database -slow-tests -// +// go test -timeout 360s -v -run ^TestIntegration_PermsStore$ github.com/sourcegraph/sourcegraph/enterprise/internal/database -slow-tests var slowTests = flag.Bool("slow-tests", false, "Enable very slow tests") // postgresParameterLimitTest names tests that are focused on ensuring the default diff --git a/enterprise/internal/insights/background/background.go b/enterprise/internal/insights/background/background.go index a2c61700efa..6c98482ff37 100644 --- a/enterprise/internal/insights/background/background.go +++ b/enterprise/internal/insights/background/background.go @@ -114,9 +114,9 @@ func GetBackgroundQueryRunnerJob(ctx context.Context, logger log.Logger, mainApp // newWorkerMetrics returns a basic set of metrics to be used for a worker and its resetter: // -// * WorkerMetrics records worker operations & number of jobs. -// * ResetterMetrics records the number of jobs that got reset because workers timed out / took too -// long. +// - WorkerMetrics records worker operations & number of jobs. +// - ResetterMetrics records the number of jobs that got reset because workers timed out / took too +// long. // // Individual insights workers may then _also_ want to register their own metrics, if desired, in // their NewWorker functions. 
diff --git a/enterprise/internal/insights/background/historical_enqueuer.go b/enterprise/internal/insights/background/historical_enqueuer.go index 91ec4ec9639..f710a80926e 100644 --- a/enterprise/internal/insights/background/historical_enqueuer.go +++ b/enterprise/internal/insights/background/historical_enqueuer.go @@ -315,28 +315,27 @@ type RepoStore interface { // // It works roughly like this: // -// * For every repository on Sourcegraph (a subset on Sourcegraph.com): -// * Build a list of time frames that we should consider -// * Check the commit index to see if any timeframes can be discarded (if they didn't change) -// * For each frame: -// * Find the oldest commit in the repository. -// * For every unique search insight series (i.e. search query): -// * Consider yielding/sleeping. -// * If the series has data for this timeframe+repo already, nothing to do. -// * If the timeframe we're generating data for is before the oldest commit in the repo, record a zero value. -// * Else, locate the commit nearest to the point in time we're trying to get data for and -// enqueue a queryrunner job to search that repository commit - recording historical data -// for it. +// For every repository on Sourcegraph (a subset on Sourcegraph.com): +// 1. Build a list of time frames that we should consider +// - Check the commit index to see if any timeframes can be discarded (if they didn't change) +// 2. For each frame +// - Find the oldest commit in the repository. +// 3. For every unique pair of frame and search insight series (i.e. search query): +// - Consider yielding/sleeping. +// - If the series has data for this timeframe+repo already, nothing to do. +// - If the timeframe we're generating data for is before the oldest commit in the repo, record a zero value. +// - Else, locate the commit nearest to the point in time we're trying to get data for and +// enqueue a queryrunner job to search that repository commit - recording historical data +// for it. 
// // As you can no doubt see, there is much complexity and potential room for duplicative API calls // here (e.g. "for every timeframe we list every repository"). For this exact reason, we do two // things: // -// 1. Cache duplicative calls to prevent performing heavy operations multiple times. -// 2. Lift heavy operations to the layer/loop one level higher, when it is sane to do so. -// 3. Ensure we perform work slowly, linearly, and with yielding/sleeping between any substantial -// work being performed. -// +// 1. Cache duplicative calls to prevent performing heavy operations multiple times. +// 2. Lift heavy operations to the layer/loop one level higher, when it is sane to do so. +// 3. Ensure we perform work slowly, linearly, and with yielding/sleeping between any substantial +// work being performed. type historicalEnqueuer struct { // Required fields used for mocking in tests. now func() time.Time diff --git a/enterprise/internal/insights/background/insight_enqueuer_test.go b/enterprise/internal/insights/background/insight_enqueuer_test.go index 207786af229..aae59c09e8f 100644 --- a/enterprise/internal/insights/background/insight_enqueuer_test.go +++ b/enterprise/internal/insights/background/insight_enqueuer_test.go @@ -64,7 +64,6 @@ var testRealGlobalSettings = &api.Settings{ID: 1, Contents: `{ // 1. Webhook insights are not enqueued (not yet supported.) // 2. Duplicate insights are deduplicated / do not submit multiple jobs. // 3. Jobs are scheduled not to all run at the same time. -// func Test_discoverAndEnqueueInsights(t *testing.T) { // Setup the setting store and job enqueuer mocks. 
ctx := context.Background() diff --git a/enterprise/internal/insights/discovery/all_repos_iterator.go b/enterprise/internal/insights/discovery/all_repos_iterator.go index baaef33de85..3bf16456e95 100644 --- a/enterprise/internal/insights/discovery/all_repos_iterator.go +++ b/enterprise/internal/insights/discovery/all_repos_iterator.go @@ -104,7 +104,6 @@ func (a *AllReposIterator) ForEach(ctx context.Context, forEach func(repoName st // cachedRepoStoreList calls a.repoStore.List to do a paginated list of repositories, and caches the // results in-memory for some time. -// func (a *AllReposIterator) cachedRepoStoreList(ctx context.Context, page database.LimitOffset) ([]*types.Repo, error) { if a.cachedPageRequests == nil { a.cachedPageRequests = map[database.LimitOffset]cachedPageRequest{} diff --git a/enterprise/internal/insights/store/store.go b/enterprise/internal/insights/store/store.go index 4c2533ac953..e9194bbef3a 100644 --- a/enterprise/internal/insights/store/store.go +++ b/enterprise/internal/insights/store/store.go @@ -207,13 +207,13 @@ ORDER BY sub.series_id, sub.interval_time ASC // Note that the series_points table may contain duplicate points, or points recorded at irregular // intervals. In specific: // -// 1. Multiple points recorded at the same time T for cardinality C will be considered part of the same vector. -// For example, series S and repos R1, R2 have a point at time T. The sum over R1,R2 at T will give the -// aggregated sum for that series at time T. -// 2. Rarely, it may contain duplicate data points due to the at-least once semantics of query execution. -// This will cause some jitter in the aggregated series, and will skew the results slightly. -// 3. Searches may not complete at the same exact time, so even in a perfect world if the interval -// should be 12h it may be off by a minute or so. +// 1. Multiple points recorded at the same time T for cardinality C will be considered part of the same vector. 
+// For example, series S and repos R1, R2 have a point at time T. The sum over R1,R2 at T will give the +// aggregated sum for that series at time T. +// 2. Rarely, it may contain duplicate data points due to the at-least once semantics of query execution. +// This will cause some jitter in the aggregated series, and will skew the results slightly. +// 3. Searches may not complete at the same exact time, so even in a perfect world if the interval +// should be 12h it may be off by a minute or so. func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { preds := []*sqlf.Query{} @@ -267,7 +267,7 @@ func seriesPointsQuery(opts SeriesPointsOpts) *sqlf.Query { ) } -//values constructs a SQL values statement out of an array of repository ids +// values constructs a SQL values statement out of an array of repository ids func values(ids []api.RepoID) string { if len(ids) == 0 { return "" diff --git a/enterprise/internal/license/generate-license.go b/enterprise/internal/license/generate-license.go index d5e5d20b9f9..c2c73f29933 100644 --- a/enterprise/internal/license/generate-license.go +++ b/enterprise/internal/license/generate-license.go @@ -3,7 +3,7 @@ // Command generate-license generates a signed Sourcegraph license key. // -// REQUIREMENTS +// # REQUIREMENTS // // You must provide a private key to sign the license. 
// @@ -12,11 +12,11 @@ // // To create a test private key that will NOT generate valid licenses, use: // -// openssl genrsa -out /tmp/key.pem 2048 +// openssl genrsa -out /tmp/key.pem 2048 // // EXAMPLE // -// go run generate-license.go -private-key key.pem -tags=dev -users=100 -expires=8784h +// go run generate-license.go -private-key key.pem -tags=dev -users=100 -expires=8784h package main import ( diff --git a/enterprise/internal/license/read-license.go b/enterprise/internal/license/read-license.go index e834ec95603..f02a8690c55 100644 --- a/enterprise/internal/license/read-license.go +++ b/enterprise/internal/license/read-license.go @@ -6,7 +6,7 @@ // // EXAMPLE // -// go run ./read-license.go < license-file +// go run ./read-license.go < license-file package main import ( diff --git a/enterprise/internal/oobmigration/migrations/codeintel/migrator.go b/enterprise/internal/oobmigration/migrations/codeintel/migrator.go index 215ba2578a5..73269888076 100644 --- a/enterprise/internal/oobmigration/migrations/codeintel/migrator.go +++ b/enterprise/internal/oobmigration/migrations/codeintel/migrator.go @@ -21,16 +21,18 @@ import ( // // We have the following assumptions about the schema (for a configured table T): // -// 1. There is an index on T.dump_id -// 2. For each distinct dump_id in table T, there is a corresponding row in table -// T_schema_version. This invariant is kept up to date via triggers on insert. -// 3. Table T_schema_version has the following schema: +// 1. There is an index on T.dump_id // -// CREATE TABLE T_schema_versions ( -// dump_id integer PRIMARY KEY NOT NULL, -// min_schema_version integer, -// max_schema_version integer -// ); +// 2. For each distinct dump_id in table T, there is a corresponding row in table +// T_schema_version. This invariant is kept up to date via triggers on insert. +// +// 3. 
Table T_schema_version has the following schema: +// +// CREATE TABLE T_schema_versions ( +// dump_id integer PRIMARY KEY NOT NULL, +// min_schema_version integer, +// max_schema_version integer +// ); // // When selecting a set of candidate records to migrate, we first use the each upload record's // schema version bounds to determine if there are still records associated with that upload diff --git a/internal/authz/header.go b/internal/authz/header.go index 3a12c3df641..c3940480df3 100644 --- a/internal/authz/header.go +++ b/internal/authz/header.go @@ -29,9 +29,9 @@ func IsUnrecognizedScheme(err error) bool { // Two forms of the Authorization header's "credentials" token are supported (see [RFC 7235, // Appendix C](https://tools.ietf.org/html/rfc7235#appendix-C): // -// - With only an access token: "token" 1*SP token68 -// - With a token as params: -// "token" 1*SP "token" BWS "=" BWS quoted-string +// - With only an access token: "token" 1*SP token68 +// - With a token as params: +// "token" 1*SP "token" BWS "=" BWS quoted-string // // The returned values are derived directly from user input and have not been validated or // authenticated. 
diff --git a/internal/authz/iface.go b/internal/authz/iface.go index 3739aea8ece..669da7ac754 100644 --- a/internal/authz/iface.go +++ b/internal/authz/iface.go @@ -15,28 +15,28 @@ import ( // // Rules are expressed as Glob syntaxes: // -// pattern: -// { term } +// pattern: +// { term } // -// term: -// `*` matches any sequence of non-separator characters -// `**` matches any sequence of characters -// `?` matches any single non-separator character -// `[` [ `!` ] { character-range } `]` -// character class (must be non-empty) -// `{` pattern-list `}` -// pattern alternatives -// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) -// `\` c matches character c +// term: +// `*` matches any sequence of non-separator characters +// `**` matches any sequence of characters +// `?` matches any single non-separator character +// `[` [ `!` ] { character-range } `]` +// character class (must be non-empty) +// `{` pattern-list `}` +// pattern alternatives +// c matches character c (c != `*`, `**`, `?`, `\`, `[`, `{`, `}`) +// `\` c matches character c // -// character-range: -// c matches character c (c != `\\`, `-`, `]`) -// `\` c matches character c -// lo `-` hi matches character c for lo <= c <= hi +// character-range: +// c matches character c (c != `\\`, `-`, `]`) +// `\` c matches character c +// lo `-` hi matches character c for lo <= c <= hi // -// pattern-list: -// pattern { `,` pattern } -// comma-separated (without spaces) patterns +// pattern-list: +// pattern { `,` pattern } +// comma-separated (without spaces) patterns // // This Glob syntax is currently from github.com/gobwas/glob: // https://sourcegraph.com/github.com/gobwas/glob@e7a84e9525fe90abcda167b604e483cc959ad4aa/-/blob/glob.go?L39:6 diff --git a/internal/check/check.go b/internal/check/check.go index 0cb2ffbc306..d8e6ec8668c 100644 --- a/internal/check/check.go +++ b/internal/check/check.go @@ -131,17 +131,18 @@ func DefaultEndpointProvider(service string) []*url.URL { } // 
NewAggregateHealthCheckHandler returns a JSON with the high-level structure -// { -// : { -// : { -// : , -// : -// }, -// : ... -// }, -// : ... -// ... -// } +// +// { +// : { +// : { +// : , +// : +// }, +// : ... +// }, +// : ... +// ... +// } // // The handler should only be used in frontend. // diff --git a/internal/codeintel/autoindexing/internal/inference/luatypes/recognizers.go b/internal/codeintel/autoindexing/internal/inference/luatypes/recognizers.go index 82c113aa7a7..d1487b2ea39 100644 --- a/internal/codeintel/autoindexing/internal/inference/luatypes/recognizers.go +++ b/internal/codeintel/autoindexing/internal/inference/luatypes/recognizers.go @@ -11,10 +11,10 @@ import ( // scripts via UserData values. This struct can take one of two mutually // exclusive forms: // -// (1) An applicable recognizer with patterns and a generate function. -// (2) A fallback recognizer, which consists of a list of children. -// Execution of a fallback recognizer will invoke its children, -// in order and recursively, until the non-empty value is yielded. +// (1) An applicable recognizer with patterns and a generate function. +// (2) A fallback recognizer, which consists of a list of children. +// Execution of a fallback recognizer will invoke its children, +// in order and recursively, until the non-empty value is yielded. type Recognizer struct { patterns []*PathPattern patternsForContent []*PathPattern diff --git a/internal/codeintel/autoindexing/service.go b/internal/codeintel/autoindexing/service.go index a340737872b..524b667540a 100644 --- a/internal/codeintel/autoindexing/service.go +++ b/internal/codeintel/autoindexing/service.go @@ -341,10 +341,10 @@ type configurationFactoryFunc func(ctx context.Context, repositoryID int, commit // getIndexRecords determines the set of index records that should be enqueued for the given commit. 
// For each repository, we look for index configuration in the following order: // -// - supplied explicitly via parameter -// - in the database -// - committed to `sourcegraph.yaml` in the repository -// - inferred from the repository structure +// - supplied explicitly via parameter +// - in the database +// - committed to `sourcegraph.yaml` in the repository +// - inferred from the repository structure func (s *Service) getIndexRecords(ctx context.Context, repositoryID int, commit, configuration string, bypassLimit bool) ([]shared.Index, error) { fns := []configurationFactoryFunc{ makeExplicitConfigurationFactory(configuration), diff --git a/internal/codeintel/commitgraph/commit_graph.go b/internal/codeintel/commitgraph/commit_graph.go index d0f7852f065..9759f76ad64 100644 --- a/internal/codeintel/commitgraph/commit_graph.go +++ b/internal/codeintel/commitgraph/commit_graph.go @@ -136,9 +136,9 @@ func reverseGraph(graph map[string][]string) map[string][]string { // tokens to upload meta value. Select commits are any commits that satisfy one of the following // properties: // -// 1. They define an upload, -// 2. They have multiple parents, or -// 3. They have a child with multiple parents. +// 1. They define an upload, +// 2. They have multiple parents, or +// 3. They have a child with multiple parents. // // For all remaining commits, we can easily re-calculate the visible uploads without storing them. // All such commits have a single, unambiguous path to an ancestor that does store data. These @@ -187,9 +187,9 @@ func populateUploadsByTraversal(graph map[string][]string, order []string, commi // populateUploadsForCommit populates the items stored in the given mapping for the given commit. // The uploads considered visible for a commit include: // -// 1. the set of uploads defined on that commit, and -// 2. the set of uploads visible from the ancestors with the minimum distance -// for equivalent root and indexer values. +// 1. 
the set of uploads defined on that commit, and +// 2. the set of uploads visible from the ancestors with the minimum distance +// for equivalent root and indexer values. // // If two ancestors have different uploads visible for the same root and indexer, the one with the // smaller distance to the source commit will shadow the other. Similarly, If an ancestor and the diff --git a/internal/codeintel/dependencies/live/init.go b/internal/codeintel/dependencies/live/init.go index e3fa02e51a2..1f1c5af8dfa 100644 --- a/internal/codeintel/dependencies/live/init.go +++ b/internal/codeintel/dependencies/live/init.go @@ -11,7 +11,7 @@ import ( // GetService creates or returns an already-initialized dependencies service. If the service is // new, it will use the given database handle and syncer instance. If the given syncer is nil, -/// then ErrorSyncer will be used instead. +// then ErrorSyncer will be used instead. func GetService(db database.DB) *dependencies.Service { return dependencies.GetService(db) } diff --git a/internal/codeintel/uploads/background/cleanup/unknown_commits.go b/internal/codeintel/uploads/background/cleanup/unknown_commits.go index df9779a0f3f..34b2e25bc5e 100644 --- a/internal/codeintel/uploads/background/cleanup/unknown_commits.go +++ b/internal/codeintel/uploads/background/cleanup/unknown_commits.go @@ -54,10 +54,10 @@ func mergeSourceCommits(usc []uploads.SourcedCommits, isc []autoindexing.Sourced return sourceCommits } -// func (j *janitor) HandleError(err error) { -// j.metrics.numErrors.Inc() -// log.Error("Failed to delete codeintel records with an unknown commit", "error", err) -// } +// func (j *janitor) HandleError(err error) { +// j.metrics.numErrors.Inc() +// log.Error("Failed to delete codeintel records with an unknown commit", "error", err) +// } type SourcedCommits struct { RepositoryID int RepositoryName string diff --git a/internal/codeintel/uploads/service.go b/internal/codeintel/uploads/service.go index 44d4f3a544c..4284951bf62 
100644 --- a/internal/codeintel/uploads/service.go +++ b/internal/codeintel/uploads/service.go @@ -456,7 +456,6 @@ const numAncestors = 100 // the graph. This will not always produce the full set of visible commits - some responses may not contain // all results while a subsequent request made after the lsif_nearest_uploads has been updated to include // this commit will. -// func (s *Service) InferClosestUploads(ctx context.Context, repositoryID int, commit, path string, exactPath bool, indexer string) (_ []shared.Dump, err error) { ctx, _, endObservation := s.operations.inferClosestUploads.With(ctx, &err, observation.Args{ LogFields: []log.Field{log.Int("repositoryID", repositoryID), log.String("commit", commit), log.String("path", path), log.Bool("exactPath", exactPath), log.String("indexer", indexer)}, diff --git a/internal/conf/conf.go b/internal/conf/conf.go index 679bcfc277c..71903d5671b 100644 --- a/internal/conf/conf.go +++ b/internal/conf/conf.go @@ -23,7 +23,6 @@ import ( // // - The site configuration, from the database (from the site-admin panel). // - Service connections, from the frontend (e.g. which gitservers to talk to). 
-// type Unified struct { schema.SiteConfiguration ServiceConnectionConfig conftypes.ServiceConnections diff --git a/internal/database/basestore/rows.go b/internal/database/basestore/rows.go index e8001dd6694..443ac46b69d 100644 --- a/internal/database/basestore/rows.go +++ b/internal/database/basestore/rows.go @@ -16,19 +16,19 @@ import ( // // The signature of this function allows scan methods to be written uniformly: // -// func ScanThings(rows *sql.Rows, queryErr error) (_ []Thing, err error) { -// if queryErr != nil { -// return nil, queryErr -// } -// defer func() { err = CloseRows(rows, err) }() +// func ScanThings(rows *sql.Rows, queryErr error) (_ []Thing, err error) { +// if queryErr != nil { +// return nil, queryErr +// } +// defer func() { err = CloseRows(rows, err) }() // -// // read things from rows -// } +// // read things from rows +// } // // Scan methods should be called directly with the results of `*store.Query` to // ensure that the rows are always properly handled. // -// things, err := ScanThings(store.Query(ctx, query)) +// things, err := ScanThings(store.Query(ctx, query)) func CloseRows(rows *sql.Rows, err error) error { return errors.Append(err, rows.Close(), rows.Err()) } diff --git a/internal/database/basestore/store.go b/internal/database/basestore/store.go index 1b005f81efa..f83358fe7c9 100644 --- a/internal/database/basestore/store.go +++ b/internal/database/basestore/store.go @@ -23,22 +23,22 @@ import ( // return a modified base store with no methods from the outer layer. All other methods // of the base store are available on the outer layer without needing to be re-defined. 
// -// type SprocketStore struct { -// *basestore.Store -// } +// type SprocketStore struct { +// *basestore.Store +// } // -// func NewWithDB(database dbutil.DB) *SprocketStore { -// return &SprocketStore{Store: basestore.NewWithDB(database, sql.TxOptions{})} -// } +// func NewWithDB(database dbutil.DB) *SprocketStore { +// return &SprocketStore{Store: basestore.NewWithDB(database, sql.TxOptions{})} +// } // -// func (s *SprocketStore) With(other basestore.ShareableStore) *SprocketStore { -// return &SprocketStore{Store: s.Store.With(other)} -// } +// func (s *SprocketStore) With(other basestore.ShareableStore) *SprocketStore { +// return &SprocketStore{Store: s.Store.With(other)} +// } // -// func (s *SprocketStore) Transact(ctx context.Context) (*SprocketStore, error) { -// txBase, err := s.Store.Transact(ctx) -// return &SprocketStore{Store: txBase}, err -// } +// func (s *SprocketStore) Transact(ctx context.Context) (*SprocketStore, error) { +// txBase, err := s.Store.Transact(ctx) +// return &SprocketStore{Store: txBase}, err +// } type Store struct { handle TransactableHandle } @@ -67,12 +67,12 @@ func (s *Store) Handle() TransactableHandle { // This method should be used when two distinct store instances need to perform an // operation within the same shared transaction. // -// txn1 := store1.Transact(ctx) // Creates a transaction -// txn2 := store2.With(txn1) // References the same transaction +// txn1 := store1.Transact(ctx) // Creates a transaction +// txn2 := store2.With(txn1) // References the same transaction // -// txn1.A(ctx) // Occurs within shared transaction -// txn2.B(ctx) // Occurs within shared transaction -// txn1.Done() // closes shared transaction +// txn1.A(ctx) // Occurs within shared transaction +// txn2.B(ctx) // Occurs within shared transaction +// txn1.Done() // closes shared transaction // // Note that once a handle is shared between two stores, committing or rolling back // a transaction will affect the handle of both stores. 
Most notably, two stores that diff --git a/internal/database/dbconn/open.go b/internal/database/dbconn/open.go index bdb27c34a5e..9edfa6369ca 100644 --- a/internal/database/dbconn/open.go +++ b/internal/database/dbconn/open.go @@ -88,27 +88,27 @@ func openDBWithStartupWait(cfg *pgx.ConnConfig) (db *sql.DB, err error) { // For all mandatory methods the sqlHooks driver is used. For the optional methods namely Ping, ResetSession and CheckNamedValue // (which the sqlHooks driver does not implement), extendedConn goes to the original default driver. // -// Ping() -// ResetSession() -// CheckNamedValue() -// β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” -// β”‚ β”‚ -// β”‚ β”‚ -// β”‚ β”‚ -// β”Œβ”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β” -// β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ -// β”‚otelsqlβ”œβ”€β”€β–Ίβ”‚extendedConnβ”œβ”€β”€β–Ίβ”‚sqlhooksβ”œβ”€β”€β”€β”€β–Ίβ”‚DefaultDriverβ”‚ -// β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ -// β””β”€β”¬β”€β”€β”€β”€β”€β”˜ β””β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ -// β”‚ β”‚ β”‚ β”‚ -// β”‚ β”‚ β”‚ β”‚Implements all SQL driver methods -// β”‚ β”‚ β”‚ -// β”‚ β”‚ β”‚Only implements mandatory ones -// β”‚ β”‚ β”‚Ping(), ResetSession() and CheckNamedValue() are missing. 
-// β”‚ β”‚ -// β”‚ β”‚Implement all SQL driver methods -// β”‚ -// β”‚Expects all SQL driver methods +// Ping() +// ResetSession() +// CheckNamedValue() +// β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β” +// β”‚ β”‚ +// β”‚ β”‚ +// β”‚ β”‚ +// β”Œβ”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”΄β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β”€β”€β”€β” β”Œβ”€β”€β”€β”€β”€β–Όβ”€β”€β”€β”€β”€β”€β”€β” +// β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ +// β”‚otelsqlβ”œβ”€β”€β–Ίβ”‚extendedConnβ”œβ”€β”€β–Ίβ”‚sqlhooksβ”œβ”€β”€β”€β”€β–Ίβ”‚DefaultDriverβ”‚ +// β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ β”‚ +// β””β”€β”¬β”€β”€β”€β”€β”€β”˜ β””β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”¬β”€β”€β”€β”€β”€β”€β”˜ β””β”€β”¬β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”€β”˜ +// β”‚ β”‚ β”‚ β”‚ +// β”‚ β”‚ β”‚ β”‚Implements all SQL driver methods +// β”‚ β”‚ β”‚ +// β”‚ β”‚ β”‚Only implements mandatory ones +// β”‚ β”‚ β”‚Ping(), ResetSession() and CheckNamedValue() are missing. +// β”‚ β”‚ +// β”‚ β”‚Implement all SQL driver methods +// β”‚ +// β”‚Expects all SQL driver methods // // A sqlhooks.Driver must be used as a Driver otherwise errors will be raised. type extendedDriver struct { diff --git a/internal/database/dbtest/dbtest.go b/internal/database/dbtest/dbtest.go index 4810b453949..3f343c809cc 100644 --- a/internal/database/dbtest/dbtest.go +++ b/internal/database/dbtest/dbtest.go @@ -25,7 +25,9 @@ import ( // transaction if an error didn't occur. // // After opening this transaction, it executes the query -// SET CONSTRAINTS ALL DEFERRED +// +// SET CONSTRAINTS ALL DEFERRED +// // which aids in testing. 
func NewTx(t testing.TB, db *sql.DB) *sql.Tx { tx, err := db.Begin() diff --git a/internal/database/migration/definition/definition.go b/internal/database/migration/definition/definition.go index 880ebe12146..47ea4657cfc 100644 --- a/internal/database/migration/definition/definition.go +++ b/internal/database/migration/definition/definition.go @@ -122,11 +122,11 @@ func (ds *Definitions) Filter(ids []int) (*Definitions, error) { // of all leaves. This gives us a nice clean single-entry, single-exit graph prefix // that can be squashed into a single migration. // -// +-- ... --+ +-- [ leaf 1 ] -// | | | -// [ root ] -+ +- [ nca ] -+ -// | | | -// +-- ... --+ +-- [ leaf 2 ] +// +-- ... --+ +-- [ leaf 1 ] +// | | | +// [ root ] -+ +- [ nca ] -+ +// | | | +// +-- ... --+ +-- [ leaf 2 ] func (ds *Definitions) LeafDominator() (Definition, bool) { leaves := ds.Leaves() if len(leaves) == 0 { diff --git a/internal/database/users.go b/internal/database/users.go index 2077ae74b22..9fbf0c42743 100644 --- a/internal/database/users.go +++ b/internal/database/users.go @@ -210,7 +210,7 @@ type NewUser struct { // username/email and password. If no password is given, a non-builtin auth provider must be used to // sign into the account. // -// CREATION OF SITE ADMINS +// # CREATION OF SITE ADMINS // // The new user is made to be a site admin if the following are both true: (1) this user would be // the first and only user on the server, and (2) the site has not yet been initialized. 
Otherwise, diff --git a/internal/endpoint/endpoint.go b/internal/endpoint/endpoint.go index 33865ee3a34..480d12543ac 100644 --- a/internal/endpoint/endpoint.go +++ b/internal/endpoint/endpoint.go @@ -47,10 +47,9 @@ type endpoints struct { // // Examples URL specifiers: // -// "k8s+http://searcher" -// "k8s+rpc://indexed-searcher?kind=sts" -// "http://searcher-0 http://searcher-1 http://searcher-2" -// +// "k8s+http://searcher" +// "k8s+rpc://indexed-searcher?kind=sts" +// "http://searcher-0 http://searcher-1 http://searcher-2" func New(urlspec string) *Map { if !strings.HasPrefix(urlspec, "k8s+") { return Static(strings.Fields(urlspec)...) diff --git a/internal/env/baseconfig.go b/internal/env/baseconfig.go index 1f56d6f27c0..6bc31069a91 100644 --- a/internal/env/baseconfig.go +++ b/internal/env/baseconfig.go @@ -23,31 +23,31 @@ type Config interface { // BaseConfig is a base struct for configuration objects. The following is a minimal // example of declaring, loading, and validating configuration from the environment. 
// -// type Config struct { -// env.BaseConfig +// type Config struct { +// env.BaseConfig // -// Name string -// Weight int -// Age time.Duration -// } +// Name string +// Weight int +// Age time.Duration +// } // -// func (c *Config) Load() { -// c.Name = c.Get("SRC_NAME", "test", "The service's name (wat).") -// c.Weight = c.GetInt("SRC_WEIGHT", "1m", "The service's weight (wat).") -// c.Age = c.GetInterval("SRC_AGE", "10s", "The service's age (wat).") -// } +// func (c *Config) Load() { +// c.Name = c.Get("SRC_NAME", "test", "The service's name (wat).") +// c.Weight = c.GetInt("SRC_WEIGHT", "1m", "The service's weight (wat).") +// c.Age = c.GetInterval("SRC_AGE", "10s", "The service's age (wat).") +// } // -// func applicationInit() { -// config := &Config{} -// config.Load() +// func applicationInit() { +// config := &Config{} +// config.Load() // -// env.Lock() -// env.HandleHelpFlag() +// env.Lock() +// env.HandleHelpFlag() // -// if err := config.Validate(); err != nil{ -// // handle me -// } -// } +// if err := config.Validate(); err != nil{ +// // handle me +// } +// } type BaseConfig struct { errs []error diff --git a/internal/extsvc/pypi/client.go b/internal/extsvc/pypi/client.go index c9b98a13572..30447d82333 100644 --- a/internal/extsvc/pypi/client.go +++ b/internal/extsvc/pypi/client.go @@ -19,7 +19,6 @@ // containing source code or a binary // // https://pypi.org/help/#packages -// package pypi import ( diff --git a/internal/gitserver/client.go b/internal/gitserver/client.go index 0d36adc4ab2..ea99c4bb11e 100644 --- a/internal/gitserver/client.go +++ b/internal/gitserver/client.go @@ -1123,8 +1123,9 @@ func (c *clientImplementor) httpPostWithURI(ctx context.Context, repo api.RepoNa return c.do(ctx, repo, "POST", uri, b) } -//nolint:unparam // unparam complains that `method` always has same value across call-sites, but that's OK // do performs a request to a gitserver instance based on the address in the uri argument. 
+// +//nolint:unparam // unparam complains that `method` always has same value across call-sites, but that's OK func (c *clientImplementor) do(ctx context.Context, repo api.RepoName, method, uri string, payload []byte) (resp *http.Response, err error) { parsedURL, err := url.ParseRequestURI(uri) if err != nil { diff --git a/internal/gitserver/commands.go b/internal/gitserver/commands.go index dfaa51fa650..8016319d623 100644 --- a/internal/gitserver/commands.go +++ b/internal/gitserver/commands.go @@ -238,12 +238,11 @@ func parseShortLog(out []byte) ([]*gitdomain.ContributorCount, error) { // the following somewhat-common malformed syntax where a user has misconfigured // their email address as their name: // -// foo@gmail.com +// foo@gmail.com // // As a valid name, whereas mail.ParseAddress would return an error: // -// mail: expected single address, got "" -// +// mail: expected single address, got "" func lenientParseAddress(address string) (*mail.Address, error) { addr, err := mail.ParseAddress(address) if err != nil && strings.Contains(err.Error(), "expected single address") { @@ -1042,8 +1041,8 @@ func (c *clientImplementor) ListDirectoryChildren( // cleanDirectoriesForLsTree sanitizes the input dirnames to a git ls-tree command. There are a // few peculiarities handled here: // -// 1. The root of the tree must be indicated with `.`, and -// 2. In order for git ls-tree to return a directory's contents, the name must end in a slash. +// 1. The root of the tree must be indicated with `.`, and +// 2. In order for git ls-tree to return a directory's contents, the name must end in a slash. 
func cleanDirectoriesForLsTree(dirnames []string) []string { var args []string for _, dir := range dirnames { diff --git a/internal/instrumentation/http.go b/internal/instrumentation/http.go index 49714c80142..2fb489bb421 100644 --- a/internal/instrumentation/http.go +++ b/internal/instrumentation/http.go @@ -11,10 +11,10 @@ import ( // HTTPMiddleware wraps the handler with the following: // -// - If the HTTP header, X-Sourcegraph-Should-Trace, is set to a truthy value, set the -// shouldTraceKey context.Context value to true -// - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp, which applies the -// desired instrumentation. +// - If the HTTP header, X-Sourcegraph-Should-Trace, is set to a truthy value, set the +// shouldTraceKey context.Context value to true +// - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp, which applies the +// desired instrumentation. // // The provided operation name is used to add details to spans. func HTTPMiddleware(operation string, h http.Handler, opts ...otelhttp.Option) http.Handler { diff --git a/internal/ratelimit/monitor.go b/internal/ratelimit/monitor.go index 16ea27d4e0d..914df824847 100644 --- a/internal/ratelimit/monitor.go +++ b/internal/ratelimit/monitor.go @@ -97,9 +97,9 @@ func (c *Monitor) Get() (remaining int, reset, retry time.Duration, known bool) // want to perform a cost-500 operation. Only 4 more cost-500 operations are allowed in the next 30 minutes (per // the rate limit): // -// -500 -500 -500 -// Now |------------*------------*------------*------------| 30 min from now -// Remaining 1500 1000 500 0 5000 (reset) +// -500 -500 -500 +// Now |------------*------------*------------*------------| 30 min from now +// Remaining 1500 1000 500 0 5000 (reset) // // Assuming no other operations are being performed (that count against the rate limit), the recommended wait would // be 7.5 minutes (30 minutes / 4), so that the operations are evenly spaced out. 
diff --git a/internal/rcache/mutex.go b/internal/rcache/mutex.go index 4f96699f95d..69da6b915b6 100644 --- a/internal/rcache/mutex.go +++ b/internal/rcache/mutex.go @@ -37,10 +37,11 @@ type MutexOptions struct { // If, on release, we are unable to unlock the mutex it will continue to be locked until // it is expired by Redis. // The returned context will be cancelled if any of the following occur: -// * The parent context in cancelled -// * The release function is called -// * There is an error extending the lock expiry or the expiry can't be extended because -// they key no longer exists in Redis +// - The parent context in cancelled +// - The release function is called +// - There is an error extending the lock expiry or the expiry can't be extended because +// they key no longer exists in Redis +// // A caller can therefore assume that they are the sole holder of the lock as long as the // context has not been cancelled. func TryAcquireMutex(ctx context.Context, name string, options MutexOptions) (context.Context, func(), bool) { diff --git a/internal/redispool/redispool.go b/internal/redispool/redispool.go index 2e6c16ee790..5df23da319b 100644 --- a/internal/redispool/redispool.go +++ b/internal/redispool/redispool.go @@ -56,9 +56,9 @@ func init() { var schemeMatcher = lazyregexp.New(`^[A-Za-z][A-Za-z0-9\+\-\.]*://`) // dialRedis dials Redis given the raw endpoint string. The string can have two formats: -// 1) If there is a HTTP scheme, it should be either be "redis://" or "rediss://" and the URL -// must be of the format specified in https://www.iana.org/assignments/uri-schemes/prov/redis. -// 2) Otherwise, it is assumed to be of the format $HOSTNAME:$PORT. +// 1. If there is a HTTP scheme, it should be either be "redis://" or "rediss://" and the URL +// must be of the format specified in https://www.iana.org/assignments/uri-schemes/prov/redis. +// 2. Otherwise, it is assumed to be of the format $HOSTNAME:$PORT. 
func dialRedis(rawEndpoint string) (redis.Conn, error) { if schemeMatcher.MatchString(rawEndpoint) { // expect "redis://" return redis.DialURL(rawEndpoint) diff --git a/internal/repos/scheduler.go b/internal/repos/scheduler.go index ada633c1850..51b7f34d5e8 100644 --- a/internal/repos/scheduler.go +++ b/internal/repos/scheduler.go @@ -287,13 +287,12 @@ var configuredLimiter = func() *mutablelimiter.Limiter { // possible. We treat repos differently depending on which part of the // diff they are: // -// -// Deleted - remove from scheduler and queue. -// Added - new repo, enqueue for asap clone. -// Modified - likely new url or name. May also be a sign of new -// commits. Enqueue for asap clone (or fetch). -// Unmodified - we likely already have this cloned. Just rely on -// the scheduler and do not enqueue. +// Deleted - remove from scheduler and queue. +// Added - new repo, enqueue for asap clone. +// Modified - likely new url or name. May also be a sign of new +// commits. Enqueue for asap clone (or fetch). +// Unmodified - we likely already have this cloned. Just rely on +// the scheduler and do not enqueue. func (s *UpdateScheduler) UpdateFromDiff(diff Diff) { for _, r := range diff.Deleted { s.remove(r) diff --git a/internal/repos/syncer.go b/internal/repos/syncer.go index 8576a32fdbf..a551345c0b3 100644 --- a/internal/repos/syncer.go +++ b/internal/repos/syncer.go @@ -285,11 +285,11 @@ func (rm ReposModified) ReposModified(modified types.RepoModified) types.Repos { // // It works for repos from: // -// 1. Public "cloud_default" code hosts since we don't sync them in the background -// (which would delete lazy synced repos). -// 2. Any package hosts (i.e. npm, Maven, etc) since callers are expected to store -// repos in the `lsif_dependency_repos` table which is used as the source of truth -// for the next full sync, so lazy added repos don't get wiped. +// 1. 
Public "cloud_default" code hosts since we don't sync them in the background +// (which would delete lazy synced repos). +// 2. Any package hosts (i.e. npm, Maven, etc) since callers are expected to store +// repos in the `lsif_dependency_repos` table which is used as the source of truth +// for the next full sync, so lazy added repos don't get wiped. // // The "background" boolean flag indicates that we should run this // sync in the background vs block and call s.syncRepo synchronously. diff --git a/internal/search/job/jobutil/expression_job.go b/internal/search/job/jobutil/expression_job.go index acf1831ced9..3dee69522ca 100644 --- a/internal/search/job/jobutil/expression_job.go +++ b/internal/search/job/jobutil/expression_job.go @@ -117,9 +117,9 @@ type OrJob struct { } // For OR queries, there are two phases: -// 1) Stream any results that are found in every subquery -// 2) Once all subqueries have completed, send the results we've found that -// were returned by some subqueries, but not all subqueries. +// 1. Stream any results that are found in every subquery +// 2. Once all subqueries have completed, send the results we've found that +// were returned by some subqueries, but not all subqueries. // // This means that the only time we would hit streaming limit before we have // results from all subqueries is if we hit the limit only with results from @@ -132,17 +132,17 @@ type OrJob struct { // they will be from a random distribution of sub-queries. // // This solution has the following nice properties: -// - Early cancellation is possible -// - Results are streamed where possible, decreasing user-visible latency -// - The only results that are streamed are "fair" results. They are "fair" because -// they were returned from every subquery, so there can be no bias between subqueries -// - The only time we cancel early is when streamed results hit the limit. 
Since the only -// streamed results are "fair" results, there will be no bias against slow or low-volume subqueries -// - Every result we stream is guaranteed to be "complete". By "complete", I mean if I search for "a or b", -// the streamed result will highlight both "a" and "b" if they both exist in the document. -// - The bias is towards documents that match all of our subqueries, so doesn't bias any individual subquery. -// Additionally, a bias towards matching all subqueries is probably desirable, since it's more likely that -// a document matching all subqueries is what the user is looking for than a document matching only one. +// - Early cancellation is possible +// - Results are streamed where possible, decreasing user-visible latency +// - The only results that are streamed are "fair" results. They are "fair" because +// they were returned from every subquery, so there can be no bias between subqueries +// - The only time we cancel early is when streamed results hit the limit. Since the only +// streamed results are "fair" results, there will be no bias against slow or low-volume subqueries +// - Every result we stream is guaranteed to be "complete". By "complete", I mean if I search for "a or b", +// the streamed result will highlight both "a" and "b" if they both exist in the document. +// - The bias is towards documents that match all of our subqueries, so doesn't bias any individual subquery. +// Additionally, a bias towards matching all subqueries is probably desirable, since it's more likely that +// a document matching all subqueries is what the user is looking for than a document matching only one. 
func (j *OrJob) Run(ctx context.Context, clients job.RuntimeClients, stream streaming.Sender) (alert *search.Alert, err error) { _, ctx, stream, finish := job.StartSpan(ctx, stream, j) defer func() { finish(alert, err) }() diff --git a/internal/search/query/transformer.go b/internal/search/query/transformer.go index 64c717eda60..812c824be5f 100644 --- a/internal/search/query/transformer.go +++ b/internal/search/query/transformer.go @@ -515,13 +515,20 @@ func conjunction(left, right Basic) Basic { // pattern node, just not in any of the parameters. // // For example, the query -// repo:a (file:b OR file:c) +// +// repo:a (file:b OR file:c) +// // is transformed to -// (repo:a file:b) OR (repo:a file:c) +// +// (repo:a file:b) OR (repo:a file:c) +// // but the query -// (repo:a OR repo:b) (b OR c) +// +// (repo:a OR repo:b) (b OR c) +// // is transformed to -// (repo:a (b OR c)) OR (repo:b (b OR c)) +// +// (repo:a (b OR c)) OR (repo:b (b OR c)) func BuildPlan(query []Node) Plan { return distribute([]Basic{}, query) } diff --git a/internal/search/query/types.go b/internal/search/query/types.go index 9b275b19c61..8a011ae396f 100644 --- a/internal/search/query/types.go +++ b/internal/search/query/types.go @@ -203,10 +203,11 @@ func (p Plan) ToQ() Q { // Basic represents a leaf expression to evaluate in our search engine. A basic // query comprises: -// (1) a single search pattern expression, which may contain -// 'and' or 'or' operators; and -// (2) parameters that scope the evaluation of search -// patterns (e.g., to repos, files, etc.). +// +// (1) a single search pattern expression, which may contain +// 'and' or 'or' operators; and +// (2) parameters that scope the evaluation of search +// patterns (e.g., to repos, files, etc.). 
type Basic struct { Parameters Pattern Node diff --git a/internal/search/repo_revs.go b/internal/search/repo_revs.go index 0b2cc62db8e..1c6223399b1 100644 --- a/internal/search/repo_revs.go +++ b/internal/search/repo_revs.go @@ -74,7 +74,7 @@ func (r *RepositoryRevisions) Equal(other *RepositoryRevisions) bool { // ParseRepositoryRevisions parses strings that refer to a repository and 0 // or more revspecs. The format is: // -// repo@revs +// repo@revs // // where repo is a repository regex and revs is a ':'-separated list of revspecs // and/or ref globs. A ref glob is a revspec prefixed with '*' (which is not a @@ -83,13 +83,13 @@ func (r *RepositoryRevisions) Equal(other *RepositoryRevisions) bool { // // For example: // -// - 'foo' refers to the 'foo' repo at the default branch -// - 'foo@bar' refers to the 'foo' repo and the 'bar' revspec. -// - 'foo@bar:baz:qux' refers to the 'foo' repo and 3 revspecs: 'bar', 'baz', -// and 'qux'. -// - 'foo@*bar' refers to the 'foo' repo and all refs matching the glob 'bar/*', -// because git interprets the ref glob 'bar' as being 'bar/*' (see `man git-log` -// section on the --glob flag) +// - 'foo' refers to the 'foo' repo at the default branch +// - 'foo@bar' refers to the 'foo' repo and the 'bar' revspec. +// - 'foo@bar:baz:qux' refers to the 'foo' repo and 3 revspecs: 'bar', 'baz', +// and 'qux'. 
+// - 'foo@*bar' refers to the 'foo' repo and all refs matching the glob 'bar/*', +// because git interprets the ref glob 'bar' as being 'bar/*' (see `man git-log` +// section on the --glob flag) func ParseRepositoryRevisions(repoAndOptionalRev string) (string, []RevisionSpecifier) { i := strings.Index(repoAndOptionalRev, "@") if i == -1 { diff --git a/internal/search/result/file.go b/internal/search/result/file.go index 696a410e11b..a9656176505 100644 --- a/internal/search/result/file.go +++ b/internal/search/result/file.go @@ -123,8 +123,8 @@ func (fm *FileMatch) AppendMatches(src *FileMatch) { // Limit will mutate fm such that it only has limit results. limit is a number // greater than 0. // -// if limit >= ResultCount then nothing is done and we return limit - ResultCount. -// if limit < ResultCount then ResultCount becomes limit and we return 0. +// if limit >= ResultCount then nothing is done and we return limit - ResultCount. +// if limit < ResultCount then ResultCount becomes limit and we return 0. func (fm *FileMatch) Limit(limit int) int { matchCount := fm.ChunkMatches.MatchCount() symbolCount := len(fm.Symbols) diff --git a/internal/trace/httptrace.go b/internal/trace/httptrace.go index f2ae32d2eae..92c3bc9c6be 100644 --- a/internal/trace/httptrace.go +++ b/internal/trace/httptrace.go @@ -292,9 +292,10 @@ func HTTPMiddleware(logger log.Logger, next http.Handler, siteConfig conftypes.S // Recoverer is a recovery handler to wrap the stdlib net/http Mux. // Example: -// mux := http.NewServeMux -// ... -// http.Handle("/", sentry.Recoverer(mux)) +// +// mux := http.NewServeMux +// ... 
+// http.Handle("/", sentry.Recoverer(mux)) func loggingRecoverer(logger log.Logger, handler http.Handler) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { defer func() { diff --git a/lib/batches/on/aggregator.go b/lib/batches/on/aggregator.go index 01281f7a0eb..c94f647daab 100644 --- a/lib/batches/on/aggregator.go +++ b/lib/batches/on/aggregator.go @@ -8,11 +8,11 @@ package on // This is essentially a generic type, with two parameters (albeit these are // mostly exposed in OnResult: // -// RepoID: An opaque identifier used to identify unique repositories. This -// must be able to be used as a map key. -// Revision: An object that identifies the specific revision. There are no -// requirements for this type, as it will be returned as-is in -// Revisions(). +// - RepoID: An opaque identifier used to identify unique repositories. This +// must be able to be used as a map key. +// - Revision: An object that identifies the specific revision. There are no +// requirements for this type, as it will be returned as-is in +// Revisions(). type RepoRevisionAggregator struct { results []*RuleRevisions } diff --git a/lib/codeintel/lsif/protocol/documentation.go b/lib/codeintel/lsif/protocol/documentation.go index 8116f705766..04800dc979b 100644 --- a/lib/codeintel/lsif/protocol/documentation.go +++ b/lib/codeintel/lsif/protocol/documentation.go @@ -38,12 +38,12 @@ func NewDocumentationResultEdge(id, inV, outV uint64) DocumentationResultEdge { // // It allows one represent hierarchical documentation like: // -// "project" (e.g. an HTTP library) -// -> "documentationResult" (e.g. "HTTP library" library documentation) -// -> "documentationResult" (e.g. docs for the "Server" class in the HTTP library) -// -> "documentationResult" (e.g. docs for the "Listen" method on the "Server" class) -// -> "documentationResult" (e.g. docs for the "Shutdown" method on the "Server" class) -// -> ... +// "project" (e.g. 
an HTTP library) +// -> "documentationResult" (e.g. "HTTP library" library documentation) +// -> "documentationResult" (e.g. docs for the "Server" class in the HTTP library) +// -> "documentationResult" (e.g. docs for the "Listen" method on the "Server" class) +// -> "documentationResult" (e.g. docs for the "Shutdown" method on the "Server" class) +// -> ... // // Note: the "project" -> "documentationResult" attachment above is expressed via a // "documentationResult" edge, since the parent is not a "documentationResult" vertex. @@ -100,10 +100,10 @@ func NewDocumentationResult(id uint64, result Documentation) DocumentationResult // // Attached to this vertex MUST be two "documentationString" vertices: // -// 1. A "documentationString" vertex with `type: "label"`, which is a one-line label or this section -// of documentation. -// 1. A "documentationString" vertex with `type: "detail"`, which is a multi-line detailed string -// for this section of documentation. +// 1. A "documentationString" vertex with `type: "label"`, which is a one-line label or this section +// of documentation. +// 1. A "documentationString" vertex with `type: "detail"`, which is a multi-line detailed string +// for this section of documentation. // // Both are attached to the documentationResult via a "documentationString" edge. // @@ -275,11 +275,11 @@ const ( // strings, which are "documentationString" vertices. 
The overall structure looks like the // following roughly: // -// {id: 53, type:"vertex", label:"documentationResult", result:{identifier:"httpserver", ...}} -// {id: 54, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A single-line label for an HTTPServer instance"}} -// {id: 55, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A multi-line\n detailed\n explanation of an HTTPServer instance, what it does, etc."}} -// {id: 54, type:"edge", label:"documentationString", inV: 54, outV: 53, kind:"label"} -// {id: 54, type:"edge", label:"documentationString", inV: 55, outV: 53, kind:"detail"} +// {id: 53, type:"vertex", label:"documentationResult", result:{identifier:"httpserver", ...}} +// {id: 54, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A single-line label for an HTTPServer instance"}} +// {id: 55, type:"vertex", label:"documentationString", result:{kind:"plaintext", "value": "A multi-line\n detailed\n explanation of an HTTPServer instance, what it does, etc."}} +// {id: 54, type:"edge", label:"documentationString", inV: 54, outV: 53, kind:"label"} +// {id: 54, type:"edge", label:"documentationString", inV: 55, outV: 53, kind:"detail"} // // Hover, definition, etc. results can then be attached to ranges within the "documentationString" // vertices themselves (vertex 54 / 55), see the docs for DocumentationString for more details. @@ -332,13 +332,12 @@ func NewDocumentationStringEdge(id, inV, outV uint64, kind DocumentationStringKi // in the documentation string's markup content itself) using a "contains" edge. This enables // ranges within a documentation string to have: // -// * "hoverResult"s (e.g. you can hover over a type signature in the documentation string and get info) -// * "definitionResult" and "referenceResults" -// * "documentationResult" itself - allowing a range of text in one documentation to link to another -// documentation section (e.g. 
in the same way a hyperlink works in HTML.) -// * "moniker" to link to another project's hover/definition/documentation results, across -// repositories. -// +// - "hoverResult"s (e.g. you can hover over a type signature in the documentation string and get info) +// - "definitionResult" and "referenceResults" +// - "documentationResult" itself - allowing a range of text in one documentation to link to another +// documentation section (e.g. in the same way a hyperlink works in HTML.) +// - "moniker" to link to another project's hover/definition/documentation results, across +// repositories. type DocumentationString struct { Vertex Result MarkupContent `json:"result"` diff --git a/lib/codeintel/pathexistence/git.go b/lib/codeintel/pathexistence/git.go index 0b41e78ec61..7a323e2ac43 100644 --- a/lib/codeintel/pathexistence/git.go +++ b/lib/codeintel/pathexistence/git.go @@ -54,8 +54,8 @@ func LocalGitGetChildrenFunc(repoRoot string) GetChildrenFunc { // cleanDirectoriesForLsTree sanitizes the input dirnames to a git ls-tree command. There are a // few peculiarities handled here: // -// 1. The root of the tree must be indicated with `.`, and -// 2. In order for git ls-tree to return a directory's contents, the name must end in a slash. +// 1. The root of the tree must be indicated with `.`, and +// 2. In order for git ls-tree to return a directory's contents, the name must end in a slash. func cleanDirectoriesForLsTree(dirnames []string) []string { var args []string for _, dir := range dirnames { diff --git a/lib/codeintel/tools/lsif-index-tester/range_differ.go b/lib/codeintel/tools/lsif-index-tester/range_differ.go index fee0a87f6d0..2e33e7bf5d4 100644 --- a/lib/codeintel/tools/lsif-index-tester/range_differ.go +++ b/lib/codeintel/tools/lsif-index-tester/range_differ.go @@ -43,9 +43,8 @@ func fmtLine(line int, prefixWidth int, text string) string { // | | ^^^^^^^^^^^^^^^^ actual // |6| return; // -// -// Only operates on locations with the same URI. 
-// It doesn't make sense to diff anything here when we don't have that. +// Only operates on locations with the same URI. It doesn't make sense to diff +// anything here when we don't have that. func DrawLocations(contents string, expected, actual Location, context int) (string, error) { if expected.URI != actual.URI { return "", errors.New("Must pass in two locations with the same URI") diff --git a/lib/errors/warning.go b/lib/errors/warning.go index 3972affc0b0..b1d4255d755 100644 --- a/lib/errors/warning.go +++ b/lib/errors/warning.go @@ -43,15 +43,15 @@ var _ Warning = (*warning)(nil) // Consumers of these errors should then use errors.As to check if the error is of a warning type // and based on that, should just log it as a warning. For example: // -// var ref errors.Warning -// err := someFunctionThatReturnsAWarningErrorOrACriticalError() -// if err != nil && errors.As(err, &ref) { -// log.Warnf("failed to do X: %v", err) -// } +// var ref errors.Warning +// err := someFunctionThatReturnsAWarningErrorOrACriticalError() +// if err != nil && errors.As(err, &ref) { +// log.Warnf("failed to do X: %v", err) +// } // -// if err != nil { -// return err -// } +// if err != nil { +// return err +// } func NewWarningError(err error) *warning { return &warning{ error: err, diff --git a/lib/output/visible_string_width.go b/lib/output/visible_string_width.go index 982e491fee2..08cb8c8e332 100644 --- a/lib/output/visible_string_width.go +++ b/lib/output/visible_string_width.go @@ -6,7 +6,7 @@ import ( ) // This regex is taken from here: -// https://github.com/acarl005/stripansi/blob/5a71ef0e047df0427e87a79f27009029921f1f9b/stripansi.go +// https://github.com/acarl005/stripansi/blob/5a71ef0e047df0427e87a79f27009029921f1f9b/stripansi.go const ansi = "[\u001B\u009B][[\\]()#;?]*(?:(?:(?:[a-zA-Z\\d]*(?:;[a-zA-Z\\d]*)*)?\u0007)|(?:(?:\\d{1,4}(?:;\\d{0,4})*)?[\\dA-PRZcf-ntqry=><~]))" var ansiRegex = regexp.MustCompile(ansi) diff --git 
a/monitoring/definitions/shared/constructor.go b/monitoring/definitions/shared/constructor.go index c53f9143e08..b888cdecdd0 100644 --- a/monitoring/definitions/shared/constructor.go +++ b/monitoring/definitions/shared/constructor.go @@ -94,10 +94,14 @@ func makeFilters(containerLabel, containerName string, filters ...string) string return strings.Join(filters, ",") } -// makeBy returns the suffix if the aggregator expression (e.g., max by (queue)), -// ^^^^^^^^^^ -// as well as a prefix to be used as part of the legend consisting of placeholder -// values that will render to the value of the label/variable in the Grafana UI. +// makeBy returns the suffix of the aggregator expression. +// +// e.g. max by (queue) +// ^^^^^^^^^^ +// +// legendPrefix is a prefix to be used as part of the legend consisting of +// placeholder values that will render to the value of the label/variable in +// the Grafana UI. func makeBy(labels ...string) (aggregateExprSuffix string, legendPrefix string) { if len(labels) == 0 { return "", "" diff --git a/monitoring/definitions/shared/shared.go b/monitoring/definitions/shared/shared.go index cddf51eb0ca..a4d52e240c8 100644 --- a/monitoring/definitions/shared/shared.go +++ b/monitoring/definitions/shared/shared.go @@ -5,19 +5,19 @@ // When editing this package or introducing any shared declarations, you should abide strictly by the // following rules: // -// 1. Do NOT declare a shared definition unless 5+ dashboards will use it. Sharing dashboard -// declarations means the codebase becomes more complex and non-declarative which we want to avoid -// so repeat yourself instead if it applies to less than 5 dashboards. +// 1. Do NOT declare a shared definition unless 5+ dashboards will use it. Sharing dashboard +// declarations means the codebase becomes more complex and non-declarative which we want to avoid +// so repeat yourself instead if it applies to less than 5 dashboards. // -// 2. ONLY declare shared Observables. 
Introducing shared Rows or Groups prevents individual dashboard -// maintainers from holistically considering both the layout of dashboards as well as the -// metrics and alerts defined within them -- which we do not want. +// 2. ONLY declare shared Observables. Introducing shared Rows or Groups prevents individual dashboard +// maintainers from holistically considering both the layout of dashboards as well as the +// metrics and alerts defined within them -- which we do not want. // -// 3. Use the sharedObservable type and do NOT parameterize more than just the container name. It may -// be tempting to pass an alerting threshold as an argument, or parameterize whether a critical -// alert is defined -- but this makes reasoning about alerts at a high level much more difficult. -// If you have a need for this, it is a strong signal you should NOT be using the shared definition -// anymore and should instead copy it and apply your modifications. +// 3. Use the sharedObservable type and do NOT parameterize more than just the container name. It may +// be tempting to pass an alerting threshold as an argument, or parameterize whether a critical +// alert is defined -- but this makes reasoning about alerts at a high level much more difficult. +// If you have a need for this, it is a strong signal you should NOT be using the shared definition +// anymore and should instead copy it and apply your modifications. // // Learn more about monitoring in https://handbook.sourcegraph.com/engineering/observability/monitoring_pillars package shared diff --git a/monitoring/monitoring/dashboards.go b/monitoring/monitoring/dashboards.go index 24914d1f57d..cbc3f9eb992 100644 --- a/monitoring/monitoring/dashboards.go +++ b/monitoring/monitoring/dashboards.go @@ -101,7 +101,7 @@ func observablePanelID(groupIndex, rowIndex, observableIndex int) uint { // primarily used in the URL, e.g. 
/-/debug/grafana/d/syntect-server/ and allows us to have // static URLs we can document like: // -// Go to https://sourcegraph.example.com/-/debug/grafana/d/syntect-server/syntect-server +// Go to https://sourcegraph.example.com/-/debug/grafana/d/syntect-server/syntect-server // // Instead of having to describe all the steps to navigate there because the UID is random. func isValidGrafanaUID(s string) bool { diff --git a/monitoring/monitoring/panel_options.go b/monitoring/monitoring/panel_options.go index a3de328aa5a..6c1cc95a781 100644 --- a/monitoring/monitoring/panel_options.go +++ b/monitoring/monitoring/panel_options.go @@ -11,9 +11,9 @@ import ( // // You can make any customization you want to a graph panel by using `ObservablePanel.With`: // -// Panel: monitoring.Panel().With(func(o monitoring.Observable, p *sdk.Panel) { -// // modify 'p.GraphPanel' or 'p.HeatmapPanel' etc. with desired changes -// }), +// Panel: monitoring.Panel().With(func(o monitoring.Observable, p *sdk.Panel) { +// // modify 'p.GraphPanel' or 'p.HeatmapPanel' etc. with desired changes +// }), // // When writing a custom `ObservablePanelOption`, keep in mind that: // @@ -33,7 +33,7 @@ import ( // `panelOptionsLibrary` that returns a `ObservablePanelOption`. The function should be // It can then be used with the `ObservablePanel.With`: // -// Panel: monitoring.Panel().With(monitoring.PanelOptions.MyCustomization), +// Panel: monitoring.Panel().With(monitoring.PanelOptions.MyCustomization), // // Using a shared prefix helps with discoverability of available options. 
type ObservablePanelOption func(Observable, *sdk.Panel) diff --git a/monitoring/monitoring/prometheus.go b/monitoring/monitoring/prometheus.go index 645223006e4..58e9ad5598d 100644 --- a/monitoring/monitoring/prometheus.go +++ b/monitoring/monitoring/prometheus.go @@ -45,7 +45,6 @@ func (r *promRule) validate() error { // see: // // https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ -// type promRulesFile struct { Groups []promGroup } diff --git a/schema/stringdata.go b/schema/stringdata.go index 73057609a24..b69b6d0d34b 100644 --- a/schema/stringdata.go +++ b/schema/stringdata.go @@ -3,81 +3,101 @@ package schema import _ "embed" // AWSCodeCommitSchemaJSON is the content of the file "aws_codecommit.schema.json". +// //go:embed aws_codecommit.schema.json var AWSCodeCommitSchemaJSON string // BatchSpecSchemaJSON is the content of the file "batch_spec.schema.json". +// //go:embed batch_spec.schema.json var BatchSpecSchemaJSON string // BitbucketCloudSchemaJSON is the content of the file "bitbucket_cloud.schema.json". +// //go:embed bitbucket_cloud.schema.json var BitbucketCloudSchemaJSON string // BitbucketServerSchemaJSON is the content of the file "bitbucket_server.schema.json". +// //go:embed bitbucket_server.schema.json var BitbucketServerSchemaJSON string // ChangesetSpecSchemaJSON is the content of the file "changeset_spec.schema.json". +// //go:embed changeset_spec.schema.json var ChangesetSpecSchemaJSON string // GerritSchemaJSON is the content of the file "gerrit.schema.json". +// //go:embed gerrit.schema.json var GerritSchemaJSON string // GitHubSchemaJSON is the content of the file "github.schema.json". +// //go:embed github.schema.json var GitHubSchemaJSON string // GitLabSchemaJSON is the content of the file "gitlab.schema.json". +// //go:embed gitlab.schema.json var GitLabSchemaJSON string // GitoliteSchemaJSON is the content of the file "gitolite.schema.json". 
+// //go:embed gitolite.schema.json var GitoliteSchemaJSON string // GoModulesSchemaJSON is the content of the file "go-modules.schema.json". +// //go:embed go-modules.schema.json var GoModulesSchemaJSON string // JVMPackagesSchemaJSON is the content of the file "jvm-packages.schema.json". +// //go:embed jvm-packages.schema.json var JVMPackagesSchemaJSON string // NpmPackagesSchemaJSON is the content of the file "npm-packages.schema.json". +// //go:embed npm-packages.schema.json var NpmPackagesSchemaJSON string // PythonPackagesSchemaJSON is the content of the file "python-packages.schema.json". +// //go:embed python-packages.schema.json var PythonPackagesSchemaJSON string // RustPackagesSchemaJSON is the content of the file "python-packages.schema.json". +// //go:embed rust-packages.schema.json var RustPackagesSchemaJSON string // OtherExternalServiceSchemaJSON is the content of the file "other_external_service.schema.json". +// //go:embed other_external_service.schema.json var OtherExternalServiceSchemaJSON string // PerforceSchemaJSON is the content of the file "perforce.schema.json". +// //go:embed perforce.schema.json var PerforceSchemaJSON string // PhabricatorSchemaJSON is the content of the file "phabricator.schema.json". +// //go:embed phabricator.schema.json var PhabricatorSchemaJSON string // PagureSchemaJSON is the content of the file "pagure.schema.json". +// //go:embed pagure.schema.json var PagureSchemaJSON string // SettingsSchemaJSON is the content of the file "settings.schema.json". +// //go:embed settings.schema.json var SettingsSchemaJSON string // SiteSchemaJSON is the content of the file "site.schema.json". +// //go:embed site.schema.json var SiteSchemaJSON string