Remove query runner (#28333)

This removes the query runner service. Followup work will remove all code around saved search notifications and update the graphql API.
This commit is contained in:
Camden Cheek 2021-11-30 10:13:20 -07:00 committed by GitHub
parent 303f76597b
commit de8ae5ee28
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
29 changed files with 1 additions and 2194 deletions

View File

@ -8,7 +8,6 @@ import (
"github.com/graph-gophers/graphql-go/relay"
"github.com/sourcegraph/sourcegraph/cmd/frontend/backend"
"github.com/sourcegraph/sourcegraph/cmd/query-runner/queryrunnerapi"
"github.com/sourcegraph/sourcegraph/internal/actor"
"github.com/sourcegraph/sourcegraph/internal/database"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
@ -134,20 +133,6 @@ func (r *schemaResolver) SavedSearches(ctx context.Context) ([]*savedSearchResol
// SendSavedSearchTestNotification triggers a test notification for the saved
// search identified by args.ID by delegating to the query-runner service.
// NOTE(review): despite the schema description, only site admins may invoke
// this mutation (see the security check below).
func (r *schemaResolver) SendSavedSearchTestNotification(ctx context.Context, args *struct {
	ID graphql.ID
}) (*EmptyResponse, error) {
	// 🚨 SECURITY: Only site admins should be able to send test notifications.
	if err := backend.CheckCurrentUserIsSiteAdmin(ctx, r.db); err != nil {
		return nil, err
	}
	// Resolve the opaque GraphQL ID to the saved search's database ID.
	id, err := unmarshalSavedSearchID(args.ID)
	if err != nil {
		return nil, err
	}
	savedSearch, err := database.SavedSearches(r.db).GetByID(ctx, id)
	if err != nil {
		return nil, err
	}
	// Fire-and-forget: the notification is sent asynchronously, detached from
	// the request's context, and any error from the client call is dropped.
	go queryrunnerapi.Client.TestNotification(context.Background(), *savedSearch)
	return &EmptyResponse{}, nil
}

View File

@ -462,18 +462,6 @@ type Mutation {
"""
logEvents(events: [Event!]): EmptyResponse
"""
Sends a test notification for the saved search. Be careful: this will send a notification (email and other
types of notifications, if configured) to all subscribers of the saved search, which could be bothersome.
Only subscribers to this saved search may perform this action.
"""
sendSavedSearchTestNotification(
"""
ID of the saved search.
"""
id: ID!
): EmptyResponse
"""
All mutations that update settings (global, organization, and user settings) are under this field.
Only the settings subject whose settings are being mutated (and site admins) may perform this mutation.

View File

@ -133,10 +133,6 @@ func NewInternalHandler(m *mux.Router, db database.DB, schema *graphql.Schema, n
m.Get(apirouter.ReposGetByName).Handler(trace.Route(handler(serveReposGetByName(db))))
m.Get(apirouter.SettingsGetForSubject).Handler(trace.Route(handler(serveSettingsGetForSubject(db))))
m.Get(apirouter.SavedQueriesListAll).Handler(trace.Route(handler(serveSavedQueriesListAll(db))))
m.Get(apirouter.SavedQueriesGetInfo).Handler(trace.Route(handler(serveSavedQueriesGetInfo(db))))
m.Get(apirouter.SavedQueriesSetInfo).Handler(trace.Route(handler(serveSavedQueriesSetInfo(db))))
m.Get(apirouter.SavedQueriesDeleteInfo).Handler(trace.Route(handler(serveSavedQueriesDeleteInfo(db))))
m.Get(apirouter.OrgsListUsers).Handler(trace.Route(handler(serveOrgsListUsers(db))))
m.Get(apirouter.OrgsGetByName).Handler(trace.Route(handler(serveOrgsGetByName(db))))
m.Get(apirouter.UsersGetByUsername).Handler(trace.Route(handler(serveUsersGetByUsername(db))))

View File

@ -157,94 +157,6 @@ func serveConfiguration(w http.ResponseWriter, r *http.Request) error {
return nil
}
// serveSavedQueriesListAll returns every saved search (across all users and
// orgs) as a JSON array of api.SavedQuerySpecAndConfig values.
func serveSavedQueriesListAll(db database.DB) func(w http.ResponseWriter, r *http.Request) error {
	return func(w http.ResponseWriter, r *http.Request) error {
		// List settings for all users, orgs, etc.
		all, err := database.SavedSearches(db).ListAll(r.Context())
		if err != nil {
			return errors.Wrap(err, "database.SavedSearches.ListAll")
		}
		out := make([]api.SavedQuerySpecAndConfig, 0, len(all))
		for _, sq := range all {
			// Derive the owning subject (user or org) for the spec.
			var spec api.SavedQueryIDSpec
			switch {
			case sq.Config.UserID != nil:
				spec = api.SavedQueryIDSpec{Subject: api.SettingsSubject{User: sq.Config.UserID}, Key: sq.Config.Key}
			case sq.Config.OrgID != nil:
				spec = api.SavedQueryIDSpec{Subject: api.SettingsSubject{Org: sq.Config.OrgID}, Key: sq.Config.Key}
			}
			out = append(out, api.SavedQuerySpecAndConfig{Spec: spec, Config: sq.Config})
		}
		if err := json.NewEncoder(w).Encode(out); err != nil {
			return errors.Wrap(err, "Encode")
		}
		return nil
	}
}
// serveSavedQueriesGetInfo decodes a query string from the request body and
// responds with the stored execution info (JSON) for that query.
func serveSavedQueriesGetInfo(db database.DB) func(w http.ResponseWriter, r *http.Request) error {
	return func(w http.ResponseWriter, r *http.Request) error {
		var q string
		if err := json.NewDecoder(r.Body).Decode(&q); err != nil {
			return errors.Wrap(err, "Decode")
		}
		info, err := database.QueryRunnerState(db).Get(r.Context(), q)
		if err != nil {
			return errors.Wrap(err, "SavedQueries.Get")
		}
		if err := json.NewEncoder(w).Encode(info); err != nil {
			return errors.Wrap(err, "Encode")
		}
		return nil
	}
}
// serveSavedQueriesSetInfo decodes an api.SavedQueryInfo from the request
// body, persists it as query-runner state, and replies with a plain "OK".
func serveSavedQueriesSetInfo(db database.DB) func(w http.ResponseWriter, r *http.Request) error {
	return func(w http.ResponseWriter, r *http.Request) error {
		var info *api.SavedQueryInfo
		if err := json.NewDecoder(r.Body).Decode(&info); err != nil {
			return errors.Wrap(err, "Decode")
		}
		state := &database.SavedQueryInfo{
			Query:        info.Query,
			LastExecuted: info.LastExecuted,
			LatestResult: info.LatestResult,
			ExecDuration: info.ExecDuration,
		}
		if err := database.QueryRunnerState(db).Set(r.Context(), state); err != nil {
			return errors.Wrap(err, "SavedQueries.Set")
		}
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("OK"))
		return nil
	}
}
// serveSavedQueriesDeleteInfo decodes a query string from the request body,
// deletes the stored query-runner state for it, and replies with "OK".
func serveSavedQueriesDeleteInfo(db database.DB) func(w http.ResponseWriter, r *http.Request) error {
	return func(w http.ResponseWriter, r *http.Request) error {
		var q string
		if err := json.NewDecoder(r.Body).Decode(&q); err != nil {
			return errors.Wrap(err, "Decode")
		}
		if err := database.QueryRunnerState(db).Delete(r.Context(), q); err != nil {
			return errors.Wrap(err, "SavedQueries.Delete")
		}
		w.WriteHeader(http.StatusOK)
		_, _ = w.Write([]byte("OK"))
		return nil
	}
}
func serveSettingsGetForSubject(db database.DB) func(w http.ResponseWriter, r *http.Request) error {
return func(w http.ResponseWriter, r *http.Request) error {
var subject api.SettingsSubject

View File

@ -26,10 +26,6 @@ const (
GitLabWebhooks = "gitlab.webhooks"
BitbucketServerWebhooks = "bitbucketServer.webhooks"
SavedQueriesListAll = "internal.saved-queries.list-all"
SavedQueriesGetInfo = "internal.saved-queries.get-info"
SavedQueriesSetInfo = "internal.saved-queries.set-info"
SavedQueriesDeleteInfo = "internal.saved-queries.delete-info"
SettingsGetForSubject = "internal.settings.get-for-subject"
OrgsListUsers = "internal.orgs.list-users"
OrgsGetByName = "internal.orgs.get-by-name"
@ -96,10 +92,6 @@ func NewInternal(base *mux.Router) *mux.Router {
base.StrictSlash(true)
// Internal API endpoints should only be served on the internal Handler
base.Path("/saved-queries/list-all").Methods("POST").Name(SavedQueriesListAll)
base.Path("/saved-queries/get-info").Methods("POST").Name(SavedQueriesGetInfo)
base.Path("/saved-queries/set-info").Methods("POST").Name(SavedQueriesSetInfo)
base.Path("/saved-queries/delete-info").Methods("POST").Name(SavedQueriesDeleteInfo)
base.Path("/settings/get-for-subject").Methods("POST").Name(SettingsGetForSubject)
base.Path("/orgs/list-users").Methods("POST").Name(OrgsListUsers)
base.Path("/orgs/get-by-name").Methods("POST").Name(OrgsGetByName)

View File

@ -624,10 +624,6 @@ func GuessSource(r *http.Request) trace.SourceType {
return trace.SourceType("searchblitz_" + match[1])
}
if userAgent == "sourcegraph/query-runner" {
return trace.SourceQueryRunner
}
return trace.SourceOther
}

View File

@ -1,19 +0,0 @@
# This Dockerfile was generated from github.com/sourcegraph/godockerize. It
# was not written by a human, and as such looks janky. As you change this
# file, please don't be scared to make it more pleasant / remove hadolint
# ignores.

# Base image: Sourcegraph's pinned Alpine 3.12, referenced by digest for
# reproducible builds.
FROM sourcegraph/alpine-3.12:116273_2021-11-12_dbac772@sha256:78995f23b1dbadb35ba4a153adecde3f309ee3763888e4172e0f8dc05c9728d3

# Build metadata injected by CI; "unknown" defaults keep local builds working.
ARG COMMIT_SHA="unknown"
ARG DATE="unknown"
ARG VERSION="unknown"

# OCI image labels exposing build provenance.
LABEL org.opencontainers.image.revision=${COMMIT_SHA}
LABEL org.opencontainers.image.created=${DATE}
LABEL org.opencontainers.image.version=${VERSION}
LABEL com.sourcegraph.github.url=https://github.com/sourcegraph/sourcegraph/commit/${COMMIT_SHA}

# Run as the unprivileged sourcegraph user provided by the base image.
USER sourcegraph
# tini runs as PID 1 to reap zombies and forward signals to query-runner.
ENTRYPOINT ["/sbin/tini", "--", "/usr/local/bin/query-runner"]
COPY query-runner /usr/local/bin/

View File

@ -1,3 +0,0 @@
# query-runner
Periodically runs saved searches, determines the difference in results, and sends notification emails. It is a singleton service by design so there must only be one replica.

View File

@ -1,169 +0,0 @@
package main
import (
"context"
"encoding/json"
"fmt"
"net/http"
"reflect"
"sync"
"github.com/cockroachdb/errors"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/cmd/query-runner/queryrunnerapi"
"github.com/sourcegraph/sourcegraph/internal/api"
)
// diffSavedQueryConfigs takes the old and new saved queries configurations.
//
// It returns maps whose keys represent the old value and value represent the
// new value, i.e. a map of the saved query in the oldList and what its new
// value is in the newList for each respective category. For deleted, the new
// value will be an empty struct. For created, the key carries the new entry's
// Spec but a zero Config (there is no old config for a created entry).
func diffSavedQueryConfigs(oldList, newList map[api.SavedQueryIDSpec]api.ConfigSavedQuery) (deleted, updated, created map[api.SavedQuerySpecAndConfig]api.SavedQuerySpecAndConfig) {
	deleted = map[api.SavedQuerySpecAndConfig]api.SavedQuerySpecAndConfig{}
	updated = map[api.SavedQuerySpecAndConfig]api.SavedQuerySpecAndConfig{}
	created = map[api.SavedQuerySpecAndConfig]api.SavedQuerySpecAndConfig{}

	// Because the api.SavedQueryIDSpec contains pointers, we should use its
	// unique string key.
	//
	// TODO(slimsag/farhan): long term: let's make these
	// api.SavedQuery Spec types more sane / remove them (in reality, this will
	// be easy to do once we move query runner to frontend later.)
	oldByKey := make(map[string]api.SavedQuerySpecAndConfig, len(oldList))
	for k, v := range oldList {
		oldByKey[k.Key] = api.SavedQuerySpecAndConfig{Spec: k, Config: v}
	}
	newByKey := make(map[string]api.SavedQuerySpecAndConfig, len(newList))
	for k, v := range newList {
		newByKey[k.Key] = api.SavedQuerySpecAndConfig{Spec: k, Config: v}
	}

	// Detect deleted entries.
	for k, oldVal := range oldByKey {
		if _, ok := newByKey[k]; !ok {
			deleted[oldVal] = api.SavedQuerySpecAndConfig{}
		}
	}

	for k, newVal := range newByKey {
		oldVal, ok := oldByKey[k]
		if !ok {
			// Created entry. BUG FIX: the previous code keyed every created
			// entry on the zero-value oldVal, so multiple entries created in
			// the same cycle collided on a single map key and all but one
			// were silently dropped. Key on the new entry's Spec (with a zero
			// Config, since there is no old config) so each created entry is
			// preserved while consumers still see an "empty" old config.
			created[api.SavedQuerySpecAndConfig{Spec: newVal.Spec}] = newVal
			continue
		}
		// Detect updated entries.
		if !reflect.DeepEqual(newVal, oldVal) {
			updated[oldVal] = newVal
		}
	}

	return deleted, updated, created
}
// sendNotificationsForCreatedOrUpdatedOrDeleted diffs the old and new saved
// query configurations and asynchronously sends subscribe/unsubscribe
// notifications for every deleted, created, and updated entry.
func sendNotificationsForCreatedOrUpdatedOrDeleted(oldList, newList map[api.SavedQueryIDSpec]api.ConfigSavedQuery) {
	deleted, updated, created := diffSavedQueryConfigs(oldList, newList)

	// notifyAll fires one goroutine per entry; failures are logged, not
	// retried, and nothing waits for the goroutines to finish.
	notifyAll := func(kind string, entries map[api.SavedQuerySpecAndConfig]api.SavedQuerySpecAndConfig) {
		for oldVal, newVal := range entries {
			oldVal := oldVal
			newVal := newVal
			go func() {
				if err := notifySavedQueryWasCreatedOrUpdated(oldVal, newVal); err != nil {
					log15.Error("Failed to handle "+kind+" saved search.", "query", oldVal.Config.Query, "error", err)
				}
			}()
		}
	}

	notifyAll("deleted", deleted)
	notifyAll("created", created)
	notifyAll("updated", updated)
}
// notifySavedQueryWasCreatedOrUpdated diffs the notification recipients of a
// saved query before (oldValue) and after (newValue) a change, then notifies
// removed recipients that they were unsubscribed and added recipients that
// they were subscribed. Per-recipient failures are logged and do not abort
// the remaining notifications.
func notifySavedQueryWasCreatedOrUpdated(oldValue, newValue api.SavedQuerySpecAndConfig) error {
	ctx := context.Background()

	oldRecipients, err := getNotificationRecipients(ctx, oldValue.Spec, oldValue.Config)
	if err != nil {
		return err
	}
	newRecipients, err := getNotificationRecipients(ctx, newValue.Spec, newValue.Config)
	if err != nil {
		return err
	}

	removedRecipients, addedRecipients := diffNotificationRecipients(oldRecipients, newRecipients)
	log15.Debug("Notifying for created/updated saved search", "removed", removedRecipients, "added", addedRecipients)

	// Unsubscribe notices go to recipients present before but not after.
	for _, removedRecipient := range removedRecipients {
		if removedRecipient.email {
			if err := emailNotifySubscribeUnsubscribe(ctx, removedRecipient, oldValue, notifyUnsubscribedTemplate); err != nil {
				log15.Error("Failed to send unsubscribed email notification.", "recipient", removedRecipient, "error", err)
			}
		}
		if removedRecipient.slack {
			if err := slackNotifyUnsubscribed(ctx, removedRecipient, oldValue); err != nil {
				log15.Error("Failed to send unsubscribed Slack notification.", "recipient", removedRecipient, "error", err)
			}
		}
	}
	// Subscribe notices go to recipients present after but not before.
	for _, addedRecipient := range addedRecipients {
		if addedRecipient.email {
			if err := emailNotifySubscribeUnsubscribe(ctx, addedRecipient, newValue, notifySubscribedTemplate); err != nil {
				log15.Error("Failed to send subscribed email notification.", "recipient", addedRecipient, "error", err)
			}
		}
		if addedRecipient.slack {
			if err := slackNotifySubscribed(ctx, addedRecipient, newValue); err != nil {
				log15.Error("Failed to send subscribed Slack notification.", "recipient", addedRecipient, "error", err)
			}
		}
	}
	return nil
}
// testNotificationMu serializes test-notification requests so concurrent
// requests do not interleave their notification sends.
var testNotificationMu sync.Mutex

// serveTestNotification handles the query-runner test-notification endpoint:
// it decodes the saved search from the request body and sends a test email
// and Slack message to every notification recipient of that saved search.
func serveTestNotification(w http.ResponseWriter, r *http.Request) {
	testNotificationMu.Lock()
	defer testNotificationMu.Unlock()

	var args *queryrunnerapi.TestNotificationArgs
	if err := json.NewDecoder(r.Body).Decode(&args); err != nil {
		writeError(w, errors.Wrap(err, "decoding JSON arguments"))
		return
	}

	recipients, err := getNotificationRecipients(r.Context(), args.SavedSearch.Spec, args.SavedSearch.Config)
	if err != nil {
		writeError(w, errors.Errorf("error computing recipients: %s", err))
		return
	}

	// The first failure aborts the whole request; earlier recipients will
	// already have been notified at that point.
	for _, recipient := range recipients {
		if err := emailNotifySubscribeUnsubscribe(r.Context(), recipient, args.SavedSearch, notifySubscribedTemplate); err != nil {
			writeError(w, errors.Errorf("error sending email notifications to %s: %s", recipient.spec, err))
			return
		}
		testNotificationAlert := fmt.Sprintf(`It worked! This is a test notification for the Sourcegraph saved search <%s|"%s">.`, searchURL(args.SavedSearch.Config.Query, utmSourceSlack), args.SavedSearch.Config.Description)
		// NOTE(review): the Slack notification uses context.Background() while
		// the email path above uses r.Context() — confirm whether detaching
		// from the request context here is intentional.
		if err := slackNotify(context.Background(), recipient,
			testNotificationAlert, args.SavedSearch.Config.SlackWebhookURL); err != nil {
			writeError(w, errors.Errorf("error sending slack notifications to %s: %s", recipient.spec, err))
			return
		}
	}
	log15.Info("saved query test notification sent", "spec", args.SavedSearch.Spec, "key", args.SavedSearch.Spec.Key)
}

View File

@ -1,26 +0,0 @@
#!/usr/bin/env bash
# Builds the query-runner Docker image.
# We want to build multiple go binaries, so we use a custom build step on CI.
cd "$(dirname "${BASH_SOURCE[0]}")"/../..
set -ex

# Build into a throwaway directory that is removed on exit.
OUTPUT=$(mktemp -d -t sgdockerbuild_XXXXXXX)
cleanup() {
  rm -rf "$OUTPUT"
}
trap cleanup EXIT

# Environment for building linux binaries
export GO111MODULE=on
export GOARCH=amd64
export GOOS=linux
export CGO_ENABLED=0

# Compile a static linux binary, stamping the version and a build timestamp
# into the internal/version package via -ldflags.
pkg="github.com/sourcegraph/sourcegraph/cmd/query-runner"
go build -trimpath -ldflags "-X github.com/sourcegraph/sourcegraph/internal/version.version=$VERSION -X github.com/sourcegraph/sourcegraph/internal/version.timestamp=$(date +%s)" -buildmode exe -tags dist -o "$OUTPUT/$(basename $pkg)" "$pkg"

# Build the image from the compiled binary, passing build metadata through.
docker build -f cmd/query-runner/Dockerfile -t "$IMAGE" "$OUTPUT" \
  --progress=plain \
  --build-arg COMMIT_SHA \
  --build-arg DATE \
  --build-arg VERSION

View File

@ -1,188 +0,0 @@
package main
import (
"context"
"fmt"
"time"
"github.com/cockroachdb/errors"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/api/internalapi"
"github.com/sourcegraph/sourcegraph/internal/txemail"
"github.com/sourcegraph/sourcegraph/internal/txemail/txtypes"
)
// canSendEmail reports whether the site is configured to send email,
// returning a descriptive error when it is not (or when the check fails).
func canSendEmail(ctx context.Context) error {
	ok, err := internalapi.Client.CanSendEmail(ctx)
	if err != nil {
		return errors.Wrap(err, "internalapi.Client.CanSendEmail")
	}
	if !ok {
		return errors.New("SMTP server not set in site configuration")
	}
	return nil
}
// emailNotify sends "new search results" emails for this notifier's saved
// search to all of its recipients. Emails are sent asynchronously with a
// one-minute timeout; failures are logged per recipient.
func (n *notifier) emailNotify(ctx context.Context) {
	if err := canSendEmail(ctx); err != nil {
		log15.Error("Failed to send email notification for saved search.", "error", err)
		return
	}
	// Send tx emails asynchronously.
	go func() {
		// Detach from the caller's context so in-flight sends are not
		// cancelled with it; bound the work to one minute instead.
		ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
		defer cancel()
		for _, recipient := range n.recipients {
			// Phrase ownership relative to the recipient: "your" for the
			// recipient's own user-owned search, "your organization's" for
			// org-owned searches, otherwise "the".
			ownership := "the" // example: "new search results have been found for {{.Ownership}} saved search"
			if n.spec.Subject.User != nil && *n.spec.Subject.User == recipient.spec.userID {
				ownership = "your"
			}
			if n.spec.Subject.Org != nil {
				ownership = "your organization's"
			}
			// Pluralize "result" unless exactly one result was found.
			plural := ""
			if n.results.Data.Search.Results.ApproximateResultCount != "1" {
				plural = "s"
			}
			if err := sendEmail(ctx, recipient.spec.userID, "results", newSearchResultsEmailTemplates, struct {
				URL                    string
				SavedSearchPageURL     string
				Description            string
				Query                  string
				ApproximateResultCount string
				Ownership              string
				PluralResults          string
			}{
				URL:                    searchURL(n.newQuery, utmSourceEmail),
				SavedSearchPageURL:     savedSearchListPageURL(utmSourceEmail),
				Description:            n.query.Description,
				Query:                  n.query.Query,
				ApproximateResultCount: n.results.Data.Search.Results.ApproximateResultCount,
				Ownership:              ownership,
				PluralResults:          plural,
			}); err != nil {
				log15.Error("Failed to send email notification for new saved search results.", "userID", recipient.spec.userID, "error", err)
			}
		}
	}()
}
// newSearchResultsEmailTemplates renders the "new results found" notification
// email in plain-text and HTML form; validated once at package init.
var newSearchResultsEmailTemplates = txemail.MustValidate(txtypes.Templates{
	Subject: `[{{.ApproximateResultCount}} new result{{.PluralResults}}] {{.Description}}`,
	Text: `
{{.ApproximateResultCount}} new search result{{.PluralResults}} found for {{.Ownership}} saved search:
"{{.Description}}"
View the new result{{.PluralResults}} on Sourcegraph: {{.URL}}
`,
	HTML: `
<strong>{{.ApproximateResultCount}}</strong> new search result{{.PluralResults}} found for {{.Ownership}} saved search:
<p style="padding-left: 16px">&quot;{{.Description}}&quot;</p>
<p><a href="{{.URL}}">View the new result{{.PluralResults}} on Sourcegraph</a></p>
<p><a href="{{.SavedSearchPageURL}}">Edit your saved searches on Sourcegraph</a></p>
`,
})
// emailNotifySubscribeUnsubscribe emails a single recipient that they were
// subscribed to (notifySubscribedTemplate) or unsubscribed from
// (notifyUnsubscribedTemplate) a saved search. Recipients without email
// notifications enabled are skipped silently.
func emailNotifySubscribeUnsubscribe(ctx context.Context, recipient *recipient, query api.SavedQuerySpecAndConfig, template txtypes.Templates) error {
	if !recipient.email {
		return nil
	}
	if err := canSendEmail(ctx); err != nil {
		return err
	}

	// Derive the event-log type from which template was passed in.
	var eventType string
	switch {
	case template == notifySubscribedTemplate:
		eventType = "subscribed"
	case template == notifyUnsubscribedTemplate:
		eventType = "unsubscribed"
	default:
		eventType = "unknown"
	}

	// NOTE(review): this replaces the incoming ctx with a detached one-minute
	// context (only the canSendEmail check above used the caller's ctx) —
	// confirm the detachment is intentional.
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	// Phrase ownership relative to the recipient.
	ownership := "the" // example: "new search results have been found for {{.Ownership}} saved search"
	if query.Spec.Subject.User != nil && *query.Spec.Subject.User == recipient.spec.userID {
		ownership = "your"
	}
	if query.Spec.Subject.Org != nil {
		ownership = "your organization's"
	}
	return sendEmail(ctx, recipient.spec.userID, eventType, template, struct {
		Ownership   string
		Description string
	}{
		Ownership:   ownership,
		Description: query.Config.Description,
	})
}
// sendEmail delivers a templated transactional email to the given user's
// email address and records a SavedSearchEmailNotificationSent event.
func sendEmail(ctx context.Context, userID int32, eventType string, template txtypes.Templates, data interface{}) error {
	addr, err := internalapi.Client.UserEmailsGetEmail(ctx, userID)
	if err != nil {
		return errors.Wrap(err, fmt.Sprintf("internalapi.Client.UserEmailsGetEmail for userID=%d", userID))
	}
	if addr == nil {
		return errors.Errorf("unable to send email to user ID %d with unknown email address", userID)
	}
	msg := txtypes.Message{
		To:       []string{*addr},
		Template: template,
		Data:     data,
	}
	if err := internalapi.Client.SendEmail(ctx, msg); err != nil {
		return errors.Wrap(err, fmt.Sprintf("internalapi.Client.SendEmail to email=%q userID=%d", *addr, userID))
	}
	logEvent(userID, "SavedSearchEmailNotificationSent", eventType)
	return nil
}
// notifySubscribedTemplate is the email sent to a recipient when they start
// receiving notifications for a saved search.
var notifySubscribedTemplate = txemail.MustValidate(txtypes.Templates{
	Subject: `[Subscribed] {{.Description}}`,
	Text: `
You are now receiving notifications for {{.Ownership}} saved search:
"{{.Description}}"
When new search results become available, we will notify you.
`,
	HTML: `
<p>You are now receiving notifications for {{.Ownership}} saved search:</p>
<p style="padding-left: 16px">&quot;{{.Description}}&quot;</p>
<p>When new search results become available, we will notify you.</p>
`,
})

// notifyUnsubscribedTemplate is the email sent to a recipient when they stop
// receiving notifications for a saved search.
var notifyUnsubscribedTemplate = txemail.MustValidate(txtypes.Templates{
	Subject: `[Unsubscribed] {{.Description}}`,
	Text: `
You will no longer receive notifications for {{.Ownership}} saved search:
"{{.Description}}"
(either you were removed as a person to notify, or the saved search was deleted)
`,
	HTML: `
<p>You will no longer receive notifications for {{.Ownership}} saved search:</p>
<p style="padding-left: 16px">&quot;{{.Description}}&quot;</p>
<p>(either you were removed as a person to notify, or the saved search was deleted)</p>
`,
})

View File

@ -1,206 +0,0 @@
package main
import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"log"
	"net/http"
	"net/url"
	"runtime"
	"time"

	"github.com/cockroachdb/errors"

	"github.com/sourcegraph/sourcegraph/internal/api"
	"github.com/sourcegraph/sourcegraph/internal/api/internalapi"
	"github.com/sourcegraph/sourcegraph/internal/httpcli"
)
// graphQLQuery is the JSON body of a GraphQL request sent to the frontend's
// internal GraphQL endpoint.
type graphQLQuery struct {
	Query     string      `json:"query"`
	Variables interface{} `json:"variables"`
}
const gqlSearchQuery = `query Search(
$query: String!,
) {
search(query: $query) {
results {
approximateResultCount
limitHit
cloning { name }
timedout { name }
results {
__typename
... on FileMatch {
limitHit
lineMatches {
preview
lineNumber
offsetAndLengths
}
}
... on CommitSearchResult {
refs {
name
displayName
prefix
repository {
name
}
}
sourceRefs {
name
displayName
prefix
repository {
name
}
}
messagePreview {
value
highlights {
line
character
length
}
}
diffPreview {
value
highlights {
line
character
length
}
}
commit {
repository {
name
}
oid
abbreviatedOID
author {
person {
displayName
avatarURL
}
date
}
message
}
}
}
alert {
title
description
proposedQueries {
description
query
}
}
}
}
}`
// gqlSearchVars are the variables for gqlSearchQuery.
type gqlSearchVars struct {
	Query string `json:"query"`
}

// gqlSearchResponse mirrors the subset of the GraphQL search response this
// service consumes. Results holds raw decoded JSON objects whose shape
// depends on each result's __typename (see extractTime).
type gqlSearchResponse struct {
	Data struct {
		Search struct {
			Results struct {
				ApproximateResultCount string
				Cloning                []*api.Repo
				Timedout               []*api.Repo
				Results                []interface{}
			}
		}
	}
	Errors []interface{}
}
// search runs the given search query against the frontend's internal GraphQL
// endpoint and decodes the response. A response containing GraphQL errors is
// returned alongside a non-nil error.
func search(ctx context.Context, query string) (*gqlSearchResponse, error) {
	var buf bytes.Buffer
	err := json.NewEncoder(&buf).Encode(graphQLQuery{
		Query:     gqlSearchQuery,
		Variables: gqlSearchVars{Query: query},
	})
	if err != nil {
		return nil, errors.Wrap(err, "Encode")
	}
	url, err := gqlURL("QueryRunnerSearch")
	if err != nil {
		return nil, errors.Wrap(err, "constructing frontend URL")
	}
	req, err := http.NewRequest("POST", url, &buf)
	if err != nil {
		return nil, errors.Wrap(err, "Post")
	}
	req.Header.Set("Content-Type", "application/json")
	// The User-Agent lets the frontend attribute this traffic to query-runner.
	req.Header.Set("User-Agent", "sourcegraph/query-runner")
	resp, err := httpcli.InternalDoer.Do(req.WithContext(ctx))
	if err != nil {
		return nil, errors.Wrap(err, "Post")
	}
	defer resp.Body.Close()
	// NOTE(review): the HTTP status code is never checked; a non-2xx response
	// is only surfaced if its body fails to decode — confirm that is
	// acceptable for this internal endpoint.
	var res *gqlSearchResponse
	if err := json.NewDecoder(resp.Body).Decode(&res); err != nil {
		return nil, errors.Wrap(err, "Decode")
	}
	if len(res.Errors) > 0 {
		return res, errors.Errorf("graphql: errors: %v", res.Errors)
	}
	return res, nil
}
// gqlURL builds the frontend internal GraphQL endpoint URL, attaching
// queryName as the raw query string so requests are identifiable in logs.
func gqlURL(queryName string) (string, error) {
	base, err := url.Parse(internalapi.Client.URL)
	if err != nil {
		return "", err
	}
	base.Path = "/.internal/graphql"
	base.RawQuery = queryName
	return base.String(), nil
}
// extractTime extracts the commit authorship time from the given search
// result, which is expected to be a decoded JSON object for a
// CommitSearchResult.
//
// It uses recover because it assumes the data structure's shape instead of
// checking every assertion; a panic is converted into a logged error.
func extractTime(result interface{}) (t *time.Time, err error) {
	defer func() {
		if r := recover(); r != nil {
			// Same as net/http
			const size = 64 << 10
			buf := make([]byte, size)
			buf = buf[:runtime.Stack(buf, false)]
			log.Printf("failed to extract time from search result: %v\n%s", r, buf)
			// BUG FIX: only report failure when a panic actually occurred.
			// Previously this assignment ran on every return path, so even
			// successful extractions returned a non-nil error.
			err = fmt.Errorf("failed to extract time from search result")
		}
	}()

	m := result.(map[string]interface{})
	typeName := m["__typename"].(string)
	switch typeName {
	case "CommitSearchResult":
		commit := m["commit"].(map[string]interface{})
		author := commit["author"].(map[string]interface{})
		date := author["date"].(string)

		// For now, our graphql API commit authorship date is in Go default time format.
		const goTimeFormat = "2006-01-02 15:04:05.999999999 -0700 MST"
		// BUG FIX: time.Parse takes (layout, value); the arguments were
		// previously swapped, so parsing always failed.
		parsed, err := time.Parse(goTimeFormat, date)
		if err != nil {
			return nil, err
		}
		return &parsed, nil
	default:
		return nil, fmt.Errorf("unexpected result __typename %q", typeName)
	}
}

View File

@ -1,372 +0,0 @@
// Command query-runner runs saved queries and notifies subscribers when the queries have new results.
package main
import (
"context"
"encoding/json"
"fmt"
"log"
"net"
"net/http"
"net/url"
"os"
"os/signal"
"strings"
"syscall"
"time"
"github.com/cockroachdb/errors"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/cmd/query-runner/queryrunnerapi"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/api/internalapi"
"github.com/sourcegraph/sourcegraph/internal/conf"
"github.com/sourcegraph/sourcegraph/internal/debugserver"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/eventlogger"
"github.com/sourcegraph/sourcegraph/internal/logging"
"github.com/sourcegraph/sourcegraph/internal/sentry"
"github.com/sourcegraph/sourcegraph/internal/trace"
"github.com/sourcegraph/sourcegraph/internal/tracer"
)
// forceRunInterval, when set, overrides the adaptive per-query scheduling
// interval; it is parsed as a time.Duration in executorT.run.
var forceRunInterval = env.Get("FORCE_RUN_INTERVAL", "", "Force an interval to run saved queries at, instead of assuming query execution time * 30 (query that takes 2s to run, runs every 60s)")

// port is the HTTP port the query-runner service listens on.
const port = "3183"
// main starts the query-runner service: it initializes configuration,
// logging, tracing, and the debug server, launches the saved-query executor
// loop in the background, and serves the HTTP API (currently only the
// test-notification endpoint) on port 3183.
func main() {
	env.Lock()
	env.HandleHelpFlag()
	conf.Init()
	logging.Init()
	tracer.Init(conf.DefaultClient())
	sentry.Init(conf.DefaultClient())
	trace.Init()

	// Exit cleanly on SIGINT/SIGHUP.
	go func() {
		c := make(chan os.Signal, 1)
		signal.Notify(c, syscall.SIGINT, syscall.SIGHUP)
		<-c
		os.Exit(0)
	}()

	// Ready immediately
	ready := make(chan struct{})
	close(ready)
	go debugserver.NewServerRoutine(ready).Start()

	ctx := context.Background()

	http.HandleFunc(queryrunnerapi.PathTestNotification, serveTestNotification)

	// Run the executor loop in the background; it loops forever, so an error
	// return here is unexpected and only logged.
	go func() {
		err := executor.run(ctx)
		if err != nil {
			log15.Error("executor: failed to run due to error", "error", err)
		}
	}()

	// In insecure dev mode, bind to localhost only.
	host := ""
	if env.InsecureDev {
		host = "127.0.0.1"
	}
	addr := net.JoinHostPort(host, port)
	log15.Info("query-runner: listening", "addr", addr)
	s := http.Server{
		ReadTimeout:  75 * time.Second,
		WriteTimeout: 10 * time.Minute,
		Addr:         addr,
		Handler:      http.DefaultServeMux,
	}
	log.Fatalf("Fatal error serving: %s", s.ListenAndServe())
}
// writeError responds with HTTP 500 and a JSON-encoded ErrorResponse
// describing err; encoding failures are logged but otherwise ignored.
func writeError(w http.ResponseWriter, err error) {
	w.WriteHeader(http.StatusInternalServerError)
	resp := &queryrunnerapi.ErrorResponse{Message: err.Error()}
	if encErr := json.NewEncoder(w).Encode(resp); encErr != nil {
		log15.Error("error encoding HTTP error response", "error", encErr.Error(), "writing_error", err.Error())
	}
}
// Useful for debugging e.g. email and slack notifications. Set it to true and
// it will send one notification on server startup, effectively.
var debugPretendSavedQueryResultsExist = false

// executor is the process-wide saved-query executor; query-runner is a
// singleton service by design, so there is exactly one.
var executor = &executorT{}

// executorT runs saved queries on an adaptive schedule.
type executorT struct {
	// forceRunInterval, when non-nil, overrides the adaptive per-query run
	// interval; it is populated from FORCE_RUN_INTERVAL in run.
	forceRunInterval *time.Duration
}
// run is the executor's main loop: it repeatedly fetches the full list of
// saved queries, sends subscribe/unsubscribe notifications for configuration
// changes since the previous iteration, and runs each query that is due. It
// loops forever; the only early return (nil) is a FORCE_RUN_INTERVAL parse
// error.
func (e *executorT) run(ctx context.Context) error {
	// Parse FORCE_RUN_INTERVAL value.
	if forceRunInterval != "" {
		forceRunInterval, err := time.ParseDuration(forceRunInterval)
		if err != nil {
			log15.Error("executor: failed to parse FORCE_RUN_INTERVAL", "error", err)
			return nil
		}
		e.forceRunInterval = &forceRunInterval
	}

	// TODO(slimsag): Make gitserver notify us about repositories being updated
	// as we could avoid executing queries if repositories haven't updated
	// (impossible for new results to exist).
	var oldList map[api.SavedQueryIDSpec]api.ConfigSavedQuery
	for {
		allSavedQueries, err := internalapi.Client.SavedQueriesListAll(context.Background())
		if err != nil {
			// NOTE(review): this log message is missing its closing paren.
			log15.Error("executor: error fetching saved queries list (trying again in 5s", "error", err)
			time.Sleep(5 * time.Second)
			continue
		}
		// Notify subscribers about saved searches created, updated, or
		// deleted since the previous successful iteration.
		if oldList != nil {
			sendNotificationsForCreatedOrUpdatedOrDeleted(oldList, allSavedQueries)
		}
		oldList = allSavedQueries

		start := time.Now()
		for spec, config := range allSavedQueries {
			err := e.runQuery(ctx, spec, config)
			if err != nil {
				log15.Error("executor: failed to run query", "error", err, "query_description", config.Description)
			}
		}

		// If running all the queries didn't take very long (due to them
		// erroring out quickly, or if we had zero to run, or if they very
		// quickly produced zero results), then sleep for a few seconds to
		// prevent busy waiting and needlessly polling the DB.
		if time.Since(start) < time.Second {
			time.Sleep(5 * time.Second)
		}
	}
}
// runQuery runs the given query if an appropriate amount of time has elapsed
// since it last ran. Queries with no subscribers, or without type:diff /
// type:commit, are skipped. Execution info is persisted even when the search
// fails, and notifications for new results are sent in the background.
func (e *executorT) runQuery(ctx context.Context, spec api.SavedQueryIDSpec, query api.ConfigSavedQuery) error {
	if !query.Notify && !query.NotifySlack {
		// No need to run this query because there will be nobody to notify.
		return nil
	}
	if !strings.Contains(query.Query, "type:diff") && !strings.Contains(query.Query, "type:commit") {
		// TODO(slimsag): we temporarily do not support non-commit search
		// queries, since those do not support the after:"time" operator.
		return nil
	}

	info, err := internalapi.Client.SavedQueriesGetInfo(ctx, query.Query)
	if err != nil {
		return errors.Wrap(err, "SavedQueriesGetInfo")
	}

	// If the saved query was executed recently in the past, then skip it to
	// avoid putting too much pressure on searcher/gitserver.
	if info != nil {
		// We assume a run interval of 30x that which it takes to execute the
		// query. For example, a query which takes 2s to execute will run (2s*30)
		// every minute.
		//
		// Additionally, in case queries run very quickly (e.g. our after:
		// queries with no results often return in ~15ms), we impose a minimum
		// run interval of 10s.
		runInterval := info.ExecDuration * 30
		if runInterval < 10*time.Second {
			runInterval = 10 * time.Second
		}
		if e.forceRunInterval != nil {
			runInterval = *e.forceRunInterval
		}
		if time.Since(info.LastExecuted) < runInterval {
			return nil // too early to run the query
		}
	}

	// Construct a new query which finds search results introduced after the
	// last time we queried.
	var latestKnownResult time.Time
	if info != nil {
		latestKnownResult = info.LatestResult
	} else {
		// We've never executed this search query before, so use the current
		// time. We'll most certainly find nothing, which is okay.
		latestKnownResult = time.Now()
	}
	afterTime := latestKnownResult.UTC().Format(time.RFC3339)
	newQuery := strings.Join([]string{query.Query, fmt.Sprintf(`after:"%s"`, afterTime)}, " ")
	if debugPretendSavedQueryResultsExist {
		debugPretendSavedQueryResultsExist = false
		newQuery = query.Query
	}

	// Perform the search and mark the saved query as having been executed in
	// the database. We do this regardless of whether or not the search query
	// fails in order to avoid e.g. failed saved queries from executing
	// constantly and potentially causing harm to the system. We'll retry at
	// our normal interval, regardless of errors.
	v, execDuration, searchErr := performSearch(ctx, newQuery)
	if err := internalapi.Client.SavedQueriesSetInfo(ctx, &api.SavedQueryInfo{
		Query:        query.Query,
		LastExecuted: time.Now(),
		LatestResult: latestResultTime(info, v, searchErr),
		ExecDuration: execDuration,
	}); err != nil {
		return errors.Wrap(err, "SavedQueriesSetInfo")
	}
	if searchErr != nil {
		return searchErr
	}

	// Send notifications for new search results in a separate goroutine, so
	// that we don't block other search queries from running in sequence (which
	// is done intentionally, to ensure no overloading of searcher/gitserver).
	go func() {
		if err := notify(context.Background(), spec, query, newQuery, v); err != nil {
			log15.Error("executor: failed to send notifications", "error", err)
		}
	}()
	return nil
}
// performSearch executes the given search query and returns the response,
// the duration of the (final) search attempt, and an error.
//
// When the search returns zero results only because some repositories are
// still cloning or timed out, it retries a bounded number of times,
// sleeping between attempts. The previous version declared named results
// (v, execDuration, err) that were fully shadowed by := inside the loop;
// unnamed results avoid that confusion.
func performSearch(ctx context.Context, query string) (*gqlSearchResponse, time.Duration, error) {
	const (
		maxRetries = 5               // retry budget when results are blocked on cloning/timeouts
		retryDelay = 5 * time.Second // pause between attempts
	)
	attempts := 0
	for {
		// Query for search results.
		start := time.Now()
		v, err := search(ctx, query)
		execDuration := time.Since(start)
		if err != nil {
			return nil, execDuration, errors.Wrap(err, "search")
		}
		if len(v.Data.Search.Results.Results) > 0 {
			return v, execDuration, nil // We have at least some search results, so we're done.
		}
		cloning := len(v.Data.Search.Results.Cloning)
		timedout := len(v.Data.Search.Results.Timedout)
		if cloning == 0 && timedout == 0 {
			return v, execDuration, nil // zero results, but no cloning or timed out repos. No point in retrying.
		}
		if attempts > maxRetries {
			return nil, execDuration, errors.Errorf("found 0 results due to %d cloning %d timedout repos", cloning, timedout)
		}
		// We didn't find any search results. Some repos are cloning or timed
		// out, so try again in a few seconds.
		attempts++
		log15.Warn("executor: failed to run query found 0 search results due to cloning or timed out repos (retrying in 5s)", "cloning", cloning, "timedout", timedout, "query", query)
		time.Sleep(retryDelay)
	}
}
// latestResultTime determines the timestamp of the newest known search
// result, falling back to the previously recorded time (or "now") when the
// search failed or produced nothing.
func latestResultTime(prevInfo *api.SavedQueryInfo, v *gqlSearchResponse, searchErr error) time.Time {
	// The short-circuit on searchErr matters: v must not be dereferenced
	// when the search failed.
	if searchErr != nil || len(v.Data.Search.Results.Results) == 0 {
		// Error performing the search, or there were no results. Assume the
		// previous info's result time.
		if prevInfo == nil {
			return time.Now()
		}
		return prevInfo.LatestResult
	}
	// Results are ordered chronologically, so first result is the latest.
	latest, err := extractTime(v.Data.Search.Results.Results[0])
	if err != nil {
		// Error already logged by extractTime.
		return time.Now()
	}
	return *latest
}
// externalURL caches this instance's externally-accessible base URL; it is
// populated lazily by sourcegraphURL on first use.
var externalURL *url.URL
// notify handles sending notifications for new search results.
//
// It resolves the recipients configured for the saved search and then
// dispatches Slack and email notifications. Returns nil immediately when
// there are no new results.
func notify(ctx context.Context, spec api.SavedQueryIDSpec, query api.ConfigSavedQuery, newQuery string, results *gqlSearchResponse) error {
	if len(results.Data.Search.Results.Results) == 0 {
		return nil
	}
	log15.Info("sending notifications", "new_results", len(results.Data.Search.Results.Results), "description", query.Description)
	// Determine which users to notify.
	recipients, err := getNotificationRecipients(ctx, spec, query)
	if err != nil {
		return err
	}
	// Bundle everything the notification channels need.
	n := &notifier{
		spec:       spec,
		query:      query,
		newQuery:   newQuery,
		results:    results,
		recipients: recipients,
	}
	// Send Slack and email notifications.
	n.slackNotify(ctx)
	n.emailNotify(ctx)
	return nil
}
// notifier carries everything needed to deliver notifications about new
// results for a single saved search run.
type notifier struct {
	spec       api.SavedQueryIDSpec // identity of the saved search (owner subject + key)
	query      api.ConfigSavedQuery // the saved search's configuration
	newQuery   string               // the query actually executed (may carry an after:"..." filter)
	results    *gqlSearchResponse   // search response containing the new results
	recipients recipients           // who to notify, and on which channels
}
// utm_source values used to attribute clicks on notification links back to
// the channel that delivered them.
const (
	utmSourceEmail = "saved-search-email"
	utmSourceSlack = "saved-search-slack"
)
// searchURL returns an absolute URL to the search page for the given
// query, tagged with the given utm_source for click attribution.
func searchURL(query, utmSource string) string {
	const searchPath = "search"
	return sourcegraphURL(searchPath, query, utmSource)
}
// savedSearchListPageURL returns an absolute URL to the user's saved
// searches page, tagged with the given utm_source for click attribution.
func savedSearchListPageURL(utmSource string) string {
	const listPath = "user/searches"
	return sourcegraphURL(listPath, "", utmSource)
}
// sourcegraphURL builds an absolute URL on this Sourcegraph instance for
// the given path, optionally carrying a search query (?q=...) and always
// tagging the link with a utm_source so clicks can be attributed.
//
// The instance's external URL is fetched from the frontend on first use
// and cached in the package-level externalURL variable. An empty string is
// returned (with an error logged) when the external URL cannot be
// determined.
//
// NOTE(review): the lazy initialization of externalURL is not guarded by a
// lock; confirm this is never called from concurrent goroutines before
// relying on it.
func sourcegraphURL(path, query, utmSource string) string {
	if externalURL == nil {
		// Determine the external URL.
		externalURLStr, err := internalapi.Client.ExternalURL(context.Background())
		if err != nil {
			// log15 requires alternating key/value context pairs after the
			// message; a bare error argument would be mis-rendered.
			log15.Error("failed to get ExternalURL", "error", err)
			return ""
		}
		externalURL, err = url.Parse(externalURLStr)
		if err != nil {
			log15.Error("failed to parse ExternalURL", "error", err)
			return ""
		}
	}
	// Construct URL to the search query.
	u := externalURL.ResolveReference(&url.URL{Path: path})
	q := u.Query()
	if query != "" {
		q.Set("q", query)
	}
	q.Set("utm_source", utmSource)
	u.RawQuery = q.Encode()
	return u.String()
}
// logEvent records a user event of the given name, attaching the event
// type as JSON metadata.
func logEvent(userID int32, eventName, eventType string) {
	payload := map[string]string{"event_type": eventType}
	// Marshaling a map[string]string cannot fail, so the error is ignored.
	contents, _ := json.Marshal(payload)
	eventlogger.LogEvent(userID, eventName, json.RawMessage(contents))
}

View File

@ -1,156 +0,0 @@
package main
import (
"context"
"fmt"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/api/internalapi"
)
// recipientSpec identifies a recipient of a saved search notification.
// Exactly one of its fields is nonzero.
type recipientSpec struct {
	userID, orgID int32
}

// String renders the spec as "user <id>" or "org <id>" for logs and error
// messages.
func (r recipientSpec) String() string {
	if r.userID == 0 {
		return fmt.Sprintf("org %d", r.orgID)
	}
	return fmt.Sprintf("user %d", r.userID)
}
// recipient describes a recipient of a saved search notification and the type of notifications
// they're configured to receive.
type recipient struct {
	spec  recipientSpec // the recipient's identity
	email bool          // send an email to the recipient
	slack bool          // post a Slack message to the recipient
}

// String renders the recipient and its enabled channels, e.g.
// "{user 1 email:true slack:false}".
func (r *recipient) String() string {
	return fmt.Sprintf("{%s email:%v slack:%v}", r.spec, r.email, r.slack)
}
// getNotificationRecipients retrieves the list of recipients who should
// receive notifications for events related to the saved search. A
// user-owned search notifies just the owner; an org-owned search emails
// every org member (when email notifications are enabled) and posts to the
// org's Slack channel.
func getNotificationRecipients(ctx context.Context, spec api.SavedQueryIDSpec, query api.ConfigSavedQuery) ([]*recipient, error) {
	var rs recipients

	if user := spec.Subject.User; user != nil {
		// Saved search owned by a user: notify only them.
		rs.add(recipient{
			spec:  recipientSpec{userID: *user},
			email: query.Notify,
			slack: query.NotifySlack,
		})
		return rs, nil
	}

	if org := spec.Subject.Org; org != nil {
		if query.Notify {
			// Email all org members.
			members, err := internalapi.Client.OrgsListUsers(ctx, *org)
			if err != nil {
				return nil, err
			}
			for _, uid := range members {
				rs.add(recipient{
					spec:  recipientSpec{userID: uid},
					email: true,
				})
			}
		}
		// Slack notifications are delivered to the org itself.
		rs.add(recipient{
			spec:  recipientSpec{orgID: *org},
			slack: query.NotifySlack,
		})
	}

	return rs, nil
}
// recipients is a list of notification recipients, kept unique by
// recipientSpec via add.
type recipients []*recipient

// add adds the new recipient, merging it into an existing slice element if
// one already exists for the userID or orgID.
func (rs *recipients) add(r recipient) {
	// If the identity is already present, OR the channel flags together.
	for _, existing := range *rs {
		if existing.spec == r.spec {
			existing.email = existing.email || r.email
			existing.slack = existing.slack || r.slack
			return
		}
	}
	// Identity not seen before; append a new entry.
	*rs = append(*rs, &r)
}

// get returns the recipient with the given spec, if any, or else nil.
func (rs recipients) get(s recipientSpec) *recipient {
	for _, existing := range rs {
		if existing.spec == s {
			return existing
		}
	}
	return nil
}
// diffNotificationRecipients diffs old against new, returning the removed and added recipients. The
// same recipient identity may be returned in both the removed and added lists, if they changed the
// type of notifications they receive (e.g., unsubscribe from email, subscribe to Slack).
func diffNotificationRecipients(old, new recipients) (removed, added recipients) {
	// diff compares one identity's old and new subscription state and
	// yields per-channel deltas: "removed" carries channels enabled before
	// but not after, "added" the reverse. A nil result means no change in
	// that direction.
	diff := func(spec recipientSpec, old, new *recipient) (removed, added *recipient) {
		empty := recipient{spec: spec}
		if old == nil || *old == empty {
			// Nothing was subscribed before: everything new is an addition.
			return nil, new
		}
		if new == nil || *new == empty {
			// Everything was dropped: the old state is entirely removed.
			return old, nil
		}
		if *old == *new {
			return nil, nil
		}
		removed = &recipient{
			spec:  spec,
			email: old.email && !new.email,
			slack: old.slack && !new.slack,
		}
		// Normalize "no channels removed" to nil.
		if *removed == empty {
			removed = nil
		}
		added = &recipient{
			spec:  spec,
			email: new.email && !old.email,
			slack: new.slack && !old.slack,
		}
		// Normalize "no channels added" to nil.
		if *added == empty {
			added = nil
		}
		return removed, added
	}
	// Visit each identity exactly once, whether it appears in old, new, or
	// both lists.
	seen := map[recipientSpec]struct{}{}
	handle := func(spec recipientSpec, oldr, newr *recipient) {
		if _, seen := seen[spec]; seen {
			return
		}
		seen[spec] = struct{}{}
		removedr, addedr := diff(spec, oldr, newr)
		if removedr != nil {
			removed.add(*removedr)
		}
		if addedr != nil {
			added.add(*addedr)
		}
	}
	for _, oldr := range old {
		handle(oldr.spec, oldr, new.get(oldr.spec))
	}
	for _, newr := range new {
		handle(newr.spec, old.get(newr.spec), newr)
	}
	return removed, added
}

View File

@ -1,127 +0,0 @@
// Package queryrunnerapi implements a client for the query-runner service.
package queryrunnerapi
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/url"
"github.com/cockroachdb/errors"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/env"
"github.com/sourcegraph/sourcegraph/internal/httpcli"
)
var (
	// queryRunnerURL is the base URL of the query-runner service,
	// overridable via the QUERY_RUNNER_URL environment variable.
	queryRunnerURL = env.Get(
		"QUERY_RUNNER_URL",
		"http://query-runner",
		"URL at which the query-runner service can be reached",
	)
	// Client is the shared query-runner API client, backed by the internal
	// (intra-cluster) HTTP client.
	Client = &client{
		client: httpcli.InternalClient,
	}
)
// SubjectAndConfig pairs a settings subject (user, org, etc.) with its
// saved-query configuration.
type SubjectAndConfig struct {
	Subject api.SettingsSubject
	Config  api.PartialConfigSavedQueries
}
// ErrorResponse is the JSON body the query-runner service returns on a
// non-OK response (see client.post).
type ErrorResponse struct {
	Message string
}
// HTTP paths exposed by the query-runner service.
const (
	PathSavedQueryWasCreatedOrUpdated = "/saved-query-was-created-or-updated"
	PathSavedQueryWasDeleted          = "/saved-query-was-deleted"
	PathTestNotification              = "/test-notification"
)
// client is an HTTP client for the query-runner service's internal API.
type client struct {
	client *http.Client
}
// SavedQueryWasCreatedOrUpdatedArgs is the request body for
// PathSavedQueryWasCreatedOrUpdated.
type SavedQueryWasCreatedOrUpdatedArgs struct {
	SubjectAndConfig *SubjectAndConfig
	// NOTE(review): presumably suppresses subscribe/unsubscribe
	// notifications for this change — confirm against the query-runner
	// handler.
	DisableSubscriptionNotifications bool
}
// SavedQueryWasCreatedOrUpdated should be called whenever a saved query
// was created or updated after the server has started.
//
// NOTE(review): ctx is accepted but unused — post does not take a context,
// so the request cannot be canceled. Confirm whether that is intentional.
func (c *client) SavedQueryWasCreatedOrUpdated(
	ctx context.Context,
	subject api.SettingsSubject,
	config api.PartialConfigSavedQueries,
	disableSubscriptionNotifications bool,
) error {
	return c.post(PathSavedQueryWasCreatedOrUpdated, &SavedQueryWasCreatedOrUpdatedArgs{
		SubjectAndConfig: &SubjectAndConfig{
			Subject: subject,
			Config:  config,
		},
		DisableSubscriptionNotifications: disableSubscriptionNotifications,
	})
}
// SavedQueryWasDeletedArgs is the request body for PathSavedQueryWasDeleted.
type SavedQueryWasDeletedArgs struct {
	Spec api.SavedQueryIDSpec
	// NOTE(review): presumably suppresses unsubscribe notifications for
	// this deletion — confirm against the query-runner handler.
	DisableSubscriptionNotifications bool
}
// SavedQueryWasDeleted should be called whenever a saved query was deleted
// after the server has started.
//
// NOTE(review): ctx is accepted but unused — post does not take a context,
// so the request cannot be canceled.
func (c *client) SavedQueryWasDeleted(
	ctx context.Context,
	spec api.SavedQueryIDSpec,
	disableSubscriptionNotifications bool,
) error {
	return c.post(PathSavedQueryWasDeleted, &SavedQueryWasDeletedArgs{
		Spec:                             spec,
		DisableSubscriptionNotifications: disableSubscriptionNotifications,
	})
}
// TestNotificationArgs is the request body for PathTestNotification.
type TestNotificationArgs struct {
	SavedSearch api.SavedQuerySpecAndConfig
}
// TestNotification is called to send a test notification for a saved search. Users may perform this
// action to test that the configured notifications are working.
//
// Delivery is best-effort: failures are logged rather than returned.
func (c *client) TestNotification(ctx context.Context, savedSearch api.SavedQuerySpecAndConfig) {
	if err := c.post(PathTestNotification, &TestNotificationArgs{SavedSearch: savedSearch}); err != nil {
		// log15 requires alternating key/value context pairs after the
		// message; the previous bare err argument was mis-rendered.
		log15.Error("Unable to send test notification, POST failed.", "error", err)
	}
}
// post JSON-encodes data and POSTs it to the given path on the
// query-runner service. A 200 response yields nil; any other status is
// decoded as an ErrorResponse and surfaced as an error.
func (c *client) post(path string, data interface{}) error {
	var buf bytes.Buffer
	if err := json.NewEncoder(&buf).Encode(data); err != nil {
		return errors.Wrap(err, "Encoding request")
	}
	u, err := url.Parse(queryRunnerURL)
	if err != nil {
		return errors.Wrap(err, "Parse QUERY_RUNNER_URL")
	}
	u.Path = path
	resp, err := c.client.Post(u.String(), "application/json", &buf)
	if err != nil {
		return errors.Wrap(err, "Post "+u.String())
	}
	defer resp.Body.Close()
	if resp.StatusCode == http.StatusOK {
		return nil
	}
	// Decode into a value rather than a **ErrorResponse: with the previous
	// pointer-to-pointer form, a literal JSON "null" body decoded
	// successfully but left errResp nil, panicking on errResp.Message below.
	var errResp ErrorResponse
	if err := json.NewDecoder(resp.Body).Decode(&errResp); err != nil {
		return errors.Wrap(err, "Decoding response")
	}
	return errors.Errorf("Error from %s: %s", u.String(), errResp.Message)
}

View File

@ -1,79 +0,0 @@
package main
import (
"context"
"fmt"
"github.com/cockroachdb/errors"
"github.com/inconshreveable/log15"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/slack"
)
// slackNotify posts a "new results" message for this saved search to every
// Slack-subscribed recipient. Delivery failures are logged per recipient
// so one bad webhook does not block the others.
func (n *notifier) slackNotify(ctx context.Context) {
	// Pluralize "result" unless exactly one result was found.
	plural := "s"
	if n.results.Data.Search.Results.ApproximateResultCount == "1" {
		plural = ""
	}
	text := fmt.Sprintf(`*%s* new result%s found for saved search <%s|"%s">`,
		n.results.Data.Search.Results.ApproximateResultCount,
		plural,
		searchURL(n.newQuery, utmSourceSlack),
		n.query.Description,
	)
	for _, rcpt := range n.recipients {
		if err := slackNotify(ctx, rcpt, text, n.query.SlackWebhookURL); err != nil {
			log15.Error("Failed to post Slack notification message.", "recipient", rcpt, "text", text, "error", err)
		}
	}
	// TODO(Dan): find all users in the recipient list and log events for all of them
	logEvent(0, "SavedSearchSlackNotificationSent", "results")
}
// slackNotifySubscribed posts a confirmation message to the recipient's
// Slack channel when notifications are enabled for a saved search.
func slackNotifySubscribed(ctx context.Context, rcpt *recipient, query api.SavedQuerySpecAndConfig) error {
	text := fmt.Sprintf(`Slack notifications enabled for the saved search <%s|"%s">. Notifications will be sent here when new results are available.`,
		searchURL(query.Config.Query, utmSourceSlack),
		query.Config.Description,
	)
	err := slackNotify(ctx, rcpt, text, query.Config.SlackWebhookURL)
	if err != nil {
		return err
	}
	// TODO(Dan): find all users in the recipient list and log events for all of them
	logEvent(0, "SavedSearchSlackNotificationSent", "enabled")
	return nil
}
// slackNotifyUnsubscribed posts a final message to the recipient's Slack
// channel when notifications are disabled for a saved search.
func slackNotifyUnsubscribed(ctx context.Context, rcpt *recipient, query api.SavedQuerySpecAndConfig) error {
	text := fmt.Sprintf(`Slack notifications for the saved search <%s|"%s"> disabled.`,
		searchURL(query.Config.Query, utmSourceSlack),
		query.Config.Description,
	)
	err := slackNotify(ctx, rcpt, text, query.Config.SlackWebhookURL)
	if err != nil {
		return err
	}
	// TODO(Dan): find all users in the recipient list and log events for all of them
	logEvent(0, "SavedSearchSlackNotificationSent", "disabled")
	return nil
}
// slackNotify delivers text to the recipient's configured Slack webhook.
// Recipients not subscribed to Slack are skipped silently; a subscribed
// recipient without a webhook URL is an error.
func slackNotify(ctx context.Context, rcpt *recipient, text string, slackWebhookURL *string) error {
	if !rcpt.slack {
		return nil
	}
	if slackWebhookURL == nil || *slackWebhookURL == "" {
		return errors.Errorf("unable to send Slack notification because recipient (%s) has no Slack webhook URL configured", rcpt.spec)
	}
	payload := &slack.Payload{
		Username:    "saved-search-bot",
		IconEmoji:   ":mag:",
		UnfurlLinks: false,
		UnfurlMedia: false,
		Text:        text,
	}
	return slack.New(*slackWebhookURL).Post(ctx, payload)
}

View File

@ -24,11 +24,6 @@
targets:
# repo-updater
- host.docker.internal:6074
- labels:
job: query-runner
targets:
# query-runner
- host.docker.internal:6067
- labels:
job: zoekt-indexserver
targets:

View File

@ -24,11 +24,6 @@
targets:
# repo-updater
- 127.0.0.1:6074
- labels:
job: query-runner
targets:
# query-runner
- 127.0.0.1:6067
- labels:
job: zoekt-indexserver
targets:

View File

@ -4,7 +4,6 @@
{ "Name": "searcher", "Host": "127.0.0.1:6069" },
{ "Name": "symbols", "Host": "127.0.0.1:6071" },
{ "Name": "repo-updater", "Host": "127.0.0.1:6074" },
{ "Name": "query-runner", "Host": "127.0.0.1:6067" },
{ "Name": "worker", "Host": "127.0.0.1:6089" },
{ "Name": "precise-code-intel-worker", "Host": "127.0.0.1:6088" },
{ "Name": "executor-codeintel", "Host": "127.0.0.1:6092" },

View File

@ -2154,255 +2154,6 @@ To learn more about Sourcegraph's alerting and how to set up alerts, see [our al
<br />
## query-runner: frontend_internal_api_error_responses
<p class="subtitle">frontend-internal API error responses every 5m by route</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 2%+ frontend-internal API error responses every 5m by route for 5m0s
**Possible solutions**
- **Single-container deployments:** Check `docker logs $CONTAINER_ID` for logs starting with `query-runner` that indicate requests to the frontend service are failing.
- **Kubernetes:**
- Confirm that `kubectl get pods` shows the `frontend` pods are healthy.
- Check `kubectl logs query-runner` for logs that indicate request failures to `frontend` or `frontend-internal`.
- **Docker Compose:**
- Confirm that `docker ps` shows the `frontend-internal` container is healthy.
- Check `docker logs query-runner` for logs indicating request failures to `frontend` or `frontend-internal`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-frontend-internal-api-error-responses).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_frontend_internal_api_error_responses"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: container_cpu_usage
<p class="subtitle">container cpu usage total (1m average) across all cores by instance</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 99%+ container cpu usage total (1m average) across all cores by instance
**Possible solutions**
- **Kubernetes:** Consider increasing CPU limits in the relevant `Deployment.yaml`.
- **Docker Compose:** Consider increasing `cpus:` of the query-runner container in `docker-compose.yml`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-container-cpu-usage).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_container_cpu_usage"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: container_memory_usage
<p class="subtitle">container memory usage by instance</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 99%+ container memory usage by instance
**Possible solutions**
- **Kubernetes:** Consider increasing memory limit in relevant `Deployment.yaml`.
- **Docker Compose:** Consider increasing `memory:` of query-runner container in `docker-compose.yml`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-container-memory-usage).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_container_memory_usage"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: provisioning_container_cpu_usage_long_term
<p class="subtitle">container cpu usage total (90th percentile over 1d) across all cores by instance</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 80%+ container cpu usage total (90th percentile over 1d) across all cores by instance for 336h0m0s
**Possible solutions**
- **Kubernetes:** Consider increasing CPU limits in the `Deployment.yaml` for the query-runner service.
- **Docker Compose:** Consider increasing `cpus:` of the query-runner container in `docker-compose.yml`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-provisioning-container-cpu-usage-long-term).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_provisioning_container_cpu_usage_long_term"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: provisioning_container_memory_usage_long_term
<p class="subtitle">container memory usage (1d maximum) by instance</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 80%+ container memory usage (1d maximum) by instance for 336h0m0s
**Possible solutions**
- **Kubernetes:** Consider increasing memory limits in the `Deployment.yaml` for the query-runner service.
- **Docker Compose:** Consider increasing `memory:` of the query-runner container in `docker-compose.yml`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-provisioning-container-memory-usage-long-term).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_provisioning_container_memory_usage_long_term"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: provisioning_container_cpu_usage_short_term
<p class="subtitle">container cpu usage total (5m maximum) across all cores by instance</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 90%+ container cpu usage total (5m maximum) across all cores by instance for 30m0s
**Possible solutions**
- **Kubernetes:** Consider increasing CPU limits in the relevant `Deployment.yaml`.
- **Docker Compose:** Consider increasing `cpus:` of the query-runner container in `docker-compose.yml`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-provisioning-container-cpu-usage-short-term).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_provisioning_container_cpu_usage_short_term"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: provisioning_container_memory_usage_short_term
<p class="subtitle">container memory usage (5m maximum) by instance</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 90%+ container memory usage (5m maximum) by instance
**Possible solutions**
- **Kubernetes:** Consider increasing memory limit in relevant `Deployment.yaml`.
- **Docker Compose:** Consider increasing `memory:` of query-runner container in `docker-compose.yml`.
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-provisioning-container-memory-usage-short-term).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_provisioning_container_memory_usage_short_term"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: go_goroutines
<p class="subtitle">maximum active goroutines</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 10000+ maximum active goroutines for 10m0s
**Possible solutions**
- More help interpreting this metric is available in the [dashboards reference](./dashboards.md#query-runner-go-goroutines).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_go_goroutines"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: go_gc_duration_seconds
<p class="subtitle">maximum go garbage collection duration</p>
**Descriptions**
- <span class="badge badge-warning">warning</span> query-runner: 2s+ maximum go garbage collection duration
**Possible solutions**
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-go-gc-duration-seconds).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"warning_query-runner_go_gc_duration_seconds"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## query-runner: pods_available_percentage
<p class="subtitle">percentage pods available</p>
**Descriptions**
- <span class="badge badge-critical">critical</span> query-runner: less than 90% percentage pods available for 10m0s
**Possible solutions**
- Learn more about the related dashboard panel in the [dashboards reference](./dashboards.md#query-runner-pods-available-percentage).
- **Silence this alert:** If you are aware of this alert and want to silence notifications for it, add the following to your site configuration and set a reminder to re-evaluate the alert:
```json
"observability.silenceAlerts": [
"critical_query-runner_pods_available_percentage"
]
```
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<br />
## redis: redis-store_up
<p class="subtitle">redis-store availability</p>

View File

@ -5564,265 +5564,6 @@ Query: `sum by(app) (up{app=~".*precise-code-intel-worker"}) / count by (app) (u
<br />
## Query Runner
<p class="subtitle">Periodically runs saved searches and instructs the frontend to send out notifications.</p>
To see this dashboard, visit `/-/debug/grafana/d/query-runner/query-runner` on your Sourcegraph instance.
### Query Runner: Internal service requests
#### query-runner: frontend_internal_api_error_responses
<p class="subtitle">Frontend-internal API error responses every 5m by route</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-frontend-internal-api-error-responses) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100000` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `sum by (category)(increase(src_frontend_internal_request_duration_seconds_count{job="query-runner",code!~"2.."}[5m])) / ignoring(category) group_left sum(increase(src_frontend_internal_request_duration_seconds_count{job="query-runner"}[5m]))`
</details>
<br />
### Query Runner: Container monitoring (not available on server)
#### query-runner: container_missing
<p class="subtitle">Container missing</p>
This value is the number of times a container has not been seen for more than one minute. If you observe this
value change independent of deployment events (such as an upgrade), it could indicate pods are being OOM killed or terminated for some other reasons.
- **Kubernetes:**
- Determine if the pod was OOM killed using `kubectl describe pod query-runner` (look for `OOMKilled: true`) and, if so, consider increasing the memory limit in the relevant `Deployment.yaml`.
- Check the logs before the container restarted to see if there are `panic:` messages or similar using `kubectl logs -p query-runner`.
- **Docker Compose:**
- Determine if the pod was OOM killed using `docker inspect -f '{{json .State}}' query-runner` (look for `"OOMKilled":true`) and, if so, consider increasing the memory limit of the query-runner container in `docker-compose.yml`.
- Check the logs before the container restarted to see if there are `panic:` messages or similar using `docker logs query-runner` (note this will include logs from the previous and currently running container).
This panel has no related alerts.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100100` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `count by(name) ((time() - container_last_seen{name=~"^query-runner.*"}) > 60)`
</details>
<br />
#### query-runner: container_cpu_usage
<p class="subtitle">Container cpu usage total (1m average) across all cores by instance</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-container-cpu-usage) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100101` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `cadvisor_container_cpu_usage_percentage_total{name=~"^query-runner.*"}`
</details>
<br />
#### query-runner: container_memory_usage
<p class="subtitle">Container memory usage by instance</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-container-memory-usage) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100102` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `cadvisor_container_memory_usage_percentage_total{name=~"^query-runner.*"}`
</details>
<br />
#### query-runner: fs_io_operations
<p class="subtitle">Filesystem reads and writes rate by instance over 1h</p>
This value indicates the number of filesystem read and write operations by containers of this service.
When extremely high, this can indicate a resource usage problem, or can cause problems with the service itself, especially if high values or spikes correlate with `query-runner` issues.
This panel has no related alerts.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100103` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Core application team](https://handbook.sourcegraph.com/engineering/core-application).*</sub>
<details>
<summary>Technical details</summary>
Query: `sum by(name) (rate(container_fs_reads_total{name=~"^query-runner.*"}[1h]) + rate(container_fs_writes_total{name=~"^query-runner.*"}[1h]))`
</details>
<br />
### Query Runner: Provisioning indicators (not available on server)
#### query-runner: provisioning_container_cpu_usage_long_term
<p class="subtitle">Container cpu usage total (90th percentile over 1d) across all cores by instance</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-provisioning-container-cpu-usage-long-term) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100200` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `quantile_over_time(0.9, cadvisor_container_cpu_usage_percentage_total{name=~"^query-runner.*"}[1d])`
</details>
<br />
#### query-runner: provisioning_container_memory_usage_long_term
<p class="subtitle">Container memory usage (1d maximum) by instance</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-provisioning-container-memory-usage-long-term) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100201` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `max_over_time(cadvisor_container_memory_usage_percentage_total{name=~"^query-runner.*"}[1d])`
</details>
<br />
#### query-runner: provisioning_container_cpu_usage_short_term
<p class="subtitle">Container cpu usage total (5m maximum) across all cores by instance</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-provisioning-container-cpu-usage-short-term) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100210` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `max_over_time(cadvisor_container_cpu_usage_percentage_total{name=~"^query-runner.*"}[5m])`
</details>
<br />
#### query-runner: provisioning_container_memory_usage_short_term
<p class="subtitle">Container memory usage (5m maximum) by instance</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-provisioning-container-memory-usage-short-term) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100211` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `max_over_time(cadvisor_container_memory_usage_percentage_total{name=~"^query-runner.*"}[5m])`
</details>
<br />
### Query Runner: Golang runtime monitoring
#### query-runner: go_goroutines
<p class="subtitle">Maximum active goroutines</p>
A high value here indicates a possible goroutine leak.
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-go-goroutines) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100300` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `max by(instance) (go_goroutines{job=~".*query-runner"})`
</details>
<br />
#### query-runner: go_gc_duration_seconds
<p class="subtitle">Maximum go garbage collection duration</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-go-gc-duration-seconds) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100301` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `max by(instance) (go_gc_duration_seconds{job=~".*query-runner"})`
</details>
<br />
### Query Runner: Kubernetes monitoring (only available on Kubernetes)
#### query-runner: pods_available_percentage
<p class="subtitle">Percentage pods available</p>
Refer to the [alert solutions reference](./alert_solutions.md#query-runner-pods-available-percentage) for 1 alert related to this panel.
To see this panel, visit `/-/debug/grafana/d/query-runner/query-runner?viewPanel=100400` on your Sourcegraph instance.
<sub>*Managed by the [Sourcegraph Search team](https://handbook.sourcegraph.com/engineering/search).*</sub>
<details>
<summary>Technical details</summary>
Query: `sum by(app) (up{app=~".*query-runner"}) / count by (app) (up{app=~".*query-runner"}) * 100`
</details>
<br />
## Redis
<p class="subtitle">Metrics from both redis databases.</p>

View File

@ -64,7 +64,6 @@ This is a table of Sourcegraph backend debug ports in the two deployment context
| searcher | 6060 | 6069 |
| symbols | 6060 | 6071 |
| repo-updater | 6060 | 6074 |
| query-runner | 6060 | 6067 |
| zoekt-indexserver | 6060 | 6072 |
| zoekt-webserver | 6060 | 3070 |

View File

@ -103,11 +103,6 @@ digraph architecture {
fillcolor="#e2a8fd"
URL="https://github.com/sourcegraph/sourcegraph/tree/main/cmd/searcher"
]
query_runner [
label="query runner"
fillcolor="#aaffff"
URL="https://github.com/sourcegraph/sourcegraph/tree/main/cmd/query-runner"
]
}
subgraph cluster_code_intelligence {
@ -215,7 +210,7 @@ digraph architecture {
codeintel_worker -> {postgres, codeintel_db} [ fillcolor="#eac1c1"]
/* Internal routes */
frontend -> {searcher, symbols, query_runner, blob_store, gitserver, repo_updater, zoekt_webserver, syntect_server} [fillcolor="#7e78dc"]
frontend -> {searcher, symbols, blob_store, gitserver, repo_updater, zoekt_webserver, syntect_server} [fillcolor="#7e78dc"]
searcher -> gitserver [fillcolor="#e2a8fd"]
symbols -> gitserver [fillcolor="#c1eaea"]
zoekt_indexserver -> {frontend, gitserver} [fillcolor="#aaccff"]

View File

@ -36,40 +36,6 @@ var requestDuration = promauto.NewHistogramVec(prometheus.HistogramOpts{
Buckets: prometheus.DefBuckets,
}, []string{"category", "code"})
// SavedQueriesListAll lists all saved queries, from every user, org, etc.
func (c *internalClient) SavedQueriesListAll(ctx context.Context) (map[api.SavedQueryIDSpec]api.ConfigSavedQuery, error) {
	var specsAndConfigs []api.SavedQuerySpecAndConfig
	if err := c.postInternal(ctx, "saved-queries/list-all", nil, &specsAndConfigs); err != nil {
		return nil, err
	}
	// Re-key the flat list by saved query ID spec.
	byID := make(map[api.SavedQueryIDSpec]api.ConfigSavedQuery, len(specsAndConfigs))
	for _, sc := range specsAndConfigs {
		byID[sc.Spec] = sc.Config
	}
	return byID, nil
}
// SavedQueriesGetInfo gets the info from the DB for the given saved query. nil
// is returned if there is no existing info for the saved query.
func (c *internalClient) SavedQueriesGetInfo(ctx context.Context, query string) (*api.SavedQueryInfo, error) {
	// A nil pointer result signals "no existing info" to the caller.
	var info *api.SavedQueryInfo
	if err := c.postInternal(ctx, "saved-queries/get-info", query, &info); err != nil {
		return nil, err
	}
	return info, nil
}
// SavedQueriesSetInfo sets the info in the DB for the given query.
// The info is posted to the frontend internal API, which performs the write.
func (c *internalClient) SavedQueriesSetInfo(ctx context.Context, info *api.SavedQueryInfo) error {
	return c.postInternal(ctx, "saved-queries/set-info", info, nil)
}
// SavedQueriesDeleteInfo deletes the stored info for the given saved query
// via the frontend internal API.
func (c *internalClient) SavedQueriesDeleteInfo(ctx context.Context, query string) error {
	return c.postInternal(ctx, "saved-queries/delete-info", query, nil)
}
func (c *internalClient) SettingsGetForSubject(
ctx context.Context,
subject api.SettingsSubject,

View File

@ -1,110 +0,0 @@
package database
import (
"context"
"database/sql"
"time"
"github.com/cockroachdb/errors"
"github.com/sourcegraph/sourcegraph/internal/database/basestore"
"github.com/sourcegraph/sourcegraph/internal/database/dbutil"
)
// QueryRunnerStateStore provides access to the query_runner_state table,
// which records execution metadata for saved search queries.
type QueryRunnerStateStore struct {
	*basestore.Store
}
// QueryRunnerState instantiates and returns a new QueryRunnerStateStore backed
// by the given database handle.
func QueryRunnerState(db dbutil.DB) *QueryRunnerStateStore {
	base := basestore.NewWithDB(db, sql.TxOptions{})
	return &QueryRunnerStateStore{Store: base}
}
// QueryRunnerStateWith instantiates and returns a new QueryRunnerStateStore
// using the other store handle.
func QueryRunnerStateWith(other basestore.ShareableStore) *QueryRunnerStateStore {
	return &QueryRunnerStateStore{Store: basestore.NewWithHandle(other.Handle())}
}
// With returns a QueryRunnerStateStore that shares the given store's handle.
func (s *QueryRunnerStateStore) With(other basestore.ShareableStore) *QueryRunnerStateStore {
	return &QueryRunnerStateStore{Store: s.Store.With(other)}
}
// Transact begins a transaction and returns a QueryRunnerStateStore bound to
// it, along with any error from starting the transaction.
func (s *QueryRunnerStateStore) Transact(ctx context.Context) (*QueryRunnerStateStore, error) {
	txBase, err := s.Store.Transact(ctx)
	return &QueryRunnerStateStore{Store: txBase}, err
}
// SavedQueryInfo records execution metadata for a single saved search query,
// mirroring one row of the query_runner_state table.
type SavedQueryInfo struct {
	Query        string        // the saved search query string (row key)
	LastExecuted time.Time     // when the query was last executed
	LatestResult time.Time     // timestamp of the latest result
	ExecDuration time.Duration // duration of the last execution
}
// Get gets the saved query information for the given query. nil
// is returned if there is no existing saved query info.
func (s *QueryRunnerStateStore) Get(ctx context.Context, query string) (*SavedQueryInfo, error) {
	info := SavedQueryInfo{Query: query}
	var durationNs int64
	row := s.Handle().DB().QueryRowContext(
		ctx,
		"SELECT last_executed, latest_result, exec_duration_ns FROM query_runner_state WHERE query=$1",
		query,
	)
	switch err := row.Scan(&info.LastExecuted, &info.LatestResult, &durationNs); {
	case err == sql.ErrNoRows:
		// No row for this query: not an error, just no stored info.
		return nil, nil
	case err != nil:
		return nil, errors.Wrap(err, "QueryRow")
	}
	info.ExecDuration = time.Duration(durationNs)
	return &info, nil
}
// Set sets the saved query information for the given info.Query.
//
// It is not safe to call concurrently for the same info.Query, as it uses a
// poor man's upsert implementation.
func (s *QueryRunnerStateStore) Set(ctx context.Context, info *SavedQueryInfo) error {
	db := s.Handle().DB()
	res, err := db.ExecContext(
		ctx,
		"UPDATE query_runner_state SET last_executed=$1, latest_result=$2, exec_duration_ns=$3 WHERE query=$4",
		info.LastExecuted,
		info.LatestResult,
		int64(info.ExecDuration),
		info.Query,
	)
	if err != nil {
		return errors.Wrap(err, "UPDATE")
	}
	n, err := res.RowsAffected()
	if err != nil {
		return errors.Wrap(err, "RowsAffected")
	}
	if n > 0 {
		// The UPDATE touched an existing row; nothing left to do.
		return nil
	}
	// No row existed for this query, so insert a fresh one.
	if _, err := db.ExecContext(
		ctx,
		"INSERT INTO query_runner_state(query, last_executed, latest_result, exec_duration_ns) VALUES($1, $2, $3, $4)",
		info.Query,
		info.LastExecuted,
		info.LatestResult,
		int64(info.ExecDuration),
	); err != nil {
		return errors.Wrap(err, "INSERT")
	}
	return nil
}
// Delete removes any stored state for the given query. It is a no-op if no
// row exists.
func (s *QueryRunnerStateStore) Delete(ctx context.Context, query string) error {
	const deleteQuery = "DELETE FROM query_runner_state WHERE query=$1"
	_, err := s.Handle().DB().ExecContext(ctx, deleteQuery, query)
	return err
}

View File

@ -103,10 +103,6 @@ const (
// SourceBrowser indicates the request likely came from a web browser.
SourceBrowser SourceType = "browser"
// SourceQueryRunner indicates the request likely came from the
// query-runner service (saved searches).
SourceQueryRunner SourceType = "query-runner"
// SourceOther indicates the request likely came from a non-browser HTTP client.
SourceOther SourceType = "other"
)

View File

@ -1,23 +0,0 @@
package definitions
import (
"github.com/sourcegraph/sourcegraph/monitoring/definitions/shared"
"github.com/sourcegraph/sourcegraph/monitoring/monitoring"
)
// QueryRunner returns the monitoring container definition for the
// query-runner service.
func QueryRunner() *monitoring.Container {
	const containerName = "query-runner"

	// Standard shared dashboards, all owned by the Search team.
	groups := []monitoring.Group{
		shared.NewFrontendInternalAPIErrorResponseMonitoringGroup(containerName, monitoring.ObservableOwnerSearch, nil),
		shared.NewContainerMonitoringGroup(containerName, monitoring.ObservableOwnerSearch, nil),
		shared.NewProvisioningIndicatorsGroup(containerName, monitoring.ObservableOwnerSearch, nil),
		shared.NewGolangMonitoringGroup(containerName, monitoring.ObservableOwnerSearch, nil),
		shared.NewKubernetesMonitoringGroup(containerName, monitoring.ObservableOwnerSearch, nil),
	}

	return &monitoring.Container{
		Name:        containerName,
		Title:       "Query Runner",
		Description: "Periodically runs saved searches and instructs the frontend to send out notifications.",
		Groups:      groups,
	}
}

View File

@ -37,7 +37,6 @@ func main() {
definitions.GitHubProxy(),
definitions.Postgres(),
definitions.PreciseCodeIntelWorker(),
definitions.QueryRunner(),
definitions.Redis(),
definitions.Worker(),
definitions.RepoUpdater(),

View File

@ -26,7 +26,6 @@ env:
SEARCHER_URL: http://127.0.0.1:3181
REPO_UPDATER_URL: http://127.0.0.1:3182
REDIS_ENDPOINT: 127.0.0.1:6379
QUERY_RUNNER_URL: http://localhost:3183
SYMBOLS_URL: http://localhost:3184
SRC_SYNTECT_SERVER: http://localhost:9238
SRC_FRONTEND_INTERNAL: localhost:3090
@ -45,7 +44,6 @@ env:
{ "Name": "symbols", "Host": "127.0.0.1:6071" },
{ "Name": "repo-updater", "Host": "127.0.0.1:6074" },
{ "Name": "enterprise-repo-updater", "Host": "127.0.0.1:6074" },
{ "Name": "query-runner", "Host": "127.0.0.1:6067" },
{ "Name": "precise-code-intel-worker", "Host": "127.0.0.1:6088" },
{ "Name": "worker", "Host": "127.0.0.1:6089" },
{ "Name": "enterprise-worker", "Host": "127.0.0.1:6089" },
@ -241,19 +239,6 @@ commands:
- enterprise/internal
- enterprise/cmd/repo-updater
query-runner:
cmd: .bin/query-runner
install: |
if [ -n "$DELVE" ]; then
export GCFLAGS='all=-N -l'
fi
go build -gcflags="$GCFLAGS" -o .bin/query-runner github.com/sourcegraph/sourcegraph/cmd/query-runner
checkBinary: .bin/query-runner
watch:
- lib
- internal
- cmd/query-runner
symbols:
cmd: .bin/symbols
install: |
@ -756,7 +741,6 @@ commandsets:
- gitserver
- searcher
- symbols
- query-runner
- web
- caddy
- docsite
@ -781,7 +765,6 @@ commandsets:
- gitserver
- searcher
- symbols
- query-runner
- caddy
- docsite
- syntax-highlighter
@ -810,7 +793,6 @@ commandsets:
- gitserver
- searcher
- symbols
- query-runner
- caddy
- docsite
- syntax-highlighter
@ -837,7 +819,6 @@ commandsets:
- gitserver
- searcher
- symbols
- query-runner
- caddy
- docsite
- syntax-highlighter
@ -884,7 +865,6 @@ commandsets:
- gitserver
- searcher
- symbols
- query-runner
- caddy
- docsite
- syntax-highlighter