New Filters Panel: Fix limitHit value on filters (#59365)

Before, filters were updated individually using the filters.Add() function. This caused an issue where not all filters would show that searches were incomplete, only some of them.

Now, filters are set universally based on the value of SearchFilters.isLimitHit.
This commit is contained in:
Jason Hawk Harris 2024-01-05 20:10:43 -06:00 committed by GitHub
parent 7f1df01d62
commit 9a4f52cd42
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
28 changed files with 163 additions and 155 deletions

View File

@ -121,14 +121,16 @@ export const SearchDynamicFilter: FC<SearchDynamicFilterProps> = props => {
// presented in filters from search stream API. If the filter is in both
// places (URL and stream API) merge them to avoid duplicates in the UI
if (filterQueryFilters.length > 0 && !staticFilters) {
const mappedSelectedFilters = filterQueryFilters.map(selectedFilter => {
const mappedSelectedFilters = filterQueryFilters.map((selectedFilter): DynamicClientFilter => {
const mappedSelectedFilter = filters?.find(filter => isSameFilter(filter.value, selectedFilter))
return {
count: mappedSelectedFilter?.count ?? 0,
label: mappedSelectedFilter?.label ?? upperFirst(selectedFilter?.value?.value),
value: stringHuman([selectedFilter]),
} as DynamicClientFilter
exhaustive: mappedSelectedFilter?.exhaustive ?? false,
kind: mappedSelectedFilter?.kind ?? selectedFilter.field.value,
} as DynamicClientFilter // TODO(camdencheek): fix the typing here so I can remove the cast
})
const otherFilters = filterTypes.flatMap(
@ -214,7 +216,7 @@ const DynamicFilterItem: FC<DynamicFilterItemProps> = props => {
<span className={styles.itemText}>{renderItem ? renderItem(filter) : filter.label}</span>
{filter.count !== 0 && (
<Badge variant="secondary" className="ml-2">
{filter.count}
{filter.exhaustive ? filter.count : `${roundCount(filter.count)}+`}
</Badge>
)}
{selected && <Icon svgPath={mdiClose} aria-hidden={true} className="ml-1 flex-shrink-0" />}
@ -223,6 +225,16 @@ const DynamicFilterItem: FC<DynamicFilterItemProps> = props => {
)
}
/**
 * Rounds a result count down to the nearest "friendly" threshold for display
 * (e.g. 7345 -> 5000, shown as "5000+"). Returns 0 when the count is below
 * every threshold.
 */
function roundCount(count: number): number {
    // Display thresholds, ordered largest-first so `find` picks the
    // greatest threshold that the count meets or exceeds.
    const thresholds = [10000, 5000, 1000, 500, 100, 50, 10, 5, 1]
    const matched = thresholds.find(threshold => count >= threshold)
    return matched ?? 0
}
const isSameFilter = (filterValue: string, filter: QueryFilter): boolean => {
const constructedFilterValue = stringHuman([filter])

View File

@ -18,15 +18,15 @@ export interface DynamicClientFilter extends Filter {
}
export const SYMBOL_KIND_FILTERS: DynamicClientFilter[] = [
{ kind: 'select', label: 'Function', count: 0, limitHit: false, value: 'select:symbol.function' },
{ kind: 'select', label: 'Method', count: 0, limitHit: false, value: 'select:symbol.method' },
{ kind: 'select', label: 'Module', count: 0, limitHit: false, value: 'select:symbol.module' },
{ kind: 'select', label: 'Class', count: 0, limitHit: false, value: 'select:symbol.class' },
{ kind: 'select', label: 'Enum', count: 0, limitHit: false, value: 'select:symbol.enum' },
{ kind: 'select', label: 'Function', count: 0, exhaustive: true, value: 'select:symbol.function' },
{ kind: 'select', label: 'Method', count: 0, exhaustive: true, value: 'select:symbol.method' },
{ kind: 'select', label: 'Module', count: 0, exhaustive: true, value: 'select:symbol.module' },
{ kind: 'select', label: 'Class', count: 0, exhaustive: true, value: 'select:symbol.class' },
{ kind: 'select', label: 'Enum', count: 0, exhaustive: true, value: 'select:symbol.enum' },
]
export const COMMIT_DATE_FILTERS: DynamicClientFilter[] = [
{ kind: 'after', label: 'Last 24 hours', count: 0, limitHit: false, value: 'after:yesterday' },
{ kind: 'before', label: 'Last week', count: 0, limitHit: false, value: 'before:"1 week ago"' },
{ kind: 'before', label: 'Last month', count: 0, limitHit: false, value: 'before:"1 month ago"' },
{ kind: 'after', label: 'Last 24 hours', count: 0, exhaustive: true, value: 'after:yesterday' },
{ kind: 'before', label: 'Last week', count: 0, exhaustive: true, value: 'before:"1 week ago"' },
{ kind: 'before', label: 'Last month', count: 0, exhaustive: true, value: 'before:"1 month ago"' },
]

View File

@ -17,7 +17,7 @@ export const generateAuthorFilters = (results: SearchMatch[]): DynamicClientFilt
label: authorName,
count: commitMatches.length,
value: `author:"${authorName}"`,
limitHit: false,
exhaustive: true,
}
})
.sort((match1, match2) => match2.count - match1.count)

View File

@ -14,7 +14,7 @@ describe('FilterLink', () => {
label: 'gitlab.com/sourcegraph/sourcegraph',
value: 'repo:^gitlab\\.com/sourcegraph/sourcgreaph$',
count: 5,
limitHit: false,
exhaustive: true,
kind: 'repo',
}
@ -22,7 +22,7 @@ describe('FilterLink', () => {
label: 'github.com/microsoft/vscode',
value: 'repo:^github\\.com/microsoft/vscode$',
count: 201,
limitHit: true,
exhaustive: false,
kind: 'repo',
}
@ -30,7 +30,7 @@ describe('FilterLink', () => {
label: 'lang:go',
value: 'lang:go',
count: 500,
limitHit: true,
exhaustive: false,
kind: 'lang',
}
@ -38,7 +38,7 @@ describe('FilterLink', () => {
label: 'lang:typescript',
value: 'lang:typescript',
count: 241,
limitHit: false,
exhaustive: true,
kind: 'lang',
}
@ -46,7 +46,7 @@ describe('FilterLink', () => {
label: '-file:_test\\.go$',
value: '-file:_test\\.go$',
count: 1,
limitHit: false,
exhaustive: true,
kind: 'file',
}

View File

@ -14,7 +14,7 @@ describe('SearchSidebarSection', () => {
label: `lang:${lang}`,
value: `lang:${lang}`,
count: 10,
limitHit: true,
exhaustive: false,
kind: 'lang',
})
)

View File

@ -35,7 +35,7 @@ exports[`FilterLink > should have correct links for dynamic filters 1`] = `
<span
class="pl-2 flex-shrink-0"
>
500+
500
</span>
</button>
<button
@ -199,7 +199,7 @@ exports[`FilterLink > should have correct links for repos 1`] = `
<span
class="pl-2 flex-shrink-0"
>
201+
201
</span>
</button>
</DocumentFragment>
@ -307,7 +307,7 @@ exports[`FilterLink > should have show icons for repos on cloud 1`] = `
<span
class="pl-2 flex-shrink-0"
>
201+
201
</span>
</button>
</DocumentFragment>

View File

@ -17,7 +17,7 @@ function createRepoFilter(value: string): Filter {
value: `${value}@HEAD`,
label: value,
count: 1,
limitHit: false,
exhaustive: true,
}
}

View File

@ -156,113 +156,113 @@ export const mixedSearchStreamEvents: SearchEvent[] = [
{
type: 'filters',
data: [
{ value: 'lang:go', label: 'lang:go', count: 1092, limitHit: false, kind: 'lang' },
{ value: 'lang:go', label: 'lang:go', count: 1092, exhaustive: true, kind: 'lang' },
{
value: '-file:_test\\.go$',
label: '-file:_test\\.go$',
count: 663,
limitHit: false,
exhaustive: true,
kind: 'file',
},
{
value: 'lang:typescript',
label: 'lang:typescript',
count: 379,
limitHit: false,
exhaustive: true,
kind: 'lang',
},
{ value: 'lang:markdown', label: 'lang:markdown', count: 343, limitHit: false, kind: 'lang' },
{ value: 'lang:yaml', label: 'lang:yaml', count: 193, limitHit: false, kind: 'lang' },
{ value: 'lang:markdown', label: 'lang:markdown', count: 343, exhaustive: true, kind: 'lang' },
{ value: 'lang:yaml', label: 'lang:yaml', count: 193, exhaustive: true, kind: 'lang' },
{
value: 'repo:^gitlab\\.sgdev\\.org/sourcegraph/src-cli$',
label: 'gitlab.sgdev.org/sourcegraph/src-cli',
count: 156,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^ghe\\.sgdev\\.org/sourcegraph/gorillalabs-sparkling$',
label: 'ghe.sgdev.org/sourcegraph/gorillalabs-sparkling',
count: 145,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^gitlab\\.sgdev\\.org/sourcegraph/java-langserver$',
label: 'gitlab.sgdev.org/sourcegraph/java-langserver',
count: 142,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^gitlab\\.sgdev\\.org/sourcegraph/go-jsonschema$',
label: 'gitlab.sgdev.org/sourcegraph/go-jsonschema',
count: 130,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^gitlab\\.sgdev\\.org/aharvey/batch-change-utils$',
label: 'gitlab.sgdev.org/aharvey/batch-change-utils',
count: 125,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^gitlab\\.sgdev\\.org/sourcegraph/about$',
label: 'gitlab.sgdev.org/sourcegraph/about',
count: 125,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^ghe\\.sgdev\\.org/sourcegraph/gorilla-websocket$',
label: 'ghe.sgdev.org/sourcegraph/gorilla-websocket',
count: 123,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^github\\.com/hashicorp/go-multierror$',
label: 'github.com/hashicorp/go-multierror',
count: 121,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^gitlab\\.sgdev\\.org/sourcegraph/sourcegraph$',
label: 'gitlab.sgdev.org/sourcegraph/sourcegraph',
count: 115,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^github\\.com/sourcegraph/sourcegraph$',
label: 'github.com/sourcegraph/sourcegraph',
count: 112,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^gitlab\\.sgdev\\.org/aharvey/sourcegraph$',
label: 'gitlab.sgdev.org/aharvey/sourcegraph',
count: 109,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{
value: 'repo:^ghe\\.sgdev\\.org/sourcegraph/gorilla-mux$',
label: 'ghe.sgdev.org/sourcegraph/gorilla-mux',
count: 108,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
{ value: 'lang:java', label: 'lang:java', count: 95, limitHit: false, kind: 'lang' },
{ value: 'lang:json', label: 'lang:json', count: 77, limitHit: false, kind: 'lang' },
{ value: 'lang:graphql', label: 'lang:graphql', count: 70, limitHit: false, kind: 'lang' },
{ value: 'lang:text', label: 'lang:text', count: 50, limitHit: false, kind: 'lang' },
{ value: 'lang:clojure', label: 'lang:clojure', count: 45, limitHit: false, kind: 'lang' },
{ value: 'lang:css', label: 'lang:css', count: 32, limitHit: false, kind: 'lang' },
{ value: 'lang:java', label: 'lang:java', count: 95, exhaustive: true, kind: 'lang' },
{ value: 'lang:json', label: 'lang:json', count: 77, exhaustive: true, kind: 'lang' },
{ value: 'lang:graphql', label: 'lang:graphql', count: 70, exhaustive: true, kind: 'lang' },
{ value: 'lang:text', label: 'lang:text', count: 50, exhaustive: true, kind: 'lang' },
{ value: 'lang:clojure', label: 'lang:clojure', count: 45, exhaustive: true, kind: 'lang' },
{ value: 'lang:css', label: 'lang:css', count: 32, exhaustive: true, kind: 'lang' },
],
},
{ type: 'done', data: {} },

View File

@ -269,7 +269,7 @@ export interface Filter {
value: string
label: string
count: number
limitHit: boolean
exhaustive: boolean
kind: 'file' | 'repo' | 'lang' | 'utility' | 'select' | 'after' | 'before' | 'author'
}

View File

@ -336,13 +336,13 @@ export const SEARCH_RESULT: AggregateStreamingSearchResults = {
skipped: [],
},
filters: [
{ value: 'file:\\.yml$', label: 'YAML', count: 1, limitHit: false, kind: 'file' },
{ value: 'case:yes', label: 'Make search case sensitive', count: 0, limitHit: false, kind: 'utility' },
{ value: 'file:\\.yml$', label: 'YAML', count: 1, exhaustive: true, kind: 'file' },
{ value: 'case:yes', label: 'Make search case sensitive', count: 0, exhaustive: true, kind: 'utility' },
{
value: 'repo:^github\\.com/golang/oauth2$',
label: 'github.com/golang/oauth2',
count: 1,
limitHit: false,
exhaustive: true,
kind: 'repo',
},
],

View File

@ -89,22 +89,22 @@ const mockDefaultStreamEvents: SearchEvent[] = [
{
type: 'filters',
data: [
{ label: 'archived:yes', value: 'archived:yes', count: 5, kind: 'utility', limitHit: true },
{ label: 'fork:yes', value: 'fork:yes', count: 46, kind: 'utility', limitHit: true },
{ label: 'archived:yes', value: 'archived:yes', count: 5, kind: 'utility', exhaustive: false },
{ label: 'fork:yes', value: 'fork:yes', count: 46, kind: 'utility', exhaustive: false },
// Two repo filters to trigger the repository sidebar section
{
label: 'github.com/Algorilla/manta-ray',
value: 'repo:^github\\.com/Algorilla/manta-ray$',
count: 1,
kind: 'repo',
limitHit: true,
exhaustive: false,
},
{
label: 'github.com/Algorilla/manta-ray2',
value: 'repo:^github\\.com/Algorilla/manta-ray2$',
count: 1,
kind: 'repo',
limitHit: true,
exhaustive: false,
},
],
},

View File

@ -38,22 +38,22 @@ const mockDefaultStreamEvents: SearchEvent[] = [
{
type: 'filters',
data: [
{ label: 'archived:yes', value: 'archived:yes', count: 5, kind: 'utility', limitHit: true },
{ label: 'fork:yes', value: 'fork:yes', count: 46, kind: 'utility', limitHit: true },
{ label: 'archived:yes', value: 'archived:yes', count: 5, kind: 'utility', exhaustive: false },
{ label: 'fork:yes', value: 'fork:yes', count: 46, kind: 'utility', exhaustive: false },
// Two repo filters to trigger the repository sidebar section
{
label: 'github.com/Algorilla/manta-ray',
value: 'repo:^github\\.com/Algorilla/manta-ray$',
count: 1,
kind: 'repo',
limitHit: true,
exhaustive: false,
},
{
label: 'github.com/Algorilla/manta-ray2',
value: 'repo:^github\\.com/Algorilla/manta-ray2$',
count: 1,
kind: 'repo',
limitHit: true,
exhaustive: false,
},
],
},

View File

@ -112,7 +112,7 @@ func (c *SearchResultsResolver) Missing(ctx context.Context) ([]*RepositoryResol
}
func (c *SearchResultsResolver) Timedout(ctx context.Context) ([]*RepositoryResolver, error) {
return c.repositoryResolvers(ctx, c.repoIDsByStatus(search.RepoStatusTimedout))
return c.repositoryResolvers(ctx, c.repoIDsByStatus(search.RepoStatusTimedOut))
}
func (c *SearchResultsResolver) IndexUnavailable() bool {
@ -176,7 +176,7 @@ func (sr *SearchResultsResolver) ResultCount() int32 { return sr.MatchCount() }
func (sr *SearchResultsResolver) ApproximateResultCount() string {
count := sr.MatchCount()
if sr.LimitHit() || sr.Stats.Status.Any(search.RepoStatusCloning|search.RepoStatusTimedout) {
if sr.LimitHit() || sr.Stats.Status.Any(search.RepoStatusCloning|search.RepoStatusTimedOut) {
return fmt.Sprintf("%d+", count)
}
return strconv.Itoa(int(count))
@ -224,7 +224,8 @@ func (sf *searchFilterResolver) Count() int32 {
}
func (sf *searchFilterResolver) LimitHit() bool {
return sf.filter.IsLimitHit
// TODO(camdencheek): calculate exhaustiveness correctly
return true
}
func (sf *searchFilterResolver) Kind() string {
@ -475,7 +476,7 @@ func (r *searchResolver) Stats(ctx context.Context) (stats *searchResultsStats,
}
status := v.Stats.Status
if !status.Any(search.RepoStatusCloning) && !status.Any(search.RepoStatusTimedout) {
if !status.Any(search.RepoStatusCloning) && !status.Any(search.RepoStatusTimedOut) {
break // zero results, but no cloning or timed out repos. No point in retrying.
}
@ -483,7 +484,7 @@ func (r *searchResolver) Stats(ctx context.Context) (stats *searchResultsStats,
status.Filter(search.RepoStatusCloning, func(api.RepoID) {
cloning++
})
status.Filter(search.RepoStatusTimedout, func(api.RepoID) {
status.Filter(search.RepoStatusTimedOut, func(api.RepoID) {
timedout++
})

View File

@ -29,16 +29,16 @@ func (e *eventWriter) MatchesJSON(data []byte) error {
return e.inner.EventBytes("matches", data)
}
func (e *eventWriter) Filters(fs []*streaming.Filter) error {
func (e *eventWriter) Filters(fs []*streaming.Filter, exhaustive bool) error {
if len(fs) > 0 {
buf := make([]streamhttp.EventFilter, 0, len(fs))
for _, f := range fs {
buf = append(buf, streamhttp.EventFilter{
Value: f.Value,
Label: f.Label,
Count: f.Count,
LimitHit: f.IsLimitHit,
Kind: f.Kind,
Value: f.Value,
Label: f.Label,
Count: f.Count,
Kind: f.Kind,
Exhaustive: exhaustive,
})
}

View File

@ -709,7 +709,7 @@ func (h *eventHandler) Send(event streaming.SearchEvent) {
// Instantly send results if we have not sent any yet.
if h.first && len(event.Results) > 0 {
h.first = false
h.eventWriter.Filters(h.filters.Compute())
h.eventWriter.Filters(h.filters.Compute(), false)
h.matchesBuf.Flush()
h.logLatency()
}
@ -727,7 +727,11 @@ func (h *eventHandler) Done() {
h.progressTimer = nil
// Flush the final state
h.eventWriter.Filters(h.filters.Compute())
// TODO: make sure we actually respect timeouts
exhaustive := !h.progress.Stats.IsLimitHit &&
!h.progress.Stats.Status.Any(search.RepoStatusLimitHit) &&
!h.progress.Stats.Status.Any(search.RepoStatusTimedOut)
h.eventWriter.Filters(h.filters.Compute(), exhaustive)
h.matchesBuf.Flush()
h.eventWriter.Progress(h.progress.Final())
}
@ -751,7 +755,8 @@ func (h *eventHandler) flushTick() {
// a nil flushTimer indicates that Done() was called
if h.flushTimer != nil {
h.eventWriter.Filters(h.filters.Compute())
// TODO(camdencheek): add a `dirty` to filters so we don't send them every tick
h.eventWriter.Filters(h.filters.Compute(), false)
h.matchesBuf.Flush()
if h.progress.Dirty {
h.eventWriter.Progress(h.progress.Current())

View File

@ -15,9 +15,9 @@ func DetermineStatusForLogs(alert *search.Alert, stats streaming.Stats, err erro
return "timeout"
case err != nil:
return "error"
case stats.Status.All(search.RepoStatusTimedout) && stats.Status.Len() == len(stats.Repos):
case stats.Status.All(search.RepoStatusTimedOut) && stats.Status.Len() == len(stats.Repos):
return "timeout"
case stats.Status.Any(search.RepoStatusTimedout):
case stats.Status.Any(search.RepoStatusTimedOut):
return "partial_timeout"
case alert != nil:
return "alert"

View File

@ -59,7 +59,7 @@ func (j *alertJob) Run(ctx context.Context, clients job.RuntimeClients, stream s
// progress notifications work, but this is the third attempt at trying to
// fix this behaviour so we are accepting that.
if errors.Is(err, context.DeadlineExceeded) {
if !statsObserver.Status.Any(search.RepoStatusTimedout) {
if !statsObserver.Status.Any(search.RepoStatusTimedOut) {
usedTime := time.Since(start)
suggestTime := longer(2, usedTime)
return search.AlertForTimeout(usedTime, suggestTime, j.inputs.OriginalQuery, j.inputs.PatternType), nil

View File

@ -1138,7 +1138,7 @@ func TestRepoSubsetTextSearch(t *testing.T) {
"foo/cloning": search.RepoStatusCloning,
"foo/missing": search.RepoStatusMissing,
"foo/missing-database": search.RepoStatusMissing,
"foo/timedout": search.RepoStatusTimedout,
"foo/timedout": search.RepoStatusTimedOut,
})
// If we specify a rev and it isn't found, we fail the whole search since

View File

@ -18,7 +18,7 @@ const (
RepoStatusCloning RepoStatus = 1 << iota // could not be searched because they were still being cloned
RepoStatusMissing // could not be searched because they do not exist
RepoStatusLimitHit // searched, but have results that were not returned due to exceeded limits
RepoStatusTimedout // repos that were not searched due to timeout
RepoStatusTimedOut // repos that were not searched due to timeout
)
var repoStatusName = []struct {
@ -28,7 +28,7 @@ var repoStatusName = []struct {
{RepoStatusCloning, "cloning"},
{RepoStatusMissing, "missing"},
{RepoStatusLimitHit, "limithit"},
{RepoStatusTimedout, "timedout"},
{RepoStatusTimedOut, "timedout"},
}
func (s RepoStatus) String() string {
@ -180,7 +180,7 @@ func HandleRepoSearchResult(repoID api.RepoID, revSpecs []string, limitHit, time
} else if errcode.IsNotFound(searchErr) {
status |= RepoStatusMissing
} else if errcode.IsTimeout(searchErr) || errcode.IsTemporary(searchErr) || timedOut {
status |= RepoStatusTimedout
status |= RepoStatusTimedOut
} else if searchErr != nil {
fatalErr = searchErr
}

View File

@ -12,27 +12,27 @@ import (
func TestRepoStatusMap(t *testing.T) {
aM := map[api.RepoID]RepoStatus{
1: RepoStatusTimedout,
1: RepoStatusTimedOut,
2: RepoStatusCloning,
3: RepoStatusTimedout | RepoStatusLimitHit,
3: RepoStatusTimedOut | RepoStatusLimitHit,
4: RepoStatusLimitHit,
}
a := mkStatusMap(aM)
b := mkStatusMap(map[api.RepoID]RepoStatus{
2: RepoStatusCloning,
4: RepoStatusTimedout,
4: RepoStatusTimedOut,
5: RepoStatusMissing,
})
c := mkStatusMap(map[api.RepoID]RepoStatus{
8: RepoStatusTimedout | RepoStatusLimitHit,
9: RepoStatusTimedout,
8: RepoStatusTimedOut | RepoStatusLimitHit,
9: RepoStatusTimedOut,
})
// Get
if got, want := a.Get(10), RepoStatus(0); got != want {
t.Errorf("a.Get(10) got %s want %s", got, want)
}
if got, want := a.Get(3), RepoStatusTimedout|RepoStatusLimitHit; got != want {
if got, want := a.Get(3), RepoStatusTimedOut|RepoStatusLimitHit; got != want {
t.Errorf("a.Get(3) got %s want %s", got, want)
}
@ -45,7 +45,7 @@ func TestRepoStatusMap(t *testing.T) {
}
// All
if !c.All(RepoStatusTimedout) {
if !c.All(RepoStatusTimedOut) {
t.Error("c.All(RepoStatusTimedout) should be true")
}
if c.All(RepoStatusLimitHit) {
@ -59,7 +59,7 @@ func TestRepoStatusMap(t *testing.T) {
// Update
c.Update(9, RepoStatusLimitHit)
if got, want := c.Get(9), RepoStatusTimedout|RepoStatusLimitHit; got != want {
if got, want := c.Get(9), RepoStatusTimedOut|RepoStatusLimitHit; got != want {
t.Errorf("c.Get(9) got %s want %s", got, want)
}
@ -93,7 +93,7 @@ func TestRepoStatusMap(t *testing.T) {
t.Errorf("a.Filter(%s) diff (-want, +got):\n%s", status, d)
}
}
assertAFilter(RepoStatusTimedout, []int{1, 3})
assertAFilter(RepoStatusTimedOut, []int{1, 3})
assertAFilter(RepoStatusMissing, []int{})
// Union
@ -102,10 +102,10 @@ func TestRepoStatusMap(t *testing.T) {
b.Union(&a)
t.Logf("%s", &b)
abUnionWant := mkStatusMap(map[api.RepoID]RepoStatus{
1: RepoStatusTimedout,
1: RepoStatusTimedOut,
2: RepoStatusCloning,
3: RepoStatusTimedout | RepoStatusLimitHit,
4: RepoStatusTimedout | RepoStatusLimitHit,
3: RepoStatusTimedOut | RepoStatusLimitHit,
4: RepoStatusTimedOut | RepoStatusLimitHit,
5: RepoStatusMissing,
})
assertReposStatusEqual(t, abUnionWant, b)
@ -123,16 +123,16 @@ func TestRepoStatusMap_nil(t *testing.T) {
x.Iterate(func(api.RepoID, RepoStatus) {
t.Error("Iterate should be empty")
})
x.Filter(RepoStatusTimedout, func(api.RepoID) {
x.Filter(RepoStatusTimedOut, func(api.RepoID) {
t.Error("Filter should be empty")
})
if got, want := x.Get(10), RepoStatus(0); got != want {
t.Errorf("Get got %s want %s", got, want)
}
if x.Any(RepoStatusTimedout) {
if x.Any(RepoStatusTimedOut) {
t.Error("Any should be false")
}
if x.All(RepoStatusTimedout) {
if x.All(RepoStatusTimedOut) {
t.Error("All should be false")
}
if got, want := x.Len(), 0; got != want {
@ -141,9 +141,9 @@ func TestRepoStatusMap_nil(t *testing.T) {
}
func TestRepoStatusSingleton(t *testing.T) {
x := repoStatusSingleton(123, RepoStatusTimedout|RepoStatusLimitHit)
x := repoStatusSingleton(123, RepoStatusTimedOut|RepoStatusLimitHit)
want := mkStatusMap(map[api.RepoID]RepoStatus{
123: RepoStatusTimedout | RepoStatusLimitHit,
123: RepoStatusTimedOut | RepoStatusLimitHit,
})
assertReposStatusEqual(t, want, x)
}

View File

@ -63,7 +63,7 @@ func (p *ProgressAggregator) currentStats() api.ProgressStats {
BackendsMissing: p.Stats.BackendsMissing,
ExcludedArchived: p.Stats.ExcludedArchived,
ExcludedForks: p.Stats.ExcludedForks,
Timedout: getRepos(p.Stats, searchshared.RepoStatusTimedout),
Timedout: getRepos(p.Stats, searchshared.RepoStatusTimedOut),
Missing: getRepos(p.Stats, searchshared.RepoStatusMissing),
Cloning: getRepos(p.Stats, searchshared.RepoStatusCloning),
LimitHit: p.Stats.IsLimitHit,

View File

@ -16,10 +16,6 @@ type Filter struct {
// for `repo:` filters.
Count int
// IsLimitHit is true if the results returned for a repository are
// incomplete.
IsLimitHit bool
// Kind of filter. Should be "repo", "file", or "lang".
Kind string
@ -46,15 +42,14 @@ func (f *Filter) Less(o *Filter) bool {
type filters map[string]*Filter
// Add the count to the filter with value.
func (m filters) Add(value string, label string, count int32, limitHit bool, kind string) {
func (m filters) Add(value string, label string, count int32, kind string) {
sf, ok := m[value]
if !ok {
sf = &Filter{
Value: value,
Label: label,
Count: int(count),
IsLimitHit: limitHit,
Kind: kind,
Value: value,
Label: label,
Count: int(count),
Kind: kind,
}
m[value] = sf
} else {

View File

@ -14,19 +14,19 @@ func TestFilters(t *testing.T) {
m := make(filters)
for count := int32(1); count <= 1000; count++ {
repo := fmt.Sprintf("repo-%d", count)
m.Add(repo, repo, count, false, "repo")
m.Add(repo, repo, count, "repo")
}
for count := int32(1); count <= 100; count++ {
file := fmt.Sprintf("file-%d", count)
m.Add(file, file, count, false, "file")
m.Add(file, file, count, "file")
}
// Add one large file count to see if it is recommended near the top.
m.Add("file-big", "file-big", 10000, false, "file")
m.Add("file-big", "file-big", 10000, "file")
// Test important and updating
m.Add("fork", "fork", 3, false, "repo")
m.Add("fork", "fork", 3, "repo")
m.MarkImportant("fork")
m.Add("fork", "fork", 1, false, "repo")
m.Add("fork", "fork", 1, "repo")
want := []string{
"fork 4",

View File

@ -204,11 +204,11 @@ func (e *EventTeamMatch) eventMatch() {}
// EventFilter is a suggestion for a search filter. Currently has a 1-1
// correspondence with the SearchFilter graphql type.
type EventFilter struct {
Value string `json:"value"`
Label string `json:"label"`
Count int `json:"count"`
LimitHit bool `json:"limitHit"`
Kind string `json:"kind"`
Value string `json:"value"`
Label string `json:"label"`
Count int `json:"count"`
Exhaustive bool `json:"exhaustive"`
Kind string `json:"kind"`
}
// EventAlert is GQL.SearchAlert. It replaces when sent to match existing

View File

@ -8,12 +8,10 @@ import (
"time"
"github.com/grafana/regexp"
"github.com/sourcegraph/sourcegraph/internal/api"
"github.com/sourcegraph/sourcegraph/internal/gitserver/gitdomain"
"github.com/sourcegraph/sourcegraph/internal/inventory"
"github.com/sourcegraph/sourcegraph/internal/lazyregexp"
"github.com/sourcegraph/sourcegraph/internal/search"
"github.com/sourcegraph/sourcegraph/internal/search/result"
)
@ -123,28 +121,27 @@ func (s *SearchFilters) Update(event SearchEvent) {
s.filters = make(filters)
}
addRepoFilter := func(repoName api.RepoName, repoID api.RepoID, rev string, lineMatchCount int32) {
addRepoFilter := func(repoName api.RepoName, rev string, lineMatchCount int32) {
filter := fmt.Sprintf(`repo:^%s$`, regexp.QuoteMeta(string(repoName)))
if rev != "" {
// We don't need to quote rev. The only special characters we interpret
// are @ and :, both of which are disallowed in git refs
filter = filter + fmt.Sprintf(`@%s`, rev)
}
limitHit := event.Stats.Status.Get(repoID)&search.RepoStatusLimitHit != 0
s.filters.Add(filter, string(repoName), lineMatchCount, limitHit, "repo")
s.filters.Add(filter, string(repoName), lineMatchCount, "repo")
}
addFileFilter := func(fileMatchPath string, lineMatchCount int32, limitHit bool) {
addFileFilter := func(fileMatchPath string, lineMatchCount int32) {
for _, ff := range commonFileFilters {
// use regexp to match file paths unconditionally, whether globbing is enabled or not,
// since we have no native library call to match `**` for globs.
if ff.regexp.MatchString(fileMatchPath) {
s.filters.Add(ff.regexFilter, ff.label, lineMatchCount, limitHit, "file")
s.filters.Add(ff.regexFilter, ff.label, lineMatchCount, "file")
}
}
}
addLangFilter := func(fileMatchPath string, lineMatchCount int32, limitHit bool) {
addLangFilter := func(fileMatchPath string, lineMatchCount int32) {
if ext := path.Ext(fileMatchPath); ext != "" {
rawLanguage, _ := inventory.GetLanguageByFilename(fileMatchPath)
language := strings.ToLower(rawLanguage)
@ -153,22 +150,22 @@ func (s *SearchFilters) Update(event SearchEvent) {
language = strconv.Quote(language)
}
value := fmt.Sprintf(`lang:%s`, language)
s.filters.Add(value, rawLanguage, lineMatchCount, limitHit, "lang")
s.filters.Add(value, rawLanguage, lineMatchCount, "lang")
}
}
}
addSymbolFilter := func(symbols []*result.SymbolMatch, limitHit bool) {
addSymbolFilter := func(symbols []*result.SymbolMatch) {
for _, sym := range symbols {
selectKind := result.ToSelectKind[strings.ToLower(sym.Symbol.Kind)]
filter := fmt.Sprintf(`select:symbol.%s`, selectKind)
s.filters.Add(filter, selectKind, 1, limitHit, "symbol type")
s.filters.Add(filter, selectKind, 1, "symbol type")
}
}
addCommitAuthorFilter := func(commit gitdomain.Commit) {
filter := fmt.Sprintf(`author:%s`, commit.Author.Email)
s.filters.Add(filter, commit.Author.Name, 1, false, "author")
s.filters.Add(filter, commit.Author.Name, 1, "author")
}
addCommitDateFilter := func(commit gitdomain.Commit) {
@ -181,40 +178,41 @@ func (s *SearchFilters) Update(event SearchEvent) {
df := determineTimeframe(cd)
filter := fmt.Sprintf("%s:%s", df.Timeframe, df.Value)
s.filters.Add(filter, df.Label, 1, false, "date")
s.filters.Add(filter, df.Label, 1, "date")
}
if event.Stats.ExcludedForks > 0 {
s.filters.Add("fork:yes", "Include forked repos", int32(event.Stats.ExcludedForks), event.Stats.IsLimitHit, "utility")
s.filters.Add("fork:yes", "Include forked repos", int32(event.Stats.ExcludedForks), "utility")
s.filters.MarkImportant("fork:yes")
}
if event.Stats.ExcludedArchived > 0 {
s.filters.Add("archived:yes", "Include archived repos", int32(event.Stats.ExcludedArchived), event.Stats.IsLimitHit, "utility")
s.filters.Add("archived:yes", "Include archived repos", int32(event.Stats.ExcludedArchived), "utility")
s.filters.MarkImportant("archived:yes")
}
for _, match := range event.Results {
switch v := match.(type) {
case *result.FileMatch:
rev := ""
if v.InputRev != nil {
rev = *v.InputRev
}
lines := int32(v.ResultCount())
addRepoFilter(v.Repo.Name, v.Repo.ID, rev, lines)
addLangFilter(v.Path, lines, v.LimitHit)
addFileFilter(v.Path, lines, v.LimitHit)
addSymbolFilter(v.Symbols, v.LimitHit)
addRepoFilter(v.Repo.Name, rev, lines)
addLangFilter(v.Path, lines)
addFileFilter(v.Path, lines)
addSymbolFilter(v.Symbols)
case *result.RepoMatch:
// It should be fine to leave this blank since revision specifiers
// can only be used with the 'repo:' scope. In that case,
// we shouldn't be getting any repository name matches back.
addRepoFilter(v.Name, v.ID, "", 1)
addRepoFilter(v.Name, "", 1)
case *result.CommitMatch:
// We leave "rev" empty, instead of using "CommitMatch.Commit.ID". This way we
// get 1 filter per repo instead of 1 filter per sha in the side-bar.
addRepoFilter(v.Repo.Name, v.Repo.ID, "", int32(v.ResultCount()))
addRepoFilter(v.Repo.Name, "", int32(v.ResultCount()))
addCommitAuthorFilter(v.Commit)
addCommitDateFilter(v.Commit)

View File

@ -205,25 +205,22 @@ func TestSymbolCounts(t *testing.T) {
},
wantFilters: map[string]*Filter{
"select:symbol.class": &Filter{
Value: "select:symbol.class",
Label: "class",
Count: 1,
IsLimitHit: false,
Kind: "symbol type",
Value: "select:symbol.class",
Label: "class",
Count: 1,
Kind: "symbol type",
},
"select:symbol.variable": &Filter{
Value: "select:symbol.variable",
Label: "variable",
Count: 2,
IsLimitHit: false,
Kind: "symbol type",
Value: "select:symbol.variable",
Label: "variable",
Count: 2,
Kind: "symbol type",
},
"select:symbol.constant": &Filter{
Value: "select:symbol.constant",
Label: "constant",
Count: 4,
IsLimitHit: false,
Kind: "symbol type",
Value: "select:symbol.constant",
Label: "constant",
Count: 4,
Kind: "symbol type",
},
},
},

View File

@ -350,7 +350,7 @@ func zoektSearch(ctx context.Context, repos *IndexedRepoRevs, q zoektquery.Q, pa
}
if !foundResults.Load() && since(t0) >= searchOpts.MaxWallTime {
c.Send(streaming.SearchEvent{Stats: streaming.Stats{Status: mkStatusMap(search.RepoStatusTimedout)}})
c.Send(streaming.SearchEvent{Stats: streaming.Stats{Status: mkStatusMap(search.RepoStatusTimedOut)}})
}
return nil
}

View File

@ -94,8 +94,8 @@ func TestIndexedSearch(t *testing.T) {
},
wantCommon: streaming.Stats{
Status: mkStatusMap(map[string]search.RepoStatus{
"foo/bar": search.RepoStatusTimedout,
"foo/foobar": search.RepoStatusTimedout,
"foo/bar": search.RepoStatusTimedOut,
"foo/foobar": search.RepoStatusTimedOut,
}),
},
},