sourcegraph/dev/sg/sg_start_test.go
Jean-Hadrien Chabran 2dfeb486d5
fix(local): fix race in sg_start_test.go (#63642)
Fixes DINF-82; this was very much a rabbit hole. A few things:

- The race that @bobheadxi mentioned here
https://github.com/sourcegraph/sourcegraph/pull/63405#discussion_r1648180713
wasn't from `*output.Output` being unsafe, but from `outputtest.Buffer`, as
it happened again (see
[DINF-82](https://linear.app/sourcegraph/issue/DINF-82/devsgsg-test-failed-with-a-detected-race-condition)).
A sketch of what a race-free buffer looks like follows this list.
- Something is messed up with `cmds.start()`, which sometimes ends up
printing the command output _after_ the exit message instead of before.
- The crude `sort.Strings(want|have)` that was already there fixes that.
- And without the sleep, it's possible to read the output from the
`outputtest.Buffer` before the command outputs get written to it.
- The `time.Sleep(300 * time.Millisecond)` _mitigates/hides_ that
problem.
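
For illustration, the unsafe pattern is command goroutines writing to the
buffer while the test goroutine reads from it. Here's a minimal sketch of
the kind of locking that makes such a buffer race-free (`safeBuffer` is a
hypothetical stand-in, not the actual `outputtest.Buffer` code):

```go
package main

import (
	"strings"
	"sync"
)

// safeBuffer guards all reads and writes with a mutex so that command
// goroutines can write while the test goroutine reads Lines().
type safeBuffer struct {
	mu  sync.Mutex
	buf strings.Builder
}

func (b *safeBuffer) Write(p []byte) (int, error) {
	b.mu.Lock()
	defer b.mu.Unlock()
	return b.buf.Write(p)
}

func (b *safeBuffer) Lines() []string {
	b.mu.Lock()
	defer b.mu.Unlock()
	return strings.Split(strings.TrimRight(b.buf.String(), "\n"), "\n")
}
```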

At least, this shouldn't blow up in CI and buys us time to fix the whole
thing. We're tracking this in DINF-104. And out of 200 runs, I also
stumbled on a race in `progress_tty`, tracked in DINF-105 (that package
was originally meant to be used by `src-cli` and was re-used for `sg`
three years ago).

I'm pretty unhappy about the solution, but a bandage is better than
nothing. Ideally, we should reconsider dropping `std.Output` entirely in
`sg` and use the good stuff from github.com/charmbracelet instead, since
we don't want to spend too much time on arcane terminal things ourselves.
But I'm much more concerned about the concurrency issues mentioned above.

## Test plan

CI + `sg bazel test //dev/sg:sg_test --runs_per_test=100`
2024-07-04 19:11:10 +02:00


package main

import (
	"context"
	"sort"
	"strings"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"

	"github.com/sourcegraph/sourcegraph/dev/sg/internal/run"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/sgconf"
	"github.com/sourcegraph/sourcegraph/dev/sg/internal/std"
	"github.com/sourcegraph/sourcegraph/lib/output/outputtest"
)
func TestStartCommandSet(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	buf := useOutputBuffer(t)

	commandSet := &sgconf.Commandset{Name: "test-set", Commands: []string{"test-cmd-1"}}
	command := &run.Command{
		Config: run.SGConfigCommandOptions{
			Name: "test-cmd-1",
		},
		Install: "echo 'booting up horsegraph'",
		Cmd:     "echo 'horsegraph booted up. mount your horse.' && echo 'quitting. not horsing around anymore.'",
	}

	testConf := &sgconf.Config{
		Commands:    map[string]*run.Command{"test-cmd-1": command},
		Commandsets: map[string]*sgconf.Commandset{"test-set": commandSet},
	}

	args := StartArgs{
		CommandSet: "test-set",
	}
	cmds, _ := args.toCommands(testConf)
	if err := cmds.start(ctx); err != nil {
		t.Errorf("failed to start: %s", err)
	}

	expectOutput(t, buf, []string{
		"",
		"💡 Installing 1 commands...",
		"",
		"test-cmd-1 installed",
		"✅ 1/1 commands installed ███████████████████████████████████████████████ 100%",
		"",
		"✅ Everything installed! Booting up the system!",
		"",
		"Starting 1 cmds",
		"Running test-cmd-1...",
		"[ test-cmd-1] horsegraph booted up. mount your horse.",
		"[ test-cmd-1] quitting. not horsing around anymore.",
		"test-cmd-1 exited without error",
	})
}
func TestStartCommandSet_InstallError(t *testing.T) {
	ctx, cancel := context.WithCancel(context.Background())
	defer cancel()

	buf := useOutputBuffer(t)

	commandSet := &sgconf.Commandset{Name: "test-set", Commands: []string{"test-cmd-1"}}
	command := &run.Command{
		Config: run.SGConfigCommandOptions{
			Name: "test-cmd-1",
		},
		Install: "echo 'booting up horsegraph' && exit 1",
		Cmd:     "echo 'never appears'",
	}

	testConf := &sgconf.Config{
		Commands:    map[string]*run.Command{"test-cmd-1": command},
		Commandsets: map[string]*sgconf.Commandset{"test-set": commandSet},
	}

	args := StartArgs{
		CommandSet: "test-set",
	}
	cmds, err := args.toCommands(testConf)
	if err != nil {
		t.Errorf("unexpected error constructing commands: %s", err)
	}

	err = cmds.start(ctx)
	if err == nil {
		t.Fatalf("err is nil unexpectedly")
	}
	if !strings.Contains(err.Error(), "failed to run test-cmd-1") {
		t.Errorf("err contains wrong message: %s", err.Error())
	}

	expectOutput(t, buf, []string{
		"",
		"💡 Installing 1 commands...",
		"--------------------------------------------------------------------------------",
		`Failed to build test-cmd-1: 'bash -c echo 'booting up horsegraph' && exit 1' failed with "exit status 1":`,
		"booting up horsegraph",
		"--------------------------------------------------------------------------------",
	})
}
// useOutputBuffer swaps std.Out for an in-memory buffer for the duration of
// the test, so assertions can be made against the produced output.
func useOutputBuffer(t *testing.T) *outputtest.Buffer {
	t.Helper()

	buf := &outputtest.Buffer{}

	oldStdout := std.Out
	std.Out = std.NewFixedOutput(buf, true)
	t.Cleanup(func() { std.Out = oldStdout })

	return buf
}
func expectOutput(t *testing.T, buf *outputtest.Buffer, want []string) {
	t.Helper()

	// TODO: DINF-104, find out why we need this sleep. Basically, when running in tests,
	// for some reason, even though cmds.start() returned, it hasn't finished writing
	// the outputs in rare cases (6 out of 100 runs on my machine).
	time.Sleep(300 * time.Millisecond)

	have := buf.Lines()

	// TODO: See DINF-104, without this, we can see the cmd output printed out after
	// the exit message. For now, and to prevent flakes, we'll keep the sort.
	sort.Strings(want)
	sort.Strings(have)

	if !cmp.Equal(want, have) {
		t.Fatalf("wrong output:\n%s", cmp.Diff(want, have))
	}
}
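
One possible direction for DINF-104: instead of the fixed 300ms sleep in
`expectOutput`, poll the buffer until the output stops growing or a deadline
passes. A rough sketch, under the assumption that `buf.Lines()` is cheap and
safe to call repeatedly (`waitForStableOutput` is a hypothetical helper, not
part of the current code):

```go
package main

import (
	"testing"
	"time"

	"github.com/sourcegraph/sourcegraph/lib/output/outputtest"
)

// waitForStableOutput polls the buffer until two consecutive reads see the
// same number of lines, or gives up after a deadline. This bounds the wait
// instead of hard-coding a sleep that merely hides the race.
func waitForStableOutput(t *testing.T, buf *outputtest.Buffer) []string {
	t.Helper()
	deadline := time.Now().Add(2 * time.Second)
	last := -1
	for time.Now().Before(deadline) {
		lines := buf.Lines()
		if len(lines) == last {
			return lines // no new output since the previous poll
		}
		last = len(lines)
		time.Sleep(10 * time.Millisecond)
	}
	t.Fatalf("output never stabilized within the deadline")
	return nil
}
```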