feat(appliance): self-update can update multiple deployments (#64132)

The helm chart will configure both the backend and frontend deployments
to self-update:
https://github.com/sourcegraph/deploy-sourcegraph-helm/pull/513

Relates to
https://linear.app/sourcegraph/issue/REL-302/self-update-should-update-appliance-frontend-too
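
In outline, the updater's single DeploymentName field becomes a comma-separated DeploymentNames list that is split at update time, so one loop rolls every named deployment. A runnable, self-contained sketch of just that contract (the names are illustrative, not the operator's real wiring):

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// DeploymentNames is now a comma-separated list; these are the two
		// names the helm chart is expected to pass (illustrative values).
		deploymentNames := "appliance,appliance-frontend"
		for _, name := range strings.Split(deploymentNames, ",") {
			fmt.Printf("self-update would roll deployment %q to the latest release\n", name)
		}
	}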
Craig Furman 2024-07-30 09:30:26 +01:00 committed by GitHub
parent ebec72d7ed
commit d945f19285
3 changed files with 56 additions and 39 deletions

View File

@@ -108,12 +108,12 @@ func Start(ctx context.Context, observationCtx *observation.Context, ready servi
 	grpcServer := makeGRPCServer(logger, app)
 	selfUpdater := &selfupdate.SelfUpdate{
-		Interval:       time.Hour,
-		Logger:         logger.Scoped("SelfUpdate"),
-		K8sClient:      k8sClient,
-		RelregClient:   relregClient,
-		DeploymentName: config.selfDeploymentName,
-		Namespace:      config.namespace,
+		Interval:        time.Hour,
+		Logger:          logger.Scoped("SelfUpdate"),
+		K8sClient:       k8sClient,
+		RelregClient:    relregClient,
+		DeploymentNames: config.selfDeploymentName,
+		Namespace:       config.namespace,
 	}
 	probe := &healthchecker.PodProbe{K8sClient: k8sClient}

View File

@@ -2,6 +2,7 @@ package integrationtest
 import (
 	"context"
+	"errors"
 	"fmt"
 	"os"
 	"strings"
@@ -80,8 +81,12 @@ func TestSelfUpdateLoop(t *testing.T) {
 	nsName := ns.GetName()
 	// provision example appliance deployments
-	dep := buildTestDeployment(nsName)
-	err = k8sClient.Create(ctx, dep)
+	dep1 := buildTestDeployment("appliance", nsName)
+	err = k8sClient.Create(ctx, dep1)
+	require.NoError(t, err)
+	dep2 := buildTestDeployment("appliance-frontend", nsName)
+	err = k8sClient.Create(ctx, dep2)
 	require.NoError(t, err)
 	cfgMap := &corev1.ConfigMap{
@@ -101,37 +106,38 @@ func TestSelfUpdateLoop(t *testing.T) {
 		{Version: "4.5.7", Public: true},
 	}, nil)
 	selfUpdater := &selfupdate.SelfUpdate{
-		Interval:       time.Second,
-		Logger:         logtest.Scoped(t),
-		RelregClient:   relregClient,
-		K8sClient:      k8sClient,
-		DeploymentName: "appliance",
-		Namespace:      nsName,
+		Interval:        time.Second,
+		Logger:          logtest.Scoped(t),
+		RelregClient:    relregClient,
+		K8sClient:       k8sClient,
+		DeploymentNames: "appliance,appliance-frontend",
+		Namespace:       nsName,
 	}
 	loopCtx, cancel := context.WithCancel(context.Background())
 	loopDone := make(chan struct{})
 	go func() {
-		_ = selfUpdater.Loop(loopCtx)
+		err := selfUpdater.Loop(loopCtx)
+		if !errors.Is(err, context.Canceled) {
+			require.NoError(t, err)
+		}
 		close(loopDone)
 	}()
 	err = selfUpdater.Once(ctx)
 	require.NoError(t, err)
-	require.Eventually(t, func() bool {
-		var dep appsv1.Deployment
-		depName := types.NamespacedName{Name: "appliance", Namespace: nsName}
-		require.NoError(t, k8sClient.Get(ctx, depName, &dep))
-		return strings.HasSuffix(dep.Spec.Template.Spec.Containers[0].Image, "4.5.7")
-	}, time.Second*10, time.Second)
+	for _, depName := range []string{"appliance", "appliance-frontend"} {
+		require.Eventually(t, func() bool {
+			var dep appsv1.Deployment
+			depNsName := types.NamespacedName{Name: depName, Namespace: nsName}
+			require.NoError(t, k8sClient.Get(ctx, depNsName, &dep))
+			return strings.HasSuffix(dep.Spec.Template.Spec.Containers[0].Image, "4.5.7")
+		}, time.Second*10, time.Second)
+	}
 	cancel()
 	<-loopDone
 }
-func buildTestDeployment(namespace string) *appsv1.Deployment {
-	name := "appliance"
+func buildTestDeployment(name, namespace string) *appsv1.Deployment {
 	defaultContainer := container.NewContainer(name, nil, config.ContainerConfig{
 		Image: "index.docker.io/sourcegraph/appliance:4.3.1",
 		Resources: &corev1.ResourceRequirements{},

View File

@@ -17,12 +17,12 @@ import (
 )
 type SelfUpdate struct {
-	Interval       time.Duration
-	Logger         log.Logger
-	K8sClient      client.Client
-	RelregClient   releaseregistry.ReleaseRegistryClient
-	DeploymentName string
-	Namespace      string
+	Interval        time.Duration
+	Logger          log.Logger
+	K8sClient       client.Client
+	RelregClient    releaseregistry.ReleaseRegistryClient
+	DeploymentNames string
+	Namespace       string
 }
 func (u *SelfUpdate) Loop(ctx context.Context) error {
@@ -31,6 +31,11 @@ func (u *SelfUpdate) Loop(ctx context.Context) error {
 	ticker := time.NewTicker(u.Interval)
 	defer ticker.Stop()
+	// Do one iteration without having to wait for the first tick
+	if err := u.Once(ctx); err != nil {
+		u.Logger.Error("error self-updating", log.Error(err))
+		return err
+	}
 	for {
 		select {
 		case <-ticker.C:
@@ -48,10 +53,14 @@ func (u *SelfUpdate) Loop(ctx context.Context) error {
 func (u *SelfUpdate) Once(ctx context.Context) error {
 	u.Logger.Info("starting self-update")
-	var dep appsv1.Deployment
-	depName := types.NamespacedName{Name: u.DeploymentName, Namespace: u.Namespace}
-	if err := u.K8sClient.Get(ctx, depName, &dep); err != nil {
-		return errors.Wrap(err, "getting deployment")
+	var deps []appsv1.Deployment
+	for _, depName := range strings.Split(u.DeploymentNames, ",") {
+		depNsName := types.NamespacedName{Name: depName, Namespace: u.Namespace}
+		var dep appsv1.Deployment
+		if err := u.K8sClient.Get(ctx, depNsName, &dep); err != nil {
+			return errors.Wrap(err, "getting deployment")
+		}
+		deps = append(deps, dep)
 	}
 	newTag, err := u.getLatestTag(ctx)
@@ -59,9 +68,11 @@ func (u *SelfUpdate) Once(ctx context.Context) error {
 		return errors.Wrap(err, "getting latest tag")
 	}
-	dep.Spec.Template.Spec.Containers[0].Image = replaceTag(dep.Spec.Template.Spec.Containers[0].Image, newTag)
-	if err := u.K8sClient.Update(ctx, &dep); err != nil {
-		return errors.Wrap(err, "updating deployment")
+	for _, dep := range deps {
+		dep.Spec.Template.Spec.Containers[0].Image = replaceTag(dep.Spec.Template.Spec.Containers[0].Image, newTag)
+		if err := u.K8sClient.Update(ctx, &dep); err != nil {
+			return errors.Wrap(err, "updating deployment")
+		}
 	}
 	return nil
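
replaceTag is called above but not shown in this diff. A plausible, self-contained sketch of its behavior, assuming it swaps the segment after the last ':' in the image reference; the real implementation may differ (for example around digests or port-bearing registry hosts):

	// Hypothetical sketch; assumes "strings" is imported. Given
	// "index.docker.io/sourcegraph/appliance:4.3.1" and "4.5.7" it returns
	// "index.docker.io/sourcegraph/appliance:4.5.7".
	func replaceTag(image, newTag string) string {
		i := strings.LastIndex(image, ":")
		if i < 0 {
			return image + ":" + newTag // untagged reference: append the tag
		}
		return image[:i+1] + newTag
	}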