add local rack (#60)

* add local rack

* add instructions for trusting local ca

* handle local rack idling

* must install ca to system keychain

* update tests

* change router service type on linux local

* trim platform name when passing to rack

* restart microk8s after installation
David Dollar 2019-12-31 11:01:52 -05:00 committed by GitHub
parent 964ade629a
commit bba828e3e9
53 changed files with 1598 additions and 210 deletions

install/local/README.md (new file)

@@ -0,0 +1,67 @@
# Convox on Local Workstation
## Initial Setup
### MacOS
- Install [Docker Desktop](https://www.docker.com/products/docker-desktop)
- Go to Docker Desktop Preferences
  - Go to the Advanced tab
    - Drag the CPU slider to the halfway point
    - Drag the Memory slider to at least 8GB
  - Go to the Kubernetes tab
    - Enable Kubernetes
### Ubuntu
- `snap install microk8s --classic --channel=1.13/stable`
- `microk8s.enable dns storage`
- `mkdir -p ~/.kube`
- `microk8s.config > ~/.kube/config`
- `sudo snap restart microk8s`
## Install Convox
- Clone this repository and switch to the directory containing this `README`
- Run `terraform init`
- Run `terraform apply`
## DNS Setup
Set `*.convox` to be resolved by the local Rack's DNS server.
### MacOS
- `sudo mkdir -p /etc/resolver`
- `sudo bash -c 'echo "nameserver 127.0.0.1" > /etc/resolver/convox'`
### Ubuntu
- `sudo mkdir -p /usr/lib/systemd/resolved.conf.d`
- `sudo bash -c "printf '[Resolve]\nDNS=$(kubectl get service/resolver-external -n convox-system -o jsonpath="{.spec.clusterIP}")\nDomains=~convox' > /usr/lib/systemd/resolved.conf.d/convox.conf"`
- `sudo systemctl daemon-reload`
- `sudo systemctl restart systemd-networkd systemd-resolved`
## CA Trust (optional)
To remove browser warnings about untrusted certificates for local applications, you can trust the Rack's CA certificate.
This certificate is generated on your local machine and is unique to your Rack.
### MacOS
- `kubectl get secret/ca -n convox-system -o jsonpath="{.data.tls\.crt}" | base64 -d > /tmp/ca`
- `sudo security add-trusted-cert -d -r trustRoot -k /Library/Keychains/System.keychain /tmp/ca`
### Ubuntu
- `kubectl get secret/ca -n convox-system -o jsonpath="{.data.tls\.crt}" | base64 -d > /tmp/ca`
- `sudo mv /tmp/ca /usr/local/share/ca-certificates/convox.crt`
- `sudo update-ca-certificates`
## Convox CLI Setup
- [Install the Convox CLI](../../docs/guides/installation/cli.md)
- Run `export RACK_URL=$(terraform output rack_url)`
- Run `convox rack` to ensure that your CLI is connected to your new Rack
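
After setup, name resolution through the new resolver can be sanity-checked with a few lines of Go (an illustrative sketch, not part of this commit; `api.convox` assumes the default rack name used above):

```go
package main

import (
	"context"
	"fmt"
	"net"
)

func main() {
	// resolve a *.convox name through the system resolver configured above
	addrs, err := net.DefaultResolver.LookupHost(context.Background(), "api.convox")
	if err != nil {
		fmt.Println("lookup failed:", err)
		return
	}
	fmt.Println("api.convox ->", addrs)
}
```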

install/local/main.tf (new file)

@@ -0,0 +1,20 @@
variable "name" {
description = "rack name"
default = "convox"
}
variable "release" {
description = "convox release version to install"
default = ""
}
module "system" {
source = "../../terraform/system/local"
name = var.name
release = var.release
}
output "rack_url" {
value = module.system.api
}

(modified file)

@@ -71,7 +71,7 @@ func currentEndpoint(c *stdcli.Context, rack_ string) (string, error) {
}
if strings.HasPrefix(rack_, "local/") {
return fmt.Sprintf("https://rack.%s", strings.SplitN(rack_, "/", 2)[1]), nil
return fmt.Sprintf("https://api.%s", strings.SplitN(rack_, "/", 2)[1]), nil
}
host, err := currentHost(c)
@@ -195,10 +195,12 @@ func localRacks(c *stdcli.Context) ([]rack, error) {
for _, nsr := range nsrs {
if strings.HasPrefix(nsr, "namespace/") {
-racks = append(racks, rack{
-Name: fmt.Sprintf("local/%s", strings.TrimPrefix(nsr, "namespace/")),
-Status: "running",
-})
+if name, err := c.Execute("kubectl", "get", nsr, "-o", "jsonpath={.metadata.labels.rack}"); err == nil {
+racks = append(racks, rack{
+Name: fmt.Sprintf("local/%s", strings.TrimSpace(string(name))),
+Status: "running",
+})
+}
}
}
}

(modified file)

@@ -38,6 +38,7 @@ func TestRacks(t *testing.T) {
me := &mockstdcli.Executor{}
me.On("Execute", "kubectl", "get", "ns", "--selector=system=convox,type=rack", "--output=name").Return([]byte("namespace/dev\n"), nil)
me.On("Execute", "kubectl", "get", "namespace/dev", "-o", "jsonpath={.metadata.labels.rack}").Return([]byte("dev\n"), nil)
e.Executor = me
res, err := testExecute(e, "racks", nil)

(modified file)

@@ -76,6 +76,8 @@ func TestVersionNoSystemMultipleLocal(t *testing.T) {
testClient(t, func(e *cli.Engine, i *mocksdk.Interface) {
me := &mockstdcli.Executor{}
me.On("Execute", "kubectl", "get", "ns", "--selector=system=convox,type=rack", "--output=name").Return([]byte("namespace/dev\nnamespace/dev2\n"), nil)
me.On("Execute", "kubectl", "get", "namespace/dev", "-o", "jsonpath={.metadata.labels.rack}").Return([]byte("dev\n"), nil)
me.On("Execute", "kubectl", "get", "namespace/dev2", "-o", "jsonpath={.metadata.labels.rack}").Return([]byte("dev2\n"), nil)
e.Executor = me
res, err := testExecute(e, "version", nil)
@@ -93,6 +95,7 @@ func TestVersionNoSystemSingleLocal(t *testing.T) {
testClient(t, func(e *cli.Engine, i *mocksdk.Interface) {
me := &mockstdcli.Executor{}
me.On("Execute", "kubectl", "get", "ns", "--selector=system=convox,type=rack", "--output=name").Return([]byte("namespace/dev\n"), nil)
me.On("Execute", "kubectl", "get", "namespace/dev", "-o", "jsonpath={.metadata.labels.rack}").Return([]byte("dev\n"), nil)
e.Executor = me
i.On("SystemGet").Return(fxSystemLocal(), nil)

(new file)

@@ -0,0 +1,198 @@
package logstorage
import (
"context"
"fmt"
"math/rand"
"sort"
"sync"
"time"
)
type Store struct {
lock sync.Mutex
streams map[string][]Log
subscriptions subscriptions
}
type Log struct {
Prefix string
Message string
Timestamp time.Time
}
type Receiver chan Log
func init() {
rand.Seed(time.Now().UTC().UnixNano())
}
// New returns a *Store so that the cleaner goroutine and all callers share
// the same lock and stream map (returning a value would copy the mutex).
func New() *Store {
s := &Store{streams: map[string][]Log{}}
go s.startCleaner()
return s
}
func (s *Store) Append(stream string, ts time.Time, prefix, message string) {
s.lock.Lock()
defer s.lock.Unlock()
log := Log{Message: message, Prefix: prefix, Timestamp: ts}
ls, ok := s.streams[stream]
if !ok {
ls = []Log{}
}
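// insert in timestamp order: find the first existing entry newer than ts,
// open a one-slot gap with copy, and place the new log there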
n := sort.Search(len(ls), func(i int) bool { return ls[i].Timestamp.After(ts) })
ls = append(ls, Log{})
copy(ls[n+1:], ls[n:])
ls[n] = log
s.streams[stream] = ls
s.subscriptions.send(stream, log)
}
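// Subscribe replays stored logs with timestamps at or after start to ch.
// With follow set, the channel stays registered for future logs; otherwise
// it is closed once the replay finishes.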
func (s *Store) Subscribe(ctx context.Context, ch Receiver, stream string, start time.Time, follow bool) {
s.lock.Lock()
defer s.lock.Unlock()
if ls, ok := s.streams[stream]; ok {
n := sort.Search(len(ls), func(i int) bool { return !ls[i].Timestamp.Before(start) })
go sendMultiple(ch, ls[n:], func() {
if !follow {
close(ch)
}
})
}
if follow {
s.subscriptions.Subscribe(ctx, ch, stream, start)
}
}
func (s *Store) cleanupLogs() {
s.lock.Lock()
defer s.lock.Unlock()
for name := range s.streams {
ls := s.streams[name]
// drop entries older than 30 seconds: Search finds the first entry newer
// than the cutoff, and everything before it is discarded below
n := sort.Search(len(ls), func(i int) bool { return ls[i].Timestamp.After(time.Now().Add(-30 * time.Second)) })
s.streams[name] = ls[n:]
}
}
func (s *Store) startCleaner() {
for range time.Tick(30 * time.Second) {
s.cleanupLogs()
}
}
type subscriptions struct {
lock sync.Mutex
subscriptions map[string]map[string]*subscription
}
type subscription struct {
ch Receiver
lock sync.Mutex
queue []Log
start time.Time
}
func (s *subscriptions) Subscribe(ctx context.Context, ch Receiver, stream string, start time.Time) {
s.add(ctx, ch, stream, start)
}
func (s *subscriptions) add(ctx context.Context, ch Receiver, stream string, start time.Time) {
s.lock.Lock()
defer s.lock.Unlock()
if s.subscriptions == nil {
s.subscriptions = map[string]map[string]*subscription{}
}
if _, ok := s.subscriptions[stream]; !ok {
s.subscriptions[stream] = map[string]*subscription{}
}
handle := fmt.Sprintf("%v:%d", ch, rand.Int63())
s.subscriptions[stream][handle] = &subscription{ch: ch, start: start}
go s.watch(ctx, stream, handle)
}
func (s *subscriptions) remove(stream, handle string) {
s.lock.Lock()
defer s.lock.Unlock()
if _, ok := s.subscriptions[stream]; !ok {
return
}
delete(s.subscriptions[stream], handle)
}
func (s *subscriptions) send(stream string, l Log) {
s.lock.Lock()
defer s.lock.Unlock()
if _, ok := s.subscriptions[stream]; !ok {
return
}
for _, sub := range s.subscriptions[stream] {
if !sub.start.After(l.Timestamp) {
sub.add(l)
}
}
}
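// watch flushes a subscription's queued logs to its receiver every 100ms
// until the subscriber's context is canceled, then removes the subscription.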
func (s *subscriptions) watch(ctx context.Context, stream, handle string) {
defer s.remove(stream, handle)
tick := time.NewTicker(100 * time.Millisecond)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return
case <-tick.C:
// look up the subscription under the lock to avoid racing with add/remove,
// but flush outside it since a flush can block on the receiver
s.lock.Lock()
var sub *subscription
if ss, ok := s.subscriptions[stream]; ok {
sub = ss[handle]
}
s.lock.Unlock()
if sub != nil {
sub.flush()
}
}
}
}
func (s *subscription) add(l Log) {
s.lock.Lock()
defer s.lock.Unlock()
s.queue = append(s.queue, l)
}
func (s *subscription) flush() {
s.lock.Lock()
defer s.lock.Unlock()
for _, l := range s.queue {
s.ch <- l
}
s.queue = s.queue[:0]
}
func sendMultiple(ch Receiver, ls []Log, done func()) {
defer done()
for _, l := range ls {
ch <- l
}
}

(new file)

@@ -0,0 +1,88 @@
package logstorage_test
import (
"context"
"testing"
"time"
"github.com/convox/convox/pkg/logstorage"
"github.com/stretchr/testify/require"
)
var (
time1 = time.Date(2019, 01, 01, 0, 1, 0, 0, time.UTC)
time2 = time.Date(2019, 01, 01, 0, 2, 0, 0, time.UTC)
time3 = time.Date(2019, 01, 01, 0, 3, 0, 0, time.UTC)
)
func TestNoFollow(t *testing.T) {
s := logstorage.New()
s.Append("foo", time2, "p2", "two")
s.Append("foo", time1, "p1", "one")
s.Append("foo", time3, "p3", "three")
ch := make(chan logstorage.Log)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s.Subscribe(ctx, ch, "foo", time1, false)
log, ok := <-ch
require.True(t, ok)
require.Equal(t, "p1", log.Prefix)
require.Equal(t, "one", log.Message)
require.Equal(t, time1, log.Timestamp)
log, ok = <-ch
require.True(t, ok)
require.Equal(t, "p2", log.Prefix)
require.Equal(t, "two", log.Message)
require.Equal(t, time2, log.Timestamp)
log, ok = <-ch
require.True(t, ok)
require.Equal(t, "p3", log.Prefix)
require.Equal(t, "three", log.Message)
require.Equal(t, time3, log.Timestamp)
_, ok = <-ch
require.False(t, ok)
}
func TestFollow(t *testing.T) {
s := logstorage.New()
s.Append("foo", time2, "p2", "two")
ch := make(chan logstorage.Log)
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
s.Subscribe(ctx, ch, "foo", time1, true)
time.Sleep(500 * time.Millisecond)
s.Append("foo", time3, "p3", "three")
s.Append("foo", time1, "p1", "one")
log, ok := <-ch
require.True(t, ok)
require.Equal(t, "two", log.Message)
require.Equal(t, "p2", log.Prefix)
require.Equal(t, time2, log.Timestamp)
log, ok = <-ch
require.True(t, ok)
require.Equal(t, time3, log.Timestamp)
require.Equal(t, "p3", log.Prefix)
require.Equal(t, "three", log.Message)
log, ok = <-ch
require.True(t, ok)
require.Equal(t, time1, log.Timestamp)
require.Equal(t, "p1", log.Prefix)
require.Equal(t, "one", log.Message)
}

(modified file)

@@ -127,7 +127,20 @@ func (b *BackendKubernetes) IdleGet(target string) (bool, error) {
fmt.Printf("ns=backend.k8s at=idle.get target=%q\n", target)
if service, namespace, ok := parseTarget(target); ok {
-return b.idled[fmt.Sprintf("%s/%s", namespace, service)], nil
+key := fmt.Sprintf("%s/%s", namespace, service)
+if idle, ok := b.idled[key]; ok {
+return idle, nil
+}
+d, err := b.cluster.ExtensionsV1beta1().Deployments(namespace).Get(service, am.GetOptions{})
+if err != nil {
+return false, err
+}
+b.idled[key] = (d.Spec.Replicas == nil || int(*d.Spec.Replicas) == 0)
+return b.idled[key], nil
}
return true, nil

(modified file)

@@ -68,7 +68,7 @@ func New() (*Router, error) {
switch os.Getenv("CACHE") {
case "dynamodb":
-c, err := NewCacheDynamo(os.Getenv("ROUTER_CACHE"))
+c, err := NewCacheDynamo(os.Getenv("DYNAMODB_CACHE"))
if err != nil {
return nil, err
}
@@ -87,7 +87,7 @@ func New() (*Router, error) {
switch os.Getenv("STORAGE") {
case "dynamodb":
-s, err := NewStorageDynamo(os.Getenv("ROUTER_HOSTS"), os.Getenv("ROUTER_TARGETS"))
+s, err := NewStorageDynamo(os.Getenv("DYNAMODB_HOSTS"), os.Getenv("DYNAMODB_TARGETS"))
if err != nil {
return nil, err
}

provider/local/app.go (new file)

@@ -0,0 +1,5 @@
package local
func (p *Provider) AppIdles(name string) (bool, error) {
return true, nil
}

provider/local/build.go (new file)

@@ -0,0 +1,47 @@
package local
import (
"io"
"os/exec"
"strings"
"github.com/convox/convox/pkg/structs"
)
func (p *Provider) BuildExport(app, id string, w io.Writer) error {
if err := p.authAppRepository(app); err != nil {
return err
}
return p.Provider.BuildExport(app, id, w)
}
func (p *Provider) BuildImport(app string, r io.Reader) (*structs.Build, error) {
if err := p.authAppRepository(app); err != nil {
return nil, err
}
return p.Provider.BuildImport(app, r)
}
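// authAppRepository logs the local Docker daemon in to the app's registry
// so that image pushes and pulls during build export/import are authorized.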
func (p *Provider) authAppRepository(app string) error {
repo, _, err := p.RepositoryHost(app)
if err != nil {
return err
}
user, pass, err := p.RepositoryAuth(app)
if err != nil {
return err
}
cmd := exec.Command("docker", "login", "-u", user, "--password-stdin", repo)
cmd.Stdin = strings.NewReader(pass)
if err := cmd.Run(); err != nil {
return err
}
return nil
}

(new file)

@@ -0,0 +1,276 @@
package local
import (
"bufio"
"context"
"fmt"
"reflect"
"strings"
"sync"
"time"
"github.com/convox/convox/pkg/kctl"
ac "k8s.io/api/core/v1"
am "k8s.io/apimachinery/pkg/apis/meta/v1"
ic "k8s.io/client-go/informers/core/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
)
const (
ScannerStartSize = 4096
ScannerMaxSize = 20 * 1024 * 1024
)
type PodController struct {
Controller *kctl.Controller
Provider *Provider
logger *podLogger
start time.Time
}
func NewPodController(p *Provider) (*PodController, error) {
pc := &PodController{
Provider: p,
logger: NewPodLogger(p),
start: time.Now().UTC(),
}
c, err := kctl.NewController(p.Namespace, "convox-local-pod", pc)
if err != nil {
return nil, err
}
pc.Controller = c
return pc, nil
}
func (c *PodController) Client() kubernetes.Interface {
return c.Provider.Cluster
}
func (c *PodController) ListOptions(opts *am.ListOptions) {
opts.LabelSelector = fmt.Sprintf("system=convox,rack=%s", c.Provider.Name)
// opts.ResourceVersion = ""
}
func (c *PodController) Run() {
i := ic.NewFilteredPodInformer(c.Provider.Cluster, ac.NamespaceAll, 0, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, c.ListOptions)
ch := make(chan error)
go c.Controller.Run(i, ch)
for err := range ch {
fmt.Printf("err = %+v\n", err)
}
}
func (c *PodController) Start() error {
c.start = time.Now().UTC()
return nil
}
func (c *PodController) Stop() error {
return nil
}
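// Add begins log collection for pods that are pending or running and
// schedules cleanup for pods that have already finished.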
func (c *PodController) Add(obj interface{}) error {
p, err := assertPod(obj)
if err != nil {
return err
}
// fmt.Printf("pod add %s/%s: %s\n", p.ObjectMeta.Namespace, p.ObjectMeta.Name, p.Status.Phase)
switch p.Status.Phase {
case "Succeeded", "Failed":
go c.cleanupPod(p)
case "Pending", "Running":
c.logger.Start(p.ObjectMeta.Namespace, p.ObjectMeta.Name, c.start)
}
return nil
}
func (c *PodController) Delete(obj interface{}) error {
return nil
}
func (c *PodController) Update(prev, cur interface{}) error {
pp, err := assertPod(prev)
if err != nil {
return err
}
cp, err := assertPod(cur)
if err != nil {
return err
}
if reflect.DeepEqual(pp.Status, cp.Status) {
return nil
}
// fmt.Printf("pod update %s/%s: %s (was %s)\n", cp.ObjectMeta.Namespace, cp.ObjectMeta.Name, cp.Status.Phase, pp.Status.Phase)
if cp.Status.Phase != pp.Status.Phase {
switch cp.Status.Phase {
case "Succeeded", "Failed":
go c.cleanupPod(cp)
}
}
return nil
}
func (c *PodController) cleanupPod(p *ac.Pod) error {
time.Sleep(5 * time.Second)
if err := c.Client().CoreV1().Pods(p.ObjectMeta.Namespace).Delete(p.ObjectMeta.Name, nil); err != nil {
return err
}
return nil
}
func assertPod(v interface{}) (*ac.Pod, error) {
p, ok := v.(*ac.Pod)
if !ok {
return nil, fmt.Errorf("could not assert pod for type: %T", v)
}
return p, nil
}
// func podCondition(p *ac.Pod, name string) *ac.PodCondition {
// for _, c := range p.Status.Conditions {
// if string(c.Type) == name {
// return &c
// }
// }
// return nil
// }
type podLogger struct {
provider *Provider
streams sync.Map
}
func NewPodLogger(p *Provider) *podLogger {
return &podLogger{provider: p}
}
func (l *podLogger) Start(namespace, pod string, start time.Time) {
key := fmt.Sprintf("%s:%s", namespace, pod)
ctx, cancel := context.WithCancel(context.Background())
if _, exists := l.streams.LoadOrStore(key, cancel); exists {
// already streaming this pod; release the unused context
cancel()
return
}
go l.watch(ctx, namespace, pod, start)
}
func (l *podLogger) Stop(namespace, pod string) {
key := fmt.Sprintf("%s:%s", namespace, pod)
if cv, ok := l.streams.Load(key); ok {
if cfn, ok := cv.(context.CancelFunc); ok {
cfn()
}
l.streams.Delete(key)
}
}
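// stream follows a pod's logs, sending raw timestamped lines to ch and
// reconnecting from the last timestamp seen if the log stream breaks.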
func (l *podLogger) stream(ch chan string, namespace, pod string, start time.Time) {
defer close(ch)
since := am.NewTime(start)
for {
lopts := &ac.PodLogOptions{
Follow: true,
SinceTime: &since,
Timestamps: true,
}
r, err := l.provider.Cluster.CoreV1().Pods(namespace).GetLogs(pod, lopts).Stream()
if err != nil {
fmt.Printf("err = %+v\n", err)
break
}
s := bufio.NewScanner(r)
s.Buffer(make([]byte, ScannerStartSize), ScannerMaxSize)
for s.Scan() {
line := s.Text()
if ts, err := time.Parse(time.RFC3339Nano, strings.Split(line, " ")[0]); err == nil {
since = am.NewTime(ts)
}
ch <- line
}
if err := s.Err(); err != nil {
fmt.Printf("err = %+v\n", err)
continue
}
break
}
}
func (l *podLogger) watch(ctx context.Context, namespace, pod string, start time.Time) {
defer l.Stop(namespace, pod)
ch := make(chan string)
var p *ac.Pod
var err error
for {
p, err = l.provider.Cluster.CoreV1().Pods(namespace).Get(pod, am.GetOptions{})
if err != nil {
fmt.Printf("err = %+v\n", err)
return
}
if p.Status.Phase != "Pending" {
break
}
time.Sleep(1 * time.Second)
}
app := p.ObjectMeta.Labels["app"]
typ := p.ObjectMeta.Labels["type"]
name := p.ObjectMeta.Labels["name"]
if typ == "process" {
typ = "service"
}
go l.stream(ch, namespace, pod, start)
for {
select {
case <-ctx.Done():
return
case log, ok := <-ch:
if !ok {
return
}
if parts := strings.SplitN(log, " ", 2); len(parts) == 2 {
if ts, err := time.Parse(time.RFC3339Nano, parts[0]); err == nil {
l.provider.Engine.Log(app, fmt.Sprintf("%s/%s/%s", typ, name, pod), ts, parts[1])
}
}
}
}
}

(new file)

@@ -0,0 +1,5 @@
package local
func (p *Provider) DeploymentTimeout() int {
return 1800
}

(new file)

@@ -0,0 +1,7 @@
package local
func (p *Provider) Heartbeat() (map[string]interface{}, error) {
hs := map[string]interface{}{}
return hs, nil
}

provider/local/helpers.go (new file)

@@ -0,0 +1,106 @@
package local
import (
"context"
"errors"
"fmt"
"os"
"os/exec"
"regexp"
"strings"
"time"
// gv "github.com/GoogleCloudPlatform/gke-managed-certs/pkg/clientgen/clientset/versioned"
am "k8s.io/apimachinery/pkg/apis/meta/v1"
)
func (p *Provider) appRegistry(app string) (string, error) {
ns, err := p.Provider.Cluster.CoreV1().Namespaces().Get(p.AppNamespace(app), am.GetOptions{})
if err != nil {
return "", err
}
registry, ok := ns.ObjectMeta.Annotations["convox.registry"]
if !ok {
return "", fmt.Errorf("no registry for app: %s", app)
}
return registry, nil
}
// func (p *Provider) gkeManagedCertsClient() (gv.Interface, error) {
// return gv.NewForConfig(p.Config)
// }
func (p *Provider) watchForProcessTermination(ctx context.Context, app, pid string, cancel func()) {
defer cancel()
tick := time.NewTicker(2 * time.Second)
defer tick.Stop()
for {
select {
case <-ctx.Done():
return
case <-tick.C:
if _, err := p.ProcessGet(app, pid); err != nil {
time.Sleep(2 * time.Second)
cancel()
return
}
}
}
}
func kubectl(args ...string) error {
cmd := exec.Command("kubectl", args...)
cmd.Env = os.Environ()
out, err := cmd.CombinedOutput()
if err != nil {
return errors.New(strings.TrimSpace(string(out)))
}
return nil
}
var outputConverter = regexp.MustCompile("([a-z])([A-Z])") // lower case letter followed by upper case
func outputToEnvironment(name string) string {
return strings.ToUpper(outputConverter.ReplaceAllString(name, "${1}_${2}"))
}
func upperName(name string) string {
if name == "" {
return ""
}
// replace underscores with dashes
name = strings.Replace(name, "_", "-", -1)
// myapp -> Myapp; my-app -> MyApp
us := strings.ToUpper(name[0:1]) + name[1:]
for {
i := strings.Index(us, "-")
if i == -1 {
break
}
s := us[0:i]
if len(us) > i+1 {
s += strings.ToUpper(us[i+1 : i+2])
}
if len(us) > i+2 {
s += us[i+2:]
}
us = s
}
return us
}
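
For reference, the conversions these helpers perform can be pinned down with a hypothetical test (not part of this commit):

```go
package local

import "testing"

func TestNameHelpers(t *testing.T) {
	// lowerCamelCase terraform output names become ENV_VAR style
	if got := outputToEnvironment("rackUrl"); got != "RACK_URL" {
		t.Fatalf("outputToEnvironment: got %q, want RACK_URL", got)
	}
	// kebab-case and snake_case names become UpperCamelCase
	if got := upperName("my-app"); got != "MyApp" {
		t.Fatalf("upperName: got %q, want MyApp", got)
	}
	if got := upperName("my_app"); got != "MyApp" {
		t.Fatalf("upperName: got %q, want MyApp", got)
	}
}
```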

provider/local/ingress.go (new file)

@@ -0,0 +1,13 @@
package local
func (p *Provider) IngressAnnotations(app string) (map[string]string, error) {
ans := map[string]string{
"kubernetes.io/ingress.class": "convox",
}
return ans, nil
}
func (p *Provider) IngressSecrets(app string) ([]string, error) {
return []string{}, nil
}

provider/local/local.go (new file)

@@ -0,0 +1,54 @@
package local
import (
"context"
"os"
"github.com/convox/convox/pkg/structs"
"github.com/convox/convox/provider/k8s"
)
type Provider struct {
*k8s.Provider
Registry string
Secret string
}
func FromEnv() (*Provider, error) {
k, err := k8s.FromEnv()
if err != nil {
return nil, err
}
p := &Provider{
Provider: k,
Registry: os.Getenv("REGISTRY"),
Secret: os.Getenv("SECRET"),
}
k.Engine = p
return p, nil
}
func (p *Provider) Initialize(opts structs.ProviderOptions) error {
if err := p.Provider.Initialize(opts); err != nil {
return err
}
pc, err := NewPodController(p)
if err != nil {
return err
}
go pc.Run()
return nil
}
func (p *Provider) WithContext(ctx context.Context) structs.Provider {
pp := *p
pp.Provider = pp.Provider.WithContext(ctx).(*k8s.Provider)
return &pp
}

provider/local/log.go (new file)

@@ -0,0 +1,112 @@
package local
import (
"context"
"fmt"
"io"
"net/url"
"time"
"github.com/convox/convox/pkg/common"
"github.com/convox/convox/pkg/logstorage"
"github.com/convox/convox/pkg/structs"
)
var logs = logstorage.New()
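// Log stores each message twice: once on the app-wide stream and once on
// the app/prefix stream, so both AppLogs and ProcessLogs can replay it.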
func (p *Provider) Log(app, stream string, ts time.Time, message string) error {
logs.Append(app, ts, stream, message)
logs.Append(fmt.Sprintf("%s/%s", app, stream), ts, stream, message)
return nil
}
func (p *Provider) AppLogs(name string, opts structs.LogsOptions) (io.ReadCloser, error) {
r, w := io.Pipe()
go subscribeLogs(p.Context(), w, name, opts)
return r, nil
}
func (p *Provider) BuildLogs(app, id string, opts structs.LogsOptions) (io.ReadCloser, error) {
b, err := p.BuildGet(app, id)
if err != nil {
return nil, err
}
switch b.Status {
case "running":
return p.ProcessLogs(app, b.Process, opts)
default:
u, err := url.Parse(b.Logs)
if err != nil {
return nil, err
}
switch u.Scheme {
case "object":
return p.ObjectFetch(u.Hostname(), u.Path)
default:
return nil, fmt.Errorf("unable to read logs for build: %s", id)
}
}
}
func (p *Provider) ProcessLogs(app, pid string, opts structs.LogsOptions) (io.ReadCloser, error) {
ps, err := p.ProcessGet(app, pid)
if err != nil {
return nil, err
}
stream := fmt.Sprintf("%s/service/%s/%s", app, ps.Name, pid)
r, w := io.Pipe()
ctx, cancel := context.WithCancel(p.Context())
go subscribeLogs(ctx, w, stream, opts)
go p.watchForProcessTermination(ctx, app, pid, cancel)
return r, nil
}
func (p *Provider) SystemLogs(opts structs.LogsOptions) (io.ReadCloser, error) {
r, w := io.Pipe()
go subscribeLogs(p.Context(), w, "rack", opts)
return r, nil
}
func subscribeLogs(ctx context.Context, w io.WriteCloser, stream string, opts structs.LogsOptions) {
defer w.Close()
ch := make(chan logstorage.Log, 1000)
sctx, cancel := context.WithCancel(ctx)
defer cancel()
since := time.Now().UTC().Add(-1 * common.DefaultDuration(opts.Since, 0))
follow := common.DefaultBool(opts.Follow, true)
logs.Subscribe(sctx, ch, stream, since, follow)
for {
select {
case <-ctx.Done():
return
case l, ok := <-ch:
if !ok {
return
}
prefix := ""
if common.DefaultBool(opts.Prefix, false) {
prefix = fmt.Sprintf("%s %s ", l.Timestamp.Format(time.RFC3339), l.Prefix)
}
if _, err := fmt.Fprintf(w, "%s%s\n", prefix, l.Message); err != nil {
return
}
}
}
}

(new file)

@@ -0,0 +1,9 @@
package local
import (
"github.com/convox/convox/pkg/manifest"
)
func (p *Provider) ManifestValidate(m *manifest.Manifest) error {
return nil
}

(new file)

@@ -0,0 +1,11 @@
package local
import "fmt"
func (p *Provider) RepositoryAuth(app string) (string, string, error) {
return "docker", p.Secret, nil
}
func (p *Provider) RepositoryHost(app string) (string, bool, error) {
return fmt.Sprintf("%s/%s", p.Registry, app), true, nil
}

provider/local/system.go (new file)

@@ -0,0 +1,9 @@
package local
func (p *Provider) SystemHost() string {
return p.Domain
}
func (p *Provider) SystemStatus() (string, error) {
return "running", nil
}

(modified file)

@@ -10,6 +10,7 @@ import (
"github.com/convox/convox/provider/do"
"github.com/convox/convox/provider/gcp"
"github.com/convox/convox/provider/k8s"
"github.com/convox/convox/provider/local"
)
var Mock = &structs.MockProvider{}
@@ -28,8 +28,8 @@ func FromEnv() (structs.Provider, error) {
return gcp.FromEnv()
case "k8s":
return k8s.FromEnv()
// case "local":
// return local.FromEnv()
case "local":
return local.FromEnv()
case "test":
return Mock, nil
case "":

(deleted file)

@@ -1 +0,0 @@
-# data "google_container_registry_repository" "registry" {}

(modified file)

@@ -64,7 +64,7 @@ resource "kubernetes_deployment" "api" {
spec {
min_ready_seconds = 3
revision_history_limit = 0
-replicas = 2
+replicas = var.replicas
selector {
match_labels = {
@@ -125,7 +125,7 @@ resource "kubernetes_deployment" "api" {
env {
name = "PASSWORD"
-value = random_string.password.result
+value = var.authentication ? random_string.password.result : ""
}
env {

(modified file)

@@ -2,6 +2,10 @@ variable "annotations" {
default = {}
}
variable "authentication" {
default = true
}
variable "domain" {
type = string
}
@@ -26,6 +30,10 @@ variable "release" {
type = string
}
variable "replicas" {
default = 2
}
variable "socket" {
default = "/var/run/docker.sock"
}

(new file)

@@ -0,0 +1,39 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.10"
}
locals {
tags = {
System = "convox"
Rack = var.name
}
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
authentication = false
domain = var.domain
name = var.name
namespace = var.namespace
release = var.release
replicas = 1
annotations = {}
env = {
PROVIDER = "local"
REGISTRY = "registry.${var.domain}"
RESOLVER = var.resolver
ROUTER = var.router
SECRET = var.secret
}
}

(new file)

@@ -0,0 +1,3 @@
output "endpoint" {
value = module.k8s.endpoint
}

(new file)

@@ -0,0 +1,27 @@
variable "domain" {
type = string
}
variable "name" {
type = string
}
variable "namespace" {
type = string
}
variable "release" {
type = string
}
variable "resolver" {
type = string
}
variable "router" {
type = string
}
variable "secret" {
type = string
}

(modified file)

@@ -45,7 +45,7 @@ resource "kubernetes_stateful_set" "elasticsearch" {
spec {
service_name = "elasticsearch"
-replicas = 2
+replicas = var.replicas
selector {
match_labels = {

(modified file)

@@ -1,3 +1,7 @@
variable "namespace" {
type = string
}
variable "replicas" {
default = 2
}

(modified file)

@@ -13,7 +13,7 @@ module "k8s" {
kubernetes = kubernetes
}
-cluster = var.cluster
+cluster = var.name
image = "fluent/fluentd-kubernetes-daemonset:v1.7-debian-elasticsearch6-1"
namespace = var.namespace
target = templatefile("${path.module}/target.conf.tpl", { elasticsearch = var.elasticsearch })

(modified file)

@@ -1,7 +1,3 @@
variable "cluster" {
type = string
}
variable "elasticsearch" {
type = string
}

(modified file)

@@ -37,8 +37,7 @@ module "api" {
release = var.release
resource_group = var.resource_group
router = module.router.endpoint
-# secret = random_string.secret.result
-workspace = var.workspace
+workspace = var.workspace
}
module "router" {

(deleted file)

@@ -1,174 +0,0 @@
# resource "random_string" "suffix" {
# length = 12
# special = false
# upper = false
# }
# resource "digitalocean_spaces_bucket" "registry" {
# name = "${var.name}-registry-${random_string.suffix.result}"
# region = var.region
# acl = "private"
# }
# resource "random_string" "secret" {
# length = 30
# }
# resource "kubernetes_deployment" "registry" {
# metadata {
# namespace = module.k8s.namespace
# name = "registry"
# labels = {
# serivce = "registry"
# }
# }
# spec {
# min_ready_seconds = 1
# revision_history_limit = 0
# selector {
# match_labels = {
# system = "convox"
# service = "registry"
# }
# }
# strategy {
# type = "RollingUpdate"
# rolling_update {
# max_surge = 1
# max_unavailable = 0
# }
# }
# template {
# metadata {
# labels = {
# system = "convox"
# service = "registry"
# }
# }
# spec {
# container {
# name = "main"
# image = "registry:2"
# image_pull_policy = "IfNotPresent"
# env {
# name = "REGISTRY_HTTP_SECRET"
# value = random_string.secret.result
# }
# env {
# name = "REGISTRY_STORAGE"
# value = "s3"
# }
# env {
# name = "REGISTRY_STORAGE_S3_ACCESSKEY"
# value = var.access_id
# }
# env {
# name = "REGISTRY_STORAGE_S3_BUCKET"
# value = digitalocean_spaces_bucket.registry.name
# }
# env {
# name = "REGISTRY_STORAGE_S3_REGION"
# value = var.region
# }
# env {
# name = "REGISTRY_STORAGE_S3_REGIONENDPOINT"
# value = "https://${var.region}.digitaloceanspaces.com"
# }
# env {
# name = "REGISTRY_STORAGE_S3_SECRETKEY"
# value = var.secret_key
# }
# port {
# container_port = 5000
# protocol = "TCP"
# }
# volume_mount {
# name = "registry"
# mount_path = "/var/lib/registry"
# }
# }
# volume {
# name = "registry"
# host_path {
# path = "/var/lib/registry"
# }
# }
# }
# }
# }
# }
# resource "kubernetes_service" "registry" {
# metadata {
# namespace = module.k8s.namespace
# name = "registry"
# }
# spec {
# type = "ClusterIP"
# selector = {
# system = "convox"
# service = "registry"
# }
# port {
# name = "http"
# port = 80
# target_port = 5000
# protocol = "TCP"
# }
# }
# }
# resource "kubernetes_ingress" "registry" {
# metadata {
# namespace = module.k8s.namespace
# name = "registry"
# annotations = {
# "convox.idles" : "true"
# }
# labels = {
# system = "convox"
# service = "registry"
# }
# }
# spec {
# tls {
# hosts = ["registry.${module.router.endpoint}"]
# }
# rule {
# host = "registry.${module.router.endpoint}"
# http {
# path {
# backend {
# service_name = kubernetes_service.registry.metadata.0.name
# service_port = 80
# }
# }
# }
# }
# }
# }

(modified file)

@@ -9,9 +9,10 @@ provider "kubernetes" {
resource "kubernetes_namespace" "system" {
metadata {
labels = {
app = "system"
rack = var.name
system = "convox"
app = "system"
type = "rack"
}
name = "${var.name}-system"

(new file)

@@ -0,0 +1,48 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.10"
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
domain = module.router.endpoint
name = var.name
release = var.release
}
module "api" {
source = "../../api/local"
providers = {
kubernetes = kubernetes
}
domain = module.router.endpoint
name = var.name
namespace = module.k8s.namespace
release = var.release
resolver = module.router.resolver
router = module.router.endpoint
secret = random_string.secret.result
}
module "router" {
source = "../../router/local"
providers = {
kubernetes = kubernetes
}
name = var.name
namespace = module.k8s.namespace
platform = var.platform
release = var.release
}

(new file)

@@ -0,0 +1,7 @@
output "api" {
value = module.api.endpoint
}
output "endpoint" {
value = module.router.endpoint
}

(new file)

@@ -0,0 +1,149 @@
resource "random_string" "secret" {
length = 30
special = false
}
resource "kubernetes_deployment" "registry" {
metadata {
namespace = module.k8s.namespace
name = "registry"
labels = {
serivce = "registry"
}
}
spec {
min_ready_seconds = 1
revision_history_limit = 0
selector {
match_labels = {
system = "convox"
service = "registry"
}
}
strategy {
type = "RollingUpdate"
rolling_update {
max_surge = 1
max_unavailable = 0
}
}
template {
metadata {
labels = {
system = "convox"
service = "registry"
}
}
spec {
container {
name = "main"
image = "registry:2"
image_pull_policy = "IfNotPresent"
env {
name = "REGISTRY_HTTP_SECRET"
value = random_string.secret.result
}
port {
container_port = 5000
protocol = "TCP"
}
volume_mount {
name = "registry"
mount_path = "/var/lib/registry"
}
}
volume {
name = "registry"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim.registry.metadata.0.name
}
}
}
}
}
}
resource "kubernetes_persistent_volume_claim" "registry" {
metadata {
namespace = module.k8s.namespace
name = "registry"
}
spec {
access_modes = ["ReadWriteOnce"]
resources {
requests = {
storage = "${var.registry_disk}Gi"
}
}
}
}
resource "kubernetes_service" "registry" {
metadata {
namespace = module.k8s.namespace
name = "registry"
}
spec {
type = "ClusterIP"
selector = {
system = "convox"
service = "registry"
}
port {
name = "http"
port = 80
target_port = 5000
protocol = "TCP"
}
}
}
resource "kubernetes_ingress" "registry" {
metadata {
namespace = module.k8s.namespace
name = "registry"
annotations = {
"convox.idles" : "true"
}
labels = {
system = "convox"
service = "registry"
}
}
spec {
tls {
hosts = ["registry.${module.router.endpoint}"]
}
rule {
host = "registry.${module.router.endpoint}"
http {
path {
backend {
service_name = kubernetes_service.registry.metadata.0.name
service_port = 80
}
}
}
}
}
}

(new file)

@@ -0,0 +1,15 @@
variable "name" {
type = string
}
variable "platform" {
type = string
}
variable "registry_disk" {
default = 20
}
variable "release" {
type = string
}

(modified file)

@@ -40,12 +40,13 @@ module "k8s" {
}
env = {
-AWS_REGION = data.aws_region.current.name
-CACHE = "dynamodb"
-STORAGE = "dynamodb"
-ROUTER_CACHE = aws_dynamodb_table.cache.name
-ROUTER_HOSTS = aws_dynamodb_table.hosts.name
-ROUTER_TARGETS = aws_dynamodb_table.targets.name
+AUTOCERT = "true"
+AWS_REGION = data.aws_region.current.name
+CACHE = "dynamodb"
+DYNAMODB_CACHE = aws_dynamodb_table.cache.name
+DYNAMODB_HOSTS = aws_dynamodb_table.hosts.name
+DYNAMODB_TARGETS = aws_dynamodb_table.targets.name
+STORAGE = "dynamodb"
}
}

(modified file)

@@ -36,6 +36,7 @@ module "k8s" {
release = var.release
env = {
AUTOCERT = "true"
CACHE = "redis"
REDIS_ADDR = "${azurerm_redis_cache.cache.hostname}:${azurerm_redis_cache.cache.ssl_port}"
REDIS_AUTH = azurerm_redis_cache.cache.primary_access_key

(modified file)

@@ -32,6 +32,7 @@ module "k8s" {
release = var.release
env = {
AUTOCERT = "true"
CACHE = "redis"
REDIS_ADDR = "${digitalocean_database_cluster.cache.private_host}:${digitalocean_database_cluster.cache.port}"
REDIS_AUTH = digitalocean_database_cluster.cache.password

(modified file)

@@ -32,6 +32,7 @@ module "k8s" {
release = var.release
env = {
AUTOCERT = "true"
CACHE = "redis"
REDIS_ADDR = "${google_redis_instance.cache.host}:${google_redis_instance.cache.port}"
}

(modified file)

@@ -162,11 +162,6 @@ resource "kubernetes_deployment" "router" {
value = "router.${var.namespace}.svc.cluster.local"
}
-env {
-name = "AUTOCERT"
-value = "true"
-}
dynamic "env" {
for_each = var.env
@@ -243,7 +238,7 @@ resource "kubernetes_service" "resolver" {
type = "ClusterIP"
port {
name = "dns-udp"
name = "dns"
port = 53
protocol = "UDP"
target_port = 5454

(new file)

@@ -0,0 +1,39 @@
resource "tls_private_key" "ca-private" {
algorithm = "RSA"
}
resource "tls_self_signed_cert" "ca" {
key_algorithm = tls_private_key.ca-private.algorithm
private_key_pem = tls_private_key.ca-private.private_key_pem
dns_names = ["ca.${var.name}"]
is_ca_certificate = true
set_subject_key_id = true
validity_period_hours = 24 * 365 * 10
allowed_uses = [
"cert_signing",
"digital_signature",
"key_encipherment",
"server_auth"
]
subject {
common_name = "ca.${var.name}"
organization = "Convox"
}
}
resource "kubernetes_secret" "ca" {
metadata {
namespace = var.namespace
name = "ca"
}
type = "kubernetes.io/tls"
data = {
"tls.crt" = tls_self_signed_cert.ca.cert_pem,
"tls.key" = tls_private_key.ca-private.private_key_pem,
}
}

(new file)

@@ -0,0 +1,98 @@
terraform {
required_version = ">= 0.12.0"
}
provider "http" {
version = "~> 1.1"
}
provider "kubernetes" {
version = "~> 1.10"
}
provider "tls" {
version = "~> 2.1"
}
locals {
tags = {
System = "convox"
Rack = var.name
}
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
namespace = var.namespace
release = var.release
env = {
CACHE = "memory"
}
}
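# On Linux (microk8s) there is no built-in LoadBalancer implementation, so the
# services below fall back to ClusterIP; on MacOS, Docker Desktop exposes
# LoadBalancer services on localhost.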
resource "kubernetes_service" "resolver-external" {
metadata {
namespace = var.namespace
name = "resolver-external"
}
spec {
type = var.platform == "Linux" ? "ClusterIP" : "LoadBalancer"
port {
name = "dns"
port = 53
protocol = "UDP"
target_port = 5453
}
selector = {
system = "convox"
service = "router"
}
}
lifecycle {
ignore_changes = [metadata[0].annotations]
}
}
resource "kubernetes_service" "router" {
metadata {
namespace = var.namespace
name = "router"
}
spec {
type = var.platform == "Linux" ? "ClusterIP" : "LoadBalancer"
port {
name = "http"
port = 80
protocol = "TCP"
target_port = 80
}
port {
name = "https"
port = 443
protocol = "TCP"
target_port = 443
}
selector = {
system = "convox"
service = "router"
}
}
lifecycle {
ignore_changes = [metadata[0].annotations]
}
}

(new file)

@@ -0,0 +1,8 @@
output "endpoint" {
value = var.name
}
output "resolver" {
value = module.k8s.resolver
}

(new file)

@@ -0,0 +1,15 @@
variable "name" {
type = string
}
variable "namespace" {
type = string
}
variable "platform" {
type = string
}
variable "release" {
type = string
}

(modified file)

@@ -43,13 +43,12 @@ module "cluster" {
}
module "fluentd" {
source = "../../fluentd/do"
source = "../../fluentd/elasticsearch"
providers = {
kubernetes = kubernetes
}
-cluster = module.cluster.name
elasticsearch = module.rack.elasticsearch
namespace = "kube-system"
name = var.name

(new file)

@@ -0,0 +1,48 @@
terraform {
required_version = ">= 0.12.0"
}
provider "http" {
version = "~> 1.1"
}
provider "kubernetes" {
version = "~> 1.10"
}
locals {
platform_filename = "/tmp/convox.platform"
}
data "http" "releases" {
url = "https://api.github.com/repos/convox/convox/releases"
}
locals {
current = jsondecode(data.http.releases.body).0.tag_name
release = coalesce(var.release, local.current)
}
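# Detect the host platform with uname: local-exec output cannot be captured
# directly, so it round-trips through a file and is trimmed before use.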
resource "null_resource" "platform" {
provisioner "local-exec" {
command = "uname -s > ${local.platform_filename}"
}
}
data "local_file" "platform" {
depends_on = [null_resource.platform]
filename = local.platform_filename
}
module "rack" {
source = "../../rack/local"
providers = {
kubernetes = kubernetes
}
name = var.name
platform = trimspace(data.local_file.platform.content)
release = local.release
}

(new file)

@@ -0,0 +1,7 @@
output "api" {
value = module.rack.api
}
output "endpoint" {
value = module.rack.endpoint
}

(new file)

@@ -0,0 +1,7 @@
variable "name" {
type = string
}
variable "release" {
default = ""
}