add terraform to repo

David Dollar 2019-10-10 10:43:02 -04:00
parent 56f40224ec
commit 263260fbc1
78 changed files with 3562 additions and 0 deletions

terraform/.gitignore vendored Normal file

@ -0,0 +1,3 @@
.terraform
*.tfstate*
terraform.tfvars

terraform/README.md Normal file

@ -0,0 +1,27 @@
# convox/terraform
## Modules
* system/aws
  * cluster/aws
    * logs/aws
      * logs/k8s
  * rack/aws
    * rack/k8s
      * atom/k8s
    * api/aws
      * api/k8s
    * router/aws
      * router/k8s
* system/gcp
  * cluster/gcp
    * logs/gcp
      * logs/k8s
  * rack/gcp
    * rack/k8s
      * atom/k8s
    * api/gcp
      * api/k8s
    * router/gcp
      * router/k8s
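
## Usage

A minimal sketch of wiring the top-level module into a configuration (assumes this `terraform/` directory as the working directory, configured AWS credentials, and Terraform >= 0.12; `name` and `domain` are example values):

```hcl
module "system" {
  source = "./system/aws"

  domain = "rack.example.org" # example: DNS name the rack should answer on
  name   = "dev"              # example: rack name, used to prefix cloud resources
}
```

After `terraform apply`, the module's `api` and `router` outputs expose the rack API and load balancer endpoints.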

terraform/api/aws/iam.tf Normal file

@ -0,0 +1,63 @@
data "aws_iam_policy_document" "api_assume" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "AWS"
identifiers = [var.nodes_role]
}
}
}
data "aws_iam_policy_document" "logs" {
statement {
actions = [
"logs:CreateLogGroup",
"logs:CreateLogStream",
"logs:FilterLogEvents",
"logs:PutLogEvents",
]
resources = [
"arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:${var.name}-*",
"arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:/convox/${var.name}/*",
]
}
}
data "aws_iam_policy_document" "storage" {
statement {
actions = [
"s3:DeleteObject",
"s3:HeadObject",
"s3:GetObject",
"s3:ListObjects",
"s3:PutObject",
]
resources = [
"${aws_s3_bucket.storage.arn}/*",
]
}
}
resource "aws_iam_role" "api" {
name = "${var.name}-api"
assume_role_policy = data.aws_iam_policy_document.api_assume.json
path = "/convox/"
tags = local.tags
}
resource "aws_iam_role_policy_attachment" "api_ecr" {
role = aws_iam_role.api.name
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryFullAccess"
}
resource "aws_iam_role_policy" "api_logs" {
name = "logs"
role = aws_iam_role.api.name
policy = data.aws_iam_policy_document.logs.json
}
resource "aws_iam_role_policy" "api_storage" {
name = "storage"
role = aws_iam_role.api.name
policy = data.aws_iam_policy_document.storage.json
}

terraform/api/aws/main.tf Normal file

@ -0,0 +1,49 @@
terraform {
required_version = ">= 0.12.0"
}
provider "aws" {
version = "~> 2.22"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
data "aws_caller_identity" "current" {}
data "aws_region" "current" {}
locals {
tags = {
System = "convox"
Rack = var.name
}
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = var.kubeconfig
name = var.name
namespace = var.namespace
release = var.release
annotations = {
"iam.amazonaws.com/role" : aws_iam_role.api.arn,
}
env = {
AWS_REGION = data.aws_region.current.name
BUCKET = aws_s3_bucket.storage.id
PROVIDER = "aws"
ROUTER = var.router
SOCKET = "/var/run/docker.sock"
}
}

terraform/api/aws/outputs.tf Normal file

@ -0,0 +1,3 @@
output "endpoint" {
value = module.k8s.endpoint
}

terraform/api/aws/s3.tf Normal file

@ -0,0 +1,14 @@
resource "aws_s3_bucket" "storage" {
acl = "private"
bucket_prefix = "${var.name}-storage-"
force_destroy = true
tags = local.tags
server_side_encryption_configuration {
rule {
apply_server_side_encryption_by_default {
sse_algorithm = "aws:kms"
}
}
}
}

terraform/api/aws/vars.tf Normal file

@ -0,0 +1,27 @@
variable "domain" {
type = "string"
}
variable "kubeconfig" {
type = "string"
}
variable "name" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "nodes_role" {
type = "string"
}
variable "release" {
type = "string"
}
variable "router" {
type = "string"
}

terraform/api/gcp/iam.tf Normal file

@ -0,0 +1,22 @@
resource "google_service_account" "api" {
account_id = "${var.name}-api"
}
resource "google_service_account_key" "api" {
service_account_id = google_service_account.api.name
}
resource "google_project_iam_member" "api-logging-viewer" {
role = "roles/logging.viewer"
member = "serviceAccount:${google_service_account.api.email}"
}
resource "google_project_iam_member" "api-logging-writer" {
role = "roles/logging.logWriter"
member = "serviceAccount:${google_service_account.api.email}"
}
resource "google_project_iam_member" "api-storage" {
role = "roles/storage.admin"
member = "serviceAccount:${google_service_account.api.email}"
}

terraform/api/gcp/main.tf Normal file

@ -0,0 +1,51 @@
terraform {
required_version = ">= 0.12.0"
}
provider "google" {
version = "~> 2.12"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
data "google_client_config" "current" {}
locals {
tags = {
System = "convox"
Rack = var.name
}
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = var.kubeconfig
name = var.name
namespace = var.namespace
release = var.release
annotations = {
"cloud.google.com/service-account" : google_service_account.api.email
}
env = {
BUCKET = google_storage_bucket.storage.name
KEY = google_service_account_key.api.private_key
PROJECT = data.google_client_config.current.project,
PROVIDER = "gcp"
REGION = data.google_client_config.current.region
REGISTRY = data.google_container_registry_repository.registry.repository_url
ROUTER = var.router
SOCKET = "/var/run/docker.sock"
}
}

terraform/api/gcp/outputs.tf Normal file

@ -0,0 +1,3 @@
output "endpoint" {
value = module.k8s.endpoint
}

terraform/api/gcp/registry.tf Normal file

@ -0,0 +1 @@
data "google_container_registry_repository" "registry" {}

terraform/api/gcp/storage.tf Normal file

@ -0,0 +1,12 @@
resource "random_string" "suffix" {
length = 12
special = false
upper = false
}
resource "google_storage_bucket" "storage" {
name = "${var.name}-storage-${random_string.suffix.result}"
bucket_policy_only = true
force_destroy = true
}

terraform/api/gcp/vars.tf Normal file

@ -0,0 +1,27 @@
variable "domain" {
type = "string"
}
variable "kubeconfig" {
type = "string"
}
variable "name" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "nodes_account" {
type = "string"
}
variable "release" {
type = "string"
}
variable "router" {
type = "string"
}

terraform/api/k8s/crd.yml Normal file

@ -0,0 +1,37 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: builds.convox.com
spec:
group: convox.com
versions:
- name: v1
served: true
storage: true
version: v1
scope: Namespaced
names:
plural: builds
singular: build
kind: Build
categories:
- convox
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: releases.convox.com
spec:
group: convox.com
versions:
- name: v1
served: true
storage: true
version: v1
scope: Namespaced
names:
plural: releases
singular: release
kind: Release
categories:
- convox

terraform/api/k8s/main.tf Normal file

@ -0,0 +1,276 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
provider "random" {
version = "~> 2.2"
}
resource "random_string" "password" {
length = 64
special = false
}
resource "null_resource" "crd" {
provisioner "local-exec" {
command = "kubectl apply -f ${path.module}/crd.yml"
environment = {
"KUBECONFIG" : var.kubeconfig,
}
}
}
resource "kubernetes_cluster_role" "api" {
metadata {
name = "${var.name}-api"
}
rule {
api_groups = ["*"]
resources = ["*"]
verbs = ["*"]
}
}
resource "kubernetes_cluster_role_binding" "api" {
metadata {
name = "${var.name}-api"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = kubernetes_cluster_role.api.metadata.0.name
}
subject {
kind = "ServiceAccount"
name = kubernetes_service_account.api.metadata.0.name
namespace = kubernetes_service_account.api.metadata.0.namespace
}
}
resource "kubernetes_service_account" "api" {
metadata {
namespace = var.namespace
name = "api"
}
}
resource "kubernetes_deployment" "api" {
metadata {
namespace = var.namespace
name = "api"
labels = {
system = "convox"
service = "api"
}
}
spec {
min_ready_seconds = 3
revision_history_limit = 0
selector {
match_labels = {
system = "convox"
service = "api"
}
}
strategy {
type = "RollingUpdate"
rolling_update {
max_surge = 1
max_unavailable = 0
}
}
template {
metadata {
annotations = merge(var.annotations, {
"scheduler.alpha.kubernetes.io/critical-pod" : ""
})
labels = {
system = "convox"
service = "api"
}
}
spec {
automount_service_account_token = true
service_account_name = kubernetes_service_account.api.metadata.0.name
share_process_namespace = true
container {
name = "main"
args = ["api"]
image = "convox/convox:${var.release}"
image_pull_policy = "Always"
env {
name = "DOMAIN"
value = var.domain
}
env {
name = "IMAGE"
value = "convox/convox:${var.release}"
}
env {
name = "NAMESPACE"
value_from {
field_ref {
field_path = "metadata.namespace"
}
}
}
env {
name = "PASSWORD"
value = random_string.password.result
}
env {
name = "VERSION"
value = var.release
}
dynamic "env" {
for_each = var.env
content {
name = env.key
value = env.value
}
}
port {
container_port = 5443
}
liveness_probe {
http_get {
path = "/check"
port = 5443
scheme = "HTTPS"
}
failure_threshold = 3
initial_delay_seconds = 15
period_seconds = 5
success_threshold = 1
timeout_seconds = 3
}
readiness_probe {
http_get {
path = "/check"
port = 5443
scheme = "HTTPS"
}
period_seconds = 5
timeout_seconds = 3
}
volume_mount {
name = "docker"
mount_path = "/var/run/docker.sock"
}
volume_mount {
name = "storage"
mount_path = "/var/storage"
}
}
volume {
name = "docker"
host_path {
path = var.socket
}
}
volume {
name = "storage"
host_path {
path = "/var/rack/${var.name}/storage"
}
}
}
}
}
}
resource "kubernetes_service" "api" {
metadata {
namespace = var.namespace
name = "api"
labels = {
system = "convox"
service = "api"
}
}
spec {
port {
name = "https"
port = 5443
target_port = 5443
protocol = "TCP"
}
selector = {
system = "convox"
service = "api"
}
}
}
resource "kubernetes_ingress" "api" {
metadata {
namespace = var.namespace
name = var.name
annotations = {
"convox.idles" : "true"
"convox.ingress.service.api.5443.protocol" : "https"
}
labels = {
system = "convox"
service = "api"
}
}
spec {
tls {
hosts = ["api.${var.domain}"]
}
rule {
host = "api.${var.domain}"
http {
path {
backend {
service_name = kubernetes_service.api.metadata.0.name
service_port = 5443
}
}
}
}
}
}

terraform/api/k8s/outputs.tf Normal file

@ -0,0 +1,3 @@
output "endpoint" {
value = "https://convox:${random_string.password.result}@api.${var.domain}"
}

terraform/api/k8s/vars.tf Normal file

@ -0,0 +1,31 @@
variable "annotations" {
default = {}
}
variable "domain" {
type = string
}
variable "env" {
default = {}
}
variable "kubeconfig" {
type = string
}
variable "name" {
type = string
}
variable "namespace" {
type = string
}
variable "release" {
type = string
}
variable "socket" {
default = "/var/run/docker.sock"
}

terraform/atom/k8s/crd.yml Normal file

@ -0,0 +1,37 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: atoms.atom.convox.com
spec:
group: atom.convox.com
versions:
- name: v1
served: true
storage: true
version: v1
scope: Namespaced
names:
plural: atoms
singular: atom
kind: Atom
categories:
- convox
---
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: atomversions.atom.convox.com
spec:
group: atom.convox.com
versions:
- name: v1
served: true
storage: true
version: v1
scope: Namespaced
names:
plural: atomversions
singular: atomversion
kind: AtomVersion
categories:
- convox

terraform/atom/k8s/main.tf Normal file

@ -0,0 +1,118 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
provider "null" {
version = "~> 2.1"
}
resource "null_resource" "crd" {
provisioner "local-exec" {
command = "kubectl apply -f ${path.module}/crd.yml"
environment = {
"KUBECONFIG" : var.kubeconfig,
}
}
}
resource "kubernetes_cluster_role" "atom" {
metadata {
name = "atom"
}
rule {
api_groups = ["*"]
resources = ["*"]
verbs = ["*"]
}
}
resource "kubernetes_cluster_role_binding" "atom" {
metadata {
name = "atom"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "atom"
}
subject {
kind = "ServiceAccount"
name = "atom"
namespace = var.namespace
}
}
resource "kubernetes_service_account" "atom" {
metadata {
namespace = var.namespace
name = "atom"
}
}
resource "kubernetes_deployment" "atom" {
metadata {
namespace = var.namespace
name = "atom"
}
spec {
revision_history_limit = 0
selector {
match_labels = {
system = "convox"
service = "atom"
}
}
strategy {
type = "RollingUpdate"
rolling_update {
max_surge = 1
max_unavailable = 0
}
}
template {
metadata {
annotations = {
"scheduler.alpha.kubernetes.io/critical-pod" : ""
}
labels = {
system = "convox"
service = "atom"
}
}
spec {
automount_service_account_token = true
share_process_namespace = true
service_account_name = "atom"
container {
name = "main"
args = ["atom"]
image = "convox/convox:${var.release}"
image_pull_policy = "Always"
resources {
requests {
cpu = "32m"
memory = "32Mi"
}
}
}
}
}
}
}

terraform/atom/k8s/vars.tf Normal file

@ -0,0 +1,11 @@
variable "kubeconfig" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "release" {
type = "string"
}

terraform/cluster/aws/cloudformation.yml Normal file

@ -0,0 +1,184 @@
AWSTemplateFormatVersion: "2010-09-09"
Conditions:
SshKey: !Not [ !Equals [ !Ref SshKey, "" ] ]
Mappings:
SubnetMasks:
"16": { Public: 12, Private: 14 }
"17": { Public: 11, Private: 13 }
"18": { Public: 10, Private: 12 }
"19": { Public: 9, Private: 11 }
"20": { Public: 8, Private: 10 }
"21": { Public: 7, Private: 9 }
"22": { Public: 6, Private: 8 }
"23": { Public: 5, Private: 7 }
"24": { Public: 4, Private: 6 }
Outputs:
EventQueue:
Value: !Ref EventQueue
EventTopic:
Value: !Ref EventTopic
RackBucket:
Value: !Ref RackBucket
RouterTargetGroup80:
Value: !Ref RouterTargetGroup80
RouterTargetGroup443:
Value: !Ref RouterTargetGroup443
Parameters:
Ami:
Type: String
MinLength: 1
Cluster:
Type: String
MinLength: 1
Role:
Type: String
MinLength: 1
Security:
Type: AWS::EC2::SecurityGroup::Id
SshKey:
Type: String
Default: ""
Subnets:
Type: List<AWS::EC2::Subnet::Id>
Type:
Type: String
Default: t3.small
Vpc:
Type: AWS::EC2::VPC::Id
Resources:
# AutoscalerRole:
# Type: AWS::IAM::Role
# Properties:
# AssumeRolePolicyDocument:
# Version: "2012-10-17"
# Statement:
# - Effect: "Allow"
# Principal: !Ref Role
# Action: sts:AssumeRole
# Path: /convox/
# Policies:
# - PolicyName: alb-ingress
# PolicyDocument:
# Version: "2012-10-17"
# Statement:
# - Effect: Allow
# Action:
# - autoscaling:DescribeAutoScalingGroups
# - autoscaling:DescribeAutoScalingInstances
# - autoscaling:DescribeTags
# Resource: "*"
# - Effect: Allow
# Action:
# - autoscaling:SetDesiredCapacity
# - autoscaling:TerminateInstanceInAutoScalingGroup
# Resource: !Sub "arn:aws:autoscaling:${AWS::Region}:${AWS::AccountId}:autoScalingGroup:*:autoScalingGroupName/${Nodes}"
EventQueue:
Type: AWS::SQS::Queue
EventQueuePolicy:
Type: AWS::SQS::QueuePolicy
Properties:
Queues:
- !Ref EventQueue
PolicyDocument:
Version: "2012-10-17"
Statement:
- Effect: Allow
Principal: { "AWS": "*" }
Action: sqs:SendMessage
Resource: !GetAtt EventQueue.Arn
Condition: { "ArnEquals": { "aws:SourceArn": !Ref EventTopic } }
EventTopic:
Type: AWS::SNS::Topic
Properties:
DisplayName: !Ref AWS::StackName
Subscription:
- Protocol: sqs
Endpoint: !GetAtt EventQueue.Arn
NodesInstanceProfile:
Type: AWS::IAM::InstanceProfile
Properties:
Path: /convox/
Roles:
- !Ref Role
NodesLaunchConfig:
Type: AWS::AutoScaling::LaunchConfiguration
Properties:
AssociatePublicIpAddress: true
IamInstanceProfile: !Ref NodesInstanceProfile
ImageId: !Ref Ami
InstanceType: !Ref Type
KeyName: !If [ SshKey, !Ref SshKey, !Ref "AWS::NoValue" ]
SecurityGroups:
- !Ref Security
UserData:
Fn::Base64: !Sub |
#!/bin/bash -xe
/etc/eks/bootstrap.sh ${Cluster}
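# redirect pod traffic bound for the EC2 metadata API (169.254.169.254:80 on eni+ interfaces) to the kube2iam agent on :8181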
iptables --append PREROUTING --protocol tcp --destination 169.254.169.254 --dport 80 --in-interface eni+ --jump DNAT --table nat --to-destination `curl 169.254.169.254/latest/meta-data/local-ipv4`:8181
/opt/aws/bin/cfn-signal -e $? --stack ${AWS::StackName} --resource Nodes --region ${AWS::Region}
Nodes:
Type: AWS::AutoScaling::AutoScalingGroup
Properties:
LaunchConfigurationName: !Ref NodesLaunchConfig
MinSize: 3
MaxSize: 20
TargetGroupARNs:
- !Ref RouterTargetGroup80
- !Ref RouterTargetGroup443
VPCZoneIdentifier:
- !Select [ 0, !Ref Subnets ]
- !Select [ 1, !Ref Subnets ]
- !Select [ 2, !Ref Subnets ]
Tags:
- Key: Name
Value: !Ref AWS::StackName
PropagateAtLaunch: true
- Key: !Sub "kubernetes.io/cluster/${Cluster}"
Value: owned
PropagateAtLaunch: true
- Key: k8s.io/cluster-autoscaler/enabled
Value: ""
PropagateAtLaunch: false
- Key: !Sub k8s.io/cluster-autoscaler/${Cluster}
Value: ""
PropagateAtLaunch: false
UpdatePolicy:
AutoScalingRollingUpdate:
MinInstancesInService: 2
MaxBatchSize: 1
PauseTime: PT5M
SuspendProcesses: [ ScheduledActions ]
WaitOnResourceSignals: true
RackBucket:
Type: AWS::S3::Bucket
DeletionPolicy: Retain
Properties:
AccessControl: Private
BucketEncryption:
ServerSideEncryptionConfiguration:
- ServerSideEncryptionByDefault:
SSEAlgorithm: aws:kms
RouterTargetGroup80:
Type: AWS::ElasticLoadBalancingV2::TargetGroup
Properties:
HealthCheckIntervalSeconds: 10
HealthCheckPath: /convox/health
HealthCheckProtocol: HTTP
HealthyThresholdCount: 2
Port: 32000
Protocol: TCP
TargetType: instance
UnhealthyThresholdCount: 2
VpcId: !Ref Vpc
RouterTargetGroup443:
Type: AWS::ElasticLoadBalancingV2::TargetGroup
Properties:
HealthCheckIntervalSeconds: 10
HealthCheckPath: /convox/health
HealthCheckProtocol: HTTPS
HealthyThresholdCount: 2
Port: 32001
Protocol: TCP
TargetType: instance
UnhealthyThresholdCount: 2
VpcId: !Ref Vpc

terraform/cluster/aws/iam.tf Normal file

@ -0,0 +1,58 @@
data "aws_iam_policy_document" "assume_ec2" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["ec2.amazonaws.com"]
}
}
}
data "aws_iam_policy_document" "assume_eks" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "Service"
identifiers = ["eks.amazonaws.com"]
}
}
}
resource "aws_iam_role" "cluster" {
assume_role_policy = data.aws_iam_policy_document.assume_eks.json
name = "${var.name}-cluster"
path = "/convox/"
}
resource "aws_iam_role_policy_attachment" "cluster_eks_cluster" {
role = aws_iam_role.cluster.name
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSClusterPolicy"
}
resource "aws_iam_role_policy_attachment" "cluster_eks_service" {
role = aws_iam_role.cluster.name
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSServicePolicy"
}
resource "aws_iam_role" "nodes" {
assume_role_policy = data.aws_iam_policy_document.assume_ec2.json
name = "${var.name}-nodes"
path = "/convox/"
}
resource "aws_iam_role_policy_attachment" "nodes_ecr" {
role = aws_iam_role.nodes.name
policy_arn = "arn:aws:iam::aws:policy/AmazonEC2ContainerRegistryReadOnly"
}
resource "aws_iam_role_policy_attachment" "nodes_eks_cni" {
role = aws_iam_role.nodes.name
policy_arn = "arn:aws:iam::aws:policy/AmazonEKS_CNI_Policy"
}
resource "aws_iam_role_policy_attachment" "nodes_eks_worker" {
role = aws_iam_role.nodes.name
policy_arn = "arn:aws:iam::aws:policy/AmazonEKSWorkerNodePolicy"
}

terraform/cluster/aws/kube2iam.tf Normal file

@ -0,0 +1,104 @@
resource "kubernetes_cluster_role" "kube2iam" {
metadata {
name = "kube2iam"
}
rule {
api_groups = [""]
resources = ["namespaces", "pods"]
verbs = ["get", "list", "watch"]
}
}
resource "kubernetes_cluster_role_binding" "kube2iam" {
metadata {
name = "kube2iam"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "kube2iam"
}
subject {
kind = "ServiceAccount"
name = "kube2iam"
namespace = "kube-system"
}
}
resource "kubernetes_service_account" "kube2iam" {
metadata {
namespace = "kube-system"
name = "kube2iam"
}
}
resource "kubernetes_daemonset" "kube2iam" {
metadata {
namespace = "kube-system"
name = "kube2iam"
}
spec {
selector {
match_labels = {
service = "kube2iam"
}
}
template {
metadata {
labels = {
service = "kube2iam"
}
}
spec {
automount_service_account_token = true
host_network = true
service_account_name = "kube2iam"
container {
image = "jtblin/kube2iam:latest"
name = "kube2iam"
args = [
"--base-role-arn=arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/",
"--host-interface=eni+",
"--host-ip=$(HOST_IP)",
"--node=$(NODE_NAME)",
]
env {
name = "HOST_IP"
value_from {
field_ref {
field_path = "status.podIP"
}
}
}
env {
name = "NODE_NAME"
value_from {
field_ref {
field_path = "spec.nodeName"
}
}
}
port {
container_port = 8181
host_port = 8181
name = "http"
}
security_context {
privileged = true
}
}
}
}
}
}

terraform/cluster/aws/kubeconfig.tpl Normal file

@ -0,0 +1,25 @@
apiVersion: v1
clusters:
- cluster:
server: ${endpoint}
certificate-authority-data: ${ca}
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: aws
name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
user:
exec:
apiVersion: client.authentication.k8s.io/v1alpha1
command: aws
args:
- "eks"
- "get-token"
- "--cluster-name"
- "${cluster}"

terraform/cluster/aws/main.tf Normal file

@ -0,0 +1,159 @@
terraform {
required_version = ">= 0.12.0"
}
provider "aws" {
version = "~> 2.22"
}
provider "local" {
version = "~> 1.3"
}
data "aws_caller_identity" "current" {
}
data "aws_ami" "node" {
most_recent = true
owners = ["602401143452"] # aws eks team
filter {
name = "name"
values = ["amazon-eks-node-${var.kubernetes_version}-v*"]
}
}
resource "aws_cloudformation_stack" "nodes" {
depends_on = [aws_internet_gateway.nodes]
capabilities = ["CAPABILITY_IAM"]
on_failure = "DELETE"
name = "${var.name}-nodes"
template_body = file("${path.module}/cloudformation.yml")
parameters = {
Ami = data.aws_ami.node.id
Cluster = aws_eks_cluster.cluster.id
Role = aws_iam_role.nodes.name
Security = aws_security_group.nodes.id
SshKey = var.ssh_key
Subnets = join(",", aws_subnet.private.*.id)
Type = var.node_type
Vpc = aws_vpc.nodes.id
}
}
resource "aws_eks_cluster" "cluster" {
name = var.name
role_arn = aws_iam_role.cluster.arn
vpc_config {
endpoint_public_access = true
endpoint_private_access = false
security_group_ids = [aws_security_group.cluster.id]
subnet_ids = concat(aws_subnet.public.*.id)
}
}
resource "local_file" "kubeconfig" {
depends_on = [
aws_cloudformation_stack.nodes,
aws_iam_role_policy_attachment.cluster_eks_cluster,
aws_iam_role_policy_attachment.cluster_eks_service,
aws_iam_role_policy_attachment.nodes_ecr,
aws_iam_role_policy_attachment.nodes_eks_cni,
aws_iam_role_policy_attachment.nodes_eks_worker,
aws_route.private-default,
aws_route.public-default,
aws_route_table.private,
aws_route_table.public,
aws_route_table_association.private,
aws_route_table_association.public,
aws_security_group_rule.cluster_egress_control,
aws_security_group_rule.cluster_egress_traffic,
aws_security_group_rule.cluster_ingress_control,
aws_security_group_rule.nodes_egress_internet,
aws_security_group_rule.nodes_ingress_control,
aws_security_group_rule.nodes_ingress_internal,
aws_security_group_rule.nodes_ingress_mtu,
aws_security_group_rule.nodes_ingress_traffic,
]
filename = pathexpand("~/.kube/config.${var.name}")
content = templatefile("${path.module}/kubeconfig.tpl", {
ca = aws_eks_cluster.cluster.certificate_authority.0.data
cluster = aws_eks_cluster.cluster.id
endpoint = aws_eks_cluster.cluster.endpoint
})
}
provider "kubernetes" {
version = "~> 1.8"
alias = "direct"
load_config_file = false
cluster_ca_certificate = base64decode(aws_eks_cluster.cluster.certificate_authority.0.data)
host = aws_eks_cluster.cluster.endpoint
exec {
api_version = "client.authentication.k8s.io/v1alpha1"
command = "aws"
args = ["eks", "get-token", "--cluster-name", aws_eks_cluster.cluster.id]
}
}
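# worker nodes can only join the cluster once aws-auth maps their IAM role to kubelet identities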
resource "kubernetes_config_map" "auth" {
depends_on = [
aws_cloudformation_stack.nodes,
aws_iam_role_policy_attachment.cluster_eks_cluster,
aws_iam_role_policy_attachment.cluster_eks_service,
aws_iam_role_policy_attachment.nodes_ecr,
aws_iam_role_policy_attachment.nodes_eks_cni,
aws_iam_role_policy_attachment.nodes_eks_worker,
aws_route.private-default,
aws_route.public-default,
aws_route_table.private,
aws_route_table.public,
aws_route_table_association.private,
aws_route_table_association.public,
aws_security_group_rule.cluster_egress_control,
aws_security_group_rule.cluster_egress_traffic,
aws_security_group_rule.cluster_ingress_control,
aws_security_group_rule.nodes_egress_internet,
aws_security_group_rule.nodes_ingress_control,
aws_security_group_rule.nodes_ingress_internal,
aws_security_group_rule.nodes_ingress_mtu,
aws_security_group_rule.nodes_ingress_traffic,
]
provider = kubernetes.direct
metadata {
namespace = "kube-system"
name = "aws-auth"
}
data = {
mapRoles = <<EOF
- rolearn: "arn:aws:iam::${data.aws_caller_identity.current.account_id}:role/${aws_iam_role.nodes.name}"
username: system:node:{{EC2PrivateDNSName}}
groups:
- system:bootstrappers
- system:nodes
EOF
}
}
module "logs" {
source = "../../logs/aws"
providers = {
aws = aws
kubernetes = kubernetes.direct
}
cluster = var.name
namespace = "kube-system"
name = var.name
nodes_role = aws_iam_role.nodes.arn
}

terraform/cluster/aws/outputs.tf Normal file

@ -0,0 +1,30 @@
output "kubeconfig" {
depends_on = [local_file.kubeconfig, kubernetes_config_map.auth]
value = local_file.kubeconfig.filename
}
output "nodes_role" {
depends_on = ["google_project_service.cloudresourcemanager"]
value = aws_iam_role.nodes.arn
}
output "nodes_security" {
value = aws_security_group.nodes.id
}
output "subnets_private" {
value = aws_subnet.private.*.id
}
output "subnets_public" {
value = aws_subnet.public.*.id
}
output "target_group_http" {
value = aws_cloudformation_stack.nodes.outputs.RouterTargetGroup80
}
output "target_group_https" {
value = aws_cloudformation_stack.nodes.outputs.RouterTargetGroup443
}

terraform/cluster/aws/security.tf Normal file

@ -0,0 +1,140 @@
resource "aws_security_group" "cluster" {
name = "${var.name}-cluster"
description = "${var.name} cluster"
vpc_id = aws_vpc.nodes.id
tags = merge(local.tags, {
Name = "${var.name}-cluster"
})
}
resource "aws_security_group_rule" "cluster_ingress_control" {
type = "ingress"
description = "control ingress"
security_group_id = aws_security_group.cluster.id
source_security_group_id = aws_security_group.nodes.id
protocol = "tcp"
from_port = 443
to_port = 443
}
resource "aws_security_group_rule" "cluster_egress_control" {
type = "egress"
description = "control egress"
security_group_id = aws_security_group.cluster.id
source_security_group_id = aws_security_group.nodes.id
protocol = "tcp"
from_port = 443
to_port = 443
}
resource "aws_security_group_rule" "cluster_egress_traffic" {
type = "egress"
description = "traffic egress"
security_group_id = aws_security_group.cluster.id
source_security_group_id = aws_security_group.nodes.id
protocol = "tcp"
from_port = 1025
to_port = 65535
}
resource "aws_security_group" "nodes" {
name = "${var.name}-nodes"
description = "${var.name} nodes"
vpc_id = aws_vpc.nodes.id
# ingress {
# description = "mtu discovery"
# cidr_blocks = ["0.0.0.0/0"]
# protocol = "icmp"
# from_port = 3
# to_port = 4
# }
# ingress {
# description = "control ingress"
# security_groups = [aws_security_group.cluster.id]
# protocol = "tcp"
# from_port = 443
# to_port = 443
# }
# ingress {
# description = "traffic ingress"
# security_groups = [aws_security_group.cluster.id]
# protocol = "tcp"
# from_port = 1025
# to_port = 65535
# }
# ingress {
# description = "internal ingress"
# self = true
# protocol = -1
# from_port = 0
# to_port = 0
# }
# egress {
# description = "internet egress"
# protocol = "-1"
# cidr_blocks = ["0.0.0.0/0"]
# from_port = 0
# to_port = 0
# }
tags = merge(local.tags, {
Name = "${var.name} nodes"
"kubernetes.io.cluster/${aws_eks_cluster.cluster.id}" : "owned"
})
}
resource "aws_security_group_rule" "nodes_ingress_mtu" {
type = "ingress"
description = "mtu discovery"
security_group_id = aws_security_group.nodes.id
cidr_blocks = ["0.0.0.0/0"]
protocol = "icmp"
from_port = 3
to_port = 4
}
resource "aws_security_group_rule" "nodes_ingress_control" {
type = "ingress"
description = "control ingress"
security_group_id = aws_security_group.nodes.id
source_security_group_id = aws_security_group.cluster.id
protocol = "tcp"
from_port = 443
to_port = 443
}
resource "aws_security_group_rule" "nodes_ingress_traffic" {
type = "ingress"
description = "traffic ingress"
security_group_id = aws_security_group.nodes.id
source_security_group_id = aws_security_group.cluster.id
protocol = "tcp"
from_port = 1025
to_port = 65535
}
resource "aws_security_group_rule" "nodes_ingress_internal" {
type = "ingress"
description = "internal ingress"
security_group_id = aws_security_group.nodes.id
source_security_group_id = aws_security_group.nodes.id
protocol = -1
from_port = 0
to_port = 65535
}
resource "aws_security_group_rule" "nodes_egress_internet" {
type = "egress"
description = "internet egress"
security_group_id = aws_security_group.nodes.id
cidr_blocks = ["0.0.0.0/0"]
protocol = -1
from_port = 0
to_port = 65535
}

terraform/cluster/aws/vars.tf Normal file

@ -0,0 +1,19 @@
variable "cidr" {
default = "10.1.0.0/16"
}
variable "kubernetes_version" {
default = "1.13"
}
variable "name" {
type = "string"
}
variable "node_type" {
default = "t3.small"
}
variable "ssh_key" {
default = ""
}

terraform/cluster/aws/vpc.tf Normal file

@ -0,0 +1,120 @@
locals {
tags = {
Name = var.name
}
}
data "aws_availability_zones" "available" {
state = "available"
}
resource "aws_vpc" "nodes" {
cidr_block = var.cidr
enable_dns_hostnames = true
enable_dns_support = true
tags = merge(local.tags, {
"kubernetes.io/cluster/${var.name}" : "shared"
})
}
resource "aws_internet_gateway" "nodes" {
vpc_id = aws_vpc.nodes.id
tags = local.tags
}
resource "aws_subnet" "public" {
count = 3
availability_zone = data.aws_availability_zones.available.names[count.index]
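# with the default /16 VPC CIDR this carves three /20 public subnets (10.1.0.0/20, 10.1.16.0/20, 10.1.32.0/20)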
cidr_block = cidrsubnet(var.cidr, 4, count.index)
vpc_id = aws_vpc.nodes.id
tags = merge(local.tags, {
Name = "${var.name} public ${count.index}"
"kubernetes.io/cluster/${var.name}" : "shared"
"kubernetes.io/role/elb" : ""
})
}
resource "aws_route_table" "public" {
vpc_id = aws_vpc.nodes.id
tags = merge(local.tags, {
Name = "${var.name} public"
})
}
resource "aws_route" "public-default" {
destination_cidr_block = "0.0.0.0/0"
gateway_id = aws_internet_gateway.nodes.id
route_table_id = aws_route_table.public.id
}
resource "aws_route_table_association" "public" {
count = 3
route_table_id = aws_route_table.public.id
subnet_id = aws_subnet.public[count.index].id
}
resource "aws_subnet" "private" {
count = 3
availability_zone = data.aws_availability_zones.available.names[count.index]
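# netnum starts at 1, so the three /18 private subnets (10.1.64.0/18, 10.1.128.0/18, 10.1.192.0/18 by default) sit past the public range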
cidr_block = cidrsubnet(var.cidr, 2, count.index + 1)
vpc_id = aws_vpc.nodes.id
tags = merge(local.tags, {
Name = "${var.name} private ${count.index}"
"kubernetes.io/cluster/${var.name}" : "shared"
"kubernetes.io/role/internal-elb" : ""
})
}
resource "aws_eip" "nat" {
count = 3
vpc = true
tags = merge(local.tags, {
Name = "${var.name} nat ${count.index}"
})
}
resource "aws_nat_gateway" "private" {
count = 3
allocation_id = aws_eip.nat[count.index].id
subnet_id = aws_subnet.public[count.index].id
tags = merge(local.tags, {
Name = "${var.name} ${count.index}"
})
}
resource "aws_route_table" "private" {
count = 3
vpc_id = aws_vpc.nodes.id
tags = merge(local.tags, {
Name = "${var.name} private ${count.index}"
})
}
resource "aws_route" "private-default" {
count = 3
destination_cidr_block = "0.0.0.0/0"
nat_gateway_id = aws_nat_gateway.private[count.index].id
route_table_id = aws_route_table.private[count.index].id
}
resource "aws_route_table_association" "private" {
count = 3
route_table_id = aws_route_table.private[count.index].id
subnet_id = aws_subnet.private[count.index].id
}

terraform/cluster/gcp/iam.tf Normal file

@ -0,0 +1,31 @@
resource "google_service_account" "nodes" {
account_id = "${var.name}-nodes"
}
resource "google_project_iam_member" "nodes-logging" {
depends_on = [google_project_service.cloudresourcemanager]
role = "roles/logging.logWriter"
member = "serviceAccount:${google_service_account.nodes.email}"
}
resource "google_project_iam_member" "nodes-monitoring" {
depends_on = [google_project_service.cloudresourcemanager]
role = "roles/monitoring.metricWriter"
member = "serviceAccount:${google_service_account.nodes.email}"
}
resource "google_project_iam_member" "nodes-storage" {
depends_on = [google_project_service.cloudresourcemanager]
role = "roles/storage.admin"
member = "serviceAccount:${google_service_account.nodes.email}"
}
resource "google_project_iam_member" "nodes-token-creator" {
depends_on = [google_project_service.cloudresourcemanager]
role = "roles/iam.serviceAccountTokenCreator"
member = "serviceAccount:${google_service_account.nodes.email}"
}

terraform/cluster/gcp/kube-google-iam.tf Normal file

@ -0,0 +1,106 @@
resource "kubernetes_cluster_role" "kube-google-iam" {
metadata {
name = "kube-google-iam"
}
rule {
api_groups = [""]
resources = ["namespaces", "pods"]
verbs = ["get", "list", "watch"]
}
}
resource "kubernetes_cluster_role_binding" "kube-google-iam" {
metadata {
name = "kube-google-iam"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "kube-google-iam"
}
subject {
kind = "ServiceAccount"
name = "kube-google-iam"
namespace = "kube-system"
}
}
resource "kubernetes_service_account" "kube-google-iam" {
metadata {
namespace = "kube-system"
name = "kube-google-iam"
}
}
resource "kubernetes_daemonset" "kube-google-iam" {
metadata {
namespace = "kube-system"
name = "kube-google-iam"
}
spec {
selector {
match_labels = {
service = "kube-google-iam"
}
}
template {
metadata {
labels = {
service = "kube-google-iam"
}
}
spec {
automount_service_account_token = true
host_network = true
service_account_name = "kube-google-iam"
toleration {
key = "node-role.kubernetes.io/master"
effect = "NoSchedule"
}
container {
image = "convox/kube-google-iam"
name = "kube-google-iam"
args = [
"--verbose",
"--iptables=true",
"--host-interface=cbr0",
"--host-ip=$(HOST_IP)",
"--attributes=cluster-name",
"--default-service-account=${google_service_account.nodes.email}"
]
image_pull_policy = "Always"
env {
name = "HOST_IP"
value_from {
field_ref {
field_path = "status.podIP"
}
}
}
# port {
# container_port = 8181
# host_port = 8181
# name = "http"
# }
security_context {
privileged = true
}
}
}
}
}
}

terraform/cluster/gcp/kubeconfig.tpl Normal file

@ -0,0 +1,19 @@
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: ${ca}
server: https://${endpoint}
name: gcloud
contexts:
- context:
cluster: gcloud
user: gcloud
name: gcloud
current-context: gcloud
kind: Config
preferences: {}
users:
- name: gcloud
user:
client-certificate-data: ${client_certificate}
client-key-data: ${client_key}

terraform/cluster/gcp/main.tf Normal file

@ -0,0 +1,143 @@
terraform {
required_version = ">= 0.12.0"
}
provider "google" {
version = "~> 2.12"
}
provider "local" {
version = "~> 1.3"
}
provider "random" {
version = "~> 2.2"
}
data "google_client_config" "current" {}
data "google_container_engine_versions" "available" {
location = data.google_client_config.current.region
version_prefix = "1.13."
}
resource "random_string" "password" {
length = 64
special = true
}
resource "google_container_cluster" "rack" {
name = var.name
location = data.google_client_config.current.region
remove_default_node_pool = true
initial_node_count = 1
logging_service = "logging.googleapis.com"
min_master_version = data.google_container_engine_versions.available.latest_master_version
ip_allocation_policy {
use_ip_aliases = true
}
master_auth {
username = "gcloud"
password = random_string.password.result
client_certificate_config {
issue_client_certificate = true
}
}
}
resource "google_container_node_pool" "rack" {
name = "${google_container_cluster.rack.name}-nodes-${var.node_type}"
location = google_container_cluster.rack.location
cluster = google_container_cluster.rack.name
node_count = 1
node_config {
preemptible = true
machine_type = var.node_type
metadata = {
disable-legacy-endpoints = "true"
}
service_account = google_service_account.nodes.email
oauth_scopes = [
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/devstorage.read_write",
"https://www.googleapis.com/auth/logging.write",
"https://www.googleapis.com/auth/monitoring",
]
}
lifecycle {
create_before_destroy = true
}
}
resource "local_file" "kubeconfig" {
depends_on = [
kubernetes_cluster_role_binding.client,
google_container_node_pool.rack,
]
filename = pathexpand("~/.kube/config.${var.name}")
content = templatefile("${path.module}/kubeconfig.tpl", {
ca = google_container_cluster.rack.master_auth.0.cluster_ca_certificate
endpoint = google_container_cluster.rack.endpoint
client_certificate = google_container_cluster.rack.master_auth.0.client_certificate
client_key = google_container_cluster.rack.master_auth.0.client_key
})
lifecycle {
ignore_changes = [content]
}
}
provider "kubernetes" {
version = "~> 1.8"
alias = "direct"
load_config_file = false
cluster_ca_certificate = base64decode(google_container_cluster.rack.master_auth.0.cluster_ca_certificate)
host = "https://${google_container_cluster.rack.endpoint}"
username = "gcloud"
password = random_string.password.result
}
resource "kubernetes_cluster_role_binding" "client" {
provider = "kubernetes.direct"
metadata {
name = "client-binding"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "cluster-admin"
}
subject {
kind = "User"
name = "client"
}
}
# module "logs" {
# source = "../../logs/gcp"
# providers = {
# google = google
# kubernetes = kubernetes.direct
# }
# cluster = var.name
# name = var.name
# namespace = "kube-system"
# }

terraform/cluster/gcp/outputs.tf Normal file

@ -0,0 +1,17 @@
output "kubeconfig" {
depends_on = [
local_file.kubeconfig,
kubernetes_cluster_role_binding.client,
google_container_node_pool.rack,
]
value = local_file.kubeconfig.filename
}
output "nodes_account" {
depends_on = [
google_project_service.cloudresourcemanager,
google_project_service.redis,
]
value = google_service_account.nodes.email
}

terraform/cluster/gcp/services.tf Normal file

@ -0,0 +1,24 @@
resource "google_project_service" "cloudresourcemanager" {
disable_on_destroy = false
service = "cloudresourcemanager.googleapis.com"
}
resource "google_project_service" "compute" {
disable_on_destroy = false
service = "compute.googleapis.com"
}
resource "google_project_service" "container" {
disable_on_destroy = false
service = "container.googleapis.com"
}
resource "google_project_service" "iam" {
disable_on_destroy = false
service = "iam.googleapis.com"
}
resource "google_project_service" "redis" {
disable_on_destroy = false
service = "redis.googleapis.com"
}

terraform/cluster/gcp/vars.tf Normal file

@ -0,0 +1,7 @@
variable "name" {
type = string
}
variable "node_type" {
type = string
}

terraform/logs/aws/iam.tf Normal file

@ -0,0 +1,45 @@
data "aws_iam_policy_document" "api_assume" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "AWS"
identifiers = [var.nodes_role]
}
}
}
data "aws_iam_policy_document" "logs" {
statement {
actions = [
"logs:CreateLogGroup",
"logs:DescribeLogGroups",
]
resources = [
"arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:*"
]
}
statement {
actions = [
"logs:CreateLogStream",
"logs:DescribeLogStreams",
"logs:PutLogEvents",
]
resources = [
"arn:aws:logs:${data.aws_region.current.name}:${data.aws_caller_identity.current.account_id}:log-group:/convox/*"
]
}
}
resource "aws_iam_role" "logs" {
name = "${var.name}-logs"
assume_role_policy = data.aws_iam_policy_document.api_assume.json
path = "/convox/"
tags = local.tags
}
resource "aws_iam_role_policy" "logs" {
name = "logs"
role = aws_iam_role.logs.name
policy = data.aws_iam_policy_document.logs.json
}

terraform/logs/aws/main.tf Normal file

@ -0,0 +1,42 @@
terraform {
required_version = ">= 0.12.0"
}
provider "aws" {
version = "~> 2.22"
}
provider "kubernetes" {
version = "~> 1.8"
}
data "aws_caller_identity" "current" {}
data "aws_region" "current" {}
locals {
tags = {
System = "convox"
Cluster = var.cluster
}
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
cluster = var.cluster
image = "fluent/fluentd-kubernetes-daemonset:v1.3.3-debian-cloudwatch-1.4"
namespace = var.namespace
target = file("${path.module}/target.conf")
annotations = {
"iam.amazonaws.com/role" = aws_iam_role.logs.arn
}
env = {
AWS_REGION = data.aws_region.current.name
}
}

terraform/logs/aws/target.conf Normal file

@ -0,0 +1,27 @@
<label @target>
<filter **>
@type record_transformer
enable_ruby true
<record>
group_name /convox/${record["kubernetes"]["namespace_labels"]["rack"]}/${record["kubernetes"]["namespace_labels"]["app"]}
stream_name service/${record["kubernetes"]["labels"]["service"]}/${record["kubernetes"]["pod_name"]}
</record>
</filter>
<match **>
@type cloudwatch_logs
region us-east-1
auto_create_stream true
log_group_name_key group_name
log_stream_name_key stream_name
message_keys log
remove_log_group_name_key true
remove_log_stream_name_key true
<buffer>
flush_interval 5
chunk_limit_size 2m
queued_chunks_limit_size 32
retry_forever true
</buffer>
</match>
</label>

terraform/logs/aws/vars.tf Normal file

@ -0,0 +1,16 @@
variable "cluster" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "name" {
type = "string"
}
variable "nodes_role" {
type = "string"
}

terraform/logs/gcp/iam.tf Normal file

@ -0,0 +1,12 @@
resource "google_service_account" "logs" {
account_id = "${var.name}-logs"
}
resource "google_service_account_key" "logs" {
service_account_id = google_service_account.logs.name
}
resource "google_project_iam_member" "logs-logging" {
role = "roles/logging.admin"
member = "serviceAccount:${google_service_account.logs.email}"
}

terraform/logs/gcp/main.tf Normal file

@ -0,0 +1,42 @@
terraform {
required_version = ">= 0.12.0"
}
provider "google" {
version = "~> 2.12"
}
provider "kubernetes" {
version = "~> 1.8"
}
# data "aws_caller_identity" "current" {}
# data "aws_region" "current" {}
locals {
tags = {
System = "convox"
Cluster = var.cluster
}
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
cluster = var.cluster
image = "fluent/fluentd-kubernetes-daemonset:v1.3.1-debian-stackdriver-1.3"
namespace = var.namespace
target = file("${path.module}/target.conf")
annotations = {
"cloud.google.com/service-account" : google_service_account.logs.email
}
# env = {
# AWS_REGION = data.aws_region.current.name
# }
}

terraform/logs/gcp/target.conf Normal file

@ -0,0 +1,8 @@
<label @target>
<match **>
@type google_cloud
flush_interval 2
buffer_chunk_limit 1m
disable_retry_limit true
</match>
</label>

terraform/logs/gcp/vars.tf Normal file

@ -0,0 +1,11 @@
variable "cluster" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "name" {
type = "string"
}

terraform/logs/k8s/containers.conf Normal file

@ -0,0 +1,84 @@
<source>
@type tail
@label @container
path /var/log/containers/*.log
exclude_path ["/var/log/containers/cloudwatch-agent*", "/var/log/containers/fluentd*"]
pos_file /var/log/fluentd-containers.log.pos
tag container.*
read_from_head true
<parse>
@type json
time_format %Y-%m-%dT%H:%M:%S.%NZ
</parse>
</source>
<label @container>
<filter container.**>
@type kubernetes_metadata
log_level warn
</filter>
<match container.**>
@type rewrite_tag_filter
<rule>
key $.kubernetes.labels.system
pattern ^convox$
tag convox
</rule>
</match>
<match convox>
@type relabel
@label @convox
</match>
</label>
<label @convox>
<match convox>
@type rewrite_tag_filter
<rule>
key $.kubernetes.labels.service
pattern ^(.+)$
tag service.$1
</rule>
</match>
<match service.**>
@type rewrite_tag_filter
<rule>
key $.kubernetes.namespace_labels.app
pattern ^(.+)$
tag app.$1.${tag}
</rule>
</match>
<match app.**>
@type rewrite_tag_filter
<rule>
key $.kubernetes.namespace_labels.rack
pattern ^(.+)$
tag rack.$1.${tag}
</rule>
</match>
<match rack.**>
@type relabel
@label @rack
</match>
</label>
<label @rack>
# <filter **>
# @type concat
# key log
# multiline_start_regexp /^\S/
# separator ""
# flush_interval 5
# timeout_label @target
# </filter>
<match **>
@type relabel
@label @target
</match>
</label>

terraform/logs/k8s/fluent.conf Normal file

@ -0,0 +1,6 @@
@include containers.conf
@include target.conf
<match fluent.**>
@type null
</match>

terraform/logs/k8s/main.tf Normal file

@ -0,0 +1,201 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.8"
}
resource "kubernetes_cluster_role" "fluentd" {
metadata {
name = "${var.cluster}-fluentd"
}
rule {
api_groups = [""]
resources = ["namespaces", "pods", "pods/logs"]
verbs = ["get", "list", "watch"]
}
}
resource "kubernetes_cluster_role_binding" "fluentd" {
metadata {
name = "${var.cluster}-fluentd"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = kubernetes_cluster_role.fluentd.metadata.0.name
}
subject {
kind = "ServiceAccount"
name = kubernetes_service_account.fluentd.metadata.0.name
namespace = kubernetes_service_account.fluentd.metadata.0.namespace
}
}
resource "kubernetes_service_account" "fluentd" {
metadata {
namespace = var.namespace
name = "fluentd"
}
}
resource "kubernetes_config_map" "fluentd" {
metadata {
namespace = var.namespace
name = "fluentd"
}
data = {
"fluent.conf" = file("${path.module}/fluent.conf")
"containers.conf" = file("${path.module}/containers.conf")
"target.conf" = var.target
}
}
resource "kubernetes_daemonset" "fluentd" {
metadata {
namespace = var.namespace
name = "fluentd"
}
spec {
selector {
match_labels = {
service = "fluentd"
}
}
template {
metadata {
labels = {
service = "fluentd"
}
annotations = var.annotations
}
spec {
service_account_name = "fluentd"
automount_service_account_token = true
init_container {
name = "config"
image = "busybox"
command = ["sh", "-c", "cp /config/..data/* /fluentd/etc"]
volume_mount {
name = "config"
mount_path = "/config"
}
volume_mount {
name = "fluentd-etc"
mount_path = "/fluentd/etc"
}
}
container {
name = "main"
image = var.image
env {
name = "CLUSTER_NAME"
value = var.cluster
}
dynamic "env" {
for_each = var.env
content {
name = env.key
value = env.value
}
}
resources {
limits {
memory = "200Mi"
}
requests {
cpu = "100m"
memory = "200Mi"
}
}
volume_mount {
name = "fluentd-etc"
mount_path = "/fluentd/etc"
}
volume_mount {
name = "var-log"
mount_path = "/var/log"
}
volume_mount {
name = "var-lib-docker-containers"
mount_path = "/var/lib/docker/containers"
read_only = true
}
volume_mount {
name = "run-log-journal"
mount_path = "/run/log/journal"
read_only = true
}
volume_mount {
name = "var-log-dmesg"
mount_path = "/var/log/dmesg"
read_only = true
}
}
volume {
name = "config"
config_map {
name = "fluentd"
}
}
volume {
name = "fluentd-etc"
empty_dir {}
}
volume {
name = "var-log"
host_path {
path = "/var/log"
}
}
volume {
name = "var-lib-docker-containers"
host_path {
path = "/var/lib/docker/containers"
}
}
volume {
name = "run-log-journal"
host_path {
path = "/run/log/journal"
}
}
volume {
name = "var-log-dmesg"
host_path {
path = "/var/log/dmesg"
}
}
}
}
}
}

terraform/logs/k8s/vars.tf Normal file

@ -0,0 +1,23 @@
variable "annotations" {
default = {}
}
variable "env" {
default = {}
}
variable "cluster" {
type = "string"
}
variable "image" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "target" {
type = "string"
}

terraform/rack/aws/main.tf Normal file

@ -0,0 +1,65 @@
terraform {
required_version = ">= 0.12.0"
}
provider "aws" {
version = "~> 2.22"
}
provider "external" {
version = "~> 1.2"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = var.kubeconfig
name = var.name
release = var.release
}
module "api" {
source = "../../api/aws"
providers = {
aws = aws
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = var.kubeconfig
name = var.name
namespace = module.k8s.namespace
nodes_role = var.nodes_role
release = var.release
router = module.router.endpoint
}
module "router" {
source = "../../router/aws"
providers = {
aws = aws
kubernetes = kubernetes
}
name = var.name
namespace = module.k8s.namespace
nodes_role = var.nodes_role
nodes_security = var.nodes_security
release = var.release
subnets = var.subnets_public
target_group_http = var.target_group_http
target_group_https = var.target_group_https
}

terraform/rack/aws/outputs.tf Normal file

@ -0,0 +1,7 @@
output "api" {
value = module.api.endpoint
}
output "router" {
value = module.router.endpoint
}

terraform/rack/aws/vars.tf Normal file

@ -0,0 +1,40 @@
variable "domain" {
type = "string"
}
variable "kubeconfig" {
type = "string"
}
variable "name" {
type = "string"
}
variable "nodes_role" {
type = "string"
}
variable "nodes_security" {
type = "string"
}
variable "release" {
type = "string"
}
variable "subnets_private" {
type = "list"
}
variable "subnets_public" {
type = "list"
}
variable "target_group_http" {
type = "string"
}
variable "target_group_https" {
type = "string"
}

terraform/rack/gcp/main.tf Normal file

@ -0,0 +1,60 @@
terraform {
required_version = ">= 0.12.0"
}
provider "google" {
version = "~> 2.12"
credentials = pathexpand(var.credentials)
project = var.project
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = var.kubeconfig
name = var.name
release = var.release
}
module "api" {
source = "../../api/gcp"
providers = {
google = google
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = var.kubeconfig
name = var.name
namespace = module.k8s.namespace
nodes_account = var.nodes_account
release = var.release
router = module.router.endpoint
}
module "router" {
source = "../../router/gcp"
providers = {
google = google
kubernetes = kubernetes
}
name = var.name
namespace = module.k8s.namespace
release = var.release
}

terraform/rack/gcp/outputs.tf Normal file

@ -0,0 +1,7 @@
output "api" {
value = module.api.endpoint
}
output "endpoint" {
value = module.router.endpoint
}

terraform/rack/gcp/vars.tf Normal file

@ -0,0 +1,31 @@
variable "credentials" {
default = "~/.config/gcloud/terraform.json"
}
variable "domain" {
type = "string"
}
variable "kubeconfig" {
type = "string"
}
variable "name" {
type = "string"
}
variable "node_type" {
default = "n1-standard-1"
}
variable "nodes_account" {
type = "string"
}
variable "region" {
default = "us-east1"
}
variable "release" {
type = "string"
}

terraform/rack/k8s/main.tf Normal file

@ -0,0 +1,44 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
resource "kubernetes_namespace" "system" {
metadata {
labels = {
rack = var.name
system = "convox"
app = "system"
}
name = "${var.name}-system"
}
}
resource "kubernetes_config_map" "rack" {
metadata {
namespace = kubernetes_namespace.system.metadata.0.name
name = "rack"
}
data = {
DOMAIN = var.domain
}
}
module "atom" {
source = "../../atom/k8s"
providers = {
kubernetes = kubernetes
}
kubeconfig = var.kubeconfig
namespace = kubernetes_namespace.system.metadata.0.name
release = var.release
}

terraform/rack/k8s/outputs.tf Normal file

@ -0,0 +1,4 @@
output "namespace" {
depends_on = [kubernetes_namespace.system]
value = kubernetes_namespace.system.metadata[0].name
}

terraform/rack/k8s/vars.tf Normal file

@ -0,0 +1,15 @@
variable "domain" {
type = "string"
}
variable "kubeconfig" {
type = "string"
}
variable "name" {
type = "string"
}
variable "release" {
type = "string"
}

terraform/router/aws/alb.tf Normal file

@ -0,0 +1,27 @@
resource "aws_alb" "router" {
name = "${var.name}-router"
load_balancer_type = "network"
subnets = var.subnets
}
resource "aws_alb_listener" "http" {
load_balancer_arn = aws_alb.router.arn
port = 80
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = var.target_group_http
}
}
resource "aws_alb_listener" "https" {
load_balancer_arn = aws_alb.router.arn
port = 443
protocol = "TCP"
default_action {
type = "forward"
target_group_arn = var.target_group_https
}
}

terraform/router/aws/dynamodb.tf Normal file

@ -0,0 +1,38 @@
resource "aws_dynamodb_table" "cache" {
name = "${var.name}-cache"
billing_mode = "PAY_PER_REQUEST"
hash_key = "key"
attribute {
name = "key"
type = "S"
}
tags = local.tags
}
resource "aws_dynamodb_table" "hosts" {
name = "${var.name}-hosts"
billing_mode = "PAY_PER_REQUEST"
hash_key = "host"
attribute {
name = "host"
type = "S"
}
tags = local.tags
}
resource "aws_dynamodb_table" "targets" {
name = "${var.name}-targets"
billing_mode = "PAY_PER_REQUEST"
hash_key = "target"
attribute {
name = "target"
type = "S"
}
tags = local.tags
}

terraform/router/aws/iam.tf Normal file

@ -0,0 +1,49 @@
data "aws_iam_policy_document" "nodes-assume" {
statement {
actions = ["sts:AssumeRole"]
principals {
type = "AWS"
identifiers = [var.nodes_role]
}
}
}
resource "aws_iam_role" "router" {
name = "${var.name}-router"
assume_role_policy = data.aws_iam_policy_document.nodes-assume.json
path = "/convox/"
tags = local.tags
}
data "aws_iam_policy_document" "router" {
statement {
resources = [aws_dynamodb_table.cache.arn]
actions = [
"dynamodb:DeleteItem",
"dynamodb:GetItem",
"dynamodb:PutItem",
]
}
statement {
resources = [aws_dynamodb_table.hosts.arn]
actions = [
"dynamodb:GetItem",
"dynamodb:UpdateItem",
]
}
statement {
resources = [aws_dynamodb_table.targets.arn]
actions = [
"dynamodb:GetItem",
"dynamodb:UpdateItem",
]
}
}
resource "aws_iam_role_policy" "router" {
name = "${var.name}-router"
role = aws_iam_role.router.id
policy = data.aws_iam_policy_document.router.json
}

terraform/router/aws/main.tf Normal file

@ -0,0 +1,78 @@
terraform {
required_version = ">= 0.12.0"
}
provider "aws" {
version = "~> 2.22"
}
provider "kubernetes" {
version = "~> 1.8"
}
locals {
tags = {
System = "convox"
Rack = var.name
}
}
data "aws_region" "current" {
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
namespace = var.namespace
release = var.release
annotations = {
"iam.amazonaws.com/role" : aws_iam_role.router.arn,
}
env = {
AWS_REGION = data.aws_region.current.name
CACHE = "dynamodb"
STORAGE = "dynamodb"
ROUTER_CACHE = aws_dynamodb_table.cache.name
ROUTER_HOSTS = aws_dynamodb_table.hosts.name
ROUTER_TARGETS = aws_dynamodb_table.targets.name
}
}
resource "kubernetes_service" "router" {
metadata {
namespace = var.namespace
name = "router"
}
spec {
external_traffic_policy = "Local"
type = "NodePort"
port {
name = "http"
node_port = 32000
port = 80
protocol = "TCP"
target_port = 80
}
port {
name = "https"
node_port = 32001
port = 443
protocol = "TCP"
target_port = 443
}
selector = {
system = "convox"
service = "router"
}
}
}

terraform/router/aws/outputs.tf Normal file

@ -0,0 +1,7 @@
# output "endpoint" {
# value = module.router.endpoint
# }
output "endpoint" {
value = aws_alb.router.dns_name
}

terraform/router/aws/security.tf Normal file

@ -0,0 +1,10 @@
resource "aws_security_group_rule" "nodes_ingress_router" {
type = "ingress"
description = "router ingress"
security_group_id = var.nodes_security
cidr_blocks = ["0.0.0.0/0"]
protocol = "tcp"
from_port = 32000
to_port = 32001
}

terraform/router/aws/vars.tf Normal file

@ -0,0 +1,31 @@
variable "name" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "nodes_role" {
type = "string"
}
variable "nodes_security" {
type = "string"
}
variable "release" {
type = "string"
}
variable "subnets" {
type = "list"
}
variable "target_group_http" {
type = "string"
}
variable "target_group_https" {
type = "string"
}

terraform/router/gcp/main.tf Normal file

@ -0,0 +1,60 @@
terraform {
required_version = ">= 0.12.0"
}
provider "google" {
version = "~> 2.12"
}
locals {
tags = {
System = "convox"
Rack = var.name
}
}
module "k8s" {
source = "../k8s"
providers = {
kubernetes = kubernetes
}
namespace = var.namespace
release = var.release
env = {
CACHE = "redis"
REDIS_ADDR = "${google_redis_instance.cache.host}:${google_redis_instance.cache.port}"
}
}
resource "kubernetes_service" "router" {
metadata {
namespace = var.namespace
name = "router"
}
spec {
type = "LoadBalancer"
port {
name = "http"
port = 80
protocol = "TCP"
target_port = 80
}
port {
name = "https"
port = 443
protocol = "TCP"
target_port = 443
}
selector = {
system = "convox"
service = "router"
}
}
}

terraform/router/gcp/outputs.tf Normal file

@ -0,0 +1,4 @@
output "endpoint" {
value = kubernetes_service.router.load_balancer_ingress.0.ip
}

terraform/router/gcp/redis.tf Normal file

@ -0,0 +1,4 @@
resource "google_redis_instance" "cache" {
name = "${var.name}-router"
memory_size_gb = 1
}

terraform/router/gcp/vars.tf Normal file

@ -0,0 +1,11 @@
variable "name" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "release" {
type = "string"
}

terraform/router/k8s/main.tf Normal file

@ -0,0 +1,227 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.8"
}
resource "kubernetes_cluster_role" "router" {
metadata {
name = "router"
}
rule {
api_groups = [""]
resources = ["services"]
verbs = ["get", "list", "watch"]
}
rule {
api_groups = ["extensions"]
resources = ["ingresses"]
verbs = ["get", "list", "watch", ]
}
rule {
api_groups = ["extensions"]
resources = ["ingresses/status"]
verbs = ["update"]
}
rule {
api_groups = [""]
resources = ["configmaps"]
verbs = ["create", "get", "update"]
}
rule {
api_groups = [""]
resources = ["events"]
verbs = ["create"]
}
}
resource "kubernetes_cluster_role_binding" "router" {
depends_on = [kubernetes_cluster_role.router, kubernetes_service_account.router]
metadata {
name = "router"
}
role_ref {
api_group = "rbac.authorization.k8s.io"
kind = "ClusterRole"
name = "router"
}
subject {
kind = "ServiceAccount"
name = "router"
namespace = var.namespace
}
}
resource "kubernetes_service_account" "router" {
metadata {
namespace = var.namespace
name = "router"
}
}
resource "kubernetes_deployment" "router" {
depends_on = [kubernetes_cluster_role_binding.router]
metadata {
namespace = var.namespace
name = "router"
}
spec {
min_ready_seconds = 1
revision_history_limit = 1
selector {
match_labels = {
system = "convox"
service = "router"
}
}
strategy {
type = "RollingUpdate"
rolling_update {
max_surge = "100%"
max_unavailable = "0"
}
}
template {
metadata {
annotations = var.annotations
labels = {
system = "convox"
service = "router"
}
}
spec {
automount_service_account_token = true
service_account_name = "router"
affinity {
pod_anti_affinity {
preferred_during_scheduling_ignored_during_execution {
weight = 100
pod_affinity_term {
label_selector {
match_labels = {
system = "convox"
service = "router"
}
}
topology_key = "kubernetes.io/hostname"
}
}
}
}
container {
name = "main"
args = ["router"]
image = "convox/convox:${var.release}"
image_pull_policy = "Always"
env {
name = "NAMESPACE"
value_from {
field_ref {
field_path = "metadata.namespace"
}
}
}
env {
name = "POD_IP"
value_from {
field_ref {
field_path = "status.podIP"
}
}
}
env {
name = "SERVICE_HOST"
value = "router.${var.namespace}.svc.cluster.local"
}
env {
name = "AUTOCERT"
value = "true"
}
dynamic "env" {
for_each = var.env
content {
name = env.key
value = env.value
}
}
port {
container_port = "80"
protocol = "TCP"
}
port {
container_port = "443"
protocol = "TCP"
}
port {
container_port = "5453"
protocol = "UDP"
}
resources {
requests {
cpu = "256m"
memory = "64Mi"
}
}
}
dns_config {
option {
name = "ndots"
value = "1"
}
}
}
}
}
}
resource "kubernetes_horizontal_pod_autoscaler" "router" {
metadata {
namespace = var.namespace
name = "router"
}
spec {
min_replicas = 1
max_replicas = 1
target_cpu_utilization_percentage = 100
scale_target_ref {
api_version = "apps/v1"
kind = "Deployment"
name = "router"
}
}
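# replica bounds are expected to be adjusted outside Terraform, so drift on min/max is ignored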
lifecycle {
ignore_changes = [spec[0].min_replicas, spec[0].max_replicas]
}
}

terraform/router/k8s/vars.tf Normal file

@ -0,0 +1,17 @@
variable "annotations" {
type = "map"
default = {}
}
variable "env" {
type = "map"
default = {}
}
variable "namespace" {
type = "string"
}
variable "release" {
type = "string"
}

terraform/system/aws/main.tf Normal file

@ -0,0 +1,55 @@
terraform {
required_version = ">= 0.12.0"
}
provider "aws" {
version = "~> 2.22"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = module.cluster.kubeconfig
}
data "http" "releases" {
url = "https://api.github.com/repos/convox/convox/releases"
}
locals {
current = jsondecode(data.http.releases.body).0.tag_name
release = coalesce(var.release, local.current)
}
module "cluster" {
source = "../../cluster/aws"
providers = {
aws = aws
}
cidr = var.cidr
name = var.name
node_type = var.node_type
ssh_key = var.ssh_key
}
module "rack" {
source = "../../rack/aws"
providers = {
aws = aws
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = module.cluster.kubeconfig
name = var.name
nodes_role = module.cluster.nodes_role
nodes_security = module.cluster.nodes_security
release = local.release
subnets_private = module.cluster.subnets_private
subnets_public = module.cluster.subnets_public
target_group_http = module.cluster.target_group_http
target_group_https = module.cluster.target_group_https
}

terraform/system/aws/outputs.tf Normal file

@ -0,0 +1,7 @@
output "api" {
value = module.rack.api
}
output "router" {
value = module.rack.router
}

terraform/system/aws/vars.tf Normal file

@ -0,0 +1,27 @@
variable "cidr" {
default = "10.1.0.0/16"
}
variable "domain" {
type = "string"
}
variable "name" {
type = "string"
}
variable "node_type" {
default = "t3.small"
}
variable "release" {
default = ""
}
variable "region" {
default = "us-east-1"
}
variable "ssh_key" {
default = ""
}

terraform/system/gcp/main.tf Normal file

@ -0,0 +1,48 @@
terraform {
required_version = ">= 0.12.0"
}
provider "google" {
version = "~> 2.12"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = module.cluster.kubeconfig
}
data "http" "releases" {
url = "https://api.github.com/repos/convox/convox/releases"
}
locals {
current = jsondecode(data.http.releases.body).0.tag_name
release = coalesce(var.release, local.current)
}
module "cluster" {
source = "../../cluster/gcp"
providers = {
google = google
}
name = var.name
node_type = var.node_type
}
module "rack" {
source = "../../rack/gcp"
providers = {
google = google
kubernetes = kubernetes
}
domain = var.domain
kubeconfig = module.cluster.kubeconfig
name = var.name
nodes_account = module.cluster.nodes_account
release = local.release
}

terraform/system/gcp/outputs.tf Normal file

@ -0,0 +1,7 @@
output "api" {
value = module.rack.api
}
output "endpoint" {
value = module.rack.endpoint
}

terraform/system/gcp/vars.tf Normal file

@ -0,0 +1,15 @@
variable "domain" {
type = "string"
}
variable "name" {
type = "string"
}
variable "node_type" {
default = "n1-standard-1"
}
variable "release" {
default = ""
}


@ -0,0 +1,32 @@
terraform {
required_version = ">= 0.12.0"
}
provider "kubernetes" {
version = "~> 1.8"
config_path = var.kubeconfig
}
module "atom" {
source = "../atom/k8s"
providers = {
kubernetes = kubernetes
}
kubeconfig = var.kubeconfig
namespace = var.namespace
}
module "router" {
source = "../router/k8s"
providers = {
kubernetes = kubernetes
}
annotations = var.router_annotations
env = var.router_env
namespace = var.namespace
}


@ -0,0 +1,17 @@
variable "kubeconfig" {
type = "string"
}
variable "namespace" {
type = "string"
}
variable "router_annotations" {
type = "map"
default = {}
}
variable "router_env" {
type = "map"
default = {}
}