Compare commits

..

5 Commits

SHA1 | Message | Checks | Date
510baa7f94 | Basic setup now passing initial checks | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 15:14:22 -08:00
088846cad9 | Ensure that static hosts have docker and the latest python versions installed | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 12:34:41 -08:00
1be3a8e588 | Quick fix for ansible-lint things | Ansible Linting / ansible-lint (push): failing after 3s | 2025-03-04 11:46:17 -08:00
da580eb7d2 | REmoving bogus wiki stuff | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 11:44:09 -08:00
a8d7c01efe | Slowing building out the new workflows | Ansible Linting / ansible-lint (push): failing after 5s | 2025-03-04 11:20:00 -08:00
100 changed files with 963 additions and 618 deletions

View File

@ -10,6 +10,6 @@ jobs:
       steps:
         - name: Checkout repo content
           uses: actions/checkout@v4
-        - run: ansible-lint -c linter.yaml
+        - run: ansible-lint
           working-directory: ansible/

1
.gitignore vendored
View File

@ -21,4 +21,3 @@ docker/beta/shockrah.xyz/
 docker/beta/resume.shockrah.xyz/
 k8s/config.yaml
 infra/**/tfplan
-.ansible/

View File

@ -1,3 +0,0 @@
nigel:
hosts:
nigel.local:

View File

@ -1,4 +0,0 @@
---
skip_list:
- role-name
- var-naming[no-role-prefix]

View File

@ -1,28 +0,0 @@
# This playbook is meant to be a oneshot to be run manually on the dev box
# The rest of the role stuff is meant to be run as the admin user that
# this playbook creates for us
---
- name: Setup local admin user with a fresh ubuntu host
hosts: nigel.local
remote_user: nigel
vars:
admin:
username: nigel
tasks:
- name: Copy the nigel admin key
ansible.posix.authorized_key:
user: "{{ admin.username }}"
state: present
key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
- name: Prevent password based logins
become: true
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
- name: Restart SSH Daemon
become: true
ansible.builtin.service:
name: ssh
state: restarted

View File

@ -1,9 +0,0 @@
---
- name: Setup all the responsibilities of the nomad server
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the nomad role
ansible.builtin.include_role:
name: nomad

View File

@ -1,14 +0,0 @@
---
- name: Setup bare metal requirements
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the base role to the nuc
ansible.builtin.include_role:
name: base
- name: Apply the k3s base role
ansible.builtin.include_role:
name: k3s
- name: Apply the proxy role
ansible.builtin.include_role:
name: proxy

View File

@ -1,8 +0,0 @@
---
- name: Setup host as a reverse proxy
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply reverse proxy role
ansible.builtin.include_role:
name: proxy

View File

@ -1 +0,0 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable

View File

@ -1,41 +0,0 @@
- name: Ensure we have basic updated packages before setting up docker
ansible.builtin.apt:
name: "{{ item }}"
update_cache: true
loop:
- ca-certificates
- curl
- name: Running install on the keyrings directory
ansible.builtin.command:
cmd: install -m 0755 -d /etc/apt/keyrings
register: install
changed_when: install.rc == 0
- name: Fetch Docker GPG Key
vars:
keylink: https://download.docker.com/linux/ubuntu/gpg
ansible.builtin.get_url:
url: "{{ keylink }}"
dest: /etc/apt/keyrings/docker.asc
mode: "0644"
- name: Add repo to apt sources
ansible.builtin.copy:
src: docker.list
dest: /etc/apt/sources.list.d/docker.list
mode: "0644"
- name: Update Apt cache with latest docker.list packages
ansible.builtin.apt:
update_cache: true
- name: Ensure all docker packages are updated to the latest versions
ansible.builtin.apt:
name: "{{ item }}"
loop:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- name: Verify that the docker components are installed properly
ansible.builtin.command:
cmd: docker run hello-world
register: docker
changed_when: docker.rc == 0

View File

@ -1,8 +0,0 @@
- name: Download the setup script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s.sh
mode: "0644"
- name: Run installation script
ansible.builtin.command:
cmd: bash /tmp/k3s.sh

View File

@ -1,25 +0,0 @@
- name: Ensure nigel can use sudo without password
become: true
tags:
- setup
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
tags:
- setup
ansible.builtin.include_tasks:
file: ensure-docker-basic.yaml
apply:
become: true
tags:
- setup
- name: Run through nomad removal steps
tags: nomad
ansible.builtin.include_tasks:
file: nomad.yaml
apply:
become: true
tags:
- nomad

View File

@ -1,12 +0,0 @@
bind_addr = "{{ ip }}"
advertise_addr = "{{ ip }}"
bootstrap = true
bootstrap_expect = 1
client_addr = "{{ ip }}"
server = true
data_dir = "/opt/consul"
ui_config {
enabled = true
}

View File

@ -1 +0,0 @@
deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main

View File

@ -1,11 +0,0 @@
- name: Download the installation script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp
register: install_script
- name: Run installation script
become: true
environment:
INSTALL_K3S_EXEC: server
ansible.builtin.command:
cmd: sh {{ install_script.dest }}

View File

@ -1,24 +0,0 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
server {
enabled = true
bootstrap_expect = 1
}
client {
enabled = true
servers = ["127.0.0.1"]
}
host_volume "registry" {
path = "/opt/volumes/registry"
read_only = false
}
host_volume "nfs" {
path = "/opt/volumes/nfs"
read_only = false
}

View File

@ -1,18 +0,0 @@
- name: Nomad server configuration
become: true
block:
- name: Ensure the root data directory is present
ansible.builtin.file:
path: "{{ nomad.volumes.root }}"
state: absent
mode: "0755"
- name: Ensure registry volume is present
ansible.builtin.file:
path: "{{ nomad.volumes.registry }}"
state: absent
mode: "0755"
- name: Ensure the MinIO directory is present
ansible.builtin.file:
path: "{{ nomad.volumes.nfs }}"
state: absent
mode: "0755"

View File

@ -1,5 +0,0 @@
nomad:
volumes:
root: /opt/volumes
registry: /opt/volumes/ncr
nfs: /opt/volumes/nfs

View File

@ -1,15 +0,0 @@
127.0.0.1 localhost
127.0.1.1 nigel
# Our own dns stuff
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

View File

@ -1,6 +0,0 @@
server {
server_name ncr.nigel.local;
location / {
proxy_pass http://localhost:5000;
}
}

View File

@ -1,25 +0,0 @@
server {
server_name nomad.nigel.local;
location / {
proxy_pass http://nomad-ws;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 319s;
# This is for log streaming requests
proxy_buffering off;
# Upgrade and Connection headers for upgrading to websockets
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "${scheme}://${proxy_host}";
}
}
upstream nomad-ws {
ip_hash;
server nomad.nigel.local:4646;
}

View File

@ -1,28 +0,0 @@
- name: Reverse proxy role configuration
become: true
block:
- name: Ensure /etc/hosts are up to date
ansible.builtin.copy:
dest: /etc/hosts
src: host-file
mode: "0644"
- name: Ensure nginx is setup as latest
ansible.builtin.apt:
name: nginx
- name: Copy the nomad.conf to available configurations
ansible.builtin.copy:
src: "{{ item }}"
dest: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Link the nomad.conf to sites-enabled
ansible.builtin.file:
path: "/etc/nginx/sites-enabled/{{ item }}"
state: link
src: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Restart nginx
ansible.builtin.systemd_service:
name: nginx
state: restarted

View File

@ -1,3 +0,0 @@
proxy_nginx_configs:
- nomad.conf
- ncr.conf

View File

@ -0,0 +1,23 @@
#!/bin/bash
set -e
bucket="$1"
s3env=/opt/nginx/s3.env
[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1
[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env
pull() {
aws s3 sync s3://$bucket /opt/nginx/$bucket
}
case $bucket in
resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
*) echo "Invalid bucket name" && exit 1 ;;
esac

View File

@ -0,0 +1,40 @@
networks:
gitea:
external: false
services:
gitea:
image: gitea/gitea:latest-rootless
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "2222:22"
gitea-runner:
image: gitea/act_runner:nightly
container_name: gitea-runner
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea_runner/
- /var/run/docker.sock:/var/run/docker.sock
environment:
- GITEA_INSTANCE_URL=https://git.shockrah.xyz
- GITEA_RUNNER_NAME=gitea-main
- GITEA_RUNNER_LABELS=gitea-main
- GITEA_RUNNER_REGISTRATION_TOKEN=${token}

View File

@ -0,0 +1,29 @@
What is this
============
Here we contain scripts to build out all the containers that we run.
All of these images are based on images made by other projects.

docker-compose.yaml
===================
Services that are more or less "special" go here, since most of what runs
on the main host is basically just static HTML websites.

Services & Containers
=====================
| Service | Docker Image Used |
|------------|--------------------------|
| Gitea | gitea/gitea:latest |
| Act Runner | gitea/act_runner:nightly |

Why the services above?
=======================
The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" offerings. I have no issue with GitHub/GitLab,
but I just like being able to host my own stuff when possible :smiley:
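A minimal usage sketch, assuming the compose file above sits in the working
directory and that `token` (interpolated by the runner service) holds a
registration token generated from the Gitea admin UI; the export step is an
assumption, not something documented in this change:

```sh
# Assumed workflow: export the runner registration token referenced by the
# compose file, then bring up Gitea and the act runner in the background.
export token='<registration-token-from-gitea-admin-ui>'
docker compose up -d
# Follow the runner logs to confirm it registered against the instance URL
docker compose logs -f gitea-runner
```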

34
infra/dns/build.sh Normal file
View File

@ -0,0 +1,34 @@
#!/bin/bash
set -e
opt=$1
plan=tfplan
build_plan() {
echo Generating plan
set -x
terraform plan -var-file variables.tfvars -input=false -out $plan
}
deploy_plan() {
terraform apply $plan
}
init() {
terraform init
}
help_prompt() {
cat <<- EOF
Options: plan deploy help
EOF
}
# Default to building a plan
source ./secrets.sh
case $opt in
plan) build_plan;;
deploy) deploy_plan;;
*) help_prompt;;
esac

View File

@ -37,7 +37,6 @@ locals {
{ name = "www.shockrah.xyz", records = [ var.vultr_host ] }, { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "resume.shockrah.xyz", records = [ var.vultr_host ] }, { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "git.shockrah.xyz", records = [ var.vultr_host ] }, { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "example.shockrah.xyz", records = [ var.vke_lb ] },
] ]
} }

View File

@ -26,7 +26,3 @@ variable "vultr_host" {
description = "IP of the temp Vultr host" description = "IP of the temp Vultr host"
} }
variable "vke_lb" {
type = string
description = "IP of our VKE load balancer"
}

View File

@ -1,2 +1 @@
vultr_host = "45.32.83.83" vultr_host = "45.32.83.83"
vke_lb = "140.82.21.106"

View File

@ -1 +0,0 @@
config.yaml

View File

@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
name: nginx-port
---
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
type: NodePort
selector:
app: nginx
ports:
- port: 80
nodePort: 30808
targetPort: nginx-port

View File

@ -1,19 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: hello
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busybox:1.28
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- date; echo Hello from the sample cron-container
restartPolicy: OnFailure

View File

@ -0,0 +1,62 @@
resource kubernetes_namespace admin-servers {
count = length(var.admin_services.configs) > 0 ? 1 : 0
metadata {
name = var.admin_services.namespace
}
}
resource kubernetes_pod admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
spec {
node_selector = {
NodeType = var.admin_services.namespace
}
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
# TODO: don't make these NodePorts since we're gonna want them
# to be purely internal to the Cluster.
# WHY? Because we want to keep dashboards as unexposed as possible
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@ -9,20 +9,15 @@ terraform {
   required_providers {
     aws = {
       source = "hashicorp/aws"
-      version = "5.98.0"
+      version = "~> 5.0"
     }
     vultr = {
       source = "vultr/vultr"
-      version = "2.26.0"
+      version = "2.22.1"
     }
     kubernetes = {
       source = "hashicorp/kubernetes"
-      version = "2.37.1"
+      version = "2.34.0"
     }
-    tls = {
-      source = "hashicorp/tls"
-      version = "4.1.0"
-    }
   }
 }

View File

@ -1,18 +1,30 @@
 resource vultr_kubernetes athens {
   region = var.cluster.region
   version = var.cluster.version
   label = var.cluster.label
-  # vpc_id = vultr_vpc.athens.id
+  # BUG: only have this set when creating the resource for the first time
+  # once the cluster is up, we should comment this out again
+  # enable_firewall = true
   node_pools {
-    node_quantity = var.cluster.pools["main"].min_nodes
-    plan = var.cluster.pools["main"].plan
-    label = var.cluster.pools["main"].label
-    min_nodes = var.cluster.pools["main"].min_nodes
-    max_nodes = var.cluster.pools["main"].max_nodes
+    node_quantity = 1
+    plan = var.cluster.pools["meta"].plan
+    label = var.admin_services.namespace
+    min_nodes = var.cluster.pools["meta"].min
+    max_nodes = var.cluster.pools["meta"].max
+    # tag = var.admin_services.namespace
   }
 }
+resource vultr_kubernetes_node_pools games {
+  cluster_id = vultr_kubernetes.athens.id
+  node_quantity = var.cluster.pools["games"].min
+  plan = var.cluster.pools["games"].plan
+  label = var.game_servers.namespace
+  min_nodes = var.cluster.pools["games"].min
+  max_nodes = var.cluster.pools["games"].max
+  tag = var.admin_services.namespace
+}
 output k8s_config {
   value = vultr_kubernetes.athens.kube_config
   sensitive = true

4
infra/vultr-kubernetes/dev/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
# created by virtualenv automatically
bin/
lib/

View File

@ -0,0 +1,51 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re
def get_args() -> Namespace:
parser = ArgumentParser(
prog="Cluster Search Thing",
description="General utility for finding resources for game server bot"
)
games = {"reflex", "minecraft"}
parser.add_argument('-g', '--game', required=False, choices=games)
admin = {"health"}
parser.add_argument('-a', '--admin', required=False, choices=admin)
return parser.parse_args()
def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
config.load_kube_config(config_path)
return client.CoreV1Api()
def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
print('admin thing requested', args.admin)
def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
node_name = pods.items[0].spec.node_name
services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
port = services.items[0].spec.ports[0].port
# Collecting the IPV4 of the node that contains the pod(container)
# we actually care about. Since these pods only have 1 container
# Now we collect specific data about the game server we requested
node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')
if __name__ == '__main__':
args = get_args()
api = k8s_api('../config.yaml')
if args.game:
get_game_server_ip(args, api)
if args.admin:
get_admin_service_details(args, api)

View File

@ -0,0 +1,8 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3

View File

@ -0,0 +1,18 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0

View File

@ -1,23 +1,32 @@
-# resource vultr_firewall_rule web_inbound {
-#   for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
-#   firewall_group_id = vultr_kubernetes.athens.firewall_group_id
-#   protocol = "tcp"
-#   ip_type = "v4"
-#   subnet = "0.0.0.0"
-#   subnet_size = 0
-#   port = each.value
-# }
-resource vultr_firewall_group bastion {
-  description = "For connections into and out of the bastion host"
-}
-resource vultr_firewall_rule bastion_inbound {
-  firewall_group_id = vultr_firewall_group.bastion.id
-  protocol = "tcp"
-  ip_type = "v4"
-  subnet = "0.0.0.0"
-  subnet_size = 0
-  port = 22
-}
+resource vultr_firewall_rule web_inbound {
+  for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  port = each.value
+}
+resource vultr_firewall_rule game-server-inbound {
+  for_each = var.game_servers.configs
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  port = each.value.port.expose
+}
+resource vultr_firewall_rule admin-service-inbound {
+  for_each = var.admin_services.configs
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  notes = each.value.port.notes
+  port = each.value.port.expose
+}

View File

@ -0,0 +1,55 @@
resource kubernetes_namespace game-servers {
count = length(var.game_servers.configs) > 0 ? 1 : 0
metadata {
name = var.game_servers.namespace
}
}
resource kubernetes_pod game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@ -1,35 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-service
spec:
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whoami-ingress
annotations:
cert-manager.io/cluster-issuer: letencrypt-prod
spec:
ingressClassName: nginx
tls:
- secretName: whoami-tls
hosts:
- example.shockrah.xyz
rules:
- host: example.shockrah.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whoami-service
port:
number: 80

1
infra/vultr-kubernetes/k8s/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
terraform.yaml

View File

@ -0,0 +1,33 @@
terraform {
required_version = ">= 0.13"
backend s3 {
bucket = "project-athens"
key = "infra/vke/k8s/state/build.tfstate"
region = "us-west-1"
encrypt = true
}
required_providers {
# For interacting with S3
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.30.0"
}
}
}
provider aws {
access_key = var.aws_key
secret_key = var.aws_secret
region = var.aws_region
max_retries = 1
}
provider kubernetes {
config_path = "terraform.yaml"
}

View File

@ -0,0 +1,50 @@
resource kubernetes_ingress_v1 athens {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
rule {
host = "test.shockrah.xyz"
http {
path {
backend {
service {
name = var.shockrahxyz.name
port {
number = 80
}
}
}
path = "/"
}
}
}
}
}
resource kubernetes_service athens_lb {
metadata {
name = "athens-websites"
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
selector = {
app = kubernetes_ingress_v1.athens.metadata.0.labels.app
}
port {
port = 80
target_port = 80
}
type = "LoadBalancer"
external_ips = [ var.cluster.ip ]
}
}

View File

@ -0,0 +1,5 @@
resource kubernetes_namespace websites {
metadata {
name = "websites"
}
}

View File

@ -0,0 +1,62 @@
# First we setup the ingress controller with helm
```sh
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
# Now we can install this to our cluster
helm install --kubeconfig config.yaml traefik traefik/traefik
```
# Prove the service is present with
```sh
kubectl --kubeconfig config.yaml get svc
```
# Create the pods
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-dep.yaml
```
# Expose on port 80
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-service.yaml
```
# Create ingress on k8s
```sh
kubectl --kubeconfig config.yaml apply -f k8s/traefik-ingress.yaml
```
# Take the external IP from the ingress
Put that into Terraform's A record for the domain, since this is a load balancer
in Vultr (an actual resource, apparently). One way to read that IP is sketched
below.
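This assumes the helm release above created the default `traefik` service in the
default namespace; the service name is an assumption, not confirmed by this
change set:

```sh
# Print the load balancer IP assigned to the traefik service (assumed name)
kubectl --kubeconfig config.yaml get svc traefik \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```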
# Configure cert-manager for traefik ingress
Using the latest version from here:
https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml
```sh
kubectl --kubeconfig config.yaml \
apply --validate=false \
-f https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.yaml
```
# Create the cert issuer and certificate
```sh
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
```
Because we just have one cert for now, we are looking for its status to be `READY`.
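One way to check that, assuming the cert-manager CRDs applied earlier are installed:

```sh
# List certificates in every namespace and watch the READY column
kubectl --kubeconfig config.yaml get certificate -A
```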

View File

@ -0,0 +1,21 @@
# Plain nginx for now so that we can test out reverse dns
resource kubernetes_pod shockrah {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = var.shockrahxyz.name
}
}
spec {
container {
image = "nginx"
name = "${var.shockrahxyz.name}"
port {
container_port = 80
}
}
}
}

View File

@ -0,0 +1,35 @@
# API Keys required to reach AWS/Vultr
variable vultr_api_key {
type = string
sensitive = true
}
variable aws_key {
type = string
sensitive = true
}
variable aws_secret {
type = string
sensitive = true
}
variable aws_region {
type = string
sensitive = true
}
variable shockrahxyz {
type = object({
name = string
port = number
dns = string
})
}
variable cluster {
type = object({
ip = string
})
}

View File

@ -0,0 +1,37 @@
# Here we are going to define the deployment and service
# Basically all things directly related to the actual service we want to provide
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: alternate-nginx-web
namespace: default
labels:
app: alternate-nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: alternate-nginx-web
template:
metadata:
labels:
app: alternate-nginx-web
spec:
# Container comes from an example thing i randomly found on docker hub
containers:
- name: alternate-nginx-web
image: dockerbogo/docker-nginx-hello-world
---
apiVersion: v1
kind: Service
metadata:
name: alternate-nginx-web
namespace: default
spec:
selector:
app: alternate-nginx-web
ports:
- name: http
targetPort: 80
port: 80

View File

@ -0,0 +1,30 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod-hello
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-prod-hello
solvers:
- http01:
ingress:
class: traefik

View File

@ -0,0 +1,13 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

View File

@ -1,14 +1,15 @@
 apiVersion: cert-manager.io/v1
-kind: Issuer
+kind: ClusterIssuer
 metadata:
-  name: letsencrypt-nginx
+  name: letsencrypt-prod
+  namespace: default
 spec:
   acme:
-    email: dev@shockrah.xyz
     server: https://acme-v02.api.letsencrypt.org/directory
+    email: dev@shockrah.xyz
     privateKeySecretRef:
-      name: example
+      name: letsencrypt-prod
     solvers:
     - http01:
         ingress:
-          class: nginx
+          class: traefik

View File

@ -0,0 +1,20 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: nginx-web
namespace: default
labels:
app: nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: nginx-web
template:
metadata:
labels:
app: nginx-web
spec:
containers:
- name: nginx
image: nginx

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-web
namespace: default
spec:
selector:
app: nginx-web
ports:
- name: http
targetPort: 80
port: 80

View File

@ -0,0 +1,44 @@
# This is the first thing we need to create, an issue to put certs into
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-temprah-lab
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

View File

@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-ingress
namespace: default
labels:
name: project-athens-lb
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: sample.temprah-lab.xyz
http:
paths:
- backend:
service:
name: nginx-web
port:
number: 80
path: /
pathType: Prefix
- host: hello.temprah-lab.xyz
http:
paths:
- backend:
service:
name: alternate-nginx-web
port:
number: 80
path: /
pathType: Prefix

View File

@ -1,6 +0,0 @@
apiVersion: v1
kind: Namespace
metadata:
name: testing
labels:
name: testing

View File

@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-lb
annotations:
service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-interval: "30"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-response-timeout: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-unhealthy-threshold: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-healthy-threshold: "5"
spec:
type: LoadBalancer
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080

View File

@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: whoami
spec:
replicas: 3
selector:
matchLabels:
name: whoami
template:
metadata:
labels:
name: whoami
spec:
containers:
- name: whoami
image: quanhua92/whoami:latest
imagePullPolicy: Always
ports:
- containerPort: 8080

View File

@ -1,37 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
preferredChain: "ISRG Root X1"
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: nginx

View File

@ -26,30 +26,46 @@ variable cluster {
     label = string
     version = string
     pools = map(object({
-      node_quantity = number
-      plan = string
-      label = string
-      min_nodes = number
-      max_nodes = number
-      tag = string
+      plan = string
+      autoscale = bool
+      min = number
+      max = number
     }))
   })
 }

-variable personal {
+variable game_servers {
   type = object({
     namespace = string
+    configs = map(object({
+      name = optional(string)
+      image = string
+      cpu = string
+      mem = string
+      port = object({
+        internal = number
+        expose = number
+      })
+      proto = optional(string)
+    }))
   })
 }

-variable bastion {
+variable admin_services {
   type = object({
-    plan = string
-    os = string
-    label = string
+    namespace = string
+    configs = map(object({
+      name = string
+      image = string
+      cpu = string
+      mem = string
+      port = object({
+        notes = optional(string)
+        internal = number
+        expose = number
+      })
+      proto = optional(string)
+    }))
   })
 }

View File

@ -1,27 +1,51 @@
 cluster = {
   region = "lax"
   label = "athens-cluster"
-  version = "v1.33.0+3"
+  version = "v1.31.2+1"
   pools = {
-    main = {
-      node_quantity = 1
-      plan = "vc2-1c-2gb"
-      label = "main"
-      min_nodes = 1
-      max_nodes = 2
-      tag = "athens-main"
+    meta = {
+      plan = "vc2-1c-2gb"
+      autoscale = true
+      min = 1
+      max = 2
+    }
+    games = {
+      plan = "vc2-1c-2gb"
+      autoscale = true
+      min = 1
+      max = 3
     }
   }
 }
-personal = {
-  namespace = "athens-main"
+game_servers = {
+  namespace = "games"
+  configs = {
+    # minecraft = {
+    #   image = "itzg/minecraft-server"
+    #   cpu = "1000m"
+    #   mem = "2048Mi"
+    #   port = {
+    #     expose = 30808
+    #     internal = 80
+    #   }
+    # }
+  }
 }
-bastion = {
-  plan = "vc2-1c-2gb"
-  label = "bastion"
-  os = "1743"
+admin_services = {
+  namespace = "admin-services"
+  configs = {
+    # health = {
+    #   image = "nginx:latest"
+    #   name = "health"
+    #   cpu = "200m"
+    #   mem = "64Mi"
+    #   port = {
+    #     notes = "Basic nginx sanity check service"
+    #     expose = 30800
+    #     internal = 80
+    #   }
+    # }
+  }
 }

3
playbooks/inventory.yaml Normal file
View File

@ -0,0 +1,3 @@
static-web:
hosts:
shockrah.xyz:

View File

@ -0,0 +1,9 @@
---
- name: Pre Pipeline Playbook for Static Hosts
hosts: static-web
remote_user: root
tasks:
- name: Import manual setup steps
ansible.builtin.import_role:
name: static
tasks_from: setup-webadmin.yaml

5
playbooks/readme.md Normal file
View File

@ -0,0 +1,5 @@
# What is this
Here be the Ansible-based workflows that we use to keep things like the static
hosts properly set up, with all the resources they need to host the services we
intend to run. A rough usage sketch follows below.
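This assumes the repository root as the working directory; the playbook filename
is a placeholder, since only `inventory.yaml` appears in this change:

```sh
# Run a playbook against the static-web hosts defined in the inventory
ansible-playbook -i playbooks/inventory.yaml playbooks/<playbook>.yaml
```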

View File

@ -0,0 +1,8 @@
---
- name: Static Host Maintenance and Setup
hosts: static-web
remote_user: webadmin
tasks:
- name: Import static host role
ansible.builtin.import_role:
name: static

View File

@ -0,0 +1,5 @@
- name: Restart SSH
become: true
ansible.builtin.systemd:
name: sshd
state: restarted

View File

View File

@ -0,0 +1,40 @@
# Things that we definitely want to have are the following
# docker docker-compose python(latest) certbot
- name: Uhhh yea
become: true
block:
- name: Install base dependencies
ansible.builtin.apt:
update_cache: true
pkg:
- ca-certificates
- curl
- lsb-release
- name: Setup keyring directory
ansible.builtin.command:
cmd: "install -m 0755 -d {{ static_keyring_dir }}"
creates: "{{ static_keyring_dir }}"
- name: Download the docker GPG key
ansible.builtin.get_url:
url: "{{ static_docker_ubuntu }}/gpg"
dest: "{{ static_keyring_dir }}/docker.asc"
mode: "0644"
- name: Ensure docker.lst is present
vars:
key_path: "{{ static_keyring_dir }}/docker.asc"
repo: "{{ static_docker_ubuntu }}"
os_codename: jammy
ansible.builtin.template:
src: docker.list
dest: "{{ static_apt_sources_dir }}/docker.list"
mode: "0644"
- name: Install docker and python packages
ansible.builtin.apt:
update_cache: true
pkg:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- python3

View File

@ -0,0 +1,43 @@
- name: Ensure sudo is available
ansible.builtin.apt:
state: present
update_cache: true
pkg:
- sudo
- zsh
- name: Create webadmin user
ansible.builtin.user:
name: webadmin
state: present
shell: /bin/zsh
groups:
- nginx
append: true
- name: Copy webadmin public key
ansible.posix.authorized_key:
user: webadmin
state: present
key: "{{ lookup('file', 'files/webadmin.pem.pub') }}"
- name: Add webadmin to sudoers
ansible.builtin.copy:
dest: "/etc/sudoers.d/webadmin"
content: "webadmin ALL=(ALL) NOPASSWD: ALL"
mode: "0644"
owner: root
group: root
- name: Disable Password Authentication
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
notify:
- Restart SSH
- name: Disable root login
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PermitRootLogin no
state: present
backup: true
notify:
- Restart SSH

View File

View File

@ -0,0 +1 @@
deb [arch=amd64 signed-by={{ key_path }}] {{ repo }} {{ os_codename }} stable

View File

View File

@ -0,0 +1,4 @@
static_keyring_dir: /etc/apt/keyrings
static_docker_ubuntu: https://download.docker.com/linux/ubuntu
static_apt_sources_dir: /etc/apt/sources.list.d
static_codename: jammy

View File

@ -1,45 +0,0 @@
# What this covers
The creation of Atlas as it happened in order
## Commands Ran
Once the infra was provisioned and verified to be configured correctly by Terraform,
we move on to the following:
```sh
# Setup the machine to run docker
ansible-playbook -i hosts.ini atlas/init/system-deps.yml
# Second we copy over the contents of Alpha's mounted docker volumes
ansible-playbook -i hosts.ini atlas/init/perma-mount-drives.yml
# Next we copy over the data that we want to migrate ( if any )
ansible-playbook -i hosts.ini -e filebrowser=/path -e clippable=/path atlas/init/migrate-clips-files.yml
# Setup the services on the host that we want to run
ansible-playbook -i hosts.ini atlas/init/setup-containers.yml
# Next we put up the reverse proxy (nginx)
ansible-playbook -i hosts.ini atlas/init/setup-reverse-proxy.yml
# Finally we add TLS on top of nginx and we're done
ansible-playbook -i hosts.ini atlas/init/setup-certbot.yml
```
Maintenance should be straightforward for this machine, as TLS is automatically
renewed every 3 months by a cron job. We can still update the certs manually
if we really want to. The playbooks also no longer require manual variable injection
like Alpha did, since the only thing protected was `dev@shockrah.xyz`, which is at
this point becoming semi-public. This means that while it is associated with code,
it is more of a _business e-mail_, so it can be placed in this repository with
very little concern.
System updates are now also to be fetched with a:
```sh
ansible-playbook -i hosts.ini atlas/maintain/analyze-system-deps.yml
```
Which performs purely read operations and does not affect the state of the
machine.

View File

@ -1,33 +0,0 @@
# Mounting an attached drive
Assumptions:
* New drive is attached(in AWS) and detected in software
Ideally attachment is made through terraform
## Mounting Instructions (Step-by-Step)
1. Verify the drive does not already contain data: `sudo file -s /dev/xvdf`
It should return `data` if the drive is empty. Otherwise we're probably looking
at the wrong drive.
2. Create the filesystem on the new empty drive: `sudo mkfs -t ext4 /dev/xvdf`
3. Create a mountpoint to actually use the drive:
`sudo mkdir /mnt/example`.
Change _example_ to something that actually makes sense.
4. Add a new entry to /etc/fstab for automounting (tab-delimited):
`/dev/xvdf /newvolume ext4 defaults,nofail 0 0`
5. Mount all drives listed in `/etc/fstab` from before: `sudo mount -a`
The full sequence is condensed into the sketch below.
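Roughly, using the same example device and mountpoint as in the steps above:

```sh
# Example device and mountpoint; adjust both to the actual attachment
sudo file -s /dev/xvdf              # should report "data" for an empty drive
sudo mkfs -t ext4 /dev/xvdf         # create the filesystem
sudo mkdir /mnt/example             # create the mountpoint
echo '/dev/xvdf /mnt/example ext4 defaults,nofail 0 0' | sudo tee -a /etc/fstab
sudo mount -a                       # mount everything listed in /etc/fstab
```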