Compare commits
33 Commits
c2099e2133...fix/ansibl
| SHA1 |
|---|
| 510baa7f94 |
| 088846cad9 |
| 1be3a8e588 |
| da580eb7d2 |
| a8d7c01efe |
| f2c4506245 |
| ac11487feb |
| ee23406f49 |
| 6e4982fffd |
| f5f670e5f2 |
| 6d642a7359 |
| 7a41d033b5 |
| 280a1f7a87 |
| 90c61d7c00 |
| ad0f3e6089 |
| f9c73b1e4a |
| 5d03f6b218 |
| 7f2ee6d35b |
| a4a1d55a53 |
| bf812cce4c |
| abf3297498 |
| 52e8c56682 |
| c50deddf53 |
| 6ab49d1b28 |
| 68acbe2842 |
| a6dc2da7be |
| d483f5ed72 |
| e759802ce6 |
| f141a42689 |
| fba534b9df |
| 3779d53810 |
| 16c0e5ee98 |
| 8f18ff8c85 |
.gitea/workflows/ansible-lint.yaml (new file, 15 lines)

```yaml
name: Ansible Linting
on:
  - push

jobs:
  ansible-lint:
    runs-on: ubuntu-latest
    container:
      image: shockrah/ansible
    steps:
      - name: Checkout repo content
        uses: actions/checkout@v4
      - run: ansible-lint
        working-directory: ansible/
```
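The same check the workflow runs can be reproduced before pushing; a minimal sketch, assuming ansible-lint is installed locally or that the shockrah/ansible image bundles it:

```sh
# Run the lint exactly as the workflow does, from the repository root.
( cd ansible/ && ansible-lint )

# Or run it inside the same container image the workflow uses
# (assumes shockrah/ansible ships ansible-lint on its PATH).
docker run --rm -v "$PWD:/repo" -w /repo/ansible shockrah/ansible ansible-lint
```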
Deleted file (21 lines): the Gitea Actions demo workflow.

```yaml
name: Actions demo
run-name: ${{ gitea.actor }} is testing the actions
on:
  - push

jobs:
  simple-echo:
    runs-on: gitea-main
    steps:
      - run: echo "🎉 The job was automatically triggered by a ${{ gitea.event_name }} event."
      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
      - run: echo "🔎 The name of your branch is ${{ gitea.ref }} and your repository is ${{ gitea.repository }}."
      - name: Check out repository code
        uses: actions/checkout@v4
      - run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
      - run: echo "🖥️ The workflow is now ready to test your code on the runner."
      - name: List files in the repository
        run: |
          ls ${{ gitea.workspace }}
      - run: echo "🍏 This job's status is ${{ job.status }}."
```
ansible/ansible.cfg (new file, 3 lines)

```ini
[defaults]
stdout_callback = yaml
```
Deleted file (14 lines): the old playbook that pulled the static sites down from S3.

```yaml
---
- hosts: webhost
  remote_user: root
  tasks:
    - name: Copy pull script
      copy:
        src: ../scripts/pull-down-s3.sh
        dest: /opt/nginx/pull-down-s3.sh
    - name: Pull down all sites from S3
      shell: bash /opt/nginx/pull-down-s3.sh {{ item }}
      loop:
        - shockrah.xyz
        - resume.shockrah.xyz
        - temper.tv
```
ansible/playbooks/setup-git-web-deployer.yml (new file, 25 lines)

```yaml
---
- name: Setup all attributes of the html-deployer user for static website CI
  hosts: webhost
  vars:
    username: html-deployer
  remote_user: webadmin
  tasks:
    - name: Create user for git actions to deploy html
      become: true
      ansible.builtin.user:
        name: "{{ username }}"
        comment: Used for deploying html from Gitea Actions
        group: nginx
    - name: Set the authorized keys
      become: true
      ansible.posix.authorized_key:
        user: "{{ username }}"
        state: present
        key: "{{ lookup('file', '~/.ssh/vultr/html-deployer.pem.pub') }}"
    - name: Ensure /opt/nginx website folders are owned by html-deployer
      ansible.builtin.file:
        path: "/opt/nginx/{{ item }}"
        recurse: true
        owner: "{{ username }}"
        group: "nginx"
```
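A sketch of invoking the new playbook, assuming it is run from the ansible/ directory (so ansible.cfg is picked up) against an inventory that defines the webhost group; the inventory path here is illustrative:

```sh
# Creates the html-deployer user, installs its key, and fixes /opt/nginx ownership.
cd ansible/
ansible-playbook -i inventory.yaml playbooks/setup-git-web-deployer.yml
```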
ansible/playbooks/update.yml (new file, 20 lines)

```yaml
# Purpose: General update to the system to keep packages up to date
---
- hosts: webhost
  remote_user: webadmin
  tasks:
    - name: Informational Dump of what is upgradeable
      ansible.builtin.command: apt list --upgradable
      register: pkg
    - name: Show list of packages to upgrade
      ansible.builtin.debug:
        msg: "{{ pkg.stdout_lines }}"
    - name: Update the packages at the system level to the latest versions
      become: true
      ansible.builtin.apt:
        name: "*"
        state: latest
```
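The update playbook can be inspected and then run the same way; again the inventory path is only a placeholder:

```sh
# List the tasks without touching the host.
ansible-playbook -i inventory.yaml playbooks/update.yml --list-tasks

# Run the update; the first two tasks print what apt reports as upgradable.
ansible-playbook -i inventory.yaml playbooks/update.yml
```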
Modified (Gitea docker-compose service): switch to the rootless image.

```diff
@@ -5,7 +5,7 @@ networks:
 
 services:
   gitea:
-    image: gitea/gitea:latest
+    image: gitea/gitea:latest-rootless
    container_name: gitea
     environment:
       - USER_UID=1000
```
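Rolling the image change out on the host is the usual compose cycle; a sketch, assuming it is run next to the compose file (note the rootless image also expects the rootless port/volume layout described in the Gitea docs):

```sh
# Pull the rootless image and recreate only the gitea service.
docker compose pull gitea
docker compose up -d gitea
```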
Deleted file (24 lines): a Makefile that wrapped Terraform with secrets sourcing.

```makefile
plan=out.plan

SHELL := /bin/bash

$(plan): *.tf
	source ../secrets/set-env.sh && terraform plan -input=false -out $(plan)

push: build
	source ../secrets/set-env.sh && terraform apply $(plan)

refresh:
	source ../secrets/set-env.sh && terraform apply -refresh-only

test:
	terraform validate

rip:
	source ../secrets/set-env.sh && terraform destroy

clean:
	rm -f $(plan)

.PHONY: test build clean push rip
```
infra/dns/build.sh (new file, 34 lines)

```bash
#!/bin/bash

set -e

opt=$1
plan=tfplan

build_plan() {
    echo Generating plan
    set -x
    terraform plan -var-file variables.tfvars -input=false -out $plan
}

deploy_plan() {
    terraform apply $plan
}

init() {
    terraform init
}

help_prompt() {
    cat <<- EOF
Options: plan deploy help
EOF
}

# Default to building a plan
source ./secrets.sh
case $opt in
    plan) build_plan;;
    deploy) deploy_plan;;
    *) help_prompt;;
esac
```
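Typical usage of the wrapper, assuming secrets.sh exports the provider credentials:

```sh
# Build a plan from variables.tfvars, then apply exactly that plan.
./build.sh plan
./build.sh deploy

# Anything else prints the help text.
./build.sh help
```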
Deleted file (49 lines): the project-athens.xyz Route 53 zone and its NS/SOA records.

```hcl
#############################
# project-athens.xyz DNS ZONE
#############################

# This entry is just for the sample service that is just plain nginx
# No TLS will be placed on this just yet as we need to make sure this
# and the load balancer are setup to receive things properly
resource "aws_route53_zone" "project-athens" {
  name    = "project-athens.xyz"
  comment = "Project Athens domain zone"
}


locals {
  project-athens-records = [
    {
      name = "project-athens.xyz"
      type = "NS"
      ttl  = 172800
      records = [
        "ns-806.awsdns-36.net.",
        "ns-1881.awsdns-43.co.uk.",
        "ns-1109.awsdns-10.org.",
        "ns-11.awsdns-01.com.",
      ]
    },
    {
      name = "project-athens.xyz"
      type = "SOA"
      ttl  = 900
      records = [
        "ns-806.awsdns-36.net. awsdns-hostmaster.amazon.com. 1 7200 900 1209600 86400"
      ]
    }
  ]
}

resource "aws_route53_record" "project-athens-record" {
  for_each = {
    for index, record in local.project-athens-records:
      index => record
  }
  zone_id = aws_route53_zone.project-athens.id
  name    = each.value.name
  type    = lookup(each.value, "type", "A")
  ttl     = lookup(each.value, "ttl", 300)
  records = each.value.records
}
```
Modified (shockrah.xyz DNS records): the immich.shockrah.xyz record is removed.

```diff
@@ -36,7 +36,6 @@ locals {
     },
     { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
     { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
-    { name = "immich.shockrah.xyz", records = [ "45.32.92.196" ] },
     { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
   ]
 }
```
infra/dns/variables.tfvars (new file, 1 line)

```hcl
vultr_host = "45.32.83.83"
```
infra/static-vultr/build.sh (new file, 36 lines)

```bash
#!/bin/bash

set -e

opt=$1
plan=tfplan

build_plan() {
    echo Generating plan
    set -x
    terraform plan -var-file variables.tfvars -input=false -out $plan
}

deploy_plan() {
    terraform apply $plan
}

init() {
    terraform init
}

help_prompt() {
    cat <<- EOF
Options: plan deploy help
EOF
}

# Default to building a plan
source ./secrets.sh
case $opt in
    plan) build_plan;;
    deploy) deploy_plan;;
    init) init;;
    *) help_prompt;;
esac
```
Deleted file (22 lines): the temporary Immich server instance.

```hcl
# Here we define the resources for our temporary immich server

resource vultr_instance immich {
  plan        = var.host.plan
  region      = var.host.region
  os_id       = var.host.os
  enable_ipv6 = true

  # Enable backups for now since we're getting off of s3 as well at some point
  backups = "enabled"
  backups_schedule {
    type = "weekly"
    dow  = var.host.backups.day
    hour = var.host.backups.hour
  }

  ssh_key_ids       = [ vultr_ssh_key.immich.id ]
  firewall_group_id = vultr_firewall_group.host.id
  label             = "Immich Server"
}
```
Modified (static-vultr outputs): the immich_key output is removed.

```diff
@@ -14,10 +14,3 @@ output vultr_key_id {
 }
 
 
-output immich_key {
-  sensitive = true
-  # value = tls_private_key.host.private_key_openssh
-  value = vultr_instance.immich.default_password
-}
-
-
```
Modified (static-vultr SSH keys): the host key is wrapped in chomp() and the Immich key pair is removed.

```diff
@@ -5,18 +5,6 @@ resource tls_private_key host {
 
 resource vultr_ssh_key host {
   name = "static_ssh_key"
-  ssh_key = tls_private_key.host.public_key_openssh
+  ssh_key = chomp(tls_private_key.host.public_key_openssh)
 }
-
-####################
-#   Immich keys    #
-####################
-resource tls_private_key immich {
-  algorithm = "RSA"
-  rsa_bits = 4096
-}
-
-resource vultr_ssh_key immich {
-  name = "static_ssh_key"
-  ssh_key = tls_private_key.immich.public_key_openssh
-}
```
infra/vultr-kubernetes/admin-services.tf (new file, 62 lines)

```hcl
resource kubernetes_namespace admin-servers {
  count = length(var.admin_services.configs) > 0 ? 1 : 0
  metadata {
    name = var.admin_services.namespace
  }
}

resource kubernetes_pod admin {
  for_each = var.admin_services.configs

  metadata {
    name      = each.key
    namespace = var.admin_services.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    node_selector = {
      NodeType = var.admin_services.namespace
    }
    container {
      image = each.value.image
      name  = coalesce(each.value.name, each.key)
      resources {
        limits = {
          cpu    = each.value.cpu
          memory = each.value.mem
        }
      }
      port {
        container_port = each.value.port.internal
        protocol       = coalesce(each.value.proto, "TCP")
      }
    }
  }
}

resource kubernetes_service admin {
  for_each = var.admin_services.configs
  metadata {
    name      = each.key
    namespace = var.admin_services.namespace
    labels = {
      app = each.key
    }
  }
  # TODO: don't make these NodePorts since we're gonna want them
  # to be purely internal to the Cluster.
  # WHY? Because we want to keep dashboards as unexposed as possible
  spec {
    selector = {
      app = each.key
    }
    port {
      target_port = each.value.port.internal
      port        = each.value.port.expose
    }
    type = "NodePort"
  }
}
```
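Once applied, the pods and NodePort services this file creates can be checked with kubectl; a sketch, assuming the cluster kubeconfig has been written to config.yaml (see the provider change below) and the admin-services namespace from the tfvars:

```sh
# Show the admin pods and the NodePorts they are exposed on.
kubectl --kubeconfig config.yaml -n admin-services get pods,services -o wide
```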
Modified (vultr-kubernetes providers): the Vultr provider is bumped from 2.19.0 to 2.22.1 and a Kubernetes provider is added, configured from config.yaml.

```diff
@@ -13,7 +13,11 @@ terraform {
     }
     vultr = {
       source = "vultr/vultr"
-      version = "2.19.0"
+      version = "2.22.1"
+    }
+    kubernetes = {
+      source = "hashicorp/kubernetes"
+      version = "2.34.0"
     }
   }
 }
@@ -31,4 +35,8 @@ provider aws {
   max_retries = 1
 }
 
+provider kubernetes {
+  config_path = "config.yaml"
+}
+
```
Modified (vultr-kubernetes build.sh): an init() helper is added.

```diff
@@ -15,6 +15,10 @@ deploy_plan() {
     terraform apply $plan
 }
 
+init() {
+    terraform init
+}
+
 help_prompt() {
     cat <<- EOF
 Options: plan deploy help
```
Modified (vultr-kubernetes cluster): enable_firewall is commented out with a BUG note, the single node pool now reads from pools["meta"], and a dedicated games node pool is added.

```diff
@@ -2,17 +2,29 @@ resource vultr_kubernetes athens {
   region  = var.cluster.region
   version = var.cluster.version
   label   = var.cluster.label
-  enable_firewall = true
+  # BUG: only have this set when creating the resource for the first time
+  # once the cluster is up, we should comment this out again
+  # enable_firewall = true
   node_pools {
-    # how many nodes do we want in this pool
     node_quantity = 1
-    plan = var.cluster.pool.plan
-    label = var.cluster.label
-    min_nodes = var.cluster.pool.min
-    max_nodes = var.cluster.pool.max
+    plan = var.cluster.pools["meta"].plan
+    label = var.admin_services.namespace
+    min_nodes = var.cluster.pools["meta"].min
+    max_nodes = var.cluster.pools["meta"].max
+    # tag = var.admin_services.namespace
   }
 }
 
+resource vultr_kubernetes_node_pools games {
+  cluster_id    = vultr_kubernetes.athens.id
+  node_quantity = var.cluster.pools["games"].min
+  plan          = var.cluster.pools["games"].plan
+  label         = var.game_servers.namespace
+  min_nodes     = var.cluster.pools["games"].min
+  max_nodes     = var.cluster.pools["games"].max
+  tag           = var.admin_services.namespace
+}
+
 output k8s_config {
   value     = vultr_kubernetes.athens.kube_config
   sensitive = true
```
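The k8s_config output is what feeds config.yaml for the kubernetes provider and the dev tooling; a sketch of wiring it up, assuming the Vultr provider returns the kubeconfig base64-encoded (drop the base64 step if it is already plain text):

```sh
# Save the cluster kubeconfig next to the Terraform code.
terraform output -raw k8s_config | base64 -d > config.yaml

# Sanity check: nodes from both the meta and games pools should appear.
kubectl --kubeconfig config.yaml get nodes
```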
infra/vultr-kubernetes/dev/.gitignore (new file, vendored, 4 lines)

```
# created by virtualenv automatically
bin/
lib/
```
infra/vultr-kubernetes/dev/find-server.py (new file, 51 lines)

```python
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re

def get_args() -> Namespace:
    parser = ArgumentParser(
        prog="Cluster Search Thing",
        description="General utility for finding resources for game server bot"
    )
    games = {"reflex", "minecraft"}
    parser.add_argument('-g', '--game', required=False, choices=games)

    admin = {"health"}
    parser.add_argument('-a', '--admin', required=False, choices=admin)
    return parser.parse_args()

def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
    config.load_kube_config("../config.yaml")
    return client.CoreV1Api()

def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
    print('admin thing requested', args.admin)

def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
    pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
    node_name = pods.items[0].spec.node_name

    services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
    port = services.items[0].spec.ports[0].port

    # Collecting the IPV4 of the node that contains the pod(container)
    # we actually care about. Since these pods only have 1 container
    # Now we collect specific data about the game server we requested
    node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
    ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
    ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address

    print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')


if __name__ == '__main__':
    args = get_args()
    api = k8s_api('../config.yaml')

    if args.game:
        get_game_server_ip(args, api)

    if args.admin:
        get_admin_service_details(args, api)
```
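A sketch of running the helper out of the dev/ virtualenv, assuming the kubeconfig has been saved one level up as config.yaml (the script hard-codes ../config.yaml):

```sh
cd infra/vultr-kubernetes/dev
python3 -m venv . && ./bin/pip install -r requirements.txt

# Look up a game server: prints "<game> --> <ipv4>:<port> ~~> <ipv6>:<port>".
./bin/python find-server.py --game minecraft

# Or query an admin service.
./bin/python find-server.py --admin health
```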
infra/vultr-kubernetes/dev/pyvenv.cfg (new file, 8 lines)

```ini
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3
```
infra/vultr-kubernetes/dev/requirements.txt (new file, 18 lines)

```
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0
```
Modified (vultr-kubernetes firewall): inbound rules are added for every configured game server and admin service.

```diff
@@ -1,11 +1,32 @@
 # Inbound rules for web traffic
 resource vultr_firewall_rule web_inbound {
   for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
   firewall_group_id = vultr_kubernetes.athens.firewall_group_id
-  #firewall_group_id = vultr_firewall_group.cluster.id
   protocol = "tcp"
   ip_type = "v4"
   subnet = "0.0.0.0"
   subnet_size = 0
   port = each.value
 }
+
+resource vultr_firewall_rule game-server-inbound {
+  for_each = var.game_servers.configs
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  port = each.value.port.expose
+}
+
+resource vultr_firewall_rule admin-service-inbound {
+  for_each = var.admin_services.configs
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  notes = each.value.port.notes
+  port = each.value.port.expose
+}
```
infra/vultr-kubernetes/game-server.tf (new file, 55 lines)

```hcl
resource kubernetes_namespace game-servers {
  count = length(var.game_servers.configs) > 0 ? 1 : 0
  metadata {
    name = var.game_servers.namespace
  }
}

resource kubernetes_pod game {
  for_each = var.game_servers.configs

  metadata {
    name      = each.key
    namespace = var.game_servers.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    container {
      image = each.value.image
      name  = coalesce(each.value.name, each.key)
      resources {
        limits = {
          cpu    = each.value.cpu
          memory = each.value.mem
        }
      }
      port {
        container_port = each.value.port.internal
        protocol       = coalesce(each.value.proto, "TCP")
      }
    }
  }
}

resource kubernetes_service game {
  for_each = var.game_servers.configs
  metadata {
    name      = each.key
    namespace = var.game_servers.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    selector = {
      app = each.key
    }
    port {
      target_port = each.value.port.internal
      port        = each.value.port.expose
    }
    type = "NodePort"
  }
}
```
Modified (vultr-kubernetes variables): the cluster pool becomes a map of pools, and lab_domain / lb_ip4 are replaced by structured game_servers and admin_services variables.

```diff
@@ -25,20 +25,47 @@ variable cluster {
     region = string
     label = string
     version = string
-    pool = object({
+    pools = map(object({
       plan = string
       autoscale = bool
       min = number
       max = number
-    })
+    }))
   })
 }
 
-variable lab_domain {
-  type = string
+variable game_servers {
+  type = object({
+    namespace = string
+    configs = map(object({
+      name = optional(string)
+      image = string
+      cpu = string
+      mem = string
+      port = object({
+        internal = number
+        expose = number
+      })
+      proto = optional(string)
+    }))
+  })
 }
 
-variable lb_ip4 {
-  type = string
+variable admin_services {
+  type = object({
+    namespace = string
+    configs = map(object({
+      name = string
+      image = string
+      cpu = string
+      mem = string
+      port = object({
+        notes = optional(string)
+        internal = number
+        expose = number
+      })
+      proto = optional(string)
+    }))
+  })
 }
```
Modified (vultr-kubernetes terraform.tfvars): the cluster version is bumped to v1.31.2+1, meta and games pools are defined, and lab_domain / lb_ip4 give way to game_servers and admin_services maps whose example entries are commented out.

```diff
@@ -1,18 +1,51 @@
 
 cluster = {
   region = "lax"
   label = "athens-cluster"
-  version = "v1.30.0+1"
-  pool = {
+  version = "v1.31.2+1"
+  pools = {
+    meta = {
       plan = "vc2-1c-2gb"
       autoscale = true
       min = 1
       max = 2
     }
+    games = {
+      plan = "vc2-1c-2gb"
+      autoscale = true
+      min = 1
+      max = 3
+    }
+  }
 }
 
-lab_domain = "temprah-lab.xyz"
-
-lb_ip4 = "45.32.68.232"
+game_servers = {
+  namespace = "games"
+  configs = {
+    # minecraft = {
+    #   image = "itzg/minecraft-server"
+    #   cpu = "1000m"
+    #   mem = "2048Mi"
+    #   port = {
+    #     expose = 30808
+    #     internal = 80
+    #   }
+    # }
+  }
+}
+
+admin_services = {
+  namespace = "admin-services"
+  configs = {
+    # health = {
+    #   image = "nginx:latest"
+    #   name = "health"
+    #   cpu = "200m"
+    #   mem = "64Mi"
+    #   port = {
+    #     notes = "Basic nginx sanity check service"
+    #     expose = 30800
+    #     internal = 80
+    #   }
+    # }
+  }
+}
```
playbooks/inventory.yaml (new file, 3 lines)

```yaml
static-web:
  hosts:
    shockrah.xyz:
```
playbooks/manual-prerequisites.yaml (new file, 9 lines)

```yaml
---
- name: Pre Pipeline Playbook for Static Hosts
  hosts: static-web
  remote_user: root
  tasks:
    - name: Import manual setup steps
      ansible.builtin.import_role:
        name: static
        tasks_from: setup-webadmin.yaml
```
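The intended bootstrap order appears to be: run this prerequisite play once as root to create webadmin, then run the day-to-day play as webadmin (static-setup.yaml, shown below); a sketch, assuming the matching SSH keys are in place:

```sh
cd playbooks/
# One-time bootstrap as root: creates webadmin and locks down sshd.
ansible-playbook -i inventory.yaml manual-prerequisites.yaml

# Regular setup as webadmin: installs docker, the compose plugin and python via the static role.
ansible-playbook -i inventory.yaml static-setup.yaml
```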
playbooks/readme.md (new file, 5 lines)

```markdown
# What is this

Here be the ansible based workflows that we use to keep things like the static
hosts properly setup with all the resources they need to properly host the
services we intended on hosting.
```
playbooks/static-setup.yaml (new file, 8 lines)

```yaml
---
- name: Static Host Maintenance and Setup
  hosts: static-web
  remote_user: webadmin
  tasks:
    - name: Import static host role
      ansible.builtin.import_role:
        name: static
```
playbooks/static/files/.gitkeep (new empty file)
playbooks/static/handlers/.gitkeep (new empty file)
playbooks/static/handlers/main.yaml (new file, 5 lines)

```yaml
- name: Restart SSH
  become: true
  ansible.builtin.systemd:
    name: sshd
    state: restarted
```
playbooks/static/tasks/.gitkeep (new empty file)
playbooks/static/tasks/main.yaml (new file, 40 lines)

```yaml
# Things that we definitely want to have are the following
# docker docker-compose python(latest) certbot
- name: Uhhh yea
  become: true
  block:
    - name: Install base dependencies
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - ca-certificates
          - curl
          - lsb-release
    - name: Setup keyring directory
      ansible.builtin.command:
        cmd: "install -m 0755 -d {{ static_keyring_dir }}"
        creates: "{{ static_keyring_dir }}"
    - name: Download the docker GPG key
      ansible.builtin.get_url:
        url: "{{ static_docker_ubuntu }}/gpg"
        dest: "{{ static_keyring_dir }}/docker.asc"
        mode: "0644"
    - name: Ensure docker.lst is present
      vars:
        key_path: "{{ static_keyring_dir }}/docker.asc"
        repo: "{{ static_docker_ubuntu }}"
        os_codename: jammy
      ansible.builtin.template:
        src: docker.list
        dest: "{{ static_apt_sources_dir }}/docker.list"
        mode: "0644"
    - name: Install docker and python packages
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-buildx-plugin
          - docker-compose-plugin
          - python3
```
playbooks/static/tasks/setup-webadmin.yaml (new file, 43 lines)

```yaml
- name: Ensure sudo is available
  ansible.builtin.apt:
    state: present
    update_cache: true
    pkg:
      - sudo
      - zsh
- name: Create webadmin user
  ansible.builtin.user:
    name: webadmin
    state: present
    shell: /bin/zsh
    groups:
      - nginx
    append: true
- name: Copy webadmin public key
  ansible.posix.authorized_key:
    user: webadmin
    state: present
    key: "{{ lookup('file', 'files/webadmin.pem.pub') }}"
- name: Add webadmin to sudoers
  ansible.builtin.copy:
    dest: "/etc/sudoers.d/webadmin"
    content: "webadmin ALL=(ALL) NOPASSWD: ALL"
    mode: "0644"
    owner: root
    group: root
- name: Disable Password Authentication
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PasswordAuthentication no
    state: present
    backup: true
  notify:
    - Restart SSH
- name: Disable root login
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PermitRootLogin no
    state: present
    backup: true
  notify:
    - Restart SSH
```
playbooks/static/templates/.gitkeep (new empty file)
playbooks/static/templates/docker.list (new file, 1 line)

```
deb [arch=amd64 signed-by={{ key_path }}] {{ repo }} {{ os_codename }} stable
```
playbooks/static/vars/.gitkeep (new empty file)
playbooks/static/vars/main.yaml (new file, 4 lines)

```yaml
static_keyring_dir: /etc/apt/keyrings
static_docker_ubuntu: https://download.docker.com/linux/ubuntu
static_apt_sources_dir: /etc/apt/sources.list.d
static_codename: jammy
```
Deleted file (45 lines): the Atlas setup notes.

````markdown
# What this covers

The creation of Atlas as it happened in order

## Commands Ran

Once the infra was provisioned and verified to be configured by Terraform correctly
we move on to the following

```sh
# Setup the machine to run docker
ansible-playbook -i hosts.ini atlas/init/system-deps.yml

# Second we copy over the contents of Alpha's mounted docker volumes
ansible-playbook -i hosts.ini atlas/init/perma-mount-drives.yml

# Next we copy over the data that we want to migrate ( if any )
ansible-playbook -i hosts.ini -e filebrowser=/path -e clippable=/path atlas/init/migrate-clips-files.yml

# Setup the services on the host that we want to run
ansible-playbook -i hosts.ini atlas/init/setup-containers.yml

# Next we put up the reverse proxy (nginx)
ansible-playbook -i hosts.ini atlas/init/setup-reverse-proxy.yml

# Finally we add TLS on top of nginx and we're done
ansible-playbook -i hosts.ini atlas/init/setup-certbot.yml
```

Maintenance should be straight forward for this machine as TLS is automatically
renewed every 3 months by a cron job. We can manually update the certs however
if we really want to. They also don't require anymore manual variable injection
like Alpha did as the only thing protected was `dev@shockrah.xyz` which is at
this point becoming semi-public. This means while it is associated with code
it is more of a _business e-mail_ so it can be placed in this repository with
very little concern.

System updates are now also to be fetched with a:

```sh
ansible-playbook -i hosts.ini atlas/maintain/analyze-system-deps.yml
```

Which performs purely read operations and does not affect the state of the
machine.
````
Deleted file (33 lines): notes on mounting an attached drive.

````markdown
# Mounting an attached drive

Assumptions:

* New drive is attached(in AWS) and detected in software
  Ideally attachment is made through terraform

## Mounting Instructions (Step-by-Step)

1. Verify data does not have data: `sudo file -s /dev/xvdf`

   Should return `data` if its ok. Other wise we're probably looking at the wrong
   drive.

2. Create the filesystem on the new empty drive: `sudo mkfs -t ext4 /dev/xvdf`

3. Create mountpoint other wares to actaully use the drive
   `sudo mkdir /mnt/example`.

   Change _example_ to something that actually makes sense.

4. Add a new entry to /etc/fstab for automounting

   `/dev/xvdf /newvolume ext4 defaults,nofail 0 0`

   Tab delimited btw.

5. Mount all drives listed in `/etc/fstab` from before. `sudo mount -a`
````