Compare commits

...

57 Commits

Author SHA1 Message Date
5b4a440cb4 Removing the last remnants of nomad and getting k3s setup
2025-07-10 20:47:27 -07:00
826d334c3c Completing the state required to setup k3s
2025-07-10 14:06:53 -07:00
77590b067a Create the static host volume for the new NFS
2025-06-18 17:08:00 -07:00
850570faf5 Creating simple bastion host for testing deployment setup scripts
2025-06-16 15:15:09 -07:00
12831fbaf3 Adding vars for the new bastion host
2025-06-12 09:20:21 -07:00
a6123dd7e2 Adding tls into required providers
2025-06-12 09:16:10 -07:00
9c2e0a84d7 Creating VKE cluster in a private VPC
2025-06-04 23:29:26 -07:00
1281ea8857 simplifying vars
2025-05-28 21:22:10 -07:00
ee2d502ca6 Slimming down the cluster definition
2025-05-28 18:24:31 -07:00
88059a5e0f upgrading providers 2025-05-28 17:43:56 -07:00
4024809cc4 Removing scripts that won't ever be used again 2025-05-29 00:35:32 -07:00
029a3c80d5 Ignoring .ansible directory
2025-05-29 00:34:25 -07:00
75b7f2fa3d Unreal amounts of linter fixes
2025-05-26 01:10:00 -07:00
8ef606153f Swapping basic setup steps for sudo access
2025-05-25 22:57:30 -07:00
be34327791 Moving nomad host volume setup to its own role
2025-05-25 22:56:07 -07:00
c6ef6ae4d2 moving more files around for nomad this time
2025-05-25 22:41:37 -07:00
eb7871584b Renaming role to be more generic going forward
2025-05-25 22:35:10 -07:00
4a0a12242a Moving common vars for role to vars/main.yaml
2025-05-25 22:33:26 -07:00
053db8793b Moving proxy things to its own playbook + role
2025-05-23 01:42:40 -07:00
24fcbc957a almost functional registry, still need to figure out tls 2025-05-23 00:47:32 -07:00
9675fbacef Tracking nomad client config 2025-05-23 00:34:22 -07:00
3f0c8a865d DNS Endpoint for a tarpit meme project
2025-05-23 00:31:24 -07:00
3f2e6d86f6 Proxying a new container registry service
2025-05-23 00:31:00 -07:00
08560c945b Removing succ build script
2025-05-21 22:24:50 -07:00
506a9b32d9 renaming tarpit
2025-05-21 21:36:48 -07:00
d4ece741e0 tarpit server that i'll use for the lulz
2025-05-21 21:35:20 -07:00
311a592d6e Adding a task subset for host volume setup
2025-05-20 15:06:04 -07:00
153ea8e982 Improving nomad nginx config to be more responsive or something
https://developer.hashicorp.com/nomad/tutorials/manage-clusters/reverse-proxy-ui#extend-connection-timeout
2025-05-15 22:59:07 -07:00
943e9651da Swapping the health container to our own thing
it's just nginx on port 8080 :)
2025-05-13 18:40:12 -07:00
669c414288 Simple sanity container on port 8080 for testing purposes
2025-05-13 18:39:47 -07:00
e3afed5e4f sanity service on 8080 now 2025-05-12 02:01:31 -07:00
e337989a59 just roll with it at this point 2025-05-12 02:01:20 -07:00
7f36ff272e bootstrap since we have literally 1 node
2025-05-12 01:44:25 -07:00
79e6698db1 Templatizing consul config
2025-05-12 01:28:39 -07:00
603559b255 omfg this config i swear
2025-05-12 01:08:11 -07:00
4851b6521c consul config
2025-05-12 01:05:54 -07:00
9785e8a40a Even more file shuffling
2025-05-12 00:21:08 -07:00
79bd7424c3 Moving around more stuff
2025-05-12 00:18:24 -07:00
5227bea568 renaming stuff to note that it's not used anymore 2025-05-12 00:17:30 -07:00
47b69d7f49 Nomad now responds to the basic nomad.nigel.local DNS name
2025-05-10 17:26:45 -07:00
a3fdc5fcc7 Sanity check job with nomad :D
2025-05-10 15:38:16 -07:00
5a1afb4a07 Make sure the nomad agent is running on boot
2025-05-10 14:58:19 -07:00
e03daa62e5 removing unused role
2025-05-10 14:52:32 -07:00
15dfaea8db Nomad completely setup with --tags nomad now
2025-05-04 23:35:58 -07:00
ef4967cd88 wari wari da it's so over ( im using ansible again )
:(((
2025-04-23 23:25:34 -07:00
55217ce50b Ensure nigel sudo ability is setup
2025-04-23 22:25:23 -07:00
2bbc9095f7 Removing services role as it's being replaced by terraform
2025-04-23 22:14:50 -07:00
fcf7ded218 Removing docker resources for now
Migrating to terraform for better state control
2025-04-23 22:14:05 -07:00
b68d53b143 Opting for an example minio setup over filebrowser
2025-04-16 21:00:39 -07:00
3c6bc90feb health container and filebrowser container now active
Configuration needed at this point however
2025-04-16 20:28:48 -07:00
3521b840ae Separating the roles of basic infra requirements and docker service requirements into separate roles
With this we have a working proof of concept for a proper simple docker host
2025-04-16 18:25:24 -07:00
5f10976264 Docker now setup with ansible
2025-04-16 17:34:03 -07:00
10e936a8da Basic docker setup verified by ansible-lint locally
2025-04-16 14:55:02 -07:00
8bbaea8fd9 Simple admin user setup on a clean buntu machine
2025-04-11 02:43:22 -07:00
d39e0c04e5 Adding health to games selector set
2025-02-10 22:11:09 -08:00
b99525955e Swapping health pod to admin-services 2025-02-10 22:10:46 -08:00
9b6f9b6656 Fixing tag issues with pod selector
2025-02-10 22:10:02 -08:00
66 changed files with 538 additions and 446 deletions

View File

@ -10,6 +10,6 @@ jobs:
steps:
- name: Checkout repo content
uses: actions/checkout@v4
- run: ansible-lint
- run: ansible-lint -c linter.yaml
working-directory: ansible/

.gitignore (vendored, +1 line)
View File

@ -21,3 +21,4 @@ docker/beta/shockrah.xyz/
docker/beta/resume.shockrah.xyz/
k8s/config.yaml
infra/**/tfplan
.ansible/

ansible/inventory.yaml (new file, +3 lines)
View File

@ -0,0 +1,3 @@
nigel:
hosts:
nigel.local:

ansible/linter.yaml (new file, +4 lines)
View File

@ -0,0 +1,4 @@
---
skip_list:
- role-name
- var-naming[no-role-prefix]
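
The workflow diff above now passes this file to ansible-lint with -c, so the CI check can be reproduced locally before pushing. A minimal sketch, assuming ansible-lint is already installed in the local Python environment:

# Run the same lint the Gitea workflow runs, from the ansible/ directory
cd ansible
ansible-lint -c linter.yaml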

View File

@ -0,0 +1,28 @@
# This playbook is meant to be a oneshot to be ran manually on the dev box
# The rest of the role stuff is meant to be ran as the admin user that
# this playbook creates for us
---
- name: Setup local admin user with a fresh ubuntu host
hosts: nigel.local
remote_user: nigel
vars:
admin:
username: nigel
tasks:
- name: Copy the nigel admin key
ansible.posix.authorized_key:
user: "{{ admin.username }}"
state: present
key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
- name: Prevent password based logins
become: true
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
- name: Restart SSH Daemon
become: true
ansible.builtin.service:
name: ssh
state: restarted
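
Per the header comment, this play is a manual oneshot that installs the admin key and then disables password logins. A hedged example of that bootstrap run; the playbook filename and the --ask-pass flag are assumptions, since the first run presumably still relies on password authentication:

# One-time bootstrap against a fresh host (hypothetical filename)
ansible-playbook -i inventory.yaml adduser.yaml --ask-pass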

ansible/nomad.yaml (new file, +9 lines)
View File

@ -0,0 +1,9 @@
---
- name: Setup all the responsibilities of the nomad server
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the nomad role
ansible.builtin.include_role:
name: nomad

ansible/nuc.yaml (new file, +14 lines)
View File

@ -0,0 +1,14 @@
---
- name: Setup bare metal requirements for nomad
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the base role to the nuc
ansible.builtin.include_role:
name: base
- name: Apply the k3s base role
ansible.builtin.include_role:
name: k3s
- name: Apply the proxy role
ansible.builtin.include_role:
name: proxy
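
Once the admin key is in place, the rest of the setup appears to flow through this playbook. A hedged invocation from the ansible/ directory, assuming key-based SSH as the nigel user defined above:

# Apply the base, k3s, and proxy roles to nigel.local
ansible-playbook -i inventory.yaml nuc.yaml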

ansible/proxy.yaml (new file, +8 lines)
View File

@ -0,0 +1,8 @@
---
- name: Setup host as a reverse proxy
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply reverse proxy role
ansible.builtin.include_role:
name: proxy

View File

@ -0,0 +1 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable

View File

@ -0,0 +1,41 @@
- name: Ensure we have basic updated packages setting up docker
ansible.builtin.apt:
name: "{{ item }}"
update_cache: true
loop:
- ca-certificates
- curl
- name: Running install on the keyrings directory
ansible.builtin.command:
cmd: install -m 0755 -d /etc/apt/keyrings
register: install
changed_when: install.rc == 0
- name: Fetch Docker GPG Key
vars:
keylink: https://download.docker.com/linux/ubuntu/gpg
ansible.builtin.get_url:
url: "{{ keylink }}"
dest: /etc/apt/keyrings/docker.asc
mode: "0644"
- name: Add repo to apt sources
ansible.builtin.copy:
src: docker.list
dest: /etc/apt/sources.list.d/docker.list
mode: "0644"
- name: Update Apt cache with latest docker.list packages
ansible.builtin.apt:
update_cache: true
- name: Ensure all docker packages are updated to the latest versions
ansible.builtin.apt:
name: "{{ item }}"
loop:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- name: Verify that the docker components are installed properly
ansible.builtin.command:
cmd: docker run hello-world
register: docker
changed_when: docker.rc == 0

View File

@ -0,0 +1,25 @@
- name: Ensure nigel can use sudo without password
become: true
tags:
- setup
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
tags:
- setup
ansible.builtin.include_tasks:
file: ensure-docker-basic.yaml
apply:
become: true
tags:
- setup
- name: Run through nomad removal steps
tags: nomad
ansible.builtin.include_tasks:
file: nomad.yaml
apply:
become: true
tags:
- nomad

View File

@ -0,0 +1,54 @@
- name: Ensure prerequisite packages are installed
ansible.builtin.apt:
pkg:
- wget
- gpg
- coreutils
update_cache: true
- name: Hashicorp repo setup
vars:
keypath: /usr/share/keyrings/hashicorp-archive-keyring.gpg
gpgpath: /tmp/hashicorp.gpg
block:
- name: Download the hashicorp GPG Key
ansible.builtin.get_url:
url: https://apt.releases.hashicorp.com/gpg
dest: "{{ gpgpath }}"
mode: "0755"
- name: Dearmor the hashicorp gpg key
ansible.builtin.command:
cmd: "gpg --dearmor --yes -o {{ keypath }} {{ gpgpath }}"
register: gpg
changed_when: gpg.rc == 0
- name: Add the hashicorp linux repo
vars:
keyfile: "{{ keypath }}"
ansible.builtin.template:
src: hashicorp.list
dest: /etc/apt/sources.list.d/hashicorp.list
mode: "0644"
- name: Update apt repo cache
ansible.builtin.apt:
update_cache: true
- name: Install consul
ansible.builtin.apt:
name: consul
state: absent
- name: Install nomad package
ansible.builtin.apt:
pkg: nomad
state: absent
- name: Remove the bare metal consul template
ansible.builtin.file:
path: /etc/consul.d/consul.hcl
state: absent
- name: Start nomad
ansible.builtin.systemd_service:
name: nomad
state: stopped
enabled: false
- name: Make sure the consul service is NOT available
ansible.builtin.systemd_service:
name: consul
state: stopped
enabled: false

View File

@ -0,0 +1,12 @@
bind_addr = "{{ ip }}"
advertise_addr = "{{ ip }}"
bootstrap = true
bootstrap_expect = 1
client_addr = "{{ ip }}"
server = true
data_dir = "/opt/consul"
ui_config {
enabled = true
}

View File

@ -0,0 +1 @@
deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main

View File

View File

@ -0,0 +1,11 @@
- name: Download the installation script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp
register: install_script
- name: Run installation script
become: true
environment:
INSTALL_K3S_EXEC: server
ansible.builtin.command:
cmd: sh {{ install_script.dest }}
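
These two tasks are roughly the documented k3s quick-install run through Ansible. The manual equivalent, shown here only as a sketch of what the role does on the host:

# Fetch and run the upstream installer in server mode, then check the node
curl -sfL https://get.k3s.io -o /tmp/install-k3s.sh
sudo INSTALL_K3S_EXEC=server sh /tmp/install-k3s.sh
sudo k3s kubectl get nodes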

View File

View File

@ -0,0 +1,24 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
server {
enabled = true
bootstrap_expect = 1
}
client {
enabled = true
servers = ["127.0.0.1"]
}
host_volume "registry" {
path = "/opt/volumes/registry"
read_only = false
}
host_volume "nfs" {
path = "/opt/volumes/nfs"
read_only = false
}

View File

@ -0,0 +1,18 @@
- name: Nomad server configuration
become: true
block:
- name: Ensure the root data directory is present
ansible.builtin.file:
path: "{{ nomad.volumes.root }}"
state: absent
mode: "0755"
- name: Ensure registry volume is present
ansible.builtin.file:
path: "{{ nomad.volumes.registry }}"
state: absent
mode: "0755"
- name: Ensure the MinIO diretory is present
ansible.builtin.file:
path: "{{ nomad.volumes.nfs }}"
state: absent
mode: "0755"

View File

@ -0,0 +1,5 @@
nomad:
volumes:
root: /opt/volumes
registry: /opt/volumes/ncr
nfs: /opt/volumes/nfs

View File

@ -0,0 +1,15 @@
127.0.0.1 localhost
127.0.1.1 nigel
# Our own dns stuff
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

View File

@ -0,0 +1,6 @@
server {
server_name ncr.nigel.local;
location / {
proxy_pass http://localhost:5000;
}
}
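
This vhost fronts the registry job's static port 5000. A hedged smoke test from the host itself (whose /etc/hosts above points ncr.nigel.local at loopback), using the standard registry v2 API:

# Through nginx on port 80, then directly against the static port
curl http://ncr.nigel.local/v2/_catalog
curl http://ncr.nigel.local:5000/v2/_catalog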

View File

@ -0,0 +1,25 @@
server {
server_name nomad.nigel.local;
location / {
proxy_pass http://nomad-ws;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 319s;
# This is for log streaming requests
proxy_buffering off;
# Upgrade and Connection headers for upgrading to websockets
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "${scheme}://${proxy_host}";
}
}
upstream nomad-ws {
ip_hash;
server nomad.nigel.local:4646;
}
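
The Upgrade/Connection headers, disabled buffering, and the 319s read timeout match the HashiCorp reverse-proxy guide linked in the commit history above; they keep the UI's log streaming and event subscriptions alive through nginx. A hedged check from the host:

# Static UI through the proxy, then the long-lived event stream endpoint
curl -I http://nomad.nigel.local/ui/
curl -N http://nomad.nigel.local/v1/event/stream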

View File

@ -0,0 +1,28 @@
- name: Reverse proxy role configuration
become: true
block:
- name: Ensure /etc/hosts are up to date
ansible.builtin.copy:
dest: /etc/hosts
src: host-file
mode: "0644"
- name: Ensure nginx is setup as latest
ansible.builtin.apt:
name: nginx
- name: Copy the nomad.conf to available configurations
ansible.builtin.copy:
src: "{{ item }}"
dest: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Link the nomad.conf to sites-enabled
ansible.builtin.file:
path: "/etc/nginx/sites-enabled/{{ item }}"
state: link
src: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Restart nginx
ansible.builtin.systemd_service:
name: nginx
state: restarted

View File

@ -0,0 +1,3 @@
proxy_nginx_configs:
- nomad.conf
- ncr.conf

View File

@ -1,23 +0,0 @@
#!/bin/bash
set -e
bucket="$1"
s3env=/opt/nginx/s3.env
[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1
[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env
pull() {
aws s3 sync s3://$bucket /opt/nginx/$bucket
}
case $bucket in
resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
*) echo "Invalid bucket name" && exit 1 ;;
esac

View File

@ -1,40 +0,0 @@
networks:
gitea:
external: false
services:
gitea:
image: gitea/gitea:latest-rootless
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "2222:22"
gitea-runner:
image: gitea/act_runner:nightly
container_name: gitea-runner
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea_runner/
- /var/run/docker.sock:/var/run/docker.sock
environment:
- GITEA_INSTANCE_URL=https://git.shockrah.xyz
- GITEA_RUNNER_NAME=gitea-main
- GITEA_RUNNER_LABELS=gitea-main
- GITEA_RUNNER_REGISTRATION_TOKEN=${token}

View File

@ -1,29 +0,0 @@
What is this
============
Here we contain scripts to build out all the containers that are run.
All of these images are based on images that are made from other projects
docker-compose.yaml
===================
Services that are more/less "special" go here since most of the stuff that is
run on the main host are basically just static html websites
Services & Containers
=====================
| Service | Docker Image Used |
|------------|--------------------------|
| Gitea | gitea/gitea:latest |
| Act Runner | gitea/act_runner:nightly |
Why the servics above?
======================
The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" services. I have no issue with Github/Gitlab
but I just like being able to host my own stuff when possible :smiley:

View File

@ -1,34 +0,0 @@
#!/bin/bash
set -e
opt=$1
plan=tfplan
build_plan() {
echo Generating plan
set -x
terraform plan -var-file variables.tfvars -input=false -out $plan
}
deploy_plan() {
terraform apply $plan
}
init() {
terraform init
}
help_prompt() {
cat <<- EOF
Options: plan deploy help
EOF
}
# Default to building a plan
source ./secrets.sh
case $opt in
plan) build_plan;;
deploy) deploy_plan;;
*) help_prompt;;
esac

View File

@ -37,6 +37,7 @@ locals {
{ name = "www.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "git.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "lmao.shockrah.xyz", records = [ "207.246.107.99" ] },
]
}

View File

@ -0,0 +1,46 @@
# Nigel's Container Registry
job "ncr" {
type = "service"
group "ncr" {
count = 1
network {
port "docker" {
static = 5000
}
}
service {
name = "ncr"
port = "docker"
provider = "nomad"
}
volume "container_images" {
type = "host"
read_only = false
source = "registry"
}
restart {
attempts = 10
interval = "5m"
delay = "30s"
mode = "delay"
}
task "ncr" {
driver = "docker"
volume_mount {
volume = "container_images"
destination = "/registry/data"
read_only = false
}
config {
image = "registry:latest"
ports = [ "docker" ]
}
}
}
}
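
With this job running behind the ncr.conf vhost above, pushing to the registry would look roughly like the following. The daemon-side insecure-registries entry is an assumption: the registry is plain HTTP, and the commit history notes TLS was still unresolved.

# /etc/docker/daemon.json on the pushing machine (assumption):
#   { "insecure-registries": ["ncr.nigel.local:5000"] }
docker tag some-image:latest ncr.nigel.local:5000/some-image:latest
docker push ncr.nigel.local:5000/some-image:latest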

View File

@ -0,0 +1,30 @@
# This 'service' job is just a simple nginx container that lives here as a kind of sanity check
# PORT: 8080
# DNS : sanity.nigel.local
job "health" {
type = "service"
group "health" {
count = 1
network {
port "http" {
static = 8080
}
}
service {
name = "health-svc"
port = "http"
provider = "nomad"
}
task "health-setup" {
driver = "docker"
config {
image = "shockrah/sanity:latest"
ports = [ "http" ]
}
}
}
}
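
As the header comments say, this is just a sanity container on port 8080 behind sanity.nigel.local. A hedged liveness check from the host:

# Expect an HTTP 200 from the nginx-based sanity image
curl -i http://sanity.nigel.local:8080/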

View File

@ -0,0 +1,28 @@
resource tls_private_key tarpit {
algorithm = "RSA"
rsa_bits = 4096
}
resource vultr_ssh_key tarpit {
name = "tarpit_ssh_key"
ssh_key = chomp(tls_private_key.tarpit.public_key_openssh)
}
resource vultr_instance tarpit {
# Core configuration
plan = var.host.plan
region = var.host.region
os_id = var.host.os
enable_ipv6 = true
ssh_key_ids = [ vultr_ssh_key.host.id ]
firewall_group_id = vultr_firewall_group.host.id
label = "Tarpit"
}
output tarpit_ssh_key {
sensitive = true
value = tls_private_key.host.private_key_pem
}

View File

@ -1,62 +0,0 @@
resource kubernetes_namespace admin-servers {
count = length(var.admin_services.configs) > 0 ? 1 : 0
metadata {
name = var.admin_services.namespace
}
}
resource kubernetes_pod admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
spec {
node_selector = {
NodeType = var.admin_services.namespace
}
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
# TODO: don't make these NodePorts since we're gonna want them
# to be purely internal to the Cluster.
# WHY? Because we want to keep dashboards as unexposed as possible
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@ -9,15 +9,20 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
version = "5.98.0"
}
vultr = {
source = "vultr/vultr"
version = "2.22.1"
version = "2.26.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.34.0"
version = "2.37.1"
}
tls = {
source = "hashicorp/tls"
version = "4.1.0"
}
}
}

View File

@ -0,0 +1,27 @@
resource tls_private_key bastion {
algorithm = "ED25519"
}
resource vultr_ssh_key bastion {
name = "bastion"
ssh_key = tls_private_key.bastion.public_key_openssh
}
resource vultr_instance bastion {
region = var.cluster.region
vpc_ids = [ vultr_vpc.athens.id ]
plan = var.bastion.plan
os_id = var.bastion.os
label = var.bastion.label
ssh_key_ids = [ vultr_ssh_key.bastion.id ]
enable_ipv6 = true
disable_public_ipv4 = false
activation_email = false
}
output bastion_ssh {
value = tls_private_key.bastion.private_key_pem
sensitive = true
}
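
The bastion's private key only exists as a sensitive output in Terraform state, so connecting would look roughly like this; the output name matches the file above, while the key filename and login user are assumptions:

terraform output -raw bastion_ssh > bastion.pem
chmod 600 bastion.pem
ssh -i bastion.pem root@<bastion-public-ip>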

View File

@ -1,28 +1,16 @@
resource vultr_kubernetes athens {
region = var.cluster.region
region = var.cluster.region
version = var.cluster.version
label = var.cluster.label
# BUG: only have this set when creating the resource for the first time
# once the cluster is up, we should comment this out again
# enable_firewall = true
node_pools {
node_quantity = 1
plan = var.cluster.pools["meta"].plan
label = var.admin_services.namespace
min_nodes = var.cluster.pools["meta"].min
max_nodes = var.cluster.pools["meta"].max
# tag = var.admin_services.namespace
}
}
label = var.cluster.label
vpc_id = vultr_vpc.athens.id
resource vultr_kubernetes_node_pools games {
cluster_id = vultr_kubernetes.athens.id
node_quantity = var.cluster.pools["games"].min
plan = var.cluster.pools["games"].plan
label = var.game_servers.namespace
min_nodes = var.cluster.pools["games"].min
max_nodes = var.cluster.pools["games"].max
tag = var.admin_services.namespace
node_pools {
node_quantity = var.cluster.pools["main"].min_nodes
plan = var.cluster.pools["main"].plan
label = var.cluster.pools["main"].label
min_nodes = var.cluster.pools["main"].min_nodes
max_nodes = var.cluster.pools["main"].max_nodes
}
}
output k8s_config {

View File

@ -1,4 +0,0 @@
# created by virtualenv automatically
bin/
lib/

View File

@ -1,51 +0,0 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re
def get_args() -> Namespace:
parser = ArgumentParser(
prog="Cluster Search Thing",
description="General utility for finding resources for game server bot"
)
games = {"reflex", "minecraft"}
parser.add_argument('-g', '--game', required=False, choices=games)
admin = {"health"}
parser.add_argument('-a', '--admin', required=False, choices=admin)
return parser.parse_args()
def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
config.load_kube_config("../config.yaml")
return client.CoreV1Api()
def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
print('admin thing requested', args.admin)
def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
node_name = pods.items[0].spec.node_name
services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
port = services.items[0].spec.ports[0].port
# Collecting the IPV4 of the node that contains the pod(container)
# we actually care about. Since these pods only have 1 container
# Now we collect specific data about the game server we requested
node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')
if __name__ == '__main__':
args = get_args()
api = k8s_api('../config.yaml')
if args.game:
get_game_server_ip(args, api)
if args.admin:
get_admin_service_details(args, api)

View File

@ -1,8 +0,0 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3

View File

@ -1,18 +0,0 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0

View File

@ -1,32 +1,23 @@
resource vultr_firewall_rule web_inbound {
for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
firewall_group_id = vultr_kubernetes.athens.firewall_group_id
# resource vultr_firewall_rule web_inbound {
# for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
# firewall_group_id = vultr_kubernetes.athens.firewall_group_id
# protocol = "tcp"
# ip_type = "v4"
# subnet = "0.0.0.0"
# subnet_size = 0
# port = each.value
# }
resource vultr_firewall_group bastion {
description = "For connections into and out of the bastion host"
}
resource vultr_firewall_rule bastion_inbound {
firewall_group_id = vultr_firewall_group.bastion.id
protocol = "tcp"
ip_type = "v4"
subnet = "0.0.0.0"
subnet_size = 0
port = each.value
}
resource vultr_firewall_rule game-server-inbound {
for_each = var.game_servers.configs
firewall_group_id = vultr_kubernetes.athens.firewall_group_id
protocol = "tcp"
ip_type = "v4"
subnet = "0.0.0.0"
subnet_size = 0
port = each.value.port.expose
}
resource vultr_firewall_rule admin-service-inbound {
for_each = var.admin_services.configs
firewall_group_id = vultr_kubernetes.athens.firewall_group_id
protocol = "tcp"
ip_type = "v4"
subnet = "0.0.0.0"
subnet_size = 0
notes = each.value.port.notes
port = each.value.port.expose
port = 22
}

View File

@ -1,55 +0,0 @@
resource kubernetes_namespace game-servers {
count = length(var.game_servers.configs) > 0 ? 1 : 0
metadata {
name = var.game_servers.namespace
}
}
resource kubernetes_pod game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@ -26,46 +26,30 @@ variable cluster {
label = string
version = string
pools = map(object({
plan = string
autoscale = bool
min = number
max = number
node_quantity = number
plan = string
label = string
min_nodes = number
max_nodes = number
tag = string
}))
})
}
variable game_servers {
variable personal {
type = object({
namespace = string
configs = map(object({
name = optional(string)
image = string
cpu = string
mem = string
port = object({
internal = number
expose = number
})
proto = optional(string)
}))
})
}
variable admin_services {
variable bastion {
type = object({
namespace = string
configs = map(object({
name = string
image = string
cpu = string
mem = string
port = object({
notes = optional(string)
internal = number
expose = number
})
proto = optional(string)
}))
plan = string
os = string
label = string
})
}

View File

@ -1,51 +1,27 @@
cluster = {
region = "lax"
label = "athens-cluster"
version = "v1.31.2+1"
version = "v1.33.0+1"
pools = {
meta = {
plan = "vc2-1c-2gb"
autoscale = true
min = 1
max = 2
}
games = {
plan = "vc2-1c-2gb"
autoscale = true
min = 1
max = 3
main = {
node_quantity = 1
plan = "vc2-1c-2gb"
label = "main"
min_nodes = 1
max_nodes = 2
tag = "athens-main"
}
}
}
game_servers = {
namespace = "games"
configs = {
# minecraft = {
# image = "itzg/minecraft-server"
# cpu = "1000m"
# mem = "2048Mi"
# port = {
# expose = 30808
# internal = 80
# }
# }
}
personal = {
namespace = "athens-main"
}
admin_services = {
namespace = "admin-services"
configs = {
# health = {
# image = "nginx:latest"
# name = "health"
# cpu = "200m"
# mem = "64Mi"
# port = {
# notes = "Basic nginx sanity check service"
# expose = 30800
# internal = 80
# }
# }
}
bastion = {
plan = "vc2-1c-2gb"
label = "bastion"
os = "1743"
}

View File

@ -0,0 +1,4 @@
resource vultr_vpc athens {
description = "Private VPC for private and personal service projects"
region = var.cluster.region
}