Compare commits

..

5 Commits

Author SHA1 Message Date
510baa7f94 Basic setup now passing initial checks
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
2025-03-04 15:14:22 -08:00
088846cad9 Ensure that static hosts have docker and the latest python versions installed
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
2025-03-04 12:34:41 -08:00
1be3a8e588 Quick fix for ansible-lint things
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
2025-03-04 11:46:17 -08:00
da580eb7d2 REmoving bogus wiki stuff
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
2025-03-04 11:44:09 -08:00
a8d7c01efe Slowing building out the new workflows
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
2025-03-04 11:20:00 -08:00
32 changed files with 168 additions and 273 deletions

View File

@@ -1,3 +0,0 @@
nigel:
hosts:
nigel.local:

View File

@@ -1,4 +0,0 @@
---
skip_list:
- role-name
- var-naming[no-role-prefix]

View File

@@ -1,27 +0,0 @@
# This playbook is meant to be a one-shot, run manually on the dev box.
# The rest of the role content is meant to run as the admin user that
# this playbook creates for us.
---
- hosts: nigel.local
remote_user: nigel
vars:
admin:
username: nigel
tasks:
- name: Copy the nigel admin key
ansible.builtin.authorized_key:
user: "{{ admin.username }}"
state: present
key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
- name: Prevent password based logins
become: true
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
- name: Restart SSH Daemon
become: true
ansible.builtin.service:
name: ssh
state: restarted
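
Since this deleted playbook was a one-shot meant to be run by hand, a plausible invocation is sketched below; the playbook filename is an assumption (the diff does not record file paths), and the trailing comma turns the hostname into a one-host inventory:

```sh
# Hypothetical invocation of the one-shot play above; oneshot.yaml is
# an assumed filename. remote_user is already set to nigel in the play.
ansible-playbook -i nigel.local, oneshot.yaml
```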

View File

@@ -1,21 +0,0 @@
---
- hosts: nigel.local
remote_user: nigel
tasks:
- name: Setup basic role on nigel
tags:
- setup
ansible.builtin.include_role:
name: local-server-head
apply:
tags:
- setup
- name: Setup Docker services on nigel
tags:
- services
ansible.builtin.include_role:
name: services
apply:
become: true
tags:
- services

View File

@@ -1 +0,0 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable

View File

@@ -1,41 +0,0 @@
- name: Ensure we have basic updated packages setting up docker
ansible.builtin.apt:
name: "{{ item }}"
update_cache: true
loop:
- ca-certificates
- curl
- name: Running install on the keyrings directory
ansible.builtin.command:
cmd: install -m 0755 -d /etc/apt/keyrings
register: install
changed_when: install.rc == 0
- name: Fetch Docker GPG Key
vars:
keylink: https://download.docker.com/linux/ubuntu/gpg
ansible.builtin.get_url:
url: "{{ keylink }}"
dest: /etc/apt/keyrings/docker.asc
mode: "0644"
- name: Add repo to apt sources
ansible.builtin.copy:
src: docker.list
dest: /etc/apt/sources.list.d/docker.list
mode: "0644"
- name: Update Apt cache with latest docker.list packages
ansible.builtin.apt:
update_cache: true
- name: Ensure all docker packages are updated to the latest versions
ansible.builtin.apt:
name: "{{ item }}"
loop:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- name: Verify that the docker components are installed properly
ansible.builtin.command:
cmd: docker run hello-world
register: docker
changed_when: docker.rc == 0

View File

@@ -1,18 +0,0 @@
- name: Ensure docker components are installed
tags:
- setup
ansible.builtin.include_tasks:
file: ensure-docker-basic.yaml
apply:
become: true
tags:
- setup
- name: Ensure docker services are present and ready for configuration/usage
tags:
- services
ansible.builtin.include_tasks:
file: ensure-docker-services.yaml
apply:
become: true
tags:
- services

View File

@@ -1,32 +0,0 @@
- name: Ensure docker dir is present
ansible.builtin.file:
path: "{{ services.compose_dir }}"
state: directory
mode: "0755"
- name: Collect id -u result
ansible.builtin.command:
cmd: id -u
register: id_u
changed_when: false
- name: Collect id -g result
ansible.builtin.command:
cmd: id -g
register: id_g
changed_when: false
- name: Ensure compose.yaml is present
vars:
puid: "{{ id_u.stdout }}"
pgid: "{{ id_g.stdout }}"
health_port: "{{ services.health.port }}"
ansible.builtin.template:
src: compose.yaml
dest: "{{ services.compose_dir }}/compose.yaml"
mode: "0644"
- name: Apply docker compose with services
community.docker.docker_compose_v2:
project_src: "{{ services.compose_dir }}"
remove_orphans: true
register: compose_output
- name: Show output of docker compose apply
ansible.builtin.debug:
var: compose_output

View File

@@ -1,19 +0,0 @@
services:
health:
container_name: health
image: nginx:latest
restart: always
ports:
- "{{ health_port }}:80"
minio:
container_name: minio
image: quay.io/minio/minio
command: server /data --console-address ":9001"
volumes:
- "/opt/minio/data:/data"
environment:
MINIO_ROOT_USER: admin
MINIO_ROOT_PASSWORD: admin123
ports:
- "9000:9000"
- "9001:9001"

View File

@@ -1,4 +0,0 @@
services:
compose_dir: /home/nigel/compose
health:
port: 8080
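
Combined with the compose template above, these defaults put the rendered file at /home/nigel/compose/compose.yaml with the nginx health container listening on port 8080. A minimal smoke test, assuming those defaults, might look like:

```sh
# Smoke test, assuming the role defaults above (a sketch, not part of
# the role itself).
docker compose -f /home/nigel/compose/compose.yaml ps           # stack is up?
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8080  # expect 200 from health
```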

View File

@@ -0,0 +1,23 @@
#!/bin/bash
set -e
bucket="$1"
s3env=/opt/nginx/s3.env
[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1
[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env
pull() {
aws s3 sync s3://$bucket /opt/nginx/$bucket
}
case $bucket in
resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
*) echo "Invalid bucket name" && exit 1 ;;
esac
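
A usage sketch for the sync script above; pull-bucket.sh is a hypothetical name since the diff does not show the script's path, and /opt/nginx/s3.env must already hold the AWS credentials:

```sh
# Sync one of the three allowed buckets into /opt/nginx/<bucket>.
./pull-bucket.sh shockrah.xyz   # pulls s3://shockrah.xyz
./pull-bucket.sh other-bucket   # prints "Invalid bucket name" and exits 1
```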

View File

@@ -17,7 +17,7 @@ resource kubernetes_pod admin {
   }
   spec {
     node_selector = {
-      "vke.vultr.com/node-pool" = var.admin_services.namespace
+      NodeType = var.admin_services.namespace
     }
     container {
       image = each.value.image

View File

@@ -22,7 +22,7 @@ resource vultr_kubernetes_node_pools games {
   label = var.game_servers.namespace
   min_nodes = var.cluster.pools["games"].min
   max_nodes = var.cluster.pools["games"].max
-  tag = var.game_servers.namespace
+  tag = var.admin_services.namespace
 }
 output k8s_config {

View File

@@ -8,7 +8,7 @@ def get_args() -> Namespace:
         prog="Cluster Search Thing",
         description="General utility for finding resources for game server bot"
     )
-    games = {"health", "reflex", "minecraft"}
+    games = {"reflex", "minecraft"}
     parser.add_argument('-g', '--game', required=False, choices=games)
     admin = {"health"}
@@ -21,19 +21,11 @@ def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
 def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
     print('admin thing requested', args.admin)
-    services = api.list_service_for_all_namespaces(label_selector=f'app={args.admin}')
-    if len(services.items) == 0:
-        print(f'Unable to find {args.admin} amongst the admin-services')
-        return
-    port = services.items[0].spec.ports[0].port
-    node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
-    ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
-    ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
-    print(f'{args.admin} --> {ipv4}:{port} ~~> {ipv6}:{port}')
 def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
-    pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
-    node_name = pods.items[0].spec.node_name
     services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
     port = services.items[0].spec.ports[0].port

View File

@@ -29,3 +29,4 @@ resource vultr_firewall_rule admin-service-inbound {
   notes = each.value.port.notes
   port = each.value.port.expose
 }

View File

@@ -21,22 +21,31 @@ cluster = {
 game_servers = {
   namespace = "games"
   configs = {
+    # minecraft = {
+    #   image = "itzg/minecraft-server"
+    #   cpu = "1000m"
+    #   mem = "2048Mi"
+    #   port = {
+    #     expose = 30808
+    #     internal = 80
+    #   }
+    # }
   }
 }
 admin_services = {
   namespace = "admin-services"
   configs = {
-    health = {
-      image = "nginx:latest"
-      name = "health"
-      cpu = "200m"
-      mem = "64Mi"
-      port = {
-        notes = "Basic nginx sanity check service"
-        expose = 30800
-        internal = 80
-      }
-    }
+    # health = {
+    #   image = "nginx:latest"
+    #   name = "health"
+    #   cpu = "200m"
+    #   mem = "64Mi"
+    #   port = {
+    #     notes = "Basic nginx sanity check service"
+    #     expose = 30800
+    #     internal = 80
+    #   }
+    # }
   }
 }

playbooks/inventory.yaml Normal file (+3)
View File

@@ -0,0 +1,3 @@
static-web:
hosts:
shockrah.xyz:

View File

@@ -0,0 +1,9 @@
---
- name: Pre Pipeline Playbook for Static Hosts
hosts: static-web
remote_user: root
tasks:
- name: Import manual setup steps
ansible.builtin.import_role:
name: static
tasks_from: setup-webadmin.yaml

playbooks/readme.md Normal file (+5)
View File

@@ -0,0 +1,5 @@
# What is this
Here be the Ansible-based workflows that we use to keep things like the static
hosts properly set up with all the resources they need to host the
services we intend to host.

View File

@@ -0,0 +1,8 @@
---
- name: Static Host Maintenance and Setup
hosts: static-web
remote_user: webadmin
tasks:
- name: Import static host role
ansible.builtin.import_role:
name: static
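
The two new playbooks pair up: the pre-pipeline play runs once as root to create the webadmin user, after which this maintenance play can run as webadmin. A plausible run order, assuming playbook filenames (only playbooks/inventory.yaml is recorded in the diff):

```sh
# One-time bootstrap as root, then routine maintenance as webadmin.
# Playbook filenames are assumptions; the inventory path is from the diff.
ansible-playbook -i playbooks/inventory.yaml playbooks/pre-pipeline.yaml
ansible-playbook -i playbooks/inventory.yaml playbooks/maintenance.yaml
```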

View File

View File

@@ -0,0 +1,5 @@
- name: Restart SSH
become: true
ansible.builtin.systemd:
name: sshd
state: restarted

View File

View File

@@ -0,0 +1,40 @@
# Things that we definitely want to have are the following
# docker docker-compose python(latest) certbot
- name: Uhhh yea
become: true
block:
- name: Install base dependencies
ansible.builtin.apt:
update_cache: true
pkg:
- ca-certificates
- curl
- lsb-release
- name: Setup keyring directory
ansible.builtin.command:
cmd: "install -m 0755 -d {{ static_keyring_dir }}"
creates: "{{ static_keyring_dir }}"
- name: Download the docker GPG key
ansible.builtin.get_url:
url: "{{ static_docker_ubuntu }}/gpg"
dest: "{{ static_keyring_dir }}/docker.asc"
mode: "0644"
- name: Ensure docker.list is present
vars:
key_path: "{{ static_keyring_dir }}/docker.asc"
repo: "{{ static_docker_ubuntu }}"
os_codename: jammy
ansible.builtin.template:
src: docker.list
dest: "{{ static_apt_sources_dir }}/docker.list"
mode: "0644"
- name: Install docker and python packages
ansible.builtin.apt:
update_cache: true
pkg:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- python3

View File

@@ -0,0 +1,43 @@
- name: Ensure sudo is available
ansible.builtin.apt:
state: present
update_cache: true
pkg:
- sudo
- zsh
- name: Create webadmin user
ansible.builtin.user:
name: webadmin
state: present
shell: /bin/zsh
groups:
- nginx
append: true
- name: Copy webadmin public key
ansible.posix.authorized_key:
user: webadmin
state: present
key: "{{ lookup('file', 'files/webadmin.pem.pub') }}"
- name: Add webadmin to sudoers
ansible.builtin.copy:
dest: "/etc/sudoers.d/webadmin"
content: "webadmin ALL=(ALL) NOPASSWD: ALL"
mode: "0644"
owner: root
group: root
- name: Disable Password Authentication
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
notify:
- Restart SSH
- name: Disable root login
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PermitRootLogin no
state: present
backup: true
notify:
- Restart SSH

View File

View File

@@ -0,0 +1 @@
deb [arch=amd64 signed-by={{ key_path }}] {{ repo }} {{ os_codename }} stable
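
Given the key_path, repo, and os_codename values wired in by the "Ensure docker.list is present" task earlier, plus the role defaults below, this template should render to a single apt source line; a quick check after a run, as a sketch:

```sh
# Expected render, assuming the role defaults below:
#   deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu jammy stable
cat /etc/apt/sources.list.d/docker.list
```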

View File

View File

@@ -0,0 +1,4 @@
static_keyring_dir: /etc/apt/keyrings
static_docker_ubuntu: https://download.docker.com/linux/ubuntu
static_apt_sources_dir: /etc/apt/sources.list.d
static_codename: jammy

View File

@@ -1,45 +0,0 @@
# What this covers
The creation of Atlas, as it happened, in order.
## Commands Ran
Once the infra was provisioned and verified to be configured correctly by
Terraform, we move on to the following:
```sh
# Setup the machine to run docker
ansible-playbook -i hosts.ini atlas/init/system-deps.yml
# Second we copy over the contents of Alpha's mounted docker volumes
ansible-playbook -i hosts.ini atlas/init/perma-mount-drives.yml
# Next we copy over the data that we want to migrate ( if any )
ansible-playbook -i hosts.ini -e filebrowser=/path -e clippable=/path atlas/init/migrate-clips-files.yml
# Setup the services on the host that we want to run
ansible-playbook -i hosts.ini atlas/init/setup-containers.yml
# Next we put up the reverse proxy (nginx)
ansible-playbook -i hosts.ini atlas/init/setup-reverse-proxy.yml
# Finally we add TLS on top of nginx and we're done
ansible-playbook -i hosts.ini atlas/init/setup-certbot.yml
```
Maintenance should be straightforward for this machine, as TLS is automatically
renewed every 3 months by a cron job. We can still update the certs manually
if we really want to. They also don't require any more manual variable injection
the way Alpha did, as the only thing protected was `dev@shockrah.xyz`, which is
at this point becoming semi-public. That is, while the address is associated
with code, it is more of a _business e-mail_, so it can be placed in this
repository with very little concern.
System updates are now also fetched with:
```sh
ansible-playbook -i hosts.ini atlas/maintain/analyze-system-deps.yml
```
This performs purely read operations and does not affect the state of the
machine.

View File

@@ -1,33 +0,0 @@
# Mounting an attached drive
Assumptions:
* New drive is attached (in AWS) and detected in software.
  Ideally the attachment is made through Terraform.
## Mounting Instructions (Step-by-Step)
1. Verify the new drive is blank: `sudo file -s /dev/xvdf`.
   This should return `data` if it's OK; otherwise we're probably looking at
   the wrong drive.
2. Create the filesystem on the new empty drive: `sudo mkfs -t ext4 /dev/xvdf`
3. Create a mountpoint somewhere so we can actually use the drive:
   `sudo mkdir /mnt/example`.
   Change _example_ to something that actually makes sense.
4. Add a new entry to /etc/fstab for automounting:
   `/dev/xvdf /newvolume ext4 defaults,nofail 0 0`
   (tab-delimited, by the way).
5. Mount all drives listed in `/etc/fstab` from before: `sudo mount -a`.
   The whole sequence is consolidated in the sketch below.
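
The five steps above as one hedged shell sketch; /dev/xvdf matches the example device in the list, and the mountpoint is normalized to /mnt/example (step 4's fstab line used /newvolume, so adjust whichever path is real):

```sh
# Consolidated sketch of the mounting steps above; adjust the device
# and mountpoint for the actual drive.
sudo file -s /dev/xvdf            # expect "data" on a blank drive
sudo mkfs -t ext4 /dev/xvdf       # create the filesystem
sudo mkdir /mnt/example           # mountpoint for the new volume
printf '/dev/xvdf\t/mnt/example\text4\tdefaults,nofail\t0\t0\n' | sudo tee -a /etc/fstab
sudo mount -a                     # mount everything listed in /etc/fstab
```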