Compare commits


5 Commits

| SHA1 | Message | Checks | Date |
|------------|--------------------------------------------------------------------------------|----------------------------------------------------------|-----------------------------|
| 510baa7f94 | Basic setup now passing initial checks | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 15:14:22 -08:00 |
| 088846cad9 | Ensure that static hosts have docker and the latest python versions installed | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 12:34:41 -08:00 |
| 1be3a8e588 | Quick fix for ansible-lint things | Ansible Linting / ansible-lint (push): failing after 3s | 2025-03-04 11:46:17 -08:00 |
| da580eb7d2 | REmoving bogus wiki stuff | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 11:44:09 -08:00 |
| a8d7c01efe | Slowing building out the new workflows | Ansible Linting / ansible-lint (push): failing after 5s | 2025-03-04 11:20:00 -08:00 |
21 changed files with 145 additions and 103 deletions

View File

```diff
@@ -17,7 +17,7 @@ resource kubernetes_pod admin {
   }
   spec {
     node_selector = {
-      "vke.vultr.com/node-pool" = var.admin_services.namespace
+      NodeType = var.admin_services.namespace
     }
     container {
       image = each.value.image
```

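For context on this change: with a plain `NodeType` key, the pod only schedules onto nodes that actually carry that label. A quick hedged check, assuming `kubectl` is pointed at this cluster and that the label value resolves to `admin-services` (per the tfvars further down):

```sh
# Show the NodeType label as a column for every node
kubectl get nodes -L NodeType

# List only the nodes labeled for the admin-services pool
kubectl get nodes -l NodeType=admin-services
```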
View File

```diff
@@ -22,7 +22,7 @@ resource vultr_kubernetes_node_pools games {
   label     = var.game_servers.namespace
   min_nodes = var.cluster.pools["games"].min
   max_nodes = var.cluster.pools["games"].max
-  tag       = var.game_servers.namespace
+  tag       = var.admin_services.namespace
 }

 output k8s_config {
```

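Before applying, it may be worth confirming that only the pool's tag changes; a sketch of the check, assuming the usual Terraform workflow in this directory:

```sh
# The plan should show exactly one in-place update: the tag attribute
terraform plan -target=vultr_kubernetes_node_pools.games
```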
View File

```diff
@@ -8,7 +8,7 @@ def get_args() -> Namespace:
         prog="Cluster Search Thing",
         description="General utility for finding resources for game server bot"
     )
-    games = {"health", "reflex", "minecraft"}
+    games = {"reflex", "minecraft"}
     parser.add_argument('-g', '--game', required=False, choices=games)
     admin = {"health"}
@@ -21,19 +21,11 @@ def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
 def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
     print('admin thing requested', args.admin)
     services = api.list_service_for_all_namespaces(label_selector=f'app={args.admin}')
-    if len(services.items) == 0:
-        print(f'Unable to find {args.admin} amongst the admin-services')
-        return
-    port = services.items[0].spec.ports[0].port
-    node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
-    ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
-    ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
-    print(f'{args.admin} --> {ipv4}:{port} ~~> {ipv6}:{port}')

 def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
     pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
     node_name = pods.items[0].spec.node_name
     services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
     port = services.items[0].spec.ports[0].port
```

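With `health` dropped from the game choices, lookups presumably split between the two flags; a usage sketch, where the script filename and the `--admin` flag spelling are assumptions (neither is visible in this diff):

```sh
# Game servers resolve through -g/--game
python3 search.py --game minecraft

# Admin services such as the nginx health check resolve through --admin
python3 search.py --admin health
```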
View File

```diff
@@ -29,3 +29,4 @@ resource vultr_firewall_rule admin-service-inbound {
   notes = each.value.port.notes
   port  = each.value.port.expose
 }
+
```

View File

```diff
@@ -21,22 +21,31 @@ cluster = {
 game_servers = {
   namespace = "games"
   configs = {
+    # minecraft = {
+    #   image = "itzg/minecraft-server"
+    #   cpu   = "1000m"
+    #   mem   = "2048Mi"
+    #   port = {
+    #     expose   = 30808
+    #     internal = 80
+    #   }
+    # }
   }
 }

 admin_services = {
   namespace = "admin-services"
   configs = {
-    health = {
-      image = "nginx:latest"
-      name  = "health"
-      cpu   = "200m"
-      mem   = "64Mi"
-      port = {
-        notes    = "Basic nginx sanity check service"
-        expose   = 30800
-        internal = 80
-      }
-    }
+    # health = {
+    #   image = "nginx:latest"
+    #   name  = "health"
+    #   cpu   = "200m"
+    #   mem   = "64Mi"
+    #   port = {
+    #     notes    = "Basic nginx sanity check service"
+    #     expose   = 30800
+    #     internal = 80
+    #   }
+    # }
   }
 }
```

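Since both `configs` maps are now empty, applying this should tear the health pod and its service down; a cautious sequence under the standard workflow:

```sh
# Review what gets destroyed now that health is commented out
terraform plan

# Apply once the plan shows only the expected removals
terraform apply
```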
playbooks/inventory.yaml (new file, +3)
View File

@@ -0,0 +1,3 @@
```yaml
static-web:
  hosts:
    shockrah.xyz:
```

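A minimal smoke test for the new inventory, assuming SSH access to `shockrah.xyz` already works:

```sh
# Confirm the static-web group resolves and the host responds
ansible -i playbooks/inventory.yaml static-web -m ping
```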
View File

@@ -0,0 +1,9 @@
```yaml
---
- name: Pre Pipeline Playbook for Static Hosts
  hosts: static-web
  remote_user: root
  tasks:
    - name: Import manual setup steps
      ansible.builtin.import_role:
        name: static
        tasks_from: setup-webadmin.yaml
```

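This play has to run as `root` once, before `webadmin` exists; an invocation sketch, with the playbook filename assumed (it is not visible in this diff):

```sh
# First run only: bootstrap the webadmin user as root
ansible-playbook -i playbooks/inventory.yaml playbooks/pre-pipeline.yaml
```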
playbooks/readme.md (new file, +5)
View File

@@ -0,0 +1,5 @@
# What is this
Here be the Ansible-based workflows that we use to keep things like the static
hosts properly set up with all the resources they need to host the services we
intend on hosting.

View File

@@ -0,0 +1,8 @@
```yaml
---
- name: Static Host Maintenance and Setup
  hosts: static-web
  remote_user: webadmin
  tasks:
    - name: Import static host role
      ansible.builtin.import_role:
        name: static
```

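Subsequent runs go through `webadmin` instead; a check-mode pass is a cheap way to preview drift first (the filename is again an assumption):

```sh
# Dry-run the maintenance play and show the changes it would make
ansible-playbook -i playbooks/inventory.yaml playbooks/maintenance.yaml --check --diff
```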
View File

@@ -0,0 +1,5 @@
```yaml
- name: Restart SSH
  become: true
  ansible.builtin.systemd:
    name: sshd
    state: restarted
```

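This handler only fires when one of the `lineinfile` tasks further down actually edits `sshd_config`. To verify the daemon came back up after a run, something along these lines:

```sh
# Confirm sshd restarted cleanly on the host
ssh webadmin@shockrah.xyz 'sudo systemctl status sshd --no-pager'
```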
View File

@@ -0,0 +1,40 @@
```yaml
# Things that we definitely want to have are the following
# docker docker-compose python(latest) certbot
- name: Uhhh yea
  become: true
  block:
    - name: Install base dependencies
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - ca-certificates
          - curl
          - lsb-release
    - name: Setup keyring directory
      ansible.builtin.command:
        cmd: "install -m 0755 -d {{ static_keyring_dir }}"
        creates: "{{ static_keyring_dir }}"
    - name: Download the docker GPG key
      ansible.builtin.get_url:
        url: "{{ static_docker_ubuntu }}/gpg"
        dest: "{{ static_keyring_dir }}/docker.asc"
        mode: "0644"
    - name: Ensure docker.list is present
      vars:
        key_path: "{{ static_keyring_dir }}/docker.asc"
        repo: "{{ static_docker_ubuntu }}"
        os_codename: jammy
      ansible.builtin.template:
        src: docker.list
        dest: "{{ static_apt_sources_dir }}/docker.list"
        mode: "0644"
    - name: Install docker and python packages
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-buildx-plugin
          - docker-compose-plugin
          - python3
```

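Once this role has run, the whole docker toolchain should be in place; a rough sanity check against the inventory host:

```sh
# All three should succeed if the apt repo and packages landed correctly
ssh webadmin@shockrah.xyz 'docker --version && docker compose version && python3 --version'
```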
View File

@@ -0,0 +1,43 @@
```yaml
- name: Ensure sudo is available
  ansible.builtin.apt:
    state: present
    update_cache: true
    pkg:
      - sudo
      - zsh
- name: Create webadmin user
  ansible.builtin.user:
    name: webadmin
    state: present
    shell: /bin/zsh
    groups:
      - nginx
    append: true
- name: Copy webadmin public key
  ansible.posix.authorized_key:
    user: webadmin
    state: present
    key: "{{ lookup('file', 'files/webadmin.pem.pub') }}"
- name: Add webadmin to sudoers
  ansible.builtin.copy:
    dest: "/etc/sudoers.d/webadmin"
    content: "webadmin ALL=(ALL) NOPASSWD: ALL"
    mode: "0644"
    owner: root
    group: root
- name: Disable Password Authentication
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PasswordAuthentication no
    state: present
    backup: true
  notify:
    - Restart SSH
- name: Disable root login
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PermitRootLogin no
    state: present
    backup: true
  notify:
    - Restart SSH
```

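A hedged way to confirm both the key and the passwordless sudo drop-in took effect, assuming the matching private key lives at `~/.ssh/webadmin.pem` (path assumed):

```sh
# Key-based login plus non-interactive sudo should both succeed
ssh -i ~/.ssh/webadmin.pem webadmin@shockrah.xyz 'sudo -n true && echo sudo OK'
```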
View File

@@ -0,0 +1 @@
```
deb [arch=amd64 signed-by={{ key_path }}] {{ repo }} {{ os_codename }} stable
```

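With the defaults below substituted in, the template should render to a single apt source line, roughly:

```sh
cat /etc/apt/sources.list.d/docker.list
# deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu jammy stable
```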
View File

@@ -0,0 +1,4 @@
```yaml
static_keyring_dir: /etc/apt/keyrings
static_docker_ubuntu: https://download.docker.com/linux/ubuntu
static_apt_sources_dir: /etc/apt/sources.list.d
static_codename: jammy
```

View File

@@ -1,45 +0,0 @@
# What this covers
The creation of Atlas as it happened, in order.

## Commands Run
Once the infra was provisioned and verified to be configured correctly by
Terraform, we move on to the following:

```sh
# Setup the machine to run docker
ansible-playbook -i hosts.ini atlas/init/system-deps.yml
# Second we copy over the contents of Alpha's mounted docker volumes
ansible-playbook -i hosts.ini atlas/init/perma-mount-drives.yml
# Next we copy over the data that we want to migrate (if any)
ansible-playbook -i hosts.ini -e filebrowser=/path -e clippable=/path atlas/init/migrate-clips-files.yml
# Setup the services on the host that we want to run
ansible-playbook -i hosts.ini atlas/init/setup-containers.yml
# Next we put up the reverse proxy (nginx)
ansible-playbook -i hosts.ini atlas/init/setup-reverse-proxy.yml
# Finally we add TLS on top of nginx and we're done
ansible-playbook -i hosts.ini atlas/init/setup-certbot.yml
```

Maintenance should be straightforward for this machine, as TLS is automatically
renewed every 3 months by a cron job. We can still renew the certs manually if
we really want to. The playbooks also no longer require manual variable
injection like Alpha did, as the only protected value was `dev@shockrah.xyz`,
which is at this point becoming semi-public. Since it is more of a
_business e-mail_ than a personal one, it can be placed in this repository with
very little concern.

System updates are now also fetched with:

```sh
ansible-playbook -i hosts.ini atlas/maintain/analyze-system-deps.yml
```

which performs purely read operations and does not affect the state of the
machine.

View File

@@ -1,33 +0,0 @@
# Mounting an attached drive
Assumptions:
* The new drive is attached (in AWS) and detected in software; ideally the
  attachment is made through Terraform.

## Mounting Instructions (Step-by-Step)
1. Verify the drive does not already have data: `sudo file -s /dev/xvdf`.
   This should return `data` if it's empty; otherwise we're probably looking
   at the wrong drive.
2. Create the filesystem on the new empty drive: `sudo mkfs -t ext4 /dev/xvdf`
3. Create a mountpoint somewhere so we can actually use the drive:
   `sudo mkdir /mnt/example`.
   Change _example_ to something that actually makes sense.
4. Add a new entry to `/etc/fstab` for automounting (tab-delimited), pointing
   at the mountpoint from step 3:
   `/dev/xvdf /mnt/example ext4 defaults,nofail 0 0`
5. Mount all drives listed in `/etc/fstab`: `sudo mount -a`
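
Condensed into one copy-paste block, using the same device and mountpoint names as the steps above:

```sh
sudo file -s /dev/xvdf        # expect "data" on an empty drive
sudo mkfs -t ext4 /dev/xvdf   # create the filesystem
sudo mkdir /mnt/example       # mountpoint; rename as appropriate
printf '/dev/xvdf\t/mnt/example\text4\tdefaults,nofail\t0\t0\n' | sudo tee -a /etc/fstab
sudo mount -a                 # mount everything listed in fstab
```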