Compare commits: fix/ansibl...master

9 commits:

- b68d53b143
- 3c6bc90feb
- 3521b840ae
- 5f10976264
- 10e936a8da
- 8bbaea8fd9
- d39e0c04e5
- b99525955e
- 9b6f9b6656
ansible/inventory.yaml (new file, 3 lines)

@@ -0,0 +1,3 @@
nigel:
  hosts:
    nigel.local:
ansible/linter.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
---
skip_list:
  - role-name
  - var-naming[no-role-prefix]
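This reads as an ansible-lint skip-list config. Nothing in the diff shows how it is invoked, so as a sketch it would presumably be passed to the linter explicitly:

```sh
# Point ansible-lint at the repo's config instead of a default .ansible-lint
ansible-lint -c ansible/linter.yaml ansible/nuc.yaml
```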
ansible/local-setup-admin-user.yaml (new file, 27 lines)

@@ -0,0 +1,27 @@
# This playbook is meant to be a one-shot to be run manually on the dev box.
# The rest of the role stuff is meant to be run as the admin user that
# this playbook creates for us.
---
- hosts: nigel.local
  remote_user: nigel
  vars:
    admin:
      username: nigel
  tasks:
    - name: Copy the nigel admin key
      ansible.builtin.authorized_key:
        user: "{{ admin.username }}"
        state: present
        key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
    - name: Prevent password based logins
      become: true
      ansible.builtin.lineinfile:
        dest: /etc/ssh/sshd_config
        line: PasswordAuthentication no
        state: present
        backup: true
    - name: Restart SSH Daemon
      become: true
      ansible.builtin.service:
        name: ssh
        state: restarted
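Given the header comment calls this a one-shot to run by hand, a plausible invocation (inventory path assumed from the file added above) would be:

```sh
# Bootstrap the admin user's key and lock down SSH password logins
ansible-playbook -i ansible/inventory.yaml ansible/local-setup-admin-user.yaml
```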
ansible/nuc.yaml (new file, 21 lines)

@@ -0,0 +1,21 @@
---
- hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Setup basic role on nigel
      tags:
        - setup
      ansible.builtin.include_role:
        name: local-server-head
        apply:
          tags:
            - setup
    - name: Setup Docker services on nigel
      tags:
        - services
      ansible.builtin.include_role:
        name: services
        apply:
          become: true
          tags:
            - services
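Since both role includes are tag-gated, the playbook can presumably be driven in two passes; a sketch, assuming the inventory above:

```sh
# First pass: install Docker and base packages
ansible-playbook -i ansible/inventory.yaml ansible/nuc.yaml --tags setup
# Second pass: template and start the compose services
ansible-playbook -i ansible/inventory.yaml ansible/nuc.yaml --tags services
```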
ansible/roles/local-server-head/files/docker.list (new file, 1 line)

@@ -0,0 +1 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable
ansible/roles/local-server-head/tasks/ensure-docker-basic.yaml (new file, 41 lines; path inferred from the include in tasks/main.yaml below)

@@ -0,0 +1,41 @@
- name: Ensure we have basic updated packages setting up docker
  ansible.builtin.apt:
    name: "{{ item }}"
    update_cache: true
  loop:
    - ca-certificates
    - curl
- name: Running install on the keyrings directory
  ansible.builtin.command:
    cmd: install -m 0755 -d /etc/apt/keyrings
  register: install
  changed_when: install.rc == 0
- name: Fetch Docker GPG Key
  vars:
    keylink: https://download.docker.com/linux/ubuntu/gpg
  ansible.builtin.get_url:
    url: "{{ keylink }}"
    dest: /etc/apt/keyrings/docker.asc
    mode: "0644"
- name: Add repo to apt sources
  ansible.builtin.copy:
    src: docker.list
    dest: /etc/apt/sources.list.d/docker.list
    mode: "0644"
- name: Update Apt cache with latest docker.list packages
  ansible.builtin.apt:
    update_cache: true
- name: Ensure all docker packages are updated to the latest versions
  ansible.builtin.apt:
    name: "{{ item }}"
  loop:
    - docker-ce
    - docker-ce-cli
    - containerd.io
    - docker-buildx-plugin
    - docker-compose-plugin
- name: Verify that the docker components are installed properly
  ansible.builtin.command:
    cmd: docker run hello-world
  register: docker
  changed_when: docker.rc == 0
ansible/roles/local-server-head/tasks/main.yaml (new file, 18 lines)

@@ -0,0 +1,18 @@
- name: Ensure docker components are installed
  tags:
    - setup
  ansible.builtin.include_tasks:
    file: ensure-docker-basic.yaml
    apply:
      become: true
      tags:
        - setup
- name: Ensure docker services are present and ready for configuration/usage
  tags:
    - services
  ansible.builtin.include_tasks:
    file: ensure-docker-services.yaml
    apply:
      become: true
      tags:
        - services
ansible/roles/services/tasks/main.yaml (new file, 32 lines)

@@ -0,0 +1,32 @@
- name: Ensure docker dir is present
  ansible.builtin.file:
    path: "{{ services.compose_dir }}"
    state: directory
    mode: "0755"
- name: Collect id -u result
  ansible.builtin.command:
    cmd: id -u
  register: id_u
  changed_when: false
- name: Collect id -g result
  ansible.builtin.command:
    cmd: id -g
  register: id_g
  changed_when: false
- name: Ensure compose.yaml is present
  vars:
    puid: "{{ id_u.stdout }}"
    pgid: "{{ id_g.stdout }}"
    health_port: "{{ services.health.port }}"
  ansible.builtin.template:
    src: compose.yaml
    dest: "{{ services.compose_dir }}/compose.yaml"
    mode: "0644"
- name: Apply docker compose with services
  community.docker.docker_compose_v2:
    project_src: "{{ services.compose_dir }}"
    remove_orphans: true
  register: compose_output
- name: Show output of docker compose apply
  ansible.builtin.debug:
    var: compose_output
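`community.docker.docker_compose_v2` ships in the community.docker collection rather than in ansible-core, so the control node needs that collection installed before this role will run:

```sh
# One-time dependency on the control node
ansible-galaxy collection install community.docker
```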
ansible/roles/services/templates/compose.yaml (new file, 19 lines)

@@ -0,0 +1,19 @@
services:
  health:
    container_name: health
    image: nginx:latest
    restart: always
    ports:
      - "{{ health_port }}:80"
  minio:
    container_name: minio
    image: quay.io/minio/minio
    command: server /data --console-address ":9001"
    volumes:
      - "/opt/minio/data:/data"
    environment:
      MINIO_ROOT_USER: admin
      MINIO_ROOT_PASSWORD: admin123
    ports:
      - "9000:9000"
      - "9001:9001"
ansible/roles/services/vars/main.yaml (new file, 4 lines)

@@ -0,0 +1,4 @@
services:
  compose_dir: /home/nigel/compose
  health:
    port: 8080
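With these vars, the templated health container publishes host port 8080; a quick smoke test (hostname taken from the inventory above, so treat it as an assumption) could be:

```sh
# Expect the default nginx welcome page if the stack came up
curl -i http://nigel.local:8080/
```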
(deleted file)

@@ -1,23 +0,0 @@
#!/bin/bash

set -e

bucket="$1"
s3env=/opt/nginx/s3.env

[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1

[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env

pull() {
    aws s3 sync s3://$bucket /opt/nginx/$bucket
}

case $bucket in
    resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
    *) echo "Invalid bucket name" && exit 1 ;;
esac
@@ -17,7 +17,7 @@ resource kubernetes_pod admin {
  }
  spec {
    node_selector = {
-     NodeType = var.admin_services.namespace
+     "vke.vultr.com/node-pool" = var.admin_services.namespace
    }
    container {
      image = each.value.image
@@ -22,7 +22,7 @@ resource vultr_kubernetes_node_pools games {
  label = var.game_servers.namespace
  min_nodes = var.cluster.pools["games"].min
  max_nodes = var.cluster.pools["games"].max
- tag = var.admin_services.namespace
+ tag = var.game_servers.namespace
}

output k8s_config {
@@ -8,7 +8,7 @@ def get_args() -> Namespace:
        prog="Cluster Search Thing",
        description="General utility for finding resources for game server bot"
    )
-    games = {"reflex", "minecraft"}
+    games = {"health", "reflex", "minecraft"}
    parser.add_argument('-g', '--game', required=False, choices=games)

    admin = {"health"}
@@ -21,11 +21,19 @@ def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:

def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
-    print('admin thing requested', args.admin)
+    services = api.list_service_for_all_namespaces(label_selector=f'app={args.admin}')
+    if len(services.items) == 0:
+        print(f'Unable to find {args.admin} amongst the admin-services')
+        return
+
+    port = services.items[0].spec.ports[0].port
+    node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
+    ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
+    ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
+
+    print(f'{args.admin} --> {ipv4}:{port} ~~> {ipv6}:{port}')

def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
    pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
    node_name = pods.items[0].spec.node_name

    services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
    port = services.items[0].spec.ports[0].port
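For reference, the argument wiring implies the tool is invoked roughly like this (the script's filename isn't shown in this diff, so the name here is hypothetical):

```sh
# Look up the admin 'health' service's external address (hypothetical filename)
python3 cluster_search.py --admin health
# Find which node a game server pod landed on
python3 cluster_search.py --game reflex
```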
@@ -29,4 +29,3 @@ resource vultr_firewall_rule admin-service-inbound {
  notes = each.value.port.notes
  port = each.value.port.expose
}
-
@@ -21,31 +21,22 @@ cluster = {
game_servers = {
  namespace = "games"
  configs = {
    # minecraft = {
    #   image = "itzg/minecraft-server"
    #   cpu = "1000m"
    #   mem = "2048Mi"
    #   port = {
    #     expose = 30808
    #     internal = 80
    #   }
    # }
  }
}

admin_services = {
  namespace = "admin-services"
  configs = {
-   # health = {
-   #   image = "nginx:latest"
-   #   name = "health"
-   #   cpu = "200m"
-   #   mem = "64Mi"
-   #   port = {
-   #     notes = "Basic nginx sanity check service"
-   #     expose = 30800
-   #     internal = 80
-   #   }
-   # }
+   health = {
+     image = "nginx:latest"
+     name = "health"
+     cpu = "200m"
+     mem = "64Mi"
+     port = {
+       notes = "Basic nginx sanity check service"
+       expose = 30800
+       internal = 80
+     }
+   }
  }
}
(deleted file)

@@ -1,3 +0,0 @@
static-web:
  hosts:
    shockrah.xyz:
(deleted file)

@@ -1,9 +0,0 @@
---
- name: Pre Pipeline Playbook for Static Hosts
  hosts: static-web
  remote_user: root
  tasks:
    - name: Import manual setup steps
      ansible.builtin.import_role:
        name: static
        tasks_from: setup-webadmin.yaml
(deleted file)

@@ -1,5 +0,0 @@
# What is this

Here be the ansible based workflows that we use to keep things like the static
hosts properly setup with all the resources they need to properly host the
services we intended on hosting.
(deleted file)

@@ -1,8 +0,0 @@
---
- name: Static Host Maintenance and Setup
  hosts: static-web
  remote_user: webadmin
  tasks:
    - name: Import static host role
      ansible.builtin.import_role:
        name: static
(deleted file)

@@ -1,5 +0,0 @@
- name: Restart SSH
  become: true
  ansible.builtin.systemd:
    name: sshd
    state: restarted
(deleted file)

@@ -1,40 +0,0 @@
# Things that we definitely want to have are the following
# docker docker-compose python(latest) certbot
- name: Uhhh yea
  become: true
  block:
    - name: Install base dependencies
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - ca-certificates
          - curl
          - lsb-release
    - name: Setup keyring directory
      ansible.builtin.command:
        cmd: "install -m 0755 -d {{ static_keyring_dir }}"
        creates: "{{ static_keyring_dir }}"
    - name: Download the docker GPG key
      ansible.builtin.get_url:
        url: "{{ static_docker_ubuntu }}/gpg"
        dest: "{{ static_keyring_dir }}/docker.asc"
        mode: "0644"
    - name: Ensure docker.lst is present
      vars:
        key_path: "{{ static_keyring_dir }}/docker.asc"
        repo: "{{ static_docker_ubuntu }}"
        os_codename: jammy
      ansible.builtin.template:
        src: docker.list
        dest: "{{ static_apt_sources_dir }}/docker.list"
        mode: "0644"
    - name: Install docker and python packages
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-buildx-plugin
          - docker-compose-plugin
          - python3
(deleted file)

@@ -1,43 +0,0 @@
- name: Ensure sudo is available
  ansible.builtin.apt:
    state: present
    update_cache: true
    pkg:
      - sudo
      - zsh
- name: Create webadmin user
  ansible.builtin.user:
    name: webadmin
    state: present
    shell: /bin/zsh
    groups:
      - nginx
    append: true
- name: Copy webadmin public key
  ansible.posix.authorized_key:
    user: webadmin
    state: present
    key: "{{ lookup('file', 'files/webadmin.pem.pub') }}"
- name: Add webadmin to sudoers
  ansible.builtin.copy:
    dest: "/etc/sudoers.d/webadmin"
    content: "webadmin ALL=(ALL) NOPASSWD: ALL"
    mode: "0644"
    owner: root
    group: root
- name: Disable Password Authentication
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PasswordAuthentication no
    state: present
    backup: true
  notify:
    - Restart SSH
- name: Disable root login
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PermitRootLogin no
    state: present
    backup: true
  notify:
    - Restart SSH
(deleted file)

@@ -1 +0,0 @@
deb [arch=amd64 signed-by={{ key_path }}] {{ repo }} {{ os_codename }} stable
(deleted file)

@@ -1,4 +0,0 @@
static_keyring_dir: /etc/apt/keyrings
static_docker_ubuntu: https://download.docker.com/linux/ubuntu
static_apt_sources_dir: /etc/apt/sources.list.d
static_codename: jammy
runbooks/atlas-setup.md (new file, 45 lines)

@@ -0,0 +1,45 @@
# What this covers

The creation of Atlas as it happened, in order.

## Commands Ran

Once the infra was provisioned and verified to be configured correctly by
Terraform, we move on to the following:

```sh
# Setup the machine to run docker
ansible-playbook -i hosts.ini atlas/init/system-deps.yml

# Second we copy over the contents of Alpha's mounted docker volumes
ansible-playbook -i hosts.ini atlas/init/perma-mount-drives.yml

# Next we copy over the data that we want to migrate (if any)
ansible-playbook -i hosts.ini -e filebrowser=/path -e clippable=/path atlas/init/migrate-clips-files.yml

# Setup the services on the host that we want to run
ansible-playbook -i hosts.ini atlas/init/setup-containers.yml

# Next we put up the reverse proxy (nginx)
ansible-playbook -i hosts.ini atlas/init/setup-reverse-proxy.yml

# Finally we add TLS on top of nginx and we're done
ansible-playbook -i hosts.ini atlas/init/setup-certbot.yml
```

Maintenance should be straightforward for this machine, as TLS is automatically
renewed every 3 months by a cron job. We can still update the certs manually if
we really want to. The playbooks also don't require any more manual variable
injection like Alpha did, since the only thing protected was `dev@shockrah.xyz`,
which is at this point becoming semi-public. While it is associated with code,
it is more of a _business e-mail_, so it can be placed in this repository with
very little concern.

System updates are now also to be fetched with:

```sh
ansible-playbook -i hosts.ini atlas/maintain/analyze-system-deps.yml
```

which performs purely read operations and does not affect the state of the
machine.
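The runbook mentions manual cert renewal as an option; presumably that is the stock certbot flow, sketched here since the repo capture doesn't show it:

```sh
# Rehearse the renewal first, then do it for real
sudo certbot renew --dry-run
sudo certbot renew
```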
runbooks/new-drive.md (new file, 33 lines)

@@ -0,0 +1,33 @@
# Mounting an attached drive

Assumptions:

* New drive is attached (in AWS) and detected in software.
  Ideally the attachment is made through Terraform.

## Mounting Instructions (Step-by-Step)

1. Verify the drive does not already contain data: `sudo file -s /dev/xvdf`

   This should return `data` if it's ok. Otherwise we're probably looking at
   the wrong drive.

2. Create the filesystem on the new empty drive: `sudo mkfs -t ext4 /dev/xvdf`

3. Create a mountpoint somewhere to actually use the drive:
   `sudo mkdir /mnt/example`.

   Change _example_ to something that actually makes sense.

4. Add a new entry to `/etc/fstab` for automounting:

   `/dev/xvdf /newvolume ext4 defaults,nofail 0 0`

   Tab delimited btw.

5. Mount all drives listed in `/etc/fstab` from before: `sudo mount -a`
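One caveat worth adding (an assumption, not part of the runbook itself): device names like `/dev/xvdf` can move around across reboots, so fstab entries keyed by UUID are more robust:

```sh
# Grab the filesystem UUID to use in /etc/fstab
sudo blkid /dev/xvdf
# then: UUID=<uuid-from-blkid> /newvolume ext4 defaults,nofail 0 0
```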