Compare commits: master...fix/ansibl (5 commits)

Commits
- 510baa7f94
- 088846cad9
- 1be3a8e588
- da580eb7d2
- a8d7c01efe
@@ -10,6 +10,6 @@ jobs:
    steps:
      - name: Checkout repo content
        uses: actions/checkout@v4
      - run: ansible-lint -c linter.yaml
      - run: ansible-lint
        working-directory: ansible/
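The updated step appears to drop the custom `-c linter.yaml` flag and instead run the linter from inside the `ansible/` directory. Locally that is roughly the following (a sketch, assuming `ansible-lint` is installed):

```sh
cd ansible/
ansible-lint    # lint the playbooks/roles in this directory with default settings
```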
1  .gitignore  vendored
@@ -21,4 +21,3 @@ docker/beta/shockrah.xyz/
docker/beta/resume.shockrah.xyz/
k8s/config.yaml
infra/**/tfplan
.ansible/
@@ -1,3 +0,0 @@
nigel:
  hosts:
    nigel.local:

@@ -1,4 +0,0 @@
---
skip_list:
  - role-name
  - var-naming[no-role-prefix]
@@ -1,28 +0,0 @@
# This playbook is meant to be a oneshot that is run manually on the dev box.
# The rest of the role work is meant to be run as the admin user that
# this playbook creates for us.
---
- name: Setup local admin user with a fresh ubuntu host
  hosts: nigel.local
  remote_user: nigel
  vars:
    admin:
      username: nigel
  tasks:
    - name: Copy the nigel admin key
      ansible.posix.authorized_key:
        user: "{{ admin.username }}"
        state: present
        key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
    - name: Prevent password based logins
      become: true
      ansible.builtin.lineinfile:
        dest: /etc/ssh/sshd_config
        line: PasswordAuthentication no
        state: present
        backup: true
    - name: Restart SSH Daemon
      become: true
      ansible.builtin.service:
        name: ssh
        state: restarted
@@ -1,12 +0,0 @@
---
- name: Setup bare metal requirements for nomad
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Setup basic role on nigel
      tags:
        - setup
        - nomad
        - volumes
      ansible.builtin.include_role:
        name: local-server-head

@@ -1,8 +0,0 @@
---
- name: Setup host as a reverse proxy
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Apply reverse proxy role
      ansible.builtin.include_role:
        name: proxy

@@ -1 +0,0 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable
@@ -1,41 +0,0 @@
- name: Ensure we have basic updated packages setting up docker
  ansible.builtin.apt:
    name: "{{ item }}"
    update_cache: true
  loop:
    - ca-certificates
    - curl
- name: Running install on the keyrings directory
  ansible.builtin.command:
    cmd: install -m 0755 -d /etc/apt/keyrings
  register: install
  changed_when: install.rc == 0
- name: Fetch Docker GPG Key
  vars:
    keylink: https://download.docker.com/linux/ubuntu/gpg
  ansible.builtin.get_url:
    url: "{{ keylink }}"
    dest: /etc/apt/keyrings/docker.asc
    mode: "0644"
- name: Add repo to apt sources
  ansible.builtin.copy:
    src: docker.list
    dest: /etc/apt/sources.list.d/docker.list
    mode: "0644"
- name: Update Apt cache with latest docker.list packages
  ansible.builtin.apt:
    update_cache: true
- name: Ensure all docker packages are updated to the latest versions
  ansible.builtin.apt:
    name: "{{ item }}"
  loop:
    - docker-ce
    - docker-ce-cli
    - containerd.io
    - docker-buildx-plugin
    - docker-compose-plugin
- name: Verify that the docker components are installed properly
  ansible.builtin.command:
    cmd: docker run hello-world
  register: docker
  changed_when: docker.rc == 0
@@ -1,25 +0,0 @@
- name: Ensure nigel can use sudo without password
  become: true
  tags:
    - setup
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    state: present
    line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
  tags:
    - setup
  ansible.builtin.include_tasks:
    file: ensure-docker-basic.yaml
    apply:
      become: true
      tags:
        - setup
- name: Run through nomad installation steps
  tags: nomad
  ansible.builtin.include_tasks:
    file: nomad.yaml
    apply:
      become: true
      tags:
        - nomad
@@ -1,55 +0,0 @@
- name: Ensure prerequisite packages are installed
  ansible.builtin.apt:
    pkg:
      - wget
      - gpg
      - coreutils
    update_cache: true
- name: Hashicorp repo setup
  vars:
    keypath: /usr/share/keyrings/hashicorp-archive-keyring.gpg
    gpgpath: /tmp/hashicorp.gpg
  block:
    - name: Download the hashicorp GPG Key
      ansible.builtin.get_url:
        url: https://apt.releases.hashicorp.com/gpg
        dest: "{{ gpgpath }}"
        mode: "0755"
    - name: Dearmor the hashicorp gpg key
      ansible.builtin.command:
        cmd: "gpg --dearmor --yes -o {{ keypath }} {{ gpgpath }}"
      register: gpg
      changed_when: gpg.rc == 0
    - name: Add the hashicorp linux repo
      vars:
        keyfile: "{{ keypath }}"
      ansible.builtin.template:
        src: hashicorp.list
        dest: /etc/apt/sources.list.d/hashicorp.list
        mode: "0644"
    - name: Update apt repo cache
      ansible.builtin.apt:
        update_cache: true
    - name: Install consul
      ansible.builtin.apt:
        name: consul
    - name: Install nomad package
      ansible.builtin.apt:
        pkg: nomad
    - name: Copy in the consul configuration
      vars:
        ip: "{{ ansible_default_ipv4['address'] }}"
      ansible.builtin.template:
        src: consul.hcl
        dest: /etc/consul.d/consul.hcl
        mode: "0644"
    - name: Start nomad
      ansible.builtin.systemd_service:
        name: nomad
        state: started
        enabled: true
    - name: Make sure the consul service is NOT available
      ansible.builtin.systemd_service:
        name: consul
        state: stopped
        enabled: true
@@ -1,12 +0,0 @@
bind_addr = "{{ ip }}"
advertise_addr = "{{ ip }}"
bootstrap = true
bootstrap_expect = 1
client_addr = "{{ ip }}"
server = true
data_dir = "/opt/consul"

ui_config {
  enabled = true
}

@@ -1 +0,0 @@
deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main
@@ -1,18 +0,0 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"

server {
  enabled = true
  bootstrap_expect = 1
}

client {
  enabled = true
  servers = ["127.0.0.1"]
}

host_volume "registry" {
  path = "/opt/volumes/registry"
  read_only = false
}
@@ -1,10 +0,0 @@
- name: Ensure the root data directory is present
  ansible.builtin.file:
    path: "{{ nomad.volumes.root }}"
    state: directory
    mode: "0755"
- name: Ensure registry volume is present
  ansible.builtin.file:
    path: "{{ nomad.volumes.registry }}"
    state: directory
    mode: "0755"

@@ -1,4 +0,0 @@
nomad:
  volumes:
    root: /opt/volumes
    registry: /opt/volumes/ncr
@@ -1,15 +0,0 @@
127.0.0.1 localhost
127.0.1.1 nigel

# Our own dns stuff
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local

# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters
@@ -1,6 +0,0 @@
server {
    server_name ncr.nigel.local;
    location / {
        proxy_pass http://localhost:5000;
    }
}

@@ -1,25 +0,0 @@
server {
    server_name nomad.nigel.local;
    location / {
        proxy_pass http://nomad-ws;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        proxy_read_timeout 319s;

        # This is for log streaming requests
        proxy_buffering off;

        # Upgrade and Connection headers for upgrading to websockets
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        proxy_set_header Origin "${scheme}://${proxy_host}";
    }
}

upstream nomad-ws {
    ip_hash;
    server nomad.nigel.local:4646;
}
@@ -1,28 +0,0 @@
- name: Reverse proxy role configuration
  become: true
  block:
    - name: Ensure /etc/hosts are up to date
      ansible.builtin.copy:
        dest: /etc/hosts
        src: host-file
        mode: "0644"
    - name: Ensure nginx is setup as latest
      ansible.builtin.apt:
        name: nginx
    - name: Copy the nomad.conf to available configurations
      ansible.builtin.copy:
        src: "{{ item }}"
        dest: "/etc/nginx/sites-available/{{ item }}"
        mode: "0644"
      loop: "{{ proxy_nginx_configs }}"
    - name: Link the nomad.conf to sites-enabled
      ansible.builtin.file:
        path: "/etc/nginx/sites-enabled/{{ item }}"
        state: link
        src: "/etc/nginx/sites-available/{{ item }}"
        mode: "0644"
      loop: "{{ proxy_nginx_configs }}"
    - name: Restart nginx
      ansible.builtin.systemd_service:
        name: nginx
        state: restarted

@@ -1,3 +0,0 @@
proxy_nginx_configs:
  - nomad.conf
  - ncr.conf
23  ansible/scripts/pull-down-s3.sh  Normal file
@@ -0,0 +1,23 @@
#!/bin/bash

set -e

bucket="$1"
s3env=/opt/nginx/s3.env

[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1

[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env

pull() {
    aws s3 sync s3://$bucket /opt/nginx/$bucket
}

case $bucket in
    resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
    *) echo "Invalid bucket name" && exit 1 ;;
esac
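A usage sketch for the new sync script (assuming it runs on the web host, that `/opt/nginx/s3.env` already holds the AWS credentials, and that the AWS CLI is installed; the bucket argument must be one of the three names allowed by the case statement):

```sh
# Pull the static content for shockrah.xyz into /opt/nginx/shockrah.xyz
./ansible/scripts/pull-down-s3.sh shockrah.xyz
```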
40  infra/containers/docker-compose.yaml  Normal file
@@ -0,0 +1,40 @@
networks:
  gitea:
    external: false

services:
  gitea:
    image: gitea/gitea:latest-rootless
    container_name: gitea
    environment:
      - USER_UID=1000
      - USER_GID=1000
    restart: always
    networks:
      - gitea
    volumes:
      - /opt/containers/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "2222:22"
  gitea-runner:
    image: gitea/act_runner:nightly
    container_name: gitea-runner
    restart: always
    networks:
      - gitea
    volumes:
      - /opt/containers/gitea_runner/
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - GITEA_INSTANCE_URL=https://git.shockrah.xyz
      - GITEA_RUNNER_NAME=gitea-main
      - GITEA_RUNNER_LABELS=gitea-main
      - GITEA_RUNNER_REGISTRATION_TOKEN=${token}
29  infra/containers/readme.md  Normal file
@@ -0,0 +1,29 @@
What is this
============

This directory holds the setup for the containers that run on the main host.
All of these images are based on images published by other projects.

docker-compose.yaml
===================

Services that are more or less "special" go here, since most of what runs on
the main host is basically just static HTML websites.

Services & Containers
=====================

| Service    | Docker Image Used           |
|------------|-----------------------------|
| Gitea      | gitea/gitea:latest-rootless |
| Act Runner | gitea/act_runner:nightly    |

Why the services above?
=======================

The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" offerings. I have no issue with GitHub/GitLab,
I just like being able to host my own stuff when possible :smiley:
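A minimal usage sketch for bringing the stack up (assuming Docker Compose v2 on the host, and that `token` is a runner registration token generated from the Gitea admin UI, since the compose file interpolates `${token}`):

```sh
cd infra/containers
# Starts gitea and the act runner on the shared "gitea" network
token=<registration-token> docker compose up -d
```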
34  infra/dns/build.sh  Normal file
@@ -0,0 +1,34 @@
#!/bin/bash

set -e

opt=$1
plan=tfplan

build_plan() {
    echo Generating plan
    set -x
    terraform plan -var-file variables.tfvars -input=false -out $plan
}

deploy_plan() {
    terraform apply $plan
}

init() {
    terraform init
}

help_prompt() {
    cat <<- EOF
Options: plan deploy help
EOF
}

# Default to building a plan
source ./secrets.sh
case $opt in
    plan) build_plan;;
    deploy) deploy_plan;;
    *) help_prompt;;
esac
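Typical usage is roughly the following (a sketch, assuming `terraform init` has already been run and that `secrets.sh` and `variables.tfvars` exist next to the script):

```sh
cd infra/dns
./build.sh plan     # writes the plan to ./tfplan
./build.sh deploy   # applies the previously generated ./tfplan
./build.sh help     # any other argument prints the options
```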
@@ -37,7 +37,6 @@ locals {
    { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "lmao.shockrah.xyz", records = [ "207.246.107.99" ] },
  ]
}
@@ -1,46 +0,0 @@
# Nigel's Container Registry
job "ncr" {
  type = "service"

  group "ncr" {
    count = 1
    network {
      port "docker" {
        static = 5000
      }
    }

    service {
      name = "ncr"
      port = "docker"
      provider = "nomad"
    }

    volume "container_images" {
      type = "host"
      read_only = false
      source = "registry"
    }

    restart {
      attempts = 10
      interval = "5m"
      delay = "30s"
      mode = "delay"
    }

    task "ncr" {
      driver = "docker"

      volume_mount {
        volume = "container_images"
        destination = "/registry/data"
        read_only = false
      }
      config {
        image = "registry:latest"
        ports = [ "docker" ]
      }
    }
  }
}
@@ -1,30 +0,0 @@
# This 'service' job is just a simple nginx container that lives here as a kind of sanity check
# PORT: 8080
# DNS : sanity.nigel.local
job "health" {
  type = "service"

  group "health" {
    count = 1
    network {
      port "http" {
        static = 8080
      }
    }

    service {
      name = "health-svc"
      port = "http"
      provider = "nomad"
    }

    task "health-setup" {
      driver = "docker"

      config {
        image = "shockrah/sanity:latest"
        ports = [ "http" ]
      }
    }
  }
}
@@ -1,28 +0,0 @@
resource tls_private_key tarpit {
  algorithm = "RSA"
  rsa_bits = 4096
}

resource vultr_ssh_key tarpit {
  name = "tarpit_ssh_key"
  ssh_key = chomp(tls_private_key.tarpit.public_key_openssh)
}

resource vultr_instance tarpit {
  # Core configuration
  plan = var.host.plan
  region = var.host.region
  os_id = var.host.os
  enable_ipv6 = true

  ssh_key_ids = [ vultr_ssh_key.host.id ]
  firewall_group_id = vultr_firewall_group.host.id
  label = "Tarpit"
}

output tarpit_ssh_key {
  sensitive = true
  value = tls_private_key.host.private_key_pem
}
62  infra/vultr-kubernetes/admin-services.tf  Normal file
@@ -0,0 +1,62 @@
resource kubernetes_namespace admin-servers {
  count = length(var.admin_services.configs) > 0 ? 1 : 0
  metadata {
    name = var.admin_services.namespace
  }
}

resource kubernetes_pod admin {
  for_each = var.admin_services.configs

  metadata {
    name = each.key
    namespace = var.admin_services.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    node_selector = {
      NodeType = var.admin_services.namespace
    }
    container {
      image = each.value.image
      name = coalesce(each.value.name, each.key)
      resources {
        limits = {
          cpu = each.value.cpu
          memory = each.value.mem
        }
      }
      port {
        container_port = each.value.port.internal
        protocol = coalesce(each.value.proto, "TCP")
      }
    }
  }
}

resource kubernetes_service admin {
  for_each = var.admin_services.configs
  metadata {
    name = each.key
    namespace = var.admin_services.namespace
    labels = {
      app = each.key
    }
  }
  # TODO: don't make these NodePorts since we're gonna want them
  # to be purely internal to the Cluster.
  # WHY? Because we want to keep dashboards as unexposed as possible
  spec {
    selector = {
      app = each.key
    }
    port {
      target_port = each.value.port.internal
      port = each.value.port.expose
    }
    type = "NodePort"
  }
}
@@ -9,15 +9,15 @@ terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      version = "5.98.0"
      version = "~> 5.0"
    }
    vultr = {
      source = "vultr/vultr"
      version = "2.26.0"
      version = "2.22.1"
    }
    kubernetes = {
      source = "hashicorp/kubernetes"
      version = "2.37.1"
      version = "2.34.0"
    }
  }
}
@@ -1,18 +1,30 @@
resource vultr_kubernetes athens {
  region = var.cluster.region
  region = var.cluster.region
  version = var.cluster.version
  label = var.cluster.label
  vpc_id = vultr_vpc.athens.id

  label = var.cluster.label
  # BUG: only have this set when creating the resource for the first time
  # once the cluster is up, we should comment this out again
  # enable_firewall = true
  node_pools {
    node_quantity = var.cluster.pools["main"].min_nodes
    plan = var.cluster.pools["main"].plan
    label = var.cluster.pools["main"].label
    min_nodes = var.cluster.pools["main"].min_nodes
    max_nodes = var.cluster.pools["main"].max_nodes
    node_quantity = 1
    plan = var.cluster.pools["meta"].plan
    label = var.admin_services.namespace
    min_nodes = var.cluster.pools["meta"].min
    max_nodes = var.cluster.pools["meta"].max
    # tag = var.admin_services.namespace
  }
}

resource vultr_kubernetes_node_pools games {
  cluster_id = vultr_kubernetes.athens.id
  node_quantity = var.cluster.pools["games"].min
  plan = var.cluster.pools["games"].plan
  label = var.game_servers.namespace
  min_nodes = var.cluster.pools["games"].min
  max_nodes = var.cluster.pools["games"].max
  tag = var.admin_services.namespace
}

output k8s_config {
  value = vultr_kubernetes.athens.kube_config
  sensitive = true
4  infra/vultr-kubernetes/dev/.gitignore  vendored  Normal file
@@ -0,0 +1,4 @@
# created by virtualenv automatically
bin/
lib/
51  infra/vultr-kubernetes/dev/find-server.py  Normal file
@@ -0,0 +1,51 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config


def get_args() -> Namespace:
    parser = ArgumentParser(
        prog="Cluster Search Thing",
        description="General utility for finding resources for game server bot"
    )
    games = {"reflex", "minecraft"}
    parser.add_argument('-g', '--game', required=False, choices=games)

    admin = {"health"}
    parser.add_argument('-a', '--admin', required=False, choices=admin)
    return parser.parse_args()


def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
    # Load the kubeconfig that Terraform exports for the cluster
    config.load_kube_config(config_path)
    return client.CoreV1Api()


def get_admin_service_details(args: Namespace, api: client.api.core_v1_api.CoreV1Api):
    print('admin thing requested', args.admin)


def get_game_server_ip(args: Namespace, api: client.api.core_v1_api.CoreV1Api):
    pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
    node_name = pods.items[0].spec.node_name

    services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
    port = services.items[0].spec.ports[0].port

    # Collect the external IPs of the node that hosts the pod (container)
    # we actually care about, since these pods only run one container each
    node = next(n for n in api.list_node().items if n.metadata.name == node_name)
    node_ips = [a for a in node.status.addresses if a.type == 'ExternalIP']
    ipv4 = next(a.address for a in node_ips if ':' not in a.address)
    ipv6 = next(a.address for a in node_ips if ':' in a.address)

    print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')


if __name__ == '__main__':
    args = get_args()
    api = k8s_api('../config.yaml')

    if args.game:
        get_game_server_ip(args, api)

    if args.admin:
        get_admin_service_details(args, api)
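A hedged usage sketch (assuming the virtualenv in this directory is active and that `../config.yaml` is the kubeconfig written out from the `k8s_config` Terraform output):

```sh
cd infra/vultr-kubernetes/dev
pip install -r requirements.txt
python find-server.py --game reflex    # prints "<game> --> <ipv4>:<port> ~~> <ipv6>:<port>"
python find-server.py --admin health   # the admin lookup currently just prints a placeholder
```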
8  infra/vultr-kubernetes/dev/pyvenv.cfg  Normal file
@@ -0,0 +1,8 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3
18  infra/vultr-kubernetes/dev/requirements.txt  Normal file
@@ -0,0 +1,18 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0
@@ -6,4 +6,27 @@ resource vultr_firewall_rule web_inbound {
  subnet = "0.0.0.0"
  subnet_size = 0
  port = each.value
  }
}

resource vultr_firewall_rule game-server-inbound {
  for_each = var.game_servers.configs
  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
  protocol = "tcp"
  ip_type = "v4"
  subnet = "0.0.0.0"
  subnet_size = 0
  port = each.value.port.expose
}

resource vultr_firewall_rule admin-service-inbound {
  for_each = var.admin_services.configs
  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
  protocol = "tcp"
  ip_type = "v4"
  subnet = "0.0.0.0"
  subnet_size = 0
  notes = each.value.port.notes
  port = each.value.port.expose
}
55  infra/vultr-kubernetes/game-server.tf  Normal file
@@ -0,0 +1,55 @@
resource kubernetes_namespace game-servers {
  count = length(var.game_servers.configs) > 0 ? 1 : 0
  metadata {
    name = var.game_servers.namespace
  }
}

resource kubernetes_pod game {
  for_each = var.game_servers.configs

  metadata {
    name = each.key
    namespace = var.game_servers.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    container {
      image = each.value.image
      name = coalesce(each.value.name, each.key)
      resources {
        limits = {
          cpu = each.value.cpu
          memory = each.value.mem
        }
      }
      port {
        container_port = each.value.port.internal
        protocol = coalesce(each.value.proto, "TCP")
      }
    }
  }
}

resource kubernetes_service game {
  for_each = var.game_servers.configs
  metadata {
    name = each.key
    namespace = var.game_servers.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    selector = {
      app = each.key
    }
    port {
      target_port = each.value.port.internal
      port = each.value.port.expose
    }
    type = "NodePort"
  }
}
@@ -26,18 +26,46 @@ variable cluster {
    label = string
    version = string
    pools = map(object({
      node_quantity = number
      plan = string
      label = string
      min_nodes = number
      max_nodes = number
      tag = string
      plan = string
      autoscale = bool
      min = number
      max = number
    }))
  })
}

variable personal {
variable game_servers {
  type = object({
    namespace = string
    configs = map(object({
      name = optional(string)
      image = string
      cpu = string
      mem = string
      port = object({
        internal = number
        expose = number
      })
      proto = optional(string)
    }))
  })
}
}

variable admin_services {
  type = object({
    namespace = string
    configs = map(object({
      name = string
      image = string
      cpu = string
      mem = string
      port = object({
        notes = optional(string)
        internal = number
        expose = number
      })
      proto = optional(string)
    }))
  })
}
@@ -1,19 +1,51 @@
cluster = {
  region = "lax"
  label = "athens-cluster"
  version = "v1.33.0+1"
  version = "v1.31.2+1"
  pools = {
    main = {
      node_quantity = 1
      plan = "vc2-1c-2gb"
      label = "main"
      min_nodes = 1
      max_nodes = 2
      tag = "athens-main"
    meta = {
      plan = "vc2-1c-2gb"
      autoscale = true
      min = 1
      max = 2
    }
    games = {
      plan = "vc2-1c-2gb"
      autoscale = true
      min = 1
      max = 3
    }
  }
}

personal = {
  namespace = "athens-main"
}
game_servers = {
  namespace = "games"
  configs = {
    # minecraft = {
    #   image = "itzg/minecraft-server"
    #   cpu = "1000m"
    #   mem = "2048Mi"
    #   port = {
    #     expose = 30808
    #     internal = 80
    #   }
    # }
  }
}

admin_services = {
  namespace = "admin-services"
  configs = {
    # health = {
    #   image = "nginx:latest"
    #   name = "health"
    #   cpu = "200m"
    #   mem = "64Mi"
    #   port = {
    #     notes = "Basic nginx sanity check service"
    #     expose = 30800
    #     internal = 80
    #   }
    # }
  }
}
@@ -1,4 +0,0 @@
resource vultr_vpc athens {
  description = "Private VPC for private and personal service projects"
  region = var.cluster.region
}
3  playbooks/inventory.yaml  Normal file
@@ -0,0 +1,3 @@
static-web:
  hosts:
    shockrah.xyz:
9  playbooks/manual-prerequisites.yaml  Normal file
@@ -0,0 +1,9 @@
---
- name: Pre Pipeline Playbook for Static Hosts
  hosts: static-web
  remote_user: root
  tasks:
    - name: Import manual setup steps
      ansible.builtin.import_role:
        name: static
        tasks_from: setup-webadmin.yaml
5  playbooks/readme.md  Normal file
@@ -0,0 +1,5 @@
# What is this

Here be the Ansible-based workflows we use to keep things like the static
hosts properly set up with all the resources they need to host the
services we intend to run.
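A typical invocation sketch (an assumption based on the files in this directory; the webadmin public key is expected at `files/webadmin.pem.pub` inside the role):

```sh
cd playbooks
# One-time bootstrap as root: creates the webadmin user and hardens SSH
ansible-playbook -i inventory.yaml manual-prerequisites.yaml
# Regular maintenance afterwards, running as webadmin
ansible-playbook -i inventory.yaml static-setup.yaml
```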
8  playbooks/static-setup.yaml  Normal file
@@ -0,0 +1,8 @@
---
- name: Static Host Maintenance and Setup
  hosts: static-web
  remote_user: webadmin
  tasks:
    - name: Import static host role
      ansible.builtin.import_role:
        name: static
0  playbooks/static/handlers/.gitkeep  Normal file
5  playbooks/static/handlers/main.yaml  Normal file
@@ -0,0 +1,5 @@
- name: Restart SSH
  become: true
  ansible.builtin.systemd:
    name: sshd
    state: restarted
0  playbooks/static/tasks/.gitkeep  Normal file
40  playbooks/static/tasks/main.yaml  Normal file
@@ -0,0 +1,40 @@
# Things that we definitely want to have are the following:
# docker docker-compose python(latest) certbot
- name: Uhhh yea
  become: true
  block:
    - name: Install base dependencies
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - ca-certificates
          - curl
          - lsb-release
    - name: Setup keyring directory
      ansible.builtin.command:
        cmd: "install -m 0755 -d {{ static_keyring_dir }}"
        creates: "{{ static_keyring_dir }}"
    - name: Download the docker GPG key
      ansible.builtin.get_url:
        url: "{{ static_docker_ubuntu }}/gpg"
        dest: "{{ static_keyring_dir }}/docker.asc"
        mode: "0644"
    - name: Ensure docker.list is present
      vars:
        key_path: "{{ static_keyring_dir }}/docker.asc"
        repo: "{{ static_docker_ubuntu }}"
        os_codename: jammy
      ansible.builtin.template:
        src: docker.list
        dest: "{{ static_apt_sources_dir }}/docker.list"
        mode: "0644"
    - name: Install docker and python packages
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-buildx-plugin
          - docker-compose-plugin
          - python3
43  playbooks/static/tasks/setup-webadmin.yaml  Normal file
@@ -0,0 +1,43 @@
- name: Ensure sudo is available
  ansible.builtin.apt:
    state: present
    update_cache: true
    pkg:
      - sudo
      - zsh
- name: Create webadmin user
  ansible.builtin.user:
    name: webadmin
    state: present
    shell: /bin/zsh
    groups:
      - nginx
    append: true
- name: Copy webadmin public key
  ansible.posix.authorized_key:
    user: webadmin
    state: present
    key: "{{ lookup('file', 'files/webadmin.pem.pub') }}"
- name: Add webadmin to sudoers
  ansible.builtin.copy:
    dest: "/etc/sudoers.d/webadmin"
    content: "webadmin ALL=(ALL) NOPASSWD: ALL"
    mode: "0644"
    owner: root
    group: root
- name: Disable Password Authentication
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PasswordAuthentication no
    state: present
    backup: true
  notify:
    - Restart SSH
- name: Disable root login
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PermitRootLogin no
    state: present
    backup: true
  notify:
    - Restart SSH
0  playbooks/static/templates/.gitkeep  Normal file
1  playbooks/static/templates/docker.list  Normal file
@@ -0,0 +1 @@
deb [arch=amd64 signed-by={{ key_path }}] {{ repo }} {{ os_codename }} stable
0  playbooks/static/vars/.gitkeep  Normal file
4  playbooks/static/vars/main.yaml  Normal file
@@ -0,0 +1,4 @@
static_keyring_dir: /etc/apt/keyrings
static_docker_ubuntu: https://download.docker.com/linux/ubuntu
static_apt_sources_dir: /etc/apt/sources.list.d
static_codename: jammy
@@ -1,45 +0,0 @@
# What this covers

The creation of Atlas as it happened, in order.

## Commands Run

Once the infra was provisioned and verified to be configured correctly by Terraform,
we move on to the following:

```sh
# Setup the machine to run docker
ansible-playbook -i hosts.ini atlas/init/system-deps.yml

# Second we copy over the contents of Alpha's mounted docker volumes
ansible-playbook -i hosts.ini atlas/init/perma-mount-drives.yml

# Next we copy over the data that we want to migrate ( if any )
ansible-playbook -i hosts.ini -e filebrowser=/path -e clippable=/path atlas/init/migrate-clips-files.yml

# Setup the services on the host that we want to run
ansible-playbook -i hosts.ini atlas/init/setup-containers.yml

# Next we put up the reverse proxy (nginx)
ansible-playbook -i hosts.ini atlas/init/setup-reverse-proxy.yml

# Finally we add TLS on top of nginx and we're done
ansible-playbook -i hosts.ini atlas/init/setup-certbot.yml
```

Maintenance should be straightforward for this machine, as TLS is automatically
renewed every 3 months by a cron job. We can still renew the certs manually
if we really want to. They also don't require any more manual variable injection
like Alpha did, as the only thing protected was `dev@shockrah.xyz`, which is at
this point becoming semi-public. This means that while it is associated with code,
it is more of a _business e-mail_, so it can be placed in this repository with
very little concern.

System updates are now also to be fetched with:

```sh
ansible-playbook -i hosts.ini atlas/maintain/analyze-system-deps.yml
```

which performs purely read operations and does not affect the state of the
machine.
@@ -1,33 +0,0 @@
# Mounting an attached drive

Assumptions:

* The new drive is attached (in AWS) and detected in software.
  Ideally the attachment is made through Terraform.

## Mounting Instructions (Step-by-Step)

1. Verify the drive does not already have data on it: `sudo file -s /dev/xvdf`

   Should return `data` if it's okay. Otherwise we're probably looking at the wrong
   drive.

2. Create the filesystem on the new empty drive: `sudo mkfs -t ext4 /dev/xvdf`

3. Create a mountpoint somewhere to actually use the drive:
   `sudo mkdir /mnt/example`.

   Change _example_ to something that actually makes sense.

4. Add a new entry to /etc/fstab for automounting:

   `/dev/xvdf /newvolume ext4 defaults,nofail 0 0`

   Tab delimited btw.

5. Mount all drives listed in `/etc/fstab` from before: `sudo mount -a`
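Condensed, the steps in this (now removed) guide amount to roughly the following (a sketch; the device name and mountpoint are examples):

```sh
sudo file -s /dev/xvdf          # should print "data" for an empty drive
sudo mkfs -t ext4 /dev/xvdf     # create the filesystem
sudo mkdir /mnt/example         # create a mountpoint
echo -e "/dev/xvdf\t/mnt/example\text4\tdefaults,nofail\t0\t0" | sudo tee -a /etc/fstab
sudo mount -a                   # mount everything listed in /etc/fstab
```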