Compare commits
9 Commits
f5f670e5f2...fix/ansibl
| Author | SHA1 | Date |
|---|---|---|
|  | 510baa7f94 |  |
|  | 088846cad9 |  |
|  | 1be3a8e588 |  |
|  | da580eb7d2 |  |
|  | a8d7c01efe |  |
|  | f2c4506245 |  |
|  | ac11487feb |  |
|  | ee23406f49 |  |
|  | 6e4982fffd |  |
62  infra/vultr-kubernetes/admin-services.tf  Normal file
@@ -0,0 +1,62 @@
resource kubernetes_namespace admin-servers {
    count = length(var.admin_services.configs) > 0 ? 1 : 0
    metadata {
        name = var.admin_services.namespace
    }
}

resource kubernetes_pod admin {
    for_each = var.admin_services.configs

    metadata {
        name = each.key
        namespace = var.admin_services.namespace
        labels = {
            app = each.key
        }
    }
    spec {
        node_selector = {
            NodeType = var.admin_services.namespace
        }
        container {
            image = each.value.image
            name = coalesce(each.value.name, each.key)
            resources {
                limits = {
                    cpu = each.value.cpu
                    memory = each.value.mem
                }
            }
            port {
                container_port = each.value.port.internal
                protocol = coalesce(each.value.proto, "TCP")
            }
        }
    }
}

resource kubernetes_service admin {
    for_each = var.admin_services.configs
    metadata {
        name = each.key
        namespace = var.admin_services.namespace
        labels = {
            app = each.key
        }
    }
    # TODO: don't make these NodePorts since we're gonna want them
    # to be purely internal to the Cluster.
    # WHY? Because we want to keep dashboards as unexposed as possible
    spec {
        selector = {
            app = each.key
        }
        port {
            target_port = each.value.port.internal
            port = each.value.port.expose
        }
        type = "NodePort"
    }
}
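As a review aid: a quick post-apply sanity check of these resources. This is a minimal sketch, assuming kubectl is already pointed at the new cluster and `admin_services.configs` has at least one entry.

```sh
# Pods and their NodePort services land in the admin namespace
kubectl get pods -n admin-services
kubectl get svc -n admin-services -o wide
```

Once the TODO above lands and these stop being NodePorts, the `svc` output should show `ClusterIP` instead.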
@@ -6,15 +6,25 @@ resource vultr_kubernetes athens {
     # once the cluster is up, we should comment this out again
     # enable_firewall = true
     node_pools {
         # how many nodes do we want in this pool
         node_quantity = 1
-        plan = var.cluster.metapool.plan
-        label = var.cluster.label
-        min_nodes = var.cluster.metapool.min
-        max_nodes = var.cluster.metapool.max
+        plan = var.cluster.pools["meta"].plan
+        label = var.admin_services.namespace
+        min_nodes = var.cluster.pools["meta"].min
+        max_nodes = var.cluster.pools["meta"].max
+        # tag = var.admin_services.namespace
     }
 }

+resource vultr_kubernetes_node_pools games {
+    cluster_id = vultr_kubernetes.athens.id
+    node_quantity = var.cluster.pools["games"].min
+    plan = var.cluster.pools["games"].plan
+    label = var.game_servers.namespace
+    min_nodes = var.cluster.pools["games"].min
+    max_nodes = var.cluster.pools["games"].max
+    tag = var.admin_services.namespace
+}

 output k8s_config {
     value = vultr_kubernetes.athens.kube_config
     sensitive = true
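Because `k8s_config` is marked `sensitive`, Terraform redacts it in plan/apply output. A sketch for pulling it locally, assuming the stack has been applied and that the Vultr provider returns the kubeconfig base64-encoded (as its docs describe):

```sh
# Decode the kubeconfig and point kubectl at the new cluster
terraform output -raw k8s_config | base64 -d > athens-kubeconfig.yaml
export KUBECONFIG="$PWD/athens-kubeconfig.yaml"
kubectl get nodes
```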
@@ -17,3 +17,16 @@ resource vultr_firewall_rule game-server-inbound {
     subnet_size = 0
     port = each.value.port.expose
 }

+resource vultr_firewall_rule admin-service-inbound {
+    for_each = var.admin_services.configs
+    firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+    protocol = "tcp"
+    ip_type = "v4"
+    subnet = "0.0.0.0"
+    subnet_size = 0
+    notes = each.value.port.notes
+    port = each.value.port.expose
+}
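A hedged way to confirm a rule actually opened its port, using port 30800 from the commented-out `health` example in the tfvars further down (assumes you know a node's public IP; `NODE_PUBLIC_IP` here is a placeholder):

```sh
# Expect a successful TCP connect if the firewall rule and NodePort line up
nc -vz "$NODE_PUBLIC_IP" 30800
```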
@@ -1,4 +1,5 @@
 resource kubernetes_namespace game-servers {
+    count = length(var.game_servers.configs) > 0 ? 1 : 0
     metadata {
         name = var.game_servers.namespace
     }
@@ -28,10 +29,6 @@ resource kubernetes_pod game {
             port {
                 container_port = each.value.port.internal
                 protocol = coalesce(each.value.proto, "TCP")
             }
-            env {
-                name = "EULA"
-                value = "TRUE"
-            }
         }
     }
 }
@@ -25,12 +25,12 @@ variable cluster {
         region = string
         label = string
         version = string
-        metapool = object({
+        pools = map(object({
             plan = string
             autoscale = bool
             min = number
             max = number
-        })
+        }))
     })
 }
@@ -38,17 +38,34 @@ variable game_servers {
     type = object({
         namespace = string
         configs = map(object({
            name = optional(string)
            image = string
            cpu = string
            mem = string
            port = object({
                internal = number
                expose = number
            })
            proto = optional(string)
         }))
     })
 }

+variable admin_services {
+    type = object({
+        namespace = string
+        configs = map(object({
+            name = string
+            image = string
+            cpu = string
+            mem = string
+            port = object({
+                notes = optional(string)
+                internal = number
+                expose = number
+            })
+            proto = optional(string)
+        }))
+    })
+}
@@ -2,11 +2,19 @@ cluster = {
     region = "lax"
     label = "athens-cluster"
     version = "v1.31.2+1"
-    metapool = {
-        plan = "vc2-1c-2gb"
-        autoscale = true
-        min = 1
-        max = 2
+    pools = {
+        meta = {
+            plan = "vc2-1c-2gb"
+            autoscale = true
+            min = 1
+            max = 2
+        }
+        games = {
+            plan = "vc2-1c-2gb"
+            autoscale = true
+            min = 1
+            max = 3
+        }
     }
 }
@@ -24,3 +32,20 @@ game_servers = {
         # }
     }
 }

+admin_services = {
+    namespace = "admin-services"
+    configs = {
+        # health = {
+        #     image = "nginx:latest"
+        #     name = "health"
+        #     cpu = "200m"
+        #     mem = "64Mi"
+        #     port = {
+        #         notes = "Basic nginx sanity check service"
+        #         expose = 30800
+        #         internal = 80
+        #     }
+        # }
+    }
+}
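Since this change reshapes `cluster.metapool` into `cluster.pools`, it's worth checking locally that the tfvars still match the variable types. A sketch, assuming this runs from the `infra/vultr-kubernetes/` directory with Vultr credentials in the environment:

```sh
terraform init
terraform validate
# terraform.tfvars is auto-loaded, so a plain plan exercises the new shapes
terraform plan
```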
3  playbooks/inventory.yaml  Normal file
@@ -0,0 +1,3 @@
static-web:
  hosts:
    shockrah.xyz:
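A minimal smoke test of the new inventory, assuming SSH access to shockrah.xyz is already in place:

```sh
# Confirm the static-web group resolves and the host answers
ansible -i playbooks/inventory.yaml static-web -m ping
```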
9  playbooks/manual-prerequisites.yaml  Normal file
@@ -0,0 +1,9 @@
---
- name: Pre Pipeline Playbook for Static Hosts
  hosts: static-web
  remote_user: root
  tasks:
    - name: Import manual setup steps
      ansible.builtin.import_role:
        name: static
        tasks_from: setup-webadmin.yaml
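This playbook runs as root on purpose: it bootstraps the webadmin user before anything else can log in as webadmin. A sketch of the one-time invocation per fresh host:

```sh
# Run once, before static-setup.yaml is usable
ansible-playbook -i playbooks/inventory.yaml playbooks/manual-prerequisites.yaml
```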
5  playbooks/readme.md  Normal file
@@ -0,0 +1,5 @@
# What is this

Here be the Ansible-based workflows that we use to keep things like the static
hosts properly set up with all the resources they need to host the
services we intend to host.
8  playbooks/static-setup.yaml  Normal file
@@ -0,0 +1,8 @@
---
- name: Static Host Maintenance and Setup
  hosts: static-web
  remote_user: webadmin
  tasks:
    - name: Import static host role
      ansible.builtin.import_role:
        name: static
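Once the prerequisites playbook has created webadmin, the recurring setup runs unprivileged over SSH; a sketch:

```sh
ansible-playbook -i playbooks/inventory.yaml playbooks/static-setup.yaml
```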
0  playbooks/static/files/.gitkeep  Normal file
0  playbooks/static/handlers/.gitkeep  Normal file
5  playbooks/static/handlers/main.yaml  Normal file
@@ -0,0 +1,5 @@
- name: Restart SSH
  become: true
  ansible.builtin.systemd:
    name: sshd
    state: restarted
0  playbooks/static/tasks/.gitkeep  Normal file
40  playbooks/static/tasks/main.yaml  Normal file
@@ -0,0 +1,40 @@
# Things that we definitely want to have are the following
# docker docker-compose python(latest) certbot
- name: Install docker and base tooling
  become: true
  block:
    - name: Install base dependencies
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - ca-certificates
          - curl
          - lsb-release
    - name: Setup keyring directory
      ansible.builtin.command:
        cmd: "install -m 0755 -d {{ static_keyring_dir }}"
        creates: "{{ static_keyring_dir }}"
    - name: Download the docker GPG key
      ansible.builtin.get_url:
        url: "{{ static_docker_ubuntu }}/gpg"
        dest: "{{ static_keyring_dir }}/docker.asc"
        mode: "0644"
    - name: Ensure docker.list is present
      vars:
        key_path: "{{ static_keyring_dir }}/docker.asc"
        repo: "{{ static_docker_ubuntu }}"
        os_codename: jammy
      ansible.builtin.template:
        src: docker.list
        dest: "{{ static_apt_sources_dir }}/docker.list"
        mode: "0644"
    - name: Install docker and python packages
      ansible.builtin.apt:
        update_cache: true
        pkg:
          - docker-ce
          - docker-ce-cli
          - containerd.io
          - docker-buildx-plugin
          - docker-compose-plugin
          - python3
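A hedged post-run check that the packages came from Docker's repo rather than Ubuntu's archive (assumes the play has completed on the host):

```sh
# docker-ce should resolve from download.docker.com
apt-cache policy docker-ce
docker --version
docker compose version
```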
43  playbooks/static/tasks/setup-webadmin.yaml  Normal file
@@ -0,0 +1,43 @@
- name: Ensure sudo is available
  ansible.builtin.apt:
    state: present
    update_cache: true
    pkg:
      - sudo
      - zsh
- name: Create webadmin user
  ansible.builtin.user:
    name: webadmin
    state: present
    shell: /bin/zsh
    groups:
      - nginx
    append: true
- name: Copy webadmin public key
  ansible.posix.authorized_key:
    user: webadmin
    state: present
    key: "{{ lookup('file', 'files/webadmin.pem.pub') }}"
- name: Add webadmin to sudoers
  ansible.builtin.copy:
    dest: "/etc/sudoers.d/webadmin"
    content: "webadmin ALL=(ALL) NOPASSWD: ALL"
    mode: "0644"
    owner: root
    group: root
- name: Disable Password Authentication
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PasswordAuthentication no
    state: present
    backup: true
  notify:
    - Restart SSH
- name: Disable root login
  ansible.builtin.lineinfile:
    dest: /etc/ssh/sshd_config
    line: PermitRootLogin no
    state: present
    backup: true
  notify:
    - Restart SSH
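Worth verifying the SSH lockdown from a second terminal before closing the working session. A sketch; the local private-key path `webadmin.pem` is an assumption (only the public half is in the repo):

```sh
# Key-based login as webadmin should work, and sudo without a password
ssh -i webadmin.pem webadmin@shockrah.xyz 'sudo whoami'
# Password auth and root login should now be refused
ssh -o PreferredAuthentications=password webadmin@shockrah.xyz
ssh root@shockrah.xyz
```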
0  playbooks/static/templates/.gitkeep  Normal file
1  playbooks/static/templates/docker.list  Normal file
@@ -0,0 +1 @@
deb [arch=amd64 signed-by={{ key_path }}] {{ repo }} {{ os_codename }} stable
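For reference, with the task vars above and the role vars below, the template should render roughly like this on the host (`jammy` comes from the hardcoded `os_codename` in tasks/main.yaml):

```sh
cat /etc/apt/sources.list.d/docker.list
# deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu jammy stable
```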
0  playbooks/static/vars/.gitkeep  Normal file
4  playbooks/static/vars/main.yaml  Normal file
@@ -0,0 +1,4 @@
static_keyring_dir: /etc/apt/keyrings
static_docker_ubuntu: https://download.docker.com/linux/ubuntu
static_apt_sources_dir: /etc/apt/sources.list.d
static_codename: jammy
@@ -1,45 +0,0 @@
# What this covers

The creation of Atlas as it happened, in order

## Commands Ran

Once the infra was provisioned and verified to be configured by Terraform correctly,
we move on to the following

```sh
# Setup the machine to run docker
ansible-playbook -i hosts.ini atlas/init/system-deps.yml

# Second we copy over the contents of Alpha's mounted docker volumes
ansible-playbook -i hosts.ini atlas/init/perma-mount-drives.yml

# Next we copy over the data that we want to migrate ( if any )
ansible-playbook -i hosts.ini -e filebrowser=/path -e clippable=/path atlas/init/migrate-clips-files.yml

# Setup the services on the host that we want to run
ansible-playbook -i hosts.ini atlas/init/setup-containers.yml

# Next we put up the reverse proxy (nginx)
ansible-playbook -i hosts.ini atlas/init/setup-reverse-proxy.yml

# Finally we add TLS on top of nginx and we're done
ansible-playbook -i hosts.ini atlas/init/setup-certbot.yml
```

Maintenance should be straightforward for this machine, as TLS is automatically
renewed every 3 months by a cron job. We can manually update the certs, however,
if we really want to. They also don't require any more manual variable injection
like Alpha did, as the only thing protected was `dev@shockrah.xyz`, which is at
this point becoming semi-public. This means that while it is associated with code,
it is more of a _business e-mail_, so it can be placed in this repository with
very little concern.

System updates are now also to be fetched with:

```sh
ansible-playbook -i hosts.ini atlas/maintain/analyze-system-deps.yml
```

This performs purely read operations and does not affect the state of the
machine.
@@ -1,33 +0,0 @@
# Mounting an attached drive

Assumptions:

* New drive is attached (in AWS) and detected in software.
  Ideally the attachment is made through terraform.

## Mounting Instructions (Step-by-Step)

1. Verify the drive does not already have data: `sudo file -s /dev/xvdf`

Should return `data` if it's ok. Otherwise we're probably looking at the wrong
drive.

2. Create the filesystem on the new empty drive: `sudo mkfs -t ext4 /dev/xvdf`

3. Create a mountpoint somewhere in order to actually use the drive:
`sudo mkdir /mnt/example`.

Change _example_ to something that actually makes sense.

4. Add a new entry to /etc/fstab for automounting

`/dev/xvdf /newvolume ext4 defaults,nofail 0 0`

Tab delimited btw.

5. Mount all drives listed in `/etc/fstab` from before: `sudo mount -a`