Compare commits: c50deddf53...master (131 commits)
@@ -10,6 +10,6 @@ jobs:
    steps:
      - name: Checkout repo content
        uses: actions/checkout@v4
      - run: ansible-lint
      - run: ansible-lint -c linter.yaml
        working-directory: ansible/

.gitignore (vendored, 1 line changed)
@@ -21,3 +21,4 @@ docker/beta/shockrah.xyz/
docker/beta/resume.shockrah.xyz/
k8s/config.yaml
infra/**/tfplan
.ansible/

ansible/inventory.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
nigel:
  hosts:
    nigel.local:

ansible/linter.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
---
skip_list:
  - role-name
  - var-naming[no-role-prefix]

ansible/local-setup-admin-user.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
# This playbook is meant to be a oneshot to be ran manually on the dev box
# The rest of the role stuff is meant to be ran as the admin user that
# this playbook creates for us
---
- name: Setup local admin user with a fresh ubuntu host
  hosts: nigel.local
  remote_user: nigel
  vars:
    admin:
      username: nigel
  tasks:
    - name: Copy the nigel admin key
      ansible.posix.authorized_key:
        user: "{{ admin.username }}"
        state: present
        key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
    - name: Prevent password based logins
      become: true
      ansible.builtin.lineinfile:
        dest: /etc/ssh/sshd_config
        line: PasswordAuthentication no
        state: present
        backup: true
    - name: Restart SSH Daemon
      become: true
      ansible.builtin.service:
        name: ssh
        state: restarted
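For context on how this oneshot is meant to be used, a minimal sketch of an invocation from the ansible/ directory; the inventory file and key path come from elsewhere in this diff, everything else is assumed.

```sh
# Hypothetical one-time run of the admin-user playbook against the new host.
# Assumes the public key already exists at ~/.ssh/nigel/admin.pub on the dev box.
cd ansible/
ansible-playbook -i inventory.yaml local-setup-admin-user.yaml
```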
ansible/nomad.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
---
- name: Setup all the responsibilities of the nomad server
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Apply the nomad role
      ansible.builtin.include_role:
        name: nomad

ansible/nuc.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
---
- name: Setup bare metal requirements
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Apply the base role to the nuc
      ansible.builtin.include_role:
        name: base
    - name: Apply the k3s base role
      ansible.builtin.include_role:
        name: k3s
    - name: Apply the proxy role
      ansible.builtin.include_role:
        name: proxy

ansible/proxy.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
---
- name: Setup host as a reverse proxy
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Apply reverse proxy role
      ansible.builtin.include_role:
        name: proxy

ansible/roles/base/files/docker.list (new file, 1 line)
@@ -0,0 +1 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable

ansible/roles/base/tasks/ensure-docker-basic.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
- name: Ensure we have basic updated packages setting up docker
  ansible.builtin.apt:
    name: "{{ item }}"
    update_cache: true
  loop:
    - ca-certificates
    - curl
- name: Running install on the keyrings directory
  ansible.builtin.command:
    cmd: install -m 0755 -d /etc/apt/keyrings
  register: install
  changed_when: install.rc == 0
- name: Fetch Docker GPG Key
  vars:
    keylink: https://download.docker.com/linux/ubuntu/gpg
  ansible.builtin.get_url:
    url: "{{ keylink }}"
    dest: /etc/apt/keyrings/docker.asc
    mode: "0644"
- name: Add repo to apt sources
  ansible.builtin.copy:
    src: docker.list
    dest: /etc/apt/sources.list.d/docker.list
    mode: "0644"
- name: Update Apt cache with latest docker.list packages
  ansible.builtin.apt:
    update_cache: true
- name: Ensure all docker packages are updated to the latest versions
  ansible.builtin.apt:
    name: "{{ item }}"
  loop:
    - docker-ce
    - docker-ce-cli
    - containerd.io
    - docker-buildx-plugin
    - docker-compose-plugin
- name: Verify that the docker components are installed properly
  ansible.builtin.command:
    cmd: docker run hello-world
  register: docker
  changed_when: docker.rc == 0

ansible/roles/base/tasks/k3s.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
- name: Download the setup script
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp/k3s.sh
    mode: "0644"
- name: Run installation script
  ansible.builtin.command:
    cmd: bash /tmp/k3s.sh

ansible/roles/base/tasks/main.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
- name: Ensure nigel can use sudo without password
  become: true
  tags:
    - setup
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    state: present
    line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
  tags:
    - setup
  ansible.builtin.include_tasks:
    file: ensure-docker-basic.yaml
    apply:
      become: true
      tags:
        - setup
- name: Run through nomad removal steps
  tags: nomad
  ansible.builtin.include_tasks:
    file: nomad.yaml
    apply:
      become: true
      tags:
        - nomad
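Since the base role gates its tasks behind the `setup` and `nomad` tags, a hedged sketch of how the playbooks above might be run selectively; the inventory path is taken from this diff, the rest is assumed.

```sh
# Run only the setup-tagged tasks of the bare-metal playbook
ansible-playbook -i inventory.yaml nuc.yaml --tags setup
# Run only the nomad removal steps pulled in via include_tasks
ansible-playbook -i inventory.yaml nuc.yaml --tags nomad
```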
ansible/roles/base/templates/consul.hcl (new file, 12 lines)
@@ -0,0 +1,12 @@
bind_addr = "{{ ip }}"
advertise_addr = "{{ ip }}"
bootstrap = true
bootstrap_expect = 1
client_addr = "{{ ip }}"
server = true
data_dir = "/opt/consul"

ui_config {
  enabled = true
}

ansible/roles/base/templates/hashicorp.list (new file, 1 line)
@@ -0,0 +1 @@
deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main

ansible/roles/base/vars/main.yaml (new file, empty)

ansible/roles/k3s/tasks/main.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
- name: Download the installation script
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp
  register: install_script
- name: Run installation script
  become: true
  environment:
    INSTALL_K3S_EXEC: server
  ansible.builtin.command:
    cmd: sh {{ install_script.dest }}

ansible/roles/k3s/vars/main.yaml (new file, empty)

ansible/roles/nomad/files/nomad.hcl (new file, 24 lines)
@@ -0,0 +1,24 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"

server {
  enabled = true
  bootstrap_expect = 1
}

client {
  enabled = true
  servers = ["127.0.0.1"]
}

host_volume "registry" {
  path = "/opt/volumes/registry"
  read_only = false
}

host_volume "nfs" {
  path = "/opt/volumes/nfs"
  read_only = false
}
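A sketch of how this combined server/client config might be exercised once Nomad is installed on the host; the destination path under /etc/nomad.d is an assumption for illustration, not something this diff sets.

```sh
# Start a single-node Nomad agent with the config shipped by this role
nomad agent -config=/etc/nomad.d/nomad.hcl &
# Confirm the server bootstrapped and the client registered its host volumes
nomad server members
nomad node status
```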
ansible/roles/nomad/tasks/main.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
- name: Nomad server configuration
  become: true
  block:
    - name: Ensure the root data directory is present
      ansible.builtin.file:
        path: "{{ nomad.volumes.root }}"
        state: absent
        mode: "0755"
    - name: Ensure registry volume is present
      ansible.builtin.file:
        path: "{{ nomad.volumes.registry }}"
        state: absent
        mode: "0755"
    - name: Ensure the MinIO diretory is present
      ansible.builtin.file:
        path: "{{ nomad.volumes.nfs }}"
        state: absent
        mode: "0755"

ansible/roles/nomad/vars/main.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
nomad:
  volumes:
    root: /opt/volumes
    registry: /opt/volumes/ncr
    nfs: /opt/volumes/nfs

ansible/roles/proxy/files/host-file (new file, 15 lines)
@@ -0,0 +1,15 @@
127.0.0.1 localhost
127.0.1.1 nigel

# Our own dns stuff
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local

# The following lines are desirable for IPv6 capable hosts
::1     ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

ansible/roles/proxy/files/ncr.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
server {
    server_name ncr.nigel.local;
    location / {
        proxy_pass http://localhost:5000;
    }
}

ansible/roles/proxy/files/nomad.conf (new file, 25 lines)
@@ -0,0 +1,25 @@
server {
    server_name nomad.nigel.local;
    location / {
        proxy_pass http://nomad-ws;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        proxy_read_timeout 319s;

        # This is for log streaming requests
        proxy_buffering off;

        # Upgrade and Connection headers for upgrading to websockets
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        proxy_set_header Origin "${scheme}://${proxy_host}";
    }
}

upstream nomad-ws {
    ip_hash;
    server nomad.nigel.local:4646;
}

ansible/roles/proxy/tasks/main.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
- name: Reverse proxy role configuration
  become: true
  block:
    - name: Ensure /etc/hosts are up to date
      ansible.builtin.copy:
        dest: /etc/hosts
        src: host-file
        mode: "0644"
    - name: Ensure nginx is setup as latest
      ansible.builtin.apt:
        name: nginx
    - name: Copy the nomad.conf to available configurations
      ansible.builtin.copy:
        src: "{{ item }}"
        dest: "/etc/nginx/sites-available/{{ item }}"
        mode: "0644"
      loop: "{{ proxy_nginx_configs }}"
    - name: Link the nomad.conf to sites-enabled
      ansible.builtin.file:
        path: "/etc/nginx/sites-enabled/{{ item }}"
        state: link
        src: "/etc/nginx/sites-available/{{ item }}"
        mode: "0644"
      loop: "{{ proxy_nginx_configs }}"
    - name: Restart nginx
      ansible.builtin.systemd_service:
        name: nginx
        state: restarted

ansible/roles/proxy/vars/main.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
proxy_nginx_configs:
  - nomad.conf
  - ncr.conf
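Once the proxy role has run, a quick way one might sanity-check the two vhosts from the host itself; the hostnames come from host-file above, while the request paths are assumptions (a Docker registry behind ncr.conf, the Nomad UI behind nomad.conf).

```sh
# Validate the rendered nginx configuration before trusting the restart task
sudo nginx -t
# Exercise the vhosts defined in sites-enabled
curl -i http://ncr.nigel.local/v2/     # registry API path is an assumption
curl -i http://nomad.nigel.local/ui/   # Nomad UI proxied over the nomad-ws upstream
```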
@@ -1,23 +0,0 @@
#!/bin/bash

set -e

bucket="$1"
s3env=/opt/nginx/s3.env

[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1

[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env

pull() {
    aws s3 sync s3://$bucket /opt/nginx/$bucket
}

case $bucket in
    resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
    *) echo "Invalid bucket name" && exit 1 ;;
esac

@@ -1,40 +0,0 @@
networks:
  gitea:
    external: false

services:
  gitea:
    image: gitea/gitea:latest-rootless
    container_name: gitea
    environment:
      - USER_UID=1000
      - USER_GID=1000
    restart: always
    networks:
      - gitea
    volumes:
      - /opt/containers/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "2222:22"
  gitea-runner:
    image: gitea/act_runner:nightly
    container_name: gitea-runner
    restart: always
    networks:
      - gitea
    volumes:
      - /opt/containers/gitea_runner/
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - GITEA_INSTANCE_URL=https://git.shockrah.xyz
      - GITEA_RUNNER_NAME=gitea-main
      - GITEA_RUNNER_LABELS=gitea-main
      - GITEA_RUNNER_REGISTRATION_TOKEN=${token}

@@ -1,29 +0,0 @@
What is this
============

Here we contain scripts to build out all the containers that are run.
All of these images are based on images that are made from other projects

docker-compose.yaml
===================

Services that are more/less "special" go here since most of the stuff that is
run on the main host are basically just static html websites

Services & Containers
=====================

| Service    | Docker Image Used        |
|------------|--------------------------|
| Gitea      | gitea/gitea:latest       |
| Act Runner | gitea/act_runner:nightly |

Why the servics above?
======================

The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" services. I have no issue with Github/Gitlab
but I just like being able to host my own stuff when possible :smiley:

@@ -36,10 +36,11 @@ locals {
    },
    { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "immich.shockrah.xyz", records = [ "45.32.92.196" ] },
    { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
    # This entry will be for a mega simple website that we're gonna try and host for the lulz
    { name = "test.shockrah.xyz", records = [ "45.77.123.107" ] }
    { name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
    { name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
    { name = "code.shockrah.xyz", records = [ var.vke_lb ] },
    { name = "wiki.shockrah.xyz", records = [ var.vke_lb ] },
  ]
}

@@ -33,3 +33,11 @@ resource "aws_route53_record" "temper-tv-mx" {
    "50 fb.mail.gandi.net.",
  ]
}

resource "aws_route53_record" "temper-tv-test" {
  zone_id = aws_route53_zone.temper-tv.id
  name    = "test.temper.tv"
  type    = "A"
  ttl     = 300
  records = [ var.vke_lb ]
}

@@ -26,3 +26,7 @@ variable "vultr_host" {
  description = "IP of the temp Vultr host"
}

variable "vke_lb" {
  type        = string
  description = "IP of our VKE load balancer"
}

infra/dns/variables.tfvars (new file, 2 lines)
@@ -0,0 +1,2 @@
vultr_host = "45.32.83.83"
vke_lb = "45.32.89.101"

infra/nigel-k3s/.gitignore (new file, vendored, 1 line)
@@ -0,0 +1 @@
config.yaml

infra/nigel-k3s/health.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
              name: nginx-port
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - port: 80
      nodePort: 30808
      targetPort: nginx-port
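A hedged sketch of applying this health-check manifest to the k3s node and hitting its NodePort; the kubeconfig path is an assumption.

```sh
# Apply the nginx health-check Deployment and NodePort Service
kubectl --kubeconfig ~/.kube/nigel.yaml apply -f infra/nigel-k3s/health.yaml
# The Service exposes NodePort 30808, so the node itself should answer
curl -i http://nigel.local:30808/
```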
infra/nigel-k3s/sample-cron.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: hello
              image: busybox:1.28
              imagePullPolicy: IfNotPresent
              command:
                - /bin/sh
                - -c
                - date; echo Hello from the sample cron-container
          restartPolicy: OnFailure
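To confirm the sample CronJob actually fires every minute, something like the following should work; the kubeconfig path is again assumed.

```sh
kubectl --kubeconfig ~/.kube/nigel.yaml apply -f infra/nigel-k3s/sample-cron.yaml
kubectl --kubeconfig ~/.kube/nigel.yaml get cronjob hello
# After a minute or so the first Job spawned by the schedule should appear
kubectl --kubeconfig ~/.kube/nigel.yaml get jobs
```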
@@ -30,5 +30,7 @@ source ./secrets.sh
case $opt in
    plan) build_plan;;
    deploy) deploy_plan;;
    init) init;;
    *) help_prompt;;
esac

@@ -1,22 +0,0 @@
# Here we define the resources for our temporary immich server

resource vultr_instance immich {
  plan = var.host.plan
  region = var.host.region
  os_id = var.host.os
  enable_ipv6 = true

  # Enable backups for now since we're getting off of s3 as well at some point
  backups = "enabled"
  backups_schedule {
    type = "weekly"
    dow = var.host.backups.day
    hour = var.host.backups.hour
  }

  ssh_key_ids = [ vultr_ssh_key.immich.id ]
  firewall_group_id = vultr_firewall_group.host.id
  label = "Immich Server"
}

@@ -14,10 +14,3 @@ output vultr_key_id {
}

output immich_key {
  sensitive = true
  # value = tls_private_key.host.private_key_openssh
  value = vultr_instance.immich.default_password
}

@@ -5,18 +5,6 @@ resource tls_private_key host {

resource vultr_ssh_key host {
  name = "static_ssh_key"
  ssh_key = tls_private_key.host.public_key_openssh
  ssh_key = chomp(tls_private_key.host.public_key_openssh)
}

####################
# Immich keys #
####################
resource tls_private_key immich {
  algorithm = "RSA"
  rsa_bits = 4096
}

resource vultr_ssh_key immich {
  name = "static_ssh_key"
  ssh_key = tls_private_key.immich.public_key_openssh
}

@@ -9,11 +9,31 @@ terraform {
  required_providers {
    aws = {
      source = "hashicorp/aws"
      version = "~> 5.0"
      version = "6.27.0"
    }
    vultr = {
      source = "vultr/vultr"
      version = "2.22.1"
      version = "2.26.0"
    }
    kubernetes = {
      source = "hashicorp/kubernetes"
      version = "3.0.1"
    }
    kubectl = {
      source = "gavinbunney/kubectl"
      version = " 1.19.0"
    }
    helm = {
      source = "hashicorp/helm"
      version = "3.0.2"
    }
    tls = {
      source = "hashicorp/tls"
      version = "4.1.0"
    }
    random = {
      source = "hashicorp/random"
      version = "3.7.2"
    }
  }
}

@@ -31,4 +51,16 @@ provider aws {
  max_retries = 1
}

provider kubernetes {
  config_path = "config.yaml"
}

provider kubectl {
  config_path = "config.yaml"
}

provider helm {
  kubernetes = {
    config_path = "config.yaml"
  }
}
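Because this change pins new provider versions and adds the kubernetes, kubectl, and helm providers, the working directory's dependency lock needs refreshing before the next plan; a minimal sketch run from the module directory.

```sh
# Refresh the lock file so the new and upgraded providers resolve
terraform init -upgrade
# Confirm the pinned versions were actually selected, then plan as usual
terraform providers
terraform plan
```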
infra/vultr-kubernetes/cluster-issuer.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt
spec:
  acme:
    # The ACME server URL
    server: https://acme-v02.api.letsencrypt.org/directory
    preferredChain: "ISRG Root X1"
    # Email address used for ACME registration
    email: dev@shockrah.xyz
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt
    solvers:
      - http01:
          ingress:
            class: nginx

@@ -2,14 +2,15 @@ resource vultr_kubernetes athens {
  region = var.cluster.region
  version = var.cluster.version
  label = var.cluster.label
  enable_firewall = true
  # vpc_id = vultr_vpc.athens.id

  node_pools {
    # how many nodes do we want in this pool
    node_quantity = 1
    plan = var.cluster.pool.plan
    label = var.cluster.label
    min_nodes = var.cluster.pool.min
    max_nodes = var.cluster.pool.max
    node_quantity = var.cluster.pools["main"].min_nodes
    plan = var.cluster.pools["main"].plan
    label = var.cluster.pools["main"].label
    min_nodes = var.cluster.pools["main"].min_nodes
    max_nodes = var.cluster.pools["main"].max_nodes
    auto_scaler = true
  }
}

infra/vultr-kubernetes/data.tf (new file, 6 lines)
@@ -0,0 +1,6 @@
data vultr_kubernetes athens {
  filter {
    name = "label"
    values = [ var.cluster.label ]
  }
}

@@ -1,11 +1,10 @@
# Inbound rules for web traffic
resource vultr_firewall_rule web_inbound {
  for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
  #firewall_group_id = vultr_firewall_group.cluster.id
  protocol = "tcp"
  ip_type = "v4"
  subnet = "0.0.0.0"
  subnet_size = 0
  port = each.value
}
# resource vultr_firewall_rule web_inbound {
#   for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
#   firewall_group_id = vultr_kubernetes.athens.firewall_group_id
#   protocol = "tcp"
#   ip_type = "v4"
#   subnet = "0.0.0.0"
#   subnet_size = 0
#   port = each.value
# }

infra/vultr-kubernetes/git.tf (new file, 74 lines)
@@ -0,0 +1,74 @@
# NOTE: this is a simple deployment for demo purposes only.
# Currently it does support SSH access and lacks Gitea runners.
# However a fully working setup can be found at: https://git.shockrah.xyz
resource kubernetes_deployment gitea {
  metadata {
    name = "gitea"
    namespace = var.playground.namespace
    labels = {
      "app" = "gitea"
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        "app" = "gitea"
      }
    }
    template {
      metadata {
        labels = {
          "app" = "gitea"
        }
      }
      spec {
        container {
          name = "gitea"
          image = "gitea/gitea:latest"
          port {
            container_port = 3000
            name = "gitea-main"
          }
          port {
            container_port = 2222
            name = "gitea-ssh"
          }
          volume_mount {
            name = "gitea"
            mount_path = "/data"
          }
        }
        volume {
          name = "gitea"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim_v1.gitea.metadata[0].name
          }
        }
      }
    }
  }
}

resource kubernetes_service gitea {
  metadata {
    name = "gitea"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      "app" = "gitea"
    }
    port {
      target_port = "gitea-main"
      port = 3000
      name = "http"
    }
    port {
      target_port = "gitea-ssh"
      port = 2222
      name = "ssh"
    }
  }
}

infra/vultr-kubernetes/health.tf (new file, 47 lines)
@@ -0,0 +1,47 @@
resource kubernetes_deployment_v1 health {
  metadata {
    name = "health"
    namespace = var.playground.namespace
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        name = "health"
      }
    }
    template {
      metadata {
        labels = {
          name = "health"
        }
      }
      spec {
        container {
          name = "health"
          image = "quanhua92/whoami:latest"
          port {
            container_port = "8080"
          }
        }
      }
    }
  }
}

resource kubernetes_service_v1 health {
  metadata {
    name = "health"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      name = "health"
    }
    port {
      port = 80
      target_port = 8080
      name = "http"
    }
  }
}

infra/vultr-kubernetes/ingress-controller.tf (new file, 7 lines)
@@ -0,0 +1,7 @@
resource helm_release nginx {
  name = "ingress-nginx"
  repository = "https://kubernetes.github.io/ingress-nginx"
  chart = "ingress-nginx"
  namespace = "ingress-nginx"
  create_namespace = true
}

infra/vultr-kubernetes/ingress.tf (new file, 48 lines)
@@ -0,0 +1,48 @@
locals {
  services = {
    "code.shockrah.xyz" = kubernetes_service.gitea
    "sanity.shockrah.xyz" = kubernetes_service_v1.health
    "uptime.shockrah.xyz" = kubernetes_service.kuma
    "wiki.shockrah.xyz" = kubernetes_service.otterwiki
  }
}
resource kubernetes_ingress_v1 health {
  metadata {
    name = "health-ingress"
    namespace = var.playground.namespace
    annotations = {
      "cert-manager.io/cluster-issuer" = "letsencrypt"
      "cert-manager.io/ingress.class" = "nginx"
    }
  }
  spec {
    ingress_class_name = "nginx"
    dynamic tls {
      for_each = local.services
      content {
        hosts = [tls.key]
        secret_name = "${tls.value.metadata[0].name}-secret"
      }
    }
    dynamic "rule" {
      for_each = local.services
      content {
        host = "${rule.key}"
        http {
          path {
            path = "/"
            backend {
              service {
                name = rule.value.metadata[0].name
                port {
                  number = rule.value.spec[0].port[0].port
                }
              }
            }
          }
        }
      }
    }
  }
}
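After an apply, one might verify that the dynamic tls/rule blocks produced a rule and a TLS secret per host in local.services; a hedged sketch, with the namespace name taken from the tfvars later in this diff.

```sh
# Each host should appear on the ingress with its own TLS secret
kubectl get ingress health-ingress -n playground -o wide
# cert-manager should report Ready certificates once HTTP-01 solves for each host
kubectl get certificate -n playground
```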
infra/vultr-kubernetes/k8s/.gitignore (vendored, 1 line)
@@ -1 +0,0 @@
terraform.yaml

@@ -1,33 +0,0 @@
terraform {
  required_version = ">= 0.13"
  backend s3 {
    bucket = "project-athens"
    key = "infra/vke/k8s/state/build.tfstate"
    region = "us-west-1"
    encrypt = true
  }
  required_providers {
    # For interacting with S3
    aws = {
      source = "hashicorp/aws"
      version = "~> 5.0"
    }
    kubernetes = {
      source = "hashicorp/kubernetes"
      version = "2.30.0"
    }
  }
}

provider aws {
  access_key = var.aws_key
  secret_key = var.aws_secret
  region = var.aws_region
  max_retries = 1
}

provider kubernetes {
  config_path = "terraform.yaml"
}

@@ -1,50 +0,0 @@
resource kubernetes_ingress_v1 athens {
  metadata {
    name = var.shockrahxyz.name
    namespace = kubernetes_namespace.websites.metadata.0.name
    labels = {
      app = "websites"
    }
  }
  spec {
    rule {
      host = "test.shockrah.xyz"
      http {
        path {
          backend {
            service {
              name = var.shockrahxyz.name
              port {
                number = 80
              }
            }
          }
          path = "/"
        }
      }
    }
  }
}

resource kubernetes_service athens_lb {
  metadata {
    name = "athens-websites"
    namespace = kubernetes_namespace.websites.metadata.0.name
    labels = {
      app = "websites"
    }
  }
  spec {
    selector = {
      app = kubernetes_ingress_v1.athens.metadata.0.labels.app
    }
    port {
      port = 80
      target_port = 80
    }
    type = "LoadBalancer"
    external_ips = [ var.cluster.ip ]
  }
}

@@ -1,5 +0,0 @@
resource kubernetes_namespace websites {
  metadata {
    name = "websites"
  }
}

@@ -1,62 +0,0 @@
# First we setup the ingress controller with helm

```sh
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
# Now we can install this to our cluster
helm install --kubeconfig config.yaml traefik traefik/traefik
```

# Prove the service is present with

```sh
kubectl --kubeconfig config.yaml get svc
```

# Create the pods

```sh
kubectl --kubeconfig config.yaml -f k8s/nginx-dep.yaml
```

# Expose on port 80

```sh
kubectl --kubeconfig config.yaml -f k8s/nginx-service.yaml
```

# Create ingress on k8s

```sh
kubectl --kubeconfig config.yaml -f k8s/traefik-ingress.yaml
```

# Take the external IP from the ingress

Put that into terraform's A record for the domain since this is a load balancer
in Vultr ( actual resource apparantly )

# Configure cert-manager for traefik ingress

Using the latest version from here:
https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml

```sh
kubectl --kubeconfig config.yaml \
  apply --validate=false \
  -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.yaml
```

# Create the cert issuer and certificate

```sh
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
```

Because we just have 1 cert for now we are looking for it's status to be `READY`

@@ -1,21 +0,0 @@
Plain nginx for now so that we can test out reverse dns
resource kubernetes_pod shockrah {
  metadata {
    name = var.shockrahxyz.name
    namespace = kubernetes_namespace.websites.metadata.0.name
    labels = {
      app = var.shockrahxyz.name
    }
  }
  spec {
    container {
      image = "nginx"
      name = "${var.shockrahxyz.name}"
      port {
        container_port = 80
      }
    }
  }
}

@@ -1,35 +0,0 @@
# API Keys required to reach AWS/Vultr
variable vultr_api_key {
  type = string
  sensitive = true
}

variable aws_key {
  type = string
  sensitive = true
}

variable aws_secret {
  type = string
  sensitive = true
}

variable aws_region {
  type = string
  sensitive = true
}

variable shockrahxyz {
  type = object({
    name = string
    port = number
    dns = string
  })
}

variable cluster {
  type = object({
    ip = string
  })
}

@@ -1,37 +0,0 @@
# Here we are going to define the deployment and service
# Basically all things directly related to the actual service we want to provide
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: alternate-nginx-web
  namespace: default
  labels:
    app: alternate-nginx-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alternate-nginx-web
  template:
    metadata:
      labels:
        app: alternate-nginx-web
    spec:
      # Container comes from an example thing i randomly found on docker hub
      containers:
        - name: alternate-nginx-web
          image: dockerbogo/docker-nginx-hello-world
---
apiVersion: v1
kind: Service
metadata:
  name: alternate-nginx-web
  namespace: default
spec:
  selector:
    app: alternate-nginx-web
  ports:
    - name: http
      targetPort: 80
      port: 80

@@ -1,30 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: hello.temprah-lab.xyz
  namespace: default
spec:
  secretName: hello.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  commonName: hello.temprah-lab.xyz
  dnsNames:
    - hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod-hello
  namespace: default
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: dev@shockrah.xyz
    privateKeySecretRef:
      name: letsencrypt-prod-hello
    solvers:
      - http01:
          ingress:
            class: traefik

@@ -1,13 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: sample.temprah-lab.xyz
  namespace: default
spec:
  secretName: sample.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  commonName: sample.temprah-lab.xyz
  dnsNames:
    - sample.temprah-lab.xyz

@@ -1,20 +0,0 @@
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-web
  namespace: default
  labels:
    app: nginx-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-web
  template:
    metadata:
      labels:
        app: nginx-web
    spec:
      containers:
        - name: nginx
          image: nginx

@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
  namespace: default
spec:
  selector:
    app: nginx-web
  ports:
    - name: http
      targetPort: 80
      port: 80

@@ -1,44 +0,0 @@
# This is the first thing we need to create, an issue to put certs into
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
  namespace: default
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: dev@shockrah.xyz
    privateKeySecretRef:
      name: letsencrypt-temprah-lab
    solvers:
      - http01:
          ingress:
            class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: hello.temprah-lab.xyz
  namespace: default
spec:
  secretName: hello.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-temprah-lab
    kind: ClusterIssuer
  commonName: hello.temprah-lab.xyz
  dnsNames:
    - hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: sample.temprah-lab.xyz
  namespace: default
spec:
  secretName: sample.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-temprah-lab
    kind: ClusterIssuer
  commonName: sample.temprah-lab.xyz
  dnsNames:
    - sample.temprah-lab.xyz

@@ -1,31 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: traefik-ingress
  namespace: default
  labels:
    name: project-athens-lb
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
    - host: sample.temprah-lab.xyz
      http:
        paths:
          - backend:
              service:
                name: nginx-web
                port:
                  number: 80
            path: /
            pathType: Prefix
    - host: hello.temprah-lab.xyz
      http:
        paths:
          - backend:
              service:
                name: alternate-nginx-web
                port:
                  number: 80
            path: /
            pathType: Prefix

@@ -1,15 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
kind: Issuer
metadata:
  name: letsencrypt-prod
  namespace: default
  name: letsencrypt-nginx
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: dev@shockrah.xyz
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: letsencrypt-prod
      name: example
    solvers:
      - http01:
          ingress:
            class: traefik
            class: nginx

infra/vultr-kubernetes/legacy/ingress.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
  name: whoami-service
spec:
  selector:
    name: whoami
  ports:
    - name: http
      port: 80
      targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: whoami-ingress
  annotations:
    cert-manager.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
  ingressClassName: nginx
  tls:
    - secretName: whoami-tls
      hosts:
        - example.shockrah.xyz
  rules:
    - host: example.shockrah.xyz
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: whoami-service
                port:
                  number: 80

infra/vultr-kubernetes/legacy/service.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  name: whoami-lb
  annotations:
    service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
    service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-interval: "30"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-response-timeout: "5"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-unhealthy-threshold: "5"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-healthy-threshold: "5"
spec:
  type: LoadBalancer
  selector:
    name: whoami
  ports:
    - name: http
      port: 80
      targetPort: 8080

infra/vultr-kubernetes/legacy/test.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: whoami
spec:
  replicas: 3
  selector:
    matchLabels:
      name: whoami
  template:
    metadata:
      labels:
        name: whoami
    spec:
      containers:
        - name: whoami
          image: quanhua92/whoami:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 8080

infra/vultr-kubernetes/legacy/tls.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # The ACME server URL
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    preferredChain: "ISRG Root X1"
    # Email address used for ACME registration
    email: dev@shockrah.xyz
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-staging
    solvers:
      - http01:
          ingress:
            class: nginx
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    # The ACME server URL
    server: https://acme-v02.api.letsencrypt.org/directory
    # Email address used for ACME registration
    email: dev@shockrah.xyz
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-prod
    solvers:
      - http01:
          ingress:
            class: nginx

infra/vultr-kubernetes/namespaces.tf (new file, 10 lines)
@@ -0,0 +1,10 @@
resource kubernetes_namespace playground {
  metadata {
    annotations = {
      names = var.playground.namespace
    }
    name = var.playground.namespace
  }
}

infra/vultr-kubernetes/tls.tf (new file, 30 lines)
@@ -0,0 +1,30 @@
resource helm_release shockrah_cert_manager {
  name = "cert-manager"
  repository = "https://charts.jetstack.io"
  chart = "cert-manager"
  version = "v1.18.2"
  namespace = "cert-manager"
  create_namespace = true
  cleanup_on_fail = true

  set = [
    {
      name = "crds.enabled"
      value = "true"
    }
  ]
}

data kubectl_file_documents cluster_issuer {
  content = file("cluster-issuer.yaml")
}

resource kubectl_manifest cluster_issuer {
  for_each = data.kubectl_file_documents.cluster_issuer.manifests
  yaml_body = each.value

  depends_on = [
    data.kubectl_file_documents.cluster_issuer
  ]
}
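Once the helm release and the kubectl_manifest resources above are applied, a quick hedged check that cert-manager is running and that the ClusterIssuer from cluster-issuer.yaml was created:

```sh
# The chart installs into its own namespace with CRDs enabled
helm status cert-manager -n cert-manager
kubectl get pods -n cert-manager
# The issuer defined in cluster-issuer.yaml should show READY=True
kubectl get clusterissuer letsencrypt -o wide
```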
infra/vultr-kubernetes/uptime.tf (new file, 61 lines)
@@ -0,0 +1,61 @@
resource kubernetes_deployment kuma {
  metadata {
    name = "kuma"
    namespace = var.playground.namespace
    labels = {
      "app" = "kuma"
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        "app" = "kuma"
      }
    }
    template {
      metadata {
        labels = {
          "app" = "kuma"
        }
      }
      spec {
        container {
          name = "kuma"
          image = "louislam/uptime-kuma:2"
          port {
            container_port = 3001
            name = "uptime-kuma"
          }
          volume_mount {
            name = "kuma-data"
            mount_path = "/app/data"
          }
        }
        volume {
          name = "kuma-data"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim_v1.kuma.metadata[0].name
          }
        }
      }
    }
  }
}

resource kubernetes_service kuma {
  metadata {
    name = "kuma"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      "app" = "kuma"
    }
    port {
      target_port = "uptime-kuma"
      port = 3001
      name = "http"
    }
  }
}

@@ -25,15 +25,25 @@ variable cluster {
    region = string
    label = string
    version = string
    pool = object({
    pools = map(object({
      node_quantity = number
      plan = string
      autoscale = bool
      min = number
      max = number
      label = string
      min_nodes = number
      max_nodes = number
      tag = string
    }))
  })
}

variable playground {
  type = object({
    namespace = string
    # TODO: Re-incorporate this var for templating later
    tls = object({
      email = string
    })
  })
}

variable lab_domain {
  type = string
}

@@ -1,14 +1,24 @@

cluster = {
  region = "lax"
  label = "athens-cluster"
  version = "v1.31.2+1"
  pool = {
  version = "v1.34.1+2"
  pools = {
    main = {
      node_quantity = 1
      plan = "vc2-1c-2gb"
      autoscale = true
      min = 1
      max = 2
      label = "main"
      min_nodes = 1
      max_nodes = 2
      tag = "athens-main"
    }
  }
}

lab_domain = "temprah-lab.xyz"
playground = {
  namespace = "playground"
  # Sanity check service that is used purely for the sake of ensuring
  # things are ( at a basic level ) functional
  tls = {
    email = "dev@shockrah.xyz"
  }
}

infra/vultr-kubernetes/volumes.tf (new file, 49 lines)
@@ -0,0 +1,49 @@
resource kubernetes_persistent_volume_claim_v1 kuma {
  metadata {
    name = "kuma-data"
    namespace = var.playground.namespace
  }
  spec {
    volume_mode = "Filesystem"
    access_modes = [ "ReadWriteOnce" ]
    resources {
      requests = {
        storage = "10Gi"
      }
    }
  }
}

resource kubernetes_persistent_volume_claim_v1 gitea {
  metadata {
    name = "gitea-data"
    namespace = var.playground.namespace
  }
  spec {
    volume_mode = "Filesystem"
    access_modes = [ "ReadWriteOnce" ]
    resources {
      requests = {
        storage = "10Gi"
      }
    }
  }
}

resource kubernetes_persistent_volume_claim_v1 otterwiki {
  metadata {
    name = "otterwiki-data"
    namespace = var.playground.namespace
  }
  spec {
    volume_mode = "Filesystem"
    access_modes = [ "ReadWriteOnce" ]
    resources {
      requests = {
        storage = "10Gi"
      }
    }
  }
}
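A hedged way to confirm the three claims above bind to Vultr block storage after the apply, before the deployments that mount them are trusted:

```sh
# kuma-data, gitea-data, and otterwiki-data should all reach the Bound phase
kubectl get pvc -n playground
kubectl describe pvc kuma-data -n playground
```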
infra/vultr-kubernetes/wiki.tf (new file, 63 lines)
@@ -0,0 +1,63 @@
resource kubernetes_deployment otterwiki {
  metadata {
    name = "otterwiki"
    namespace = var.playground.namespace
    labels = {
      "app" = "otterwiki"
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        "app" = "otterwiki"
      }
    }
    template {
      metadata {
        labels = {
          "app" = "otterwiki"
        }
      }
      spec {
        container {
          name = "otterwiki"
          image = "redimp/otterwiki:2"
          port {
            container_port = 8080
            name = "otterwiki-main"
          }
          volume_mount {
            name = "otterwiki-data"
            mount_path = "/var/lib/otterwiki"
          }
        }
        volume {
          name = "otterwiki-data"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim_v1.otterwiki.metadata[0].name
          }
        }
      }
    }
  }
}

resource kubernetes_service otterwiki {
  metadata {
    name = "otterwiki"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      "app" = "otterwiki"
    }
    port {
      port = 80
      target_port = "otterwiki-main"
      protocol = "TCP"
      name = "http"
    }
  }
}