Compare commits


103 Commits

Author SHA1 Message Date
982669ed4a Cleaning up the logging namespace and resource as they are not providing value
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 7s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 24s
2025-12-12 14:41:29 -08:00
4446ef813f Fixing auto_scaler issue with root node pool in athens cluster 2025-12-12 14:40:54 -08:00
9dc2f1d769 Adding sample files and fluent bit configs which still need some work
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
2025-11-10 14:18:05 -08:00
01b7b4ced8 Moving logging related things to the new logging namespace
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-11-05 21:55:40 -08:00
29cdfcb695 openobserve minimal setup running now with its own namespace and volumes
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 7s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-11-04 23:24:16 -08:00
bbbc9ed477 Upsizing the singular node to accommodate the new observability stack 2025-11-04 23:20:03 -08:00
d64c5526e6 Creating namespace for OpenObserve 2025-11-04 23:18:39 -08:00
469b3d08ce Adding hashicorp/random provider 2025-11-04 23:16:58 -08:00
7f5b3205d0 Ingress functional, however this is all in a cooked af namespace
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-11-03 02:14:06 -08:00
67ff5ce729 Gitea appearing functional with the service in place, now waiting on LB setup
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-11-03 01:48:29 -08:00
6aadb47c61 Adding code.shockrah.xyz to DNS member list 2025-11-03 01:48:09 -08:00
0624161f53 Fixing the PV for gitea which now lives in the dev namespace
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-11-03 01:30:16 -08:00
c6b2a062e9 Creating dev namespace
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-11-03 01:17:54 -08:00
718647f617 Adding a new uptime service to configure later on
For now I'm staging this in the playground namespace since IDK if I'm going to keep it 5ever + it's an excuse to learn how to use basic volumes
2025-11-02 21:31:22 -08:00
cfe631eba7 Creating pvc for gitea setup
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 7s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
2025-10-22 16:15:04 -07:00
29e049cf7f Moving legacy yaml 2025-10-21 15:15:24 -07:00
990d29ae6c Adding annotations & host field to ingress
Also updating the staging target to the production target for the Let's Encrypt cluster issuer
2025-10-21 12:42:02 -07:00
859201109e Adding required annotations for cert-manager on the ingress resource
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-10-03 18:04:20 -07:00
de3bff8f14 Creating cluster issuer with yaml piped into terraform
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-10-03 18:01:16 -07:00
54a6ddbe5d Changing out the kubectl provider for a new one 2025-10-03 17:59:01 -07:00
82333fe6ce Setting up cert-manager helm_release
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-10-03 17:25:23 -07:00
cddf67de2f Updating health ingress resource with better naming/referencing
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-09-28 15:56:24 -07:00
affa03bed5 Updating DNS for load balancer sanity.shockrah.xyz A record
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
2025-09-28 13:56:47 -07:00
34e1f6afdf Converting backend provider helm config to use config.yaml
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-09-27 14:38:26 -07:00
fd9bd290af Adding support for helm releases
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
Intended for setting up the nginx-ingress controller
2025-09-20 11:05:00 -07:00
d992556032 Basic sanity service now working under public DNS
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-09-17 23:32:50 -07:00
fce73d06e0 Adding dns vars for sanity.shockrah.xyz
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-09-17 23:08:33 -07:00
7f5d81f0ee Deployment and Service for a simple health check tier service
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-09-17 22:37:03 -07:00
410790765f Creating new namespace in cluster for random k8s experiments 2025-09-17 22:33:34 -07:00
9454e03f53 Example service now uses tls for some reason
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-09-09 18:05:12 -07:00
e6ed85920d Creating semi-functional tls cert with k8s
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 9s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 5s
Certificate resource is created but not deployed at this time
2025-09-08 21:00:24 -07:00
2775d354f8 Creating functional ingress 2025-09-08 20:58:44 -07:00
1f6f013634 Cleaning up unused resources
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 10s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 9s
2025-09-08 20:58:29 -07:00
778b995980 Adding DNS entry for VKE LB
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-09-03 14:27:52 -07:00
fc897bdd0e New yaml for a working MVP
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
Still need to add things like TLS but this will basically
be the template for routing and service setup going forward
2025-08-29 16:34:18 -07:00
8f06ef269a Basic health setup
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
2025-08-27 18:13:39 -07:00
f15da0c88d Removing old kubernetes tf infrastructure
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-08-27 00:30:38 -07:00
c602773657 Removing tarpit project to save on costs
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
2025-08-08 07:32:33 -07:00
cd908d9c14 Sample cronjob for k3s
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-08-05 21:50:30 -07:00
56e9c0ae4a Merge branch 'master' of ssh://git.shockrah.xyz:2222/shockrah/infra
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-07-25 12:43:56 -07:00
30bc6ee2fa Ignore kubectl config 2025-07-25 12:43:53 -07:00
cd9822bb85 Basic health service on port 30808 on the new k3s cluster 2025-07-25 12:43:30 -07:00
0efe6ca642 removing nomad configs 2025-07-25 12:02:31 -07:00
2ef4b00097 Removing old nomad configs
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 13s
2025-07-14 20:43:40 -07:00
e183055282 Nomad removal
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 31s
2025-07-14 20:15:54 -07:00
514909fc8d Removing nomad and consul in favor of K3S for more well supported architecture 2025-07-14 20:14:37 -07:00
5b4a440cb4 Removing the last remnants of nomad and getting k3s setup
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-07-10 20:47:27 -07:00
826d334c3c Completing the state required to setup k3s
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 15s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 37s
2025-07-10 14:06:53 -07:00
77590b067a Create the static host volume for the new NFS
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
2025-06-18 17:08:00 -07:00
850570faf5 Creating simple bastion host for testing deployment setup scripts
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
2025-06-16 15:15:09 -07:00
12831fbaf3 Adding vars for the new bastion host
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 16s
2025-06-12 09:20:21 -07:00
a6123dd7e2 Adding tls into required providers
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-06-12 09:16:10 -07:00
9c2e0a84d7 Creating VKE cluster in a private VPC
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-06-04 23:29:26 -07:00
1281ea8857 simplifying vars
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-05-28 21:22:10 -07:00
ee2d502ca6 Slimming down the cluster definition
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 16s
2025-05-28 18:24:31 -07:00
88059a5e0f upgrading providers 2025-05-28 17:43:56 -07:00
4024809cc4 Removing scripts that won't ever be used again 2025-05-29 00:35:32 -07:00
029a3c80d5 Ignoring .ansible directory
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-05-29 00:34:25 -07:00
75b7f2fa3d Unreal amounts of linter fixes
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 16s
2025-05-26 01:10:00 -07:00
8ef606153f Swapping basic setup steps for sudo access
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-05-25 22:57:30 -07:00
be34327791 Moving nomad host volume setup to its own role
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-05-25 22:56:07 -07:00
c6ef6ae4d2 moving more files around for nomad this time
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-05-25 22:41:37 -07:00
eb7871584b Renaming role to be more generic going forward
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-05-25 22:35:10 -07:00
4a0a12242a Moving common vars for role to vars/main.yaml
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
2025-05-25 22:33:26 -07:00
053db8793b Moving proxy things to its own playbook + role
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-05-23 01:42:40 -07:00
24fcbc957a almost functional registry, still need to figure out tls 2025-05-23 00:47:32 -07:00
9675fbacef Tracking nomad client config 2025-05-23 00:34:22 -07:00
3f0c8a865d DNS Endpoint for a tarpit meme project
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-05-23 00:31:24 -07:00
3f2e6d86f6 Proxying a new container registry service
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2025-05-23 00:31:00 -07:00
08560c945b Removing succ build script
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 13s
2025-05-21 22:24:50 -07:00
506a9b32d9 renaming tarpit
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-05-21 21:36:48 -07:00
d4ece741e0 tarpit server that i'll use for the lulz
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-05-21 21:35:20 -07:00
311a592d6e Adding a task subset for host volume setup
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
2025-05-20 15:06:04 -07:00
153ea8e982 Improving nomad nginx config to be more responsive or something
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
https://developer.hashicorp.com/nomad/tutorials/manage-clusters/reverse-proxy-ui#extend-connection-timeout
2025-05-15 22:59:07 -07:00
943e9651da Swapping the health container to our own thing
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
it's just nginx on port 8080 :)
2025-05-13 18:40:12 -07:00
669c414288 Simple sanity container on port 8080 for testing purposes
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Has been cancelled
2025-05-13 18:39:47 -07:00
e3afed5e4f sanity service on 8080 now 2025-05-12 02:01:31 -07:00
e337989a59 just roll with it at this point 2025-05-12 02:01:20 -07:00
7f36ff272e bootstrap since we have literally 1 node
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-05-12 01:44:25 -07:00
79e6698db1 Templatizing consul config
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-05-12 01:28:39 -07:00
603559b255 omfg this config i swear
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-05-12 01:08:11 -07:00
4851b6521c consul config
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-05-12 01:05:54 -07:00
9785e8a40a Even more file shuffling
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 27s
2025-05-12 00:21:08 -07:00
79bd7424c3 Moving around more stuff
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
2025-05-12 00:18:24 -07:00
5227bea568 renaming stuff to note that it's not used anymore 2025-05-12 00:17:30 -07:00
47b69d7f49 Nomad now responds to the basic nomad.nigel.local DNS name
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-05-10 17:26:45 -07:00
a3fdc5fcc7 Sanity check job with nomad :D
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-05-10 15:38:16 -07:00
5a1afb4a07 Make sure the nomad agent is running on boot
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-05-10 14:58:19 -07:00
e03daa62e5 removing unused role
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-05-10 14:52:32 -07:00
15dfaea8db Nomad completely setup with --tags nomad now
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
2025-05-04 23:35:58 -07:00
ef4967cd88 wari wari da it's so over ( im using ansible again )
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
:(((
2025-04-23 23:25:34 -07:00
55217ce50b Ensure nigel sudo ability is setup
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
2025-04-23 22:25:23 -07:00
2bbc9095f7 Removing services role as it's being replaced by terraform
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 16s
2025-04-23 22:14:50 -07:00
fcf7ded218 Removing docker resources for now
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 7s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 22s
Migrating to terraform for better state control
2025-04-23 22:14:05 -07:00
b68d53b143 Opting for an example minio setup over filebrowser
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-04-16 21:00:39 -07:00
3c6bc90feb health container and filebrowser container now active
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
Configuration needed at this point however
2025-04-16 20:28:48 -07:00
3521b840ae Separating the roles of basic infra requirements and docker service requirements into separate roles
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
With this we have a working proof of concept for a proper simple docker host
2025-04-16 18:25:24 -07:00
5f10976264 Docker now setup with ansible
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 15s
2025-04-16 17:34:03 -07:00
10e936a8da Basic docker setup verified by ansible-lint locally
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-04-16 14:55:02 -07:00
8bbaea8fd9 Simple admin user setup on a clean Ubuntu machine
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
2025-04-11 02:43:22 -07:00
d39e0c04e5 Adding health to games selector set
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
2025-02-10 22:11:09 -08:00
b99525955e Swapping health pod to admin-services 2025-02-10 22:10:46 -08:00
9b6f9b6656 Fixing tag issues with pod selector
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2025-02-10 22:10:02 -08:00
97 changed files with 1349 additions and 845 deletions

View File

@ -10,6 +10,6 @@ jobs:
   steps:
     - name: Checkout repo content
       uses: actions/checkout@v4
-    - run: ansible-lint
+    - run: ansible-lint -c linter.yaml
       working-directory: ansible/

.gitignore vendored
View File

@ -21,3 +21,4 @@ docker/beta/shockrah.xyz/
 docker/beta/resume.shockrah.xyz/
 k8s/config.yaml
 infra/**/tfplan
+.ansible/

ansible/inventory.yaml Normal file
View File

@ -0,0 +1,3 @@
nigel:
hosts:
nigel.local:

ansible/linter.yaml Normal file
View File

@ -0,0 +1,4 @@
---
skip_list:
- role-name
- var-naming[no-role-prefix]

View File

@ -0,0 +1,28 @@
# This playbook is meant to be a oneshot, run manually on the dev box
# The rest of the role stuff is meant to be run as the admin user that
# this playbook creates for us
---
- name: Setup local admin user on a fresh Ubuntu host
hosts: nigel.local
remote_user: nigel
vars:
admin:
username: nigel
tasks:
- name: Copy the nigel admin key
ansible.posix.authorized_key:
user: "{{ admin.username }}"
state: present
key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
- name: Prevent password based logins
become: true
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
- name: Restart SSH Daemon
become: true
ansible.builtin.service:
name: ssh
state: restarted
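Since the comment above calls this a manual oneshot, a minimal invocation sketch follows; the playbook filename and the password prompt flag are assumptions, not confirmed by the repo:

```sh
# Hypothetical filename admin.yaml; run from the ansible/ directory.
# -k prompts for the SSH password once, before the play disables password logins.
ansible-playbook -i inventory.yaml -k admin.yaml
```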

ansible/nomad.yaml Normal file
View File

@ -0,0 +1,9 @@
---
- name: Setup all the responsibilities of the nomad server
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the nomad role
ansible.builtin.include_role:
name: nomad

ansible/nuc.yaml Normal file
View File

@ -0,0 +1,14 @@
---
- name: Setup bare metal requirements
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the base role to the nuc
ansible.builtin.include_role:
name: base
- name: Apply the k3s base role
ansible.builtin.include_role:
name: k3s
- name: Apply the proxy role
ansible.builtin.include_role:
name: proxy
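A plain invocation sketch for this playbook (the same pattern applies to the other playbooks here), assuming it runs from the ansible/ directory against the inventory above:

```sh
# Applies the base, k3s, and proxy roles to nigel.local in order
ansible-playbook -i inventory.yaml nuc.yaml
```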

ansible/proxy.yaml Normal file
View File

@ -0,0 +1,8 @@
---
- name: Setup host as a reverse proxy
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply reverse proxy role
ansible.builtin.include_role:
name: proxy

View File

@ -0,0 +1 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable

View File

@ -0,0 +1,41 @@
- name: Ensure we have basic updated packages before setting up docker
ansible.builtin.apt:
name: "{{ item }}"
update_cache: true
loop:
- ca-certificates
- curl
- name: Running install on the keyrings directory
ansible.builtin.command:
cmd: install -m 0755 -d /etc/apt/keyrings
register: install
changed_when: install.rc == 0
- name: Fetch Docker GPG Key
vars:
keylink: https://download.docker.com/linux/ubuntu/gpg
ansible.builtin.get_url:
url: "{{ keylink }}"
dest: /etc/apt/keyrings/docker.asc
mode: "0644"
- name: Add repo to apt sources
ansible.builtin.copy:
src: docker.list
dest: /etc/apt/sources.list.d/docker.list
mode: "0644"
- name: Update Apt cache with latest docker.list packages
ansible.builtin.apt:
update_cache: true
- name: Ensure all docker packages are updated to the latest versions
ansible.builtin.apt:
name: "{{ item }}"
loop:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- name: Verify that the docker components are installed properly
ansible.builtin.command:
cmd: docker run hello-world
register: docker
changed_when: docker.rc == 0

View File

@ -0,0 +1,8 @@
- name: Download the setup script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s.sh
mode: "0644"
- name: Run installation script
ansible.builtin.command:
cmd: bash /tmp/k3s.sh
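For reference, the manual equivalent of these two tasks is roughly the following; the kubectl check is an assumption about how you would verify the install, not part of the role:

```sh
# Same download-then-run flow the tasks automate
curl -sfL https://get.k3s.io -o /tmp/k3s.sh
sudo bash /tmp/k3s.sh
# k3s bundles kubectl; the single node should report Ready
sudo k3s kubectl get nodes
```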

View File

@ -0,0 +1,25 @@
- name: Ensure nigel can use sudo without password
become: true
tags:
- setup
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
tags:
- setup
ansible.builtin.include_tasks:
file: ensure-docker-basic.yaml
apply:
become: true
tags:
- setup
- name: Run through nomad removal steps
tags: nomad
ansible.builtin.include_tasks:
file: nomad.yaml
apply:
become: true
tags:
- nomad

View File

@ -0,0 +1,12 @@
bind_addr = "{{ ip }}"
advertise_addr = "{{ ip }}"
bootstrap = true
bootstrap_expect = 1
client_addr = "{{ ip }}"
server = true
data_dir = "/opt/consul"
ui_config {
enabled = true
}

View File

@ -0,0 +1 @@
deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main

View File

View File

@ -0,0 +1,11 @@
- name: Download the installation script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp
register: install_script
- name: Run installation script
become: true
environment:
INSTALL_K3S_EXEC: server
ansible.builtin.command:
cmd: sh {{ install_script.dest }}

View File

View File

@ -0,0 +1,24 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
server {
enabled = true
bootstrap_expect = 1
}
client {
enabled = true
servers = ["127.0.0.1"]
}
host_volume "registry" {
path = "/opt/volumes/registry"
read_only = false
}
host_volume "nfs" {
path = "/opt/volumes/nfs"
read_only = false
}
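A plausible smoke test for this single-node server/client config, assuming it gets rendered to /etc/nomad.d/server.hcl (path not confirmed by the repo):

```sh
# Start the agent with the config above, then check both roles it claims
sudo nomad agent -config /etc/nomad.d/server.hcl &
nomad server members   # the lone server should show alive (leader)
nomad node status      # the same host should also register as a client
```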

View File

@ -0,0 +1,18 @@
- name: Nomad server configuration
become: true
block:
- name: Ensure the root data directory is removed
ansible.builtin.file:
path: "{{ nomad.volumes.root }}"
state: absent
mode: "0755"
- name: Ensure the registry volume is removed
ansible.builtin.file:
path: "{{ nomad.volumes.registry }}"
state: absent
mode: "0755"
- name: Ensure the MinIO directory is removed
ansible.builtin.file:
path: "{{ nomad.volumes.nfs }}"
state: absent
mode: "0755"

View File

@ -0,0 +1,5 @@
nomad:
volumes:
root: /opt/volumes
registry: /opt/volumes/ncr
nfs: /opt/volumes/nfs

View File

@ -0,0 +1,15 @@
127.0.0.1 localhost
127.0.1.1 nigel
# Our own dns stuff
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

View File

@ -0,0 +1,6 @@
server {
server_name ncr.nigel.local;
location / {
proxy_pass http://localhost:5000;
}
}

View File

@ -0,0 +1,25 @@
server {
server_name nomad.nigel.local;
location / {
proxy_pass http://nomad-ws;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 319s;
# This is for log streaming requests
proxy_buffering off;
# Upgrade and Connection headers for upgrading to websockets
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "${scheme}://${proxy_host}";
}
}
upstream nomad-ws {
ip_hash;
server nomad.nigel.local:4646;
}
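A quick way to confirm both the plain HTTP path and the websocket-upgrade path through this proxy might look like the following; the allocation ID is a placeholder:

```sh
# Plain API call through nginx
curl -s http://nomad.nigel.local/v1/agent/health
# Log streaming exercises the Upgrade/Connection headers configured above
nomad alloc logs -f -address=http://nomad.nigel.local <alloc-id>
```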

View File

@ -0,0 +1,28 @@
- name: Reverse proxy role configuration
become: true
block:
- name: Ensure /etc/hosts is up to date
ansible.builtin.copy:
dest: /etc/hosts
src: host-file
mode: "0644"
- name: Ensure nginx is setup as latest
ansible.builtin.apt:
name: nginx
- name: Copy each nginx config to sites-available
ansible.builtin.copy:
src: "{{ item }}"
dest: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Link each nginx config into sites-enabled
ansible.builtin.file:
path: "/etc/nginx/sites-enabled/{{ item }}"
state: link
src: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Restart nginx
ansible.builtin.systemd_service:
name: nginx
state: restarted
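One gap worth noting: the role restarts nginx without validating the new vhosts first. A manual guard, assuming shell access to the host:

```sh
# Catches syntax errors in the copied configs before the restart lands
sudo nginx -t && sudo systemctl restart nginx
```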

View File

@ -0,0 +1,3 @@
proxy_nginx_configs:
- nomad.conf
- ncr.conf

View File

@ -1,23 +0,0 @@
#!/bin/bash
set -e
bucket="$1"
s3env=/opt/nginx/s3.env
[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1
[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env
pull() {
aws s3 sync s3://$bucket /opt/nginx/$bucket
}
case $bucket in
resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
*) echo "Invalid bucket name" && exit 1 ;;
esac

View File

@ -1,40 +0,0 @@
networks:
gitea:
external: false
services:
gitea:
image: gitea/gitea:latest-rootless
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "2222:22"
gitea-runner:
image: gitea/act_runner:nightly
container_name: gitea-runner
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea_runner/
- /var/run/docker.sock:/var/run/docker.sock
environment:
- GITEA_INSTANCE_URL=https://git.shockrah.xyz
- GITEA_RUNNER_NAME=gitea-main
- GITEA_RUNNER_LABELS=gitea-main
- GITEA_RUNNER_REGISTRATION_TOKEN=${token}

View File

@ -1,29 +0,0 @@
What is this
============
Here we contain scripts to build out all the containers that are run.
All of these images are based on images that are made from other projects
docker-compose.yaml
===================
Services that are more/less "special" go here since most of the stuff that is
run on the main host are basically just static html websites
Services & Containers
=====================
| Service | Docker Image Used |
|------------|--------------------------|
| Gitea | gitea/gitea:latest |
| Act Runner | gitea/act_runner:nightly |
Why the services above?
======================
The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" services. I have no issue with Github/Gitlab
but I just like being able to host my own stuff when possible :smiley:

View File

@ -1,34 +0,0 @@
#!/bin/bash
set -e
opt=$1
plan=tfplan
build_plan() {
echo Generating plan
set -x
terraform plan -var-file variables.tfvars -input=false -out $plan
}
deploy_plan() {
terraform apply $plan
}
init() {
terraform init
}
help_prompt() {
cat <<- EOF
Options: plan deploy help
EOF
}
# Default to building a plan
source ./secrets.sh
case $opt in
plan) build_plan;;
deploy) deploy_plan;;
*) help_prompt;;
esac

View File

@ -37,6 +37,9 @@ locals {
   { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
   { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
   { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
+  { name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
+  { name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
+  { name = "code.shockrah.xyz", records = [ var.vke_lb ] },
 ]
 }

View File

@ -26,3 +26,7 @@ variable "vultr_host" {
   description = "IP of the temp Vultr host"
 }
+variable "vke_lb" {
+  type        = string
+  description = "IP of our VKE load balancer"
+}

View File

@ -1 +1,2 @@
 vultr_host = "45.32.83.83"
+vke_lb = "45.32.89.101"
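Once the records above are applied, a spot check from any machine should line up with these values:

```sh
# Each new record should resolve to the VKE load balancer
dig +short sanity.shockrah.xyz   # expect 45.32.89.101
dig +short uptime.shockrah.xyz
dig +short code.shockrah.xyz
```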

infra/nigel-k3s/.gitignore vendored Normal file
View File

@ -0,0 +1 @@
config.yaml

View File

@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
name: nginx-port
---
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
type: NodePort
selector:
app: nginx
ports:
- port: 80
nodePort: 30808
targetPort: nginx-port
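A usage sketch for this manifest, assuming it is saved as nginx-dep.yaml next to the cluster kubeconfig (the commit log mentions this health service answering on port 30808):

```sh
kubectl --kubeconfig config.yaml apply -f nginx-dep.yaml
# NodePort 30808 from the Service above, reachable on the node itself
curl -i http://nigel.local:30808/
```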

View File

@ -0,0 +1,19 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: hello
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busybox:1.28
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- date; echo Hello from the sample cron-container
restartPolicy: OnFailure
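To watch this sample cron fire, something like the following should work; the manifest filename and job name are placeholders:

```sh
kubectl --kubeconfig config.yaml apply -f cronjob.yaml
kubectl --kubeconfig config.yaml get jobs --watch        # one job per minute
# Print the date + hello line from a completed run
kubectl --kubeconfig config.yaml logs job/<job-name>
```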

View File

@ -1,62 +0,0 @@
resource kubernetes_namespace admin-servers {
count = length(var.admin_services.configs) > 0 ? 1 : 0
metadata {
name = var.admin_services.namespace
}
}
resource kubernetes_pod admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
spec {
node_selector = {
NodeType = var.admin_services.namespace
}
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
# TODO: don't make these NodePorts since we're gonna want them
# to be purely internal to the Cluster.
# WHY? Because we want to keep dashboards as unexposed as possible
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@ -9,15 +9,31 @@ terraform {
   required_providers {
     aws = {
       source  = "hashicorp/aws"
-      version = "~> 5.0"
+      version = "5.98.0"
     }
     vultr = {
       source  = "vultr/vultr"
-      version = "2.22.1"
+      version = "2.26.0"
     }
     kubernetes = {
       source  = "hashicorp/kubernetes"
-      version = "2.34.0"
+      version = "2.37.1"
     }
+    kubectl = {
+      source  = "gavinbunney/kubectl"
+      version = "1.19.0"
+    }
+    helm = {
+      source  = "hashicorp/helm"
+      version = "3.0.2"
+    }
+    tls = {
+      source  = "hashicorp/tls"
+      version = "4.1.0"
+    }
+    random = {
+      source  = "hashicorp/random"
+      version = "3.7.2"
+    }
   }
 }
@ -39,4 +55,12 @@ provider kubernetes {
   config_path = "config.yaml"
 }
+provider kubectl {
+  config_path = "config.yaml"
+}
+provider helm {
+  kubernetes = {
+    config_path = "config.yaml"
+  }
+}

View File

@ -0,0 +1,42 @@
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File /fluent-bit/etc/parsers.conf
Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
multiline.parser docker, cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.*
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Read_From_Tail On
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_Log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
outputs: |
[OUTPUT]
Name openobserve
Match *
URI /api/default/default/_json
Host openobserve.logging.svc.cluster.local
Port 5080
tls On
Format json
Json_date_key _timestamp
Json_date_format iso8601
HTTP_User mail@shockrah.xyz
HTTP_Passwd kXWpwEK4SIxUzjgp
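These read as value overrides for the upstream fluent-bit Helm chart; a plausible install sketch, with the values filename assumed:

```sh
helm repo add fluent https://fluent.github.io/helm-charts
helm upgrade --install fluent-bit fluent/fluent-bit \
  --namespace logging --create-namespace \
  -f fluent-bit-values.yaml
```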

View File

@ -0,0 +1,382 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# General configuration shared across resources
app:
# Mode determines if chart should deploy a full Dashboard with all containers or just the API.
# - dashboard - deploys all the containers
# - api - deploys just the API
mode: 'dashboard'
image:
pullPolicy: IfNotPresent
pullSecrets: []
scheduling:
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
nodeSelector: {}
security:
# Allow overriding csrfKey used by API/Auth containers.
# It has to be base64 encoded random 256 bytes string.
# If empty, it will be autogenerated.
csrfKey: ~
# SecurityContext to be added to pods
# To disable set the following configuration to null:
# securityContext: null
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# ContainerSecurityContext to be added to containers
# To disable set the following configuration to null:
# containerSecurityContext: null
containerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
capabilities:
drop: ["ALL"]
# Pod Disruption Budget configuration
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget:
enabled: false
minAvailable: 0
maxUnavailable: 0
networkPolicy:
enabled: false
ingressDenyAll: false
# Raw network policy spec that overrides predefined spec
# Example:
# spec:
# egress:
# - ports:
# - port: 123
spec: {}
# Common labels & annotations shared across all deployed resources
labels: {}
annotations: {}
# Common priority class used for all deployed resources
priorityClassName: null
settings:
## Global dashboard settings
global:
# # Cluster name that appears in the browser window title if it is set
clusterName: "Athens Cluster"
# # Max number of items that can be displayed on each list page
# itemsPerPage: 10
# # Max number of labels that are displayed by default on most views.
# labelsLimit: 3
# # Number of seconds between every auto-refresh of logs
# logsAutoRefreshTimeInterval: 5
# # Number of seconds between every auto-refresh of every resource. Set 0 to disable
# resourceAutoRefreshTimeInterval: 10
# # Hide all access denied warnings in the notification panel
# disableAccessDeniedNotifications: false
# # Hide all namespaces option in namespace selection dropdown to avoid accidental selection in large clusters thus preventing OOM errors
# hideAllNamespaces: false
# # Namespace that should be selected by default after logging in.
defaultNamespace: playground
# # Enable/Disable namespace isolation mode. When enabled users without cluster-wide permissions will
# # only see resources within their own namespaces.
namespaceIsolation: false
# # List of namespaces that should be presented to user without namespace list privileges.
# namespaceFallbackList:
# - default
## Pinned resources that will be displayed in dashboard's menu
pinnedResources: []
# - kind: customresourcedefinition
# # Fully qualified name of a CRD
# name: prometheus.monitoring.coreos.com
# # Display name
# displayName: Prometheus
# # Is this CRD namespaced?
# namespaced: true
ingress:
enabled: false
hosts:
# Keep 'localhost' host only if you want to access Dashboard using 'kubectl port-forward ...' on:
# https://localhost:8443
- localhost
# - kubernetes.dashboard.domain.com
ingressClassName: internal-nginx
# Use only if your ingress controllers support default ingress classes.
# If set to true ingressClassName will be ignored and not added to the Ingress resources.
# It should fall back to using IngressClass marked as the default.
useDefaultIngressClass: false
# This will append our Ingress with annotations required by our default configuration.
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/ssl-passthrough: "true"
# nginx.ingress.kubernetes.io/ssl-redirect: "true"
useDefaultAnnotations: true
pathType: ImplementationSpecific
# If path is not the default (/), rewrite-target annotation will be added to the Ingress.
# It allows serving Kubernetes Dashboard on a sub-path. Make sure that the configured path
# does not conflict with gateway route configuration.
path: /
issuer:
name: selfsigned
# Scope determines what kind of issuer annotation will be used on ingress resource
# - default - adds 'cert-manager.io/issuer'
# - cluster - adds 'cert-manager.io/cluster-issuer'
# - disabled - disables cert-manager annotations
scope: default
tls:
enabled: true
# If provided it will override autogenerated secret name
secretName: ""
labels: {}
annotations: {}
# Use the following toleration if Dashboard can be deployed on a tainted control-plane nodes
# - key: node-role.kubernetes.io/control-plane
# effect: NoSchedule
tolerations: []
affinity: {}
auth:
role: auth
image:
repository: docker.io/kubernetesui/dashboard-auth
tag: 1.4.0
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- name: auth
containerPort: 8000
protocol: TCP
args: []
env: []
volumeMounts:
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
automountServiceAccountToken: true
volumes:
# Create on-disk volume to store exec logs (required)
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for Auth related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
# API deployment configuration
api:
role: api
image:
repository: docker.io/kubernetesui/dashboard-api
tag: 1.14.0
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- name: api
containerPort: 8000
protocol: TCP
# Additional container arguments
# Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
# args:
# - --system-banner="Welcome to the Kubernetes Dashboard"
args: []
# Additional container environment variables
# env:
# - name: SOME_VAR
# value: 'some value'
env: []
# Additional volume mounts
# - mountPath: /kubeconfig
# name: dashboard-kubeconfig
# readOnly: true
volumeMounts:
# Create volume mount to store exec logs (required)
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
automountServiceAccountToken: true
# Additional volumes
# - name: dashboard-kubeconfig
# secret:
# defaultMode: 420
# secretName: dashboard-kubeconfig
volumes:
# Create on-disk volume to store exec logs (required)
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for API related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
# WEB UI deployment configuration
web:
role: web
image:
repository: docker.io/kubernetesui/dashboard-web
tag: 1.7.0
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- name: web
containerPort: 8000
protocol: TCP
# Additional container arguments
# Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
# args:
# - --system-banner="Welcome to the Kubernetes Dashboard"
args: []
# Additional container environment variables
# env:
# - name: SOME_VAR
# value: 'some value'
env: []
# Additional volume mounts
# - mountPath: /kubeconfig
# name: dashboard-kubeconfig
# readOnly: true
volumeMounts:
# Create volume mount to store logs (required)
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
automountServiceAccountToken: true
# Additional volumes
# - name: dashboard-kubeconfig
# secret:
# defaultMode: 420
# secretName: dashboard-kubeconfig
volumes:
# Create on-disk volume to store exec logs (required)
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for WEB UI related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
### Metrics Scraper
### Container to scrape, store, and retrieve a window of time from the Metrics Server.
### refs: https://github.com/kubernetes/dashboard/tree/master/modules/metrics-scraper
metricsScraper:
enabled: true
role: metrics-scraper
image:
repository: docker.io/kubernetesui/dashboard-metrics-scraper
tag: 1.2.2
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- containerPort: 8000
protocol: TCP
args: []
# Additional container environment variables
# env:
# - name: SOME_VAR
# value: 'some value'
env: []
# Additional volume mounts
# - mountPath: /kubeconfig
# name: dashboard-kubeconfig
# readOnly: true
volumeMounts:
# Create volume mount to store logs (required)
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
automountServiceAccountToken: true
# Additional volumes
# - name: dashboard-kubeconfig
# secret:
# defaultMode: 420
# secretName: dashboard-kubeconfig
volumes:
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for Metrics Scraper related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
## Optional Metrics Server sub-chart configuration
## Enable this if you don't already have metrics-server enabled on your cluster and
## want to use it with dashboard metrics-scraper
## refs:
## - https://github.com/kubernetes-sigs/metrics-server
## - https://github.com/kubernetes-sigs/metrics-server/tree/master/charts/metrics-server
metrics-server:
enabled: false
args:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls
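Likewise, these appear to be values for the upstream kubernetes-dashboard chart; a hedged install sketch, values filename assumed:

```sh
helm repo add kubernetes-dashboard https://kubernetes.github.io/dashboard/
helm upgrade --install kubernetes-dashboard kubernetes-dashboard/kubernetes-dashboard \
  --namespace kubernetes-dashboard --create-namespace \
  -f dashboard-values.yaml
```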

View File

@ -0,0 +1,18 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
preferredChain: "ISRG Root X1"
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx
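Applying and checking the issuer might look like this; the manifest filename is assumed, and the READY column comes from cert-manager:

```sh
kubectl --kubeconfig config.yaml apply -f cluster-issuer.yaml
# READY should flip to True once the ACME account registers
kubectl --kubeconfig config.yaml get clusterissuer letsencrypt
```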

View File

@ -1,28 +1,17 @@
 resource vultr_kubernetes athens {
   region  = var.cluster.region
   version = var.cluster.version
   label   = var.cluster.label
-  # BUG: only have this set when creating the resource for the first time
-  # once the cluster is up, we should comment this out again
-  # enable_firewall = true
+  # vpc_id = vultr_vpc.athens.id
-  node_pools {
-    node_quantity = 1
-    plan      = var.cluster.pools["meta"].plan
-    label     = var.admin_services.namespace
-    min_nodes = var.cluster.pools["meta"].min
-    max_nodes = var.cluster.pools["meta"].max
-    # tag = var.admin_services.namespace
-  }
-}
-resource vultr_kubernetes_node_pools games {
-  cluster_id    = vultr_kubernetes.athens.id
-  node_quantity = var.cluster.pools["games"].min
-  plan          = var.cluster.pools["games"].plan
-  label         = var.game_servers.namespace
-  min_nodes     = var.cluster.pools["games"].min
-  max_nodes     = var.cluster.pools["games"].max
-  tag           = var.admin_services.namespace
-}
+  node_pools {
+    node_quantity = var.cluster.pools["main"].min_nodes
+    plan          = var.cluster.pools["main"].plan
+    label         = var.cluster.pools["main"].label
+    min_nodes     = var.cluster.pools["main"].min_nodes
+    max_nodes     = var.cluster.pools["main"].max_nodes
+    auto_scaler   = true
+  }
+}
 output k8s_config {

View File

@ -0,0 +1,6 @@
data vultr_kubernetes athens {
filter {
name = "label"
values = [ var.cluster.label ]
}
}

View File

@ -1,4 +0,0 @@
# created by virtualenv automatically
bin/
lib/

View File

@ -1,51 +0,0 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re
def get_args() -> Namespace:
parser = ArgumentParser(
prog="Cluster Search Thing",
description="General utility for finding resources for game server bot"
)
games = {"reflex", "minecraft"}
parser.add_argument('-g', '--game', required=False, choices=games)
admin = {"health"}
parser.add_argument('-a', '--admin', required=False, choices=admin)
return parser.parse_args()
def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
config.load_kube_config("../config.yaml")
return client.CoreV1Api()
def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
print('admin thing requested', args.admin)
def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
node_name = pods.items[0].spec.node_name
services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
port = services.items[0].spec.ports[0].port
# Collecting the IPV4 of the node that contains the pod(container)
# we actually care about. Since these pods only have 1 container
# Now we collect specific data about the game server we requested
node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')
if __name__ == '__main__':
args = get_args()
api = k8s_api('../config.yaml')
if args.game:
get_game_server_ip(args, api)
if args.admin:
get_admin_service_details(args, api)

View File

@ -1,8 +0,0 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3

View File

@ -1,18 +0,0 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0

View File

@ -1,32 +1,23 @@
-resource vultr_firewall_rule web_inbound {
-  for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
-  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
-  protocol    = "tcp"
-  ip_type     = "v4"
-  subnet      = "0.0.0.0"
-  subnet_size = 0
-  port        = each.value
-}
-resource vultr_firewall_rule game-server-inbound {
-  for_each = var.game_servers.configs
-  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
-  protocol    = "tcp"
-  ip_type     = "v4"
-  subnet      = "0.0.0.0"
-  subnet_size = 0
-  port        = each.value.port.expose
-}
-resource vultr_firewall_rule admin-service-inbound {
-  for_each = var.admin_services.configs
-  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
-  protocol    = "tcp"
-  ip_type     = "v4"
-  subnet      = "0.0.0.0"
-  subnet_size = 0
-  notes       = each.value.port.notes
-  port        = each.value.port.expose
-}
+# resource vultr_firewall_rule web_inbound {
+#   for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
+#   firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+#   protocol = "tcp"
+#   ip_type = "v4"
+#   subnet = "0.0.0.0"
+#   subnet_size = 0
+#   port = each.value
+# }
+resource vultr_firewall_group bastion {
+  description = "For connections into and out of the bastion host"
+}
+resource vultr_firewall_rule bastion_inbound {
+  firewall_group_id = vultr_firewall_group.bastion.id
+  protocol    = "tcp"
+  ip_type     = "v4"
+  subnet      = "0.0.0.0"
+  subnet_size = 0
+  port        = 22
+}

View File

@ -1,55 +0,0 @@
resource kubernetes_namespace game-servers {
count = length(var.game_servers.configs) > 0 ? 1 : 0
metadata {
name = var.game_servers.namespace
}
}
resource kubernetes_pod game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@ -0,0 +1,66 @@
resource kubernetes_deployment gitea {
metadata {
name = "gitea"
namespace = var.playground.namespace
labels = {
"app" = "gitea"
}
}
spec {
replicas = 1
selector {
match_labels = {
"app" = "gitea"
}
}
template {
metadata {
labels = {
"app" = "gitea"
}
}
spec {
container {
name = "gitea"
image = "gitea/gitea:latest"
port {
container_port = 3000
name = "gitea-main"
}
port {
container_port = 22
name = "gitea-ssh"
}
volume_mount {
name = "gitea"
mount_path = "/data"
}
}
volume {
name = "gitea"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim_v1.gitea.metadata[0].name
}
}
}
}
}
}
resource kubernetes_service gitea {
metadata {
name = "gitea"
namespace = var.playground.namespace
}
spec {
selector = {
"app" = "gitea"
}
port {
target_port = "gitea-main"
port = 3000
name = "http"
}
}
}
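Gitea gets an ingress rule later in this diff; before that lands, a temporary tunnel is enough for a first look. The namespace is assumed to be playground per var.playground.namespace:

```sh
kubectl --kubeconfig config.yaml -n playground port-forward svc/gitea 3000:3000
```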

View File

@ -0,0 +1,47 @@
resource kubernetes_deployment health {
metadata {
name = "health"
namespace = var.playground.namespace
}
spec {
replicas = 1
selector {
match_labels = {
name = "health"
}
}
template {
metadata {
labels = {
name = "health"
}
}
spec {
container {
name = "health"
image = "quanhua92/whoami:latest"
port {
container_port = "8080"
}
}
}
}
}
}
resource kubernetes_service health {
metadata {
name = "health"
namespace = var.playground.namespace
}
spec {
selector = {
name = "health"
}
port {
port = 80
target_port = 8080
name = "http"
}
}
}

View File

@ -0,0 +1,7 @@
resource helm_release nginx {
name = "ingress-nginx"
repository = "https://kubernetes.github.io/ingress-nginx"
chart = "ingress-nginx"
namespace = "ingress-nginx"
create_namespace = true
}

View File

@ -0,0 +1,70 @@
resource kubernetes_ingress_v1 health {
metadata {
name = "health-ingress"
namespace = var.playground.namespace
annotations = {
"cert-manager.io/cluster-issuer" = "letsencrypt"
"cert-manager.io/ingress.class" = "nginx"
}
}
spec {
ingress_class_name = "nginx"
tls {
hosts = [
"sanity.shockrah.xyz",
"uptime.shockrah.xyz",
"code.shockrah.xyz"
]
secret_name = "shockrah"
}
rule {
host = "sanity.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.health.metadata[0].name
port {
number = kubernetes_service.health.spec[0].port[0].port
}
}
}
}
}
}
rule {
host = "uptime.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.kuma.metadata[0].name
port {
number = kubernetes_service.kuma.spec[0].port[0].port
}
}
}
}
}
}
rule {
host = "code.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.gitea.metadata[0].name
port {
number = kubernetes_service.gitea.spec[0].port[0].port
}
}
}
}
}
}
}
}
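A hedged end-to-end check for this ingress; the namespace is assumed to be playground per var.playground.namespace:

```sh
# cert-manager should mint the "shockrah" TLS secret referenced above
kubectl --kubeconfig config.yaml -n playground get certificate,ingress
curl -I https://sanity.shockrah.xyz
curl -I https://uptime.shockrah.xyz
curl -I https://code.shockrah.xyz
```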

View File

@ -1 +0,0 @@
terraform.yaml

View File

@ -1,33 +0,0 @@
terraform {
required_version = ">= 0.13"
backend s3 {
bucket = "project-athens"
key = "infra/vke/k8s/state/build.tfstate"
region = "us-west-1"
encrypt = true
}
required_providers {
# For interacting with S3
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.30.0"
}
}
}
provider aws {
access_key = var.aws_key
secret_key = var.aws_secret
region = var.aws_region
max_retries = 1
}
provider kubernetes {
config_path = "terraform.yaml"
}

View File

@ -1,50 +0,0 @@
resource kubernetes_ingress_v1 athens {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
rule {
host = "test.shockrah.xyz"
http {
path {
backend {
service {
name = var.shockrahxyz.name
port {
number = 80
}
}
}
path = "/"
}
}
}
}
}
resource kubernetes_service athens_lb {
metadata {
name = "athens-websites"
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
selector = {
app = kubernetes_ingress_v1.athens.metadata.0.labels.app
}
port {
port = 80
target_port = 80
}
type = "LoadBalancer"
external_ips = [ var.cluster.ip ]
}
}

View File

@ -1,5 +0,0 @@
resource kubernetes_namespace websites {
metadata {
name = "websites"
}
}

View File

@ -1,62 +0,0 @@
# First we setup the ingress controller with helm
```sh
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
# Now we can install this to our cluster
helm install --kubeconfig config.yaml traefik traefik/traefik
```
# Prove the service is present with
```sh
kubectl --kubeconfig config.yaml get svc
```
# Create the pods
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-dep.yaml
```
# Expose on port 80
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-service.yaml
```
# Create ingress on k8s
```sh
kubectl --kubeconfig config.yaml apply -f k8s/traefik-ingress.yaml
```
# Take the external IP from the ingress
Put that into Terraform's A record for the domain, since this load balancer is an actual resource in Vultr (apparently).
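As a sketch, that record could be managed from Terraform as well, assuming the vultr/vultr provider is configured — the resource name, subdomain, and IP below are placeholders:
```hcl
# Hypothetical A record pointing a test hostname at the ingress load balancer
resource "vultr_dns_record" "traefik_lb" {
  domain = "temprah-lab.xyz"
  name   = "sample"       # becomes sample.temprah-lab.xyz
  type   = "A"
  data   = "203.0.113.10" # the EXTERNAL-IP reported for the traefik service
  ttl    = 300
}
```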
# Configure cert-manager for traefik ingress
Using the latest version from here:
https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml
```sh
kubectl --kubeconfig config.yaml \
apply --validate=false \
-f https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.yaml
```
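Before creating the issuer it is worth confirming the cert-manager pods actually came up:
```sh
kubectl --kubeconfig config.yaml get pods -n cert-manager
```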
# Create the cert issuer and certificate
```sh
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
```
Because we just have one cert for now, we are looking for its status to be `READY`.
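A quick way to watch for that:
```sh
kubectl --kubeconfig config.yaml get certificate --all-namespaces
# and if it stays in a non-ready state:
kubectl --kubeconfig config.yaml describe certificate hello.temprah-lab.xyz
```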

View File

@ -1,21 +0,0 @@
# Plain nginx for now so that we can test out reverse DNS
resource kubernetes_pod shockrah {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = var.shockrahxyz.name
}
}
spec {
container {
image = "nginx"
name = "${var.shockrahxyz.name}"
port {
container_port = 80
}
}
}
}

View File

@ -1,35 +0,0 @@
# API Keys required to reach AWS/Vultr
variable vultr_api_key {
type = string
sensitive = true
}
variable aws_key {
type = string
sensitive = true
}
variable aws_secret {
type = string
sensitive = true
}
variable aws_region {
type = string
sensitive = true
}
variable shockrahxyz {
type = object({
name = string
port = number
dns = string
})
}
variable cluster {
type = object({
ip = string
})
}

View File

@ -1,37 +0,0 @@
# Here we are going to define the deployment and service
# Basically all things directly related to the actual service we want to provide
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: alternate-nginx-web
namespace: default
labels:
app: alternate-nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: alternate-nginx-web
template:
metadata:
labels:
app: alternate-nginx-web
spec:
# Container comes from an example image I randomly found on Docker Hub
containers:
- name: alternate-nginx-web
image: dockerbogo/docker-nginx-hello-world
---
apiVersion: v1
kind: Service
metadata:
name: alternate-nginx-web
namespace: default
spec:
selector:
app: alternate-nginx-web
ports:
- name: http
targetPort: 80
port: 80

View File

@ -1,30 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod-hello
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-prod-hello
solvers:
- http01:
ingress:
class: traefik

View File

@ -1,13 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

View File

@ -1,20 +0,0 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: nginx-web
namespace: default
labels:
app: nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: nginx-web
template:
metadata:
labels:
app: nginx-web
spec:
containers:
- name: nginx
image: nginx

View File

@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-web
namespace: default
spec:
selector:
app: nginx-web
ports:
- name: http
targetPort: 80
port: 80

View File

@ -1,44 +0,0 @@
# This is the first thing we need to create, an issuer to put certs into
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-temprah-lab
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

View File

@ -1,31 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-ingress
namespace: default
labels:
name: project-athens-lb
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: sample.temprah-lab.xyz
http:
paths:
- backend:
service:
name: nginx-web
port:
number: 80
path: /
pathType: Prefix
- host: hello.temprah-lab.xyz
http:
paths:
- backend:
service:
name: alternate-nginx-web
port:
number: 80
path: /
pathType: Prefix

View File

@ -1,15 +1,14 @@
 apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
+kind: Issuer
 metadata:
-  name: letsencrypt-prod
-  namespace: default
+  name: letsencrypt-nginx
 spec:
   acme:
-    server: https://acme-v02.api.letsencrypt.org/directory
     email: dev@shockrah.xyz
+    server: https://acme-v02.api.letsencrypt.org/directory
     privateKeySecretRef:
-      name: letsencrypt-prod
+      name: example
     solvers:
     - http01:
         ingress:
-          class: traefik
+          class: nginx

View File

@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-service
spec:
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whoami-ingress
annotations:
cert-manager.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
ingressClassName: nginx
tls:
- secretName: whoami-tls
hosts:
- example.shockrah.xyz
rules:
- host: example.shockrah.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whoami-service
port:
number: 80

View File

@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-lb
annotations:
service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-interval: "30"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-response-timeout: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-unhealthy-threshold: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-healthy-threshold: "5"
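    # NOTE: these checks assume the backend answers on /health; adjust the path if it does not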
spec:
type: LoadBalancer
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080

View File

@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: whoami
spec:
replicas: 3
selector:
matchLabels:
name: whoami
template:
metadata:
labels:
name: whoami
spec:
containers:
- name: whoami
image: quanhua92/whoami:latest
imagePullPolicy: Always
ports:
- containerPort: 8080

View File

@ -0,0 +1,37 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
preferredChain: "ISRG Root X1"
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: nginx

View File

@ -0,0 +1,18 @@
resource kubernetes_namespace playground {
metadata {
annotations = {
name = var.playground.namespace
}
name = var.playground.namespace
}
}
resource kubernetes_namespace openobserve {
metadata {
annotations = {
names = "openobserve"
}
name = "openobserve"
}
}

View File

@ -0,0 +1,30 @@
resource helm_release shockrah_cert_manager {
name = "cert-manager"
repository = "https://charts.jetstack.io"
chart = "cert-manager"
version = "v1.18.2"
namespace = "cert-manager"
create_namespace = true
cleanup_on_fail = true
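# crds.enabled = true makes the chart install and manage cert-manager's CRDs itself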
set = [
{
name = "crds.enabled"
value = "true"
}
]
}
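# cluster-issuer.yaml holds multiple YAML documents (staging and prod issuers);
# kubectl_file_documents splits them so each one gets its own kubectl_manifest below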
data kubectl_file_documents cluster_issuer {
content = file("cluster-issuer.yaml")
}
resource kubectl_manifest cluster_issuer {
for_each = data.kubectl_file_documents.cluster_issuer.manifests
yaml_body = each.value
depends_on = [
data.kubectl_file_documents.cluster_issuer
]
}

View File

@ -0,0 +1,61 @@
resource kubernetes_deployment kuma {
metadata {
name = "kuma"
namespace = var.playground.namespace
labels = {
"app" = "kuma"
}
}
spec {
replicas = 1
selector {
match_labels = {
"app" = "kuma"
}
}
template {
metadata {
labels = {
"app" = "kuma"
}
}
spec {
container {
name = "kuma"
image = "louislam/uptime-kuma:2"
port {
container_port = 3001
name = "uptime-kuma"
}
volume_mount {
name = "kuma-data"
mount_path = "/app/data"
}
}
volume {
name = "kuma-data"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim_v1.kuma.metadata[0].name
}
}
}
}
}
}
resource kubernetes_service kuma {
metadata {
name = "kuma"
namespace = var.playground.namespace
}
spec {
selector = {
"app" = "kuma"
}
port {
target_port = "uptime-kuma"
port = 3001
name = "http"
}
}
}

View File

@ -26,46 +26,36 @@ variable cluster {
     label = string
     version = string
     pools = map(object({
-      plan = string
-      autoscale = bool
-      min = number
-      max = number
+      node_quantity = number
+      plan = string
+      label = string
+      min_nodes = number
+      max_nodes = number
+      tag = string
     }))
   })
 }
-variable game_servers {
+variable playground {
   type = object({
     namespace = string
-    configs = map(object({
-      name = optional(string)
-      image = string
-      cpu = string
-      mem = string
-      port = object({
-        internal = number
-        expose = number
-      })
-      proto = optional(string)
-    }))
+    health = object({
+      dns = string
+    })
+    tls = object({
+      email = string
+    })
   })
 }
-variable admin_services {
+variable bastion {
   type = object({
-    namespace = string
-    configs = map(object({
-      name = string
-      image = string
-      cpu = string
-      mem = string
-      port = object({
-        notes = optional(string)
-        internal = number
-        expose = number
-      })
-      proto = optional(string)
-    }))
+    plan = string
+    os = string
+    label = string
   })
 }

View File

@ -1,51 +1,34 @@
 cluster = {
   region = "lax"
   label = "athens-cluster"
-  version = "v1.31.2+1"
+  version = "v1.33.0+3"
   pools = {
-    meta = {
-      plan = "vc2-1c-2gb"
-      autoscale = true
-      min = 1
-      max = 2
-    }
-    games = {
-      plan = "vc2-1c-2gb"
-      autoscale = true
-      min = 1
-      max = 3
+    main = {
+      node_quantity = 1
+      plan = "vc2-2c-4gb"
+      label = "main"
+      min_nodes = 1
+      max_nodes = 2
+      tag = "athens-main"
     }
   }
 }
-game_servers = {
-  namespace = "games"
-  configs = {
-    # minecraft = {
-    #   image = "itzg/minecraft-server"
-    #   cpu = "1000m"
-    #   mem = "2048Mi"
-    #   port = {
-    #     expose = 30808
-    #     internal = 80
-    #   }
-    # }
-  }
+playground = {
+  namespace = "playground"
+  # Sanity check service that is used purely for the sake of ensuring
+  # things are ( at a basic level ) functional
+  health = {
+    dns = "health"
+  }
+  tls = {
+    email = "dev@shockrah.xyz"
+  }
 }
-admin_services = {
-  namespace = "admin-services"
-  configs = {
-    # health = {
-    #   image = "nginx:latest"
-    #   name = "health"
-    #   cpu = "200m"
-    #   mem = "64Mi"
-    #   port = {
-    #     notes = "Basic nginx sanity check service"
-    #     expose = 30800
-    #     internal = 80
-    #   }
-    # }
-  }
+bastion = {
+  plan = "vc2-1c-2gb"
+  label = "bastion"
+  os = "1743"
 }

View File

@ -0,0 +1,32 @@
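# 10Gi ReadWriteOnce claims for the uptime-kuma and gitea state;
# RWO is sufficient since each workload runs a single replica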
resource kubernetes_persistent_volume_claim_v1 kuma {
metadata {
name = "kuma-data"
namespace = var.playground.namespace
}
spec {
volume_mode = "Filesystem"
access_modes = [ "ReadWriteOnce"]
resources {
requests = {
storage = "10Gi"
}
}
}
}
resource kubernetes_persistent_volume_claim_v1 gitea {
metadata {
name = "gitea-data"
namespace = var.playground.namespace
}
spec {
volume_mode = "Filesystem"
access_modes = [ "ReadWriteOnce"]
resources {
requests = {
storage = "10Gi"
}
}
}
}