5 Commits

Author SHA1 Message Date
510baa7f94 Basic setup now passing initial checks
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
2025-03-04 15:14:22 -08:00
088846cad9 Ensure that static hosts have docker and the latest python versions installed
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
2025-03-04 12:34:41 -08:00
1be3a8e588 Quick fix for ansible-lint things
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
2025-03-04 11:46:17 -08:00
da580eb7d2 Removing bogus wiki stuff
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
2025-03-04 11:44:09 -08:00
a8d7c01efe Slowly building out the new workflows
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
2025-03-04 11:20:00 -08:00
131 changed files with 1238 additions and 1189 deletions

1
.gitattributes vendored
View File

@@ -1 +0,0 @@
wiki-resources/images/* filter=lfs diff=lfs merge=lfs -text

View File

@@ -0,0 +1,15 @@
name: Ansible Linting
on:
- push
jobs:
ansible-lint:
runs-on: ubuntu-latest
container:
image: shockrah/ansible
steps:
- name: Checkout repo content
uses: actions/checkout@v4
- run: ansible-lint
working-directory: ansible/

View File

@@ -0,0 +1,19 @@
name: Secops Linting and Safety Checks
on:
push:
branches:
- master
jobs:
checkov-scan-s3:
runs-on: ubuntu-latest
steps:
- name: Checkout repo code
uses: actions/checkout@v4
- name: Scan S3 Terraform with Checkov
uses: bridgecrewio/checkov-action@master
with:
directory: infra/s3/
framework: terraform

View File

@@ -1,17 +0,0 @@
name: Wiki Resources Sanity Checks
on:
push:
branches:
- master
jobs:
ruff-checks:
steps:
- name: Checkout repo
uses: actions/checkout@v4
- name: Perform linting with ruff
uses: astral-sh/ruff-action@v3
with:
src: "wiki-resources"

9
.gitignore vendored
View File

@@ -21,12 +21,3 @@ docker/beta/shockrah.xyz/
 docker/beta/resume.shockrah.xyz/
 k8s/config.yaml
 infra/**/tfplan
-.ansible/
-wiki-resources/public_infrastructure.png
-bin/
-lib/
-lib64
-pyvenv.cfg
-share/

13
.gitlab-ci.yml Normal file
View File

@@ -0,0 +1,13 @@
# You can override the included template(s) by including variable overrides
# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings
# Secret Detection customization: https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#customization
# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings
# Container Scanning customization: https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings
# Note that environment variables can be set in several places
# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence
stages:
- test
sast:
stage: test
include:
- template: Security/SAST.gitlab-ci.yml

View File

@@ -1,21 +0,0 @@
---
repos:
- repo: https://github.com/ansible/ansible-lint
rev: v26.4.0
hooks:
- id: ansible-lint
name: ansible-lint
description: Run ansible-lint on playbooks
entry: ansible-lint
args:
- ansible
language: python
exclude: |
(?x)(
^deprecated/|
^infra/|
^runbooks/|
^wiki-resources/|
^\.pre-commit-config.yaml|
^\.gitignore
)

View File

@@ -1,3 +0,0 @@
nigel:
hosts:
nigel.local:

View File

@@ -1,7 +0,0 @@
---
skip_list:
- role-name
- var-naming[no-role-prefix]
exclude_paths:
- linter.yaml
- inventory.yaml

View File

@@ -1,28 +0,0 @@
# This playbook is meant to be a oneshot that is run manually from the dev box.
# The rest of the role work is meant to be run as the admin user that
# this playbook creates for us.
---
- name: Setup local admin user with a fresh ubuntu host
hosts: nigel.local
remote_user: nigel
vars:
admin:
username: nigel
tasks:
- name: Copy the nigel admin key
ansible.posix.authorized_key:
user: "{{ admin.username }}"
state: present
key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
- name: Prevent password based logins
become: true
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
- name: Restart SSH Daemon
become: true
ansible.builtin.service:
name: ssh
state: restarted

View File

@@ -1,8 +0,0 @@
---
- name: Setup all the responsibilities of the nomad server
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the nomad role
ansible.builtin.include_role:
name: nomad

View File

@@ -1,14 +0,0 @@
---
- name: Setup bare metal requirements
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the base role to the nuc
ansible.builtin.include_role:
name: base
- name: Apply the k3s base role
ansible.builtin.include_role:
name: k3s
- name: Apply the proxy role
ansible.builtin.include_role:
name: proxy

View File

@@ -1,8 +0,0 @@
---
- name: Setup host as a reverse proxy
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply reverse proxy role
ansible.builtin.include_role:
name: proxy

View File

@@ -1 +0,0 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable

View File

@@ -1,41 +0,0 @@
- name: Ensure we have basic updated packages before setting up docker
ansible.builtin.apt:
name: "{{ item }}"
update_cache: true
loop:
- ca-certificates
- curl
- name: Running install on the keyrings directory
ansible.builtin.command:
cmd: install -m 0755 -d /etc/apt/keyrings
register: base_install_keyrings
changed_when: base_install_keyrings.rc == 0
- name: Fetch Docker GPG Key
vars:
keylink: https://download.docker.com/linux/ubuntu/gpg
ansible.builtin.get_url:
url: "{{ keylink }}"
dest: /etc/apt/keyrings/docker.asc
mode: "0644"
- name: Add repo to apt sources
ansible.builtin.copy:
src: docker.list
dest: /etc/apt/sources.list.d/docker.list
mode: "0644"
- name: Update Apt cache with latest docker.list packages
ansible.builtin.apt:
update_cache: true
- name: Ensure all docker packages are updated to the latest versions
ansible.builtin.apt:
name: "{{ item }}"
loop:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- name: Verify that the docker components are installed properly
ansible.builtin.command:
cmd: docker run hello-world
register: base_docker_hello
changed_when: base_docker_hello.rc == 0

View File

@@ -1,10 +0,0 @@
- name: Download the setup script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s.sh
mode: "0644"
- name: Run installation script
ansible.builtin.command:
cmd: bash /tmp/k3s.sh
register: base_k3s_install_script
changed_when: base_k3s_install_script.rc == 0

View File

@@ -1,17 +0,0 @@
- name: Ensure nigel can use sudo without password
become: true
tags:
- setup
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
tags:
- setup
ansible.builtin.include_tasks:
file: ensure-docker-basic.yaml
apply:
become: true
tags:
- setup

View File

@@ -1,12 +0,0 @@
bind_addr = "{{ ip }}"
advertise_addr = "{{ ip }}"
bootstrap = true
bootstrap_expect = 1
client_addr = "{{ ip }}"
server = true
data_dir = "/opt/consul"
ui_config {
enabled = true
}

View File

@@ -1 +0,0 @@
deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main

View File

@@ -1,14 +0,0 @@
- name: Download the installation script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp
mode: "0644"
register: k3s_dl_install_script
- name: Run installation script
become: true
environment:
INSTALL_K3S_EXEC: server
ansible.builtin.command:
cmd: sh {{ k3s_dl_install_script.dest }}
register: k3s_install_script
changed_when: k3s_install_script.rc == 0

View File

@@ -1,24 +0,0 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
server {
enabled = true
bootstrap_expect = 1
}
client {
enabled = true
servers = ["127.0.0.1"]
}
host_volume "registry" {
path = "/opt/volumes/registry"
read_only = false
}
host_volume "nfs" {
path = "/opt/volumes/nfs"
read_only = false
}

View File

@@ -1,18 +0,0 @@
- name: Nomad server configuration
become: true
block:
- name: Ensure the root data directory is present
ansible.builtin.file:
path: "{{ nomad_data.volumes.root }}"
state: directory
mode: "0755"
- name: Ensure registry volume is present
ansible.builtin.file:
path: "{{ nomad_data.volumes.registry }}"
state: directory
mode: "0755"
- name: Ensure the MinIO directory is present
ansible.builtin.file:
path: "{{ nomad_data.volumes.nfs }}"
state: directory
mode: "0755"

View File

@@ -1,5 +0,0 @@
nomad_data:
volumes:
root: /opt/volumes
registry: /opt/volumes/ncr
nfs: /opt/volumes/nfs

View File

@@ -1,15 +0,0 @@
127.0.0.1 localhost
127.0.1.1 nigel
# Our own dns stuff
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

View File

@@ -1,6 +0,0 @@
server {
server_name ncr.nigel.local;
location / {
proxy_pass http://localhost:5000;
}
}

View File

@@ -1,25 +0,0 @@
server {
server_name nomad.nigel.local;
location / {
proxy_pass http://nomad-ws;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 319s;
# This is for log streaming requests
proxy_buffering off;
# Upgrade and Connection headers for upgrading to websockets
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "${scheme}://${proxy_host}";
}
}
upstream nomad-ws {
ip_hash;
server nomad.nigel.local:4646;
}

View File

@@ -1,28 +0,0 @@
- name: Reverse proxy role configuration
become: true
block:
- name: Ensure /etc/hosts are up to date
ansible.builtin.copy:
dest: /etc/hosts
src: host-file
mode: "0644"
- name: Ensure nginx is setup as latest
ansible.builtin.apt:
name: nginx
- name: Copy the nomad.conf to available configurations
ansible.builtin.copy:
src: "{{ item }}"
dest: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Link the nomad.conf to sites-enabled
ansible.builtin.file:
path: "/etc/nginx/sites-enabled/{{ item }}"
state: link
src: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Restart nginx
ansible.builtin.systemd_service:
name: nginx
state: restarted

View File

@@ -1,3 +0,0 @@
proxy_nginx_configs:
- nomad.conf
- ncr.conf

View File

@@ -1,4 +0,0 @@
- name: Restart host to apply any changes and clear out uptime stuff
become: true
ansible.builtin.reboot:
msg: "Reboot initiated as a part of housekeeping"

View File

@@ -1,17 +0,0 @@
- name: Run docker pull for latest images
become: true
ansible.builtin.command:
cmd: docker compose pull
chdir: "{{ webserver_admin.home }}/{{ item }}"
loop:
- services
register: webserver_docker_pull
changed_when: webserver_docker_pull.rc == 0
- name: Restart containers with newest container images
ansible.builtin.command:
cmd: docker compose up -d
chdir: "{{ webserver_admin.home }}/{{ item }}"
loop:
- services
register: webserver_docker_restart
changed_when: webserver_docker_restart.rc == 0

View File

@@ -1,8 +0,0 @@
- name: Update all packages to ensure compliance with latest updates
tags:
- housekeeping
ansible.builtin.apt:
update_cache: true
autoclean: true
autoremove: true
upgrade: safe

View File

@@ -1,2 +0,0 @@
webserver_admin:
home: /home/webadmin

View File

@@ -0,0 +1,23 @@
#!/bin/bash
set -e
bucket="$1"
s3env=/opt/nginx/s3.env
[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1
[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env
pull() {
aws s3 sync s3://$bucket /opt/nginx/$bucket
}
case $bucket in
resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
*) echo "Invalid bucket name" && exit 1 ;;
esac

View File

@@ -0,0 +1,40 @@
networks:
gitea:
external: false
services:
gitea:
image: gitea/gitea:latest-rootless
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "2222:22"
gitea-runner:
image: gitea/act_runner:nightly
container_name: gitea-runner
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea_runner/
- /var/run/docker.sock:/var/run/docker.sock
environment:
- GITEA_INSTANCE_URL=https://git.shockrah.xyz
- GITEA_RUNNER_NAME=gitea-main
- GITEA_RUNNER_LABELS=gitea-main
- GITEA_RUNNER_REGISTRATION_TOKEN=${token}

View File

@@ -0,0 +1,29 @@
What is this
============
This directory contains the scripts used to build out all the containers we run.
All of these images are based on images produced by other projects.

docker-compose.yaml
===================
Services that are more or less "special" go here, since most of what runs on
the main host is basically just static HTML websites.

Services & Containers
=====================

| Service    | Docker Image Used        |
|------------|--------------------------|
| Gitea      | gitea/gitea:latest       |
| Act Runner | gitea/act_runner:nightly |

Why the services above?
=======================
The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" offerings. I have no issue with Github/Gitlab,
but I just like being able to host my own stuff when possible :smiley:
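
Bringing the stack up
=====================
A minimal sketch of how this stack can be brought up, assuming a runner
registration token has already been generated in the Gitea admin UI (the
compose file interpolates it as `${token}`; the value below is a placeholder):

```sh
# Hypothetical placeholder: paste the registration token from the Gitea admin UI
export token=<registration-token>
docker compose up -d
```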

34
infra/dns/build.sh Normal file
View File

@@ -0,0 +1,34 @@
#!/bin/bash
set -e
opt=$1
plan=tfplan
build_plan() {
echo Generating plan
set -x
terraform plan -var-file variables.tfvars -input=false -out $plan
}
deploy_plan() {
terraform apply $plan
}
init() {
terraform init
}
help_prompt() {
cat <<- EOF
Options: plan deploy help
EOF
}
# Secrets are required for every terraform operation below
source ./secrets.sh
case $opt in
plan) build_plan;;
deploy) deploy_plan;;
*) help_prompt;;
esac

View File

@@ -37,10 +37,6 @@ locals {
     { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
     { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
     { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
-    { name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
-    { name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
-    { name = "code.shockrah.xyz", records = [ var.vke_lb ] },
-    { name = "wiki.shockrah.xyz", records = [ var.vke_lb ] },
   ]
 }

View File

@@ -26,7 +26,3 @@ variable "vultr_host" {
   description = "IP of the temp Vultr host"
 }
-variable "vke_lb" {
-  type = string
-  description = "IP of our VKE load balancer"
-}

View File

@@ -1,2 +1 @@
 vultr_host = "45.32.83.83"
-vke_lb = "45.32.89.101"

24
infra/s3/Makefile Normal file
View File

@@ -0,0 +1,24 @@
plan=out.plan
SHELL := /bin/bash
$(plan):
source ../secrets/set-env.sh && terraform plan -input=false -out $(plan)
push: build
source ../secrets/set-env.sh && terraform apply $(plan)
refresh:
source ../secrets/set-env.sh && terraform apply -refresh-only
test:
terraform validate
rip:
source ../secrets/set-env.sh && terraform destroy
clean:
rm -f $(plan)
.PHONY: test build clean push rip

24
infra/s3/backend.tf Normal file
View File

@@ -0,0 +1,24 @@
terraform {
required_version = ">= 0.13"
backend "s3" {
bucket = "project-athens"
key = "infra/s3/state/build.tfstate"
region = "us-west-1"
encrypt = true
}
required_providers {
aws = {
source = "hashicorp/aws"
version = "4.13.0"
}
}
}
# Base config for using AWS features w/ Terraform
provider "aws" {
access_key = var.aws_key
secret_key = var.aws_secret
region = var.aws_region
max_retries = 1
}

93
infra/s3/input-vars.tf Normal file
View File

@@ -0,0 +1,93 @@
# All variables that are used in various places go here
######################### General provider specific values
variable "aws_key" {
description = "Access Key for AWS operations"
type = string
sensitive = true
}
variable "aws_secret" {
description = "Secret Key for AWS operations"
type = string
sensitive = true
}
variable "aws_region" {
description = "Region where the VPC is located"
type = string
sensitive = true
}
variable "vpc_id" {
description = "Project Athens VPC ID"
type = string
}
######################### Alpha Cluster variables
variable "athens_prefix" {
description = "Prefix for all things in alpha cluster"
type = string
default = "athens"
}
######################### Nginx reverse proxy vars
# Yes these buckets _could_ be public but where's the fun in that :x
variable "shockrah_xyz_s3_access_key_id" {
description = "Acess key for reading public s3 buckets"
type = string
sensitive = true
}
variable "shockrah_xyz_s3_secret_key" {
description = "Secret key for reading public s3 buckets"
type = string
sensitive = true
}
variable "nginx_port" {
description = "Port for shockrah.xyz"
type = number
default = 80
}
######################### Nginx reverse proxy vars
variable "shockrah_xyz_bucket" {
description = "S3 bucket name"
type = string
default = "shockrah_xyz"
}
variable "resume_shockrah_xyz_bucket" {
description = "S3 bucket name"
type = string
default = "resume_shockrah_xyz"
}
variable "temper" {
type = object({
cert_arn = string
})
}
variable "sg" {
type = object({
base_ecs = string
ecs_web_ingress = string
lb_health_check = string
})
}
variable "alpha" {
type = object({
dns = string
zone = string
})
}

8
infra/s3/local.tf Normal file
View File

@@ -0,0 +1,8 @@
locals {
buckets = [
"shockrah.xyz",
"resume.shockrah.xyz",
"temper.tv"
]
}

17
infra/s3/s3.tf Normal file
View File

@@ -0,0 +1,17 @@
resource "aws_s3_bucket" "static-content" {
for_each = {
for idx, record in local.buckets:
idx => record
}
bucket = each.value
tags = {
Name = each.value
Description = "Static content"
}
}

View File

@@ -0,0 +1,53 @@
##################################################################
# Below are the acl components for each bucket to make them public
##################################################################
# TODO: ensure proper dependency chaining to the buckets that these
# blocks require to be in place _before_ they come up
# Enables website configuration
resource "aws_s3_bucket_website_configuration" "site" {
for_each = aws_s3_bucket.static-content
bucket = each.value.bucket
index_document {
suffix = "index.html"
}
error_document {
key = "404.html"
}
}
# Set block public access to false
resource "aws_s3_bucket_public_access_block" "site" {
for_each = aws_s3_bucket.static-content
bucket = each.value.bucket
block_public_acls = false
block_public_policy = false
ignore_public_acls = false
restrict_public_buckets = false
}
# Set a policy on the bucket to allow reads from anywhere
resource "aws_s3_bucket_policy" "site" {
for_each = aws_s3_bucket.static-content
bucket = each.value.bucket
policy = jsonencode({
Version = "2012-10-17"
Statement = [
{
Sid = "PublicReadGetObject"
Effect = "Allow"
Principal = "*"
Action = "s3:GetObject"
Resource = [
"arn:aws:s3:::${each.value.bucket}",
"arn:aws:s3:::${each.value.bucket}/*",
]
}
]
})
}

View File

@@ -0,0 +1,62 @@
resource kubernetes_namespace admin-servers {
count = length(var.admin_services.configs) > 0 ? 1 : 0
metadata {
name = var.admin_services.namespace
}
}
resource kubernetes_pod admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
spec {
node_selector = {
NodeType = var.admin_services.namespace
}
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
# TODO: don't make these NodePorts since we're gonna want them
# to be purely internal to the Cluster.
# WHY? Because we want to keep dashboards as unexposed as possible
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@@ -9,31 +9,15 @@ terraform {
   required_providers {
     aws = {
       source = "hashicorp/aws"
-      version = "6.27.0"
+      version = "~> 5.0"
     }
     vultr = {
       source = "vultr/vultr"
-      version = "2.26.0"
+      version = "2.22.1"
     }
     kubernetes = {
       source = "hashicorp/kubernetes"
-      version = "3.0.1"
+      version = "2.34.0"
     }
-    kubectl = {
-      source = "gavinbunney/kubectl"
-      version = "1.19.0"
-    }
-    helm = {
-      source = "hashicorp/helm"
-      version = "3.0.2"
-    }
-    tls = {
-      source = "hashicorp/tls"
-      version = "4.1.0"
-    }
-    random = {
-      source = "hashicorp/random"
-      version = "3.7.2"
-    }
   }
 }
@@ -55,12 +39,4 @@ provider kubernetes {
   config_path = "config.yaml"
 }
-provider kubectl {
-  config_path = "config.yaml"
-}
-provider helm {
-  kubernetes = {
-    config_path = "config.yaml"
-  }
-}

View File

@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
preferredChain: "ISRG Root X1"
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

View File

@@ -2,18 +2,29 @@ resource vultr_kubernetes athens {
   region = var.cluster.region
   version = var.cluster.version
   label = var.cluster.label
-  # vpc_id = vultr_vpc.athens.id
+  # BUG: only have this set when creating the resource for the first time
+  # once the cluster is up, we should comment this out again
+  # enable_firewall = true
   node_pools {
-    node_quantity = var.cluster.pools["main"].min_nodes
-    plan = var.cluster.pools["main"].plan
-    label = var.cluster.pools["main"].label
-    min_nodes = var.cluster.pools["main"].min_nodes
-    max_nodes = var.cluster.pools["main"].max_nodes
-    auto_scaler = true
+    node_quantity = 1
+    plan = var.cluster.pools["meta"].plan
+    label = var.admin_services.namespace
+    min_nodes = var.cluster.pools["meta"].min
+    max_nodes = var.cluster.pools["meta"].max
+    # tag = var.admin_services.namespace
   }
 }
+resource vultr_kubernetes_node_pools games {
+  cluster_id = vultr_kubernetes.athens.id
+  node_quantity = var.cluster.pools["games"].min
+  plan = var.cluster.pools["games"].plan
+  label = var.game_servers.namespace
+  min_nodes = var.cluster.pools["games"].min
+  max_nodes = var.cluster.pools["games"].max
+  tag = var.admin_services.namespace
+}
 output k8s_config {
   value = vultr_kubernetes.athens.kube_config
   sensitive = true

View File

@@ -1,6 +0,0 @@
data vultr_kubernetes athens {
filter {
name = "label"
values = [ var.cluster.label ]
}
}

4
infra/vultr-kubernetes/dev/.gitignore vendored Normal file
View File

@@ -0,0 +1,4 @@
# created by virtualenv automatically
bin/
lib/

View File

@@ -0,0 +1,51 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re
def get_args() -> Namespace:
parser = ArgumentParser(
prog="Cluster Search Thing",
description="General utility for finding resources for game server bot"
)
games = {"reflex", "minecraft"}
parser.add_argument('-g', '--game', required=False, choices=games)
admin = {"health"}
parser.add_argument('-a', '--admin', required=False, choices=admin)
return parser.parse_args()
def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
config.load_kube_config("../config.yaml")
return client.CoreV1Api()
def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
print('admin thing requested', args.admin)
def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
node_name = pods.items[0].spec.node_name
services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
port = services.items[0].spec.ports[0].port
# Collecting the IPV4 of the node that contains the pod(container)
# we actually care about. Since these pods only have 1 container
# Now we collect specific data about the game server we requested
node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')
if __name__ == '__main__':
args = get_args()
api = k8s_api('../config.yaml')
if args.game:
get_game_server_ip(args, api)
if args.admin:
get_admin_service_details(args, api)

View File

@@ -0,0 +1,8 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3

View File

@@ -0,0 +1,18 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0

View File

@@ -1,10 +1,32 @@
-# resource vultr_firewall_rule web_inbound {
-#   for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
-#   firewall_group_id = vultr_kubernetes.athens.firewall_group_id
-#   protocol = "tcp"
-#   ip_type = "v4"
-#   subnet = "0.0.0.0"
-#   subnet_size = 0
-#   port = each.value
-# }
+resource vultr_firewall_rule web_inbound {
+  for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  port = each.value
+}
+resource vultr_firewall_rule game-server-inbound {
+  for_each = var.game_servers.configs
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  port = each.value.port.expose
+}
+resource vultr_firewall_rule admin-service-inbound {
+  for_each = var.admin_services.configs
+  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+  protocol = "tcp"
+  ip_type = "v4"
+  subnet = "0.0.0.0"
+  subnet_size = 0
+  notes = each.value.port.notes
+  port = each.value.port.expose
+}

View File

@@ -0,0 +1,55 @@
resource kubernetes_namespace game-servers {
count = length(var.game_servers.configs) > 0 ? 1 : 0
metadata {
name = var.game_servers.namespace
}
}
resource kubernetes_pod game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

View File

@@ -1,74 +0,0 @@
# NOTE: this is a simple deployment for demo purposes only.
# Currently it does support SSH access and lacks Gitea runners.
# However a fully working setup can be found at: https://git.shockrah.xyz
resource kubernetes_deployment gitea {
metadata {
name = "gitea"
namespace = var.playground.namespace
labels = {
"app" = "gitea"
}
}
spec {
replicas = 1
selector {
match_labels = {
"app" = "gitea"
}
}
template {
metadata {
labels = {
"app" = "gitea"
}
}
spec {
container {
name = "gitea"
image = "gitea/gitea:latest"
port {
container_port = 3000
name = "gitea-main"
}
port {
container_port = 2222
name = "gitea-ssh"
}
volume_mount {
name = "gitea"
mount_path = "/data"
}
}
volume {
name = "gitea"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim_v1.gitea.metadata[0].name
}
}
}
}
}
}
resource kubernetes_service gitea {
metadata {
name = "gitea"
namespace = var.playground.namespace
}
spec {
selector = {
"app" = "gitea"
}
port {
target_port = "gitea-main"
port = 3000
name = "http"
}
port {
target_port = "gitea-ssh"
port = 2222
name = "ssh"
}
}
}

View File

@@ -1,47 +0,0 @@
resource kubernetes_deployment_v1 health {
metadata {
name = "health"
namespace = var.playground.namespace
}
spec {
replicas = 1
selector {
match_labels = {
name = "health"
}
}
template {
metadata {
labels = {
name = "health"
}
}
spec {
container {
name = "health"
image = "quanhua92/whoami:latest"
port {
container_port = "8080"
}
}
}
}
}
}
resource kubernetes_service_v1 health {
metadata {
name = "health"
namespace = var.playground.namespace
}
spec {
selector = {
name = "health"
}
port {
port = 80
target_port = 8080
name = "http"
}
}
}

View File

@@ -1,7 +0,0 @@
resource helm_release nginx {
name = "ingress-nginx"
repository = "https://kubernetes.github.io/ingress-nginx"
chart = "ingress-nginx"
namespace = "ingress-nginx"
create_namespace = true
}

View File

@@ -1,48 +0,0 @@
locals {
services = {
"code.shockrah.xyz" = kubernetes_service.gitea
"sanity.shockrah.xyz" = kubernetes_service_v1.health
"uptime.shockrah.xyz" = kubernetes_service.kuma
"wiki.shockrah.xyz" = kubernetes_service.otterwiki
}
}
resource kubernetes_ingress_v1 health {
metadata {
name = "health-ingress"
namespace = var.playground.namespace
annotations = {
"cert-manager.io/cluster-issuer" = "letsencrypt"
"cert-manager.io/ingress.class" = "nginx"
}
}
spec {
ingress_class_name = "nginx"
dynamic tls {
for_each = local.services
content {
hosts = [tls.key]
secret_name = "${tls.value.metadata[0].name}-secret"
}
}
dynamic "rule" {
for_each = local.services
content {
host = "${rule.key}"
http {
path {
path = "/"
backend {
service {
name = rule.value.metadata[0].name
port {
number = rule.value.spec[0].port[0].port
}
}
}
}
}
}
}
}
}

1
infra/vultr-kubernetes/k8s/.gitignore vendored Normal file
View File

@@ -0,0 +1 @@
terraform.yaml

View File

@@ -0,0 +1,33 @@
terraform {
required_version = ">= 0.13"
backend s3 {
bucket = "project-athens"
key = "infra/vke/k8s/state/build.tfstate"
region = "us-west-1"
encrypt = true
}
required_providers {
# For interacting with S3
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.30.0"
}
}
}
provider aws {
access_key = var.aws_key
secret_key = var.aws_secret
region = var.aws_region
max_retries = 1
}
provider kubernetes {
config_path = "terraform.yaml"
}

View File

@@ -0,0 +1,50 @@
resource kubernetes_ingress_v1 athens {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
rule {
host = "test.shockrah.xyz"
http {
path {
backend {
service {
name = var.shockrahxyz.name
port {
number = 80
}
}
}
path = "/"
}
}
}
}
}
resource kubernetes_service athens_lb {
metadata {
name = "athens-websites"
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
selector = {
app = kubernetes_ingress_v1.athens.metadata.0.labels.app
}
port {
port = 80
target_port = 80
}
type = "LoadBalancer"
external_ips = [ var.cluster.ip ]
}
}

View File

@@ -0,0 +1,5 @@
resource kubernetes_namespace websites {
metadata {
name = "websites"
}
}

View File

@@ -0,0 +1,62 @@
# First we set up the ingress controller with helm
```sh
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
# Now we can install this to our cluster
helm install --kubeconfig config.yaml traefik traefik/traefik
```
# Prove the service is present with
```sh
kubectl --kubeconfig config.yaml get svc
```
# Create the pods
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-dep.yaml
```
# Expose on port 80
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-service.yaml
```
# Create ingress on k8s
```sh
kubectl --kubeconfig config.yaml apply -f k8s/traefik-ingress.yaml
```
# Take the external IP from the ingress
Put that into Terraform's A record for the domain, since this load balancer
is an actual resource in Vultr (apparently).
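A sketch of grabbing that IP, assuming the helm release above created the
default `traefik` service in the current namespace:
```sh
kubectl --kubeconfig config.yaml get svc traefik \
    -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```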
# Configure cert-manager for traefik ingress
Using the latest version from here:
https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml
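If the CRDs need to be applied on their own before the rest of cert-manager,
something like this should work (a sketch, using the same v1.14.2 release as
the link above):
```sh
kubectl --kubeconfig config.yaml apply -f \
    https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml
```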
```sh
kubectl --kubeconfig config.yaml \
apply --validate=false \
-f https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.yaml
```
# Create the cert issuer and certificate
```sh
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
```
Because we only have one cert for now, we are looking for its status to be `READY`.
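One way to watch for that, assuming the cert-manager CRDs were installed as above:
```sh
kubectl --kubeconfig config.yaml get certificate --all-namespaces
```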

View File

@@ -0,0 +1,21 @@
# Plain nginx for now so that we can test out reverse dns
resource kubernetes_pod shockrah {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = var.shockrahxyz.name
}
}
spec {
container {
image = "nginx"
name = "${var.shockrahxyz.name}"
port {
container_port = 80
}
}
}
}

View File

@@ -0,0 +1,35 @@
# API Keys required to reach AWS/Vultr
variable vultr_api_key {
type = string
sensitive = true
}
variable aws_key {
type = string
sensitive = true
}
variable aws_secret {
type = string
sensitive = true
}
variable aws_region {
type = string
sensitive = true
}
variable shockrahxyz {
type = object({
name = string
port = number
dns = string
})
}
variable cluster {
type = object({
ip = string
})
}

View File

@@ -0,0 +1,37 @@
# Here we are going to define the deployment and service
# Basically all things directly related to the actual service we want to provide
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: alternate-nginx-web
namespace: default
labels:
app: alternate-nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: alternate-nginx-web
template:
metadata:
labels:
app: alternate-nginx-web
spec:
# Container comes from an example thing i randomly found on docker hub
containers:
- name: alternate-nginx-web
image: dockerbogo/docker-nginx-hello-world
---
apiVersion: v1
kind: Service
metadata:
name: alternate-nginx-web
namespace: default
spec:
selector:
app: alternate-nginx-web
ports:
- name: http
targetPort: 80
port: 80

View File

@@ -0,0 +1,30 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod-hello
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-prod-hello
solvers:
- http01:
ingress:
class: traefik

View File

@@ -0,0 +1,13 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

View File

@@ -1,14 +1,15 @@
 apiVersion: cert-manager.io/v1
-kind: Issuer
+kind: ClusterIssuer
 metadata:
-  name: letsencrypt-nginx
+  name: letsencrypt-prod
+  namespace: default
 spec:
   acme:
-    email: dev@shockrah.xyz
     server: https://acme-v02.api.letsencrypt.org/directory
+    email: dev@shockrah.xyz
     privateKeySecretRef:
-      name: example
+      name: letsencrypt-prod
     solvers:
     - http01:
         ingress:
-          class: nginx
+          class: traefik

View File

@@ -0,0 +1,20 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: nginx-web
namespace: default
labels:
app: nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: nginx-web
template:
metadata:
labels:
app: nginx-web
spec:
containers:
- name: nginx
image: nginx

View File

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-web
namespace: default
spec:
selector:
app: nginx-web
ports:
- name: http
targetPort: 80
port: 80

View File

@@ -0,0 +1,44 @@
# This is the first thing we need to create: an issuer to put certs into
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-temprah-lab
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

View File

@@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-ingress
namespace: default
labels:
name: project-athens-lb
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: sample.temprah-lab.xyz
http:
paths:
- backend:
service:
name: nginx-web
port:
number: 80
path: /
pathType: Prefix
- host: hello.temprah-lab.xyz
http:
paths:
- backend:
service:
name: alternate-nginx-web
port:
number: 80
path: /
pathType: Prefix

View File

@@ -1,36 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-service
spec:
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whoami-ingress
annotations:
cert-manager.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
ingressClassName: nginx
tls:
- secretName: whoami-tls
hosts:
- example.shockrah.xyz
rules:
- host: example.shockrah.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whoami-service
port:
number: 80

View File

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-lb
annotations:
service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-interval: "30"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-response-timeout: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-unhealthy-threshold: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-healthy-threshold: "5"
spec:
type: LoadBalancer
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080

View File

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: whoami
spec:
replicas: 3
selector:
matchLabels:
name: whoami
template:
metadata:
labels:
name: whoami
spec:
containers:
- name: whoami
image: quanhua92/whoami:latest
imagePullPolicy: Always
ports:
- containerPort: 8080

View File

@@ -1,37 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
preferredChain: "ISRG Root X1"
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: nginx

View File

@@ -1,10 +0,0 @@
resource kubernetes_namespace playground {
metadata {
annotations = {
names = var.playground.namespace
}
name = var.playground.namespace
}
}

Some files were not shown because too many files have changed in this diff.