Compare commits

..

5 Commits

| SHA1 | Message | Checks | Date |
| ---- | ------- | ------ | ---- |
| 510baa7f94 | Basic setup now passing initial checks | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 15:14:22 -08:00 |
| 088846cad9 | Ensure that static hosts have docker and the latest python versions installed | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 12:34:41 -08:00 |
| 1be3a8e588 | Quick fix for ansible-lint things | Ansible Linting / ansible-lint (push): failing after 3s | 2025-03-04 11:46:17 -08:00 |
| da580eb7d2 | REmoving bogus wiki stuff | Ansible Linting / ansible-lint (push): failing after 4s | 2025-03-04 11:44:09 -08:00 |
| a8d7c01efe | Slowing building out the new workflows | Ansible Linting / ansible-lint (push): failing after 5s | 2025-03-04 11:20:00 -08:00 |
111 changed files with 962 additions and 1426 deletions

@@ -10,6 +10,6 @@ jobs:
steps:
- name: Checkout repo content
uses: actions/checkout@v4
- run: ansible-lint -c linter.yaml
- run: ansible-lint
working-directory: ansible/

.gitignore (vendored)

@@ -21,4 +21,3 @@ docker/beta/shockrah.xyz/
docker/beta/resume.shockrah.xyz/
k8s/config.yaml
infra/**/tfplan
.ansible/

@@ -1,3 +0,0 @@
nigel:
hosts:
nigel.local:

@@ -1,4 +0,0 @@
---
skip_list:
- role-name
- var-naming[no-role-prefix]

@@ -1,28 +0,0 @@
# This playbook is meant to be a oneshot to be run manually on the dev box
# The rest of the role stuff is meant to be run as the admin user that
# this playbook creates for us
---
- name: Setup local admin user with a fresh ubuntu host
hosts: nigel.local
remote_user: nigel
vars:
admin:
username: nigel
tasks:
- name: Copy the nigel admin key
ansible.posix.authorized_key:
user: "{{ admin.username }}"
state: present
key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
- name: Prevent password based logins
become: true
ansible.builtin.lineinfile:
dest: /etc/ssh/sshd_config
line: PasswordAuthentication no
state: present
backup: true
- name: Restart SSH Daemon
become: true
ansible.builtin.service:
name: ssh
state: restarted

@@ -1,9 +0,0 @@
---
- name: Setup all the responsibilities of the nomad server
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the nomad role
ansible.builtin.include_role:
name: nomad

@@ -1,14 +0,0 @@
---
- name: Setup bare metal requirements
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply the base role to the nuc
ansible.builtin.include_role:
name: base
- name: Apply the k3s base role
ansible.builtin.include_role:
name: k3s
- name: Apply the proxy role
ansible.builtin.include_role:
name: proxy

@@ -1,8 +0,0 @@
---
- name: Setup host as a reverse proxy
hosts: nigel.local
remote_user: nigel
tasks:
- name: Apply reverse proxy role
ansible.builtin.include_role:
name: proxy

@@ -1 +0,0 @@
deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable

@@ -1,41 +0,0 @@
- name: Ensure we have basic updated packages before setting up docker
ansible.builtin.apt:
name: "{{ item }}"
update_cache: true
loop:
- ca-certificates
- curl
- name: Running install on the keyrings directory
ansible.builtin.command:
cmd: install -m 0755 -d /etc/apt/keyrings
register: install
changed_when: install.rc == 0
- name: Fetch Docker GPG Key
vars:
keylink: https://download.docker.com/linux/ubuntu/gpg
ansible.builtin.get_url:
url: "{{ keylink }}"
dest: /etc/apt/keyrings/docker.asc
mode: "0644"
- name: Add repo to apt sources
ansible.builtin.copy:
src: docker.list
dest: /etc/apt/sources.list.d/docker.list
mode: "0644"
- name: Update Apt cache with latest docker.list packages
ansible.builtin.apt:
update_cache: true
- name: Ensure all docker packages are updated to the latest versions
ansible.builtin.apt:
name: "{{ item }}"
loop:
- docker-ce
- docker-ce-cli
- containerd.io
- docker-buildx-plugin
- docker-compose-plugin
- name: Verify that the docker components are installed properly
ansible.builtin.command:
cmd: docker run hello-world
register: docker
changed_when: docker.rc == 0

@@ -1,8 +0,0 @@
- name: Download the setup script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp/k3s.sh
mode: "0644"
- name: Run installation script
ansible.builtin.command:
cmd: bash /tmp/k3s.sh

@@ -1,25 +0,0 @@
- name: Ensure nigel can use sudo without password
become: true
tags:
- setup
ansible.builtin.lineinfile:
path: /etc/sudoers
state: present
line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
tags:
- setup
ansible.builtin.include_tasks:
file: ensure-docker-basic.yaml
apply:
become: true
tags:
- setup
- name: Run through nomad removal steps
tags: nomad
ansible.builtin.include_tasks:
file: nomad.yaml
apply:
become: true
tags:
- nomad

@@ -1,12 +0,0 @@
bind_addr = "{{ ip }}"
advertise_addr = "{{ ip }}"
bootstrap = true
bootstrap_expect = 1
client_addr = "{{ ip }}"
server = true
data_dir = "/opt/consul"
ui_config {
enabled = true
}

@@ -1 +0,0 @@
deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main

@@ -1,11 +0,0 @@
- name: Download the installation script
ansible.builtin.get_url:
url: https://get.k3s.io
dest: /tmp
register: install_script
- name: Run installation script
become: true
environment:
INSTALL_K3S_EXEC: server
ansible.builtin.command:
cmd: sh {{ install_script.dest }}

@@ -1,24 +0,0 @@
data_dir = "/opt/nomad/data"
bind_addr = "0.0.0.0"
server {
enabled = true
bootstrap_expect = 1
}
client {
enabled = true
servers = ["127.0.0.1"]
}
host_volume "registry" {
path = "/opt/volumes/registry"
read_only = false
}
host_volume "nfs" {
path = "/opt/volumes/nfs"
read_only = false
}

@@ -1,18 +0,0 @@
- name: Nomad server configuration
become: true
block:
- name: Ensure the root data directory is present
ansible.builtin.file:
path: "{{ nomad.volumes.root }}"
state: directory
mode: "0755"
- name: Ensure registry volume is present
ansible.builtin.file:
path: "{{ nomad.volumes.registry }}"
state: directory
mode: "0755"
- name: Ensure the MinIO directory is present
ansible.builtin.file:
path: "{{ nomad.volumes.nfs }}"
state: directory
mode: "0755"

@@ -1,5 +0,0 @@
nomad:
volumes:
root: /opt/volumes
registry: /opt/volumes/ncr
nfs: /opt/volumes/nfs

@@ -1,15 +0,0 @@
127.0.0.1 localhost
127.0.1.1 nigel
# Our own dns stuff
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local
# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
fe00::0 ip6-localnet
ff00::0 ip6-mcastprefix
ff02::1 ip6-allnodes
ff02::2 ip6-allrouters

@@ -1,6 +0,0 @@
server {
server_name ncr.nigel.local;
location / {
proxy_pass http://localhost:5000;
}
}

@@ -1,25 +0,0 @@
server {
server_name nomad.nigel.local;
location / {
proxy_pass http://nomad-ws;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_read_timeout 319s;
# This is for log streaming requests
proxy_buffering off;
# Upgrade and Connection headers for upgrading to websockets
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_set_header Origin "${scheme}://${proxy_host}";
}
}
upstream nomad-ws {
ip_hash;
server nomad.nigel.local:4646;
}

@@ -1,28 +0,0 @@
- name: Reverse proxy role configuration
become: true
block:
- name: Ensure /etc/hosts are up to date
ansible.builtin.copy:
dest: /etc/hosts
src: host-file
mode: "0644"
- name: Ensure nginx is setup as latest
ansible.builtin.apt:
name: nginx
- name: Copy the nomad.conf to available configurations
ansible.builtin.copy:
src: "{{ item }}"
dest: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Link the nomad.conf to sites-enabled
ansible.builtin.file:
path: "/etc/nginx/sites-enabled/{{ item }}"
state: link
src: "/etc/nginx/sites-available/{{ item }}"
mode: "0644"
loop: "{{ proxy_nginx_configs }}"
- name: Restart nginx
ansible.builtin.systemd_service:
name: nginx
state: restarted

@@ -1,3 +0,0 @@
proxy_nginx_configs:
- nomad.conf
- ncr.conf

@@ -0,0 +1,23 @@
#!/bin/bash
set -e
bucket="$1"
s3env=/opt/nginx/s3.env
[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1
[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
source $s3env
pull() {
aws s3 sync s3://$bucket /opt/nginx/$bucket
}
case $bucket in
resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
*) echo "Invalid bucket name" && exit 1 ;;
esac

@@ -0,0 +1,40 @@
networks:
gitea:
external: false
services:
gitea:
image: gitea/gitea:latest-rootless
container_name: gitea
environment:
- USER_UID=1000
- USER_GID=1000
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea:/data
- /etc/timezone:/etc/timezone:ro
- /etc/localtime:/etc/localtime:ro
ports:
- "3000:3000"
- "2222:22"
gitea-runner:
image: gitea/act_runner:nightly
container_name: gitea-runner
restart: always
networks:
- gitea
volumes:
- /opt/containers/gitea_runner/
- /var/run/docker.sock:/var/run/docker.sock
environment:
- GITEA_INSTANCE_URL=https://git.shockrah.xyz
- GITEA_RUNNER_NAME=gitea-main
- GITEA_RUNNER_LABELS=gitea-main
- GITEA_RUNNER_REGISTRATION_TOKEN=${token}

@@ -0,0 +1,29 @@
What is this
============
Here we keep the scripts that build out all the containers we run.
All of these images are based on images produced by other projects.
docker-compose.yaml
===================
Services that are more or less "special" go here, since most of what runs
on the main host is basically just static HTML websites.
Services & Containers
=====================
| Service | Docker Image Used |
|------------|--------------------------|
| Gitea | gitea/gitea:latest-rootless |
| Act Runner | gitea/act_runner:nightly |
Why the services above?
=======================
The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" services. I have no issue with Github/Gitlab
but I just like being able to host my own stuff when possible :smiley:
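
The runner registers itself using the `${token}` interpolation in the compose
file above. A minimal sketch of bringing the stack up, assuming the
registration token has already been copied out of the Gitea admin UI (the
exact menu path varies by version):

```sh
# token is a placeholder: paste the runner registration token from
# your Gitea instance's site administration pages
token=REPLACE_WITH_REGISTRATION_TOKEN docker compose up -d
```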

infra/dns/build.sh (new file)

@@ -0,0 +1,34 @@
#!/bin/bash
set -e
opt=$1
plan=tfplan
build_plan() {
echo Generating plan
set -x
terraform plan -var-file variables.tfvars -input=false -out $plan
}
deploy_plan() {
terraform apply $plan
}
init() {
terraform init
}
help_prompt() {
cat <<- EOF
Options: plan deploy help
EOF
}
# Default to building a plan
source ./secrets.sh
case $opt in
plan) build_plan;;
deploy) deploy_plan;;
*) help_prompt;;
esac

@@ -37,9 +37,6 @@ locals {
{ name = "www.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "git.shockrah.xyz", records = [ var.vultr_host ] },
{ name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "code.shockrah.xyz", records = [ var.vke_lb ] },
]
}

@@ -26,7 +26,3 @@ variable "vultr_host" {
description = "IP of the temp Vultr host"
}
variable "vke_lb" {
type = string
description = "IP of our VKE load balancer"
}

@@ -1,2 +1 @@
vultr_host = "45.32.83.83"
vke_lb = "45.32.89.101"

@@ -1 +0,0 @@
config.yaml

@@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: nginx-deployment
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.14.2
ports:
- containerPort: 80
name: nginx-port
---
apiVersion: v1
kind: Service
metadata:
name: nginx
spec:
type: NodePort
selector:
app: nginx
ports:
- port: 80
nodePort: 30808
targetPort: nginx-port

@@ -1,19 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
name: hello
spec:
schedule: "* * * * *"
jobTemplate:
spec:
template:
spec:
containers:
- name: hello
image: busybox:1.28
imagePullPolicy: IfNotPresent
command:
- /bin/sh
- -c
- date; echo Hello from the sample cron-container
restartPolicy: OnFailure

@@ -0,0 +1,62 @@
resource kubernetes_namespace admin-servers {
count = length(var.admin_services.configs) > 0 ? 1 : 0
metadata {
name = var.admin_services.namespace
}
}
resource kubernetes_pod admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
spec {
node_selector = {
NodeType = var.admin_services.namespace
}
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service admin {
for_each = var.admin_services.configs
metadata {
name = each.key
namespace = var.admin_services.namespace
labels = {
app = each.key
}
}
# TODO: don't make these NodePorts since we're gonna want them
# to be purely internal to the Cluster.
# WHY? Because we want to keep dashboards as unexposed as possible
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

@@ -9,31 +9,15 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "5.98.0"
version = "~> 5.0"
}
vultr = {
source = "vultr/vultr"
version = "2.26.0"
version = "2.22.1"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.37.1"
}
kubectl = {
source = "gavinbunney/kubectl"
version = " 1.19.0"
}
helm = {
source = "hashicorp/helm"
version = "3.0.2"
}
tls = {
source = "hashicorp/tls"
version = "4.1.0"
}
random = {
source = "hashicorp/random"
version = "3.7.2"
version = "2.34.0"
}
}
}
@@ -55,12 +39,4 @@ provider kubernetes {
config_path = "config.yaml"
}
provider kubectl {
config_path = "config.yaml"
}
provider helm {
kubernetes = {
config_path = "config.yaml"
}
}

@@ -1,42 +0,0 @@
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File /fluent-bit/etc/parsers.conf
Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
multiline.parser docker, cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.*
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Read_From_Tail On
filters: |
[FILTER]
Name kubernetes
Match kube.*Merge_log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
outputs: |
[OUTPUT]
Name openobserve
Match *
URI /api/default/default/_json
Host openobserve.logging.svc.cluster.local
Port 5080
tls On
Format json
Json_date_key _timestamp
Json_date_format iso8601
HTTP_User mail@shockrah.xyz
HTTP_Passwd kXWpwEK4SIxUzjgp

@@ -1,382 +0,0 @@
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# General configuration shared across resources
app:
# Mode determines if chart should deploy a full Dashboard with all containers or just the API.
# - dashboard - deploys all the containers
# - api - deploys just the API
mode: 'dashboard'
image:
pullPolicy: IfNotPresent
pullSecrets: []
scheduling:
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
nodeSelector: {}
security:
# Allow overriding csrfKey used by API/Auth containers.
# It has to be base64 encoded random 256 bytes string.
# If empty, it will be autogenerated.
csrfKey: ~
# SecurityContext to be added to pods
# To disable set the following configuration to null:
# securityContext: null
securityContext:
runAsNonRoot: true
seccompProfile:
type: RuntimeDefault
# ContainerSecurityContext to be added to containers
# To disable set the following configuration to null:
# containerSecurityContext: null
containerSecurityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
capabilities:
drop: ["ALL"]
# Pod Disruption Budget configuration
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
podDisruptionBudget:
enabled: false
minAvailable: 0
maxUnavailable: 0
networkPolicy:
enabled: false
ingressDenyAll: false
# Raw network policy spec that overrides predefined spec
# Example:
# spec:
# egress:
# - ports:
# - port: 123
spec: {}
# Common labels & annotations shared across all deployed resources
labels: {}
annotations: {}
# Common priority class used for all deployed resources
priorityClassName: null
settings:
## Global dashboard settings
global:
# # Cluster name that appears in the browser window title if it is set
clusterName: "Athens Cluster"
# # Max number of items that can be displayed on each list page
# itemsPerPage: 10
# # Max number of labels that are displayed by default on most views.
# labelsLimit: 3
# # Number of seconds between every auto-refresh of logs
# logsAutoRefreshTimeInterval: 5
# # Number of seconds between every auto-refresh of every resource. Set 0 to disable
# resourceAutoRefreshTimeInterval: 10
# # Hide all access denied warnings in the notification panel
# disableAccessDeniedNotifications: false
# # Hide all namespaces option in namespace selection dropdown to avoid accidental selection in large clusters thus preventing OOM errors
# hideAllNamespaces: false
# # Namespace that should be selected by default after logging in.
defaultNamespace: playground
# # Enable/Disable namespace isolation mode. When enabled users without cluster-wide permissions will
# # only see resources within their own namespaces.
namespaceIsolation: false
# # List of namespaces that should be presented to user without namespace list privileges.
# namespaceFallbackList:
# - default
## Pinned resources that will be displayed in dashboard's menu
pinnedResources: []
# - kind: customresourcedefinition
# # Fully qualified name of a CRD
# name: prometheus.monitoring.coreos.com
# # Display name
# displayName: Prometheus
# # Is this CRD namespaced?
# namespaced: true
ingress:
enabled: false
hosts:
# Keep 'localhost' host only if you want to access Dashboard using 'kubectl port-forward ...' on:
# https://localhost:8443
- localhost
# - kubernetes.dashboard.domain.com
ingressClassName: internal-nginx
# Use only if your ingress controllers support default ingress classes.
# If set to true ingressClassName will be ignored and not added to the Ingress resources.
# It should fall back to using IngressClass marked as the default.
useDefaultIngressClass: false
# This will append our Ingress with annotations required by our default configuration.
# nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
# nginx.ingress.kubernetes.io/ssl-passthrough: "true"
# nginx.ingress.kubernetes.io/ssl-redirect: "true"
useDefaultAnnotations: true
pathType: ImplementationSpecific
# If path is not the default (/), rewrite-target annotation will be added to the Ingress.
# It allows serving Kubernetes Dashboard on a sub-path. Make sure that the configured path
# does not conflict with gateway route configuration.
path: /
issuer:
name: selfsigned
# Scope determines what kind of issuer annotation will be used on ingress resource
# - default - adds 'cert-manager.io/issuer'
# - cluster - adds 'cert-manager.io/cluster-issuer'
# - disabled - disables cert-manager annotations
scope: default
tls:
enabled: true
# If provided it will override autogenerated secret name
secretName: ""
labels: {}
annotations: {}
# Use the following toleration if Dashboard can be deployed on a tainted control-plane nodes
# - key: node-role.kubernetes.io/control-plane
# effect: NoSchedule
tolerations: []
affinity: {}
auth:
role: auth
image:
repository: docker.io/kubernetesui/dashboard-auth
tag: 1.4.0
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- name: auth
containerPort: 8000
protocol: TCP
args: []
env: []
volumeMounts:
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
automountServiceAccountToken: true
volumes:
# Create on-disk volume to store exec logs (required)
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for Auth related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
# API deployment configuration
api:
role: api
image:
repository: docker.io/kubernetesui/dashboard-api
tag: 1.14.0
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- name: api
containerPort: 8000
protocol: TCP
# Additional container arguments
# Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
# args:
# - --system-banner="Welcome to the Kubernetes Dashboard"
args: []
# Additional container environment variables
# env:
# - name: SOME_VAR
# value: 'some value'
env: []
# Additional volume mounts
# - mountPath: /kubeconfig
# name: dashboard-kubeconfig
# readOnly: true
volumeMounts:
# Create volume mount to store exec logs (required)
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
automountServiceAccountToken: true
# Additional volumes
# - name: dashboard-kubeconfig
# secret:
# defaultMode: 420
# secretName: dashboard-kubeconfig
volumes:
# Create on-disk volume to store exec logs (required)
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for API related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
# WEB UI deployment configuration
web:
role: web
image:
repository: docker.io/kubernetesui/dashboard-web
tag: 1.7.0
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- name: web
containerPort: 8000
protocol: TCP
# Additional container arguments
# Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
# args:
# - --system-banner="Welcome to the Kubernetes Dashboard"
args: []
# Additional container environment variables
# env:
# - name: SOME_VAR
# value: 'some value'
env: []
# Additional volume mounts
# - mountPath: /kubeconfig
# name: dashboard-kubeconfig
# readOnly: true
volumeMounts:
# Create volume mount to store logs (required)
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
automountServiceAccountToken: true
# Additional volumes
# - name: dashboard-kubeconfig
# secret:
# defaultMode: 420
# secretName: dashboard-kubeconfig
volumes:
# Create on-disk volume to store exec logs (required)
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for WEB UI related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
### Metrics Scraper
### Container to scrape, store, and retrieve a window of time from the Metrics Server.
### refs: https://github.com/kubernetes/dashboard/tree/master/modules/metrics-scraper
metricsScraper:
enabled: true
role: metrics-scraper
image:
repository: docker.io/kubernetesui/dashboard-metrics-scraper
tag: 1.2.2
scaling:
replicas: 1
revisionHistoryLimit: 10
service:
type: ClusterIP
extraSpec: ~
containers:
ports:
- containerPort: 8000
protocol: TCP
args: []
# Additional container environment variables
# env:
# - name: SOME_VAR
# value: 'some value'
env: []
# Additional volume mounts
# - mountPath: /kubeconfig
# name: dashboard-kubeconfig
# readOnly: true
volumeMounts:
# Create volume mount to store logs (required)
- mountPath: /tmp
name: tmp-volume
# TODO: Validate configuration
resources:
requests:
cpu: 100m
memory: 200Mi
limits:
cpu: 250m
memory: 400Mi
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
automountServiceAccountToken: true
# Additional volumes
# - name: dashboard-kubeconfig
# secret:
# defaultMode: 420
# secretName: dashboard-kubeconfig
volumes:
- name: tmp-volume
emptyDir: {}
nodeSelector: {}
# Labels & annotations for Metrics Scraper related resources
labels: {}
annotations: {}
serviceLabels: {}
serviceAnnotations: {}
## Optional Metrics Server sub-chart configuration
## Enable this if you don't already have metrics-server enabled on your cluster and
## want to use it with dashboard metrics-scraper
## refs:
## - https://github.com/kubernetes-sigs/metrics-server
## - https://github.com/kubernetes-sigs/metrics-server/tree/master/charts/metrics-server
metrics-server:
enabled: false
args:
- --kubelet-preferred-address-types=InternalIP
- --kubelet-insecure-tls

@@ -1,18 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
preferredChain: "ISRG Root X1"
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt
solvers:
- http01:
ingress:
class: nginx

@@ -1,19 +1,30 @@
resource vultr_kubernetes athens {
region = var.cluster.region
region = var.cluster.region
version = var.cluster.version
label = var.cluster.label
# vpc_id = vultr_vpc.athens.id
label = var.cluster.label
# BUG: only have this set when creating the resource for the first time
# once the cluster is up, we should comment this out again
# enable_firewall = true
node_pools {
node_quantity = var.cluster.pools["main"].min_nodes
plan = var.cluster.pools["main"].plan
label = var.cluster.pools["main"].label
min_nodes = var.cluster.pools["main"].min_nodes
max_nodes = var.cluster.pools["main"].max_nodes
auto_scaler = true
node_quantity = 1
plan = var.cluster.pools["meta"].plan
label = var.admin_services.namespace
min_nodes = var.cluster.pools["meta"].min
max_nodes = var.cluster.pools["meta"].max
# tag = var.admin_services.namespace
}
}
resource vultr_kubernetes_node_pools games {
cluster_id = vultr_kubernetes.athens.id
node_quantity = var.cluster.pools["games"].min
plan = var.cluster.pools["games"].plan
label = var.game_servers.namespace
min_nodes = var.cluster.pools["games"].min
max_nodes = var.cluster.pools["games"].max
tag = var.admin_services.namespace
}
output k8s_config {
value = vultr_kubernetes.athens.kube_config
sensitive = true

@@ -1,6 +0,0 @@
data vultr_kubernetes athens {
filter {
name = "label"
values = [ var.cluster.label ]
}
}

infra/vultr-kubernetes/dev/.gitignore (vendored, new file)

@@ -0,0 +1,4 @@
# created by virtualenv automatically
bin/
lib/

@@ -0,0 +1,51 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re

def get_args() -> Namespace:
    parser = ArgumentParser(
        prog="Cluster Search Thing",
        description="General utility for finding resources for game server bot"
    )
    games = {"reflex", "minecraft"}
    parser.add_argument('-g', '--game', required=False, choices=games)
    admin = {"health"}
    parser.add_argument('-a', '--admin', required=False, choices=admin)
    return parser.parse_args()

def k8s_api(config_path: str) -> client.CoreV1Api:
    config.load_kube_config(config_path)
    return client.CoreV1Api()

def get_admin_service_details(args: Namespace, api: client.CoreV1Api):
    print('admin thing requested', args.admin)

def get_game_server_ip(args: Namespace, api: client.CoreV1Api):
    pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
    node_name = pods.items[0].spec.node_name
    services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
    port = services.items[0].spec.ports[0].port
    # Collect the external IPs of the node that hosts the pod (these pods
    # only have 1 container), then pick out the game server's addresses
    node = next(n for n in api.list_node().items if n.metadata.name == node_name)
    node_ips = [a for a in node.status.addresses if a.type == 'ExternalIP']
    # IPv4 is dotted-quad; IPv6 contains colons
    ipv4 = next(a.address for a in node_ips if re.match(r'^(\d{1,3}\.){3}\d{1,3}$', a.address))
    ipv6 = next(a.address for a in node_ips if ':' in a.address)
    print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')

if __name__ == '__main__':
    args = get_args()
    api = k8s_api('../config.yaml')
    if args.game:
        get_game_server_ip(args, api)
    if args.admin:
        get_admin_service_details(args, api)

@@ -0,0 +1,8 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3

@@ -0,0 +1,18 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0

@@ -1,23 +1,32 @@
# resource vultr_firewall_rule web_inbound {
# for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
# firewall_group_id = vultr_kubernetes.athens.firewall_group_id
# protocol = "tcp"
# ip_type = "v4"
# subnet = "0.0.0.0"
# subnet_size = 0
# port = each.value
# }
resource vultr_firewall_group bastion {
description = "For connections into and out of the bastion host"
}
resource vultr_firewall_rule bastion_inbound {
firewall_group_id = vultr_firewall_group.bastion.id
resource vultr_firewall_rule web_inbound {
for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
firewall_group_id = vultr_kubernetes.athens.firewall_group_id
protocol = "tcp"
ip_type = "v4"
subnet = "0.0.0.0"
subnet_size = 0
port = 22
port = each.value
}
resource vultr_firewall_rule game-server-inbound {
for_each = var.game_servers.configs
firewall_group_id = vultr_kubernetes.athens.firewall_group_id
protocol = "tcp"
ip_type = "v4"
subnet = "0.0.0.0"
subnet_size = 0
port = each.value.port.expose
}
resource vultr_firewall_rule admin-service-inbound {
for_each = var.admin_services.configs
firewall_group_id = vultr_kubernetes.athens.firewall_group_id
protocol = "tcp"
ip_type = "v4"
subnet = "0.0.0.0"
subnet_size = 0
notes = each.value.port.notes
port = each.value.port.expose
}

@@ -0,0 +1,55 @@
resource kubernetes_namespace game-servers {
count = length(var.game_servers.configs) > 0 ? 1 : 0
metadata {
name = var.game_servers.namespace
}
}
resource kubernetes_pod game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
container {
image = each.value.image
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port.internal
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
resource kubernetes_service game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
selector = {
app = each.key
}
port {
target_port = each.value.port.internal
port = each.value.port.expose
}
type = "NodePort"
}
}

@@ -1,66 +0,0 @@
resource kubernetes_deployment gitea {
metadata {
name = "gitea"
namespace = var.playground.namespace
labels = {
"app" = "gitea"
}
}
spec {
replicas = 1
selector {
match_labels = {
"app" = "gitea"
}
}
template {
metadata {
labels = {
"app" = "gitea"
}
}
spec {
container {
name = "gitea"
image = "gitea/gitea:latest"
port {
container_port = 3000
name = "gitea-main"
}
port {
container_port = 22
name = "gitea-ssh"
}
volume_mount {
name = "gitea"
mount_path = "/data"
}
}
volume {
name = "gitea"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim_v1.gitea.metadata[0].name
}
}
}
}
}
}
resource kubernetes_service gitea {
metadata {
name = "gitea"
namespace = var.playground.namespace
}
spec {
selector = {
"app" = "gitea"
}
port {
target_port = "gitea-main"
port = 3000
name = "http"
}
}
}

@@ -1,47 +0,0 @@
resource kubernetes_deployment health {
metadata {
name = "health"
namespace = var.playground.namespace
}
spec {
replicas = 1
selector {
match_labels = {
name = "health"
}
}
template {
metadata {
labels = {
name = "health"
}
}
spec {
container {
name = "health"
image = "quanhua92/whoami:latest"
port {
container_port = "8080"
}
}
}
}
}
}
resource kubernetes_service health {
metadata {
name = "health"
namespace = var.playground.namespace
}
spec {
selector = {
name = "health"
}
port {
port = 80
target_port = 8080
name = "http"
}
}
}

@@ -1,7 +0,0 @@
resource helm_release nginx {
name = "ingress-nginx"
repository = "https://kubernetes.github.io/ingress-nginx"
chart = "ingress-nginx"
namespace = "ingress-nginx"
create_namespace = true
}

@@ -1,70 +0,0 @@
resource kubernetes_ingress_v1 health {
metadata {
name = "health-ingress"
namespace = var.playground.namespace
annotations = {
"cert-manager.io/cluster-issuer" = "letsencrypt"
"cert-manager.io/ingress.class" = "nginx"
}
}
spec {
ingress_class_name = "nginx"
tls {
hosts = [
"sanity.shockrah.xyz",
"uptime.shockrah.xyz",
"code.shockrah.xyz"
]
secret_name = "shockrah"
}
rule {
host = "sanity.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.health.metadata[0].name
port {
number = kubernetes_service.health.spec[0].port[0].port
}
}
}
}
}
}
rule {
host = "uptime.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.kuma.metadata[0].name
port {
number = kubernetes_service.kuma.spec[0].port[0].port
}
}
}
}
}
}
rule {
host = "code.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.gitea.metadata[0].name
port {
number = kubernetes_service.gitea.spec[0].port[0].port
}
}
}
}
}
}
}
}

infra/vultr-kubernetes/k8s/.gitignore (vendored, new file)

@@ -0,0 +1 @@
terraform.yaml

@@ -0,0 +1,33 @@
terraform {
required_version = ">= 0.13"
backend s3 {
bucket = "project-athens"
key = "infra/vke/k8s/state/build.tfstate"
region = "us-west-1"
encrypt = true
}
required_providers {
# For interacting with S3
aws = {
source = "hashicorp/aws"
version = "~> 5.0"
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.30.0"
}
}
}
provider aws {
access_key = var.aws_key
secret_key = var.aws_secret
region = var.aws_region
max_retries = 1
}
provider kubernetes {
config_path = "terraform.yaml"
}

@@ -0,0 +1,50 @@
resource kubernetes_ingress_v1 athens {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
rule {
host = "test.shockrah.xyz"
http {
path {
backend {
service {
name = var.shockrahxyz.name
port {
number = 80
}
}
}
path = "/"
}
}
}
}
}
resource kubernetes_service athens_lb {
metadata {
name = "athens-websites"
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = "websites"
}
}
spec {
selector = {
app = kubernetes_ingress_v1.athens.metadata.0.labels.app
}
port {
port = 80
target_port = 80
}
type = "LoadBalancer"
external_ips = [ var.cluster.ip ]
}
}

@@ -0,0 +1,5 @@
resource kubernetes_namespace websites {
metadata {
name = "websites"
}
}

@@ -0,0 +1,62 @@
# First we set up the ingress controller with helm
```sh
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
# Now we can install this to our cluster
helm install --kubeconfig config.yaml traefik traefik/traefik
```
# Prove the service is present with
```sh
kubectl --kubeconfig config.yaml get svc
```
# Create the pods
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-dep.yaml
```
# Expose on port 80
```sh
kubectl --kubeconfig config.yaml apply -f k8s/nginx-service.yaml
```
# Create ingress on k8s
```sh
kubectl --kubeconfig config.yaml apply -f k8s/traefik-ingress.yaml
```
# Take the external IP from the ingress
Put that into terraform's A record for the domain, since this is a load
balancer in Vultr (an actual Vultr resource, apparently).
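A quick way to read that external IP back out of the cluster, assuming the
helm release above created a LoadBalancer service named `traefik` in the
default namespace (a sketch, not part of the original notes):

```sh
# Print only the load balancer IP assigned to the traefik service
kubectl --kubeconfig config.yaml get svc traefik \
  -o jsonpath='{.status.loadBalancer.ingress[0].ip}'
```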
# Configure cert-manager for traefik ingress
Using the latest version from here:
https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml
```sh
kubectl --kubeconfig config.yaml \
apply --validate=false \
-f https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.yaml
```
# Create the cert issuer and certificate
```sh
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
```
Because we just have 1 cert for now, we are looking for its status to be `READY`.
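A sketch of how to check for that, assuming the cert-manager CRDs from the
step above are installed (the certificate name is whatever the issuer
manifest created):

```sh
# READY flips to True once the ACME challenge completes
kubectl --kubeconfig config.yaml get certificate --all-namespaces
```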

@@ -0,0 +1,21 @@
# Plain nginx for now so that we can test out reverse dns
resource kubernetes_pod shockrah {
metadata {
name = var.shockrahxyz.name
namespace = kubernetes_namespace.websites.metadata.0.name
labels = {
app = var.shockrahxyz.name
}
}
spec {
container {
image = "nginx"
name = "${var.shockrahxyz.name}"
port {
container_port = 80
}
}
}
}

@@ -0,0 +1,35 @@
# API Keys required to reach AWS/Vultr
variable vultr_api_key {
type = string
sensitive = true
}
variable aws_key {
type = string
sensitive = true
}
variable aws_secret {
type = string
sensitive = true
}
variable aws_region {
type = string
sensitive = true
}
variable shockrahxyz {
type = object({
name = string
port = number
dns = string
})
}
variable cluster {
type = object({
ip = string
})
}

@@ -0,0 +1,37 @@
# Here we are going to define the deployment and service
# Basically all things directly related to the actual service we want to provide
---
kind: Deployment
apiVersion: apps/v1
metadata:
name: alternate-nginx-web
namespace: default
labels:
app: alternate-nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: alternate-nginx-web
template:
metadata:
labels:
app: alternate-nginx-web
spec:
# Container comes from an example thing i randomly found on docker hub
containers:
- name: alternate-nginx-web
image: dockerbogo/docker-nginx-hello-world
---
apiVersion: v1
kind: Service
metadata:
name: alternate-nginx-web
namespace: default
spec:
selector:
app: alternate-nginx-web
ports:
- name: http
targetPort: 80
port: 80

@@ -0,0 +1,30 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod-hello
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-prod-hello
solvers:
- http01:
ingress:
class: traefik

@@ -0,0 +1,13 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-prod
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

@@ -1,14 +1,15 @@
apiVersion: cert-manager.io/v1
kind: Issuer
kind: ClusterIssuer
metadata:
name: letsencrypt-nginx
name: letsencrypt-prod
namespace: default
spec:
acme:
email: dev@shockrah.xyz
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: example
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: nginx
class: traefik

@@ -0,0 +1,20 @@
kind: Deployment
apiVersion: apps/v1
metadata:
name: nginx-web
namespace: default
labels:
app: nginx-web
spec:
replicas: 1
selector:
matchLabels:
app: nginx-web
template:
metadata:
labels:
app: nginx-web
spec:
containers:
- name: nginx
image: nginx

@@ -0,0 +1,12 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-web
namespace: default
spec:
selector:
app: nginx-web
ports:
- name: http
targetPort: 80
port: 80

@@ -0,0 +1,44 @@
# This is the first thing we need to create, an issue to put certs into
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
namespace: default
spec:
acme:
server: https://acme-v02.api.letsencrypt.org/directory
email: dev@shockrah.xyz
privateKeySecretRef:
name: letsencrypt-temprah-lab
solvers:
- http01:
ingress:
class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: hello.temprah-lab.xyz
namespace: default
spec:
secretName: hello.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: hello.temprah-lab.xyz
dnsNames:
- hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: sample.temprah-lab.xyz
namespace: default
spec:
secretName: sample.temprah-lab.xyz-tls
issuerRef:
name: letsencrypt-temprah-lab
kind: ClusterIssuer
commonName: sample.temprah-lab.xyz
dnsNames:
- sample.temprah-lab.xyz

@@ -0,0 +1,31 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: traefik-ingress
namespace: default
labels:
name: project-athens-lb
annotations:
kubernetes.io/ingress.class: traefik
spec:
rules:
- host: sample.temprah-lab.xyz
http:
paths:
- backend:
service:
name: nginx-web
port:
number: 80
path: /
pathType: Prefix
- host: hello.temprah-lab.xyz
http:
paths:
- backend:
service:
name: alternate-nginx-web
port:
number: 80
path: /
pathType: Prefix

@@ -1,36 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-service
spec:
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: whoami-ingress
annotations:
cert-manager.io/ingress.class: nginx
cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
ingressClassName: nginx
tls:
- secretName: whoami-tls
hosts:
- example.shockrah.xyz
rules:
- host: example.shockrah.xyz
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: whoami-service
port:
number: 80

@@ -1,21 +0,0 @@
apiVersion: v1
kind: Service
metadata:
name: whoami-lb
annotations:
service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-interval: "30"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-response-timeout: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-unhealthy-threshold: "5"
service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-healthy-threshold: "5"
spec:
type: LoadBalancer
selector:
name: whoami
ports:
- name: http
port: 80
targetPort: 8080

@@ -1,20 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: whoami
spec:
replicas: 3
selector:
matchLabels:
name: whoami
template:
metadata:
labels:
name: whoami
spec:
containers:
- name: whoami
image: quanhua92/whoami:latest
imagePullPolicy: Always
ports:
- containerPort: 8080

@@ -1,37 +0,0 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-staging
spec:
acme:
# The ACME server URL
server: https://acme-staging-v02.api.letsencrypt.org/directory
preferredChain: "ISRG Root X1"
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-staging
solvers:
- http01:
ingress:
class: nginx
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
name: letsencrypt-prod
spec:
acme:
# The ACME server URL
server: https://acme-v02.api.letsencrypt.org/directory
# Email address used for ACME registration
email: dev@shockrah.xyz
# Name of a secret used to store the ACME account private key
privateKeySecretRef:
name: letsencrypt-prod
solvers:
- http01:
ingress:
class: nginx

@@ -1,18 +0,0 @@
resource kubernetes_namespace playground {
metadata {
annotations = {
names = var.playground.namespace
}
name = var.playground.namespace
}
}
resource kubernetes_namespace openobserve {
metadata {
annotations = {
names = "openobserve"
}
name = "openobserve"
}
}

@@ -1,30 +0,0 @@
resource helm_release shockrah_cert_manager {
name = "cert-manager"
repository = "https://charts.jetstack.io"
chart = "cert-manager"
version = "v1.18.2"
namespace = "cert-manager"
create_namespace = true
cleanup_on_fail = true
set = [
{
name = "crds.enabled"
value = "true"
}
]
}
data kubectl_file_documents cluster_issuer {
content = file("cluster-issuer.yaml")
}
resource kubectl_manifest cluster_issuer {
for_each = data.kubectl_file_documents.cluster_issuer.manifests
yaml_body = each.value
depends_on = [
data.kubectl_file_documents.cluster_issuer
]
}

@@ -1,61 +0,0 @@
resource kubernetes_deployment kuma {
metadata {
name = "kuma"
namespace = var.playground.namespace
labels = {
"app" = "kuma"
}
}
spec {
replicas = 1
selector {
match_labels = {
"app" = "kuma"
}
}
template {
metadata {
labels = {
"app" = "kuma"
}
}
spec {
container {
name = "kuma"
image = "louislam/uptime-kuma:2"
port {
container_port = 3001
name = "uptime-kuma"
}
volume_mount {
name = "kuma-data"
mount_path = "/app/data"
}
}
volume {
name = "kuma-data"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim_v1.kuma.metadata[0].name
}
}
}
}
}
}
resource kubernetes_service kuma {
metadata {
name = "kuma"
namespace = var.playground.namespace
}
spec {
selector = {
"app" = "kuma"
}
port {
target_port = "uptime-kuma"
port = 3001
name = "http"
}
}
}

@@ -26,36 +26,46 @@ variable cluster {
label = string
version = string
pools = map(object({
node_quantity = number
plan = string
label = string
min_nodes = number
max_nodes = number
tag = string
plan = string
autoscale = bool
min = number
max = number
}))
})
}
variable playground {
variable game_servers {
type = object({
namespace = string
health = object({
dns = string
})
tls = object({
email = string
})
configs = map(object({
name = optional(string)
image = string
cpu = string
mem = string
port = object({
internal = number
expose = number
})
proto = optional(string)
}))
})
}
variable bastion {
variable admin_services {
type = object({
plan = string
os = string
label = string
namespace = string
configs = map(object({
name = string
image = string
cpu = string
mem = string
port = object({
notes = optional(string)
internal = number
expose = number
})
proto = optional(string)
}))
})
}

@@ -1,34 +1,51 @@
cluster = {
region = "lax"
label = "athens-cluster"
version = "v1.33.0+3"
version = "v1.31.2+1"
pools = {
main = {
node_quantity = 1
plan = "vc2-2c-4gb"
label = "main"
min_nodes = 1
max_nodes = 2
tag = "athens-main"
meta = {
plan = "vc2-1c-2gb"
autoscale = true
min = 1
max = 2
}
games = {
plan = "vc2-1c-2gb"
autoscale = true
min = 1
max = 3
}
}
}
playground = {
namespace = "playground"
# Sanity check service that is used purely for the sake of ensuring
# things are ( at a basic level ) functional
health = {
dns = "health"
}
tls = {
email = "dev@shockrah.xyz"
game_servers = {
namespace = "games"
configs = {
# minecraft = {
# image = "itzg/minecraft-server"
# cpu = "1000m"
# mem = "2048Mi"
# port = {
# expose = 30808
# internal = 80
# }
# }
}
}
bastion = {
plan = "vc2-1c-2gb"
label = "bastion"
os = "1743"
admin_services = {
namespace = "admin-services"
configs = {
# health = {
# image = "nginx:latest"
# name = "health"
# cpu = "200m"
# mem = "64Mi"
# port = {
# notes = "Basic nginx sanity check service"
# expose = 30800
# internal = 80
# }
# }
}
}

@@ -1,32 +0,0 @@
resource kubernetes_persistent_volume_claim_v1 kuma {
metadata {
name = "kuma-data"
namespace = var.playground.namespace
}
spec {
volume_mode = "Filesystem"
access_modes = [ "ReadWriteOnce"]
resources {
requests = {
storage = "10Gi"
}
}
}
}
resource kubernetes_persistent_volume_claim_v1 gitea {
metadata {
name = "gitea-data"
namespace = var.playground.namespace
}
spec {
volume_mode = "Filesystem"
access_modes = [ "ReadWriteOnce"]
resources {
requests = {
storage = "10Gi"
}
}
}
}

playbooks/inventory.yaml (new file)

@@ -0,0 +1,3 @@
static-web:
hosts:
shockrah.xyz:

@@ -0,0 +1,9 @@
---
- name: Pre Pipeline Playbook for Static Hosts
hosts: static-web
remote_user: root
tasks:
- name: Import manual setup steps
ansible.builtin.import_role:
name: static
tasks_from: setup-webadmin.yaml

playbooks/readme.md (new file)

@@ -0,0 +1,5 @@
# What is this
Here be the Ansible-based workflows we use to keep things like the static
hosts properly set up, with all the resources they need to host the services
we intend to host.
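
A typical invocation against the inventory in this directory might look like
the following sketch; the playbook filename here is illustrative, since only
the play names appear in this diff:

```sh
# Run the static host maintenance play against the static-web group
ansible-playbook -i inventory.yaml static-maintenance.yaml
```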

@@ -0,0 +1,8 @@
---
- name: Static Host Maintenance and Setup
hosts: static-web
remote_user: webadmin
tasks:
- name: Import static host role
ansible.builtin.import_role:
name: static

Some files were not shown because too many files have changed in this diff.