From 718647f61712f41ec24d5fa3a1176022a702d524 Mon Sep 17 00:00:00 2001
From: shockrah
Date: Sun, 2 Nov 2025 21:31:22 -0800
Subject: [PATCH] Adding a new uptime service to configure later on

For now I'm staging this in the playground namespace since IDK if I'm going
to keep it 5ever + it's an excuse to learn how to use basic volumes
---
 infra/dns/shockrah-xyz.tf         |  1 +
 infra/vultr-kubernetes/ingress.tf | 21 ++++++++++-
 infra/vultr-kubernetes/uptime.tf  | 61 +++++++++++++++++++++++++++++++
 infra/vultr-kubernetes/volumes.tf | 18 +++++----
 4 files changed, 92 insertions(+), 9 deletions(-)
 create mode 100644 infra/vultr-kubernetes/uptime.tf

diff --git a/infra/dns/shockrah-xyz.tf b/infra/dns/shockrah-xyz.tf
index fcacb9b..46fc109 100644
--- a/infra/dns/shockrah-xyz.tf
+++ b/infra/dns/shockrah-xyz.tf
@@ -38,6 +38,7 @@ locals {
         { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
         { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
         { name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
+        { name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
     ]
 }
 
diff --git a/infra/vultr-kubernetes/ingress.tf b/infra/vultr-kubernetes/ingress.tf
index 72b1a80..585205d 100644
--- a/infra/vultr-kubernetes/ingress.tf
+++ b/infra/vultr-kubernetes/ingress.tf
@@ -10,7 +10,10 @@ resource kubernetes_ingress_v1 health {
     spec {
         ingress_class_name = "nginx"
         tls {
-            hosts = [ "sanity.shockrah.xyz" ]
+            hosts = [
+                "sanity.shockrah.xyz",
+                "uptime.shockrah.xyz"
+            ]
             secret_name = "shockrah"
         }
         rule {
@@ -29,6 +32,22 @@ resource kubernetes_ingress_v1 health {
                     }
                 }
             }
         }
+        rule {
+            host = "uptime.shockrah.xyz"
+            http {
+                path {
+                    path = "/"
+                    backend {
+                        service {
+                            name = kubernetes_service.kuma.metadata[0].name
+                            port {
+                                number = kubernetes_service.kuma.spec[0].port[0].port
+                            }
+                        }
+                    }
+                }
+            }
+        }
     }
 }
diff --git a/infra/vultr-kubernetes/uptime.tf b/infra/vultr-kubernetes/uptime.tf
new file mode 100644
index 0000000..c878b37
--- /dev/null
+++ b/infra/vultr-kubernetes/uptime.tf
@@ -0,0 +1,61 @@
+resource kubernetes_deployment kuma {
+    metadata {
+        name = "kuma"
+        namespace = var.playground.namespace
+        labels = {
+            "app" = "kuma"
+        }
+    }
+    spec {
+        replicas = 1
+        selector {
+            match_labels = {
+                "app" = "kuma"
+            }
+        }
+        template {
+            metadata {
+                labels = {
+                    "app" = "kuma"
+                }
+            }
+            spec {
+                container {
+                    name = "kuma"
+                    image = "louislam/uptime-kuma:2"
+                    port {
+                        container_port = 3001
+                        name = "uptime-kuma"
+                    }
+                    volume_mount {
+                        name = "kuma-data"
+                        mount_path = "/app/data"
+                    }
+                }
+                volume {
+                    name = "kuma-data"
+                    persistent_volume_claim {
+                        claim_name = kubernetes_persistent_volume_claim_v1.kuma.metadata[0].name
+                    }
+                }
+            }
+        }
+    }
+}
+
+resource kubernetes_service kuma {
+    metadata {
+        name = "kuma"
+        namespace = var.playground.namespace
+    }
+    spec {
+        selector = {
+            "app" = "kuma"
+        }
+        port {
+            target_port = "uptime-kuma"
+            port = 3001
+            name = "http"
+        }
+    }
+}
\ No newline at end of file
diff --git a/infra/vultr-kubernetes/volumes.tf b/infra/vultr-kubernetes/volumes.tf
index fa25906..b9956cb 100644
--- a/infra/vultr-kubernetes/volumes.tf
+++ b/infra/vultr-kubernetes/volumes.tf
@@ -1,15 +1,17 @@
-resource kubernetes_persistent_volume_claim_v1 gitea {
+# This volume will have to get scrapped and reconfigured
+# as this will be part of a "worksite" namespace for "boring" stuff
+resource kubernetes_persistent_volume_claim_v1 kuma {
     metadata {
-        name = "gitea"
+        name = "kuma-data"
         namespace = var.playground.namespace
     }
     spec {
-        access_modes = [ "ReadWriteMany"]
-        resources {
-            requests = {
-                storage = "10Gi"
-            }
+        volume_mode = "Filesystem"
+        access_modes = [ "ReadWriteOnce"]
+        resources {
+            requests = {
+                storage = "10Gi"
+            }
         }
-        storage_class_name = "vultr-vfs-storage"
     }
 }
\ No newline at end of file
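
Notes on the patch (not part of the commit itself):

The Service wires to the container by port name rather than number:
target_port = "uptime-kuma" in kubernetes_service.kuma matches the name on the
container's port block in the Deployment, so container_port can move off 3001
later without touching the Service.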
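
All three resources lean on var.playground, which is defined elsewhere in the
repo and not shown in this patch. A minimal sketch of the shape the usage
implies, assuming an object variable with a namespace field (the default value
here is hypothetical, not taken from the diff):

    # Assumed shape of var.playground; only .namespace is exercised by this patch
    variable "playground" {
        type = object({
            namespace = string
        })
        default = {
            # Hypothetical value; the real namespace is set outside this patch
            namespace = "playground"
        }
    }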
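
The old gitea claim pinned storage_class_name = "vultr-vfs-storage"; the new
kuma claim drops it, so the PVC will bind through the cluster's default
StorageClass. If pinning ever becomes necessary again, a sketch of the spec
with an explicit class ("vultr-block-storage" is an assumption about Vultr's
block-storage class name, verify with `kubectl get storageclass`):

    spec {
        volume_mode  = "Filesystem"
        access_modes = [ "ReadWriteOnce" ]
        # Assumed class name; check what the VKE cluster actually offers
        storage_class_name = "vultr-block-storage"
        resources {
            requests = {
                storage = "10Gi"
            }
        }
    }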