Compare commits

13 Commits

Author SHA1 Message Date
eea4c61537 Quick A record for testing static website migration
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 17s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 1m25s
2026-01-31 12:29:49 -08:00
ee860c6e1f Common names now line up with hostnames in the certificate through the single ingress 🔥
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 8s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
2026-01-13 23:18:41 -08:00
1c11410c2d More resource refactors, upgrades and fixes for future work
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 16s
Housekeeping but the wiki got hosed :((
2026-01-07 00:53:11 -08:00
4d71994b85 Upgrading provider versions
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2026-01-07 00:21:12 -08:00
79cb4eb1a6 Cleaning up unused code
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2026-01-07 00:02:11 -08:00
e8817fe093 Adding wiki to DNS and opening it up on the ingress for public read access 2026-01-06 19:12:31 -08:00
97bffd2042 Adding note regarding git.shockrah.xyz & code.shockrah.xyz 2026-01-06 19:06:23 -08:00
37305fd74e Exposing 2222 in gitea service however ingress still needs configuration
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
2026-01-06 00:06:47 -08:00
555124bf2f Shortening ingress definition
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2026-01-03 23:07:33 -08:00
e209da949b Adding wiki service with a basic page for now
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
2026-01-03 21:43:16 -08:00
caa2eba639 Removing unused helm charts
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
2025-12-28 19:30:13 -08:00
982669ed4a Cleaning up the logging namespace and resources as they are not providing value
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 7s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 24s
2025-12-12 14:41:29 -08:00
4446ef813f Fixing auto_scaler issue with root node pool in athens cluster 2025-12-12 14:40:54 -08:00
17 changed files with 119 additions and 340 deletions

View File

@@ -40,6 +40,7 @@ locals {
{ name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "code.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "wiki.shockrah.xyz", records = [ var.vke_lb ] },
]
}
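
The hunk above only shows the record list; the resource that consumes it lives elsewhere in the module and is not part of this diff. A minimal sketch of how such a list is typically fanned out into Route 53 records, assuming the local is named local.records and the hosted zone resource is aws_route53_zone.shockrah (both names are assumptions):

# Hypothetical consumer of the record list shown above. The local name
# (local.records) and the zone reference (aws_route53_zone.shockrah) are
# assumptions; only the list entries appear in this diff.
resource "aws_route53_record" "cluster" {
  for_each = { for r in local.records : r.name => r }

  zone_id = aws_route53_zone.shockrah.zone_id
  name    = each.value.name
  type    = "A"
  ttl     = 300
  records = each.value.records
}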

View File

@@ -33,3 +33,11 @@ resource "aws_route53_record" "temper-tv-mx" {
"50 fb.mail.gandi.net.",
]
}
resource "aws_route53_record" "temper-tv-test" {
zone_id = aws_route53_zone.temper-tv.id
name = "test.temper.tv"
type = "A"
ttl = 300
records = [ var.vke_lb ]
}

View File

@@ -9,7 +9,7 @@ terraform {
required_providers {
aws = {
source = "hashicorp/aws"
version = "5.98.0"
version = "6.27.0"
}
vultr = {
source = "vultr/vultr"
@@ -17,7 +17,7 @@ terraform {
}
kubernetes = {
source = "hashicorp/kubernetes"
version = "2.37.1"
version = "3.0.1"
}
kubectl = {
source = "gavinbunney/kubectl"

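Both pins above are exact versions, so every upgrade means editing this block and re-running terraform init -upgrade to refresh the lock file. A hedged alternative, purely illustrative, is a pessimistic constraint that accepts compatible releases while still blocking the next major version:

terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 6.27"   # any 6.x release at or above 6.27, never 7.0
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "~> 3.0"    # any 3.x release, never 4.0
    }
  }
}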
View File

@@ -1,42 +0,0 @@
config:
service: |
[SERVICE]
Daemon Off
Flush {{ .Values.flush }}
Log_Level {{ .Values.logLevel }}
Parsers_File /fluent-bit/etc/parsers.conf
Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
inputs: |
[INPUT]
Name tail
Path /var/log/containers/*.log
multiline.parser docker, cri
Tag kube.*
Mem_Buf_Limit 5MB
Skip_Long_Lines On
[INPUT]
Name systemd
Tag host.*
Systemd_Filter _SYSTEMD_UNIT=kubelet.service
Read_From_Tail On
filters: |
[FILTER]
Name kubernetes
Match kube.*
Merge_log On
Keep_Log Off
K8S-Logging.Parser On
K8S-Logging.Exclude On
outputs: |
[OUTPUT]
Name openobserve
Match *
URI /api/default/default/_json
Host openobserve.logging.svc.cluster.local
Port 5080
tls On
Format json
Json_date_key _timestamp
Json_date_format iso8601
HTTP_User mail@shockrah.xyz
HTTP_Passwd kXWpwEK4SIxUzjgp

View File

@@ -10,6 +10,7 @@ resource vultr_kubernetes athens {
label = var.cluster.pools["main"].label
min_nodes = var.cluster.pools["main"].min_nodes
max_nodes = var.cluster.pools["main"].max_nodes
auto_scaler = true
}
}

View File

@@ -8,16 +8,3 @@
# port = each.value
# }
resource vultr_firewall_group bastion {
description = "For connections into and out of the bastion host"
}
resource vultr_firewall_rule bastion_inbound {
firewall_group_id = vultr_firewall_group.bastion.id
protocol = "tcp"
ip_type = "v4"
subnet = "0.0.0.0"
subnet_size = 0
port = 22
}

View File

@@ -1,10 +0,0 @@
resource helm_release "fluent_bit" {
name = "fluent-bit"
repository = "https://fluent.github.io/helm-charts"
chart = "fluent-bit"
namespace = kubernetes_namespace.logging.metadata[0].name
values = [
file("${path.module}/chart/fluent-bit/values.yaml")
]
}

View File

@@ -1,3 +1,6 @@
# NOTE: this is a simple deployment for demo purposes only.
# Currently it does not support SSH access and lacks Gitea runners.
# However a fully working setup can be found at: https://git.shockrah.xyz
resource kubernetes_deployment gitea {
metadata {
name = "gitea"
@@ -28,7 +31,7 @@ resource kubernetes_deployment gitea {
name = "gitea-main"
}
port {
container_port = 22
container_port = 2222
name = "gitea-ssh"
}
volume_mount {
@@ -62,5 +65,10 @@ resource kubernetes_service gitea {
port = 3000
name = "http"
}
port {
target_port = "gitea-ssh"
port = 2222
name = "ssh"
}
}
}
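
The commit message notes that the ingress still needs configuration before SSH works. With ingress-nginx, raw TCP like Gitea's SSH is usually exposed through the controller's tcp-services ConfigMap rather than an Ingress rule; the controller also has to run with --tcp-services-configmap pointing at it and expose port 2222 on its own Service. A minimal sketch, assuming the Service above is named gitea in the playground namespace and the controller lives in an ingress-nginx namespace (both assumptions, not taken from this diff):

# Hypothetical wiring for Gitea SSH through ingress-nginx; the names and
# namespaces are assumptions, not shown in this compare.
resource "kubernetes_config_map" "nginx_tcp_services" {
  metadata {
    name      = "tcp-services"
    namespace = "ingress-nginx"
  }
  data = {
    # listen on 2222 and forward to the gitea Service's 2222 port
    "2222" = "playground/gitea:2222"
  }
}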

View File

@@ -1,4 +1,4 @@
resource kubernetes_deployment health {
resource kubernetes_deployment_v1 health {
metadata {
name = "health"
namespace = var.playground.namespace
@@ -29,7 +29,7 @@ resource kubernetes_deployment health {
}
}
resource kubernetes_service health {
resource kubernetes_service_v1 health {
metadata {
name = "health"
namespace = var.playground.namespace

View File

@@ -1,3 +1,11 @@
locals {
services = {
"code.shockrah.xyz" = kubernetes_service.gitea
"sanity.shockrah.xyz" = kubernetes_service_v1.health
"uptime.shockrah.xyz" = kubernetes_service.kuma
"wiki.shockrah.xyz" = kubernetes_service.otterwiki
}
}
resource kubernetes_ingress_v1 health {
metadata {
name = "health-ingress"
@@ -9,57 +17,27 @@ resource kubernetes_ingress_v1 health {
}
spec {
ingress_class_name = "nginx"
tls {
hosts = [
"sanity.shockrah.xyz",
"uptime.shockrah.xyz",
"code.shockrah.xyz"
]
secret_name = "shockrah"
}
rule {
host = "sanity.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.health.metadata[0].name
port {
number = kubernetes_service.health.spec[0].port[0].port
}
}
}
}
}
}
rule {
host = "uptime.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.kuma.metadata[0].name
port {
number = kubernetes_service.kuma.spec[0].port[0].port
}
}
}
}
dynamic tls {
for_each = local.services
content {
hosts = [tls.key]
secret_name = "${tls.value.metadata[0].name}-secret"
}
}
rule {
host = "code.shockrah.xyz"
http {
path {
path = "/"
backend {
service {
name = kubernetes_service.gitea.metadata[0].name
dynamic "rule" {
for_each = local.services
content {
host = "${rule.key}"
http {
path {
path = "/"
backend {
service {
name = rule.value.metadata[0].name
port {
number = kubernetes_service.gitea.spec[0].port[0].port
number = rule.value.spec[0].port[0].port
}
}
}
}
}
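
After this refactor, publishing another hostname through the shared ingress is a single map entry: each key feeds both the tls hosts list and the rule host, and each value's metadata and port fill in the backend. A sketch of extending the map (status.shockrah.xyz and kubernetes_service.status are placeholders, not part of this change):

locals {
  services = {
    "code.shockrah.xyz"   = kubernetes_service.gitea
    "sanity.shockrah.xyz" = kubernetes_service_v1.health
    "uptime.shockrah.xyz" = kubernetes_service.kuma
    "wiki.shockrah.xyz"   = kubernetes_service.otterwiki
    # placeholder entry: the matching Service and the A record in the DNS
    # locals (first file in this compare) would still need to exist
    "status.shockrah.xyz" = kubernetes_service.status
  }
}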

View File

@@ -7,20 +7,4 @@ resource kubernetes_namespace playground {
}
}
resource kubernetes_namespace openobserve {
metadata {
annotations = {
names = "openobserve"
}
name = "openobserve"
}
}
resource kubernetes_namespace logging {
metadata {
annotations = {
names = "logging"
}
name = "logging"
}
}

View File

@@ -1,73 +0,0 @@
# This yaml file is a sample file used to set up OpenObserve but doesn't contain any relevant data
apiVersion: v1
kind: Service
metadata:
name: openobserve
namespace: openobserve
spec:
clusterIP: None
selector:
app: openobserve
ports:
- name: http
port: 5080
targetPort: 5080
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: openobserve
namespace: openobserve
labels:
name: openobserve
spec:
serviceName: openobserve
replicas: 1
selector:
matchLabels:
name: openobserve
app: openobserve
template:
metadata:
labels:
name: openobserve
app: openobserve
spec:
securityContext:
fsGroup: 2000
runAsUser: 10000
runAsGroup: 3000
runAsNonRoot: true
containers:
- name: openobserve
image: o2cr.ai/openobserve/openobserve-enterprise:v0.15.3
env:
- name: ZO_ROOT_USER_EMAIL
value: root@example.com
- name: ZO_ROOT_USER_PASSWORD
value: Complexpass#123
- name: ZO_DATA_DIR
value: /data
imagePullPolicy: Always
resources:
limits:
cpu: 4096m
memory: 2048Mi
requests:
cpu: 256m
memory: 50Mi
ports:
- containerPort: 5080
name: http
volumeMounts:
- name: data
mountPath: /data
volumeClaimTemplates:
- metadata:
name: data
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi

View File

@@ -1,105 +0,0 @@
resource random_password root_password {
length = 24
special = true
}
output openobserve_root_password {
value = random_password.root_password.result
sensitive = true
}
resource kubernetes_stateful_set openobserve {
metadata {
name = "openobserve"
namespace = "logging"
labels = {
"name" = "openobserve"
}
}
spec {
service_name = "openobserve"
replicas = 1
selector {
match_labels = {
"name" = "openobserve"
"app" = "openobserve"
}
}
template {
metadata {
labels = {
"name" = "openobserve"
"app" = "openobserve"
}
}
spec {
security_context {
fs_group = 2000
run_as_user = 10000
run_as_group = 3000
run_as_non_root = true
}
container {
name = "openobserve"
image = "o2cr.ai/openobserve/openobserve-enterprise:v0.15.3"
env {
name = "ZO_ROOT_USER_EMAIL"
value = "dev@shockrah.xyz"
}
env {
name = "ZO_ROOT_USER_PASSWORD"
value = random_password.root_password.result
}
env {
name = "ZO_DATA_DIR"
value = "/data"
}
image_pull_policy = "Always"
resources {
limits = {
"cpu" = "1000m"
"memory" = "2048Mi"
}
requests = {
"cpu" = "256m"
"memory" = "50Mi"
}
}
port {
container_port = 5080
name = "http"
}
volume_mount {
name = "openobserve-data"
mount_path = "/data"
}
}
volume {
name = "openobserve-data"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim_v1.openobserve.metadata[0].name
}
}
}
}
}
}
resource kubernetes_service openobserve {
metadata {
name = "openobserve"
namespace = "logging"
}
spec {
selector = {
"app" = "openserve"
}
port {
name = "http"
port = 5080
target_port = 5080
}
}
}

View File

@@ -40,22 +40,10 @@ variable cluster {
variable playground {
type = object({
namespace = string
health = object({
dns = string
})
# TODO: Re-incorporate this var for templating later
tls = object({
email = string
})
})
}
variable bastion {
type = object({
plan = string
os = string
label = string
})
}

View File

@@ -1,11 +1,11 @@
cluster = {
region = "lax"
label = "athens-cluster"
version = "v1.33.0+3"
version = "v1.34.1+2"
pools = {
main = {
node_quantity = 1
plan = "vc2-2c-4gb"
plan = "vc2-1c-2gb"
label = "main"
min_nodes = 1
max_nodes = 2
@@ -18,17 +18,7 @@ playground = {
namespace = "playground"
# Sanity check service that is used purely for the sake of ensuring
# things are ( at a basic level ) functional
health = {
dns = "health"
}
tls = {
email = "dev@shockrah.xyz"
}
}
bastion = {
plan = "vc2-1c-2gb"
label = "bastion"
os = "1743"
}

View File

@@ -31,10 +31,11 @@ resource kubernetes_persistent_volume_claim_v1 gitea {
}
}
resource kubernetes_persistent_volume_claim_v1 openobserve {
resource kubernetes_persistent_volume_claim_v1 otterwiki {
metadata {
name = "openobserve-data"
namespace = "logging"
name = "otterwiki-data"
namespace = var.playground.namespace
}
spec {
volume_mode = "Filesystem"

View File

@@ -0,0 +1,63 @@
resource kubernetes_deployment otterwiki {
metadata {
name = "otterwiki"
namespace = var.playground.namespace
labels = {
"app" = "otterwiki"
}
}
spec {
replicas = 1
selector {
match_labels = {
"app" = "otterwiki"
}
}
template {
metadata {
labels = {
"app" = "otterwiki"
}
}
spec {
container {
name = "otterwiki"
image = "redimp/otterwiki:2"
port {
container_port = 8080
name = "otterwiki-main"
}
volume_mount {
name = "otterwiki-data"
mount_path = "/var/lib/otterwiki"
}
}
volume {
name = "otterwiki-data"
persistent_volume_claim {
claim_name = kubernetes_persistent_volume_claim_v1.otterwiki.metadata[0].name
}
}
}
}
}
}
resource kubernetes_service otterwiki {
metadata {
name = "otterwiki"
namespace = var.playground.namespace
}
spec {
selector = {
"app" = "otterwiki"
}
port {
port = 80
target_port = "otterwiki-main"
protocol = "TCP"
name = "http"
}
}
}