Compare commits

11 Commits

SHA1 Message Date
eea4c61537 Quick A record for testing static website migration
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 17s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 1m25s
2026-01-31 12:29:49 -08:00
ee860c6e1f Common names now line up with hostnames in certificate through the 1 ingress 🔥
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 8s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
2026-01-13 23:18:41 -08:00
1c11410c2d More resource re-factors, upgrades and fixes for future work
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 16s
Housekeeping but the wiki got hosed :((
2026-01-07 00:53:11 -08:00
4d71994b85 Upgrading provider versions
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2026-01-07 00:21:12 -08:00
79cb4eb1a6 Cleaning up unused code
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
2026-01-07 00:02:11 -08:00
e8817fe093 Adding wiki to DNS and opening it up on the ingress for public read access 2026-01-06 19:12:31 -08:00
97bffd2042 Adding note regarding git.shockrah.xyz & code.shockrah.xyz 2026-01-06 19:06:23 -08:00
37305fd74e Exposing 2222 in gitea service however ingress still needs configuration
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
2026-01-06 00:06:47 -08:00
555124bf2f Shortening ingress definition
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 5s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 19s
2026-01-03 23:07:33 -08:00
e209da949b Adding wiki service with a basic page for now
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
2026-01-03 21:43:16 -08:00
caa2eba639 Removing unused helm charts
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 6s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 20s
2025-12-28 19:30:13 -08:00
14 changed files with 130 additions and 522 deletions

View File

@@ -40,6 +40,7 @@ locals {
{ name = "sanity.shockrah.xyz", records = [ var.vke_lb ] }, { name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "uptime.shockrah.xyz", records = [ var.vke_lb ] }, { name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "code.shockrah.xyz", records = [ var.vke_lb ] }, { name = "code.shockrah.xyz", records = [ var.vke_lb ] },
{ name = "wiki.shockrah.xyz", records = [ var.vke_lb ] },
] ]
} }
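
The hunk above only grows a locals list; the records themselves are presumably fanned out elsewhere with for_each. A minimal sketch of how such a list is usually consumed (the local name, zone resource, and TTL below are assumptions, not taken from this repo):

    # Hypothetical consumer of the list above: one A record per entry
    resource "aws_route53_record" "vke" {
      for_each = { for r in local.records : r.name => r }

      zone_id = aws_route53_zone.shockrah.id # assumed zone resource
      name    = each.key
      type    = "A"
      ttl     = 300
      records = each.value.records
    }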

View File

@@ -33,3 +33,11 @@ resource "aws_route53_record" "temper-tv-mx" {
"50 fb.mail.gandi.net.", "50 fb.mail.gandi.net.",
] ]
} }
resource "aws_route53_record" "temper-tv-test" {
zone_id = aws_route53_zone.temper-tv.id
name = "test.temper.tv"
type = "A"
ttl = 300
records = [ var.vke_lb ]
}
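
This matches commit eea4c61537 ("Quick A record for testing static website migration"): test.temper.tv simply points at the same VKE load balancer as the other hosts. A convenience output for checking the record after apply (illustrative, not part of the diff):

    output "temper_tv_test_fqdn" {
      # fqdn is a standard aws_route53_record attribute
      value = aws_route53_record.temper-tv-test.fqdn
    }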

View File

@@ -9,7 +9,7 @@ terraform {
   required_providers {
     aws = {
       source = "hashicorp/aws"
-      version = "5.98.0"
+      version = "6.27.0"
     }
     vultr = {
       source = "vultr/vultr"
@@ -17,7 +17,7 @@ terraform {
     }
     kubernetes = {
       source = "hashicorp/kubernetes"
-      version = "2.37.1"
+      version = "3.0.1"
     }
     kubectl = {
       source = "gavinbunney/kubectl"

View File

@@ -1,42 +0,0 @@
-config:
-  service: |
-    [SERVICE]
-        Daemon Off
-        Flush {{ .Values.flush }}
-        Log_Level {{ .Values.logLevel }}
-        Parsers_File /fluent-bit/etc/parsers.conf
-        Parsers_File /fluent-bit/etc/conf/custom_parsers.conf
-  inputs: |
-    [INPUT]
-        Name tail
-        Path /var/log/containers/*.log
-        multiline.parser docker, cri
-        Tag kube.*
-        Mem_Buf_Limit 5MB
-        Skip_Long_Lines On
-    [INPUT]
-        Name systemd
-        Tag host.*
-        Systemd_Filter _SYSTEMD_UNIT=kubelet.service
-        Read_From_Tail On
-  filters: |
-    [FILTER]
-        Name kubernetes
-        Match kube.*
-        Merge_Log On
-        Keep_Log Off
-        K8S-Logging.Parser On
-        K8S-Logging.Exclude On
-  outputs: |
-    [OUTPUT]
-        Name openobserve
-        Match *
-        URI /api/default/default/_json
-        Host openobserve.logging.svc.cluster.local
-        Port 5080
-        tls On
-        Format json
-        Json_date_key _timestamp
-        Json_date_format iso8601
-        HTTP_User mail@shockrah.xyz
-        HTTP_Passwd kXWpwEK4SIxUzjgp
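
Note that the removed values file embedded the OpenObserve user and password in plain text; since that string has been committed, rotating the credential is prudent. If this config ever comes back, one option is to keep the password out of Helm values via a Kubernetes Secret (a sketch; the resource and variable names are hypothetical):

    # Hypothetical: credentials sourced from a Secret instead of chart values
    resource kubernetes_secret_v1 openobserve_creds {
      metadata {
        name      = "openobserve-creds"
        namespace = "logging"
      }
      data = {
        HTTP_USER   = "mail@shockrah.xyz"
        HTTP_PASSWD = var.openobserve_password # assumed sensitive variable
      }
    }

Fluent Bit can then read ${HTTP_USER} and ${HTTP_PASSWD} from the environment if the chart injects the Secret via envFrom.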

View File

@@ -1,382 +0,0 @@
-# Copyright 2017 The Kubernetes Authors.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-# General configuration shared across resources
-app:
-  # Mode determines if chart should deploy a full Dashboard with all containers or just the API.
-  # - dashboard - deploys all the containers
-  # - api - deploys just the API
-  mode: 'dashboard'
-  image:
-    pullPolicy: IfNotPresent
-    pullSecrets: []
-  scheduling:
-    # Node labels for pod assignment
-    # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
-    nodeSelector: {}
-  security:
-    # Allow overriding csrfKey used by API/Auth containers.
-    # It has to be base64 encoded random 256 bytes string.
-    # If empty, it will be autogenerated.
-    csrfKey: ~
-    # SecurityContext to be added to pods
-    # To disable set the following configuration to null:
-    # securityContext: null
-    securityContext:
-      runAsNonRoot: true
-      seccompProfile:
-        type: RuntimeDefault
-    # ContainerSecurityContext to be added to containers
-    # To disable set the following configuration to null:
-    # containerSecurityContext: null
-    containerSecurityContext:
-      allowPrivilegeEscalation: false
-      readOnlyRootFilesystem: true
-      runAsUser: 1001
-      runAsGroup: 2001
-      capabilities:
-        drop: ["ALL"]
-    # Pod Disruption Budget configuration
-    # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
-    podDisruptionBudget:
-      enabled: false
-      minAvailable: 0
-      maxUnavailable: 0
-    networkPolicy:
-      enabled: false
-      ingressDenyAll: false
-      # Raw network policy spec that overrides predefined spec
-      # Example:
-      # spec:
-      #   egress:
-      #     - ports:
-      #         - port: 123
-      spec: {}
-  # Common labels & annotations shared across all deployed resources
-  labels: {}
-  annotations: {}
-  # Common priority class used for all deployed resources
-  priorityClassName: null
-  settings:
-    ## Global dashboard settings
-    global:
-      # # Cluster name that appears in the browser window title if it is set
-      clusterName: "Athens Cluster"
-      # # Max number of items that can be displayed on each list page
-      # itemsPerPage: 10
-      # # Max number of labels that are displayed by default on most views.
-      # labelsLimit: 3
-      # # Number of seconds between every auto-refresh of logs
-      # logsAutoRefreshTimeInterval: 5
-      # # Number of seconds between every auto-refresh of every resource. Set 0 to disable
-      # resourceAutoRefreshTimeInterval: 10
-      # # Hide all access denied warnings in the notification panel
-      # disableAccessDeniedNotifications: false
-      # # Hide all namespaces option in namespace selection dropdown to avoid accidental selection in large clusters thus preventing OOM errors
-      # hideAllNamespaces: false
-      # # Namespace that should be selected by default after logging in.
-      defaultNamespace: playground
-      # # Enable/Disable namespace isolation mode. When enabled users without cluster-wide permissions will
-      # # only see resources within their own namespaces.
-      namespaceIsolation: false
-      # # List of namespaces that should be presented to user without namespace list privileges.
-      # namespaceFallbackList:
-      #   - default
-    ## Pinned resources that will be displayed in dashboard's menu
-    pinnedResources: []
-    # - kind: customresourcedefinition
-    #   # Fully qualified name of a CRD
-    #   name: prometheus.monitoring.coreos.com
-    #   # Display name
-    #   displayName: Prometheus
-    #   # Is this CRD namespaced?
-    #   namespaced: true
-  ingress:
-    enabled: false
-    hosts:
-      # Keep 'localhost' host only if you want to access Dashboard using 'kubectl port-forward ...' on:
-      # https://localhost:8443
-      - localhost
-      # - kubernetes.dashboard.domain.com
-    ingressClassName: internal-nginx
-    # Use only if your ingress controllers support default ingress classes.
-    # If set to true ingressClassName will be ignored and not added to the Ingress resources.
-    # It should fall back to using IngressClass marked as the default.
-    useDefaultIngressClass: false
-    # This will append our Ingress with annotations required by our default configuration.
-    # nginx.ingress.kubernetes.io/backend-protocol: "HTTPS"
-    # nginx.ingress.kubernetes.io/ssl-passthrough: "true"
-    # nginx.ingress.kubernetes.io/ssl-redirect: "true"
-    useDefaultAnnotations: true
-    pathType: ImplementationSpecific
-    # If path is not the default (/), rewrite-target annotation will be added to the Ingress.
-    # It allows serving Kubernetes Dashboard on a sub-path. Make sure that the configured path
-    # does not conflict with gateway route configuration.
-    path: /
-    issuer:
-      name: selfsigned
-      # Scope determines what kind of issuer annotation will be used on ingress resource
-      # - default - adds 'cert-manager.io/issuer'
-      # - cluster - adds 'cert-manager.io/cluster-issuer'
-      # - disabled - disables cert-manager annotations
-      scope: default
-    tls:
-      enabled: true
-      # If provided it will override autogenerated secret name
-      secretName: ""
-    labels: {}
-    annotations: {}
-  # Use the following toleration if Dashboard can be deployed on a tainted control-plane nodes
-  # - key: node-role.kubernetes.io/control-plane
-  #   effect: NoSchedule
-  tolerations: []
-  affinity: {}
-
-auth:
-  role: auth
-  image:
-    repository: docker.io/kubernetesui/dashboard-auth
-    tag: 1.4.0
-  scaling:
-    replicas: 1
-    revisionHistoryLimit: 10
-  service:
-    type: ClusterIP
-    extraSpec: ~
-  containers:
-    ports:
-      - name: auth
-        containerPort: 8000
-        protocol: TCP
-    args: []
-    env: []
-    volumeMounts:
-      - mountPath: /tmp
-        name: tmp-volume
-    # TODO: Validate configuration
-    resources:
-      requests:
-        cpu: 100m
-        memory: 200Mi
-      limits:
-        cpu: 250m
-        memory: 400Mi
-  automountServiceAccountToken: true
-  volumes:
-    # Create on-disk volume to store exec logs (required)
-    - name: tmp-volume
-      emptyDir: {}
-  nodeSelector: {}
-  # Labels & annotations for Auth related resources
-  labels: {}
-  annotations: {}
-  serviceLabels: {}
-  serviceAnnotations: {}
-
-# API deployment configuration
-api:
-  role: api
-  image:
-    repository: docker.io/kubernetesui/dashboard-api
-    tag: 1.14.0
-  scaling:
-    replicas: 1
-    revisionHistoryLimit: 10
-  service:
-    type: ClusterIP
-    extraSpec: ~
-  containers:
-    ports:
-      - name: api
-        containerPort: 8000
-        protocol: TCP
-    # Additional container arguments
-    # Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
-    # args:
-    #   - --system-banner="Welcome to the Kubernetes Dashboard"
-    args: []
-    # Additional container environment variables
-    # env:
-    #   - name: SOME_VAR
-    #     value: 'some value'
-    env: []
-    # Additional volume mounts
-    # - mountPath: /kubeconfig
-    #   name: dashboard-kubeconfig
-    #   readOnly: true
-    volumeMounts:
-      # Create volume mount to store exec logs (required)
-      - mountPath: /tmp
-        name: tmp-volume
-    # TODO: Validate configuration
-    resources:
-      requests:
-        cpu: 100m
-        memory: 200Mi
-      limits:
-        cpu: 250m
-        memory: 400Mi
-  automountServiceAccountToken: true
-  # Additional volumes
-  # - name: dashboard-kubeconfig
-  #   secret:
-  #     defaultMode: 420
-  #     secretName: dashboard-kubeconfig
-  volumes:
-    # Create on-disk volume to store exec logs (required)
-    - name: tmp-volume
-      emptyDir: {}
-  nodeSelector: {}
-  # Labels & annotations for API related resources
-  labels: {}
-  annotations: {}
-  serviceLabels: {}
-  serviceAnnotations: {}
-
-# WEB UI deployment configuration
-web:
-  role: web
-  image:
-    repository: docker.io/kubernetesui/dashboard-web
-    tag: 1.7.0
-  scaling:
-    replicas: 1
-    revisionHistoryLimit: 10
-  service:
-    type: ClusterIP
-    extraSpec: ~
-  containers:
-    ports:
-      - name: web
-        containerPort: 8000
-        protocol: TCP
-    # Additional container arguments
-    # Full list of arguments: https://github.com/kubernetes/dashboard/blob/master/docs/common/arguments.md
-    # args:
-    #   - --system-banner="Welcome to the Kubernetes Dashboard"
-    args: []
-    # Additional container environment variables
-    # env:
-    #   - name: SOME_VAR
-    #     value: 'some value'
-    env: []
-    # Additional volume mounts
-    # - mountPath: /kubeconfig
-    #   name: dashboard-kubeconfig
-    #   readOnly: true
-    volumeMounts:
-      # Create volume mount to store logs (required)
-      - mountPath: /tmp
-        name: tmp-volume
-    # TODO: Validate configuration
-    resources:
-      requests:
-        cpu: 100m
-        memory: 200Mi
-      limits:
-        cpu: 250m
-        memory: 400Mi
-  automountServiceAccountToken: true
-  # Additional volumes
-  # - name: dashboard-kubeconfig
-  #   secret:
-  #     defaultMode: 420
-  #     secretName: dashboard-kubeconfig
-  volumes:
-    # Create on-disk volume to store exec logs (required)
-    - name: tmp-volume
-      emptyDir: {}
-  nodeSelector: {}
-  # Labels & annotations for WEB UI related resources
-  labels: {}
-  annotations: {}
-  serviceLabels: {}
-  serviceAnnotations: {}
-
-### Metrics Scraper
-### Container to scrape, store, and retrieve a window of time from the Metrics Server.
-### refs: https://github.com/kubernetes/dashboard/tree/master/modules/metrics-scraper
-metricsScraper:
-  enabled: true
-  role: metrics-scraper
-  image:
-    repository: docker.io/kubernetesui/dashboard-metrics-scraper
-    tag: 1.2.2
-  scaling:
-    replicas: 1
-    revisionHistoryLimit: 10
-  service:
-    type: ClusterIP
-    extraSpec: ~
-  containers:
-    ports:
-      - containerPort: 8000
-        protocol: TCP
-    args: []
-    # Additional container environment variables
-    # env:
-    #   - name: SOME_VAR
-    #     value: 'some value'
-    env: []
-    # Additional volume mounts
-    # - mountPath: /kubeconfig
-    #   name: dashboard-kubeconfig
-    #   readOnly: true
-    volumeMounts:
-      # Create volume mount to store logs (required)
-      - mountPath: /tmp
-        name: tmp-volume
-    # TODO: Validate configuration
-    resources:
-      requests:
-        cpu: 100m
-        memory: 200Mi
-      limits:
-        cpu: 250m
-        memory: 400Mi
-    livenessProbe:
-      httpGet:
-        scheme: HTTP
-        path: /
-        port: 8000
-      initialDelaySeconds: 30
-      timeoutSeconds: 30
-  automountServiceAccountToken: true
-  # Additional volumes
-  # - name: dashboard-kubeconfig
-  #   secret:
-  #     defaultMode: 420
-  #     secretName: dashboard-kubeconfig
-  volumes:
-    - name: tmp-volume
-      emptyDir: {}
-  nodeSelector: {}
-  # Labels & annotations for Metrics Scraper related resources
-  labels: {}
-  annotations: {}
-  serviceLabels: {}
-  serviceAnnotations: {}
-
-## Optional Metrics Server sub-chart configuration
-## Enable this if you don't already have metrics-server enabled on your cluster and
-## want to use it with dashboard metrics-scraper
-## refs:
-##   - https://github.com/kubernetes-sigs/metrics-server
-##   - https://github.com/kubernetes-sigs/metrics-server/tree/master/charts/metrics-server
-metrics-server:
-  enabled: false
-  args:
-    - --kubelet-preferred-address-types=InternalIP
-    - --kubelet-insecure-tls

View File

@@ -8,16 +8,3 @@
 #   port = each.value
 # }
-
-resource vultr_firewall_group bastion {
-  description = "For connections into and out of the bastion host"
-}
-
-resource vultr_firewall_rule bastion_inbound {
-  firewall_group_id = vultr_firewall_group.bastion.id
-  protocol = "tcp"
-  ip_type = "v4"
-  subnet = "0.0.0.0"
-  subnet_size = 0
-  port = 22
-}
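
The deleted rule admitted TCP 22 from 0.0.0.0/0, so dropping it (together with the bastion variables removed later in this compare) closes a world-reachable SSH port. If the bastion ever returns, scoping the source network is one mitigation (a sketch; the admin CIDR is a placeholder):

    # Hypothetical tighter rule, should the bastion come back
    resource vultr_firewall_rule bastion_inbound {
      firewall_group_id = vultr_firewall_group.bastion.id
      protocol          = "tcp"
      ip_type           = "v4"
      subnet            = "203.0.113.0" # placeholder admin network
      subnet_size       = 24
      port              = 22
    }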

View File

@@ -1,3 +1,6 @@
+# NOTE: this is a simple deployment for demo purposes only.
+# Currently it does not support SSH access and lacks Gitea runners.
+# However a fully working setup can be found at: https://git.shockrah.xyz
 resource kubernetes_deployment gitea {
   metadata {
     name = "gitea"
@@ -28,7 +31,7 @@ resource kubernetes_deployment gitea {
           name = "gitea-main"
         }
         port {
-          container_port = 22
+          container_port = 2222
           name = "gitea-ssh"
         }
         volume_mount {
@@ -62,5 +65,10 @@ resource kubernetes_service gitea {
       port = 3000
       name = "http"
     }
+    port {
+      target_port = "gitea-ssh"
+      port = 2222
+      name = "ssh"
+    }
   }
 }
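
The Service now listens on 2222, but as commit 37305fd74e notes, the ingress still needs configuration, and a plain HTTP ingress rule cannot carry SSH. With ingress-nginx this is usually handled through the tcp-services ConfigMap (a sketch, assuming a stock ingress-nginx install whose controller runs with --tcp-services-configmap; names are hypothetical):

    # Hypothetical TCP passthrough for Gitea SSH via ingress-nginx
    resource kubernetes_config_map_v1 tcp_services {
      metadata {
        name      = "tcp-services"
        namespace = "ingress-nginx"
      }
      data = {
        # external port -> namespace/service:port
        "2222" = "${var.playground.namespace}/gitea:2222"
      }
    }

The load balancer in front of the controller also has to expose port 2222 for this to be reachable.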

View File

@@ -1,4 +1,4 @@
-resource kubernetes_deployment health {
+resource kubernetes_deployment_v1 health {
   metadata {
     name = "health"
     namespace = var.playground.namespace
@@ -29,7 +29,7 @@ resource kubernetes_deployment health {
   }
 }
 
-resource kubernetes_service health {
+resource kubernetes_service_v1 health {
   metadata {
     name = "health"
     namespace = var.playground.namespace

View File

@@ -1,3 +1,11 @@
+locals {
+  services = {
+    "code.shockrah.xyz" = kubernetes_service.gitea
+    "sanity.shockrah.xyz" = kubernetes_service_v1.health
+    "uptime.shockrah.xyz" = kubernetes_service.kuma
+    "wiki.shockrah.xyz" = kubernetes_service.otterwiki
+  }
+}
+
 resource kubernetes_ingress_v1 health {
   metadata {
     name = "health-ingress"
@@ -9,61 +17,31 @@ resource kubernetes_ingress_v1 health {
   }
   spec {
     ingress_class_name = "nginx"
-    tls {
-      hosts = [
-        "sanity.shockrah.xyz",
-        "uptime.shockrah.xyz",
-        "code.shockrah.xyz"
-      ]
-      secret_name = "shockrah"
-    }
-    rule {
-      host = "sanity.shockrah.xyz"
-      http {
-        path {
-          path = "/"
-          backend {
-            service {
-              name = kubernetes_service.health.metadata[0].name
-              port {
-                number = kubernetes_service.health.spec[0].port[0].port
-              }
-            }
-          }
-        }
-      }
-    }
-    rule {
-      host = "uptime.shockrah.xyz"
-      http {
-        path {
-          path = "/"
-          backend {
-            service {
-              name = kubernetes_service.kuma.metadata[0].name
-              port {
-                number = kubernetes_service.kuma.spec[0].port[0].port
-              }
-            }
-          }
-        }
-      }
-    }
-    rule {
-      host = "code.shockrah.xyz"
-      http {
-        path {
-          path = "/"
-          backend {
-            service {
-              name = kubernetes_service.gitea.metadata[0].name
-              port {
-                number = kubernetes_service.gitea.spec[0].port[0].port
-              }
-            }
-          }
-        }
-      }
-    }
+    dynamic tls {
+      for_each = local.services
+      content {
+        hosts = [tls.key]
+        secret_name = "${tls.value.metadata[0].name}-secret"
+      }
+    }
+    dynamic "rule" {
+      for_each = local.services
+      content {
+        host = "${rule.key}"
+        http {
+          path {
+            path = "/"
+            backend {
+              service {
+                name = rule.value.metadata[0].name
+                port {
+                  number = rule.value.spec[0].port[0].port
+                }
+              }
+            }
+          }
+        }
+      }
+    }
   }
 }
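
The two dynamic blocks iterate local.services once per hostname, so each host gets its own tls block and its own rule. For the wiki entry they expand to roughly the following (illustrative expansion, using the otterwiki service defined later in this compare):

    tls {
      hosts       = ["wiki.shockrah.xyz"]
      secret_name = "otterwiki-secret"
    }
    rule {
      host = "wiki.shockrah.xyz"
      http {
        path {
          path = "/"
          backend {
            service {
              name = "otterwiki"
              port {
                number = 80
              }
            }
          }
        }
      }
    }

One certificate secret per host is also what commit ee860c6e1f refers to: common names now match hostnames instead of sharing the single "shockrah" secret.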

View File

@@ -7,12 +7,4 @@ resource kubernetes_namespace playground {
   }
 }
-
-resource kubernetes_namespace openobserve {
-  metadata {
-    annotations = {
-      names = "openobserve"
-    }
-    name = "openobserve"
-  }
-}

View File

@@ -40,22 +40,10 @@ variable cluster {
 variable playground {
   type = object({
     namespace = string
-    health = object({
-      dns = string
-    })
+    # TODO: Re-incorporate this var for templating later
     tls = object({
       email = string
     })
   })
 }
-
-variable bastion {
-  type = object({
-    plan = string
-    os = string
-    label = string
-  })
-}

View File

@@ -1,11 +1,11 @@
 cluster = {
   region = "lax"
   label = "athens-cluster"
-  version = "v1.33.0+3"
+  version = "v1.34.1+2"
   pools = {
     main = {
       node_quantity = 1
-      plan = "vc2-2c-4gb"
+      plan = "vc2-1c-2gb"
       label = "main"
       min_nodes = 1
       max_nodes = 2
@@ -18,17 +18,7 @@ playground = {
   namespace = "playground"
   # Sanity check service that is used purely for the sake of ensuring
   # things are ( at a basic level ) functional
-  health = {
-    dns = "health"
-  }
   tls = {
     email = "dev@shockrah.xyz"
   }
 }
-
-bastion = {
-  plan = "vc2-1c-2gb"
-  label = "bastion"
-  os = "1743"
-}

View File

@@ -30,3 +30,20 @@ resource kubernetes_persistent_volume_claim_v1 gitea {
     }
   }
 }
+
+resource kubernetes_persistent_volume_claim_v1 otterwiki {
+  metadata {
+    name = "otterwiki-data"
+    namespace = var.playground.namespace
+  }
+  spec {
+    volume_mode = "Filesystem"
+    access_modes = [ "ReadWriteOnce" ]
+    resources {
+      requests = {
+        storage = "10Gi"
+      }
+    }
+  }
+}

View File

@@ -0,0 +1,63 @@
+resource kubernetes_deployment otterwiki {
+  metadata {
+    name = "otterwiki"
+    namespace = var.playground.namespace
+    labels = {
+      "app" = "otterwiki"
+    }
+  }
+  spec {
+    replicas = 1
+    selector {
+      match_labels = {
+        "app" = "otterwiki"
+      }
+    }
+    template {
+      metadata {
+        labels = {
+          "app" = "otterwiki"
+        }
+      }
+      spec {
+        container {
+          name = "otterwiki"
+          image = "redimp/otterwiki:2"
+          port {
+            container_port = 8080
+            name = "otterwiki-main"
+          }
+          volume_mount {
+            name = "otterwiki-data"
+            mount_path = "/var/lib/otterwiki"
+          }
+        }
+        volume {
+          name = "otterwiki-data"
+          persistent_volume_claim {
+            claim_name = kubernetes_persistent_volume_claim_v1.otterwiki.metadata[0].name
+          }
+        }
+      }
+    }
+  }
+}
+
+resource kubernetes_service otterwiki {
+  metadata {
+    name = "otterwiki"
+    namespace = var.playground.namespace
+  }
+  spec {
+    selector = {
+      "app" = "otterwiki"
+    }
+    port {
+      port = 80
+      target_port = "otterwiki-main"
+      protocol = "TCP"
+      name = "http"
+    }
+  }
+}
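
With this file in place the chain for wiki.shockrah.xyz is complete: the Route53 record points at the VKE load balancer, the shared ingress routes the host to this Service on port 80, and the Service forwards to the otterwiki-main container port. The deployment ships without health checks; a liveness probe against the HTTP port would be a natural follow-up (a sketch, not part of the diff):

    # Hypothetical probe for the otterwiki container above
    liveness_probe {
      http_get {
        path = "/"
        port = "otterwiki-main"
      }
      initial_delay_seconds = 30
      period_seconds        = 15
    }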