Compare commits: 7f36ff272e...master
89 commits
| SHA1 |
|---|
| eea4c61537 |
| ee860c6e1f |
| 1c11410c2d |
| 4d71994b85 |
| 79cb4eb1a6 |
| e8817fe093 |
| 97bffd2042 |
| 37305fd74e |
| 555124bf2f |
| e209da949b |
| caa2eba639 |
| 982669ed4a |
| 4446ef813f |
| 9dc2f1d769 |
| 01b7b4ced8 |
| 29cdfcb695 |
| bbbc9ed477 |
| d64c5526e6 |
| 469b3d08ce |
| 7f5b3205d0 |
| 67ff5ce729 |
| 6aadb47c61 |
| 0624161f53 |
| c6b2a062e9 |
| 718647f617 |
| cfe631eba7 |
| 29e049cf7f |
| 990d29ae6c |
| 859201109e |
| de3bff8f14 |
| 54a6ddbe5d |
| 82333fe6ce |
| cddf67de2f |
| affa03bed5 |
| 34e1f6afdf |
| fd9bd290af |
| d992556032 |
| fce73d06e0 |
| 7f5d81f0ee |
| 410790765f |
| 9454e03f53 |
| e6ed85920d |
| 2775d354f8 |
| 1f6f013634 |
| 778b995980 |
| fc897bdd0e |
| 8f06ef269a |
| f15da0c88d |
| c602773657 |
| cd908d9c14 |
| 56e9c0ae4a |
| 30bc6ee2fa |
| cd9822bb85 |
| 0efe6ca642 |
| 2ef4b00097 |
| e183055282 |
| 514909fc8d |
| 5b4a440cb4 |
| 826d334c3c |
| 77590b067a |
| 850570faf5 |
| 12831fbaf3 |
| a6123dd7e2 |
| 9c2e0a84d7 |
| 1281ea8857 |
| ee2d502ca6 |
| 88059a5e0f |
| 4024809cc4 |
| 029a3c80d5 |
| 75b7f2fa3d |
| 8ef606153f |
| be34327791 |
| c6ef6ae4d2 |
| eb7871584b |
| 4a0a12242a |
| 053db8793b |
| 24fcbc957a |
| 9675fbacef |
| 3f0c8a865d |
| 3f2e6d86f6 |
| 08560c945b |
| 506a9b32d9 |
| d4ece741e0 |
| 311a592d6e |
| 153ea8e982 |
| 943e9651da |
| 669c414288 |
| e3afed5e4f |
| e337989a59 |
@@ -10,6 +10,6 @@ jobs:
    steps:
      - name: Checkout repo content
        uses: actions/checkout@v4
      - run: ansible-lint
      - run: ansible-lint -c linter.yaml
        working-directory: ansible/
1 .gitignore (vendored)
@@ -21,3 +21,4 @@ docker/beta/shockrah.xyz/
docker/beta/resume.shockrah.xyz/
k8s/config.yaml
infra/**/tfplan
.ansible/
@@ -2,14 +2,15 @@
# The rest of the role stuff is meant to be run as the admin user that
# this playbook creates for us
---
- hosts: nigel.local
- name: Setup local admin user with a fresh ubuntu host
  hosts: nigel.local
  remote_user: nigel
  vars:
    admin:
      username: nigel
  tasks:
    - name: Copy the nigel admin key
      ansible.builtin.authorized_key:
      ansible.posix.authorized_key:
        user: "{{ admin.username }}"
        state: present
        key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
9 ansible/nomad.yaml (new file)
@@ -0,0 +1,9 @@
---
- name: Setup all the responsibilities of the nomad server
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Apply the nomad role
      ansible.builtin.include_role:
        name: nomad
@@ -1,11 +1,14 @@
---
- hosts: nigel.local
- name: Setup bare metal requirements
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Setup basic role on nigel
      tags:
        - setup
        - nomad
        - proxy
    - name: Apply the base role to the nuc
      ansible.builtin.include_role:
        name: local-server-head
        name: base
    - name: Apply the k3s base role
      ansible.builtin.include_role:
        name: k3s
    - name: Apply the proxy role
      ansible.builtin.include_role:
        name: proxy
8 ansible/proxy.yaml (new file)
@@ -0,0 +1,8 @@
---
- name: Setup host as a reverse proxy
  hosts: nigel.local
  remote_user: nigel
  tasks:
    - name: Apply reverse proxy role
      ansible.builtin.include_role:
        name: proxy
8 ansible/roles/base/tasks/k3s.yaml (new file)
@@ -0,0 +1,8 @@
- name: Download the setup script
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp/k3s.sh
    mode: "0644"
- name: Run installation script
  ansible.builtin.command:
    cmd: bash /tmp/k3s.sh
@@ -1,3 +1,11 @@
- name: Ensure nigel can use sudo without password
  become: true
  tags:
    - setup
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    state: present
    line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Ensure docker components are installed
  tags:
    - setup
@@ -7,15 +15,7 @@
  become: true
  tags:
    - setup
- name: Ensure nigel can use sudo without password
  become: true
  tags:
    - setup
  ansible.builtin.lineinfile:
    path: /etc/sudoers
    state: present
    line: "nigel ALL=(ALL) NOPASSWD:ALL"
- name: Run through nomad installation steps
- name: Run through nomad removal steps
  tags: nomad
  ansible.builtin.include_tasks:
    file: nomad.yaml
@@ -23,11 +23,3 @@
  become: true
  tags:
    - nomad
- name: Setup the reverse proxy outside of nomad
  tags: proxy
  ansible.builtin.include_tasks:
    file: reverse_proxy.yaml
    apply:
      become: true
  tags:
    - proxy
11 ansible/roles/k3s/tasks/main.yaml (new file)
@@ -0,0 +1,11 @@
- name: Download the installation script
  ansible.builtin.get_url:
    url: https://get.k3s.io
    dest: /tmp
  register: install_script
- name: Run installation script
  become: true
  environment:
    INSTALL_K3S_EXEC: server
  ansible.builtin.command:
    cmd: sh {{ install_script.dest }}
0 ansible/roles/k3s/vars/main.yaml (new file)
@@ -1,8 +0,0 @@
server {
    server_name nomad.nigel.local;
    location / {
        proxy_pass http://localhost:4646;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
@@ -1,7 +0,0 @@
server {
    server_name sanity.nigel.local;
    location / {
        proxy_pass http://localhost:8000;
    }
}
@@ -1,54 +0,0 @@
- name: Ensure prerequisite packages are installed
  ansible.builtin.apt:
    pkg:
      - wget
      - gpg
      - coreutils
    update_cache: true
- name: Hashicorp repo setup
  vars:
    keypath: /usr/share/keyrings/hashicorp-archive-keyring.gpg
    gpgpath: /tmp/hashicorp.gpg
  block:
    - name: Download the hashicorp GPG Key
      ansible.builtin.get_url:
        url: https://apt.releases.hashicorp.com/gpg
        dest: "{{ gpgpath }}"
    - name: Dearmor the hashicorp gpg key
      ansible.builtin.command:
        cmd: "gpg --dearmor --yes -o {{ keypath }} {{ gpgpath }}"
      register: gpg
      changed_when: gpg.rc == 0
    - name: Add the hashicorp linux repo
      vars:
        keyfile: "{{ keypath }}"
      ansible.builtin.template:
        src: hashicorp.list
        dest: /etc/apt/sources.list.d/hashicorp.list
        mode: "0644"
    - name: Update apt repo cache
      ansible.builtin.apt:
        update_cache: true
- name: Install consul
  ansible.builtin.apt:
    name: consul
- name: Install nomad package
  ansible.builtin.apt:
    pkg: nomad
- name: Copy in the consul configuration
  vars:
    ip: "{{ ansible_default_ipv4['address'] }}"
  ansible.builtin.template:
    src: consul.hcl
    dest: /etc/consul.d/consul.hcl
    mode: "0644"
- name: Start consul
  ansible.builtin.systemd_service:
    name: nomad
    state: started
    enabled: true
- name: Make sure the nomad service is available
  ansible.builtin.systemd_service:
    name: nomad
    state: started
    enabled: true
@@ -1,29 +0,0 @@
- name: Keep /etc/hosts up to date
  ansible.builtin.copy:
    dest: /etc/hosts
    src: host-file
    mode: "0644"
- name: Ensure nginx is setup as latest
  ansible.builtin.apt:
    name: nginx
- name: Copy the nomad.conf to available configurations
  ansible.builtin.copy:
    src: "{{ item }}"
    dest: "/etc/nginx/sites-available/{{ item }}"
    mode: "0644"
  loop:
    - nomad.conf
    - sanity.conf
- name: Link the nomad.conf to sites-enabled
  ansible.builtin.file:
    path: "/etc/nginx/sites-enabled/{{ item }}"
    state: link
    src: "/etc/nginx/sites-available/{{ item }}"
    mode: "0644"
  loop:
    - nomad.conf
    - sanity.conf
- name: Restart nginx
  ansible.builtin.systemd_service:
    name: nginx
    state: restarted
24 ansible/roles/nomad/files/nomad.hcl (new file)
@@ -0,0 +1,24 @@
data_dir  = "/opt/nomad/data"
bind_addr = "0.0.0.0"

server {
  enabled          = true
  bootstrap_expect = 1
}

client {
  enabled = true
  servers = ["127.0.0.1"]
}

host_volume "registry" {
  path      = "/opt/volumes/registry"
  read_only = false
}

host_volume "nfs" {
  path      = "/opt/volumes/nfs"
  read_only = false
}
18 ansible/roles/nomad/tasks/main.yaml (new file)
@@ -0,0 +1,18 @@
- name: Nomad server configuration
  become: true
  block:
    - name: Ensure the root data directory is present
      ansible.builtin.file:
        path: "{{ nomad.volumes.root }}"
        state: absent
        mode: "0755"
    - name: Ensure registry volume is present
      ansible.builtin.file:
        path: "{{ nomad.volumes.registry }}"
        state: absent
        mode: "0755"
    - name: Ensure the MinIO directory is present
      ansible.builtin.file:
        path: "{{ nomad.volumes.nfs }}"
        state: absent
        mode: "0755"
5 ansible/roles/nomad/vars/main.yaml (new file)
@@ -0,0 +1,5 @@
nomad:
  volumes:
    root: /opt/volumes
    registry: /opt/volumes/ncr
    nfs: /opt/volumes/nfs
@@ -5,6 +5,7 @@
127.0.1.1 nigel.local
127.0.1.1 nomad.nigel.local
127.0.1.1 sanity.nigel.local
127.0.1.1 ncr.nigel.local

# The following lines are desirable for IPv6 capable hosts
::1 ip6-localhost ip6-loopback
6 ansible/roles/proxy/files/ncr.conf (new file)
@@ -0,0 +1,6 @@
server {
    server_name ncr.nigel.local;
    location / {
        proxy_pass http://localhost:5000;
    }
}
25 ansible/roles/proxy/files/nomad.conf (new file)
@@ -0,0 +1,25 @@
server {
    server_name nomad.nigel.local;
    location / {
        proxy_pass http://nomad-ws;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;

        proxy_read_timeout 319s;

        # This is for log streaming requests
        proxy_buffering off;

        # Upgrade and Connection headers for upgrading to websockets
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "upgrade";

        proxy_set_header Origin "${scheme}://${proxy_host}";
    }
}

upstream nomad-ws {
    ip_hash;
    server nomad.nigel.local:4646;
}
28 ansible/roles/proxy/tasks/main.yaml (new file)
@@ -0,0 +1,28 @@
- name: Reverse proxy role configuration
  become: true
  block:
    - name: Ensure /etc/hosts are up to date
      ansible.builtin.copy:
        dest: /etc/hosts
        src: host-file
        mode: "0644"
    - name: Ensure nginx is setup as latest
      ansible.builtin.apt:
        name: nginx
    - name: Copy the nomad.conf to available configurations
      ansible.builtin.copy:
        src: "{{ item }}"
        dest: "/etc/nginx/sites-available/{{ item }}"
        mode: "0644"
      loop: "{{ proxy_nginx_configs }}"
    - name: Link the nomad.conf to sites-enabled
      ansible.builtin.file:
        path: "/etc/nginx/sites-enabled/{{ item }}"
        state: link
        src: "/etc/nginx/sites-available/{{ item }}"
        mode: "0644"
      loop: "{{ proxy_nginx_configs }}"
    - name: Restart nginx
      ansible.builtin.systemd_service:
        name: nginx
        state: restarted
3 ansible/roles/proxy/vars/main.yaml (new file)
@@ -0,0 +1,3 @@
proxy_nginx_configs:
  - nomad.conf
  - ncr.conf
@@ -1,40 +0,0 @@
networks:
  gitea:
    external: false

services:
  gitea:
    image: gitea/gitea:latest-rootless
    container_name: gitea
    environment:
      - USER_UID=1000
      - USER_GID=1000
    restart: always
    networks:
      - gitea
    volumes:
      - /opt/containers/gitea:/data
      - /etc/timezone:/etc/timezone:ro
      - /etc/localtime:/etc/localtime:ro
    ports:
      - "3000:3000"
      - "2222:22"
  gitea-runner:
    image: gitea/act_runner:nightly
    container_name: gitea-runner
    restart: always
    networks:
      - gitea
    volumes:
      - /opt/containers/gitea_runner/
      - /var/run/docker.sock:/var/run/docker.sock
    environment:
      - GITEA_INSTANCE_URL=https://git.shockrah.xyz
      - GITEA_RUNNER_NAME=gitea-main
      - GITEA_RUNNER_LABELS=gitea-main
      - GITEA_RUNNER_REGISTRATION_TOKEN=${token}
@@ -1,29 +0,0 @@
What is this
============

Here we contain scripts to build out all the containers that are run.
All of these images are based on images that are made from other projects

docker-compose.yaml
===================

Services that are more/less "special" go here since most of the stuff that is
run on the main host are basically just static html websites

Services & Containers
=====================

| Service    | Docker Image Used        |
|------------|--------------------------|
| Gitea      | gitea/gitea:latest       |
| Act Runner | gitea/act_runner:nightly |

Why the services above?
=======================

The Gitea related services are there so that I can host my own Git projects
away from "Git as a service" services. I have no issue with Github/Gitlab
but I just like being able to host my own stuff when possible :smiley:
@@ -1,34 +0,0 @@
#!/bin/bash

set -e

opt=$1
plan=tfplan

build_plan() {
    echo Generating plan
    set -x
    terraform plan -var-file variables.tfvars -input=false -out $plan
}

deploy_plan() {
    terraform apply $plan
}

init() {
    terraform init
}

help_prompt() {
    cat <<- EOF
    Options: plan deploy help
    EOF
}

# Default to building a plan
source ./secrets.sh
case $opt in
    plan) build_plan;;
    deploy) deploy_plan;;
    *) help_prompt;;
esac
@@ -37,6 +37,10 @@ locals {
    { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
    { name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
    { name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
    { name = "code.shockrah.xyz", records = [ var.vke_lb ] },
    { name = "wiki.shockrah.xyz", records = [ var.vke_lb ] },
  ]
}
@@ -33,3 +33,11 @@ resource "aws_route53_record" "temper-tv-mx" {
    "50 fb.mail.gandi.net.",
  ]
}

resource "aws_route53_record" "temper-tv-test" {
  zone_id = aws_route53_zone.temper-tv.id
  name    = "test.temper.tv"
  type    = "A"
  ttl     = 300
  records = [ var.vke_lb ]
}
@@ -26,3 +26,7 @@ variable "vultr_host" {
  description = "IP of the temp Vultr host"
}

variable "vke_lb" {
  type        = string
  description = "IP of our VKE load balancer"
}
@@ -1 +1,2 @@
vultr_host = "45.32.83.83"
vke_lb     = "45.32.89.101"
1 infra/nigel-k3s/.gitignore (vendored, new file)
@@ -0,0 +1 @@
config.yaml
35 infra/nigel-k3s/health.yaml (new file)
@@ -0,0 +1,35 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
              name: nginx-port
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - port: 80
      nodePort: 30808
      targetPort: nginx-port
infra/nigel-k3s/sample-cron.yaml
Normal file
19
infra/nigel-k3s/sample-cron.yaml
Normal file
@@ -0,0 +1,19 @@
|
||||
apiVersion: batch/v1
|
||||
kind: CronJob
|
||||
metadata:
|
||||
name: hello
|
||||
spec:
|
||||
schedule: "* * * * *"
|
||||
jobTemplate:
|
||||
spec:
|
||||
template:
|
||||
spec:
|
||||
containers:
|
||||
- name: hello
|
||||
image: busybox:1.28
|
||||
imagePullPolicy: IfNotPresent
|
||||
command:
|
||||
- /bin/sh
|
||||
- -c
|
||||
- date; echo Hello from the sample cron-container
|
||||
restartPolicy: OnFailure
|
||||
@@ -1,31 +0,0 @@
# This 'service' job is just a simple nginx container that lives here as a kind of sanity check
# PORT: 8000
# DNS : sanity.nigel.local
job "health" {
  type = "service"

  group "health" {
    count = 1
    network {
      port "http" {
        static = 8000
        to     = 80
      }
    }

    service {
      name     = "health-svc"
      port     = "http"
      provider = "nomad"
    }

    task "health-setup" {
      driver = "docker"

      config {
        image = "nginx:latest"
        ports = [ "http" ]
      }
    }
  }
}
@@ -1,62 +0,0 @@
resource kubernetes_namespace admin-servers {
  count = length(var.admin_services.configs) > 0 ? 1 : 0
  metadata {
    name = var.admin_services.namespace
  }
}

resource kubernetes_pod admin {
  for_each = var.admin_services.configs

  metadata {
    name      = each.key
    namespace = var.admin_services.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    node_selector = {
      "vke.vultr.com/node-pool" = var.admin_services.namespace
    }
    container {
      image = each.value.image
      name  = coalesce(each.value.name, each.key)
      resources {
        limits = {
          cpu    = each.value.cpu
          memory = each.value.mem
        }
      }
      port {
        container_port = each.value.port.internal
        protocol       = coalesce(each.value.proto, "TCP")
      }
    }
  }
}

resource kubernetes_service admin {
  for_each = var.admin_services.configs
  metadata {
    name      = each.key
    namespace = var.admin_services.namespace
    labels = {
      app = each.key
    }
  }
  # TODO: don't make these NodePorts since we're gonna want them
  # to be purely internal to the Cluster.
  # WHY? Because we want to keep dashboards as unexposed as possible
  spec {
    selector = {
      app = each.key
    }
    port {
      target_port = each.value.port.internal
      port        = each.value.port.expose
    }
    type = "NodePort"
  }
}
@@ -9,15 +9,31 @@ terraform {
  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
      version = "6.27.0"
    }
    vultr = {
      source  = "vultr/vultr"
      version = "2.22.1"
      version = "2.26.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.34.0"
      version = "3.0.1"
    }
    kubectl = {
      source  = "gavinbunney/kubectl"
      version = "1.19.0"
    }
    helm = {
      source  = "hashicorp/helm"
      version = "3.0.2"
    }
    tls = {
      source  = "hashicorp/tls"
      version = "4.1.0"
    }
    random = {
      source  = "hashicorp/random"
      version = "3.7.2"
    }
  }
}
@@ -39,4 +55,12 @@ provider kubernetes {
  config_path = "config.yaml"
}

provider kubectl {
  config_path = "config.yaml"
}

provider helm {
  kubernetes = {
    config_path = "config.yaml"
  }
}
18 infra/vultr-kubernetes/cluster-issuer.yaml (new file)
@@ -0,0 +1,18 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt
spec:
  acme:
    # The ACME server URL
    server: https://acme-v02.api.letsencrypt.org/directory
    preferredChain: "ISRG Root X1"
    # Email address used for ACME registration
    email: dev@shockrah.xyz
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt
    solvers:
      - http01:
          ingress:
            class: nginx
@@ -1,28 +1,17 @@
resource vultr_kubernetes athens {
  region  = var.cluster.region
  version = var.cluster.version
  label   = var.cluster.label
  # BUG: only have this set when creating the resource for the first time
  # once the cluster is up, we should comment this out again
  # enable_firewall = true
  node_pools {
    node_quantity = 1
    plan          = var.cluster.pools["meta"].plan
    label         = var.admin_services.namespace
    min_nodes     = var.cluster.pools["meta"].min
    max_nodes     = var.cluster.pools["meta"].max
    # tag = var.admin_services.namespace
  }
}
  label = var.cluster.label
  # vpc_id = vultr_vpc.athens.id

resource vultr_kubernetes_node_pools games {
  cluster_id    = vultr_kubernetes.athens.id
  node_quantity = var.cluster.pools["games"].min
  plan          = var.cluster.pools["games"].plan
  label         = var.game_servers.namespace
  min_nodes     = var.cluster.pools["games"].min
  max_nodes     = var.cluster.pools["games"].max
  tag           = var.game_servers.namespace
  node_pools {
    node_quantity = var.cluster.pools["main"].min_nodes
    plan          = var.cluster.pools["main"].plan
    label         = var.cluster.pools["main"].label
    min_nodes     = var.cluster.pools["main"].min_nodes
    max_nodes     = var.cluster.pools["main"].max_nodes
    auto_scaler   = true
  }
}

output k8s_config {
6 infra/vultr-kubernetes/data.tf (new file)
@@ -0,0 +1,6 @@
data vultr_kubernetes athens {
  filter {
    name   = "label"
    values = [ var.cluster.label ]
  }
}
4 infra/vultr-kubernetes/dev/.gitignore (vendored)
@@ -1,4 +0,0 @@
# created by virtualenv automatically
bin/
lib/
@@ -1,59 +0,0 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re

def get_args() -> Namespace:
    parser = ArgumentParser(
        prog="Cluster Search Thing",
        description="General utility for finding resources for game server bot"
    )
    games = {"health", "reflex", "minecraft"}
    parser.add_argument('-g', '--game', required=False, choices=games)

    admin = {"health"}
    parser.add_argument('-a', '--admin', required=False, choices=admin)
    return parser.parse_args()

def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
    config.load_kube_config("../config.yaml")
    return client.CoreV1Api()

def get_admin_service_details(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
    print('admin thing requested', args.admin)
    services = api.list_service_for_all_namespaces(label_selector=f'app={args.admin}')
    if len(services.items) == 0:
        print(f'Unable to find {args.admin} amongst the admin-services')
        return

    port = services.items[0].spec.ports[0].port
    node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
    ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
    ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address

    print(f'{args.admin} --> {ipv4}:{port} ~~> {ipv6}:{port}')

def get_game_server_ip(args: ArgumentParser, api: client.api.core_v1_api.CoreV1Api):
    services = api.list_service_for_all_namespaces(label_selector=f'app={args.game}')
    port = services.items[0].spec.ports[0].port

    # Collecting the IPV4 of the node that contains the pod(container)
    # we actually care about. Since these pods only have 1 container
    # Now we collect specific data about the game server we requested
    node_ips = list(filter(lambda a: a.type == 'ExternalIP', api.list_node().items[0].status.addresses))
    ipv4 = list(filter(lambda item: not re.match('[\d\.]{3}\d', item.address), node_ips))[0].address
    ipv6 = list(filter(lambda item: re.match('[\d\.]{3}\d', item.address), node_ips))[0].address

    print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')


if __name__ == '__main__':
    args = get_args()
    api = k8s_api('../config.yaml')

    if args.game:
        get_game_server_ip(args, api)

    if args.admin:
        get_admin_service_details(args, api)
@@ -1,8 +0,0 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3
@@ -1,18 +0,0 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0
@@ -1,31 +1,10 @@
resource vultr_firewall_rule web_inbound {
  for_each          = toset([for port in [80, 443, 6443] : tostring(port) ])
  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
  protocol          = "tcp"
  ip_type           = "v4"
  subnet            = "0.0.0.0"
  subnet_size       = 0
  port              = each.value
}
# resource vultr_firewall_rule web_inbound {
#   for_each          = toset([for port in [80, 443, 6443] : tostring(port) ])
#   firewall_group_id = vultr_kubernetes.athens.firewall_group_id
#   protocol          = "tcp"
#   ip_type           = "v4"
#   subnet            = "0.0.0.0"
#   subnet_size       = 0
#   port              = each.value
# }

resource vultr_firewall_rule game-server-inbound {
  for_each          = var.game_servers.configs
  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
  protocol          = "tcp"
  ip_type           = "v4"
  subnet            = "0.0.0.0"
  subnet_size       = 0
  port              = each.value.port.expose
}

resource vultr_firewall_rule admin-service-inbound {
  for_each          = var.admin_services.configs
  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
  protocol          = "tcp"
  ip_type           = "v4"
  subnet            = "0.0.0.0"
  subnet_size       = 0
  notes             = each.value.port.notes
  port              = each.value.port.expose
}
@@ -1,55 +0,0 @@
resource kubernetes_namespace game-servers {
  count = length(var.game_servers.configs) > 0 ? 1 : 0
  metadata {
    name = var.game_servers.namespace
  }
}

resource kubernetes_pod game {
  for_each = var.game_servers.configs

  metadata {
    name      = each.key
    namespace = var.game_servers.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    container {
      image = each.value.image
      name  = coalesce(each.value.name, each.key)
      resources {
        limits = {
          cpu    = each.value.cpu
          memory = each.value.mem
        }
      }
      port {
        container_port = each.value.port.internal
        protocol       = coalesce(each.value.proto, "TCP")
      }
    }
  }
}

resource kubernetes_service game {
  for_each = var.game_servers.configs
  metadata {
    name      = each.key
    namespace = var.game_servers.namespace
    labels = {
      app = each.key
    }
  }
  spec {
    selector = {
      app = each.key
    }
    port {
      target_port = each.value.port.internal
      port        = each.value.port.expose
    }
    type = "NodePort"
  }
}
74 infra/vultr-kubernetes/git.tf (new file)
@@ -0,0 +1,74 @@
# NOTE: this is a simple deployment for demo purposes only.
# Currently it does support SSH access and lacks Gitea runners.
# However a fully working setup can be found at: https://git.shockrah.xyz
resource kubernetes_deployment gitea {
  metadata {
    name      = "gitea"
    namespace = var.playground.namespace
    labels = {
      "app" = "gitea"
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        "app" = "gitea"
      }
    }
    template {
      metadata {
        labels = {
          "app" = "gitea"
        }
      }
      spec {
        container {
          name  = "gitea"
          image = "gitea/gitea:latest"
          port {
            container_port = 3000
            name           = "gitea-main"
          }
          port {
            container_port = 2222
            name           = "gitea-ssh"
          }
          volume_mount {
            name       = "gitea"
            mount_path = "/data"
          }
        }
        volume {
          name = "gitea"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim_v1.gitea.metadata[0].name
          }
        }
      }
    }
  }
}

resource kubernetes_service gitea {
  metadata {
    name      = "gitea"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      "app" = "gitea"
    }
    port {
      target_port = "gitea-main"
      port        = 3000
      name        = "http"
    }
    port {
      target_port = "gitea-ssh"
      port        = 2222
      name        = "ssh"
    }
  }
}
47 infra/vultr-kubernetes/health.tf (new file)
@@ -0,0 +1,47 @@
resource kubernetes_deployment_v1 health {
  metadata {
    name      = "health"
    namespace = var.playground.namespace
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        name = "health"
      }
    }
    template {
      metadata {
        labels = {
          name = "health"
        }
      }
      spec {
        container {
          name  = "health"
          image = "quanhua92/whoami:latest"
          port {
            container_port = "8080"
          }
        }
      }
    }
  }
}

resource kubernetes_service_v1 health {
  metadata {
    name      = "health"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      name = "health"
    }
    port {
      port        = 80
      target_port = 8080
      name        = "http"
    }
  }
}
7 infra/vultr-kubernetes/ingress-controller.tf (new file)
@@ -0,0 +1,7 @@
resource helm_release nginx {
  name             = "ingress-nginx"
  repository       = "https://kubernetes.github.io/ingress-nginx"
  chart            = "ingress-nginx"
  namespace        = "ingress-nginx"
  create_namespace = true
}
48 infra/vultr-kubernetes/ingress.tf (new file)
@@ -0,0 +1,48 @@
locals {
  services = {
    "code.shockrah.xyz"   = kubernetes_service.gitea
    "sanity.shockrah.xyz" = kubernetes_service_v1.health
    "uptime.shockrah.xyz" = kubernetes_service.kuma
    "wiki.shockrah.xyz"   = kubernetes_service.otterwiki
  }
}
resource kubernetes_ingress_v1 health {
  metadata {
    name      = "health-ingress"
    namespace = var.playground.namespace
    annotations = {
      "cert-manager.io/cluster-issuer" = "letsencrypt"
      "cert-manager.io/ingress.class"  = "nginx"
    }
  }
  spec {
    ingress_class_name = "nginx"
    dynamic tls {
      for_each = local.services
      content {
        hosts       = [tls.key]
        secret_name = "${tls.value.metadata[0].name}-secret"
      }
    }
    dynamic "rule" {
      for_each = local.services
      content {
        host = "${rule.key}"
        http {
          path {
            path = "/"
            backend {
              service {
                name = rule.value.metadata[0].name
                port {
                  number = rule.value.spec[0].port[0].port
                }
              }
            }
          }
        }
      }
    }
  }
}
1 infra/vultr-kubernetes/k8s/.gitignore (vendored)
@@ -1 +0,0 @@
terraform.yaml
@@ -1,33 +0,0 @@
terraform {
  required_version = ">= 0.13"
  backend s3 {
    bucket  = "project-athens"
    key     = "infra/vke/k8s/state/build.tfstate"
    region  = "us-west-1"
    encrypt = true
  }
  required_providers {
    # For interacting with S3
    aws = {
      source  = "hashicorp/aws"
      version = "~> 5.0"
    }
    kubernetes = {
      source  = "hashicorp/kubernetes"
      version = "2.30.0"
    }
  }
}

provider aws {
  access_key  = var.aws_key
  secret_key  = var.aws_secret
  region      = var.aws_region
  max_retries = 1
}

provider kubernetes {
  config_path = "terraform.yaml"
}
@@ -1,50 +0,0 @@
resource kubernetes_ingress_v1 athens {
  metadata {
    name      = var.shockrahxyz.name
    namespace = kubernetes_namespace.websites.metadata.0.name
    labels = {
      app = "websites"
    }
  }
  spec {
    rule {
      host = "test.shockrah.xyz"
      http {
        path {
          backend {
            service {
              name = var.shockrahxyz.name
              port {
                number = 80
              }
            }
          }
          path = "/"
        }
      }
    }
  }
}

resource kubernetes_service athens_lb {
  metadata {
    name      = "athens-websites"
    namespace = kubernetes_namespace.websites.metadata.0.name
    labels = {
      app = "websites"
    }
  }
  spec {
    selector = {
      app = kubernetes_ingress_v1.athens.metadata.0.labels.app
    }
    port {
      port        = 80
      target_port = 80
    }
    type         = "LoadBalancer"
    external_ips = [ var.cluster.ip ]
  }
}
@@ -1,5 +0,0 @@
resource kubernetes_namespace websites {
  metadata {
    name = "websites"
  }
}
@@ -1,62 +0,0 @@
# First we setup the ingress controller with helm

```sh
helm repo add traefik https://helm.traefik.io/traefik
helm repo update
# Now we can install this to our cluster
helm install --kubeconfig config.yaml traefik traefik/traefik
```

# Prove the service is present with

```sh
kubectl --kubeconfig config.yaml get svc
```

# Create the pods

```sh
kubectl --kubeconfig config.yaml -f k8s/nginx-dep.yaml
```

# Expose on port 80

```sh
kubectl --kubeconfig config.yaml -f k8s/nginx-service.yaml
```

# Create ingress on k8s

```sh
kubectl --kubeconfig config.yaml -f k8s/traefik-ingress.yaml
```

# Take the external IP from the ingress

Put that into terraform's A record for the domain since this is a load balancer
in Vultr ( actual resource apparently )

# Configure cert-manager for traefik ingress

Using the latest version from here:
https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml

```sh
kubectl --kubeconfig config.yaml \
    apply --validate=false \
    -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.yaml
```

# Create the cert issuer and certificate

```sh
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
```

Because we just have 1 cert for now we are looking for its status to be `READY`
@@ -1,21 +0,0 @@
# Plain nginx for now so that we can test out reverse dns
resource kubernetes_pod shockrah {
  metadata {
    name      = var.shockrahxyz.name
    namespace = kubernetes_namespace.websites.metadata.0.name
    labels = {
      app = var.shockrahxyz.name
    }
  }
  spec {
    container {
      image = "nginx"
      name  = "${var.shockrahxyz.name}"
      port {
        container_port = 80
      }
    }
  }
}
@@ -1,35 +0,0 @@
# API Keys required to reach AWS/Vultr
variable vultr_api_key {
  type      = string
  sensitive = true
}

variable aws_key {
  type      = string
  sensitive = true
}

variable aws_secret {
  type      = string
  sensitive = true
}

variable aws_region {
  type      = string
  sensitive = true
}

variable shockrahxyz {
  type = object({
    name = string
    port = number
    dns  = string
  })
}

variable cluster {
  type = object({
    ip = string
  })
}
@@ -1,37 +0,0 @@
# Here we are going to define the deployment and service
# Basically all things directly related to the actual service we want to provide
---
kind: Deployment
apiVersion: apps/v1
metadata:
  name: alternate-nginx-web
  namespace: default
  labels:
    app: alternate-nginx-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: alternate-nginx-web
  template:
    metadata:
      labels:
        app: alternate-nginx-web
    spec:
      # Container comes from an example thing i randomly found on docker hub
      containers:
        - name: alternate-nginx-web
          image: dockerbogo/docker-nginx-hello-world
---
apiVersion: v1
kind: Service
metadata:
  name: alternate-nginx-web
  namespace: default
spec:
  selector:
    app: alternate-nginx-web
  ports:
    - name: http
      targetPort: 80
      port: 80
@@ -1,30 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: hello.temprah-lab.xyz
  namespace: default
spec:
  secretName: hello.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  commonName: hello.temprah-lab.xyz
  dnsNames:
    - hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod-hello
  namespace: default
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: dev@shockrah.xyz
    privateKeySecretRef:
      name: letsencrypt-prod-hello
    solvers:
      - http01:
          ingress:
            class: traefik
@@ -1,13 +0,0 @@
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: sample.temprah-lab.xyz
  namespace: default
spec:
  secretName: sample.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-prod
    kind: ClusterIssuer
  commonName: sample.temprah-lab.xyz
  dnsNames:
    - sample.temprah-lab.xyz
@@ -1,20 +0,0 @@
kind: Deployment
apiVersion: apps/v1
metadata:
  name: nginx-web
  namespace: default
  labels:
    app: nginx-web
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx-web
  template:
    metadata:
      labels:
        app: nginx-web
    spec:
      containers:
        - name: nginx
          image: nginx
@@ -1,12 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: nginx-web
  namespace: default
spec:
  selector:
    app: nginx-web
  ports:
    - name: http
      targetPort: 80
      port: 80
@@ -1,44 +0,0 @@
# This is the first thing we need to create, an issuer to put certs into
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
  namespace: default
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: dev@shockrah.xyz
    privateKeySecretRef:
      name: letsencrypt-temprah-lab
    solvers:
      - http01:
          ingress:
            class: traefik
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: hello.temprah-lab.xyz
  namespace: default
spec:
  secretName: hello.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-temprah-lab
    kind: ClusterIssuer
  commonName: hello.temprah-lab.xyz
  dnsNames:
    - hello.temprah-lab.xyz
---
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
  name: sample.temprah-lab.xyz
  namespace: default
spec:
  secretName: sample.temprah-lab.xyz-tls
  issuerRef:
    name: letsencrypt-temprah-lab
    kind: ClusterIssuer
  commonName: sample.temprah-lab.xyz
  dnsNames:
    - sample.temprah-lab.xyz
@@ -1,31 +0,0 @@
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: traefik-ingress
  namespace: default
  labels:
    name: project-athens-lb
  annotations:
    kubernetes.io/ingress.class: traefik
spec:
  rules:
    - host: sample.temprah-lab.xyz
      http:
        paths:
          - backend:
              service:
                name: nginx-web
                port:
                  number: 80
            path: /
            pathType: Prefix
    - host: hello.temprah-lab.xyz
      http:
        paths:
          - backend:
              service:
                name: alternate-nginx-web
                port:
                  number: 80
            path: /
            pathType: Prefix
@@ -1,15 +1,14 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
kind: Issuer
metadata:
  name: letsencrypt-prod
  namespace: default
  name: letsencrypt-nginx
spec:
  acme:
    server: https://acme-v02.api.letsencrypt.org/directory
    email: dev@shockrah.xyz
    server: https://acme-v02.api.letsencrypt.org/directory
    privateKeySecretRef:
      name: letsencrypt-prod
      name: example
    solvers:
      - http01:
          ingress:
            class: traefik
            class: nginx
36 infra/vultr-kubernetes/legacy/ingress.yaml (new file)
@@ -0,0 +1,36 @@
apiVersion: v1
kind: Service
metadata:
  name: whoami-service
spec:
  selector:
    name: whoami
  ports:
    - name: http
      port: 80
      targetPort: 8080
---
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: whoami-ingress
  annotations:
    cert-manager.io/ingress.class: nginx
    cert-manager.io/cluster-issuer: letsencrypt-prod
spec:
  ingressClassName: nginx
  tls:
    - secretName: whoami-tls
      hosts:
        - example.shockrah.xyz
  rules:
    - host: example.shockrah.xyz
      http:
        paths:
          - path: /
            pathType: Prefix
            backend:
              service:
                name: whoami-service
                port:
                  number: 80
21 infra/vultr-kubernetes/legacy/service.yaml (new file)
@@ -0,0 +1,21 @@
apiVersion: v1
kind: Service
metadata:
  name: whoami-lb
  annotations:
    service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
    service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-interval: "30"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-response-timeout: "5"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-unhealthy-threshold: "5"
    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-healthy-threshold: "5"
spec:
  type: LoadBalancer
  selector:
    name: whoami
  ports:
    - name: http
      port: 80
      targetPort: 8080
20 infra/vultr-kubernetes/legacy/test.yaml (new file)
@@ -0,0 +1,20 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: whoami
spec:
  replicas: 3
  selector:
    matchLabels:
      name: whoami
  template:
    metadata:
      labels:
        name: whoami
    spec:
      containers:
        - name: whoami
          image: quanhua92/whoami:latest
          imagePullPolicy: Always
          ports:
            - containerPort: 8080
37 infra/vultr-kubernetes/legacy/tls.yaml (new file)
@@ -0,0 +1,37 @@
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-staging
spec:
  acme:
    # The ACME server URL
    server: https://acme-staging-v02.api.letsencrypt.org/directory
    preferredChain: "ISRG Root X1"
    # Email address used for ACME registration
    email: dev@shockrah.xyz
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-staging
    solvers:
      - http01:
          ingress:
            class: nginx
---
apiVersion: cert-manager.io/v1
kind: ClusterIssuer
metadata:
  name: letsencrypt-prod
spec:
  acme:
    # The ACME server URL
    server: https://acme-v02.api.letsencrypt.org/directory
    # Email address used for ACME registration
    email: dev@shockrah.xyz
    # Name of a secret used to store the ACME account private key
    privateKeySecretRef:
      name: letsencrypt-prod
    solvers:
      - http01:
          ingress:
            class: nginx
10 infra/vultr-kubernetes/namespaces.tf (new file)
@@ -0,0 +1,10 @@
resource kubernetes_namespace playground {
  metadata {
    annotations = {
      names = var.playground.namespace
    }
    name = var.playground.namespace
  }
}
30 infra/vultr-kubernetes/tls.tf (new file)
@@ -0,0 +1,30 @@
resource helm_release shockrah_cert_manager {
  name             = "cert-manager"
  repository       = "https://charts.jetstack.io"
  chart            = "cert-manager"
  version          = "v1.18.2"
  namespace        = "cert-manager"
  create_namespace = true
  cleanup_on_fail  = true

  set = [
    {
      name  = "crds.enabled"
      value = "true"
    }
  ]
}

data kubectl_file_documents cluster_issuer {
  content = file("cluster-issuer.yaml")
}

resource kubectl_manifest cluster_issuer {
  for_each  = data.kubectl_file_documents.cluster_issuer.manifests
  yaml_body = each.value

  depends_on = [
    data.kubectl_file_documents.cluster_issuer
  ]
}
61 infra/vultr-kubernetes/uptime.tf (new file)
@@ -0,0 +1,61 @@
resource kubernetes_deployment kuma {
  metadata {
    name      = "kuma"
    namespace = var.playground.namespace
    labels = {
      "app" = "kuma"
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        "app" = "kuma"
      }
    }
    template {
      metadata {
        labels = {
          "app" = "kuma"
        }
      }
      spec {
        container {
          name  = "kuma"
          image = "louislam/uptime-kuma:2"
          port {
            container_port = 3001
            name           = "uptime-kuma"
          }
          volume_mount {
            name       = "kuma-data"
            mount_path = "/app/data"
          }
        }
        volume {
          name = "kuma-data"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim_v1.kuma.metadata[0].name
          }
        }
      }
    }
  }
}

resource kubernetes_service kuma {
  metadata {
    name      = "kuma"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      "app" = "kuma"
    }
    port {
      target_port = "uptime-kuma"
      port        = 3001
      name        = "http"
    }
  }
}
@@ -26,46 +26,24 @@ variable cluster {
    label   = string
    version = string
    pools = map(object({
      plan      = string
      autoscale = bool
      min       = number
      max       = number
      node_quantity = number
      plan          = string
      label         = string
      min_nodes     = number
      max_nodes     = number
      tag           = string
    }))
  })
}

variable game_servers {

variable playground {
  type = object({
    namespace = string
    configs = map(object({
      name  = optional(string)
      image = string
      cpu   = string
      mem   = string
      port = object({
        internal = number
        expose   = number
      })
      proto = optional(string)
    }))
  })
}

variable admin_services {
  type = object({
    namespace = string
    configs = map(object({
      name  = string
      image = string
      cpu   = string
      mem   = string
      port = object({
        notes    = optional(string)
        internal = number
        expose   = number
      })
      proto = optional(string)
    }))
    # TODO: Re-incorporate this var for templating later
    tls = object({
      email = string
    })
  })
}
@@ -1,42 +1,24 @@
cluster = {
  region  = "lax"
  label   = "athens-cluster"
  version = "v1.31.2+1"
  version = "v1.34.1+2"
  pools = {
    meta = {
      plan      = "vc2-1c-2gb"
      autoscale = true
      min       = 1
      max       = 2
    }
    games = {
      plan      = "vc2-1c-2gb"
      autoscale = true
      min       = 1
      max       = 3
    main = {
      node_quantity = 1
      plan          = "vc2-1c-2gb"
      label         = "main"
      min_nodes     = 1
      max_nodes     = 2
      tag           = "athens-main"
    }
  }
}

game_servers = {
  namespace = "games"
  configs = {
  }
}

admin_services = {
  namespace = "admin-services"
  configs = {
    health = {
      image = "nginx:latest"
      name  = "health"
      cpu   = "200m"
      mem   = "64Mi"
      port = {
        notes    = "Basic nginx sanity check service"
        expose   = 30800
        internal = 80
      }
    }
playground = {
  namespace = "playground"
  # Sanity check service that is used purely for the sake of ensuring
  # things are ( at a basic level ) functional
  tls = {
    email = "dev@shockrah.xyz"
  }
}
49 infra/vultr-kubernetes/volumes.tf (new file)
@@ -0,0 +1,49 @@
resource kubernetes_persistent_volume_claim_v1 kuma {
  metadata {
    name      = "kuma-data"
    namespace = var.playground.namespace
  }
  spec {
    volume_mode  = "Filesystem"
    access_modes = [ "ReadWriteOnce" ]
    resources {
      requests = {
        storage = "10Gi"
      }
    }
  }
}

resource kubernetes_persistent_volume_claim_v1 gitea {
  metadata {
    name      = "gitea-data"
    namespace = var.playground.namespace
  }
  spec {
    volume_mode  = "Filesystem"
    access_modes = [ "ReadWriteOnce" ]
    resources {
      requests = {
        storage = "10Gi"
      }
    }
  }
}

resource kubernetes_persistent_volume_claim_v1 otterwiki {
  metadata {
    name      = "otterwiki-data"
    namespace = var.playground.namespace
  }
  spec {
    volume_mode  = "Filesystem"
    access_modes = [ "ReadWriteOnce" ]
    resources {
      requests = {
        storage = "10Gi"
      }
    }
  }
}
63 infra/vultr-kubernetes/wiki.tf (new file)
@@ -0,0 +1,63 @@
resource kubernetes_deployment otterwiki {
  metadata {
    name      = "otterwiki"
    namespace = var.playground.namespace
    labels = {
      "app" = "otterwiki"
    }
  }
  spec {
    replicas = 1
    selector {
      match_labels = {
        "app" = "otterwiki"
      }
    }
    template {
      metadata {
        labels = {
          "app" = "otterwiki"
        }
      }
      spec {
        container {
          name  = "otterwiki"
          image = "redimp/otterwiki:2"
          port {
            container_port = 8080
            name           = "otterwiki-main"
          }
          volume_mount {
            name       = "otterwiki-data"
            mount_path = "/var/lib/otterwiki"
          }
        }
        volume {
          name = "otterwiki-data"
          persistent_volume_claim {
            claim_name = kubernetes_persistent_volume_claim_v1.otterwiki.metadata[0].name
          }
        }
      }
    }
  }
}

resource kubernetes_service otterwiki {
  metadata {
    name      = "otterwiki"
    namespace = var.playground.namespace
  }
  spec {
    selector = {
      "app" = "otterwiki"
    }
    port {
      port        = 80
      target_port = "otterwiki-main"
      protocol    = "TCP"
      name        = "http"
    }
  }
}