Compare commits: be13e9e71f...master (143 commits)
Commits in this compare (author and date columns were empty in this view):

eea4c61537 ee860c6e1f 1c11410c2d 4d71994b85 79cb4eb1a6 e8817fe093 97bffd2042 37305fd74e
555124bf2f e209da949b caa2eba639 982669ed4a 4446ef813f 9dc2f1d769 01b7b4ced8 29cdfcb695
bbbc9ed477 d64c5526e6 469b3d08ce 7f5b3205d0 67ff5ce729 6aadb47c61 0624161f53 c6b2a062e9
718647f617 cfe631eba7 29e049cf7f 990d29ae6c 859201109e de3bff8f14 54a6ddbe5d 82333fe6ce
cddf67de2f affa03bed5 34e1f6afdf fd9bd290af d992556032 fce73d06e0 7f5d81f0ee 410790765f
9454e03f53 e6ed85920d 2775d354f8 1f6f013634 778b995980 fc897bdd0e 8f06ef269a f15da0c88d
c602773657 cd908d9c14 56e9c0ae4a 30bc6ee2fa cd9822bb85 0efe6ca642 2ef4b00097 e183055282
514909fc8d 5b4a440cb4 826d334c3c 77590b067a 850570faf5 12831fbaf3 a6123dd7e2 9c2e0a84d7
1281ea8857 ee2d502ca6 88059a5e0f 4024809cc4 029a3c80d5 75b7f2fa3d 8ef606153f be34327791
c6ef6ae4d2 eb7871584b 4a0a12242a 053db8793b 24fcbc957a 9675fbacef 3f0c8a865d 3f2e6d86f6
08560c945b 506a9b32d9 d4ece741e0 311a592d6e 153ea8e982 943e9651da 669c414288 e3afed5e4f
e337989a59 7f36ff272e 79e6698db1 603559b255 4851b6521c 9785e8a40a 79bd7424c3 5227bea568
47b69d7f49 a3fdc5fcc7 5a1afb4a07 e03daa62e5 15dfaea8db ef4967cd88 55217ce50b 2bbc9095f7
fcf7ded218 b68d53b143 3c6bc90feb 3521b840ae 5f10976264 10e936a8da 8bbaea8fd9 d39e0c04e5
b99525955e 9b6f9b6656 f2c4506245 ac11487feb ee23406f49 6e4982fffd f5f670e5f2 6d642a7359
7a41d033b5 280a1f7a87 90c61d7c00 ad0f3e6089 f9c73b1e4a 5d03f6b218 7f2ee6d35b a4a1d55a53
bf812cce4c abf3297498 52e8c56682 c50deddf53 6ab49d1b28 68acbe2842 a6dc2da7be d483f5ed72
e759802ce6 f141a42689 fba534b9df 3779d53810 16c0e5ee98 8f18ff8c85 c2099e2133
.gitea/workflows/ansible-lint.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
+name: Ansible Linting
+on:
+  - push
+
+jobs:
+  ansible-lint:
+    runs-on: ubuntu-latest
+    container:
+      image: shockrah/ansible
+    steps:
+      - name: Checkout repo content
+        uses: actions/checkout@v4
+      - run: ansible-lint -c linter.yaml
+        working-directory: ansible/
+
(deleted file)
@@ -1,21 +0,0 @@
-name: Actions demo
-run-name: ${{ gitea.actor }} is testing the actions
-on:
-  - push
-
-jobs:
-  simple-echo:
-    runs-on: gitea-main
-    steps:
-      - run: echo "🎉 The job was automatically triggered by a ${{ gitea.event_name }} event."
-      - run: echo "🐧 This job is now running on a ${{ runner.os }} server hosted by Gitea!"
-      - run: echo "🔎 The name of your branch is ${{ gitea.ref }} and your repository is ${{ gitea.repository }}."
-      - name: Check out repository code
-        uses: actions/checkout@v4
-      - run: echo "💡 The ${{ gitea.repository }} repository has been cloned to the runner."
-      - run: echo "🖥️ The workflow is now ready to test your code on the runner."
-      - name: List files in the repository
-        run: |
-          ls ${{ gitea.workspace }}
-      - run: echo "🍏 This job's status is ${{ job.status }}."
-
.gitea/workflows/sec-lint-s3.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
+name: Secops Linting and Safety Checks
+on:
+  push:
+    branches:
+      - master
+
+
+
+jobs:
+  checkov-scan-s3:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout repo code
+        uses: actions/checkout@v4
+      - name: Scan S3 Terraform with Checkov
+        uses: bridgecrewio/checkov-action@master
+        with:
+          directory: infra/s3/
+          framework: terraform
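
The same scan the workflow runs in CI can be reproduced locally; a minimal sketch, assuming Python/pip are available and you run it from the repository root (the directory and framework mirror the workflow above):

```sh
# Install checkov and scan the same Terraform directory the workflow targets
pip install checkov
checkov -d infra/s3/ --framework terraform
```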
.gitignore (vendored, +1 line)
@@ -21,3 +21,4 @@ docker/beta/shockrah.xyz/
 docker/beta/resume.shockrah.xyz/
 k8s/config.yaml
 infra/**/tfplan
+.ansible/
ansible/ansible.cfg (new file, 3 lines)
@@ -0,0 +1,3 @@
+[defaults]
+stdout_callback = yaml
+
ansible/inventory.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
+nigel:
+  hosts:
+    nigel.local:
ansible/linter.yaml (new file, 4 lines)
@@ -0,0 +1,4 @@
+---
+skip_list:
+  - role-name
+  - var-naming[no-role-prefix]
ansible/local-setup-admin-user.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
+# This playbook is meant to be a oneshot, to be run manually on the dev box
+# The rest of the role work is meant to be run as the admin user that
+# this playbook creates for us
+---
+- name: Setup local admin user with a fresh ubuntu host
+  hosts: nigel.local
+  remote_user: nigel
+  vars:
+    admin:
+      username: nigel
+  tasks:
+    - name: Copy the nigel admin key
+      ansible.posix.authorized_key:
+        user: "{{ admin.username }}"
+        state: present
+        key: "{{ lookup('file', '~/.ssh/nigel/admin.pub') }}"
+    - name: Prevent password based logins
+      become: true
+      ansible.builtin.lineinfile:
+        dest: /etc/ssh/sshd_config
+        line: PasswordAuthentication no
+        state: present
+        backup: true
+    - name: Restart SSH Daemon
+      become: true
+      ansible.builtin.service:
+        name: ssh
+        state: restarted
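
The leading comment marks this playbook as a manual oneshot. A plausible invocation from the repository's ansible/ directory, assuming the nigel user can still log in with a password at this point, would be:

```sh
# -k prompts for the SSH password; after this run, password logins are disabled
ansible-playbook -i inventory.yaml -k local-setup-admin-user.yaml
```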
ansible/nomad.yaml (new file, 9 lines)
@@ -0,0 +1,9 @@
+---
+- name: Setup all the responsibilities of the nomad server
+  hosts: nigel.local
+  remote_user: nigel
+  tasks:
+    - name: Apply the nomad role
+      ansible.builtin.include_role:
+        name: nomad
+
ansible/nuc.yaml (new file, 14 lines)
@@ -0,0 +1,14 @@
+---
+- name: Setup bare metal requirements
+  hosts: nigel.local
+  remote_user: nigel
+  tasks:
+    - name: Apply the base role to the nuc
+      ansible.builtin.include_role:
+        name: base
+    - name: Apply the k3s base role
+      ansible.builtin.include_role:
+        name: k3s
+    - name: Apply the proxy role
+      ansible.builtin.include_role:
+        name: proxy
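
Since nuc.yaml simply chains the base, k3s, and proxy roles, a sketch of how it might be applied — and how the setup/nomad tags defined in the base role can be selected — looks like:

```sh
# Full run against the host defined in inventory.yaml
ansible-playbook -i inventory.yaml nuc.yaml
# Only the tagged sudo/docker setup tasks from the base role
ansible-playbook -i inventory.yaml nuc.yaml --tags setup
```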
(deleted file)
@@ -1,14 +0,0 @@
----
-- hosts: webhost
-  remote_user: root
-  tasks:
-    - name: Copy pull script
-      copy:
-        src: ../scripts/pull-down-s3.sh
-        dest: /opt/nginx/pull-down-s3.sh
-    - name: Pull down all sites from S3
-      shell: bash /opt/nginx/pull-down-s3.sh {{ item }}
-      loop:
-        - shockrah.xyz
-        - resume.shockrah.xyz
-        - temper.tv
ansible/proxy.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
+---
+- name: Setup host as a reverse proxy
+  hosts: nigel.local
+  remote_user: nigel
+  tasks:
+    - name: Apply reverse proxy role
+      ansible.builtin.include_role:
+        name: proxy
ansible/roles/base/files/docker.list (new file, 1 line)
@@ -0,0 +1 @@
+deb [arch=amd64 signed-by=/etc/apt/keyrings/docker.asc] https://download.docker.com/linux/ubuntu noble stable
ansible/roles/base/tasks/ensure-docker-basic.yaml (new file, 41 lines)
@@ -0,0 +1,41 @@
+- name: Ensure we have basic updated packages before setting up docker
+  ansible.builtin.apt:
+    name: "{{ item }}"
+    update_cache: true
+  loop:
+    - ca-certificates
+    - curl
+- name: Running install on the keyrings directory
+  ansible.builtin.command:
+    cmd: install -m 0755 -d /etc/apt/keyrings
+  register: install
+  changed_when: install.rc == 0
+- name: Fetch Docker GPG Key
+  vars:
+    keylink: https://download.docker.com/linux/ubuntu/gpg
+  ansible.builtin.get_url:
+    url: "{{ keylink }}"
+    dest: /etc/apt/keyrings/docker.asc
+    mode: "0644"
+- name: Add repo to apt sources
+  ansible.builtin.copy:
+    src: docker.list
+    dest: /etc/apt/sources.list.d/docker.list
+    mode: "0644"
+- name: Update Apt cache with latest docker.list packages
+  ansible.builtin.apt:
+    update_cache: true
+- name: Ensure all docker packages are updated to the latest versions
+  ansible.builtin.apt:
+    name: "{{ item }}"
+  loop:
+    - docker-ce
+    - docker-ce-cli
+    - containerd.io
+    - docker-buildx-plugin
+    - docker-compose-plugin
+- name: Verify that the docker components are installed properly
+  ansible.builtin.command:
+    cmd: docker run hello-world
+  register: docker
+  changed_when: docker.rc == 0
ansible/roles/base/tasks/k3s.yaml (new file, 8 lines)
@@ -0,0 +1,8 @@
+- name: Download the setup script
+  ansible.builtin.get_url:
+    url: https://get.k3s.io
+    dest: /tmp/k3s.sh
+    mode: "0644"
+- name: Run installation script
+  ansible.builtin.command:
+    cmd: bash /tmp/k3s.sh
ansible/roles/base/tasks/main.yaml (new file, 25 lines)
@@ -0,0 +1,25 @@
+- name: Ensure nigel can use sudo without password
+  become: true
+  tags:
+    - setup
+  ansible.builtin.lineinfile:
+    path: /etc/sudoers
+    state: present
+    line: "nigel ALL=(ALL) NOPASSWD:ALL"
+- name: Ensure docker components are installed
+  tags:
+    - setup
+  ansible.builtin.include_tasks:
+    file: ensure-docker-basic.yaml
+    apply:
+      become: true
+      tags:
+        - setup
+- name: Run through nomad removal steps
+  tags: nomad
+  ansible.builtin.include_tasks:
+    file: nomad.yaml
+    apply:
+      become: true
+      tags:
+        - nomad
ansible/roles/base/templates/consul.hcl (new file, 12 lines)
@@ -0,0 +1,12 @@
+bind_addr = "{{ ip }}"
+advertise_addr = "{{ ip }}"
+bootstrap = true
+bootstrap_expect = 1
+client_addr = "{{ ip }}"
+server = true
+data_dir = "/opt/consul"
+
+ui_config {
+  enabled = true
+}
+
ansible/roles/base/templates/hashicorp.list (new file, 1 line)
@@ -0,0 +1 @@
+deb [signed-by={{ keyfile }}] https://apt.releases.hashicorp.com jammy main
ansible/roles/base/vars/main.yaml (new file, empty)
ansible/roles/k3s/tasks/main.yaml (new file, 11 lines)
@@ -0,0 +1,11 @@
+- name: Download the installation script
+  ansible.builtin.get_url:
+    url: https://get.k3s.io
+    dest: /tmp
+  register: install_script
+- name: Run installation script
+  become: true
+  environment:
+    INSTALL_K3S_EXEC: server
+  ansible.builtin.command:
+    cmd: sh {{ install_script.dest }}
ansible/roles/k3s/vars/main.yaml (new file, empty)
ansible/roles/nomad/files/nomad.hcl (new file, 24 lines)
@@ -0,0 +1,24 @@
+data_dir = "/opt/nomad/data"
+bind_addr = "0.0.0.0"
+
+server {
+  enabled = true
+  bootstrap_expect = 1
+}
+
+
+client {
+  enabled = true
+  servers = ["127.0.0.1"]
+}
+
+host_volume "registry" {
+  path = "/opt/volumes/registry"
+  read_only = false
+}
+
+host_volume "nfs" {
+  path = "/opt/volumes/nfs"
+  read_only = false
+}
+
ansible/roles/nomad/tasks/main.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
+- name: Nomad server configuration
+  become: true
+  block:
+    - name: Ensure the root data directory is present
+      ansible.builtin.file:
+        path: "{{ nomad.volumes.root }}"
+        state: absent
+        mode: "0755"
+    - name: Ensure registry volume is present
+      ansible.builtin.file:
+        path: "{{ nomad.volumes.registry }}"
+        state: absent
+        mode: "0755"
+    - name: Ensure the MinIO directory is present
+      ansible.builtin.file:
+        path: "{{ nomad.volumes.nfs }}"
+        state: absent
+        mode: "0755"
ansible/roles/nomad/vars/main.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
+nomad:
+  volumes:
+    root: /opt/volumes
+    registry: /opt/volumes/ncr
+    nfs: /opt/volumes/nfs
ansible/roles/proxy/files/host-file (new file, 15 lines)
@@ -0,0 +1,15 @@
+127.0.0.1 localhost
+127.0.1.1 nigel
+
+# Our own dns stuff
+127.0.1.1 nigel.local
+127.0.1.1 nomad.nigel.local
+127.0.1.1 sanity.nigel.local
+127.0.1.1 ncr.nigel.local
+
+# The following lines are desirable for IPv6 capable hosts
+::1     ip6-localhost ip6-loopback
+fe00::0 ip6-localnet
+ff00::0 ip6-mcastprefix
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
ansible/roles/proxy/files/ncr.conf (new file, 6 lines)
@@ -0,0 +1,6 @@
+server {
+    server_name ncr.nigel.local;
+    location / {
+        proxy_pass http://localhost:5000;
+    }
+}
ansible/roles/proxy/files/nomad.conf (new file, 25 lines)
@@ -0,0 +1,25 @@
+server {
+    server_name nomad.nigel.local;
+    location / {
+        proxy_pass http://nomad-ws;
+        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
+
+        proxy_read_timeout 319s;
+
+        # This is for log streaming requests
+        proxy_buffering off;
+
+        # Upgrade and Connection headers for upgrading to websockets
+        proxy_set_header Upgrade $http_upgrade;
+        proxy_set_header Connection "upgrade";
+
+
+        proxy_set_header Origin "${scheme}://${proxy_host}";
+    }
+}
+
+
+upstream nomad-ws {
+    ip_hash;
+    server nomad.nigel.local:4646;
+}
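
The comments above call out the headers Nomad's websocket endpoints (exec, log streaming) need. Before testing websockets, a quick plain-HTTP sanity check through the proxy — assuming nomad.nigel.local resolves to the proxy host from wherever you run this — might look like:

```sh
# Nomad's agent health endpoint should answer through nginx on port 80
curl -i http://nomad.nigel.local/v1/agent/health
```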
ansible/roles/proxy/tasks/main.yaml (new file, 28 lines)
@@ -0,0 +1,28 @@
+- name: Reverse proxy role configuration
+  become: true
+  block:
+    - name: Ensure /etc/hosts are up to date
+      ansible.builtin.copy:
+        dest: /etc/hosts
+        src: host-file
+        mode: "0644"
+    - name: Ensure nginx is setup as latest
+      ansible.builtin.apt:
+        name: nginx
+    - name: Copy the nomad.conf to available configurations
+      ansible.builtin.copy:
+        src: "{{ item }}"
+        dest: "/etc/nginx/sites-available/{{ item }}"
+        mode: "0644"
+      loop: "{{ proxy_nginx_configs }}"
+    - name: Link the nomad.conf to sites-enabled
+      ansible.builtin.file:
+        path: "/etc/nginx/sites-enabled/{{ item }}"
+        state: link
+        src: "/etc/nginx/sites-available/{{ item }}"
+        mode: "0644"
+      loop: "{{ proxy_nginx_configs }}"
+    - name: Restart nginx
+      ansible.builtin.systemd_service:
+        name: nginx
+        state: restarted
ansible/roles/proxy/vars/main.yaml (new file, 3 lines)
@@ -0,0 +1,3 @@
+proxy_nginx_configs:
+  - nomad.conf
+  - ncr.conf
(deleted file)
@@ -1,23 +0,0 @@
-#!/bin/bash
-
-set -e
-
-bucket="$1"
-s3env=/opt/nginx/s3.env
-
-[[ -z "$bucket" ]] && echo "No bucket selected" && exit 1
-
-[[ ! -f $s3env ]] && echo "No credentials to source!" && exit 1
-source $s3env
-
-pull() {
-    aws s3 sync s3://$bucket /opt/nginx/$bucket
-}
-
-
-case $bucket in
-    resume.shockrah.xyz|shockrah.xyz|temper.tv) pull;;
-    *) echo "Invalid bucket name" && exit 1 ;;
-esac
-
-
(new file)
@@ -0,0 +1,25 @@
+---
+- name: Setup all attributes of the html-deployer user for static website CI
+  hosts: webhost
+  vars:
+    username: html-deployer
+  remote_user: webadmin
+  tasks:
+    - name: Create user for git actions to deploy html
+      become: true
+      ansible.builtin.user:
+        name: "{{ username }}"
+        comment: Used for deploying html from Gitea Actions
+        group: nginx
+    - name: Set the authorized keys
+      become: true
+      ansible.posix.authorized_key:
+        user: "{{ username }}"
+        state: present
+        key: "{{ lookup('file', '~/.ssh/vultr/html-deployer.pem.pub') }}"
+    - name: Ensure /opt/nginx website folders are owned by html-deployer
+      ansible.builtin.file:
+        path: "/opt/nginx/{{ item }}"
+        recurse: true
+        owner: "{{ username }}"
+        group: "nginx"
deprecated/playbooks/playbooks-deprecated/update.yml (new file, 20 lines)
@@ -0,0 +1,20 @@
+# Purpose: General update to the system to keep packages up to date
+---
+- hosts: webhost
+  remote_user: webadmin
+  tasks:
+    - name: Informational Dump of what is upgradeable
+      ansible.builtin.command: apt list --upgradable
+      register: pkg
+    - name: Show list of packages to upgrade
+      ansible.builtin.debug:
+        msg: "{{ pkg.stdout_lines }}"
+    - name: Update the packages at the system level to the latest versions
+      become: true
+      ansible.builtin.apt:
+        name: "*"
+        state: latest
+
+
+
+
(deleted file)
@@ -1,5 +0,0 @@
-FROM nginx:latest
-
-COPY nginx.conf /etc/nginx/nginx.conf
-
-
(deleted file)
@@ -1,31 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# Build the image locally first
-docker build . -t reverse-proxy:latest
-
-# Tag as required
-docker tag reverse-proxy:latest 805875567437.dkr.ecr.us-west-1.amazonaws.com/reverse-proxy:latest
-
-if [ "$1" = "dev" ]; then
-    ###########################
-    # Development build steps
-    ###########################
-    echo "Building local dev image"
-    echo "Skipping docker push because this is a local build"
-elif [ "$1" = "prod" ]; then
-    ###########################
-    # Production build steps
-    ###########################
-    echo "Building production image"
-    echo "Authenticating to push to production registry"
-    # ECR Authentication
-    aws ecr get-login-password --region us-west-1 | docker login --username AWS --password-stdin 805875567437.dkr.ecr.us-west-1.amazonaws.com
-    # Pushing tagged image
-    docker push 805875567437.dkr.ecr.us-west-1.amazonaws.com/reverse-proxy:latest
-else
-    echo "Unknown option given to build.sh"
-    exit 1
-fi
-
(deleted file)
@@ -1,52 +0,0 @@
-events {
-    worker_connections 768;
-}
-
-http {
-    proxy_cache_path /var/cache/nginx levels=1:2 keys_zone=my_cache:10m inactive=60m;
-    proxy_cache_valid 200 60m;
-    proxy_cache_valid 404 1m;
-    proxy_cache my_cache;
-    proxy_cache_key "$scheme$request$request_method$host$request_uri";
-    server {
-        listen 80;
-        listen [::]:80;
-        server_name shockrah.xyz;
-        location / {
-            proxy_pass http://shockrah.xyz.s3-website-us-west-1.amazonaws.com;
-        }
-        location /health {
-            access_log off;
-            add_header 'Content-Type' 'text/plain';
-            return 200 "healthy";
-        }
-    }
-
-    server {
-        listen 80;
-        listen [::]:80;
-        server_name resume.shockrah.xyz;
-        location / {
-            proxy_pass http://resume.shockrah.xyz.s3-website-us-west-1.amazonaws.com;
-        }
-        location /health {
-            access_log off;
-            add_header 'Content-Type' 'text/plain';
-            return 200 "healthy";
-        }
-    }
-
-    server {
-        listen 80;
-        listen [::]:80;
-        server_name temper.tv;
-        location / {
-            proxy_pass http://temper.tv.s3-website-us-west-1.amazonaws.com;
-        }
-        location /health {
-            access_log off;
-            add_header 'Content-Type' 'text/plain';
-            return 200 "healthy";
-        }
-    }
-}
(deleted file)
@@ -1,8 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# This script is used for running the image locally for testing purposes
-
-docker run --publish 80:80 --name gateway --rm \
-    805875567437.dkr.ecr.us-west-1.amazonaws.com/reverse-proxy:latest
(deleted file)
@@ -1,9 +0,0 @@
-# What is this
-
-This folder contains docker images that live in ECR
-
-## `beta`
-
-Reverse proxy for all things relating to static content under Project Athens.
-
-All static site content lives in S3 and thus this proxies that content.
(deleted file)
@@ -1,40 +0,0 @@
-networks:
-  gitea:
-    external: false
-
-
-services:
-  gitea:
-    image: gitea/gitea:latest
-    container_name: gitea
-    environment:
-      - USER_UID=1000
-      - USER_GID=1000
-    restart: always
-    networks:
-      - gitea
-    volumes:
-      - /opt/containers/gitea:/data
-      - /etc/timezone:/etc/timezone:ro
-      - /etc/localtime:/etc/localtime:ro
-    ports:
-      - "3000:3000"
-      - "2222:22"
-  gitea-runner:
-    image: gitea/act_runner:nightly
-    container_name: gitea-runner
-    restart: always
-    networks:
-      - gitea
-    volumes:
-      - /opt/containers/gitea_runner/
-      - /var/run/docker.sock:/var/run/docker.sock
-    environment:
-      - GITEA_INSTANCE_URL=https://git.shockrah.xyz
-      - GITEA_RUNNER_NAME=gitea-main
-      - GITEA_RUNNER_LABELS=gitea-main
-      - GITEA_RUNNER_REGISTRATION_TOKEN=${token}
-
-
-
-
(deleted file)
@@ -1,29 +0,0 @@
-What is this
-============
-
-Here we contain scripts to build out all the containers that are run.
-All of these images are based on images that are made from other projects
-
-docker-compose.yaml
-===================
-
-Services that are more/less "special" go here since most of the stuff that is
-run on the main host are basically just static html websites
-
-Services & Containers
-=====================
-
-| Service    | Docker Image Used        |
-|------------|--------------------------|
-| Gitea      | gitea/gitea:latest       |
-| Act Runner | gitea/act_runner:nightly |
-
-Why the services above?
-======================
-
-The Gitea related services are there so that I can host my own Git projects
-away from "Git as a service" services. I have no issue with Github/Gitlab
-but I just like being able to host my own stuff when possible :smiley:
-
-
-
(deleted file)
@@ -1,24 +0,0 @@
-plan=out.plan
-
-SHELL := /bin/bash
-
-$(plan): *.tf
-	source ../secrets/set-env.sh && terraform plan -input=false -out $(plan)
-
-push: build
-	source ../secrets/set-env.sh && terraform apply $(plan)
-
-refresh:
-	source ../secrets/set-env.sh && terraform apply -refresh-only
-
-test:
-	terraform validate
-
-
-rip:
-	source ../secrets/set-env.sh && terraform destroy
-
-clean:
-	rm -f $(plan)
-
-.PHONY: test build clean push rip
(deleted file)
@@ -1,49 +0,0 @@
-#############################
-# project-athens.xyz DNS ZONE
-#############################
-
-# This entry is just for the sample service that is just plain nginx
-# No TLS will be placed on this just yet as we need to make sure this
-# and the load balancer are setup to receive things properly
-resource "aws_route53_zone" "project-athens" {
-  name = "project-athens.xyz"
-  comment = "Project Athens domain zone"
-}
-
-
-locals {
-  project-athens-records = [
-    {
-      name = "project-athens.xyz"
-      type = "NS"
-      ttl = 172800
-      records = [
-        "ns-806.awsdns-36.net.",
-        "ns-1881.awsdns-43.co.uk.",
-        "ns-1109.awsdns-10.org.",
-        "ns-11.awsdns-01.com.",
-      ]
-    },
-    {
-      name = "project-athens.xyz"
-      type = "SOA"
-      ttl = 900
-      records = [
-        "ns-806.awsdns-36.net. awsdns-hostmaster.amazon.com. 1 7200 900 1209600 86400"
-      ]
-    }
-  ]
-}
-
-resource "aws_route53_record" "project-athens-record" {
-  for_each = {
-    for index, record in local.project-athens-records:
-      index => record
-  }
-  zone_id = aws_route53_zone.project-athens.id
-  name = each.value.name
-  type = lookup(each.value, "type", "A")
-  ttl = lookup(each.value, "ttl", 300)
-  records = each.value.records
-}
-
(modified file)
@@ -36,8 +36,11 @@ locals {
     },
     { name = "www.shockrah.xyz", records = [ var.vultr_host ] },
     { name = "resume.shockrah.xyz", records = [ var.vultr_host ] },
-    { name = "immich.shockrah.xyz", records = [ "45.32.92.196" ] },
     { name = "git.shockrah.xyz", records = [ var.vultr_host ] },
+    { name = "sanity.shockrah.xyz", records = [ var.vke_lb ] },
+    { name = "uptime.shockrah.xyz", records = [ var.vke_lb ] },
+    { name = "code.shockrah.xyz", records = [ var.vke_lb ] },
+    { name = "wiki.shockrah.xyz", records = [ var.vke_lb ] },
   ]
 }
 
(modified file)
@@ -33,3 +33,11 @@ resource "aws_route53_record" "temper-tv-mx" {
     "50 fb.mail.gandi.net.",
   ]
 }
+
+resource "aws_route53_record" "temper-tv-test" {
+  zone_id = aws_route53_zone.temper-tv.id
+  name = "test.temper.tv"
+  type = "A"
+  ttl = 300
+  records = [ var.vke_lb ]
+}
(modified file)
@@ -26,3 +26,7 @@ variable "vultr_host" {
   description = "IP of the temp Vultr host"
 }
 
+variable "vke_lb" {
+  type = string
+  description = "IP of our VKE load balancer"
+}
infra/dns/variables.tfvars (new file, 2 lines)
@@ -0,0 +1,2 @@
+vultr_host = "45.32.83.83"
+vke_lb = "45.32.89.101"
infra/nigel-k3s/.gitignore (vendored, new file, 1 line)
@@ -0,0 +1 @@
+config.yaml
infra/nigel-k3s/health.yaml (new file, 35 lines)
@@ -0,0 +1,35 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: nginx-deployment
+  labels:
+    app: nginx
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: nginx
+  template:
+    metadata:
+      labels:
+        app: nginx
+    spec:
+      containers:
+        - name: nginx
+          image: nginx:1.14.2
+          ports:
+            - containerPort: 80
+              name: nginx-port
+---
+apiVersion: v1
+kind: Service
+metadata:
+  name: nginx
+spec:
+  type: NodePort
+  selector:
+    app: nginx
+  ports:
+    - port: 80
+      nodePort: 30808
+      targetPort: nginx-port
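
Because the Service pins NodePort 30808, the deployment can be checked from outside the cluster once applied. A sketch, assuming kubectl already points at the k3s node and nigel.local resolves to it:

```sh
kubectl apply -f infra/nigel-k3s/health.yaml
# NodePort 30808 maps to the nginx container's port 80
curl -i http://nigel.local:30808/
```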
infra/nigel-k3s/sample-cron.yaml (new file, 19 lines)
@@ -0,0 +1,19 @@
+apiVersion: batch/v1
+kind: CronJob
+metadata:
+  name: hello
+spec:
+  schedule: "* * * * *"
+  jobTemplate:
+    spec:
+      template:
+        spec:
+          containers:
+            - name: hello
+              image: busybox:1.28
+              imagePullPolicy: IfNotPresent
+              command:
+                - /bin/sh
+                - -c
+                - date; echo Hello from the sample cron-container
+          restartPolicy: OnFailure
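
Since the CronJob fires every minute, its output can be spot-checked shortly after applying it; a sketch of the usual verification loop (the exact hello-xxxxxxxx job name is generated at runtime):

```sh
kubectl apply -f infra/nigel-k3s/sample-cron.yaml
kubectl get jobs --watch    # wait for a hello-xxxxxxxx job to complete
kubectl logs job/$(kubectl get jobs -o jsonpath='{.items[0].metadata.name}')
```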
infra/static-vultr/build.sh (new file, 36 lines)
@@ -0,0 +1,36 @@
+#!/bin/bash
+
+set -e
+
+opt=$1
+plan=tfplan
+
+build_plan() {
+    echo Generating plan
+    set -x
+    terraform plan -var-file variables.tfvars -input=false -out $plan
+}
+
+deploy_plan() {
+    terraform apply $plan
+}
+
+init() {
+    terraform init
+}
+
+help_prompt() {
+    cat <<- EOF
+	Options: plan deploy help
+	EOF
+}
+
+# Default to building a plan
+source ./secrets.sh
+case $opt in
+    plan) build_plan;;
+    deploy) deploy_plan;;
+    init) init;;
+    *) help_prompt;;
+esac
+
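
The script wraps the usual Terraform workflow behind three subcommands; typical usage, assuming secrets.sh exports the provider credentials, would be:

```sh
./build.sh init     # one-time provider/backend setup
./build.sh plan     # writes the plan to ./tfplan
./build.sh deploy   # applies the previously written plan
```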
(deleted file)
@@ -1,22 +0,0 @@
-# Here we define the resources for our temporary immich server
-
-resource vultr_instance immich {
-  plan = var.host.plan
-  region = var.host.region
-  os_id = var.host.os
-  enable_ipv6 = true
-
-  # Enable backups for now since we're getting off of s3 as well at some point
-  backups = "enabled"
-  backups_schedule {
-    type = "weekly"
-    dow = var.host.backups.day
-    hour = var.host.backups.hour
-  }
-
-  ssh_key_ids = [ vultr_ssh_key.immich.id ]
-  firewall_group_id = vultr_firewall_group.host.id
-  label = "Immich Server"
-}
-
-
(modified file)
@@ -14,10 +14,3 @@ output vultr_key_id {
 }
 
 
-output immich_key {
-  sensitive = true
-  # value = tls_private_key.host.private_key_openssh
-  value = vultr_instance.immich.default_password
-}
-
-
(modified file)
@@ -5,18 +5,6 @@ resource tls_private_key host {
 
 resource vultr_ssh_key host {
   name = "static_ssh_key"
-  ssh_key = tls_private_key.host.public_key_openssh
+  ssh_key = chomp(tls_private_key.host.public_key_openssh)
 }
-
-####################
-#   Immich keys    #
-####################
-resource tls_private_key immich {
-  algorithm = "RSA"
-  rsa_bits = 4096
-}
-
-resource vultr_ssh_key immich {
-  name = "static_ssh_key"
-  ssh_key = tls_private_key.immich.public_key_openssh
-}
(modified file)
@@ -9,11 +9,31 @@ terraform {
   required_providers {
     aws = {
       source = "hashicorp/aws"
-      version = "~> 5.0"
+      version = "6.27.0"
     }
     vultr = {
      source = "vultr/vultr"
-      version = "2.19.0"
+      version = "2.26.0"
+    }
+    kubernetes = {
+      source = "hashicorp/kubernetes"
+      version = "3.0.1"
+    }
+    kubectl = {
+      source = "gavinbunney/kubectl"
+      version = "1.19.0"
+    }
+    helm = {
+      source = "hashicorp/helm"
+      version = "3.0.2"
+    }
+    tls = {
+      source = "hashicorp/tls"
+      version = "4.1.0"
+    }
+    random = {
+      source = "hashicorp/random"
+      version = "3.7.2"
     }
   }
 }
@@ -31,4 +51,16 @@ provider aws {
   max_retries = 1
 }
 
+provider kubernetes {
+  config_path = "config.yaml"
+}
+
+provider kubectl {
+  config_path = "config.yaml"
+}
+
+provider helm {
+  kubernetes = {
+    config_path = "config.yaml"
+  }
+}
(modified file)
@@ -15,6 +15,10 @@ deploy_plan() {
     terraform apply $plan
 }
 
+init() {
+    terraform init
+}
+
 help_prompt() {
     cat <<- EOF
 	Options: plan deploy help
infra/vultr-kubernetes/cluster-issuer.yaml (new file, 18 lines)
@@ -0,0 +1,18 @@
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt
+spec:
+  acme:
+    # The ACME server URL
+    server: https://acme-v02.api.letsencrypt.org/directory
+    preferredChain: "ISRG Root X1"
+    # Email address used for ACME registration
+    email: dev@shockrah.xyz
+    # Name of a secret used to store the ACME account private key
+    privateKeySecretRef:
+      name: letsencrypt
+    solvers:
+      - http01:
+          ingress:
+            class: nginx
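
The inline comments explain the ACME wiring; once the manifest is applied, the issuer's registration status can be confirmed. A sketch, assuming kubectl targets the VKE kubeconfig (config.yaml) used elsewhere in this diff and cert-manager's CRDs are installed:

```sh
kubectl --kubeconfig config.yaml apply -f infra/vultr-kubernetes/cluster-issuer.yaml
kubectl --kubeconfig config.yaml get clusterissuer letsencrypt -o wide
```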
(modified file)
@@ -1,15 +1,16 @@
 resource vultr_kubernetes athens {
   region = var.cluster.region
   version = var.cluster.version
   label = var.cluster.label
-  enable_firewall = true
+  # vpc_id = vultr_vpc.athens.id
 
   node_pools {
-    # how many nodes do we want in this pool
-    node_quantity = 1
-    plan = var.cluster.pool.plan
-    label = var.cluster.label
-    min_nodes = var.cluster.pool.min
-    max_nodes = var.cluster.pool.max
+    node_quantity = var.cluster.pools["main"].min_nodes
+    plan = var.cluster.pools["main"].plan
+    label = var.cluster.pools["main"].label
+    min_nodes = var.cluster.pools["main"].min_nodes
+    max_nodes = var.cluster.pools["main"].max_nodes
+    auto_scaler = true
   }
 }
 
infra/vultr-kubernetes/data.tf (new file, 6 lines)
@@ -0,0 +1,6 @@
+data vultr_kubernetes athens {
+  filter {
+    name = "label"
+    values = [ var.cluster.label ]
+  }
+}
(modified file)
@@ -1,11 +1,10 @@
-# Inbound rules for web traffic
-resource vultr_firewall_rule web_inbound {
-  for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
-  firewall_group_id = vultr_kubernetes.athens.firewall_group_id
-  #firewall_group_id = vultr_firewall_group.cluster.id
-  protocol = "tcp"
-  ip_type = "v4"
-  subnet = "0.0.0.0"
-  subnet_size = 0
-  port = each.value
-}
+# resource vultr_firewall_rule web_inbound {
+#   for_each = toset([for port in [80, 443, 6443] : tostring(port) ])
+#   firewall_group_id = vultr_kubernetes.athens.firewall_group_id
+#   protocol = "tcp"
+#   ip_type = "v4"
+#   subnet = "0.0.0.0"
+#   subnet_size = 0
+#   port = each.value
+# }
+
infra/vultr-kubernetes/git.tf (new file, 74 lines)
@@ -0,0 +1,74 @@
+# NOTE: this is a simple deployment for demo purposes only.
+# Currently it does not support SSH access and lacks Gitea runners.
+# However a fully working setup can be found at: https://git.shockrah.xyz
+resource kubernetes_deployment gitea {
+  metadata {
+    name = "gitea"
+    namespace = var.playground.namespace
+    labels = {
+      "app" = "gitea"
+    }
+  }
+  spec {
+    replicas = 1
+    selector {
+      match_labels = {
+        "app" = "gitea"
+      }
+    }
+    template {
+      metadata {
+        labels = {
+          "app" = "gitea"
+        }
+      }
+      spec {
+        container {
+          name = "gitea"
+          image = "gitea/gitea:latest"
+          port {
+            container_port = 3000
+            name = "gitea-main"
+          }
+          port {
+            container_port = 2222
+            name = "gitea-ssh"
+          }
+          volume_mount {
+            name = "gitea"
+            mount_path = "/data"
+          }
+        }
+        volume {
+          name = "gitea"
+          persistent_volume_claim {
+            claim_name = kubernetes_persistent_volume_claim_v1.gitea.metadata[0].name
+          }
+        }
+      }
+    }
+  }
+}
+
+
+resource kubernetes_service gitea {
+  metadata {
+    name = "gitea"
+    namespace = var.playground.namespace
+  }
+  spec {
+    selector = {
+      "app" = "gitea"
+    }
+    port {
+      target_port = "gitea-main"
+      port = 3000
+      name = "http"
+    }
+    port {
+      target_port = "gitea-ssh"
+      port = 2222
+      name = "ssh"
+    }
+  }
+}
infra/vultr-kubernetes/health.tf (new file, 47 lines)
@@ -0,0 +1,47 @@
+resource kubernetes_deployment_v1 health {
+  metadata {
+    name = "health"
+    namespace = var.playground.namespace
+  }
+  spec {
+    replicas = 1
+    selector {
+      match_labels = {
+        name = "health"
+      }
+    }
+    template {
+      metadata {
+        labels = {
+          name = "health"
+        }
+      }
+      spec {
+        container {
+          name = "health"
+          image = "quanhua92/whoami:latest"
+          port {
+            container_port = "8080"
+          }
+        }
+      }
+    }
+  }
+}
+
+resource kubernetes_service_v1 health {
+  metadata {
+    name = "health"
+    namespace = var.playground.namespace
+  }
+  spec {
+    selector = {
+      name = "health"
+    }
+    port {
+      port = 80
+      target_port = 8080
+      name = "http"
+    }
+  }
+}
infra/vultr-kubernetes/ingress-controller.tf (new file, 7 lines)
@@ -0,0 +1,7 @@
+resource helm_release nginx {
+  name = "ingress-nginx"
+  repository = "https://kubernetes.github.io/ingress-nginx"
+  chart = "ingress-nginx"
+  namespace = "ingress-nginx"
+  create_namespace = true
+}
infra/vultr-kubernetes/ingress.tf (new file, 48 lines)
@@ -0,0 +1,48 @@
+locals {
+  services = {
+    "code.shockrah.xyz" = kubernetes_service.gitea
+    "sanity.shockrah.xyz" = kubernetes_service_v1.health
+    "uptime.shockrah.xyz" = kubernetes_service.kuma
+    "wiki.shockrah.xyz" = kubernetes_service.otterwiki
+  }
+}
+resource kubernetes_ingress_v1 health {
+  metadata {
+    name = "health-ingress"
+    namespace = var.playground.namespace
+    annotations = {
+      "cert-manager.io/cluster-issuer" = "letsencrypt"
+      "cert-manager.io/ingress.class" = "nginx"
+    }
+  }
+  spec {
+    ingress_class_name = "nginx"
+    dynamic tls {
+      for_each = local.services
+      content {
+        hosts = [tls.key]
+        secret_name = "${tls.value.metadata[0].name}-secret"
+      }
+    }
+    dynamic "rule" {
+      for_each = local.services
+      content {
+        host = "${rule.key}"
+        http {
+          path {
+            path = "/"
+            backend {
+              service {
+                name = rule.value.metadata[0].name
+                port {
+                  number = rule.value.spec[0].port[0].port
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
infra/vultr-kubernetes/k8s/.gitignore (vendored, deleted)
@@ -1 +0,0 @@
-terraform.yaml
(deleted file)
@@ -1,33 +0,0 @@
-terraform {
-  required_version = ">= 0.13"
-  backend s3 {
-    bucket = "project-athens"
-    key = "infra/vke/k8s/state/build.tfstate"
-    region = "us-west-1"
-    encrypt = true
-  }
-  required_providers {
-    # For interacting with S3
-    aws = {
-      source = "hashicorp/aws"
-      version = "~> 5.0"
-    }
-    kubernetes = {
-      source = "hashicorp/kubernetes"
-      version = "2.30.0"
-    }
-  }
-}
-
-provider aws {
-  access_key = var.aws_key
-  secret_key = var.aws_secret
-  region = var.aws_region
-  max_retries = 1
-}
-
-provider kubernetes {
-  config_path = "terraform.yaml"
-}
-
-
(deleted file)
@@ -1,50 +0,0 @@
-resource kubernetes_ingress_v1 athens {
-  metadata {
-    name = var.shockrahxyz.name
-    namespace = kubernetes_namespace.websites.metadata.0.name
-    labels = {
-      app = "websites"
-    }
-  }
-  spec {
-    rule {
-      host = "test.shockrah.xyz"
-      http {
-        path {
-          backend {
-            service {
-              name = var.shockrahxyz.name
-              port {
-                number = 80
-              }
-            }
-          }
-          path = "/"
-        }
-      }
-    }
-  }
-}
-
-
-resource kubernetes_service athens_lb {
-  metadata {
-    name = "athens-websites"
-    namespace = kubernetes_namespace.websites.metadata.0.name
-    labels = {
-      app = "websites"
-    }
-  }
-  spec {
-    selector = {
-      app = kubernetes_ingress_v1.athens.metadata.0.labels.app
-    }
-    port {
-      port = 80
-      target_port = 80
-    }
-    type = "LoadBalancer"
-    external_ips = [ var.cluster.ip ]
-  }
-}
-
(deleted file)
@@ -1,5 +0,0 @@
-resource kubernetes_namespace websites {
-  metadata {
-    name = "websites"
-  }
-}
(deleted file)
@@ -1,62 +0,0 @@
-# First we setup the ingress controller with helm
-
-```sh
-helm repo add traefik https://helm.traefik.io/traefik
-helm repo update
-# Now we can install this to our cluster
-helm install --kubeconfig config.yaml traefik traefik/traefik
-```
-
-# Prove the service is present with
-
-```sh
-kubectl --kubeconfig config.yaml get svc
-```
-
-# Create the pods
-
-```sh
-kubectl --kubeconfig config.yaml apply -f k8s/nginx-dep.yaml
-```
-
-# Expose on port 80
-
-```sh
-kubectl --kubeconfig config.yaml apply -f k8s/nginx-service.yaml
-```
-
-# Create ingress on k8s
-
-```sh
-kubectl --kubeconfig config.yaml apply -f k8s/traefik-ingress.yaml
-```
-
-# Take the external IP from the ingress
-
-Put that into terraform's A record for the domain since this is a load balancer
-in Vultr ( actual resource apparently )
-
-# Configure cert-manager for traefik ingress
-
-Using the latest version from here:
-https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.crds.yaml
-
-```sh
-kubectl --kubeconfig config.yaml \
-    apply --validate=false \
-    -f https://github.com/cert-manager/cert-manager/releases/download/v1.14.2/cert-manager.yaml
-```
-
-# Create the cert issuer and certificate
-
-```sh
-kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
-kubectl --kubeconfig config.yaml apply -f k8s/letsencrypt-issuer.yaml
-```
-
-Because we just have 1 cert for now we are looking for its status to be `READY`
-
-
(deleted file)
@@ -1,21 +0,0 @@
-# Plain nginx for now so that we can test out reverse dns
-resource kubernetes_pod shockrah {
-  metadata {
-    name = var.shockrahxyz.name
-    namespace = kubernetes_namespace.websites.metadata.0.name
-    labels = {
-      app = var.shockrahxyz.name
-    }
-  }
-  spec {
-    container {
-      image = "nginx"
-      name = "${var.shockrahxyz.name}"
-      port {
-        container_port = 80
-      }
-    }
-  }
-}
-
-
(deleted file)
@@ -1,35 +0,0 @@
-# API Keys required to reach AWS/Vultr
-variable vultr_api_key {
-  type = string
-  sensitive = true
-}
-
-variable aws_key {
-  type = string
-  sensitive = true
-}
-
-variable aws_secret {
-  type = string
-  sensitive = true
-}
-
-variable aws_region {
-  type = string
-  sensitive = true
-}
-
-variable shockrahxyz {
-  type = object({
-    name = string
-    port = number
-    dns = string
-  })
-}
-
-variable cluster {
-  type = object({
-    ip = string
-  })
-}
-
(deleted file)
@@ -1,37 +0,0 @@
-# Here we are going to define the deployment and service
-# Basically all things directly related to the actual service we want to provide
----
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: alternate-nginx-web
-  namespace: default
-  labels:
-    app: alternate-nginx-web
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: alternate-nginx-web
-  template:
-    metadata:
-      labels:
-        app: alternate-nginx-web
-    spec:
-      # Container comes from an example thing i randomly found on docker hub
-      containers:
-        - name: alternate-nginx-web
-          image: dockerbogo/docker-nginx-hello-world
----
-apiVersion: v1
-kind: Service
-metadata:
-  name: alternate-nginx-web
-  namespace: default
-spec:
-  selector:
-    app: alternate-nginx-web
-  ports:
-    - name: http
-      targetPort: 80
-      port: 80
(deleted file)
@@ -1,30 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: hello.temprah-lab.xyz
-  namespace: default
-spec:
-  secretName: hello.temprah-lab.xyz-tls
-  issuerRef:
-    name: letsencrypt-prod
-    kind: ClusterIssuer
-  commonName: hello.temprah-lab.xyz
-  dnsNames:
-    - hello.temprah-lab.xyz
----
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: letsencrypt-prod-hello
-  namespace: default
-spec:
-  acme:
-    server: https://acme-v02.api.letsencrypt.org/directory
-    email: dev@shockrah.xyz
-    privateKeySecretRef:
-      name: letsencrypt-prod-hello
-    solvers:
-      - http01:
-          ingress:
-            class: traefik
-
(deleted file)
@@ -1,13 +0,0 @@
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: sample.temprah-lab.xyz
-  namespace: default
-spec:
-  secretName: sample.temprah-lab.xyz-tls
-  issuerRef:
-    name: letsencrypt-prod
-    kind: ClusterIssuer
-  commonName: sample.temprah-lab.xyz
-  dnsNames:
-    - sample.temprah-lab.xyz
(deleted file)
@@ -1,20 +0,0 @@
-kind: Deployment
-apiVersion: apps/v1
-metadata:
-  name: nginx-web
-  namespace: default
-  labels:
-    app: nginx-web
-spec:
-  replicas: 1
-  selector:
-    matchLabels:
-      app: nginx-web
-  template:
-    metadata:
-      labels:
-        app: nginx-web
-    spec:
-      containers:
-        - name: nginx
-          image: nginx
(deleted file)
@@ -1,12 +0,0 @@
-apiVersion: v1
-kind: Service
-metadata:
-  name: nginx-web
-  namespace: default
-spec:
-  selector:
-    app: nginx-web
-  ports:
-    - name: http
-      targetPort: 80
-      port: 80
(deleted file)
@@ -1,44 +0,0 @@
-# This is the first thing we need to create, an issuer to put certs into
-apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
-metadata:
-  name: letsencrypt-prod
-  namespace: default
-spec:
-  acme:
-    server: https://acme-v02.api.letsencrypt.org/directory
-    email: dev@shockrah.xyz
-    privateKeySecretRef:
-      name: letsencrypt-temprah-lab
-    solvers:
-      - http01:
-          ingress:
-            class: traefik
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: hello.temprah-lab.xyz
-  namespace: default
-spec:
-  secretName: hello.temprah-lab.xyz-tls
-  issuerRef:
-    name: letsencrypt-temprah-lab
-    kind: ClusterIssuer
-  commonName: hello.temprah-lab.xyz
-  dnsNames:
-    - hello.temprah-lab.xyz
----
-apiVersion: cert-manager.io/v1
-kind: Certificate
-metadata:
-  name: sample.temprah-lab.xyz
-  namespace: default
-spec:
-  secretName: sample.temprah-lab.xyz-tls
-  issuerRef:
-    name: letsencrypt-temprah-lab
-    kind: ClusterIssuer
-  commonName: sample.temprah-lab.xyz
-  dnsNames:
-    - sample.temprah-lab.xyz
(deleted file)
@@ -1,31 +0,0 @@
-apiVersion: networking.k8s.io/v1
-kind: Ingress
-metadata:
-  name: traefik-ingress
-  namespace: default
-  labels:
-    name: project-athens-lb
-  annotations:
-    kubernetes.io/ingress.class: traefik
-spec:
-  rules:
-    - host: sample.temprah-lab.xyz
-      http:
-        paths:
-          - backend:
-              service:
-                name: nginx-web
-                port:
-                  number: 80
-            path: /
-            pathType: Prefix
-    - host: hello.temprah-lab.xyz
-      http:
-        paths:
-          - backend:
-              service:
-                name: alternate-nginx-web
-                port:
-                  number: 80
-            path: /
-            pathType: Prefix
(modified file)
@@ -1,15 +1,14 @@
 apiVersion: cert-manager.io/v1
-kind: ClusterIssuer
+kind: Issuer
 metadata:
-  name: letsencrypt-prod
-  namespace: default
+  name: letsencrypt-nginx
 spec:
   acme:
-    server: https://acme-v02.api.letsencrypt.org/directory
     email: dev@shockrah.xyz
+    server: https://acme-v02.api.letsencrypt.org/directory
     privateKeySecretRef:
-      name: letsencrypt-prod
+      name: example
     solvers:
       - http01:
           ingress:
-            class: traefik
+            class: nginx
infra/vultr-kubernetes/legacy/ingress.yaml (new file, 36 lines)
@@ -0,0 +1,36 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: whoami-service
+spec:
+  selector:
+    name: whoami
+  ports:
+    - name: http
+      port: 80
+      targetPort: 8080
+---
+apiVersion: networking.k8s.io/v1
+kind: Ingress
+metadata:
+  name: whoami-ingress
+  annotations:
+    cert-manager.io/ingress.class: nginx
+    cert-manager.io/cluster-issuer: letsencrypt-prod
+spec:
+  ingressClassName: nginx
+  tls:
+    - secretName: whoami-tls
+      hosts:
+        - example.shockrah.xyz
+  rules:
+    - host: example.shockrah.xyz
+      http:
+        paths:
+          - path: /
+            pathType: Prefix
+            backend:
+              service:
+                name: whoami-service
+                port:
+                  number: 80
infra/vultr-kubernetes/legacy/service.yaml (new file, 21 lines)
@@ -0,0 +1,21 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: whoami-lb
+  annotations:
+    service.beta.kubernetes.io/vultr-loadbalancer-protocol: "http"
+    service.beta.kubernetes.io/vultr-loadbalancer-algorithm: "least_connections"
+    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-protocol: "http"
+    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-path: "/health"
+    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-interval: "30"
+    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-response-timeout: "5"
+    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-unhealthy-threshold: "5"
+    service.beta.kubernetes.io/vultr-loadbalancer-healthcheck-healthy-threshold: "5"
+spec:
+  type: LoadBalancer
+  selector:
+    name: whoami
+  ports:
+    - name: http
+      port: 80
+      targetPort: 8080
infra/vultr-kubernetes/legacy/test.yaml (new file, 20 lines)
@@ -0,0 +1,20 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: whoami
+spec:
+  replicas: 3
+  selector:
+    matchLabels:
+      name: whoami
+  template:
+    metadata:
+      labels:
+        name: whoami
+    spec:
+      containers:
+        - name: whoami
+          image: quanhua92/whoami:latest
+          imagePullPolicy: Always
+          ports:
+            - containerPort: 8080
infra/vultr-kubernetes/legacy/tls.yaml (new file, 37 lines)
@@ -0,0 +1,37 @@
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-staging
+spec:
+  acme:
+    # The ACME server URL
+    server: https://acme-staging-v02.api.letsencrypt.org/directory
+    preferredChain: "ISRG Root X1"
+    # Email address used for ACME registration
+    email: dev@shockrah.xyz
+    # Name of a secret used to store the ACME account private key
+    privateKeySecretRef:
+      name: letsencrypt-staging
+    solvers:
+      - http01:
+          ingress:
+            class: nginx
+---
+apiVersion: cert-manager.io/v1
+kind: ClusterIssuer
+metadata:
+  name: letsencrypt-prod
+spec:
+  acme:
+    # The ACME server URL
+    server: https://acme-v02.api.letsencrypt.org/directory
+    # Email address used for ACME registration
+    email: dev@shockrah.xyz
+    # Name of a secret used to store the ACME account private key
+    privateKeySecretRef:
+      name: letsencrypt-prod
+    solvers:
+      - http01:
+          ingress:
+            class: nginx
+
Some files were not shown because too many files have changed in this diff.