Compare commits

8 Commits

771596968a removing fluff
All checks were successful
Wiki Resources Sanity Checks / ruff-checks (push) Successful in 12s
2026-05-04 14:43:51 -07:00
62d4129b73 S3 infra cleanup
All checks were successful
Wiki Resources Sanity Checks / ruff-checks (push) Successful in 6s
2026-04-26 01:53:14 -07:00
074139e0dd removing ansible-lint gh action in favor of the pre-commit hooks
Some checks failed
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 34s
Wiki Resources Sanity Checks / ruff-checks (push) Successful in 17s
2026-04-26 01:38:48 -07:00
fbe6e5c3e2 Removing all ansible-linter errors
Some checks failed
Ansible Linting / build (push) Failing after 16s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 18s
Wiki Resources Sanity Checks / ruff-checks (push) Successful in 7s
2026-04-26 01:35:56 -07:00
c44babf3d8 Adding pre-commit hooks for ansible linting
Some checks failed
Ansible Linting / build (push) Failing after 20s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 21s
Wiki Resources Sanity Checks / ruff-checks (push) Successful in 7s
2026-04-25 22:41:56 -07:00
5ee16b4766 Running a simple ansible-lint job
Some checks failed
Ansible Linting / build (push) Failing after 1m26s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
Wiki Resources Sanity Checks / ruff-checks (push) Successful in 7s
2026-04-24 14:44:51 -07:00
42fae4c5d8 Fixing workflow syntax error
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 16s
Wiki Resources Sanity Checks / ruff-checks (push) Successful in 16s
2026-04-24 14:35:02 -07:00
6a0cd25aee new simple workflow for ruff linter checks
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 4s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 17s
Wiki Resources Sanity Checks / ruff-checks (push) Failing after 0s
2026-04-24 14:32:35 -07:00
30 changed files with 79 additions and 351 deletions
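
To reproduce this comparison locally — a sketch assuming both endpoints are fetched and taking the parent of the oldest listed commit as the base:

    git log --oneline 6a0cd25aee~1..771596968a
    git diff --stat 6a0cd25aee~1 771596968a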

View File

@@ -1,15 +0,0 @@
name: Ansible Linting
on:
  - push
jobs:
  ansible-lint:
    runs-on: ubuntu-latest
    container:
      image: shockrah/ansible
    steps:
      - name: Checkout repo content
        uses: actions/checkout@v4
      - run: ansible-lint -c linter.yaml
        working-directory: ansible/

View File

@@ -1,19 +0,0 @@
name: Secops Linting and Safety Checks
on:
  push:
    branches:
      - master
jobs:
  checkov-scan-s3:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo code
        uses: actions/checkout@v4
      - name: Scan S3 Terraform with Checkov
        uses: bridgecrewio/checkov-action@master
        with:
          directory: infra/s3/
          framework: terraform

View File

@@ -0,0 +1,17 @@
name: Wiki Resources Sanity Checks
on:
  push:
    branches:
      - master
jobs:
  ruff-checks:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repo
        uses: actions/checkout@v4
      - name: Perform linting with ruff
        uses: astral-sh/ruff-action@v3
        with:
          src: "wiki-resources"

.gitignore
View File

@@ -22,3 +22,11 @@ docker/beta/resume.shockrah.xyz/
 k8s/config.yaml
 infra/**/tfplan
 .ansible/
+wiki-resources/public_infrastructure.png
+bin/
+lib/
+lib64
+pyvenv.cfg
+share/

View File

@@ -1,13 +0,0 @@
# You can override the included template(s) by including variable overrides
# SAST customization: https://docs.gitlab.com/ee/user/application_security/sast/#customizing-the-sast-settings
# Secret Detection customization: https://docs.gitlab.com/ee/user/application_security/secret_detection/pipeline/#customization
# Dependency Scanning customization: https://docs.gitlab.com/ee/user/application_security/dependency_scanning/#customizing-the-dependency-scanning-settings
# Container Scanning customization: https://docs.gitlab.com/ee/user/application_security/container_scanning/#customizing-the-container-scanning-settings
# Note that environment variables can be set in several places
# See https://docs.gitlab.com/ee/ci/variables/#cicd-variable-precedence
stages:
  - test
sast:
  stage: test
include:
  - template: Security/SAST.gitlab-ci.yml

.pre-commit-config.yaml
View File

@@ -0,0 +1,21 @@
---
repos:
  - repo: https://github.com/ansible/ansible-lint
    rev: v26.4.0
    hooks:
      - id: ansible-lint
        name: ansible-lint
        description: Run ansible-lint on playbooks
        entry: ansible-lint
        args:
          - ansible
        language: python
        exclude: |
          (?x)(
            ^deprecated/|
            ^infra/|
            ^runbooks/|
            ^wiki-resources/|
            ^\.pre-commit-config.yaml|
            ^\.gitignore
          )
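
With this config in place, the hooks can be installed and exercised locally; a minimal sketch, assuming pre-commit is available via pip:

    pip install pre-commit
    pre-commit install
    pre-commit run --all-files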

View File

@@ -1,4 +1,7 @@
 ---
 skip_list:
   - role-name
   - var-naming[no-role-prefix]
+exclude_paths:
+  - linter.yaml
+  - inventory.yaml
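
The exclude_paths additions keep ansible-lint from linting its own config and the inventory file. The removed workflow invoked the linter the same way it can be run by hand:

    cd ansible/
    ansible-lint -c linter.yaml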

View File

@@ -6,4 +6,3 @@
 - name: Apply the nomad role
   ansible.builtin.include_role:
     name: nomad
-

View File

@@ -8,8 +8,8 @@
 - name: Running install on the keyrings directory
   ansible.builtin.command:
     cmd: install -m 0755 -d /etc/apt/keyrings
-  register: install
-  changed_when: install.rc == 0
+  register: base_install_keyrings
+  changed_when: base_install_keyrings.rc == 0
 - name: Fetch Docker GPG Key
   vars:
     keylink: https://download.docker.com/linux/ubuntu/gpg
@@ -37,5 +37,5 @@
 - name: Verify that the docker components are installed properly
   ansible.builtin.command:
     cmd: docker run hello-world
-  register: docker
-  changed_when: docker.rc == 0
+  register: base_docker_hello
+  changed_when: base_docker_hello.rc == 0
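
These register/changed_when pairs address ansible-lint's no-changed-when rule, which flags command tasks that never report an accurate change status. To confirm the rule name locally (assuming ansible-lint is installed):

    ansible-lint -L | grep no-changed-when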

View File

@@ -6,3 +6,5 @@
 - name: Run installation script
   ansible.builtin.command:
     cmd: bash /tmp/k3s.sh
+  register: base_k3s_install_script
+  changed_when: base_k3_install_script.rc == 0

View File

@@ -15,11 +15,3 @@
       become: true
       tags:
         - setup
-- name: Run through nomad removal steps
-  tags: nomad
-  ansible.builtin.include_tasks:
-    file: nomad.yaml
-    apply:
-      become: true
-      tags:
-        - nomad

View File

@@ -2,10 +2,13 @@
   ansible.builtin.get_url:
     url: https://get.k3s.io
     dest: /tmp
-  register: install_script
+    mode: "0644"
+  register: k3s_dl_install_script
 - name: Run installation script
   become: true
   environment:
     INSTALL_K3S_EXEC: server
   ansible.builtin.command:
-    cmd: sh {{ install_script.dest }}
+    cmd: sh {{ k3s_dl_install_script.dest }}
+  register: k3s_install_script
+  changed_when: k3s_install_script.rc == 0

View File

@@ -1,18 +1,18 @@
 - name: Nomad server configuration
   become: true
   block:
     - name: Ensure the root data directory is present
       ansible.builtin.file:
-        path: "{{ nomad.volumes.root }}"
+        path: "{{ nomad_data.volumes.root }}"
         state: absent
         mode: "0755"
     - name: Ensure registry volume is present
       ansible.builtin.file:
-        path: "{{ nomad.volumes.registry }}"
+        path: "{{ nomad_data.volumes.registry }}"
         state: absent
         mode: "0755"
     - name: Ensure the MinIO diretory is present
       ansible.builtin.file:
-        path: "{{ nomad.volumes.nfs }}"
+        path: "{{ nomad_data.volumes.nfs }}"
         state: absent
         mode: "0755"

View File

@@ -1,4 +1,4 @@
-nomad:
+nomad_data:
   volumes:
     root: /opt/volumes
     registry: /opt/volumes/ncr

View File

@@ -1,4 +1,4 @@
 - name: Restart host to apply any changes and clear out uptime stuff
   become: true
   ansible.builtin.reboot:
     msg: "Reboot initiated as a part of housekeeping"

View File

@@ -2,12 +2,16 @@
   become: true
   ansible.builtin.command:
     cmd: docker compose pull
-    chdir: "{{ webadmin.home }}/{{ item }}"
+    chdir: "{{ webserver_admin.home }}/{{ item }}"
   loop:
     - services
+  register: webserver_docker_pull
+  changed_when: webserver_docker_pull.rc == 0
 - name: Restart containers with newest container images
   ansible.builtin.command:
     cmd: docker compose up -d
-    chdir: "{{ webadmin.home }}/{{ item }}"
+    chdir: "{{ webserver_admin.home }}/{{ item }}"
   loop:
     - services
+  register: webserver_docker_restart
+  changed_when: webserver_docker_restart.rc == 0

View File

@@ -4,5 +4,5 @@
   ansible.builtin.apt:
     update_cache: true
     autoclean: true
     autoremove: true
     upgrade: safe

View File

@@ -1,2 +1,2 @@
-webadmin:
+webserver_admin:
   home: /home/webadmin

View File

@@ -1 +0,0 @@
config.yaml

View File

@@ -1,35 +0,0 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: nginx-deployment
  labels:
    app: nginx
spec:
  replicas: 1
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
        - name: nginx
          image: nginx:1.14.2
          ports:
            - containerPort: 80
              name: nginx-port
---
apiVersion: v1
kind: Service
metadata:
  name: nginx
spec:
  type: NodePort
  selector:
    app: nginx
  ports:
    - port: 80
      nodePort: 30808
      targetPort: nginx-port

View File

@@ -1,19 +0,0 @@
apiVersion: batch/v1
kind: CronJob
metadata:
  name: hello
spec:
  schedule: "* * * * *"
  jobTemplate:
    spec:
      template:
        spec:
          containers:
            - name: hello
              image: busybox:1.28
              imagePullPolicy: IfNotPresent
              command:
                - /bin/sh
                - -c
                - date; echo Hello from the sample cron-container
          restartPolicy: OnFailure

View File

@@ -1,24 +0,0 @@
plan=out.plan
SHELL := /bin/bash

$(plan):
	source ../secrets/set-env.sh && terraform plan -input=false -out $(plan)

push: build
	source ../secrets/set-env.sh && terraform apply $(plan)

refresh:
	source ../secrets/set-env.sh && terraform apply -refresh-only

test:
	terraform validate

rip:
	source ../secrets/set-env.sh && terraform destroy

clean:
	rm -f $(plan)

.PHONY: test build clean push rip
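
For reference, the flow the deleted Makefile automated, run by hand — under the same assumption that the secrets script exists one directory up:

    source ../secrets/set-env.sh
    terraform plan -input=false -out out.plan
    terraform apply out.plan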

View File

@@ -1,24 +0,0 @@
terraform {
  required_version = ">= 0.13"

  backend "s3" {
    bucket  = "project-athens"
    key     = "infra/s3/state/build.tfstate"
    region  = "us-west-1"
    encrypt = true
  }

  required_providers {
    aws = {
      source  = "hashicorp/aws"
      version = "4.13.0"
    }
  }
}

# Base config for using AWS features w/ Terraform
provider "aws" {
  access_key  = var.aws_key
  secret_key  = var.aws_secret
  region      = var.aws_region
  max_retries = 1
}

View File

@@ -1,93 +0,0 @@
# All variables that are used in various places go here

######################### General provider specific values

variable "aws_key" {
  description = "Access Key for AWS operations"
  type        = string
  sensitive   = true
}

variable "aws_secret" {
  description = "Secret Key for AWS operations"
  type        = string
  sensitive   = true
}

variable "aws_region" {
  description = "Region where the VPC is located"
  type        = string
  sensitive   = true
}

variable "vpc_id" {
  description = "Project Athens VPC ID"
  type        = string
}

######################### Alpha Cluster variables

variable "athens_prefix" {
  description = "Prefix for all things in alpha cluster"
  type        = string
  default     = "athens"
}

######################### Nginx reverse proxy vars
# Yes these buckets _could_ be public but where's the fun in that :x

variable "shockrah_xyz_s3_access_key_id" {
  description = "Acess key for reading public s3 buckets"
  type        = string
  sensitive   = true
}

variable "shockrah_xyz_s3_secret_key" {
  description = "Secret key for reading public s3 buckets"
  type        = string
  sensitive   = true
}

variable "nginx_port" {
  description = "Port for shockrah.xyz"
  type        = number
  default     = 80
}

######################### Nginx reverse proxy vars

variable "shockrah_xyz_bucket" {
  description = "S3 bucket name"
  type        = string
  default     = "shockrah_xyz"
}

variable "resume_shockrah_xyz_bucket" {
  description = "S3 bucket name"
  type        = string
  default     = "resume_shockrah_xyz"
}

variable "temper" {
  type = object({
    cert_arn = string
  })
}

variable "sg" {
  type = object({
    base_ecs        = string
    ecs_web_ingress = string
    lb_health_check = string
  })
}

variable "alpha" {
  type = object({
    dns  = string
    zone = string
  })
}

View File

@@ -1,8 +0,0 @@
locals {
  buckets = [
    "shockrah.xyz",
    "resume.shockrah.xyz",
    "temper.tv"
  ]
}

View File

@@ -1,17 +0,0 @@
resource "aws_s3_bucket" "static-content" {
for_each = {
for idx, record in local.buckets:
idx => record
}
bucket = each.value
tags = {
Name = each.value
Description = "Static content"
}
}

View File

@@ -1,53 +0,0 @@
##################################################################
# Below are the acl components for each bucket to make them public
##################################################################

# TODO: ensure proper dependency chaining to the buckets that these
# blocks require to be in place _before_ they come up

# Enables website configuration
resource "aws_s3_bucket_website_configuration" "site" {
  for_each = aws_s3_bucket.static-content
  bucket   = each.value.bucket

  index_document {
    suffix = "index.html"
  }

  error_document {
    key = "404.html"
  }
}

# Set block public access to false
resource "aws_s3_bucket_public_access_block" "site" {
  for_each = aws_s3_bucket.static-content
  bucket   = each.value.bucket

  block_public_acls       = false
  block_public_policy     = false
  ignore_public_acls      = false
  restrict_public_buckets = false
}

# Set a policy on the bucket to allow reads from anywhere
resource "aws_s3_bucket_policy" "site" {
  for_each = aws_s3_bucket.static-content
  bucket   = each.value.bucket
  policy = jsonencode({
    Version = "2012-10-17"
    Statement = [
      {
        Sid       = "PublicReadGetObject"
        Effect    = "Allow"
        Principal = "*"
        Action    = "s3:GetObject"
        Resource = [
          "arn:aws:s3:::${each.value.bucket}",
          "arn:aws:s3:::${each.value.bucket}/*",
        ]
      }
    ]
  })
}
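
A quick anonymous read would confirm the public-read policy took effect; a sketch, assuming the buckets serve websites from the us-west-1 region named in the backend config:

    curl -I http://shockrah.xyz.s3-website-us-west-1.amazonaws.com/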