Compare commits
No commits in common. "30c8f9128c1f76283a4372ae23ac0a50e5f0c31c" and "0f7f95a1b04473679384c1fd6826b3164165ccea" have entirely different histories.
30c8f9128c ... 0f7f95a1b0
@@ -1,67 +0,0 @@
-locals {
-  filebrowser = {
-    # Name that is basically used everywhere
-    name = "filebrowser"
-    # For the claim itself
-    vol = {
-      size = "15Gi"
-      mode = "ReadWriteOnce"
-    }
-  }
-}
-
-resource "kubernetes_pod" "filebrowser" {
-  metadata {
-    name      = "filebrowser"
-    namespace = kubernetes_namespace.vault.metadata.0.name
-    labels = {
-      app = "filebrowser"
-    }
-  }
-  spec {
-    container {
-      image = "filebrowser/filebrowser"
-      name  = "filebrowser"
-      env {
-        name  = "TZ"
-        value = "PST"
-      }
-      port {
-        container_port = 80
-      }
-    }
-  }
-}
-
-resource "kubernetes_service" "filebrowser" {
-  metadata {
-    name      = "filebrowser"
-    namespace = kubernetes_namespace.vault.metadata.0.name
-  }
-  spec {
-    selector = {
-      app = kubernetes_pod.filebrowser.metadata.0.labels.app
-    }
-    port {
-      port        = 8000
-      target_port = 80
-    }
-    type = "LoadBalancer"
-  }
-}
-
-resource "kubernetes_persistent_volume_claim" "filebrowser" {
-  metadata {
-    name = "${local.filebrowser.name}-pvc"
-  }
-  spec {
-    access_modes = [ local.filebrowser.vol.mode ]
-    resources {
-      requests = {
-        storage = local.filebrowser.vol.size
-      }
-    }
-    volume_name = "${local.filebrowser.name}-vol"
-  }
-}
@@ -1,39 +0,0 @@
-resource "kubernetes_pod" "nginx_plain" {
-  metadata {
-    name      = "plain-service"
-    namespace = kubernetes_namespace.vault.metadata.0.name
-    labels = {
-      app = "plain-app"
-    }
-  }
-  spec {
-    container {
-      image = "nginx"
-      name  = "plain-hello"
-      env {
-        name  = "arbitrary"
-        value = "rando value"
-      }
-      port {
-        container_port = 80
-      }
-    }
-  }
-}
-
-resource "kubernetes_service" "nginx_plain" {
-  metadata {
-    name      = "plain-service"
-    namespace = kubernetes_namespace.vault.metadata.0.name
-  }
-  spec {
-    selector = {
-      app = kubernetes_pod.nginx_plain.metadata.0.labels.app
-    }
-    port {
-      port        = 8080
-      target_port = 80
-    }
-    type = "LoadBalancer"
-  }
-}
@@ -1,16 +1,5 @@
-# This namespace is used for things that we need/want to store somewhere secure
 resource "kubernetes_namespace" "vault" {
   metadata {
     name = "vault"
   }
 }
-
-# This namespace is for more generic things like a simple nginx page or some
-# documentation, etc.
-resource "kubernetes_namespace" "web" {
-  metadata {
-    name = "web-services"
-  }
-}
-
-
@@ -1,7 +0,0 @@
-output "hello" {
-  value = "${var.cluster_dns}:${kubernetes_service.nginx_plain.spec.0.port.0.port}"
-}
-
-output "filebrowser" {
-  value = "${var.cluster_dns}:${kubernetes_service.filebrowser.spec.0.port.0.port}"
-}
@@ -1,16 +0,0 @@
-DNS Names and things
-====================
-
-Some services, such as nginx.cluster.local, are mentioned in this Terraform
-module but are not set up here. They are set up in the folder above with the
-setup simple dns playbook.
-
-
-Service Access
-==============
-
-Hostname: cluster.local
-
-Filebrowser    files.cluster.local:8000
-Sample Page    nginx.cluster.local:8080
-
@@ -1,5 +0,0 @@
-variable "cluster_dns" {
-  default = "cluster.local"
-  type    = string
-}
-
@@ -1,29 +0,0 @@
-# Creating the volume for the vault namespace that we can use
-locals {
-  vault = {
-    volume = {
-      modes = [ "ReadWriteOnce" ]
-      name  = "vault-vol"
-      size  = "25Gi"
-    }
-  }
-}
-
-
-resource "kubernetes_persistent_volume" "vault" {
-  metadata {
-    name = local.vault.volume.name
-  }
-  spec {
-    capacity = {
-      storage = local.vault.volume.size
-    }
-    access_modes = local.vault.volume.modes
-    persistent_volume_source {
-      gce_persistent_disk {
-        pd_name = "${local.filebrowser.name}-vol-pd"
-      }
-    }
-  }
-}
-
lab/roles/setup/defaults/main.yml (new file)
@@ -0,0 +1,3 @@
+---
+# defaults file for setup
+LAB_FQDN: lab.local
lab/roles/setup/files/default.lablad (new file)
@@ -0,0 +1,8 @@
+# This config is purely for the default landing page that nginx normally serves
+# up. The intention is to make nginx's default page useful.
+
+server {
+    listen 80 default_server;
+    listen [::]:80 default_server;
+    root /var/www/html;
+}
lab/roles/setup/files/etc/hosts (new file)
@@ -0,0 +1,18 @@
+# WARN: this should never be committed to source control
+# The following entries exist because we
+# use dnsmasq for its dead-simple nature
+
+127.0.0.1 localhost
+127.0.1.1 recycled-lab-host
+
+# Lab service names
+192.168.1.23 git.lablad
+192.168.1.23 files.lablad
+192.168.1.23 main.lablad
+192.168.1.23 music.lablad
+192.168.1.1 router
+
+# The following lines are desirable for IPv6 capable hosts
+::1 localhost ip6-localhost ip6-loopback
+ff02::1 ip6-allnodes
+ff02::2 ip6-allrouters
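Since this hosts file lives under the role's files/ directory, it is presumably pushed out verbatim by a task elsewhere in the role. A minimal sketch of such a task, assuming the destination, ownership, and backup choice (none of which appear in this diff):

# Hypothetical task, not part of this diff: deploy the static hosts file.
- name: Install static /etc/hosts with the lab service names
  become: yes
  ansible.builtin.copy:
    src: etc/hosts
    dest: /etc/hosts
    owner: root
    group: root
    mode: "0644"
    backup: yes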
lab/roles/setup/files/files.lablad (new file)
@@ -0,0 +1,6 @@
+server {
+    server_name files.lablad.net;
+    location / {
+        proxy_pass http://127.0.0.1:8080;
+    }
+}
lab/roles/setup/files/git.lablad (new file)
@@ -0,0 +1,10 @@
+# This file pertains to the Gitea service, which is served under the FQDN:
+# git.lablad
+# The default port (3000) is defined in vars/main.yml as GITEA_PUB_PORT
+
+server {
+    server_name git.lablad.net;
+    location / {
+        proxy_pass http://127.0.0.1:3000;
+    }
+}
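These server blocks follow Debian's sites-available/sites-enabled convention. A hedged sketch of how one of them could be installed and activated, assuming the standard nginx paths and a "Reload nginx" handler defined elsewhere (none of this is shown in the diff):

# Hypothetical tasks, not part of this diff.
- name: Install the git.lablad site config
  ansible.builtin.copy:
    src: git.lablad
    dest: /etc/nginx/sites-available/git.lablad
  notify: Reload nginx

- name: Enable the git.lablad site
  ansible.builtin.file:
    src: /etc/nginx/sites-available/git.lablad
    dest: /etc/nginx/sites-enabled/git.lablad
    state: link
  notify: Reload nginx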
lab/roles/setup/files/home.lablad (new file)
@@ -0,0 +1,8 @@
+server {
+    root /var/www/html;
+    index index.html;
+    server_name lablad.net;
+    location / {
+        try_files $uri $uri/ =404;
+    }
+}
lab/roles/setup/files/metrics.lablad (new file)
@@ -0,0 +1,10 @@
+# metrics.lablad
+
+server {
+    server_name metrics.lablad.net;
+    proxy_set_header Host $http_host;
+    location / {
+        proxy_pass http://127.0.0.1:6000;
+    }
+}
+
lab/roles/setup/files/music.lablad (new file)
@@ -0,0 +1,6 @@
+server {
+    server_name music.lablad.net;
+    location / {
+        proxy_pass http://127.0.0.1:4040;
+    }
+}
lab/roles/setup/files/services/node_exporter.service (new file)
@@ -0,0 +1,16 @@
+[Unit]
+Description=Node Exporter which exposes host metrics for Prometheus
+After=network.target
+
+[Service]
+WorkingDirectory=/opt/prom-node-exporter
+Type=simple
+Restart=always
+RestartSec=10
+ExecStart=/opt/prom-node-exporter/node_exporter
+
+[Install]
+WantedBy=multi-user.target
+
+
+
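The unit expects the node_exporter binary under /opt/prom-node-exporter, but the diff does not show how the unit itself is installed. A sketch of the likely wiring; every path beyond those already in the unit file is an assumption:

# Hypothetical tasks, not part of this diff.
- name: Install the node_exporter systemd unit
  ansible.builtin.copy:
    src: services/node_exporter.service
    dest: /etc/systemd/system/node_exporter.service
    mode: "0644"

- name: Enable and start node_exporter
  ansible.builtin.systemd:
    name: node_exporter
    daemon_reload: true
    enabled: true
    state: started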
lab/roles/setup/files/todo.lablad (new file)
@@ -0,0 +1,8 @@
+server {
+    server_name todo.lablad.net;
+    location / {
+        proxy_pass http://127.0.0.1:9238;
+    }
+}
+
+
lab/roles/setup/files/vault-config.json (new file)
@@ -0,0 +1,14 @@
+{
+    "backend": {
+        "file": {
+            "path": "/vault/file"
+        }
+    },
+    "listener": {
+        "tcp": {
+            "address": "0.0.0.0:{{VAULT_PORT}}",
+            "tls_disable": 1
+        }
+    },
+    "ui": true
+}
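The {{VAULT_PORT}} placeholder is Jinja2 syntax, so this file is presumably rendered with the template module (picking up VAULT_PORT: 8200 from vars/main.yml) rather than copied as-is, even though it sits under files/ instead of templates/. A sketch under that assumption; the destination path is invented:

# Hypothetical task, not part of this diff: render the Vault config.
- name: Render Vault configuration
  ansible.builtin.template:
    src: ../files/vault-config.json   # would normally live under templates/
    dest: /etc/vault/config.json
    mode: "0600"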
lab/roles/setup/files/vault.lablad (new file)
@@ -0,0 +1,11 @@
+# This file pertains to the Vault service, which is served under the FQDN:
+# vault.lablad
+# The default port (8200) is defined in vars/main.yml as VAULT_PORT
+
+server {
+    server_name vault.lablad.net;
+    location / {
+        proxy_pass http://127.0.0.1:8200;
+    }
+}
+
lab/roles/setup/meta/main.yml (new file)
@@ -0,0 +1,31 @@
+galaxy_info:
+  author: shockrah
+  description: This role provides multiple tools for setting up a private lab server
+  company: Personal
+
+  # If the issue tracker for your role is not on github, uncomment the
+  # next line and provide a value
+  # issue_tracker_url: http://example.com/issue/tracker
+
+  license: GPL-3.0-only
+
+  min_ansible_version: 2.1
+
+  # If this is a Container Enabled role, provide the minimum Ansible Container version.
+  # min_ansible_container_version:
+
+  platforms:
+    - name: Ubuntu
+      versions:
+        - Jammy
+
+  galaxy_tags: []
+  # List tags for your role here, one per line. A tag is a keyword that describes
+  # and categorizes the role. Users find roles by searching for tags. Be sure to
+  # remove the '[]' above, if you add tags to this list.
+  #
+  # NOTE: A tag is limited to a single word comprised of alphanumeric characters.
+  # Maximum 20 tags per role.
+
+dependencies:
+  - community.docker
lab/roles/setup/tasks/firewall_setup.yml (new file)
@@ -0,0 +1,25 @@
+---
+- hosts: lab
+  become: yes
+  vars:
+    lan_subnet: "192.168.1.0/24"
+  tasks:
+    - name: Enable Firewall rules for basic LAN access
+      community.general.ufw:
+        rule: allow
+        port: '{{ item }}'
+        proto: tcp
+        src: '{{ lan_subnet }}'
+      loop:
+        - 22
+        - 53
+        - 80
+        - 443
+    - name: Enable K3s Ports
+      community.general.ufw:
+        rule: allow
+        port: '{{ item }}'
+        proto: tcp
+      loop:
+        - 6443
+
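One gap worth noting: the play adds allow rules but never switches ufw on, so none of the rules take effect until the firewall is enabled. A hedged sketch of the missing step (the default-deny policy is an assumption, not from the diff):

# Hypothetical task, not part of this diff: turn the firewall on.
- name: Enable ufw with a default deny policy
  community.general.ufw:
    state: enabled
    policy: deny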
lab/roles/setup/tasks/get_latest_base_packages.yml (new file)
@@ -0,0 +1,10 @@
+# Simply update all required packages that we have on the system.
+# This also takes care of updating any packages that must be updated through
+# means of Git or some other non-apt method, should it be required.
+---
+- hosts: leftcoastlab
+  become: yes
+  tasks:
+    - name: Distribution Upgrade
+      apt:
+        upgrade: dist
lab/roles/setup/tasks/k3s_setup.yml (new file)
@@ -0,0 +1,154 @@
+---
+# Auto install from the quickstart
+# If airgapped, all K3s artifacts are already on the node.
+- hosts:
+  tasks:
+    - name: Download K3s install script
+      ansible.builtin.get_url:
+        url: https://get.k3s.io/
+        timeout: 120
+        dest: /usr/local/bin/k3s-install.sh
+        owner: root
+        group: root
+        mode: "0755"
+
+    - name: Download K3s binary
+      ansible.builtin.command:
+        cmd: /usr/local/bin/k3s-install.sh
+      changed_when: true
+
+    - name: Add K3s autocomplete to user bashrc
+      ansible.builtin.lineinfile:
+        path: "~{{ ansible_user }}/.bashrc"
+        regexp: '\.\s+<\(k3s completion bash\)'
+        line: ". <(k3s completion bash) # Added by k3s-ansible"
+
+    - name: Enable and check K3s service
+      ansible.builtin.systemd:
+        name: k3s
+        daemon_reload: true
+        state: started
+        enabled: true
+
+    - name: Pause to allow first server startup
+      when: (groups['server'] | length) > 1
+      ansible.builtin.pause:
+        seconds: 10
+
+    - name: Check whether kubectl is installed on control node
+      ansible.builtin.command: 'kubectl'
+      register: kubectl_installed
+      ignore_errors: true
+      delegate_to: 127.0.0.1
+      become: false
+      changed_when: false
+
+    - name: Apply K3S kubeconfig to control node
+      when: kubectl_installed.rc == 0
+      block:
+        - name: Copy kubeconfig to control node
+          ansible.builtin.fetch:
+            src: /etc/rancher/k3s/k3s.yaml
+            dest: "{{ kubeconfig }}"
+            flat: true
+
+        - name: Change server address in kubeconfig on control node
+          ansible.builtin.shell: |
+            KUBECONFIG={{ kubeconfig }} kubectl config set-cluster default --server=https://{{ api_endpoint }}:{{ api_port }}
+          delegate_to: 127.0.0.1
+          become: false
+          register: csa_result
+          changed_when:
+            - csa_result.rc == 0
+
+        - name: Setup kubeconfig k3s-ansible context on control node
+          when: kubeconfig != "~/.kube/config"
+          ansible.builtin.replace:
+            path: "{{ kubeconfig }}"
+            regexp: 'name: default'
+            replace: 'name: k3s-ansible'
+          delegate_to: 127.0.0.1
+          become: false
+
+        - name: Merge with any existing kubeconfig on control node
+          when: kubeconfig != "~/.kube/config"
+          ansible.builtin.shell: |
+            TFILE=$(mktemp)
+            KUBECONFIG={{ kubeconfig }} kubectl config set-context k3s-ansible --user=k3s-ansible --cluster=k3s-ansible
+            KUBECONFIG={{ kubeconfig }} kubectl config view --flatten > ${TFILE}
+            mv ${TFILE} {{ kubeconfig }}
+          delegate_to: 127.0.0.1
+          become: false
+          register: mv_result
+          changed_when:
+            - mv_result.rc == 0
+
+    - name: Start other server if any and verify status
+      when:
+        - (groups['server'] | length) > 1
+        - inventory_hostname != groups['server'][0]
+      block:
+        - name: Copy K3s service file [HA]
+          when: groups['server'] | length > 1
+          ansible.builtin.template:
+            src: "k3s-ha.service.j2"
+            dest: "{{ systemd_dir }}/k3s.service"
+            owner: root
+            group: root
+            mode: "0644"
+
+        - name: Enable and check K3s service
+          ansible.builtin.systemd:
+            name: k3s
+            daemon_reload: true
+            state: started
+            enabled: true
+
+        - name: Verify that all server nodes joined
+          when: (groups['server'] | length) > 1
+          ansible.builtin.command:
+            cmd: >
+              k3s kubectl get nodes -l "node-role.kubernetes.io/control-plane=true" -o=jsonpath="{.items[*].metadata.name}"
+          register: nodes
+          until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['server'] | length)
+          retries: 20
+          delay: 10
+          changed_when: false
+
+    - name: Setup kubectl for user
+      block:
+        - name: Create kubectl symlink
+          when: lookup('fileglob', '/usr/local/bin/kubectl', errors='warn') | length == 0
+          ansible.builtin.file:
+            src: /usr/local/bin/k3s
+            dest: /usr/local/bin/kubectl
+            state: link
+
+        - name: Create directory .kube
+          ansible.builtin.file:
+            path: ~{{ ansible_user }}/.kube
+            state: directory
+            owner: "{{ ansible_user }}"
+            mode: "u=rwx,g=rx,o="
+
+        - name: Copy config file to user home directory
+          ansible.builtin.copy:
+            src: /etc/rancher/k3s/k3s.yaml
+            dest: ~{{ ansible_user }}/.kube/config
+            remote_src: true
+            owner: "{{ ansible_user }}"
+            mode: "u=rw,g=,o="
+
+        - name: Configure default KUBECONFIG for user
+          ansible.builtin.lineinfile:
+            path: ~{{ ansible_user }}/.bashrc
+            regexp: 'export KUBECONFIG=~/.kube/config'
+            line: 'export KUBECONFIG=~/.kube/config # Added by k3s-ansible'
+            state: present
+
+        - name: Configure kubectl autocomplete
+          ansible.builtin.lineinfile:
+            path: ~{{ ansible_user }}/.bashrc
+            regexp: '\.\s+<\(kubectl completion bash\)'
+            line: ". <(kubectl completion bash) # Added by k3s-ansible"
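This playbook leans on several inventory-supplied names: a server group plus the ansible_user, kubeconfig, api_endpoint, api_port, and systemd_dir variables, none of which are defined in this diff. A sketch of an inventory that would satisfy it; every hostname, address, and value below is a placeholder, not from the source:

# Hypothetical inventory, not part of this diff.
server:
  hosts:
    lab-node-1:
      ansible_host: 192.168.1.100
  vars:
    ansible_user: ubuntu
    kubeconfig: ~/.kube/k3s-lab.yaml
    api_endpoint: 192.168.1.100
    api_port: 6443
    systemd_dir: /etc/systemd/system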
lab/roles/setup/templates/.gitkeep (new empty file)
lab/roles/setup/tests/test.yml (new file)
@@ -0,0 +1,19 @@
+---
+- hosts: lab
+  tasks:
+    - name: Normal Echo
+      command: echo Quick test of an echo
+      register: normal_echo
+
+    - debug:
+        var: normal_echo.stdout
+
+    - name: Checking Sudo access
+      become: yes
+      command: echo One more test with sudo this time
+      register: sudo_echo
+
+    - debug:
+        var: sudo_echo.stdout
+    - debug:
+        var: sudo_echo.stderr
lab/roles/setup/vars/main.yml (new file)
@@ -0,0 +1,13 @@
+---
+# vars file for setup
+UBUNTU_CODENAME: jammy
+GITEA_PUB_PORT: 3000
+GITEA_SSH_PORT: 2222
+FILEBROWSER_PORT: 8080
+LOKI_PORT: 3100
+GRAFANA_PORT: 6000
+SUBSONIC_PORT: 4040
+PROM_PORT: 9090
+KANBOARD_PORT: 9238
+KANBOARD_PORT_SSL: 9239
+VAULT_PORT: 8200
@@ -5,11 +5,3 @@
 - name: Install K3S
   become: yes
   import_tasks: tasks/install-k3s.yml
-  tags:
-    - bare-setup
-- name: Ensure the service DNS names are in /etc/hosts
-  become: yes
-  import_tasks: tasks/setup-etc-hosts.yml
-  tags:
-    - dns
-
@@ -1,36 +0,0 @@
-# Do not run this anymore because dnsmasq breaks too many things at once
-- name: Install dnsmasq
-  apt:
-    name: dnsmasq
-- name: Ensure config ready for local network use
-  lineinfile:
-    path: /etc/dnsmasq.conf
-    line: "{{ item }}"
-    state: present
-  loop:
-    - domain-needed
-    - bogus-priv
-    - "cache-size=1000"
-    - "server=208.67.222.222"
-    - "server=208.67.220.220"
-    - "domain=cluster.lan"
-    - "expand-hosts"
-    - "listen-address=127.0.0.1,192.168.1.100"
-- name: Add entries to the /etc/hosts file
-  lineinfile:
-    path: /etc/hosts
-    line: "192.168.1.100 {{ item }}"
-  loop:
-    - nginx.lan
-    - git.lan
-- name: Stop systemd-resolved from ever being a thing again
-  systemd_service:
-    name: systemd-resolved
-    state: stopped
-    masked: yes
-    enabled: no
-- name: Restart the dnsmasq service
-  service:
-    state: restarted
-    name: dnsmasq
-
@@ -1,9 +0,0 @@
-# Allows the host to resolve names that we use in cluster
-- name: Ensure host can resolve its own service names
-  lineinfile:
-    path: /etc/hosts
-    state: present
-    line: "192.168.1.100 {{ item }}"
-  loop:
-    - files.cluster.local
-    - nginx.cluster.local