Removing playbooks that will not be used for barebones setup
This commit is contained in:
parent afd64caebd
commit 2cc9ab0815
@@ -1,15 +0,0 @@
# Further changes to DNS config are going to vary so heavily that I'm not
# even going to bother putting down anything else for dns configuration
# At this point basically just change /etc/resolv.conf to change dns targets
# and then add stuff to the /etc/hosts file as required
---
- hosts: leftcoastlab
  tasks:
    - name: Install DNSMasq
      become: yes
      apt:
        name: "{{ item }}"
        update_cache: yes
      loop:
        - dnsmasq
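The removed comments describe a manual follow-up: point /etc/resolv.conf at the desired resolvers and add entries to /etc/hosts. If that step were ever automated, a task roughly like the sketch below could do it; the address and hostname mapping are placeholders for illustration, not values taken from this repository.

# Hypothetical sketch only, not part of this commit
- name: Add a LAN host entry
  become: yes
  ansible.builtin.lineinfile:
    path: /etc/hosts
    line: "192.168.1.50 files.lablad"   # placeholder address/name pair
    state: present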
@@ -1,31 +0,0 @@
# This playbook goes through the process of setting up a simple file server
# (Filebrowser) on the target host.
---
- hosts: leftcoastlab
  become: yes
  tasks:
    - name: Include Vars
      include_vars:
        dir: ../vars

    - name: Ensure Filebrowser Volume Exists
      community.docker.docker_volume:
        name: file-storage
        state: present

    - name: Setup Filebrowser Container
      community.docker.docker_container:
        name: filebrowser
        image: filebrowser/filebrowser
        restart_policy: always
        recreate: yes
        volumes:
          - "file-storage:/srv"
        ports:
          - "{{FILEBROWSER_PORT}}:80"
        log_driver: loki
        log_options:
          loki-url: "http://localhost:{{LOKI_PORT}}/loki/api/v1/push"
docker-host/roles/setup/tasks/firewall_setup.yml (new file, 25 lines)
@@ -0,0 +1,25 @@
---
- hosts: lab
  become: yes
  vars:
    lan_subnet: "192.168.1.0/24"
  tasks:
    - name: Enable Firewall rules for basic LAN access
      community.general.ufw:
        rule: allow
        port: '{{ item }}'
        proto: tcp
        src: '{{ lan_subnet }}'
      loop:
        - 22
        - 53
        - 80
        - 443

    - name: Enable K3s Ports
      community.general.ufw:
        rule: allow
        port: '{{ item }}'
        proto: tcp
      loop:
        - 6443
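Note that these new tasks only add allow rules; nothing in the file enables UFW or sets a default policy, so the rules have no effect until the firewall itself is turned on. A follow-up task along the lines of the sketch below would typically do that (an assumption about intended usage, not part of this commit):

# Sketch only: enable UFW with a default-deny policy for inbound traffic
- name: Enable UFW and default-deny incoming connections
  community.general.ufw:
    state: enabled
    direction: incoming
    default: deny

Ordering matters: the allow rules above (notably port 22) should be in place before the default inbound policy flips to deny, so the SSH session running the play is not cut off.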
@@ -1,23 +0,0 @@
# Setup a brand new system from the ground up.
---
- hosts: leftcoastlab
  tasks:
    - name: Import Global Vars
      include_vars:
        dir: ../vars

# System pre-reqs
- import_playbook: 'system-update.yml'
- import_playbook: 'install-docker.yml'
- import_playbook: 'dns.yml'
# Logging stack
- import_playbook: 'install-loki-logger.yml'
- import_playbook: 'grafana.yml'
# For host metrics
- import_playbook: 'vector.yml'
# Containerized services
- import_playbook: 'file-server.yml'
- import_playbook: 'git-stack.yml'
- import_playbook: 'subsonic.yml'
- import_playbook: 'nginx.yml'
@@ -1,33 +0,0 @@
---
- hosts: leftcoastlab
  become: yes
  vars:
    GITEA_VOLUME: gitea-volume
  tasks:
    - name: Import Vars required for configuration
      include_vars:
        dir: ../vars

    - name: Create Gitea Volume
      community.docker.docker_volume:
        name: "{{GITEA_VOLUME}}"

    - name: Update/Install Gitea Container Service
      community.docker.docker_container:
        name: gitea
        image: gitea/gitea
        pull: yes
        restart_policy: always
        recreate: yes
        volumes:
          - "{{GITEA_VOLUME}}:/data"
          - /etc/timezone:/etc/timezone:ro
          - /etc/localtime:/etc/localtime:ro
        ports:
          - "{{GITEA_PUB_PORT}}:3000"
          - "{{GITEA_SSH_PORT}}:22"
        log_driver: loki
        log_options:
          loki-url: "http://localhost:{{LOKI_PORT}}/loki/api/v1/push"
@@ -1,26 +0,0 @@
---
- hosts: leftcoastlab
  become: yes
  tasks:
    - name: Include Vars
      include_vars:
        dir: ../vars

    - name: Create Grafana Volume
      community.docker.docker_volume:
        name: grafana-volume

    - name: Setup Grafana Container (latest)
      community.docker.docker_container:
        name: grafana
        image: grafana/grafana-oss:latest
        pull: yes
        restart_policy: always
        recreate: yes
        timeout: 120
        ports:
          - "{{GRAFANA_PORT}}:3000"
        volumes:
          - "grafana-volume:/var/lib/grafana/"
@@ -1,29 +0,0 @@
---
- hosts: leftcoastlab
  become: yes
  tasks:
    - name: Install docker dependencies
      apt:
        name: "{{item}}"
        update_cache: yes
      loop:
        - apt-transport-https
        - ca-certificates
        - curl
        - gnupg
        - software-properties-common
        - lsb-release
        - python3-pip

    - name: Install Docker
      apt:
        name: docker.io
        update_cache: yes

    - name: Install Docker Ansible Driver
      pip:
        name: docker
@@ -1,12 +0,0 @@
# This playbook installs the loki docker plugin, which allows other docker-based
# services to keep their regular logging behavior while centralizing the output
---
- hosts: leftcoastlab
  become: yes
  tasks:
    # --grant-all-permissions is the default for ansible
    - name: Install Loki Docker Plugin
      community.docker.docker_plugin:
        plugin_name: grafana/loki-docker-driver:latest
        state: enabled
        alias: loki
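For context, containers opt into this driver individually by naming the plugin's alias as their log driver; the removed service playbooks above all followed this pattern. A minimal sketch (container name and image are placeholders):

# Minimal sketch of a container shipping its logs through the loki plugin
- community.docker.docker_container:
    name: example-service      # placeholder
    image: nginx:alpine        # placeholder
    log_driver: loki
    log_options:
      loki-url: "http://localhost:{{LOKI_PORT}}/loki/api/v1/push"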
docker-host/roles/setup/tasks/k3s_setup.yml (new file, 154 lines)
@@ -0,0 +1,154 @@
---
# Auto install from the quickstart
# If airgapped, all K3s artifacts are already on the node.
- hosts:
  tasks:
    - name: Download K3s install script
      ansible.builtin.get_url:
        url: https://get.k3s.io/
        timeout: 120
        dest: /usr/local/bin/k3s-install.sh
        owner: root
        group: root
        mode: 0755

    - name: Download K3s binary
      ansible.builtin.command:
        cmd: /usr/local/bin/k3s-install.sh
      changed_when: true

    - name: Add K3s autocomplete to user bashrc
      ansible.builtin.lineinfile:
        path: "~{{ ansible_user }}/.bashrc"
        regexp: '\.\s+<\(k3s completion bash\)'
        line: ". <(k3s completion bash) # Added by k3s-ansible"

    - name: Enable and check K3s service
      ansible.builtin.systemd:
        name: k3s
        daemon_reload: true
        state: started
        enabled: true

    - name: Pause to allow first server startup
      when: (groups['server'] | length) > 1
      ansible.builtin.pause:
        seconds: 10

    - name: Check whether kubectl is installed on control node
      ansible.builtin.command: 'kubectl'
      register: kubectl_installed
      ignore_errors: true
      delegate_to: 127.0.0.1
      become: false
      changed_when: false

    - name: Apply K3S kubeconfig to control node
      when: kubectl_installed.rc == 0
      block:
        - name: Copy kubeconfig to control node
          ansible.builtin.fetch:
            src: /etc/rancher/k3s/k3s.yaml
            dest: "{{ kubeconfig }}"
            flat: true

        - name: Change server address in kubeconfig on control node
          ansible.builtin.shell: |
            KUBECONFIG={{ kubeconfig }} kubectl config set-cluster default --server=https://{{ api_endpoint }}:{{ api_port }}
          delegate_to: 127.0.0.1
          become: false
          register: csa_result
          changed_when:
            - csa_result.rc == 0

        - name: Setup kubeconfig k3s-ansible context on control node
          when: kubeconfig != "~/.kube/config"
          ansible.builtin.replace:
            path: "{{ kubeconfig }}"
            regexp: 'name: default'
            replace: 'name: k3s-ansible'
          delegate_to: 127.0.0.1
          become: false

        - name: Merge with any existing kubeconfig on control node
          when: kubeconfig != "~/.kube/config"
          ansible.builtin.shell: |
            TFILE=$(mktemp)
            KUBECONFIG={{ kubeconfig }} kubectl config set-context k3s-ansible --user=k3s-ansible --cluster=k3s-ansible
            KUBECONFIG={{ kubeconfig }} kubectl config view --flatten > ${TFILE}
            mv ${TFILE} {{ kubeconfig }}
          delegate_to: 127.0.0.1
          become: false
          register: mv_result
          changed_when:
            - mv_result.rc == 0

    - name: Start other server if any and verify status
      when:
        - (groups['server'] | length) > 1
        - inventory_hostname != groups['server'][0]
      block:
        - name: Copy K3s service file [HA]
          when: groups['server'] | length > 1
          ansible.builtin.template:
            src: "k3s-ha.service.j2"
            dest: "{{ systemd_dir }}/k3s.service"
            owner: root
            group: root
            mode: 0644

        - name: Enable and check K3s service
          ansible.builtin.systemd:
            name: k3s
            daemon_reload: true
            state: started
            enabled: true

        - name: Verify that all server nodes joined
          when: (groups['server'] | length) > 1
          ansible.builtin.command:
            cmd: >
              k3s kubectl get nodes -l "node-role.kubernetes.io/control-plane=true" -o=jsonpath="{.items[*].metadata.name}"
          register: nodes
          until: nodes.rc == 0 and (nodes.stdout.split() | length) == (groups['server'] | length)
          retries: 20
          delay: 10
          changed_when: false

    - name: Setup kubectl for user
      block:
        - name: Create kubectl symlink
          when: lookup('fileglob', '/usr/local/bin/kubectl', errors='warn') | length == 0
          ansible.builtin.file:
            src: /usr/local/bin/k3s
            dest: /usr/local/bin/kubectl
            state: link

        - name: Create directory .kube
          ansible.builtin.file:
            path: ~{{ ansible_user }}/.kube
            state: directory
            owner: "{{ ansible_user }}"
            mode: "u=rwx,g=rx,o="

        - name: Copy config file to user home directory
          ansible.builtin.copy:
            src: /etc/rancher/k3s/k3s.yaml
            dest: ~{{ ansible_user }}/.kube/config
            remote_src: true
            owner: "{{ ansible_user }}"
            mode: "u=rw,g=,o="

        - name: Configure default KUBECONFIG for user
          ansible.builtin.lineinfile:
            path: ~{{ ansible_user }}/.bashrc
            regexp: 'export KUBECONFIG=~/.kube/config'
            line: 'export KUBECONFIG=~/.kube/config # Added by k3s-ansible'
            state: present

        - name: Configure kubectl autocomplete
          ansible.builtin.lineinfile:
            path: ~{{ ansible_user }}/.bashrc
            regexp: '\.\s+<\(kubectl completion bash\)'
            line: ". <(kubectl completion bash) # Added by k3s-ansible"
@@ -1,63 +0,0 @@
# Sets up an instance of Kanboard from top to bottom
---
- hosts: leftcoastlab
  become: yes
  vars:
    PRE: kanboard
    DATA_VOLUME: "{{PRE}}-data-volume"
    PLUGINS_VOLUME: "{{PRE}}-plugins-volume"
    SSL_VOLUME: "{{PRE}}-ssl-volume"
  tasks:
    - name: Include Vars
      include_vars:
        dir: ../vars

    - name: Ensure Data volume exists
      community.docker.docker_volume:
        name: "{{DATA_VOLUME}}"

    - name: Ensure Plugins Volume exists
      community.docker.docker_volume:
        name: "{{PLUGINS_VOLUME}}"

    - name: Ensure SSL Volume exists
      community.docker.docker_volume:
        name: "{{SSL_VOLUME}}"

    - name: Ensure Kanboard container is running
      community.docker.docker_container:
        name: kanboard
        image: kanboard/kanboard:latest
        pull: yes
        restart_policy: always
        recreate: yes
        ports:
          - "{{ KANBOARD_PORT }}:80"
          - "{{ KANBOARD_PORT_SSL }}:443"
        volumes:
          - "{{DATA_VOLUME}}:/var/www/app/data"
          - "{{PLUGINS_VOLUME}}:/var/www/app/plugins"
          - "{{SSL_VOLUME}}:/etc/nginx/ssl"

    - name: Create Nginx Config for service
      template:
        dest: /etc/nginx/sites-available/todo.lablad
        src: ../templates/proxy.conf.j2
      notify:
        - restart-nginx
      vars:
        SERVER_NAME: todo.lablad
        PORT: "{{ KANBOARD_PORT }}"

    - name: Restart Nginx
      service:
        name: nginx
        state: restarted
@@ -1,8 +0,0 @@
---
- hosts: leftcoastlab
  become: yes
  tasks:
    - name: Copy landing page
      copy:
        src: "{{LANDING_PAGE_ROOT_PATH}}/public/"
        dest: /var/www/html/
@@ -1,77 +0,0 @@
---
- hosts: leftcoastlab
  become: yes
  vars:
    STATIC_USER: staticweb
  tasks:
    - name: Install Nginx Package
      apt:
        name: nginx
        update_cache: yes
        state: present

    - name: Stop Apache2 Service
      systemd:
        name: apache2
        state: stopped
        enabled: no

    - name: Remove Apache2
      apt:
        name: apache2
        state: absent

    - name: Ensure the default website is disabled
      file:
        path: /etc/nginx/sites-enabled/default
        state: absent

    - name: Create website user account
      user:
        name: "{{STATIC_USER}}"
        comment: Static Website maintainer account
        state: present

    - name: Create Static web root directory
      file:
        path: /var/www
        state: directory
        recurse: yes
        owner: "{{STATIC_USER}}"
        group: "{{STATIC_USER}}"

    - name: Copy Reverse proxy configs
      copy:
        src: "{{ item }}"
        dest: /etc/nginx/sites-available/
      loop:
        - ../files/git.lablad
        - ../files/music.lablad
        - ../files/files.lablad
        - ../files/metrics.lablad
        - ../files/todo.lablad
        - ../files/home.lablad
        - ../files/vault.lablad

    - name: Enable Sites
      file:
        src: /etc/nginx/sites-available/{{item}}
        dest: /etc/nginx/sites-enabled/{{item}}
        state: link
      loop:
        - git.lablad
        - music.lablad
        - files.lablad
        - metrics.lablad
        - vault.lablad
        - todo.lablad
        - home.lablad
      notify:
        - restart-nginx

  handlers:
    - import_tasks: ../handlers/nginx.yml
@@ -1,63 +0,0 @@
---
- hosts: leftcoastlab
  vars:
    PROM_URL: "https://github.com/prometheus/node_exporter/releases/download/v1.3.1/node_exporter-1.3.1.linux-amd64.tar.gz"
  become: yes
  tasks:
    - name: Include Global Vars
      include_vars:
        dir: ../vars

    - name: Download Prometheus Node Exporter static binary tarball
      get_url:
        url: "{{ PROM_URL }}"
        dest: /tmp/prom-node-exporer.tar.gz

    - name: Untar node exporter tarball package
      unarchive:
        src: /tmp/prom-node-exporer.tar.gz
        dest: /opt/prom-node-exporter
        creates: /opt/prom-node-exporter
        remote_src: yes

    - name: Copy Unit file for node exporter
      copy:
        src: ../files/services/node_exporter.service
        dest: /etc/systemd/system/node_exporter.service

    - name: Install Node Exporter Service and Start
      systemd:
        daemon_reload: yes
        name: node_exporter
        state: restarted
        enabled: yes

    - name: Ensure Prom Docker Volume Exists
      community.docker.docker_volume:
        name: prometheus-volume

    - name: Ensure Config directory exists
      file:
        path: /opt/prometheus
        state: directory

    - name: Copy Prometheus template to target directory
      template:
        dest: /opt/prometheus/prometheus.yml
        src: ../templates/prometheus.yml.j2

    - name: Setup Prometheus (Containerized) Service
      community.docker.docker_container:
        name: prometheus
        image: prom/prometheus
        pull: yes
        restart_policy: always
        recreate: yes
        volumes:
          - "prometheus-volume:/prometheus"
          - "/opt/prometheus/prometheus.yml:/etc/prometheus/prometheus.yml"
        ports:
          - "{{PROM_PORT}}:{{PROM_PORT}}"
@@ -1,37 +0,0 @@
# This playbook sets up subsonic with/without a fresh to work with
---
- hosts: leftcoastlab
  become: yes
  vars:
    SUBSONIC_DIR: /opt/subsonic/data
    SUBSONIC_CONFIG_VOL: subsonic-config-volume
  tasks:
    - name: Include Vars
      include_vars:
        dir: ../vars

    - name: Ensure Subsonic music directory exists
      file:
        path: "{{SUBSONIC_DIR}}"
        state: directory

    - name: Ensure Subsonic configuration volume exists
      community.docker.docker_volume:
        name: "{{SUBSONIC_CONFIG_VOL}}"
        state: present

    - name: Deploy Subsonic container
      community.docker.docker_container:
        name: subsonic
        image: gzurowski/subsonic
        restart_policy: always
        recreate: yes
        ports:
          - "{{SUBSONIC_PORT}}:4040"
        volumes:
          - "{{SUBSONIC_DIR}}:/var/music"
        log_driver: loki
        log_options:
          loki-url: "http://localhost:{{LOKI_PORT}}/loki/api/v1/push"
@@ -1,47 +0,0 @@
---
- hosts: leftcoastlab
  tasks:
    - name: Pull In Vars
      include_vars:
        dir: ../vars

    - name: Create Vault Bind Root
      become: yes
      file:
        path: /opt/vault
        state: directory

    - name: Ensure Vault Bind directories in place
      become: yes
      file:
        path: "/opt/vault/{{item}}"
        state: directory
      loop:
        - config
        - logs
        - file

    - name: Create Vault Config
      become: yes
      template:
        src: ../templates/vault-config.json.j2
        dest: /opt/vault/config/vault.json

    - name: Start up Vault Service
      community.docker.docker_container:
        name: vault
        image: vault
        ports:
          - "{{VAULT_PORT}}:8200"
        restart_policy: always
        volumes:
          - /opt/vault/logs:/vault/logs
          - /opt/vault/file:/vault/file
          - /opt/vault/config:/vault/config
        env:
          VAULT_ADDR: http://127.0.0.1:8200
        capabilities:
          - IPC_LOCK
        entrypoint:
          - vault server -config=/vault/config/vault.json