Removing out of scope code
This commit is contained in:
parent dc98feef5f
commit 54af3628e4
@ -1,14 +0,0 @@
resource "aws_ebs_volume" "app_volume" {
  availability_zone = var.availability_zone
  size              = 20
  type              = "standard"
  tags = {
    Name = "APP Video block storage"
  }
}

resource "aws_volume_attachment" "ebs_att" {
  device_name = "/dev/sdf"
  volume_id   = aws_ebs_volume.app_volume.id
  instance_id = aws_instance.app_instance.id
}
@ -1,35 +0,0 @@
# This module takes care of setting up the EC2 instances that our
# containers will bind to later on

variable "aws_key" {}
variable "aws_secret" {}
variable "aws_region" {}
variable "ami_id" {}
variable "instance_type" {}
variable "ssh_key_name" {}
variable "public_key_path" {}
variable "availability_zone" {}

provider "aws" {
  access_key  = var.aws_key
  secret_key  = var.aws_secret
  region      = var.aws_region
  max_retries = 1
}

resource "aws_key_pair" "sshkey" {
  key_name   = var.ssh_key_name
  public_key = file(var.public_key_path)
}

resource "aws_instance" "app_instance" {
  ami           = var.ami_id
  instance_type = var.instance_type

  key_name = var.ssh_key_name
  # vpc_security_group_ids (rather than security_groups) is the attribute to
  # use when passing security group IDs for an instance launched into a VPC subnet
  vpc_security_group_ids = [aws_security_group.app_security_group.id]
  subnet_id              = aws_subnet.app_public_subnet.id
  tags = {
    Name = "Clippable App Instance"
  }
}
@ -1,7 +0,0 @@
resource "aws_eip" "app_eip" {
  instance = aws_instance.app_instance.id
  vpc      = true
  tags = {
    Name = "Clippable EIP"
  }
}
@ -1,6 +0,0 @@
resource "aws_internet_gateway" "app_gateway" {
  vpc_id = aws_vpc.app_vpc.id
  tags = {
    Name = "Clippable app internet gateway"
  }
}
@ -1,12 +0,0 @@
resource "aws_route_table" "app_route_table" {
  vpc_id = aws_vpc.app_vpc.id
  route {
    cidr_block = "0.0.0.0/0"
    gateway_id = aws_internet_gateway.app_gateway.id
  }
}

resource "aws_route_table_association" "app_subnet_assoc" {
  subnet_id      = aws_subnet.app_public_subnet.id
  route_table_id = aws_route_table.app_route_table.id
}
@ -1,32 +0,0 @@
#!/bin/sh

# This script sets things up so that we don't have to do much else by hand

# No harm in using sudo even as root, it's just a little pointless.
# Doing this with our AMI however means we don't have to check if we're root
# for privileged operations at provision-time
apt="sudo apt"
server_name=$1
if [ -z "$server_name" ]; then
    echo "A servername must be given as an argument"
    exit 1
fi

$apt update
$apt upgrade -y
$apt install -y nginx certbot

sudo mkdir -p /var/www/clippable
# Create the reverse proxy configuration for nginx
# WARN: we're assuming that the webserver listens on its default port;
# only this much is needed because certbot does the rest
cat << EOF | sudo tee /etc/nginx/sites-available/clippable > /dev/null
server {
    server_name $server_name;
    location / {
        proxy_pass http://0.0.0.0:8482;
    }
}
EOF
@ -1,39 +0,0 @@
resource "aws_security_group" "app_security_group" {
  name        = "App sec group"
  description = "Allowing SSH and web traffic"
  vpc_id      = aws_vpc.app_vpc.id

  ingress {
    cidr_blocks = ["0.0.0.0/0"]
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
  }
  ingress {
    cidr_blocks = ["0.0.0.0/0"]
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
  }
  ingress {
    cidr_blocks = ["0.0.0.0/0"]
    from_port   = 22
    to_port     = 22
    protocol    = "tcp"
  }

  # These are so that we can update the system regularly using apt and sometimes
  # with tarballs if we're updating something from source
  egress {
    cidr_blocks = ["0.0.0.0/0"]
    from_port   = 443
    to_port     = 443
    protocol    = "tcp"
  }
  egress {
    cidr_blocks = ["0.0.0.0/0"]
    from_port   = 80
    to_port     = 80
    protocol    = "tcp"
  }
}
@ -1,5 +0,0 @@
resource "aws_subnet" "app_public_subnet" {
  vpc_id            = aws_vpc.app_vpc.id
  cidr_block        = "10.0.0.128/26"
  availability_zone = var.availability_zone
}
@ -1,10 +0,0 @@
resource "aws_vpc" "app_vpc" {
  cidr_block           = "10.0.0.128/26"
  enable_dns_support   = true
  enable_dns_hostnames = true

  tags = {
    Name = "Clippable APP VPC"
  }
}
@ -1,57 +0,0 @@
Role Name
=========

This role is dedicated to making the setup and administration of a Clippable
server a little easier for those intending to run their own instance.

There are playbooks for maintaining this service both as a container and as a
service running under systemd.


Role Variables
--------------

Vars in: `defaults/main.yml`

* `remote_user`: Default username to use for regular tasks

  Set to `admin` by default.

* `remote_app_dir`: Directory to install application files into

  Set to `/home/{{remote_user}}/app` by default.

  This includes things like the server binary and HTML template files.
  You only need to worry about this if you're not going to run this
  in a container.

* `main_host`: Host group that you intend on targeting.

  Set to `main` by default.


Dependencies
------------

* community.docker

  This is only required if you are planning on using any of the docker playbooks.


Example Playbook
----------------

An example of how to use this role, with variables passed in as parameters, is shown below.
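This is only a sketch: it assumes the role is installed under the name `playbooks` (the name used in the repo's test playbook) and that your inventory defines a `main` group like the sample inventory in this repo.

    ---
    - hosts: main
      roles:
        - role: playbooks
      vars:
        remote_user: admin
        remote_app_dir: "/home/{{ remote_user }}/app"

Run it with `ansible-playbook -i <your-inventory> <playbook>.yml`.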

License
-------

GPL V3


Author Information
------------------

Author: Shockrah
Email: dev@shockrah.xyz
@ -1,4 +0,0 @@
---
remote_user: admin
remote_app_dir: "/home/{{remote_user}}/app"
main_host: main
@ -1,2 +0,0 @@
---
# handlers file for playbooks
@ -1,17 +0,0 @@
galaxy_info:
  author: shockrah
  description: Role for easing the deployment of Clippable instances

  # issue_tracker_url: http://example.com/issue/tracker

  license: GPL-3.0-only

  min_ansible_version: 2.1

  platforms:
    - name: Debian
      versions:
        - all
@ -1,41 +0,0 @@
---
# Task list included from docker.yml via include_tasks, so this file must be a
# flat list of tasks rather than a play with a tasks: key
- name: Install docker dependencies
  become: yes
  become_method: sudo
  apt:
    name: "{{item}}"
    update_cache: yes
  loop:
    - apt-transport-https
    - ca-certificates
    - curl
    - gnupg
    - software-properties-common
    - lsb-release

- name: Install docker GPG key
  become: yes
  become_method: sudo
  apt_key:
    url: https://download.docker.com/linux/ubuntu/gpg
    state: present

- name: Add Docker Apt Repo
  become: yes
  become_method: sudo
  apt_repository:
    repo: deb https://download.docker.com/linux/ubuntu impish stable
    state: present

- name: Install Docker components
  become: yes
  become_method: sudo
  apt:
    name: "{{item}}"
    update_cache: yes
  loop:
    - docker-ce
    - docker-ce-cli
    - containerd.io
@ -1,9 +0,0 @@
# This playbook is set up to install docker on debian based systems
---
- hosts: main
  tasks:
    - include_tasks: 'debian.yml'
      when: ansible_distribution == "Debian"
@ -1,59 +0,0 @@
# WHEN TO USE THIS PLAYBOOK:
# Use this if you're running clippable under systemd
# WHAT THIS PLAYBOOK DOES:
# This playbook takes a build/ directory similar to what the Gitlab
# pipelines generate and uploads those files to the desired directory
---
- hosts: "{{ main_host }}"
  remote_user: "{{ remote_user }}"
  tasks:
    - name: Build skeleton root directory
      file:
        path: '{{remote_app_dir}}'
        state: directory

    - name: Build skeleton static directory
      file:
        path: '{{remote_app_dir}}/static'
        state: directory

    - name: Build skeleton css directory
      file:
        path: '{{remote_app_dir}}/static/css'
        state: directory

    - name: Build skeleton js directory
      file:
        path: '{{remote_app_dir}}/static/js'
        state: directory

    - name: Build skeleton templates directory
      file:
        path: '{{remote_app_dir}}/templates'
        state: directory

    - name: Update Binary installation
      copy:
        src: "{{ item.src }}"
        dest: "{{ item.dest }}"
      with_items:
        - { src: ../../build/static/css/style.css, dest: '{{remote_app_dir}}/static/css/' }

        - { src: ../../build/static/js/index.js, dest: '{{remote_app_dir}}/static/js/' }
        - { src: ../../build/static/js/category.js, dest: '{{remote_app_dir}}/static/js/' }

        - { src: ../../build/static/cantfindshit.jpg, dest: '{{remote_app_dir}}/static/' }
        - { src: ../../build/static/favicon.png, dest: '{{remote_app_dir}}/static/' }

        - { src: ../../build/templates/list.html.tera, dest: '{{remote_app_dir}}/templates/list.html.tera' }
        - { src: ../../build/templates/video.html.tera, dest: '{{remote_app_dir}}/templates/video.html.tera' }

        - { src: ../../build/Rocket.toml, dest: '{{remote_app_dir}}/' }
        - { src: ../../build/server, dest: '{{remote_app_dir}}/' }

    - name: Restart web service
      become: yes
      become_method: sudo
      service:
        name: app
        state: restarted
@ -1,2 +0,0 @@
localhost
@ -1,5 +0,0 @@
---
- hosts: localhost
  remote_user: root
  roles:
    - playbooks
@ -1,2 +0,0 @@
---
# vars file for playbooks
@ -1,8 +0,0 @@
# AWS Configuration

For those who would like to deploy a minimal instance to AWS via Terraform,
this directory has everything you need to get a working EC2 instance up and
running very cheaply.

There is still the question of preparing the EC2 instance itself, however the
amount of configuration required is very light.
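A minimal `terraform.tfvars` sketch covering the variables declared in the configuration above; every value here is a placeholder to replace with your own:

    # Hypothetical values -- substitute your own credentials, AMI and key paths
    aws_key           = "AKIA..."
    aws_secret        = "..."
    aws_region        = "us-east-1"
    availability_zone = "us-east-1a"
    ami_id            = "ami-0123456789abcdef0"
    instance_type     = "t3.micro"
    ssh_key_name      = "clippable-key"
    public_key_path   = "~/.ssh/id_rsa.pub"

With that file in place, `terraform init` followed by `terraform apply` is enough to bring the instance up.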
@ -1,5 +0,0 @@
# NOTE: this is a sample inventory; either use your own inventory file or just
# replace the hostname/IP below
[main]
1.1.1.1