Compare commits

...

4 Commits

Author SHA1 Message Date
7f2ee6d35b Cheeky script to pull IPs out from the cluster w/ ports
Some checks failed
Ansible Linting / ansible-lint (push) Failing after 3s
Secops Linting and Safety Checks / checkov-scan-s3 (push) Failing after 14s
Good poc of how to get game server
connection info that we will provide to a bot/user
2024-12-08 18:30:06 -08:00
a4a1d55a53 Dynamically creating the pods/services
Allows us to configure everything w/ JSON later
2024-12-08 18:29:15 -08:00
bf812cce4c Adding variable structure for the new game_server config 2024-12-08 18:28:34 -08:00
abf3297498 Example nodeport based service now done 2024-12-08 15:54:14 -08:00
9 changed files with 219 additions and 4 deletions

4
infra/vultr-kubernetes/dev/.gitignore vendored Normal file
View File

@ -0,0 +1,4 @@
# created by virtualenv automatically
bin/
lib/

View File

@ -0,0 +1,38 @@
from argparse import ArgumentParser
from argparse import Namespace
from kubernetes import client, config
import re
def get_args() -> Namespace:
    """Parse command-line options.

    Returns a Namespace with ``game`` set to one of the supported
    game identifiers (the flag is mandatory).
    """
    supported_games = {"reflex", "minecraft", "nginx"}
    arg_parser = ArgumentParser(
        prog="Cluster Search Thing",
        description="General utility for finding resources for game server bot"
    )
    arg_parser.add_argument('-g', '--game', required=True, choices=supported_games)
    return arg_parser.parse_args()
def k8s_api(config_path: str) -> client.api.core_v1_api.CoreV1Api:
    """Build a CoreV1Api client from the kubeconfig at *config_path*.

    Bug fix: the function previously ignored ``config_path`` and always
    loaded the hard-coded '../config.yaml'.
    """
    config.load_kube_config(config_path)
    print("Getting pod name of our game requested")
    return client.CoreV1Api()
if __name__ == '__main__':
    args = get_args()
    # Setting up k8s stuff
    api = k8s_api('../config.yaml')
    pods = api.list_pod_for_all_namespaces(label_selector=f'app={args.game}')
    # Now we collect specific data about the game server we requested.
    # These pods run exactly one container, so the first port of the first
    # container is the game port we advertise.
    port = pods.items[0].spec.containers[0].ports[0].container_port
    # Collecting the IPs of the node that hosts the pod we care about.
    # Bug fix: previously the first node in the cluster (items[0]) was
    # inspected regardless of where the pod was actually scheduled.
    node_name = pods.items[0].spec.node_name
    node = next(n for n in api.list_node().items if n.metadata.name == node_name)
    node_ips = [a for a in node.status.addresses if a.type == 'ExternalIP']
    # An IPv4 address is four dot-separated digit groups; anything else
    # (colon-separated) is treated as IPv6. Bug fix: the old pattern
    # '[\d\.]{3}\d' matched the leading digits of IPv6 addresses too, so
    # both filters were unreliable.
    ipv4_re = re.compile(r'^(\d{1,3}\.){3}\d{1,3}$')
    ipv4 = next(a.address for a in node_ips if ipv4_re.match(a.address))
    ipv6 = next(a.address for a in node_ips if not ipv4_re.match(a.address))
    print(f'{args.game} --> {ipv4}:{port} ~~> {ipv6}:{port}')

View File

@ -0,0 +1,8 @@
home = /usr
implementation = CPython
version_info = 3.10.12.final.0
virtualenv = 20.13.0+ds
include-system-site-packages = false
base-prefix = /usr
base-exec-prefix = /usr
base-executable = /usr/bin/python3

View File

@ -0,0 +1,18 @@
cachetools==5.5.0
certifi==2024.8.30
charset-normalizer==3.4.0
durationpy==0.9
google-auth==2.36.0
idna==3.10
kubernetes==31.0.0
oauthlib==3.2.2
pyasn1==0.6.1
pyasn1_modules==0.4.1
python-dateutil==2.9.0.post0
PyYAML==6.0.2
requests==2.32.3
requests-oauthlib==2.0.0
rsa==4.9
six==1.17.0
urllib3==2.2.3
websocket-client==1.8.0

View File

@ -7,3 +7,13 @@ resource vultr_firewall_rule web_inbound {
subnet_size = 0
port = each.value
}
# Open one inbound TCP port per configured game server so players can reach
# the NodePort service from outside the cluster.
resource vultr_firewall_rule game-server-inbound {
for_each = var.game_servers.configs
firewall_group_id = vultr_kubernetes.athens.firewall_group_id
protocol = "tcp"
ip_type = "v4"
# 0.0.0.0/0 — world-open. Presumably intended for public game traffic;
# NOTE(review): confirm this should not be restricted to a narrower range.
subnet = "0.0.0.0"
subnet_size = 0
port = each.value.port
}

View File

@ -0,0 +1,52 @@
# Namespace that all game-server pods and services are created in
# (name comes from var.game_servers.namespace).
resource kubernetes_namespace game-servers {
metadata {
name = var.game_servers.namespace
}
}
# One pod per entry in var.game_servers.configs. The `app = <key>` label is
# what the matching Service selector (and the lookup script) filter on.
resource kubernetes_pod game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
labels = {
app = each.key
}
}
spec {
container {
image = each.value.image
# Container name falls back to the config map key when `name` is unset.
name = coalesce(each.value.name, each.key)
resources {
limits = {
cpu = each.value.cpu
memory = each.value.mem
}
}
port {
container_port = each.value.port
# Protocol defaults to TCP when not specified in the config.
protocol = coalesce(each.value.proto, "TCP")
}
}
}
}
# NodePort service per game server: exposes the pod on every node's
# external IP at each.value.port.
resource kubernetes_service game {
for_each = var.game_servers.configs
metadata {
name = each.key
namespace = var.game_servers.namespace
}
spec {
selector = {
app = each.key
}
port {
# NOTE(review): port, target_port and node_port are all the same value,
# which assumes the container listens on the externally exposed port
# (must also be inside the NodePort range 30000-32767) — confirm for
# images like nginx that listen on 80.
target_port = each.value.port
port = each.value.port
node_port = each.value.port
}
type = "NodePort"
}
}

View File

@ -0,0 +1,64 @@
# Sanity-check stack: a minimal nginx pod reachable via a NodePort so we can
# verify cluster networking end to end.
locals {
sanity = {
namespace = "sanity"
service = "web-health"
# NOTE(review): 30808 is also used by the nginx entry in the game_servers
# tfvars — duplicate nodePorts are rejected by Kubernetes; confirm only
# one of the two is applied at a time.
port = 30808
}
}
# Dedicated namespace for the sanity-check workload.
resource kubernetes_namespace sanity {
metadata {
name = local.sanity.namespace
}
}
# Minimal nginx pod used purely as a reachability probe. The `app` label is
# matched by the sanity service selector.
# NOTE(review): no namespace is set in metadata, so this lands in `default`,
# not the sanity namespace created above — confirm that is intended.
resource kubernetes_pod nginx {
metadata {
name = local.sanity.service
labels = {
app = local.sanity.service
}
}
spec {
container {
image = "nginx:latest"
name = "nginx"
resources {
limits = {
cpu = "200m"
memory = "64Mi"
}
}
# Restart the container if nginx stops answering on / (port 80).
liveness_probe {
http_get {
path = "/"
port = 80
}
initial_delay_seconds = 30
period_seconds = 30
}
}
}
}
# NodePort service for the sanity pod: node port 30808 -> service port 8080
# -> container port 80 (nginx default listen port).
resource kubernetes_service nginx {
metadata {
name = local.sanity.service
}
spec {
selector = {
app = local.sanity.service
}
port {
port = 8080
target_port = 80
node_port = local.sanity.port
}
type = "NodePort"
}
}

View File

@ -34,6 +34,18 @@ variable cluster {
})
}
variable lab_domain {
type = string
# Game-server definitions. Each entry in `configs` (keyed by app name) drives
# one pod, one NodePort service, and one firewall rule.
variable game_servers {
type = object({
namespace = string
configs = map(object({
# Optional container name; defaults to the map key when omitted.
name = optional(string)
image = string
cpu = string
mem = string
# Used as container_port, service port, and node_port — must lie in the
# NodePort range (30000-32767).
port = number
# Optional L4 protocol; defaults to "TCP" when omitted.
proto = optional(string)
})
)
})
}

View File

@ -1,4 +1,3 @@
cluster = {
region = "lax"
label = "athens-cluster"
@ -11,4 +10,14 @@ cluster = {
}
}
lab_domain = "temprah-lab.xyz"
# Concrete game-server inventory consumed by var.game_servers.
game_servers = {
namespace = "games"
configs = {
nginx = {
image = "nginx:latest"
cpu = "200m"
mem = "64Mi"
# NOTE(review): this becomes container_port/target_port as well, but
# nginx:latest listens on 80 — verify traffic actually reaches nginx.
port = 30808
}
}
}