Reorganization and better Terraform

commit 2e61e7ef3c (parent b0de53e57f)
Author: main <magic_rb@redalder.org>
Date: 2022-07-30 23:27:40 +02:00
Signed-off-by: main <magic_rb@redalder.org>
GPG key ID: 08D5287CC5DDCA0E (no known key found for this signature in database)
55 changed files with 1572 additions and 1477 deletions

View file

@ -1,3 +0,0 @@
path "kv/data/camptules" {
capabilities = ["read"]
}

View file

@ -1,29 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
# data "nomad_plugin" "nomad-driver-containerd" {
# plugin_id = "nomad-driver-containerd"
# wait_for_healthy = true
# }
resource "vault_policy" "camputules-policy" {
name = "camptules-policy"
policy = file("${path.module}/camptules-policy.hcl")
}
resource "nomad_job" "camptules" {
jobspec = file("${path.module}/nomad.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_sha = var.flake_sha
}
}
}

View file

@ -1,3 +0,0 @@
path "kv/data/dovecot" {
capabilities = ["read"]
}

View file

@ -1,3 +0,0 @@
path "kv/data/getmail" {
capabilities = ["read"]
}

View file

@ -1,3 +0,0 @@
path "kv/data/gitea" {
capabilities = ["read"]
}

View file

@ -1,3 +0,0 @@
path "kv/data/home-assistant" {
capabilities = ["read"]
}

View file

@ -1,3 +0,0 @@
path "kv/data/mqtt" {
capabilities = ["read"]
}

View file

@ -1,7 +0,0 @@
path "kv/data/mqtt" {
capabilities = ["read"]
}
path "kv/data/zigbee2mqtt" {
capabilities = ["read"]
}

View file

@ -1,3 +0,0 @@
path "kv/data/hydra" {
capabilities = ["read"]
}

View file

@ -1,583 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
job "ingress" {
datacenters = [ "do-1", "homelab-1" ]
type = "service"
group "ingress-toothpick" {
count = 1
constraint {
attribute = "${attr.unique.hostname}"
value = "toothpick"
}
volume "ingress-letsencrypt" {
type = "csi"
source = "ingress-letsencrypt"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
network {
mode = "bridge"
port "http" {
static = 80
to = 80
host_network = "public"
}
port "https" {
static = 443
to = 443
host_network = "public"
}
port "minecraft" {
static = 25565
to = 25565
host_network = "public"
}
}
service {
name = "ingress-toothpick"
port = "http"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "gitea"
local_bind_port = 3000
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "hydra"
local_bind_port = 8666
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "nextcloud"
local_bind_port = 8777
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "website"
local_bind_port = 8080
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "minecraft"
local_bind_port = 2666
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "reicio"
local_bind_port = 8000
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
}
}
}
}
task "nginx" {
driver = "docker"
volume_mount {
volume = "ingress-letsencrypt"
destination = "/etc/letsencrypt"
read_only = false
}
# artifact {
# source = "http://hydra/build/99/download/1/image.tar.gz"
# }
config {
# load = "nixng-ingress.tar.gz"
image = "nixng-ingress:local"
ports = ["http", "https", "minecraft"]
memory_hard_limit = 128
}
resources {
cpu = 200
memory = 32
}
template {
data = <<EOF
ssl_certificate_key /etc/letsencrypt/live/redalder.org/privkey.pem;
ssl_certificate /etc/letsencrypt/live/redalder.org/fullchain.pem;
ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_timeout 1440m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
EOF
destination = "local/ssl.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
EOF
destination = "local/headers.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
add_header X-Frame-Options "SAMEORIGIN";
add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
EOF
destination = "local/security.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
upstream minecraft {
server {{ env "NOMAD_UPSTREAM_ADDR_minecraft" }};
}
server {
listen 25565;
proxy_pass minecraft;
}
EOF
destination = "local/streams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
upstream gitea {
server {{ env "NOMAD_UPSTREAM_ADDR_gitea" }};
}
upstream hydra {
server {{ env "NOMAD_UPSTREAM_ADDR_hydra" }};
}
upstream nextcloud {
server {{ env "NOMAD_UPSTREAM_ADDR_nextcloud" }};
}
upstream website {
server {{ env "NOMAD_UPSTREAM_ADDR_website" }};
}
upstream reicio {
server {{ env "NOMAD_UPSTREAM_ADDR_reicio" }};
}
server {
listen 443 ssl;
server_name _;
include /local/ssl.conf;
return 404;
}
server {
listen 443 ssl;
server_name gitea.redalder.org;
include /local/security.conf;
include /local/ssl.conf;
client_max_body_size 100M;
location / {
include /local/headers.conf;
proxy_pass http://gitea;
}
}
server {
listen 443 ssl;
server_name hydra.redalder.org;
include /local/security.conf;
include /local/ssl.conf;
location / {
include /local/headers.conf;
proxy_pass http://hydra;
}
}
server {
listen 443 ssl;
server_name redalder.org nixng.org;
include /local/security.conf;
include /local/ssl.conf;
location /nextcloud/ {
include /local/headers.conf;
proxy_pass http://nextcloud/;
}
location /reicio/ {
include /local/headers.conf;
proxy_pass http://reicio/;
}
location / {
include /local/headers.conf;
proxy_pass http://website;
}
}
EOF
destination = "local/upstreams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
}
}
group "ingress-blowhole" {
count = 1
constraint {
attribute = "${attr.unique.hostname}"
value = "blowhole"
}
network {
mode = "bridge"
port "http" {
static = 8080
to = 80
}
port "https" {
static = 443
to = 443
}
port "jellyfin" {
static = 8096
to = 8096
}
port "imap" {
static = 143
to = 143
}
}
service {
name = "ingress-blowhole"
port = "80"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "jellyfin"
local_bind_port = 8001
}
upstreams {
destination_name = "zigbee2mqtt"
local_bind_port = 8002
}
upstreams {
destination_name = "home-assistant"
local_bind_port = 8003
}
upstreams {
destination_name = "syncthing"
local_bind_port = 8004
}
upstreams {
destination_name = "dovecot-imap"
local_bind_port = 8005
}
}
}
sidecar_task {
resources {
cpu = 75
memory = 48
}
config {
memory_hard_limit = 96
image = "envoyproxy/envoy:v1.20.2"
}
}
}
}
task "nginx" {
driver = "docker"
config {
image = "ra-systems-ingress-blowhole:local"
ports = ["http", "https", "jellyfin"]
memory_hard_limit = 128
}
resources {
cpu = 200
memory = 32
}
template {
data = <<EOF
ssl_certificate_key /etc/letsencrypt/live/redalder.org/privkey.pem;
ssl_certificate /etc/letsencrypt/live/redalder.org/fullchain.pem;
ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_timeout 1440m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
EOF
destination = "local/ssl.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
EOF
destination = "local/headers.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
add_header X-Frame-Options "SAMEORIGIN";
add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
EOF
destination = "local/security.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
upstream jellyfin {
server {{ env "NOMAD_UPSTREAM_ADDR_jellyfin" }};
}
upstream zigbee2mqtt {
server {{ env "NOMAD_UPSTREAM_ADDR_zigbee2mqtt" }};
}
upstream home-assistant {
server {{ env "NOMAD_UPSTREAM_ADDR_home-assistant" }};
}
upstream syncthing {
server {{ env "NOMAD_UPSTREAM_ADDR_syncthing" }};
}
server {
listen 8096;
server_name _;
include /local/security.conf;
client_max_body_size 100M;
location /jellyfin/ {
# Proxy main Jellyfin traffic
# The / at the end is significant.
# https://www.acunetix.com/blog/articles/a-fresh-look-on-reverse-proxy-related-attacks/
proxy_pass http://jellyfin/jellyfin/;
proxy_pass_request_headers on;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
# Disable buffering when the nginx proxy gets very resource heavy upon streaming
proxy_buffering off;
}
location /syncthing/ {
include /local/headers.conf;
proxy_pass http://syncthing/;
}
location ~ "^/(static/icons/|static/fonts/|static/translations/|static/images/|static/panels/|static/polyfills|api/|auth/|frontend_latest/|frontend_es5/|local/|lovelace|map|config|developer-tools|history|logbook|profile|states|hassio|onboarding.html|service_worker.js|authorize.html|manifest.json)" {
include /local/headers.conf;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://home-assistant;
}
location /home-assistant {
include /local/headers.conf;
rewrite /home-assistant/(.*) /$1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://home-assistant;
}
location /zigbee2mqtt/ {
include /local/headers.conf;
# rewrite /zigbee2mqtt/(.*) /$1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://zigbee2mqtt/;
}
}
server {
listen 80;
server_name _;
include /local/security.conf;
client_max_body_size 100M;
location /jellyfin/ {
# Proxy main Jellyfin traffic
# The / at the end is significant.
# https://www.acunetix.com/blog/articles/a-fresh-look-on-reverse-proxy-related-attacks/
proxy_pass http://jellyfin/jellyfin/;
proxy_pass_request_headers on;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
# Disable buffering when the nginx proxy gets very resource heavy upon streaming
proxy_buffering off;
}
}
EOF
destination = "local/upstreams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
upstream dovecot-imap {
server {{ env "NOMAD_UPSTREAM_ADDR_dovecot-imap" }};
}
server {
listen 143;
proxy_pass dovecot-imap;
}
EOF
destination = "local/streams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
}
}
}

View file

@ -1,101 +0,0 @@
variable "flake_rev" {
type = string
}
variable "flake_host" {
type = string
}
variable "flake_host_alt" {
type = string
}
variable "flake_sha" {
type = string
}
module "camptules" {
source = "./camptules"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "email" {
source = "./email"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "gitea" {
source = "./gitea"
flake_ref = "${var.flake_host_alt}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "home-assistant" {
source = "./home-assistant"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "hydra" {
source = "./hydra"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "ingress" {
source = "./ingress"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "jellyfin" {
source = "./jellyfin"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "mesh" {
source = "./syncthing"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
// minecraft
module "plugin-nfs" {
source = "./plugin-nfs"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "reicio" {
source = "./reicio"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "syncthing" {
source = "./syncthing"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
module "website" {
source = "./website"
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}

View file

@ -1,19 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
resource "nomad_job" "mesh" {
jobspec = file("${path.module}/nomad.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_sha = var.flake_sha
}
}
}

View file

@ -1,19 +0,0 @@
type = "csi"
id = "minecraft"
name = "minecraft"
plugin_id = "nfs"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context {
server = "10.64.1.201"
share = "/var/nfs/minecraft/atm6"
}
mount_options {
fs_type = "nfs"
mount_flags = [ "nolock" ]
}

View file

@ -1,58 +0,0 @@
job "minecraft" {
datacenters = [ "homelab-1" ]
type = "service"
group "minecraft" {
count = 1
volume "minecraft" {
type = "csi"
source = "minecraft"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
network {
mode = "bridge"
}
service {
name = "minecraft"
port = "25565"
connect {
sidecar_service {}
}
}
task "minecraft" {
driver = "docker"
volume_mount {
volume = "minecraft"
destination = "/run/cfg/minecraft"
read_only = false
}
config {
image = "nixng-minecraft:local"
memory_hard_limit = 8192
cap_add = ["sys_admin"]
devices = [
{
host_path = "/dev/fuse"
container_path = "/dev/fuse"
}
]
}
resources {
cpu = 4096
memory = 4096
}
}
}
}

View file

@ -1,31 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
resource "nomad_job" "nfs-controller" {
jobspec = file("${path.module}/nfs-controller.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_sha = var.flake_sha
}
}
}
resource "nomad_job" "nfs-nodes" {
jobspec = file("${path.module}/nfs-nodes.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_sha = var.flake_sha
}
}
}

View file

@ -1,43 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
job "plugin-nfs-nodes" {
datacenters = [ "homelab-1", "do-1" ]
type = "system"
group "nodes" {
task "plugin" {
driver = "docker"
config {
image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest" # "csi-driver-nfs:local"
args = [
"--endpoint=unix://csi/csi.sock",
"--nodeid=blowhole-0",
"--logtostderr",
"--v=5",
]
privileged = true
}
csi_plugin {
id = "nfs"
type = "node"
mount_dir = "/csi"
}
resources {
cpu = 250
memory = 128
}
}
}
}

View file

@ -1,19 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
resource "nomad_job" "reicio" {
jobspec = file("${path.module}/reicio.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_sha = var.flake_sha
}
}
}

View file

@ -1,111 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
job "syncthing" {
datacenters = [ "homelab-1" ]
type = "service"
group "syncthing" {
count = 1
volume "syncthing-data" {
type = "csi"
source = "syncthing-data"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "syncthing-config" {
type = "csi"
source = "syncthing-config"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "syncthing-storage" {
type = "csi"
source = "syncthing-storage"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
network {
mode = "bridge"
}
service {
name = "syncthing"
port = "8384"
## Syncthing with auth enabled returns an unauthorized (4xx) response and
## Nomad interprets it as service failure.
# check {
# type = "http"
# address_mode = "alloc"
# path = "/"
# port = "8384"
# interval = "10s"
# timeout = "10s"
# }
connect {
sidecar_service {}
sidecar_task {
resources {
cpu = 75
memory = 48
}
config {
memory_hard_limit = 96
}
}
}
}
task "syncthing" {
driver = "containerd-driver"
config {
flake_ref = "${var.flake_ref}#nixngSystems.syncthing.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
resources {
cpu = 128
memory = 128
}
volume_mount {
volume = "syncthing-data"
destination = "/var/syncthing/data"
read_only = false
}
volume_mount {
volume = "syncthing-config"
destination = "/var/syncthing/config"
read_only = false
}
volume_mount {
volume = "syncthing-storage"
destination = "/var/syncthing/storage"
read_only = false
}
}
}
}

View file

@ -1,24 +0,0 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
# data "nomad_plugin" "nomad-driver-containerd" {
# plugin_id = "nomad-driver-containerd"
# wait_for_healthy = true
# }
resource "nomad_job" "website" {
jobspec = file("${path.module}/nomad.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_sha = var.flake_sha
}
}
}

main.tf
View file

@ -8,6 +8,14 @@ terraform {
provider "nomad" {
address = "http://10.64.1.201:4646"
region = "homelab-1"
alias = "homelab"
}
provider "nomad" {
address = "http://10.64.1.201:4646"
region = "do-1"
alias = "do"
}
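# Both aliased providers target the same server address; the region
# attribute selects which federated Nomad region each alias talks to.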
provider "vault" {
@ -38,3 +46,12 @@ module "infrastructure" {
flake_host_alt = var.flake_host_alt
flake_sha = var.flake_sha
}
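# Reorganized job definitions live under ./nomad; its inputs mirror the
# existing infrastructure module while jobs migrate over.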
module "nomad" {
source = "./nomad"
flake_rev = var.flake_rev
flake_host = var.flake_host
flake_host_alt = var.flake_host_alt
flake_sha = var.flake_sha
}

nomad/main.tf
View file

@ -0,0 +1,24 @@
variable "flake_rev" {
type = string
}
variable "flake_host" {
type = string
}
variable "flake_host_alt" {
type = string
}
variable "flake_sha" {
type = string
}
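# Per-region providers and shared plugins (NFS, mesh gateways) are grouped
# under ./regions.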
module "regions" {
source = "./regions"
flake_rev = var.flake_rev
flake_host = var.flake_host
flake_host_alt = var.flake_host_alt
flake_sha = var.flake_sha
}

View file

@ -1,21 +1,16 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
variable "datacenters" {
type = list(string)
}
job "gateway-mesh" {
datacenters = [ "homelab-1", "do-1" ]
type = "system"
datacenters = var.datacenters
group "envoy" {
network {
mode = "bridge"
port "mesh_wan" {
host_network = "mesh"
}
}

View file

@ -0,0 +1,20 @@
variable "datacenters" {
type = list(string)
}
terraform {
required_providers {
nomad = {}
}
}
resource "nomad_job" "mesh" {
jobspec = file("${path.module}/main.hcl")
hcl2 {
enabled = true
vars = {
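# hcl2 job variables are passed as strings, so list values are JSON-encoded.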
"datacenters" = jsonencode(var.datacenters)
}
}
}

View file

@ -1,25 +1,24 @@
variable "flake_ref" {
variable "region" {
type = string
}
variable "flake_sha" {
type = string
variable "datacenters" {
type = list(string)
}
job "plugin-nfs-controller" {
datacenters = [ "homelab-1" ]
job "nfs-controller" {
datacenters = var.datacenters
region = var.region
group "controller" {
task "plugin" {
driver = "docker"
config {
image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest" # "csi-driver-nfs:local"
image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest"
args = [
"--endpoint=unix://csi/csi.sock",
"--nodeid=contoller",
"--logtostderr",
"-v=5",
]
}

nomad/modules/nfs/main.tf
View file

@ -0,0 +1,42 @@
variable "region" {
type = string
}
variable "node_dcs" {
type = list(string)
}
variable "controller_dcs" {
type = list(string)
default = null
}
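# When controller_dcs is unset, the controller falls back to the node
# datacenters (see the conditional below).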
terraform {
required_providers {
nomad = {}
}
}
resource "nomad_job" "nfs-controller" {
jobspec = file("${path.module}/controller.hcl")
hcl2 {
enabled = true
vars = {
"region" = var.region
"datacenters" = jsonencode(var.controller_dcs == null ? var.node_dcs : var.controller_dcs)
}
}
}
resource "nomad_job" "nfs-node" {
jobspec = file("${path.module}/node.hcl")
hcl2 {
enabled = true
vars = {
"region" = var.region
"datacenters" = jsonencode(var.node_dcs)
}
}
}

View file

@ -0,0 +1,44 @@
variable "region" {
type = string
}
variable "datacenters" {
type = list(string)
}
job "nfs-node" {
datacenters = var.datacenters
region = var.region
type = "system"
group "nodes" {
task "plugin" {
driver = "docker"
config {
image = "mcr.microsoft.com/k8s/csi/nfs-csi:latest"
args = [
"--v=5",
"--nodeid=${attr.unique.hostname}",
"--endpoint=unix:///csi/csi.sock",
"--drivername=nfs.csi.k8s.io"
]
privileged = true
}
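# Registers this task as CSI node plugin "nfs"; Nomad mounts the plugin
# socket directory at /csi.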
csi_plugin {
id = "nfs"
type = "node"
mount_dir = "/csi"
}
resources {
cpu = 250
memory = 128
}
}
}
}

View file

@ -0,0 +1,83 @@
upstream gitea {
server {{ env "NOMAD_UPSTREAM_ADDR_gitea" }};
}
upstream hydra {
server {{ env "NOMAD_UPSTREAM_ADDR_hydra" }};
}
upstream nextcloud {
server {{ env "NOMAD_UPSTREAM_ADDR_nextcloud" }};
}
upstream website {
server {{ env "NOMAD_UPSTREAM_ADDR_website" }};
}
upstream reicio {
server {{ env "NOMAD_UPSTREAM_ADDR_reicio" }};
}
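# Default TLS vhost: requests for unknown server names get a 404.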
server {
listen 443 ssl;
server_name _;
include /local/ssl.conf;
return 404;
}
server {
listen 443 ssl;
server_name gitea.redalder.org;
include /local/security.conf;
include /local/ssl.conf;
client_max_body_size 100M;
location / {
include /local/headers.conf;
proxy_pass http://gitea;
}
}
server {
listen 443 ssl;
server_name hydra.redalder.org;
include /local/security.conf;
include /local/ssl.conf;
location / {
include /local/headers.conf;
proxy_pass http://hydra;
}
}
server {
listen 443 ssl;
server_name redalder.org nixng.org;
include /local/security.conf;
include /local/ssl.conf;
location /nextcloud/ {
include /local/headers.conf;
proxy_pass http://nextcloud/;
}
location /reicio/ {
include /local/headers.conf;
proxy_pass http://reicio/;
}
location / {
include /local/headers.conf;
proxy_pass http://website;
}
}

View file

@ -0,0 +1,219 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
variable "upstreams" {
type = string
}
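# Rendered nginx upstream config, injected below via a template block.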
job "ingress" {
datacenters = [ "do-1" ]
region = "do-1"
type = "service"
group "ingress" {
count = 1
constraint {
attribute = "${attr.unique.hostname}"
value = "toothpick"
}
volume "ingress-letsencrypt" {
type = "csi"
source = "ingress-letsencrypt"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
network {
mode = "bridge"
port "http" {
static = 80
to = 80
host_network = "public"
}
port "https" {
static = 443
to = 443
host_network = "public"
}
port "minecraft" {
static = 25565
to = 25565
host_network = "public"
}
}
service {
name = "ingress-toothpick"
port = "http"
connect {
sidecar_service {
proxy {
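# Upstreams in homelab-1 are reached through the local Consul mesh gateway.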
upstreams {
destination_name = "gitea"
local_bind_port = 3000
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "hydra"
local_bind_port = 8666
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "nextcloud"
local_bind_port = 8777
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "website"
local_bind_port = 8080
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "minecraft"
local_bind_port = 2666
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
upstreams {
destination_name = "reicio"
local_bind_port = 8000
datacenter = "homelab-1"
mesh_gateway {
mode = "local"
}
}
}
}
}
}
task "nginx" {
driver = "docker"
volume_mount {
volume = "ingress-letsencrypt"
destination = "/etc/letsencrypt"
read_only = false
}
# artifact {
# source = "http://hydra/build/99/download/1/image.tar.gz"
# }
config {
# load = "nixng-ingress.tar.gz"
image = "nixng-ingress:local"
ports = ["http", "https", "minecraft"]
memory_hard_limit = 128
}
resources {
cpu = 200
memory = 32
}
template {
data = <<EOF
ssl_certificate_key /etc/letsencrypt/live/redalder.org/privkey.pem;
ssl_certificate /etc/letsencrypt/live/redalder.org/fullchain.pem;
ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_timeout 1440m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
EOF
destination = "local/ssl.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
EOF
destination = "local/headers.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
add_header X-Frame-Options "SAMEORIGIN";
add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
EOF
destination = "local/security.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
upstream minecraft {
server {{ env "NOMAD_UPSTREAM_ADDR_minecraft" }};
}
server {
listen 25565;
proxy_pass minecraft;
}
EOF
destination = "local/streams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = var.upstreams
destination = "local/upstreams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
}
}
}

View file

@ -1,13 +1,8 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
data "local_file" "ingress-upstreams" {
filename = "${path.module}/ingress-upstreams.conf"
}
resource "nomad_volume" "ingress-letsencrypt" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "ingress-letsencrypt"
@ -31,13 +26,15 @@ resource "nomad_volume" "ingress-letsencrypt" {
}
resource "nomad_job" "ingress" {
jobspec = file("${path.module}/nomad.hcl")
jobspec = file("${path.module}/ingress.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_ref = "${var.flake_host_alt}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
upstreams = data.local_file.ingress-upstreams.content
}
}
}

View file

@ -0,0 +1,43 @@
variable "flake_rev" {
type = string
}
variable "flake_sha" {
type = string
}
variable "flake_host" {
type = string
}
variable "flake_host_alt" {
type = string
}
provider "nomad" {
address = "http://10.64.1.201:4646"
region = "do-1"
alias = "do-1"
}
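# Every module in this region is pinned to do-1 through the aliased provider.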
module "nfs" {
source = "../../modules/nfs"
node_dcs = [ "do-1" ]
region = "do-1"
providers = {
nomad = nomad.do-1
}
}
module "gateway-mesh" {
source = "../../modules/gateway-mesh"
datacenters = [ "do-1" ]
providers = {
nomad = nomad.do-1
}
}

View file

@ -0,0 +1,57 @@
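# Baikal keeps its state on two NFS-backed CSI volumes exported by blowhole.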
resource "nomad_volume" "baikal-specific" {
type = "csi"
plugin_id = "nfs"
volume_id = "baikal-specific"
name = "baikal-specific"
external_id = "baikal-specific"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/baikal/specific"
}
mount_options {
fs_type = "nfs"
mount_flags = [ "nolock", "hard" ]
}
}
resource "nomad_volume" "baikal-config" {
type = "csi"
plugin_id = "nfs"
volume_id = "baikal-config"
name = "baikal-config"
external_id = "baikal-config"
capability {
access_mode = "single-node-writer"
attachment_mode = "file-system"
}
context = {
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/baikal/config"
}
mount_options {
fs_type = "nfs"
mount_flags = [ "nolock", "hard" ]
}
}
resource "nomad_job" "baikal" {
jobspec = file("${path.module}/job/baikal.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}
}

View file

@ -0,0 +1,20 @@
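# Vault policies are now inlined as heredocs instead of separate policy files.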
resource "vault_policy" "camputules-policy" {
name = "camptules-policy"
policy = <<EOF
path "kv/data/camptules" {
capabilities = ["read"]
}
EOF
}
resource "nomad_job" "camptules" {
jobspec = file("${path.module}/job/camptules.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}
}

View file

@ -1,23 +1,22 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
resource "vault_policy" "dovecot-policy" {
name = "dovecot-policy"
policy = file("${path.module}/dovecot-policy.hcl")
policy = <<EOF
path "kv/data/getmail" {
capabilities = ["read"]
}
EOF
}
resource "vault_policy" "getmail-policy" {
name = "getmail-policy"
policy = file("${path.module}/getmail-policy.hcl")
policy = <<EOF
path "kv/data/dovecot" {
capabilities = ["read"]
}
EOF
}
resource "nomad_volume" "dovecot_maildir" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "dovecot_maildir"
@ -30,7 +29,7 @@ resource "nomad_volume" "dovecot_maildir" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/dovecot/maildir"
}
@ -41,7 +40,6 @@ resource "nomad_volume" "dovecot_maildir" {
}
resource "nomad_volume" "getmail_getmail-d" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "getmail_getmail-d"
@ -54,7 +52,7 @@ resource "nomad_volume" "getmail_getmail-d" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/getmail/getmail.d"
}
@ -65,12 +63,12 @@ resource "nomad_volume" "getmail_getmail-d" {
}
resource "nomad_job" "email" {
jobspec = file("${path.module}/nomad.hcl")
jobspec = file("${path.module}/job/email.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}

View file

@ -1,18 +1,4 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
# data "nomad_plugin" "nomad-driver-containerd" {
# plugin_id = "nomad-driver-containerd"
# wait_for_healthy = true
# }
resource "nomad_volume" "gitea-db" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "gitea-db"
@ -25,7 +11,7 @@ resource "nomad_volume" "gitea-db" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/gitea-db"
}
@ -36,7 +22,6 @@ resource "nomad_volume" "gitea-db" {
}
resource "nomad_volume" "gitea-data" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "gitea-data"
@ -49,7 +34,7 @@ resource "nomad_volume" "gitea-data" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/gitea-data"
}
@ -61,16 +46,20 @@ resource "nomad_volume" "gitea-data" {
resource "vault_policy" "gitea-policy" {
name = "gitea-policy"
policy = file("${path.module}/gitea-policy.hcl")
policy = <<EOF
path "kv/data/gitea" {
capabilities = ["read"]
}
EOF
}
resource "nomad_job" "gitea" {
jobspec = file("${path.module}/nomad.hcl")
jobspec = file("${path.module}/job/gitea.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_ref = "${var.flake_host_alt}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}

View file

@ -1,13 +1,4 @@
ariable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
resource "nomad_volume" "home-assistant_hass" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "home-assistant_hass"
@ -20,7 +11,7 @@ resource "nomad_volume" "home-assistant_hass" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/home-assistant_hass"
}
@ -31,7 +22,6 @@ resource "nomad_volume" "home-assistant_hass" {
}
resource "nomad_volume" "home-assistant_zigbee2mqtt" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "home-assistant_zigbee2mqtt"
@ -44,7 +34,7 @@ resource "nomad_volume" "home-assistant_zigbee2mqtt" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/home-assistant_zigbee2mqtt"
}
@ -55,7 +45,6 @@ resource "nomad_volume" "home-assistant_zigbee2mqtt" {
}
resource "nomad_volume" "home-assistant_mosquitto" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "home-assistant_mosquitto"
@ -68,7 +57,7 @@ resource "nomad_volume" "home-assistant_mosquitto" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/home-assistant_mosquitto"
}
@ -80,26 +69,42 @@ resource "nomad_volume" "home-assistant_mosquitto" {
resource "vault_policy" "home-assistant-policy" {
name = "home-assistant-policy"
policy = file("${path.module}/home-assistant-policy.hcl")
policy = <<EOF
path "kv/data/home-assistant" {
capabilities = ["read"]
}
EOF
}
resource "vault_policy" "zigbee2mqtt-policy" {
name = "zigbee2mqtt-policy"
policy = file("${path.module}/zigbee2mqtt-policy.hcl")
policy = <<EOF
path "kv/data/mqtt" {
capabilities = ["read"]
}
path "kv/data/zigbee2mqtt" {
capabilities = ["read"]
}
EOF
}
resource "vault_policy" "mosquitto-policy" {
name = "mosquitto-policy"
policy = file("${path.module}/mosquitto-policy.hcl")
policy = <<EOF
path "kv/data/mqtt" {
capabilities = ["read"]
}
EOF
}
resource "nomad_job" "home-assistant" {
jobspec = file("${path.module}/nomad.hcl")
jobspec = file("${path.module}/job/home-assistant.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}

View file

@ -1,16 +1,3 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
# data "nomad_plugin" "nomad-driver-containerd" {
# plugin_id = "nomad-driver-containerd"
# wait_for_healthy = true
# }
resource "nomad_volume" "hydra-db" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
@ -25,7 +12,7 @@ resource "nomad_volume" "hydra-db" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/hydra-db"
}
@ -49,7 +36,7 @@ resource "nomad_volume" "hydra-data" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/hydra-data"
}
@ -73,7 +60,7 @@ resource "nomad_volume" "hydra-nix" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/hydra-nix"
}
@ -85,16 +72,20 @@ resource "nomad_volume" "hydra-nix" {
resource "vault_policy" "hydra-policy" {
name = "hydra-policy"
policy = file("${path.module}/hydra-policy.hcl")
policy = <<EOF
path "kv/data/hydra" {
capabilities = ["read"]
}
EOF
}
resource "nomad_job" "hydra" {
jobspec = file("${path.module}/nomad.hcl")
jobspec = file("${path.module}/job/hydra.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}

View file

@ -0,0 +1,139 @@
upstream jellyfin {
server {{ env "NOMAD_UPSTREAM_ADDR_jellyfin" }};
}
upstream zigbee2mqtt {
server {{ env "NOMAD_UPSTREAM_ADDR_zigbee2mqtt" }};
}
upstream home-assistant {
server {{ env "NOMAD_UPSTREAM_ADDR_home-assistant" }};
}
upstream syncthing {
server {{ env "NOMAD_UPSTREAM_ADDR_syncthing" }};
}
upstream baikal {
server {{ env "NOMAD_UPSTREAM_ADDR_baikal" }};
}
server {
listen 8096;
server_name _;
include /local/security.conf;
client_max_body_size 100M;
location /jellyfin/ {
# Proxy main Jellyfin traffic
# The / at the end is significant.
# https://www.acunetix.com/blog/articles/a-fresh-look-on-reverse-proxy-related-attacks/
proxy_pass http://jellyfin/jellyfin/;
proxy_pass_request_headers on;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
# Disable buffering when the nginx proxy gets very resource heavy upon streaming
proxy_buffering off;
}
location /syncthing/ {
include /local/headers.conf;
proxy_pass http://syncthing/;
}
location ~ "^/(static/icons/|static/fonts/|static/translations/|static/images/|static/panels/|static/polyfills|api/|auth/|frontend_latest/|frontend_es5/|local/|lovelace|map|config|developer-tools|history|logbook|profile|states|hassio|onboarding.html|service_worker.js|authorize.html|manifest.json)" {
include /local/headers.conf;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://home-assistant;
}
location /home-assistant {
include /local/headers.conf;
rewrite /home-assistant/(.*) /$1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://home-assistant;
}
location /zigbee2mqtt/ {
include /local/headers.conf;
# rewrite /zigbee2mqtt/(.*) /$1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://zigbee2mqtt/;
}
location /baikal/ {
include /local/headers.conf;
# rewrite /baikal/(.*) /$1 break;
proxy_http_version 1.1;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection "upgrade";
proxy_pass http://baikal/;
}
}
server {
listen 80;
server_name _;
include /local/security.conf;
client_max_body_size 100M;
location /jellyfin/ {
# Proxy main Jellyfin traffic
# The / at the end is significant.
# https://www.acunetix.com/blog/articles/a-fresh-look-on-reverse-proxy-related-attacks/
proxy_pass http://jellyfin/jellyfin/;
proxy_pass_request_headers on;
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
proxy_set_header X-Forwarded-Host $http_host;
proxy_set_header Upgrade $http_upgrade;
proxy_set_header Connection $http_connection;
# Disable buffering when the nginx proxy gets very resource heavy upon streaming
proxy_buffering off;
}
}

View file

@ -0,0 +1,17 @@
data "local_file" "ingress-upstreams" {
filename = "${path.module}/ingress-upstreams.conf"
}
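# Terraform reads the upstreams file and passes its content to the jobspec
# as a plain string variable.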
resource "nomad_job" "ingress" {
jobspec = file("${path.module}/job/ingress.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
upstreams = data.local_file.ingress-upstreams.content
}
}
}

View file

@ -1,18 +1,4 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
# data "nomad_plugin" "nomad-driver-containerd" {
# plugin_id = "nomad-driver-containerd"
# wait_for_healthy = true
# }
resource "nomad_volume" "jellyfin-cache" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "jellyfin-cache"
@ -25,7 +11,7 @@ resource "nomad_volume" "jellyfin-cache" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/jellyfin/cache"
}
@ -36,7 +22,6 @@ resource "nomad_volume" "jellyfin-cache" {
}
resource "nomad_volume" "jellyfin-config" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "jellyfin-config"
@ -49,7 +34,7 @@ resource "nomad_volume" "jellyfin-config" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/jellyfin/config"
}
@ -60,7 +45,6 @@ resource "nomad_volume" "jellyfin-config" {
}
resource "nomad_volume" "jellyfin-media" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "jellyfin-media"
@ -73,7 +57,7 @@ resource "nomad_volume" "jellyfin-media" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/jellyfin/media"
}
@ -84,7 +68,7 @@ resource "nomad_volume" "jellyfin-media" {
}
resource "nomad_job" "jellyfin" {
jobspec = file("${path.module}/nomad.hcl")
jobspec = file("${path.module}/job/jellyfin.hcl")
hcl2 {
enabled = true

View file

@ -0,0 +1,96 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
job "baikal" {
datacenters = [ "homelab-1" ]
type = "service"
group "baikal" {
count = 1
volume "baikal-specific" {
type = "csi"
source = "baikal-specific"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "baikal-config" {
type = "csi"
source = "baikal-config"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
network {
mode = "bridge"
}
service {
name = "baikal"
port = "80"
## Syncthing with auth enabled returns an unauthorized (4xx) response and
## Nomad interprets it as service failure.
# check {
# type = "http"
# address_mode = "alloc"
# path = "/"
# port = "8384"
# interval = "10s"
# timeout = "10s"
# }
connect {
sidecar_service {}
sidecar_task {
resources {
cpu = 75
memory = 48
}
config {
memory_hard_limit = 96
}
}
}
}
task "baikal" {
driver = "containerd-driver"
config {
flake_ref = "${var.flake_ref}#nixngSystems.baikal.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
resources {
cpu = 128
memory = 128
}
volume_mount {
volume = "baikal-specific"
destination = "/var/baikal/specific"
read_only = false
}
volume_mount {
volume = "baikal-config"
destination = "/var/baikal/config"
read_only = false
}
}
}
}

View file

@ -21,37 +21,37 @@ job "camptules" {
driver = "containerd-driver"
config {
flake_ref = "${var.flake_ref}#nixngSystems.camptules.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.camptules.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
resources {
cpu = 512
memory = 128
memory_max = 192
cpu = 512
memory = 128
memory_max = 192
}
vault {
policies = ["camptules-policy"]
policies = ["camptules-policy"]
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/camptules" }}
{{ .Data.data.token }}
{{ end }}
EOF
destination = "secrets/cfg"
destination = "secrets/cfg"
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/camptules" }}
{{ .Data.data.ytb_api_key }}
{{ end }}
EOF
destination = "secrets/ytb-api-key"
destination = "secrets/ytb-api-key"
}
}
}

View file

@ -50,15 +50,15 @@ job "email" {
port = "666"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "dovecot-lmtp"
local_bind_port = 24
datacenter = "homelab-1"
}
}
}
sidecar_service {
proxy {
upstreams {
destination_name = "dovecot-lmtp"
local_bind_port = 24
datacenter = "homelab-1"
}
}
}
}
}
@ -66,29 +66,29 @@ job "email" {
driver = "containerd-driver"
volume_mount {
volume = "dovecot_maildir"
destination = "/maildir"
read_only = false
volume = "dovecot_maildir"
destination = "/maildir"
read_only = false
}
volume_mount {
volume = "getmail_getmail-d"
destination = "/getmail.d"
read_only = false
volume = "getmail_getmail-d"
destination = "/getmail.d"
read_only = false
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.getmail.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.getmail.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
env {
}
resources {
cpu = 256
memory = 512
cpu = 256
memory = 512
}
}
}
@ -119,7 +119,7 @@ job "email" {
port = "24"
connect {
sidecar_service {}
sidecar_service {}
}
}
@ -128,7 +128,7 @@ job "email" {
port = "143"
connect {
sidecar_service {}
sidecar_service {}
}
}
@ -136,35 +136,35 @@ job "email" {
driver = "containerd-driver"
volume_mount {
volume = "dovecot_maildir"
destination = "/maildir"
read_only = false
volume = "dovecot_maildir"
destination = "/maildir"
read_only = false
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.dovecot.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.dovecot.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
env {
}
resources {
cpu = 128
memory = 256
cpu = 128
memory = 256
}
vault {
policies = ["dovecot-policy"]
policies = ["dovecot-policy"]
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/dovecot" }}{{ .Data.data.passwd }}{{ end }}
EOF
destination = "secrets/passwd.dovecot"
change_mode = "noop"
destination = "secrets/passwd.dovecot"
change_mode = "noop"
}
}
}

View file

@ -48,18 +48,18 @@ job "gitea" {
service {
name = "gitea"
port = "3000"
check {
type = "http"
address_mode = "alloc"
path = "/"
port = "3000"
interval = "2s"
timeout = "2s"
type = "http"
address_mode = "alloc"
path = "/"
port = "3000"
interval = "2s"
timeout = "2s"
}
connect {
sidecar_service {}
sidecar_service {}
}
}
@ -67,63 +67,63 @@ job "gitea" {
driver = "containerd-driver"
volume_mount {
volume = "gitea-data"
destination = "/data/gitea"
read_only = false
volume = "gitea-data"
destination = "/data/gitea"
read_only = false
}
volume_mount {
volume = "gitea-db"
destination = "/var/lib/mysql"
read_only = false
volume = "gitea-db"
destination = "/var/lib/mysql"
read_only = false
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.gitea.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.gitea.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
env {
USER_UID = "5001"
USER_GID = "5001"
USER_UID = "5001"
USER_GID = "5001"
}
resources {
cpu = 500
memory = 1024
cpu = 500
memory = 1024
}
vault {
policies = ["gitea-policy"]
policies = ["gitea-policy"]
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/gitea" }}{{ .Data.data.secret_key }}{{ end }}
EOF
destination = "secrets/secret_key"
destination = "secrets/secret_key"
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/gitea" }}{{ .Data.data.internal_token }}{{ end }}
EOF
destination = "secrets/internal_token"
destination = "secrets/internal_token"
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/gitea" }}{{ .Data.data.jwt_secret }}{{ end }}
EOF
destination = "secrets/jwt_secret"
destination = "secrets/jwt_secret"
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/gitea" }}{{ .Data.data.lfs_jwt_secret }}{{ end }}
EOF
destination = "secrets/lfs_jwt_secret"
destination = "secrets/lfs_jwt_secret"
}
}
}

View file

@ -27,8 +27,8 @@ job "home-assistant" {
mode = "bridge"
port "http" {
static = 8456
to = 8456
static = 8456
to = 8456
}
}
@ -46,15 +46,15 @@ job "home-assistant" {
port = "8456"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "mqtt"
local_bind_port = 1883
datacenter = "homelab-1"
}
}
}
sidecar_service {
proxy {
upstreams {
destination_name = "mqtt"
local_bind_port = 1883
datacenter = "homelab-1"
}
}
}
}
}
@ -62,33 +62,33 @@ job "home-assistant" {
driver = "containerd-driver"
vault {
policies = ["zigbee2mqtt-policy"]
policies = ["zigbee2mqtt-policy"]
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.zigbee2mqtt.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.zigbee2mqtt.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
devices = [
"/dev/ttyUSB0" #"serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_4c004e9c53c9eb118a9f8b4f1d69213e-if00-port0"
]
devices = [
"/dev/ttyUSB0" #"serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_4c004e9c53c9eb118a9f8b4f1d69213e-if00-port0"
]
}
resources {
cpu = 128
memory = 128
memory_max = 256
cpu = 128
memory = 128
memory_max = 256
}
volume_mount {
volume = "home-assistant_zigbee2mqtt"
destination = "/var/zigbee2mqtt"
read_only = false
volume = "home-assistant_zigbee2mqtt"
destination = "/var/zigbee2mqtt"
read_only = false
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/zigbee2mqtt" }}
XIAOMI_HUB_ADDRESS={{ .Data.data.xiaomi_hub_address }}
{{ end }}
@ -97,9 +97,9 @@ MQTT_PASSWORD={{ .Data.data.password }}
MQTT_USER={{ .Data.data.user }}
{{ end }}
EOF
destination = "secrets/environment"
env = true
perms = "444"
destination = "secrets/environment"
env = true
perms = "444"
}
}
}
@ -130,7 +130,7 @@ EOF
port = "1883"
connect {
sidecar_service {}
sidecar_service {}
}
}
@ -138,35 +138,35 @@ EOF
driver = "containerd-driver"
vault {
policies = ["mosquitto-policy"]
policies = ["mosquitto-policy"]
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.mosquitto.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.mosquitto.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
resources {
cpu = 128
memory = 128
memory_max = 256
cpu = 128
memory = 128
memory_max = 256
}
volume_mount {
volume = "home-assistant_mosquitto"
destination = "/var/mosquitto"
read_only = false
volume = "home-assistant_mosquitto"
destination = "/var/mosquitto"
read_only = false
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/mqtt" }}
{{ .Data.data.user}}:{{ .Data.data.hash }}
{{ end }}
EOF
destination = "secrets/mqtt_password"
perms = "444"
destination = "secrets/mqtt_password"
perms = "444"
}
}
}
@ -183,8 +183,8 @@ EOF
mode = "bridge"
port "http" {
static = 8123
to = 8123
static = 8123
to = 8123
}
}
@ -202,15 +202,15 @@ EOF
port = "8123"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "mqtt"
local_bind_port = 1883
datacenter = "homelab-1"
}
}
}
sidecar_service {
proxy {
upstreams {
destination_name = "mqtt"
local_bind_port = 1883
datacenter = "homelab-1"
}
}
}
}
}
@ -218,29 +218,29 @@ EOF
driver = "containerd-driver"
vault {
policies = ["home-assistant-policy"]
policies = ["home-assistant-policy"]
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.home-assistant.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.home-assistant.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
resources {
cpu = 128
memory = 128
memory_max = 256
cpu = 128
memory = 128
memory_max = 256
}
volume_mount {
volume = "home-assistant_hass"
destination = "/var/home-assistant"
read_only = false
volume = "home-assistant_hass"
destination = "/var/home-assistant"
read_only = false
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/home-assistant" }}
LATITUDE={{ .Data.data.latitude }}
LONGTITUDE={{ .Data.data.longtitude }}
@ -248,9 +248,9 @@ ELEVATION={{ .Data.data.elevation }}
TIME_ZONE={{ .Data.data.time_zone }}
{{ end }}
EOF
destination = "secrets/environment"
env = true
perms = "400"
destination = "secrets/environment"
env = true
perms = "400"
}
}
}

View file

@ -31,7 +31,7 @@ job "hydra" {
type = "csi"
source = "hydra-nix"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
@ -59,16 +59,16 @@ job "hydra" {
port = "3000"
check {
type = "http"
address_mode = "alloc"
path = "/"
port = "3000"
interval = "2s"
timeout = "2s"
type = "http"
address_mode = "alloc"
path = "/"
port = "3000"
interval = "2s"
timeout = "2s"
}
connect {
sidecar_service {}
sidecar_service {}
}
}
@ -76,75 +76,75 @@ job "hydra" {
driver = "containerd-driver"
volume_mount {
volume = "hydra-data"
destination = "/var/lib/hydra"
read_only = false
volume = "hydra-data"
destination = "/var/lib/hydra"
read_only = false
}
volume_mount {
volume = "hydra-nix"
destination = "/nix-persist"
read_only = false
volume = "hydra-nix"
destination = "/nix-persist"
read_only = false
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.hydra.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.hydra.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
devices = [
"/dev/fuse"
]
privileged = true
devices = [
"/dev/fuse"
]
privileged = true
}
vault {
policies = ["hydra-policy"]
policies = ["hydra-policy"]
}
resources {
cpu = 4000
memory = 1024
memory_max = 3072
cpu = 4000
memory = 1024
memory_max = 3072
}
template {
data = <<EOF
data = <<EOF
{{ with secret "kv/data/hydra" }}{{ .Data.data.nixbuild_key }}{{ end }}
EOF
destination = "secrets/ssh-key"
perms = "400"
destination = "secrets/ssh-key"
perms = "400"
}
template {
data = <<EOF
data = <<EOF
dbi:Pg:dbname=hydra;host=127.0.0.1;port=5432;user=hydra;
EOF
destination = "local/dbi"
destination = "local/dbi"
}
template {
data = <<EOF
data = <<EOF
127.0.0.1:*:*:hydra:{{ with secret "kv/data/hydra" }}{{ .Data.data.pgpass}}{{ end }}
EOF
destination = "secrets/pgpass"
perms = "400"
destination = "secrets/pgpass"
perms = "400"
}
template {
data = <<EOF
data = <<EOF
127.0.0.1:*:*:hydra:{{ with secret "kv/data/hydra" }}{{ .Data.data.pgpass}}{{ end }}
EOF
destination = "secrets/pgpass-www"
perms = "400"
destination = "secrets/pgpass-www"
perms = "400"
}
template {
data = <<EOF
data = <<EOF
127.0.0.1:*:*:hydra:{{ with secret "kv/data/hydra" }}{{ .Data.data.pgpass}}{{ end }}
EOF
destination = "secrets/pgpass-queue-runner"
perms = "400"
destination = "secrets/pgpass-queue-runner"
perms = "400"
}
}
@ -152,32 +152,32 @@ EOF
driver = "containerd-driver"
volume_mount {
volume = "hydra-db"
destination = "/var/lib/postgresql"
read_only = false
volume = "hydra-db"
destination = "/var/lib/postgresql"
read_only = false
}
config {
flake_ref = "${var.flake_ref}#nixngSystems.hydraPostgresql.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
flake_ref = "${var.flake_ref}#nixngSystems.hydraPostgresql.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
resources {
cpu = 500
memory = 128
memory_max = 256
cpu = 500
memory = 128
memory_max = 256
}
template {
data = <<EOF
data = <<EOF
alter user hydra with encrypted password '{{ with secret "kv/data/hydra" }}{{ .Data.data.pgpass}}{{ end }}';
EOF
destination = "secrets/init.sql"
destination = "secrets/init.sql"
}
vault {
policies = ["hydra-policy"]
policies = ["hydra-policy"]
}
}
}

View file

@ -0,0 +1,184 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
variable "upstreams" {
type = string
}
job "ingress" {
datacenters = [ "homelab-1" ]
type = "service"
group "ingress" {
count = 1
constraint {
attribute = "${attr.unique.hostname}"
value = "blowhole"
}
network {
mode = "bridge"
port "http" {
static = 8080
to = 80
}
port "https" {
static = 443
to = 443
}
port "jellyfin" {
static = 8096
to = 8096
}
port "imap" {
static = 143
to = 143
}
}
service {
name = "ingress-blowhole"
port = "80"
connect {
sidecar_service {
proxy {
upstreams {
destination_name = "jellyfin"
local_bind_port = 8001
}
upstreams {
destination_name = "zigbee2mqtt"
local_bind_port = 8002
}
upstreams {
destination_name = "home-assistant"
local_bind_port = 8003
}
upstreams {
destination_name = "syncthing"
local_bind_port = 8004
}
upstreams {
destination_name = "dovecot-imap"
local_bind_port = 8005
}
upstreams {
destination_name = "baikal"
local_bind_port = 8006
}
}
}
sidecar_task {
resources {
cpu = 75
memory = 48
}
config {
memory_hard_limit = 96
image = "envoyproxy/envoy:v1.20.2"
}
}
}
}
task "nginx" {
driver = "containerd-driver"
# driver = "docker"
config {
flake_ref = "${var.flake_ref}#nixngSystems.ingressBlowhole.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
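# The blowhole ingress now runs a NixNG-built image via nomad-driver-containerd
# instead of the Docker image used previously.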
resources {
cpu = 200
memory = 32
memory_max = 128
}
template {
data = <<EOF
ssl_certificate_key /etc/letsencrypt/live/redalder.org/privkey.pem;
ssl_certificate /etc/letsencrypt/live/redalder.org/fullchain.pem;
ssl_session_cache shared:le_nginx_SSL:10m;
ssl_session_timeout 1440m;
ssl_session_tickets off;
ssl_protocols TLSv1.2 TLSv1.3;
ssl_prefer_server_ciphers off;
ssl_ciphers "ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES256-GCM-SHA384:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-CHACHA20-POLY1305:ECDHE-RSA-CHACHA20-POLY1305:DHE-RSA-AES128-GCM-SHA256:DHE-RSA-AES256-GCM-SHA384";
EOF
destination = "local/ssl.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
proxy_set_header Host $host;
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
EOF
destination = "local/headers.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
add_header X-Frame-Options "SAMEORIGIN";
add_header Content-Security-Policy "default-src 'self' http: https: data: blob: 'unsafe-inline'" always;
EOF
destination = "local/security.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = var.upstreams
destination = "local/upstreams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
template {
data = <<EOF
upstream dovecot-imap {
server {{ env "NOMAD_UPSTREAM_ADDR_dovecot-imap" }};
}
server {
listen 143;
proxy_pass dovecot-imap;
}
EOF
destination = "local/streams.conf"
change_mode = "signal"
change_signal = "SIGHUP"
}
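## Note (not from the original job): the upstream/server pair above proxies
## raw TCP, which nginx only accepts inside a stream {} context, so the
## generated nginx.conf presumably includes this file along the lines of:
##
##   stream {
##     include .../local/streams.conf;
##   }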
}
}
}
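
The HTTP side of this ingress is driven entirely by var.upstreams, an opaque nginx configuration string rendered into local/upstreams.conf. A hedged sketch of how the Terraform side might populate it; the resource name, server_name, and location block are illustrative assumptions, not taken from this repository:

resource "nomad_job" "ingress" {
  jobspec = file("${path.module}/job/ingress.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
      flake_sha = var.flake_sha

      # consul-template resolves the env lookup inside the task at run time.
      upstreams = <<EOF
server {
  listen 80;
  server_name jellyfin.example.org;

  location / {
    proxy_pass http://{{ env "NOMAD_UPSTREAM_ADDR_jellyfin" }};
  }
}
EOF
    }
  }
}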

View file

@ -17,7 +17,7 @@ job "jellyfin" {
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "jellyfin-config" {
type = "csi"
@ -26,7 +26,7 @@ job "jellyfin" {
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "jellyfin-media" {
type = "csi"
@ -35,19 +35,19 @@ job "jellyfin" {
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "jellyfin-mount" {
type = "host"
read_only = true
source = "jellyfin-mount"
}
network {
mode = "bridge"
port "http" {
to = 8096
}
}
@ -56,27 +56,27 @@ job "jellyfin" {
port = "8096"
check {
  type = "http"
  address_mode = "alloc"
  path = "/"
  port = "8096"
  interval = "10s"
  timeout = "10s"
}
connect {
  sidecar_service {}

  sidecar_task {
    resources {
      cpu = 75
      memory = 48
    }

    config {
      memory_hard_limit = 96
    }
  }
}
}
@ -84,47 +84,47 @@ job "jellyfin" {
driver = "docker"
config {
  image = "jellyfin/jellyfin@sha256:b4500b143d8a0d4383c50721517908d679af1c5ec00e791f0097f8c419b3e853"
  devices = [
    {
      host_path = "/dev/dri/renderD128"
      container_path = "/dev/dri/renderD128"
    },
    {
      host_path = "/dev/dri/card0"
      container_path = "/dev/dri/card0"
    }
  ]
}
resources {
  cpu = 1024
  memory = 1024
}
volume_mount {
  volume = "jellyfin-cache"
  destination = "/cache"
  read_only = false
}
volume_mount {
  volume = "jellyfin-config"
  destination = "/config"
  read_only = false
}
volume_mount {
  volume = "jellyfin-media"
  destination = "/media"
  read_only = false
}
volume_mount {
  volume = "jellyfin-mount"
  destination = "/mount"
  read_only = true
}
}
}

View file

@ -20,18 +20,18 @@ job "reicio" {
service {
name = "reicio"
port = "8000"
check {
  type = "http"
  address_mode = "alloc"
  path = "/"
  port = "8000"
  interval = "2s"
  timeout = "2s"
}
connect {
  sidecar_service {}
}
}
@ -39,15 +39,15 @@ job "reicio" {
driver = "containerd-driver"
config {
  flake_ref = "${var.flake_ref}#nixngSystems.reicio.config.system.build.toplevel"
  flake_sha = var.flake_sha
  entrypoint = [ "init" ]
}
resources {
  cpu = 64
  memory = 16
  memory_max = 32
}
}
}

View file

@ -0,0 +1,111 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
job "syncthing" {
datacenters = [ "homelab-1" ]
type = "service"
group "syncthing" {
count = 1
volume "syncthing-data" {
type = "csi"
source = "syncthing-data"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "syncthing-config" {
type = "csi"
source = "syncthing-config"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
volume "syncthing-storage" {
type = "csi"
source = "syncthing-storage"
read_only = false
attachment_mode = "file-system"
access_mode = "single-node-writer"
}
network {
mode = "bridge"
}
service {
name = "syncthing"
port = "8384"
## Syncthing with auth enabled returns 401 Unauthorized, which Nomad
## interprets as service failure.
# check {
# type = "http"
# address_mode = "alloc"
# path = "/"
# port = "8384"
# interval = "10s"
# timeout = "10s"
# }
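## A TCP check (a sketch, not part of the original job) would sidestep the
## auth problem, since it only verifies that the listener accepts connections:
# check {
#   type         = "tcp"
#   address_mode = "alloc"
#   port         = "8384"
#   interval     = "10s"
#   timeout      = "10s"
# }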
connect {
sidecar_service {}
sidecar_task {
resources {
cpu = 75
memory = 48
}
config {
memory_hard_limit = 96
}
}
}
}
task "syncthing" {
driver = "containerd-driver"
config {
flake_ref = "${var.flake_ref}#nixngSystems.syncthing.config.system.build.toplevel"
flake_sha = var.flake_sha
entrypoint = [ "init" ]
}
resources {
cpu = 128
memory = 128
}
volume_mount {
volume = "syncthing-data"
destination = "/var/syncthing/data"
read_only = false
}
volume_mount {
volume = "syncthing-config"
destination = "/var/syncthing/config"
read_only = false
}
volume_mount {
volume = "syncthing-storage"
destination = "/var/syncthing/storage"
read_only = false
}
}
}
}

View file

@ -31,27 +31,27 @@ job "website" {
port = "80"
check {
  type = "http"
  address_mode = "alloc"
  path = "/"
  port = "80"
  interval = "10s"
  timeout = "10s"
}
connect {
  sidecar_service {}

  sidecar_task {
    resources {
      cpu = 75
      memory = 48
    }

    config {
      memory_hard_limit = 96
    }
  }
}
}
@ -59,15 +59,15 @@ job "website" {
driver = "containerd-driver"
config {
  flake_ref = "${var.flake_ref}#nixngSystems.website.config.system.build.toplevel"
  flake_sha = var.flake_sha
  entrypoint = [ "init" ]
}
resources {
  cpu = 128
  memory = 32
  memory_max = 64
}
}
}

View file

@ -0,0 +1,42 @@
variable "flake_rev" {
type = string
}
variable "flake_sha" {
type = string
}
variable "flake_host" {
type = string
}
variable "flake_host_alt" {
type = string
}
provider "nomad" {
address = "http://10.64.1.201:4646"
region = "homelab-1"
alias = "homelab-1"
}
module "nfs" {
source = "../../modules/nfs"
node_dcs = [ "homelab-1" ]
region = "homelab-1"
providers = {
nomad = nomad.homelab-1
}
}
module "gateway-mesh" {
source = "../../modules/gateway-mesh"
datacenters = [ "homelab-1" ]
providers = {
nomad = nomad.homelab-1
}
}

View file

@ -0,0 +1,11 @@
resource "nomad_job" "reicio" {
jobspec = file("${path.module}/job/reicio.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}
}

View file

@ -1,13 +1,4 @@
variable "flake_ref" {
type = string
}
variable "flake_sha" {
type = string
}
resource "nomad_volume" "syncthing-data" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "syncthing-data"
@ -20,7 +11,7 @@ resource "nomad_volume" "syncthing-data" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/syncthing/data"
}
@ -31,7 +22,6 @@ resource "nomad_volume" "syncthing-data" {
}
resource "nomad_volume" "syncthing-storage" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "syncthing-storage"
@ -44,7 +34,7 @@ resource "nomad_volume" "syncthing-storage" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/syncthing/storage"
}
@ -55,7 +45,6 @@ resource "nomad_volume" "syncthing-storage" {
}
resource "nomad_volume" "syncthing-config" {
# depends_on = [data.nomad_plugin.nomad-driver-containerd]
type = "csi"
plugin_id = "nfs"
volume_id = "syncthing-config"
@ -68,7 +57,7 @@ resource "nomad_volume" "syncthing-config" {
}
context = {
server = "blowhole.in.redalder.org"
server = "blowhole.hosts.in.redalder.org"
share = "/var/nfs/syncthing/config"
}
@ -79,12 +68,12 @@ resource "nomad_volume" "syncthing-config" {
}
resource "nomad_job" "syncthing" {
jobspec = file("${path.module}/nomad.hcl")
jobspec = file("${path.module}/job/syncthing.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = var.flake_ref
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}

View file

@ -0,0 +1,11 @@
resource "nomad_job" "website" {
jobspec = file("${path.module}/job/website.hcl")
hcl2 {
enabled = true
vars = {
flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
flake_sha = var.flake_sha
}
}
}

nomad/regions/main.tf Normal file
View file

@ -0,0 +1,33 @@
variable "flake_rev" {
type = string
}
variable "flake_sha" {
type = string
}
variable "flake_host" {
type = string
}
variable "flake_host_alt" {
type = string
}
module "do-1" {
source = "./do-1"
flake_rev = var.flake_rev
flake_host = var.flake_host
flake_host_alt = var.flake_host_alt
flake_sha = var.flake_sha
}
module "homelab-1" {
source = "./homelab-1"
flake_rev = var.flake_rev
flake_host = var.flake_host
flake_host_alt = var.flake_host_alt
flake_sha = var.flake_sha
}
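
A hedged sketch of how this regions module might be instantiated from a root configuration; the module name, source path, and every value below are hypothetical placeholders, not taken from this repository:

module "regions" {
  source = "./nomad/regions"

  # The git revision and narHash pin the flake, so every job builds
  # exactly the system closure referenced by its flake_ref.
  flake_rev      = "0000000000000000000000000000000000000000"
  flake_sha      = "sha256-AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA="
  flake_host     = "git+https://example.org/infra.git"
  flake_host_alt = "git+ssh://git@example.org/infra.git"
}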