Make use of Terraform for deployment and related cleanup
Signed-off-by: main <magic_rb@redalder.org>
commit 6835af938c (parent 2d1f2957b7)
.gitignore | 38 (vendored)
@@ -1,4 +1,6 @@
bin/
result
.direnv

# Created by https://www.toptal.com/developers/gitignore/api/emacs
# Edit at https://www.toptal.com/developers/gitignore?templates=emacs
@@ -56,3 +58,39 @@ flycheck_*.el

# End of https://www.toptal.com/developers/gitignore/api/emacs

# Local .terraform directories
**/.terraform/*

# .tfstate files
*.tfstate
*.tfstate.*

# Crash log files
crash.log
crash.*.log

# Exclude all .tfvars files, which are likely to contain sensitive data, such as
# password, private keys, and other secrets. These should not be part of version
# control as they are data points which are potentially sensitive and subject
# to change depending on the environment.
# *.tfvars
# *.tfvars.json
# Don't exclude them, we don't keep secrets only pinned flakes

# Ignore override files as they are usually used to override resources locally and so
# are not checked in
override.tf
override.tf.json
*_override.tf
*_override.tf.json

# Include override files you do wish to add to version control using negated pattern
# !example_override.tf

# Include tfplan files to ignore the plan output of command: terraform plan -out=tfplan
# example: *tfplan*

# Ignore CLI configuration files
.terraformrc
terraform.rc
.terraform.lock.hcl | 39 (new file)
@@ -0,0 +1,39 @@
# This file is maintained automatically by "terraform init".
# Manual edits may be lost in future updates.

provider "registry.terraform.io/hashicorp/nomad" {
  version = "1.4.16"
  hashes = [
    "h1:PQxNPNmMVOErxryTWIJwr22k95DTSODmgRylqjc2TjI=",
    "zh:0d4fbb7030d9caac3b123e60afa44f50c83cc2a983e1866aec7f30414abe7b0e",
    "zh:0db080228e07c72d6d8ca8c45249d6f97cd0189fce82a77abbdcd49a52e57572",
    "zh:0df88393271078533a217654b96f0672c60eb59570d72e6aefcb839eea87a7a0",
    "zh:2883b335bb6044b0db6a00e602d6926c047c7f330294a73a90d089f98b24d084",
    "zh:390158d928009a041b3a182bdd82376b50530805ae92be2b84ed7c3b0fa902a0",
    "zh:7169b8f8df4b8e9659c49043848fd5f7f8473d0471f67815e8b04980f827f5ef",
    "zh:9417ee1383b1edd137024882d7035be4dca51fb4f725ca00ed87729086ec1755",
    "zh:a22910b5a29eeab5610350700b4899267c1b09b66cf21f7e4d06afc61d425800",
    "zh:a6185c9cd7aa458cd81861058ba568b6411fbac344373a20155e20256f4a7557",
    "zh:b6260ca9f034df1b47905b4e2a9c33b67dbf77224a694d5b10fb09ae92ffad4c",
    "zh:d87c12a6a7768f2b6c2a59495c7dc00f9ecc52b1b868331d4c284f791e278a1e",
  ]
}

provider "registry.terraform.io/hashicorp/vault" {
  version = "3.5.0"
  hashes = [
    "h1:Yqv4nraqqab//lMOjNVQ6PbOmbUBzZU83IronbsQUt4=",
    "zh:0db1f4cba469993952cfd3fedba4d20760ec38c9f46bcf26a92c7ac48b2d5812",
    "zh:31d110c9866cd370bbd730a78a9621a8cdf226ded0f47ce4c02468365a469817",
    "zh:417a00c137e2015e24069068240daf1ae4d8f0d866c54594a6a17d1e030cd2cc",
    "zh:5945fe89e324ba5b3db0b1d08b2aa026f24f9d15f4876e89bd34ecf9cf5e4641",
    "zh:78d5eefdd9e494defcb3c68d282b8f96630502cac21d1ea161f53cfe9bb483b3",
    "zh:7f246064bd5452979f7e02eb2e97cba4e479136b7d3d7b58c2e7a2b25ea675e1",
    "zh:966e508880af89d3e4e4781f90e2f781a6d3d79d2e588ea74f95f2de29bf8df9",
    "zh:b1d906f534a70545117417f5672147f882730808d7338b491ea21509b794cd76",
    "zh:bcfcbdfce3838741795968b1461391e45309958cf1b8ea6fd2c2c0d1cad6a7e1",
    "zh:c272f4633a228d5c69cf1ad3f1acdcaf6d90f4018a3854b0c13d1053fb3e977c",
    "zh:cd60fe5389f934d860f0eabe96de41898c2332ece8c7270605909ab57fe4fd14",
    "zh:d3f6fa1470fa8c8041f9e391cf93b068914a1cf53ad32c05b0da114036e1a8cd",
  ]
}
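The lock file pins each provider to a version plus verification hashes. A minimal sketch of how such a file is produced and extended (standard Terraform CLI workflow, an assumption rather than anything stated in the commit):

    # Produce/refresh .terraform.lock.hcl for the providers declared above.
    terraform init

    # Optionally record hashes for more platforms so other machines verify the same builds.
    terraform providers lock -platform=linux_amd64 -platform=darwin_amd64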
Add-ignored_acls-setting.patch | 35 (deleted; file name inferred from the flake.nix hunk below)
@@ -1,35 +0,0 @@
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 8784d5faf..7e01b4960 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -805,6 +805,15 @@ public:
        may be useful in certain scenarios (e.g. to spin up containers or
        set up userspace network interfaces in tests).
      )"};
+
+    Setting<StringSet> ignoredAcls{
+        this, {"security.selinux"}, "ignored-acls",
+        R"(
+          A list of ACLs that should be ignored, normally Nix attempts to
+          remove all ACLs from files and directories in the Nix store, but
+          some ACLs like `security.selinux` or `system.nfs4_acl` can't be
+          removed even by root. Therefore it's best to just ignore them.
+        )"};
 #endif

     Setting<Strings> hashedMirrors{
diff --git a/src/libstore/local-store.cc b/src/libstore/local-store.cc
index 747eb205e..c6f774bc2 100644
--- a/src/libstore/local-store.cc
+++ b/src/libstore/local-store.cc
@@ -583,9 +583,7 @@ static void canonicalisePathMetaData_(const Path & path, uid_t fromUid, InodesSe
         throw SysError("querying extended attributes of '%s'", path);

     for (auto & eaName: tokenizeString<Strings>(std::string(eaBuf.data(), eaSize), std::string("\000", 1))) {
-        /* Ignore SELinux security labels since these cannot be
-           removed even by root. */
-        if (eaName == "security.selinux") continue;
+        if (settings.ignoredAcls.get().count(eaName)) continue;
         if (lremovexattr(path.c_str(), eaName.c_str()) == -1)
             throw SysError("removing extended attribute '%s' from '%s'", eaName, path);
     }
Ignore-system.nfs4_acl.patch | 13 (deleted; file name inferred from the flake.nix hunk below)
@@ -1,13 +0,0 @@
diff --git a/src/libstore/globals.hh b/src/libstore/globals.hh
index 2f9e8c6e8..433deaf0f 100644
--- a/src/libstore/globals.hh
+++ b/src/libstore/globals.hh
@@ -799,7 +799,7 @@ public:
      )"};

     Setting<StringSet> ignoredAcls{
-        this, {"security.selinux"}, "ignored-acls",
+        this, {"security.selinux", "system.nfs4_acl"}, "ignored-acls",
         R"(
           A list of ACLs that should be ignored, normally Nix attempts to
           remove all ACLs from files and directories in the Nix store, but
(modified Nix host configuration; file name not shown in this capture)
@@ -47,12 +47,7 @@
    type.services = {};
  };
  nix = {
    package = pkgs.nixUnstable.overrideAttrs (old: {
      patches =
        [ # ./Add-ignored_acls-setting.patch
          # ./Ignore-system.nfs4_acl.patch
        ];
    });
    package = pkgs.nixUnstable;
    loadNixDb = true;
    persistNix = "/nix-persist";
    config = {
containers/jmusicbot.nix | 26 (deleted; file name inferred from the flake.nix import it removes)
@@ -1,26 +0,0 @@
{ nglib, nixpkgs }:
nglib.makeSystem {
  system = "x86_64-linux";
  name = "nixng-jmusicbot";
  inherit nixpkgs;
  config = ({ pkgs, config, nglib, ... }:
    {
      dumb-init = {
        enable = true;
        type.services = {};
      };
      init.services.jmusicbot = {
        shutdownOnExit = true;
      };

      services.jmusicbot = {
        enable = true;

        config = {
          prefix = "sudo";
          token = "\${BOT_TOKEN}";
          owner = "\${BOT_OWNER}";
        };
      };
    });
}
flake.nix | 157
@@ -23,126 +23,47 @@
forAllSystems' = systems: fun: nixpkgs.lib.genAttrs systems fun;
forAllSystems = forAllSystems' supportedSystems;

containers =
  let base = { nglib = nixng.nglib nixpkgs.lib; inherit nixpkgs; };
  in
  {
    hydra = (import ./containers/hydra.nix base).hydra;
    hydraPostgresql = (import ./containers/hydra.nix base).postgresql;
    ingressToothpick = import ./containers/ingress-toothpick.nix base;
    ingressBlowhole = import ./containers/ingress-blowhole.nix base;
    website = import ./containers/website.nix (base // { inherit (inputs.website) website; });
    jmusicbot = import ./containers/jmusicbot.nix base;
    camptules = (import ./containers/camptules.nix (base // { inherit (inputs) camptules; }));
    gitea = import ./containers/gitea.nix base;
    minecraft = import ./containers/minecraft.nix base;
    mosquitto = import ./containers/mosquitto.nix base;
    dovecot = import ./containers/dovecot.nix base;
    getmail = import ./containers/getmail base;
    syncthing = import ./containers/syncthing.nix base;
    zigbee2mqtt = import ./containers/zigbee2mqtt.nix base;
    home-assistant = import ./containers/home-assistant.nix base;
    reicio = import ./containers/reicio.nix base;
  };
pkgsForSystem = system:
  import nixpkgs { inherit system; };
in
{
ociImages = mapAttrs (n: v: v.config.system.build.ociImage) containers;
{
nixngSystems =
  let base = { nglib = nixng.nglib nixpkgs.lib; inherit nixpkgs; };
  in
  { hydra = (import ./containers/hydra.nix base).hydra;
    hydraPostgresql = (import ./containers/hydra.nix base).postgresql;
    ingressToothpick = import ./containers/ingress-toothpick.nix base;
    ingressBlowhole = import ./containers/ingress-blowhole.nix base;
    website = import ./containers/website.nix (base // { inherit (inputs.website) website; });
    camptules = (import ./containers/camptules.nix (base // { inherit (inputs) camptules; }));
    gitea = import ./containers/gitea.nix base;
    minecraft = import ./containers/minecraft.nix base;
    mosquitto = import ./containers/mosquitto.nix base;
    dovecot = import ./containers/dovecot.nix base;
    getmail = import ./containers/getmail base;
    syncthing = import ./containers/syncthing.nix base;
    zigbee2mqtt = import ./containers/zigbee2mqtt.nix base;
    home-assistant = import ./containers/home-assistant.nix base;
    reicio = import ./containers/reicio.nix base;
  };

nixngSystems = containers;

hydraJobs =
  let
    pkgs = import nixpkgs { system = "x86_64-linux"; };
    makeJob = container:
      pkgs.stdenv.mkDerivation
        { name = "${container.stream.imageName}-hydra-job";
          buildPhase =
            ''
              _workdir=$(mktemp -d)
              mkdir -p $out/nix-support $out/layers

              ${container.stream} | tar -xf - -C $_workdir
              for img in $_workdir/*/*.tar ; do
                _hash=$(basename $(dirname $img))

                cp $img $out/layers/$_hash.tar
              done

              _config=$(basename $(find $_workdir -name '*.json' ! -name 'manifest.json' -type f))

              cp $_workdir/manifest.json $out/manifest.json
              cp $_workdir/$_config $out/$_config

              ln -s ${container.stream} $out/stream

              cat > $out/nix-support/hydra-build-products <<EOF
              directory image-layers $out/layers

              file manifest $out/manifest.json
              file config $out/$_config

              file executable $out/stream
              EOF
            '' ;

          phases = [ "buildPhase" ];

          nativeBuildInputs = with pkgs; [ jq ];
        };
  in
  nixpkgs.lib.mapAttrs (n: v: makeJob v) self.ociImages;
hydraJobs =
  let
    pkgs = import nixpkgs { system = "x86_64-linux"; };
    makeJob = container:
      container.config.system.build.toplevel;
  in
  nixpkgs.lib.mapAttrs (n: v: makeJob v) self.nixngSystems;

devShell = forAllSystems (system:
  let
    pkgs = import nixpkgs { system = "x86_64-linux"; };
    copy-containers = pkgs.writeShellScriptBin "copy-containers"
      ''
        _profile="$1"
        _address="$2"
        _selector="$3"

        function profile_blowhole()
        {
          _images=("camptules" "gitea" "home-assistant" "hydra" \
                   "minecraft" "mosquitto" "website" "zigbee2mqtt")

          for _image in ''${_images[@]}
          do
            deploy $_address $_image
          done
        }

        function profile_toothpick()
        {
          images=("ingress")

          for _image in ''${_images[@]}
          do
            deploy $_address $_image
          done
        }

        function deploy()
        {
          _address="$1"
          _image="$2"

          nix build ".#ociImages.$_image.stream" -o result-script && \
            ./result-script | ssh "$_address" docker load && \
            ssh "$_address" docker tag "nixng-$_image:latest" "nixng-$_image:local"
        }

        "profile_$_profile"
      '';
  in
  pkgs.mkShell {
    nativeBuildInputs = with pkgs;
      [ nomad_1_1 consul vault jq
        copy-containers
      ];
  }
);
};
devShell = forAllSystems (system:
  let pkgs = pkgsForSystem system;
  in
  pkgs.mkShell {
    nativeBuildInputs = with pkgs;
      [ nomad_1_1 consul vault packer jq terraform
      ];
  }
);
};
}
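The flake now exposes each container as an attribute of `nixngSystems`, and the Nomad jobspecs below reference the matching system closure by flake attribute path. A minimal sketch of building one such closure from a checkout (plain `nix build`; the attribute path is taken from the jobspecs below):

    # Build the website container's system closure locally.
    nix build .#nixngSystems.website.config.system.build.toplevel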
infrastructure/camptules/main.tf | 29 (new file)
@@ -0,0 +1,29 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

# data "nomad_plugin" "nomad-driver-containerd" {
#   plugin_id = "nomad-driver-containerd"
#   wait_for_healthy = true
# }

resource "vault_policy" "camputules-policy" {
  name   = "camptules-policy"
  policy = file("${path.module}/camptules-policy.hcl")
}

resource "nomad_job" "camptules" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/camptules/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "camptules" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -5,17 +13,23 @@ job "camptules" {
  group "camptules" {
    count = 1

    network {
      mode = "bridge"
    }

    task "camptules" {
      driver = "docker"
      driver = "containerd-driver"

      config {
        image = "nixng-camptules:local"
        memory_hard_limit = 192
        flake_ref = "${var.flake_ref}#nixngSystems.camptules.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
        cpu = 512
        memory = 128
        memory_max = 192
      }

      vault {
(deleted Nomad volume definition: dovecot_maildir) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "dovecot_maildir"
name      = "dovecot_maildir"
plugin_id = "nfs"

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/dovecot/maildir"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: getmail_getmail-d) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "getmail_getmail-d"
name      = "getmail_getmail-d"
plugin_id = "nfs"

capability {
  access_mode     = "multi-node-multi-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/getmail/getmail.d"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
infrastructure/email/main.tf | 77 (new file)
@@ -0,0 +1,77 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

resource "vault_policy" "dovecot-policy" {
  name   = "dovecot-policy"
  policy = file("${path.module}/dovecot-policy.hcl")
}

resource "vault_policy" "getmail-policy" {
  name   = "getmail-policy"
  policy = file("${path.module}/getmail-policy.hcl")
}

resource "nomad_volume" "dovecot_maildir" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "dovecot_maildir"
  name        = "dovecot_maildir"
  external_id = "dovecot_maildir"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/dovecot/maildir"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "getmail_getmail-d" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "getmail_getmail-d"
  name        = "getmail_getmail-d"
  external_id = "getmail_getmail-d"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/getmail/getmail.d"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_job" "email" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/email/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "email" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -55,7 +63,7 @@ job "email" {
    }

    task "app" {
      driver = "docker"
      driver = "containerd-driver"

      volume_mount {
        volume = "dovecot_maildir"
@@ -70,7 +78,9 @@ job "email" {
      }

      config {
        image = "ra-systems-getmail:local"
        flake_ref = "${var.flake_ref}#nixngSystems.getmail.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      env {
@@ -123,7 +133,7 @@ job "email" {
    }

    task "app" {
      driver = "docker"
      driver = "containerd-driver"

      volume_mount {
        volume = "dovecot_maildir"
@@ -132,7 +142,9 @@ job "email" {
      }

      config {
        image = "ra-systems-dovecot:local"
        flake_ref = "${var.flake_ref}#nixngSystems.dovecot.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      env {
(deleted Nomad volume definition: gitea-data) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "gitea-data"
name      = "gitea-data"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/gitea-data"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: gitea-db) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "gitea-db"
name      = "gitea-db"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/gitea-db"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
infrastructure/gitea/gitea-policy.hcl (modified; path inferred)
@@ -1,3 +1,3 @@
path "kv/data/jmusicbot" {
path "kv/data/gitea" {
  capabilities = ["read"]
}
infrastructure/gitea/main.tf | 77 (new file)
@@ -0,0 +1,77 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

# data "nomad_plugin" "nomad-driver-containerd" {
#   plugin_id = "nomad-driver-containerd"
#   wait_for_healthy = true
# }

resource "nomad_volume" "gitea-db" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "gitea-db"
  name        = "gitea-db"
  external_id = "gitea-db"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/gitea-db"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "gitea-data" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "gitea-data"
  name        = "gitea-data"
  external_id = "gitea-data"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/gitea-data"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "vault_policy" "gitea-policy" {
  name   = "gitea-policy"
  policy = file("${path.module}/gitea-policy.hcl")
}

resource "nomad_job" "gitea" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/gitea/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "gitea" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -56,7 +64,7 @@ job "gitea" {
    }

    task "app" {
      driver = "docker"
      driver = "containerd-driver"

      volume_mount {
        volume = "gitea-data"
@@ -71,7 +79,9 @@ job "gitea" {
      }

      config {
        image = "nixng-gitea:local"
        flake_ref = "${var.flake_ref}#nixngSystems.gitea.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      env {
(deleted Nomad volume definition: home-assistant_hass) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "home-assistant_hass"
name      = "home-assistant_hass"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/home-assistant_hass"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: home-assistant_mosquitto) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "home-assistant_mosquitto"
name      = "home-assistant_mosquitto"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/home-assistant_mosquitto"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: home-assistant_zigbee2mqtt) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "home-assistant_zigbee2mqtt"
name      = "home-assistant_zigbee2mqtt"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/home-assistant_zigbee2mqtt"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
infrastructure/home-assistant/main.tf | 106 (new file)
@@ -0,0 +1,106 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

resource "nomad_volume" "home-assistant_hass" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "home-assistant_hass"
  name        = "home-assistant_hass"
  external_id = "home-assistant_hass"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/home-assistant_hass"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "home-assistant_zigbee2mqtt" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "home-assistant_zigbee2mqtt"
  name        = "home-assistant_zigbee2mqtt"
  external_id = "home-assistant_zigbee2mqtt"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/home-assistant_zigbee2mqtt"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "home-assistant_mosquitto" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "home-assistant_mosquitto"
  name        = "home-assistant_mosquitto"
  external_id = "home-assistant_mosquitto"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/home-assistant_mosquitto"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "vault_policy" "home-assistant-policy" {
  name   = "home-assistant-policy"
  policy = file("${path.module}/home-assistant-policy.hcl")
}

resource "vault_policy" "zigbee2mqtt-policy" {
  name   = "zigbee2mqtt-policy"
  policy = file("${path.module}/zigbee2mqtt-policy.hcl")
}

resource "vault_policy" "mosquitto-policy" {
  name   = "mosquitto-policy"
  policy = file("${path.module}/mosquitto-policy.hcl")
}

resource "nomad_job" "home-assistant" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/home-assistant/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "home-assistant" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -51,28 +59,26 @@ job "home-assistant" {
    }

    task "zigbee2mqtt" {
      driver = "docker"
      driver = "containerd-driver"

      vault {
        policies = ["zigbee2mqtt-policy"]
      }

      config {
        image = "nixng-zigbee2mqtt:local"

        memory_hard_limit = 256
        flake_ref = "${var.flake_ref}#nixngSystems.zigbee2mqtt.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]

        devices = [
          {
            host_path = "/dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_4c004e9c53c9eb118a9f8b4f1d69213e-if00-port0"
            container_path = "/dev/serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_4c004e9c53c9eb118a9f8b4f1d69213e-if00-port0"
          }
          "/dev/ttyUSB0" #"serial/by-id/usb-ITead_Sonoff_Zigbee_3.0_USB_Dongle_Plus_4c004e9c53c9eb118a9f8b4f1d69213e-if00-port0"
        ]
      }

      resources {
        cpu = 128
        memory = 128
        memory_max = 256
      }

      volume_mount {
@@ -129,21 +135,22 @@ EOF
    }

    task "mosquitto" {
      driver = "docker"
      driver = "containerd-driver"

      vault {
        policies = ["mosquitto-policy"]
      }

      config {
        image = "nixng-mosquitto:local"

        memory_hard_limit = 256
        flake_ref = "${var.flake_ref}#nixngSystems.mosquitto.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
        cpu = 128
        memory = 128
        memory_max = 256
      }

      volume_mount {
@@ -208,21 +215,22 @@ EOF
    }

    task "home-assistant" {
      driver = "docker"
      driver = "containerd-driver"

      vault {
        policies = ["home-assistant-policy"]
      }

      config {
        image = "nixng-home-assistant:local"

        memory_hard_limit = 256
        flake_ref = "${var.flake_ref}#nixngSystems.home-assistant.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
        cpu = 128
        memory = 128
        memory_max = 256
      }

      volume_mount {
(deleted Nomad volume definition: hydra-data) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "hydra-data"
name      = "hydra-data"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/hydra-data"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: hydra-db) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "hydra-db"
name      = "hydra-db"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/hydra-db"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: hydra-nix) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "hydra-nix"
name      = "hydra-nix"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/var/nfs/hydra-nix"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
infrastructure/hydra/main.tf | 101 (new file)
@@ -0,0 +1,101 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

# data "nomad_plugin" "nomad-driver-containerd" {
#   plugin_id = "nomad-driver-containerd"
#   wait_for_healthy = true
# }

resource "nomad_volume" "hydra-db" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "hydra-db"
  name        = "hydra-db"
  external_id = "hydra-db"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/hydra-db"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "hydra-data" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "hydra-data"
  name        = "hydra-data"
  external_id = "hydra-data"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/hydra-data"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "hydra-nix" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "hydra-nix"
  name        = "hydra-nix"
  external_id = "hydra-nix"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/hydra-nix"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "vault_policy" "hydra-policy" {
  name   = "hydra-policy"
  policy = file("${path.module}/hydra-policy.hcl")
}

resource "nomad_job" "hydra" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/hydra/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "hydra" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -65,7 +73,7 @@ job "hydra" {
    }

    task "hydra" {
      driver = "docker"
      driver = "containerd-driver"

      volume_mount {
        volume = "hydra-data"
@@ -80,17 +88,14 @@ job "hydra" {
      }

      config {
        image = "nixng-hydra:local"
        flake_ref = "${var.flake_ref}#nixngSystems.hydra.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]

        devices = [
          {
            host_path = "/dev/fuse"
            container_path = "/dev/fuse"
          },
          "/dev/fuse"
        ]
        privileged = true

        memory_hard_limit = 3072
      }

      vault {
@@ -100,6 +105,7 @@ job "hydra" {
      resources {
        cpu = 4000
        memory = 1024
        memory_max = 3072
      }

      template {
@@ -143,7 +149,7 @@ EOF
    }

    task "postgresql" {
      driver = "docker"
      driver = "containerd-driver"

      volume_mount {
        volume = "hydra-db"
@@ -152,14 +158,15 @@ EOF
      }

      config {
        image = "nixng-hydra-postgresql:local"

        memory_hard_limit = 256
        flake_ref = "${var.flake_ref}#nixngSystems.hydraPostgresql.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
        cpu = 500
        memory = 128
        memory_max = 256
      }

      template {
(deleted Nomad volume definition: ingress-letsencrypt) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "ingress-letsencrypt"
name      = "ingress-letsencrypt"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "10.64.1.201"
  share  = "/var/nfs/ingress-letsencrypt"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "soft" ]
}
infrastructure/ingress/main.tf | 43 (new file)
@@ -0,0 +1,43 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

resource "nomad_volume" "ingress-letsencrypt" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "ingress-letsencrypt"
  name        = "ingress-letsencrypt"
  external_id = "ingress-letsencrypt"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "10.64.1.201"
    share  = "/var/nfs/ingress-letsencrypt"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_job" "ingress" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/ingress/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "ingress" {
  datacenters = [ "do-1", "homelab-1" ]
  type = "service"
(deleted Nomad volume definition: jellyfin-cache) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "jellyfin-cache"
name      = "jellyfin-cache"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "10.64.1.201"
  share  = "/var/nfs/jellyfin/cache"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: jellyfin-config) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "jellyfin-config"
name      = "jellyfin-config"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "10.64.1.201"
  share  = "/var/nfs/jellyfin/config"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: jellyfin-media) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "jellyfin-media"
name      = "jellyfin-media"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "10.64.1.201"
  share  = "/var/nfs/jellyfin/media"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
infrastructure/jellyfin/main.tf | 96 (new file)
@@ -0,0 +1,96 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

# data "nomad_plugin" "nomad-driver-containerd" {
#   plugin_id = "nomad-driver-containerd"
#   wait_for_healthy = true
# }

resource "nomad_volume" "jellyfin-cache" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "jellyfin-cache"
  name        = "jellyfin-cache"
  external_id = "jellyfin-cache"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/jellyfin/cache"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "jellyfin-config" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "jellyfin-config"
  name        = "jellyfin-config"
  external_id = "jellyfin-config"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/jellyfin/config"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "jellyfin-media" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "jellyfin-media"
  name        = "jellyfin-media"
  external_id = "jellyfin-media"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/jellyfin/media"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_job" "jellyfin" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      # flake_ref = var.flake_ref
      # flake_sha = var.flake_sha
    }
  }
}
infrastructure/jellyfin/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -84,7 +84,7 @@ job "jellyfin" {
      driver = "docker"

      config {
        image = "jellyfin/jellyfin@sha256:655acb8793150f0386dfd48cf0a3238e567f07e3db1ed53c40f99c534eaeb49c"
        image = "jellyfin/jellyfin@sha256:b4500b143d8a0d4383c50721517908d679af1c5ec00e791f0097f8c419b3e853"

        devices = [
          {
(deleted Nomad jobspec: jmusicbot) | 37
@@ -1,37 +0,0 @@
job "jmusicbot" {
  datacenters = [ "homelab-1" ]
  type = "service"

  group "jmusicbot" {
    count = 1

    task "jmusicbot" {
      driver = "docker"

      config {
        image = "nixng-jmusicbot:local"
        memory_hard_limit = 192
      }

      resources {
        cpu = 512
        memory = 128
      }

      vault {
        policies = ["jmusicbot-policy"]
      }

      template {
        data = <<EOF
{{ with secret "kv/data/jmusicbot" }}
BOT_TOKEN={{ .Data.data.token }}
BOT_OWNER={{ .Data.data.owner }}
{{ end }}
EOF
        destination = "secrets/env"
        env = true
      }
    }
  }
}
infrastructure/main.tf | 101 (new file)
@@ -0,0 +1,101 @@
variable "flake_rev" {
  type = string
}

variable "flake_host" {
  type = string
}

variable "flake_host_alt" {
  type = string
}

variable "flake_sha" {
  type = string
}

module "camptules" {
  source = "./camptules"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "email" {
  source = "./email"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "gitea" {
  source = "./gitea"

  flake_ref = "${var.flake_host_alt}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "home-assistant" {
  source = "./home-assistant"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "hydra" {
  source = "./hydra"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "ingress" {
  source = "./ingress"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "jellyfin" {
  source = "./jellyfin"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "mesh" {
  source = "./syncthing"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

// minecraft

module "plugin-nfs" {
  source = "./plugin-nfs"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "reicio" {
  source = "./reicio"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "syncthing" {
  source = "./syncthing"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}

module "website" {
  source = "./website"

  flake_ref = "${var.flake_host}?rev=${var.flake_rev}"
  flake_sha = var.flake_sha
}
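Each module is handed flake_ref = "${var.flake_host}?rev=${var.flake_rev}", and the jobspecs append "#nixngSystems.<name>.config.system.build.toplevel" to it. A sketch of the fully composed reference for one job, reconstructed from the values pinned in terraform.tfvars at the end of this commit (assumes that rev is fetchable from that host):

    # What the camptules task's flake_ref expands to once composed.
    nix build "git+https://gitea.redalder.org/RedAlder/systems?rev=2d1f2957b73249e729b417d512cd7a35d114846f#nixngSystems.camptules.config.system.build.toplevel"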
infrastructure/mesh/main.tf | 19 (new file)
@@ -0,0 +1,19 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

resource "nomad_job" "mesh" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/mesh/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "gateway-mesh" {
  datacenters = [ "homelab-1", "do-1" ]
(deleted Nomad volume definition: nextcloud-data) | 18
@@ -1,18 +0,0 @@
type      = "csi"
id        = "nextcloud-data"
name      = "nextcloud-data"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/nextcloud-data"
}

mount_options {
  fs_type = "nfs"
}
(deleted Nomad volume definition: nextcloud-db) | 18
@@ -1,18 +0,0 @@
type      = "csi"
id        = "nextcloud-db"
name      = "nextcloud-db"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "blowhole.in.redalder.org"
  share  = "/nextcloud-db"
}

mount_options {
  fs_type = "nfs"
}
(deleted Vault policy: nextcloud) | 3
@@ -1,3 +0,0 @@
path "kv/data/nextcloud" {
  capabilities = ["read"]
}
(deleted Nomad jobspec: nextcloud) | 170
@@ -1,170 +0,0 @@
job "nextcloud" {
  datacenters = [ "homelab-1" ]
  type = "service"

  constraint {
    attribute = "${attr.unique.hostname}"
    value = "blowhole"
  }

  group "nextcloud" {
    count = 1

    volume "nextcloud-db" {
      type = "csi"
      source = "nextcloud-db"
      read_only = false

      attachment_mode = "file-system"
      access_mode = "single-node-writer"
    }

    volume "nextcloud-data" {
      type = "csi"
      source = "nextcloud-data"
      read_only = false

      attachment_mode = "file-system"
      access_mode = "single-node-writer"
    }

    network {
      mode = "bridge"
    }

    service {
      name = "nextcloud"
      port = "80"

      check {
        type = "http"
        address_mode = "alloc"
        path = "/"
        port = "80"
        interval = "10s"
        timeout = "10s"
      }

      connect {
        sidecar_service {}
      }
    }

    task "apache" {
      driver = "docker"

      volume_mount {
        volume = "nextcloud-data"
        destination = "/var/www/html/"
        read_only = false
      }

      config {
        image = "nextcloud:21.0.1-apache"

        volumes = [
          "local/10-opcache.ini:/usr/local/etc/php/conf.d/10-opcache.ini",
          "local/nextcloud-upload-limit.ini:/usr/local/etc/php/conf.d/nextcloud-upload-limit.ini"
        ]
      }

      template {
        # REDIS_HOST={{ env "NOMAD_HOST_IP_redis" }}
        # REDIS_HOST_PORT={{ env "NOMAD_HOST_PORT_redis" }}
        data = <<EOF
{{ with secret "kv/data/nextcloud" }}
MYSQL_USER={{ .Data.data.mysql_user }}
MYSQL_PASSWORD={{ .Data.data.mysql_password }}
MYSQL_DATABASE={{ .Data.data.mysql_database }}
{{ end }}
MYSQL_HOST=127.0.0.1:3306
EOF
        destination = "local/env"
        env = true
      }

      template {
        data = <<EOF
opcache.enable=1
opcache.enable_cli=1
opcache.interned_strings_buffer=8
opcache.max_accelerated_files=10000
opcache.memory_consumption=128
opcache.save_comments=1
opcache.revalidate_freq=1
EOF
        destination = "local/10-opcache.ini"
      }

      template {
        data = <<EOF
upload_max_filesize=512M
post_max_size=550M
memory_limit=1G
EOF
        destination = "local/nextcloud-upload-limit.ini"
      }

      resources {
        cpu = 100
        memory = 256
      }

      vault {
        policies = ["nextcloud-policy"]
      }
    }

    task "mariadb" {
      driver = "docker"

      volume_mount {
        volume = "nextcloud-db"
        destination = "/var/lib/mysql/"
        read_only = false
      }

      config {
        image = "mariadb:10.6.0"

        command = "--innodb-read-only-compressed=OFF"
        # command = [ "--transaction-isolation=READ-COMMITTED", "--binlog-format=ROW" ]
      }

      template {
        data = <<EOF
{{ with secret "kv/data/nextcloud" }}
MYSQL_ROOT_PASSWORD={{ .Data.data.mysql_root_pass }}
MYSQL_USER={{ .Data.data.mysql_user }}
MYSQL_PASSWORD={{ .Data.data.mysql_password }}
MYSQL_DATABASE={{ .Data.data.mysql_database }}
{{ end }}
EOF
        destination = "local/env"
        env = true
      }

      resources {
        cpu = 100
        memory = 256
      }

      vault {
        policies = ["nextcloud-policy"]
      }
    }

    task "redis" {
      driver = "docker"

      config {
        image = "redis:6.0.9-alpine"
      }

      resources {
        cpu = 100
        memory = 256
      }
    }
  }
}
infrastructure/plugin-nfs/main.tf | 31 (new file)
@@ -0,0 +1,31 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

resource "nomad_job" "nfs-controller" {
  jobspec = file("${path.module}/nfs-controller.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}

resource "nomad_job" "nfs-nodes" {
  jobspec = file("${path.module}/nfs-nodes.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/plugin-nfs/nfs-controller.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "plugin-nfs-controller" {
  datacenters = [ "homelab-1" ]
infrastructure/plugin-nfs/nfs-nodes.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "plugin-nfs-nodes" {
  datacenters = [ "homelab-1", "do-1" ]
infrastructure/reicio/main.tf | 19 (new file)
@@ -0,0 +1,19 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

resource "nomad_job" "reicio" {
  jobspec = file("${path.module}/reicio.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/reicio/reicio.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "reicio" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -28,16 +36,18 @@ job "reicio" {
    }

    task "reicio" {
      driver = "docker"
      driver = "containerd-driver"

      config {
        image = "nixng-reicio:local"
        memory_hard_limit = 32
        flake_ref = "${var.flake_ref}#nixngSystems.reicio.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
        cpu = 64
        memory = 16
        memory_max = 32
      }
    }
  }
infrastructure/syncthing/main.tf | 91 (new file)
@@ -0,0 +1,91 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

resource "nomad_volume" "syncthing-data" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "syncthing-data"
  name        = "syncthing-data"
  external_id = "syncthing-data"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/syncthing/data"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "syncthing-storage" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "syncthing-storage"
  name        = "syncthing-storage"
  external_id = "syncthing-storage"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/syncthing/storage"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_volume" "syncthing-config" {
  # depends_on = [data.nomad_plugin.nomad-driver-containerd]
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "syncthing-config"
  name        = "syncthing-config"
  external_id = "syncthing-config"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.in.redalder.org"
    share  = "/var/nfs/syncthing/config"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_job" "syncthing" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/syncthing/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "syncthing" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -68,10 +76,12 @@ job "syncthing" {
    }

    task "syncthing" {
      driver = "docker"
      driver = "containerd-driver"

      config {
        image = "ra-systems-syncthing:local"
        flake_ref = "${var.flake_ref}#nixngSystems.syncthing.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
(deleted Nomad volume definition: syncthing-config) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "syncthing-config"
name      = "syncthing-config"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "10.64.1.201"
  share  = "/var/nfs/syncthing/config"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: syncthing-data) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "syncthing-data"
name      = "syncthing-data"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "10.64.1.201"
  share  = "/var/nfs/syncthing/data"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
(deleted Nomad volume definition: syncthing-storage) | 19
@@ -1,19 +0,0 @@
type      = "csi"
id        = "syncthing-storage"
name      = "syncthing-storage"
plugin_id = "nfs"

capability {
  access_mode     = "single-node-writer"
  attachment_mode = "file-system"
}

context {
  server = "10.64.1.201"
  share  = "/var/nfs/syncthing/storage"
}

mount_options {
  fs_type     = "nfs"
  mount_flags = [ "nolock", "hard" ]
}
infrastructure/website/main.tf | 24 (new file)
@@ -0,0 +1,24 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

# data "nomad_plugin" "nomad-driver-containerd" {
#   plugin_id = "nomad-driver-containerd"
#   wait_for_healthy = true
# }

resource "nomad_job" "website" {
  jobspec = file("${path.module}/nomad.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = var.flake_ref
      flake_sha = var.flake_sha
    }
  }
}
infrastructure/website/nomad.hcl (modified; path inferred from the jobspec reference above)
@@ -1,3 +1,11 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "website" {
  datacenters = [ "homelab-1" ]
  type = "service"
@@ -48,16 +56,18 @@ job "website" {
    }

    task "apache" {
      driver = "docker"
      driver = "containerd-driver"

      config {
        image = "nixng-website:local"
        memory_hard_limit = 64
        flake_ref = "${var.flake_ref}#nixngSystems.website.config.system.build.toplevel"
        flake_sha = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
        cpu = 128
        memory = 32
        memory_max = 64
      }
    }
  }
main.tf | 40 (new file)
@@ -0,0 +1,40 @@
terraform {
  backend "consul" {
    address = "http://10.64.1.201:8500"
    scheme  = "http"
    path    = "team/terraform/state"
  }
}

provider "nomad" {
  address = "http://10.64.1.201:4646"
}

provider "vault" {
  address = "https://vault.in.redalder.org:8200/"
}

variable "flake_rev" {
  type = string
}

variable "flake_host" {
  type = string
}

variable "flake_host_alt" {
  type = string
}

variable "flake_sha" {
  type = string
}

module "infrastructure" {
  source = "./infrastructure"

  flake_rev      = var.flake_rev
  flake_host     = var.flake_host
  flake_host_alt = var.flake_host_alt
  flake_sha      = var.flake_sha
}
terraform.tfvars | 4 (new file)
@@ -0,0 +1,4 @@
flake_rev      = "2d1f2957b73249e729b417d512cd7a35d114846f"
flake_host     = "git+https://gitea.redalder.org/RedAlder/systems"
flake_host_alt = "git+https://git.irunx.org/MagicRB/ra-systems"
flake_sha      = "sha256-faVx3/2JgbulG85FsmW0zKhe8VHmUTq/dWZiXsdrg4E="
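With the Consul backend and providers declared in main.tf, and terraform.tfvars picked up automatically, deployment reduces to the standard workflow. A minimal sketch, assuming the Nomad/Consul endpoints above are reachable and a Vault token is present in the environment:

    terraform init   # connect the Consul state backend, install the pinned providers
    terraform plan   # render the Nomad jobspecs with the flake_ref/flake_sha variables
    terraform apply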