mirror of https://git.sr.ht/~magic_rb/cluster
synced 2024-11-25 09:36:14 +01:00

Add conduit

Signed-off-by: main <magic_rb@redalder.org>

parent d0db663ef6
commit 5d38fe62c9
containers/conduit.nix (new file, 69 lines)
@@ -0,0 +1,69 @@
{ nglib, nixpkgs }:
nglib.makeSystem {
  system = "x86_64-linux";
  name = "nixng-gitea";
  inherit nixpkgs;
  config = ({ pkgs, ... }:
    {
      dumb-init = {
        enable = true;
        type.services = { };
      };

      init.services.conduit = {
        enabled = true;
        shutdownOnExit = true;
        script =
          let
            conduitConfig = pkgs.writeText "conduit.toml"
              ''
                [global]
                # The server_name is the pretty name of this server. It is used as a suffix for user
                # and room ids. Examples: matrix.org, conduit.rs

                # The Conduit server needs all /_matrix/ requests to be reachable at
                # https://your.server.name/ on port 443 (client-server) and 8448 (federation).

                # If that's not possible for you, you can create /.well-known files to redirect
                # requests. See
                # https://matrix.org/docs/spec/client_server/latest#get-well-known-matrix-client
                # and
                # https://matrix.org/docs/spec/server_server/r0.1.4#get-well-known-matrix-server
                # for more information

                # YOU NEED TO EDIT THIS
                server_name = "matrix.redalder.org"

                # This is the only directory where Conduit will save its data
                database_path = "/var/lib/matrix-conduit/"
                database_backend = "rocksdb"

                # The port Conduit will be running on. You need to set up a reverse proxy in
                # your web server (e.g. apache or nginx), so all requests to /_matrix on port
                # 443 and 8448 will be forwarded to the Conduit instance running on this port
                port = 6167

                # Max size for uploads
                max_request_size = 20_000_000 # in bytes

                # Enables registration. If set to false, no users can register on this server.
                allow_registration = true

                allow_federation = true

                trusted_servers = ["matrix.org"]

                # How many requests Conduit sends to other servers at the same time
                #max_concurrent_requests = 100
                #log = "info,state_res=warn,rocket=off,_=off,sled=off"

                address = "127.0.0.1"
              '';
          in
          pkgs.writeShellScript "conduit"
            ''
              CONDUIT_CONFIG=${conduitConfig} ${pkgs.matrix-conduit}/bin/conduit
            '';
      };
    });
}
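For reference, one way to build this NixNG system locally before handing it to Nomad. This is a sketch, not taken from the repository; it assumes the flake exposes the container as nixngSystems.conduit, which is the attribute path the jobspec further down references.

    # Hypothetical local build of the container's toplevel, using the attribute path
    # referenced by nomad/regions/homelab-1/job/conduit.hcl below:
    nix build '.#nixngSystems.conduit.config.system.build.toplevel'
    ls -l result/   # inspect the resulting closure and init script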
@@ -46,6 +46,7 @@
   home-assistant = import ./containers/home-assistant.nix base;
   reicio = import ./containers/reicio.nix base;
   baikal = import ./containers/baikal.nix base;
+  conduit = import ./containers/conduit.nix base;
 };

 hydraJobs =
@@ -62,7 +63,7 @@
     in
     pkgs.mkShell {
       nativeBuildInputs = with pkgs;
-        [ nomad_1_1 consul vault packer jq terraform
+        [ nomad_1_3 consul vault packer jq terraform
         ];
     }
 );
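These two hunks (presumably in the repository's flake.nix, given the container imports and the mkShell dev shell) register the new container and bump the dev-shell Nomad from 1.1 to 1.3. A quick, hedged sanity check after the change:

    # Sketch only: assumes the default devShell defined by the hunks above.
    nix develop --command nomad version
    nix develop --command terraform version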
main.tf (6 changed lines)
@@ -1,19 +1,19 @@
 terraform {
   backend "consul" {
-    address = "http://10.64.1.201:8500"
+    address = "http://10.64.0.2:8500"
     scheme  = "http"
     path    = "team/terraform/state"
   }
 }

 provider "nomad" {
-  address = "http://10.64.1.201:4646"
+  address = "http://10.64.0.2:4646"
   region  = "homelab-1"
   alias   = "homelab-1"
 }

 provider "nomad" {
-  address = "http://10.64.1.201:4646"
+  address = "http://10.64.0.2:4646"
   region  = "do-1"
   alias   = "do-1"
 }
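Both the Consul state backend and the two Nomad providers now point at 10.64.0.2 instead of 10.64.1.201. A hedged pre-flight check before the next Terraform run might look like the sketch below (stock Consul/Nomad/Terraform CLI usage, not taken from the repository); a changed backend address normally also needs terraform init -reconfigure.

    # Verify the new endpoints answer before touching state (sketch):
    curl -s http://10.64.0.2:8500/v1/status/leader            # Consul HTTP API
    nomad server members -address=http://10.64.0.2:4646       # Nomad CLI with explicit address
    terraform init -reconfigure                               # re-point the consul backend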
nomad/regions/homelab-1/conduit.tf (new file, 34 lines)
@@ -0,0 +1,34 @@
resource "nomad_volume" "conduit-data" {
  type        = "csi"
  plugin_id   = "nfs"
  volume_id   = "conduit-data"
  name        = "conduit-data"
  external_id = "conduit-data"

  capability {
    access_mode     = "single-node-writer"
    attachment_mode = "file-system"
  }

  context = {
    server = "blowhole.hosts.in.redalder.org"
    share  = "/var/nfs/conduit-data"
  }

  mount_options {
    fs_type     = "nfs"
    mount_flags = [ "nolock", "hard" ]
  }
}

resource "nomad_job" "conduit" {
  jobspec = file("${path.module}/job/conduit.hcl")

  hcl2 {
    enabled = true
    vars = {
      flake_ref = "${var.flake_host_alt}?rev=${var.flake_rev}"
      flake_sha = var.flake_sha
    }
  }
}
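To roll out just the new volume and job, a targeted plan/apply is one option. This is a sketch; it assumes flake_host_alt, flake_rev and flake_sha are supplied the same way as for the existing jobs in this module.

    # Preview and apply only the resources added by this commit (hypothetical invocation):
    terraform plan  -target=nomad_volume.conduit-data -target=nomad_job.conduit
    terraform apply -target=nomad_volume.conduit-data -target=nomad_job.conduit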
nomad/regions/homelab-1/job/conduit.hcl (new file, 73 lines)
@@ -0,0 +1,73 @@
variable "flake_ref" {
  type = string
}

variable "flake_sha" {
  type = string
}

job "conduit" {
  datacenters = [ "homelab-1" ]
  type        = "service"

  group "svc" {
    count = 1

    volume "conduit-data" {
      type      = "csi"
      source    = "conduit-data"
      read_only = false

      attachment_mode = "file-system"
      access_mode     = "single-node-writer"
    }

    restart {
      attempts = 5
      delay    = "5s"
    }

    network {
      mode = "bridge"
    }

    service {
      name = "conduit"
      port = "6167"

      check {
        type         = "http"
        address_mode = "alloc"
        path         = "/"
        port         = "6167"
        interval     = "2s"
        timeout      = "2s"
      }

      connect {
        sidecar_service {}
      }
    }

    task "app" {
      driver = "containerd-driver"

      volume_mount {
        volume      = "conduit-data"
        destination = "/var/lib/matrix-conduit"
        read_only   = false
      }

      config {
        flake_ref  = "${var.flake_ref}#nixngSystems.conduit.config.system.build.toplevel"
        flake_sha  = var.flake_sha
        entrypoint = [ "init" ]
      }

      resources {
        cpu    = 500
        memory = 1024
      }
    }
  }
}
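Once the job is placed, a few hedged checks with stock Nomad and Consul tooling (not part of the commit):

    # Allocation and deployment state of the new job:
    nomad job status conduit
    # Logs from the "app" task of a running allocation:
    nomad alloc logs -job conduit app
    # State of the registered "conduit" service check in Consul:
    curl -s http://10.64.0.2:8500/v1/health/checks/conduit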