Signed-off-by: Magic_RB <magic_rb@redalder.org>
This commit is contained in:
Magic_RB 2021-03-10 23:39:02 +01:00
parent 292874ff17
commit 7353ecca26
9 changed files with 664 additions and 34 deletions

View file

@ -8,6 +8,11 @@
src: url(../ttf/ubuntu-regular.ttf) format("truetype");
}
@font-face {
font-family: "Ubuntu Mono";
src: url(../ttf/ubuntu-mono.ttf) format("truetype");
}
* {
font-family: "Ubuntu Regular";
}
@ -19,7 +24,20 @@ ul {
code {
background-color: #eee;
border: 0.01rem solid #999;
padding: 0.1rem;
padding: 0rem 0.1rem 0rem 0.1rem;
}
.TINY {
font-size: 0.75rem;
}
.WARNING {
background-color: #eee;
border: 1px solid #FF0000;
display: block;
padding: 0.5rem;
font-family: "Ubuntu Mono";
}
pre {
@ -27,6 +45,7 @@ pre {
border: 1px solid #999;
display: block;
padding: 0.5rem;
font-family: "Ubuntu Mono";
}
#table-of-contents {

BIN
assets/ttf/ubuntu-mono.ttf Normal file

Binary file not shown.

View file

@ -66,9 +66,9 @@
now. Not knowing whether a setting changed? Poof, gone too, Nix is fully declarative, which means it identifies
*everything* by a sha256 hash in addition to its developer configured name. Nix also serves as a single point of
truth, which means even if the user modifies the config files, they will be overwritten before they are used
again. This makes mix ups are impossible. Messy and flat "configuration" files? Solved too, the Nix expression language can be
as flat or as deep as you need it to be, you can create complex APIs with functions and all that jazz. Basically Nix
is awesome!
again. This makes mix-ups impossible. Messy and flat "configuration" files? Solved too, the Nix expression
language can be as flat or as deep as you need it to be, you can create complex APIs with functions and all that
jazz. Basically Nix is awesome!
** Conclusion
The take-away from this rant, is that the best course of action is to figure out either how to completely replace

View file

@ -1,8 +1,8 @@
#+TITLE: Scalable ConcourseCI with Nomad and Nix
In this blog post, I will explain to you how you can deploy ConcourseCI on HashiCorp Nomad with fully automatic and
Op-free scaling. We will utilize 3 HashiCorp tools, namely Nomad, Vault, and Consul, then Nix (not necessary, can be
replaced) and finally ConcourseCI itself.
Op-free scaling. We will utilize 3 HashiCorp tools, namely Nomad, Vault, and Consul, then PostgreSQL, Nix (not
necessary, can be replaced) and finally ConcourseCI itself.
* Requirements
+ a functional Nomad installation with Consul and Vault integration
@ -16,6 +16,7 @@ replaced) and finally ConcourseCI itself.
+ Linux - 5.11.0
+ Nix - 2.4pre20201205_a5d85d0
+ ConcourseCI - 7.0.0
+ PostgreSQL - TODO
* Overview
Our goal is to be able to add a Nomad node to the cluster and have ConcourseCI automatically expand to that node, (We
@ -28,8 +29,8 @@ replaced) and finally ConcourseCI itself.
constraints are met.
#+END_QUOTE
A ConcourseCI worker node, needs it's own key pair, the best case scenario would be, that we would generate this key
pair, every time a worker node is brought up, and store it in Vault. Fortunately this is possible with a ~pre-start~
A ConcourseCI worker node needs its own key pair, the best case scenario would be, that we would generate this key
pair, every time a worker node is brought up and store it in Vault. Fortunately this is possible with a ~pre-start~
task, and Consul Template. \\
That's about it when it comes to the special and interesting bits of this post, so if you already know how to do this,
@ -42,6 +43,444 @@ replaced) and finally ConcourseCI itself.
to structure it like so, but you are free to change it around, the only thing that you need to keep the same, is to
have a directory with files representing the individual worker nodes, such as =concourse/workers/<worker-hostname>=.
#+BEGIN_VERBATIM
*** Structure
- [[*concourse][concourse]]
- [[web][web]]
- [[*db][db]]
- [[workers][workers]]
- [[*<worker-hostname>][<worker-hostname>]]
#+END_VERBATIM
**** concourse
Nothing here, just a folder for the other secrets
***** web
- tsa_host_key - key used for tsa (communication between a web and a worker node)
- tsa_host_key_pub
- session_signing key
- local_user_name - username of the administrator user
- local_user_pass - password of the administrator user
***** db
- password
- user
- database
- root_user
- root_password
***** workers
Holds the dynamically generated secrets of all the worker nodes
***** <worker-hostname>
- private_key - the worker's private key
- public_key - the worker's public key, used for authentication when connecting to a web node
*** Policies
We'll need 3 policies, =concourse-web-policy=, =concourse-worker-policy= and =concourse-db-policy=.
#+TITLE: concourse-db-policy.hcl
#+BEGIN_SRC hcl
path "kv/data/concourse/db" {
capabilities = ["read"]
}
#+END_SRC
#+TITLE: concourse-web-policy.hcl
#+BEGIN_SRC hcl
path "kv/data/concourse/workers/*" {
capabilities = ["read"]
}
path "kv/metadata/concourse/workers" {
capabilities = ["list"]
}
path "kv/data/concourse/web" {
capabilities = ["read"]
}
path "kv/data/concourse/db" {
capabilities = ["read"]
}
#+END_SRC
#+TITLE: concourse-worker-policy.hcl
#+BEGIN_SRC hcl
path "kv/data/concourse/workers/*" {
capabilities = ["read", "update", "delete"]
}
path "kv/data/concourse/web" {
capabilities = ["read"]
}
#+END_SRC
*** Application
Create the =web= secrets.
#+BEGIN_SRC shell-script
concourse generate-key -t rsa -f ./session_signing_key
_session_signing_key="$(cat session_signing_key)"
concourse generate-key -t ssh -f ./tsa_host_key
_tsa_host_key="$(cat tsa_host_key)"
_tsa_host_key_pub="$(cat tsa_host_key.pub)"
#+END_SRC
Upload them.
#+BEGIN_SRC shell-script
vault kv put concourse/web \
session_signing_key="$_session_signing_key" \
tsa_host_key="$_tsa_host_key" \
tsa_host_key_pub="$_tsa_host_key_pub" \
local_user_pass="changeme" \
local_user_name="changeme"
#+END_SRC
Manually specify and upload the secrets for PostgreSQL.
#+BEGIN_SRC shell-script
vault kv put concourse/db \
password="changeme" \
user="changeme" \
database="changeme" \
root_user="changeme" \
root_password="changeme"
#+END_SRC
#+BEGIN_TINY
The policy file expects the path to be prefixed with ~kv/~ if you're using KVv1, with v2 it expects ~kv/data/~. That's
not the case for the ~vault kv~ subcommand, because it automatically prepends the correct prefix.
#+END_TINY
** Nomad Web Job
The basic idea of this job is that we start 2 tasks, one for PostgreSQL and the other one for a ConcourseCI, this
could be clustered, but it's out of the scope of this post.
#+BEGIN_WARNING
The paths in =read=-like go template calls, must be prefixed with =kv/data= while the =list=-like calls, must be
prefixed with =kv/metadata= if you're using KVv2. I figured that out by inspecting the =vault kv= subcommand with
the =-output-curl-string= flag.
#+END_WARNING
#+BEGIN_SRC hcl
job "concourse-ci-web" {
datacenters = ["homelab-1"]
type = "service"
group "svc" {
count = 1
network {
mode ="bridge"
port "db" {
to = "5432"
}
port "http" {
static = "8080"
to = "8080"
}
port "tsa" {
static = "2222"
to = "2222"
}
}
service {
name = "concourse-web"
port = "http"
check {
type = "http"
path = "/"
interval = "2s"
timeout = "2s"
}
}
service {
name = "concourse-tsa"
port = "2222"
}
service {
name = "concourse-db"
port = "db"
}
task "db" {
driver = "docker"
config {
image = "magicrb/postgresql@sha256:changeme"
ports = ["db"]
volumes = [
"secrets/main.sh:/data/scripts/main.sh",
]
}
vault {
policies = ["concourse-db-policy"]
}
template {
data = <<EOF
{{ with secret "kv/data/concourse/db" }}
USER={{ .Data.data.root_user }}
PASSWORD={{ .Data.data.root_password }}
{{ end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/data.env"
env = true
}
template {
data = <<EOF
#!/usr/bin/env bash
env
{{ with secret "kv/data/concourse/db" }}
if process_psql -tc "SELECT 1 FROM pg_database WHERE datname = '{{ .Data.data.database }}'" | grep -q 1
then
process_psql -c "ALTER USER {{ .Data.data.user }} WITH PASSWORD '{{ .Data.data.password }}'";
else
process_psql -c "CREATE DATABASE {{ .Data.data.database }}"
process_psql -c "CREATE USER {{ .Data.data.user }} WITH ENCRYPTED PASSWORD '{{ .Data.data.password }}'"
process_psql -c "GRANT ALL PRIVILEGES ON DATABASE {{ .Data.data.database }} TO {{ .Data.data.user }}"
{{ end }}
echo "host all all all md5" >> /data/postgresql/pg_hba.conf
cat << EOD >> /data/postgresql/postgresql.conf
listen_addresses = '0.0.0.0'
password_encryption = md5
EOD
fi
EOF
destination = "${NOMAD_SECRETS_DIR}/main.sh"
}
}
task "web" {
driver = "docker"
config {
image = "concourse/concourse@sha256:changeme"
command = "web"
ports = ["http", "tsa"]
}
vault {
policies = ["concourse-web-policy"]
}
restart {
attempts = 5
delay = "15s"
}
template {
data = <<EOF
{{ with secret "kv/data/concourse/web" }}
CONCOURSE_ADD_LOCAL_USER={{ .Data.data.local_user_name }}:{{ .Data.data.local_user_pass }}
CONCOURSE_MAIN_TEAM_LOCAL_USER={{ .Data.data.local_user_name }}
{{ end }}
CONCOURSE_SESSION_SIGNING_KEY={{ env "NOMAD_SECRETS_DIR" }}/session_signing_key
CONCOURSE_TSA_HOST_KEY={{ env "NOMAD_SECRETS_DIR" }}/tsa_host_key
CONCOURSE_TSA_AUTHORIZED_KEYS={{ env "NOMAD_SECRETS_DIR" }}/authorized_worker_keys
CONCOURSE_EXTERNAL_URL=http://blowhole.in.redalder.org:8019/
CONCOURSE_POSTGRES_HOST=127.0.0.1
CONCOURSE_POSTGRES_PORT=5432
{{ with secret "kv/data/concourse/db" }}
CONCOURSE_POSTGRES_DATABASE={{ .Data.data.database }}
CONCOURSE_POSTGRES_USER={{ .Data.data.user }}
CONCOURSE_POSTGRES_PASSWORD={{ .Data.data.password }}
{{ end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/data.env"
env = true
}
template {
data = <<EOF
{{ with secret "kv/data/concourse/web" }}{{ .Data.data.session_signing_key }}{{ end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/session_signing_key"
}
template {
data = <<EOF
{{ with secret "kv/data/concourse/web" }}{{ .Data.data.tsa_host_key }}{{ end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/tsa_host_key"
}
template {
data = <<EOF
{{ range secrets "kv/metadata/concourse/workers/" }}
{{ with secret (printf "kv/data/concourse/workers/%s" .) }}
{{ .Data.data.public_key }}
{{ end }}
{{ end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/authorized_worker_keys"
change_mode = "signal"
change_signal = "SIGHUP"
}
}
}
}
#+END_SRC
The interesting bits are the init script for the PostgreSQL container (it's Nix based and written by me), and the last
template stanza.
*** PostgreSQL
Whatever files you put into =/data/scripts=, will be executed after the initial setup of the database but before the
real DB process starts. They are a really convenient and flexible way to set up databases. In the script you have access
to =bash=, =postgresql=, and =busybox=. You can find it on [[https://hub.docker.com/r/magicrb/postgresql][Docker Hub.]] \\
You may be asking how can we get the script into =/data/scripts=, when the template system of Nomad only allows one
to output into =/alloc=, =/local=, or =/secrets=. Well, that's what the single volume is for, you can specify the
source and destination, and if the source is *relative* not absolute, docker will bind mount from a path in the
container to another path in the container.
#+BEGIN_WARNING
The source path must be *relative*, if it's absolute, Docker will treat it as a host volume!
#+END_WARNING
*** Template Stanza
The piece of Go template magic you can see here,
#+BEGIN_SRC fundamental
{{ range secrets "kv/metadata/concourse/workers/" }}
{{ with secret (printf "kv/data/concourse/workers/%s" .) }}
{{ .Data.data.public_key }}
{{ end }}
{{ end }}
#+END_SRC
will iterate through all the entries in =concourse/workers= and execute the inner templating for each, then we
just fetch each secret and get the public_key. This template will automatically re-execute every so often and by
setting the =change_mode= to =signal= and the =change_signal= to =SIGHUP=, we tell Nomad to send a =SIGHUP= to PID 1
in the container. ConcourseCI will reload its configuration upon receiving a =SIGHUP=, which includes reloading the
list of authorized keys, neat huh?
** Nomad Worker Job
This job is weirder and more complex, we first use a custom built container, which generates the short lived worker
keypair and saves it to Vault, at =concourse/workers/<host_hostname>=. You can actually get the host's hostname from
the =node.unique.name= environment variable, which is not available to the actual code running in the container, but
only in the =template= stanzas. We therefore save its content into a real environment variable. After that it's
quite simple. \\
I'll add the simplified script, which generates and saves the generated secrets. The container must run as a
pre-start task, which is not a sidecar, so that it completes before the main task starts. Template evaluation happens
at runtime, so the secrets will be properly resolved.
#+BEGIN_SRC shell-script
# Generate a fresh worker key pair and store it in Vault under this node's
# hostname. HOST_HOSTNAME is populated from node.unique.name by the
# task's template stanza.
concourse generate-key -t ssh -f /worker_key

_worker_key="$(cat /worker_key)"
_worker_key_pub="$(cat /worker_key.pub)"

# Escape real newlines as literal \n so the keys survive the trip
# through the JSON payload below.
echo -e "${_worker_key//$'\n'/\\\\n}" > /worker_key
echo -e "${_worker_key_pub//$'\n'/\\\\n}" > /worker_key.pub

# A file is used instead of arguments to avoid any maximum-argument-length
# limits on large keys.
JSON_FMT='{"public_key":"%s","private_key":"%s"}'
printf "$JSON_FMT" "$(< /worker_key.pub)" "$(< /worker_key)" > secret.json

# Key the secret by this node's hostname rather than a hard-coded node
# name, so the same script works on every worker node.
vault kv put "kv/concourse/workers/${HOST_HOSTNAME}" @secret.json
#+END_SRC
The Bash substitutions are there only to avoid depending on another program like =sed=, which could do it too, and it
would be readable. I also opted for using JSON file, because I was worried I might hit the maximum argument length. \
One thing to note is that I haven't yet figured out a way to dynamically get the address of one of the available
Vault instances. So for now, it's okay to hardcode it in.
#+BEGIN_SRC hcl
job "concourse-ci-worker" {
datacenters = ["homelab-1"]
type = "system"
group "svc" {
count = 1
network {
mode = "bridge"
}
task "create-secret" {
driver = "docker"
config {
image = "useyourown"
}
vault {
policies = ["concourse-worker-policy"]
}
lifecycle {
sidecar = false
hook = "prestart"
}
template {
data = <<EOF
HOST_HOSTNAME="{{ env "node.unique.name" }}"
VAULT_ADDR="https://example.com:8200/"
EOF
env = true
destination = "${NOMAD_TASK_DIR}/data.env"
}
}
task "worker" {
driver = "docker"
config {
image = "concourse/concourse@sha256:changeme"
command = "worker"
privileged = true
}
vault {
policies = ["concourse-worker-policy"]
}
template {
data = <<EOF
CONCOURSE_WORK_DIR=/opt/concourse/worker
CONCOURSE_TSA_HOST=example.com:2222
CONCOURSE_TSA_PUBLIC_KEY={{ env "NOMAD_SECRETS_DIR" }}/tsa_host_key.pub
CONCOURSE_TSA_WORKER_PRIVATE_KEY={{ env "NOMAD_SECRETS_DIR" }}/worker.key
EOF
env = true
destination = "${NOMAD_SECRETS_DIR}/data.env"
}
template {
data = <<EOF
{{ with secret (printf "kv/data/concourse/workers/%s" (env "node.unique.name") ) }}
{{ .Data.data.private_key }}
{{ end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/worker.key"
}
template {
data = <<EOF
{{ with secret "kv/data/concourse/web" }}{{ .Data.data.tsa_host_key_pub }}{{ end }}
EOF
destination = "${NOMAD_SECRETS_DIR}/tsa_host_key.pub"
}
}
}
}
#+END_SRC

34
docker/apache.cfg Normal file
View file

@ -0,0 +1,34 @@
# Minimal Apache HTTPD configuration for serving the statically generated
# website out of /var/www inside the container.

# Load only the modules actually needed: event MPM, access logging,
# unix user/group switching, authorization, and directory indexes.
LoadModule mpm_event_module modules/mod_mpm_event.so
LoadModule log_config_module modules/mod_log_config.so
LoadModule unixd_module modules/mod_unixd.so
LoadModule authz_core_module modules/mod_authz_core.so
LoadModule dir_module modules/mod_dir.so

# Log straight to the container's stderr/stdout so Docker captures them.
ErrorLog /dev/stderr
TransferLog /dev/stdout
LogLevel info

Listen 0.0.0.0:80
ServerRoot /var/www
ServerName blowhole
PidFile /httpd.pid

# Drop privileges to the dedicated www-data user/group.
User www-data
Group www-data

DocumentRoot /var/www

# Deny everything by default; access is granted per-vhost below.
<Directory />
Require all denied
Options SymLinksIfOwnerMatch
</Directory>

<VirtualHost *:80>
<Directory /var/www>
Require all granted
Options +Indexes +FollowSymlinks
DirectoryIndex index.html
</Directory>
</VirtualHost>

1
docker/entrypoint.sh Normal file
View file

@ -0,0 +1 @@
# Launch Apache HTTPD in the foreground (required so dumb-init can
# supervise it) using the config file whose Nix store path is supplied
# via the _apache_cfg environment variable set in the image's Env.
# Quoted to avoid word splitting should the path ever contain spaces.
apachectl start -f "$_apache_cfg" -D FOREGROUND

View file

@ -1,5 +1,21 @@
{
"nodes": {
"emacs-htmlize": {
"flake": false,
"locked": {
"lastModified": 1597563983,
"narHash": "sha256-wiRnlWKYQSvQijrymSkEbsW3581LOeuTItkxvTgHXDE=",
"owner": "hniksic",
"repo": "emacs-htmlize",
"rev": "49205105898ba8993b5253beec55d8bddd820a70",
"type": "github"
},
"original": {
"owner": "hniksic",
"repo": "emacs-htmlize",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1614447571,
@ -29,10 +45,27 @@
"type": "github"
}
},
"rlib": {
"locked": {
"lastModified": 1615407344,
"narHash": "sha256-b9fQG73Znv3x4UENEhe75EXDH8x1RcIz6NXj05OHBmY=",
"ref": "master",
"rev": "ea13b638781b3c2a5baf97149873f51a1927d89c",
"revCount": 11,
"type": "git",
"url": "https://gitea.redalder.org/RedAlder/rlib"
},
"original": {
"type": "git",
"url": "https://gitea.redalder.org/RedAlder/rlib"
}
},
"root": {
"inputs": {
"emacs-htmlize": "emacs-htmlize",
"nixpkgs": "nixpkgs",
"org-thtml": "org-thtml"
"org-thtml": "org-thtml",
"rlib": "rlib"
}
}
},

113
flake.nix
View file

@ -5,19 +5,38 @@
url = "github:juanjosegarciaripoll/org-thtml";
flake = false;
};
emacs-htmlize = {
url = "github:hniksic/emacs-htmlize";
flake = false;
};
outputs = { self, nixpkgs, org-thtml }:
rlib = {
url = "git+https://gitea.redalder.org/RedAlder/rlib";
flake = true;
};
};
outputs = { self, nixpkgs, org-thtml, emacs-htmlize, ... }@inputs:
let
supportedSystems = [ "x86_64-linux" "i686-linux" "aarch64-linux" ];
forAllSystems = f: nixpkgs.lib.genAttrs supportedSystems (system: f system);
in
{
website = forAllSystems (system:
let
pkgs = import nixpkgs { inherit system; };
in
rlib = inputs.rlib.lib {
inherit nixpkgs;
system = "x86_64-linux";
packages = {
nixpkgs = {
config = {};
versions = {
stable = inputs.nixpkgs;
};
};
custom = {};
};
self = rlib;
};
websiteBase = pkgs:
pkgs.stdenv.mkDerivation {
name = "magic_rb-website";
version = "0.1";
@ -26,6 +45,7 @@
buildPhase = ''
cp ${org-thtml}/ox-thtml.el ./ox-thtml.el
cp ${emacs-htmlize}/htmlize.el ./htmlize.el
mkdir tmp && export HOME=$(pwd)/tmp
emacs --script ./make.el
@ -36,7 +56,84 @@
mkdir -p $out
cp -r public_html/* $out
'';
}
};
in
{
website = forAllSystems (system:
let
pkgs = import nixpkgs { inherit system; };
in
websiteBase pkgs
);
dockerImages = with rlib.dockerTools; {
apache = buildLayeredImage
({ nixpkgs, custom, rlib }:
with rlib.dockerTools;
let
shadow = makeShadow {
withNixbld = false;
users = [
{
name = "www-data";
uid = "5000";
gid = "5000";
home = "/var/empty";
shell = "${nixpkgs.stable.bash}/bin/bash";
description = "Apache HTTPD user";
}
];
groups = [
{
name = "www-data";
id = 5000;
}
];
};
ca-certificates = makeCerts {
certs = [];
};
apache = nixpkgs.stable.apache.override {
proxySupport = false;
sslSupport = false;
http2Support = false;
ldapSupport = false;
libxml2Support = false;
brotliSupport = false;
};
entrypoint = nixpkgs.stable.writeShellScriptBin "entrypoint.sh" (builtins.readFile ./docker/entrypoint.sh);
website = nixpkgs.stable.runCommandNoCCLocal "website" {} ''
mkdir -p $out/var/
ln -s ${websiteBase nixpkgs.stable} $out/var/www
'';
logs = nixpkgs.stable.runCommandNoCCLocal "logs" {} ''
mkdir -p $out/var/log/apache2
'';
in
{
name = "magic_rb-website-apache";
tag = "latest";
contents = [
entrypoint
shadow
ca-certificates
makeTmp
makeBasicBin
website
logs
];
config = with nixpkgs.stable; {
Entrypoint = [ "${dumb-init}/bin/dumb-init" "--" "/bin/entrypoint.sh" ];
Env = [
"PATH=${lib.makeBinPath [ busybox apacheHttpd bash ]}"
"_apache_cfg=${./docker/apache.cfg}"
];
};
});
};
};
}

11
make.el
View file

@ -4,6 +4,7 @@
;;; Code:
(load-file "./ox-thtml.el")
;; (load-file "./htmlize.el")
(require 'org)
(require 'ox)
(require 'ox-html)
@ -22,7 +23,12 @@
;; DOES NOT WORK https://github.com/alphapapa/unpackaged.el#export-to-html-with-useful-anchors
(setq org-export-with-sub-superscripts '{})
(setq org-export-with-sub-superscripts '{}
org-export-headline-levels 6
; emacs-htmlize does not work when Emacs is ran in =script= mode unfortunately
;; org-html-html5-fancy t
;; org-html-htmlize-output-type 'inline-css
)
(defvar org-publish-project-alist)
(setq org-publish-project-alist
@ -36,7 +42,8 @@
:with-date nil
:html-template ,(templated-html-load-template "templates/blog.html")
:publishing-function org-html-publish-to-templated-html
:headline-levels 4
;; :htmlized-source t
:headline-levels 5
:exclude "^\\(index.org\\|*.inc\\)"
:auto-preamble t
:auto-sitemap t