diff --git a/flake.nix b/flake.nix index f137edc..09ae101 100644 --- a/flake.nix +++ b/flake.nix @@ -47,6 +47,11 @@ nixos/systems/liveusb nixos/systems/blowhole + nixng/containers/matrix/mautrix-signal + nixng/containers/matrix/mautrix-discord + nixng/containers/matrix/mautrix-facebook + nixng/containers/matrix/heisenbridge + nixng/containers/matrix/synapse nixng/containers/website nixng/containers/ds3os overlays/udp-over-tcp.nix diff --git a/nixng/containers/matrix/heisenbridge/default.nix b/nixng/containers/matrix/heisenbridge/default.nix new file mode 100644 index 0000000..0cae0b3 --- /dev/null +++ b/nixng/containers/matrix/heisenbridge/default.nix @@ -0,0 +1,36 @@ +{ inputs, ... }: +{ + flake.nixngConfigurations.heisenbridge = inputs.nixng.nglib.makeSystem { + system = "x86_64-linux"; + name = "heisenbridge"; + inherit (inputs) nixpkgs; + config = + { pkgs, lib, ... }: + let + inherit (lib) + getExe; + in + { + dumb-init = { + enable = true; + type.services = { }; + }; + + init.services.heisenbridge = { + enabled = true; + # heisenbridge needs to retry the connection and task restarting does not work currently + shutdownOnExit = false; + script = pkgs.writeShellScript "heisenbridge" '' + REGISTRATION_FILE="/var/lib/registrations/heisenbridge.yaml" + + ${getExe pkgs.heisenbridge} 'https://matrix.redalder.org/' \ + -c "$REGISTRATION_FILE" \ + $([ -e "$REGISTRATION_FILE" ] || echo "--generate") \ + -l 127.0.0.1 \ + -p 9898 \ + -o @magic_rb:matrix.redalder.org + ''; + }; + }; + }; +} diff --git a/nixng/containers/matrix/mautrix-discord/default.nix b/nixng/containers/matrix/mautrix-discord/default.nix new file mode 100644 index 0000000..6129eb8 --- /dev/null +++ b/nixng/containers/matrix/mautrix-discord/default.nix @@ -0,0 +1,41 @@ +{ inputs, config, ... }: +{ + flake.nixngConfigurations.mautrixDiscord = inputs.nixng.nglib.makeSystem { + system = "x86_64-linux"; + name = "mautrix-discord"; + inherit (inputs) nixpkgs; + config = + { pkgs, lib, ... }: + { + dumb-init = { + enable = true; + type.services = { }; + }; + + init.services.mautrix-discord = { + enabled = true; + shutdownOnExit = true; + script = + let + inherit (lib) + getExe; + mautrix-discord = (pkgs.appendOverlays [ config.flake.overlays.mautrix-discord ]).mautrix-discord; + in + pkgs.writeShellScript "mautrix-discord" + '' + DATA_DIR="/var/lib/mautrix-discord" + CONFIG_FILE="$DATA_DIR/config.yaml" + REGISTRATION_FILE="/var/lib/registrations/mautrix-discord.yaml" + DB_FILE="$DATA_DIR/sqlite.db" + + ${getExe pkgs.envsubst} < ${./mautrix-discord.yaml} > "$CONFIG_FILE" + chmod 755 "$CONFIG_FILE" + + [ -e "$REGISTRATION_FILE" ] || \ + ${getExe mautrix-discord} -c "$CONFIG_FILE" -r "$REGISTRATION_FILE" -g + ${getExe mautrix-discord} -c "$CONFIG_FILE" -r "$REGISTRATION_FILE" -n + ''; + }; + }; + }; +} diff --git a/nixng/containers/matrix/mautrix-discord/mautrix-discord.yaml b/nixng/containers/matrix/mautrix-discord/mautrix-discord.yaml new file mode 100644 index 0000000..5f3c523 --- /dev/null +++ b/nixng/containers/matrix/mautrix-discord/mautrix-discord.yaml @@ -0,0 +1,319 @@ +# Homeserver details. +homeserver: + # The address that this appservice can use to connect to the homeserver. + address: https://matrix.redalder.org + # Publicly accessible base URL for media, used for avatars in relay mode. + # If not set, the connection address above will be used. + public_address: null + # The domain of the homeserver (also known as server_name, used for MXIDs, etc). + domain: matrix.redalder.org + + # What software is the homeserver running? 
+ # Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here. + software: standard + # The URL to push real-time bridge status to. + # If set, the bridge will make POST requests to this URL whenever a user's discord connection state changes. + # The bridge will use the appservice as_token to authorize requests. + status_endpoint: null + # Endpoint for reporting per-message status. + message_send_checkpoint_endpoint: null + # Does the homeserver support https://github.com/matrix-org/matrix-spec-proposals/pull/2246? + async_media: false + +# Application service host/registration related details. +# Changing these values requires regeneration of the registration. +appservice: + # The address that the homeserver can use to connect to this appservice. + address: http://localhost:29334 + + # The hostname and port where this appservice should listen. + hostname: 0.0.0.0 + port: 29334 + + # Database config. + database: + # The database type. "sqlite3-fk-wal" and "postgres" are supported. + type: postgres + # The database URI. + # SQLite: A raw file path is supported, but `file:<path>?_txlock=immediate` is recommended. + # https://github.com/mattn/go-sqlite3#connection-string + # Postgres: Connection string. For example, postgres://user:password@host/database?sslmode=disable + # To connect via Unix socket, use something like postgres:///dbname?host=/var/run/postgresql + uri: postgres://mautrix-discord:${MAUTRIX_DISCORD_APPSERVICE_DATABASE_PASSWORD}@127.0.0.1/mautrix-discord?sslmode=disable + # Maximum number of connections. Mostly relevant for Postgres. + max_open_conns: 20 + max_idle_conns: 2 + # Maximum connection idle time and lifetime before they're closed. Disabled if null. + # Parsed with https://pkg.go.dev/time#ParseDuration + max_conn_idle_time: null + max_conn_lifetime: null + + # The unique ID of this appservice. + id: mudiscord + # Appservice bot details. + bot: + # Username of the appservice bot. + username: mudiscordbot + # Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty + # to leave display name/avatar as-is. + displayname: Mautrix Discord bridge bot + avatar: mxc://maunium.net/nIdEykemnwdisvHbpxflpDlC + + # Whether or not to receive ephemeral events via appservice transactions. + # Requires MSC2409 support (i.e. Synapse 1.22+). + ephemeral_events: true + + # Should incoming events be handled asynchronously? + # This may be necessary for large public instances with lots of messages going through. + # However, messages will not be guaranteed to be bridged in the same order they were sent in. + async_transactions: false + + # Authentication tokens for AS <-> HS communication. Autogenerated; do not modify. + as_token: "${MAUTRIX_DISCORD_APPSERVICE_AS_TOKEN}" + hs_token: "${MAUTRIX_DISCORD_APPSERVICE_HS_TOKEN}"
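+ + # The ${...} tokens above (the database password and the as_token/hs_token) are not + # literal values: default.nix pipes this template through envsubst when the container + # starts, so matching variables (e.g. MAUTRIX_DISCORD_APPSERVICE_AS_TOKEN) are expected + # in the container environment. How those secrets reach the environment is deployment-specific.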
+ +# Bridge config +bridge: + # Localpart template of MXIDs for Discord users. + # {{.}} is replaced with the internal ID of the Discord user. + username_template: discord_{{.}} + # Displayname template for Discord users. This is also used as the room name in DMs if private_chat_portal_meta is enabled. + # Available variables: + # .ID - Internal user ID + # .Username - User's displayname on Discord + # .Discriminator - The 4 numbers after the name on Discord + # .Bot - Whether the user is a bot + # .System - Whether the user is an official system user + displayname_template: '{{.Username}}#{{.Discriminator}}{{if .Bot}} (bot){{end}}' + # Displayname template for Discord channels (bridged as rooms, or spaces when type=4). + # Available variables: + # .Name - Channel name, or user displayname (pre-formatted with displayname_template) in DMs. + # .ParentName - Parent channel name (used for categories). + # .GuildName - Guild name. + # .NSFW - Whether the channel is marked as NSFW. + # .Type - Channel type (see values at https://github.com/bwmarrin/discordgo/blob/v0.25.0/structs.go#L251-L267) + channel_name_template: ' + {{- if or (eq .Type 3) (eq .Type 4) -}} + {{.Name -}} + {{- else if or (eq .Type 0) (eq .Type 2) -}} + {{ .Name}} - {{.ParentName}} - {{.GuildName}} + {{- else -}} + #{{.Name}} + {{- end -}}' + # Displayname template for Discord guilds (bridged as spaces). + # Available variables: + # .Name - Guild name + guild_name_template: '{{.Name}}' + # Whether to explicitly set the avatar and room name for private chat portal rooms. + # If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms. + # If set to `always`, all DM rooms will have explicit names and avatars set. + # If set to `never`, DM rooms will never have names and avatars set. + private_chat_portal_meta: default + + portal_message_buffer: 128 + + # Number of private channel portals to create on bridge startup. + # Other portals will be created when receiving messages. + startup_private_channel_create_limit: 5 + # Should the bridge send a read receipt from the bridge bot when a message has been sent to Discord? + delivery_receipts: false + # Whether the bridge should send the message status as a custom com.beeper.message_send_status event. + message_status_events: false + # Whether the bridge should send error notices via m.notice events when a message fails to bridge. + message_error_notices: true + # Should the bridge use space-restricted join rules instead of invite-only for guild rooms? + # This can avoid unnecessary invite events in guild rooms when members are synced in. + restricted_rooms: true + # Should the bridge automatically join the user to threads on Discord when the thread is opened on Matrix? + # This only works with clients that support thread read receipts (MSC3771 added in Matrix v1.4). + autojoin_thread_on_open: true + # Should inline fields in Discord embeds be bridged as HTML tables to Matrix? + # Tables aren't supported in all clients, but are the only way to emulate the Discord inline field UI. + embed_fields_as_tables: true + # Should guild channels be muted when the portal is created? This is only meant for single-user instances, + # as it won't mute the channel for all users if there are multiple Matrix users in the same Discord guild. + mute_channels_on_create: false + # Should the bridge update the m.direct account data event when double puppeting is enabled? + # Note that updating the m.direct event is not atomic (except with mautrix-asmux) + # and is therefore prone to race conditions. + sync_direct_chat_list: false + # Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run. + # This field will automatically be changed back to false afterwards, except if the config file is not writable.
+ resend_bridge_info: false + # Should incoming custom emoji reactions be bridged as mxc:// URIs? + # If set to false, custom emoji reactions will be bridged as the shortcode instead, and the image won't be available. + custom_emoji_reactions: true + # Should the bridge attempt to completely delete portal rooms when a channel is deleted on Discord? + # If true, the bridge will try to kick Matrix users from the room. Otherwise, the bridge only makes ghosts leave. + delete_portal_on_channel_delete: false + # Should the bridge delete all portal rooms when you leave a guild on Discord? + # This only applies if the guild has no other Matrix users on this bridge instance. + delete_guild_on_leave: true + # Whether or not created rooms should have federation enabled. + # If false, created portal rooms will never be federated. + federate_rooms: false + # Settings for converting animated stickers. + animated_sticker: + # Format to which animated stickers should be converted. + # disable - No conversion, send as-is (lottie JSON) + # png - converts to non-animated png (fastest) + # gif - converts to animated gif + # webm - converts to webm video, requires ffmpeg executable with vp9 codec and webm container support + # webp - converts to animated webp, requires ffmpeg executable with webp codec/container support + target: webp + # Arguments for converter. All converters take width and height. + args: + width: 320 + height: 320 + fps: 25 # only for webm, webp and gif (2, 5, 10, 20 or 25 recommended) + # Servers to always allow double puppeting from + double_puppet_server_map: {} + # example.com: https://example.com + # Allow using double puppeting from any server with a valid client .well-known file. + double_puppet_allow_discovery: false + # Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth + # + # If set, double puppeting will be enabled automatically for local users + # instead of users having to find an access token and run `login-matrix` + # manually. + login_shared_secret_map: {} + # example.com: foobar + + # The prefix for commands. Only required in non-management rooms. + command_prefix: '!discord' + # Messages sent upon joining a management room. + # Markdown is supported. The defaults are listed below. + management_room_text: + # Sent when joining a room. + welcome: "Hello, I'm a Discord bridge bot." + # Sent when joining a management room and the user is already logged in. + welcome_connected: "Use `help` for help." + # Sent when joining a management room and the user is not logged in. + welcome_unconnected: "Use `help` for help or `login` to log in." + # Optional extra text sent when joining a management room. + additional_help: "" + + # Settings for backfilling messages. + backfill: + # Limits for forward backfilling. + forward_limits: + # Initial backfill (when creating portal). 0 means backfill is disabled. + # A special unlimited value is not supported, you must set a limit. Initial backfill will + # fetch all messages first before backfilling anything, so high limits can take a lot of time. + initial: + dm: 0 + channel: 0 + # Missed message backfill (on startup). + # 0 means backfill is disabled, -1 means fetch all messages since last bridged message. + # When using unlimited backfill (-1), messages are backfilled as they are fetched. + # With limits, all messages up to the limit are fetched first and backfilled afterwards. + missed: + dm: 0 + channel: 0 + # Maximum members in a guild to enable backfilling. Set to -1 to disable limit. 
+ # This can be used as a rough heuristic to disable backfilling in channels that are too active. + # Currently only applies to missed message backfill. + max_guild_members: -1 + + # End-to-bridge encryption support options. + # + # See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info. + encryption: + # Allow encryption, work in group chat rooms with e2ee enabled + allow: false + # Default to encryption, force-enable encryption in all portals the bridge creates + # This will cause the bridge bot to be in private chats for the encryption to work properly. + default: false + # Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data. + appservice: false + # Require encryption, drop any unencrypted messages. + require: false + # Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled. + # You must use a client that supports requesting keys from other users to use this feature. + allow_key_sharing: false + # Options for deleting megolm sessions from the bridge. + delete_keys: + # Beeper-specific: delete outbound sessions when hungryserv confirms + # that the user has uploaded the key to key backup. + delete_outbound_on_ack: false + # Don't store outbound sessions in the inbound table. + dont_store_outbound: false + # Ratchet megolm sessions forward after decrypting messages. + ratchet_on_decrypt: false + # Delete fully used keys (index >= max_messages) after decrypting messages. + delete_fully_used_on_decrypt: false + # Delete previous megolm sessions from same device when receiving a new one. + delete_prev_on_new_session: false + # Delete megolm sessions received from a device when the device is deleted. + delete_on_device_delete: false + # Periodically delete megolm sessions when 2x max_age has passed since receiving the session. + periodically_delete_expired: false + # What level of device verification should be required from users? + # + # Valid levels: + # unverified - Send keys to all devices in the room. + # cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys. + # cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes). + # cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot. + # Note that creating user signatures from the bridge bot is not currently possible. + # verified - Require manual per-device verification + # (currently only possible by modifying the `trust` column in the `crypto_device` database table). + verification_levels: + # Minimum level for which the bridge should send keys to when bridging messages from Discord to Matrix. + receive: unverified + # Minimum level that the bridge should accept for incoming Matrix messages. + send: unverified + # Minimum level that the bridge should require for accepting key requests. + share: cross-signed-tofu + # Options for Megolm room key rotation. These options allow you to + # configure the m.room.encryption event content. See: + # https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for + # more information about that event. + rotation: + # Enable custom Megolm room key rotation settings. Note that these + # settings will only apply to rooms created after this option is + # set. + enable_custom: false + # The maximum number of milliseconds a session should be used + # before changing it. The Matrix spec recommends 604800000 (a week) + # as the default.
+ milliseconds: 604800000 + # The maximum number of messages that should be sent with a given a + # session before changing it. The Matrix spec recommends 100 as the + # default. + messages: 100 + + # Settings for provisioning API + provisioning: + # Prefix for the provisioning API paths. + prefix: /_matrix/provision + # Shared secret for authentication. If set to "generate", a random secret will be generated, + # or if set to "disable", the provisioning API will be disabled. + shared_secret: generate + + # Permissions for using the bridge. + # Permitted values: + # relay - Talk through the relaybot (if enabled), no access otherwise + # user - Access to use the bridge to chat with a Discord account. + # admin - User level and some additional administration tools + # Permitted keys: + # * - All Matrix users + # domain - All users on that homeserver + # mxid - Specific user + permissions: + "*": relay + "matrix.redalder.org": user + "@magic_rb:matrix.redalder.org": admin + +# Logging config. See https://github.com/tulir/zeroconfig for details. +logging: + min_level: debug + writers: + - type: stdout + format: pretty-colored + - type: file + format: json + filename: ./logs/mautrix-discord.log + max_size: 100 + max_backups: 10 + compress: true diff --git a/nixng/containers/matrix/mautrix-facebook/default.nix b/nixng/containers/matrix/mautrix-facebook/default.nix new file mode 100644 index 0000000..e593891 --- /dev/null +++ b/nixng/containers/matrix/mautrix-facebook/default.nix @@ -0,0 +1,45 @@ +{ inputs, ... }: +{ + flake.nixngConfigurations.mautrixFacebook = inputs.nixng.nglib.makeSystem { + system = "x86_64-linux"; + name = "mautrix-facebook"; + inherit (inputs) nixpkgs; + config = + { pkgs, lib, ... }: + let + inherit (lib) + singleton; + in + { + dumb-init = { + enable = true; + type.services = { }; + }; + + init.services.mautrix-facebook = { + enabled = true; + shutdownOnExit = true; + script = + let + mautrix-facebook = pkgs.mautrix-facebook.overridePythonAttrs (old: { + propagatedBuildInputs = singleton pkgs.python3.pkgs.aiosqlite ++ old.propagatedBuildInputs; + }); + in + pkgs.writeShellScript "mautrix-facebook" + '' + DATA_DIR="/var/lib/mautrix-facebook" + CONFIG_FILE="$DATA_DIR/config.yaml" + REGISTRATION_FILE="/var/lib/registrations/mautrix-facebook.yaml" + DB_FILE="$DATA_DIR/sqlite.db" + + cp ${./mautrix-facebook.yaml} "$CONFIG_FILE" ; chmod 755 "$CONFIG_FILE" + ${pkgs.sqlite}/bin/sqlite3 $DB_FILE '.databases ; .quit' + + [ -e "$REGISTRATION_FILE" ] || \ + ${mautrix-facebook}/bin/mautrix-facebook -c "$CONFIG_FILE" -r "$REGISTRATION_FILE" -g + ${mautrix-facebook}/bin/mautrix-facebook -c "$CONFIG_FILE" -r "$REGISTRATION_FILE" -n + ''; + }; + }; + }; +} diff --git a/nixng/containers/matrix/mautrix-facebook/mautrix-facebook.yaml b/nixng/containers/matrix/mautrix-facebook/mautrix-facebook.yaml new file mode 100644 index 0000000..513d60e --- /dev/null +++ b/nixng/containers/matrix/mautrix-facebook/mautrix-facebook.yaml @@ -0,0 +1,433 @@ +# Homeserver details +homeserver: + # The address that this appservice can use to connect to the homeserver. + address: https://matrix.redalder.org + # The domain of the homeserver (for MXIDs, etc). + domain: matrix.redalder.org + # Whether or not to verify the SSL certificate of the homeserver. + # Only applies if address starts with https:// + verify_ssl: true + # What software is the homeserver running? + # Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here. 
+ software: standard + # Number of retries for all HTTP requests if the homeserver isn't reachable. + http_retry_count: 4 + # The URL to push real-time bridge status to. + # If set, the bridge will make POST requests to this URL whenever a user's Facebook MQTT connection state changes. + # The bridge will use the appservice as_token to authorize requests. + status_endpoint: null + # Endpoint for reporting per-message status. + message_send_checkpoint_endpoint: null + # Whether asynchronous uploads via MSC2246 should be enabled for media. + # Requires a media repo that supports MSC2246. + async_media: false + +# Application service host/registration related details +# Changing these values requires regeneration of the registration. +appservice: + # The address that the homeserver can use to connect to this appservice. + address: http://localhost:29319 + + # The hostname and port where this appservice should listen. + hostname: 0.0.0.0 + port: 29319 + # The maximum body size of appservice API requests (from the homeserver) in mebibytes + # Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s + max_body_size: 1 + + # The full URI to the database. SQLite and Postgres are supported. + # Format examples: + # SQLite: sqlite:///filename.db + # Postgres: postgres://username:password@hostname/dbname + database: sqlite:////var/lib/mautrix-facebook/sqlite.db + # Additional arguments for asyncpg.create_pool() or sqlite3.connect() + # https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.pool.create_pool + # https://docs.python.org/3/library/sqlite3.html#sqlite3.connect + # For sqlite, min_size is used as the connection thread pool size and max_size is ignored. + # Additionally, SQLite supports init_commands as an array of SQL queries to run on connect (e.g. to set PRAGMAs). + database_opts: + min_size: 1 + max_size: 10 + + # Public part of web server for out-of-Matrix interaction with the bridge. + public: + # Whether or not the public-facing endpoints should be enabled. + enabled: true + # The prefix to use in the public-facing endpoints. + prefix: /mufb + # The base URL where the public-facing endpoints are available. The prefix is not added + # implicitly. + external: https://matrix.redalder.org/mufb + # Shared secret for integration managers such as mautrix-manager. + # If set to "generate", a random string will be generated on the next startup. + # If null, integration manager access to the API will not be possible. + shared_secret: generate + # Allow logging in within Matrix. If false, users can only log in using the web interface. + allow_matrix_login: true + # Segment API key to enable analytics tracking for web server endpoints. Set to null to disable. + # Currently the only events are login start, success and fail. + segment_key: null + # Optional user_id to use when sending Segment events. If null, defaults to using mxID. + segment_user_id: null + + # The unique ID of this appservice. + id: mufacebook + # Username of the appservice bot. + bot_username: mufacebookbot + # Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty + # to leave display name/avatar as-is. + bot_displayname: Mautrix Facebook bridge bot + bot_avatar: mxc://maunium.net/ygtkteZsXnGJLJHRchUwYWak + + # Whether or not to receive ephemeral events via appservice transactions. + # Requires MSC2409 support (i.e. Synapse 1.22+). + # You should disable bridge -> sync_with_custom_puppets when this is enabled. 
+ ephemeral_events: true + + # Authentication tokens for AS <-> HS communication. Autogenerated; do not modify. + # as_token: "This value is generated when generating the registration" + # hs_token: "This value is generated when generating the registration" + +# Prometheus telemetry config. Requires prometheus-client to be installed. +metrics: + enabled: false + listen_port: 8000 + +# Manhole config. +manhole: + # Whether or not opening the manhole is allowed. + enabled: false + # The path for the unix socket. + path: /var/tmp/mautrix-facebook.manhole + # The list of UIDs who can be added to the whitelist. + # If empty, any UIDs can be specified in the open-manhole command. + whitelist: + - 0 + +# Bridge config +bridge: + # Localpart template of MXIDs for Facebook users. + # {userid} is replaced with the user ID of the Facebook user. + username_template: "facebook_{userid}" + # Displayname template for Facebook users. + # {displayname} is replaced with the display name of the Facebook user + # as defined below in displayname_preference. + # Keys available for displayname_preference are also available here. + displayname_template: "{displayname} (FB)" + # Available keys: + # "name" (full name) + # "first_name" + # "last_name" + # "nickname" + # "own_nickname" (user-specific!) + displayname_preference: + - name + - first_name + + # The prefix for commands. Only required in non-management rooms. + command_prefix: "!fb" + + # Whether or not the Facebook users of logged in Matrix users should be + # invited to private chats when the user sends a message from another client. + invite_own_puppet_to_pm: false + # Whether or not to use /sync to get presence, read receipts and typing notifications + # when double puppeting is enabled + sync_with_custom_puppets: false + # Whether or not to update the m.direct account data event when double puppeting is enabled. + # Note that updating the m.direct event is not atomic (except with mautrix-asmux) + # and is therefore prone to race conditions. + sync_direct_chat_list: false + # Servers to always allow double puppeting from + double_puppet_server_map: {} + # Allow using double puppeting from any server with a valid client .well-known file. + double_puppet_allow_discovery: false + # Shared secrets for https://github.com/devture/matrix-synapse-shared-secret-auth + # + # If set, custom puppets will be enabled automatically for local users + # instead of users having to find an access token and run `login-matrix` + # manually. + # If using this for other servers than the bridge's server, + # you must also set the URL in the double_puppet_server_map. + login_shared_secret_map: {} + # Should presence from Facebook be bridged? This doesn't use the same API as the Android app, + # so it might be more suspicious to Facebook. + presence_from_facebook: false + # Whether or not to update avatars when syncing all contacts at startup. + update_avatar_initial_sync: true + + # Whether or not the bridge should send a read receipt from the bridge bot when a message has + # been sent to Facebook. + delivery_receipts: false + # Whether or not delivery errors should be reported as messages in the Matrix room. + delivery_error_reports: true + # Whether the bridge should send the message status as a custom com.beeper.message_send_status event. + message_status_events: false + # Whether to allow inviting arbitrary mxids to portal rooms + allow_invites: false + # Whether or not created rooms should have federation enabled. 
+ # If false, created portal rooms will never be federated. + federate_rooms: true + # Settings for backfilling messages from Facebook. + backfill: + # Allow backfilling at all? + enable: true + # Use MSC2716 for backfilling? If this is disabled, backfilling only happens when syncing threads, + # and the incremental settings below don't apply. + # + # This requires a server with MSC2716 support, which is currently an experimental feature in Synapse. + # It can be enabled by setting experimental_features -> msc2716_enabled to true in homeserver.yaml. + msc2716: false
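+ # For reference, the Synapse side of that switch would look roughly like this in + # homeserver.yaml (a sketch of the experimental flag named above; not part of this PR): + # experimental_features: + # msc2716_enabled: true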
+ # Use double puppets for backfilling? + # + # If using MSC2716, the double puppets must be in the appservice's user ID namespace + # (because the bridge can't use the double puppet access token with batch sending). + # + # Even without MSC2716, bridging old messages with correct timestamps requires the double + # puppets to be in an appservice namespace, or the server to be modified to allow + # overriding timestamps anyway. + double_puppet_backfill: false + # The maximum number of conversations that should be synced. + # Other conversations will be backfilled on demand when the start PM + # provisioning endpoint is used or when a message comes in from that + # chat. + # If set to -1, all conversations will be synced. + max_conversations: 20 + # The minimum amount of time to wait between syncing each thread. This + # helps avoid situations where you sync too quickly. + min_sync_thread_delay: 5 + # If this value is greater than 0, then if the conversation's last + # message was more than this number of hours ago, then the conversation + # will automatically be marked as read. + # Conversations that have a last message that is less than this number + # of hours ago will have their unread status synced from Facebook. + unread_hours_threshold: 0 + + # Settings for how quickly to backoff when rate-limits are encountered + # while backfilling. + backoff: + # How many seconds to wait after getting rate limited during a + # thread list fetch. + thread_list: 300 + # How many seconds to wait after getting rate limited during a + # message history fetch. + message_history: 300 + + # Settings for backfills. + # + # During initial/incremental sync, the entirety of the thread that is + # available will be backfilled. For example, on initial sync, about 20 + # messages are included for each thread in the thread list returned by + # the server. After that, incremental backfills will be run for each of + # the portals in a round-robin fashion until all portals have been + # backfilled as configured below. + incremental: + # The maximum number of pages to backfill per batch. + max_pages: 10 + # The maximum number of total pages to backfill per portal. + # If set to -1, infinite pages will be synced. + max_total_pages: -1 + # The number of seconds to wait between backfilling each page. + page_delay: 5 + # The number of seconds to wait after backfilling the batch of + # messages. + post_batch_delay: 20 + + periodic_reconnect: + # Interval in seconds in which to automatically reconnect all users. + # This can be used to automatically mitigate the bug where Facebook stops sending messages. + # Set to -1 to disable periodic reconnections entirely. + # Set to a list of two items to randomize the interval (min, max). + interval: -1 + # What to do in periodic reconnects. Either "refresh" or "reconnect" + mode: refresh + # Should even disconnected users be reconnected? + always: false + # Only reconnect if the user has been connected for longer than this value + min_connected_time: 0 + # The number of seconds that a disconnection can last without triggering an automatic re-sync + # and missed message backfilling when reconnecting. + # Set to 0 to always re-sync, or -1 to never re-sync automatically. + resync_max_disconnected_time: 5 + # The maximum number of conversations that should be synced when we get a + # message sync error. In general, 1 page (20) is sufficient. + max_startup_thread_sync_count: 20 + # Whether or not temporary disconnections should send notices to the notice room. + # If this is false, disconnections will never send messages and connections will only send + # messages if it was disconnected for more than resync_max_disconnected_time seconds. + temporary_disconnect_notices: false + # Disable bridge notices entirely + disable_bridge_notices: false + # Should Matrix m.notice-type messages be bridged to Facebook? + bridge_matrix_notices: true + on_reconnection_fail: + # What to do if a reconnection attempt fails? Options: reconnect, refresh, null + action: reconnect + # Seconds to wait before attempting to refresh the connection, set to a list of two items + # to randomize the interval (min, max). + wait_for: 0 + # Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run. + # This field will automatically be changed back to false afterwards, + # except if the config file is not writable. + resend_bridge_info: false + # When using double puppeting, should muted chats be muted in Matrix? + mute_bridging: false + # Whether or not mute status and tags should only be bridged when the portal room is created. + tag_only_on_create: true + # If set to true, downloading media from the CDN will use a plain aiohttp client without the usual headers or + # other configuration. This may be useful if you don't want to use the default proxy for large files. + sandbox_media_download: false + # URL to call to retrieve a proxy URL from (defaults to the http_proxy environment variable). + get_proxy_api_url: null + # Whether to explicitly set the avatar and room name for private chat portal rooms. + # If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms. + # If set to `always`, all DM rooms will have explicit names and avatars set. + # If set to `never`, DM rooms will never have names and avatars set. + private_chat_portal_meta: default + # Disable generating reply fallbacks? Some extremely bad clients still rely on them, + # but they're being phased out and will be completely removed in the future. + disable_reply_fallbacks: false
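+ + # Note: end-to-bridge encryption stays disabled below (allow: false). Turning it on + # would presumably also require the bridge's optional e2be dependencies (python-olm + # and friends) in the container image; see the docs link below for details.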
+ + # End-to-bridge encryption support options. + # + # See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info. + encryption: + # Allow encryption, work in group chat rooms with e2ee enabled + allow: false + # Default to encryption, force-enable encryption in all portals the bridge creates + # This will cause the bridge bot to be in private chats for the encryption to work properly. + default: false + # Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data. + appservice: false + # Require encryption, drop any unencrypted messages. + require: false + # Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled. + # You must use a client that supports requesting keys from other users to use this feature. + allow_key_sharing: false + # Options for deleting megolm sessions from the bridge. + delete_keys: + # Beeper-specific: delete outbound sessions when hungryserv confirms + # that the user has uploaded the key to key backup. + delete_outbound_on_ack: false + # Don't store outbound sessions in the inbound table. + dont_store_outbound: false + # Ratchet megolm sessions forward after decrypting messages. + ratchet_on_decrypt: false + # Delete fully used keys (index >= max_messages) after decrypting messages. + delete_fully_used_on_decrypt: false + # Delete previous megolm sessions from same device when receiving a new one. + delete_prev_on_new_session: false + # Delete megolm sessions received from a device when the device is deleted. + delete_on_device_delete: false + # Periodically delete megolm sessions when 2x max_age has passed since receiving the session. + periodically_delete_expired: false + # What level of device verification should be required from users? + # + # Valid levels: + # unverified - Send keys to all devices in the room. + # cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys. + # cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes). + # cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot. + # Note that creating user signatures from the bridge bot is not currently possible. + # verified - Require manual per-device verification + # (currently only possible by modifying the `trust` column in the `crypto_device` database table). + verification_levels: + # Minimum level for which the bridge should send keys to when bridging messages from Facebook to Matrix. + receive: unverified + # Minimum level that the bridge should accept for incoming Matrix messages. + send: unverified + # Minimum level that the bridge should require for accepting key requests. + share: cross-signed-tofu + # Options for Megolm room key rotation. These options allow you to + # configure the m.room.encryption event content. See: + # https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for + # more information about that event. + rotation: + # Enable custom Megolm room key rotation settings. Note that these + # settings will only apply to rooms created after this option is + # set. + enable_custom: false + # The maximum number of milliseconds a session should be used + # before changing it. The Matrix spec recommends 604800000 (a week) + # as the default. + milliseconds: 604800000 + # The maximum number of messages that should be sent with a given + # session before changing it. The Matrix spec recommends 100 as the + # default. + messages: 100 + + # Permissions for using the bridge. + # Permitted values: + # relay - Allowed to be relayed through the bridge, no access to commands. + # user - Use the bridge with puppeting. + # admin - Use and administrate the bridge. + # Permitted keys: + # * - All Matrix users + # domain - All users on that homeserver + # mxid - Specific user + permissions: + "*": "relay" + "matrix.redalder.org": "user" + "@magic_rb:matrix.redalder.org": "admin" + + relay: + # Whether relay mode should be allowed. If allowed, `!fb set-relay` can be used to turn any + # authenticated user into a relaybot for that chat. + enabled: false + # The formats to use when sending messages to Messenger via a relay user. + # + # Available variables: + # $sender_displayname - The display name of the sender (e.g.
Example User) + # $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser) + # $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com) + # $message - The message content + message_formats: + m.text: '$sender_displayname: $message' + m.notice: '$sender_displayname: $message' + m.emote: '* $sender_displayname $message' + m.file: '$sender_displayname sent a file' + m.image: '$sender_displayname sent an image' + m.audio: '$sender_displayname sent an audio file' + m.video: '$sender_displayname sent a video' + m.location: '$sender_displayname sent a location' + +facebook: + device_seed: generate + default_region_hint: ODN + connection_type: WIFI + carrier: Verizon + hni: 311390 + +# Python logging configuration. +# +# See section 16.7.2 of the Python documentation for more info: +# https://docs.python.org/3.6/library/logging.config.html#configuration-dictionary-schema +logging: + version: 1 + formatters: + colored: + (): mautrix_facebook.util.ColorFormatter + format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s" + normal: + format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s" + handlers: + file: + class: logging.handlers.RotatingFileHandler + formatter: normal + filename: ./mautrix-facebook.log + maxBytes: 10485760 + backupCount: 10 + console: + class: logging.StreamHandler + formatter: colored + loggers: + mau: + level: DEBUG + maufbapi: + level: DEBUG + paho: + level: INFO + aiohttp: + level: INFO + root: + level: DEBUG + handlers: [file, console] diff --git a/nixng/containers/matrix/mautrix-signal/default.nix b/nixng/containers/matrix/mautrix-signal/default.nix new file mode 100644 index 0000000..39d2eda --- /dev/null +++ b/nixng/containers/matrix/mautrix-signal/default.nix @@ -0,0 +1,46 @@ +{ inputs, ... }: +{ + flake.nixngConfigurations.mautrixSignal = inputs.nixng.nglib.makeSystem { + system = "x86_64-linux"; + name = "mautrix-signal"; + inherit (inputs) nixpkgs; + config = + { pkgs, lib, ... }: + { + dumb-init = { + enable = true; + type.services = { }; + }; + + init.services.mautrix-signal = { + enabled = true; + shutdownOnExit = true; + script = pkgs.writeShellScript "mautrix-signal" '' + DATA_DIR="/var/lib/mautrix-signal" + CONFIG_FILE="$DATA_DIR/config.yaml" + REGISTRATION_FILE="/var/lib/registrations/mautrix-signal.yaml" + DB_FILE="$DATA_DIR/sqlite.db" + + cp ${./mautrix-signal.yaml} "$CONFIG_FILE" ; chmod 755 "$CONFIG_FILE" + + [ -e "$REGISTRATION_FILE" ] || \ + ${lib.getExe pkgs.mautrix-signal} -c "$CONFIG_FILE" -r "$REGISTRATION_FILE" -g + ${lib.getExe pkgs.mautrix-signal} -c "$CONFIG_FILE" -r "$REGISTRATION_FILE" -n + ''; + }; + + init.services.signald = { + enabled = true; + shutdownOnExit = true; + script = pkgs.writeShellScript "signald" '' + DATA_DIR="/var/lib/signald" + SOCKET_PATH="/var/run/signald/signald.sock" + mkdir -p $(dirname $SOCKET_PATH) + + export LANG=en_US.UTF-8 + ${lib.getExe pkgs.signald} -d $DATA_DIR -s $SOCKET_PATH + ''; + }; + }; + }; +} diff --git a/nixng/containers/matrix/mautrix-signal/mautrix-signal.yaml b/nixng/containers/matrix/mautrix-signal/mautrix-signal.yaml new file mode 100644 index 0000000..a41edda --- /dev/null +++ b/nixng/containers/matrix/mautrix-signal/mautrix-signal.yaml @@ -0,0 +1,372 @@ +# Homeserver details +homeserver: + # The address that this appservice can use to connect to the homeserver. + address: https://matrix.redalder.org + # The domain of the homeserver (also known as server_name, used for MXIDs, etc). 
+ domain: matrix.redalder.org + # Whether or not to verify the SSL certificate of the homeserver. + # Only applies if address starts with https:// + verify_ssl: true + # What software is the homeserver running? + # Standard Matrix homeservers like Synapse, Dendrite and Conduit should just use "standard" here. + software: standard + # Number of retries for all HTTP requests if the homeserver isn't reachable. + http_retry_count: 4 + # The URL to push real-time bridge status to. + # If set, the bridge will make POST requests to this URL whenever a user's Signal connection state changes. + # The bridge will use the appservice as_token to authorize requests. + status_endpoint: null + # Endpoint for reporting per-message status. + message_send_checkpoint_endpoint: null + # Maximum number of simultaneous HTTP connections to the homeserver. + connection_limit: 100 + # Whether asynchronous uploads via MSC2246 should be enabled for media. + # Requires a media repo that supports MSC2246. + async_media: false + +# Application service host/registration related details +# Changing these values requires regeneration of the registration. +appservice: + # The address that the homeserver can use to connect to this appservice. + address: http://localhost:29328 + # When using https:// the TLS certificate and key files for the address. + tls_cert: false + tls_key: false + + # The hostname and port where this appservice should listen. + hostname: 0.0.0.0 + port: 29328 + # The maximum body size of appservice API requests (from the homeserver) in mebibytes + # Usually 1 is enough, but on high-traffic bridges you might need to increase this to avoid 413s + max_body_size: 1 + + # The full URI to the database. SQLite and Postgres are supported. + # Format examples: + # SQLite: sqlite:///filename.db + # Postgres: postgres://username:password@hostname/dbname + database: postgres://mautrix-signal@127.0.0.1/mautrix-signal + # Additional arguments for asyncpg.create_pool() or sqlite3.connect() + # https://magicstack.github.io/asyncpg/current/api/index.html#asyncpg.pool.create_pool + # https://docs.python.org/3/library/sqlite3.html#sqlite3.connect + # For sqlite, min_size is used as the connection thread pool size and max_size is ignored. + # Additionally, SQLite supports init_commands as an array of SQL queries to run on connect (e.g. to set PRAGMAs). + database_opts: + min_size: 1 + max_size: 10 + + # The unique ID of this appservice. + id: musignal + # Username of the appservice bot. + bot_username: musignalbot + # Display name and avatar for bot. Set to "remove" to remove display name/avatar, leave empty + # to leave display name/avatar as-is. + bot_displayname: Mautrix Signal bridge bot + bot_avatar: mxc://maunium.net/wPJgTQbZOtpBFmDNkiNEMDUp + + # Whether or not to receive ephemeral events via appservice transactions. + # Requires MSC2409 support (i.e. Synapse 1.22+). + # You should disable bridge -> sync_with_custom_puppets when this is enabled. + ephemeral_events: true + + # Authentication tokens for AS <-> HS communication. Autogenerated; do not modify. + # as_token: "This value is generated when generating the registration" + # hs_token: "This value is generated when generating the registration" + +# Prometheus telemetry config. Requires prometheus-client to be installed. +metrics: + enabled: false + listen_port: 8000 + +# Manhole config. +manhole: + # Whether or not opening the manhole is allowed. + enabled: false + # The path for the unix socket. 
+ path: /var/tmp/mautrix-signal.manhole + # The list of UIDs who can be added to the whitelist. + # If empty, any UIDs can be specified in the open-manhole command. + whitelist: + - 0 + +signal: + # Path to signald unix socket + socket_path: /var/run/signald/signald.sock + # Directory for temp files when sending files to Signal. This should be an + # absolute path that signald can read. For attachments in the other direction, + # make sure signald is configured to use an absolute path as the data directory. + outgoing_attachment_dir: /tmp + # Directory where signald stores avatars for groups. + avatar_dir: /var/lib/signald/avatars + # Directory where signald stores auth data. Used to delete data when logging out. + data_dir: /var/lib/signald/data + # Whether or not unknown signald accounts should be deleted when the bridge is started. + # When this is enabled, any UserInUse errors should be resolved by restarting the bridge. + delete_unknown_accounts_on_start: false + # Whether or not message attachments should be removed from disk after they're bridged. + remove_file_after_handling: true + # Whether or not users can register a primary device + registration_enabled: false + # Whether or not to enable disappearing messages in groups. If enabled, then the expiration + # time of the messages will be determined by the first users to read the message, rather + # than individually. If the bridge has a single user, this can be turned on safely. + enable_disappearing_messages_in_groups: false + +# Bridge config +bridge: + # Localpart template of MXIDs for Signal users. + # {userid} is replaced with the UUID of the Signal user. + username_template: "signal_{userid}" + # Displayname template for Signal users. + # {displayname} is replaced with the displayname of the Signal user, which is the first + # available variable in displayname_preference. The variables in displayname_preference + # can also be used here directly. + displayname_template: "{displayname} (Signal)" + # Whether or not contact list displaynames should be used. + # Possible values: disallow, allow, prefer + # + # Multi-user instances are recommended to disallow contact list names, as otherwise there can + # be conflicts between names from different users' contact lists. + contact_list_names: disallow + # Available variables: full_name, first_name, last_name, phone, uuid + displayname_preference: + - full_name + - phone + + # Whether or not to create portals for all groups on login/connect. + autocreate_group_portal: true + # Whether or not to create portals for all contacts on login/connect. + autocreate_contact_portal: false + # Whether or not to make portals of Signal groups in which joining via invite link does + # not need to be approved by an administrator publicly joinable on Matrix. + public_portals: false + # Whether or not to use /sync to get read receipts and typing notifications + # when double puppeting is enabled + sync_with_custom_puppets: false + # Whether or not to update the m.direct account data event when double puppeting is enabled. + # Note that updating the m.direct event is not atomic (except with mautrix-asmux) + # and is therefore prone to race conditions. + sync_direct_chat_list: false + # Allow using double puppeting from any server with a valid client .well-known file. + double_puppet_allow_discovery: false + # Servers to allow double puppeting from, even if double_puppet_allow_discovery is false. 
+ double_puppet_server_map: {} + # example.com: https://example.com + # Shared secret for https://github.com/devture/matrix-synapse-shared-secret-auth + # + # If set, custom puppets will be enabled automatically for local users + # instead of users having to find an access token and run `login-matrix` + # manually. + # If using this for other servers than the bridge's server, + # you must also set the URL in the double_puppet_server_map. + login_shared_secret_map: {} + # example.com: foo + # Whether or not created rooms should have federation enabled. + # If false, created portal rooms will never be federated. + federate_rooms: false + # End-to-bridge encryption support options. + # + # See https://docs.mau.fi/bridges/general/end-to-bridge-encryption.html for more info. + encryption: + # Allow encryption, work in group chat rooms with e2ee enabled + allow: false + # Default to encryption, force-enable encryption in all portals the bridge creates + # This will cause the bridge bot to be in private chats for the encryption to work properly. + default: false + # Whether to use MSC2409/MSC3202 instead of /sync long polling for receiving encryption-related data. + appservice: false + # Require encryption, drop any unencrypted messages. + require: false + # Enable key sharing? If enabled, key requests for rooms where users are in will be fulfilled. + # You must use a client that supports requesting keys from other users to use this feature. + allow_key_sharing: false + # Options for deleting megolm sessions from the bridge. + delete_keys: + # Beeper-specific: delete outbound sessions when hungryserv confirms + # that the user has uploaded the key to key backup. + delete_outbound_on_ack: false + # Don't store outbound sessions in the inbound table. + dont_store_outbound: false + # Ratchet megolm sessions forward after decrypting messages. + ratchet_on_decrypt: false + # Delete fully used keys (index >= max_messages) after decrypting messages. + delete_fully_used_on_decrypt: false + # Delete previous megolm sessions from same device when receiving a new one. + delete_prev_on_new_session: false + # Delete megolm sessions received from a device when the device is deleted. + delete_on_device_delete: false + # Periodically delete megolm sessions when 2x max_age has passed since receiving the session. + periodically_delete_expired: false + # What level of device verification should be required from users? + # + # Valid levels: + # unverified - Send keys to all devices in the room. + # cross-signed-untrusted - Require valid cross-signing, but trust all cross-signing keys. + # cross-signed-tofu - Require valid cross-signing, trust cross-signing keys on first use (and reject changes). + # cross-signed-verified - Require valid cross-signing, plus a valid user signature from the bridge bot. + # Note that creating user signatures from the bridge bot is not currently possible. + # verified - Require manual per-device verification + # (currently only possible by modifying the `trust` column in the `crypto_device` database table). + verification_levels: + # Minimum level for which the bridge should send keys to when bridging messages from Signal to Matrix. + receive: unverified + # Minimum level that the bridge should accept for incoming Matrix messages. + send: unverified + # Minimum level that the bridge should require for accepting key requests. + share: cross-signed-tofu
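+ + # For reference, with enable_custom turned on, the rotation values below map onto the + # m.room.encryption event content roughly like this (a sketch; field names per the + # spec link below): + # { "algorithm": "m.megolm.v1.aes-sha2", + # "rotation_period_ms": 604800000, + # "rotation_period_msgs": 100 }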
+ # Options for Megolm room key rotation. These options allow you to + # configure the m.room.encryption event content. See: + # https://spec.matrix.org/v1.3/client-server-api/#mroomencryption for + # more information about that event. + rotation: + # Enable custom Megolm room key rotation settings. Note that these + # settings will only apply to rooms created after this option is + # set. + enable_custom: false + # The maximum number of milliseconds a session should be used + # before changing it. The Matrix spec recommends 604800000 (a week) + # as the default. + milliseconds: 604800000 + # The maximum number of messages that should be sent with a given + # session before changing it. The Matrix spec recommends 100 as the + # default. + messages: 100 + + # Whether to explicitly set the avatar and room name for private chat portal rooms. + # If set to `default`, this will be enabled in encrypted rooms and disabled in unencrypted rooms. + # If set to `always`, all DM rooms will have explicit names and avatars set. + # If set to `never`, DM rooms will never have names and avatars set. + private_chat_portal_meta: default + # Whether or not the bridge should send a read receipt from the bridge bot when a message has + # been sent to Signal. This lets you check manually whether the bridge is receiving your + # messages. + # Note that this is not related to Signal delivery receipts. + delivery_receipts: false + # Whether or not delivery errors should be reported as messages in the Matrix room. + delivery_error_reports: true + # Whether the bridge should send the message status as a custom com.beeper.message_send_status event. + message_status_events: false + # Set this to true to tell the bridge to re-send m.bridge events to all rooms on the next run. + # This field will automatically be changed back to false afterwards, + # except if the config file is not writable. + resend_bridge_info: false + # Interval at which to resync contacts (in seconds). + periodic_sync: 0 + # Should leaving the room on Matrix make the user leave on Signal? + bridge_matrix_leave: true + # Should the bridge auto-create a group chat on Signal when a ghost is invited to a room? + # Requires the user to have sufficient power level and double puppeting enabled. + create_group_on_invite: true + hacky_contact_name_mixup_detection: false + + # Provisioning API part of the web server for automated portal creation and fetching information. + # Used by things like mautrix-manager (https://github.com/tulir/mautrix-manager). + provisioning: + # Whether or not the provisioning API should be enabled. + enabled: true + # The prefix to use in the provisioning API endpoints. + prefix: /_matrix/provision + # The shared secret to authorize users of the API. + # Set to "generate" to generate and save a new token. + shared_secret: generate + # Segment API key to enable analytics tracking for web server + # endpoints. Set to null to disable. + # Currently the only events are login start, QR code scan, and login + # success/failure. + segment_key: null + # Optional user_id to use when sending Segment events. If null, defaults to using mxID. + segment_user_id: null + + # The prefix for commands. Only required in non-management rooms. + command_prefix: "!signal" + + # Messages sent upon joining a management room. + # Markdown is supported. The defaults are listed below. + management_room_text: + # Sent when joining a room. + welcome: "Hello, I'm a Signal bridge bot." + # Sent when joining a management room and the user is already logged in. + welcome_connected: "Use `help` for help." + # Sent when joining a management room and the user is not logged in.
+ welcome_unconnected: "Use `help` for help or `link` to log in." + # Optional extra text sent when joining a management room. + additional_help: "" + + # Send each message separately (for readability in some clients) + management_room_multiple_messages: false + + # Permissions for using the bridge. + # Permitted values: + # relay - Allowed to be relayed through the bridge, no access to commands. + # user - Use the bridge with puppeting. + # admin - Use and administrate the bridge. + # Permitted keys: + # * - All Matrix users + # domain - All users on that homeserver + # mxid - Specific user + permissions: + "*": "relay" + "matrix.redalder.org": "user" + "@magic_rb:matrix.redalder.org": "admin" + + relay: + # Whether relay mode should be allowed. If allowed, `!signal set-relay` can be used to turn any + # authenticated user into a relaybot for that chat. + enabled: false + # The formats to use when sending messages to Signal via a relay user. + # + # Available variables: + # $sender_displayname - The display name of the sender (e.g. Example User) + # $sender_username - The username (Matrix ID localpart) of the sender (e.g. exampleuser) + # $sender_mxid - The Matrix ID of the sender (e.g. @exampleuser:example.com) + # $message - The message content + message_formats: + m.text: '$sender_displayname: $message' + m.notice: '$sender_displayname: $message' + m.emote: '* $sender_displayname $message' + m.file: '$sender_displayname sent a file' + m.image: '$sender_displayname sent an image' + m.audio: '$sender_displayname sent an audio file' + m.video: '$sender_displayname sent a video' + m.location: '$sender_displayname sent a location' + # Specify a dedicated relay account. Must be a regular matrix account logged into this bridge + # and double puppeting working to auto-accept invites. When this user is invited to a room + # it will automatically be set as the relay user. May be overridden with `set-relay` or `unset-relay` + relaybot: '@relaybot:example.com' + # Whether or not invites from non-logged-in users should be relayed + invite: true + + # Format for generating URLs from location messages for sending to Signal + # Google Maps: 'https://www.google.com/maps/place/{lat},{long}' + # OpenStreetMap: 'https://www.openstreetmap.org/?mlat={lat}&mlon={long}' + location_format: 'https://www.google.com/maps/place/{lat},{long}' + +# Python logging configuration. 
+
+# Python logging configuration.
+#
+# See section 16.7.2 of the Python documentation for more info:
+# https://docs.python.org/3.6/library/logging.config.html#configuration-dictionary-schema
+logging:
+ version: 1
+ formatters:
+ colored:
+ (): mautrix_signal.util.ColorFormatter
+ format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
+ normal:
+ format: "[%(asctime)s] [%(levelname)s@%(name)s] %(message)s"
+ handlers:
+ file:
+ class: logging.handlers.RotatingFileHandler
+ formatter: normal
+ filename: ./mautrix-signal.log
+ maxBytes: 10485760
+ backupCount: 10
+ console:
+ class: logging.StreamHandler
+ formatter: colored
+ loggers:
+ mau:
+ level: DEBUG
+ aiohttp:
+ level: INFO
+ root:
+ level: DEBUG
+ handlers: [file, console]
diff --git a/nixng/containers/matrix/synapse/common_config.nix b/nixng/containers/matrix/synapse/common_config.nix
new file mode 100644
index 0000000..3f22e5e
--- /dev/null
+++ b/nixng/containers/matrix/synapse/common_config.nix
@@ -0,0 +1,21 @@
+{ logConfig }:
+{
+ server_name = "matrix.redalder.org";
+ report_stats = "yes";
+ pid_file = "/homeserver.pid";
+
+ log_config = logConfig;
+
+ trusted_key_servers = [ {
+ server_name = "matrix.org";
+ } ];
+ media_store_path = "/var/lib/synapse/media_store";
+ signing_key_path = "/var/lib/synapse/signing.key";
+
+ enable_registration = false;
+ enable_registration_without_verification = false;
+
+ federation_sender_instances = [
+ "worker-federation-sender-0"
+ ];
+}
diff --git a/nixng/containers/matrix/synapse/default.nix b/nixng/containers/matrix/synapse/default.nix
new file mode 100644
index 0000000..a61855b
--- /dev/null
+++ b/nixng/containers/matrix/synapse/default.nix
@@ -0,0 +1,52 @@
+{ inputs, lib, ... }:
+let
+ inherit (lib)
+ singleton;
+
+ commonConfig = pkgs:
+ (pkgs.formats.yaml {}).generate "common.yaml"
+ (import ./common_config.nix { logConfig = logConfig pkgs; });
+ logConfig = pkgs:
+ (pkgs.formats.yaml {}).generate "log.yaml"
+ (import ./log_config.nix {});
+
+ callPackage = lib.callPackageWith {
+ inherit (inputs)
+ nixpkgs;
+ inherit (inputs.nixng.nglib)
+ makeSystem;
+ inherit
+ commonConfig
+ logConfig;
+ };
+in
+{
+ flake.nixngConfigurations.synapseFederationSender = callPackage ./generic_worker.nix {
+ name = "generic";
+ listener_resources = singleton "health";
+ };
+ flake.nixngConfigurations.synapseFederationReceiver = callPackage ./generic_worker.nix {
+ name = "generic";
+ listener_resources = [
+ "health"
+ "federation"
+ ];
+ };
+ flake.nixngConfigurations.synapseClient = callPackage ./generic_worker.nix {
+ name = "generic";
+ listener_resources = [
+ "client"
+ "health"
+ ];
+ };
+ flake.nixngConfigurations.synapseSync = callPackage ./generic_worker.nix {
+ name = "generic";
+ listener_resources = [
+ "client"
+ "health"
+ ];
+ };
+ flake.nixngConfigurations.synapseRedis = callPackage ./redis.nix {};
+ flake.nixngConfigurations.synapsePostgreSQL = callPackage ./postgresql.nix {};
+ flake.nixngConfigurations.synapse = callPackage ./synapse.nix {};
+}
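Since generic_worker.nix (below) is parameterized over `name` and `listener_resources`, further workers can be stamped out with the same callPackage pattern. A hypothetical sketch, not part of this commit (`synapseMedia` and the "media" listener resource are illustrative):

  flake.nixngConfigurations.synapseMedia = callPackage ./generic_worker.nix {
    name = "generic";
    listener_resources = [
      "media"
      "health"
    ];
  };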
diff --git a/nixng/containers/matrix/synapse/generic_worker.nix b/nixng/containers/matrix/synapse/generic_worker.nix
new file mode 100644
index 0000000..9a33a48
--- /dev/null
+++ b/nixng/containers/matrix/synapse/generic_worker.nix
@@ -0,0 +1,59 @@
+{ makeSystem
+, nixpkgs
+
+, listener_resources
+, name
+, logConfig
+, commonConfig
+}:
+makeSystem {
+ system = "x86_64-linux";
+ name = "synapse-worker-${name}";
+ inherit nixpkgs;
+ config = ({ pkgs, ... }:
+ {
+ dumb-init = {
+ enable = true;
+ type.services = { };
+ };
+
+ services.synapse.workers.${name} = {
+ settings = {
+ worker_app = "synapse.app.generic_worker";
+
+ # The replication listener on the main synapse process.
+ worker_replication_host = "127.0.0.1";
+ worker_replication_http_port = 9093;
+
+ worker_listeners = [
+ {
+ port = 6167;
+ tls = false;
+ type = "http";
+ x_forwarded = true;
+ bind_addresses = [ "0.0.0.0" ];
+ resources =
+ [
+ {
+ names = listener_resources;
+ compress = false;
+ }
+ ];
+ }
+ ];
+
+ worker_log_config = logConfig pkgs;
+ };
+ arguments = {
+ config-path = [
+ (commonConfig pkgs)
+ "/secrets/extra.yaml"
+ "/var/lib/registrations/extra.yaml"
+ ];
+ keys-directory = [
+ "/var/lib/synapse/keys"
+ ];
+ };
+ };
+ });
+}
diff --git a/nixng/containers/matrix/synapse/log_config.nix b/nixng/containers/matrix/synapse/log_config.nix
new file mode 100644
index 0000000..104ad64
--- /dev/null
+++ b/nixng/containers/matrix/synapse/log_config.nix
@@ -0,0 +1,18 @@
+{ }:
+{
+ version = 1;
+
+ formatters.precise.format = "%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - %(request)s - %(message)s";
+ handlers.console = {
+ class = "logging.StreamHandler";
+ formatter = "precise";
+ };
+
+ loggers."synapse.storage.SQL".level = "INFO";
+ root = {
+ level = "INFO";
+ handlers = [ "console" ];
+ };
+
+ disable_existing_loggers = false;
+}
diff --git a/nixng/containers/matrix/synapse/postgresql.nix b/nixng/containers/matrix/synapse/postgresql.nix
new file mode 100644
index 0000000..ccf534a
--- /dev/null
+++ b/nixng/containers/matrix/synapse/postgresql.nix
@@ -0,0 +1,58 @@
+{ makeSystem
+, nixpkgs
+}:
+makeSystem {
+ system = "x86_64-linux";
+ name = "nixng-synapse-postgresql";
+ inherit nixpkgs;
+ config =
+ { pkgs, config, ... }:
+ {
+ config = {
+ dumb-init = {
+ enable = true;
+ type.services = {};
+ };
+ services.postgresql = {
+ enable = true;
+ package = pkgs.postgresql_12;
+
+ initialScript = "/secrets/init.sql";
+ enableTCPIP = true;
+
+ authentication = "host all all all md5";
+
+ ensureDatabases = {
+ "synapse" = { ENCODING = "UTF8"; TEMPLATE = "template0"; };
+ "mautrix-facebook" = { ENCODING = "UTF8"; TEMPLATE = "template0"; };
+ "mautrix-signal" = { ENCODING = "UTF8"; TEMPLATE = "template0"; };
+ "mautrix-whatsapp" = { ENCODING = "UTF8"; TEMPLATE = "template0"; };
+ "mautrix-discord" = { ENCODING = "UTF8"; TEMPLATE = "template0"; };
+ };
+ ensureExtensions = {};
+ ensureUsers = [
+ {
+ name = "synapse";
+ ensurePermissions."DATABASE \"synapse\"" = "ALL PRIVILEGES";
+ }
+ {
+ name = "mautrix-facebook";
+ ensurePermissions."DATABASE \"mautrix-facebook\"" = "ALL PRIVILEGES";
+ }
+ {
+ name = "mautrix-signal";
+ ensurePermissions."DATABASE \"mautrix-signal\"" = "ALL PRIVILEGES";
+ }
+ {
+ name = "mautrix-whatsapp";
+ ensurePermissions."DATABASE \"mautrix-whatsapp\"" = "ALL PRIVILEGES";
+ }
+ {
+ name = "mautrix-discord";
+ ensurePermissions."DATABASE \"mautrix-discord\"" = "ALL PRIVILEGES";
+ }
+ ];
+ };
+ };
+ };
+}
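Note that ensureUsers only creates the roles and their database grants; the actual credentials presumably come from the secret mounted at /secrets/init.sql, which the module runs as its initial script. A minimal sketch of what such a script might contain, given the md5 host authentication above (role names mirror ensureUsers, passwords are placeholders; whether CREATE ROLE or ALTER ROLE is appropriate depends on when the module runs the script relative to ensureUsers):

  -- illustrative only; one statement per role listed in ensureUsers
  ALTER ROLE "synapse" WITH LOGIN PASSWORD 'changeme';
  ALTER ROLE "mautrix-signal" WITH LOGIN PASSWORD 'changeme';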
diff --git a/nixng/containers/matrix/synapse/redis.conf b/nixng/containers/matrix/synapse/redis.conf
new file mode 100644
index 0000000..a84c99d
--- /dev/null
+++ b/nixng/containers/matrix/synapse/redis.conf
@@ -0,0 +1,2276 @@
+# Redis configuration file example.
+#
+# Note that in order to read the configuration file, Redis must be
+# started with the file path as first argument:
+#
+# ./redis-server /path/to/redis.conf
+
+# Note on units: when memory size is needed, it is possible to specify
+# it in the usual form of 1k 5GB 4M and so forth:
+#
+# 1k => 1000 bytes
+# 1kb => 1024 bytes
+# 1m => 1000000 bytes
+# 1mb => 1024*1024 bytes
+# 1g => 1000000000 bytes
+# 1gb => 1024*1024*1024 bytes
+#
+# units are case insensitive so 1GB 1Gb 1gB are all the same.
+
+################################## INCLUDES ###################################
+
+# Include one or more other config files here. This is useful if you
+# have a standard template that goes to all Redis servers but also need
+# to customize a few per-server settings. Include files can include
+# other files, so use this wisely.
+#
+# Note that option "include" won't be rewritten by command "CONFIG REWRITE"
+# from admin or Redis Sentinel. Since Redis always uses the last processed
+# line as value of a configuration directive, you'd better put includes
+# at the beginning of this file to avoid overwriting config change at runtime.
+#
+# If instead you are interested in using includes to override configuration
+# options, it is better to use include as the last line.
+#
+# Included paths may contain wildcards. All files matching the wildcards will
+# be included in alphabetical order.
+# Note that if an include path contains wildcards but no files match it when
+# the server is started, the include statement will be ignored and no error will
+# be emitted. It is safe, therefore, to include wildcard files from empty
+# directories.
+#
+# include /path/to/local.conf
+# include /path/to/other.conf
+# include /path/to/fragments/*.conf
+#
+
+################################## MODULES #####################################
+
+# Load modules at startup. If the server is not able to load modules
+# it will abort. It is possible to use multiple loadmodule directives.
+#
+# loadmodule /path/to/my_module.so
+# loadmodule /path/to/other_module.so
+
+################################## NETWORK #####################################
+
+# By default, if no "bind" configuration directive is specified, Redis listens
+# for connections from all available network interfaces on the host machine.
+# It is possible to listen to just one or multiple selected interfaces using
+# the "bind" configuration directive, followed by one or more IP addresses.
+# Each address can be prefixed by "-", which means that redis will not fail to
+# start if the address is not available. Being not available only refers to
+# addresses that do not correspond to any network interface. Addresses that
+# are already in use will always fail, and unsupported protocols will always
+# be silently skipped.
+#
+# Examples:
+#
+# bind 192.168.1.100 10.0.0.1 # listens on two specific IPv4 addresses
+# bind 127.0.0.1 ::1 # listens on loopback IPv4 and IPv6
+# bind * -::* # like the default, all available interfaces
+#
+# ~~~ WARNING ~~~ If the computer running Redis is directly exposed to the
+# internet, binding to all the interfaces is dangerous and will expose the
+# instance to everybody on the internet. So by default we uncomment the
+# following bind directive, that will force Redis to listen only on the
+# IPv4 and IPv6 (if available) loopback interface addresses (this means Redis
+# will only be able to accept client connections from the same host that it is
+# running on).
+# +# IF YOU ARE SURE YOU WANT YOUR INSTANCE TO LISTEN TO ALL THE INTERFACES +# COMMENT OUT THE FOLLOWING LINE. +# +# You will also need to set a password unless you explicitly disable protected +# mode. +# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +bind 127.0.0.1 + +# By default, outgoing connections (from replica to master, from Sentinel to +# instances, cluster bus, etc.) are not bound to a specific local address. In +# most cases, this means the operating system will handle that based on routing +# and the interface through which the connection goes out. +# +# Using bind-source-addr it is possible to configure a specific address to bind +# to, which may also affect how the connection gets routed. +# +# Example: +# +# bind-source-addr 10.0.0.1 + +# Protected mode is a layer of security protection, in order to avoid that +# Redis instances left open on the internet are accessed and exploited. +# +# When protected mode is on and the default user has no password, the server +# only accepts local connections from the IPv4 address (127.0.0.1), IPv6 address +# (::1) or Unix domain sockets. +# +# By default protected mode is enabled. You should disable it only if +# you are sure you want clients from other hosts to connect to Redis +# even if no authentication is configured. +protected-mode yes + +# Redis uses default hardened security configuration directives to reduce the +# attack surface on innocent users. Therefore, several sensitive configuration +# directives are immutable, and some potentially-dangerous commands are blocked. +# +# Configuration directives that control files that Redis writes to (e.g., 'dir' +# and 'dbfilename') and that aren't usually modified during runtime +# are protected by making them immutable. +# +# Commands that can increase the attack surface of Redis and that aren't usually +# called by users are blocked by default. +# +# These can be exposed to either all connections or just local ones by setting +# each of the configs listed below to either of these values: +# +# no - Block for any connection (remain immutable) +# yes - Allow for any connection (no protection) +# local - Allow only for local connections. Ones originating from the +# IPv4 address (127.0.0.1), IPv6 address (::1) or Unix domain sockets. +# +# enable-protected-configs no +# enable-debug-command no +# enable-module-command no + +# Accept connections on the specified port, default is 6379 (IANA #815344). +# If port 0 is specified Redis will not listen on a TCP socket. +port 6379 + +# TCP listen() backlog. +# +# In high requests-per-second environments you need a high backlog in order +# to avoid slow clients connection issues. Note that the Linux kernel +# will silently truncate it to the value of /proc/sys/net/core/somaxconn so +# make sure to raise both the value of somaxconn and tcp_max_syn_backlog +# in order to get the desired effect. +tcp-backlog 511 + +# Unix socket. +# +# Specify the path for the Unix socket that will be used to listen for +# incoming connections. There is no default, so Redis will not listen +# on a unix socket when not specified. +# +# unixsocket /run/redis.sock +# unixsocketperm 700 + +# Close the connection after a client is idle for N seconds (0 to disable) +timeout 0 + +# TCP keepalive. +# +# If non-zero, use SO_KEEPALIVE to send TCP ACKs to clients in absence +# of communication. This is useful for two reasons: +# +# 1) Detect dead peers. +# 2) Force network equipment in the middle to consider the connection to be +# alive. 
+# +# On Linux, the specified value (in seconds) is the period used to send ACKs. +# Note that to close the connection the double of the time is needed. +# On other kernels the period depends on the kernel configuration. +# +# A reasonable value for this option is 300 seconds, which is the new +# Redis default starting with Redis 3.2.1. +tcp-keepalive 300 + +# Apply OS-specific mechanism to mark the listening socket with the specified +# ID, to support advanced routing and filtering capabilities. +# +# On Linux, the ID represents a connection mark. +# On FreeBSD, the ID represents a socket cookie ID. +# On OpenBSD, the ID represents a route table ID. +# +# The default value is 0, which implies no marking is required. +# socket-mark-id 0 + +################################# TLS/SSL ##################################### + +# By default, TLS/SSL is disabled. To enable it, the "tls-port" configuration +# directive can be used to define TLS-listening ports. To enable TLS on the +# default port, use: +# +# port 0 +# tls-port 6379 + +# Configure a X.509 certificate and private key to use for authenticating the +# server to connected clients, masters or cluster peers. These files should be +# PEM formatted. +# +# tls-cert-file redis.crt +# tls-key-file redis.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-key-file-pass secret + +# Normally Redis uses the same certificate for both server functions (accepting +# connections) and client functions (replicating from a master, establishing +# cluster bus connections, etc.). +# +# Sometimes certificates are issued with attributes that designate them as +# client-only or server-only certificates. In that case it may be desired to use +# different certificates for incoming (server) and outgoing (client) +# connections. To do that, use the following directives: +# +# tls-client-cert-file client.crt +# tls-client-key-file client.key +# +# If the key file is encrypted using a passphrase, it can be included here +# as well. +# +# tls-client-key-file-pass secret + +# Configure a DH parameters file to enable Diffie-Hellman (DH) key exchange, +# required by older versions of OpenSSL (<3.0). Newer versions do not require +# this configuration and recommend against it. +# +# tls-dh-params-file redis.dh + +# Configure a CA certificate(s) bundle or directory to authenticate TLS/SSL +# clients and peers. Redis requires an explicit configuration of at least one +# of these, and will not implicitly use the system wide configuration. +# +# tls-ca-cert-file ca.crt +# tls-ca-cert-dir /etc/ssl/certs + +# By default, clients (including replica servers) on a TLS port are required +# to authenticate using valid client side certificates. +# +# If "no" is specified, client certificates are not required and not accepted. +# If "optional" is specified, client certificates are accepted and must be +# valid if provided, but are not required. +# +# tls-auth-clients no +# tls-auth-clients optional + +# By default, a Redis replica does not attempt to establish a TLS connection +# with its master. +# +# Use the following directive to enable TLS on replication links. +# +# tls-replication yes + +# By default, the Redis Cluster bus uses a plain TCP connection. To enable +# TLS for the bus protocol, use the following directive: +# +# tls-cluster yes + +# By default, only TLSv1.2 and TLSv1.3 are enabled and it is highly recommended +# that older formally deprecated versions are kept disabled to reduce the attack surface. 
+# You can explicitly specify TLS versions to support. +# Allowed values are case insensitive and include "TLSv1", "TLSv1.1", "TLSv1.2", +# "TLSv1.3" (OpenSSL >= 1.1.1) or any combination. +# To enable only TLSv1.2 and TLSv1.3, use: +# +# tls-protocols "TLSv1.2 TLSv1.3" + +# Configure allowed ciphers. See the ciphers(1ssl) manpage for more information +# about the syntax of this string. +# +# Note: this configuration applies only to <= TLSv1.2. +# +# tls-ciphers DEFAULT:!MEDIUM + +# Configure allowed TLSv1.3 ciphersuites. See the ciphers(1ssl) manpage for more +# information about the syntax of this string, and specifically for TLSv1.3 +# ciphersuites. +# +# tls-ciphersuites TLS_CHACHA20_POLY1305_SHA256 + +# When choosing a cipher, use the server's preference instead of the client +# preference. By default, the server follows the client's preference. +# +# tls-prefer-server-ciphers yes + +# By default, TLS session caching is enabled to allow faster and less expensive +# reconnections by clients that support it. Use the following directive to disable +# caching. +# +# tls-session-caching no + +# Change the default number of TLS sessions cached. A zero value sets the cache +# to unlimited size. The default size is 20480. +# +# tls-session-cache-size 5000 + +# Change the default timeout of cached TLS sessions. The default timeout is 300 +# seconds. +# +# tls-session-cache-timeout 60 + +################################# GENERAL ##################################### + +# By default Redis does not run as a daemon. Use 'yes' if you need it. +# Note that Redis will write a pid file in /var/run/redis.pid when daemonized. +# When Redis is supervised by upstart or systemd, this parameter has no impact. +daemonize no + +# If you run Redis from upstart or systemd, Redis can interact with your +# supervision tree. Options: +# supervised no - no supervision interaction +# supervised upstart - signal upstart by putting Redis into SIGSTOP mode +# requires "expect stop" in your upstart job config +# supervised systemd - signal systemd by writing READY=1 to $NOTIFY_SOCKET +# on startup, and updating Redis status on a regular +# basis. +# supervised auto - detect upstart or systemd method based on +# UPSTART_JOB or NOTIFY_SOCKET environment variables +# Note: these supervision methods only signal "process is ready." +# They do not enable continuous pings back to your supervisor. +# +# The default is "no". To run under upstart/systemd, you can simply uncomment +# the line below: +# +# supervised auto + +# If a pid file is specified, Redis writes it where specified at startup +# and removes it at exit. +# +# When the server runs non daemonized, no pid file is created if none is +# specified in the configuration. When the server is daemonized, the pid file +# is used even if not specified, defaulting to "/var/run/redis.pid". +# +# Creating a pid file is best effort: if Redis is not able to create it +# nothing bad happens, the server will start and run normally. +# +# Note that on modern Linux systems "/run/redis.pid" is more conforming +# and should be used instead. +pidfile /var/run/redis_6379.pid + +# Specify the server verbosity level. +# This can be one of: +# debug (a lot of information, useful for development/testing) +# verbose (many rarely useful info, but not a mess like the debug level) +# notice (moderately verbose, what you want in production probably) +# warning (only very important / critical messages are logged) +loglevel notice + +# Specify the log file name. 
Also the empty string can be used to force
+# Redis to log on the standard output. Note that if you use standard
+# output for logging but daemonize, logs will be sent to /dev/null
+logfile ""
+
+# To enable logging to the system logger, just set 'syslog-enabled' to yes,
+# and optionally update the other syslog parameters to suit your needs.
+# syslog-enabled no
+
+# Specify the syslog identity.
+# syslog-ident redis
+
+# Specify the syslog facility. Must be USER or between LOCAL0-LOCAL7.
+# syslog-facility local0
+
+# To disable the built in crash log, which will possibly produce cleaner core
+# dumps when they are needed, uncomment the following:
+#
+# crash-log-enabled no
+
+# To disable the fast memory check that's run as part of the crash log, which
+# will possibly let redis terminate sooner, uncomment the following:
+#
+# crash-memcheck-enabled no
+
+# Set the number of databases. The default database is DB 0, you can select
+# a different one on a per-connection basis using SELECT <dbid> where
+# dbid is a number between 0 and 'databases'-1
+databases 16
+
+# By default Redis shows an ASCII art logo only when started to log to the
+# standard output and if the standard output is a TTY and syslog logging is
+# disabled. Basically this means that normally a logo is displayed only in
+# interactive sessions.
+#
+# However it is possible to force the pre-4.0 behavior and always show a
+# ASCII art logo in startup logs by setting the following option to yes.
+always-show-logo no
+
+# By default, Redis modifies the process title (as seen in 'top' and 'ps') to
+# provide some runtime information. It is possible to disable this and leave
+# the process name as executed by setting the following to no.
+set-proc-title yes
+
+# When changing the process title, Redis uses the following template to construct
+# the modified title.
+#
+# Template variables are specified in curly brackets. The following variables are
+# supported:
+#
+# {title} Name of process as executed if parent, or type of child process.
+# {listen-addr} Bind address or '*' followed by TCP or TLS port listening on, or
+# Unix socket if only that's available.
+# {server-mode} Special mode, i.e. "[sentinel]" or "[cluster]".
+# {port} TCP port listening on, or 0.
+# {tls-port} TLS port listening on, or 0.
+# {unixsocket} Unix domain socket listening on, or "".
+# {config-file} Name of configuration file used.
+#
+proc-title-template "{title} {listen-addr} {server-mode}"
+
+################################ SNAPSHOTTING ################################
+
+# Save the DB to disk.
+#
+# save <seconds> <changes> [<seconds> <changes> ...]
+#
+# Redis will save the DB if the given number of seconds elapsed and it
+# surpassed the given number of write operations against the DB.
+#
+# Snapshotting can be completely disabled with a single empty string argument
+# as in following example:
+#
+# save ""
+#
+# Unless specified otherwise, by default Redis will save the DB:
+# * After 3600 seconds (an hour) if at least 1 change was performed
+# * After 300 seconds (5 minutes) if at least 100 changes were performed
+# * After 60 seconds if at least 10000 changes were performed
+#
+# You can set these explicitly by uncommenting the following line.
+#
+# save 3600 1 300 100 60 10000
+
+# By default Redis will stop accepting writes if RDB snapshots are enabled
+# (at least one save point) and the latest background save failed.
+# This will make the user aware (in a hard way) that data is not persisting +# on disk properly, otherwise chances are that no one will notice and some +# disaster will happen. +# +# If the background saving process will start working again Redis will +# automatically allow writes again. +# +# However if you have setup your proper monitoring of the Redis server +# and persistence, you may want to disable this feature so that Redis will +# continue to work as usual even if there are problems with disk, +# permissions, and so forth. +stop-writes-on-bgsave-error yes + +# Compress string objects using LZF when dump .rdb databases? +# By default compression is enabled as it's almost always a win. +# If you want to save some CPU in the saving child set it to 'no' but +# the dataset will likely be bigger if you have compressible values or keys. +rdbcompression yes + +# Since version 5 of RDB a CRC64 checksum is placed at the end of the file. +# This makes the format more resistant to corruption but there is a performance +# hit to pay (around 10%) when saving and loading RDB files, so you can disable it +# for maximum performances. +# +# RDB files created with checksum disabled have a checksum of zero that will +# tell the loading code to skip the check. +rdbchecksum yes + +# Enables or disables full sanitization checks for ziplist and listpack etc when +# loading an RDB or RESTORE payload. This reduces the chances of a assertion or +# crash later on while processing commands. +# Options: +# no - Never perform full sanitization +# yes - Always perform full sanitization +# clients - Perform full sanitization only for user connections. +# Excludes: RDB files, RESTORE commands received from the master +# connection, and client connections which have the +# skip-sanitize-payload ACL flag. +# The default should be 'clients' but since it currently affects cluster +# resharding via MIGRATE, it is temporarily set to 'no' by default. +# +# sanitize-dump-payload no + +# The filename where to dump the DB +dbfilename dump.rdb + +# Remove RDB files used by replication in instances without persistence +# enabled. By default this option is disabled, however there are environments +# where for regulations or other security concerns, RDB files persisted on +# disk by masters in order to feed replicas, or stored on disk by replicas +# in order to load them for the initial synchronization, should be deleted +# ASAP. Note that this option ONLY WORKS in instances that have both AOF +# and RDB persistence disabled, otherwise is completely ignored. +# +# An alternative (and sometimes better) way to obtain the same effect is +# to use diskless replication on both master and replicas instances. However +# in the case of replicas, diskless is not always an option. +rdb-del-sync-files no + +# The working directory. +# +# The DB will be written inside this directory, with the filename specified +# above using the 'dbfilename' configuration directive. +# +# The Append Only File will also be created inside this directory. +# +# Note that you must specify a directory here, not a file name. +dir ./ + +################################# REPLICATION ################################# + +# Master-Replica replication. Use replicaof to make a Redis instance a copy of +# another Redis server. A few things to understand ASAP about Redis replication. 
+#
+# +------------------+      +---------------+
+# |      Master      | ---> |    Replica    |
+# | (receive writes) |      |  (exact copy) |
+# +------------------+      +---------------+
+#
+# 1) Redis replication is asynchronous, but you can configure a master to
+# stop accepting writes if it appears to be not connected with at least
+# a given number of replicas.
+# 2) Redis replicas are able to perform a partial resynchronization with the
+# master if the replication link is lost for a relatively small amount of
+# time. You may want to configure the replication backlog size (see the next
+# sections of this file) with a sensible value depending on your needs.
+# 3) Replication is automatic and does not need user intervention. After a
+# network partition replicas automatically try to reconnect to masters
+# and resynchronize with them.
+#
+# replicaof <masterip> <masterport>
+
+# If the master is password protected (using the "requirepass" configuration
+# directive below) it is possible to tell the replica to authenticate before
+# starting the replication synchronization process, otherwise the master will
+# refuse the replica request.
+#
+# masterauth <master-password>
+#
+# However this is not enough if you are using Redis ACLs (for Redis version
+# 6 or greater), and the default user is not capable of running the PSYNC
+# command and/or other commands needed for replication. In this case it's
+# better to configure a special user to use with replication, and specify the
+# masteruser configuration as such:
+#
+# masteruser <username>
+#
+# When masteruser is specified, the replica will authenticate against its
+# master using the new AUTH form: AUTH <masteruser> <masterauth>.
+
+# When a replica loses its connection with the master, or when the replication
+# is still in progress, the replica can act in two different ways:
+#
+# 1) if replica-serve-stale-data is set to 'yes' (the default) the replica will
+# still reply to client requests, possibly with out of date data, or the
+# data set may just be empty if this is the first synchronization.
+#
+# 2) If replica-serve-stale-data is set to 'no' the replica will reply with error
+# "MASTERDOWN Link with MASTER is down and replica-serve-stale-data is set to 'no'"
+# to all data access commands, excluding commands such as:
+# INFO, REPLICAOF, AUTH, SHUTDOWN, REPLCONF, ROLE, CONFIG, SUBSCRIBE,
+# UNSUBSCRIBE, PSUBSCRIBE, PUNSUBSCRIBE, PUBLISH, PUBSUB, COMMAND, POST,
+# HOST and LATENCY.
+#
+replica-serve-stale-data yes
+
+# You can configure a replica instance to accept writes or not. Writing against
+# a replica instance may be useful to store some ephemeral data (because data
+# written on a replica will be easily deleted after resync with the master) but
+# may also cause problems if clients are writing to it because of a
+# misconfiguration.
+#
+# Since Redis 2.6 by default replicas are read-only.
+#
+# Note: read only replicas are not designed to be exposed to untrusted clients
+# on the internet. It's just a protection layer against misuse of the instance.
+# Still a read only replica exports by default all the administrative commands
+# such as CONFIG, DEBUG, and so forth. To a limited extent you can improve
+# security of read only replicas using 'rename-command' to shadow all the
+# administrative / dangerous commands.
+replica-read-only yes
+
+# Replication SYNC strategy: disk or socket.
+#
+# New replicas and reconnecting replicas that are not able to continue the
+# replication process just receiving differences, need to do what is called a
+# "full synchronization".
An RDB file is transmitted from the master to the +# replicas. +# +# The transmission can happen in two different ways: +# +# 1) Disk-backed: The Redis master creates a new process that writes the RDB +# file on disk. Later the file is transferred by the parent +# process to the replicas incrementally. +# 2) Diskless: The Redis master creates a new process that directly writes the +# RDB file to replica sockets, without touching the disk at all. +# +# With disk-backed replication, while the RDB file is generated, more replicas +# can be queued and served with the RDB file as soon as the current child +# producing the RDB file finishes its work. With diskless replication instead +# once the transfer starts, new replicas arriving will be queued and a new +# transfer will start when the current one terminates. +# +# When diskless replication is used, the master waits a configurable amount of +# time (in seconds) before starting the transfer in the hope that multiple +# replicas will arrive and the transfer can be parallelized. +# +# With slow disks and fast (large bandwidth) networks, diskless replication +# works better. +repl-diskless-sync yes + +# When diskless replication is enabled, it is possible to configure the delay +# the server waits in order to spawn the child that transfers the RDB via socket +# to the replicas. +# +# This is important since once the transfer starts, it is not possible to serve +# new replicas arriving, that will be queued for the next RDB transfer, so the +# server waits a delay in order to let more replicas arrive. +# +# The delay is specified in seconds, and by default is 5 seconds. To disable +# it entirely just set it to 0 seconds and the transfer will start ASAP. +repl-diskless-sync-delay 5 + +# When diskless replication is enabled with a delay, it is possible to let +# the replication start before the maximum delay is reached if the maximum +# number of replicas expected have connected. Default of 0 means that the +# maximum is not defined and Redis will wait the full delay. +repl-diskless-sync-max-replicas 0 + +# ----------------------------------------------------------------------------- +# WARNING: RDB diskless load is experimental. Since in this setup the replica +# does not immediately store an RDB on disk, it may cause data loss during +# failovers. RDB diskless load + Redis modules not handling I/O reads may also +# cause Redis to abort in case of I/O errors during the initial synchronization +# stage with the master. Use only if you know what you are doing. +# ----------------------------------------------------------------------------- +# +# Replica can load the RDB it reads from the replication link directly from the +# socket, or store the RDB to a file and read that file after it was completely +# received from the master. +# +# In many cases the disk is slower than the network, and storing and loading +# the RDB file may increase replication time (and even increase the master's +# Copy on Write memory and replica buffers). +# However, parsing the RDB file directly from the socket may mean that we have +# to flush the contents of the current database before the full rdb was +# received. For this reason we have the following options: +# +# "disabled" - Don't use diskless load (store the rdb file to the disk first) +# "on-empty-db" - Use diskless load only when it is completely safe. +# "swapdb" - Keep current db contents in RAM while parsing the data directly +# from the socket. 
Replicas in this mode can keep serving current +# data set while replication is in progress, except for cases where +# they can't recognize master as having a data set from same +# replication history. +# Note that this requires sufficient memory, if you don't have it, +# you risk an OOM kill. +repl-diskless-load disabled + +# Master send PINGs to its replicas in a predefined interval. It's possible to +# change this interval with the repl_ping_replica_period option. The default +# value is 10 seconds. +# +# repl-ping-replica-period 10 + +# The following option sets the replication timeout for: +# +# 1) Bulk transfer I/O during SYNC, from the point of view of replica. +# 2) Master timeout from the point of view of replicas (data, pings). +# 3) Replica timeout from the point of view of masters (REPLCONF ACK pings). +# +# It is important to make sure that this value is greater than the value +# specified for repl-ping-replica-period otherwise a timeout will be detected +# every time there is low traffic between the master and the replica. The default +# value is 60 seconds. +# +# repl-timeout 60 + +# Disable TCP_NODELAY on the replica socket after SYNC? +# +# If you select "yes" Redis will use a smaller number of TCP packets and +# less bandwidth to send data to replicas. But this can add a delay for +# the data to appear on the replica side, up to 40 milliseconds with +# Linux kernels using a default configuration. +# +# If you select "no" the delay for data to appear on the replica side will +# be reduced but more bandwidth will be used for replication. +# +# By default we optimize for low latency, but in very high traffic conditions +# or when the master and replicas are many hops away, turning this to "yes" may +# be a good idea. +repl-disable-tcp-nodelay no + +# Set the replication backlog size. The backlog is a buffer that accumulates +# replica data when replicas are disconnected for some time, so that when a +# replica wants to reconnect again, often a full resync is not needed, but a +# partial resync is enough, just passing the portion of data the replica +# missed while disconnected. +# +# The bigger the replication backlog, the longer the replica can endure the +# disconnect and later be able to perform a partial resynchronization. +# +# The backlog is only allocated if there is at least one replica connected. +# +# repl-backlog-size 1mb + +# After a master has no connected replicas for some time, the backlog will be +# freed. The following option configures the amount of seconds that need to +# elapse, starting from the time the last replica disconnected, for the backlog +# buffer to be freed. +# +# Note that replicas never free the backlog for timeout, since they may be +# promoted to masters later, and should be able to correctly "partially +# resynchronize" with other replicas: hence they should always accumulate backlog. +# +# A value of 0 means to never release the backlog. +# +# repl-backlog-ttl 3600 + +# The replica priority is an integer number published by Redis in the INFO +# output. It is used by Redis Sentinel in order to select a replica to promote +# into a master if the master is no longer working correctly. +# +# A replica with a low priority number is considered better for promotion, so +# for instance if there are three replicas with priority 10, 100, 25 Sentinel +# will pick the one with priority 10, that is the lowest. 
+#
+# However a special priority of 0 marks the replica as not able to perform the
+# role of master, so a replica with priority of 0 will never be selected by
+# Redis Sentinel for promotion.
+#
+# By default the priority is 100.
+replica-priority 100
+
+# The propagation error behavior controls how Redis will behave when it is
+# unable to handle a command being processed in the replication stream from a master
+# or processed while reading from an AOF file. Errors that occur during propagation
+# are unexpected, and can cause data inconsistency. However, there are edge cases
+# in earlier versions of Redis where it was possible for the server to replicate or persist
+# commands that would fail on future versions. For this reason the default behavior
+# is to ignore such errors and continue processing commands.
+#
+# If an application wants to ensure there is no data divergence, this configuration
+# should be set to 'panic' instead. The value can also be set to 'panic-on-replicas'
+# to only panic when a replica encounters an error on the replication stream. One of
+# these two panic values will become the default value in the future once there are
+# sufficient safety mechanisms in place to prevent false positive crashes.
+#
+# propagation-error-behavior ignore
+
+# Replica ignore disk write errors controls the behavior of a replica when it is
+# unable to persist a write command received from its master to disk. By default,
+# this configuration is set to 'no' and will crash the replica in this condition.
+# It is not recommended to change this default, however in order to be compatible
+# with older versions of Redis this config can be toggled to 'yes' which will just
+# log a warning and execute the write command it got from the master.
+#
+# replica-ignore-disk-write-errors no
+
+# -----------------------------------------------------------------------------
+# By default, Redis Sentinel includes all replicas in its reports. A replica
+# can be excluded from Redis Sentinel's announcements. An unannounced replica
+# will be ignored by the 'sentinel replicas <master>' command and won't be
+# exposed to Redis Sentinel's clients.
+#
+# This option does not change the behavior of replica-priority. Even with
+# replica-announced set to 'no', the replica can be promoted to master. To
+# prevent this behavior, set replica-priority to 0.
+#
+# replica-announced yes
+
+# It is possible for a master to stop accepting writes if there are less than
+# N replicas connected, having a lag less or equal than M seconds.
+#
+# The N replicas need to be in "online" state.
+#
+# The lag in seconds, that must be <= the specified value, is calculated from
+# the last ping received from the replica, that is usually sent every second.
+#
+# This option does not GUARANTEE that N replicas will accept the write, but
+# will limit the window of exposure for lost writes in case not enough replicas
+# are available, to the specified number of seconds.
+#
+# For example to require at least 3 replicas with a lag <= 10 seconds use:
+#
+# min-replicas-to-write 3
+# min-replicas-max-lag 10
+#
+# Setting one or the other to 0 disables the feature.
+#
+# By default min-replicas-to-write is set to 0 (feature disabled) and
+# min-replicas-max-lag is set to 10.
+
+# A Redis master is able to list the address and port of the attached
+# replicas in different ways. For example the "INFO replication" section
+# offers this information, which is used, among other tools, by
+# Redis Sentinel in order to discover replica instances.
+# Another place where this info is available is in the output of the +# "ROLE" command of a master. +# +# The listed IP address and port normally reported by a replica is +# obtained in the following way: +# +# IP: The address is auto detected by checking the peer address +# of the socket used by the replica to connect with the master. +# +# Port: The port is communicated by the replica during the replication +# handshake, and is normally the port that the replica is using to +# listen for connections. +# +# However when port forwarding or Network Address Translation (NAT) is +# used, the replica may actually be reachable via different IP and port +# pairs. The following two options can be used by a replica in order to +# report to its master a specific set of IP and port, so that both INFO +# and ROLE will report those values. +# +# There is no need to use both the options if you need to override just +# the port or the IP address. +# +# replica-announce-ip 5.5.5.5 +# replica-announce-port 1234 + +############################### KEYS TRACKING ################################# + +# Redis implements server assisted support for client side caching of values. +# This is implemented using an invalidation table that remembers, using +# a radix key indexed by key name, what clients have which keys. In turn +# this is used in order to send invalidation messages to clients. Please +# check this page to understand more about the feature: +# +# https://redis.io/topics/client-side-caching +# +# When tracking is enabled for a client, all the read only queries are assumed +# to be cached: this will force Redis to store information in the invalidation +# table. When keys are modified, such information is flushed away, and +# invalidation messages are sent to the clients. However if the workload is +# heavily dominated by reads, Redis could use more and more memory in order +# to track the keys fetched by many clients. +# +# For this reason it is possible to configure a maximum fill value for the +# invalidation table. By default it is set to 1M of keys, and once this limit +# is reached, Redis will start to evict keys in the invalidation table +# even if they were not modified, just to reclaim memory: this will in turn +# force the clients to invalidate the cached values. Basically the table +# maximum size is a trade off between the memory you want to spend server +# side to track information about who cached what, and the ability of clients +# to retain cached objects in memory. +# +# If you set the value to 0, it means there are no limits, and Redis will +# retain as many keys as needed in the invalidation table. +# In the "stats" INFO section, you can find information about the number of +# keys in the invalidation table at every given moment. +# +# Note: when key tracking is used in broadcasting mode, no memory is used +# in the server side so this setting is useless. +# +# tracking-table-max-keys 1000000 + +################################## SECURITY ################################### + +# Warning: since Redis is pretty fast, an outside user can try up to +# 1 million passwords per second against a modern box. This means that you +# should use very strong passwords, otherwise they will be very easy to break. 
+# Note that because the password is really a shared secret between the client
+# and the server, and should not be memorized by any human, the password
+# can be easily a long string from /dev/urandom or whatever, so by using a
+# long and unguessable password no brute force attack will be possible.
+
+# Redis ACL users are defined in the following format:
+#
+# user <username> ... acl rules ...
+#
+# For example:
+#
+# user worker +@list +@connection ~jobs:* on >ffa9203c493aa99
+#
+# The special username "default" is used for new connections. If this user
+# has the "nopass" rule, then new connections will be immediately authenticated
+# as the "default" user without the need of any password provided via the
+# AUTH command. Otherwise if the "default" user is not flagged with "nopass"
+# the connections will start in not authenticated state, and will require
+# AUTH (or the HELLO command AUTH option) in order to be authenticated and
+# start to work.
+#
+# The ACL rules that describe what a user can do are the following:
+#
+# on Enable the user: it is possible to authenticate as this user.
+# off Disable the user: it's no longer possible to authenticate
+# with this user, however the already authenticated connections
+# will still work.
+# skip-sanitize-payload RESTORE dump-payload sanitization is skipped.
+# sanitize-payload RESTORE dump-payload is sanitized (default).
+# +<command> Allow the execution of that command.
+# May be used with `|` for allowing subcommands (e.g "+config|get")
+# -<command> Disallow the execution of that command.
+# May be used with `|` for blocking subcommands (e.g "-config|set")
+# +@<category> Allow the execution of all the commands in such category
+# with valid categories are like @admin, @set, @sortedset, ...
+# and so forth, see the full list in the server.c file where
+# the Redis command table is described and defined.
+# The special category @all means all the commands, but currently
+# present in the server, and that will be loaded in the future
+# via modules.
+# +<command>|first-arg Allow a specific first argument of an otherwise
+# disabled command. It is only supported on commands with
+# no sub-commands, and is not allowed as negative form
+# like -SELECT|1, only additive starting with "+". This
+# feature is deprecated and may be removed in the future.
+# allcommands Alias for +@all. Note that it implies the ability to execute
+# all the future commands loaded via the modules system.
+# nocommands Alias for -@all.
+# ~<pattern> Add a pattern of keys that can be mentioned as part of
+# commands. For instance ~* allows all the keys. The pattern
+# is a glob-style pattern like the one of KEYS.
+# It is possible to specify multiple patterns.
+# %R~<pattern> Add key read pattern that specifies which keys can be read
+# from.
+# %W~<pattern> Add key write pattern that specifies which keys can be
+# written to.
+# allkeys Alias for ~*
+# resetkeys Flush the list of allowed keys patterns.
+# &<pattern> Add a glob-style pattern of Pub/Sub channels that can be
+# accessed by the user. It is possible to specify multiple channel
+# patterns.
+# allchannels Alias for &*
+# resetchannels Flush the list of allowed channel patterns.
+# ><password> Add this password to the list of valid password for the user.
+# For example >mypass will add "mypass" to the list.
+# This directive clears the "nopass" flag (see later).
+# <<password> Remove this password from the list of valid passwords.
+# nopass All the set passwords of the user are removed, and the user
+# is flagged as requiring no password: it means that every
+# password will work against this user.
If this directive is
+# used for the default user, every new connection will be
+# immediately authenticated with the default user without
+# any explicit AUTH command required. Note that the "resetpass"
+# directive will clear this condition.
+# resetpass Flush the list of allowed passwords. Moreover removes the
+# "nopass" status. After "resetpass" the user has no associated
+# passwords and there is no way to authenticate without adding
+# some password (or setting it as "nopass" later).
+# reset Performs the following actions: resetpass, resetkeys, off,
+# -@all. The user returns to the same state it has immediately
+# after its creation.
+# (<options>) Create a new selector with the options specified within the
+# parentheses and attach it to the user. Each option should be
+# space separated. The first character must be ( and the last
+# character must be ).
+# clearselectors Remove all of the currently attached selectors.
+# Note this does not change the "root" user permissions,
+# which are the permissions directly applied onto the
+# user (outside the parentheses).
+#
+# ACL rules can be specified in any order: for instance you can start with
+# passwords, then flags, or key patterns. However note that the additive
+# and subtractive rules will CHANGE MEANING depending on the ordering.
+# For instance see the following example:
+#
+# user alice on +@all -DEBUG ~* >somepassword
+#
+# This will allow "alice" to use all the commands with the exception of the
+# DEBUG command, since +@all added all the commands to the set of the commands
+# alice can use, and later DEBUG was removed. However if we invert the order
+# of two ACL rules the result will be different:
+#
+# user alice on -DEBUG +@all ~* >somepassword
+#
+# Now DEBUG was removed when alice had yet no commands in the set of allowed
+# commands, later all the commands are added, so the user will be able to
+# execute everything.
+#
+# Basically ACL rules are processed left-to-right.
+#
+# The following is a list of command categories and their meanings:
+# * keyspace - Writing or reading from keys, databases, or their metadata
+# in a type agnostic way. Includes DEL, RESTORE, DUMP, RENAME, EXISTS, DBSIZE,
+# KEYS, EXPIRE, TTL, FLUSHALL, etc. Commands that may modify the keyspace,
+# key or metadata will also have `write` category. Commands that only read
+# the keyspace, key or metadata will have the `read` category.
+# * read - Reading from keys (values or metadata). Note that commands that don't
+# interact with keys, will not have either `read` or `write`.
+# * write - Writing to keys (values or metadata)
+# * admin - Administrative commands. Normal applications will never need to use
+# these. Includes REPLICAOF, CONFIG, DEBUG, SAVE, MONITOR, ACL, SHUTDOWN, etc.
+# * dangerous - Potentially dangerous (each should be considered with care for
+# various reasons). This includes FLUSHALL, MIGRATE, RESTORE, SORT, KEYS,
+# CLIENT, DEBUG, INFO, CONFIG, SAVE, REPLICAOF, etc.
+# * connection - Commands affecting the connection or other connections.
+# This includes AUTH, SELECT, COMMAND, CLIENT, ECHO, PING, etc.
+# * blocking - Potentially blocking the connection until released by another
+# command.
+# * fast - Fast O(1) commands. May loop on the number of arguments, but not the
+# number of elements in the key.
+# * slow - All commands that are not Fast.
+# * pubsub - PUBLISH / SUBSCRIBE related
+# * transaction - WATCH / MULTI / EXEC related commands.
+# * scripting - Scripting related.
+# * set - Data type: sets related.
+# * sortedset - Data type: zsets related.
+# * list - Data type: lists related.
+# * hash - Data type: hashes related.
+# * string - Data type: strings related.
+# * bitmap - Data type: bitmaps related.
+# * hyperloglog - Data type: hyperloglog related.
+# * geo - Data type: geo related.
+# * stream - Data type: streams related.
+#
+# For more information about ACL configuration please refer to
+# the Redis web site at https://redis.io/topics/acl
+
+# ACL LOG
+#
+# The ACL Log tracks failed commands and authentication events associated
+# with ACLs. The ACL Log is useful to troubleshoot failed commands blocked
+# by ACLs. The ACL Log is stored in memory. You can reclaim memory with
+# ACL LOG RESET. Define the maximum entry length of the ACL Log below.
+acllog-max-len 128
+
+# Using an external ACL file
+#
+# Instead of configuring users here in this file, it is possible to use
+# a stand-alone file just listing users. The two methods cannot be mixed:
+# if you configure users here and at the same time you activate the external
+# ACL file, the server will refuse to start.
+#
+# The format of the external ACL user file is exactly the same as the
+# format that is used inside redis.conf to describe users.
+#
+# aclfile /etc/redis/users.acl
+
+# IMPORTANT NOTE: starting with Redis 6 "requirepass" is just a compatibility
+# layer on top of the new ACL system. The option effect will be just setting
+# the password for the default user. Clients will still authenticate using
+# AUTH <password> as usually, or more explicitly with AUTH default <password>
+# if they follow the new protocol: both will work.
+#
+# The requirepass is not compatible with aclfile option and the ACL LOAD
+# command, these will cause requirepass to be ignored.
+#
+# requirepass foobared
+
+# New users are initialized with restrictive permissions by default, via the
+# equivalent of this ACL rule 'off resetkeys -@all'. Starting with Redis 6.2, it
+# is possible to manage access to Pub/Sub channels with ACL rules as well. The
+# default Pub/Sub channels permission for new users is controlled by the
+# acl-pubsub-default configuration directive, which accepts one of these values:
+#
+# allchannels: grants access to all Pub/Sub channels
+# resetchannels: revokes access to all Pub/Sub channels
+#
+# From Redis 7.0, acl-pubsub-default defaults to 'resetchannels' permission.
+#
+# acl-pubsub-default resetchannels
+
+# Command renaming (DEPRECATED).
+#
+# ------------------------------------------------------------------------
+# WARNING: avoid using this option if possible. Instead use ACLs to remove
+# commands from the default user, and put them only in some admin user you
+# create for administrative purposes.
+# ------------------------------------------------------------------------
+#
+# It is possible to change the name of dangerous commands in a shared
+# environment. For instance the CONFIG command may be renamed into something
+# hard to guess so that it will still be available for internal-use tools
+# but not available for general clients.
+#
+# Example:
+#
+# rename-command CONFIG b840fc02d524045429941cc15f59e41cb7be6c52
+#
+# It is also possible to completely kill a command by renaming it into
+# an empty string:
+#
+# rename-command CONFIG ""
+#
+# Please note that changing the name of commands that are logged into the
+# AOF file or transmitted to replicas may cause problems.
+
+################################### CLIENTS ####################################
+
+# Set the max number of connected clients at the same time.
By default
+# this limit is set to 10000 clients, however if the Redis server is not
+# able to configure the process file limit to allow for the specified limit
+# the max number of allowed clients is set to the current file limit
+# minus 32 (as Redis reserves a few file descriptors for internal uses).
+#
+# Once the limit is reached Redis will close all the new connections sending
+# an error 'max number of clients reached'.
+#
+# IMPORTANT: When Redis Cluster is used, the max number of connections is also
+# shared with the cluster bus: every node in the cluster will use two
+# connections, one incoming and another outgoing. It is important to size the
+# limit accordingly in case of very large clusters.
+#
+# maxclients 10000
+
+############################## MEMORY MANAGEMENT ################################
+
+# Set a memory usage limit to the specified amount of bytes.
+# When the memory limit is reached Redis will try to remove keys
+# according to the eviction policy selected (see maxmemory-policy).
+#
+# If Redis can't remove keys according to the policy, or if the policy is
+# set to 'noeviction', Redis will start to reply with errors to commands
+# that would use more memory, like SET, LPUSH, and so on, and will continue
+# to reply to read-only commands like GET.
+#
+# This option is usually useful when using Redis as an LRU or LFU cache, or to
+# set a hard memory limit for an instance (using the 'noeviction' policy).
+#
+# WARNING: If you have replicas attached to an instance with maxmemory on,
+# the size of the output buffers needed to feed the replicas are subtracted
+# from the used memory count, so that network problems / resyncs will
+# not trigger a loop where keys are evicted, and in turn the output
+# buffer of replicas is full with DELs of keys evicted triggering the deletion
+# of more keys, and so forth until the database is completely emptied.
+#
+# In short... if you have replicas attached it is suggested that you set a lower
+# limit for maxmemory so that there is some free RAM on the system for replica
+# output buffers (but this is not needed if the policy is 'noeviction').
+#
+# maxmemory <bytes>
+
+# MAXMEMORY POLICY: how Redis will select what to remove when maxmemory
+# is reached. You can select one from the following behaviors:
+#
+# volatile-lru -> Evict using approximated LRU, only keys with an expire set.
+# allkeys-lru -> Evict any key using approximated LRU.
+# volatile-lfu -> Evict using approximated LFU, only keys with an expire set.
+# allkeys-lfu -> Evict any key using approximated LFU.
+# volatile-random -> Remove a random key having an expire set.
+# allkeys-random -> Remove a random key, any key.
+# volatile-ttl -> Remove the key with the nearest expire time (minor TTL)
+# noeviction -> Don't evict anything, just return an error on write operations.
+#
+# LRU means Least Recently Used
+# LFU means Least Frequently Used
+#
+# Both LRU, LFU and volatile-ttl are implemented using approximated
+# randomized algorithms.
+#
+# Note: with any of the above policies, when there are no suitable keys for
+# eviction, Redis will return an error on write operations that require
+# more memory. These are usually commands that create new keys, add data or
+# modify existing keys. A few examples are: SET, INCR, HSET, LPUSH, SUNIONSTORE,
+# SORT (due to the STORE argument), and EXEC (if the transaction includes any
+# command that requires memory).
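+#
+# As a hypothetical combined sketch of the cache use case described above
+# (values illustrative, not active in this deployment), one would pair a
+# hard limit with an all-keys eviction policy:
+#
+# maxmemory 256mb
+# maxmemory-policy allkeys-lru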
+# +# The default is: +# +# maxmemory-policy noeviction + +# LRU, LFU and minimal TTL algorithms are not precise algorithms but approximated +# algorithms (in order to save memory), so you can tune it for speed or +# accuracy. By default Redis will check five keys and pick the one that was +# used least recently, you can change the sample size using the following +# configuration directive. +# +# The default of 5 produces good enough results. 10 Approximates very closely +# true LRU but costs more CPU. 3 is faster but not very accurate. +# +# maxmemory-samples 5 + +# Eviction processing is designed to function well with the default setting. +# If there is an unusually large amount of write traffic, this value may need to +# be increased. Decreasing this value may reduce latency at the risk of +# eviction processing effectiveness +# 0 = minimum latency, 10 = default, 100 = process without regard to latency +# +# maxmemory-eviction-tenacity 10 + +# Starting from Redis 5, by default a replica will ignore its maxmemory setting +# (unless it is promoted to master after a failover or manually). It means +# that the eviction of keys will be just handled by the master, sending the +# DEL commands to the replica as keys evict in the master side. +# +# This behavior ensures that masters and replicas stay consistent, and is usually +# what you want, however if your replica is writable, or you want the replica +# to have a different memory setting, and you are sure all the writes performed +# to the replica are idempotent, then you may change this default (but be sure +# to understand what you are doing). +# +# Note that since the replica by default does not evict, it may end using more +# memory than the one set via maxmemory (there are certain buffers that may +# be larger on the replica, or data structures may sometimes take more memory +# and so forth). So make sure you monitor your replicas and make sure they +# have enough memory to never hit a real out-of-memory condition before the +# master hits the configured maxmemory setting. +# +# replica-ignore-maxmemory yes + +# Redis reclaims expired keys in two ways: upon access when those keys are +# found to be expired, and also in background, in what is called the +# "active expire key". The key space is slowly and interactively scanned +# looking for expired keys to reclaim, so that it is possible to free memory +# of keys that are expired and will never be accessed again in a short time. +# +# The default effort of the expire cycle will try to avoid having more than +# ten percent of expired keys still in memory, and will try to avoid consuming +# more than 25% of total memory and to add latency to the system. However +# it is possible to increase the expire "effort" that is normally set to +# "1", to a greater value, up to the value "10". At its maximum value the +# system will use more CPU, longer cycles (and technically may introduce +# more latency), and will tolerate less already expired keys still present +# in the system. It's a tradeoff between memory, CPU and latency. +# +# active-expire-effort 1 + +############################# LAZY FREEING #################################### + +# Redis has two primitives to delete keys. One is called DEL and is a blocking +# deletion of the object. It means that the server stops processing new commands +# in order to reclaim all the memory associated with an object in a synchronous +# way. 
If the key deleted is associated with a small object, the time needed
+# in order to execute the DEL command is very small and comparable to most other
+# O(1) or O(log_N) commands in Redis. However if the key is associated with an
+# aggregated value containing millions of elements, the server can block for
+# a long time (even seconds) in order to complete the operation.
+#
+# For the above reasons Redis also offers non blocking deletion primitives
+# such as UNLINK (non blocking DEL) and the ASYNC option of FLUSHALL and
+# FLUSHDB commands, in order to reclaim memory in background. Those commands
+# are executed in constant time. Another thread will incrementally free the
+# object in the background as fast as possible.
+#
+# DEL, UNLINK and ASYNC option of FLUSHALL and FLUSHDB are user-controlled.
+# It's up to the design of the application to understand when it is a good
+# idea to use one or the other. However the Redis server sometimes has to
+# delete keys or flush the whole database as a side effect of other operations.
+# Specifically Redis deletes objects independently of a user call in the
+# following scenarios:
+#
+# 1) On eviction, because of the maxmemory and maxmemory policy configurations,
+#    in order to make room for new data, without going over the specified
+#    memory limit.
+# 2) Because of expire: when a key with an associated time to live (see the
+#    EXPIRE command) must be deleted from memory.
+# 3) Because of a side effect of a command that stores data on a key that may
+#    already exist. For example the RENAME command may delete the old key
+#    content when it is replaced with another one. Similarly SUNIONSTORE
+#    or SORT with STORE option may delete existing keys. The SET command
+#    itself removes any old content of the specified key in order to replace
+#    it with the specified string.
+# 4) During replication, when a replica performs a full resynchronization with
+#    its master, the content of the whole database is removed in order to
+#    load the RDB file just transferred.
+#
+# In all the above cases the default is to delete objects in a blocking way,
+# as if DEL was called. However you can configure each case specifically
+# in order to instead release memory in a non-blocking way, as if UNLINK
+# was called, using the following configuration directives.
+
+lazyfree-lazy-eviction no
+lazyfree-lazy-expire no
+lazyfree-lazy-server-del no
+replica-lazy-flush no
+
+# For cases where replacing the user code's DEL calls with UNLINK calls is
+# not easy, it is also possible to modify the default behavior of the DEL
+# command so that it acts exactly like UNLINK, using the following
+# configuration directive:
+
+lazyfree-lazy-user-del no
+
+# FLUSHDB, FLUSHALL, SCRIPT FLUSH and FUNCTION FLUSH support both asynchronous and synchronous
+# deletion, which can be controlled by passing the [SYNC|ASYNC] flags into the
+# commands. When neither flag is passed, this directive will be used to determine
+# if the data should be deleted asynchronously.
+
+lazyfree-lazy-user-flush no
+
+################################ THREADED I/O #################################
+
+# Redis is mostly single threaded, however there are certain threaded
+# operations such as UNLINK, slow I/O accesses and other things that are
+# performed on side threads.
+#
+# Now it is also possible to handle Redis clients socket reads and writes
+# in different I/O threads. 
Since especially writing is so slow, normally +# Redis users use pipelining in order to speed up the Redis performances per +# core, and spawn multiple instances in order to scale more. Using I/O +# threads it is possible to easily speedup two times Redis without resorting +# to pipelining nor sharding of the instance. +# +# By default threading is disabled, we suggest enabling it only in machines +# that have at least 4 or more cores, leaving at least one spare core. +# Using more than 8 threads is unlikely to help much. We also recommend using +# threaded I/O only if you actually have performance problems, with Redis +# instances being able to use a quite big percentage of CPU time, otherwise +# there is no point in using this feature. +# +# So for instance if you have a four cores boxes, try to use 2 or 3 I/O +# threads, if you have a 8 cores, try to use 6 threads. In order to +# enable I/O threads use the following configuration directive: +# +# io-threads 4 +# +# Setting io-threads to 1 will just use the main thread as usual. +# When I/O threads are enabled, we only use threads for writes, that is +# to thread the write(2) syscall and transfer the client buffers to the +# socket. However it is also possible to enable threading of reads and +# protocol parsing using the following configuration directive, by setting +# it to yes: +# +# io-threads-do-reads no +# +# Usually threading reads doesn't help much. +# +# NOTE 1: This configuration directive cannot be changed at runtime via +# CONFIG SET. Also, this feature currently does not work when SSL is +# enabled. +# +# NOTE 2: If you want to test the Redis speedup using redis-benchmark, make +# sure you also run the benchmark itself in threaded mode, using the +# --threads option to match the number of Redis threads, otherwise you'll not +# be able to notice the improvements. + +############################ KERNEL OOM CONTROL ############################## + +# On Linux, it is possible to hint the kernel OOM killer on what processes +# should be killed first when out of memory. +# +# Enabling this feature makes Redis actively control the oom_score_adj value +# for all its processes, depending on their role. The default scores will +# attempt to have background child processes killed before all others, and +# replicas killed before masters. +# +# Redis supports these options: +# +# no: Don't make changes to oom-score-adj (default). +# yes: Alias to "relative" see below. +# absolute: Values in oom-score-adj-values are written as is to the kernel. +# relative: Values are used relative to the initial value of oom_score_adj when +# the server starts and are then clamped to a range of -1000 to 1000. +# Because typically the initial value is 0, they will often match the +# absolute values. +oom-score-adj no + +# When oom-score-adj is used, this directive controls the specific values used +# for master, replica and background child processes. Values range -2000 to +# 2000 (higher means more likely to be killed). +# +# Unprivileged processes (not root, and without CAP_SYS_RESOURCE capabilities) +# can freely increase their value, but not decrease it below its initial +# settings. This means that setting oom-score-adj to "relative" and setting the +# oom-score-adj-values to positive values will always succeed. 
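+#
+# For example (an illustrative reading of the defaults set below): with
+# "oom-score-adj absolute", the master process runs with oom_score_adj 0,
+# replicas with 200, and background children (RDB/AOF forks) with 800, so
+# the kernel OOM killer prefers the forked children and spares the master.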
+oom-score-adj-values 0 200 800
+
+
+#################### KERNEL transparent hugepage CONTROL ######################
+
+# Usually the kernel Transparent Huge Pages control is set to "madvise" or
+# "never" by default (/sys/kernel/mm/transparent_hugepage/enabled), in which
+# case this config has no effect. On systems in which it is set to "always",
+# redis will attempt to disable it specifically for the redis process in order
+# to avoid latency problems specifically with fork(2) and CoW.
+# If for some reason you prefer to keep it enabled, you can set this config to
+# "no" and the kernel global to "always".
+
+disable-thp yes
+
+############################## APPEND ONLY MODE ###############################
+
+# By default Redis asynchronously dumps the dataset on disk. This mode is
+# good enough in many applications, but an issue with the Redis process or
+# a power outage may result in a few minutes of writes lost (depending on
+# the configured save points).
+#
+# The Append Only File is an alternative persistence mode that provides
+# much better durability. For instance using the default data fsync policy
+# (see later in the config file) Redis can lose just one second of writes in a
+# dramatic event like a server power outage, or a single write if something
+# wrong with the Redis process itself happens, but the operating system is
+# still running correctly.
+#
+# AOF and RDB persistence can be enabled at the same time without problems.
+# If the AOF is enabled on startup Redis will load the AOF, that is the file
+# with the better durability guarantees.
+#
+# Please check https://redis.io/topics/persistence for more information.
+
+appendonly no
+
+# The base name of the append only file.
+#
+# Redis 7 and newer use a set of append-only files to persist the dataset
+# and changes applied to it. There are two basic types of files in use:
+#
+# - Base files, which are a snapshot representing the complete state of the
+#   dataset at the time the file was created. Base files can be either in
+#   the form of RDB (binary serialized) or AOF (textual commands).
+# - Incremental files, which contain additional commands that were applied
+#   to the dataset following the previous file.
+#
+# In addition, manifest files are used to track the files and the order in
+# which they were created and should be applied.
+#
+# Append-only file names are created by Redis following a specific pattern.
+# The file name's prefix is based on the 'appendfilename' configuration
+# parameter, followed by additional information about the sequence and type.
+#
+# For example, if appendfilename is set to appendonly.aof, the following file
+# names could be derived:
+#
+# - appendonly.aof.1.base.rdb as a base file.
+# - appendonly.aof.1.incr.aof, appendonly.aof.2.incr.aof as incremental files.
+# - appendonly.aof.manifest as a manifest file.
+
+appendfilename "appendonly.aof"
+
+# For convenience, Redis stores all persistent append-only files in a dedicated
+# directory. The name of the directory is determined by the appenddirname
+# configuration parameter.
+
+appenddirname "appendonlydir"
+
+# The fsync() call tells the Operating System to actually write data on disk
+# instead of waiting for more data in the output buffer. Some OS will really flush
+# data on disk, some other OS will just try to do it ASAP.
+#
+# Redis supports three different modes:
+#
+# no: don't fsync, just let the OS flush the data when it wants. Faster.
+# always: fsync after every write to the append only log. Slow, Safest.
+# everysec: fsync only one time every second. Compromise. +# +# The default is "everysec", as that's usually the right compromise between +# speed and data safety. It's up to you to understand if you can relax this to +# "no" that will let the operating system flush the output buffer when +# it wants, for better performances (but if you can live with the idea of +# some data loss consider the default persistence mode that's snapshotting), +# or on the contrary, use "always" that's very slow but a bit safer than +# everysec. +# +# More details please check the following article: +# http://antirez.com/post/redis-persistence-demystified.html +# +# If unsure, use "everysec". + +# appendfsync always +appendfsync everysec +# appendfsync no + +# When the AOF fsync policy is set to always or everysec, and a background +# saving process (a background save or AOF log background rewriting) is +# performing a lot of I/O against the disk, in some Linux configurations +# Redis may block too long on the fsync() call. Note that there is no fix for +# this currently, as even performing fsync in a different thread will block +# our synchronous write(2) call. +# +# In order to mitigate this problem it's possible to use the following option +# that will prevent fsync() from being called in the main process while a +# BGSAVE or BGREWRITEAOF is in progress. +# +# This means that while another child is saving, the durability of Redis is +# the same as "appendfsync no". In practical terms, this means that it is +# possible to lose up to 30 seconds of log in the worst scenario (with the +# default Linux settings). +# +# If you have latency problems turn this to "yes". Otherwise leave it as +# "no" that is the safest pick from the point of view of durability. + +no-appendfsync-on-rewrite no + +# Automatic rewrite of the append only file. +# Redis is able to automatically rewrite the log file implicitly calling +# BGREWRITEAOF when the AOF log size grows by the specified percentage. +# +# This is how it works: Redis remembers the size of the AOF file after the +# latest rewrite (if no rewrite has happened since the restart, the size of +# the AOF at startup is used). +# +# This base size is compared to the current size. If the current size is +# bigger than the specified percentage, the rewrite is triggered. Also +# you need to specify a minimal size for the AOF file to be rewritten, this +# is useful to avoid rewriting the AOF file even if the percentage increase +# is reached but it is still pretty small. +# +# Specify a percentage of zero in order to disable the automatic AOF +# rewrite feature. + +auto-aof-rewrite-percentage 100 +auto-aof-rewrite-min-size 64mb + +# An AOF file may be found to be truncated at the end during the Redis +# startup process, when the AOF data gets loaded back into memory. +# This may happen when the system where Redis is running +# crashes, especially when an ext4 filesystem is mounted without the +# data=ordered option (however this can't happen when Redis itself +# crashes or aborts but the operating system still works correctly). +# +# Redis can either exit with an error when this happens, or load as much +# data as possible (the default now) and start if the AOF file is found +# to be truncated at the end. The following option controls this behavior. +# +# If aof-load-truncated is set to yes, a truncated AOF file is loaded and +# the Redis server starts emitting a log to inform the user of the event. 
+# Otherwise if the option is set to no, the server aborts with an error
+# and refuses to start. When the option is set to no, the user is required
+# to fix the AOF file using the "redis-check-aof" utility before restarting
+# the server.
+#
+# Note that if the AOF file is found to be corrupted in the middle,
+# the server will still exit with an error. This option only applies when
+# Redis tries to read more data from the AOF file but not enough bytes
+# are found.
+aof-load-truncated yes
+
+# Redis can create append-only base files in either RDB or AOF formats. Using
+# the RDB format is always faster and more efficient, and disabling it is only
+# supported for backward compatibility purposes.
+aof-use-rdb-preamble yes
+
+# Redis supports recording timestamp annotations in the AOF to support restoring
+# the data from a specific point-in-time. However, using this capability changes
+# the AOF format in a way that may not be compatible with existing AOF parsers.
+aof-timestamp-enabled no
+
+################################ SHUTDOWN #####################################
+
+# Maximum time to wait for replicas when shutting down, in seconds.
+#
+# During shutdown, a grace period allows any lagging replicas to catch up with
+# the latest replication offset before the master exits. This period can
+# prevent data loss, especially for deployments without configured disk backups.
+#
+# The 'shutdown-timeout' value is the grace period's duration in seconds. It is
+# only applicable when the instance has replicas. To disable the feature, set
+# the value to 0.
+#
+# shutdown-timeout 10
+
+# When Redis receives a SIGINT or SIGTERM, shutdown is initiated and by default
+# an RDB snapshot is written to disk in a blocking operation if save points are configured.
+# The options used on signaled shutdown can include the following values:
+# default: Saves RDB snapshot only if save points are configured.
+#          Waits for lagging replicas to catch up.
+# save: Forces a DB saving operation even if no save points are configured.
+# nosave: Prevents DB saving operation even if one or more save points are configured.
+# now: Skips waiting for lagging replicas.
+# force: Ignores any errors that would normally prevent the server from exiting.
+#
+# Any combination of values is allowed as long as "save" and "nosave" are not set simultaneously.
+# Example: "nosave force now"
+#
+# shutdown-on-sigint default
+# shutdown-on-sigterm default
+
+################ NON-DETERMINISTIC LONG BLOCKING COMMANDS #####################
+
+# Maximum time in milliseconds for EVAL scripts, functions and in some cases
+# modules' commands before Redis can start processing or rejecting other clients.
+#
+# If the maximum execution time is reached Redis will start to reply to most
+# commands with a BUSY error.
+#
+# In this state Redis will only allow a handful of commands to be executed.
+# For instance, SCRIPT KILL, FUNCTION KILL, SHUTDOWN NOSAVE and possibly some
+# module specific 'allow-busy' commands.
+#
+# SCRIPT KILL and FUNCTION KILL will only be able to stop a script that did not
+# yet call any write commands, so SHUTDOWN NOSAVE may be the only way to stop
+# the server in the case a write command was already issued by the script when
+# the user doesn't want to wait for the natural termination of the script.
+#
+# The default is 5 seconds. It is possible to set it to 0 or a negative value
+# to disable this mechanism (uninterrupted execution). 
Note that in the past +# this config had a different name, which is now an alias, so both of these do +# the same: +# lua-time-limit 5000 +# busy-reply-threshold 5000 + +################################ REDIS CLUSTER ############################### + +# Normal Redis instances can't be part of a Redis Cluster; only nodes that are +# started as cluster nodes can. In order to start a Redis instance as a +# cluster node enable the cluster support uncommenting the following: +# +# cluster-enabled yes + +# Every cluster node has a cluster configuration file. This file is not +# intended to be edited by hand. It is created and updated by Redis nodes. +# Every Redis Cluster node requires a different cluster configuration file. +# Make sure that instances running in the same system do not have +# overlapping cluster configuration file names. +# +# cluster-config-file nodes-6379.conf + +# Cluster node timeout is the amount of milliseconds a node must be unreachable +# for it to be considered in failure state. +# Most other internal time limits are a multiple of the node timeout. +# +# cluster-node-timeout 15000 + +# The cluster port is the port that the cluster bus will listen for inbound connections on. When set +# to the default value, 0, it will be bound to the command port + 10000. Setting this value requires +# you to specify the cluster bus port when executing cluster meet. +# cluster-port 0 + +# A replica of a failing master will avoid to start a failover if its data +# looks too old. +# +# There is no simple way for a replica to actually have an exact measure of +# its "data age", so the following two checks are performed: +# +# 1) If there are multiple replicas able to failover, they exchange messages +# in order to try to give an advantage to the replica with the best +# replication offset (more data from the master processed). +# Replicas will try to get their rank by offset, and apply to the start +# of the failover a delay proportional to their rank. +# +# 2) Every single replica computes the time of the last interaction with +# its master. This can be the last ping or command received (if the master +# is still in the "connected" state), or the time that elapsed since the +# disconnection with the master (if the replication link is currently down). +# If the last interaction is too old, the replica will not try to failover +# at all. +# +# The point "2" can be tuned by user. Specifically a replica will not perform +# the failover if, since the last interaction with the master, the time +# elapsed is greater than: +# +# (node-timeout * cluster-replica-validity-factor) + repl-ping-replica-period +# +# So for example if node-timeout is 30 seconds, and the cluster-replica-validity-factor +# is 10, and assuming a default repl-ping-replica-period of 10 seconds, the +# replica will not try to failover if it was not able to talk with the master +# for longer than 310 seconds. +# +# A large cluster-replica-validity-factor may allow replicas with too old data to failover +# a master, while a too small value may prevent the cluster from being able to +# elect a replica at all. +# +# For maximum availability, it is possible to set the cluster-replica-validity-factor +# to a value of 0, which means, that replicas will always try to failover the +# master regardless of the last time they interacted with the master. +# (However they'll always try to apply a delay proportional to their +# offset rank). 
+# +# Zero is the only value able to guarantee that when all the partitions heal +# the cluster will always be able to continue. +# +# cluster-replica-validity-factor 10 + +# Cluster replicas are able to migrate to orphaned masters, that are masters +# that are left without working replicas. This improves the cluster ability +# to resist to failures as otherwise an orphaned master can't be failed over +# in case of failure if it has no working replicas. +# +# Replicas migrate to orphaned masters only if there are still at least a +# given number of other working replicas for their old master. This number +# is the "migration barrier". A migration barrier of 1 means that a replica +# will migrate only if there is at least 1 other working replica for its master +# and so forth. It usually reflects the number of replicas you want for every +# master in your cluster. +# +# Default is 1 (replicas migrate only if their masters remain with at least +# one replica). To disable migration just set it to a very large value or +# set cluster-allow-replica-migration to 'no'. +# A value of 0 can be set but is useful only for debugging and dangerous +# in production. +# +# cluster-migration-barrier 1 + +# Turning off this option allows to use less automatic cluster configuration. +# It both disables migration to orphaned masters and migration from masters +# that became empty. +# +# Default is 'yes' (allow automatic migrations). +# +# cluster-allow-replica-migration yes + +# By default Redis Cluster nodes stop accepting queries if they detect there +# is at least a hash slot uncovered (no available node is serving it). +# This way if the cluster is partially down (for example a range of hash slots +# are no longer covered) all the cluster becomes, eventually, unavailable. +# It automatically returns available as soon as all the slots are covered again. +# +# However sometimes you want the subset of the cluster which is working, +# to continue to accept queries for the part of the key space that is still +# covered. In order to do so, just set the cluster-require-full-coverage +# option to no. +# +# cluster-require-full-coverage yes + +# This option, when set to yes, prevents replicas from trying to failover its +# master during master failures. However the replica can still perform a +# manual failover, if forced to do so. +# +# This is useful in different scenarios, especially in the case of multiple +# data center operations, where we want one side to never be promoted if not +# in the case of a total DC failure. +# +# cluster-replica-no-failover no + +# This option, when set to yes, allows nodes to serve read traffic while the +# cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful for two cases. The first case is for when an application +# doesn't require consistency of data during node failures or network partitions. +# One example of this is a cache, where as long as the node has the data it +# should be able to serve it. +# +# The second use case is for configurations that don't meet the recommended +# three shards but want to enable cluster mode and scale later. A +# master outage in a 1 or 2 shard configuration causes a read/write outage to the +# entire cluster without this option set, with it set there is only a write outage. +# Without a quorum of masters, slot ownership will not change automatically. 
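+#
+# For example, a small cache-style deployment that prefers availability over
+# consistency might combine the following (illustrative; the commented
+# default below keeps this behavior off):
+#
+# cluster-require-full-coverage no
+# cluster-allow-reads-when-down yes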
+# +# cluster-allow-reads-when-down no + +# This option, when set to yes, allows nodes to serve pubsub shard traffic while +# the cluster is in a down state, as long as it believes it owns the slots. +# +# This is useful if the application would like to use the pubsub feature even when +# the cluster global stable state is not OK. If the application wants to make sure only +# one shard is serving a given channel, this feature should be kept as yes. +# +# cluster-allow-pubsubshard-when-down yes + +# Cluster link send buffer limit is the limit on the memory usage of an individual +# cluster bus link's send buffer in bytes. Cluster links would be freed if they exceed +# this limit. This is to primarily prevent send buffers from growing unbounded on links +# toward slow peers (E.g. PubSub messages being piled up). +# This limit is disabled by default. Enable this limit when 'mem_cluster_links' INFO field +# and/or 'send-buffer-allocated' entries in the 'CLUSTER LINKS` command output continuously increase. +# Minimum limit of 1gb is recommended so that cluster link buffer can fit in at least a single +# PubSub message by default. (client-query-buffer-limit default value is 1gb) +# +# cluster-link-sendbuf-limit 0 + +# Clusters can configure their announced hostname using this config. This is a common use case for +# applications that need to use TLS Server Name Indication (SNI) or dealing with DNS based +# routing. By default this value is only shown as additional metadata in the CLUSTER SLOTS +# command, but can be changed using 'cluster-preferred-endpoint-type' config. This value is +# communicated along the clusterbus to all nodes, setting it to an empty string will remove +# the hostname and also propagate the removal. +# +# cluster-announce-hostname "" + +# Clusters can advertise how clients should connect to them using either their IP address, +# a user defined hostname, or by declaring they have no endpoint. Which endpoint is +# shown as the preferred endpoint is set by using the cluster-preferred-endpoint-type +# config with values 'ip', 'hostname', or 'unknown-endpoint'. This value controls how +# the endpoint returned for MOVED/ASKING requests as well as the first field of CLUSTER SLOTS. +# If the preferred endpoint type is set to hostname, but no announced hostname is set, a '?' +# will be returned instead. +# +# When a cluster advertises itself as having an unknown endpoint, it's indicating that +# the server doesn't know how clients can reach the cluster. This can happen in certain +# networking situations where there are multiple possible routes to the node, and the +# server doesn't know which one the client took. In this case, the server is expecting +# the client to reach out on the same endpoint it used for making the last request, but use +# the port provided in the response. +# +# cluster-preferred-endpoint-type ip + +# In order to setup your cluster make sure to read the documentation +# available at https://redis.io web site. + +########################## CLUSTER DOCKER/NAT support ######################## + +# In certain deployments, Redis Cluster nodes address discovery fails, because +# addresses are NAT-ted or because ports are forwarded (the typical case is +# Docker and other containers). +# +# In order to make Redis Cluster working in such environments, a static +# configuration where each node knows its public address is needed. 
The
+# following four options are used for this scope, and are:
+#
+# * cluster-announce-ip
+# * cluster-announce-port
+# * cluster-announce-tls-port
+# * cluster-announce-bus-port
+#
+# Each instructs the node about its address, client ports (for connections
+# without and with TLS) and cluster message bus port. The information is then
+# published in the header of the bus packets so that other nodes will be able to
+# correctly map the address of the node publishing the information.
+#
+# If cluster-tls is set to yes and cluster-announce-tls-port is omitted or set
+# to zero, then cluster-announce-port refers to the TLS port. Note also that
+# cluster-announce-tls-port has no effect if cluster-tls is set to no.
+#
+# If the above options are not used, the normal Redis Cluster auto-detection
+# will be used instead.
+#
+# Note that when remapped, the bus port may not be at the fixed offset of
+# clients port + 10000, so you can specify any port and bus-port depending
+# on how they get remapped. If the bus-port is not set, a fixed offset of
+# 10000 will be used as usual.
+#
+# Example:
+#
+# cluster-announce-ip 10.1.1.5
+# cluster-announce-tls-port 6379
+# cluster-announce-port 0
+# cluster-announce-bus-port 6380
+
+################################## SLOW LOG ###################################
+
+# The Redis Slow Log is a system to log queries that exceeded a specified
+# execution time. The execution time does not include the I/O operations
+# like talking with the client, sending the reply and so forth,
+# but just the time needed to actually execute the command (this is the only
+# stage of command execution where the thread is blocked and can not serve
+# other requests in the meantime).
+#
+# You can configure the slow log with two parameters: one tells Redis
+# what is the execution time, in microseconds, to exceed in order for the
+# command to get logged, and the other parameter is the length of the
+# slow log. When a new command is logged the oldest one is removed from the
+# queue of logged commands.
+
+# The following time is expressed in microseconds, so 1000000 is equivalent
+# to one second. Note that a negative number disables the slow log, while
+# a value of zero forces the logging of every command.
+slowlog-log-slower-than 10000
+
+# There is no limit to this length. Just be aware that it will consume memory.
+# You can reclaim memory used by the slow log with SLOWLOG RESET.
+slowlog-max-len 128
+
+################################ LATENCY MONITOR ##############################
+
+# The Redis latency monitoring subsystem samples different operations
+# at runtime in order to collect data related to possible sources of
+# latency of a Redis instance.
+#
+# Via the LATENCY command this information is available to the user that can
+# print graphs and obtain reports.
+#
+# The system only logs operations that were performed in a time equal or
+# greater than the amount of milliseconds specified via the
+# latency-monitor-threshold configuration directive. When its value is set
+# to zero, the latency monitor is turned off.
+#
+# By default latency monitoring is disabled since it is mostly not needed
+# if you don't have latency issues, and collecting data has a performance
+# impact, that while very small, can be measured under big load. Latency
+# monitoring can easily be enabled at runtime using the command
+# "CONFIG SET latency-monitor-threshold <milliseconds>" if needed.
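+#
+# For example, to log any event taking 100 milliseconds or more at runtime
+# (illustrative): CONFIG SET latency-monitor-threshold 100, then inspect the
+# collected samples with LATENCY LATEST, and clear them with LATENCY RESET.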
+latency-monitor-threshold 0
+
+################################ LATENCY TRACKING ##############################
+
+# The Redis extended latency monitoring tracks the per command latencies and enables
+# exporting the percentile distribution via the INFO latencystats command,
+# and cumulative latency distributions (histograms) via the LATENCY command.
+#
+# By default, the extended latency monitoring is enabled since the overhead
+# of keeping track of the command latency is very small.
+# latency-tracking yes
+
+# By default the exported latency percentiles via the INFO latencystats command
+# are the p50, p99, and p999.
+# latency-tracking-info-percentiles 50 99 99.9
+
+############################# EVENT NOTIFICATION ##############################
+
+# Redis can notify Pub/Sub clients about events happening in the key space.
+# This feature is documented at https://redis.io/topics/notifications
+#
+# For instance if keyspace events notification is enabled, and a client
+# performs a DEL operation on key "foo" stored in the Database 0, two
+# messages will be published via Pub/Sub:
+#
+# PUBLISH __keyspace@0__:foo del
+# PUBLISH __keyevent@0__:del foo
+#
+# It is possible to select the events that Redis will notify among a set
+# of classes. Every class is identified by a single character:
+#
+# K Keyspace events, published with __keyspace@<db>__ prefix.
+# E Keyevent events, published with __keyevent@<db>__ prefix.
+# g Generic commands (non-type specific) like DEL, EXPIRE, RENAME, ...
+# $ String commands
+# l List commands
+# s Set commands
+# h Hash commands
+# z Sorted set commands
+# x Expired events (events generated every time a key expires)
+# e Evicted events (events generated when a key is evicted for maxmemory)
+# n New key events (Note: not included in the 'A' class)
+# t Stream commands
+# d Module key type events
+# m Key-miss events (Note: It is not included in the 'A' class)
+# A Alias for g$lshzxetd, so that the "AKE" string means all the events
+#   (Except key-miss events which are excluded from 'A' due to their
+#   unique nature).
+#
+# The "notify-keyspace-events" takes as argument a string that is composed
+# of zero or multiple characters. The empty string means that notifications
+# are disabled.
+#
+# Example: to enable list and generic events, from the point of view of the
+# event name, use:
+#
+# notify-keyspace-events Elg
+#
+# Example 2: to get the stream of the expired keys subscribing to channel
+# name __keyevent@0__:expired use:
+#
+# notify-keyspace-events Ex
+#
+# By default all notifications are disabled because most users don't need
+# this feature and the feature has some overhead. Note that if you don't
+# specify at least one of K or E, no events will be delivered.
+notify-keyspace-events ""
+
+############################### ADVANCED CONFIG ###############################
+
+# Hashes are encoded using a memory efficient data structure when they have a
+# small number of entries, and the biggest entry does not exceed a given
+# threshold. These thresholds can be configured using the following directives.
+hash-max-listpack-entries 512
+hash-max-listpack-value 64
+
+# Lists are also encoded in a special way to save a lot of space.
+# The number of entries allowed per internal list node can be specified
+# as a fixed maximum size or a maximum number of elements.
+# For a fixed maximum size, use -5 through -1, meaning: +# -5: max size: 64 Kb <-- not recommended for normal workloads +# -4: max size: 32 Kb <-- not recommended +# -3: max size: 16 Kb <-- probably not recommended +# -2: max size: 8 Kb <-- good +# -1: max size: 4 Kb <-- good +# Positive numbers mean store up to _exactly_ that number of elements +# per list node. +# The highest performing option is usually -2 (8 Kb size) or -1 (4 Kb size), +# but if your use case is unique, adjust the settings as necessary. +list-max-listpack-size -2 + +# Lists may also be compressed. +# Compress depth is the number of quicklist ziplist nodes from *each* side of +# the list to *exclude* from compression. The head and tail of the list +# are always uncompressed for fast push/pop operations. Settings are: +# 0: disable all list compression +# 1: depth 1 means "don't start compressing until after 1 node into the list, +# going from either the head or tail" +# So: [head]->node->node->...->node->[tail] +# [head], [tail] will always be uncompressed; inner nodes will compress. +# 2: [head]->[next]->node->node->...->node->[prev]->[tail] +# 2 here means: don't compress head or head->next or tail->prev or tail, +# but compress all nodes between them. +# 3: [head]->[next]->[next]->node->node->...->node->[prev]->[prev]->[tail] +# etc. +list-compress-depth 0 + +# Sets have a special encoding in just one case: when a set is composed +# of just strings that happen to be integers in radix 10 in the range +# of 64 bit signed integers. +# The following configuration setting sets the limit in the size of the +# set in order to use this special memory saving encoding. +set-max-intset-entries 512 + +# Similarly to hashes and lists, sorted sets are also specially encoded in +# order to save a lot of space. This encoding is only used when the length and +# elements of a sorted set are below the following limits: +zset-max-listpack-entries 128 +zset-max-listpack-value 64 + +# HyperLogLog sparse representation bytes limit. The limit includes the +# 16 bytes header. When an HyperLogLog using the sparse representation crosses +# this limit, it is converted into the dense representation. +# +# A value greater than 16000 is totally useless, since at that point the +# dense representation is more memory efficient. +# +# The suggested value is ~ 3000 in order to have the benefits of +# the space efficient encoding without slowing down too much PFADD, +# which is O(N) with the sparse encoding. The value can be raised to +# ~ 10000 when CPU is not a concern, but space is, and the data set is +# composed of many HyperLogLogs with cardinality in the 0 - 15000 range. +hll-sparse-max-bytes 3000 + +# Streams macro node max size / items. The stream data structure is a radix +# tree of big nodes that encode multiple items inside. Using this configuration +# it is possible to configure how big a single node can be in bytes, and the +# maximum number of items it may contain before switching to a new node when +# appending new stream entries. If any of the following settings are set to +# zero, the limit is ignored, so for instance it is possible to set just a +# max entries limit by setting max-bytes to 0 and max-entries to the desired +# value. +stream-node-max-bytes 4096 +stream-node-max-entries 100 + +# Active rehashing uses 1 millisecond every 100 milliseconds of CPU time in +# order to help rehashing the main Redis hash table (the one mapping top-level +# keys to values). 
The hash table implementation Redis uses (see dict.c)
+# performs a lazy rehashing: the more operations you run into a hash table
+# that is rehashing, the more rehashing "steps" are performed, so if the
+# server is idle the rehashing is never complete and some more memory is used
+# by the hash table.
+#
+# The default is to use this millisecond 10 times every second in order to
+# actively rehash the main dictionaries, freeing memory when possible.
+#
+# If unsure:
+# use "activerehashing no" if you have hard latency requirements and it is
+# not a good thing in your environment that Redis can reply from time to time
+# to queries with 2 milliseconds delay.
+#
+# use "activerehashing yes" if you don't have such hard requirements but
+# want to free memory asap when possible.
+activerehashing yes
+
+# The client output buffer limits can be used to force disconnection of clients
+# that are not reading data from the server fast enough for some reason (a
+# common reason is that a Pub/Sub client can't consume messages as fast as the
+# publisher can produce them).
+#
+# The limit can be set differently for the three different classes of clients:
+#
+# normal -> normal clients including MONITOR clients
+# replica -> replica clients
+# pubsub -> clients subscribed to at least one pubsub channel or pattern
+#
+# The syntax of every client-output-buffer-limit directive is the following:
+#
+# client-output-buffer-limit <class> <hard limit> <soft limit> <soft seconds>
+#
+# A client is immediately disconnected once the hard limit is reached, or if
+# the soft limit is reached and remains reached for the specified number of
+# seconds (continuously).
+# So for instance if the hard limit is 32 megabytes and the soft limit is
+# 16 megabytes / 10 seconds, the client will get disconnected immediately
+# if the size of the output buffers reach 32 megabytes, but will also get
+# disconnected if the client reaches 16 megabytes and continuously overcomes
+# the limit for 10 seconds.
+#
+# By default normal clients are not limited because they don't receive data
+# without asking (in a push way), but just after a request, so only
+# asynchronous clients may create a scenario where data is requested faster
+# than it can be read.
+#
+# Instead there is a default limit for pubsub and replica clients, since
+# subscribers and replicas receive data in a push fashion.
+#
+# Note that it doesn't make sense to set the replica clients output buffer
+# limit lower than the repl-backlog-size config (partial sync will succeed
+# and then replica will get disconnected).
+# Such a configuration is ignored (the size of repl-backlog-size will be used).
+# This doesn't have memory consumption implications since the replica client
+# will share the backlog buffers memory.
+#
+# Both the hard and the soft limit can be disabled by setting them to zero.
+client-output-buffer-limit normal 0 0 0
+client-output-buffer-limit replica 256mb 64mb 60
+client-output-buffer-limit pubsub 32mb 8mb 60
+
+# Client query buffers accumulate new commands. They are limited to a fixed
+# amount by default in order to avoid that a protocol desynchronization (for
+# instance due to a bug in the client) will lead to unbound memory usage in
+# the query buffer. However you can configure it here if you have very special
+# needs, such as huge multi/exec requests or alike.
+#
+# client-query-buffer-limit 1gb
+
+# In some scenarios client connections can hog up memory leading to OOM
+# errors or data eviction. 
To avoid this we can cap the accumulated memory +# used by all client connections (all pubsub and normal clients). Once we +# reach that limit connections will be dropped by the server freeing up +# memory. The server will attempt to drop the connections using the most +# memory first. We call this mechanism "client eviction". +# +# Client eviction is configured using the maxmemory-clients setting as follows: +# 0 - client eviction is disabled (default) +# +# A memory value can be used for the client eviction threshold, +# for example: +# maxmemory-clients 1g +# +# A percentage value (between 1% and 100%) means the client eviction threshold +# is based on a percentage of the maxmemory setting. For example to set client +# eviction at 5% of maxmemory: +# maxmemory-clients 5% + +# In the Redis protocol, bulk requests, that are, elements representing single +# strings, are normally limited to 512 mb. However you can change this limit +# here, but must be 1mb or greater +# +# proto-max-bulk-len 512mb + +# Redis calls an internal function to perform many background tasks, like +# closing connections of clients in timeout, purging expired keys that are +# never requested, and so forth. +# +# Not all tasks are performed with the same frequency, but Redis checks for +# tasks to perform according to the specified "hz" value. +# +# By default "hz" is set to 10. Raising the value will use more CPU when +# Redis is idle, but at the same time will make Redis more responsive when +# there are many keys expiring at the same time, and timeouts may be +# handled with more precision. +# +# The range is between 1 and 500, however a value over 100 is usually not +# a good idea. Most users should use the default of 10 and raise this up to +# 100 only in environments where very low latency is required. +hz 10 + +# Normally it is useful to have an HZ value which is proportional to the +# number of clients connected. This is useful in order, for instance, to +# avoid too many clients are processed for each background task invocation +# in order to avoid latency spikes. +# +# Since the default HZ value by default is conservatively set to 10, Redis +# offers, and enables by default, the ability to use an adaptive HZ value +# which will temporarily raise when there are many connected clients. +# +# When dynamic HZ is enabled, the actual configured HZ will be used +# as a baseline, but multiples of the configured HZ value will be actually +# used as needed once more clients are connected. In this way an idle +# instance will use very little CPU time while a busy instance will be +# more responsive. +dynamic-hz yes + +# When a child rewrites the AOF file, if the following option is enabled +# the file will be fsync-ed every 4 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +aof-rewrite-incremental-fsync yes + +# When redis saves RDB file, if the following option is enabled +# the file will be fsync-ed every 4 MB of data generated. This is useful +# in order to commit the file to the disk more incrementally and avoid +# big latency spikes. +rdb-save-incremental-fsync yes + +# Redis LFU eviction (see maxmemory setting) can be tuned. However it is a good +# idea to start with the default settings and only change them after investigating +# how to improve the performances and how the keys LFU change over time, which +# is possible to inspect via the OBJECT FREQ command. 
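+#
+# For example (illustrative), to watch a key's LFU counter while tuning:
+#
+# redis-cli config set maxmemory-policy allkeys-lfu
+# redis-cli set mykey somevalue
+# redis-cli object freq mykey
+#
+# Note that OBJECT FREQ only works while an LFU maxmemory-policy is active.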
+#
+# There are two tunable parameters in the Redis LFU implementation: the
+# counter logarithm factor and the counter decay time. It is important to
+# understand what the two parameters mean before changing them.
+#
+# The LFU counter is just 8 bits per key, its maximum value is 255, so Redis
+# uses a probabilistic increment with logarithmic behavior. Given the value
+# of the old counter, when a key is accessed, the counter is incremented in
+# this way:
+#
+# 1. A random number R between 0 and 1 is extracted.
+# 2. A probability P is calculated as 1/(old_value*lfu_log_factor+1).
+# 3. The counter is incremented only if R < P.
+#
+# The default lfu-log-factor is 10. This is a table of how the frequency
+# counter changes with a different number of accesses with different
+# logarithmic factors:
+#
+# +--------+------------+------------+------------+------------+------------+
+# | factor | 100 hits   | 1000 hits  | 100K hits  | 1M hits    | 10M hits   |
+# +--------+------------+------------+------------+------------+------------+
+# | 0      | 104        | 255        | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 1      | 18         | 49         | 255        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 10     | 10         | 18         | 142        | 255        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+# | 100    | 8          | 11         | 49         | 143        | 255        |
+# +--------+------------+------------+------------+------------+------------+
+#
+# NOTE: The above table was obtained by running the following commands:
+#
+# redis-benchmark -n 1000000 incr foo
+# redis-cli object freq foo
+#
+# NOTE 2: The counter initial value is 5 in order to give new objects a chance
+# to accumulate hits.
+#
+# The counter decay time is the time, in minutes, that must elapse in order
+# for the key counter to be divided by two (or decremented if it has a value
+# less than or equal to 10).
+#
+# The default value for the lfu-decay-time is 1. A special value of 0 means to
+# decay the counter every time it happens to be scanned.
+#
+# lfu-log-factor 10
+# lfu-decay-time 1
+
+########################### ACTIVE DEFRAGMENTATION #######################
+#
+# What is active defragmentation?
+# -------------------------------
+#
+# Active (online) defragmentation allows a Redis server to compact the
+# spaces left between small allocations and deallocations of data in memory,
+# thus allowing to reclaim back memory.
+#
+# Fragmentation is a natural process that happens with every allocator (but
+# less so with Jemalloc, fortunately) and certain workloads. Normally a server
+# restart is needed in order to lower the fragmentation, or at least to flush
+# away all the data and create it again. However thanks to this feature
+# implemented by Oran Agra for Redis 4.0 this process can happen at runtime
+# in a "hot" way, while the server is running.
+#
+# Basically when the fragmentation is over a certain level (see the
+# configuration options below) Redis will start to create new copies of the
+# values in contiguous memory regions by exploiting certain specific Jemalloc
+# features (in order to understand if an allocation is causing fragmentation
+# and to allocate it in a better place), and at the same time, will release the
+# old copies of the data. This process, repeated incrementally for all the keys
+# will cause the fragmentation to drop back to normal values.
+#
+# Important things to understand:
+#
+# 1. 
This feature is disabled by default, and only works if you compiled Redis
+#    to use the copy of Jemalloc we ship with the source code of Redis.
+#    This is the default with Linux builds.
+#
+# 2. You never need to enable this feature if you don't have fragmentation
+#    issues.
+#
+# 3. Once you experience fragmentation, you can enable this feature when
+#    needed with the command "CONFIG SET activedefrag yes".
+#
+# The configuration parameters are able to fine tune the behavior of the
+# defragmentation process. If you are not sure about what they mean it is
+# a good idea to leave the defaults untouched.
+
+# Active defragmentation is disabled by default
+# activedefrag no
+
+# Minimum amount of fragmentation waste to start active defrag
+# active-defrag-ignore-bytes 100mb
+
+# Minimum percentage of fragmentation to start active defrag
+# active-defrag-threshold-lower 10
+
+# Maximum percentage of fragmentation at which we use maximum effort
+# active-defrag-threshold-upper 100
+
+# Minimal effort for defrag in CPU percentage, to be used when the lower
+# threshold is reached
+# active-defrag-cycle-min 1
+
+# Maximal effort for defrag in CPU percentage, to be used when the upper
+# threshold is reached
+# active-defrag-cycle-max 25
+
+# Maximum number of set/hash/zset/list fields that will be processed from
+# the main dictionary scan
+# active-defrag-max-scan-fields 1000
+
+# Jemalloc background thread for purging will be enabled by default
+jemalloc-bg-thread yes
+
+# It is possible to pin different threads and processes of Redis to specific
+# CPUs in your system, in order to maximize the performances of the server.
+# This is useful both in order to pin different Redis threads in different
+# CPUs, but also in order to make sure that multiple Redis instances running
+# in the same host will be pinned to different CPUs.
+#
+# Normally you can do this using the "taskset" command, however it is also
+# possible to do this via Redis configuration directly, both in Linux and FreeBSD.
+#
+# You can pin the server/IO threads, bio threads, aof rewrite child process, and
+# the bgsave child process. The syntax to specify the cpu list is the same as
+# the taskset command:
+#
+# Set redis server/io threads to cpu affinity 0,2,4,6:
+# server_cpulist 0-7:2
+#
+# Set bio threads to cpu affinity 1,3:
+# bio_cpulist 1,3
+#
+# Set aof rewrite child process to cpu affinity 8,9,10,11:
+# aof_rewrite_cpulist 8-11
+#
+# Set bgsave child process to cpu affinity 1,10,11
+# bgsave_cpulist 1,10-11
+
+# In some cases redis will emit warnings and even refuse to start if it detects
+# that the system is in a bad state, it is possible to suppress these warnings
+# by setting the following config which takes a space delimited list of warnings
+# to suppress
+#
+# ignore-warnings ARM64-COW-BUG
diff --git a/nixng/containers/matrix/synapse/redis.nix b/nixng/containers/matrix/synapse/redis.nix
new file mode 100644
index 0000000..ded8ca5
--- /dev/null
+++ b/nixng/containers/matrix/synapse/redis.nix
@@ -0,0 +1,46 @@
+{ makeSystem
+, nixpkgs
+}:
+makeSystem {
+  system = "x86_64-linux";
+  name = "redis";
+  inherit nixpkgs;
+  config =
+    { pkgs, ... 
}:
+    {
+      dumb-init = {
+        enable = true;
+        type.services = { };
+      };
+
+      users.users."redis" = {
+        home = "/var/empty";
+        uid = 9001;
+        group = "redis";
+      };
+
+      users.groups."redis" = {
+        gid = 9001;
+      };
+
+      init.services.redis = {
+        enabled = true;
+        shutdownOnExit = true;
+        script = pkgs.writeShellScript "redis-run" ''
+          cd /var/lib/redis
+          chpst -U redis:redis ${pkgs.redis}/bin/redis-server ${./redis.conf}
+        '';
+      };
+
+      init.services.redis-setup = {
+        enabled = true;
+        script = pkgs.writeShellScript "redis-setup" ''
+          export PATH="${pkgs.redis}/bin:$PATH"
+          nc -z 127.0.0.1 6379 -w 10 -v || exit 1 # wait up to 10s for redis-server to accept connections
+
+          redis-cli acl setuser default on '>'"$(cat /secrets/redis_password)" allcommands allkeys
+          sleep 86400 # one-shot setup; sleeping keeps the "service" marked as up
+        '';
+      };
+    };
+}
diff --git a/nixng/containers/matrix/synapse/synapse.nix b/nixng/containers/matrix/synapse/synapse.nix
new file mode 100644
index 0000000..8612c61
--- /dev/null
+++ b/nixng/containers/matrix/synapse/synapse.nix
@@ -0,0 +1,90 @@
+{ makeSystem
+, nixpkgs
+
+, commonConfig
+}:
+makeSystem {
+  system = "x86_64-linux";
+  name = "synapse";
+  inherit nixpkgs;
+  config =
+    { pkgs, lib, ... }:
+    let
+      inherit (lib)
+        singleton;
+    in
+    {
+      dumb-init = {
+        enable = true;
+        type.services = { };
+      };
+
+      init.services.synapse = {
+        enabled = true;
+        shutdownOnExit = true;
+        script =
+          let
+            synapseConfig = (pkgs.formats.yaml {}).generate "synapse.yaml"
+              {
+                listeners =
+                  [
+                    # The HTTP replication port
+                    {
+                      port = 9093;
+                      bind_addresses = [ "0.0.0.0" ];
+                      type = "http";
+                      resources = [
+                        {
+                          names = [ "replication" ];
+                        }
+                      ];
+                    }
+                    {
+                      port = 6167;
+                      tls = false;
+                      type = "http";
+                      x_forwarded = true;
+                      bind_addresses = [ "0.0.0.0" ];
+                      resources = singleton {
+                        names = [ "client" "federation" ];
+                        compress = false;
+                      };
+                    }
+                  ];
+
+                public_baseurl = "https://matrix.redalder.org/";
+
+                # Add a random shared secret to authenticate traffic.
+                worker_replication_secret = "";
+              };
+          in
+          pkgs.writeShellScript "synapse" ''
+            ${pkgs.matrix-synapse}/bin/synapse_homeserver \
+              --config-path ${synapseConfig} \
+              --config-path ${commonConfig pkgs} \
+              --config-path /secrets/extra.yaml \
+              --config-path /var/lib/registrations/extra.yaml \
+              --keys-directory /var/lib/synapse/keys \
+              $([ -e /var/lib/synapse/signing.key ] || echo --generate-keys) # generate signing keys on first start
+          '';
+      };
+    };
+}
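+
+# For reference, the "listeners" attribute set above is rendered by
+# pkgs.formats.yaml into roughly the following YAML (abbreviated sketch;
+# exact quoting/layout depends on the YAML generator):
+#
+#   listeners:
+#     - port: 9093
+#       bind_addresses: ["0.0.0.0"]
+#       type: http
+#       resources:
+#         - names: [replication]
+#     - port: 6167
+#       tls: false
+#       type: http
+#       x_forwarded: true
+#       bind_addresses: ["0.0.0.0"]
+#       resources:
+#         - names: [client, federation]
+#           compress: false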