# Mirror of https://github.com/spantaleev/matrix-docker-ansible-deploy.git
# Synced 2024-11-08 19:57:35 +01:00 — commit 410a915a8a
#
# Commit message:
# This paves the way for installing other roles into `roles/galaxy` using
# `ansible-galaxy`, similar to how it's done in:
# - https://github.com/spantaleev/gitea-docker-ansible-deploy
# - https://github.com/spantaleev/nextcloud-docker-ansible-deploy
# In the near future, we'll be removing a lot of the shared role code from here
# and using upstream roles for it. Some of the core `matrix-*` roles have
# already been extracted out into other reusable roles:
# - https://github.com/devture/com.devture.ansible.role.postgres
# - https://github.com/devture/com.devture.ansible.role.systemd_docker_base
# - https://github.com/devture/com.devture.ansible.role.timesync
# - https://github.com/devture/com.devture.ansible.role.vars_preserver
# - https://github.com/devture/com.devture.ansible.role.playbook_runtime_messages
# - https://github.com/devture/com.devture.ansible.role.playbook_help
# We just need to migrate to those.
#
# File: 457 lines, 18 KiB, YAML
---

# Endpoint regexes that may be routed to any `generic_worker` instance.
# The commentary below is carried over from Synapse's workers documentation
# by the workers-doc-to-yaml conversion script.
matrix_synapse_workers_generic_worker_endpoints:
  # This worker can handle API requests matching the following regular expressions.
  # These endpoints can be routed to any worker. If a worker is set up to handle a
  # stream then, for maximum efficiency, additional endpoints should be routed to that
  # worker: refer to the [stream writers](#stream-writers) section below for further
  # information.

  # Sync requests
  - ^/_matrix/client/(r0|v3)/sync$
  - ^/_matrix/client/(api/v1|r0|v3)/events$
  - ^/_matrix/client/(api/v1|r0|v3)/initialSync$
  - ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$

  # Federation requests
  - ^/_matrix/federation/v1/event/
  - ^/_matrix/federation/v1/state/
  - ^/_matrix/federation/v1/state_ids/
  - ^/_matrix/federation/v1/backfill/
  - ^/_matrix/federation/v1/get_missing_events/
  - ^/_matrix/federation/v1/publicRooms
  - ^/_matrix/federation/v1/query/
  - ^/_matrix/federation/v1/make_join/
  - ^/_matrix/federation/v1/make_leave/
  - ^/_matrix/federation/(v1|v2)/send_join/
  - ^/_matrix/federation/(v1|v2)/send_leave/
  - ^/_matrix/federation/(v1|v2)/invite/
  - ^/_matrix/federation/v1/event_auth/
  - ^/_matrix/federation/v1/exchange_third_party_invite/
  - ^/_matrix/federation/v1/user/devices/
  - ^/_matrix/key/v2/query
  - ^/_matrix/federation/v1/hierarchy/

  # Inbound federation transaction request
  - ^/_matrix/federation/v1/send/

  # Client API requests
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
  - ^/_matrix/client/v1/rooms/.*/hierarchy$
  - ^/_matrix/client/(v1|unstable)/rooms/.*/relations/
  - ^/_matrix/client/v1/rooms/.*/threads$
  - ^/_matrix/client/unstable/org.matrix.msc2716/rooms/.*/batch_send$
  - ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
  - ^/_matrix/client/(r0|v3|unstable)/account/3pid$
  - ^/_matrix/client/(r0|v3|unstable)/account/whoami$
  - ^/_matrix/client/(r0|v3|unstable)/devices$
  - ^/_matrix/client/versions$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/search$

  # Encryption requests
  # Note that ^/_matrix/client/(r0|v3|unstable)/keys/upload/ requires `worker_main_http_uri`
  - ^/_matrix/client/(r0|v3|unstable)/keys/query$
  - ^/_matrix/client/(r0|v3|unstable)/keys/changes$
  - ^/_matrix/client/(r0|v3|unstable)/keys/claim$
  - ^/_matrix/client/(r0|v3|unstable)/room_keys/
  - ^/_matrix/client/(r0|v3|unstable)/keys/upload/

  # Registration/login requests
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
  - ^/_matrix/client/(r0|v3|unstable)/register$
  - ^/_matrix/client/v1/register/m.login.registration_token/validity$

  # Event sending requests
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/join/
  - ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/

  # These appear to be conditional and should not be enabled by default.
  # We need to fix up our workers-doc-to-yaml.awk parsing script to exclude them.
  # For now, they've been commented out manually.
  # # Account data requests
  # - ^/_matrix/client/(r0|v3|unstable)/.*/tags
  # - ^/_matrix/client/(r0|v3|unstable)/.*/account_data
  #
  # # Receipts requests
  # - ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
  # - ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers
  #
  # # Presence requests
  # - ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

  # User directory search requests
  # Any worker can handle these, but we have a dedicated user_dir worker for this,
  # so we'd like for other generic workers to not try and capture these requests.
  # - ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

  # Additionally, the following REST endpoints can be handled for GET requests:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/

  # Pagination requests can also be handled, but all requests for a given
  # room must be routed to the same instance. Additionally, care must be taken to
  # ensure that the purge history admin API is not used while pagination requests
  # for the room are in flight:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$

  # Additionally, the following endpoints should be included if Synapse is configured
  # to use SSO (you only need to include the ones for whichever SSO provider you're
  # using):

  # for all SSO providers
  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(api/v1|r0|v3|unstable)/login/sso/redirect
  # ^/_synapse/client/pick_idp$
  # ^/_synapse/client/pick_username
  # ^/_synapse/client/new_user_consent$
  # ^/_synapse/client/sso_register$

  # OpenID Connect requests.
  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_synapse/client/oidc/callback$

  # SAML requests.
  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_synapse/client/saml2/authn_response$

  # CAS requests.
  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(api/v1|r0|v3|unstable)/login/cas/ticket$

  # Ensure that all SSO logins go to a single process.
  # For multiple workers not handling the SSO endpoints properly, see
  # [#7530](https://github.com/matrix-org/synapse/issues/7530) and
  # [#9427](https://github.com/matrix-org/synapse/issues/9427).

  # Note that a [HTTP listener](usage/configuration/config_documentation.md#listeners)
  # with `client` and `federation` `resources` must be configured in the `worker_listeners`
  # option in the worker config.

  # #### Load balancing

  # It is possible to run multiple instances of this worker app, with incoming requests
  # being load-balanced between them by the reverse-proxy. However, different endpoints
  # have different characteristics and so admins
  # may wish to run multiple groups of workers handling different endpoints so that
  # load balancing can be done in different ways.

  # For `/sync` and `/initialSync` requests it will be more efficient if all
  # requests from a particular user are routed to a single instance. Extracting a
  # user ID from the access token or `Authorization` header is currently left as an
  # exercise for the reader. Admins may additionally wish to separate out `/sync`
  # requests that have a `since` query parameter from those that don't (and
  # `/initialSync`), as requests that don't are known as "initial sync" that happens
  # when a user logs in on a new device and can be *very* resource intensive, so
  # isolating these requests will stop them from interfering with other users ongoing
  # syncs.

  # Federation and client requests can be balanced via simple round robin.

  # The inbound federation transaction request `^/_matrix/federation/v1/send/`
  # should be balanced by source IP so that transactions from the same remote server
  # go to the same process.

  # Registration/login requests can be handled separately purely to help ensure that
  # unexpected load doesn't affect new logins and sign ups.

  # Finally, event sending requests can be balanced by the room ID in the URI (or
  # the full URI, or even just round robin), the room ID is the path component after
  # `/rooms/`. If there is a large bridge connected that is sending or may send lots
  # of events, then a dedicated set of workers can be provisioned to limit the
  # effects of bursts of events from that bridge on events sent by normal users.

  # #### Stream writers

  # Additionally, the writing of specific streams (such as events) can be moved off
  # of the main process to a particular worker.

  # To enable this, the worker must have a
  # [HTTP `replication` listener](usage/configuration/config_documentation.md#listeners) configured,
  # have a `worker_name` and be listed in the `instance_map` config. The same worker
  # can handle multiple streams, but unless otherwise documented, each stream can only
  # have a single writer.

  # For example, to move event persistence off to a dedicated worker, the shared
  # configuration would include:

  # ```yaml
  # instance_map:
  #   event_persister1:
  #     host: localhost
  #     port: 8034

  # stream_writers:
  #   events: event_persister1
  # ```

  # An example for a stream writer instance:

  # ```yaml
  # {{#include systemd-with-workers/workers/event_persister.yaml}}
  # ```

  # Some of the streams have associated endpoints which, for maximum efficiency, should
  # be routed to the workers handling that stream. See below for the currently supported
  # streams and the endpoints associated with them:

  # ##### The `events` stream

  # The `events` stream experimentally supports having multiple writers, where work
  # is sharded between them by room ID. Note that you *must* restart all worker
  # instances when adding or removing event persisters. An example `stream_writers`
  # configuration with multiple writers:

  # ```yaml
  # stream_writers:
  #   events:
  #     - event_persister1
  #     - event_persister2
  # ```

  # ##### The `typing` stream

  # The following endpoints should be routed directly to the worker configured as
  # the stream writer for the `typing` stream:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing

  # ##### The `to_device` stream

  # The following endpoints should be routed directly to the worker configured as
  # the stream writer for the `to_device` stream:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(r0|v3|unstable)/sendToDevice/

  # ##### The `account_data` stream

  # The following endpoints should be routed directly to the worker configured as
  # the stream writer for the `account_data` stream:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(r0|v3|unstable)/.*/tags
  # ^/_matrix/client/(r0|v3|unstable)/.*/account_data

  # ##### The `receipts` stream

  # The following endpoints should be routed directly to the worker configured as
  # the stream writer for the `receipts` stream:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
  # ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers

  # ##### The `presence` stream

  # The following endpoints should be routed directly to the worker configured as
  # the stream writer for the `presence` stream:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

  # #### Background tasks

  # There is also support for moving background tasks to a separate
  # worker. Background tasks are run periodically or started via replication. Exactly
  # which tasks are configured to run depends on your Synapse configuration (e.g. if
  # stats is enabled). This worker doesn't handle any REST endpoints itself.

  # To enable this, the worker must have a `worker_name` and can be configured to run
  # background tasks. For example, to move background tasks to a dedicated worker,
  # the shared configuration would include:

  # ```yaml
  # run_background_tasks_on: background_worker
  # ```

  # You might also wish to investigate the `update_user_directory_from_worker` and
  # `media_instance_running_background_jobs` settings.

  # An example for a dedicated background worker instance:

  # ```yaml
  # {{#include systemd-with-workers/workers/background_worker.yaml}}
  # ```

  # #### Updating the User Directory

  # You can designate one generic worker to update the user directory.

  # Specify its name in the shared configuration as follows:

  # ```yaml
  # update_user_directory_from_worker: worker_name
  # ```

  # This work cannot be load-balanced; please ensure the main process is restarted
  # after setting this option in the shared configuration!

  # User directory updates allow REST endpoints matching the following regular
  # expressions to work:

  # FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
  # ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

  # The above endpoints can be routed to any worker, though you may choose to route
  # it to the chosen user directory worker.

  # This style of configuration supersedes the legacy `synapse.app.user_dir`
  # worker application type.

  # #### Notifying Application Services

  # You can designate one generic worker to send output traffic to Application Services.
  # Doesn't handle any REST endpoints itself, but you should specify its name in the
  # shared configuration as follows:

  # ```yaml
  # notify_appservices_from_worker: worker_name
  # ```

  # This work cannot be load-balanced; please ensure the main process is restarted
  # after setting this option in the shared configuration!

  # This style of configuration supersedes the legacy `synapse.app.appservice`
  # worker application type.

  # pusher worker (no API endpoints) [
  # Handles sending push notifications to sygnal and email. Doesn't handle any
  # REST endpoints itself, but you should set `start_pushers: False` in the
  # shared configuration file to stop the main synapse sending push notifications.

  # To run multiple instances at once the `pusher_instances` option should list all
  # pusher instances by their worker name, e.g.:

  # ```yaml
  # pusher_instances:
  #   - pusher_worker1
  #   - pusher_worker2
  # ```

  # An example for a pusher instance:

  # ```yaml
  # {{#include systemd-with-workers/workers/pusher_worker.yaml}}
  # ```

  # ]

  # appservice worker (no API endpoints) [
  # **Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
  # `notify_appservices_from_worker` option instead.](#notifying-application-services)

  # Handles sending output traffic to Application Services. Doesn't handle any
  # REST endpoints itself, but you should set `notify_appservices: False` in the
  # shared configuration file to stop the main synapse sending appservice notifications.

  # Note this worker cannot be load-balanced: only one instance should be active.

  # ]

  # federation_sender worker (no API endpoints) [
  # Handles sending federation traffic to other servers. Doesn't handle any
  # REST endpoints itself, but you should set `send_federation: False` in the
  # shared configuration file to stop the main synapse sending this traffic.

  # If running multiple federation senders then you must list each
  # instance in the `federation_sender_instances` option by their `worker_name`.
  # All instances must be stopped and started when adding or removing instances.
  # For example:

  # ```yaml
  # federation_sender_instances:
  #   - federation_sender1
  #   - federation_sender2
  # ```

  # An example for a federation sender instance:

  # ```yaml
  # {{#include systemd-with-workers/workers/federation_sender.yaml}}
  # ```
  # ]
# Endpoint regexes that should be routed to a `media_repository` worker.
matrix_synapse_workers_media_repository_endpoints:
  # Handles the media repository. It can handle all endpoints starting with:

  - ^/_matrix/media/

  # ... and the following regular expressions matching media-specific administration APIs:

  - ^/_synapse/admin/v1/purge_media_cache$
  - ^/_synapse/admin/v1/room/.*/media.*$
  - ^/_synapse/admin/v1/user/.*/media.*$
  - ^/_synapse/admin/v1/media/.*$
  - ^/_synapse/admin/v1/quarantine_media/.*$
  - ^/_synapse/admin/v1/users/.*/media$

  # You should also set `enable_media_repo: False` in the shared configuration
  # file to stop the main synapse running background jobs related to managing the
  # media repository. Note that doing so will prevent the main process from being
  # able to handle the above endpoints.

  # In the `media_repository` worker configuration file, configure the
  # [HTTP listener](usage/configuration/config_documentation.md#listeners) to
  # expose the `media` resource. For example:

  # ```yaml
  # {{#include systemd-with-workers/workers/media_worker.yaml}}
  # ```

  # Note that if running multiple media repositories they must be on the same server
  # and you must configure a single instance to run the background tasks, e.g.:

  # ```yaml
  # media_instance_running_background_jobs: "media-repository-1"
  # ```

  # Note that if a reverse proxy is used, then `/_matrix/media/` must be routed for both inbound client and federation requests (if they are handled separately).
# Endpoint regexes that may be routed to a (legacy) `user_dir` worker.
matrix_synapse_workers_user_dir_endpoints:
  # **Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
  # `update_user_directory_from_worker` option instead.](#updating-the-user-directory)

  # Handles searches in the user directory. It can handle REST endpoints matching
  # the following regular expressions:

  - ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

  # When using this worker you must also set `update_user_directory: false` in the
  # shared configuration file to stop the main synapse running background
  # jobs related to updating the user directory.

  # Above endpoint is not *required* to be routed to this worker. By default,
  # `update_user_directory` is set to `true`, which means the main process
  # will handle updates. All workers configured with `client` can handle the above
  # endpoint as long as either this worker or the main process are configured to
  # handle it, and are online.

  # If `update_user_directory` is set to `false`, and this worker is not running,
  # the above endpoint may give outdated results.
# The worker types this playbook knows how to provision.
matrix_synapse_workers_avail_list:
  - appservice
  - federation_sender
  - generic_worker
  - media_repository
  - pusher
  - user_dir