mirror of
https://github.com/spantaleev/matrix-docker-ansible-deploy.git
synced 2024-12-25 10:28:29 +01:00
78204619ea
Source: https://github.com/matrix-org/synapse/blob/v1.59.0/docs/upgrade.md#deprecation-of-the-synapseappappservice-and-synapseappuser_dir-worker-application-types As an alternative, we should probably find a way to run one or a few more generic workers (which will handle appservice and user_dir stuff) and update `homeserver.yaml` so that it would point to the name of these workers using `notify_appservices_from_worker` and `update_user_directory_from_worker` options. For now, this solves the deprecation, so we can have a peace of mind going forward. We're force-setting these worker counts to 0, so that we can clean up existing homeservers which use these worker types. In the future, these options will either be removed or repurposed (so that they transparently create more generic workers that handle user_dir/appservice loads).
465 lines
18 KiB
YAML
---

matrix_synapse_workers_generic_worker_endpoints:
# This worker can handle API requests matching the following regular expressions.
# These endpoints can be routed to any worker. If a worker is set up to handle a
# stream then, for maximum efficiency, additional endpoints should be routed to that
# worker: refer to the [stream writers](#stream-writers) section below for further
# information.

# Sync requests
- ^/_matrix/client/(r0|v3)/sync$
- ^/_matrix/client/(api/v1|r0|v3)/events$
- ^/_matrix/client/(api/v1|r0|v3)/initialSync$
- ^/_matrix/client/(api/v1|r0|v3)/rooms/[^/]+/initialSync$

# Federation requests
- ^/_matrix/federation/v1/event/
- ^/_matrix/federation/v1/state/
- ^/_matrix/federation/v1/state_ids/
- ^/_matrix/federation/v1/backfill/
- ^/_matrix/federation/v1/get_missing_events/
- ^/_matrix/federation/v1/publicRooms
- ^/_matrix/federation/v1/query/
- ^/_matrix/federation/v1/make_join/
- ^/_matrix/federation/v1/make_leave/
- ^/_matrix/federation/(v1|v2)/send_join/
- ^/_matrix/federation/(v1|v2)/send_leave/
- ^/_matrix/federation/(v1|v2)/invite/
- ^/_matrix/federation/v1/event_auth/
- ^/_matrix/federation/v1/exchange_third_party_invite/
- ^/_matrix/federation/v1/user/devices/
- ^/_matrix/federation/v1/get_groups_publicised$
- ^/_matrix/key/v2/query
- ^/_matrix/federation/(v1|unstable/org.matrix.msc2946)/hierarchy/

# Inbound federation transaction request
- ^/_matrix/federation/v1/send/

# Client API requests
- ^/_matrix/client/(api/v1|r0|v3|unstable)/createRoom$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/publicRooms$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/joined_members$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/context/.*$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/members$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state$
- ^/_matrix/client/(v1|unstable/org.matrix.msc2946)/rooms/.*/hierarchy$
- ^/_matrix/client/unstable/im.nheko.summary/rooms/.*/summary$
- ^/_matrix/client/(r0|v3|unstable)/account/3pid$
- ^/_matrix/client/(r0|v3|unstable)/devices$
- ^/_matrix/client/versions$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/voip/turnServer$
- ^/_matrix/client/(r0|v3|unstable)/joined_groups$
- ^/_matrix/client/(r0|v3|unstable)/publicised_groups$
- ^/_matrix/client/(r0|v3|unstable)/publicised_groups/
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/event/
- ^/_matrix/client/(api/v1|r0|v3|unstable)/joined_rooms$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/search$

# Encryption requests
- ^/_matrix/client/(r0|v3|unstable)/keys/query$
- ^/_matrix/client/(r0|v3|unstable)/keys/changes$
- ^/_matrix/client/(r0|v3|unstable)/keys/claim$
- ^/_matrix/client/(r0|v3|unstable)/room_keys/

# Registration/login requests
- ^/_matrix/client/(api/v1|r0|v3|unstable)/login$
- ^/_matrix/client/(r0|v3|unstable)/register$
- ^/_matrix/client/v1/register/m.login.registration_token/validity$

# Event sending requests
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/redact
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/send
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/state/
- ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/(join|invite|leave|ban|unban|kick)$
- ^/_matrix/client/(api/v1|r0|v3|unstable)/join/
- ^/_matrix/client/(api/v1|r0|v3|unstable)/profile/

# These appear to be conditional and should not be enabled by default.
# We need to fix up our workers-doc-to-yaml.awk parsing script to exclude them.
# For now, they've been commented out manually.
#
# # Device requests
# - ^/_matrix/client/(r0|v3|unstable)/sendToDevice/

# # Account data requests
# - ^/_matrix/client/(r0|v3|unstable)/.*/tags
# - ^/_matrix/client/(r0|v3|unstable)/.*/account_data

# # Receipts requests
# - ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
# - ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers

# # Presence requests
# - ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

# Additionally, the following REST endpoints can be handled for GET requests:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/federation/v1/groups/
# ^/_matrix/client/(api/v1|r0|v3|unstable)/pushrules/
# ^/_matrix/client/(r0|v3|unstable)/groups/

# Pagination requests can also be handled, but all requests for a given
# room must be routed to the same instance. Additionally, care must be taken to
# ensure that the purge history admin API is not used while pagination requests
# for the room are in flight:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/messages$

# Additionally, the following endpoints should be included if Synapse is configured
# to use SSO (you only need to include the ones for whichever SSO provider you're
# using):

# for all SSO providers
# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(api/v1|r0|v3|unstable)/login/sso/redirect
# ^/_synapse/client/pick_idp$
# ^/_synapse/client/pick_username
# ^/_synapse/client/new_user_consent$
# ^/_synapse/client/sso_register$

# OpenID Connect requests.
# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_synapse/client/oidc/callback$

# SAML requests.
# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_synapse/client/saml2/authn_response$

# CAS requests.
# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(api/v1|r0|v3|unstable)/login/cas/ticket$

# Ensure that all SSO logins go to a single process.
# For multiple workers not handling the SSO endpoints properly, see
# [#7530](https://github.com/matrix-org/synapse/issues/7530) and
# [#9427](https://github.com/matrix-org/synapse/issues/9427).

# Note that a HTTP listener with `client` and `federation` resources must be
# configured in the `worker_listeners` option in the worker config.

# #### Load balancing

# It is possible to run multiple instances of this worker app, with incoming requests
# being load-balanced between them by the reverse-proxy. However, different endpoints
# have different characteristics and so admins
# may wish to run multiple groups of workers handling different endpoints so that
# load balancing can be done in different ways.

# For `/sync` and `/initialSync` requests it will be more efficient if all
# requests from a particular user are routed to a single instance. Extracting a
# user ID from the access token or `Authorization` header is currently left as an
# exercise for the reader. Admins may additionally wish to separate out `/sync`
# requests that have a `since` query parameter from those that don't (and
# `/initialSync`), as requests that don't are known as "initial sync" that happens
# when a user logs in on a new device and can be *very* resource intensive, so
# isolating these requests will stop them from interfering with other users ongoing
# syncs.

# Federation and client requests can be balanced via simple round robin.

# The inbound federation transaction request `^/_matrix/federation/v1/send/`
# should be balanced by source IP so that transactions from the same remote server
# go to the same process.

# Registration/login requests can be handled separately purely to help ensure that
# unexpected load doesn't affect new logins and sign ups.

# Finally, event sending requests can be balanced by the room ID in the URI (or
# the full URI, or even just round robin), the room ID is the path component after
# `/rooms/`. If there is a large bridge connected that is sending or may send lots
# of events, then a dedicated set of workers can be provisioned to limit the
# effects of bursts of events from that bridge on events sent by normal users.

# #### Stream writers

# Additionally, the writing of specific streams (such as events) can be moved off
# of the main process to a particular worker.
# (This is only supported with Redis-based replication.)

# To enable this, the worker must have a HTTP replication listener configured,
# have a `worker_name` and be listed in the `instance_map` config. The same worker
# can handle multiple streams, but unless otherwise documented, each stream can only
# have a single writer.

# For example, to move event persistence off to a dedicated worker, the shared
# configuration would include:

# ```yaml
# instance_map:
#   event_persister1:
#     host: localhost
#     port: 8034
#
# stream_writers:
#   events: event_persister1
# ```

# An example for a stream writer instance:

# ```yaml
# {{#include systemd-with-workers/workers/event_persister.yaml}}
# ```

# Some of the streams have associated endpoints which, for maximum efficiency, should
# be routed to the workers handling that stream. See below for the currently supported
# streams and the endpoints associated with them:

# ##### The `events` stream

# The `events` stream experimentally supports having multiple writers, where work
# is sharded between them by room ID. Note that you *must* restart all worker
# instances when adding or removing event persisters. An example `stream_writers`
# configuration with multiple writers:

# ```yaml
# stream_writers:
#   events:
#     - event_persister1
#     - event_persister2
# ```

# ##### The `typing` stream

# The following endpoints should be routed directly to the worker configured as
# the stream writer for the `typing` stream:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(api/v1|r0|v3|unstable)/rooms/.*/typing

# ##### The `to_device` stream

# The following endpoints should be routed directly to the worker configured as
# the stream writer for the `to_device` stream:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(r0|v3|unstable)/sendToDevice/

# ##### The `account_data` stream

# The following endpoints should be routed directly to the worker configured as
# the stream writer for the `account_data` stream:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(r0|v3|unstable)/.*/tags
# ^/_matrix/client/(r0|v3|unstable)/.*/account_data

# ##### The `receipts` stream

# The following endpoints should be routed directly to the worker configured as
# the stream writer for the `receipts` stream:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(r0|v3|unstable)/rooms/.*/receipt
# ^/_matrix/client/(r0|v3|unstable)/rooms/.*/read_markers

# ##### The `presence` stream

# The following endpoints should be routed directly to the worker configured as
# the stream writer for the `presence` stream:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/

# #### Background tasks

# There is also support for moving background tasks to a separate
# worker. Background tasks are run periodically or started via replication. Exactly
# which tasks are configured to run depends on your Synapse configuration (e.g. if
# stats is enabled).

# To enable this, the worker must have a `worker_name` and can be configured to run
# background tasks. For example, to move background tasks to a dedicated worker,
# the shared configuration would include:

# ```yaml
# run_background_tasks_on: background_worker
# ```

# You might also wish to investigate the `update_user_directory_from_worker` and
# `media_instance_running_background_jobs` settings.

# An example for a dedicated background worker instance:

# ```yaml
# {{#include systemd-with-workers/workers/background_worker.yaml}}
# ```

# #### Updating the User Directory

# You can designate one generic worker to update the user directory.

# Specify its name in the shared configuration as follows:

# ```yaml
# update_user_directory_from_worker: worker_name
# ```

# This work cannot be load-balanced; please ensure the main process is restarted
# after setting this option in the shared configuration!

# This style of configuration supersedes the legacy `synapse.app.user_dir`
# worker application type.

# #### Notifying Application Services

# You can designate one generic worker to send output traffic to Application Services.

# Specify its name in the shared configuration as follows:

# ```yaml
# notify_appservices_from_worker: worker_name
# ```

# This work cannot be load-balanced; please ensure the main process is restarted
# after setting this option in the shared configuration!

# This style of configuration supersedes the legacy `synapse.app.appservice`
# worker application type.

# pusher worker (no API endpoints) [
# Handles sending push notifications to sygnal and email. Doesn't handle any
# REST endpoints itself, but you should set `start_pushers: False` in the
# shared configuration file to stop the main synapse sending push notifications.

# To run multiple instances at once the `pusher_instances` option should list all
# pusher instances by their worker name, e.g.:

# ```yaml
# pusher_instances:
#   - pusher_worker1
#   - pusher_worker2
# ```

# ]

# appservice worker (no API endpoints) [
# **Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
# `notify_appservices_from_worker` option instead.](#notifying-application-services)

# Handles sending output traffic to Application Services. Doesn't handle any
# REST endpoints itself, but you should set `notify_appservices: False` in the
# shared configuration file to stop the main synapse sending appservice notifications.

# Note this worker cannot be load-balanced: only one instance should be active.

# ]

# federation_sender worker (no API endpoints) [
# Handles sending federation traffic to other servers. Doesn't handle any
# REST endpoints itself, but you should set `send_federation: False` in the
# shared configuration file to stop the main synapse sending this traffic.

# If running multiple federation senders then you must list each
# instance in the `federation_sender_instances` option by their `worker_name`.
# All instances must be stopped and started when adding or removing instances.
# For example:

# ```yaml
# federation_sender_instances:
#   - federation_sender1
#   - federation_sender2
# ```
# ]

matrix_synapse_workers_media_repository_endpoints:
# Handles the media repository. It can handle all endpoints starting with:

- ^/_matrix/media/

# ... and the following regular expressions matching media-specific administration APIs:

- ^/_synapse/admin/v1/purge_media_cache$
- ^/_synapse/admin/v1/room/.*/media.*$
- ^/_synapse/admin/v1/user/.*/media.*$
- ^/_synapse/admin/v1/media/.*$
- ^/_synapse/admin/v1/quarantine_media/.*$
- ^/_synapse/admin/v1/users/.*/media$

# You should also set `enable_media_repo: False` in the shared configuration
# file to stop the main synapse running background jobs related to managing the
# media repository. Note that doing so will prevent the main process from being
# able to handle the above endpoints.

# In the `media_repository` worker configuration file, configure the http listener to
# expose the `media` resource. For example:

# ```yaml
# worker_listeners:
#   - type: http
#     port: 8085
#     resources:
#       - names:
#           - media
# ```

# Note that if running multiple media repositories they must be on the same server
# and you must configure a single instance to run the background tasks, e.g.:

# ```yaml
# media_instance_running_background_jobs: "media-repository-1"
# ```

# Note that if a reverse proxy is used, then `/_matrix/media/` must be routed for both inbound client and federation requests (if they are handled separately).

matrix_synapse_workers_user_dir_endpoints:
# **Deprecated as of Synapse v1.59.** [Use `synapse.app.generic_worker` with the
# `update_user_directory_from_worker` option instead.](#updating-the-user-directory)

# Handles searches in the user directory. It can handle REST endpoints matching
# the following regular expressions:

- ^/_matrix/client/(r0|v3|unstable)/user_directory/search$

# When using this worker you must also set `update_user_directory: false` in the
# shared configuration file to stop the main synapse running background
# jobs related to updating the user directory.

# Above endpoint is not *required* to be routed to this worker. By default,
# `update_user_directory` is set to `true`, which means the main process
# will handle updates. All workers configured with `client` can handle the above
# endpoint as long as either this worker or the main process are configured to
# handle it, and are online.

# If `update_user_directory` is set to `false`, and this worker is not running,
# the above endpoint may give outdated results.

matrix_synapse_workers_frontend_proxy_endpoints:
# Proxies some frequently-requested client endpoints to add caching and remove
# load from the main synapse. It can handle REST endpoints matching the following
# regular expressions:

- ^/_matrix/client/(r0|v3|unstable)/keys/upload

# If `use_presence` is False in the homeserver config, it can also handle REST
# endpoints matching the following regular expressions:

# FIXME: ADDITIONAL CONDITIONS REQUIRED: to be enabled manually
# ^/_matrix/client/(api/v1|r0|v3|unstable)/presence/[^/]+/status

# This "stub" presence handler will pass through `GET` requests but make the
# `PUT` effectively a no-op.

# It will proxy any requests it cannot handle to the main synapse instance. It
# must therefore be configured with the location of the main instance, via
# the `worker_main_http_uri` setting in the `frontend_proxy` worker configuration
# file. For example:

# ```yaml
# worker_main_http_uri: http://127.0.0.1:8008
# ```

# All Synapse worker types that this role knows how to manage.
matrix_synapse_workers_avail_list:
- appservice
- federation_sender
- frontend_proxy
- generic_worker
- media_repository
- pusher
- user_dir