Mirror of https://github.com/spantaleev/matrix-docker-ansible-deploy.git
Upgrade Synapse (v1.19.3 -> v1.20.0)
commit dd217137b6
parent 65e22a6888
@@ -5,7 +5,7 @@ matrix_synapse_enabled: true
 
 matrix_synapse_container_image_self_build: false
 
-matrix_synapse_docker_image: "matrixdotorg/synapse:v1.19.3"
+matrix_synapse_docker_image: "matrixdotorg/synapse:v1.20.0"
 matrix_synapse_docker_image_force_pull: "{{ matrix_synapse_docker_image.endswith(':latest') }}"
 
 matrix_synapse_base_path: "{{ matrix_base_data_path }}/synapse"
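For playbook users, the role default above can also be overridden per host rather than edited in place. A minimal sketch, assuming the playbook's usual inventory/host_vars/matrix.<your-domain>/vars.yml location (the pinned tag simply mirrors the new default):

    # vars.yml -- hypothetical per-host override of the role defaults shown above
    # Pin the Synapse container image explicitly instead of inheriting the role default.
    matrix_synapse_docker_image: "matrixdotorg/synapse:v1.20.0"
    # Force-pulling only matters for mutable tags such as ':latest'; a pinned tag can stay false.
    matrix_synapse_docker_image_force_pull: false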
@@ -366,11 +366,10 @@ retention:
 #  min_lifetime: 1d
 #  max_lifetime: 1y
 
-# Retention policy limits. If set, a user won't be able to send a
-# 'm.room.retention' event which features a 'min_lifetime' or a 'max_lifetime'
-# that's not within this range. This is especially useful in closed federations,
-# in which server admins can make sure every federating server applies the same
-# rules.
+# Retention policy limits. If set, and the state of a room contains a
+# 'm.room.retention' event in its state which contains a 'min_lifetime' or a
+# 'max_lifetime' that's out of these bounds, Synapse will cap the room's policy
+# to these limits when running purge jobs.
 #
 #allowed_lifetime_min: 1d
 #allowed_lifetime_max: 1y
@@ -390,18 +389,19 @@ retention:
 # 'longest_max_lifetime' of '3d' will handle every room with a retention policy
 # which 'max_lifetime' is lower than or equal to three days.
 #
-# The rationale for this per-job configuration is that some rooms might have a
-# retention policy with a low 'max_lifetime', where history needs to be purged
-# of outdated messages on a more frequent basis than for the rest of the rooms
-# (e.g. every 12h), but not want that purge to be performed by a job that's
-# iterating over every room it knows, which could be heavy on the server.
+# If any purge job is configured, it is strongly recommended to have at least
+# a single job with neither 'shortest_max_lifetime' nor 'longest_max_lifetime'
+# set, or one job without 'shortest_max_lifetime' and one job without
+# 'longest_max_lifetime' set. Otherwise some rooms might be ignored, even if
+# 'allowed_lifetime_min' and 'allowed_lifetime_max' are set, because capping a
+# room's policy to these values is done after the policies are retrieved from
+# Synapse's database (which is done using the range specified in a purge job's
+# configuration).
 #
 #purge_jobs:
-#  - shortest_max_lifetime: 1d
-#    longest_max_lifetime: 3d
+#  - longest_max_lifetime: 3d
 #    interval: 12h
 #  - shortest_max_lifetime: 3d
-#    longest_max_lifetime: 1y
 #    interval: 1d
 
 # Inhibits the /requestToken endpoints from returning an error that might leak
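Read together, the two retention hunks above track Synapse 1.20.0's behaviour change: out-of-range room policies are now capped at purge time instead of the server rejecting the 'm.room.retention' event. A minimal sketch of what these settings could look like once uncommented in homeserver.yaml, using only the keys and illustrative values from the sample config above (retention is off by default; 'enabled: true' is the standard switch for the feature and is not part of this diff):

    retention:
      enabled: true
      # Cap any room policy that falls outside these bounds when purge jobs run.
      allowed_lifetime_min: 1d
      allowed_lifetime_max: 1y
      # Purge short-retention rooms every 12h, everything else daily.
      purge_jobs:
        - longest_max_lifetime: 3d
          interval: 12h
        - shortest_max_lifetime: 3d
          interval: 1d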
@@ -1923,9 +1923,7 @@ email:
 # Directory in which Synapse will try to find the template files below.
 # If not set, default templates from within the Synapse package will be used.
 #
-# DO NOT UNCOMMENT THIS SETTING unless you want to customise the templates.
-# If you *do* uncomment it, you will need to make sure that all the templates
-# below are in the directory.
+# Do not uncomment this setting unless you want to customise the templates.
 #
 # Synapse will look for the following templates in this directory:
 #
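The softened wording above concerns the directory Synapse searches for custom e-mail templates. A hedged sketch of pointing it at your own templates (the key name 'template_dir' is assumed from Synapse's sample config of this era, and the path is a placeholder):

    email:
      # Only set this if you ship customised copies of the templates listed below.
      template_dir: /data/custom_email_templates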