Mirror of https://github.com/spantaleev/matrix-docker-ansible-deploy.git (synced 2024-11-06 18:57:37 +01:00)

Merge pull request #1085 from GoMatrixHosting/master

GoMatrixHosting v0.4.7

Commit bca37aba1e
@@ -8,9 +8,7 @@ Members can be assigned a server from Digitalocean, or they can connect their own ...

 The AWX system is arranged into 'members', each with their own 'subscriptions'. After creating a subscription, the user enters the 'provision stage', where they define the URLs they will use, the server's location and whether or not there is already a website at the base domain. They then proceed to the 'deploy stage', where they can configure their Matrix server.

-Ideally this system can manage the updates, configuration, backups and monitoring on it's own. It is an extension of the popular deploy script [spantaleev/matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy).
+This system can manage the updates, configuration, import and export, backups and monitoring on its own. It is an extension of the popular deploy script [spantaleev/matrix-docker-ansible-deploy](https://github.com/spantaleev/matrix-docker-ansible-deploy).

 Warning: This project is currently alpha quality and should only be run by the brave.

 ## Other Required Playbooks
@@ -23,6 +21,7 @@ The following repositories allow you to copy and use this setup:

 [Ansible Provision Server](https://gitlab.com/GoMatrixHosting/ansible-provision-server) - Used by AWX members to perform initial configuration of their DigitalOcean or On-Premises server.

 ## Testing Fork For This Playbook

 Updates to this section are trailed here:
roles/matrix-awx/scripts/matrix_build_room_list.py (new file, 28 lines)

@@ -0,0 +1,28 @@
+import sys
+import requests
+import json
+
+janitor_token = sys.argv[1]
+synapse_container_ip = sys.argv[2]
+
+# collect total amount of rooms
+
+rooms_raw_url = 'http://' + synapse_container_ip + ':8008/_synapse/admin/v1/rooms'
+rooms_raw_header = {'Authorization': 'Bearer ' + janitor_token}
+rooms_raw = requests.get(rooms_raw_url, headers=rooms_raw_header)
+rooms_raw_python = json.loads(rooms_raw.text)
+total_rooms = rooms_raw_python["total_rooms"]
+
+# build complete room list file
+
+room_list_file = open("/tmp/room_list_complete.json", "w")
+
+for i in range(0, total_rooms, 100):
+    rooms_inc_url = 'http://' + synapse_container_ip + ':8008/_synapse/admin/v1/rooms?from=' + str(i)
+    rooms_inc = requests.get(rooms_inc_url, headers=rooms_raw_header)
+    room_list_file.write(rooms_inc.text)
+
+room_list_file.close()
+
+print(total_rooms)
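The script above pages through the Synapse admin rooms API 100 rooms at a time and appends each raw JSON response to /tmp/room_list_complete.json, so the file ends up holding one JSON document per page rather than a single array; the playbook changes below read it back with jq 'try .rooms[]', which iterates over each document in turn. As a rough sketch of the same parsing outside the playbook (a hypothetical helper, not part of this commit), the file could be consumed from Python like this:

import json

def rooms_with_no_local_members(path="/tmp/room_list_complete.json"):
    """Count rooms with no local members, mirroring the playbook's
    jq 'try .rooms[] | select(.joined_local_members == 0)' filter."""
    decoder = json.JSONDecoder()
    text = open(path).read()
    room_ids, pos = [], 0
    while pos < len(text):
        # Skip whitespace between the concatenated JSON documents.
        while pos < len(text) and text[pos].isspace():
            pos += 1
        if pos >= len(text):
            break
        page, pos = decoder.raw_decode(text, pos)
        for room in page.get("rooms", []):
            if room.get("joined_local_members") == 0:
                room_ids.append(room["room_id"])
    return room_ids

if __name__ == "__main__":
    print(len(rooms_with_no_local_members()))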
@@ -17,136 +17,132 @@

     file: '/var/lib/awx/projects/clients/{{ member_id }}/{{ subscription_id }}/matrix_vars.yml'
   no_log: True

-- name: Collect size of Synapse database
+- name: Collect before shrink size of Synapse database
   shell: du -sh /matrix/postgres/data
   register: db_size_before_stat
+  when: (purge_mode.find("Perform final shrink") != -1)
   no_log: True

-- name: Print before size of Synapse database
-  debug:
-    msg: "{{ db_size_before_stat.stdout.split('\n') }}"
-  when: db_size_before_stat is defined
-
 - name: Collect the internal IP of the matrix-synapse container
   shell: "/usr/bin/docker inspect --format '{''{range.NetworkSettings.Networks}''}{''{.IPAddress}''}{''{end}''}' matrix-synapse"
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)
   register: synapse_container_ip

 - name: Collect access token for janitor user
   shell: |
     curl -X POST -d '{"type":"m.login.password", "user":"janitor", "password":"{{ matrix_awx_janitor_user_password }}"}' "{{ synapse_container_ip.stdout }}:8008/_matrix/client/r0/login" | jq '.access_token'
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)
   register: janitors_token
   no_log: True

-- name: Collect total number of rooms
+- name: Copy build_room_list.py script to target machine
+  copy:
+    src: ./roles/matrix-awx/scripts/matrix_build_room_list.py
+    dest: /usr/local/bin/matrix_build_room_list.py
+    owner: matrix
+    group: matrix
+    mode: '0755'
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)
+
+- name: Run build_room_list.py script
   shell: |
-    curl -X GET --header "Authorization: Bearer {{ janitors_token.stdout[1:-1] }}" '{{ synapse_container_ip.stdout }}:8008/_synapse/admin/v1/rooms' | jq '.total_rooms'
-  when: purge_rooms|bool
+    runuser -u matrix -- python3 /usr/local/bin/matrix_build_room_list.py {{ janitors_token.stdout[1:-1] }} {{ synapse_container_ip.stdout }}
   register: rooms_total
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

-- name: Print total number of rooms
-  debug:
-    msg: '{{ rooms_total.stdout }}'
-  when: purge_rooms|bool
+- name: Fetch complete room list from target machine
+  fetch:
+    src: /tmp/room_list_complete.json
+    dest: "/tmp/{{ subscription_id }}_room_list_complete.json"
+    flat: yes
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

-- name: Calculate every 100 values for total number of rooms
-  delegate_to: 127.0.0.1
-  shell: |
-    seq 0 100 {{ rooms_total.stdout }}
-  when: purge_rooms|bool
-  register: every_100_rooms
-
-- name: Ensure room_list_complete.json file exists
-  delegate_to: 127.0.0.1
+- name: Remove complete room list from target machine
   file:
-    path: /tmp/{{ subscription_id }}_room_list_complete.json
-    state: touch
-  when: purge_rooms|bool
+    path: /tmp/room_list_complete.json
+    state: absent
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

-- name: Build file with total room list
-  include_tasks: purge_database_build_list.yml
-  loop: "{{ every_100_rooms.stdout_lines | flatten(levels=1) }}"
-  when: purge_rooms|bool

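Two details in the tasks above are easy to miss: the janitor login pipes the response through jq '.access_token', which prints the token wrapped in double quotes, so later commands pass janitors_token.stdout[1:-1] to strip them; and the new Python script replaces the old seq 0 100 / purge_database_build_list.yml pagination loop. A tiny hypothetical illustration of the quote stripping (placeholder value, not part of the commit):

# stdout as registered from: curl ... | jq '.access_token'
stdout = '"syt_placeholder_access_token"'

# The playbook's {{ janitors_token.stdout[1:-1] }} drops the surrounding quotes.
access_token = stdout[1:-1]
print(access_token)  # syt_placeholder_access_token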
 - name: Generate list of rooms with no local users
   delegate_to: 127.0.0.1
   shell: |
     jq 'try .rooms[] | select(.joined_local_members == 0) | .room_id' < /tmp/{{ subscription_id }}_room_list_complete.json > /tmp/{{ subscription_id }}_room_list_no_local_users.txt
-  when: purge_rooms|bool
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

 - name: Count number of rooms with no local users
   delegate_to: 127.0.0.1
   shell: |
     wc -l /tmp/{{ subscription_id }}_room_list_no_local_users.txt | awk '{ print $1 }'
   register: rooms_no_local_total
-  when: purge_rooms|bool
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

 - name: Setting host fact room_list_no_local_users
   set_fact:
     room_list_no_local_users: "{{ lookup('file', '/tmp/{{ subscription_id }}_room_list_no_local_users.txt') }}"
   no_log: True
-  when: purge_rooms|bool
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

 - name: Purge all rooms with no local users
   include_tasks: purge_database_no_local.yml
   loop: "{{ room_list_no_local_users.splitlines() | flatten(levels=1) }}"
-  when: purge_rooms|bool
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

 - name: Collect epoche time from date
   delegate_to: 127.0.0.1
   shell: |
     date -d '{{ purge_date }}' +"%s"
-  when: purge_rooms|bool
+  when: (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)
   register: purge_epoche_time

 - name: Generate list of rooms with more then N users
   delegate_to: 127.0.0.1
   shell: |
     jq 'try .rooms[] | select(.joined_members > {{ purge_metric_value }}) | .room_id' < /tmp/{{ subscription_id }}_room_list_complete.json > /tmp/{{ subscription_id }}_room_list_joined_members.txt
-  when: (purge_metric.find("Number of users") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of users [slower]") != -1

 - name: Count number of rooms with more then N users
   delegate_to: 127.0.0.1
   shell: |
     wc -l /tmp/{{ subscription_id }}_room_list_joined_members.txt | awk '{ print $1 }'
   register: rooms_join_members_total
-  when: (purge_metric.find("Number of users") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of users [slower]") != -1

 - name: Setting host fact room_list_joined_members
   delegate_to: 127.0.0.1
   set_fact:
     room_list_joined_members: "{{ lookup('file', '/tmp/{{ subscription_id }}_room_list_joined_members.txt') }}"
-  when: (purge_metric.find("Number of users") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of users [slower]") != -1
   no_log: True

 - name: Purge all rooms with more then N users
   include_tasks: purge_database_users.yml
   loop: "{{ room_list_joined_members.splitlines() | flatten(levels=1) }}"
-  when: (purge_metric.find("Number of users") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of users [slower]") != -1

 - name: Generate list of rooms with more then N events
   delegate_to: 127.0.0.1
   shell: |
     jq 'try .rooms[] | select(.state_events > {{ purge_metric_value }}) | .room_id' < /tmp/{{ subscription_id }}_room_list_complete.json > /tmp/{{ subscription_id }}_room_list_state_events.txt
-  when: (purge_metric.find("Number of events") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of events [slower]") != -1

-- name: Count number of rooms with more then N users
+- name: Count number of rooms with more then N events
   delegate_to: 127.0.0.1
   shell: |
     wc -l /tmp/{{ subscription_id }}_room_list_state_events.txt | awk '{ print $1 }'
   register: rooms_state_events_total
-  when: (purge_metric.find("Number of events") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of events [slower]") != -1

 - name: Setting host fact room_list_state_events
   delegate_to: 127.0.0.1
   set_fact:
     room_list_state_events: "{{ lookup('file', '/tmp/{{ subscription_id }}_room_list_state_events.txt') }}"
-  when: (purge_metric.find("Number of events") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of events [slower]") != -1
   no_log: True

 - name: Purge all rooms with more then N events
   include_tasks: purge_database_events.yml
   loop: "{{ room_list_state_events.splitlines() | flatten(levels=1) }}"
-  when: (purge_metric.find("Number of events") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of events [slower]") != -1

 - name: Collect AWX admin token the hard way!
   delegate_to: 127.0.0.1
@@ -155,75 +151,162 @@

   register: tower_token
   no_log: True

+- name: Adjust 'Deploy/Update a Server' job template
+  delegate_to: 127.0.0.1
+  awx.awx.tower_job_template:
+    name: "{{ matrix_domain }} - 0 - Deploy/Update a Server"
+    description: "Creates a new matrix service with Spantaleev's playbooks"
+    extra_vars: "{{ lookup('file', '/var/lib/awx/projects/clients/{{ member_id }}/{{ subscription_id }}/extra_vars.json') }}"
+    job_type: run
+    job_tags: "rust-synapse-compress-state"
+    inventory: "{{ member_id }}"
+    project: "{{ member_id }} - Matrix Docker Ansible Deploy"
+    playbook: setup.yml
+    credential: "{{ member_id }} - AWX SSH Key"
+    state: present
+    verbosity: 1
+    tower_host: "https://{{ tower_host }}"
+    tower_oauthtoken: "{{ tower_token.stdout }}"
+    validate_certs: yes
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1) or (purge_mode.find("Skip purging rooms [faster]") != -1)

 - name: Execute rust-synapse-compress-state job template
   delegate_to: 127.0.0.1
   awx.awx.tower_job_launch:
     job_template: "{{ matrix_domain }} - 0 - Deploy/Update a Server"
-    tags: "rust-synapse-compress-state"
     wait: yes
     tower_host: "https://{{ tower_host }}"
     tower_oauthtoken: "{{ tower_token.stdout }}"
     validate_certs: yes
-  register: job
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1) or (purge_mode.find("Skip purging rooms [faster]") != -1)

-- name: Stop Synapse service
-  shell: systemctl stop matrix-synapse.service
+- name: Revert 'Deploy/Update a Server' job template
+  delegate_to: 127.0.0.1
+  awx.awx.tower_job_template:
+    name: "{{ matrix_domain }} - 0 - Deploy/Update a Server"
+    description: "Creates a new matrix service with Spantaleev's playbooks"
+    extra_vars: "{{ lookup('file', '/var/lib/awx/projects/clients/{{ member_id }}/{{ subscription_id }}/extra_vars.json') }}"
+    job_type: run
+    job_tags: "setup-all,start"
+    inventory: "{{ member_id }}"
+    project: "{{ member_id }} - Matrix Docker Ansible Deploy"
+    playbook: setup.yml
+    credential: "{{ member_id }} - AWX SSH Key"
+    state: present
+    verbosity: 1
+    tower_host: "https://{{ tower_host }}"
+    tower_oauthtoken: "{{ tower_token.stdout }}"
+    validate_certs: yes
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1) or (purge_mode.find("Skip purging rooms [faster]") != -1)

+- name: Ensure matrix-synapse is stopped
+  service:
+    name: matrix-synapse
+    state: stopped
+    daemon_reload: yes
+  when: (purge_mode.find("Perform final shrink") != -1)

 - name: Re-index Synapse database
   shell: docker exec -i matrix-postgres psql "host=127.0.0.1 port=5432 dbname=synapse user=synapse password={{ matrix_synapse_connection_password }}" -c 'REINDEX (VERBOSE) DATABASE synapse'
+  when: (purge_mode.find("Perform final shrink") != -1)

+- name: Ensure matrix-synapse is started
+  service:
+    name: matrix-synapse
+    state: started
+    daemon_reload: yes
+  when: (purge_mode.find("Perform final shrink") != -1)

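Throughout these tasks, gating is done with purge_mode.find("...") != -1, i.e. a plain substring test against the purge mode chosen for the run (presumably the AWX survey selection), so the 'Perform final shrink' branch above and the vacuum jobs below only fire when that option was picked. A minimal sketch of the idiom (hypothetical, not part of the commit):

def mode_selected(purge_mode: str, option: str) -> bool:
    # str.find returns -1 when the substring is absent,
    # so this is equivalent to: option in purge_mode
    return purge_mode.find(option) != -1

print(mode_selected("Perform final shrink", "Perform final shrink"))          # True
print(mode_selected("No local users [recommended]", "Perform final shrink"))  # False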
+- name: Adjust 'Deploy/Update a Server' job template
+  delegate_to: 127.0.0.1
+  awx.awx.tower_job_template:
+    name: "{{ matrix_domain }} - 0 - Deploy/Update a Server"
+    description: "Creates a new matrix service with Spantaleev's playbooks"
+    extra_vars: "{{ lookup('file', '/var/lib/awx/projects/clients/{{ member_id }}/{{ subscription_id }}/extra_vars.json') }}"
+    job_type: run
+    job_tags: "run-postgres-vacuum,start"
+    inventory: "{{ member_id }}"
+    project: "{{ member_id }} - Matrix Docker Ansible Deploy"
+    playbook: setup.yml
+    credential: "{{ member_id }} - AWX SSH Key"
+    state: present
+    verbosity: 1
+    tower_host: "https://{{ tower_host }}"
+    tower_oauthtoken: "{{ tower_token.stdout }}"
+    validate_certs: yes
+  when: (purge_mode.find("Perform final shrink") != -1)

 - name: Execute run-postgres-vacuum job template
   delegate_to: 127.0.0.1
   awx.awx.tower_job_launch:
     job_template: "{{ matrix_domain }} - 0 - Deploy/Update a Server"
-    tags: "run-postgres-vacuum,start"
     wait: yes
     tower_host: "https://{{ tower_host }}"
     tower_oauthtoken: "{{ tower_token.stdout }}"
     validate_certs: yes
-  register: job
+  when: (purge_mode.find("Perform final shrink") != -1)

+- name: Revert 'Deploy/Update a Server' job template
+  delegate_to: 127.0.0.1
+  awx.awx.tower_job_template:
+    name: "{{ matrix_domain }} - 0 - Deploy/Update a Server"
+    description: "Creates a new matrix service with Spantaleev's playbooks"
+    extra_vars: "{{ lookup('file', '/var/lib/awx/projects/clients/{{ member_id }}/{{ subscription_id }}/extra_vars.json') }}"
+    job_type: run
+    job_tags: "setup-all,start"
+    inventory: "{{ member_id }}"
+    project: "{{ member_id }} - Matrix Docker Ansible Deploy"
+    playbook: setup.yml
+    credential: "{{ member_id }} - AWX SSH Key"
+    state: present
+    verbosity: 1
+    tower_host: "https://{{ tower_host }}"
+    tower_oauthtoken: "{{ tower_token.stdout }}"
+    validate_certs: yes
+  when: (purge_mode.find("Perform final shrink") != -1)

 - name: Cleanup room_list files
   delegate_to: 127.0.0.1
   shell: |
     rm /tmp/{{ subscription_id }}_room_list*
-  when: purge_rooms|bool
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)
   ignore_errors: yes

-- name: Collect size of Synapse database
+- name: Collect after shrink size of Synapse database
   shell: du -sh /matrix/postgres/data
   register: db_size_after_stat
+  when: (purge_mode.find("Perform final shrink") != -1)
   no_log: True

 - name: Print total number of rooms processed
   debug:
     msg: '{{ rooms_total.stdout }}'
-  when: purge_rooms|bool
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

 - name: Print the number of rooms purged with no local users
   debug:
     msg: '{{ rooms_no_local_total.stdout }}'
-  when: purge_rooms|bool
+  when: (purge_mode.find("No local users [recommended]") != -1) or (purge_mode.find("Number of users [slower]") != -1) or (purge_mode.find("Number of events [slower]") != -1)

 - name: Print the number of rooms purged with more then N users
   debug:
     msg: '{{ rooms_join_members_total.stdout }}'
-  when: (purge_metric.find("Number of users") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of users") != -1

 - name: Print the number of rooms purged with more then N events
   debug:
     msg: '{{ rooms_state_events_total.stdout }}'
-  when: (purge_metric.find("Number of events") != -1) and (purge_rooms|bool)
+  when: purge_mode.find("Number of events") != -1

 - name: Print before purge size of Synapse database
   debug:
     msg: "{{ db_size_before_stat.stdout.split('\n') }}"
-  when: db_size_before_stat is defined
+  when: (db_size_before_stat is defined) and (purge_mode.find("Perform final shrink") != -1)

 - name: Print after purge size of Synapse database
   debug:
     msg: "{{ db_size_after_stat.stdout.split('\n') }}"
-  when: db_size_after_stat is defined
+  when: (db_size_after_stat is defined) and (purge_mode.find("Perform final shrink") != -1)

 - name: Set boolean value to exit playbook
   set_fact:
@@ -10,7 +10,7 @@

 - name: Set matrix_synapse_rust_synapse_compress_state_find_rooms_command_wait_time, if not provided
   set_fact:
-    matrix_synapse_rust_synapse_compress_state_find_rooms_command_wait_time: 180
+    matrix_synapse_rust_synapse_compress_state_find_rooms_command_wait_time: 300
   when: "matrix_synapse_rust_synapse_compress_state_find_rooms_command_wait_time|default('') == ''"

 - name: Set matrix_synapse_rust_synapse_compress_state_compress_room_time, if not provided