datanommer: Create manual playbook to run alembic

Lenka Segura 2021-09-20 11:38:04 +02:00 committed by kevin
parent 577a385645
commit 260c3f98ef
2 changed files with 92 additions and 113 deletions


@@ -1,40 +1,11 @@
- name: push packages out
- name: Verify the badges backend and stop it
  hosts:
    - badges-backend
    - badges-backend-stg
    - datagrepper
    - datagrepper-stg
    - notifs-web
    - notifs-web-stg
    - busgateway
    - busgateway-stg
    - badges_backend
    - badges_backend_stg
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  vars:
    testing: False
  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"
  tasks:
    - name: clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
      command: yum clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
      check_mode: no
    - name: update datanommer packages from main repo
      package: name="*datanommer*" state=latest
      when: not testing
    - name: yum update datanommer packages from testing repo
      yum: name="*datanommer*" state=latest enablerepo=infrastructure-tags-stg
      when: testing
- name: verify the badges backend and stop it
  hosts: badges_backend:badges_backend_stg
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/private/ansible/vars.yml
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"
@@ -48,107 +19,90 @@
  tasks:
    - service: name="fedmsg-hub" state=stopped
- name: verify the datagrepper frontend and stop it
  hosts: datagrepper:datagrepper_stg
- name: Stop datagrepper
  hosts:
    - os_masters[0]
    - os_masters_stg[0]
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/private/ansible/vars.yml
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"
  pre_tasks:
    - name: tell nagios to shush
      nagios: action=downtime minutes=120 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
      delegate_to: noc01.iad2.fedoraproject.org
      ignore_errors: true
  roles:
    - datagrepper
  tasks:
    - service: name="httpd" state=stopped
    - name: Scale down datagrepper to 0 pods
      command: oc -n datagrepper scale dc/datagrepper --replicas=0
- name: verify the notifs frontend and stop it
  hosts: notifs_web:notifs_web_stg
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"
  pre_tasks:
    - name: tell nagios to shush
      nagios: action=downtime minutes=120 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
      delegate_to: noc01.iad2.fedoraproject.org
      ignore_errors: true
  roles:
    - notifs/frontend
  tasks:
    - service: name="httpd" state=stopped
- name: verify the datanommer backend, stop it, and then upgrade the db
  hosts: busgateway:busgateway_stg
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"
  pre_tasks:
    - name: tell nagios to shush
      nagios: action=downtime minutes=120 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
      delegate_to: noc01.iad2.fedoraproject.org
      ignore_errors: true
  roles:
    - fedmsg/datanommer
  tasks:
    - name: Stop the notification backend
      service: name="fedmsg-hub" state=stopped
    - name: Upgrade the database
      command: /usr/bin/alembic -c /usr/share/datanommer.models/alembic.ini upgrade head
      args:
        chdir: /usr/share/datanommer.models/
      async: 20000
      poll: 60
    - name: And... start the backend again
      service: name="fedmsg-hub" state=started
  post_tasks:
    - name: tell nagios to unshush
      nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
      delegate_to: noc01.iad2.fedoraproject.org
      ignore_errors: true
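# A hedged aside, not part of this commit: the play above applies the schema
# migration with "alembic upgrade head". The migration state can be checked
# the same way with "alembic current"; the sketch below assumes
# datanommer.models and its alembic.ini are installed at the paths used above.
- name: Show the current datanommer schema revision (hypothetical check, not in this commit)
  hosts: busgateway:busgateway_stg
  user: root
  tasks:
    - name: Ask alembic which revision the database is at
      command: /usr/bin/alembic -c /usr/share/datanommer.models/alembic.ini current
      args:
        chdir: /usr/share/datanommer.models/
      register: alembic_current
      changed_when: false
    - name: Print the revision
      debug:
        var: alembic_current.stdout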
- name: restart the frontend pieces (fmn.web and datagrepper)
- name: Stop datanommer
  hosts:
    - datagrepper
    - datagrepper-stg
    - notifs-web
    - notifs-web-stg
    - os_masters[0]
    - os_masters_stg[0]
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/private/ansible/vars.yml
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"
  tasks:
    - name: Scale down datanommer to 0 pods
      command: oc -n datanommer scale dc/datanommer --replicas=0
    - name: Upgrade the database
      role: openshift/object
      app: datanommer
      template: job.yml
      objectname: job.yml
    - name: Wait for the db-upgrade completion
      command: oc get jobs/db-upgrade -o jsonpath='{@.status.succeeded}'
      register: status
      until: status.stdout | int == 1
      retries: 5
      delay: 30
    - name: Delete the job in case it finished
      role: openshift/object-delete
      app: datanommer
      objecttype: job
      objectname: db-upgrade
      when: status.stdout | int == 1
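# A hedged variant, not part of this commit: the "Upgrade the database" and
# "Delete the job" steps above point a task at the openshift/object and
# openshift/object-delete roles. If a task-level include is preferred, the same
# step could be sketched with include_role, reusing the app/template/objectname
# values above; how the role consumes these variables is an assumption here.
    - name: Upgrade the database (hypothetical include_role form, not in this commit)
      include_role:
        name: openshift/object
      vars:
        app: datanommer
        template: job.yml
        objectname: job.yml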
- name: Start the datanommer again
  hosts:
    - os_masters[0]
    - os_masters_stg[0]
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - /srv/private/ansible/vars.yml
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  tasks:
    - service: name="httpd" state=started
  post_tasks:
    - name: tell nagios to unshush
      nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
      delegate_to: noc01.iad2.fedoraproject.org
      ignore_errors: true
    - name: Scale up datanommer pods
      command: oc -n datanommer scale dc/db-datanommer --replicas=1
- name: restart the last backend piece (badges)
- name: Start the datagrepper again
  hosts:
    - os_masters[0]
    - os_masters_stg[0]
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - /srv/private/ansible/vars.yml
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  tasks:
    - name: Scale up datagrepper pods
      command: oc -n datagrepper scale dc/datagrepper --replicas=1
- name: Restart the last backend piece (badges)
  hosts:
    - badges-backend
    - badges-backend-stg
  user: root
  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/private/ansible/vars.yml
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
  tasks:
    - service: name="fedmsg-hub" state=started
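# A hedged final check, not part of this commit: after the scale-ups above it
# can be worth confirming that the datanommer and datagrepper pods actually
# came back. The namespaces mirror the oc commands used earlier; the rest of
# this play is an assumption.
- name: Verify the scaled-up pods are back (hypothetical check, not in this commit)
  hosts:
    - os_masters[0]
    - os_masters_stg[0]
  user: root
  tasks:
    - name: List pods in the datanommer and datagrepper namespaces
      command: "oc -n {{ item }} get pods -o wide"
      loop:
        - datanommer
        - datagrepper
      register: pod_listing
      changed_when: false
    - name: Show the pod listings
      debug:
        msg: "{{ pod_listing.results | map(attribute='stdout_lines') | list }}"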


@@ -0,0 +1,25 @@
apiVersion: batch/v1
kind: Job
metadata:
  name: db-upgrade
spec:
  activeDeadlineSeconds: 86400
  backoffLimit: 1
  completions: 1
  template:
    metadata:
      name: db-upgrade
    spec:
      containers:
        - name: db-upgrade
          image: docker-registry.default.svc:5000/datanommer/datanommer:latest
          command: ["/opt/app-root/src/.s2i/datanommer-upgrade-db.sh"]
          volumeMounts:
            - name: fedora-messaging-config-volume
              mountPath: "/etc/fedora-messaging"
              readOnly: true
      volumes:
        - name: fedora-messaging-config-volume
          configMap:
            name: fedora-messaging-config
      restartPolicy: Never
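The playbook above waits on this Job by polling its status.succeeded field and only deletes it once it has finished. As a hedged sketch, not part of the commit, the same checks can be run by hand from an os_master node if the wait times out; the namespace and job name come from the files above, everything else is an assumption.

# Hypothetical manual inspection of the db-upgrade Job (not in this commit).
- name: Inspect the db-upgrade Job by hand
  hosts: os_masters[0]
  user: root
  tasks:
    - name: Report how many pods of the Job have succeeded
      command: oc -n datanommer get jobs/db-upgrade -o jsonpath='{.status.succeeded}'
      register: job_status
      changed_when: false
      failed_when: false
    - name: Fetch the logs of the Job's pod
      command: oc -n datanommer logs job/db-upgrade
      register: job_logs
      changed_when: false
      failed_when: false
    - name: Print both results
      debug:
        msg:
          - "succeeded: {{ job_status.stdout | default('') }}"
          - "{{ job_logs.stdout_lines | default([]) }}"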