Fix name[casing] ansible-lint issues

Fix 1900 failures of the following casing issue:

`name[casing]: All names should start with an uppercase letter.`

Signed-off-by: Ryan Lerch <rlerch@redhat.com>
Ryan Lerch 2025-01-14 20:18:57 +10:00
parent 01030eaf26
commit 691adee6ee
299 changed files with 1935 additions and 1935 deletions
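
Both the check and the bulk fix can be reproduced from a checkout. A minimal sketch, assuming GNU sed and a reasonably recent ansible-lint; whether the installed release autofixes this particular rule is worth verifying:

    # list every name[casing] failure across the repository
    ansible-lint --nocolor | grep 'name\[casing\]'

    # recent ansible-lint releases can apply the transform themselves
    ansible-lint --fix

    # roughly equivalent mechanical pass with GNU sed: uppercase the
    # first letter after "- name: " (\U is a GNU sed extension)
    find . -name '*.yml' -exec sed -i -E 's/^(\s*-\s*name: )([a-z])/\1\U\2/' {} +

Note that a purely mechanical pass also rewrites `- name:` entries that are data rather than play or task names (for example the builder_groups variables in one of the Koji playbooks below), so the result still needs review.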

View file

@@ -9,25 +9,25 @@
#
---
- name: check for updates (EL)
- name: Check for updates (EL)
hosts: virt_host:&distro_RedHat
gather_facts: false
tasks:
- name: check for updates (yum)
- name: Check for updates (yum)
yum: list=updates update_cache=true
register: yumoutput
- debug: msg="{{ inventory_hostname}} {{ yumoutput.results|length }}"
- name: check for updates (Fedora)
- name: Check for updates (Fedora)
hosts: virt_host:&distro_Fedora
gather_facts: false
tasks:
- name: check for updates (dnf)
- name: Check for updates (dnf)
dnf: list=updates
register: dnfoutput
@@ -37,25 +37,25 @@
# For some reason ansible detects aarch64/armv7 hosts as virt type "NA"
#
- name: check for updates (aarch64/armv7) EL
- name: Check for updates (aarch64/armv7) EL
hosts: virt_NA:&distro_RedHat
gather_facts: false
tasks:
- name: check for updates (yum)
- name: Check for updates (yum)
yum: list=updates update_cache=true
register: yumoutput
- debug: msg="{{ inventory_hostname}} {{ yumoutput.results|length }}"
- name: check for updates (aarch64/armv7) Fedora
- name: Check for updates (aarch64/armv7) Fedora
hosts: virt_NA:&distro_Fedora
gather_facts: false
tasks:
- name: check for updates (dnf)
- name: Check for updates (dnf)
dnf: list=updates
register: dnfoutput

View file

@@ -9,20 +9,20 @@
#
---
- name: check for updates
- name: Check for updates
hosts: distro_RedHat:distro_CentOS:!ocp*:!worker*
gather_facts: false
tasks:
- name: check for updates (yum)
- name: Check for updates (yum)
yum: list=updates update_cache=true
register: yumoutput
- debug: msg="{{ inventory_hostname}} {{ yumoutput.results|length }}"
when: yumoutput.results|length > 0
- name: check for updates
- name: Check for updates
hosts: distro_Fedora:!ocp*:!worker*
gather_facts: false
@@ -32,10 +32,10 @@
# We use the command module here because the real module can't expire
#
- name: make dnf recheck for new metadata from repos
- name: Make dnf recheck for new metadata from repos
command: dnf clean expire-cache
- name: check for updates (dnf)
- name: Check for updates (dnf)
dnf: list=updates
register: dnfoutput

View file

@@ -9,7 +9,7 @@
tasks:
- name: create temp dir for collecting info
- name: Create temp dir for collecting info
shell: mktemp -d
register: temp_dir
changed_when: false
@@ -70,7 +70,7 @@
- services
- name: output enabled service list chkconfig
- name: Output enabled service list chkconfig
shell: echo {{enabled_services_chkconfig.stdout_lines}} >> {{temp_dir.stdout}}/eservices
when: enabled_services_chkconfig is defined and enabled_services_chkconfig.rc == 0
changed_when: false
@@ -78,7 +78,7 @@
- check
- services
- name: output disabled loaded service list chkconfig
- name: Output disabled loaded service list chkconfig
shell: echo {{disabled_services_chkconfig.stdout_lines}} >> {{temp_dir.stdout}}/dservices
when: disabled_services_chkconfig is defined and disabled_services_chkconfig.rc == 0
changed_when: false
@@ -87,7 +87,7 @@
- services
- name: output loaded active service list systemctl
- name: Output loaded active service list systemctl
shell: echo {{loaded_active_services_systemctl.stdout_lines}} >> {{temp_dir.stdout}}/laservices
when: loaded_active_services_systemctl is defined and loaded_active_services_systemctl.rc == 0
changed_when: false
@@ -95,7 +95,7 @@
- check
- services
- name: output loaded inactive service list systemctl
- name: Output loaded inactive service list systemctl
shell: echo {{loaded_inactive_services_systemctl.stdout_lines}} >> {{temp_dir.stdout}}/liservices
when: loaded_inactive_services_systemctl is defined and loaded_inactive_services_systemctl.rc == 0
changed_when: false
@@ -224,7 +224,7 @@
- selinux
- name: misMatch current SELinux status with boot status
- name: MisMatch current SELinux status with boot status
shell: echo "SElinux Current and Boot modes are NOT in sync" >> {{temp_dir.stdout}}/selinux
when: ansible_selinux.status != "disabled" and ansible_selinux.config_mode != ansible_selinux.mode
changed_when: false
@@ -232,40 +232,40 @@
- check
- selinux
- name: resolve last persisted dir - if one is present
- name: Resolve last persisted dir - if one is present
local_action: shell ls -d -1 {{datadir_prfx_path}}/{{inventory_hostname}}-* 2>/dev/null | sort -r | head -1
register: last_dir
changed_when: false
ignore_errors: true
- name: get file list
- name: Get file list
shell: ls -1 {{temp_dir.stdout}}/*
register: file_list
changed_when: false
- name: get timestamp
- name: Get timestamp
shell: "date +%Y-%m-%d-%H-%M-%S"
register: timestamp
changed_when: false
- name: create persisting-state directory
- name: Create persisting-state directory
local_action: file path=/{{datadir_prfx_path}}/{{inventory_hostname}}-{{timestamp.stdout}} state=directory
changed_when: false
- name: fetch file list
- name: Fetch file list
fetch: src={{item}} dest=/{{datadir_prfx_path}}/{{inventory_hostname}}-{{timestamp.stdout}}/ flat=true
with_items: "{{file_list.stdout_lines}}"
changed_when: false
- name: diff the new files with last ones presisted
- name: Diff the new files with last ones presisted
local_action: shell for file in {{datadir_prfx_path}}/{{inventory_hostname}}-{{timestamp.stdout}}/*; do filename=$(basename $file); diff {{datadir_prfx_path}}/{{inventory_hostname}}-{{timestamp.stdout}}/$filename {{last_dir.stdout.strip(':')}}/$filename; done
ignore_errors: true
changed_when: false
register: file_diff
when: last_dir is defined and last_dir.stdout != ""
- name: display diff
- name: Display diff
debug: var=file_diff.stdout_lines
ignore_errors: true
changed_when: false
@@ -273,11 +273,11 @@
# clean up: can also be put as handlers
- name: clean remote temp dir
- name: Clean remote temp dir
file: path={{temp_dir.stdout}} state=absent
changed_when: false
- name: clean rpm temp file
- name: Clean rpm temp file
file: path={{localchanges.stdout}} state=absent
changed_when: false

View file

@@ -6,18 +6,18 @@
gather_facts: false
tasks:
- name: listing failed units
- name: Listing failed units
shell: systemctl list-units --state failed --no-legend | awk '{ print $1 }'
register: listing_failed
- name: check log with journalctl
- name: Check log with journalctl
shell: journalctl -lru {{ item }} -n 50
register: display_log
with_items: "{{ listing_failed.stdout_lines[0:] }}"
- debug: var=listing_failed.stdout_lines[0:]
- name: display log
- name: Display log
debug: var=display_log.stdout_lines
ignore_errors: true
when: display_log is defined

View file

@@ -1,8 +1,8 @@
---
- name: clear memcache
- name: Clear memcache
hosts: memcached:memcached-stg
serial: 1
tasks:
- name: clear memcache
- name: Clear memcache
command: echo flush_all | nc localhost 11211

View file

@@ -1,9 +1,9 @@
---
- name: clear varnish cache
- name: Clear varnish cache
hosts: proxies
user: root
serial: 1
tasks:
- name: clear varnish
- name: Clear varnish
command: varnishadm -S /etc/varnish/secret -T 127.0.0.1:6082 ban req.url == .

View file

@@ -2,7 +2,7 @@
# thanks threebean on this.
---
- name: kills postfix which has been left around alive after update.
- name: Kills postfix which has been left around alive after update.
hosts: "{{ target }}"
user: root

View file

@@ -10,37 +10,37 @@
# requires --extra-vars="target=hostspec"
---
- name: destroy and undefine vm
- name: Destroy and undefine vm
hosts: "{{ target }}"
user: root
gather_facts: false
tasks:
- name: get vm list on the vmhost
- name: Get vm list on the vmhost
delegate_to: "{{ vmhost }}"
virt: command=list_vms
register: result
- name: fail if the host is not already defined/existent
- name: Fail if the host is not already defined/existent
local_action: fail msg="host does not exist on {{ vmhost }}"
when: inventory_hostname not in result.list_vms
- name: schedule 30m host downtime in nagios
- name: Schedule 30m host downtime in nagios
nagios: action=downtime minutes=60 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
- name: pause for 30s before doing it
- name: Pause for 30s before doing it
pause: seconds=30 prompt="Destroying (and lvremove for) vm now {{ target }}, abort if this is wrong"
- name: destroy the vm
- name: Destroy the vm
virt: name={{ inventory_hostname }} command=destroy
delegate_to: "{{ vmhost }}"
- name: undefine the vm
- name: Undefine the vm
virt: name={{ inventory_hostname }} command=undefine
delegate_to: "{{ vmhost }}"
- name: destroy the lv
- name: Destroy the lv
command: /sbin/lvremove -f {{volgroup}}/{{inventory_hostname}}
delegate_to: "{{ vmhost }}"

View file

@@ -4,7 +4,7 @@
# NOTE: most of these vars_path come from group_vars/backup_server or from hostvars
---
- name: make backup server system
- name: Make backup server system
hosts: backup
user: root
gather_facts: true

View file

@@ -4,7 +4,7 @@
# covered by this playbook.
---
- name: enable an ephemeral builder
- name: Enable an ephemeral builder
hosts: build_x86_kcs:build_x86_kcs_stg
user: root
gather_facts: false
@@ -19,7 +19,7 @@
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
tasks:
- name: make a keytab dir
- name: Make a keytab dir
file: dest="/etc/kojid-cloud-scheduler/" state=directory
roles:

View file

@@ -1,5 +1,5 @@
---
- name: check/create instance
- name: Check/create instance
hosts: copr_back_dev_aws:copr_back_aws
user: root
gather_facts: false
@@ -23,7 +23,7 @@
- datacenter == 'aws'
- swap_file_size_mb is defined
- name: cloud basic setup
- name: Cloud basic setup
hosts: copr_back_dev_aws:copr_back_aws
user: root
gather_facts: true
@@ -43,7 +43,7 @@
# hostname: name="{{copr_hostbase}}.cloud.fedoraproject.org"
# when: env != 'production'
- name: provision instance
- name: Provision instance
hosts: copr_back_dev_aws:copr_back_aws
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: check/create instance
- name: Check/create instance
hosts: copr_dist_git_dev_aws:copr_dist_git_aws
user: root
gather_facts: false
@@ -18,7 +18,7 @@
- import_tasks: "{{ tasks_path }}/aws_cloud.yml"
when: datacenter == 'aws'
- name: cloud basic setup
- name: Cloud basic setup
hosts: copr_dist_git_dev_aws:copr_dist_git_aws
user: root
gather_facts: true
@@ -35,11 +35,11 @@
- datacenter == 'aws'
- swap_file_size_mb is defined
- import_tasks: "{{ tasks_path }}/cloud_setup_basic.yml"
- name: set hostname (required by some services, at least postfix need it)
- name: Set hostname (required by some services, at least postfix need it)
hostname: name="{{copr_hostbase}}.fedorainfracloud.org"
when: datacenter != "aws"
- name: provision instance
- name: Provision instance
hosts: copr_dist_git_dev_aws:copr_dist_git_aws
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: check/create instance
- name: Check/create instance
hosts: copr_front_dev_aws:copr_front_aws
user: root
gather_facts: false
@@ -24,7 +24,7 @@
- datacenter == 'aws'
- swap_file_size_mb is defined
- name: cloud basic setup
- name: Cloud basic setup
hosts: copr_front_dev_aws:copr_front_aws
gather_facts: true
vars_files:
@@ -36,11 +36,11 @@
tasks:
- import_tasks: "{{ tasks_path }}/cloud_setup_basic.yml"
- name: set hostname (required by some services, at least postfix need it)
- name: Set hostname (required by some services, at least postfix need it)
hostname: name="{{copr_hostbase}}.cloud.fedoraproject.org"
when: datacenter != "aws"
- name: provision instance
- name: Provision instance
hosts: copr_front_dev_aws:copr_front_aws
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: check/create instance
- name: Check/create instance
hosts: copr_keygen_dev_aws:copr_keygen_aws
gather_facts: false
@@ -18,16 +18,16 @@
when: datacenter == 'aws'
- name: gather facts
- name: Gather facts
setup:
check_mode: no
ignore_errors: true
register: facts
- name: install python2 and dnf stuff
- name: Install python2 and dnf stuff
raw: dnf -y install python-dnf libselinux-python yum
when: facts is failed
- name: cloud basic setup
- name: Cloud basic setup
hosts: copr_keygen_dev_aws:copr_keygen_aws
gather_facts: true
vars_files:
@@ -39,11 +39,11 @@
tasks:
- import_tasks: "{{ tasks_path }}/cloud_setup_basic.yml"
- name: set hostname (required by some services, at least postfix need it)
- name: Set hostname (required by some services, at least postfix need it)
hostname: name="{{copr_hostbase}}.cloud.fedoraproject.org"
when: datacenter != "aws"
- name: provision instance
- name: Provision instance
hosts: copr_keygen_dev_aws:copr_keygen_aws
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: check/create instance
- name: Check/create instance
hosts: copr_pulp_dev_aws:copr_pulp_aws
gather_facts: false
@@ -24,14 +24,14 @@
- swap_file_size_mb is defined
# TODO Remove in favor of base/tasks/main.yml
- name: install platform-python
- name: Install platform-python
package: name={{ item }} state=present
with_items:
- platform-python
become: true
# TODO Remove in favor of base/tasks/main.yml
- name: symlink /usr/bin/python to /usr/bin/python3
- name: Symlink /usr/bin/python to /usr/bin/python3
alternatives:
name: python
link: /usr/bin/python
@@ -39,13 +39,13 @@
become: true
# TODO Move this to base/tasks/main.yml
- name: install iptables-services
- name: Install iptables-services
package: name=iptables-services state=present
become: true
# TODO Postfix should have probably been installed by base/tasks/main.yml
# but it wasn't for this instance
- name: install postfix
- name: Install postfix
package: name=postfix state=present
become: true
@@ -55,7 +55,7 @@
dest: /tmp/pulp_default_admin_password
mode: "000"
- name: cloud basic setup
- name: Cloud basic setup
hosts: copr_pulp_dev_aws:copr_pulp_aws
become: true
gather_facts: true
@@ -69,11 +69,11 @@
tasks:
- import_tasks: "{{ tasks_path }}/cloud_setup_basic.yml"
- name: set hostname (required by some services, at least postfix need it)
- name: Set hostname (required by some services, at least postfix need it)
hostname: name="{{copr_hostbase}}.cloud.fedoraproject.org"
when: datacenter != "aws"
- name: provision instance
- name: Provision instance
hosts: copr_pulp_dev_aws:copr_pulp_aws
become: true
gather_facts: true

View file

@@ -4,7 +4,7 @@
# Once the instance exists, configure it.
---
- name: check/create instance
- name: Check/create instance
hosts: db.stg.aws.fedoraproject.org
user: root
gather_facts: false
@@ -25,7 +25,7 @@
- datacenter == 'aws'
- swap_file_size_mb is defined
- name: cloud basic setup
- name: Cloud basic setup
hosts: db.stg.aws.fedoraproject.org
user: root
gather_facts: true
@@ -39,7 +39,7 @@
tasks:
- import_tasks: "{{ tasks_path }}/cloud_setup_basic.yml"
- name: configure server
- name: Configure server
hosts: db.stg.aws.fedoraproject.org
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: check/create instance
- name: Check/create instance
hosts: logdetective
user: root
gather_facts: false
@@ -19,7 +19,7 @@
- datacenter == 'aws'
- swap_file_size_mb is defined
- name: provision instance
- name: Provision instance
hosts: logdetective
become: true
become_user: root

View file

@@ -26,7 +26,7 @@
# this is how you include other task lists
- import_tasks: "{{ tasks_path }}/motd.yml"
- name: install packager tools (dnf)
- name: Install packager tools (dnf)
dnf: state=present pkg={{ item }}
with_items:
- fedora-packager
@@ -34,7 +34,7 @@
tags:
- packages
- name: allow packagers to use mock
- name: Allow packagers to use mock
copy: dest=/etc/pam.d/mock src="{{ files }}/common/mock"
tags:
- config

View file

@@ -1,7 +1,7 @@
# This is a basic playbook
---
- name: dole out the basic configuration
- name: Dole out the basic configuration
hosts: nfs_servers
user: root
gather_facts: true
@@ -49,7 +49,7 @@
- drives
- copr
- name: create copr-be storage
- name: Create copr-be storage
lvol: vg=VG_nfs lv=copr-be size=30t shrink=no
tags:
- drives

View file

@@ -1,5 +1,5 @@
---
- name: setup base openQA host
- name: Setup base openQA host
hosts: openqa_onebox_test
user: root
gather_facts: true
@@ -29,7 +29,7 @@
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: configure openQA
- name: Configure openQA
hosts: openqa_onebox_test
user: root
gather_facts: true

View file

@@ -15,7 +15,7 @@
# fedora-messaging setup
- name: setup RabbitMQ
- name: Setup RabbitMQ
hosts: rabbitmq[0]:rabbitmq_stg[0]
user: root
gather_facts: false
@@ -78,7 +78,7 @@
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: setup FAF server
- name: Setup FAF server
hosts: retrace,retrace_stg
gather_facts: true
@@ -98,7 +98,7 @@
faf_spool_dir: /srv/faf/
- import_role: name=abrt/faf-post
- name: setup retrace server
- name: Setup retrace server
hosts: retrace,retrace_stg
gather_facts: true

View file

@@ -1,28 +1,28 @@
# requires --extra-vars="target=hostspec"
---
- name: reboot hosts
- name: Reboot hosts
hosts: "{{ target }}"
gather_facts: false
user: root
serial: 1
tasks:
- name: tell nagios to shush
- name: Tell nagios to shush
nagios: action=downtime minutes=60 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
- name: reboot the host
- name: Reboot the host
command: /sbin/shutdown -r 1
- name: wait for host to come back - up to 15 minutes
- name: Wait for host to come back - up to 15 minutes
local_action: wait_for host={{ target }} port=22 delay=120 timeout=900 search_regex=OpenSSH
# - name: sync time
# - name: Sync time
# command: ntpdate -u 1.rhel.pool.ntp.org
- name: tell nagios to unshush
- name: Tell nagios to unshush
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -4,30 +4,30 @@
---
- name: update the system
- name: Update the system
hosts: "{{ target }}"
gather_facts: false
user: root
tasks:
- name: expire-caches
- name: Expire-caches
command: yum clean expire-cache
- name: yum -y {{ yumcommand }}
- name: Yum -y {{ yumcommand }}
command: yum -y {{ yumcommand }}
async: 7200
poll: 30
- name: run rkhunter if installed
- name: Run rkhunter if installed
hosts: "{{ target }}"
user: root
tasks:
- name: check for rkhunter
- name: Check for rkhunter
command: /usr/bin/test -f /usr/bin/rkhunter
register: rkhunter
ignore_errors: true
- name: run rkhunter --propupd
- name: Run rkhunter --propupd
command: /usr/bin/rkhunter --propupd
when: rkhunter is success

View file

@@ -1,5 +1,5 @@
---
- name: handle ssh keys on a hosts birthday (new hw machine)
- name: Handle ssh keys on a hosts birthday (new hw machine)
hosts: "{{ myhosts }}"
gather_facts: false

View file

@@ -22,8 +22,8 @@
- '/etc/httpd/conf.d/fedorahosted.org'
- '/etc/httpd/conf.d/git.fedorahosted.org'
- name: install special fedorahosted-redirects.conf with fedorahosted redirects
- name: Install special fedorahosted-redirects.conf with fedorahosted redirects
copy: src={{ files }}/httpd/fedorahosted-redirects.conf dest=/etc/httpd/conf.d/fedorahosted.org/fedorahosted-redirects.conf
- name: install special git.fedorahosted-redirects.conf with git.fedorahosted redirects
- name: Install special git.fedorahosted-redirects.conf with git.fedorahosted redirects
copy: src={{ files }}/httpd/git.fedorahosted-redirects.conf dest=/etc/httpd/conf.d/git.fedorahosted.org/fedorahosted-redirects.conf

View file

@@ -19,7 +19,7 @@
- httpd
- httpd/website
- name: check the selinux context of webdir
- name: Check the selinux context of webdir
command: matchpathcon /srv/web
register: webdir
check_mode: no

View file

@@ -1,5 +1,5 @@
---
- name: make the virtual instance
- name: Make the virtual instance
hosts: "{{ myhosts }}"
gather_facts: false

View file

@@ -1,8 +1,8 @@
---
- name: detect package versions
- name: Detect package versions
package_facts: manager=auto
- name: assure minimal versions of dnf-plugins-core
- name: Assure minimal versions of dnf-plugins-core
dnf: name=dnf-plugins-core
state=latest
when: ansible_facts.packages['dnf-plugins-core'][0].version is version('4.0.12', '<')

View file

@@ -1,5 +1,5 @@
---
- name: upgrade copr backend
- name: Upgrade copr backend
hosts: copr_back_dev_aws:copr_back_aws
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: upgrade copr distgit
- name: Upgrade copr distgit
hosts: copr_dist_git_dev_aws:copr_dist_git_aws
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: upgrade copr frontend
- name: Upgrade copr frontend
hosts: copr_front_dev_aws:copr_front_aws
user: root
gather_facts: true
@@ -22,7 +22,7 @@
changed_when: "frontend_has_update.rc != 0"
failed_when: false
- name: make httpd stopped
- name: Make httpd stopped
service: name=httpd state=stopped
register: httpd_stopped
when: frontend_has_update.changed
@@ -36,7 +36,7 @@
- copr-selinux
- python3-copr-common
- name: upgrade db to head, logs in /var/log/copr-frontend/migrations.log
- name: Upgrade db to head, logs in /var/log/copr-frontend/migrations.log
shell: alembic-3 upgrade head &> /var/log/copr-frontend/migrations.log
become: yes
become_user: copr-fe
@@ -44,7 +44,7 @@
chdir: /usr/share/copr/coprs_frontend/
when: frontend_has_update.changed
- name: make httpd started
- name: Make httpd started
service: name=httpd state=started
when: httpd_stopped.changed

View file

@@ -1,5 +1,5 @@
---
- name: upgrade copr keygen
- name: Upgrade copr keygen
hosts: copr_keygen_dev_aws:copr_keygen_aws
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: upgrade copr pulp
- name: Upgrade copr pulp
hosts: copr_pulp_dev_aws:copr_pulp_aws
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: change fas db to readonly for most actions
- name: Change fas db to readonly for most actions
hosts: db-fas01.iad2.fedoraproject.org:db-fas01.stg.iad2.fedoraproject.org
user: root
vars_files:
@@ -7,10 +7,10 @@
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: copy the sql script to file
- name: Copy the sql script to file
template: src=fas-readonly.sql dest=/var/lib/pgsql/fas-readonly.sql
- name: change permissions for relevant tables in the fas db
- name: Change permissions for relevant tables in the fas db
shell: psql -1 -v ON_ERROR_STOP=1 fas2 </var/lib/pgsql/fas-readonly.sql
become: yes
become_user: postgres

View file

@@ -1,5 +1,5 @@
---
- name: change fas db to writable again
- name: Change fas db to writable again
hosts: db-fas01.iad2.fedoraproject.org:db-fas01.stg.iad2.fedoraproject.org
user: root
vars_files:
@@ -7,10 +7,10 @@
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: copy the sql script to file
- name: Copy the sql script to file
template: src=rollback.sql dest=/var/lib/pgsql/rollback.sql
- name: rollback permissions for relevant tables in the fas db
- name: Rollback permissions for relevant tables in the fas db
shell: psql -1 -v ON_ERROR_STOP=1 fas2 </var/lib/pgsql/rollback.sql
become: yes
become_user: postgres

View file

@@ -9,11 +9,11 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: fas2discourse Role
- name: Fas2discourse Role
include_role:
name: fas2discourse
tasks_from: create-keytab
- name: fas2discourse Role
- name: Fas2discourse Role
include_role:
name: fas2discourse
tasks_from: administration-tasks

View file

@@ -14,7 +14,7 @@
user: root
tasks:
- name: find the ID of the last yum transaction
- name: Find the ID of the last yum transaction
shell: yum history package {{ package }} | sed -n 3p | awk -F "|" '{ print $1 }' | tr -d ' '
register: transaction_id
@@ -23,7 +23,7 @@
- debug: var=transaction_id.stdout
when: transaction_id.stderr == ""
- name: get info on that transaction
- name: Get info on that transaction
command: yum history info {{ transaction_id.stdout }}
register: transaction_info
when: transaction_id.stderr == ""

View file

@@ -12,7 +12,7 @@
tasks:
- name: add the pgpass file
- name: Add the pgpass file
copy:
content: |
db01{{ env_suffix }}.iad2.fedoraproject.org:*:maubot:maubot:{{ (env == 'production')|ternary(maubot_prod_db_password, maubot_stg_db_password) }}
@@ -21,18 +21,18 @@
group: root
mode: "0400"
- name: install the required package
- name: Install the required package
dnf:
state: installed
name: python3-psycopg2
- name: install the import script
- name: Install the import script
copy:
src: "{{ files }}/zodbot/karma-to-cookies-db.py"
dest: /usr/local/bin/karma-to-cookies-db
mode: "0755"
- name: run the import script
- name: Run the import script
command:
argv:
- /usr/local/bin/karma-to-cookies-db

View file

@@ -3,7 +3,7 @@
# NOTE: most of these vars_path come from group_vars/kernel-qa or from hostvars
---
- name: make kernel-qa
- name: Make kernel-qa
hosts: kernel_qa
user: root
gather_facts: true

View file

@@ -1,5 +1,5 @@
---
- name: be quiet please...
- name: Be quiet please...
hosts: notifs_backend:notifs_backend_stg
user: root
vars_files:
@@ -8,7 +8,7 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: tell nagios to shush.
- name: Tell nagios to shush.
nagios: action=downtime minutes=15 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -1,15 +1,15 @@
---
- name: backup IPA data for testing
- name: Backup IPA data for testing
hosts: ipa_stg
# vars_files:
# - /srv/web/infra/ansible/vars/global.yml
# - "/srv/private/ansible/vars.yml"
# - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: create backup of FreeIPA server data
- name: Create backup of FreeIPA server data
command: ipa-backup
- name: make the latest backup available under a fixed name
- name: Make the latest backup available under a fixed name
shell:
cmd: ln -snf $(ls -1t ipa-full* | head -n 1) ipa-full-latest
chdir: /var/lib/ipa/backup

View file

@@ -1,10 +1,10 @@
---
- name: restore latest IPA backup for testing
- name: Restore latest IPA backup for testing
hosts: ipa_stg
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
# - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: restore latest backup of FreeIPA server data
- name: Restore latest backup of FreeIPA server data
command: ipa-restore /var/lib/ipa/backup/ipa-full-latest -p {{ ipa_admin_password }}

View file

@@ -93,7 +93,7 @@
- kvm_deploy
when: inventory_hostname in groups['ocp_vms_stg'] or inventory_hostname in groups['ocp_vms']
- block:
- name: ensure the lv for the guest is made
- name: Ensure the lv for the guest is made
lvol: lv={{ inventory_hostname }} vg={{ volgroup }} size={{ lvm_size }} state=present
delegate_to: "{{ vmhost }}"
tags:

View file

@@ -1,5 +1,5 @@
---
- name: restart worker services on openQA worker hosts (production)
- name: Restart worker services on openQA worker hosts (production)
hosts: openqa_workers:openqa_lab_workers
user: root
vars_files:
@@ -10,6 +10,6 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: restart all the worker services
- name: Restart all the worker services
service: name=openqa-worker@{{ item }} state=restarted
with_sequence: "count={{ openqa_workers }}"

View file

@@ -4,7 +4,7 @@
#
---
- name: make qadevel server
- name: Make qadevel server
hosts: qadevel:qadevel_stg
user: root
gather_facts: false
@@ -20,7 +20,7 @@
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: make the box be real
- name: Make the box be real
hosts: qadevel:qadevel_stg
user: root
gather_facts: true

View file

@@ -22,7 +22,7 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: rsync each site in to place
- name: Rsync each site in to place
command: /usr/bin/rsync --delete -a --no-owner --no-group sundries01::{{item}}/ /srv/web/{{item}}/
with_items:
- getfedora.org

View file

@@ -6,7 +6,7 @@
# need to be periodically updated with new things.
---
- name: restart fedmsg-gateway instances
- name: Restart fedmsg-gateway instances
hosts: fedmsg_gateways:fedmsg_gateways_stg
user: root
gather_facts: false
@@ -17,10 +17,10 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: bounce the fedmsg-gateway service
- name: Bounce the fedmsg-gateway service
service: name=fedmsg-gateway state=restarted
- name: restart fedmsg-relay instances
- name: Restart fedmsg-relay instances
hosts: fedmsg_relays:fedmsg_relays_stg
user: root
gather_facts: false
@@ -31,10 +31,10 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: bounce the fedmsg-relay service
- name: Bounce the fedmsg-relay service
service: name=fedmsg-relay state=restarted
- name: restart fedmsg-irc instances
- name: Restart fedmsg-irc instances
hosts: fedmsg_ircs:fedmsg_ircs_stg
user: root
gather_facts: false
@@ -45,10 +45,10 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: bounce the fedmsg-irc service
- name: Bounce the fedmsg-irc service
service: name=fedmsg-irc state=restarted
- name: tell nagios to be quiet about FMN for the moment
- name: Tell nagios to be quiet about FMN for the moment
hosts: notifs_backend:notifs_backend_stg
user: root
gather_facts: false
@@ -59,14 +59,14 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: schedule a 25 minute downtime. give notifs backend time to start up.
- name: Schedule a 25 minute downtime. give notifs backend time to start up.
nagios: action=downtime minutes=25 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
# - name: bounce the fmn-digests service
# - name: Bounce the fmn-digests service
# service: name=fmn-digests@1 state=restarted
- name: restart fedmsg-hub instances
- name: Restart fedmsg-hub instances
hosts: fedmsg_hubs:fedmsg_hubs_stg
user: root
gather_facts: false
@@ -77,10 +77,10 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: bounce the fedmsg-hub service
- name: Bounce the fedmsg-hub service
service: name=fedmsg-hub state=restarted
- name: restart moksha-hub instances
- name: Restart moksha-hub instances
hosts: moksha_hubs:moksha_hubs_stg
user: root
gather_facts: false
@@ -91,5 +91,5 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: bounce the moksha-hub service
- name: Bounce the moksha-hub service
service: name=moksha-hub state=restarted

View file

@@ -1,5 +1,5 @@
---
- name: reload the frontend
- name: Reload the frontend
hosts: pagure:pagure_stg
user: root
vars_files:
@@ -16,7 +16,7 @@
service: name="httpd" state=reloaded
post_tasks:
- name: tell nagios to unshush w.r.t. apache
- name: Tell nagios to unshush w.r.t. apache
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -15,7 +15,7 @@
# TODO -- other arches than x86_64?
---
- name: batch sign and import a directory full of rpms
- name: Batch sign and import a directory full of rpms
user: root
hosts: localhost
connection: local
@@ -40,34 +40,34 @@
# TODO -- I'd also like to fail if rpmdir does not exist.
# TODO -- I'd also like to fail if there are no *.rpm files in there.
- name: sign all the rpms with our gpg key
- name: Sign all the rpms with our gpg key
shell: /bin/rpm --resign {{ rpmdir }}/*.rpm
- name: make a directory where we store the rpms afterwards
- name: Make a directory where we store the rpms afterwards
file: path={{ rpmdir }}-old state=directory
- name: copy the source rpms to the SRPMS dir of {{ repodir }}
- name: Copy the source rpms to the SRPMS dir of {{ repodir }}
copy: src={{ item }} dest={{ repodir }}/SRPMS/
with_fileglob:
- "{{ rpmdir }}/*.src.rpm"
- name: move processed srpms out to {{ rpmdir }}-old
- name: Move processed srpms out to {{ rpmdir }}-old
command: /bin/mv {{ item }} {{ rpmdir }}-old/
when: not testing
with_fileglob:
- "{{ rpmdir }}/*.src.rpm"
- name: copy the binary rpms to the x86_64 dir of {{ repodir }}
- name: Copy the binary rpms to the x86_64 dir of {{ repodir }}
copy: src={{ item }} dest={{ repodir }}/x86_64/
with_fileglob:
- "{{ rpmdir }}/*.rpm"
- name: copy the binary rpms to the i386 dir of {{ repodir }}
- name: Copy the binary rpms to the i386 dir of {{ repodir }}
copy: src={{ item }} dest={{ repodir }}/i386/
with_fileglob:
- "{{ rpmdir }}/*.rpm"
- name: move processed rpms out to {{ rpmdir }}-old
- name: Move processed rpms out to {{ rpmdir }}-old
command: /bin/mv {{ item }} {{ rpmdir }}-old/
when: not testing
with_fileglob:

View file

@@ -7,7 +7,7 @@
# Please check with rel-eng before doing anything here.
---
- name: make sign-vault server vm (secondary and stg only)
- name: Make sign-vault server vm (secondary and stg only)
hosts: sign-vault01.stg.iad2.fedoraproject.org
user: root
gather_facts: false
@@ -23,7 +23,7 @@
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: make sign vault server
- name: Make sign vault server
hosts: sign_vault
user: root
gather_facts: true

View file

@@ -1,6 +1,6 @@
# This playbook syncs the production bodhi instance with staging.
---
- name: bring staging services down (httpd)
- name: Bring staging services down (httpd)
hosts: bodhi2_stg
user: root
vars_files:
@@ -11,7 +11,7 @@
tasks:
- service: name=httpd state=stopped
- name: bring staging services down (OpenShift web services)
- name: Bring staging services down (OpenShift web services)
hosts: os-control01.stg.iad2.fedoraproject.org
user: root
vars_files:
@@ -24,7 +24,7 @@
- command: oc -n bodhi scale dc/bodhi-consumer --replicas=0
- command: oc -n bodhi scale dc/bodhi-celery --replicas=0
- name: bring staging services down (messaging)
- name: Bring staging services down (messaging)
hosts: bodhi_backend_stg
user: root
vars_files:
@@ -41,7 +41,7 @@
- bodhi-celery
# Here's the meaty part in the middle
- name: drop and re-create the staging db entirely
- name: Drop and re-create the staging db entirely
hosts: db01.stg.iad2.fedoraproject.org
user: root
become: yes
@@ -66,7 +66,7 @@
shell: cat /var/tmp/bodhi2.dump | psql bodhi2
- file: path=/var/tmp/bodhi2.dump state=absent
- name: bring staging services up (OpenShift web services)
- name: Bring staging services up (OpenShift web services)
hosts: os-control01.stg.iad2.fedoraproject.org
user: root
vars_files:
@@ -79,7 +79,7 @@
- command: oc -n bodhi scale dc/bodhi-consumer --replicas=1
- command: oc -n bodhi scale dc/bodhi-celery --replicas=1
- name: bring staging services up (httpd)
- name: Bring staging services up (httpd)
hosts: bodhi2_stg
user: root
vars_files:
@@ -90,7 +90,7 @@
tasks:
- service: name=httpd state=started
- name: bring staging services up (messaging)
- name: Bring staging services up (messaging)
hosts: bodhi_backend_stg
user: root
vars_files:

View file

@@ -15,7 +15,7 @@
# db => The database name on both database server (must be the same)
---
- name: dump the prod db out
- name: Dump the prod db out
hosts: "{{ dbhost }}.iad2.fedoraproject.org"
user: root
become: yes
@@ -40,7 +40,7 @@
dest: /var/tmp/
flat: yes
- name: bring staging services down
- name: Bring staging services down
hosts: "{{ server or 'batcave01.iad2.fedoraproject.org' }}"
user: root
any_errors_fatal: false
@@ -52,11 +52,11 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: stop apache
- name: Stop apache
service: name=httpd state=stopped
when: (server is defined) and (server|length > 0)
- name: drop and re-create the staging db entirely
- name: Drop and re-create the staging db entirely
hosts: "{{ dbhost }}.stg.iad2.fedoraproject.org"
user: root
become: yes
@@ -72,7 +72,7 @@
tasks:
# push dump to stg from batcave
- name: push the DB dump from batcave to the dbhost in stg
- name: Push the DB dump from batcave to the dbhost in stg
copy:
src: /var/tmp/{{ db }}.dump.xz
dest: /var/tmp/{{ db }}.dump.xz
@@ -91,7 +91,7 @@
- name: Import the prod db. This will take quite a while. Go get a snack!
shell: cat /var/tmp/{{ db }}.dump | psql {{ db }}
- name: bring staging services up
- name: Bring staging services up
hosts: "{{ server or 'batcave01.iad2.fedoraproject.org' }}"
user: root
any_errors_fatal: false
@@ -103,7 +103,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: restart apache
- name: Restart apache
service: name=httpd state=started
when: (server is defined) and (server|length > 0)
@@ -137,5 +137,5 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: remove the DB dump from batcave
- name: Remove the DB dump from batcave
command: rm -f /var/tmp/{{ db }}.dump.xz

View file

@@ -10,7 +10,7 @@
---
- name: bring staging services down
- name: Bring staging services down
hosts: koji_stg
user: root
vars_files:
@@ -25,7 +25,7 @@
- service: name=kojira state=stopped
- name: drop and re-create the staging db entirely
- name: Drop and re-create the staging db entirely
hosts: db-koji01.stg.iad2.fedoraproject.org
user: root
become: yes
@@ -35,17 +35,17 @@
- db-sync
vars:
- builder_groups:
- name: buildvm_stg
- name: Buildvm_stg
arches: i386 x86_64
extra_channels:
- container
- createrepo
- livemedia
- name: buildvm_aarch64_stg
- name: Buildvm_aarch64_stg
arches: aarch64
- name: buildvm_ppc64le_stg
- name: Buildvm_ppc64le_stg
arches: ppc64le
- name: buildvm_s390x_stg
- name: Buildvm_s390x_stg
arches: s390x
# Users allowed to use content generators, only in staging
- cg_users:
@@ -81,12 +81,12 @@
sed '/COPY buildroot_listing /,/\./d' |
mbuffer -q -s 16M -m 512M -l /tmp/mbuffer-sed-to-psql.log |
psql -v ON_ERROR_STOP=1
- name: repoint all the prod rpm entries at the secondary volume (and other stuff)
- name: Repoint all the prod rpm entries at the secondary volume (and other stuff)
shell: psql -1 -v ON_ERROR_STOP=1 koji </var/lib/pgsql/koji-reset-staging.sql
- name: vacuum database
- name: Vacuum database
shell: psql -v ON_ERROR_STOP=1 koji <<<"VACUUM ANALYZE"
- name: bring staging services up
- name: Bring staging services up
hosts: koji_stg
user: root
vars_files:
@@ -99,19 +99,19 @@
- wipe-fs
tasks:
- name: stop nfs-server
- name: Stop nfs-server
service: name=nfs-server state=stopped
- name: unmount /mnt/fedora_koji/koji/ostree
- name: Unmount /mnt/fedora_koji/koji/ostree
mount: name=/mnt/fedora_koji/koji/ostree state=unmounted
- name: unmount /mnt/fedora_koji/koji/compose/ostree
- name: Unmount /mnt/fedora_koji/koji/compose/ostree
mount: name=/mnt/fedora_koji/koji/compose/ostree state=unmounted
- name: unmount /mnt/fedora_koji
- name: Unmount /mnt/fedora_koji
mount: name=/mnt/fedora_koji state=unmounted
- name: wipe fedora_koji volume
- name: Wipe fedora_koji volume
filesystem: dev=/dev/GuestVolGroup00/fedora_koji fstype=xfs force=yes
- name: mount newly-created empty koji filesystem
- name: Mount newly-created empty koji filesystem
mount: name=/mnt/fedora_koji src=/dev/GuestVolGroup00/fedora_koji fstype=xfs state=mounted
- name: recreate koji directory structure
- name: Recreate koji directory structure
file: name={{item}} state=directory owner=apache group=apache setype=httpd_sys_rw_content_t
with_items:
- /mnt/fedora_koji/koji
@@ -124,16 +124,16 @@
- /mnt/koji/vol
- /mnt/koji/work
# TODO recreate directories under /mnt/koji/compose (which ones?) that composer expects to exist
- name: symlink production volume
- name: Symlink production volume
file: src=/mnt/fedora_koji_prod/koji dest=/mnt/koji/vol/prod state=link
- name: restart nfs-server
- name: Restart nfs-server
service: name=nfs-server state=started
- name: restart httpd
- name: Restart httpd
service: name=httpd state=started
- name: restart kojira
- name: Restart kojira
service: name=kojira state=started
- name: remount NFS on builders, composers, bodhi
- name: Remount NFS on builders, composers, bodhi
hosts: builders_stg:releng_compose_stg:bodhi_backend_stg
user: root
vars_files:
@@ -146,13 +146,13 @@
- remount-nfs
tasks:
- name: remount koji nfs
- name: Remount koji nfs
shell: umount /mnt/fedora_koji && mount /mnt/fedora_koji
args:
warn: false
when: "'.s390.' not in inventory_hostname"
- name: restart kojid on staging builders
- name: Restart kojid on staging builders
hosts: builders_stg
user: root
vars_files:
@@ -165,7 +165,7 @@
- restart-kojid
tasks:
- name: restart kojid
- name: Restart kojid
service: name=kojid state=restarted
# TODO run createrepo for selected tags

View file

@@ -7,7 +7,7 @@
# production Koschei database dump, but this is not needed for now.
---
- name: sync staging Koschei with production
- name: Sync staging Koschei with production
hosts: os_control_stg[0]
user: root
vars_files:

View file

@@ -3,7 +3,7 @@
---
- name: bring staging services down
- name: Bring staging services down
hosts: mailman-stg
user: root
vars_files:
@@ -19,7 +19,7 @@
- service: name=mailman3 state=stopped
- name: drop and re-create the staging dbs entirely
- name: Drop and re-create the staging dbs entirely
hosts: db01.stg.iad2.fedoraproject.org
user: root
become: yes
@@ -80,7 +80,7 @@
file: path=/var/tmp/prod-mailman-dump/ state=absent
- name: convert the data to staging and bring services back up. This will take a while too.
- name: Convert the data to staging and bring services back up. This will take a while too.
hosts: mailman-stg
user: root
vars_files:

View file

@@ -1,5 +1,5 @@
---
- name: run fasjson playbook on bastion for alias changes
- name: Run fasjson playbook on bastion for alias changes
hosts: bastion
user: root
gather_facts: true

View file

@@ -7,7 +7,7 @@
# --extra-vars="target='host1:host2' package='python-t*' testing=True"
---
- name: push packages out
- name: Push packages out
hosts: "{{target}}"
user: root
@@ -16,26 +16,26 @@
tasks:
- name: yum update {{ package }} from main repo
- name: Yum update {{ package }} from main repo
yum: name="{{ package }}" state=latest update_cache=yes
when: not testing and ansible_distribution_major_version|int < 8 and ansible_distribution == 'RedHat'
- name: yum update {{ package }} from testing repo
- name: Yum update {{ package }} from testing repo
yum: name="{{ package }}" state=latest enablerepo=infrastructure-tags-stg update_cache=yes
when: testing and ansible_distribution_major_version|int < 8 and ansible_distribution == 'RedHat'
- name: dnf clean all (since we can't do it when updating)
- name: Dnf clean all (since we can't do it when updating)
command: dnf clean all
when: not testing and ansible_distribution_major_version|int > 21 and ansible_distribution == 'Fedora'
- name: dnf update {{ package }} from main repo
- name: Dnf update {{ package }} from main repo
dnf: name="{{ package }}" state=latest
when: not testing and ansible_distribution_major_version|int > 21 and ansible_distribution == 'Fedora'
- name: dnf clean all (since we can't do it when updating)
- name: Dnf clean all (since we can't do it when updating)
command: dnf clean all --enablerepo=infrastructure-tags-stg
when: testing and ansible_distribution_major_version|int > 21 and ansible_distribution == 'Fedora'
- name: dnf update {{ package }} from testing repo
- name: Dnf update {{ package }} from testing repo
dnf: name="{{ package }}" state=latest enablerepo=infrastructure-tags-stg
when: testing and ansible_distribution_major_version|int > 21 and ansible_distribution == 'Fedora'

View file

@@ -1,5 +1,5 @@
---
- name: check to see if a compose is going on before we do anything...
- name: Check to see if a compose is going on before we do anything...
hosts: bodhi_backend:bodhi_backend_stg
user: root
vars_files:
@@ -20,7 +20,7 @@
any_errors_fatal: true
when: "composes.stdout != '{\"composes\": []}' and env != 'staging'"
- name: push packages out
- name: Push packages out
hosts: bodhi_backend:bodhi_backend_stg
user: root
vars_files:
@@ -31,14 +31,14 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: update bodhi-server packages from main repo
- name: Update bodhi-server packages from main repo
dnf:
name:
- bodhi-server
- bodhi-composer
state: latest
update_cache: true
- name: update bodhi-server packages from testing repo
- name: Update bodhi-server packages from testing repo
dnf:
name:
- bodhi-server
@@ -48,7 +48,7 @@
update_cache: true
when: testing
- name: find out if there are migrations to run
- name: Find out if there are migrations to run
hosts: bodhi_backend:bodhi_backend_stg
user: root
vars_files:
@@ -72,7 +72,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush w.r.t. the frontend
- name: Tell nagios to shush w.r.t. the frontend
nagios: action=downtime minutes=15 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
@@ -85,7 +85,7 @@
command: oc -n bodhi scale dc/bodhi-web --replicas=0
when: migrations
- name: verify the backends, stop them, and then upgrade the db
- name: Verify the backends, stop them, and then upgrade the db
hosts: bodhi_backend:bodhi_backend_stg
user: root
vars_files:
@@ -96,7 +96,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush w.r.t. the backend
- name: Tell nagios to shush w.r.t. the backend
nagios: action=downtime minutes=15 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
@@ -127,12 +127,12 @@
- bodhi-celery
post_tasks:
- name: tell nagios to unshush w.r.t. the backend
- name: Tell nagios to unshush w.r.t. the backend
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
- name: restart the frontend
- name: Restart the frontend
hosts: os_control[0]:os_control_stg[0]
user: root
vars_files:
@@ -169,7 +169,7 @@
when: env != "staging"
post_tasks:
- name: tell nagios to unshush w.r.t. the frontend
- name: Tell nagios to unshush w.r.t. the frontend
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -1,5 +1,5 @@
---
- name: push packages out
- name: Push packages out
hosts: bugzilla2fedmsg:bugzilla2fedmsg_stg
user: root
vars_files:
@@ -12,17 +12,17 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
- name: Clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
command: yum clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
check_mode: no
- name: update bugzilla2fedmsg packages from main repo
- name: Update bugzilla2fedmsg packages from main repo
package: name="python-bugzilla2fedmsg" state=latest
when: not testing
- name: update bugzilla2fedmsg packages from testing repo
- name: Update bugzilla2fedmsg packages from testing repo
yum: name="python-bugzilla2fedmsg" state=latest enablerepo=infrastructure-tags-stg
when: testing
- name: verify the backend and restart it
- name: Verify the backend and restart it
hosts: bugzilla2fedmsg:bugzilla2fedmsg_stg
user: root
vars_files:
@@ -33,7 +33,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush
- name: Tell nagios to shush
nagios: action=downtime minutes=60 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
@@ -43,7 +43,7 @@
post_tasks:
- service: name="moksha-hub" state=restarted
- name: tell nagios to unshush
- name: Tell nagios to unshush
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -1,5 +1,5 @@
---
- name: push packages out
- name: Push packages out
hosts: datagrepper:datagrepper_stg
user: root
vars_files:
@@ -12,17 +12,17 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
- name: Clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
command: yum clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
check_mode: no
- name: yum update datagrepper packages from main repo
- name: Yum update datagrepper packages from main repo
package: name="datagrepper" state=latest
when: not testing
- name: yum update datagrepper packages from testing repo
- name: Yum update datagrepper packages from testing repo
package: name="datagrepper" state=latest enablerepo=infrastructure-tags-stg
when: testing
- name: verify the config and restart it
- name: Verify the config and restart it
hosts: datagrepper:datagrepper_stg
user: root
vars_files:
@@ -33,7 +33,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush
- name: Tell nagios to shush
nagios: action=downtime minutes=60 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
@@ -44,7 +44,7 @@
post_tasks:
- service: name="httpd" state=restarted
- name: tell nagios to unshush
- name: Tell nagios to unshush
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -1,5 +1,5 @@
---
- name: push packages out
- name: Push packages out
hosts:
- fedmsg-hubs
- fedmsg-hubs-stg
@@ -34,14 +34,14 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
- name: Clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
command: yum clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
check_mode: no
- name: yum update fedmsg packages from the main repo
- name: Yum update fedmsg packages from the main repo
package: name={{item}} state=latest
when: not testing
with_items: "{{packages}}"
- name: yum update fedmsg packages from testing repo
- name: Yum update fedmsg packages from testing repo
package: name={{item}} state=latest enablerepo=infrastructure-tags-stg
when: testing
with_items: "{{packages}}"
@@ -50,7 +50,7 @@
# - import_tasks: "{{tasks_path}}../restart-fedmsg-services.yml"
# Also restart the frontend web services
- name: bounce apache
- name: Bounce apache
hosts: datagrepper:datagrepper_stg
user: root
vars_files:

View file

@@ -24,7 +24,7 @@
# - nagios outage stuff didn't seem to work as well as we would want last time.
---
- name: preliminary tasks
- name: Preliminary tasks
hosts: koji:koji_stg
user: root
vars_files:
@@ -33,26 +33,26 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: ask admin if an outage ticket was created.
- name: Ask admin if an outage ticket was created.
pause: seconds=30 prompt="Did you file an outage ticket before running this?"
- name: ask admin if an outage ticket was reviewed.
- name: Ask admin if an outage ticket was reviewed.
pause: seconds=30 prompt="Did you have someone review that outage ticket? This is koji we're talking about here..."
- name: ask admin if an outage ticket was announced.
- name: Ask admin if an outage ticket was announced.
pause: seconds=30 prompt="Did you send the outage announcement to devel-announce? People need to know."
- name: ask admin if no db upgrade script is ok.
- name: Ask admin if no db upgrade script is ok.
pause: seconds=30 prompt="You didn't specify a db_upgrade_file extra-var. Are you sure there is no db upgrade required?"
when: db_upgrade_file is undefined
- name: tell nagios that everything is fine
- name: Tell nagios that everything is fine
nagios: action=downtime minutes=30 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
- name: stop httpd on the koji-hubs.
- name: Stop httpd on the koji-hubs.
service: name="httpd" state=stopped
- name: run commands on the database host.
- name: Run commands on the database host.
# Note that the hosts are used explicitly here to choose only the "primary".
# We don't want to run upgrades on both pgbdr nodes at the same time.
# ... is anything special needed to upgrade pgbdr nodes?
@@ -79,7 +79,7 @@
package: name=koji state=absent
when: db_upgrade_file is defined
- name: update and restart the koji hubs before we touch the builders
- name: Update and restart the koji hubs before we touch the builders
hosts: koji:koji_stg
user: root
vars_files:
@@ -88,14 +88,14 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- package: name=koji-hub state=latest update_cache=yes
- name: restart httpd on the koji-hubs.
- name: Restart httpd on the koji-hubs.
service: name="httpd" state=started
- name: unsilence nagios
- name: Unsilence nagios
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
- name: update and restart the koji builders, now that we're done with the hubs
- name: Update and restart the koji builders, now that we're done with the hubs
hosts: builders:builders_stg
user: root
vars_files:
@@ -104,5 +104,5 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- package: name=koji-builder state=latest update_cache=yes
- name: restart all the builders. so many.
- name: Restart all the builders. so many.
service: name="kojid" state=restarted

View file

@@ -1,5 +1,5 @@
---
- name: push packages out
- name: Push packages out
hosts: value:value_stg
user: root
vars_files:
@@ -12,17 +12,17 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
- name: Clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
command: yum clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
check_mode: no
- name: update mote packages from main repo
- name: Update mote packages from main repo
package: name="mote" state=latest
when: not testing
- name: update mote packages from testing repo
- name: Update mote packages from testing repo
yum: name="mote" state=latest enablerepo=infrastructure-tags-stg
when: testing
- name: verify the config and restart it
- name: Verify the config and restart it
hosts: value:value_stg
user: root
vars_files:
@@ -33,7 +33,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush
- name: Tell nagios to shush
nagios: action=downtime minutes=60 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
@@ -44,7 +44,7 @@
post_tasks:
- service: name="httpd" state=restarted
- service: name="mote-updater" state=restarted
- name: tell nagios to unshush
- name: Tell nagios to unshush
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -1,5 +1,5 @@
---
- name: push packages out
- name: Push packages out
hosts: packages:packages_stg
user: root
vars_files:
@@ -12,17 +12,17 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
- name: Clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
command: dnf clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
check_mode: no
- name: update fedora-packages packages from main repo
- name: Update fedora-packages packages from main repo
package: name="fedora-packages" state=latest
when: not testing
- name: update fedora-packages packages from testing repo
- name: Update fedora-packages packages from testing repo
dnf: name="fedora-packages" state=latest enablerepo=infrastructure-tags-stg
when: testing
- name: verify the config and restart it
- name: Verify the config and restart it
hosts: packages:packages_stg
user: root
vars_files:
@@ -33,7 +33,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush
- name: Tell nagios to shush
nagios: action=downtime minutes=60 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
@@ -48,7 +48,7 @@
- service: name="httpd" state=started
- service: name="fedmsg-hub" state=started
- name: tell nagios to unshush
- name: Tell nagios to unshush
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@@ -6,7 +6,7 @@
# Main task to upgrade pagure
---
- name: upgrade pagure
- name: Upgrade pagure
hosts: pagure:pagure_stg
user: root
vars_files:
@@ -19,7 +19,7 @@
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush w.r.t. the frontend
- name: Tell nagios to shush w.r.t. the frontend
nagios:
action=downtime
minutes=15
@ -33,13 +33,13 @@
##
tasks:
- name: yum update pagure packages from main repo
- name: Yum update pagure packages from main repo
yum: name="pagure*"
state=latest
update_cache=yes
when: not testing
- name: yum update pagure* packages from testing repo
- name: Yum update pagure* packages from testing repo
yum: name="pagure*"
state=latest
enablerepo=infrastructure-tags-stg
@ -47,7 +47,7 @@
when: testing
# This is needed so the alembic will work.
- name: stop the general pagure workers
- name: Stop the general pagure workers
service:
name: "{{ item }}"
state: stopped
@ -60,7 +60,7 @@
- pagure_webhook
- pagure_worker
- name: stop the specific workers on pagure.io
- name: Stop the specific workers on pagure.io
service:
name: "{{ item }}"
state: stopped
@ -83,7 +83,7 @@
post_tasks:
- name: start the general pagure workers
- name: Start the general pagure workers
service:
name: "{{ item }}"
state: started
@ -96,7 +96,7 @@
- pagure_webhook
- pagure_worker
- name: start_pagure_workers on pagure servers
- name: Start the specific workers on pagure servers
service:
name: "{{ item }}"
state: started
@ -115,7 +115,7 @@
##
- name: tell nagios to unshush w.r.t. the frontend
- name: Tell nagios to unshush w.r.t. the frontend
nagios:
action=unsilence
service=host

View file

@ -4,7 +4,7 @@
# this seemed like an obvious cheat
---
- name: rdiff-backup
- name: Rdiff-backup
hosts: backup_clients
user: root
gather_facts: false
@ -20,17 +20,17 @@
- global_backup_targets: ['/etc', '/home']
tasks:
- name: run rdiff-backup hitting all the global targets
- name: Run rdiff-backup hitting all the global targets
local_action: "shell rdiff-backup --remote-schema 'ssh -p {{ ansible_port|default(22) }} -C %s rdiff-backup --server' --create-full-path --print-statistics {{ inventory_hostname }}::{{ item }} /fedora_backups/{{ inventory_hostname }}/`basename {{ item }}` | mail -r sysadmin-backup-members@fedoraproject.org -s 'rdiff-backup: {{ inventory_hostname }}:{{ item }}' sysadmin-backup-members@fedoraproject.org"
with_items: '{{ global_backup_targets }}'
when: global_backup_targets is defined
- name: copy new database dumps into the backup server database dir
- name: Copy new database dumps into the backup server database dir
local_action: "shell rsync -a {{ inventory_hostname }}:{{ item }}/ /fedora_backups/databases/{{ inventory_hostname }}/"
with_items: '{{ db_backup_dir }}'
when: db_backup_dir is defined
- name: run rdiff-backup hitting all the host targets
- name: Run rdiff-backup hitting all the host targets
local_action: "shell rdiff-backup --remote-schema 'ssh -p {{ ansible_port|default(22) }} -C %s rdiff-backup --server' --exclude='**git-seed*' --exclude='**git_seed' --exclude='**.snapshot' --create-full-path --print-statistics {{ inventory_hostname }}::{{ item }} /fedora_backups/{{ inventory_hostname }}/`basename {{ item }}` | mail -r sysadmin-backup-members@fedoraproject.org -s 'rdiff-backup: {{ inventory_hostname }}:{{ item }}' sysadmin-backup-members@fedoraproject.org"
with_items: '{{ host_backup_targets }}'
when: host_backup_targets is defined

View file

@ -6,23 +6,23 @@
gather_facts: false
tasks:
- name: listing failed units
- name: List failed units
shell: systemctl list-units --state failed --no-legend | awk '{ print $1 }'
register: listing_failed
- name: check log with journalctl
- name: Check log with journalctl
shell: journalctl -lru {{ item }} -n 50
register: display_log
with_items: "{{ listing_failed.stdout_lines[0:] }}"
- debug: var=listing_failed.stdout_lines[0:]
- name: display log
- name: Display log
debug: var=display_log.stdout_lines
ignore_errors: true
when: display_log is defined
- name: restart failed service
- name: Restart failed service
systemd:
name: "{{ item }}"
state: restarted

View file

@ -1,16 +1,16 @@
# requires --extra-vars="target='host1:host2:group etc'"
---
- name: run rkhunter for times when rkhunter didn't seem to run.
- name: Run rkhunter for times when rkhunter didn't seem to run.
hosts: "{{ target }}"
user: root
tasks:
- name: check for rkhunter
- name: Check for rkhunter
command: /usr/bin/test -f /usr/bin/rkhunter
register: rkhunter
ignore_errors: true
- name: run rkhunter --propupd
- name: Run rkhunter --propupd
command: /usr/bin/rkhunter --propupd
when: rkhunter is success

View file

@ -1,24 +1,24 @@
# requires --extra-vars="target='host1:host2:group etc' yumcommand='update'"
---
- name: update all run rkhunter if installed
- name: Update all and run rkhunter if installed
hosts: "{{ target }}"
user: root
tasks:
- name: expire-caches
- name: Expire yum caches
command: yum clean expire-cache
- name: yum -y {{ yumcommand }}
- name: Run yum -y {{ yumcommand }}
command: yum -y {{ yumcommand }}
async: 7200
poll: 15
- name: check for rkhunter
- name: Check for rkhunter
command: /usr/bin/test -f /usr/bin/rkhunter
register: rkhunter
ignore_errors: true
- name: run rkhunter --propupd
- name: Run rkhunter --propupd
command: /usr/bin/rkhunter --propupd
when: rkhunter is success

View file

@ -3,7 +3,7 @@
#
---
- name: set downtime
- name: Set downtime
hosts: all
gather_facts: false
user: root
@ -11,14 +11,14 @@
tasks:
- name: silence nagios completely
- name: Silence nagios completely
nagios: action=silence
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
failed_when: no
when: nonagios is not defined or not "true" in nonagios
- name: update the world
- name: Update the world
hosts: all
gather_facts: true
user: root
@ -51,16 +51,16 @@
poll: 30
when: package_excludes is defined
- name: run rkhunter if installed
- name: Run rkhunter if installed
hosts: all
user: root
tasks:
- name: check for rkhunter
- name: Check for rkhunter
command: /usr/bin/test -f /usr/bin/rkhunter
register: rkhunter
ignore_errors: true
- name: run rkhunter --propupd
- name: Run rkhunter --propupd
command: /usr/bin/rkhunter --propupd
when: rkhunter is success

View file

@ -1,9 +1,9 @@
---
- name: push dns changes out
- name: Push dns changes out
hosts: dns
user: root
tasks:
- name: push dns changes out
- name: Push dns changes out
command: /usr/local/bin/update-dns

View file

@ -20,34 +20,34 @@
---
- name: find instances
- name: Find instances
hosts: "{{ vhost }}"
user: root
tasks:
- name: get list of guests
- name: Get list of guests
virt: command=list_vms
register: vmlist
- name: add them to myvms_new group
- name: Add them to myvms_new group
local_action: add_host hostname={{ item }} groupname=myvms_new
with_items: "{{ vmlist.list_vms }}"
- name: halt instances
- name: Halt instances
hosts: myvms_new
user: root
serial: 1
tasks:
- name: tell nagios to shush
- name: Tell nagios to shush
nagios: action=silence host={{ inventory_hostname_short }}
delegate_to: noc01.iad2.fedoraproject.org
- name: echo-y
- name: Halt the instance
command: /sbin/halt -p
ignore_errors: true
# if one of them is down we don't care
- name: wait for them to die
- name: Wait for them to die
local_action: wait_for port=22 delay=30 timeout=300 state=stopped host={{ inventory_hostname }}

View file

@ -15,50 +15,50 @@
# TODO: Figure out how to compare virt info pre and post boot.
---
- name: find instances
- name: Find instances
hosts: "{{ target }}"
gather_facts: false
user: root
tasks:
- name: get list of guests
- name: Get list of guests
virt: command=list_vms
register: vmlist
# - name: get info on guests (prereboot)
# - name: Get info on guests (prereboot)
# virt: command=info
# register: vminfo_pre
- name: add them to myvms_new group
- name: Add them to myvms_new group
local_action: add_host hostname={{ item }} groupname=myvms_new
with_items: "{{ vmlist.list_vms }}"
- name: halt instances
- name: Halt instances
hosts: myvms_new
user: root
gather_facts: false
serial: 1
tasks:
- name: halt the vm instances - to poweroff
- name: Halt the vm instances - to poweroff
command: /sbin/shutdown -h 1
ignore_errors: true
# if one of them is down we don't care
- name: wait for the whole set to die.
- name: Wait for the whole set to die.
hosts: myvms_new
gather_facts: false
user: root
tasks:
- name: wait for them to die
- name: Wait for them to die
local_action: wait_for port=22 delay=30 timeout=300 state=stopped host={{ inventory_hostname }}
- name: reboot vhost
- name: Reboot vhost
hosts: "{{ target }}"
gather_facts: false
user: root
tasks:
- name: halt the virthost
- name: Halt the virthost
command: /sbin/shutdown -h 1

View file

@ -5,9 +5,9 @@
#
---
- name: find instances
- name: Find instances
vars_prompt:
- name: target
prompt: What is the target vhost
private: false
hosts: "{{ target }}"
@ -15,21 +15,21 @@
user: root
tasks:
- name: get list of guests
- name: Get list of guests
virt: command=list_vms
register: vmlist
- name: add them to myvms_new group
- name: Add them to myvms_new group
local_action: add_host hostname={{ item }} groupname=myvms_new
with_items: '{{vmlist.list_vms}}'
- name: add the vmhost to target group
- name: Add the vmhost to target group
local_action: add_host hostname={{ target }} groupname=target
# Call out to another playbook. Disable any proxies that may live here
# - include_playbook: update-proxy-dns.yml status=disable proxies=myvms_new:&proxies
- name: set downtime
- name: Set downtime
hosts: "target:myvms_new"
gather_facts: false
user: root
@ -37,14 +37,14 @@
tasks:
- name: schedule regular host downtime
- name: Schedule regular host downtime
nagios: action=downtime minutes=30 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
failed_when: no
when: nonagios is not defined or not "true" in nonagios
- name: update the system
- name: Update the system
hosts: "target:myvms_new"
gather_facts: true
user: root
@ -77,16 +77,16 @@
poll: 30
when: package_excludes is defined
- name: run rkhunter if installed
- name: Run rkhunter if installed
hosts: "target:myvms_new"
user: root
tasks:
- name: check for rkhunter
- name: Check for rkhunter
command: /usr/bin/test -f /usr/bin/rkhunter
register: rkhunter
ignore_errors: true
- name: run rkhunter --propupd
- name: Run rkhunter --propupd
command: /usr/bin/rkhunter --propupd
when: rkhunter is success

View file

@ -27,7 +27,7 @@
special_time: daily
state: present
- name: koops_to_xorg.py
- name: Cron koops_to_xorg.py
cron:
name: "koops_to_xorg.py"
user: faf

View file

@ -1,5 +1,5 @@
---
- name: install postgresql packages
- name: Install postgresql packages
package:
state: present
name:
@ -8,14 +8,14 @@
- postgresql
- pg-semver
- name: install ssl packages for https
- name: Install ssl packages for https
package:
state: present
name:
- openssl
- mod_ssl
- name: memcached rhel
- name: Install memcached packages (RHEL)
package:
state: present
name:
@ -23,7 +23,7 @@
- python3-memcached
when: ansible_distribution == "RedHat" and faf_web_cache_type == "memcached"
- name: memcached fedora
- name: Install memcached packages (Fedora)
package:
state: present
name:

View file

@ -17,10 +17,10 @@
owner: faf
group: faf
- name: create folders where we place certs for fedora-messaging
- name: Create folders where we place certs for fedora-messaging
file: path=/etc/fedora-messaging/faf owner=root group=root mode=0755 state=directory
- name: install certs for fedora-messaging
- name: Install certs for fedora-messaging
copy: src={{ item.src }}
dest=/etc/fedora-messaging/faf/{{ item.dest }}
owner={{ item.owner }} group=root mode={{ item.mode }}
@ -42,61 +42,61 @@
}
# landing page
- name: install abrt-server-info-page
- name: Install abrt-server-info-page
package:
name: abrt-server-info-page
state: latest
- name: configure ADMINS
- name: Configure ADMINS
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'ADMINS ='
line: ' ADMINS = "infrastructure@lists.fedoraproject.org"'
notify: restart httpd
- name: configure MORE_FAF
- name: Configure MORE_FAF
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'MORE_FAF ='
line: ' MORE_FAF = "https://github.com/abrt/faf/"'
notify: restart httpd
- name: configure MORE_RS
- name: Configure MORE_RS
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'MORE_RS ='
line: ' MORE_RS = "https://github.com/abrt/retrace-server"'
notify: restart httpd
- name: configure MORE_ABRT
- name: Configure MORE_ABRT
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'MORE_ABRT ='
line: ' MORE_ABRT = "https://github.com/abrt/abrt/"'
notify: restart httpd
- name: configure MORE_GABRT
- name: Configure MORE_GABRT
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'MORE_GABRT ='
line: ' MORE_GABRT = "https://github.com/abrt/gnome-abrt/"'
notify: restart httpd
- name: configure MORE_LR
- name: Configure MORE_LR
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'MORE_LR ='
line: ' MORE_LR = "https://github.com/abrt/libreport/"'
notify: restart httpd
- name: configure MORE_SATYR
- name: Configure MORE_SATYR
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'MORE_SATYR ='
line: ' MORE_SATYR = "https://github.com/abrt/satyr/"'
notify: restart httpd
- name: configure URL_FOR_FAF
- name: Configure URL_FOR_FAF
lineinfile:
dest: /usr/lib/python3.6/site-packages/abrt-server-info-page/config.py
regexp: 'URL_FOR_FAF ='

View file

@ -1,18 +1,18 @@
---
- name: start and enable httpd
- name: Start and enable httpd
service:
name: httpd
state: started
enabled: yes
- name: start and enable memcached
- name: Start and enable memcached
service:
name: memcached
state: started
enabled: yes
when: faf_web_cache_type == "memcached"
- name: turn off selinux
- name: Turn off selinux
selinux:
state: permissive
policy: targeted

View file

@ -4,77 +4,77 @@
args:
creates: "/var/lib/pgsql/data/PG_VERSION"
- name: set max_connections for PostgreSQL
- name: Set max_connections for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^max_connections ='
line: 'max_connections = 150'
notify: restart postgresql
- name: set shared_buffers for PostgreSQL
- name: Set shared_buffers for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^shared_buffers ='
line: 'shared_buffers = 25536MB'
notify: restart postgresql
- name: set effective_cache_size for PostgreSQL
- name: Set effective_cache_size for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^effective_cache_size ='
line: 'effective_cache_size = 50608MB'
notify: restart postgresql
- name: set work_mem for PostgreSQL
- name: Set work_mem for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^work_mem ='
line: 'work_mem = 6MB'
notify: restart postgresql
- name: set maintenance_work_mem for PostgreSQL
- name: Set maintenance_work_mem for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^maintenance_work_mem ='
line: 'maintenance_work_mem = 2GB'
notify: restart postgresql
- name: set checkpoint_completion_target for PostgreSQL
- name: Set checkpoint_completion_target for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^checkpoint_completion_target ='
line: 'checkpoint_completion_target = 0.9'
notify: restart postgresql
- name: set wal_buffers for PostgreSQL
- name: Set wal_buffers for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^wal_buffers ='
line: 'wal_buffers = -1'
notify: restart postgresql
- name: set default_statistics_target for PostgreSQL
- name: Set default_statistics_target for PostgreSQL
lineinfile:
path: /var/lib/pgsql/data/postgresql.conf
regexp: '^default_statistics_target ='
line: 'default_statistics_target = 100'
notify: restart postgresql
- name: drop faf database
- name: Drop faf database
postgresql_db:
name: faf
owner: postgres
state: absent
when: faf_recreate_database|bool
- name: start service postgresql
- name: Start service postgresql
service:
name: postgresql
state: started
enabled: yes
become: true
- name: pgsql create db faf
- name: Pgsql create db faf
postgresql_db:
name: faf
owner: postgres
@ -82,7 +82,7 @@
become: true
become_user: postgres
- name: pgsql create user faf
- name: Pgsql create user faf
postgresql_user:
db: faf
name: faf
@ -92,7 +92,7 @@
become: true
become_user: postgres
- name: create extension for faf
- name: Create extension for faf
postgresql_ext:
name: semver
db: faf

View file

@ -1,12 +1,12 @@
---
- name: check whether we need to initialize letsencrypt first
- name: Check whether we need to initialize letsencrypt first
stat: path="/etc/letsencrypt/live/{{ item.key }}"
register: le_stat_checks
with_dict: "{{ letsencrypt.certificates }}"
when:
- letsencrypt is defined
- name: stop httpd when letsencrypt has not been run
- name: Stop httpd when letsencrypt has not been run
service:
name: httpd
state: stopped
@ -16,12 +16,12 @@
with_items: "{{ le_stat_checks.results }}"
- name: install letsencrypt ssl certificates for dev
- name: Install letsencrypt ssl certificates for dev
include_role: name=copr/certbot
tags:
- config
- name: install retrace ssl vhost
- name: Install retrace ssl vhost
template: src="httpd/retrace_ssl.conf.j2" dest="/etc/httpd/conf.d/retrace_ssl.conf"
when: letsencrypt is defined
tags:

View file

@ -1,12 +1,12 @@
---
- name: install faf web celery packages
- name: Install faf web celery packages
package:
name: "{{ faf_web_celery_packages }}"
state: present
tags:
- packages
- name: install redis package
- name: Install redis package
package:
name:
- redis
@ -15,7 +15,7 @@
tags:
- packages
- name: enable redis service
- name: Enable redis service
service:
name: redis
state: started
@ -23,7 +23,7 @@
tags:
- service
- name: enable faf-celery-worker
- name: Enable faf-celery-worker
service:
name: faf-celery-worker
state: started
@ -31,7 +31,7 @@
tags:
- service
- name: enable faf-celery-beat
- name: Enable faf-celery-beat
service:
name: faf-celery-beat
state: started

View file

@ -22,14 +22,14 @@
become_user: faf
changed_when: false
- name: cpf
- name: Copy sample ureport into the incoming reports dir
copy:
src: ureport_sample
dest: "{{ faf_spool_dir }}/reports/incoming"
owner: faf
group: faf
- name: faf
- name: Run faf {{ item }}
command: faf {{ item }}
become: yes
become_user: faf

View file

@ -6,7 +6,7 @@
main_url: "https://{{ ansible_default_ipv4.address }}{{ url_suffix }}"
problems_url: "https://{{ ansible_default_ipv4.address }}{{ url_suffix }}/problems/"
- name: check main
- name: Check main
uri:
url: "{{ main_url }}"
return_content: yes
@ -16,7 +16,7 @@
delegate_to: localhost
failed_when: "'ABRT' not in uri_res.content"
- name: fetch problems
- name: Fetch problems
uri:
url: "{{ problems_url }}"
return_content: yes

View file

@ -1,5 +1,5 @@
---
- name: remove EOLed opsys
- name: Remove EOLed opsys
command: faf releasemod -o "{{ item.opsys | lower }}" --opsys-release "{{ item.release }}" -s EOL
loop: "{{ eol_opsys }}"
become: yes
@ -7,7 +7,7 @@
failed_when: false
changed_when: false
- name: remove EOLed packages
- name: Remove EOLed packages
command: faf cleanup-packages "{{ item.opsys }}" "{{ item.release }}"
loop: "{{ eol_opsys }}"
become: yes
@ -15,7 +15,7 @@
failed_when: false
changed_when: false
- name: remove unassigned packages
- name: Remove unassigned packages
command: faf cleanup-unassigned -f
become: yes
become_user: faf

View file

@ -1,12 +1,12 @@
---
- name: provide /etc/faf/faf.conf
- name: Provide /etc/faf/faf.conf
template:
src: etc-faf-faf.conf.j2
dest: /etc/faf/faf.conf
# setup fedora-messaging
- name: create the config folder for fedora-messaging
- name: Create the config folder for fedora-messaging
file:
path: /etc/fedora-messaging/
owner: root
@ -15,7 +15,7 @@
state: directory
when: faf_with_fedmsg|bool
- name: provide configuration for fedora-messaging
- name: Provide configuration for fedora-messaging
template:
src: etc-fedora-messaging-config.toml.j2
dest: /etc/fedora-messaging/config.toml

View file

@ -1,6 +1,6 @@
---
- name: cron save-reports
- name: Cron save-reports
cron:
name: "faf save-reports"
user: faf
@ -8,7 +8,7 @@
minute: "*/5"
state: present
- name: cron create-problems-speedup
- name: Cron create-problems-speedup
cron:
name: "faf create-problems-speedup with type {{ item }}"
user: faf
@ -23,7 +23,7 @@
- "ruby"
- "java"
- name: cron create-problems
- name: Cron create-problems
cron:
name: "faf create-problems with type {{ item }}"
user: faf
@ -39,7 +39,7 @@
- "ruby"
- "java"
- name: cron reposync
- name: Cron reposync
cron:
name: "faf reposync"
user: faf
@ -48,7 +48,7 @@
hour: "3"
state: present
- name: retrace symbols
- name: Retrace symbols
cron:
name: "retrace symbols with type {{ item.type }}"
user: faf
@ -61,7 +61,7 @@
- { type: "core", day: "2,4,6" }
- { type: "kerneloops", day: "1,3,5" }
- name: cron - faf find-crashfn
- name: Cron - faf find-crashfn
cron:
name: "cron for faf find-crashfn for {{ item }}"
user: faf
@ -76,7 +76,7 @@
- "ruby"
- "java"
- name: cron pull-releases
- name: Cron pull-releases
cron:
name: "faf pull-releases {{ item }}"
user: faf
@ -86,7 +86,7 @@
state: present
loop: "{{ faf_opsys_list }}"
- name: cron pull-components
- name: Cron pull-components
cron:
name: "faf pull-components {{ item }}"
user: faf
@ -96,7 +96,7 @@
state: present
loop: "{{ faf_opsys_list }}"
- name: cron - faf find-components
- name: Cron - faf find-components
cron:
name: "cron for faf find-components -o {{ item }}"
user: faf
@ -106,7 +106,7 @@
state: present
loop: "{{ faf_opsys_list }}"
- name: cron - faf match-unknown-packages
- name: Cron - faf match-unknown-packages
cron:
name: "cron for faf match-unknown-packages"
user: faf

View file

@ -1,6 +1,6 @@
---
- name: check for count of faf tables
- name: Check for count of faf tables
shell: psql -c "SELECT COUNT(*) FROM pg_stat_user_tables"
register: count_tables
changed_when: "( count_tables.stdout_lines[2]|int ) == 0"
@ -10,17 +10,17 @@
# Skip whole block if faf owns at least 1 table in db
- block:
- name: create faf's database schema
- name: Create faf's database schema
command: faf-migrate-db --create-all
become: yes
become_user: faf
- name: stamp database as migrated to latest version
- name: Stamp database as migrated to latest version
command: faf-migrate-db --stamp-only
become: yes
become_user: faf
- name: init faf
- name: Init faf
command: faf init
become: yes
become_user: faf

View file

@ -1,62 +1,62 @@
---
- name: enable Copr repo for RHEL
- name: Enable Copr repo for RHEL
copy:
src: group_abrt-faf-el8-epel-8.repo
dest: /etc/yum.repos.d/
when: ansible_distribution == 'RedHat'
- name: enable Copr repo for Fedora
- name: Enable Copr repo for Fedora
copy:
src: group_abrt-faf-el8-fedora.repo
dest: /etc/yum.repos.d/
when: ansible_distribution == 'Fedora'
- name: erase faf packages
- name: Erase faf packages
package:
name: "faf-*"
state: absent
when: faf_force_reinstall|bool
- name: install core faf packages
- name: Install core faf packages
package:
name: "{{ faf_packages }}"
state: present
- name: install faf problem packages
- name: Install faf problem packages
package:
name: "{{ faf_problem_packages }}"
state: present
- name: install faf opsys packages
- name: Install faf opsys packages
package:
name: "{{ faf_opsys_packages }}"
state: present
- name: install faf action packages
- name: Install faf action packages
package:
name: "{{ faf_action_packages }}"
state: present
- name: install faf bugtracker packages
- name: Install faf bugtracker packages
package:
name: "{{ faf_bugtracker_packages }}"
state: present
when: faf_with_bugtrackers|bool
- name: install faf celery packages
- name: Install faf celery packages
package:
name: "{{ faf_celery_packages }}"
state: present
when: faf_with_celery|bool
- name: install faf fedmsg packages
- name: Install faf fedmsg packages
package:
name: "{{ faf_fedmsg_packages }}"
state: present
when: faf_with_fedmsg|bool
- name: install faf solutionfinder packages
- name: Install faf solutionfinder packages
package:
name: "{{ faf_solutionfinder_packages }}"
state: present

View file

@ -1,5 +1,5 @@
---
- name: run database migrations
- name: Run database migrations
command: faf-migrate-db
become: yes
become_user: faf

View file

@ -1,5 +1,5 @@
---
- name: update faf packages
- name: Update faf packages
package:
name: "faf*"
state: latest

View file

@ -9,7 +9,7 @@
url_suffix: "/faf"
when: not faf_web_on_root|bool
- name: install faf-webui packages
- name: Install faf-webui packages
package:
name: "{{ faf_web_packages }}"
state: present
@ -18,20 +18,20 @@
import_tasks: celery.yml
when: faf_with_celery|bool
- name: install faf web symboltransfer packages
- name: Install faf web symboltransfer packages
package:
name: "{{ faf_web_symboltransfer_packages }}"
state: present
when: faf_with_symboltransfer|bool
- name: provide /etc/faf/plugins/web.conf
- name: Provide /etc/faf/plugins/web.conf
template:
src: etc-faf-plugins-web.conf.j2
dest: /etc/faf/plugins/web.conf
notify:
- restart httpd
- name: put webfaf on root (/) if configured
- name: Put webfaf on root (/) if configured
template:
src: etc-httpd-conf.d-faf-web.conf.j2
dest: /etc/httpd/conf.d/faf-web.conf

View file

@ -1,7 +1,7 @@
---
# long running tasks - run them in background - we do not actually care about the results
- name: reposync for fedora
- name: Reposync for fedora
shell: nohup retrace-server-reposync fedora {{ item[0] }} {{ item[1] }} </dev/null >$(mktemp /tmp/ansible.reposync_for_fedoraXXXXXX.log) &
loop: "{{ rs_internal_fedora_vers | product(rs_internal_arch_list) | list }}"
become: yes
@ -9,7 +9,7 @@
tags: [rs_reposync, rs_fedora]
when: env != 'staging'
- name: reposync for centos
- name: Reposync for centos
shell: nohup retrace-server-reposync centos {{ item }} x86_64 </dev/null >$(mktemp /tmp/ansible.reposync_for_centosXXXXXX.log) &
loop: "{{ rs_internal_centos_vers }}"
become: yes

View file

@ -8,7 +8,7 @@
set_fact:
settings_url: "https://{{ hostname }}/settings"
- name: fetch settings
- name: Fetch settings
uri:
url: "{{ settings_url }}"
return_content: yes

View file

@ -1,19 +1,19 @@
---
- name: configure retrace-server
- name: Configure retrace-server
template:
src: etc-retrace-server.conf.j2
dest: /etc/retrace-server/retrace-server.conf
mode: "0644"
notify: restart httpd
- name: retrace-server http config
- name: Install the retrace-server httpd config
template:
src: retrace-server-httpd.conf.j2
dest: /etc/httpd/conf.d/retrace-server-httpd.conf
mode: "0644"
notify: restart httpd
- name: configure retrace-server hooks config
- name: Configure retrace-server hooks config
template:
src: etc-retrace-server-hooks.conf.j2
dest: /etc/retrace-server/retrace-server-hooks.conf

View file

@ -1,11 +1,11 @@
---
- name: erase retrace-server packages
- name: Erase retrace-server packages
package:
name: retrace-server
state: absent
when: rs_force_reinstall|bool
- name: install retrace-server package
- name: Install retrace-server package
package:
name: retrace-server
state: present

View file

@ -1,9 +1,9 @@
---
- name: check if faf is installed
- name: Check if faf is installed
command: rpm -q faf
changed_when: false
- name: add user retrace to faf db
- name: Add user retrace to faf db
postgresql_user:
db: faf
name: retrace

View file

@ -2,7 +2,7 @@
#
# Setup ansible-server instance
#
- name: install needed packages
- name: Install needed packages
package:
name:
- ansible-core
@ -19,17 +19,17 @@
- packages
- ansible-server
- name: generate default ansible config
- name: Generate default ansible config
template: src=ansible.cfg.j2 dest=/etc/ansible/ansible.cfg owner=root group=root mode=0644
tags:
- ansible-server
- name: installing the phx2 dns check script
- name: Install the phx2 dns check script
copy: src=dns_check.py dest=/usr/local/bin/dns_check owner=root mode=0755
tags:
- ansible-server
- name: install required collections
- name: Install required collections
command: ansible-galaxy install -r {{ ansible_base }}/ansible/roles/ansible-server/files/requirements.yml
tags:
- ansible-server

View file

@ -1,3 +1,3 @@
---
- name: restart apache
- name: Restart apache
command: /usr/local/bin/conditional-restart.sh httpd httpd

View file

@ -1,6 +1,6 @@
---
# install apache(httpd)
- name: install apache (package)
- name: Install apache (package)
package:
state: present
name:
@ -11,7 +11,7 @@
- apache
when: ansible_cmdline.ostree is not defined
- name: install mod_http2 on rhel8 hosts
- name: Install mod_http2 on rhel8 hosts
package:
state: present
name:
@ -21,7 +21,7 @@
- apache
when: ansible_distribution_major_version|int >= 8 and ansible_distribution == 'RedHat'
- name: set apache running/enabled
- name: Set apache running/enabled
service: name=httpd enabled=yes
ignore_errors: true
notify:
@ -31,7 +31,7 @@
- apache
# install hash randomization hotfix
- name: hotfix - copy over new httpd init script
- name: Hotfix - copy over new httpd init script
copy: src="{{ files }}/hotfix/httpd/httpd.init" dest=/etc/init.d/httpd
owner=root group=root mode=0755
when: ansible_distribution_major_version|int < 30 and ansible_distribution == 'Fedora'
@ -43,7 +43,7 @@
- apache
# install hash randomization hotfix
- name: hotfix - copy over new httpd init script
- name: Hotfix - copy over new httpd init script
copy: src="{{ files }}/hotfix/httpd/httpd.init" dest=/etc/init.d/httpd
owner=root group=root mode=0755
when: ansible_distribution_major_version|int <= 8 and ansible_distribution == 'RedHat'
@ -54,7 +54,7 @@
- hotfix
- apache
- name: add appserver headers.conf
- name: Add appserver headers.conf
template: src="{{ files }}/httpd/headers.conf.j2" dest=/etc/httpd/conf.d/headers.conf
notify:
- reload apache
@ -63,7 +63,7 @@
- apache
- apache/headers
- name: add appserver h2.conf
- name: Add appserver h2.conf
template: src="{{ files }}/httpd/h2.conf.j2" dest=/etc/httpd/conf.d/h2.conf
when: ansible_distribution == 'Fedora'
notify:
@ -73,7 +73,7 @@
- apache
- h2
- name: add apache_status location for collectd
- name: Add apache_status location for collectd
template: src="{{ files }}/httpd/apachestatus.conf" dest=/etc/httpd/conf.d/apachestatus.conf
notify:
- reload apache
@ -82,7 +82,7 @@
- apache
- apachestatus
- name: setup logrotate to our needs
- name: Set up logrotate to our needs
copy: src="{{ files }}/httpd/httpd.logrotate" dest=/etc/logrotate.d/httpd
tags:
- config

View file

@ -1,6 +1,6 @@
---
- name: rebuild apps-fp-o html
- name: Rebuild apps-fp-o html
shell: /usr/bin/apps-fp-o-yaml2html.py > /srv/web/apps-fp-o/apps-yaml.html
- name: rebuild apps-fp-o json
- name: Rebuild apps-fp-o json
shell: /usr/bin/apps-fp-o-yaml2json.py > /srv/web/apps-fp-o/js/data.js

Some files were not shown because too many files have changed in this diff.