Merge branch 'master' of /git/ansible
Conflicts: playbooks/vhost_update.yml
commit c165cf7746
5 changed files with 76 additions and 52 deletions
playbooks/restart_unbound.yml (new file, 20 lines added)
@@ -0,0 +1,20 @@
+# This playbook restarts unbound on a host
+#
+# requires --extra-vars="target=somevhostname"
+#
+
+- name: find instances
+  hosts: "{{ target }}"
+  accelerate: true
+  gather_facts: False
+  user: root
+
+  tasks:
+  - name: set selinux permissive
+    selinux: policy=targeted state=permissive
+
+  - name: restart unbound
+    service: name=unbound state=restarted
+
+  - name: set selinux back to enforcing
+    selinux: policy=targeted state=enforcing
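As its header comment says, the new playbook is meant to be run with something like ansible-playbook playbooks/restart_unbound.yml --extra-vars="target=somevhostname". If you also want to confirm the resolver is answering once the restart returns, a purely illustrative follow-up task (not part of this commit, and assuming TCP port 53 on the target is reachable from the control host) could look like:

  - name: check that unbound answers again (illustrative addition, not in this commit)
    local_action: wait_for host={{ inventory_hostname }} port=53 timeout=60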
@@ -1,3 +1,6 @@
+#
 # This playbook lets you safely reboot a virthost and all its guests.
 #
+# requires --extra-vars="target=somevhost fqdn"
+
 #General overview:
@@ -9,82 +12,88 @@
 # third play, reboot the vhost
 # wait for vhost to come back

 # TO BE DONE - should be fixable w/ansible 0.9
 # wait for all of the instances to return(?)
 # compare the first list to the second for state info
 # how to do this:
 # capture output of action: virt command=info before halting guests
 # compare to same command after vhost comes back
 # ansible 0.9 should allow us to preserve content of two registered variables
 # across multiple plays

 # TODO: Figure out how to compare virt info pre and post boot.

 - name: find instances
-  hosts: $target
+  hosts: "{{ target }}"
   gather_facts: False
   user: root

   tasks:
   - name: get list of guests
-    action: virt command=list_vms
+    virt: command=list_vms
     register: vmlist

-  - name: add them to myvms_new group
-    local_action: add_host hostname=$item groupname=myvms_new
-    with_items: ${vmlist.list_vms}
+# - name: get info on guests (prereboot)
+#   virt: command=info
+#   register: vminfo_pre
+
+  - name: add them to myvms_new group
+    local_action: add_host hostname={{ item }} groupname=myvms_new
+    with_items: vmlist.list_vms

 - name: halt instances
   hosts: myvms_new
   user: root
   gather_facts: False
   serial: 1

   tasks:
-  - name: schedule host downtime
-    action: nagios action=downtime minutes=30 service=host host={{ inventory_hostname_short }}
+  - name: schedule regular host downtime
+    nagios: action=downtime minutes=30 service=host host={{ inventory_hostname_short }}
     delegate_to: noc01.phx2.fedoraproject.org
     ignore_errors: true
-    when: not ansible_domain.startswith('stg')
+    when: not inventory_hostname.startswith('stg')

-  - name: schedule host downtime
-    action: nagios action=downtime minutes=30 service=host host={{ inventory_hostname_short }}.stg
+  - name: schedule stg host downtime
+    nagios: action=downtime minutes=30 service=host host={{ inventory_hostname_short }}.stg
     delegate_to: noc01.phx2.fedoraproject.org
     ignore_errors: true
-    when: ansible_domain.startswith('stg')
+    when: inventory_hostname.startswith('stg')

-  - name: halt the instances - to poweroff
-    action: command /sbin/halt -p
+  - name: halt the vm instances - to poweroff
+    command: /sbin/halt -p
     ignore_errors: true
     # if one of them is down we don't care


 - name: wait for the whole set to die.
   hosts: myvms_new
   gather_facts: False
   user: root

   tasks:
   - name: wait for them to die
-    local_action: wait_for port=22 delay=30 timeout=300 state=stopped host=${inventory_hostname}
-
+    local_action: wait_for port=22 delay=30 timeout=300 state=stopped host={{ inventory_hostname }}

 - name: reboot vhost
-  hosts: $target
+  hosts: "{{ target }}"
   gather_facts: False
   user: root

   tasks:
   - name: tell nagios to shush
-    action: nagios action=downtime minutes=60 service=host host=$inventory_hostname
+    nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }}
     delegate_to: noc01.phx2.fedoraproject.org
     ignore_errors: true

-  - name: halt -r the $target
-    action: command /sbin/reboot
+  - name: reboot the virthost
+    command: /sbin/reboot

-  - name: wait for $target to come back - up to 6 minutes
-    local_action: wait_for host=$target port=22 delay=120 timeout=420
+  - name: wait for virthost to come back - up to 6 minutes
+    local_action: wait_for host={{ target }} port=22 delay=120 timeout=420

   - name: look up vmlist
-    action: virt command=list_vms
+    virt: command=list_vms
     register: newvmlist

   - name: sync time
     command: ntpdate -u 66.187.233.4

   - name: serverbeach hosts need a special iptables config
     command: /root/fix-iptables.sh
     when: inventory_hostname_short.startswith('serverbeach')

 # - name: get info on guests (postreboot)
 #   virt: command=info
 #   register: vminfo_post

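The pattern this reboot playbook relies on is worth calling out: the first play asks libvirt for the running guests and feeds them into a throw-away in-memory group with add_host, the later plays simply target that group, and wait_for is used to watch port 22 go away and come back. A minimal standalone sketch of the group-building half, using a placeholder virthost name and a debug task that are not part of the commit, might look like:

- name: collect guests into a temporary group
  hosts: vhost01.example.com   # placeholder virthost, not from the commit
  gather_facts: False
  user: root

  tasks:
  - name: list running guests
    virt: command=list_vms
    register: vmlist

  - name: add each guest to the in-memory myvms_new group
    local_action: add_host hostname={{ item }} groupname=myvms_new
    with_items: vmlist.list_vms

- name: act on the guests that were just discovered
  hosts: myvms_new
  gather_facts: False
  user: root

  tasks:
  - name: show which guests ended up in the group (illustrative only)
    debug: msg="{{ inventory_hostname }} is in myvms_new"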
@@ -1,52 +1,49 @@
 # This playbook updates a virthost and all its guests.
 #
 # requires --extra-vars="target=somevhostname yumcommand=update"
 #

 - name: find instances
   hosts: "{{ target }}"
   accelerate: true
   gather_facts: False
   user: root

   tasks:
   - name: get list of guests
-    action: virt command=list_vms
+    virt: command=list_vms
     register: vmlist

   - name: add them to myvms_new group
     local_action: add_host hostname={{ item }} groupname=myvms_new
     with_items: vmlist.list_vms

   - name: add the host to myvms_new group
     local_action: add_host hostname={{ target }} groupname=myvms_new

 - name: update the system
-  hosts: myvms_new
+  hosts: "{{ target }}:myvms_new"
   accelerate: true
   gather_facts: False
   user: root

   tasks:
   - name: expire-caches
-    action: command yum clean expire-cache
+    command: yum clean expire-cache

   - name: yum -y {{ yumcommand }}
-    action: command yum -y {{ yumcommand }}
+    command: yum -y {{ yumcommand }}
     async: 7200
-    poll: 50
+    poll: 30

-- name: update all run rkhunter if installed
-  hosts: myvms_new
+- name: run rkhunter if installed
+  hosts: "{{ target }}:myvms_new"
   accelerate: true
   user: root

   tasks:
   - name: check for rkhunter
-    action: command /usr/bin/test -f /usr/bin/rkhunter
+    command: /usr/bin/test -f /usr/bin/rkhunter
     register: rkhunter
     ignore_errors: true

   - name: run rkhunter --propupd
-    action: command /usr/bin/rkhunter --propupd
+    command: /usr/bin/rkhunter --propupd
     when: rkhunter|success
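The yum task leans on ansible's async/poll support so a long transaction does not run into the connection timeout: the job gets a 7200-second budget and the controller checks on it periodically (this diff changes the poll interval from 50 to 30 seconds). A minimal sketch of the same mechanism in isolation, with a placeholder host pattern and a debug task that are not part of the commit, could be:

- name: demonstrate a long-running command with async/poll
  hosts: all   # placeholder host pattern, not from the commit
  gather_facts: False
  user: root

  tasks:
  - name: run the update in the background and poll until it finishes
    command: yum -y update
    async: 7200   # give the job up to two hours
    poll: 30      # check on it every 30 seconds
    register: yumresult

  - name: show the result (illustrative only)
    debug: var=yumresult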
@@ -116,7 +116,7 @@ handlers =
 qualname = tahrir

 [logger_sqlalchemy]
-level = INFO
+level = WARN
 handlers =
 qualname = sqlalchemy.engine
 # "level = INFO" logs SQL queries.
@@ -1,3 +1 @@
-#ansible root key
 ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAmS3g5fSXizcCqKMI1n5WPFrfMyu7BMrMkMYyck07rB/cf2orO8kKj5schjILA8NYJFStlv2CGRXmQlendj523FPzPmzxvTP/OT4qdywa4LKGvAxOkRGCMMxWzVFLdEMzsLUE/+FLX+xd1US9UPLGRsbMkdz4ORCc0G8gqTr835H56mQPI+/zPFeQjHoHGYtQA1wnJH/0LCuFFfU82IfzrXzFDIBAA5i2S+eEOk7/SA4Ciek1CthNtqPX27M6UqkJMBmVpnAdeDz2noWMvlzAAUQ7dHL84CiXbUnF3hhYrHDbmD+kEK+KiRrYh3PT+5YfEPVI/xiDJ2fdHGxY7Dr2TQ== root@lockbox01.phx2.fedoraproject.org
-