Spring cleaning time. :)

I removed all the old files, inventory, playbooks, roles and other bits
from services we no longer run or use. There was a bunch of cruft in
there, and I hope this makes the repo cleaner and easier to search for
the things we actually do run and care about.

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
Kevin Fenzi, 2020-05-13 14:02:41 -07:00
commit c529380547 (parent 0eca617652)
200 changed files with 4 additions and 6667 deletions


@@ -1,89 +0,0 @@
---
- name: Install desired extra packages (yum)
  package: state=present pkg={{ item }}
  with_items:
    - ntpdate
    - ntp
    - libsemanage-python
    - libselinux-python
  when: ansible_distribution_major_version|int < 8 and ansible_distribution == 'RedHat'
  tags:
    - packages

- name: Install desired extra packages (dnf)
  dnf:
    state: present
    pkg:
      - ntpdate
      - libselinux-python
  when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined
  tags:
    - packages

- name: Include basessh
  include_role: name=basessh

#- name: edit hostname to be instance name - prefix hostbase var if it exists
#  shell: hostname {{ hostbase }}`curl -s http://169.254.169.254/latest/meta-data/instance-id`
#  tags:
#    - config

- name: add ansible root key
  authorized_key: user=root key="{{ item }}"
  with_file:
    - /srv/web/infra/ansible/roles/base/files/ansible-pub-key
  tags:
    - config
    - sshkeys

- name: add root keys for sysadmin-main and other allowed users
  authorized_key: user=root key="{{ item }}"
  with_lines:
    - "/srv/web/infra/ansible/scripts/auth-keys-from-fas @sysadmin-main {{ root_auth_users }}"
  tags:
    - config
    - sshkeys
  ignore_errors: true

- name: enable ssh_sysadm_login sebool
  seboolean: name=ssh_sysadm_login state=yes persistent=yes
  ignore_errors: true

# note - kinda should be a handler - but handlers need args
- name: restorecon
  file: path=/root/.ssh setype=ssh_home_t recurse=yes
  tags:
    - config
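# Editor sketch, not in the original role: handlers cannot take
# per-notification arguments, but since the path here is fixed, the
# restorecon step above could live in handlers/main.yml and be triggered
# from the key-adding tasks with "notify: restorecon root ssh":
#
#   - name: restorecon root ssh
#     file: path=/root/.ssh setype=ssh_home_t recurse=yes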
- name: update all
  command: yum -y update creates=/etc/sysconfig/global-update-applied
  register: updated
  when: ansible_distribution_major_version|int < 8 and ansible_distribution == 'RedHat'
  tags:
    - packages

- name: update all
  command: dnf -y update creates=/etc/sysconfig/global-update-applied
  register: updated
  when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined
  tags:
    - packages

- name: update all
  command: dnf -y update creates=/etc/sysconfig/global-update-applied
  register: updated
  when: ansible_distribution_major_version|int >= 29 and ansible_distribution == 'Fedora' and ansible_cmdline.ostree is not defined
  tags:
    - packages

- name: write out global-update-applied file if we updated
  copy: content="updated" dest=/etc/sysconfig/global-update-applied
  when: updated is defined
  tags:
    - packages

- name: ensure tmp.mount is not masked (logrotate would fail to start otherwise)
  systemd:
    name: tmp.mount
    masked: no
  when: ansible_distribution_major_version|int >= 30 and ansible_distribution == 'Fedora'


@@ -1,31 +0,0 @@
- name: add infra repo
  get_url: url=http://infrastructure.fedoraproject.org/el/infrastructure.repo dest=/etc/yum.repos.d/
  when: is_rhel is defined
  tags:
    - config

- name: install cloud-utils
  package: name=cloud-utils state=present
  tags:
    - packages

- name: growpart the second partition (/) to full size
  command: growpart /dev/vda 2
  register: growpart
  check_mode: no
  changed_when: "growpart.rc != 1"
  #failed_when: growpart.rc == 2
  ignore_errors: true

- name: reboot the box
  command: /sbin/reboot
  when: growpart.rc == 0
  ignore_errors: true

- name: wait for it to come back (should be quick)
  local_action: wait_for host={{ inventory_hostname }} port=22 delay=10 timeout=120
  when: growpart.rc == 0

- name: resize the /dev/vda 2 fs
  command: resize2fs /dev/vda2
  when: growpart.rc == 0
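For context: growpart exits 0 when it actually resized the partition, 1 when there was nothing to do, and other non-zero codes on real failures, which is what the changed_when test above encodes. A minimal sketch spelling out both conditions explicitly instead of leaning on ignore_errors (same device as above, illustrative only):

- name: growpart the second partition (/) to full size
  command: growpart /dev/vda 2
  register: growpart
  check_mode: no
  changed_when: growpart.rc == 0
  failed_when: growpart.rc not in [0, 1]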


@@ -1,30 +0,0 @@
- name: add infra repo
  get_url: url=http://infrastructure.fedoraproject.org/el/infrastructure.repo dest=/etc/yum.repos.d/
  when: is_rhel is defined
  tags:
    - config

- name: install cloud-utils
  package: name=cloud-utils state=present
  tags:
    - packages

- name: growpart the first partition (/) to full size
  command: growpart /dev/vda 1
  register: growpart
  check_mode: no
  changed_when: "growpart.rc != 1"
  failed_when: growpart.rc == 2

- name: reboot the box
  command: /sbin/reboot
  when: growpart.rc == 0
  ignore_errors: true

- name: wait for it to come back (should be quick)
  local_action: wait_for host={{ inventory_hostname }} port=22 delay=10 timeout=120
  when: growpart.rc == 0

- name: resize the /dev/vda 1 fs
  command: xfs_growfs /
  when: growpart.rc == 0
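The only functional difference from the previous file is the root filesystem: resize2fs grows ext4 by device node, while xfs_growfs grows XFS by mount point. A hedged sketch that picks the right tool from the gathered mount facts (the fact lookup is illustrative, not from either removed file):

- name: resize the root filesystem with the matching tool
  command: "{{ 'xfs_growfs /' if root_fstype == 'xfs' else 'resize2fs /dev/vda1' }}"
  vars:
    root_fstype: "{{ (ansible_mounts | selectattr('mount', 'equalto', '/') | first).fstype }}"
  when: growpart.rc == 0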


@@ -1,109 +0,0 @@
# New tasks to spin up an instance in https://fedorainfracloud.org
- include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README

- name: check if the host is already up
  local_action: shell nc -w 5 {{ inventory_hostname }} 22 < /dev/null
  register: host_is_up
  ignore_errors: true
  changed_when: false
  check_mode: no

- name: spin UP VM using nova_compute
  become: False
  local_action:
    module: nova_compute
    auth_url: "{{os_auth_url}}"
    login_username: "admin"
    login_password: "{{ADMIN_PASS}}"
    login_tenant_name: "{{inventory_tenant}}"
    name: "{{inventory_instance_name}}"
    image_id: "{{ image|image_name_to_id('admin', ADMIN_PASS, inventory_tenant, os_auth_url) }}"
    wait_for: 600
    flavor_id: "{{ instance_type|flavor_name_to_id('admin', ADMIN_PASS, inventory_tenant, os_auth_url) }}"
    security_groups: "{{security_group}}"
    key_name: "{{ keypair }}"
    nics: "{{ cloud_networks }}"
    user_data: "#cloud-config\ndisable_root: 0"
    floating_ips:
      - "{{public_ip}}"
  register: nova_result
  when: host_is_up is failed

# instance can be either id or name, volume must be id
#
# Check that the volume is available
#
- local_action: shell nova --os-auth-url="{{os_auth_url}}" --os-username="admin" --os-password="{{ADMIN_PASS}}" --os-tenant-name={{inventory_tenant}} volume-list | grep ' {{item.volume_id}} ' | grep 'available'
  with_items: "{{ volumes|default([]) }}"
  register: volume_available
  failed_when: volume_available.rc == 2
  changed_when: volume_available.rc == 0
  ignore_errors: True
  when: volumes is defined
  check_mode: no

#
# If it is, attach it.
#
- local_action: shell nova --os-auth-url="{{os_auth_url}}" --os-username="admin" --os-password="{{ADMIN_PASS}}" --os-tenant-name={{inventory_tenant}} volume-attach "{{inventory_instance_name}}" "{{item.volume_id}}" "{{item.device}}"
  with_items: "{{ volume_available.results|default([]) }}"
  ignore_errors: True
  failed_when: False
  when: volumes is defined and volume_available is defined and item.changed

- name: wait for the host to be up
  local_action: wait_for host={{ public_ip }} port=22 delay=1 timeout=600
  when: host_is_up is failed

# SSH is up and running, but cloud-init may not have deployed the ssh keypair
# yet, so we have to wait some time. 10 seconds is usually enough, but not always.
- name: waiting for cloud-init
  pause: seconds=30
  when: host_is_up is failed

- name: gather ssh host key from new instance
  local_action: command ssh-keyscan -t rsa {{ inventory_hostname }}
  ignore_errors: True
  register: hostkey
  when: host_is_up is failed

- name: add new ssh host key (until we can sign it)
  local_action: known_hosts path={{item}} key="{{ hostkey.stdout }}" host={{ inventory_hostname }} state=present
  ignore_errors: True
  with_items:
    - /root/.ssh/known_hosts
  when: host_is_up is failed

#
# Next we try to gather facts. If the host doesn't have python2, this will fail.
#
- name: gather facts
  setup:
  check_mode: no
  ignore_errors: True
  register: facts

#
# If that failed, we use the raw module to install things.
#
- name: install python2 and dnf stuff
  raw: dnf -y install python-dnf libselinux-python
  when: facts is failed
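# Editor sketch, not in the original file: once the raw install has put
# python on the box, facts have to be gathered again before later tasks
# can rely on them.
- name: re-gather facts now that python is present
  setup:
  when: facts is failed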
# TODO - somehow detect when the keypair is finally deployed and return a little earlier
## We need to specify the user; here we try fedora or root
#- name: wait until ssh is available
#  # local_action: shell false; until [ "$?" -eq "0" ]; do sleep 2; ssh -o PasswordAuthentication=no fedora@{{ public_ip }} 'echo foobar' || ssh -o PasswordAuthentication=no root@{{ public_ip }} 'echo foobar'; done
#  # local_action: shell false; until [ "$?" -eq "0" ]; do sleep 2; ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no fedora@{{ public_ip }} 'echo foobar'; done
#  local_action: shell whoami && ssh -vvvv -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no -o PasswordAuthentication=no fedora@{{ public_ip }} 'echo foobar'
#  # async: 20
#  # poll: 5
#  ignore_errors: True
#
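# A polling alternative sketch (editor illustration, not in the original):
# retry a no-op ssh until the key cloud-init injects actually works, rather
# than pausing for a fixed interval as above.
- name: wait until the cloud-init keypair accepts logins
  local_action: command ssh -o BatchMode=yes -o StrictHostKeyChecking=no root@{{ public_ip }} true
  register: ssh_probe
  until: ssh_probe.rc == 0
  retries: 12
  delay: 5
  ignore_errors: True
  when: host_is_up is failed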
- name: Include SSH config
  import_role: name=basessh


@@ -1,55 +0,0 @@
# New tasks to spin up an instance in https://fedorainfracloud.org
- name: spin UP VM using nova_compute
  become: False
  local_action:
    module: nova_compute
    auth_url: "{{os_auth_url}}"
    login_username: "admin"
    login_password: "{{ADMIN_PASS}}"
    login_tenant_name: transient
    name: "{{ name }}"
    image_id: "{{ image|image_name_to_id('admin', ADMIN_PASS, 'transient', os_auth_url) }}"
    wait_for: 300
    flavor_id: "{{ instance_type|flavor_name_to_id('admin', ADMIN_PASS, 'transient', os_auth_url) }}"
    security_groups: all-icmp-transient,web-80-anywhere-transient,web-443-anywhere-transient,ssh-anywhere-transient,default
    # transient-net
    nics:
      - net-id: 96e0590b-e572-4340-9408-ce4d4e4f4536
    key_name: fedora-admin-20130801
    auto_floating_ip: true
    user_data: "#cloud-config\ndisable_root: 0"
  register: nova_result

- name: add it to the special group
  local_action: add_host hostname="{{ nova_result.public_ip }}" groupname=tmp_just_created
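# For context (editor note, not in the original file): add_host puts the new
# IP into an in-memory group, so later plays in the same run can target the
# fresh instance, e.g.:
#
#   - hosts: tmp_just_created
#     tasks:
#       - name: do post-boot setup here
#         ping: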
- name: mail off about where it is
  local_action: mail
    to=sysadmin-main-members@fedoraproject.org
    from=ansible-create@fedoraproject.org
    subject="{{ nova_result.public_ip }}"
    body="transient cloud instance created on {{ nova_result.public_ip }}
          name = {{ name }}
          root_auth_users = {{ root_auth_users }}
          image = {{ image }}"

- name: wait for the host to be up
  local_action: wait_for host={{ nova_result.public_ip }} port=22 delay=1 timeout=600

- name: gather ssh host key from new instance
  local_action: command ssh-keyscan -t rsa {{ nova_result.public_ip }}
  ignore_errors: True
  register: hostkey

- name: add new ssh host key (you still need to add it to the official ssh_host_keys later)
  local_action: known_hosts path={{item}} key="{{ hostkey.stdout }}" host={{ nova_result.public_ip }} state=present
  ignore_errors: True
  with_items:
    - /root/.ssh/known_hosts
    - /etc/ssh/ssh_known_hosts

# SSH is up and running, but cloud-init may not have deployed the ssh keypair
# yet, so we have to wait some time. 10 seconds is usually enough, but not always.
- name: waiting for cloud-init
  pause: seconds=30


@@ -1,52 +0,0 @@
- name: spin UP VM using os_server
  delegate_to: undercloud01.cloud.fedoraproject.org
  os_server:
    state: present
    security_groups: default
    nics:
      - net-id: d18c60b1-bba2-416f-87c3-a4416191bd7c
    name: "{{ name }}"
    auth:
      auth_url: http://172.23.1.52:5000/v2.0
      username: "admin"
      password: "{{newcloud_os_admin_pw}}"
      project_name: relrod-super-cool-test-project
    image: fedora-28-cloud-test
    flavor: test.flavor.blah
    key_name: fedora-admin-20130801
    auto_floating_ip: true
    userdata: "#cloud-config\ndisable_root: 0"
  register: instance

- name: add it to the special group
  local_action: add_host hostname="{{ instance.server.accessIPv4 }}" groupname=tmp_just_created

- name: mail off about where it is
  local_action: mail
    to=codeblock@fedoraproject.org
    from=ansible-create@fedoraproject.org
    subject="{{ instance.server.accessIPv4 }}"
    body="transient cloud instance created on {{ instance.server.accessIPv4 }}
          name = {{ name }}
          root_auth_users = {{ root_auth_users }}
          image = {{ image }}"

- name: wait for the host to be up
  local_action: wait_for host={{ instance.server.accessIPv4 }} port=22 delay=1 timeout=600

- name: gather ssh host key from new instance
  local_action: command ssh-keyscan -t rsa {{ instance.server.accessIPv4 }}
  ignore_errors: True
  register: hostkey

- name: add new ssh host key (you still need to add it to the official ssh_host_keys later)
  local_action: known_hosts path={{item}} key="{{ hostkey.stdout }}" host={{ instance.server.accessIPv4 }} state=present
  ignore_errors: True
  with_items:
    - /root/.ssh/known_hosts
    - /etc/ssh/ssh_known_hosts

# SSH is up and running, but cloud-init may not have deployed the ssh keypair
# yet, so we have to wait some time. 10 seconds is usually enough, but not always.
- name: waiting for cloud-init
  pause: seconds=30
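For completeness, the same os_server module tears a test instance back down by flipping state to absent; a minimal sketch reusing the auth block above (illustrative, not part of the removed file):

- name: delete the test instance
  delegate_to: undercloud01.cloud.fedoraproject.org
  os_server:
    state: absent
    name: "{{ name }}"
    auth:
      auth_url: http://172.23.1.52:5000/v2.0
      username: "admin"
      password: "{{newcloud_os_admin_pw}}"
      project_name: relrod-super-cool-test-project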