openstack cloud: decommission

Finally take the fed-cloud* hosts out, along with all playbooks associated with the old cloud (and the attempts to make a new one).
This cloud was a pain at times, but it did serve long and well — we salute it!

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
This commit is contained in:
Kevin Fenzi 2020-02-28 20:33:18 +00:00 committed by Pierre-Yves Chibon
parent 0135fc1102
commit 00af04a024
38 changed files with 1 additions and 1570 deletions

View file

@ -1,23 +0,0 @@
-----BEGIN CERTIFICATE-----
MIID2DCCAsACCQCxRWmzwjSj6TANBgkqhkiG9w0BAQUFADCBrTELMAkGA1UEBhMC
VVMxCzAJBgNVBAgMAk5NMRAwDgYDVQQHDAdSYWxlaWdoMRAwDgYDVQQKDAdSZWQg
SGF0MRcwFQYDVQQLDA5GZWRvcmEgUHJvamVjdDEsMCoGA1UEAwwjZmVkLWNsb3Vk
MDkuY2xvdWQuZmVkb3JhcHJvamVjdC5vcmcxJjAkBgkqhkiG9w0BCQEWF2FkbWlu
QGZlZG9yYXByb2plY3Qub3JnMB4XDTE0MDkxODEwMjMxMloXDTE1MDkxODEwMjMx
Mlowga0xCzAJBgNVBAYTAlVTMQswCQYDVQQIDAJOTTEQMA4GA1UEBwwHUmFsZWln
aDEQMA4GA1UECgwHUmVkIEhhdDEXMBUGA1UECwwORmVkb3JhIFByb2plY3QxLDAq
BgNVBAMMI2ZlZC1jbG91ZDA5LmNsb3VkLmZlZG9yYXByb2plY3Qub3JnMSYwJAYJ
KoZIhvcNAQkBFhdhZG1pbkBmZWRvcmFwcm9qZWN0Lm9yZzCCASIwDQYJKoZIhvcN
AQEBBQADggEPADCCAQoCggEBALFOYDRhow6sEyCvm4jNlIAxs9vYDF07q3sEHzVj
zXy0NNlUgZPRCijWFyHRDwy383f7ZtRlqVCGXxm4l8ltQUU+jmXcnIY1xY2A1TPv
nWv+f1dGSv+SfWGAjqgwyajr6wyPAOnpwui2v03/xalAx6Xl7padfdlAEsNjAvNb
5uZkW7DLlDu3jSIroDSKsJUQW9kc1elT90W0mNgw3MpFA5zdj0QRxi2JpBth6PeT
CewN4r7QZ5cP4EzfHMLKT21kJzm+j5jlaQEak4yKWDEeLh4+RxgTnmss4zYKTUit
7H+j9KaxqVsneB8Sg7EtVnXafYLrSlr9fwOV5DWklLzvjBMCAwEAATANBgkqhkiG
9w0BAQUFAAOCAQEAHToeNGFaGlybHICw1ncLCmdu6vikPPn/UShfS25U54Q9eIMn
zqlhbbEyzuF4wKjV35W0BORWKJ+hQ2vpfk21jUMVOsdl7IMEXtIWotfO17ufWM28
zhwcPAlrs/Pr5dF7ihbOGKAHhEYVopSH8OTFayAQKWWKGv52lZsgwfrnDDu0TjIo
zmhCEmOWZf+CeEWT/AP7BJ6g4Apz9grUmaRvaQGft5y5sGC8tsV0im/C9WaMfVhF
wemG2KcOuKJDXtvd7DHNBoHcDrB1cN1i0uKhj0nxXsXpeag9Xh4BmkgHMU8rnegK
q7hOy15qVU/lOBZUtfx69aYHPpOGJ7Jc1xFIiQ==
-----END CERTIFICATE-----

View file

@ -1,24 +0,0 @@
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
# http://docs.openstack.org/icehouse/install-guide/install/yum/content/basics-neutron-networking-controller-node.html
# controller
{{ controller_public_ip }} controller
# network
{{ network_public_ip }} network
# compute1
{{ compute1_private_ip }} fed-cloud10.cloud.fedoraproject.org
{{ compute2_private_ip }} fed-cloud11.cloud.fedoraproject.org
{{ compute3_private_ip }} fed-cloud12.cloud.fedoraproject.org
{{ compute4_private_ip }} fed-cloud13.cloud.fedoraproject.org
{{ compute5_private_ip }} fed-cloud14.cloud.fedoraproject.org
{{ compute6_private_ip }} fed-cloud15.cloud.fedoraproject.org
#
# This is needed for 2fa to work correctly.
#
209.132.181.6 infrastructure infrastructure.fedoraproject.org
209.132.181.32 fas-all.phx2.fedoraproject.org
{{ controller_private_ip }} fed-cloud09.cloud.fedoraproject.org fedorainfracloud.org

View file

@ -2,24 +2,6 @@
##
## Hardware
##
#fed-cloud01.cloud.fedoraproject.org
#fed-cloud02.cloud.fedoraproject.org
fed-cloud03.cloud.fedoraproject.org
fed-cloud04.cloud.fedoraproject.org
fed-cloud05.cloud.fedoraproject.org
fed-cloud06.cloud.fedoraproject.org
fed-cloud07.cloud.fedoraproject.org
fed-cloud08.cloud.fedoraproject.org
fed-cloud09.cloud.fedoraproject.org
#fed-cloud10.cloud.fedoraproject.org
#fed-cloud11.cloud.fedoraproject.org
fed-cloud12.cloud.fedoraproject.org
fed-cloud13.cloud.fedoraproject.org
#fed-cloud14.cloud.fedoraproject.org
fed-cloud15.cloud.fedoraproject.org
#fed-cloud16.cloud.fedoraproject.org
#fed-cloud-ppc01.cloud.fedoraproject.org
fed-cloud-ppc02.cloud.fedoraproject.org
virthost-aarch64-os01.fedorainfracloud.org
virthost-aarch64-os02.fedorainfracloud.org
virthost-cloud01.fedorainfracloud.org

View file

@ -32,13 +32,6 @@ ibiblio05.fedoraproject.org
virthost-comm01.qa.fedoraproject.org
virthost-comm03.qa.fedoraproject.org
virthost-comm04.qa.fedoraproject.org
fed-cloud09.cloud.fedoraproject.org
#fed-cloud10.cloud.fedoraproject.org
#fed-cloud11.cloud.fedoraproject.org
fed-cloud12.cloud.fedoraproject.org
fed-cloud13.cloud.fedoraproject.org
#fed-cloud14.cloud.fedoraproject.org
fed-cloud15.cloud.fedoraproject.org
#osuosl03.fedoraproject.org
# ssh often disabled
#autosign01.phx2.fedoraproject.org
@ -75,14 +68,6 @@ bkernel04.phx2.fedoraproject.org
[ibms]
virthost-cc-rdu03.fedoraproject.org
osuosl02.fedoraproject.org
#fed-cloud01.cloud.fedoraproject.org
#fed-cloud02.cloud.fedoraproject.org
fed-cloud03.cloud.fedoraproject.org
fed-cloud04.cloud.fedoraproject.org
fed-cloud05.cloud.fedoraproject.org
fed-cloud06.cloud.fedoraproject.org
fed-cloud07.cloud.fedoraproject.org
fed-cloud08.cloud.fedoraproject.org
#cloud-noc01.cloud.fedoraproject.org
data-analysis01.phx2.fedoraproject.org
download-rdu01.fedoraproject.org

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.18
eth1_ip: 172.24.0.18
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.18

View file

@ -1,5 +0,0 @@
---
nagios_Check_Services:
mail: false
nrpe: false
swap: false

View file

@ -1,5 +0,0 @@
---
nagios_Check_Services:
mail: false
nrpe: false
swap: false

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.3
eth1_ip: 172.24.0.3
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.3

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.4
eth1_ip: 172.24.0.4
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.4

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.5
eth1_ip: 172.24.0.5
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.5

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.6
eth1_ip: 172.24.0.6
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.6

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.7
eth1_ip: 172.24.0.7
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.7

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.8
eth1_ip: 172.24.0.8
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.8

View file

@ -1,13 +0,0 @@
---
root_auth_users: msuchy
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
host_group: openstack-compute
ansible_ifcfg_blacklist: true
nagios_Check_Services:
nrpe: true
sshd: true
swap: true
baseiptables: False

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.10
eth1_ip: 172.24.0.10
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.10

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.11
eth1_ip: 172.24.0.11
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.11

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.12
eth1_ip: 172.24.0.12
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.12

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.13
eth1_ip: 172.24.0.13
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.13

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.14
eth1_ip: 172.24.0.14
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.14

View file

@ -1,11 +0,0 @@
---
freezes: false
fas_client_groups: sysadmin-cloud,sysadmin-main
sudoers: "{{ private }}/files/sudo/sysadmin-cloud"
datacenter: cloud
eth0_ip: 209.132.184.15
eth1_ip: 172.24.0.15
nm: 255.255.255.0
gw: 209.132.184.254
compute_private_ip: 172.24.0.15

View file

@ -924,45 +924,11 @@ openqa
## END fedmsg services
[cloud_hardware]
#fed-cloud01.cloud.fedoraproject.org
#fed-cloud02.cloud.fedoraproject.org
fed-cloud03.cloud.fedoraproject.org
fed-cloud04.cloud.fedoraproject.org
fed-cloud05.cloud.fedoraproject.org
fed-cloud06.cloud.fedoraproject.org
fed-cloud07.cloud.fedoraproject.org
fed-cloud08.cloud.fedoraproject.org
fed-cloud09.cloud.fedoraproject.org
#fed-cloud10.cloud.fedoraproject.org
#fed-cloud11.cloud.fedoraproject.org
fed-cloud12.cloud.fedoraproject.org
fed-cloud13.cloud.fedoraproject.org
#fed-cloud14.cloud.fedoraproject.org
fed-cloud15.cloud.fedoraproject.org
#fed-cloud16.cloud.fedoraproject.org
#fed-cloud-ppc01.cloud.fedoraproject.org
fed-cloud-ppc02.cloud.fedoraproject.org
cloud-noc01.cloud.fedoraproject.org
virthost-aarch64-os01.fedorainfracloud.org
virthost-aarch64-os02.fedorainfracloud.org
virthost-cloud01.fedorainfracloud.org
[openstack_compute]
fed-cloud03.cloud.fedoraproject.org
fed-cloud04.cloud.fedoraproject.org
fed-cloud05.cloud.fedoraproject.org
fed-cloud06.cloud.fedoraproject.org
fed-cloud07.cloud.fedoraproject.org
fed-cloud08.cloud.fedoraproject.org
#fed-cloud10.cloud.fedoraproject.org
#fed-cloud11.cloud.fedoraproject.org
fed-cloud12.cloud.fedoraproject.org
fed-cloud13.cloud.fedoraproject.org
#fed-cloud14.cloud.fedoraproject.org
fed-cloud15.cloud.fedoraproject.org
#fed-cloud-ppc01.cloud.fedoraproject.org
fed-cloud-ppc02.cloud.fedoraproject.org
[pdc_web]
pdc-web01.phx2.fedoraproject.org
pdc-web02.phx2.fedoraproject.org
@ -1248,8 +1214,6 @@ fas
[zombie_infested]
# anon git via systemd socket seems to get zombies from time to time
pkgs02.phx2.fedoraproject.org
# the openstack 5.0 vnc console viewer causes bunches of Zombies
fed-cloud09.cloud.fedoraproject.org
# Ansible from time to time in large runs has zombie threads
batcave01.phx2.fedoraproject.org
# bodhi-backend01 gets zombies right at the end of pushes

View file

@ -1,30 +0,0 @@
---
- name: deploy Open Stack compute nodes
hosts: openstack_compute
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/RedHat.yml
- /srv/web/infra/ansible/vars/fedora-cloud.yml
- "/srv/private/ansible/files/openstack/passwords.yml"
roles:
- base
- rkhunter
- nagios_client
- fas_client
- sudo
- cloud_compute
pre_tasks:
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
tasks:
- import_tasks: "{{ tasks_path }}/2fa_client.yml"
- import_tasks: "{{ tasks_path }}/motd.yml"
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"

View file

@ -1,4 +0,0 @@
search cloud.fedoraproject.org fedoraproject.org
nameserver 66.35.62.163
nameserver 140.211.169.201
options rotate timeout:1

View file

@ -139,7 +139,7 @@
tags:
- rootpw
- base
when: not inventory_hostname.startswith(('buildvm-','buildhw-','bkernel','koji01.stg','fed-cloud09','compose','rawhide','branched'))
when: not inventory_hostname.startswith(('buildvm-','buildhw-','bkernel','koji01.stg','compose','rawhide','branched'))
- name: add ansible root key
authorized_key: user=root key="{{ item }}"

View file

@ -1,21 +0,0 @@
[epel]
name=Extras Packages for Enterprise Linux $releasever - $basearch
baseurl=http://infrastructure.fedoraproject.org/pub/epel/7/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://infrastructure.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
includepkgs=nagios-plugins*,rkhunter,python-saslwrapper,python-cheetah,python-simplejson,saslwrapper,python2-crypto,libtomcrypt,libtommath,python2-openidc-client,atop
[epel-testing]
name=Extras Packages for Enterprise Linux $releasever - $basearch
baseurl=http://infrastructure.fedoraproject.org/pub/epel/testing/7/$basearch/
enabled=0
gpgcheck=1
gpgkey=http://infrastructure.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7
[epel-beta]
name=Extras Packages for Enterprise Linux beta $releasever - $basearch
baseurl=http://infrastructure.fedoraproject.org/pub/epel/beta/7/$basearch/
enabled=0
gpgcheck=1
gpgkey=http://infrastructure.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-7

View file

@ -1,5 +0,0 @@
[rhel7-os]
name = rhel7 os $basearch
baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-server-rhv-4-mgmt-agent-for-power-le-rpms/
includepkgs=qemu*
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

View file

@ -1,9 +0,0 @@
[RHOS-5]
name=Red Hat OpenStack
baseurl=https://infrastructure.fedoraproject.org/repo/rhel/rhel7/x86_64/rhel-7-openstack-5.0-rpms/
enabled=1
skip_if_unavailable=0
gpgcheck=1
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
#priority=98

View file

@ -1,8 +0,0 @@
- name: "update ca-trust"
command: /usr/bin/update-ca-trust
- name: "restart neutron-openvswitch-agent"
service: name=neutron-openvswitch-agent state=restarted
- name: "restart openstack-nova-compute"
service: name=openstack-nova-compute state=restarted

View file

@ -1,330 +0,0 @@
---
# Configure another compute node for Fedora Cloud
- authorized_key: user=root key="{{ lookup('file', files + '/fedora-cloud/fed09-ssh-key.pub') }}"
- template: src={{ files }}/fedora-cloud/hosts dest=/etc/hosts owner=root mode=0644
- name: Copy customized EPEL7 repo
copy: src=cloud-epel7.repo dest=/etc/yum.repos.d/cloud-epel7.repo
owner=root group=root mode=0644
- name: Enable nested virtualization
copy: content="options kvm_intel nested=1" dest=/etc/modprobe.d/nested_virt.conf
owner=root group=root mode=0644
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="^ONBOOT=" line="ONBOOT=yes"
notify:
- restart network
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="^NETMASK=" line="NETMASK=255.255.255.0"
notify:
- restart network
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="^IPADDR=" line="IPADDR={{compute_private_ip}}"
notify:
- restart network
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="BOOTPROTO=" line="BOOTPROTO=none"
notify:
- restart network
- meta: flush_handlers
- name: copy RHOS repo file
copy: src=rhos-5.repo dest=/etc/yum.repos.d/rhos-5.repo
owner=root group=root mode=0644
- name: install RHEV/ppc64le for el7 repo file
copy: src=rhel7-rhev-ppc64le.repo dest=/etc/yum.repos.d/rhel7-rhev-ppc64le.repo
tags:
- repos
- rhel7-rhev-ppc64le
when: ansible_distribution == 'RedHat' and ansible_distribution_major_version|int == 7 and ansible_architecture == 'ppc64le'
- package: state=present pkg=openstack-nova-common
- name: create logical volume for ephemeral storage
lvol: vg=vg_guests lv=nova size=100%FREE shrink=no
- filesystem: fstype=ext4 dev=/dev/mapper/vg_guests-nova
- mount: name=/var/lib/nova src=/dev/mapper/vg_guests-nova fstype=ext4 state=mounted
- name: Create logical volume for Swift
lvol: vg=vg_server lv=swift_store size=100g
- name: Create FS on Swift storage
filesystem: fstype=ext4 dev=/dev/vg_server/swift_store
- file: path=/var/lib/nova/{{item}} owner=nova group=nova mode=0755 state=directory
with_items:
- buckets
- images
- instances
- keys
- networks
- tmp
- meta: flush_handlers
# http://docs.openstack.org/icehouse/install-guide/install/yum/content/nova-compute.html
- name: install the Compute packages necessary for the controller node.
package: state=present pkg={{ item }}
with_items:
- openstack-nova-compute
- python-novaclient
- openstack-utils
- openstack-selinux
- name: add ssl cert for keystone
copy: src={{ private }}/files/openstack/fedorainfracloud.org.digicert.pem dest=/etc/pki/tls/certs/fedorainfracloud.org.digicert.pem mode=644 owner=root group=root
- name: Set up db connection to controller
ini_file: dest=/etc/nova/nova.conf section=database option=connection value=mysql://nova:{{NOVA_DBPASS}}@{{controller_private_ip}}/nova
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=auth_strategy value=keystone
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_uri value=https://{{controller_publicname}}:5000
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_host value={{controller_publicname}}
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_protocol value=https
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_port value=35357
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fedorainfracloud.org.digicert.pem
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=admin_user value=nova
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=admin_tenant_name value=services
notify:
- restart openstack-nova-compute
- name: set admin_password
ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=admin_password value={{NOVA_PASS}}
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rpc_backend value=nova.openstack.common.rpc.impl_kombu
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_host value={{controller_private_ip}}
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_hosts value={{controller_private_ip}}:5672
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_userid value=amqp_user
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_password value={{ CONFIG_AMQP_AUTH_PASSWORD }}
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_port value=5672
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_use_ssl value=False
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=my_ip value={{compute_private_ip}}
notify:
- restart openstack-nova-compute
# Cirrus VGA is not available on PPC, thus no VNC
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=vnc_enabled value=True
notify:
- restart openstack-nova-compute
when: not inventory_hostname.startswith('fed-cloud-ppc')
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=vnc_enabled value=False
notify:
- restart openstack-nova-compute
when: inventory_hostname.startswith('fed-cloud-ppc')
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=vncserver_listen value=0.0.0.0
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=vncserver_proxyclient_address value={{compute_private_ip}}
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=novncproxy_base_url value=https://{{controller_publicname}}:6080/vnc_auto.html
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_host value={{controller_publicname}}
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_protocol value=https
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_api_servers value=https://{{ controller_publicname }}:9292
notify:
- restart openstack-nova-compute
- name: set up storage for ephemeral disks
ini_file: dest=/etc/nova/nova.conf section=libvirt option=images_type state=absent
notify:
- restart openstack-nova-compute
- service: name=libvirtd state=started enabled=yes
- service: name=messagebus state=started
- service: name=openstack-nova-compute state=started enabled=yes
- service: name=neutron-openvswitch-agent state=started enabled=yes
# http://docs.openstack.org/icehouse/install-guide/install/yum/content/neutron-ml2-compute-node.html
- sysctl: name=net.ipv4.conf.all.rp_filter value=0 state=present sysctl_set=yes reload=yes
- sysctl: name=net.ipv4.conf.default.rp_filter value=0 state=present sysctl_set=yes reload=yes
- name: install the Networking components
package: state=present pkg={{ item }}
with_items:
- openstack-neutron-ml2
- openstack-neutron-openvswitch
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=auth_strategy value=keystone
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_uri value=https://{{controller_publicname}}:5000
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_host value={{controller_publicname}}
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_protocol value=https
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_port value=35357
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fedorainfracloud.org.digicert.pem
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=admin_user value=neutron
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=admin_tenant_name value=services
notify:
- restart neutron-openvswitch-agent
- name: set admin_password
ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=admin_password value={{NEUTRON_PASS}}
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rpc_backend value=neutron.openstack.common.rpc.impl_kombu
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_host value={{controller_private_ip}}
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_hosts value={{controller_private_ip}}:5672
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_userid value=amqp_user
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_password value={{ CONFIG_AMQP_AUTH_PASSWORD }}
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_port value=5672
notify:
- restart neutron-openvswitch-agent
# uncomment if you want to debug compute instance
#- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=verbose value=True
# notify:
# - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=core_plugin value=neutron.plugins.ml2.plugin.Ml2Plugin
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=service_plugins value=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=type_drivers value=local,flat,gre
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=tenant_network_types value=gre
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=mechanism_drivers value=openvswitch
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2_type_gre option=tunnel_id_ranges value=1:1000
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=local_ip value={{compute_private_ip}}
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=tunnel_type value=gre
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=tunnel_types value=gre
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=agent option=tunnel_types value=gre
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=enable_tunneling value=True
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=securitygroup option=firewall_driver value=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
notify:
- restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=securitygroup option=enable_security_group value=True
notify:
- restart neutron-openvswitch-agent
- name: Deploy Neutron plugin configuration
template: src=neutron_plugin.ini.j2 dest=/etc/neutron/plugins/openvswitch/ovs_neutron_plugin.ini
notify:
- restart neutron-openvswitch-agent
- service: name=openvswitch state=started enabled=yes
- command: ovs-vsctl --may-exist add-br br-int
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=network_api_class value=nova.network.neutronv2.api.API
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_url value=https://{{controller_publicname}}:9696
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_auth_strategy value=keystone
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_tenant_name value=services
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_username value=neutron
notify:
- restart openstack-nova-compute
- name: set neutron_admin_password
ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_password value={{NEUTRON_PASS}}
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_auth_url value=https://{{controller_publicname}}:35357/v2.0
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=linuxnet_interface_driver value=nova.network.linux_net.LinuxOVSInterfaceDriver
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=firewall_driver value=nova.virt.firewall.NoopFirewallDriver
notify:
- restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=security_group_api value=neutron
notify:
- restart openstack-nova-compute
- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link
notify:
- restart openstack-nova-compute
- name: Set shell to nova user to allow cold migrations
user: name=nova shell=/bin/bash
- name: SSH authorized key for nova user
authorized_key: user=nova key="{{fed_cloud09_nova_public_key}}"
- name: SSH public key for nova user
template: src={{ files }}/fedora-cloud/fed_cloud09_nova_public_key dest=/var/lib/nova/.ssh/id_rsa.pub owner=nova group=nova
- name: Deploy private SSH key
copy: src={{ private }}/files/openstack/fed-cloud09-nova.key dest=/var/lib/nova/.ssh/id_rsa mode=600 owner=nova group=nova
- copy: src={{files}}/fedora-cloud/nova-ssh-config dest=/var/lib/nova/.ssh/config owner=nova group=nova mode=640
# This needs to be run after controller reprovision
#FIXME
#- name: "restart neutron-openvswitch-agent"
# service: name=neutron-openvswitch-agent state=restarted
#- name: "restart openstack-nova-compute"
# service: name=openstack-nova-compute state=restarted

View file

@ -1,26 +0,0 @@
[ml2]
type_drivers = local,flat,gre
tenant_network_types = gre
mechanism_drivers = openvswitch
[ml2_type_flat]
[ml2_type_vlan]
[ml2_type_gre]
tunnel_id_ranges = 1:1000
[ml2_type_vxlan]
[securitygroup]
firewall_driver = neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
enable_security_group = True
[ovs]
local_ip = {{ compute_private_ip }}
tunnel_type = gre
tunnel_types = gre
enable_tunneling = True
[agent]
tunnel_types = gre

View file

@ -1,18 +0,0 @@
#!/bin/bash
openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml\
-e /home/stack/templates/overcloud_images.yaml \
-r /home/stack/templates/roles_data.yaml \
--ntp-server cloud-noc01.cloud.fedoraproject.org \
-e /home/stack/templates/custom-storage.yaml \
--overcloud-ssh-user heat-admin \
--overcloud-ssh-key ~/.ssh/id_rsa \
-e /home/stack/templates/custom_domain.yaml \
--timeout 1800 \
--validation-errors-nonfatal
# --config-download \
# -e /usr/share/openstack-tripleo-heat-templates/environments/config-download-environment.yaml \
# -e /home/stack/templates/rhel-registration/environment-rhel-registration.yaml \
# -e /home/stack/templates/rhel-registration/rhel-registration-resource-registry.yaml \

View file

@ -1,9 +0,0 @@
parameter_defaults:
OvercloudControllerFlavor: control
OvercloudComputeFlavor: compute
OvercloudComputePPC64LEFlavor: computeppc64le
ComputePPC64LECount: 1
OvercloudCephStorageFlavor: ceph-storage
ControllerCount: 3
ComputeCount: 10
CephStorageCount: 0

View file

@ -1,405 +0,0 @@
[DEFAULT]
#
# From instack-undercloud
#
# Fully qualified hostname (including domain) to set on the
# Undercloud. If left unset, the current hostname will be used, but
# the user is responsible for configuring all system hostname settings
# appropriately. If set, the undercloud install will configure all
# system hostname settings. (string value)
undercloud_hostname = undercloud01.cloud.fedoraproject.org
# IP information for the interface on the Undercloud that will be
# handling the PXE boots and DHCP for Overcloud instances. The IP
# portion of the value will be assigned to the network interface
# defined by local_interface, with the netmask defined by the prefix
# portion of the value. (string value)
#local_ip = 192.168.24.1/24
local_ip = 192.168.20.1/24
# Virtual IP or DNS address to use for the public endpoints of
# Undercloud services. Only used with SSL. (string value)
# Deprecated group/name - [DEFAULT]/undercloud_public_vip
#undercloud_public_host = 192.168.24.2
undercloud_public_host = 192.168.20.2
# Virtual IP or DNS address to use for the admin endpoints of
# Undercloud services. Only used with SSL. (string value)
# Deprecated group/name - [DEFAULT]/undercloud_admin_vip
undercloud_admin_host = 192.168.20.3
# DNS nameserver(s) to use for the undercloud node. (list value)
undercloud_nameservers = 8.8.8.8
# List of ntp servers to use. (list value)
undercloud_ntp_servers = cloud-noc01.cloud.fedoraproject.org
# DNS domain name to use when deploying the overcloud. The overcloud
# parameter "CloudDomain" must be set to a matching value. (string
# value)
#overcloud_domain_name = localdomain
overcloud_domain_name = cloud.fedoraproject.org
# List of routed network subnets for provisioning and introspection.
# Comma separated list of names/tags. For each network a section/group
# needs to be added to the configuration file with these parameters
# set: cidr, dhcp_start, dhcp_end, inspection_iprange, gateway and
# masquerade_network.
#
# Example:
#
# subnets = subnet1,subnet2
#
# An example section/group in config file:
#
# [subnet1]
# cidr = 192.168.10.0/24
# dhcp_start = 192.168.10.100
# dhcp_end = 192.168.10.200
# inspection_iprange = 192.168.10.20,192.168.10.90
# gateway = 192.168.10.254
# masquerade_network = True
# [subnet2]
# . . .
# (list value)
#subnets = ctlplane-subnet
# Name of the local subnet, where the PXE boot and DHCP interfaces for
# overcloud instances is located. The IP address of the
# local_ip/local_interface should reside in this subnet. (string
# value)
#local_subnet = ctlplane-subnet
# Certificate file to use for OpenStack service SSL connections.
# Setting this enables SSL for the OpenStack API endpoints, leaving it
# unset disables SSL. (string value)
#undercloud_service_certificate =
# When set to True, an SSL certificate will be generated as part of
# the undercloud install and this certificate will be used in place of
# the value for undercloud_service_certificate. The resulting
# certificate will be written to
# /etc/pki/tls/certs/undercloud-[undercloud_public_host].pem. This
# certificate is signed by CA selected by the
# "certificate_generation_ca" option. (boolean value)
#generate_service_certificate = false
generate_service_certificate = true
# The certmonger nickname of the CA from which the certificate will be
# requested. This is used only if the generate_service_certificate
# option is set. Note that if the "local" CA is selected the
# certmonger's local CA certificate will be extracted to /etc/pki/ca-
# trust/source/anchors/cm-local-ca.pem and subsequently added to the
# trust chain. (string value)
#certificate_generation_ca = local
certificate_generation_ca = local
# The kerberos principal for the service that will use the
# certificate. This is only needed if your CA requires a kerberos
# principal. e.g. with FreeIPA. (string value)
#service_principal =
# Network interface on the Undercloud that will be handling the PXE
# boots and DHCP for Overcloud instances. (string value)
local_interface = eth1
#local_interface = eth1
# MTU to use for the local_interface. (integer value)
#local_mtu = 1500
# DEPRECATED: Network that will be masqueraded for external access, if
# required. This should be the subnet used for PXE booting. (string
# value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: With support for routed networks, masquerading of the
# provisioning networks is moved to a boolean option for each subnet.
#masquerade_network = 192.168.24.0/24
# Path to hieradata override file. If set, the file will be copied
# under /etc/puppet/hieradata and set as the first file in the hiera
# hierarchy. This can be used to custom configure services beyond what
# undercloud.conf provides (string value)
#hieradata_override =
# Path to network config override template. If set, this template will
# be used to configure the networking via os-net-config. Must be in
# json format. Templated tags can be used within the template, see
# instack-undercloud/elements/undercloud-stack-config/net-
# config.json.template for example tags (string value)
#net_config_override =
# Network interface on which inspection dnsmasq will listen. If in
# doubt, use the default value. (string value)
# Deprecated group/name - [DEFAULT]/discovery_interface
#inspection_interface = br-ctlplane
# Whether to enable extra hardware collection during the inspection
# process. Requires python-hardware or python-hardware-detect package
# on the introspection image. (boolean value)
inspection_extras = true
# Whether to run benchmarks when inspecting nodes. Requires
# inspection_extras set to True. (boolean value)
# Deprecated group/name - [DEFAULT]/discovery_runbench
#inspection_runbench = false
# Whether to support introspection of nodes that have UEFI-only
# firmware. (boolean value)
inspection_enable_uefi = true
# Makes ironic-inspector enroll any unknown node that PXE-boots
# introspection ramdisk in Ironic. By default, the "fake" driver is
# used for new nodes (it is automatically enabled when this option is
# set to True). Set discovery_default_driver to override.
# Introspection rules can also be used to specify driver information
# for newly enrolled nodes. (boolean value)
#enable_node_discovery = false
# The default driver or hardware type to use for newly discovered
# nodes (requires enable_node_discovery set to True). It is
# automatically added to enabled_drivers or enabled_hardware_types
# accordingly. (string value)
#discovery_default_driver = ipmi
# Whether to enable the debug log level for Undercloud OpenStack
# services. (boolean value)
undercloud_debug = true
# Whether to update packages during the Undercloud install. (boolean
# value)
#undercloud_update_packages = true
# Whether to install Tempest in the Undercloud. (boolean value)
#enable_tempest = true
# Whether to install Telemetry services (ceilometer, gnocchi, aodh,
# panko ) in the Undercloud. (boolean value)
#enable_telemetry = false
# Whether to install the TripleO UI. (boolean value)
enable_ui = true
# Whether to install requirements to run the TripleO validations.
# (boolean value)
#enable_validations = true
# Whether to install the Volume service. It is not currently used in
# the undercloud. (boolean value)
#enable_cinder = false
# Whether to install novajoin metadata service in the Undercloud.
# (boolean value)
#enable_novajoin = false
# One Time Password to register Undercloud node with an IPA server.
# Required when enable_novajoin = True. (string value)
#ipa_otp =
# Whether to use iPXE for deploy and inspection. (boolean value)
# Deprecated group/name - [DEFAULT]/ipxe_deploy
ipxe_enabled = false
# Maximum number of attempts the scheduler will make when deploying
# the instance. You should keep it greater or equal to the number of
# bare metal nodes you expect to deploy at once to work around
# potential race condition when scheduling. (integer value)
# Minimum value: 1
#scheduler_max_attempts = 30
# Whether to clean overcloud nodes (wipe the hard drive) between
# deployments and after the introspection. (boolean value)
clean_nodes = true
# DEPRECATED: List of enabled bare metal drivers. (list value)
# This option is deprecated for removal.
# Its value may be silently ignored in the future.
# Reason: Please switch to hardware types and the
# enabled_hardware_types option.
#enabled_drivers = pxe_ipmitool,pxe_drac,pxe_ilo
# List of enabled bare metal hardware types (next generation drivers).
# (list value)
#enabled_hardware_types = ipmi,redfish,ilo,idrac
enabled_hardware_types = ipmi,idrac
# An optional docker 'registry-mirror' that will be configured in
# /etc/docker/daemon.json. (string value)
#docker_registry_mirror =
# List of additional architectures enabled in your cloud environment.
# The list of supported values is: ppc64le (list value)
#additional_architectures =
additional_architectures = ppc64le
# Enable support for routed ctlplane networks. (boolean value)
#enable_routed_networks = false
[auth]
#
# From instack-undercloud
#
# Password used for MySQL root user. If left unset, one will be
# automatically generated. (string value)
#undercloud_db_password = <None>
# Keystone admin token. If left unset, one will be automatically
# generated. (string value)
#undercloud_admin_token = <None>
# Keystone admin password. If left unset, one will be automatically
# generated. (string value)
#undercloud_admin_password = <None>
# Glance service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_glance_password = <None>
# Heat db encryption key (must be 16, 24, or 32 characters). If left
# unset, one will be automatically generated. (string value)
#undercloud_heat_encryption_key = <None>
# Heat service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_heat_password = <None>
# Heat cfn service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_heat_cfn_password = <None>
# Neutron service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_neutron_password = <None>
# Nova service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_nova_password = <None>
# Ironic service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_ironic_password = <None>
# Aodh service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_aodh_password = <None>
# Gnocchi service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_gnocchi_password = <None>
# Ceilometer service password. If left unset, one will be
# automatically generated. (string value)
#undercloud_ceilometer_password = <None>
# Panko service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_panko_password = <None>
# Ceilometer metering secret. If left unset, one will be automatically
# generated. (string value)
#undercloud_ceilometer_metering_secret = <None>
# Ceilometer snmpd read-only user. If this value is changed from the
# default, the new value must be passed in the overcloud environment
# as the parameter SnmpdReadonlyUserName. This value must be between 1
# and 32 characters long. (string value)
#undercloud_ceilometer_snmpd_user = ro_snmp_user
# Ceilometer snmpd password. If left unset, one will be automatically
# generated. (string value)
#undercloud_ceilometer_snmpd_password = <None>
# Swift service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_swift_password = <None>
# Mistral service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_mistral_password = <None>
# Rabbitmq cookie. If left unset, one will be automatically generated.
# (string value)
#undercloud_rabbit_cookie = <None>
# Rabbitmq password. If left unset, one will be automatically
# generated. (string value)
#undercloud_rabbit_password = <None>
# Rabbitmq username. If left unset, one will be automatically
# generated. (string value)
#undercloud_rabbit_username = <None>
# Heat stack domain admin password. If left unset, one will be
# automatically generated. (string value)
#undercloud_heat_stack_domain_admin_password = <None>
# Swift hash suffix. If left unset, one will be automatically
# generated. (string value)
#undercloud_swift_hash_suffix = <None>
# HAProxy stats password. If left unset, one will be automatically
# generated. (string value)
#undercloud_haproxy_stats_password = <None>
# Zaqar password. If left unset, one will be automatically generated.
# (string value)
#undercloud_zaqar_password = <None>
# Horizon secret key. If left unset, one will be automatically
# generated. (string value)
#undercloud_horizon_secret_key = <None>
# Cinder service password. If left unset, one will be automatically
# generated. (string value)
#undercloud_cinder_password = <None>
# Novajoin vendordata plugin service password. If left unset, one will
# be automatically generated. (string value)
#undercloud_novajoin_password = <None>
[ctlplane-subnet]
#
# From instack-undercloud
#
# Network CIDR for the Neutron-managed subnet for Overcloud instances.
# (string value)
# Deprecated group/name - [DEFAULT]/network_cidr
#cidr = 192.168.24.0/24
cidr = 192.168.20.0/24
# Start of DHCP allocation range for PXE and DHCP of Overcloud
# instances on this network. (string value)
# Deprecated group/name - [DEFAULT]/dhcp_start
#dhcp_start = 192.168.24.5
dhcp_start = 192.168.20.50
# End of DHCP allocation range for PXE and DHCP of Overcloud instances
# on this network. (string value)
# Deprecated group/name - [DEFAULT]/dhcp_end
#dhcp_end = 192.168.24.24
dhcp_end = 192.168.20.75
# Temporary IP range that will be given to nodes on this network
# during the inspection process. Should not overlap with the range
# defined by dhcp_start and dhcp_end, but should be in the same ip
# subnet. (string value)
# Deprecated group/name - [DEFAULT]/inspection_iprange
#inspection_iprange = 192.168.24.100,192.168.24.120
inspection_iprange = 192.168.20.100,192.168.20.120
# Network gateway for the Neutron-managed network for Overcloud
# instances on this network. (string value)
# Deprecated group/name - [DEFAULT]/network_gateway
#gateway = 192.168.24.1
gateway = 192.168.20.1
# The network will be masqueraded for external access. (boolean value)
masquerade = true

View file

@ -1,91 +0,0 @@
---
# Tasks to provision a TripleO (RHOSP 13) undercloud node: install the
# director tooling, create the deployment user and directory layout, drop
# in the deployment templates, run the undercloud install, and unpack the
# overcloud/introspection images for each supported architecture.

- name: Install undercloud repo file
  copy: src="{{ files }}/newcloud/rhos13.repo" dest=/etc/yum.repos.d/rhos13.repo
  tags:
  - config
  - packages
  - yumrepos
  - undercloud

- name: Install packages
  package: name={{ item }} state=present
  with_items:
  - subscription-manager-rhsm-certificates
  - python-tripleoclient
  - rhosp-director-images-all
  tags:
  - packages
  - undercloud

# The "stack" user is the conventional unprivileged account from which
# all TripleO undercloud/overcloud commands are run.
- name: Create stack user
  user: name=stack comment="openstack user" home=/home/stack
  tags:
  - config
  - users
  - undercloud

- name: Create some directories
  file: path=/home/stack/{{item}} state=directory owner=stack group=stack mode=0755
  with_items:
  - templates
  - images
  - images/x86_64
  - images/ppc64le
  tags:
  - config
  - undercloud

- name: Create web directory
  file: path=/srv/web state=directory owner=root group=root mode=0755
  tags:
  - config
  - undercloud

- name: Copy files to ~/stack/
  copy: src={{item}} dest=/home/stack/{{item}} owner=stack group=stack mode=0644
  with_items:
  - undercloud.conf
  - templates/node-info.yaml
  - openstack-overcloud-deploy.sh
  tags:
  - config
  - undercloud

# These two contain Jinja2 variables (SAN/registration credentials), so they
# go through the template module rather than copy.
- name: Copy templates to ~/stack/
  template: src={{item}} dest=/home/stack/{{item}} owner=stack group=stack mode=0644
  with_items:
  - custom-storage.yaml
  - environment-rhel-registration.yaml
  tags:
  - config
  - undercloud

- name: Copy nodes.json
  template: src=nodes.json dest=/home/stack/nodes.json owner=stack group=stack mode=0644
  tags:
  - config
  - undercloud
  - nodes

# "creates: stackrc" makes this a run-once task: the undercloud install
# writes /home/stack/stackrc on success.
- name: Do an undercloud install if it has yet to be done
  become: yes
  become_user: stack
  shell: openstack undercloud install
  args:
    chdir: /home/stack/
    creates: /home/stack/stackrc

# Unpack the overcloud and ironic-python-agent image tarballs into the
# per-arch directories created above.
# BUGFIX: the creates: guard previously pointed at /home/stack/imagex/...
# (typo for "images"), a path that never exists, so the tarballs were
# re-extracted on every playbook run instead of only once.
- name: Copy over images
  become: yes
  become_user: stack
  shell: |
    for arch in x86_64 ppc64le; do
    for i in /usr/share/rhosp-director-images/overcloud-full-latest-13.0-${arch}.tar \
    /usr/share/rhosp-director-images/ironic-python-agent-latest-13.0-${arch}.tar; do
    tar -C $arch -xf $i;
    done;
    done
  args:
    chdir: /home/stack/images/
    creates: /home/stack/images/x86_64/overcloud-full.initrd

View file

@ -1,45 +0,0 @@
parameter_defaults:
CinderEnableIscsiBackend: false
CinderEnableRbdBackend: false
CinderEnableNfsBackend: false
NovaEnableRbdBackend: false
GlanceBackend: file
ControllerExtraConfig:
cinder::config::cinder_config:
fed-cloud-eql01/volume_driver:
value: cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver
fed-cloud-eql01/eqlx_pool:
value: default
fed-cloud-eql01/eqlx_group_name:
value: cloud-equallogics
fed-cloud-eql01/volume_backend_name:
value: fed-cloud-eql01
fed-cloud-eql01/san_ip:
value: 192.168.20.31
fed-cloud-eql01/san_login:
value: "{{ san01_login }}"
fed-cloud-eql01/san_password:
value: "{{ san01_pass }}"
fed-cloud-eql01/san_thin_provision:
value: True
fed-cloud-eql01/use_chap_auth:
value: False
fed-cloud-eql02/volume_driver:
value: cinder.volume.drivers.dell_emc.ps.PSSeriesISCSIDriver
fed-cloud-eql02/eqlx_pool:
value: default
fed-cloud-eql02/eqlx_group_name:
value: fedora-equallogics
fed-cloud-eql02/volume_backend_name:
value: fed-cloud-eql02
fed-cloud-eql02/san_ip:
value: 192.168.20.30
fed-cloud-eql02/san_login:
value: "{{ san02_login }}"
fed-cloud-eql02/san_password:
value: "{{ san02_pass }}"
fed-cloud-eql02/san_thin_provision:
value: True
fed-cloud-eql02/use_chap_auth:
value: False
cinder_user_enabled_backends: ['fed-cloud-eql01','fed-cloud-eql02']

View file

@ -1,27 +0,0 @@
# Note this can be specified either in the call
# to heat stack-create via an additional -e option
# or via the global environment on the seed in
# /etc/heat/environment.d/default.yaml
parameter_defaults:
rhel_reg_auto_attach: "true"
rhel_reg_activation_key: "openstack-cloud"
rhel_reg_org: "{{ rhel_reg_org }}"
rhel_reg_pool_id: "{{ rhel_reg_pool_id }}"
rhel_reg_repos: "rhel-7-server-rpms,rhel-7-server-extras-rpms,rhel-7-server-rh-common-rpms,rhel-ha-for-rhel-7-server-rpms,rhel-7-server-openstack-13-rpms,rhel-7-server-rhceph-3-osd-rpms,rhel-7-server-rhceph-3-mon-rpms,rhel-7-server-rhceph-3-tools-rpms"
rhel_reg_method: "portal"
rhel_reg_sat_repo: ""
rhel_reg_base_url: ""
rhel_reg_environment: ""
rhel_reg_force: ""
rhel_reg_machine_name: ""
rhel_reg_password: ""
rhel_reg_release: ""
rhel_reg_sat_url: ""
rhel_reg_server_url: ""
rhel_reg_service_level: ""
rhel_reg_user: ""
rhel_reg_type: ""
rhel_reg_http_proxy_host: ""
rhel_reg_http_proxy_port: ""
rhel_reg_http_proxy_username: ""
rhel_reg_http_proxy_password: ""

View file

@ -1,214 +0,0 @@
{
"nodes":[
{
"mac":[
"40:5C:FD:A5:7F:C0"
],
"name":"node01",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.11"
},
{
"mac":[
"40:5C:FD:A5:80:90"
],
"name":"node02",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.12"
},
{
"mac":[
"40:5C:FD:A5:86:49"
],
"name":"node03",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.13"
},
{
"mac":[
"40:5C:FD:A5:86:4A"
],
"name":"node04",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.14"
},
{
"mac":[
"40:5C:FD:A5:7F:DA"
],
"name":"node05",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.15"
},
{
"mac":[
"40:5C:FD:A5:81:FE"
],
"name":"node06",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.16"
},
{
"mac":[
"40:5C:FD:A5:87:B7"
],
"name":"node07",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.17"
},
{
"mac":[
"40:5C:FD:A5:87:B8"
],
"name":"node08",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.18"
},
{
"mac":[
"48:4D:7E:05:4E:F0"
],
"name":"node09",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.19"
},
{
"mac":[
"48:4D:7E:05:4F:C0"
],
"name":"node10",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.20"
},
{
"mac":[
"48:4D:7E:05:55:79"
],
"name":"node11",
"cpu":"64",
"memory":"134217",
"disk":"350",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.21"
},
{
"mac":[
"F0:1F:AF:E2:69:17"
],
"name":"node12",
"cpu":"32",
"memory":"64216",
"disk":"558",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.26"
},
{
"mac":[
"F0:1F:AF:E2:6D:1F"
],
"name":"node13",
"cpu":"32",
"memory":"64216",
"disk":"558",
"arch":"x86_64",
"pm_type":"idrac",
"pm_user":"{{ undercloud_pm_username }}",
"pm_password":"{{ undercloud_pm_password }}",
"pm_addr":"192.168.20.23"
},
# {
# "mac":[
# "B8:2A:72:FC:C5:2C"
# ],
# "name":"node14",
# "cpu":"32",
# "memory":"64216",
# "disk":"558",
# "arch":"x86_64",
# "pm_type":"idrac",
# "pm_user":"{{ undercloud_pm_username }}",
# "pm_password":"{{ undercloud_pm_password }}",
# "pm_addr":"192.168.20.24"
# },
{
"mac":[
"40:F2:E9:A5:59:ED"
],
"name":"node15",
"cpu":"10",
"memory":"131071",
"disk":"558",
"arch":"ppc64le",
"pm_type":"ipmi",
"pm_user":"admin",
"pm_password":"{{ undercloud_ppc_pm_password }}",
"pm_addr":"192.168.20.27"
}
]
}

View file

@ -1,45 +0,0 @@
# public means "managed" in terms of OS installation guide
public_interface_cidr: 209.132.184.0/24
internal_interface_cidr: 172.24.0.1/24
public_gateway_ip: 209.132.184.254
public_dns: 66.35.62.163
public_floating_start: 209.132.184.31
public_floating_end: 209.132.184.69
controller_public_ip: 209.132.184.9
controller_private_ip: 172.24.0.9
controller_hostname: fed-cloud09.cloud.fedoraproject.org
controller_publicname: fedorainfracloud.org
network_public_ip: 209.132.184.9
public_netmask: 255.255.255.0
network_private_ip: 172.24.0.9
compute1_public_ip: 209.132.184.10
compute1_private_ip: 172.24.0.10
compute2_public_ip: 209.132.184.11
compute2_private_ip: 172.24.0.11
compute3_public_ip: 209.132.184.12
compute3_private_ip: 172.24.0.12
compute4_public_ip: 209.132.184.13
compute4_private_ip: 172.24.0.13
compute5_public_ip: 209.132.184.14
compute5_private_ip: 172.24.0.14
compute6_public_ip: 209.132.184.15
compute6_private_ip: 172.24.0.15
fed_cloud09_nova_public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEA3hXR1h1L5PKIB0DXN7QQcpBg43NeCx8xmSXVqz9EEcOkF48SQjjqLSI/NX/1B9sWJbnCiJXfcH5WiYUFRIcOKK7dPyp7cw/HghljkzeBVN0z4T1/p0p39svmqhzhMyxtbeVZr/s/ES61dj/J2VIsN6ynuIgVgxUj2fNWUt8x8z5Bdu0Q2ThovU8rA+lEM1C/uB2MIpOoXYjkn1a2FsRUz17c1Rn50zL2w8JFCHHcHsHgbQ4G3OvZSTrEiSp7ggZtLepwBWuGSjix69484URaiYSf284+6Sb2dIXo9HV/0Vt108Qc0x9Anw2WARAcPqqAPGF4agA4mere//LBPW+OxQ== fed-cloud09-nova"
swift_storages: "/dev/vg_server/swift_store"
#Dell Equalogic public variables
EQLX_GROUP: cloud-equallogics
os_auth_url: "https://fedorainfracloud.org:5000/v2.0"