Add a number of additional OpenStack playbooks and helpers. Some of them are still incomplete.

This commit is contained in:
Kevin Fenzi 2018-11-14 23:06:51 +00:00
parent cf520645be
commit c2cf00435d
7 changed files with 1564 additions and 4 deletions

View file

@ -0,0 +1,656 @@
# Play: configure the overcloud from the undercloud node.
# Runs as root against the newcloud-undercloud host(s), with facts gathered,
# and loads the shared infra variable files (global vars, private secrets,
# and per-distribution vars keyed on ansible_distribution).
# NOTE(review): `user:` is the legacy spelling of `remote_user:` — confirm
# the Ansible version in use still accepts it.
- name: configure overcloud from undercloud
hosts: newcloud-undercloud
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
# Define site-specific nova flavors: builder variants with swap, and a set
# of sizes inspired by EC2 instance types. Rewritten from inline key=value
# module arguments to native block YAML (Ansible best practice).
# NOTE(review): the module is passed `root="{{item.disk}}"` — confirm that
# nova_flavor maps `root` to the root-disk size; the common parameter name
# is `disk`.
- name: create non-standard flavor
  nova_flavor:
    login_username: "admin"
    login_password: "{{ ADMIN_PASS }}"
    login_tenant_name: "admin"
    auth_url: "https://{{controller_publicname}}:35357/v2.0"
    name: "{{ item.name }}"
    ram: "{{ item.ram }}"
    root: "{{ item.disk }}"
    vcpus: "{{ item.vcpus }}"
    swap: "{{ item.swap }}"
    ephemeral: 0
  with_items:
    - { name: m1.builder, ram: 5120, disk: 50, vcpus: 2, swap: 5120 }
    - { name: ms2.builder, ram: 5120, disk: 20, vcpus: 2, swap: 100000 }
    - { name: m2.prepare_builder, ram: 5000, disk: 16, vcpus: 2, swap: 0 }
    # same as m.* but with swap
    - { name: ms1.tiny, ram: 512, disk: 1, vcpus: 1, swap: 512 }
    - { name: ms1.small, ram: 2048, disk: 20, vcpus: 1, swap: 2048 }
    - { name: ms1.medium, ram: 4096, disk: 40, vcpus: 2, swap: 4096 }
    - { name: ms1.medium.bigswap, ram: 4096, disk: 40, vcpus: 2, swap: 40000 }
    - { name: ms1.large, ram: 8192, disk: 50, vcpus: 4, swap: 4096 }
    - { name: ms1.xlarge, ram: 16384, disk: 160, vcpus: 8, swap: 16384 }
    # inspired by http://aws.amazon.com/ec2/instance-types/
    - { name: c4.large, ram: 3072, disk: 0, vcpus: 2, swap: 0 }
    - { name: c4.xlarge, ram: 7168, disk: 0, vcpus: 4, swap: 0 }
    - { name: c4.2xlarge, ram: 14336, disk: 0, vcpus: 8, swap: 0 }
    - { name: r3.large, ram: 16384, disk: 32, vcpus: 2, swap: 16384 }
##### download common Images #####
# restricted images (RHEL) are handled two steps below
# Upload public cloud images into glance; glance pulls each image itself
# from the given URL (copy_from), so the playbook host does not download it.
# NOTE(review): no checksum is verified for the downloaded image — confirm
# that is acceptable for this mirror URL.
- name: Add the images
glance_image:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
name="{{ item.name }}"
disk_format=qcow2
is_public=True
copy_from="{{ item.copy_from }}"
with_items:
- name: Fedora-Cloud-Base-27-1.6.ppc64le
copy_from: https://download.fedoraproject.org/pub/fedora-secondary/releases/27/CloudImages/ppc64le/images/Fedora-Cloud-Base-27-1.6.ppc64le.qcow2
##### PROJECTS ######
# Create the OpenStack tenants (projects) used by the rest of this play.
# Rewritten from inline key=value module arguments to native block YAML,
# and fixed a typo in the qa tenant description ("developmnet").
# NOTE(review): later tasks also reference cloudintern, cloudsig and
# scratch tenants, which are not created here — presumably pre-existing;
# verify.
- name: Create tenants
  keystone_user:
    login_user: "admin"
    login_password: "{{ ADMIN_PASS }}"
    login_tenant_name: "admin"
    endpoint: "https://{{controller_publicname}}:35357/v2.0"
    tenant: "{{ item.name }}"
    tenant_description: "{{ item.desc }}"
    state: present
  with_items:
    - { name: persistent, desc: "persistent instances" }
    - { name: qa, desc: "development and test-day applications of QA" }
    - { name: transient, desc: 'transient instances' }
    - { name: infrastructure, desc: "one off instances for infrastructure folks to test or check something (proof-of-concept)" }
    - { name: copr, desc: 'Space for Copr builders' }
    - { name: coprdev, desc: 'Development version of Copr' }
    - { name: pythonbots, desc: 'project for python build bot users - twisted, etc' }
    - { name: openshift, desc: 'Tenant for openshift deployment' }
    - { name: maintainertest, desc: 'Tenant for maintainer test machines' }
    - { name: aos-ci-cd, desc: 'Tenant for aos-ci-cd' }
##### USERS #####
# Create one keystone user per person/service, each with a primary tenant.
# no_log is set because every loop item carries a password from the
# private vars file.
# NOTE(review): jkadlcik is assigned clime_password (not a jkadlcik one) —
# confirm the shared credential is intentional.
# NOTE(review): several users reference tenants (cloudintern, scratch)
# that the "Create tenants" task above does not create — verify they exist.
- name: Create users
keystone_user:
login_user="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
endpoint="https://{{controller_publicname}}:35357/v2.0"
user="{{ item.name }}"
email="{{ item.email }}"
tenant="{{ item.tenant }}"
password="{{ item.password }}"
state=present
no_log: True
with_items:
- { name: anthomas, email: 'anthomas@redhat.com', tenant: cloudintern, password: "{{anthomas_password}}" }
- { name: ausil, email: 'dennis@ausil.us', tenant: infrastructure, password: "{{ausil_password}}" }
- { name: atomic, email: 'walters@redhat.com', tenant: scratch, password: "{{cockpit_password}}" }
- { name: codeblock, email: 'codeblock@elrod.me', tenant: infrastructure, password: "{{codeblock_password}}" }
- { name: copr, email: 'admin@fedoraproject.org', tenant: copr, password: "{{copr_password}}" }
- { name: gholms, email: 'gholms@fedoraproject.org', tenant: cloudintern, password: "{{gholms_password}}" }
- { name: jskladan, email: 'jskladan@redhat.com', tenant: qa, password: "{{jskladan_password}}" }
- { name: kevin, email: 'kevin@fedoraproject.org', tenant: infrastructure, password: "{{kevin_password}}" }
- { name: laxathom, email: 'laxathom@fedoraproject.org', tenant: infrastructure, password: "{{laxathom_password}}" }
- { name: mattdm, email: 'mattdm@fedoraproject.org', tenant: infrastructure, password: "{{mattdm_password}}" }
- { name: msuchy, email: 'msuchy@redhat.com', tenant: copr, password: "{{msuchy_password}}" }
- { name: nb, email: 'nb@fedoraproject.org', tenant: infrastructure, password: "{{nb_password}}" }
- { name: pingou, email: 'pingou@pingoured.fr', tenant: infrastructure, password: "{{pingou_password}}" }
- { name: puiterwijk, email: 'puiterwijk@fedoraproject.org', tenant: infrastructure, password: "{{puiterwijk_password}}" }
- { name: stefw, email: 'stefw@fedoraproject.org', tenant: scratch, password: "{{stefw_password}}" }
- { name: mizdebsk, email: 'mizdebsk@fedoraproject.org', tenant: infrastructure, password: "{{mizdebsk_password}}" }
- { name: kushal, email: 'kushal@fedoraproject.org', tenant: infrastructure, password: "{{kushal_password}}" }
- { name: red, email: 'red@fedoraproject.org', tenant: infrastructure, password: "{{red_password}}" }
- { name: samkottler, email: 'samkottler@fedoraproject.org', tenant: infrastructure, password: "{{samkottler_password}}" }
- { name: tflink, email: 'tflink@fedoraproject.org', tenant: qa, password: "{{tflink_password}}" }
- { name: twisted, email: 'buildbot@twistedmatrix.com', tenant: pythonbots, password: "{{twisted_password}}" }
- { name: roshi, email: 'roshi@fedoraproject.org', tenant: qa, password: "{{roshi_password}}" }
- { name: maxamillion, email: 'maxamillion@fedoraproject.org', tenant: infrastructure, password: "{{maxamillion_password}}" }
- { name: clime, email: 'clime@redhat.com', tenant: copr, password: "{{clime_password}}" }
- { name: jkadlcik, email: 'jkadlcik@redhat.com', tenant: copr, password: "{{clime_password}}" }
- { name: misc, email: 'misc@redhat.com', tenant: openshift, password: "{{misc_password}}" }
- { name: bowlofeggs, email: 'bowlofeggs@fedoraproject.org', tenant: transient, password: "{{bowlofeggs_password}}" }
- { name: alivigni, email: 'alivigni@redhat.com', tenant: aos-ci-cd, password: "{{alivigni_password}}" }
- { name: jbieren, email: 'jbieren@redhat.com', tenant: aos-ci-cd, password: "{{jbieren_password}}" }
- { name: bpeck, email: 'bpeck@redhat.com', tenant: aos-ci-cd, password: "{{bpeck_password}}" }
- { name: srallaba, email: 'srallaba@redhat.com', tenant: aos-ci-cd, password: "{{srallaba_password}}" }
- { name: jburke, email: 'jburke@redhat.com', tenant: aos-ci-cd, password: "{{jburke_password}}" }
tags:
- openstack_users
# Upload each user's SSH public key as a nova keypair, authenticating AS
# that user (login_username/login_password are the item's own credentials).
# Keys are pulled live from FAS via the auth-keys-from-fas helper script,
# except buildsys (inline literal key) and admin (key file on disk).
# ignore_errors is set — presumably because nova_keypair fails when a
# keypair already exists with different content; verify.
# no_log hides the per-item passwords from output.
- name: upload SSH keys for users
nova_keypair:
auth_url="https://{{controller_publicname}}:35357/v2.0"
login_username="{{ item.username }}"
login_password="{{ item.password }}" login_tenant_name="{{item.tenant}}" name="{{ item.name }}"
public_key="{{ item.public_key }}"
ignore_errors: yes
no_log: True
with_items:
- { username: anthomas, name: anthomas, tenant: cloudintern, password: "{{anthomas_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas anthomas') }}" }
- { username: ausil, name: ausil, tenant: infrastructure, password: "{{ausil_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas ausil') }}" }
- { username: codeblock, name: codeblock, tenant: infrastructure, password: "{{codeblock_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas codeblock') }}" }
- { username: buildsys, name: buildsys, tenant: copr, password: "{{copr_password}}", public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCeTO0ddXuhDZYM9HyM0a47aeV2yIVWhTpddrQ7/RAIs99XyrsicQLABzmdMBfiZnP0FnHBF/e+2xEkT8hHJpX6bX81jjvs2bb8KP18Nh8vaXI3QospWrRygpu1tjzqZT0Llh4ZVFscum8TrMw4VWXclzdDw6x7csCBjSttqq8F3iTJtQ9XM9/5tCAAOzGBKJrsGKV1CNIrfUo5CSzY+IUVIr8XJ93IB2ZQVASK34T/49egmrWlNB32fqAbDMC+XNmobgn6gO33Yq5Ly7Dk4kqTUx2TEaqDkZfhsVu0YcwV81bmqsltRvpj6bIXrEoMeav7nbuqKcPLTxWEY/2icePF" }
- { username: gholms, name: gholms, tenant: cloudintern, password: "{{gholms_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas gholms') }}" }
- { username: jskladan, name: jskladan, tenant: qa, password: "{{jskladan_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas jskladan') }}" }
- { username: kevin, name: kevin, tenant: infrastructure, password: "{{kevin_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas kevin') }}" }
- { username: maxamillion, name: maxamillion, tenant: infrastructure, password: "{{maxamillion_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas maxamillion') }}" }
- { username: laxathom, name: laxathom, tenant: infrastructure, password: "{{laxathom_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas laxathom') }}" }
- { username: mattdm, name: mattdm, tenant: infrastructure, password: "{{mattdm_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas mattdm') }}" }
- { username: msuchy, name: msuchy, tenant: copr, password: "{{msuchy_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas msuchy') }}" }
- { username: nb, name: nb, tenant: infrastructure, password: "{{nb_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas nb') }}" }
- { username: pingou, name: pingou, tenant: infrastructure, password: "{{pingou_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas pingou') }}" }
- { username: puiterwijk, name: puiterwijk, tenant: infrastructure, password: "{{puiterwijk_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas puiterwijk') }}" }
- { username: stefw, name: stefw, tenant: scratch, password: "{{stefw_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas stefw') }}" }
- { username: mizdebsk, name: mizdebsk, tenant: infrastructure, password: "{{mizdebsk_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas mizdebsk') }}" }
- { username: kushal, name: kushal, tenant: infrastructure, password: "{{kushal_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas kushal') }}" }
- { username: red, name: red, tenant: infrastructure, password: "{{red_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas red') }}" }
- { username: roshi, name: roshi, tenant: qa, password: "{{roshi_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas roshi') }}" }
- { username: samkottler, name: samkottler, tenant: infrastructure, password: "{{samkottler_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas skottler') }}" }
- { username: tflink, name: tflink, tenant: qa, password: "{{tflink_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas tflink') }}" }
- { username: atomic, name: atomic, tenant: scratch, password: "{{cockpit_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas walters') }}" }
# - { name: twisted, tenant: pythonbots, password: "{{twisted_password}}", public_key: "" }
- { username: admin, name: fedora-admin-20130801, tenant: admin, password: "{{ADMIN_PASS}}", public_key: "{{ lookup('file', files + '/fedora-cloud/fedora-admin-20130801.pub') }}" }
- { username: asamalik, name: asamalik, tenant: scratch, password: "{{asamalik_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas asamalik') }}" }
- { username: clime, name: clime, tenant: copr, password: "{{clime_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas clime') }}" }
- { username: jkadlcik, name: jkadlcik, tenant: copr, password: "{{clime_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas jkadlcik') }}" }
- { username: misc, name: misc, tenant: openshift, password: "{{misc_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas misc') }}" }
- { username: alivigni, name: alivigni, tenant: aos-ci-cd, password: "{{alivigni_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas alivigni') }}" }
- { username: jbieren, name: jbieren, tenant: aos-ci-cd, password: "{{jbieren_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas jbieren') }}" }
- { username: bpeck, name: bpeck, tenant: aos-ci-cd, password: "{{bpeck_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas bpeck') }}" }
- { username: srallaba, name: srallaba, tenant: aos-ci-cd, password: "{{srallaba_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas srallaba') }}" }
- { username: jburke, name: jburke, tenant: aos-ci-cd, password: "{{jburke_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas jburke') }}" }
tags:
- openstack_users
# Create a keystone role named after each tenant (idempotent: the grep
# short-circuits role-create when the role already exists).
# NOTE(review): `source` is a bash builtin — this relies on the shell
# module using bash (or sh being bash) on the target; verify.
- name: Create roles for additional tenants
shell: source /root/keystonerc_admin && keystone role-list |grep ' {{item}} ' || keystone role-create --name {{ item }}
with_items: "{{all_tenants}}"
# Grant users membership in tenants beyond their primary one, using the
# per-tenant role created above. The grep makes it idempotent; the
# trailing `|| true` also masks genuine failures of user-role-add — a
# deliberate best-effort, per the commented-out keystone_user alternative.
- name: Assign users to secondary tentants
shell: source /root/keystonerc_admin && keystone user-role-list --user "{{item.user}}" --tenant "{{item.tenant}}" | grep ' {{item.tenant }} ' || keystone user-role-add --user {{item.user}} --role {{item.tenant}} --tenant {{item.tenant}} || true
#keystone_user:
# endpoint="https://{{controller_publicname}}:35357/v2.0"
# login_user="admin" login_password="{{ ADMIN_PASS }}"
# role=coprdev user={{ item }} tenant=coprdev
with_items:
- { user: admin, tenant: cloudintern }
- { user: admin, tenant: cloudsig }
- { user: admin, tenant: copr }
- { user: admin, tenant: coprdev }
- { user: admin, tenant: persistent }
- { user: admin, tenant: pythonbots }
- { user: admin, tenant: qa }
- { user: admin, tenant: infrastructure }
- { user: admin, tenant: scratch }
- { user: admin, tenant: transient }
- { user: admin, tenant: maintainertest }
- { user: admin, tenant: aos-ci-cd }
- { user: copr, tenant: coprdev }
- { user: kevin, tenant: cloudintern }
- { user: kevin, tenant: cloudsig }
- { user: kevin, tenant: copr }
- { user: kevin, tenant: coprdev }
- { user: kevin, tenant: persistent }
- { user: kevin, tenant: pythonbots }
- { user: kevin, tenant: qa }
- { user: kevin, tenant: scratch }
- { user: kevin, tenant: transient }
- { user: kevin, tenant: maintainertest }
- { user: kevin, tenant: aos-ci-cd }
- { user: msuchy, tenant: cloudintern }
- { user: msuchy, tenant: cloudsig }
- { user: msuchy, tenant: coprdev }
- { user: msuchy, tenant: infrastructure }
- { user: msuchy, tenant: persistent }
- { user: msuchy, tenant: pythonbots }
- { user: msuchy, tenant: qa }
- { user: msuchy, tenant: scratch }
- { user: msuchy, tenant: transient }
- { user: pingou, tenant: persistent }
- { user: puiterwijk, tenant: cloudintern }
- { user: puiterwijk, tenant: cloudsig }
- { user: puiterwijk, tenant: copr }
- { user: puiterwijk, tenant: coprdev }
- { user: puiterwijk, tenant: persistent }
- { user: puiterwijk, tenant: pythonbots }
- { user: puiterwijk, tenant: qa }
- { user: puiterwijk, tenant: scratch }
- { user: puiterwijk, tenant: transient }
- { user: puiterwijk, tenant: maintainertest }
- { user: puiterwijk, tenant: aos-ci-cd }
- { user: mizdebsk, tenant: aos-ci-cd }
- { user: mizdebsk, tenant: cloudintern }
- { user: mizdebsk, tenant: cloudsig }
- { user: mizdebsk, tenant: copr }
- { user: mizdebsk, tenant: coprdev }
- { user: mizdebsk, tenant: infrastructure }
- { user: mizdebsk, tenant: maintainertest }
- { user: mizdebsk, tenant: openshift }
- { user: mizdebsk, tenant: persistent }
- { user: mizdebsk, tenant: pythonbots }
- { user: mizdebsk, tenant: qa }
- { user: mizdebsk, tenant: scratch }
- { user: mizdebsk, tenant: transient }
- { user: clime, tenant: coprdev }
- { user: clime, tenant: persistent }
- { user: jkadlcik, tenant: coprdev }
tags:
- openstack_users
##### NETWORK ####
# http://docs.openstack.org/havana/install-guide/install/apt/content/install-neutron.configure-networks.html
#
# external network is a class C: 209.132.184.0/24
# 209.132.184.1 to .25 - reserved for hardware.
# 209.132.184.26 to .30 - reserved for test cloud external ips
# 209.132.184.31 to .69 - icehouse cloud
# 209.132.184.70 to .89 - reserved for arm03 SOCs
# 209.132.184.90 to .251 - folsom cloud
#
# Create the provider (external/floating-IP) network as a flat network on
# the "floatnet" physical network, marked router:external.
- name: Create en external network
neutron_network:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
name=external
router_external=True
provider_network_type=flat
provider_physical_network=floatnet
register: EXTERNAL_ID
# Subnet on the external network; DHCP disabled, floating-IP range and
# gateway come from inventory vars (public_floating_start/end etc.).
- name: Create an external subnet
neutron_subnet:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
name=external-subnet
network_name=external
cidr="{{ public_interface_cidr }}"
allocation_pool_start="{{ public_floating_start }}"
allocation_pool_end="{{ public_floating_end }}"
gateway_ip="{{ public_gateway_ip }}"
enable_dhcp=false
register: EXTERNAL_SUBNET_ID
#- shell: source /root/keystonerc_admin && nova floating-ip-create external
# when: packstack_sucessfully_finished.stat.exists == False
# Private-range allocation plan (one /20 per tenant out of 172.25.0.0/16):
# 172.16.0.1/16 -- 172.22.0.1/16 - free (can be split to /20)
# 172.23.0.1/16 - free (but used by old cloud)
# 172.24.0.1/24 - RESERVED it is used internally for OS
# 172.24.1.0/24 -- 172.24.255.0/24 - likely free (?)
# 172.25.0.1/20 - Cloudintern (172.25.0.1 - 172.25.15.254)
# 172.25.16.1/20 - infrastructure (172.25.16.1 - 172.25.31.254)
# 172.25.32.1/20 - persistent (172.25.32.1 - 172.25.47.254)
# 172.25.48.1/20 - transient (172.25.48.1 - 172.25.63.254)
# 172.25.64.1/20 - scratch (172.25.64.1 - 172.25.79.254)
# 172.25.80.1/20 - copr (172.25.80.1 - 172.25.95.254)
# 172.25.96.1/20 - cloudsig (172.25.96.1 - 172.25.111.254)
# 172.25.112.1/20 - qa (172.25.112.1 - 172.25.127.254)
# 172.25.128.1/20 - pythonbots (172.25.128.1 - 172.25.143.254)
# 172.25.144.1/20 - coprdev (172.25.144.1 - 172.25.159.254)
# 172.25.160.1/20 -- 172.25.240.1/20 - free
# 172.26.0.1/16 -- 172.31.0.1/16 - free (can be split to /20)
# One neutron router per tenant, named ext-to-<tenant>.
- name: Create a router for all tenants
neutron_router:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
tenant_name="{{ item }}"
name="ext-to-{{ item }}"
with_items: "{{all_tenants}}"
# Attach each tenant router's gateway to the external network created above.
- name: "Connect router's gateway to the external network"
neutron_router_gateway:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
router_name="ext-to-{{ item }}"
network_name="external"
with_items: "{{all_tenants}}"
# Per-tenant private network; only the copr networks are shared so other
# tenants (e.g. coprdev) can attach to them.
- name: Create a private network for all tenants
neutron_network:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
tenant_name="{{ item.name }}"
name="{{ item.name }}-net"
shared="{{ item.shared }}"
with_items:
- { name: cloudintern, shared: false }
- { name: cloudsig, shared: false }
- { name: copr, shared: true }
- { name: coprdev, shared: true }
- { name: infrastructure, shared: false }
- { name: persistent, shared: false }
- { name: pythonbots, shared: false }
- { name: qa, shared: false }
- { name: scratch, shared: false }
- { name: transient, shared: false }
- { name: openshift, shared: false }
- { name: maintainertest, shared: false }
- { name: aos-ci-cd, shared: false }
# Per-tenant /20 subnet matching the allocation plan commented above,
# with fixed external DNS resolvers.
# NOTE(review): the cidr values are written with a host bit set
# (e.g. 172.25.16.1/20 rather than 172.25.16.0/20) — confirm neutron
# normalizes this rather than rejecting it.
- name: Create a subnet for all tenants
neutron_subnet:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
tenant_name="{{ item.name }}"
network_name="{{ item.name }}-net"
name="{{ item.name }}-subnet"
cidr="{{ item.cidr }}"
gateway_ip="{{ item.gateway }}"
dns_nameservers="66.35.62.163,140.211.169.201"
with_items:
- { name: cloudintern, cidr: '172.25.0.1/20', gateway: '172.25.0.1' }
- { name: cloudsig, cidr: '172.25.96.1/20', gateway: '172.25.96.1' }
- { name: copr, cidr: '172.25.80.1/20', gateway: '172.25.80.1' }
- { name: coprdev, cidr: '172.25.144.1/20', gateway: '172.25.144.1' }
- { name: infrastructure, cidr: '172.25.16.1/20', gateway: '172.25.16.1' }
- { name: persistent, cidr: '172.25.32.1/20', gateway: '172.25.32.1' }
- { name: pythonbots, cidr: '172.25.128.1/20', gateway: '172.25.128.1' }
- { name: qa, cidr: '172.25.112.1/20', gateway: '172.25.112.1' }
- { name: scratch, cidr: '172.25.64.1/20', gateway: '172.25.64.1' }
- { name: transient, cidr: '172.25.48.1/20', gateway: '172.25.48.1' }
- { name: openshift, cidr: '172.25.160.1/20', gateway: '172.25.160.1' }
- { name: maintainertest, cidr: '172.25.176.1/20', gateway: '172.25.176.1' }
- { name: aos-ci-cd, cidr: '172.25.180.1/20', gateway: '172.25.180.1' }
# Plug each tenant router into that tenant's subnet so instances reach
# the external network through it.
- name: "Connect router's interface to the TENANT-subnet"
neutron_router_interface:
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
auth_url="https://{{controller_publicname}}:35357/v2.0"
tenant_name="{{ item }}"
router_name="ext-to-{{ item }}"
subnet_name="{{ item }}-subnet"
with_items: "{{all_tenants}}"
- name: "Create 'ssh-anywhere' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'ssh-anywhere-{{item}}'
description: "allow ssh from anywhere"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "22"
port_range_max: "22"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "0.0.0.0/0"
with_items: "{{all_tenants}}"
- name: "Allow nagios checks"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'allow-nagios-{{item}}'
description: "allow nagios checks"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "5666"
port_range_max: "5666"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "209.132.181.35/32"
- direction: "ingress"
ethertype: "IPv4"
protocol: "icmp"
remote_ip_prefix: "209.132.181.35/32"
with_items:
- persistent
- name: "Create 'ssh-from-persistent' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'ssh-from-persistent-{{item}}'
description: "allow ssh from persistent"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "22"
port_range_max: "22"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "172.25.32.1/20"
with_items:
- copr
- coprdev
# Per-tenant 'ssh-internal' groups: allow ssh (22/tcp) only from the
# tenant's OWN private subnet (per the description, "allow ssh from
# {{item.name}}-network").
# Fixes: three prefixes disagreed with the subnets defined earlier —
#   coprdev used copr's 172.25.80.1/20 (its subnet is 172.25.144.1/20),
#   maintainertest used 172.25.180.1/20 (its subnet is 172.25.176.1/20),
#   aos-ci-cd used unallocated 172.25.200.1/20 (its subnet is 172.25.180.1/20).
- name: "Create 'ssh-internal' security group"
  neutron_sec_group:
    login_username: "admin"
    login_password: "{{ ADMIN_PASS }}"
    login_tenant_name: "admin"
    auth_url: "https://{{controller_publicname}}:35357/v2.0"
    state: "present"
    name: 'ssh-internal-{{item.name}}'
    description: "allow ssh from {{item.name}}-network"
    tenant_name: "{{ item.name }}"
    rules:
      - direction: "ingress"
        port_range_min: "22"
        port_range_max: "22"
        ethertype: "IPv4"
        protocol: "tcp"
        remote_ip_prefix: "{{ item.prefix }}"
  with_items:
    - { name: cloudintern, prefix: '172.25.0.1/20' }
    - { name: cloudsig, prefix: '172.25.96.1/20' }
    - { name: copr, prefix: '172.25.80.1/20' }
    - { name: coprdev, prefix: '172.25.144.1/20' }
    - { name: infrastructure, prefix: "172.25.16.1/20" }
    - { name: persistent, prefix: "172.25.32.1/20" }
    - { name: pythonbots, prefix: '172.25.128.1/20' }
    - { name: qa, prefix: "172.25.112.1/20" }
    - { name: scratch, prefix: '172.25.64.1/20' }
    - { name: transient, prefix: '172.25.48.1/20' }
    - { name: openshift, prefix: '172.25.160.1/20' }
    - { name: maintainertest, prefix: '172.25.176.1/20' }
    - { name: aos-ci-cd, prefix: '172.25.180.1/20' }
# One 'web-80-anywhere' group per tenant: http (80/tcp) open to the world.
- name: "Create 'web-80-anywhere' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'web-80-anywhere-{{item}}'
description: "allow web-80 from anywhere"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "80"
port_range_max: "80"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "0.0.0.0/0"
with_items: "{{all_tenants}}"
# One 'web-443-anywhere' group per tenant: https (443/tcp) open to the world.
- name: "Create 'web-443-anywhere' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'web-443-anywhere-{{item}}'
description: "allow web-443 from anywhere"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "443"
port_range_max: "443"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "0.0.0.0/0"
with_items: "{{all_tenants}}"
# One group per tenant opening 5000/tcp (OCI/docker registry) to the world.
- name: "Create 'oci-registry-5000-anywhere' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'oci-registry-5000-anywhere-{{item}}'
description: "allow oci-registry-5000 from anywhere"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "5000"
port_range_max: "5000"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "0.0.0.0/0"
with_items: "{{all_tenants}}"
# 'wide-open': every tcp and udp port from anywhere. Deliberately
# permissive — instances must opt in by attaching this group.
- name: "Create 'wide-open' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'wide-open-{{item}}'
description: "allow anything from anywhere"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "0"
port_range_max: "65535"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "0.0.0.0/0"
- direction: "ingress"
port_range_min: "0"
port_range_max: "65535"
ethertype: "IPv4"
protocol: "udp"
remote_ip_prefix: "0.0.0.0/0"
with_items: "{{all_tenants}}"
# 'all-icmp': all ICMP (ping etc.) from anywhere, per tenant.
- name: "Create 'ALL ICMP' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'all-icmp-{{item}}'
description: "allow all ICMP traffic"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
ethertype: "IPv4"
protocol: "icmp"
remote_ip_prefix: "0.0.0.0/0"
with_items: "{{all_tenants}}"
# copr-keygen rules (persistent tenant only): 5167/tcp and 80/tcp,
# reachable only from the persistent subnet (172.25.32.1/20).
- name: "Create 'keygen-persistent' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'keygen-persistent'
description: "rules for copr-keygen"
tenant_name: "persistent"
rules:
- direction: "ingress"
port_range_min: "5167"
port_range_max: "5167"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "172.25.32.1/20"
- direction: "ingress"
port_range_min: "80"
port_range_max: "80"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "172.25.32.1/20"
# PostgreSQL (5432/tcp) open to the world, one group per tenant.
- name: "Create 'pg-5432-anywhere' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'pg-5432-anywhere-{{item}}'
description: "allow postgresql-5432 from anywhere"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "5432"
port_range_max: "5432"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "0.0.0.0/0"
with_items: "{{all_tenants}}"
# fedmsg relay ports 2003 and 4001, one group per tenant. The /16 prefix
# covers 172.25.0.0/16, i.e. all of the tenant /20 subnets above.
# NOTE(review): the group name is not templated per item, so the same
# name is created once in each tenant — confirm that is intended.
- name: "Create 'fedmsg-relay-persistent' security group"
neutron_sec_group:
login_username: "admin"
login_password: "{{ ADMIN_PASS }}"
login_tenant_name: "admin"
auth_url: "https://{{controller_publicname}}:35357/v2.0"
state: "present"
name: 'fedmsg-relay-persistent'
description: "allow incoming 2003 and 4001 from internal network"
tenant_name: "{{item}}"
rules:
- direction: "ingress"
port_range_min: "2003"
port_range_max: "2003"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "172.25.80.1/16"
- direction: "ingress"
port_range_min: "4001"
port_range_max: "4001"
ethertype: "IPv4"
protocol: "tcp"
remote_ip_prefix: "172.25.80.1/16"
with_items: "{{all_tenants}}"
# Update quota for Copr
# SEE:
# nova quota-defaults
# nova quota-show --tenant $TENANT_ID
# default is 10 instances, 20 cores, 51200 RAM, 10 floating IPs
# Pattern below: look up the tenant id with the keystone CLI (check_mode: no
# + changed_when: false so the lookup always runs and never reports a
# change), then push the new quota with nova quota-update. TENANT_ID is
# re-registered (overwritten) before each quota-update.
- shell: source /root/keystonerc_admin && keystone tenant-list | grep 'copr ' | awk '{print $2}'
register: TENANT_ID
check_mode: no
changed_when: false
- shell: source /root/keystonerc_admin && nova quota-update --instances 50 --cores 100 --ram 350000 --floating-ips 10 --security-groups 20 {{ TENANT_ID.stdout }}
- shell: source /root/keystonerc_admin && keystone tenant-list | grep 'coprdev ' | awk '{print $2}'
check_mode: no
changed_when: false
register: TENANT_ID
- shell: source /root/keystonerc_admin && nova quota-update --instances 40 --cores 80 --ram 300000 --floating-ips 10 --security-groups 20 {{ TENANT_ID.stdout }}
#
# Note that we set manually the amount of volumes for this tenant to 20 in the web interface.
# nova quota-update cannot do so.
#
- shell: source /root/keystonerc_admin && keystone tenant-list | grep 'persistent ' | awk '{print $2}'
check_mode: no
changed_when: false
register: TENANT_ID
- shell: source /root/keystonerc_admin && nova quota-update --instances 60 --cores 175 --ram 288300 --security-groups 20 {{ TENANT_ID.stdout }}
# Transient quota
- shell: source /root/keystonerc_admin && keystone tenant-list | grep 'transient ' | awk '{print $2}'
check_mode: no
changed_when: false
register: TENANT_ID
- shell: source /root/keystonerc_admin && nova quota-update --instances 30 --cores 70 --ram 153600 --security-groups 20 {{ TENANT_ID.stdout }}

View file

@ -0,0 +1,786 @@
#!/bin/bash
# Federation (mellon/RH-SSO) configuration helper for an overcloud.
# Fix: the script uses bash-only constructs (`function`, `local`,
# `declare -a` arrays), so the shebang must be bash, not sh.
prog_name=$(basename "$0")
action=
dry_run=0
verbose=0
# All staging artifacts are created under the invocation directory.
base_dir=$(pwd)
stage_dir="${base_dir}/fed_deployment"
mellon_root="/v3"
mellon_endpoint="mellon"
mellon_app_name="v3"
overcloud_deploy_script="overcloud_deploy.sh"
overcloudrc_file="./overcloudrc"
# cmd_template CMD
# Echo CMD when verbose or dry-run is set; execute it unless dry-run.
# Prints an error to stderr on failure and returns the command's status.
# Fixes: replaced the obsolescent `[ a -o b ]` test with `||`, and quoted
# variable expansions in tests/echo. $cmd itself is intentionally left
# unquoted so the string is word-split into command + arguments (CMD may
# not contain shell quoting or metacharacters).
function cmd_template {
    local status=0
    local cmd="$1"

    if [ "$verbose" -ne 0 ] || [ "$dry_run" -ne 0 ]; then
        echo "$cmd"
    fi
    if [ "$dry_run" -ne 0 ]; then
        return $status
    fi

    $cmd
    status=$?
    if [ $status -ne 0 ]; then
        (>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
    fi
    return $status
}
# cmds_template
# Template/demo for running a list of commands: in dry-run mode just echo
# them; otherwise run each, echoing when verbose, reporting failures on
# stderr. Returns the status of the LAST failing command (earlier
# failures do not stop the loop). The sample list deliberately includes a
# failing command ("ls xxx") to demonstrate the error path.
function cmds_template {
local return_status=0
declare -a cmds=(
"date"
"ls xxx"
"head $0"
)
if [ $dry_run -ne 0 ]; then
for cmd in "${cmds[@]}"; do
echo $cmd
done
else
for cmd in "${cmds[@]}"; do
if [ $verbose -ne 0 ]; then
echo $cmd
fi
# Unquoted on purpose: word-split into command + arguments.
$cmd
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
return_status=$status
fi
done
fi
return $return_status
}
function show_variables {
# Dump every configuration variable for debugging: FED_* values come
# from ./fed_variables, the rest are computed at the bottom of this
# script.  Unset variables print as empty.
echo "base_dir: $base_dir"
echo "stage_dir: $stage_dir"
echo "config_tar_filename: $config_tar_filename"
echo "config_tar_pathname: $config_tar_pathname"
echo "overcloud_deploy_script: $overcloud_deploy_script"
echo "overcloudrc_file: $overcloudrc_file"
echo "puppet_override_apache_pathname: $puppet_override_apache_pathname"
echo "puppet_override_keystone_pathname: $puppet_override_keystone_pathname"
echo
echo "FED_RHSSO_URL: $FED_RHSSO_URL"
echo "FED_RHSSO_ADMIN_PASSWORD: $FED_RHSSO_ADMIN_PASSWORD"
echo "FED_RHSSO_REALM: $FED_RHSSO_REALM"
echo
echo "FED_KEYSTONE_HOST: $FED_KEYSTONE_HOST"
echo "FED_KEYSTONE_HTTPS_PORT: $FED_KEYSTONE_HTTPS_PORT"
echo "mellon_http_url: $mellon_http_url"
echo "mellon_root: $mellon_root"
echo "mellon_endpoint: $mellon_endpoint"
echo "mellon_app_name: $mellon_app_name"
echo "mellon_endpoint_path: $mellon_endpoint_path"
echo "mellon_entity_id: $mellon_entity_id"
echo
echo "FED_OPENSTACK_IDP_NAME: $FED_OPENSTACK_IDP_NAME"
echo "openstack_mapping_pathname: $openstack_mapping_pathname"
echo "FED_OPENSTACK_MAPPING_NAME: $FED_OPENSTACK_MAPPING_NAME"
echo
echo "idp_metadata_filename: $idp_metadata_filename"
echo "mellon_httpd_config_filename: $mellon_httpd_config_filename"
}
function initialize {
# Create the local staging directory ($stage_dir) used for all
# generated artifacts.  Must be run once before the other actions.
local return_status=0
declare -a cmds=(
"mkdir -p $stage_dir"
)
if [ $dry_run -ne 0 ]; then
for cmd in "${cmds[@]}"; do
echo $cmd
done
else
for cmd in "${cmds[@]}"; do
if [ $verbose -ne 0 ]; then
echo $cmd
fi
$cmd
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
return_status=$status
fi
done
fi
return $return_status
}
function copy_helper_to_controller {
    # Copy this helper script and its variables file to a controller
    # node (arg 1, default "controller-0") so it can be re-run there
    # as the heat-admin user.
    local rc=0
    local target_node=${1:-"controller-0"}
    local cmd="scp configure-federation fed_variables heat-admin@${target_node}:/home/heat-admin"

    if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
        echo $cmd
    fi
    if [ $dry_run -ne 0 ]; then
        return $rc
    fi
    $cmd
    rc=$?
    if [ $rc -ne 0 ]; then
        (>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $rc")
    fi
    return $rc
}
function install_mod_auth_mellon {
    # Install the mod_auth_mellon Apache module; intended to be run on
    # a controller node after copy_helper_to_controller.
    local rc=0
    local cmd="sudo yum -y install mod_auth_mellon"

    if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
        echo $cmd
    fi
    if [ $dry_run -ne 0 ]; then
        return $rc
    fi
    $cmd
    rc=$?
    if [ $rc -ne 0 ]; then
        (>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $rc")
    fi
    return $rc
}
function create_ipa_service_account {
# Create an IPA system account RH-SSO will bind as when it reads
# user/group data out of IPA (RH-SSO "User Federation").
# Note, after setting up the service account it can be tested
# by performing a user search like this:
# ldapsearch -H $ldap_url -x -D "$service_dn" -w "$FED_IPA_RHSSO_SERVICE_PASSWD" -b "cn=users,cn=accounts,$FED_IPA_BASE_DN"
local status=0
local ldap_url="ldaps://$FED_IPA_HOST"
local dir_mgr_dn="cn=Directory Manager"
local service_name="rhsso"
local service_dn="uid=$service_name,cn=sysaccounts,cn=etc,$FED_IPA_BASE_DN"
# NOTE(review): -w puts the Directory Manager password on the command
# line, visible in the process list while ldapmodify runs.
local cmd="ldapmodify -H \"$ldap_url\" -x -D \"$dir_mgr_dn\" -w \"$FED_IPA_ADMIN_PASSWD\""
# Unquoted heredoc: $service_dn etc. are expanded now, into $contents.
read -r -d '' contents <<EOF
dn: $service_dn
changetype: add
objectclass: account
objectclass: simplesecurityobject
uid: $service_name
userPassword: $FED_IPA_RHSSO_SERVICE_PASSWD
passwordExpirationTime: 20380119031407Z
nsIdleTimeout: 0
EOF
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo $cmd
echo -e "$contents"
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
# Run $cmd in a child shell with the LDIF fed on stdin; the extra sh
# layer is needed because $cmd itself contains quoted arguments.
sh <<< "$cmd <<< \"$contents\""
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
fi
return $status
}
function client_install {
# Install keycloak-httpd-client-install and use it to register this
# host as a mellon SP client in the RH-SSO realm, protecting the
# Keystone federation endpoints listed with -l.
local status=0
local cmd_client_install="sudo yum -y install keycloak-httpd-client-install"
# The inner double quotes around the -l paths open/close the string;
# since the paths contain no spaces, $cmd carries no literal quote
# characters and word-splits correctly when executed below.
local cmd="sudo keycloak-httpd-client-install \
--client-originate-method registration \
--mellon-https-port $FED_KEYSTONE_HTTPS_PORT \
--mellon-hostname $FED_KEYSTONE_HOST \
--mellon-root $mellon_root \
--keycloak-server-url $FED_RHSSO_URL \
--keycloak-admin-password $FED_RHSSO_ADMIN_PASSWORD \
--app-name $mellon_app_name \
--keycloak-realm $FED_RHSSO_REALM \
-l "/v3/auth/OS-FEDERATION/websso/mapped" \
-l "/v3/auth/OS-FEDERATION/identity_providers/rhsso/protocols/mapped/websso" \
-l "/v3/OS-FEDERATION/identity_providers/rhsso/protocols/mapped/auth"
"
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo $cmd_client_install
echo $cmd
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
$cmd_client_install
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd_client_install\" failed\nstatus = $status")
else
$cmd
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
fi
fi
return $status
}
function create_sp_archive {
# Tar up the mellon SP configuration (saml2 keys/metadata and the
# generated httpd config) so it can be copied off the controller and
# later distributed as a Swift deployment artifact.
# Note, we put the exclude patterns in a file because it is
# insanely difficult to put --exclude patttern in the $cmd shell
# variable and get the final quoting correct.
local status=0
local cmd="tar -cvzf $config_tar_pathname --exclude-from $stage_dir/tar_excludes /etc/httpd/saml2 /etc/httpd/conf.d/$mellon_httpd_config_filename"
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo $cmd
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
# Quoted 'EOF': the glob patterns below are written out literally.
cat <<'EOF' > $stage_dir/tar_excludes
*.orig
*~
EOF
$cmd
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
fi
return $status
}
function fetch_sp_archive {
# Pull the SP config archive created by create_sp_archive back from
# controller-0 into $stage_dir and unpack it there.
local return_status=0
declare -a cmds=(
"scp heat-admin@controller-0:/home/heat-admin/fed_deployment/$config_tar_filename $stage_dir"
"tar -C $stage_dir -xvf $config_tar_pathname"
)
if [ $dry_run -ne 0 ]; then
for cmd in "${cmds[@]}"; do
echo $cmd
done
else
for cmd in "${cmds[@]}"; do
if [ $verbose -ne 0 ]; then
echo $cmd
fi
$cmd
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
return_status=$status
fi
done
fi
return $return_status
}
function deploy_mellon_configuration {
    # Upload the mellon SP config archive as a Swift deployment
    # artifact so the next overcloud deploy distributes it to all
    # controllers.
    local rc=0
    local cmd="upload-swift-artifacts -f $config_tar_pathname"

    if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
        echo $cmd
    fi
    if [ $dry_run -ne 0 ]; then
        return $rc
    fi
    $cmd
    rc=$?
    if [ $rc -ne 0 ]; then
        (>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $rc")
    fi
    return $rc
}
function idp_entity_id {
    # Print the entityID attribute found in the IdP metadata file
    # (arg 1, default $idp_metadata_filename); return 1 if absent.
    local metadata_file=${1:-$idp_metadata_filename}

    # Extract the entitID from the metadata file, should really be parsed
    # with an XML xpath but a simple string match is probably OK
    entity_id=$(sed -rne 's/^.*entityID="([^"]*)".*$/\1/p' ${metadata_file})
    status=$?
    if [ $status -ne 0 -o "$entity_id"x = "x" ]; then
        (>&2 echo -e "ERROR search for entityID in ${metadata_file} failed\nstatus = $status")
        return 1
    fi

    echo $entity_id
    return 0
}
function append_deploy_script {
# Idempotently append $extra_line (an extra "-e <env file>" argument)
# to the overcloud deploy script, inserting it after the last -e line
# already present.  Skips if the line is already there; errors if it
# appears more than once.
local status=0
local deploy_script=$1
local extra_line=$2
local count
count=$(grep -c -e "$extra_line" $deploy_script)
if [ $count -eq 1 ]; then
echo -e "SKIP appending:\n$extra_line"
echo "already present in $deploy_script"
return $status
elif [ $count -gt 1 ]; then
status=1
(>&2 echo -e "ERROR multiple copies of line in ${deploy_script}\nstatus = $status\nline=$extra_line")
return $status
fi
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo "appending $deploy_script with:"
echo -e $extra_line
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
# insert line after last -e line already in script
#
# This is not easy with sed, we'll use tac and awk instead. Here
# is how this works: The logic is easier if you insert before the
# first line rather than trying to find the last line and insert
# after it. We use tac to reverse the lines in the file. Then the
# awk script looks for the candidate line. If found it outputs the
# line we're adding, sets a flag (p) to indicate it's already been
# printed. The "; 1" pattern always output the input line. Then we
# run the output through tac again to set things back in the
# original order.
local tmp_file=$(mktemp)
tac $deploy_script | awk "!p && /^-e/{print \"${extra_line} \\\\\"; p=1}; 1" | tac > $tmp_file
# Verify the insertion landed exactly once before replacing the script.
count=$(grep -c -e "${extra_line}" $tmp_file)
if [ $count -ne 1 ]; then
status=1
fi
if [ $status -ne 0 ]; then
rm $tmp_file
(>&2 echo -e "ERROR failed to append ${deploy_script}\nstatus = $status\nline=$extra_line")
else
mv $tmp_file $deploy_script
fi
return $status
}
function puppet_override_apache {
# Write a Heat environment file that stops puppet from purging the
# (mellon) Apache config we installed, then register it with the
# deploy script via append_deploy_script.
local status=0
local pathname=${1:-$puppet_override_apache_pathname}
local deploy_cmd="-e $pathname"
# Quoted 'EOF': contents are written out literally, no expansion.
read -r -d '' contents <<'EOF'
parameter_defaults:
ControllerExtraConfig:
apache::purge_configs: false
EOF
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo "writing pathname = $pathname with contents"
echo -e "$contents"
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
echo -e "$contents" > $pathname
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR failed to write ${pathname}\nstatus = $status")
fi
append_deploy_script $overcloud_deploy_script "$deploy_cmd"
status=$?
return $status
}
function puppet_override_keystone {
# Write a Heat environment file enabling Keystone federation options
# (mapped auth method, trusted dashboard, MELLON_IDP remote id) and
# register it with the deploy script via append_deploy_script.
local status=0
local pathname=${1:-$puppet_override_keystone_pathname}
local deploy_cmd="-e $pathname"
# Unquoted EOF: $FED_KEYSTONE_HOST is expanded into the contents now.
# NOTE(review): "controllerExtraConfig" (lower-case c) differs from
# "ControllerExtraConfig" used in puppet_override_apache — confirm
# which spelling the deployed tripleo-heat-templates expect.
read -r -d '' contents <<EOF
parameter_defaults:
controllerExtraConfig:
keystone::using_domain_config: true
keystone::config::keystone_config:
identity/domain_configurations_from_database:
value: true
auth/methods:
value: external,password,token,oauth1,mapped
federation/trusted_dashboard:
value: https://$FED_KEYSTONE_HOST/dashboard/auth/websso/
federation/sso_callback_template:
value: /etc/keystone/sso_callback_template.html
federation/remote_id_attribute:
value: MELLON_IDP
EOF
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo "writing pathname = $pathname with contents"
echo -e "$contents"
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
echo -e "$contents" > $pathname
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR failed to write ${pathname}\nstatus = $status")
fi
append_deploy_script $overcloud_deploy_script "$deploy_cmd"
status=$?
return $status
}
function create_federated_resources {
# Create the Keystone domain/project/group federated users map into,
# and grant that group the member roles.
# follow example in Keystone federation documentation
# http://docs.openstack.org/developer/keystone/federation/federated_identity.html#create-keystone-groups-and-assign-roles
local return_status=0
declare -a cmds=(
"openstack domain create federated_domain"
"openstack project create --domain federated_domain federated_project"
"openstack group create federated_users --domain federated_domain"
"openstack role add --group federated_users --group-domain federated_domain --domain federated_domain _member_"
"openstack role add --group federated_users --project federated_project Member"
)
if [ $dry_run -ne 0 ]; then
for cmd in "${cmds[@]}"; do
echo $cmd
done
else
for cmd in "${cmds[@]}"; do
if [ $verbose -ne 0 ]; then
echo $cmd
fi
$cmd
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
return_status=$status
fi
done
fi
return $return_status
}
function create_mapping {
# Write the Keystone federation mapping rules (JSON) to $pathname:
# any SAML user whose MELLON_groups contains "openstack-users" is
# mapped into the federated_users group of federated_domain.
# Matches documentation
# http://docs.openstack.org/developer/keystone/federation/federated_identity.html#create-keystone-groups-and-assign-roles
local status=0
local pathname=${1:-$openstack_mapping_pathname}
# Quoted 'EOF': the {0} placeholder and JSON are written literally.
read -r -d '' contents <<'EOF'
[
{
"local": [
{
"user": {
"name": "{0}"
},
"group": {
"domain": {
"name": "federated_domain"
},
"name": "federated_users"
}
}
],
"remote": [
{
"type": "MELLON_NAME_ID"
},
{
"type": "MELLON_groups",
"any_one_of": ["openstack-users"]
}
]
}
]
EOF
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo "writing pathname = $pathname with contents"
echo -e "$contents"
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
echo -e "$contents" > $pathname
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR failed to write ${pathname}\nstatus = $status")
fi
return $status
}
function create_v3_rcfile {
# Convert a Keystone v2.0 overcloudrc (arg 1, default
# $overcloudrc_file) into a v3 rc file written next to it with a
# ".v3" suffix.  The generated file first unsets any existing OS_*
# variables, then exports the v3 equivalents.
local status=0
local input_file=${1:-$overcloudrc_file}
local output_file="${input_file}.v3"
source $input_file
#clear the old environment
NEW_OS_AUTH_URL=`echo $OS_AUTH_URL | sed 's!v2.0!v3!'`
# Unquoted heredoc: the OS_* values are expanded NOW into $contents.
# Anything that must run when the rc file is *sourced* has to stay
# escaped — both \$( and \$key below.  (Previously $key was not
# escaped, so it expanded to nothing here and the generated file
# contained a broken "do unset ; done".)
read -r -d '' contents <<EOF
for key in \$( set | sed 's!=.*!!g' | grep -E '^OS_') ; do unset \$key ; done
export OS_AUTH_URL=$NEW_OS_AUTH_URL
export OS_USERNAME=$OS_USERNAME
export OS_PASSWORD=$OS_PASSWORD
export OS_USER_DOMAIN_NAME=Default
export OS_PROJECT_DOMAIN_NAME=Default
export OS_PROJECT_NAME=$OS_TENANT_NAME
export OS_IDENTITY_API_VERSION=3
EOF
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo "writing output_file = $output_file with contents:"
echo -e "$contents"
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
echo -e "$contents" > $output_file
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR failed to write ${output_file}\nstatus = $status")
fi
return $status
}
function openstack_create_idp {
# Register the RH-SSO IdP in Keystone, using the entityID extracted
# from the fetched mellon metadata (see idp_entity_id) as remote-id.
local status=0
local metadata_file="$stage_dir/etc/httpd/saml2/$idp_metadata_filename"
local entity_id
entity_id=$(idp_entity_id $metadata_file)
status=$?
if [ $status -ne 0 ]; then
return $status
fi
local cmd="openstack identity provider create --remote-id $entity_id $FED_OPENSTACK_IDP_NAME"
if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
echo $cmd
fi
if [ $dry_run -ne 0 ]; then
return $status
fi
$cmd
status=$?
if [ $status -ne 0 ]; then
(>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $status")
fi
return $status
}
function openstack_create_mapping {
    # Register the federation mapping rules (JSON file, arg 1) in
    # Keystone under the mapping name given as arg 2.
    local rc=0
    local mapping_file=${1:-$openstack_mapping_pathname}
    local mapping_name=${2:-$FED_OPENSTACK_MAPPING_NAME}
    cmd="openstack mapping create --rules $mapping_file $mapping_name"

    if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
        echo $cmd
    fi
    if [ $dry_run -ne 0 ]; then
        return $rc
    fi
    $cmd
    rc=$?
    if [ $rc -ne 0 ]; then
        (>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $rc")
    fi
    return $rc
}
function openstack_create_protocol {
    # Bind the "mapped" federation protocol to the IdP (arg 1) using
    # the mapping (arg 2) created by openstack_create_mapping.
    local rc=0
    local idp_name=${1:-$FED_OPENSTACK_IDP_NAME}
    local mapping_name=${2:-$FED_OPENSTACK_MAPPING_NAME}
    cmd="openstack federation protocol create --identity-provider $idp_name --mapping $mapping_name mapped"

    if [ $verbose -ne 0 -o $dry_run -ne 0 ]; then
        echo $cmd
    fi
    if [ $dry_run -ne 0 ]; then
        return $rc
    fi
    $cmd
    rc=$?
    if [ $rc -ne 0 ]; then
        (>&2 echo -e "ERROR cmd \"$cmd\" failed\nstatus = $rc")
    fi
    return $rc
}
function usage {
# Print option summary and the list of supported actions to stdout.
cat <<EOF
$prog_name action
-h --help print usage
-n --dry-run dry run, just print computed command
-v --verbose be chatty
action may be one of:
show-variables
initialize
copy-helper-to-controller
install-mod-auth-mellon
create-ipa-service-account
client-install
create-sp-archive
fetch-sp-archive
deploy-mellon-configuration
puppet-override-apache
puppet-override-keystone
create-federated-resources
create-mapping
create-v3-rcfile
openstack-create-idp
openstack-create-mapping
openstack-create-protocol
EOF
}
#-----------------------------------------------------------------------------
# Option parsing.
# options may be followed by one colon to indicate they have a required argument
if ! options=$(getopt -o hnv -l help,dry-run,verbose -- "$@")
then
# something went wrong, getopt will put out an error message for us
exit 1
fi
eval set -- "$options"
while [ $# -gt 0 ]
do
case $1 in
-h|--help) usage; exit 1 ;;
-n|--dry-run) dry_run=1 ;;
-v|--verbose) verbose=1 ;;
# for options with required arguments, an additional shift is required
(--) shift; break;;
(-*) echo "$0: error - unrecognized option $1" 1>&2; exit 1;;
(*) break;;
esac
shift
done
#-----------------------------------------------------------------------------
# Load the site-specific FED_* settings; everything below is derived
# from them plus the globals set at the top of this script.
source ./fed_variables
# Strip leading and trailing space and slash from these variables
mellon_root=`echo ${mellon_root} | perl -pe 's!^[ /]*(.*?)[ /]*$!\1!'`
mellon_endpoint=`echo ${mellon_endpoint} | perl -pe 's!^[ /]*(.*?)[ /]*$!\1!'`
mellon_root="/${mellon_root}"
mellon_endpoint_path="${mellon_root}/${mellon_endpoint}"
mellon_http_url="https://${FED_KEYSTONE_HOST}:${FED_KEYSTONE_HTTPS_PORT}"
mellon_entity_id="${mellon_http_url}${mellon_endpoint_path}/metadata"
openstack_mapping_pathname="${stage_dir}/mapping_${FED_OPENSTACK_IDP_NAME}_saml2.json"
idp_metadata_filename="${mellon_app_name}_keycloak_${FED_RHSSO_REALM}_idp_metadata.xml"
mellon_httpd_config_filename="${mellon_app_name}_mellon_keycloak_${FED_RHSSO_REALM}.conf"
config_tar_filename="rhsso_config.tar.gz"
config_tar_pathname="${stage_dir}/${config_tar_filename}"
puppet_override_apache_pathname="${stage_dir}/puppet_override_apache.yaml"
puppet_override_keystone_pathname="${stage_dir}/puppet_override_keystone.yaml"
#-----------------------------------------------------------------------------
# Dispatch: exactly one action per invocation; extra positional args
# are passed through to the action's function.
if [ $# -lt 1 ]; then
echo "ERROR: no action specified"
exit 1
fi
action="$1"; shift
if [ $dry_run -ne 0 ]; then
echo "Dry Run Enabled!"
fi
case $action in
show-var*)
show_variables ;;
initialize)
initialize ;;
copy-helper-to-controller)
copy_helper_to_controller "$1" ;;
install-mod-auth-mellon)
install_mod_auth_mellon ;;
create-ipa-service-account)
create_ipa_service_account ;;
client-install)
client_install ;;
create-sp-archive)
create_sp_archive ;;
fetch-sp-archive)
fetch_sp_archive ;;
deploy-mellon-configuration)
deploy_mellon_configuration ;;
create-v3-rcfile)
create_v3_rcfile "$1" ;;
puppet-override-apache)
puppet_override_apache "$1" ;;
puppet-override-keystone)
puppet_override_keystone "$1" ;;
create-federated-resources)
create_federated_resources ;;
create-mapping)
create_mapping "$1" ;;
openstack-create-idp)
openstack_create_idp "$1" ;;
openstack-create-mapping)
openstack_create_mapping "$1" "$2" ;;
openstack-create-protocol)
openstack_create_protocol "$1" "$2" ;;
*)
echo "unknown action: $action"
usage
exit 1
;;
esac

View file

@ -3,12 +3,11 @@
openstack overcloud deploy --templates \
-e /home/stack/templates/node-info.yaml\
-e /home/stack/templates/overcloud_images.yaml \
-e /usr/share/openstack-tripleo-heat-templates/environments/network-isolation.yaml \
-r /home/stack/templates/roles_data.yaml \
--ntp-server cloud-noc01.cloud.fedoraproject.org \
--config-download \
-e /usr/share/openstack-tripleo-heat-templates/environments/config-download-environment.yaml \
--overcloud-ssh-user heat-admin \
--overcloud-ssh-key ~/.ssh/id_rsa
# -e /home/stack/templates/network-environment.yaml \
--overcloud-ssh-key ~/.ssh/id_rsa \
-e /home/stack/templates/cinder-dellps-config.yaml \
-e /home/stack/templates/rhel-registration/environment-rhel-registration.yaml

View file

@ -35,6 +35,16 @@
- undercloud.conf
- templates/node-info.yaml
- openstack-overcloud-deploy.sh
- configure-federation
tags:
- config
- undercloud
- name: Copy files to ~/stack/templates/
copy: src={{item}} dest=/home/stack/templates/{{item}} owner=stack group=stack mode=0644
with_items:
- cinder-dellps-config.yaml
- environment-rhel-registration.yaml
tags:
- config
- undercloud
@ -44,3 +54,9 @@
tags:
- config
- undercloud
- name: Copy fed_variables
template: src=fed_variables dest=/home/stack/fed_variables owner=stack group=stack mode=0644
tags:
- config
- undercloud

View file

@ -0,0 +1,32 @@
# Copyright (c) 2016-2017 Dell Inc, or its subsidiaries.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# A Heat environment file which can be used to enable a
# a Cinder Dell EMC PS Series backend, configured via puppet
# Register the Dell PS (EqualLogic) cinder backend service and configure it
# for the fed-cloud-eql02 array.  The {{ san02_* }} placeholders are filled
# in from private Ansible vars when this file is templated out.
resource_registry:
  OS::TripleO::Services::CinderBackendDellPs: ../puppet/services/cinder-backend-dellps.yaml
parameter_defaults:
  CinderEnableDellPsBackend: true
  CinderDellPsBackendName: 'fed-cloud-eql02'
  CinderDellPsSanIp: '172.24.0.102'
  CinderDellPsSanLogin: '{{ san02_login }}'
  CinderDellPsSanPassword: '{{ san02_pass }}'
  CinderDellPsSanPrivateKey: ''
  CinderDellPsSanThinProvision: true
  CinderDellPsGroupname: 'cloud-equallogics'
  CinderDellPsPool: 'default'
  # CHAP authentication is disabled; login/password intentionally empty.
  CinderDellPsChapLogin: ''
  CinderDellPsChapPassword: ''
  CinderDellPsUseChap: false
View file

@ -0,0 +1,27 @@
# Note this can be specified either in the call
# to heat stack-create via an additional -e option
# or via the global environment on the seed in
# /etc/heat/environment.d/default.yaml
# The {{ rhel_reg_org }} / {{ rhel_reg_pool_id }} placeholders are filled
# in from private Ansible vars when this file is templated out.
parameter_defaults:
  rhel_reg_auto_attach: "true"
  rhel_reg_activation_key: "openstack-cloud"
  rhel_reg_org: "{{ rhel_reg_org }}"
  rhel_reg_pool_id: "{{ rhel_reg_pool_id }}"
  # Repos pin RHEL 7 + OSP 13 + Ceph 3 channels.
  rhel_reg_repos: "rhel-7-server-rpms,rhel-7-server-extras-rpms,rhel-7-server-rh-common-rpms,rhel-ha-for-rhel-7-server-rpms,rhel-7-server-openstack-13-rpms,rhel-7-server-rhceph-3-osd-rpms,rhel-7-server-rhceph-3-mon-rpms,rhel-7-server-rhceph-3-tools-rpms"
  rhel_reg_method: "portal"
  # Remaining parameters are unused with the portal/activation-key method
  # but must be present (empty) for the registration template.
  rhel_reg_sat_repo: ""
  rhel_reg_base_url: ""
  rhel_reg_environment: ""
  rhel_reg_force: ""
  rhel_reg_machine_name: ""
  rhel_reg_password: ""
  rhel_reg_release: ""
  rhel_reg_sat_url: ""
  rhel_reg_server_url: ""
  rhel_reg_service_level: ""
  rhel_reg_user: ""
  rhel_reg_type: ""
  rhel_reg_http_proxy_host: ""
  rhel_reg_http_proxy_port: ""
  rhel_reg_http_proxy_username: ""
  rhel_reg_http_proxy_password: ""

View file

@ -0,0 +1,44 @@
# Site-specific settings consumed by the configure-federation helper.
# NOTE(review): the jdennis-*/example.com values below look like upstream
# sample defaults; this file is deployed as an Ansible template, so real
# deployments should override them there.
# FQDN of IPA server
FED_IPA_HOST="jdennis-ipa.example.com"
# Base DN of IPA server
FED_IPA_BASE_DN="dc=example,dc=com"
# IPA administrator password
FED_IPA_ADMIN_PASSWD="FreeIPA4All"
# Password used by RH-SSO service to authenticate to IPA
# when RH-SSO obtains user/group information from IPA as part of
# RH-SSO's User Federation.
FED_IPA_RHSSO_SERVICE_PASSWD="rhsso-passwd"
# RH-SSO server IP address
FED_RHSSO_IP_ADDR="10.16.18.217"
# RH-SSO server FQDN
FED_RHSSO_FQDN="jdennis-rhsso-7"
# URL used to access the RH-SSO server
FED_RHSSO_URL="https://$FED_RHSSO_FQDN"
# Administrator password for RH-SSO server
# (quoted for consistency with every other credential above)
FED_RHSSO_ADMIN_PASSWORD="FreeIPA4All"
# Name of the RH-SSO realm
FED_RHSSO_REALM="openstack"
# Host name of the mellon server
# Note, this is identical to the Keystone server since Keystone is
# being front by Apache which is protecting it's resources with mellon.
FED_KEYSTONE_HOST="overcloud.localdomain"
# Port number mellon is running on the FED_KEYSTONE_HOST
# Note, this is identical to the Keystone server port
FED_KEYSTONE_HTTPS_PORT=13000
# Name assigned in Openstack to our IdP
FED_OPENSTACK_IDP_NAME="rhsso"
# Name of our Keystone mapping rules
FED_OPENSTACK_MAPPING_NAME="${FED_OPENSTACK_IDP_NAME}_mapping"