Merge branch 'master' of /git/ansible

commit dbeec354c2

37 changed files with 1329 additions and 569 deletions

files/fedora-cloud/haproxy.cfg (new file, 117 lines)
@@ -0,0 +1,117 @@
#---------------------------------------------------------------------
# Example configuration for a possible web application. See the
# full configuration options online.
#
# http://haproxy.1wt.eu/download/1.4/doc/configuration.txt
#
#---------------------------------------------------------------------

#---------------------------------------------------------------------
# Global settings
#---------------------------------------------------------------------
global
    # to have these messages end up in /var/log/haproxy.log you will
    # need to:
    #
    # 1) configure syslog to accept network log events. This is done
    #    by adding the '-r' option to the SYSLOGD_OPTIONS in
    #    /etc/sysconfig/syslog
    #
    # 2) configure local2 events to go to the /var/log/haproxy.log
    #    file. A line like the following can be added to
    #    /etc/sysconfig/syslog
    #
    #    local2.*    /var/log/haproxy.log
    #
    log         127.0.0.1 local2

    chroot      /var/lib/haproxy
    pidfile     /var/run/haproxy.pid
    maxconn     4000
    user        haproxy
    group       haproxy
    daemon

    # turn on stats unix socket
    stats socket /var/lib/haproxy/stats

#---------------------------------------------------------------------
# common defaults that all the 'listen' and 'backend' sections will
# use if not designated in their block
#---------------------------------------------------------------------
defaults
    mode                    http
    log                     global
    option                  httplog
    option                  dontlognull
    option http-server-close
    option forwardfor       except 127.0.0.0/8
    option                  redispatch
    retries                 3
    timeout http-request    10s
    timeout queue           1m
    timeout connect         10s
    timeout client          1m
    timeout server          1m
    timeout http-keep-alive 10s
    timeout check           10s
    maxconn                 3000

#frontend keystone_public *:5000
#    default_backend keystone_public
#frontend keystone_admin *:35357
#    default_backend keystone_admin

frontend neutron
    bind 0.0.0.0:9696 ssl crt /etc/haproxy/fed-cloud09.combined
    default_backend neutron

frontend cinder
    bind 0.0.0.0:8776 ssl crt /etc/haproxy/fed-cloud09.combined
    default_backend cinder

frontend swift
    bind 0.0.0.0:8080 ssl crt /etc/haproxy/fed-cloud09.combined
    default_backend swift

frontend nova
    bind 0.0.0.0:8774 ssl crt /etc/haproxy/fed-cloud09.combined
    default_backend nova

frontend ceilometer
    bind 0.0.0.0:8777 ssl crt /etc/haproxy/fed-cloud09.combined
    default_backend ceilometer

frontend ec2
    bind 0.0.0.0:8773 ssl crt /etc/haproxy/fed-cloud09.combined
    default_backend ec2

frontend glance
    bind 0.0.0.0:9292 ssl crt /etc/haproxy/fed-cloud09.combined
    default_backend glance

backend neutron
    server neutron 127.0.0.1:8696 check

backend cinder
    server cinder 127.0.0.1:6776 check

backend swift
    server swift 127.0.0.1:7080 check

backend nova
    server nova 127.0.0.1:6774 check

backend ceilometer
    server ceilometer 127.0.0.1:6777 check

backend ec2
    server ec2 127.0.0.1:6773 check

backend glance
    server glance 127.0.0.1:7292 check

backend keystone_public
    server keystone_public 127.0.0.1:5000 check

backend keystone_admin
    server keystone_admin 127.0.0.1:35357 check

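The file above is plain TLS termination: haproxy binds each public OpenStack API port with the combined certificate and proxies to the same service re-bound on a localhost-only port (neutron 9696 to 8696, nova 8774 to 6774, glance 9292 to 7292, and so on; keystone stays on its native ports). A minimal sketch of how the deployment step later in this commit could guard against a broken config; the validate parameter is an assumption, not something this commit uses:

# Hypothetical hardening of the haproxy.cfg deployment task:
# 'haproxy -c -f FILE' only parses the config and exits non-zero on
# errors, so a bad file never replaces the live one.
- copy: src={{ files }}/fedora-cloud/haproxy.cfg dest=/etc/haproxy/haproxy.cfg mode=644 owner=root group=root validate="haproxy -c -f %s"
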
files/fedora-cloud/openstack-nova-novncproxy (new file, 2 lines)
@@ -0,0 +1,2 @@
# You may specify other parameters to the nova-novncproxy here
OPTIONS="--novncproxy_host 209.132.184.9 --ssl_only"

@@ -1,7 +1,7 @@
 ---
 lvm_size: 20000
 mem_size: 8192
-num_cpus: 4
+num_cpus: 6
 # for systems that do not match the above - specify the same parameter in
 # the host_vars/$hostname file

@@ -34,10 +34,14 @@ custom_rules: [
 '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT',
 '-A INPUT -p tcp -m tcp -s 209.132.181.102 --dport 873 -j ACCEPT',
 
-# only allow varnish from localhost
+# allow varnish from localhost
 '-A INPUT -p tcp -m tcp -s 127.0.0.1 --dport 6081 -j ACCEPT',
 '-A INPUT -p tcp -m tcp -s 127.0.0.1 --dport 6082 -j ACCEPT',
+
+# also allow varnish from internal for purge requests
+'-A INPUT -p tcp -m tcp -s 192.168.1.0/24 --dport 6081 -j ACCEPT',
+'-A INPUT -p tcp -m tcp -s 10.5.126.0/24 --dport 6081 -j ACCEPT',
 
 # Allow koschei.cloud to talk to the inbound fedmsg relay.
 '-A INPUT -p tcp -m tcp --dport 9941 -s 209.132.184.151 -j ACCEPT',
 # Allow jenkins.cloud to talk to the inbound fedmsg relay.

@@ -33,10 +33,14 @@ custom_rules: [
 '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT',
 '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT',
 
-# only allow varnish from localhost
+# allow varnish from localhost
 '-A INPUT -p tcp -m tcp -s 127.0.0.1 --dport 6081 -j ACCEPT',
 '-A INPUT -p tcp -m tcp -s 127.0.0.1 --dport 6082 -j ACCEPT',
+
+# also allow varnish from internal for purge requests
+'-A INPUT -p tcp -m tcp -s 192.168.1.0/24 --dport 6081 -j ACCEPT',
+'-A INPUT -p tcp -m tcp -s 10.5.126.0/24 --dport 6081 -j ACCEPT',
 
 # Allow koschei.cloud to talk to the inbound fedmsg relay.
 '-A INPUT -p tcp -m tcp --dport 9941 -s 209.132.184.151 -j ACCEPT',
 # Allow jenkins.cloud to talk to the inbound fedmsg relay.

@@ -1,10 +0,0 @@
----
-instance_type: m1.small
-image: "{{ el6_qcow_id }}"
-keypair: fedora-admin-20130801
-security_group: webserver
-zone: nova
-hostbase: hrf-
-public_ip: 209.132.184.156
-root_auth_users: codeblock
-description: "hrf instance (https://github.com/fedora-infra/hrf)"

@@ -2,9 +2,9 @@
 nm: 255.255.255.192
 gw: 140.211.169.193
 dns: 140.211.166.130
-ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-6
-ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL6-x86_64/
-volgroup: /dev/vg_guests
+ks_url: http://209.132.181.6/repo/rhel/ks/kvm-rhel-6
+ks_repo: http://209.132.181.6/repo/rhel/RHEL6-x86_64/
+volgroup: /dev/vg_server
 eth0_ip: 140.211.169.228
-vmhost: osuosl01.fedoraproject.org
+vmhost: osuosl02.fedoraproject.org
 datacenter: osuosl

@@ -14,5 +14,5 @@ eth0_ip: 10.5.126.52
 # This is consumed by the roles/fedora-web/main role
 sponsor: redhat
 
-nrpe_procs_warn: 900
-nrpe_procs_crit: 1000
+nrpe_procs_warn: 1200
+nrpe_procs_crit: 1400

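(The nrpe_procs_warn/nrpe_procs_crit bumps here and in the next hunks feed the nagios process-count check. Assuming the stock plugin, the check these variables configure is roughly the sketch below; the plugin path is an assumption, not part of this commit:)

# Hypothetical equivalent of the NRPE check driven by these variables:
- shell: /usr/lib64/nagios/plugins/check_procs -w {{ nrpe_procs_warn }} -c {{ nrpe_procs_crit }}
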
@@ -16,5 +16,5 @@ sponsor: internetx
 datacenter: internetx
 postfix_group: vpn
 
-nrpe_procs_warn: 900
-nrpe_procs_crit: 1000
+nrpe_procs_warn: 1200
+nrpe_procs_crit: 1400

@@ -16,5 +16,5 @@ sponsor: osuosl
 datacenter: osuosl
 postfix_group: vpn
 
-nrpe_procs_warn: 900
-nrpe_procs_crit: 1000
+nrpe_procs_warn: 1200
+nrpe_procs_crit: 1400

@@ -13,3 +13,6 @@ eth0_ip: 10.5.126.51
 
 # This is consumed by the roles/fedora-web/main role
 sponsor: redhat
+
+nrpe_procs_warn: 1200
+nrpe_procs_crit: 1400

@@ -730,8 +730,6 @@ copr-be.cloud.fedoraproject.org
 # copr dev instances
 copr-be-dev.cloud.fedoraproject.org
 copr-fe-dev.cloud.fedoraproject.org
-#hrf
-hrf.cloud.fedoraproject.org
 #shogun-ca.cloud.fedoraproject.org
 209.132.184.157
 # bodhi.dev.fedoraproject.org

@@ -99,7 +99,6 @@
 - include: /srv/web/infra/ansible/playbooks/hosts/cloud-noc01.cloud.fedoraproject.org.yml
 - include: /srv/web/infra/ansible/playbooks/hosts/elections-dev.cloud.fedoraproject.org.yml
 - include: /srv/web/infra/ansible/playbooks/hosts/fedocal.dev.fedoraproject.org.yml
-- include: /srv/web/infra/ansible/playbooks/hosts/hrf.cloud.fedoraproject.org.yml
 - include: /srv/web/infra/ansible/playbooks/hosts/koschei.cloud.fedoraproject.org.yml
 - include: /srv/web/infra/ansible/playbooks/hosts/lists-dev.cloud.fedoraproject.org.yml
 - include: /srv/web/infra/ansible/playbooks/hosts/logserver.yml

@@ -53,7 +53,7 @@
 - command: vgrename vg_guests cinder-volumes
   ignore_errors: yes
 
-- lvg: vg=cinder-volumes pvs=/dev/md127 pesize=32 vg_options=''
+- lvg: vg=cinder-volumes pvs=/dev/md127 pesize=32 vg_options=""
 
 - template: src={{ files }}/fedora-cloud/hosts dest=/etc/hosts owner=root mode=0644

@@ -93,20 +93,31 @@
  when: packstack_sucessfully_finished.stat.exists == False
  ignore_errors: yes
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="^ONBOOT=" line="ONBOOT=yes"
  notify:
  - restart network
# only for first run
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="^NETMASK=" line="NETMASK=255.255.255.0"
  when: packstack_sucessfully_finished.stat.exists == False
  notify:
  - restart network
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="^IPADDR=" line="IPADDR={{controller_private_ip}}"
  when: packstack_sucessfully_finished.stat.exists == False
  notify:
  - restart network
- lineinfile: dest=/etc/sysconfig/network-scripts/ifcfg-eth1 regexp="BOOTPROTO=" line="BOOTPROTO=none"
  notify:
  - restart network
- template: src={{files}}/fedora-cloud/ifcfg-br-ex dest=/etc/sysconfig/network-scripts/ifcfg-br-ex owner=root mode=0644
  when: packstack_sucessfully_finished.stat.exists == False
  notify:
  - restart network
- template: src={{files}}/fedora-cloud/ifcfg-eth0 dest=/etc/sysconfig/network-scripts/ifcfg-eth0 owner=root mode=0644
  when: packstack_sucessfully_finished.stat.exists == False
  notify:
  - restart network
- command: ifup eth1
  when: packstack_sucessfully_finished.stat.exists == False

# FIXME notify network service restart, eth1 must be up and configured
- meta: flush_handlers

# http://docs.openstack.org/trunk/install-guide/install/yum/content/basics-ntp.html
- service: name=ntpd state=started enabled=yes

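(Every "restart network" notify above queues one handler run, and the flush_handlers meta task then forces it to fire mid-play so eth1 is configured before packstack needs it. A sketch of the handler these tasks assume; the real definition lives elsewhere in the repo:)

- name: restart network
  service: name=network state=restarted
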
@@ -125,6 +136,8 @@
    - ansible-openstack-modules
    - openstack-keystone
    - openstack-neutron
    - openstack-nova-common
    - haproxy
- yum: name=* state=latest

- name: add ssl cert

@@ -144,7 +157,6 @@
- name: add ssl key for neutron
  copy: src={{ private }}/files/openstack/fed-cloud09.key dest=/etc/pki/tls/private/fed-cloud09-neutron.key mode=600 owner=neutron group=root

# http://docs.openstack.org/trunk/install-guide/install/yum/content/basics-database-controller.html
- name: install mysql packages
  action: yum state=present pkg={{ item }}

@@ -189,31 +201,6 @@
     regexp="RABBITMQ_NODE_PORT"
     line=" 'RABBITMQ_NODE_PORTTTTT' => $port,"
     backup=yes
-#- lineinfile:
-#    dest=/usr/share/openstack-puppet/modules/rabbitmq/templates/rabbitmq.config.erb
-#    regexp="cacertfile"
-#    line=" {ssl_options, [{cacertfile,\"<%= @ssl_cert %>\"},"
-#    backup=yes
-#- lineinfile:
-#    dest=/usr/share/openstack-puppet/modules/neutron/manifests/init.pp
-#    regexp="rabbit_use_ssl = "
-#    line=" $rabbit_use_ssl = true,"
-#    backup=yes
-#- lineinfile:
-#    dest=/usr/share/openstack-puppet/modules/nova/manifests/init.pp
-#    regexp="rabbit_use_ssl = "
-#    line=" $rabbit_use_ssl = true,"
-#    backup=yes
-#- lineinfile:
-#    dest=/usr/share/openstack-puppet/modules/glance/manifests/notify/rabbitmq.pp
-#    regexp="rabbit_use_ssl = "
-#    line=" $rabbit_use_ssl = true,"
-#    backup=yes
-#- lineinfile:
-#    dest=/usr/share/openstack-puppet/modules/ceilometer/manifests/init.pp
-#    regexp="rabbit_use_ssl = "
-#    line=" $rabbit_use_ssl = true,"
-#    backup=yes
 - lineinfile:
     dest=/usr/lib/python2.7/site-packages/packstack/puppet/templates/mongodb.pp
     regexp="pidfilepath"

@@ -234,58 +221,31 @@
 - lineinfile: dest=/etc/rabbitmq/rabbitmq-env.conf regexp="^RABBITMQ_NODE_PORT=" state="absent"
 - service: name=rabbitmq-server state=started
 
-# WORKAROUND again
-#- ini_file: dest=/etc/keystone/keystone.conf section="DEFAULT" option="rabbit_use_ssl" value="true"
-#- service: name=rabbitmq-server state=restarted
-#- ini_file: dest=/etc/nova/nova.conf section="DEFAULT" option="rabbit_use_ssl" value="true"
-#- ini_file: dest=/etc/cinder/cinder.conf section="DEFAULT" option="rabbit_use_ssl" value="true"
-#- ini_file: dest=/etc/ceilometer/ceilometer.conf section="DEFAULT" option="rabbit_use_ssl" value="true"
-#- service: name="{{item}}" state=restarted
-#  with_items:
-#  - openstack-ceilometer-alarm-evaluator
-#  - openstack-ceilometer-alarm-notifier
-#  - openstack-ceilometer-api
-#  - openstack-ceilometer-central
-#  - openstack-ceilometer-collector
-#  - openstack-ceilometer-compute
-#  - openstack-ceilometer-notification
-#  - openstack-cinder-api
-#  - openstack-cinder-backup
-#  - openstack-cinder-scheduler
-#  - openstack-cinder-volume
-#  - openstack-nova-api
-#  - openstack-nova-cert
-#  - openstack-nova-compute
-#  - openstack-nova-conductor
-#  - openstack-nova-consoleauth
-#  - openstack-nova-novncproxy
-#  - openstack-nova-scheduler
 
 # flip endpoints internalurl to internal IP
 # ceilometer
 - shell: source /root/keystonerc_admin && keystone service-list | grep ceilometer | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:8777' --adminurl 'http://{{ controller_hostname }}:8777' --internalurl 'http://{{ controller_hostname }}:8777' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:8777' --adminurl 'https://{{ controller_hostname }}:8777' --internalurl 'https://{{ controller_hostname }}:8777' ) || true
 # cinder
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'cinder ' | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:8776/v1/%(tenant_id)s' --adminurl 'http://{{ controller_hostname }}:8776/v1/%(tenant_id)s' --internalurl 'http://{{ controller_hostname }}:8776/v1/%(tenant_id)s' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:8776/v1/%(tenant_id)s' --adminurl 'https://{{ controller_hostname }}:8776/v1/%(tenant_id)s' --internalurl 'https://{{ controller_hostname }}:8776/v1/%(tenant_id)s' ) || true
 # cinderv2
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'cinderv2' | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:8776/v2/%(tenant_id)s' --adminurl 'http://{{ controller_hostname }}:8776/v2/%(tenant_id)s' --internalurl 'http://{{ controller_hostname }}:8776/v2/%(tenant_id)s' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:8776/v2/%(tenant_id)s' --adminurl 'https://{{ controller_hostname }}:8776/v2/%(tenant_id)s' --internalurl 'https://{{ controller_hostname }}:8776/v2/%(tenant_id)s' ) || true
 # glance
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'glance' | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:9292' --adminurl 'http://{{ controller_hostname }}:9292' --internalurl 'http://{{ controller_hostname }}:9292' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:9292' --adminurl 'https://{{ controller_hostname }}:9292' --internalurl 'https://{{ controller_hostname }}:9292' ) || true
 # keystone --- !!!!! we need to use ADMIN_TOKEN here
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'keystone' | awk '{print $2}'
   register: SERVICE_ID

@@ -293,7 +253,7 @@
   register: ENDPOINT_ID
 - ini_file: dest=/etc/keystone/keystone.conf section=ssl option=certfile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
 - ini_file: dest=/etc/keystone/keystone.conf section=ssl option=keyfile value=/etc/pki/tls/private/fed-cloud09-keystone.key
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone --os-token '{{ADMIN_TOKEN}}' --os-endpoint 'http://{{ controller_hostname }}:35357/v2.0' endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:5000/v2.0' --adminurl 'https://{{ controller_hostname }}:35357/v2.0' --internalurl 'https://{{ controller_hostname }}:5000/v2.0' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone --os-token '{{ADMIN_TOKEN}}' --os-endpoint 'https://{{ controller_hostname }}:35357/v2.0' endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:5000/v2.0' --adminurl 'https://{{ controller_hostname }}:35357/v2.0' --internalurl 'https://{{ controller_hostname }}:5000/v2.0' ) || true
 - ini_file: dest=/etc/keystone/keystone.conf section=ssl option=enable value=True
 - service: name=openstack-keystone state=restarted
 - lineinfile: dest=/root/keystonerc_admin regexp="^export OS_AUTH_URL" line="export OS_AUTH_URL=https://{{ controller_hostname }}:5000/v2.0/"

@@ -303,37 +263,40 @@
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:9696/' --adminurl 'http://{{ controller_hostname }}:9696/' --internalurl 'http://{{ controller_hostname }}:9696/' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:9696/' --adminurl 'https://{{ controller_hostname }}:9696/' --internalurl 'https://{{ controller_hostname }}:9696/' ) || true
 # nova
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'nova ' | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:8774/v2/%(tenant_id)s' --adminurl 'http://{{ controller_hostname }}:8774/v2/%(tenant_id)s' --internalurl 'http://{{ controller_hostname }}:8774/v2/%(tenant_id)s' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:8774/v2/%(tenant_id)s' --adminurl 'https://{{ controller_hostname }}:8774/v2/%(tenant_id)s' --internalurl 'https://{{ controller_hostname }}:8774/v2/%(tenant_id)s' ) || true
 # nova_ec2
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'nova_ec2' | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:8773/services/Cloud' --adminurl 'http://{{ controller_hostname }}:8773/services/Admin' --internalurl 'http://{{ controller_hostname }}:8773/services/Cloud' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:8773/services/Cloud' --adminurl 'https://{{ controller_hostname }}:8773/services/Admin' --internalurl 'https://{{ controller_hostname }}:8773/services/Cloud' ) || true
 # novav3
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'novav3' | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:8774/v3' --adminurl 'http://{{ controller_hostname }}:8774/v3' --internalurl 'http://{{ controller_hostname }}:8774/v3' ) || true
-# swift - it actually only listen on public port!
-#- shell: source /root/keystonerc_admin && keystone service-list | grep 'swift ' | awk '{print $2}'
-#  register: SERVICE_ID
-#- shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
-#  register: ENDPOINT_ID
-#- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_private_ip }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{controller_hostname}}:8080/v1/AUTH_%(tenant_id)s' --adminurl 'http://{{controller_private_ip}}:8080' --internalurl 'http://{{controller_private_ip}}:8080/v1/AUTH_%(tenant_id)s' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:8774/v3' --adminurl 'https://{{ controller_hostname }}:8774/v3' --internalurl 'https://{{ controller_hostname }}:8774/v3' ) || true
+# swift
+- shell: source /root/keystonerc_admin && keystone service-list | grep 'swift ' | awk '{print $2}'
+  register: SERVICE_ID
+- shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
+  register: ENDPOINT_ID
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{controller_hostname}}:8080/v1/AUTH_%(tenant_id)s' --adminurl 'https://{{controller_private_ip}}:8080' --internalurl 'https://{{controller_private_ip}}:8080/v1/AUTH_%(tenant_id)s' ) || true
 # swift_s3
 - shell: source /root/keystonerc_admin && keystone service-list | grep 'swift_s3' | awk '{print $2}'
   register: SERVICE_ID
 - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}'
   register: ENDPOINT_ID
-- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'http://{{ controller_hostname }}:8080' --adminurl 'http://{{ controller_hostname }}:8080' --internalurl 'http://{{ controller_hostname }}:8080' ) || true
+- shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{ controller_hostname }}:8080' --adminurl 'https://{{ controller_hostname }}:8080' --internalurl 'https://{{ controller_hostname }}:8080' ) || true
 
+# Setup sysconfig file for novncproxy
+- copy: src={{ files }}/fedora-cloud/openstack-nova-novncproxy dest=/etc/sysconfig/openstack-nova-novncproxy mode=644 owner=root group=root
 
+- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=novncproxy_base_url value=https://{{ controller_hostname }}:6080/vnc_auto.html

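(The endpoint flips above repeat one pattern per service: resolve the service ID, find its endpoint, and, if the endpoint does not yet point at {{ controller_hostname }}, delete and recreate it with https URLs. A hypothetical consolidation, not in this commit, could drive it from one list; the explicit per-service tasks do have the advantage that services like nova_ec2 keep their own URL shapes:)

- shell: >
    source /root/keystonerc_admin &&
    SERVICE_ID=$(keystone service-list | grep '{{ item.name }}' | awk '{print $2}') &&
    ENDPOINT_ID=$(keystone endpoint-list | grep $SERVICE_ID | awk '{print $2}') &&
    keystone endpoint-list | grep $SERVICE_ID | grep -v {{ controller_hostname }} &&
    (keystone endpoint-delete $ENDPOINT_ID &&
    keystone endpoint-create --region 'RegionOne' --service $SERVICE_ID --publicurl '{{ item.url }}' --adminurl '{{ item.url }}' --internalurl '{{ item.url }}') || true
  with_items:
  - { name: 'neutron', url: 'https://{{ controller_hostname }}:9696/' }
  - { name: 'glance', url: 'https://{{ controller_hostname }}:9292' }
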
@@ -344,11 +307,18 @@
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_auth_url value=https://{{ controller_hostname }}:35357/v2.0
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_url value=https://{{ controller_hostname }}:9696
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=osapi_compute_listen_port value=6774
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=ec2_listen_port value=6773
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_api_servers value=https://{{ controller_hostname }}:9292
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=novncproxy_host value={{ controller_hostname }}
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=ssl_only value=False

- ini_file: dest=/etc/glance/glance-api.conf section=keystone_authtoken option=auth_uri value=https://{{ controller_hostname }}:5000
- ini_file: dest=/etc/glance/glance-api.conf section=keystone_authtoken option=auth_protocol value=https
- ini_file: dest=/etc/glance/glance-api.conf section=keystone_authtoken option=auth_host value={{ controller_hostname }}
- ini_file: dest=/etc/glance/glance-api.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
- ini_file: dest=/etc/glance/glance-api.conf section=DEFAULT option=bind_port value=7292

- ini_file: dest=/etc/glance/glance-registry.conf section=keystone_authtoken option=auth_uri value=https://{{ controller_hostname }}:5000
- ini_file: dest=/etc/glance/glance-registry.conf section=keystone_authtoken option=auth_host value={{ controller_hostname }}

@@ -363,23 +333,27 @@
 - ini_file: dest=/etc/cinder/cinder.conf section=keystone_authtoken option=auth_protocol value=https
 - ini_file: dest=/etc/cinder/cinder.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
 - ini_file: dest=/etc/cinder/cinder.conf section=DEFAULT option=backup_swift_url value=https://{{ controller_hostname }}:8080/v1/AUTH_
 - ini_file: dest=/etc/cinder/cinder.conf section=DEFAULT option=osapi_volume_listen_port value=6776
 - ini_file: dest=/etc/cinder/api-paste.conf section="filter:authtoken" option=auth_uri value=https://{{ controller_hostname }}:5000
 - ini_file: dest=/etc/cinder/api-paste.conf section="filter:authtoken" option=auth_host value={{ controller_hostname }}
 - ini_file: dest=/etc/cinder/api-paste.conf section="filter:authtoken" option=auth_protocol value=https
 - ini_file: dest=/etc/cinder/api-paste.conf section="filter:authtoken" option=service_protocol value=https
 - ini_file: dest=/etc/cinder/api-paste.conf section="filter:authtoken" option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
 
 - ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_uri value=https://{{ controller_hostname }}:5000
 - ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_protocol value=https
 - ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_host value={{ controller_hostname }}
 - ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
 - ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=nova_url value=https://{{ controller_hostname }}:8774/v2
 - ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=nova_admin_auth_url value=https://{{ controller_hostname }}:35357/v2.0
-- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=use_ssl value=True
+- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=use_ssl value=False
 - ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=ssl_cert_file value=/etc/pki/tls/certs/fed-cloud09-neutron.pem
 - ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=ssl_key_file value=/etc/pki/tls/private/fed-cloud09-neutron.key
 - ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=ssl_ca_file value=/etc/pki/tls/certs/fed-cloud09-neutron.pem
+- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=bind_port value=8696
 
 - ini_file: dest=/etc/neutron/api-paste.conf section="filter:authtoken" option=auth_uri value=https://{{ controller_hostname }}:5000
 - ini_file: dest=/etc/neutron/api-paste.conf section="filter:authtoken" option=auth_host value={{ controller_hostname }}
 - ini_file: dest=/etc/neutron/api-paste.conf section="filter:authtoken" option=auth_protocol value=https
 - ini_file: dest=/etc/neutron/api-paste.conf section="filter:authtoken" option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem

@@ -390,16 +364,26 @@
 - ini_file: dest=/etc/swift/proxy-server.conf section="filter:authtoken" option=auth_protocol value=https
 - ini_file: dest=/etc/swift/proxy-server.conf section="filter:authtoken" option=auth_host value={{ controller_hostname }}
 - ini_file: dest=/etc/swift/proxy-server.conf section="filter:authtoken" option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
+- ini_file: dest=/etc/swift/proxy-server.conf section=DEFAULT option=bind_port value=7080
+- ini_file: dest=/etc/swift/proxy-server.conf section=DEFAULT option=bind_ip value=127.0.0.1
 
 - ini_file: dest=/etc/ceilometer/ceilometer.conf section=keystone_authtoken option=auth_uri value=https://{{ controller_hostname }}:5000
 - ini_file: dest=/etc/ceilometer/ceilometer.conf section=keystone_authtoken option=auth_protocol value=https
 - ini_file: dest=/etc/ceilometer/ceilometer.conf section=keystone_authtoken option=auth_host value={{ controller_hostname }}
 - ini_file: dest=/etc/ceilometer/ceilometer.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
 - ini_file: dest=/etc/ceilometer/ceilometer.conf section=service_credentials option=os_auth_url value=https://{{ controller_hostname }}:35357/v2.0
+- ini_file: dest=/etc/ceilometer/ceilometer.conf section=api option=port value=6777
+
+# enable stunnel to neutron
+- shell: cat /etc/pki/tls/certs/fed-cloud09-keystone.pem /etc/pki/tls/private/fed-cloud09.key > /etc/haproxy/fed-cloud09.combined
+- file: path=/etc/haproxy/fed-cloud09.combined owner=haproxy mode=644
+- copy: src={{ files }}/fedora-cloud/haproxy.cfg dest=/etc/haproxy/haproxy.cfg mode=644 owner=root group=root
+- service: name=haproxy state=started enabled=yes
+
+- shell: openstack-service restart
 
 - lineinfile: dest=/etc/openstack-dashboard/local_settings regexp="^OPENSTACK_KEYSTONE_URL " line="OPENSTACK_KEYSTONE_URL = 'https://{{controller_hostname}}:5000/v2.0'"
 - lineinfile: dest=/etc/openstack-dashboard/local_settings regexp="OPENSTACK_SSL_CACERT " line="OPENSTACK_SSL_CACERT = '/etc/pki/tls/certs/fed-cloud09-keystone.pem'"
 - service: name=httpd state=restarted

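(haproxy will reject the combined PEM built above if the certificate and key do not pair up; a hypothetical pre-flight check, not part of this commit, could compare their RSA moduli before writing fed-cloud09.combined:)

# Hypothetical sanity check: the moduli of cert and key must be equal.
- shell: openssl x509 -noout -modulus -in /etc/pki/tls/certs/fed-cloud09-keystone.pem | md5sum
  register: cert_modulus
- shell: openssl rsa -noout -modulus -in /etc/pki/tls/private/fed-cloud09.key | md5sum
  register: key_modulus
- fail: msg="fed-cloud09 certificate and key do not match"
  when: cert_modulus.stdout != key_modulus.stdout
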
@@ -515,12 +499,46 @@
 - { name: codeblock, email: 'codeblock@elrod.me', tenant: infrastructure, password: "{{codeblock_password}}" }
 - { name: msuchy, email: 'msuchy@redhat.com', tenant: copr, password: "{{msuchy_password}}" }
 - { name: red, email: 'red@fedoraproject.org', tenant: infrastructure, password: "{{red_password}}" }
 #- template: src={{ files }}/fedora-cloud/keystonerc_msuchy dest=/root/ owner=root mode=0600
 #- shell: source /root/keystonerc_admin && keystone user-password-update --pass 'XXXX' msuchy
+- name: upload SSH keys for users
+  nova_keypair:
+    auth_url="https://{{controller_hostname}}:35357/v2.0"
+    login_username="{{ item.name }}"
+    login_password="{{ item.password }}" login_tenant_name="{{item.tenant}}" name="{{ item.name }}"
+    public_key="{{ item.public_key }}"
+  ignore_errors: yes
+  no_log: True
+  with_items:
+  - { name: kevin, email: 'kevin@fedoraproject.org', tenant: infrastructure, password: "{{kevin_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas kevin') }}" }
+  - { name: laxathom, email: 'laxathom@fedoraproject.org', tenant: infrastructure, password: "{{laxathom_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas laxathom') }}" }
+  - { name: samkottler, email: 'samkottler@fedoraproject.org', tenant: infrastructure, password: "{{samkottler_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas skottler') }}" }
+  - { name: puiterwijk, email: 'puiterwijk@fedoraproject.org', tenant: infrastructure, password: "{{puiterwijk_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas puiterwijk') }}" }
+  - { name: mattdm, email: 'mattdm@fedoraproject.org', tenant: infrastructure, password: "{{mattdm_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas mattdm') }}" }
+  - { name: tflink, email: 'tflink@fedoraproject.org', tenant: qa, password: "{{tflink_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas tflink') }}" }
+  - { name: copr, email: 'admin@fedoraproject.org', tenant: copr, password: "{{copr_password}}", public_key: "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCeTO0ddXuhDZYM9HyM0a47aeV2yIVWhTpddrQ7/RAIs99XyrsicQLABzmdMBfiZnP0FnHBF/e+2xEkT8hHJpX6bX81jjvs2bb8KP18Nh8vaXI3QospWrRygpu1tjzqZT0Llh4ZVFscum8TrMw4VWXclzdDw6x7csCBjSttqq8F3iTJtQ9XM9/5tCAAOzGBKJrsGKV1CNIrfUo5CSzY+IUVIr8XJ93IB2ZQVASK34T/49egmrWlNB32fqAbDMC+XNmobgn6gO33Yq5Ly7Dk4kqTUx2TEaqDkZfhsVu0YcwV81bmqsltRvpj6bIXrEoMeav7nbuqKcPLTxWEY/2icePF" }
+# - { name: twisted, email: 'buildbot@twistedmatrix.com', tenant: pythonbots, password: "{{twisted_password}}", public_key: "" }
+  - { name: ausil, email: 'dennis@ausil.us', tenant: infrastructure, password: "{{ausil_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas ausil') }}" }
+  - { name: anthomas, email: 'anthomas@redhat.com', tenant: cloudintern, password: "{{anthomas_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas anthomas') }}" }
+  - { name: jskladan, email: 'jskladan@redhat.com', tenant: qa, password: "{{jskladan_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas jskladan') }}" }
+  - { name: gholms, email: 'gholms@fedoraproject.org', tenant: cloudintern, password: "{{gholms_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas gholms') }}" }
+# - { name: cockpit, email: 'walters@redhat.com', tenant: scratch, password: "{{cockpit_password}}", public_key: "" }
+  - { name: nb, email: 'nb@fedoraproject.org', tenant: infrastructure, password: "{{nb_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas nb') }}" }
+  - { name: pingou, email: 'pingou@pingoured.fr', tenant: infrastructure, password: "{{pingou_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas pingou') }}" }
+  - { name: codeblock, email: 'codeblock@elrod.me', tenant: infrastructure, password: "{{codeblock_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas codeblock') }}" }
+  - { name: msuchy, email: 'msuchy@redhat.com', tenant: copr, password: "{{msuchy_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas msuchy') }}" }
+  - { name: red, email: 'red@fedoraproject.org', tenant: infrastructure, password: "{{red_password}}", public_key: "{{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas red') }}" }
 
 #- shell: source /root/keystonerc_admin && F=$(mktemp) && {{ lookup('pipe', '/srv/web/infra/ansible/scripts/auth-keys-from-fas msuchy') }}> "$F" && nova --os-username msuchy --os-password {{msuchy_password}} --os-tenant-name copr keypair-list | ( grep msuchy || nova --os-username msuchy --os-password {{msuchy_password}} --os-tenant-name copr keypair-add --pub_key "$F" msuchy ); rm -f "$F"
 
 ##### NETWORK ####
 # http://docs.openstack.org/havana/install-guide/install/apt/content/install-neutron.configure-networks.html
 #
 # external network is a class C: 209.132.184.0/24
 # 209.132.184.1 to .25 - reserved for hardware.
 # 209.132.184.26 to .30 - reserved for test cloud external ips
 # 209.132.184.31 to .69 - icehouse cloud
 # 209.132.184.70 to .89 - reserved for arm03 SOCs
 # 209.132.184.90 to .251 - folsom cloud
 #
 - name: Create an external network
   neutron_network:
     login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"

@ -545,257 +563,113 @@
|
|||
- shell: source /root/keystonerc_admin && nova floating-ip-create external
|
||||
when: packstack_sucessfully_finished.stat.exists == False
|
||||
|
||||
# 172.16.0.1/12 -- 172.21.0.1/12 - Free to take
|
||||
# 172.23.0.1/12 - free (but used by old cloud)
|
||||
# 172.24.0.1/12 - RESERVED it is used internally for OS
|
||||
# 172.25.0.1/12 - Cloudintern
|
||||
# 172.26.0.1/12 - infrastructure
|
||||
# 172.27.0.1/12 - persistent
|
||||
# 172.28.0.1/12 - transient
|
||||
# 172.29.0.1/12 - scratch
|
||||
# 172.30.0.1/12 - copr
|
||||
# 172.31.0.1/12 - Free to take
|
||||
# 172.16.0.1/16 -- 172.22.0.1/16 - free (can be split to /20)
|
||||
# 172.23.0.1/16 - free (but used by old cloud)
|
||||
# 172.24.0.1/24 - RESERVED it is used internally for OS
|
||||
# 172.24.1.0/24 -- 172.24.255.0/24 - likely free (?)
|
||||
# 172.25.0.1/20 - Cloudintern (172.25.0.1 - 172.25.15.254)
|
||||
# 172.25.16.1/20 - infrastructure (172.25.16.1 - 172.25.31.254)
|
||||
# 172.25.32.1/20 - persistent (172.25.32.1 - 172.25.47.254)
|
||||
# 172.25.48.1/20 - transient (172.25.48.1 - 172.25.63.254)
|
||||
# 172.25.64.1/20 - scratch (172.25.64.1 - 172.25.79.254)
|
||||
# 172.25.80.1/20 - copr (172.25.80.1 - 172.25.95.254)
|
||||
# 172.25.96.1/20 - cloudsig (172.25.96.1 - 172.25.111.254)
|
||||
# 172.25.112.1/20 - qa (172.25.112.1 - 172.25.127.254)
|
||||
# 172.25.128.1/20 - pythonbots (172.25.128.1 - 172.25.143.254)
|
||||
# 172.25.143.1/20 -- 172.25.240.1/20 - free
|
||||
# 172.26.0.1/16 -- 172.31.0.1/16 - free (can be split to /20)
|
||||
|
||||
# Cloudintern network
|
||||
- name: Create a router for Cloudintern
|
||||
- name: Create a router for all tenants
|
||||
neutron_router:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=cloudintern
|
||||
name=ext-to-cloudintern
|
||||
register: ROUTER_ID
|
||||
- name: Connect router's gateway to the external network
|
||||
tenant_name="{{ item }}"
|
||||
name="ext-to-{{ item }}"
|
||||
with_items:
|
||||
- cloudintern
|
||||
- cloudsig
|
||||
- copr
|
||||
- infrastructure
|
||||
- persistent
|
||||
- pythonbots
|
||||
- qa
|
||||
- scratch
|
||||
- transient
|
||||
- name: "Connect router's gateway to the external network"
|
||||
neutron_router_gateway:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
router_name="ext-to-cloudintern"
|
||||
router_name="ext-to-{{ item }}"
|
||||
network_name="external"
|
||||
- name: Create a private network for cloudintern
|
||||
with_items:
|
||||
- cloudintern
|
||||
- cloudsig
|
||||
- copr
|
||||
- infrastructure
|
||||
- persistent
|
||||
- pythonbots
|
||||
- qa
|
||||
- scratch
|
||||
- transient
|
||||
- name: Create a private network for all tenants
|
||||
neutron_network:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=cloudintern
|
||||
name=cloudintern-net
|
||||
- name: Create a subnet in the cloudintern-net
|
||||
tenant_name="{{ item }}"
|
||||
name="{{ item }}-net"
|
||||
with_items:
|
||||
- cloudintern
|
||||
- cloudsig
|
||||
- copr
|
||||
- infrastructure
|
||||
- persistent
|
||||
- pythonbots
|
||||
- qa
|
||||
- scratch
|
||||
- transient
|
||||
- name: Create a subnet for all tenants
|
||||
neutron_subnet:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=cloudintern
|
||||
network_name=cloudintern-net
|
||||
name=cloudintern-subnet
|
||||
cidr=172.25.0.1/12
|
||||
gateway_ip=172.25.0.1
|
||||
dns_nameservers=66.35.62.163,140.211.169.201
|
||||
register: CLOUDINTERN_SUBNET_ID
|
||||
- name: Connect router's interface to the cloudintern-subnet
|
||||
tenant_name="{{ item.name }}"
|
||||
network_name="{{ item.name }}-net"
|
||||
name="{{ item.name }}-subnet"
|
||||
cidr="{{ item.cidr }}"
|
||||
gateway_ip="{{ item.gateway }}"
|
||||
dns_nameservers="66.35.62.163,140.211.169.201"
|
||||
with_items:
|
||||
- { name: cloudintern, cidr: '172.25.0.1/20', gateway: '172.25.0.1' }
|
||||
- { name: cloudsig, cidr: '172.25.96.1/20', gateway: '172.25.96.1' }
|
||||
- { name: copr, cidr: '172.25.80.1/20', gateway: '172.25.80.1' }
|
||||
- { name: infrastructure, cidr: '172.25.16.1/20', gateway: '172.25.16.1' }
|
||||
- { name: persistent, cidr: '172.25.32.1/20', gateway: '172.25.32.1' }
|
||||
- { name: pythonbots, cidr: '172.25.128.1/20', gateway: '172.25.128.1' }
|
||||
- { name: qa, cidr: '172.25.112.1/20', gateway: '172.25.112.1' }
|
||||
- { name: scratch, cidr: '172.25.64.1/20', gateway: '172.25.64.1' }
|
||||
- { name: transient, cidr: '172.25.48.1/20', gateway: '172.25.48.1' }
|
||||
- name: "Connect router's interface to the TENANT-subnet"
|
||||
neutron_router_interface:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=cloudintern
|
||||
router_name="ext-to-cloudintern"
|
||||
subnet_name="cloudintern-subnet"
|
||||
tenant_name="{{ item }}"
|
||||
router_name="ext-to-{{ item }}"
|
||||
subnet_name="{{ item }}-subnet"
|
||||
with_items:
|
||||
- cloudintern
|
||||
- cloudsig
|
||||
- copr
|
||||
- infrastructure
|
||||
- persistent
|
||||
- pythonbots
|
||||
- qa
|
||||
- scratch
|
||||
- transient
|
||||
|
||||
# Copr network
|
||||
- name: Create a router for copr
|
||||
neutron_router:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=copr
|
||||
name=ext-to-copr
|
||||
register: ROUTER_ID
|
||||
- name: Connect router's gateway to the external network
|
||||
neutron_router_gateway:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
router_name="ext-to-copr"
|
||||
network_name="external"
|
||||
- name: Create a private network for copr
|
||||
neutron_network:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=copr
|
||||
name=copr-net
|
||||
- name: Create a subnet in the copr-net
|
||||
neutron_subnet:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=copr
|
||||
network_name=copr-net
|
||||
name=copr-subnet
|
||||
cidr=172.30.0.1/12
|
||||
gateway_ip=172.30.0.1
|
||||
dns_nameservers=66.35.62.163,140.211.169.201
|
||||
register: COPR_SUBNET_ID
|
||||
- name: Connect router's interface to the copr-subnet
|
||||
neutron_router_interface:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=copr
|
||||
router_name="ext-to-copr"
|
||||
subnet_name="copr-subnet"
|
||||
|
||||
# infrastructure network
|
||||
- name: Create a router for infrastructure
|
||||
neutron_router:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=infrastructure
|
||||
name=ext-to-infrastructure
|
||||
register: ROUTER_ID
|
||||
- name: Connect router's gateway to the external network
|
||||
neutron_router_gateway:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
router_name="ext-to-infrastructure"
|
||||
network_name="external"
|
||||
- name: Create a private network for infrastructure
|
||||
neutron_network:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=infrastructure
|
||||
name=infrastructure-net
|
||||
- name: Create a subnet in the infrastructure-net
|
||||
neutron_subnet:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=infrastructure
|
||||
network_name=infrastructure-net
|
||||
name=infrastructure-subnet
|
||||
cidr=172.26.0.1/12
|
||||
gateway_ip=172.26.0.1
|
||||
dns_nameservers=66.35.62.163,140.211.169.201
|
||||
register: INFRASTRUCTURE_SUBNET_ID
|
||||
- name: Connect router's interface to the infrastructure-subnet
|
||||
neutron_router_interface:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=infrastructure
|
||||
router_name="ext-to-infrastructure"
|
||||
subnet_name="infrastructure-subnet"
|
||||
|
||||
# persistent network
|
||||
- name: Create a router for persistent
|
||||
neutron_router:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=persistent
|
||||
name=ext-to-persistent
|
||||
register: ROUTER_ID
|
||||
- name: Connect router's gateway to the external network
|
||||
neutron_router_gateway:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
router_name="ext-to-persistent"
|
||||
network_name="external"
|
||||
- name: Create a private network for persistent
|
||||
neutron_network:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=persistent
|
||||
name=persistent-net
|
||||
- name: Create a subnet in the persistent-net
|
||||
neutron_subnet:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=persistent
|
||||
network_name=persistent-net
|
||||
name=persistent-subnet
|
||||
cidr=172.27.0.1/12
|
||||
gateway_ip=172.27.0.1
|
||||
dns_nameservers=66.35.62.163,140.211.169.201
|
||||
register: PERSISTENT_SUBNET_ID
|
||||
- name: Connect router's interface to the persistent-subnet
|
||||
neutron_router_interface:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=persistent
|
||||
router_name="ext-to-persistent"
|
||||
subnet_name="persistent-subnet"
|
||||
|
||||
# transient network
|
||||
- name: Create a router for transient
|
||||
neutron_router:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=transient
|
||||
name=ext-to-transient
|
||||
register: ROUTER_ID
|
||||
- name: Connect router's gateway to the external network
|
||||
neutron_router_gateway:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
router_name="ext-to-transient"
|
||||
network_name="external"
|
||||
- name: Create a private network for transient
|
||||
neutron_network:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=transient
|
||||
name=transient-net
|
||||
- name: Create a subnet in the transient-net
|
||||
neutron_subnet:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=transient
|
||||
network_name=transient-net
|
||||
name=transient-subnet
|
||||
cidr=172.28.0.1/12
|
||||
gateway_ip=172.28.0.1
|
||||
dns_nameservers=66.35.62.163,140.211.169.201
|
||||
register: TRANSIENT_SUBNET_ID
|
||||
- name: Connect router's interface to the transient-subnet
|
||||
neutron_router_interface:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=transient
|
||||
router_name="ext-to-transient"
|
||||
subnet_name="transient-subnet"
|
||||
|
||||
# scratch network
|
||||
- name: Create a router for scratch
|
||||
neutron_router:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=scratch
|
||||
name=ext-to-scratch
|
||||
register: ROUTER_ID
|
||||
- name: Connect router's gateway to the external network
|
||||
neutron_router_gateway:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
router_name="ext-to-scratch"
|
||||
network_name="external"
|
||||
- name: Create a private network for scratch
|
||||
neutron_network:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=scratch
|
||||
name=scratch-net
|
||||
- name: Create a subnet in the scratch-net
|
||||
neutron_subnet:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=scratch
|
||||
network_name=scratch-net
|
||||
name=scratch-subnet
|
||||
cidr=172.29.0.1/12
|
||||
gateway_ip=172.29.0.1
|
||||
dns_nameservers=66.35.62.163,140.211.169.201
|
||||
register: SCRATCH_SUBNET_ID
|
||||
- name: Connect router's interface to the scratch-subnet
|
||||
neutron_router_interface:
|
||||
login_username="admin" login_password="{{ ADMIN_PASS }}" login_tenant_name="admin"
|
||||
auth_url="https://{{controller_hostname}}:35357/v2.0"
|
||||
tenant_name=scratch
|
||||
router_name="ext-to-scratch"
|
||||
subnet_name="scratch-subnet"



################
# Copr
# ##############
- name: Copr - Create 'ssh-anywhere' security group
#################
# Security Groups
################
- name: "Create 'ssh-anywhere' security group"
  neutron_sec_group:
    login_username: "admin"
    login_password: "{{ ADMIN_PASS }}"

@@ -804,7 +678,7 @@
    state: "present"
    name: 'ssh-anywhere'
    description: "allow ssh from anywhere"
    tenant_name: "copr"
    tenant_name: "{{item}}"
    rules:
      - direction: "ingress"
        port_range_min: "22"
@@ -812,8 +686,18 @@
        ethertype: "IPv4"
        protocol: "tcp"
        remote_ip_prefix: "0.0.0.0/0"
  with_items:
    - cloudintern
    - cloudsig
    - copr
    - infrastructure
    - persistent
    - pythonbots
    - qa
    - scratch
    - transient

- name: Copr - Create 'ssh-internal' security group
- name: "Create 'ssh-internal' security group"
  neutron_sec_group:
    login_username: "admin"
    login_password: "{{ ADMIN_PASS }}"
@@ -821,12 +705,117 @@
    auth_url: "https://{{controller_hostname}}:35357/v2.0"
    state: "present"
    name: 'ssh-internal'
    description: "allow ssh from copr-network"
    tenant_name: "copr"
    description: "allow ssh from {{item.name}}-network"
    tenant_name: "{{ item.name }}"
    rules:
      - direction: "ingress"
        port_range_min: "22"
        port_range_max: "22"
        ethertype: "IPv4"
        protocol: "tcp"
        remote_ip_prefix: "172.30.0.1/12"
        remote_ip_prefix: "{{ item.prefix }}"
  with_items:
    - { name: cloudintern, prefix: '172.25.0.1/20' }
    - { name: cloudsig, prefix: '172.25.96.1/20' }
    - { name: copr, prefix: '172.25.80.1/20' }
    - { name: infrastructure, prefix: "172.25.16.1/20" }
    - { name: persistent, prefix: "172.25.32.1/20" }
    - { name: pythonbots, prefix: '172.25.128.1/20' }
    - { name: qa, prefix: "172.25.112.1/20" }
    - { name: scratch, prefix: '172.25.64.1/20' }
    - { name: transient, prefix: '172.25.48.1/20' }

- name: "Create 'web-80-anywhere' security group"
  neutron_sec_group:
    login_username: "admin"
    login_password: "{{ ADMIN_PASS }}"
    login_tenant_name: "admin"
    auth_url: "https://{{controller_hostname}}:35357/v2.0"
    state: "present"
    name: 'web-80-anywhere'
    description: "allow web-80 from anywhere"
    tenant_name: "{{item}}"
    rules:
      - direction: "ingress"
        port_range_min: "80"
        port_range_max: "80"
        ethertype: "IPv4"
        protocol: "tcp"
        remote_ip_prefix: "0.0.0.0/0"
  with_items:
    - cloudintern
    - cloudsig
    - copr
    - infrastructure
    - persistent
    - pythonbots
    - qa
    - scratch
    - transient

- name: "Create 'web-443-anywhere' security group"
  neutron_sec_group:
    login_username: "admin"
    login_password: "{{ ADMIN_PASS }}"
    login_tenant_name: "admin"
    auth_url: "https://{{controller_hostname}}:35357/v2.0"
    state: "present"
    name: 'web-443-anywhere'
    description: "allow web-443 from anywhere"
    tenant_name: "{{item}}"
    rules:
      - direction: "ingress"
        port_range_min: "443"
        port_range_max: "443"
        ethertype: "IPv4"
        protocol: "tcp"
        remote_ip_prefix: "0.0.0.0/0"
  with_items:
    - cloudintern
    - cloudsig
    - copr
    - infrastructure
    - persistent
    - pythonbots
    - qa
    - scratch
    - transient

- name: "Create 'wide-open' security group"
  neutron_sec_group:
    login_username: "admin"
    login_password: "{{ ADMIN_PASS }}"
    login_tenant_name: "admin"
    auth_url: "https://{{controller_hostname}}:35357/v2.0"
    state: "present"
    name: 'wide-open'
    description: "allow anything from anywhere"
    tenant_name: "{{item}}"
    rules:
      - direction: "ingress"
        port_range_min: "0"
        port_range_max: "65535"
        ethertype: "IPv4"
        protocol: "tcp"
        remote_ip_prefix: "0.0.0.0/0"
  with_items:
    - cloudintern
    - cloudsig
    - copr
    - infrastructure
    - persistent
    - pythonbots
    - qa
    - scratch
    - transient

# Update quota for Copr
# SEE:
# nova quota-defaults
# nova quota-show --tenant $TENANT_ID
# default is 10 instances, 20 cores, 51200 RAM, 10 floating IPs
- shell: source /root/keystonerc_admin && keystone tenant-list | grep 'copr' | awk '{print $2}'
  register: TENANT_ID
- shell: source /root/keystonerc_admin && nova quota-update --instances 40 --cores 80 --ram 512000 --floating-ips 40 {{ TENANT_ID.stdout }}
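The two shell tasks above can also be expressed against the Python OpenStack clients of the same era; a hedged sketch (Icehouse-era python-keystoneclient/python-novaclient assumed; the auth_url and credentials are hypothetical placeholders):

# Sketch only: assumes the Icehouse-era python-keystoneclient and
# python-novaclient; auth_url and credentials are hypothetical placeholders.
from keystoneclient.v2_0 import client as ks_client
from novaclient.v1_1 import client as nova_client

AUTH_URL = 'https://controller:35357/v2.0'  # placeholder
keystone = ks_client.Client(username='admin', password='ADMIN_PASS',
                            tenant_name='admin', auth_url=AUTH_URL)
# Same lookup as the keystone tenant-list | grep 'copr' pipeline above.
tenant_id = [t.id for t in keystone.tenants.list() if t.name == 'copr'][0]

nova = nova_client.Client('admin', 'ADMIN_PASS', 'admin', auth_url=AUTH_URL)
# Same limits as the nova quota-update call above.
nova.quotas.update(tenant_id, instances=40, cores=80,
                   ram=512000, floating_ips=40)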

@@ -1,59 +0,0 @@
- name: check/create instance
  hosts: hrf.cloud.fedoraproject.org
  user: root
  gather_facts: False

  vars_files:
   - /srv/web/infra/ansible/vars/global.yml
   - "/srv/private/ansible/vars.yml"

  tasks:
  - include: "{{ tasks }}/persistent_cloud.yml"

- name: provision instance
  hosts: hrf.cloud.fedoraproject.org
  user: root
  gather_facts: True

  vars_files:
   - /srv/web/infra/ansible/vars/global.yml
   - "/srv/private/ansible/vars.yml"
   - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  tasks:
  - include: "{{ tasks }}/cloud_setup_basic.yml"

  handlers:
  - include: "{{ handlers }}/restart_services.yml"

- name: deploy hrf
  hosts: hrf.cloud.fedoraproject.org
  user: root
  gather_facts: True

  vars_files:
   - /srv/web/infra/ansible/vars/global.yml
   - "/srv/private/ansible/vars.yml"
   - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  tasks:
  - name: install deps
    yum: state=present name={{ item }}
    with_items:
    - httpd
    - python-flask
    - python-fedmsg-meta-fedora-infrastructure
    - fedmsg
    - mod_wsgi
    - htop # not a dep, but handy
    - git
    - fail2ban

  - name: enable fail2ban and start it
    shell: chkconfig fail2ban on && service fail2ban start

  - name: clone the flask repo
    git: repo=git://github.com/fedora-infra/hrf.git dest=/srv/www/hrf accept_hostkey=true

  - name: enable port 80
    command: lokkit -p '80:tcp'
BIN
roles/apps-fp-o/files/img/icons/fedimg.png
Normal file
Binary file not shown. After Width: | Height: | Size: 1.5 KiB
@@ -28,3 +28,8 @@
    - restart httpd
  tags:
  - apps-fp-o

- name: Copy over any extra icons we carry
  synchronize: src=img/icons/ dest=/srv/web/apps-fp-o/img/icons/
  tags:
  - apps-fp-o
@@ -21,6 +21,9 @@
# openstack needs this to handle external ips right
-A INPUT -p gre -m comment --comment "001 neutron tunnel port incoming neutron_tunnel" -j ACCEPT

# compute nodes need to allow vnc ports from the controller
-A INPUT -s 172.24.0.9 -p tcp -m tcp --dport 5900:6900 -j ACCEPT

# for nrpe - allow it from nocs
-A INPUT -p tcp -m tcp --dport 5666 -s 192.168.1.10 -j ACCEPT
# FIXME - this is the global nat-ip and we need the noc01-specific ip

@@ -28,22 +31,6 @@
-A INPUT -p tcp -m tcp --dport 5666 -s 209.132.181.35 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 5666 -s 10.5.126.41 -j ACCEPT

{% if env != 'staging' and datacenter == 'phx2' and inventory_hostname not in groups['staging-friendly'] %}
#
# In the phx2 datacenter, both production and staging hosts are in the same
# subnet/vlan. We want production hosts to reject connections from staging group hosts
# to prevent them from interfering with production. There are however a few hosts in
# production we have marked 'staging-friendly' that we do allow staging to talk to for
# mostly read-only data they need.
#
{% for host in groups['staging'] %}
{% if 'eth0_ip' in hostvars[host] %}# {{ host }}
-A INPUT -s {{ hostvars[host]['eth0_ip'] }} -j REJECT --reject-with icmp-host-prohibited
{% else %}# {{ host }} has no 'eth0_ip' listed
{% endif %}
{% endfor %}
{% endif %}

# if the host/group defines incoming tcp_ports - allow them
{% if tcp_ports is defined %}
{% for port in tcp_ports %}
8
roles/cloud_compute/handlers/main.yml
Normal file

@@ -0,0 +1,8 @@
- name: "update ca-trust"
  command: /usr/bin/update-ca-trust

- name: "restart neutron-openvswitch-agent"
  service: name=neutron-openvswitch-agent state=restarted

- name: "restart openstack-nova-compute"
  service: name=openstack-nova-compute state=restarted
@@ -17,6 +17,12 @@
  notify:
  - restart network

- name: add cert to ca-bundle.crt so plain curl works
  copy: src={{ private }}/files/openstack/fed-cloud09.pem dest=/etc/pki/ca-trust/source/anchors/ mode=600 owner=root group=root
  notify:
  - update ca-trust
- meta: flush_handlers

- yum: state=present name=https://repos.fedorapeople.org/repos/openstack/openstack-icehouse/rdo-release-icehouse-4.noarch.rpm

# http://docs.openstack.org/icehouse/install-guide/install/yum/content/nova-compute.html

@@ -31,33 +37,84 @@

- name: Set up db connection to controller
  ini_file: dest=/etc/nova/nova.conf section=database option=connection value=mysql://nova:{{NOVA_DBPASS}}@{{controller_private_ip}}/nova
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=auth_strategy value=keystone
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_uri value=https://{{controller_private_ip}}:5000
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_host value={{controller_private_ip}}
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_uri value=https://{{controller_hostname}}:5000
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_host value={{controller_hostname}}
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_protocol value=https
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=auth_port value=35357
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=admin_user value=nova
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=admin_tenant_name value=services
  notify:
  - restart openstack-nova-compute
- name: set admin_password
  ini_file: dest=/etc/nova/nova.conf section=keystone_authtoken option=admin_password value={{NOVA_PASS}}
  notify:
  - restart openstack-nova-compute

- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rpc_backend value=nova.openstack.common.rpc.impl_kombu
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_host value={{controller_private_ip}}
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_hosts value={{controller_private_ip}}:5672
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_userid value=amqp_user
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_password value={{ CONFIG_AMQP_AUTH_PASSWORD }}
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_port value=5672
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=rabbit_use_ssl value=False
  notify:
  - restart openstack-nova-compute

- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=my_ip value={{compute_private_ip}}
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=vnc_enabled value=True
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=vncserver_listen value=0.0.0.0
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=vncserver_proxyclient_address value={{compute_private_ip}}
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=novncproxy_base_url value=http://{{controller_private_ip}}:6080/vnc_auto.html
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=novncproxy_base_url value=https://{{controller_hostname}}:6080/vnc_auto.html
  notify:
  - restart openstack-nova-compute

- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_host value={{controller_private_ip}}
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_host value={{controller_hostname}}
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_protocol value=https
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=glance_api_servers value=https://{{ controller_hostname }}:9292
  notify:
  - restart openstack-nova-compute

- service: name=libvirtd state=started enabled=yes
- service: name=messagebus state=started
@@ -74,59 +131,138 @@
  - openstack-neutron-openvswitch

- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=auth_strategy value=keystone
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_uri value=https://{{controller_private_ip}}:5000
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_host value={{controller_private_ip}}
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_uri value=https://{{controller_hostname}}:5000
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_host value={{controller_hostname}}
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_protocol value=https
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=auth_port value=35357
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=cafile value=/etc/pki/tls/certs/fed-cloud09-keystone.pem
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=admin_user value=neutron
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=admin_tenant_name value=services
  notify:
  - restart neutron-openvswitch-agent
- name: set admin_password
  ini_file: dest=/etc/neutron/neutron.conf section=keystone_authtoken option=admin_password value={{NEUTRON_PASS}}
  notify:
  - restart neutron-openvswitch-agent

- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rpc_backend value=neutron.openstack.common.rpc.impl_kombu
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_host value={{controller_private_ip}}
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_hosts value={{controller_private_ip}}:5672
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_userid value=amqp_user
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_password value={{ CONFIG_AMQP_AUTH_PASSWORD }}
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=rabbit_port value=5672
  notify:
  - restart neutron-openvswitch-agent

# uncomment if you want to debug compute instance
#- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=verbose value=True
#  notify:
#  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=core_plugin value=neutron.plugins.ml2.plugin.Ml2Plugin
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/neutron.conf section=DEFAULT option=service_plugins value=neutron.services.l3_router.l3_router_plugin.L3RouterPlugin
  notify:
  - restart neutron-openvswitch-agent

- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=type_drivers value=local,flat,gre
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=tenant_network_types value=gre
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2 option=mechanism_drivers value=openvswitch
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ml2_type_gre option=tunnel_id_ranges value=1:1000
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=local_ip value={{compute_private_ip}}
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=tunnel_type value=gre
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=tunnel_types value=gre
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=agent option=tunnel_types value=gre
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=ovs option=enable_tunneling value=True
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=securitygroup option=firewall_driver value=neutron.agent.linux.iptables_firewall.OVSHybridIptablesFirewallDriver
  notify:
  - restart neutron-openvswitch-agent
- ini_file: dest=/etc/neutron/plugins/ml2/ml2_conf.ini section=securitygroup option=enable_security_group value=True
  notify:
  - restart neutron-openvswitch-agent

# WORKAROUND https://ask.openstack.org/en/question/28734/instance-failed-to-spawn-you-must-call-aug-init-first-to-initialize-augeas/
- ini_file: dest=/usr/lib/systemd/system/neutron-openvswitch-agent.service section=Service option=ExecStart value="/usr/bin/neutron-openvswitch-agent --config-file /usr/share/neutron/neutron-dist.conf --config-file /etc/neutron/neutron.conf --config-file /etc/neutron/plugin.ini --log-file /var/log/neutron/openvswitch-agent.log"
  notify:
  - restart neutron-openvswitch-agent

- service: name=openvswitch state=started enabled=yes
- command: ovs-vsctl --may-exist add-br br-int

- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=network_api_class value=nova.network.neutronv2.api.API
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_url value=http://{{controller_private_ip}}:9696
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_url value=https://{{controller_hostname}}:9696
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_auth_strategy value=keystone
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_tenant_name value=services
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_username value=neutron
  notify:
  - restart openstack-nova-compute
- name: set neutron_admin_password
  ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_password value={{NEUTRON_PASS}}
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_auth_url value=https://{{controller_private_ip}}:35357/v2.0
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=neutron_admin_auth_url value=https://{{controller_hostname}}:35357/v2.0
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=linuxnet_interface_driver value=nova.network.linux_net.LinuxOVSInterfaceDriver
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=firewall_driver value=nova.virt.firewall.NoopFirewallDriver
  notify:
  - restart openstack-nova-compute
- ini_file: dest=/etc/nova/nova.conf section=DEFAULT option=security_group_api value=neutron
  notify:
  - restart openstack-nova-compute

- file: src=/etc/neutron/plugins/ml2/ml2_conf.ini dest=/etc/neutron/plugin.ini state=link

- service: name=neutron-openvswitch-agent state=restarted enabled=yes
- service: name=openstack-nova-compute state=restarted enabled=yes
  notify:
  - restart openstack-nova-compute
@@ -26,8 +26,10 @@ the missing branches (or even the missing repo)

"""

import multiprocessing.pool
import os
import subprocess
import time

import requests

@@ -56,6 +58,7 @@ GIT_FOLDER = '/srv/git/rpms/'
MKBRANCH = '/usr/local/bin/mkbranch'
SETUP_PACKAGE = '/usr/local/bin/setup_git_package'

THREADS = 20
VERBOSE = False


@@ -67,7 +70,7 @@ class ProcessError(InternalError):
    pass


def _invoke(program, args):
def _invoke(program, args, cwd=None):
    '''Run a command and raise an exception if an error occurred.

    :arg program: The program to invoke

@@ -79,63 +82,60 @@ def _invoke(program, args):
    cmdLine.extend(args)
    if VERBOSE:
        print ' '.join(cmdLine)
        print ' in', cwd

    if VERBOSE:
        program = subprocess.Popen(
            cmdLine, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    else:
        program = subprocess.Popen(cmdLine, stderr=subprocess.STDOUT)
    program = subprocess.Popen(
        cmdLine, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, cwd=cwd)

    retCode = program.wait()
    if retCode != 0:
    stdout, stderr = program.communicate()

    if program.returncode != 0:
        e = ProcessError()
        e.returnCode = retCode
        e.returnCode = program.returncode
        e.cmd = ' '.join(cmdLine)
        if VERBOSE:
            output = program.stdout.read()
            e.message = 'Error, "%s" returned %s: %s' % (
                e.cmd, e.returnCode, output)
            print e.message
        else:
            e.message = 'Error, "%s" returned %s' % (e.cmd, e.returnCode)
        e.cwd = cwd
        e.message = 'Error, "%s" (in %r) returned %s\n  stdout: %s\n  stderr: %s' % (
            e.cmd, e.cwd, e.returnCode, stdout, stderr)
        print e.message
        raise e

    return stdout.strip()


def _create_branch(pkgname, branch):
def _create_branch(pkgname, branch, existing_branches):
    '''Create a specific branch for a package.

    :arg pkgname: Name of the package to branch
    :arg branch: Name of the branch to create
    :arg existing_branches: A list of the branches that already exist locally.

    '''
    branch = branch.replace('*', '').strip()
    if branch == 'master':
        print 'ERROR: Proudly refusing to create master branch. Invalid repo?'
        print 'INFO: Please check %s repo' % pkgname
        return

    branchpath = os.path.join(
        GIT_FOLDER, '%s.git' % pkgname, 'refs/heads', branch)
    if not os.path.exists(branchpath):
        try:
            _invoke(MKBRANCH, [branch, pkgname])
        except ProcessError, e:
            if e.returnCode == 255:
                # This is a warning, not an error
                return
            raise
        finally:
            fedmsg.publish(
                topic='branch',
                modname='git',
                msg=dict(
                    agent='pkgdb',
                    name=pkgname,
                    branch=branch,
                ),
            )
    elif VERBOSE:
        print 'Was asked to create branch %s of package %s, but it '\
            'already exists' % (pkgname, branch)
    if branch in existing_branches:
        print 'ERROR: Refusing to create a branch %s that exists' % branch
        return

    try:
        _invoke(MKBRANCH, [branch, pkgname])
        fedmsg.publish(
            topic='branch',
            modname='git',
            msg=dict(
                agent='pkgdb',
                name=pkgname,
                branch=branch,
            ),
        )
    except ProcessError, e:
        if e.returnCode == 255:
            # This is a warning, not an error
            return
        raise


def pkgdb_pkg_branch():

@@ -168,43 +168,48 @@ def get_git_branch(pkg):
    """
    git_folder = os.path.join(GIT_FOLDER, '%s.git' % pkg)
    if not os.path.exists(git_folder):
        print 'Could not find %s' % git_folder
        if VERBOSE:
            print 'Could not find %s' % git_folder
        return set()

    head_folder = os.path.join(git_folder, 'refs', 'heads')
    return set(os.listdir(head_folder))
    branches = [
        lclbranch.replace('*', '').strip()
        for lclbranch in _invoke('git', ['branch'], cwd=git_folder).split('\n')
    ]
    return set(branches)
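The rewritten lookup shells out to `git branch` and strips the `*` marker that flags the checked-out branch; a tiny self-contained illustration of that normalization (the sample output is invented, Python 2):

# Invented sample of `git branch` porcelain output; '*' marks HEAD.
raw = '* master\n  el6\n  f20\n  f21'
branches = set(line.replace('*', '').strip() for line in raw.split('\n'))
print branches  # set(['el6', 'f20', 'f21', 'master'])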


def branch_package(pkgname, branches):
def branch_package(pkgname, requested_branches, existing_branches):
    '''Create all the branches that are listed in the pkgdb for a package.

    :arg pkgname: The package to create branches for
    :arg branches: The branches to create
    :arg requested_branches: The branches to create
    :arg existing_branches: A list of existing local branches

    '''
    if VERBOSE:
        print 'Fixing package %s for branches %s' % (pkgname, branches)
        print 'Fixing package %s for branches %s' % (pkgname, requested_branches)

    # Create the devel branch if necessary
    if not os.path.exists(
            os.path.join(GIT_FOLDER, '%s.git/refs/heads/master' % pkgname)):
    exists = os.path.exists(os.path.join(GIT_FOLDER, '%s.git' % pkgname))
    if not exists or 'master' not in existing_branches:
        _invoke(SETUP_PACKAGE, [pkgname])
        if 'master' in branches:
            branches.remove('master')  # SETUP_PACKAGE creates master
        fedmsg.publish(
            topic='branch',
            modname='git',
            msg=dict(
                agent='pkgdb',
                name=pkgname,
                branch='master',
            ),
        )
        if 'master' in requested_branches:
            requested_branches.remove('master')  # SETUP_PACKAGE creates master
        fedmsg.publish(
            topic='branch',
            modname='git',
            msg=dict(
                agent='pkgdb',
                name=pkgname,
                branch='master',
            ),
        )

    # Create all the required branches for the package
    # Use the translated branch name until pkgdb falls inline
    for branch in branches:
        _create_branch(pkgname, branch)
    for branch in requested_branches:
        _create_branch(pkgname, branch, existing_branches)


def main():

@@ -214,10 +219,14 @@ def main():

    local_pkgs = set(os.listdir(GIT_FOLDER))
    local_pkgs = set([it.replace('.git', '') for it in local_pkgs])
    if VERBOSE:
        print "Found %i local packages" % len(local_pkgs)

    pkgdb_info = pkgdb_pkg_branch()

    pkgdb_pkgs = set(pkgdb_info.keys())
    if VERBOSE:
        print "Found %i pkgdb packages" % len(pkgdb_pkgs)

    ## Commented out as we keep the git of retired packages while they won't
    ## show up in the information retrieved from pkgdb.

@@ -230,19 +239,38 @@ def main():
        print 'Some packages are present in pkgdb but not locally:'
        print ', '.join(sorted(pkgdb_pkgs - local_pkgs))

    if VERBOSE:
        print "Finding the lists of local branches for local repos."
    start = time.time()
    if THREADS == 1:
        git_branch_lookup = map(get_git_branch, sorted(pkgdb_info))
    else:
        threadpool = multiprocessing.pool.ThreadPool(processes=THREADS)
        git_branch_lookup = threadpool.map(get_git_branch, sorted(pkgdb_info))

    # Zip that list of results up into a lookup dict.
    git_branch_lookup = dict(zip(sorted(pkgdb_info), git_branch_lookup))

    if VERBOSE:
        print "Found all local git branches in %0.2fs" % (time.time() - start)

    tofix = set()
    for pkg in sorted(pkgdb_info):
        pkgdb_branches = pkgdb_info[pkg]
        git_branches = get_git_branch(pkg)
        git_branches = git_branch_lookup[pkg]
        diff = (pkgdb_branches - git_branches)
        if diff:
            print '%s missing: %s' % (pkg, ','.join(sorted(diff)))
            tofix.add(pkg)
            branch_package(pkg, diff)
            branch_package(pkg, diff, git_branches)

    if tofix:
        print 'Packages fixed (%s): %s' % (
            len(tofix), ', '.join(sorted(tofix)))
    else:
        if VERBOSE:
            print 'Didn\'t find any packages to fix.'


if __name__ == '__main__':
@@ -63,7 +63,7 @@

- name: copy keys into pki directory for production
  copy: src={{private}}/files/fedimg/{{item}} dest=/etc/pki/fedimg/{{item}}
        owner=fedmsg group=fedmsg mode=0100
        owner=fedmsg group=fedmsg mode=0400
  with_items:
  - fedimg-prod
  - fedimg-prod.pub
@@ -3,8 +3,8 @@ delete_images_on_failure: True

aws_util_username: ec2-user
aws_test_username: fedora
aws_util_volume_size: 3
aws_test_volume_size: 3
aws_util_volume_size: 6
aws_test_volume_size: 6
# access_id and secret_key are in private vars
aws_iam_profile: "arn:aws:iam::013116697141:user/oddshocks"
aws_test: "/bin/true"
@@ -4,11 +4,15 @@ suffix = 'stg.phx2.fedoraproject.org'
suffix = 'phx2.fedoraproject.org'
{% endif %}

primary_threads = 4
atomic_threads = 2
NUM_FEDIMG_PORTS = 2 * ((primary_threads + atomic_threads) + 1)

config = dict(
    endpoints={
        "fedimg.fedimg01": [
            "tcp://fedimg01.%s:30%0.2i" % (suffix, i)
            for i in range(4)
            for i in range(NUM_FEDIMG_PORTS)
        ],
    },
)
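With the values above, NUM_FEDIMG_PORTS = 2 * ((4 + 2) + 1) = 14, so the comprehension now yields fourteen endpoints instead of four; expanded for clarity (same expression as the template):

suffix = 'phx2.fedoraproject.org'
NUM_FEDIMG_PORTS = 2 * ((4 + 2) + 1)           # = 14
endpoints = ["tcp://fedimg01.%s:30%0.2i" % (suffix, i)
             for i in range(NUM_FEDIMG_PORTS)]
print endpoints[0]    # tcp://fedimg01.phx2.fedoraproject.org:3000
print endpoints[-1]   # tcp://fedimg01.phx2.fedoraproject.org:3013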
7
roles/hosts/files/fedimg01.phx2.fedoraproject.org-hosts
Normal file

@@ -0,0 +1,7 @@
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.5.126.51 proxy01.phx2.fedoraproject.org proxy1 proxy2 proxy3 proxy4 proxy01 proxy02 proxy03 proxy04 proxy05 proxy06 proxy07 proxy08 proxy09 fedoraproject.org admin.fedoraproject.org
10.5.126.23 infrastructure.fedoraproject.org
10.5.125.44 pkgs.fedoraproject.org
10.5.125.63 koji.fedoraproject.org
10.5.125.36 kojipkgs.fedoraproject.org
@@ -1,25 +1,12 @@
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6
10.5.126.23 infrastructure.fedoraproject.org
10.5.126.23 puppet.fedoraproject.org puppet puppet01 puppet01.phx2.fedoraproject.org
10.5.126.51 admin.fedoraproject.org
10.5.126.88 proxy01.phx2.fedoraproject.org proxy1 proxy2 proxy3 proxy4 proxy01 proxy02 proxy03 proxy04 proxy05 proxy06 proxy07 proxy08 proxy09 fedoraproject.org
10.5.126.83 pkgs.fedoraproject.org pkgs pkgs01
10.5.126.81 app01.phx2.fedoraproject.org app1 app3 app5 bapp1 app01 app03 app05 bapp01 bapp02
10.5.126.81 memcached04.phx2.fedoraproject.org memcached04 memcached03 memcached01 memcached02
10.5.126.91 value03.phx2.fedoraproject.org value3 value03
10.5.125.119 nfs01.phx2.fedoraproject.org nfs01 nfs1
10.5.126.92 noc01.phx2.fedoraproject.org noc1 noc01
10.5.126.82 app02.phx2.fedoraproject.org app2 app4 app02 app04
10.5.126.85 db02.stg.phx2.fedoraproject.org db05
10.5.126.204 db01.stg.phx2.fedoraproject.org db-koji01
10.5.126.23 lockbox01.phx2.fedoraproject.org infrastructure.fedoraproject.org
10.5.125.63 koji.fedoraproject.org koji.stg.fedoraproject.org koji1 koji01 s390.koji.fedoraproject.org sparc.koji.fedoraproject.org arm.koji.fedoraproject.org ppc.koji.fedoraproject.org
10.5.126.27 archives.fedoraproject.org
10.5.126.86 fas01.phx2.fedoraproject.org fas1 fas2 fas01 fas02 fas03
10.5.125.36 kojipkgs.fedoraproject.org
10.5.126.79 ask01.fedoraproject.org ask01
10.5.126.60 packages01.phx2.fedoraproject.org pacakges01 packages02
10.5.126.80 ask01.phx2.fedoraproject.org ask ask01
209.132.183.72 bugzilla.redhat.com
10.5.126.61 paste01.phx2.fedoraproject.org paste01 paste02
127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4
::1 localhost localhost.localdomain localhost6 localhost6.localdomain6

10.5.126.88 proxy01.phx2.fedoraproject.org proxy1 proxy2 proxy3 proxy4 proxy01 proxy02 proxy03 proxy04 proxy05 proxy06 proxy07 proxy08 proxy09 fedoraproject.org admin.fedoraproject.org admin.stg.fedoraproject.org
10.5.126.23 infrastructure.fedoraproject.org
10.5.125.44 pkgs.fedoraproject.org
10.5.126.81 app01.stg.fedoraproject.org bapp02 memcached01 memcached02 memcached03 memcached04
10.5.126.85 db02.stg.fedoraproject.org db05 db-ask db-tahrir db-elections db-fedocal db-github2fedmsg db-kerneltest db-notifs nuancier_db db-pkgdb2 db-summershum tagger_db
10.5.126.204 db01.stg.phx2.fedoraproject.org db-koji01 db-datanommer db-datanommer01 db-datanommer02 db-datanommer02.phx2.fedoraproject.org
10.5.126.86 fas01.phx2.fedoraproject.org fas1 fas2 fas01 fas02 fas03 fas-all
10.5.126.87 koji01.stg.phx2.fedoraproject.org koji.stg.fedoraproject.org koji01 kojipkgs kojipkgs.stg.phx2.fedoraproject.org kojipkgs.stg.fedoraproject.org
10.5.125.36 kojipkgs.fedoraproject.org
@@ -322,9 +322,10 @@ $wgSkipSkins = array("chick", "cologneblue", "monobook", "myskin", "nostalgia",

$wgSVGConverter = 'rsvg';

#We use apache, but apparently it's the same difference
# This series of settings is used for reverse proxies
$wgUseSquid = true;
$wgSquidServers = array(
# The SquidNoPurge setting is used to determine reverse proxies
$wgSquidServersNoPurge = array(
{% if environment == "staging" %}
# proxy01.stg
"10.5.126.88",

@@ -368,7 +369,32 @@ $wgSquidServers = array(
"192.168.1.17",
{% endif %}
);
$wgSquidServersNoPurge = array('127.0.0.1');
# This setting is used to send PURGE requests to varnish on reverse proxies upon page changes
$wgSquidServers = array(
{% if environment == "staging" %}
# proxy01.stg
"10.5.126.88:6081",
{% else %}
# proxy01
"10.5.126.52:6081",
# proxy02
"192.168.1.12:6081",
# proxy03
"192.168.1.7:6081",
# proxy04
"192.168.1.14:6081",
# proxy06
"192.168.1.63:6081",
# proxy07
"192.168.1.52:6081",
# proxy08
"192.168.1.78:6081",
# proxy09
"192.168.1.15:6081",
# proxy10
"10.5.126.51:6081",
{% endif %}
);
$wgSquidMaxage = 432000;
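The comments above describe the purge path: on every page change MediaWiki sends an HTTP PURGE to each entry in $wgSquidServers, and varnish on port 6081 evicts its cached copy. A sketch of that request shape for illustration only — the proxy and page are hypothetical, and MediaWiki itself issues the real requests:

import requests

# Hypothetical proxy and page title; this only shows the shape of the
# PURGE request MediaWiki sends to each listed reverse proxy.
resp = requests.request('PURGE', 'http://10.5.126.52:6081/wiki/Some_Page')
print resp.status_code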

# Don't add rel="nofollow"


@@ -1,6 +1,10 @@
# Global list of koji tags we care about
tags = ({'name': 'Rawhide', 'tag': 'f22'},

        {'name': 'Fedora 22', 'tag': 'f22-updates'},
        {'name': 'Fedora 22', 'tag': 'f22'},
        {'name': 'Fedora 22 Testing', 'tag': 'f22-updates-testing'},

        {'name': 'Fedora 21', 'tag': 'f21-updates'},
        {'name': 'Fedora 21', 'tag': 'f21'},
        {'name': 'Fedora 21 Testing', 'tag': 'f21-updates-testing'},
@@ -45,6 +45,54 @@ enabled=0
gpgcheck=0


[fedora-22-x86_64]
name=Fedora 22
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/fedora/linux/releases/22/Everything/x86_64/os/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=fedora-22&arch=x86_64
enabled=0
gpgcheck=0

[fedora-22-updates-x86_64]
name=Fedora 22 - Updates
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/fedora/linux/updates/22/x86_64/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=updates-released-f22&arch=x86_64
enabled=0
gpgcheck=0

[fedora-22-testing-x86_64]
name=Fedora 22 - Testing
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/fedora/linux/updates/testing/22/x86_64/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=updates-testing-f22&arch=x86_64
enabled=0
gpgcheck=0

[fedora-22-i686]
name=Fedora 22
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/fedora/linux/releases/22/Everything/i386/os/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=fedora-22&arch=i386
enabled=0
gpgcheck=0

[fedora-22-updates-i686]
name=Fedora 22 - Updates
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/fedora/linux/updates/22/i386/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=updates-released-f22&arch=i386
enabled=0
gpgcheck=0

[fedora-22-testing-i686]
name=Fedora 22 - Testing
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/fedora/linux/updates/testing/22/i386/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=updates-testing-f22&arch=i386
enabled=0


[fedora-21-x86_64]
name=Fedora 21

@@ -222,19 +270,3 @@ baseurl=http://download01.phx2.fedoraproject.org/pub/epel/testing/7/x86_64/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-epel7&arch=x86_64
enabled=0
gpgcheck=0

[epel-7-i686]
name=EPEL 7
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/epel/7/i386/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=epel-7&arch=i386
enabled=0
gpgcheck=0

[epel-7-testing-i686]
name=EPEL 7 - Testing
failovermethod=priority
baseurl=http://download01.phx2.fedoraproject.org/pub/epel/testing/7/i386/
#mirrorlist=https://mirrors.fedoraproject.org/metalink?repo=testing-epel7&arch=i386
enabled=0
gpgcheck=0
@@ -40,6 +40,7 @@ $sg_php_days = 90;
$sg_php_score = 50;
$sg_php_type = 2;
$sg_censor = "vipshare.me
freepremium.info.tm
filevis.com
terafile.co
lafiles.com
444
roles/pkgdb2/files/pkgdb-sync-bugzilla
Executable file

@@ -0,0 +1,444 @@
#!/usr/bin/python -tt
# -*- coding: utf-8 -*-
#
# Copyright © 2013-2014 Red Hat, Inc.
#
# This copyrighted material is made available to anyone wishing to use, modify,
# copy, or redistribute it subject to the terms and conditions of the GNU
# General Public License v.2, or (at your option) any later version. This
# program is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY expressed or implied, including the implied warranties of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
# Public License for more details. You should have received a copy of the GNU
# General Public License along with this program; if not, write to the Free
# Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA. Any Red Hat trademarks that are incorporated in the source
# code or documentation are not subject to the GNU General Public License and
# may only be used or replicated with the express permission of Red Hat, Inc.
#
# Red Hat Author(s): Toshio Kuratomi <tkuratom@redhat.com>
# Author(s): Mike Watters <valholla75@fedoraproject.org>
# Author(s): Pierre-Yves Chibon <pingou@pingoured.fr>
#
'''
sync information from the packagedb into bugzilla

This short script takes information about package ownership and imports it
into bugzilla.
'''

## These two lines are needed to run on EL6
__requires__ = ['SQLAlchemy >= 0.7', 'jinja2 >= 2.4']
import pkg_resources

import argparse
import datetime
import time
import sys
import os
import itertools
import json
import xmlrpclib
import codecs
import smtplib
import bugzilla
import requests
from email.Message import Message
from fedora.client.fas2 import AccountSystem


if 'PKGDB2_CONFIG' not in os.environ \
        and os.path.exists('/etc/pkgdb2/pkgdb2.cfg'):
    print 'Using configuration file `/etc/pkgdb2/pkgdb2.cfg`'
    os.environ['PKGDB2_CONFIG'] = '/etc/pkgdb2/pkgdb2.cfg'


try:
    import pkgdb2
except ImportError:
    sys.path.insert(
        0, os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
    import pkgdb2


BZSERVER = pkgdb2.APP.config.get('PKGDB2_BUGZILLA_URL')
BZUSER = pkgdb2.APP.config.get('PKGDB2_BUGZILLA_NOTIFY_USER')
BZPASS = pkgdb2.APP.config.get('PKGDB2_BUGZILLA_NOTIFY_PASSWORD')
BZCOMPAPI = pkgdb2.APP.config.get('BUGZILLA_COMPONENT_API')
FASURL = pkgdb2.APP.config.get('PKGDB2_FAS_URL')
FASUSER = pkgdb2.APP.config.get('PKGDB2_FAS_USER')
FASPASS = pkgdb2.APP.config.get('PKGDB2_FAS_PASSWORD')
FASINSECURE = pkgdb2.APP.config.get('PKGDB2_FAS_INSECURE')
NOTIFYEMAIL = pkgdb2.APP.config.get('PKGDB2_BUGZILLA_NOTIFY_EMAIL')
PKGDBSERVER = pkgdb2.APP.config.get('SITE_URL')
DRY_RUN = pkgdb2.APP.config.get('PKGDB2_BUGZILLA_DRY_RUN', False)

EMAIL_FROM = 'accounts@fedoraproject.org'
DATA_CACHE = '/var/tmp/pkgdb_sync_bz.json'

# When querying for current info, take segments of 1000 packages a time
BZ_PKG_SEGMENT = 1000


class DataChangedError(Exception):
    '''Raised when data we are manipulating changes while we're modifying it.'''
    pass


def segment(iterable, chunk, fill=None):
    '''Collect data into `chunk` sized blocks'''
    args = [iter(iterable)] * chunk
    return itertools.izip_longest(*args, fillvalue=fill)
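segment() hands the same iterator to izip_longest `chunk` times, so the final chunk is padded with the fill value — which is why the Component.get query below filters out None. For example (Python 2 session):

>>> list(segment([1, 2, 3, 4, 5], 2))
[(1, 2), (3, 4), (5, None)]
>>> [p for p in list(segment('abcde', 3))[-1] if p is not None]
['d', 'e']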
|
||||
|
||||
|
||||
class ProductCache(dict):
|
||||
def __init__(self, bz, acls):
|
||||
self.bz = bz
|
||||
self.acls = acls
|
||||
|
||||
# Ask bugzilla for a section of the pkglist.
|
||||
# Save the information from the section that we want.
|
||||
def __getitem__(self, key):
|
||||
try:
|
||||
return super(ProductCache, self).__getitem__(key)
|
||||
except KeyError:
|
||||
# We can only cache products we have pkgdb information for
|
||||
if key not in self.acls:
|
||||
raise
|
||||
|
||||
if BZCOMPAPI == 'getcomponentsdetails':
|
||||
# Old API -- in python-bugzilla. But with current server, this
|
||||
# gives ProxyError
|
||||
products = self.server.getcomponentsdetails(key)
|
||||
elif BZCOMPAPI == 'component.get':
|
||||
# Way that's undocumented in the partner-bugzilla api but works
|
||||
# currently
|
||||
pkglist = acls[key].keys()
|
||||
products = {}
|
||||
for pkg_segment in segment(pkglist, BZ_PKG_SEGMENT):
|
||||
# Format that bugzilla will understand. Strip None's that segment() pads
|
||||
# out the final data segment() with
|
||||
query = [dict(product=key, component=p) for p in pkg_segment if p is not None]
|
||||
raw_data = self.bz._proxy.Component.get(dict(names=query))
|
||||
for package in raw_data['components']:
|
||||
# Reformat data to be the same as what's returned from
|
||||
# getcomponentsdetails
|
||||
product = dict(initialowner=package['default_assignee'],
|
||||
description=package['description'],
|
||||
initialqacontact=package['default_qa_contact'],
|
||||
initialcclist=package['default_cc'])
|
||||
products[package['name'].lower()] = product
|
||||
self[key] = products
|
||||
|
||||
return super(ProductCache, self).__getitem__(key)
|
||||
|
||||
|
||||
class Bugzilla(object):
|
||||
|
||||
def __init__(self, bzServer, username, password, acls):
|
||||
self.bzXmlRpcServer = bzServer
|
||||
self.username = username
|
||||
self.password = password
|
||||
|
||||
self.server = bugzilla.Bugzilla(
|
||||
url=self.bzXmlRpcServer,
|
||||
user=self.username,
|
||||
password=self.password)
|
||||
self.productCache = ProductCache(self.server, acls)
|
||||
|
||||
# Connect to the fedora account system
|
||||
self.fas = AccountSystem(
|
||||
base_url=FASURL,
|
||||
username=FASUSER,
|
||||
password=FASPASS)
|
||||
self.userCache = self.fas.people_by_key(
|
||||
key='username',
|
||||
fields=['bugzilla_email'])
|
||||
|
||||
def _get_bugzilla_email(self, username):
|
||||
'''Return the bugzilla email address for a user.
|
||||
|
||||
First looks in a cache for a username => bugzilla email. If not found,
|
||||
reloads the cache from fas and tries again.
|
||||
'''
|
||||
try:
|
||||
return self.userCache[username]['bugzilla_email'].lower()
|
||||
except KeyError:
|
||||
if username.startswith('@'):
|
||||
group = self.fas.group_by_name(username[1:])
|
||||
self.userCache[username] = {
|
||||
'bugzilla_email': group.mailing_list}
|
||||
else:
|
||||
person = self.fas.person_by_username(username)
|
||||
bz_email = person.get('bugzilla_email', None)
|
||||
if bz_email is None:
|
||||
print '%s has no bugzilla email, valid account?' % username
|
||||
else:
|
||||
self.userCache[username] = {'bugzilla_email': bz_email}
|
||||
return self.userCache[username]['bugzilla_email'].lower()
|
||||
|
||||
def add_edit_component(self, package, collection, owner, description,
|
||||
qacontact=None, cclist=None):
|
||||
'''Add or update a component to have the values specified.
|
||||
'''
|
||||
# Turn the cclist into something usable by bugzilla
|
||||
if not cclist or 'people' not in cclist:
|
||||
initialCCList = list()
|
||||
else:
|
||||
initialCCList = [
|
||||
self._get_bugzilla_email(cc) for cc in cclist['people']]
|
||||
if 'groups' in cclist:
|
||||
group_cc = [
|
||||
self._get_bugzilla_email(cc) for cc in cclist['groups']]
|
||||
initialCCList.extend(group_cc)
|
||||
|
||||
# Add owner to the cclist so comaintainers taking over a bug don't
|
||||
# have to do this manually
|
||||
owner = self._get_bugzilla_email(owner)
|
||||
if owner not in initialCCList:
|
||||
initialCCList.append(owner)
|
||||
|
||||
# Lookup product
|
||||
try:
|
||||
product = self.productCache[collection]
|
||||
except xmlrpclib.Fault as e:
|
||||
# Output something useful in args
|
||||
e.args = (e.faultCode, e.faultString)
|
||||
raise
|
||||
except xmlrpclib.ProtocolError as e:
|
||||
e.args = ('ProtocolError', e.errcode, e.errmsg)
|
||||
raise
|
||||
|
||||
pkgKey = package.lower()
|
||||
if pkgKey in product:
|
||||
# edit the package information
|
||||
data = {}
|
||||
|
||||
# Grab bugzilla email for things changable via xmlrpc
|
||||
if qacontact:
|
||||
qacontact = self._get_bugzilla_email(qacontact)
|
||||
else:
|
||||
qacontact = 'extras-qa@fedoraproject.org'
|
||||
|
||||
# Check for changes to the owner, qacontact, or description
|
||||
if product[pkgKey]['initialowner'] != owner:
|
||||
data['initialowner'] = owner
|
||||
|
||||
if product[pkgKey]['description'] != description:
|
||||
data['description'] = description
|
||||
if product[pkgKey]['initialqacontact'] != qacontact and (
|
||||
qacontact or product[pkgKey]['initialqacontact']):
|
||||
data['initialqacontact'] = qacontact
|
||||
|
||||
if len(product[pkgKey]['initialcclist']) != len(initialCCList):
|
||||
data['initialcclist'] = initialCCList
|
||||
else:
|
||||
for ccMember in product[pkgKey]['initialcclist']:
|
||||
if ccMember not in initialCCList:
|
||||
data['initialcclist'] = initialCCList
|
||||
break
|
||||
|
||||
if data:
|
||||
### FIXME: initialowner has been made mandatory for some
|
||||
# reason. Asking dkl why.
|
||||
data['initialowner'] = owner
|
||||
|
||||
# Changes occurred. Submit a request to change via xmlrpc
|
||||
data['product'] = collection
|
||||
data['component'] = package
|
||||
if DRY_RUN:
|
||||
print '[EDITCOMP] Changing via editComponent(' \
|
||||
'%s, %s, "xxxxx")' % (data, self.username)
|
||||
print '[EDITCOMP] Former values: %s|%s|%s|%s' % (
|
||||
product[pkgKey]['initialowner'],
|
||||
product[pkgKey]['description'],
|
||||
product[pkgKey]['initialqacontact'],
|
||||
product[pkgKey]['initialcclist'])
|
||||
else:
|
||||
try:
|
||||
self.server.editcomponent(data)
|
||||
except xmlrpclib.Fault, e:
|
||||
# Output something useful in args
|
||||
e.args = (data, e.faultCode, e.faultString)
|
||||
raise
|
||||
except xmlrpclib.ProtocolError, e:
|
||||
e.args = ('ProtocolError', e.errcode, e.errmsg)
|
||||
raise
|
||||
else:
|
||||
# Add component
|
||||
if qacontact:
|
||||
qacontact = self._get_bugzilla_email(qacontact)
|
||||
else:
|
||||
qacontact = 'extras-qa@fedoraproject.org'
|
||||
|
||||
data = {
|
||||
'product': collection,
|
||||
'component': package,
|
||||
'description': description,
|
||||
'initialowner': owner,
|
||||
'initialqacontact': qacontact
|
||||
}
|
||||
if initialCCList:
|
||||
data['initialcclist'] = initialCCList
|
||||
|
||||
if DRY_RUN:
|
||||
print '[ADDCOMP] Adding new component AddComponent:(' \
|
||||
'%s, %s, "xxxxx")' % (data, self.username)
|
||||
else:
|
||||
try:
|
||||
self.server.addcomponent(data)
|
||||
except xmlrpclib.Fault, e:
|
||||
# Output something useful in args
|
||||
e.args = (data, e.faultCode, e.faultString)
|
||||
raise
|
||||
|
||||
|
||||
def send_email(fromAddress, toAddress, subject, message, ccAddress=None):
|
||||
'''Send an email if there's an error.
|
||||
|
||||
This will be replaced by sending messages to a log later.
|
||||
'''
|
||||
msg = Message()
|
||||
msg.add_header('To', ','.join(toAddress))
|
||||
msg.add_header('From', fromAddress)
|
||||
msg.add_header('Subject', subject)
|
||||
if ccAddress is not None:
|
||||
msg.add_header('Cc', ','.join(ccAddress))
|
||||
msg.set_payload(message)
|
||||
smtp = smtplib.SMTP('bastion')
|
||||
smtp.sendmail(fromAddress, toAddress, msg.as_string())
|
    smtp.quit()


def notify_users(errors):
    ''' Browse the list of errors and when we can retrieve the email
    address, use it to notify the user about the issue.
    '''
    tmpl_email = pkgdb2.APP.config.get('PKGDB_SYNC_BUGZILLA_EMAIL', None)
    if not tmpl_email:
        print 'No template email configured in the configuration file, '\
            'no notification sent to the users'
        return

    data = {}
    if os.path.exists(DATA_CACHE):
        try:
            with open(DATA_CACHE) as stream:
                data = json.load(stream)
        except Exception as err:
            print 'Could not read the json file at %s: \nError: %s' % (
                DATA_CACHE, err)

    new_data = {}
    for error in errors:
        notify_user = False
        if 'The name ' in error and ' is not a valid username' in error:
            user_email = error.split(' is not a valid username')[0].split(
                'The name ')[1].strip()
            now = datetime.datetime.utcnow()

            # See if we already know about this user
            if user_email in data and data[user_email]['last_update']:
                last_update = datetime.datetime.fromtimestamp(
                    int(data[user_email]['last_update']))
                # Only notify users once per hour; use total_seconds()
                # rather than .seconds, which only holds the sub-day
                # remainder and would skip users not mailed for over a day
                if (now - last_update).total_seconds() >= 3600:
                    notify_user = True
                else:
                    new_data[user_email] = data[user_email]
            elif not data or user_email not in data:
                notify_user = True

            if notify_user:
                send_email(
                    EMAIL_FROM,
                    [user_email],
                    subject='Please fix your bugzilla.redhat.com account',
                    message=tmpl_email,
                    ccAddress=NOTIFYEMAIL,
                )

                new_data[user_email] = {
                    'last_update': time.mktime(now.timetuple())
                }

    with open(DATA_CACHE, 'w') as stream:
        json.dump(new_data, stream)


if __name__ == '__main__':
    sys.stdout = codecs.getwriter('utf-8')(sys.stdout)

    parser = argparse.ArgumentParser(
        description='Script syncing information between pkgdb and bugzilla'
    )
    parser.add_argument(
        '--debug', dest='debug', action='store_true', default=False,
        help='Print the changes instead of making them in bugzilla')

    args = parser.parse_args()

    if args.debug:
        DRY_RUN = True

    # Non-fatal errors to alert people about
    errors = []

    # Get bugzilla information from the package database
    req = requests.get('%s/api/bugzilla/?format=json' % PKGDBSERVER)
    acls = req.json()['bugzillaAcls']

    # Initialize the connection to bugzilla
    bugzilla = Bugzilla(BZSERVER, BZUSER, BZPASS, acls)

    for product in acls.keys():
        if product not in ('Fedora', 'Fedora EPEL'):
            continue
        for pkg in sorted(acls[product]):
            if DRY_RUN:
                print pkg
            pkgInfo = acls[product][pkg]
            try:
                bugzilla.add_edit_component(
                    pkg,
                    product,
                    pkgInfo['owner'],
                    pkgInfo['summary'],
                    pkgInfo['qacontact'],
                    pkgInfo['cclist'])
            except ValueError as e:
                # A username didn't have a bugzilla address
                errors.append(str(e.args))
            except DataChangedError as e:
                # A Package or Collection was returned via xmlrpc but wasn't
                # present when we tried to change it
                errors.append(str(e.args))
            except xmlrpclib.ProtocolError as e:
                # Unrecoverable and likely means that nothing is going to
                # succeed.
                errors.append(str(e.args))
                break
            except xmlrpclib.Error as e:
                # An error occurred in the xmlrpc call.  Shouldn't happen,
                # but we had better see what it is
                errors.append('%s -- %s' % (pkg, e.args[-1]))

    # Send notification of errors
    if errors:
        if DRY_RUN:
            print '[DEBUG]', '\n'.join(errors)
        else:
            notify_users(errors)
            send_email(
                EMAIL_FROM,
                NOTIFYEMAIL,
                'Errors while syncing bugzilla with the PackageDB',
                '''
The following errors were encountered while updating bugzilla with information
from the Package Database.  Please have the problems taken care of:

%s
''' % ('\n'.join(errors),))

    sys.exit(0)
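The user lookup above hinges on a fixed error-message shape and an hourly throttle. A standalone sketch of both, mirroring the script's own parsing (the address and timestamps are illustrative, not real accounts):

import datetime
import time

# Sample error string in the shape notify_users() expects
error = 'The name jdoe@example.com is not a valid username'
user_email = error.split(' is not a valid username')[0].split(
    'The name ')[1].strip()
print user_email  # jdoe@example.com

# The throttle: a cached timestamp older than an hour means we mail again
cache = {user_email: {'last_update': time.mktime(
    (datetime.datetime.utcnow()
     - datetime.timedelta(hours=2)).timetuple())}}
last_update = datetime.datetime.fromtimestamp(
    int(cache[user_email]['last_update']))
now = datetime.datetime.utcnow()
print (now - last_update).total_seconds() >= 3600  # True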
@@ -52,6 +52,12 @@
  notify:
  - restart apache

- name: HOTFIX pkgdb-sync-bugzilla script to notify the users
  when: inventory_hostname.startswith('pkgdb02')
  copy: src=pkgdb-sync-bugzilla dest=/usr/bin/pkgdb-sync-bugzilla mode=755
  tags:
  - config

- name: Install the pkgdb cron jobs - sync bugzilla, update pkg info
  when: inventory_hostname.startswith('pkgdb02')
  template: src={{ item.file }}
@@ -115,3 +115,38 @@ SESSION_COOKIE_SECURE = True
# Used by SESSION_COOKIE_PATH
APPLICATION_ROOT = '/pkgdb/'


# PkgDB sync bugzilla email
PKGDB_SYNC_BUGZILLA_EMAIL = """Greetings.

You are receiving this email because there's a problem with your
bugzilla.redhat.com account.

If you recently changed the email address associated with your
Fedora account in the Fedora Account System, it is now out of sync
with your bugzilla.redhat.com account.  This leads to problems
with Fedora packages you own or are CC'ed on bug reports for.

Please take one of the following actions:

a) Log in to your old bugzilla.redhat.com account and change the
   email address to match your current email in the Fedora Account
   System: at https://bugzilla.redhat.com log in, click 'Preferences',
   then 'Account Information', and enter the new email address.

b) Create a new account in bugzilla.redhat.com to match the email
   listed in your Fedora Account System account: at
   https://bugzilla.redhat.com/ click 'New Account' and enter the
   email address.

c) Change your Fedora Account System email to match your existing
   bugzilla.redhat.com account: at
   https://admin.fedoraproject.org/accounts log in, click on
   'my account', then 'edit', and change your email address.

If you have questions or concerns, please let us know.

Your prompt attention in this matter is appreciated.

The Fedora admins.
"""
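This setting is what the hotfixed script above pulls out of the pkgdb2 Flask config. A minimal sketch of that lookup, assuming pkgdb2 has loaded this file (it mirrors the guard in notify_users()):

import pkgdb2

tmpl_email = pkgdb2.APP.config.get('PKGDB_SYNC_BUGZILLA_EMAIL', None)
if tmpl_email:
    print tmpl_email.splitlines()[0]  # Greetings.
else:
    print 'No template email configured in the configuration file'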
@@ -173,17 +173,30 @@ factory.addStep(ShellCommand(command=["runtask", '-i',

{% if deployment_type == 'dev' %}
# create artifacts dir on master
factory.addStep(MasterShellCommand(command=["mkdir", '-m', '0755', Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s')],
                                   descriptionDone=['Create artifacts dir']))

# copy artifacts to master
factory.addStep(DirectoryUpload(slavesrc=Interpolate('/var/lib/taskotron/artifacts/%(prop:uuid)s/'),
                                masterdest=Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s/task_output')))

# copy taskotron log to master
factory.addStep(FileUpload(slavesrc="/var/log/taskotron/taskotron.log",
                           masterdest=Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s/taskotron.log')))
factory.addStep(FileUpload(slavesrc='/var/log/taskotron/taskotron.log',
                           masterdest=Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s/taskotron.log'),
                           mode=0644))
'''
import datetime
from buildbot.process.properties import renderer

# change permissions for uuid dir on master to be accessible via http
factory.addStep(MasterShellCommand(command=["chmod", '-R', '0755', Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s/')]))

@renderer
def today(props):
    return datetime.datetime.now().strftime("%Y%m%d")

# move artifacts dir
factory.addStep(MasterShellCommand(command=["mkdir", '-p', '-m', '0755', Interpolate('{{ public_artifacts_dir }}/%(kw:today)s', today=today), '&&', 'mv', Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s/'), Interpolate('{{ public_artifacts_dir }}/%(kw:today)s/', today=today)],
                                   descriptionDone=['Move artifacts dir']))
'''
{% else %}
# capture the taskotron log
factory.addStep(ShellCommand(command=["cat", "/var/log/taskotron/taskotron.log"], name="cat_log",
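One caveat with the commented-out "move artifacts dir" step: when MasterShellCommand gets a list, buildbot execs it directly without a shell, so the '&&' would reach mkdir as a literal argument instead of chaining the two commands. A sketch of a working variant should that block ever be revived (a single string goes through the shell; names and the Jinja placeholder are as in the template above):

# Sketch only: single-string command so the shell interprets '&&';
# 'today' is the @renderer defined in the commented block.
factory.addStep(MasterShellCommand(
    command=Interpolate(
        'mkdir -p -m 0755 {{ public_artifacts_dir }}/%(kw:today)s && '
        'mv {{ public_artifacts_dir }}/%(prop:uuid)s '
        '{{ public_artifacts_dir }}/%(kw:today)s/', today=today),
    descriptionDone=['Move artifacts dir']))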
@@ -52,7 +52,7 @@ passwd = '{{ qadevel_stg_buildslave_password }}'

keepalive = 600
usepty = 0
umask = None
umask = 0022
maxdelay = 300

s = BuildSlave(buildmaster_host, port, slavename, passwd, basedir,
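The umask change is what makes files the slave writes readable later: 0022 masks the group/other write bits, so new files come out 0644 and new directories 0755. A quick standalone illustration, not part of the config:

import os
import stat

os.umask(0022)  # 0o022 on Python 3
# O_CREAT honours 0666 minus the umask -> 0644
fd = os.open('demo.txt', os.O_CREAT | os.O_WRONLY, 0666)
os.close(fd)
print oct(stat.S_IMODE(os.stat('demo.txt').st_mode))  # 0644
os.remove('demo.txt')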
@@ -124,33 +124,23 @@ backend mirrormanager2 {
}


#acl purge {
#  "192.168.1.3";
#  "192.168.1.4";
#  "192.168.1.5";
#  "192.168.1.6";
#  "192.168.1.13";
#  "192.168.1.24";
#  "192.168.1.23";
#  "192.168.1.41";
#  "10.5.126.31";
#  "10.5.126.32";
#  "10.5.126.33";
#  "10.5.126.34";
#  "10.5.126.37";
#  "10.5.126.38";
#}
acl purge {
  "192.168.1.129"; // wiki01.vpn
  "192.168.1.130"; // wiki02.vpn
  "10.5.126.60";   // wiki01.stg
  "10.5.126.63";   // wiki01
  "10.5.126.73";   // wiki02
  "10.5.126.23";   // lockbox01
  "192.168.1.58";  // lockbox01.vpn
}

sub vcl_recv {
#  if (req.request == "PURGE") {
#    if (!client.ip ~ purge) {
#      error 405 "Not allowed.";
#    }
#    if (req.url ~ "^http://") {
#      set req.url = regsub(req.url, "http://localhost:6081","");
#    }
#    purge_url(req.url);
#  }
  if (req.method == "PURGE") {
    if (!client.ip ~ purge) {
      return (synth(405, "Not allowed"));
    }
    return (purge);
  }

  if (req.url ~ "^/wiki/") {
    set req.backend_hint = wiki;
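A quick way to exercise the new VCL4-style purge handling: issue a PURGE request against varnish. A sketch only; port 6081 matches the commented-out regsub above, and the path is illustrative:

import requests

# From a host in the 'purge' acl this should return 200; from anywhere
# else, the synthetic 405 "Not allowed".
resp = requests.request('PURGE', 'http://localhost:6081/wiki/Main_Page')
print resp.status_code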
@@ -4,8 +4,8 @@ internal_interface_cidr: 172.24.0.1/24
public_gateway_ip: 209.132.184.254
public_dns: 66.35.62.163

public_floating_start: 209.132.184.33
public_floating_end: 209.132.184.46
public_floating_start: 209.132.184.31
public_floating_end: 209.132.184.69

controller_public_ip: 209.132.184.9
controller_private_ip: 172.24.0.9
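For the record, this edit nearly triples the floating-IP pool. A quick sanity check on the inclusive counts (both ranges sit inside the same /24, so last-octet arithmetic suffices):

print 46 - 33 + 1  # 14 floating IPs before this change
print 69 - 31 + 1  # 39 floating IPs after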