[copr] moving to the new cloud ...
parent 781b9b79b4
commit 7764a62b27
16 changed files with 816 additions and 275 deletions
@@ -2,6 +2,15 @@
 _lighttpd_conf_src: "lighttpd/lighttpd.conf"
 _copr_be_conf: "copr-be.conf"
+
+copr_nova_auth_url: "https://fed-cloud09.cloud.fedoraproject.org:5000/v2.0"
+copr_nova_tenant_id: "undefined_tenant_id"
+copr_nova_tenant_name: "coprv"
+copr_nova_username: "msuchy"
+
+copr_builder_image_name: "builder_base_image_2015_04_01"
+copr_builder_flavor_name: "m1.builder"
+copr_builder_network_name: "copr-net"
 
 do_sign: "true"
 
 spawn_in_advance: "true"
@@ -2,7 +2,18 @@
 _lighttpd_conf_src: "lighttpd/lighttpd_dev.conf"
 _copr_be_conf: "copr-be.conf-dev"
 
-do_sign: "true"
+
+copr_nova_auth_url: "https://fed-cloud09.cloud.fedoraproject.org:5000/v2.0"
+copr_nova_tenant_id: "566a072fb1694950998ad191fee3833b"
+copr_nova_tenant_name: "coprdev"
+copr_nova_username: "msuchy"
+
+copr_builder_image_name: "builder_base_image_2015_04_01"
+copr_builder_flavor_name: "m1.small"
+# copr_builder_network_name: "coprdev-net"  # uncomment after cloud09 redeploy
+copr_builder_network_name: "copr-net"
+
+do_sign: "false"
 
 spawn_in_advance: "true"
 frontend_base_url: "http://copr-fe-dev.cloud.fedoraproject.org"
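These per-environment copr_nova_* and copr_builder_* variables are consumed by the builderpb_nova.yml template added further down in this commit. A minimal sketch of the hand-off, taken from that template's vars section:

    vars:
      image_name: "{{ copr_builder_image_name }}"
      flavor_name: "{{ copr_builder_flavor_name }}"
      network_name: "{{ copr_builder_network_name }}"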
@@ -1,13 +0,0 @@
[epel]
name=Extras Packages for Enterprise Linux $releasever - $basearch
baseurl=http://infrastructure.fedoraproject.org/pub/epel/6/$basearch/
enabled=1
gpgcheck=1
gpgkey=http://infrastructure.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6

[epel-testing]
name=Extras Packages for Enterprise Linux $releasever - $basearch
baseurl=http://infrastructure.fedoraproject.org/pub/epel/testing/6/$basearch/
enabled=0
gpgcheck=1
gpgkey=http://infrastructure.fedoraproject.org/pub/epel/RPM-GPG-KEY-EPEL-6
@@ -1,63 +0,0 @@
config_opts['root'] = 'fedora-21-i386'
config_opts['target_arch'] = 'i686'
config_opts['legal_host_arches'] = ('i386', 'i586', 'i686', 'x86_64')
config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
config_opts['dist'] = 'fc21'  # only useful for --resultdir variable subst
config_opts['extra_chroot_dirs'] = [ '/run/lock', ]
config_opts['releasever'] = '21'

config_opts['yum.conf'] = """
[main]
cachedir=/var/cache/yum
debuglevel=1
reposdir=/dev/null
logfile=/var/log/yum.log
retries=20
obsoletes=1
gpgcheck=0
assumeyes=1
syslog_ident=mock
syslog_device=

# repos

[fedora]
name=fedora
metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch
failovermethod=priority

[updates]
name=updates
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-f$releasever&arch=$basearch
failovermethod=priority

[updates-testing]
name=updates-testing
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-testing-f$releasever&arch=$basearch
failovermethod=priority
enabled=0

[local]
name=local
baseurl=http://kojipkgs.fedoraproject.org/repos/f21-build/latest/i386/
cost=2000
enabled=0

[fedora-debuginfo]
name=fedora-debuginfo
metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-debug-$releasever&arch=$basearch
failovermethod=priority
enabled=0

[updates-debuginfo]
name=updates-debuginfo
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-debug-f$releasever&arch=$basearch
failovermethod=priority
enabled=0

[updates-testing-debuginfo]
name=updates-testing-debuginfo
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-testing-debug-f$releasever&arch=$basearch
failovermethod=priority
enabled=0
"""
@@ -1,63 +0,0 @@
config_opts['root'] = 'fedora-21-x86_64'
config_opts['target_arch'] = 'x86_64'
config_opts['legal_host_arches'] = ('x86_64',)
config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
config_opts['dist'] = 'fc21'  # only useful for --resultdir variable subst
config_opts['extra_chroot_dirs'] = [ '/run/lock', ]
config_opts['releasever'] = '21'

config_opts['yum.conf'] = """
[main]
cachedir=/var/cache/yum
debuglevel=1
reposdir=/dev/null
logfile=/var/log/yum.log
retries=20
obsoletes=1
gpgcheck=0
assumeyes=1
syslog_ident=mock
syslog_device=

# repos

[fedora]
name=fedora
metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-$releasever&arch=$basearch
failovermethod=priority

[updates]
name=updates
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-f$releasever&arch=$basearch
failovermethod=priority

[updates-testing]
name=updates-testing
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-testing-f$releasever&arch=$basearch
failovermethod=priority
enabled=0

[local]
name=local
baseurl=http://kojipkgs.fedoraproject.org/repos/f21-build/latest/x86_64/
cost=2000
enabled=0

[fedora-debuginfo]
name=fedora-debuginfo
metalink=https://mirrors.fedoraproject.org/metalink?repo=fedora-debug-$releasever&arch=$basearch
failovermethod=priority
enabled=0

[updates-debuginfo]
name=updates-debuginfo
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-released-debug-f$releasever&arch=$basearch
failovermethod=priority
enabled=0

[updates-testing-debuginfo]
name=updates-testing-debuginfo
metalink=https://mirrors.fedoraproject.org/metalink?repo=updates-testing-debug-f$releasever&arch=$basearch
failovermethod=priority
enabled=0
"""
@@ -1,114 +0,0 @@
from ansible import errors, runner
from glanceclient import Client as GlanceClient
from keystoneclient import session
from keystoneclient.auth.identity import v2 as identity
from neutronclient.neutron.client import Client as NeutronClient
from novaclient.v3.client import Client
import glanceclient.exc
import json
import novaclient.exceptions


def nova_result_to_builder_ip(nova_result):
    return nova_result["addresses"].values()[0][0]["addr"]


# def flavor_id_to_name(host_vars, user, password, tenant, auth_url):
#     nt = Client(user, password, tenant, auth_url, service_type="compute")
#     try:
#         flavor = nt.flavors.get(host_vars)
#     except novaclient.exceptions.NotFound:
#         raise errors.AnsibleFilterError('There is no flavor of name {0} accessible for tenant {1}'.format(host_vars, tenant))
#     return flavor.name
#
#
# def flavor_name_to_id(host_vars, user, password, tenant, auth_url):
#     nt = Client(user, password, tenant, auth_url, service_type="compute")
#     for i in nt.flavors.list():
#         if i.name == host_vars:
#             return i.id
#     raise errors.AnsibleFilterError('There is no flavor of id {0} accessible for tenant {1}'.format(host_vars, tenant))
#
# def image_id_to_name(host_vars, user, password, tenant, auth_url):
#     auth = identity.Password(auth_url=auth_url, username=user,
#                              password=password, tenant_name=tenant)
#     sess = session.Session(auth=auth)
#     token = auth.get_token(sess)
#     endpoint = auth.get_endpoint(sess, service_name='glance', service_type='image')
#     glance = GlanceClient('2', endpoint=endpoint, token=token)
#     try:
#         return glance.images.get(host_vars).name
#     except glanceclient.exc.HTTPNotFound:
#         raise errors.AnsibleFilterError('There is no image of id {0} accessible for tenant {1}'.format(host_vars, tenant))
#
# def image_name_to_id(host_vars, user, password, tenant, auth_url):
#     auth = identity.Password(auth_url=auth_url, username=user,
#                              password=password, tenant_name=tenant)
#     sess = session.Session(auth=auth)
#     token = auth.get_token(sess)
#     endpoint = auth.get_endpoint(sess, service_name='glance', service_type='image')
#     glance = GlanceClient('2', endpoint=endpoint, token=token)
#     for i in glance.images.list():
#         if i.name == host_vars:
#             return i.id
#     raise errors.AnsibleFilterError('There is no image of name {0} accessible for tenant {1}'.format(host_vars, tenant))
#
# def network_name_to_id(host_vars, user, password, tenant, auth_url):
#     """ Accept one name of network or list of names of networks and return the same
#     structure, but names replaced by ids of the network(s). """
#     auth = identity.Password(auth_url=auth_url, username=user,
#                              password=password, tenant_name=tenant)
#     sess = session.Session(auth=auth)
#     token = auth.get_token(sess)
#     endpoint = auth.get_endpoint(sess, service_name='neutron', service_type='network')
#     neutron = NeutronClient('2.0', endpoint_url=endpoint, token=token)
#     result_as_list = isinstance(host_vars, list)
#     if not result_as_list:
#         host_vars = [host_vars]
#     result = []
#     for net in host_vars:
#         networks = neutron.list_networks(name=net, fields='name')["networks"]
#         if networks:
#             result += [networks[0]['id']]
#         else:
#             raise errors.AnsibleFilterError('There is no network of name {0} accessible for tenant {1}'.format(net, tenant))
#     if result_as_list:
#         return result
#     else:
#         return result[0]
#
# def network_id_to_name(host_vars, user, password, tenant, auth_url):
#     """ Accept one id of network or list of ids of networks and return the same
#     structure, but ids replaced by name of the network(s). """
#     auth = identity.Password(auth_url=auth_url, username=user,
#                              password=password, tenant_name=tenant)
#     sess = session.Session(auth=auth)
#     token = auth.get_token(sess)
#     endpoint = auth.get_endpoint(sess, service_name='neutron', service_type='network')
#     neutron = NeutronClient('2.0', endpoint_url=endpoint, token=token)
#     result_as_list = isinstance(host_vars, list)
#     if not result_as_list:
#         host_vars = [host_vars]
#     result = []
#     for net in host_vars:
#         networks = neutron.list_networks(id=net, fields='name')["networks"]
#         if networks:
#             result += [networks[0]['name']]
#         else:
#             raise errors.AnsibleFilterError('There is no network of id {0} accessible for tenant {1}'.format(net, tenant))
#     if result_as_list:
#         return result
#     else:
#         return result[0]


class FilterModule(object):

    def filters(self):
        return {
            "nova_result_to_builder_ip": nova_result_to_builder_ip
            # "flavor_id_to_name": flavor_id_to_name,
            # "flavor_name_to_id": flavor_name_to_id,
            # "image_id_to_name": image_id_to_name,
            # "image_name_to_id": image_name_to_id,
            # "network_name_to_id": network_name_to_id,
            # "network_id_to_name": network_id_to_name,
        }
roles/copr/backend/files/provision/filter_plugins/os_nova.py (new file, 34 lines)
@@ -0,0 +1,34 @@
from novaclient.v1_1.client import Client


def nova_result_to_builder_ip(nova_result, network_name):
    return nova_result["addresses"][network_name][0]["addr"]


def network_name_to_id(network_name, username, password, tenant_name, auth_url):
    nt = Client(username, password, tenant_name, auth_url, insecure=True)
    return nt.networks.find(label=network_name).id


def image_name_to_id(image_name, username, password, tenant_name, auth_url):
    nt = Client(username, password, tenant_name, auth_url, insecure=True)
    return nt.images.find(name=image_name).id


def flavor_name_to_id(flavor_name, username, password, tenant_name, auth_url):
    nt = Client(username, password, tenant_name, auth_url, insecure=True)
    return nt.flavors.find(name=flavor_name).id


class FilterModule(object):

    def filters(self):
        return {
            "nova_result_to_builder_ip": nova_result_to_builder_ip,
            # "flavor_id_to_name": flavor_id_to_name,
            "flavor_name_to_id": flavor_name_to_id,
            # "image_id_to_name": image_id_to_name,
            "image_name_to_id": image_name_to_id,
            "network_name_to_id": network_name_to_id,
            # "network_id_to_name": network_id_to_name,
        }
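These filters translate the human-readable image/flavor/network names from the vars files into the IDs that the nova_compute module below expects. A minimal sketch of how they are invoked from a playbook (this mirrors the spinup_nova_task.yml added later in this commit; the OS_* variables come from nova_cloud_vars.yml):

    image_id: "{{ image_name|image_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
    flavor_id: "{{ flavor_name|flavor_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
    nics:
      - net-id: "{{ network_name|network_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"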
roles/copr/backend/files/provision/library/nova_compute.py (new file, 587 lines)
@@ -0,0 +1,587 @@
#!/usr/bin/python
#coding: utf-8 -*-

# (c) 2013, Benno Joy <benno@ansible.com>
# (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

import operator
import os

try:
    from novaclient.v1_1 import client as nova_client
    from novaclient.v1_1 import floating_ips
    from novaclient import exceptions
    from novaclient import utils
    import time
except ImportError:
    print("failed=True msg='novaclient is required for this module'")

DOCUMENTATION = '''
---
module: nova_compute
version_added: "1.2"
short_description: Create/Delete VMs from OpenStack
description:
   - Create or Remove virtual machines from Openstack.
options:
   login_username:
     description:
        - login username to authenticate to keystone
     required: true
     default: admin
   login_password:
     description:
        - Password of login user
     required: true
     default: 'yes'
   login_tenant_name:
     description:
        - The tenant name of the login user
     required: true
     default: 'yes'
   auth_url:
     description:
        - The keystone url for authentication
     required: false
     default: 'http://127.0.0.1:35357/v2.0/'
   region_name:
     description:
        - Name of the region
     required: false
     default: None
   state:
     description:
        - Indicate desired state of the resource
     choices: ['present', 'absent']
     default: present
   name:
     description:
        - Name that has to be given to the instance
     required: true
     default: None
   image_id:
     description:
        - The id of the base image to boot. Mutually exclusive with image_name
     required: true
     default: None
   image_name:
     description:
        - The name of the base image to boot. Mutually exclusive with image_id
     required: true
     default: None
     version_added: "1.8"
   image_exclude:
     description:
        - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)"
     version_added: "1.8"
   flavor_id:
     description:
        - The id of the flavor in which the new VM has to be created. Mutually exclusive with flavor_ram
     required: false
     default: 1
   flavor_ram:
     description:
        - The minimum amount of ram in MB that the flavor in which the new VM has to be created must have. Mutually exclusive with flavor_id
     required: false
     default: 1
     version_added: "1.8"
   flavor_include:
     description:
        - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name.
     version_added: "1.8"
   key_name:
     description:
        - The key pair name to be used when creating a VM
     required: false
     default: None
   security_groups:
     description:
        - The name of the security group to which the VM should be added
     required: false
     default: None
   nics:
     description:
        - A list of network id's to which the VM's interface should be attached
     required: false
     default: None
   auto_floating_ip:
     description:
        - Should a floating ip be auto created and assigned
     required: false
     default: 'yes'
     version_added: "1.8"
   floating_ips:
     description:
        - list of valid floating IPs that pre-exist to assign to this node
     required: false
     default: None
     version_added: "1.8"
   floating_ip_pools:
     description:
        - list of floating IP pools from which to choose a floating IP
     required: false
     default: None
     version_added: "1.8"
   availability_zone:
     description:
        - Name of the availability zone
     required: false
     default: None
     version_added: "1.8"
   meta:
     description:
        - A list of key value pairs that should be provided as a metadata to the new VM
     required: false
     default: None
   wait:
     description:
        - If the module should wait for the VM to be created.
     required: false
     default: 'yes'
   wait_for:
     description:
        - The amount of time the module should wait for the VM to get into active state
     required: false
     default: 180
   config_drive:
     description:
        - Whether to boot the server with config drive enabled
     required: false
     default: 'no'
     version_added: "1.8"
   user_data:
     description:
        - Opaque blob of data which is made available to the instance
     required: false
     default: None
     version_added: "1.6"
requirements: ["novaclient"]
'''

EXAMPLES = '''
# Creates a new VM and attaches to a network and passes metadata to the instance
- nova_compute:
    state: present
    login_username: admin
    login_password: admin
    login_tenant_name: admin
    name: vm1
    image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529
    key_name: ansible_key
    wait_for: 200
    flavor_id: 4
    nics:
      - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
    meta:
      hostname: test1
      group: uge_master

# Creates a new VM in HP Cloud AE1 region availability zone az2 and automatically assigns a floating IP
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      name: vm1
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      region_name: region-b.geo-1
      availability_zone: az2
      image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
      key_name: test
      wait_for: 200
      flavor_id: 101
      security_groups: default
      auto_floating_ip: yes

# Creates a new VM in HP Cloud AE1 region availability zone az2 and assigns a pre-known floating IP
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      name: vm1
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      region_name: region-b.geo-1
      availability_zone: az2
      image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
      key_name: test
      wait_for: 200
      flavor_id: 101
      floating_ips:
        - 12.34.56.79

# Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      name: vm1
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      region_name: region-b.geo-1
      image_name: Ubuntu Server 14.04
      image_exclude: deprecated
      flavor_ram: 4096

# Creates a new VM with 4G of RAM on Ubuntu Trusty on a Rackspace Performance node in DFW
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      name: vm1
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      auth_url: https://identity.api.rackspacecloud.com/v2.0/
      region_name: DFW
      image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
      flavor_ram: 4096
      flavor_include: Performance
'''


def _delete_server(module, nova):
    name = None
    server_list = None
    try:
        server_list = nova.servers.list(True, {'name': module.params['name']})
        if server_list:
            server = [x for x in server_list if x.name == module.params['name']]
            nova.servers.delete(server.pop())
    except Exception, e:
        module.fail_json( msg = "Error in deleting vm: %s" % e.message)
    if module.params['wait'] == 'no':
        module.exit_json(changed = True, result = "deleted")
    expire = time.time() + int(module.params['wait_for'])
    while time.time() < expire:
        name = nova.servers.list(True, {'name': module.params['name']})
        if not name:
            module.exit_json(changed = True, result = "deleted")
        time.sleep(5)
    module.fail_json(msg = "Timed out waiting for server to get deleted, please check manually")


def _add_floating_ip_from_pool(module, nova, server):

    # instantiate FloatingIPManager object
    floating_ip_obj = floating_ips.FloatingIPManager(nova)

    # empty dict and list
    usable_floating_ips = {}
    pools = []

    # user specified
    pools = module.params['floating_ip_pools']

    # get the list of all floating IPs. Mileage may
    # vary according to Nova Compute configuration
    # per cloud provider
    all_floating_ips = floating_ip_obj.list()

    # iterate through all pools of IP address. Empty
    # string means all and is the default value
    for pool in pools:
        # temporary list per pool
        pool_ips = []
        # loop through all floating IPs
        for f_ip in all_floating_ips:
            # if not reserved and the correct pool, add
            if f_ip.instance_id is None and (f_ip.pool == pool):
                pool_ips.append(f_ip.ip)
                # only need one
                break

        # if the list is empty, add for this pool
        if not pool_ips:
            try:
                new_ip = nova.floating_ips.create(pool)
            except Exception, e:
                module.fail_json(msg = "Unable to create floating ip")
            pool_ips.append(new_ip.ip)
        # Add to the main list
        usable_floating_ips[pool] = pool_ips

    # finally, add ip(s) to instance for each pool
    for pool in usable_floating_ips:
        for ip in usable_floating_ips[pool]:
            try:
                server.add_floating_ip(ip)
                # We only need to assign one ip - but there is an inherent
                # race condition and some other cloud operation may have
                # stolen an available floating ip
                break
            except Exception, e:
                module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))


def _add_floating_ip_list(module, server, ips):
    # add ip(s) to instance
    for ip in ips:
        try:
            server.add_floating_ip(ip)
        except Exception, e:
            module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))


def _add_auto_floating_ip(module, nova, server):

    try:
        new_ip = nova.floating_ips.create()
    except Exception as e:
        module.fail_json(msg = "Unable to create floating ip: %s" % (e.message))

    try:
        server.add_floating_ip(new_ip)
    except Exception as e:
        # Clean up - we auto-created this ip, and it's not attached
        # to the server, so the cloud will not know what to do with it
        server.floating_ips.delete(new_ip)
        module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (new_ip, server.id, e.message))


def _add_floating_ip(module, nova, server):

    if module.params['floating_ip_pools']:
        _add_floating_ip_from_pool(module, nova, server)
    elif module.params['floating_ips']:
        _add_floating_ip_list(module, server, module.params['floating_ips'])
    elif module.params['auto_floating_ip']:
        _add_auto_floating_ip(module, nova, server)
    else:
        return server

    # this may look redundant, but if there is now a
    # floating IP, then it needs to be obtained from
    # a recent server object if the above code path exec'd
    try:
        server = nova.servers.get(server.id)
    except Exception, e:
        module.fail_json(msg = "Error in getting info from instance: %s " % e.message)
    return server


def _get_image_id(module, nova):
    if module.params['image_name']:
        for image in nova.images.list():
            if (module.params['image_name'] in image.name and (
                    not module.params['image_exclude']
                    or module.params['image_exclude'] not in image.name)):
                return image.id
        module.fail_json(msg = "Error finding image id from name(%s)" % module.params['image_name'])
    return module.params['image_id']


def _get_flavor_id(module, nova):
    if module.params['flavor_ram']:
        for flavor in sorted(nova.flavors.list(), key=operator.attrgetter('ram')):
            if (flavor.ram >= module.params['flavor_ram'] and
                    (not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)):
                return flavor.id
        module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram'])
    return module.params['flavor_id']


def _create_server(module, nova):
    image_id = _get_image_id(module, nova)
    flavor_id = _get_flavor_id(module, nova)
    bootargs = [module.params['name'], image_id, flavor_id]
    bootkwargs = {
        'nics' : module.params['nics'],
        'meta' : module.params['meta'],
        'security_groups': module.params['security_groups'].split(','),
        # userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module:
        'userdata': module.params['user_data'],
        'config_drive': module.params['config_drive'],
    }

    for optional_param in ('region_name', 'key_name', 'availability_zone'):
        if module.params[optional_param]:
            bootkwargs[optional_param] = module.params[optional_param]
    try:
        server = nova.servers.create(*bootargs, **bootkwargs)
        server = nova.servers.get(server.id)
    except Exception, e:
        module.fail_json( msg = "Error in creating instance: %s " % e.message)
    if module.params['wait'] == 'yes':
        expire = time.time() + int(module.params['wait_for'])
        while time.time() < expire:
            try:
                server = nova.servers.get(server.id)
            except Exception, e:
                module.fail_json( msg = "Error in getting info from instance: %s" % e.message)
            if server.status == 'ACTIVE':
                server = _add_floating_ip(module, nova, server)

                private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
                public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')

                # now exit with info
                module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)

            if server.status == 'ERROR':
                module.fail_json(msg = "Error in creating the server, please check logs")
            time.sleep(2)

        module.fail_json(msg = "Timeout waiting for the server to come up.. Please check manually")
    if server.status == 'ERROR':
        module.fail_json(msg = "Error in creating the server.. Please check manually")
    private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
    public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')

    module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)


def _delete_floating_ip_list(module, nova, server, extra_ips):
    for ip in extra_ips:
        nova.servers.remove_floating_ip(server=server.id, address=ip)


def _check_floating_ips(module, nova, server):
    changed = False
    if module.params['floating_ip_pools'] or module.params['floating_ips'] or module.params['auto_floating_ip']:
        ips = openstack_find_nova_addresses(server.addresses, 'floating')
        if not ips:
            # If we're configured to have a floating but we don't have one,
            # let's add one
            server = _add_floating_ip(module, nova, server)
            changed = True
        elif module.params['floating_ips']:
            # we were configured to have specific ips, let's make sure we have
            # those
            missing_ips = []
            for ip in module.params['floating_ips']:
                if ip not in ips:
                    missing_ips.append(ip)
            if missing_ips:
                _add_floating_ip_list(module, server, missing_ips)
                changed = True
            extra_ips = []
            for ip in ips:
                if ip not in module.params['floating_ips']:
                    extra_ips.append(ip)
            if extra_ips:
                _delete_floating_ip_list(module, nova, server, extra_ips)
                changed = True
    return (changed, server)


def _get_server_state(module, nova):
    server = None
    try:
        servers = nova.servers.list(True, {'name': module.params['name']})
        if servers:
            # the {'name': module.params['name']} will also return servers
            # with names that partially match the server name, so we have to
            # strictly filter here
            servers = [x for x in servers if x.name == module.params['name']]
            if servers:
                server = servers[0]
    except Exception, e:
        module.fail_json(msg = "Error in getting the server list: %s" % e.message)
    if server and module.params['state'] == 'present':
        if server.status != 'ACTIVE':
            module.fail_json( msg="The VM is available but not Active. state:" + server.status)
        (ip_changed, server) = _check_floating_ips(module, nova, server)
        private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
        public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
        module.exit_json(changed = ip_changed, id = server.id, public_ip = ''.join(public), private_ip = ''.join(private), info = server._info)
    if server and module.params['state'] == 'absent':
        return True
    if module.params['state'] == 'absent':
        module.exit_json(changed = False, result = "not present")
    return True


def main():
    argument_spec = openstack_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        image_id = dict(default=None),
        image_name = dict(default=None),
        image_exclude = dict(default='(deprecated)'),
        flavor_id = dict(default=1),
        flavor_ram = dict(default=None, type='int'),
        flavor_include = dict(default=None),
        key_name = dict(default=None),
        security_groups = dict(default='default'),
        nics = dict(default=None),
        meta = dict(default=None),
        wait = dict(default='yes', choices=['yes', 'no']),
        wait_for = dict(default=180),
        state = dict(default='present', choices=['absent', 'present']),
        user_data = dict(default=None),
        config_drive = dict(default=False, type='bool'),
        auto_floating_ip = dict(default=False, type='bool'),
        floating_ips = dict(default=None),
        floating_ip_pools = dict(default=None),
        insecure = dict(default=False, type='bool'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['auto_floating_ip','floating_ips'],
            ['auto_floating_ip','floating_ip_pools'],
            ['floating_ips','floating_ip_pools'],
            ['image_id','image_name'],
            ['flavor_id','flavor_ram'],
        ],
    )

    nova = nova_client.Client(module.params['login_username'],
                              module.params['login_password'],
                              module.params['login_tenant_name'],
                              module.params['auth_url'],
                              region_name=module.params['region_name'],
                              insecure=module.params['insecure'],
                              service_type='compute')
    try:
        nova.authenticate()
    except exceptions.Unauthorized, e:
        module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message)
    except exceptions.AuthorizationFailure, e:
        module.fail_json(msg = "Unable to authorize user: %s" % e.message)

    if module.params['state'] == 'present':
        if not module.params['image_id'] and not module.params['image_name']:
            module.fail_json( msg = "Parameter 'image_id' or `image_name` is required if state == 'present'")
        else:
            _get_server_state(module, nova)
            _create_server(module, nova)
    if module.params['state'] == 'absent':
        _get_server_state(module, nova)
        _delete_server(module, nova)

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
main()
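Relative to the stock nova_compute module of this era, the notable local change here appears to be the insecure option, threaded through to the novaclient constructor so that the self-signed TLS certificate on the new cloud is accepted. A hedged sketch of a task using it (all values are placeholders or reused from the EXAMPLES above, not from this commit):

    - name: boot a throwaway vm, skipping TLS verification
      nova_compute:
        insecure: True
        auth_url: https://example-cloud:5000/v2.0
        login_username: someuser
        login_password: somepass
        login_tenant_name: sometenant
        name: example_vm
        image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529
        flavor_id: 1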
@@ -93,13 +93,13 @@
   service: state=running enabled=yes name=lighttpd
 
 # setup dirs for the ansible execution off of provisioning
-- name: dirs from provision
-  file: state=directory path="/home/copr/provision/{{ item }}" owner=copr group=copr
-  with_items:
-  - action_plugins
-  - library
-  tags:
-  - provision_config
+#- name: dirs from provision
+#  file: state=directory path="/home/copr/provision/{{ item }}" owner=copr group=copr
+#  with_items:
+#  - action_plugins
+#  - library
+#  tags:
+#  - provision_config
 
 - name: put ansible.cfg for all this into /etc/ansible/ on the system
   copy: src="provision/ansible.cfg" dest=/etc/ansible/ansible.cfg
@@ -107,11 +107,13 @@
   - provision_config
 
 - name: put some files into the provision subdir
-  template: src="provision/{{ item }}" dest="/home/copr/provision/{{ item }}"
-  with_items:
-  - inventory
-  - builderpb.yml
-  - terminatepb.yml
+  template: src="{{ item }}" dest="/home/copr/provision/{{ item }}"
+  with_fileglob:
+  - templates/provision/*.yml
+  #with_items:
+  #- inventory
+  #- builderpb.yml
+  #- terminatepb.yml
   tags:
   - provision_config
@@ -4,10 +4,7 @@
 # default is http://copr
 results_baseurl=https://copr-be.cloud.fedoraproject.org/results
 
-# ??? What is this
-# default is http://coprs/rest/api
 frontend_url=http://copr-fe.cloud.fedoraproject.org/backend
-#frontend_url=https://172.16.5.31/backend
 frontend_base_url=http://copr-fe.cloud.fedoraproject.org
 
 # must have the same value as BACKEND_PASSWORD in the frontend's /etc/copr/copr.conf
@@ -19,12 +16,12 @@ group0_archs=i386,x86_64
 group0_spawn_playbook=/home/copr/provision/builderpb.yml
 group0_terminate_playbook=/home/copr/provision/terminatepb.yml
 group0_max_workers=7
-# please don't set more, 12 builders leads to frequient OpenStack deniel of service
+# please don't set more, 12 builders leads to frequent OpenStack denial of service
 
 # path to ansible playbook which spawns builder
 # see /usr/share/copr*/playbooks/ for examples
 # default is /etc/copr/builder_playbook.yml
-spawn_playbook=/home/copr/provision/builderpb.yml
+spawn_playbook=/home/copr/provision/builderpb_nova.yml
 
 # path to ansible playbook which terminates the builder
 # default is /etc/copr/terminate_playbook.yml
@@ -4,8 +4,6 @@
 # default is http://copr
 results_baseurl=http://copr-be-dev.cloud.fedoraproject.org/results
 
-# ??? What is this
-# default is http://coprs/rest/api
 frontend_url=http://copr-fe-dev.cloud.fedoraproject.org/backend
 frontend_base_url=http://copr-fe-dev.cloud.fedoraproject.org

@@ -23,7 +21,7 @@ group0_max_workers=2
 # path to ansible playbook which spawns builder
 # see /usr/share/copr*/playbooks/ for examples
 # default is /etc/copr/builder_playbook.yml
-spawn_playbook=/home/copr/provision/builderpb.yml
+spawn_playbook=/home/copr/provision/builderpb_nova.yml
 
 # path to ansible playbook which terminates the builder
 # default is /etc/copr/terminate_playbook.yml
roles/copr/backend/templates/provision/builderpb_nova.yml (new file, 48 lines)
@@ -0,0 +1,48 @@
- name: check/create instance
  hosts: 127.0.0.1
  gather_facts: False

  vars_files:
  - nova_cloud_vars.yml

  vars:
    # _OS_AUTH_OPTS: "--os-auth-url {{OS_AUTH_URL}} --os-username {{OS_USERNAME}} --os-password {{OS_PASSWORD}} --os-tenant-name {{OS_TENANT_NAME}} --os-tenant-id {{OS_TENANT_ID}} "

    image_name: "{{ copr_builder_image_name }}"
    flavor_name: "{{ copr_builder_flavor_name }}"

    network_name: "{{ copr_builder_network_name }}"
    keypair: buildsys
    max_spawn_time: 600
    spawning_vm_user: "fedora"

    # pass this option if you need to create a new base image from a snapshot
    #prepare_base_image: True

  tasks:
  - name: generate builder name
    local_action: set_fact vm_name="Copr_builder_{{ 999999999 | random }}"

  - debug: msg="VM_NAME={{ vm_name }}"
  - include: "spinup_nova_task.yml"

  - debug: msg="VM_IP={{ builder_ip }}"

  - name: wait for the host to be hot
    local_action: wait_for host={{ builder_ip }} port=22 delay=1 timeout=600

  - name: wait until ssh is available
    local_action: shell false; until [ "$?" -eq "0" ]; do sleep 2; ssh -o PasswordAuthentication=no {{ spawning_vm_user|default('fedora') }}@{{ builder_ip }} 'echo foobar' 2>/dev/null; done
    async: 600
    poll: 2


- name: provision builder
  hosts: builder_temp_group
  gather_facts: False
  sudo: True
  user: fedora

  tasks:
  - include: "provision_builder_tasks.yml"
@@ -1 +0,0 @@
localhost
@@ -0,0 +1,9 @@
---
OS_AUTH_URL: "{{ copr_builders_os_auth_url }}"

OS_TENANT_ID: "{{ copr_builders_os_tenant_id }}"
OS_TENANT_NAME: "{{ copr_builders_os_tenant_name }}"

OS_USERNAME: "{{ copr_builders_os_username }}"
OS_PASSWORD: "{{ copr_nova_password }}"
@@ -0,0 +1,72 @@
- name: install pkgs
  yum: state=present pkg={{ item }}
  with_items:
  - dnf
  - mock
  - mock-lvm
  - createrepo
  - yum-utils
  - pyliblzma
  - rsync
  - openssh-clients
  - libselinux-python
  - libsemanage-python

- name: make sure newest rpm
  # todo: replace with dnf after ansible 1.9 is available
  yum: name={{ item }} state=latest
  with_items:
  - rpm
  - glib2
  - ca-certificates

- name: put updated mock configs into /etc/mock
  copy: src=files/mock/{{ item }} dest=/etc/mock
  with_items:
  - site-defaults.cfg

# ansible doesn't support simultaneous usage of the async and with_* options;
# it's not even planned for implementation, see https://github.com/ansible/ansible/issues/5841
#- name: prepare caches
#  when: prepare_base_image is defined
#  async: 3600
#  command: mock -r {{ item }} --init
#  with_items:
#  - epel-5-i386
#  - epel-5-x86_64
#  - epel-6-i386
#  - epel-6-x86_64
#  - epel-7-x86_64
#  - fedora-20-i386
#  - fedora-20-x86_64
#  - fedora-21-i386
#  - fedora-21-x86_64
#  - fedora-22-i386
#  - fedora-22-x86_64
#  - fedora-rawhide-i386
#  - fedora-rawhide-x86_64


- name: prepare cache
  when: prepare_base_image is defined
  async: 3600
  shell: "for i in epel-5-i386 epel-5-x86_64 epel-6-i386 epel-6-x86_64 epel-7-x86_64 fedora-20-i386 fedora-20-x86_64 fedora-21-i386 fedora-21-x86_64 fedora-22-i386 fedora-22-x86_64 fedora-rawhide-i386 fedora-rawhide-x86_64; do mock --init -r $i; done"

- name: mockbuilder user
  user: name=mockbuilder groups=mock

- name: mockbuilder .ssh
  file: state=directory path=/home/mockbuilder/.ssh mode=0700 owner=mockbuilder group=mockbuilder

- name: mockbuilder authorized_keys
  authorized_key: user=mockbuilder key='{{ lookup('file', '/home/copr/provision/files/buildsys.pub') }}'

- name: root authorized_keys
  authorized_key: user=root key='{{ lookup('file', '/home/copr/provision/files/buildsys.pub') }}'

- lineinfile: dest=/etc/mock/fedora-rawhide-x86_64.cfg line="config_opts['package_manager'] = 'dnf'" state=absent
- lineinfile: dest=/etc/mock/fedora-rawhide-i386.cfg line="config_opts['package_manager'] = 'dnf'" state=absent

- lineinfile: dest=/etc/security/limits.conf line="* soft nofile 10240" insertafter=EOF
- lineinfile: dest=/etc/security/limits.conf line="* hard nofile 10240" insertafter=EOF
roles/copr/backend/templates/provision/spinup_nova_task.yml (new file, 28 lines)
@@ -0,0 +1,28 @@
- name: spin/ensure vm with nova_compute
  local_action:
    module: nova_compute
    insecure: True
    auth_url: "{{OS_AUTH_URL}}"
    login_username: "{{OS_USERNAME}}"
    login_password: "{{OS_PASSWORD}}"
    login_tenant_name: "{{OS_TENANT_NAME}}"
    name: "{{ vm_name }}"
    # image_id: 86422ca2-6eeb-435c-87e8-402b3c7c3b7b
    image_id: "{{ image_name|image_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
    wait_for: "{{ max_spawn_time }}"
    flavor_id: "{{ flavor_name|flavor_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
    security_groups: ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev #,ssh-internal-persistent
    key_name: "{{ keypair }}"
    nics:
      - net-id: "{{ network_name|network_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
  register: nova

# - debug: msg="{{ nova.info }}"

- set_fact: builder_ip="{{ nova.info|nova_result_to_builder_ip(network_name) }}"

- name: add builder ip to the special group
  local_action: add_host hostname={{ builder_ip }} groupname=builder_temp_group
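The set_fact step leans on the nova_result_to_builder_ip filter defined earlier: nova.info carries the module's info return (server._info), whose addresses dict is keyed by network name, so the filter effectively reads nova.info["addresses"][network_name][0]["addr"]. A hypothetical, illustrative-only shape of that structure (real values come from OpenStack):

    # hypothetical example of nova.info["addresses"]
    addresses:
      copr-net:
        - addr: 172.16.1.23
          OS-EXT-IPS:type: fixed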