copr-be: finish autumn cleanup in provision/ dir
None of these are used nowadays. The old scripts/playbooks were just confusing for newcomers trying to debug problems in the spawner.

parent 8f87caf6bb
commit 1b7aa66cae

14 changed files with 0 additions and 1063 deletions

@@ -1,48 +0,0 @@
- name: create an instance in aws
  hosts: 127.0.0.1
  gather_facts: False

  roles:
    - spawner

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    keypair: copr-builder
    instance_type: a1.xlarge
    security_group: ssh-only
    max_spawn_time: 1100
    spawning_vm_user: "fedora"
    arch: aarch64
    image_name: "{{ builder_images[arch] }}"
    instance_volumes:
      - device_name: sdb
        delete_on_termination: True
        volume_type: gp2
        volume_size: 160

  tasks:
    - include: "spinup_aws_task.yml"


- name: provision builder
  hosts: builder_temp_group
  gather_facts: False
  become: true
  user: fedora

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    ansible_python_interpreter: /usr/bin/python3

  tasks:
    - include: "create_swap_file.yml"
      when:
        - prepare_base_image is defined

    - include: "provision_builder_tasks.yml"

@@ -1,52 +0,0 @@
---
- name: create an aarch64 spot instance in aws
  hosts: 127.0.0.1
  gather_facts: false

  roles:
    - spawner

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    keypair: copr-builder
    instance_type: a1.xlarge
    # We keep this around the on-demand price, as we don't want unnecessary
    # interrupts.
    spot_price: 0.102
    security_group: ssh-only
    max_spawn_time: 1100
    spawning_vm_user: "fedora"
    arch: aarch64
    image_name: "{{ builder_images[arch] }}"
    instance_volumes:
      - device_name: sdb
        delete_on_termination: True
        volume_type: gp2
        volume_size: 160

  tasks:
    - include: "spinup_aws_task.yml"


- name: provision builder
  hosts: builder_temp_group
  gather_facts: false
  become: true
  user: fedora

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    ansible_python_interpreter: /usr/bin/python3

  tasks:
    - include: "create_swap_file.yml"
      when:
        - prepare_base_image is defined

    - include: "provision_builder_tasks.yml"

@@ -1,47 +0,0 @@
---
- name: create an x86_64 spot instance in aws
  hosts: 127.0.0.1
  gather_facts: false

  roles:
    - spawner

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    keypair: copr-builder
    instance_type: i3.large
    # We keep this around the on-demand price, as we don't want unnecessary
    # interrupts.
    spot_price: 0.156
    security_group: ssh-only
    max_spawn_time: 1100
    spawning_vm_user: "fedora"
    arch: x86_64
    image_name: "{{ builder_images[arch] }}"

  tasks:
    - include: "spinup_aws_task.yml"


- name: provision builder
  hosts: builder_temp_group
  gather_facts: false
  become: true
  user: fedora

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    ansible_python_interpreter: /usr/bin/python3

  tasks:
    - include: "create_swap_file.yml"
      when:
        - prepare_base_image is defined

    - include: "provision_builder_tasks.yml"

@@ -1,43 +0,0 @@
- name: create an instance in aws
  hosts: 127.0.0.1
  gather_facts: False

  roles:
    - spawner

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    keypair: copr-builder
    instance_type: i3.large
    security_group: ssh-only
    max_spawn_time: 1100
    spawning_vm_user: "fedora"
    arch: x86_64
    image_name: "{{ builder_images[arch] }}"

  tasks:
    - include: "spinup_aws_task.yml"


- name: provision builder
  hosts: builder_temp_group
  gather_facts: False
  become: true
  user: fedora

  vars_files:
    - vars.yml
    - aws_cloud_vars.yml

  vars:
    ansible_python_interpreter: /usr/bin/python3

  tasks:
    - include: "create_swap_file.yml"
      when:
        - prepare_base_image is defined

    - include: "provision_builder_tasks.yml"

@@ -1,49 +0,0 @@
- name: check/create instance
  hosts: 127.0.0.1
  gather_facts: False

  vars_files:
    - vars.yml
    - nova_cloud_vars.yml

  vars:
    keypair: buildsys
    max_spawn_time: 1100
    spawning_vm_user: "fedora"
    image_name: "{{ builder_images.x86_64 }}"

  tasks:
    - name: generate builder name
      local_action: set_fact vm_name="Copr_builder_{{ 999999999 | random }}"

    - debug: msg="vm_name={{ vm_name }}"
    - include: "spinup_nova_task.yml"

    - debug: msg="VM_IP={{ builder_ip }}"

    - name: wait for he host to be hot
      local_action: wait_for host={{ builder_ip }} port=22 delay=1 timeout=1100

    - name: wait until ssh is available
      local_action: shell false; until [ "$?" -eq "0" ]; do sleep 2; ssh -o PasswordAuthentication=no {{ spawning_vm_user|default('fedora') }}@{{ builder_ip }} 'echo foobar' 2>/dev/null; done
      async: 600
      poll: 2


- name: provision builder
  hosts: builder_temp_group
  gather_facts: False
  become: true
  user: fedora

  vars_files:
    - vars.yml
    - nova_cloud_vars.yml

  vars:
    ansible_python_interpreter: /usr/bin/python3

  tasks:
    - include: "provision_builder_tasks.yml"

    - include: "offloading_hack.yml"

@@ -1,49 +0,0 @@
- name: check/create instance
  hosts: 127.0.0.1
  gather_facts: False

  vars_files:
    - vars.yml
    - nova_cloud_vars.yml

  vars:
    keypair: buildsys
    max_spawn_time: 1100
    spawning_vm_user: "fedora"
    image_name: "{{ builder_images.ppc64le }}"

  tasks:
    - name: generate builder name
      local_action: set_fact vm_name="Copr_builder_{{ 999999999 | random }}"

    - debug: msg="vm_name={{ vm_name }}"
    - include: "spinup_nova_task.yml"

    - debug: msg="VM_IP={{ builder_ip }}"

    - name: wait for he host to be hot
      local_action: wait_for host={{ builder_ip }} port=22 delay=1 timeout=1100

    - name: wait until ssh is available
      local_action: shell false; until [ "$?" -eq "0" ]; do sleep 2; ssh -o PasswordAuthentication=no {{ spawning_vm_user|default('fedora') }}@{{ builder_ip }} 'echo foobar' 2>/dev/null; done
      async: 600
      poll: 2


- name: provision builder
  hosts: builder_temp_group
  gather_facts: False
  become: true
  user: fedora

  vars_files:
    - vars.yml
    - nova_cloud_vars.yml

  vars:
    ansible_python_interpreter: /usr/bin/python3

  tasks:
    - include: "provision_builder_tasks.yml"

    - include: "offloading_hack.yml"

@@ -1,40 +0,0 @@
from novaclient.client import Client
import re

def extract_ip_from_stdout(output):
    match = re.search(r'IP=([^\{\}\n"]+)', output, re.MULTILINE)
    if match:
        return match.group(1)


def nova_result_to_builder_ip(nova_result, network_name):
    return nova_result["addresses"][network_name][0]["addr"]


def network_name_to_id(network_name, username, password, tenant_name, auth_url):
    nt = Client('2', username, password, tenant_name, auth_url)
    return nt.networks.find(label=network_name).id


def image_name_to_id(image_name, username, password, tenant_name, auth_url):
    nt = Client('2', username, password, tenant_name, auth_url)
    return nt.images.find(name=image_name).id


def flavor_name_to_id(flavor_name, username, password, tenant_name, auth_url):
    nt = Client('2', username, password, tenant_name, auth_url)
    return nt.flavors.find(name=flavor_name).id


class FilterModule(object):
    def filters(self):
        return {
            "nova_result_to_builder_ip": nova_result_to_builder_ip,
            # "flavor_id_to_name": flavor_id_to_name,
            "flavor_name_to_id": flavor_name_to_id,
            # "image_id_to_name": image_id_to_name,
            "image_name_to_id": image_name_to_id,
            "network_name_to_id": network_name_to_id,
            "extract_ip_from_stdout": extract_ip_from_stdout,
            # "network_id_to_name": network_id_to_name,
        }

@@ -1,599 +0,0 @@
#!/usr/bin/python
#coding: utf-8 -*-

# (c) 2013, Benno Joy <benno@ansible.com>
# (c) 2013, John Dewey <john@dewey.ws>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.

import operator
import os
import time

try:
    from novaclient.v2 import client as nova_client
    from novaclient.v2 import floating_ips
    from novaclient import exceptions
    from novaclient import utils
    HAS_NOVACLIENT = True
except ImportError:
    HAS_NOVACLIENT = False

DOCUMENTATION = '''
---
module: nova_compute
version_added: "1.2"
deprecated: Deprecated in 2.0. Use os_server instead
short_description: Create/Delete VMs from OpenStack
description:
   - Create or Remove virtual machines from Openstack.
options:
   login_username:
     description:
        - login username to authenticate to keystone
     required: true
     default: admin
   login_password:
     description:
        - Password of login user
     required: true
     default: 'yes'
   login_tenant_name:
     description:
        - The tenant name of the login user
     required: true
     default: 'yes'
   auth_url:
     description:
        - The keystone url for authentication
     required: false
     default: 'http://127.0.0.1:35357/v2.0/'
   region_name:
     description:
        - Name of the region
     required: false
     default: None
   state:
     description:
        - Indicate desired state of the resource
     choices: ['present', 'absent']
     default: present
   name:
     description:
        - Name that has to be given to the instance
     required: true
     default: None
   image_id:
     description:
        - The id of the base image to boot. Mutually exclusive with image_name
     required: true
     default: None
   image_name:
     description:
        - The name of the base image to boot. Mutually exclusive with image_id
     required: true
     default: None
     version_added: "1.8"
   image_exclude:
     description:
        - Text to use to filter image names, for the case, such as HP, where there are multiple image names matching the common identifying portions. image_exclude is a negative match filter - it is text that may not exist in the image name. Defaults to "(deprecated)"
     version_added: "1.8"
   flavor_id:
     description:
        - The id of the flavor in which the new VM has to be created. Mutually exclusive with flavor_ram
     required: false
     default: 1
   flavor_ram:
     description:
        - The minimum amount of ram in MB that the flavor in which the new VM has to be created must have. Mutually exclusive with flavor_id
     required: false
     default: 1
     version_added: "1.8"
   flavor_include:
     description:
        - Text to use to filter flavor names, for the case, such as Rackspace, where there are multiple flavors that have the same ram count. flavor_include is a positive match filter - it must exist in the flavor name.
     version_added: "1.8"
   key_name:
     description:
        - The key pair name to be used when creating a VM
     required: false
     default: None
   security_groups:
     description:
        - The name of the security group to which the VM should be added
     required: false
     default: None
   nics:
     description:
        - A list of network id's to which the VM's interface should be attached
     required: false
     default: None
   auto_floating_ip:
     description:
        - Should a floating ip be auto created and assigned
     required: false
     default: 'no'
     version_added: "1.8"
   floating_ips:
     description:
        - list of valid floating IPs that pre-exist to assign to this node
     required: false
     default: None
     version_added: "1.8"
   floating_ip_pools:
     description:
        - list of floating IP pools from which to choose a floating IP
     required: false
     default: None
     version_added: "1.8"
   availability_zone:
     description:
        - Name of the availability zone
     required: false
     default: None
     version_added: "1.8"
   meta:
     description:
        - A list of key value pairs that should be provided as a metadata to the new VM
     required: false
     default: None
   wait:
     description:
        - If the module should wait for the VM to be created.
     required: false
     default: 'yes'
   wait_for:
     description:
        - The amount of time the module should wait for the VM to get into active state
     required: false
     default: 180
   config_drive:
     description:
        - Whether to boot the server with config drive enabled
     required: false
     default: 'no'
     version_added: "1.8"
   user_data:
     description:
        - Opaque blob of data which is made available to the instance
     required: false
     default: None
     version_added: "1.6"
   scheduler_hints:
     description:
        - Arbitrary key/value pairs to the scheduler for custom use
     required: false
     default: None
     version_added: "1.9"
requirements:
    - "python >= 2.6"
    - "python-novaclient"
'''

EXAMPLES = '''
# Creates a new VM and attaches to a network and passes metadata to the instance
- nova_compute:
    state: present
    login_username: admin
    login_password: admin
    login_tenant_name: admin
    name: vm1
    image_id: 4f905f38-e52a-43d2-b6ec-754a13ffb529
    key_name: ansible_key
    wait_for: 200
    flavor_id: 4
    nics:
      - net-id: 34605f38-e52a-25d2-b6ec-754a13ffb723
    meta:
      hostname: test1
      group: uge_master

# Creates a new VM in HP Cloud AE1 region availability zone az2 and automatically assigns a floating IP
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      name: vm1
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      region_name: region-b.geo-1
      availability_zone: az2
      image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
      key_name: test
      wait_for: 200
      flavor_id: 101
      security_groups: default
      auto_floating_ip: yes

# Creates a new VM in HP Cloud AE1 region availability zone az2 and assigns a pre-known floating IP
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      name: vm1
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      region_name: region-b.geo-1
      availability_zone: az2
      image_id: 9302692b-b787-4b52-a3a6-daebb79cb498
      key_name: test
      wait_for: 200
      flavor_id: 101
      floating_ips:
        - 12.34.56.79

# Creates a new VM with 4G of RAM on Ubuntu Trusty, ignoring deprecated images
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      name: vm1
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      auth_url: https://region-b.geo-1.identity.hpcloudsvc.com:35357/v2.0/
      region_name: region-b.geo-1
      image_name: Ubuntu Server 14.04
      image_exclude: deprecated
      flavor_ram: 4096

# Creates a new VM with 4G of RAM on Ubuntu Trusty on a Rackspace Performance node in DFW
- name: launch a nova instance
  hosts: localhost
  tasks:
  - name: launch an instance
    nova_compute:
      name: vm1
      state: present
      login_username: username
      login_password: Equality7-2521
      login_tenant_name: username-project1
      auth_url: https://identity.api.rackspacecloud.com/v2.0/
      region_name: DFW
      image_name: Ubuntu 14.04 LTS (Trusty Tahr) (PVHVM)
      flavor_ram: 4096
      flavor_include: Performance
'''



def _delete_server(module, nova):
    name = None
    server_list = None
    try:
        server_list = nova.servers.list(True, {'name': module.params['name']})
        if server_list:
            server = [x for x in server_list if x.name == module.params['name']]
            nova.servers.delete(server.pop())
    except Exception as e:
        module.fail_json( msg = "Error in deleting vm: %s" % e.message)
    if module.params['wait'] == 'no':
        module.exit_json(changed = True, result = "deleted")
    expire = time.time() + int(module.params['wait_for'])
    while time.time() < expire:
        name = nova.servers.list(True, {'name': module.params['name']})
        if not name:
            module.exit_json(changed = True, result = "deleted")
        time.sleep(5)
    module.fail_json(msg = "Timed out waiting for server to get deleted, please check manually")


def _add_floating_ip_from_pool(module, nova, server):

    # instantiate FloatingIPManager object
    floating_ip_obj = floating_ips.FloatingIPManager(nova)

    # empty dict and list
    usable_floating_ips = {}
    pools = []

    # user specified
    pools = module.params['floating_ip_pools']

    # get the list of all floating IPs. Mileage may
    # vary according to Nova Compute configuration
    # per cloud provider
    all_floating_ips = floating_ip_obj.list()

    # iterate through all pools of IP address. Empty
    # string means all and is the default value
    for pool in pools:
        # temporary list per pool
        pool_ips = []
        # loop through all floating IPs
        for f_ip in all_floating_ips:
            # if not reserved and the correct pool, add
            if f_ip.fixed_ip is None and (f_ip.pool == pool):
                pool_ips.append(f_ip.ip)
                # only need one
                break

        # if the list is empty, add for this pool
        if not pool_ips:
            try:
                new_ip = nova.floating_ips.create(pool)
            except Exception as e:
                module.fail_json(msg = "Unable to create floating ip: %s" % (e.message))
            pool_ips.append(new_ip.ip)
        # Add to the main list
        usable_floating_ips[pool] = pool_ips

    # finally, add ip(s) to instance for each pool
    for pool in usable_floating_ips:
        for ip in usable_floating_ips[pool]:
            try:
                server.add_floating_ip(ip)
                # We only need to assign one ip - but there is an inherent
                # race condition and some other cloud operation may have
                # stolen an available floating ip
                break
            except Exception as e:
                module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))


def _add_floating_ip_list(module, server, ips):
    # add ip(s) to instance
    for ip in ips:
        try:
            server.add_floating_ip(ip)
        except Exception as e:
            module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))


def _add_auto_floating_ip(module, nova, server):

    try:
        new_ip = nova.floating_ips.create()
    except Exception as e:
        module.fail_json(msg = "Unable to create floating ip: %s" % (e))

    try:
        server.add_floating_ip(new_ip)
    except Exception as e:
        # Clean up - we auto-created this ip, and it's not attached
        # to the server, so the cloud will not know what to do with it
        server.floating_ips.delete(new_ip)
        module.fail_json(msg = "Error attaching IP %s to instance %s: %s " % (ip, server.id, e.message))


def _add_floating_ip(module, nova, server):

    if module.params['floating_ip_pools']:
        _add_floating_ip_from_pool(module, nova, server)
    elif module.params['floating_ips']:
        _add_floating_ip_list(module, server, module.params['floating_ips'])
    elif module.params['auto_floating_ip']:
        _add_auto_floating_ip(module, nova, server)
    else:
        return server

    # this may look redundant, but if there is now a
    # floating IP, then it needs to be obtained from
    # a recent server object if the above code path exec'd
    try:
        server = nova.servers.get(server.id)
    except Exception as e:
        module.fail_json(msg = "Error in getting info from instance: %s " % e.message)
    return server


def _get_image_id(module, nova):
    if module.params['image_name']:
        for image in nova.images.list():
            if (module.params['image_name'] in image.name and (
                    not module.params['image_exclude']
                    or module.params['image_exclude'] not in image.name)):
                return image.id
        module.fail_json(msg = "Error finding image id from name(%s)" % module.params['image_name'])
    return module.params['image_id']


def _get_flavor_id(module, nova):
    if module.params['flavor_ram']:
        for flavor in sorted(nova.flavors.list(), key=operator.attrgetter('ram')):
            if (flavor.ram >= module.params['flavor_ram'] and
                    (not module.params['flavor_include'] or module.params['flavor_include'] in flavor.name)):
                return flavor.id
        module.fail_json(msg = "Error finding flavor with %sMB of RAM" % module.params['flavor_ram'])
    return module.params['flavor_id']


def _create_server(module, nova):
    image_id = _get_image_id(module, nova)
    flavor_id = _get_flavor_id(module, nova)
    bootargs = [module.params['name'], image_id, flavor_id]
    bootkwargs = {
        'nics' : module.params['nics'],
        'meta' : module.params['meta'],
        'security_groups': module.params['security_groups'].split(','),
        #userdata is unhyphenated in novaclient, but hyphenated here for consistency with the ec2 module:
        'userdata': module.params['user_data'],
        'config_drive': module.params['config_drive'],
    }

    for optional_param in ('region_name', 'key_name', 'availability_zone', 'scheduler_hints'):
        if module.params[optional_param]:
            bootkwargs[optional_param] = module.params[optional_param]
    try:
        server = nova.servers.create(*bootargs, **bootkwargs)
        server = nova.servers.get(server.id)
    except Exception as e:
        module.fail_json( msg = "Error in creating instance: %s " % e.message)
    if module.params['wait'] == 'yes':
        expire = time.time() + int(module.params['wait_for'])
        while time.time() < expire:
            try:
                server = nova.servers.get(server.id)
            except Exception as e:
                module.fail_json( msg = "Error in getting info from instance: %s" % e.message)
            if server.status == 'ACTIVE':
                server = _add_floating_ip(module, nova, server)

                private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
                public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')

                # now exit with info
                module.exit_json(changed = True, id = server.id, private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)

            if server.status == 'ERROR':
                module.fail_json(msg = "Error in creating the server, please check logs")
            time.sleep(2)

        module.fail_json(msg = "Timeout waiting for the server to come up.. Please check manually")
    if server.status == 'ERROR':
        module.fail_json(msg = "Error in creating the server.. Please check manually")
    private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
    public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')

    module.exit_json(changed = True, id = info['id'], private_ip=''.join(private), public_ip=''.join(public), status = server.status, info = server._info)


def _delete_floating_ip_list(module, nova, server, extra_ips):
    for ip in extra_ips:
        nova.servers.remove_floating_ip(server=server.id, address=ip)


def _check_floating_ips(module, nova, server):
    changed = False
    if module.params['floating_ip_pools'] or module.params['floating_ips'] or module.params['auto_floating_ip']:
        ips = openstack_find_nova_addresses(server.addresses, 'floating')
        if not ips:
            # If we're configured to have a floating but we don't have one,
            # let's add one
            server = _add_floating_ip(module, nova, server)
            changed = True
        elif module.params['floating_ips']:
            # we were configured to have specific ips, let's make sure we have
            # those
            missing_ips = []
            for ip in module.params['floating_ips']:
                if ip not in ips:
                    missing_ips.append(ip)
            if missing_ips:
                server = _add_floating_ip_list(module, server, missing_ips)
                changed = True
            extra_ips = []
            for ip in ips:
                if ip not in module.params['floating_ips']:
                    extra_ips.append(ip)
            if extra_ips:
                _delete_floating_ip_list(module, server, extra_ips)
                changed = True
    return (changed, server)


def _get_server_state(module, nova):
    server = None
    try:
        servers = nova.servers.list(True, {'name': module.params['name']})
        if servers:
            # the {'name': module.params['name']} will also return servers
            # with names that partially match the server name, so we have to
            # strictly filter here
            servers = [x for x in servers if x.name == module.params['name']]
            if servers:
                server = servers[0]
    except Exception as e:
        module.fail_json(msg = "Error in getting the server list: %s" % e.message)
    if server and module.params['state'] == 'present':
        if server.status != 'ACTIVE':
            module.fail_json( msg="The VM is available but not Active. state:" + server.status)
        (ip_changed, server) = _check_floating_ips(module, nova, server)
        private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
        public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
        module.exit_json(changed = ip_changed, id = server.id, public_ip = public, private_ip = private, info = server._info)
    if server and module.params['state'] == 'absent':
        return True
    if module.params['state'] == 'absent':
        module.exit_json(changed = False, result = "not present")
    return True



def main():
    argument_spec = openstack_argument_spec()
    argument_spec.update(dict(
        name = dict(required=True),
        image_id = dict(default=None),
        image_name = dict(default=None),
        image_exclude = dict(default='(deprecated)'),
        flavor_id = dict(default=1),
        flavor_ram = dict(default=None, type='int'),
        flavor_include = dict(default=None),
        key_name = dict(default=None),
        security_groups = dict(default='default'),
        nics = dict(default=None, type='list'),
        meta = dict(default=None, type='dict'),
        wait = dict(default='yes', choices=['yes', 'no']),
        wait_for = dict(default=180),
        state = dict(default='present', choices=['absent', 'present']),
        user_data = dict(default=None),
        config_drive = dict(default=False, type='bool'),
        auto_floating_ip = dict(default=False, type='bool'),
        floating_ips = dict(default=None, type='list'),
        floating_ip_pools = dict(default=None, type='list'),
        scheduler_hints = dict(default=None, type='dict'),
    ))
    module = AnsibleModule(
        argument_spec=argument_spec,
        mutually_exclusive=[
            ['auto_floating_ip','floating_ips'],
            ['auto_floating_ip','floating_ip_pools'],
            ['floating_ips','floating_ip_pools'],
            ['image_id','image_name'],
            ['flavor_id','flavor_ram'],
        ],
    )

    if not HAS_NOVACLIENT:
        module.fail_json(msg='python-novaclient is required for this module')

    nova = nova_client.Client(module.params['login_username'],
                              module.params['login_password'],
                              module.params['login_tenant_name'],
                              module.params['auth_url'],
                              region_name=module.params['region_name'],
                              service_type='compute')
    try:
        nova.authenticate()
    except exceptions.Unauthorized as e:
        module.fail_json(msg = "Invalid OpenStack Nova credentials.: %s" % e.message)
    except exceptions.AuthorizationFailure as e:
        module.fail_json(msg = "Unable to authorize user: %s" % e.message)

    if module.params['state'] == 'present':
        if not module.params['image_id'] and not module.params['image_name']:
            module.fail_json( msg = "Parameter 'image_id' or `image_name` is required if state == 'present'")
        else:
            _get_server_state(module, nova)
            _create_server(module, nova)
    if module.params['state'] == 'absent':
        _get_server_state(module, nova)
        _delete_server(module, nova)

# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
    main()

@@ -1,9 +0,0 @@
# TODO: is this still needed?
# https://bugzilla.redhat.com/show_bug.cgi?id=1268192

- name: install ethtool
  package: state=present name=ethtool
  when: prepare_base_image is defined

- name: disable offloading
  command: ethtool -K eth0 tso off gro off gso off

@@ -1,7 +0,0 @@
- name: generate unique builder name
  local_action: shell date +"%Y%m%d_%H%M%S_%N"
  register: date

- name: set vm_name variable
  set_fact: vm_name="copr-{% if devel %}dev{% else %}prod{% endif %}-builder-{{ arch }}-{{ date.stdout }}"
  when: vm_name is not defined

@@ -1,48 +0,0 @@
---
- debug: msg="vm_name={{ vm_name }}"

- name: random subnet to overcome datacenter failures
  set_fact: subnet_id={{ item }}
  with_random_choice: "{{ aws_arch_subnets[arch] }}"

- name: Launch instance
  ec2:
    key_name: "{{ keypair }}"
    group: "{{ security_group }}"
    instance_type: "{{ instance_type }}"
    image: "{{ image_name }}"
    wait: true
    region: "{{ aws_region }}"
    # both x86_64 and aarch64 arches can be allocated in us-east-1c
    vpc_subnet_id: "{{ subnet_id }}"
    assign_public_ip: yes
    instance_tags:
      FedoraGroup: copr
      CoprPurpose: builder
      CoprInstance: "{% if devel %}devel{% else %}production{% endif %}"
      Name: "{{ vm_name }}"
      arch: "{{ arch }}"
    aws_access_key: "{{ aws_access_key }}"
    aws_secret_key: "{{ aws_secret_key }}"
    volumes: "{% if instance_volumes is defined %}{{ instance_volumes }}{% else %}[]{% endif %}"
    spot_price: "{{ spot_price if spot_price is defined else '' }}"
    instance_initiated_shutdown_behavior: terminate
  register: ec2

- name: Add new instance to host group
  add_host:
    hostname: "{{ item.public_ip }}"
    groupname: builder_temp_group
  loop: "{{ ec2.instances }}"

- set_fact: builder_ip={{ ec2.instances[0].public_ip }}

- debug: msg="VM_IP={{ builder_ip }}"

- name: wait for he host to be hot
  local_action: wait_for host={{ builder_ip }} port=22 delay=1 timeout={{ max_spawn_time }}

- name: wait until ssh is available
  local_action: shell false; until [ "$?" -eq "0" ]; do sleep 2; ssh -o PasswordAuthentication=no {{ spawning_vm_user|default('fedora') }}@{{ builder_ip }} 'echo foobar' 2>/dev/null; done
  async: 600
  poll: 2

@@ -1,27 +0,0 @@
- name: spin/ensure vm with nova_compute
  local_action:
    module: nova_compute
    auth_url: "{{OS_AUTH_URL}}"
    login_username: "{{OS_USERNAME}}"
    login_password: "{{OS_PASSWORD}}"
    login_tenant_name: "{{OS_TENANT_NAME}}"
    name: "{{ vm_name }}"
    # image_id: 86422ca2-6eeb-435c-87e8-402b3c7c3b7b
    image_id: "{{ image_name|image_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
    wait_for: "{{ max_spawn_time }}"
    flavor_id: "{{ flavor_name|flavor_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
    security_groups: "{{ security_groups }}" #,ssh-internal-persistent
    key_name: "{{ key_name }}"
    nics:
      - net-id: "{{ network_name|network_name_to_id(OS_USERNAME, OS_PASSWORD, OS_TENANT_NAME, OS_AUTH_URL) }}"
  register: nova

# - debug: msg="{{ nova.info }}"


- set_fact: builder_ip="{{ nova.info|nova_result_to_builder_ip(network_name) }}"

- name: add builder ip to the special group
  local_action: add_host hostname={{ builder_ip }} groupname=builder_temp_group

@@ -1,27 +0,0 @@
- name: terminate instance
  hosts: 127.0.0.1
  gather_facts: False

  vars_files:
    - aws_cloud_vars.yml

  tasks:
    - local_action:
        module: ec2
        instance_tags:
          Name: "{{ copr_task.vm_name }}"
        region: "{{ aws_region }}"
        state: running
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
      ignore_errors: yes
      register: ec2_list_result

    - local_action:
        module: ec2
        region: "{{ aws_region }}"
        state: absent
        instance_ids: "{{ ec2_list_result.instance_ids }}"
        aws_access_key: "{{ aws_access_key }}"
        aws_secret_key: "{{ aws_secret_key }}"
      when: not ec2_list_result.failed

@@ -1,18 +0,0 @@
- name: terminate instance
  hosts: 127.0.0.1
  gather_facts: False

  vars_files:
    - vars.yml
    - nova_cloud_vars.yml

  tasks:
    - name: terminate nova_compute
      local_action:
        module: nova_compute
        auth_url: "{{OS_AUTH_URL}}"
        login_username: "{{OS_USERNAME}}"
        login_password: "{{OS_PASSWORD}}"
        login_tenant_name: "{{OS_TENANT_NAME}}"
        name: "{{ copr_task.vm_name }}"
        state: absent