missed some openshift bits for OSBS in the initial commit

This commit is contained in:
Adam Miller 2015-08-05 16:06:21 +00:00
parent f887d1ec08
commit 2778c0fe1c
32 changed files with 1406 additions and 0 deletions

View file

@ -0,0 +1,315 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom filters for use in openshift-ansible
'''
from ansible import errors
from operator import itemgetter
import pdb
import re
import json
class FilterModule(object):
    ''' Custom ansible filters '''

    @staticmethod
    def oo_pdb(arg):
        ''' This pops you into a pdb instance where arg is the data passed in
            from the filter.
            Ex: "{{ hostvars | oo_pdb }}"
        '''
        pdb.set_trace()
        return arg

    @staticmethod
    def get_attr(data, attribute=None):
        ''' This looks up dictionary attributes of the form a.b.c and returns
            the value.
            Ex: data = {'a': {'b': {'c': 5}}}
                attribute = "a.b.c"
                returns 5
            Raises AnsibleFilterError when attribute is empty/None; raises
            KeyError (propagated) when an intermediate key is missing.
        '''
        if not attribute:
            raise errors.AnsibleFilterError("|failed expects attribute to be set")

        ptr = data
        for attr in attribute.split('.'):
            ptr = ptr[attr]

        return ptr

    @staticmethod
    def oo_flatten(data):
        ''' This filter plugin will flatten a list of lists into a single list.
            Ex: [[1, 2], [3]] -> [1, 2, 3]
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects to flatten a List")

        return [item for sublist in data for item in sublist]

    @staticmethod
    def oo_collect(data, attribute=None, filters=None):
        ''' This takes a list of dict and collects all attributes specified into a
            list. If filter is specified then we will include all items that
            match _ALL_ of filters. If a dict entry is missing the key in a
            filter it will be excluded from the match.
            Ex: data = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
                         {'a':2, 'z': 'z'},        # True, return
                         {'a':3, 'z': 'z'},        # True, return
                         {'a':4, 'z': 'b'},        # FAILED, obj['z'] != filters['z']
                       ]
                attribute = 'a'
                filters   = {'z': 'z'}
                returns [1, 2, 3]
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects to filter on a List")

        if not attribute:
            raise errors.AnsibleFilterError("|failed expects attribute to be set")

        if filters is not None:
            if not issubclass(type(filters), dict):
                raise errors.AnsibleFilterError("|failed expects filter to be a"
                                                " dict")
            # keep only the entries where every filter key is present and equal
            retval = [FilterModule.get_attr(d, attribute) for d in data if (
                all([d.get(key, None) == filters[key] for key in filters]))]
        else:
            retval = [FilterModule.get_attr(d, attribute) for d in data]

        return retval

    @staticmethod
    def oo_select_keys(data, keys):
        ''' This returns a list, which contains the value portions for the keys
            Ex: data = { 'a':1, 'b':2, 'c':3 }
                keys = ['a', 'c']
                returns [1, 3]
        '''
        if not issubclass(type(data), dict):
            raise errors.AnsibleFilterError("|failed expects to filter on a dict")

        if not issubclass(type(keys), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")

        # Gather up the values for the list of keys passed in
        retval = [data[key] for key in keys]

        return retval

    @staticmethod
    def oo_prepend_strings_in_list(data, prepend):
        ''' This takes a list of strings and prepends a string to each item in the
            list
            Ex: data = ['cart', 'tree']
                prepend = 'apple-'
                returns ['apple-cart', 'apple-tree']
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")
        # basestring only exists on Python 2; fall back to str so this
        # filter also works when run under Python 3.
        try:
            string_types = basestring
        except NameError:
            string_types = str
        if not all(isinstance(x, string_types) for x in data):
            raise errors.AnsibleFilterError("|failed expects first param is a list"
                                            " of strings")
        retval = [prepend + s for s in data]
        return retval

    @staticmethod
    def oo_combine_key_value(data, joiner='='):
        '''Take a list of dict in the form of { 'key': 'value'} and
           arrange them as a list of strings ['key=value']
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")

        rval = []
        for item in data:
            rval.append("%s%s%s" % (item['key'], joiner, item['value']))

        return rval

    @staticmethod
    def oo_ami_selector(data, image_name):
        ''' This takes a list of amis and an image name and attempts to return
            the latest ami.  Selection is by reverse-sorted name (or by the
            trailing "_<suffix>" when image_name ends with '_*').
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects first param is a list")

        if not data:
            return None
        else:
            if image_name is None or not image_name.endswith('_*'):
                ami = sorted(data, key=itemgetter('name'), reverse=True)[0]
                return ami['ami_id']
            else:
                # pair each ami with its name suffix and pick the highest one
                ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data]
                ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0]
                return ami['ami_id']

    @staticmethod
    def oo_ec2_volume_definition(data, host_type, docker_ephemeral=False):
        ''' This takes a dictionary of volume definitions and returns a valid ec2
            volume definition based on the host_type and the values in the
            dictionary.
            The dictionary should look similar to this:
                { 'master':
                    { 'root':
                        { 'volume_size': 10, 'device_type': 'gp2', 'iops': 500 }
                    },
                  'node':
                    { 'root':
                        { 'volume_size': 10, 'device_type': 'io1', 'iops': 1000 },
                      'docker':
                        { 'volume_size': 40, 'device_type': 'gp2',
                          'iops': 500, 'ephemeral': 'true' }
                    }
                }
            Returns [root_vol], [root_vol, docker_vol] or [root_vol, etcd_vol]
            depending on host_type.  Note: 'iops' is only meaningful for io1
            volumes and is dropped otherwise.
        '''
        if not issubclass(type(data), dict):
            raise errors.AnsibleFilterError("|failed expects first param is a dict")
        if host_type not in ['master', 'node', 'etcd']:
            raise errors.AnsibleFilterError("|failed expects etcd, master or node"
                                            " as the host type")

        root_vol = data[host_type]['root']
        root_vol['device_name'] = '/dev/sda1'
        root_vol['delete_on_termination'] = True
        if root_vol['device_type'] != 'io1':
            root_vol.pop('iops', None)
        if host_type == 'node':
            docker_vol = data[host_type]['docker']
            docker_vol['device_name'] = '/dev/xvdb'
            docker_vol['delete_on_termination'] = True
            if docker_vol['device_type'] != 'io1':
                docker_vol.pop('iops', None)
            if docker_ephemeral:
                # ephemeral volumes have no EBS attributes
                docker_vol.pop('device_type', None)
                docker_vol.pop('delete_on_termination', None)
                docker_vol['ephemeral'] = 'ephemeral0'
            return [root_vol, docker_vol]
        elif host_type == 'etcd':
            # NOTE(review): reads data['etcd']['etcd'] (not 'root'-style) --
            # confirm callers supply the volume dict under this key
            etcd_vol = data[host_type]['etcd']
            etcd_vol['device_name'] = '/dev/xvdb'
            etcd_vol['delete_on_termination'] = True
            if etcd_vol['device_type'] != 'io1':
                etcd_vol.pop('iops', None)
            return [root_vol, etcd_vol]
        return [root_vol]

    @staticmethod
    def oo_split(string, separator=','):
        ''' This splits the input string into a list
        '''
        return string.split(separator)

    @staticmethod
    def oo_filter_list(data, filter_attr=None):
        ''' This returns a list, which contains all items where filter_attr
            evaluates to true
            Ex: data = [ { a: 1, b: True },
                         { a: 3, b: False },
                         { a: 5, b: True } ]
                filter_attr = 'b'
                returns [ { a: 1, b: True },
                          { a: 5, b: True } ]
        '''
        if not issubclass(type(data), list):
            raise errors.AnsibleFilterError("|failed expects to filter on a list")

        if not issubclass(type(filter_attr), str):
            raise errors.AnsibleFilterError("|failed expects filter_attr is a str")

        # Gather up the values for the list of keys passed in
        return [x for x in data if x[filter_attr]]

    @staticmethod
    def oo_parse_heat_stack_outputs(data):
        ''' Formats the HEAT stack output into a usable form.
            Takes the command result of `heat stack-show` (an ascii table
            whose "outputs" row contains a pretty-printed JSON list of
            {output_key, output_value, description} dicts) and returns a
            plain {output_key: output_value} mapping, e.g.:
                { "Key_A": "value_A",
                  "Key_B": ["value_B1", "value_B2"] }
        '''
        # Extract the "outputs" JSON snippet from the pretty-printed array
        in_outputs = False
        outputs = ''

        line_regex = re.compile(r'\|\s*(.*?)\s*\|\s*(.*?)\s*\|')
        for line in data['stdout_lines']:
            match = line_regex.match(line)
            if match:
                # a non-empty first column starts a new table row; only
                # accumulate the second column while inside the outputs row
                if match.group(1) == 'outputs':
                    in_outputs = True
                elif match.group(1) != '':
                    in_outputs = False
                if in_outputs:
                    outputs += match.group(2)

        outputs = json.loads(outputs)

        # Revamp the "outputs" to put it in the form of a "Key: value" map
        revamped_outputs = {}
        for output in outputs:
            revamped_outputs[output['output_key']] = output['output_value']

        return revamped_outputs

    def filters(self):
        ''' returns a mapping of filters to methods '''
        return {
            "oo_select_keys": self.oo_select_keys,
            "oo_collect": self.oo_collect,
            "oo_flatten": self.oo_flatten,
            "oo_pdb": self.oo_pdb,
            "oo_prepend_strings_in_list": self.oo_prepend_strings_in_list,
            "oo_ami_selector": self.oo_ami_selector,
            "oo_ec2_volume_definition": self.oo_ec2_volume_definition,
            "oo_combine_key_value": self.oo_combine_key_value,
            "oo_split": self.oo_split,
            "oo_filter_list": self.oo_filter_list,
            "oo_parse_heat_stack_outputs": self.oo_parse_heat_stack_outputs
        }

View file

@ -0,0 +1,79 @@
#!/usr/bin/python
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
Custom zabbix filters for use in openshift-ansible
'''
import pdb
class FilterModule(object):
    ''' Custom zabbix ansible filters '''

    @staticmethod
    def create_data(data, results, key, new_key):
        '''Collect `key` from every entry of `results` and store the
           resulting list in `data` under `new_key`; returns `data`.
        '''
        data[new_key] = [entry[key] for entry in results]
        return data

    @staticmethod
    def oo_set_zbx_trigger_triggerid(item, trigger_results):
        '''Fill in item['triggerid'] from a zabbix trigger API result,
           which is either a list of trigger dicts or a dict holding a
           'triggerids' list.
        '''
        if not isinstance(trigger_results, list):
            item['triggerid'] = trigger_results['triggerids'][0]
        else:
            item['triggerid'] = trigger_results[0]['triggerid']
        return item

    @staticmethod
    def oo_set_zbx_item_hostid(item, template_results):
        '''Fill in item['hostid'] from a zabbix template API result,
           which is either a list of template dicts or a dict holding a
           'templateids' list.
        '''
        if not isinstance(template_results, list):
            item['hostid'] = template_results['templateids'][0]
        else:
            item['hostid'] = template_results[0]['templateid']
        return item

    @staticmethod
    def oo_pdb(arg):
        '''Drop into a pdb session with `arg` bound to the filter input.
           Ex: "{{ hostvars | oo_pdb }}"
        '''
        pdb.set_trace()
        return arg

    @staticmethod
    def select_by_name(ans_data, data):
        '''Find the entry of `data` whose key equals ans_data['name'],
           inject ans_data['templateid'] as its params hostid and return
           those params; None when nothing matches.
        '''
        wanted = ans_data['name']
        for name in data:
            if wanted == name:
                params = data[name]['params']
                params['hostid'] = ans_data['templateid']
                return params
        return None

    @staticmethod
    def oo_build_zabbix_list_dict(values, string):
        '''Wrap each value in a one-entry dict keyed by `string`.
        '''
        return [{string: value} for value in values]

    def filters(self):
        ''' returns a mapping of filters to methods '''
        return {
            "select_by_name": self.select_by_name,
            "oo_set_zbx_item_hostid": self.oo_set_zbx_item_hostid,
            "oo_set_zbx_trigger_triggerid": self.oo_set_zbx_trigger_triggerid,
            "oo_build_zabbix_list_dict": self.oo_build_zabbix_list_dict,
            "create_data": self.create_data,
        }

View file

@ -0,0 +1,3 @@
---
ansible_ssh_user=root
deployment_type=origin

View file

@ -314,6 +314,19 @@ ppc-hub.qa.fedoraproject.org
[koji-stg]
koji01.stg.phx2.fedoraproject.org
# Create an OSEv3 group that contains the masters and nodes groups
[OSv3:children]
openshift_masters
openshift_nodes
# host group for OpenShift v3 masters
[openshift_masters]
osbs01.stg.phx2.fedoraproject.org
# host group for OpenShift v3 nodes
[openshift_nodes]
osbs01.stg.phx2.fedoraproject.org
[osbs-stg]
osbs01.stg.phx2.fedoraproject.org

View file

@ -0,0 +1,73 @@
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
# vim: expandtab:tabstop=4:shiftwidth=4
'''
oo_option lookup plugin for openshift-ansible
Usage:
- debug:
msg: "{{ lookup('oo_option', '<key>') | default('<default_value>', True) }}"
This returns, by order of priority:
* if it exists, the `cli_<key>` ansible variable. This variable is set by `bin/cluster --option <key>=<value> `
* if it exists, the environment variable named `<key>`
* if none of the above conditions are met, empty string is returned
'''
from ansible.utils import template
import os
# Reason: disable too-few-public-methods because the `run` method is the only
# one required by the Ansible API
# Status: permanently disabled
# pylint: disable=too-few-public-methods
# Reason: disable too-few-public-methods because the `run` method is the only
#     one required by the Ansible API
# Status: permanently disabled
# pylint: disable=too-few-public-methods
class LookupModule(object):
    ''' oo_option lookup plugin main class

        Resolves an option by priority: the `cli_<key>` ansible variable
        (set by `bin/cluster --option <key>=<value>`), then the `<key>`
        environment variable, then empty string.
    '''

    # Reason: disable unused-argument because Ansible is calling us with many
    #     parameters we are not interested in.
    #     The lookup plugins of Ansible have this kwargs "catch-all" parameter
    #     which is not used
    # Status: permanently disabled unless Ansible API evolves
    # pylint: disable=unused-argument
    def __init__(self, basedir=None, **kwargs):
        ''' Constructor '''
        self.basedir = basedir

    # pylint: disable=unused-argument
    def run(self, terms, inject=None, **kwargs):
        ''' Main execution path.

            terms: option name(s) -- a string or list of strings
            inject: ansible variables available to the templar
            Returns a list with one resolved value per term.
        '''
        try:
            terms = template.template(self.basedir, terms, inject)
        # Reason: disable broad-except to really ignore any potential exception
        #   This is inspired by the upstream "env" lookup plugin:
        #   https://github.com/ansible/ansible/blob/devel/v1/ansible/runner/lookup_plugins/env.py#L29
        # pylint: disable=broad-except
        except Exception:
            pass

        # basestring only exists on Python 2; fall back to str so the
        # plugin also works when run under Python 3.
        try:
            string_types = basestring
        except NameError:
            string_types = str
        if isinstance(terms, string_types):
            terms = [terms]

        ret = []
        for term in terms:
            # only the first whitespace-separated token names the option
            option_name = term.split()[0]
            cli_key = 'cli_' + option_name
            if inject and cli_key in inject:
                ret.append(inject[cli_key])
            elif option_name in os.environ:
                ret.append(os.environ[option_name])
            else:
                ret.append('')
        return ret

215
lookup_plugins/sequence.py Normal file
View file

@ -0,0 +1,215 @@
# (c) 2013, Jayson Vantuyl <jayson@aggressive.ly>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.errors import AnsibleError
import ansible.utils as utils
from re import compile as re_compile, IGNORECASE
# shortcut format: [start-]end[/stride][:format]
# NUM matches an integer in decimal, octal (leading 0) or hex (0x...) form,
# as later consumed by int(value, 0).
NUM = "(0?x?[0-9a-f]+)"
SHORTCUT = re_compile(
    "^(" +        # Group 0: optional "start-" prefix
    NUM +         # Group 1: Start
    "-)?" +
    NUM +         # Group 2: End
    "(/" +        # Group 3: optional "/stride" suffix
    NUM +         # Group 4: Stride
    ")?" +
    "(:(.+))?$",  # Group 5, Group 6: optional ":format" printf-style string
    IGNORECASE
)
class LookupModule(object):
    """
    sequence lookup module

    Used to generate some sequence of items. Takes arguments in two forms.

    The simple / shortcut form is:

      [start-]end[/stride][:format]

    As indicated by the brackets: start, stride, and format string are all
    optional.  The format string is in the style of printf.  This can be used
    to pad with zeros, format in hexadecimal, etc.  All of the numerical values
    can be specified in octal (i.e. 0664) or hexadecimal (i.e. 0x3f8).
    Negative numbers are not supported.

    Some examples:

      5 -> ["1","2","3","4","5"]
      5-8 -> ["5", "6", "7", "8"]
      2-10/2 -> ["2", "4", "6", "8", "10"]
      4:host%02d -> ["host01","host02","host03","host04"]

    The standard Ansible key-value form is accepted as well.  For example:

      start=5 end=11 stride=2 format=0x%02x -> ["0x05","0x07","0x09","0x0a"]

    This format takes an alternate form of "end" called "count", which counts
    some number from the starting value.  For example:

      count=5 -> ["1", "2", "3", "4", "5"]
      start=0x0f00 count=4 format=%04x -> ["0f00", "0f01", "0f02", "0f03"]
      start=0 count=5 stride=2 -> ["0", "2", "4", "6", "8"]
      start=1 count=5 stride=2 -> ["1", "3", "5", "7", "9"]

    The count option is mostly useful for avoiding off-by-one errors and errors
    calculating the number of entries in a sequence when a stride is specified.
    """

    def __init__(self, basedir, **kwargs):
        """absorb any keyword args"""
        self.basedir = basedir

    def reset(self):
        """set sensible defaults"""
        self.start = 1
        self.count = None
        self.end = None
        self.stride = 1
        self.format = "%d"

    def parse_kv_args(self, args):
        """parse key-value style arguments"""
        for arg in ["start", "end", "count", "stride"]:
            try:
                arg_raw = args.pop(arg, None)
                if arg_raw is None:
                    continue
                # base 0 accepts decimal, octal (0...) and hex (0x...)
                arg_cooked = int(arg_raw, 0)
                setattr(self, arg, arg_cooked)
            except ValueError:
                raise AnsibleError(
                    "can't parse arg %s=%r as integer"
                    % (arg, arg_raw)
                )
        if 'format' in args:
            self.format = args.pop("format")
        if args:
            raise AnsibleError(
                "unrecognized arguments to with_sequence: %r"
                % args.keys()
            )

    def parse_simple_args(self, term):
        """parse the shortcut forms, return True/False"""
        match = SHORTCUT.match(term)
        if not match:
            return False

        _, start, end, _, stride, _, format = match.groups()

        if start is not None:
            try:
                start = int(start, 0)
            except ValueError:
                raise AnsibleError("can't parse start=%s as integer" % start)
        if end is not None:
            try:
                end = int(end, 0)
            except ValueError:
                raise AnsibleError("can't parse end=%s as integer" % end)
        if stride is not None:
            try:
                stride = int(stride, 0)
            except ValueError:
                raise AnsibleError("can't parse stride=%s as integer" % stride)

        if start is not None:
            self.start = start
        if end is not None:
            self.end = end
        if stride is not None:
            self.stride = stride
        if format is not None:
            self.format = format
        # BUG FIX: the original fell off the end (returning None), so run()
        # treated every successful shortcut parse as a failure and re-parsed
        # the term as key=value pairs; the docstring promises True/False.
        return True

    def sanity_check(self):
        """validate the combined options; converts count to end"""
        if self.count is None and self.end is None:
            raise AnsibleError(
                "must specify count or end in with_sequence"
            )
        elif self.count is not None and self.end is not None:
            raise AnsibleError(
                "can't specify both count and end in with_sequence"
            )
        elif self.count is not None:
            # convert count to end
            if self.count != 0:
                self.end = self.start + self.count * self.stride - 1
            else:
                self.start = 0
                self.end = 0
                self.stride = 0
            del self.count
        if self.stride > 0 and self.end < self.start:
            raise AnsibleError("to count backwards make stride negative")
        if self.stride < 0 and self.end > self.start:
            raise AnsibleError("to count forward don't make stride negative")
        if self.format.count('%') != 1:
            raise AnsibleError("bad formatting string: %s" % self.format)

    def generate_sequence(self):
        """yield each formatted value from start to end (inclusive)"""
        if self.stride > 0:
            adjust = 1
        else:
            adjust = -1
        # NOTE(review): xrange is Python 2 only -- confirm target interpreter
        numbers = xrange(self.start, self.end + adjust, self.stride)

        for i in numbers:
            try:
                formatted = self.format % i
                yield formatted
            except (ValueError, TypeError):
                # BUG FIX: the original interpolated a single value into a
                # two-placeholder format string, raising TypeError instead
                # of the intended AnsibleError message.
                raise AnsibleError(
                    "problem formatting %r with %r" % (i, self.format)
                )

    def run(self, terms, inject=None, **kwargs):
        """main entry point: parse each term and generate its sequence"""
        results = []

        terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject)

        if isinstance(terms, basestring):
            terms = [ terms ]

        for term in terms:
            try:
                self.reset()  # clear out things for this iteration

                try:
                    if not self.parse_simple_args(term):
                        self.parse_kv_args(utils.parse_kv(term))
                except Exception:
                    raise AnsibleError(
                        "unknown error parsing with_sequence arguments: %r"
                        % term
                    )

                self.sanity_check()

                if self.stride != 0:
                    results.extend(self.generate_sequence())
            except AnsibleError:
                raise
            # BUG FIX: "except Exception, e" is Python-2-only syntax; the
            # "as e" form is valid on Python 2.6+ and Python 3.
            except Exception as e:
                raise AnsibleError(
                    "unknown error generating sequence: %s" % str(e)
                )

        return results

View file

@ -43,3 +43,19 @@
handlers:
- include: "{{ handlers }}/restart_services.yml"
- name: install openshift
hosts: osbs-stg
user: root
gather_facts: True
vars:
g_etcd_group: "{{ 'etcd' }}"
g_masters_group: "{{ 'openshift_masters' }}"
g_nodes_group: "{{ 'openshift_nodes' }}"
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: 4
openshift_deployment_type: "{{ deployment_type }}"
tasks:
- include: ../openshift_common/openshift-cluster/config.yml

View file

@ -0,0 +1,20 @@
This file contains playbooks imported from the upstream OpenShift Ansible
github repository[0].
In order to re-import/update these scripts,
# This can really be anywhere, just outside this git tree
$ cd /tmp/
$ git clone https://github.com/openshift/openshift-ansible.git
# Assuming your local copy of this git repo lives in ~/src/fedora-ansible/
$ cp -r \
openshift-ansible/playbooks/common/* \
~/src/fedora-ansible/playbooks/openshift_common/
There are relative symlinks involved and at the time of this writing, the
directory structure of this git repository matches where appropriate with the
upstream repository making this mostly a clean import.
[0] - https://github.com/openshift/openshift-ansible.git

View file

@ -0,0 +1,74 @@
---
- name: Populate config host groups
hosts: localhost
gather_facts: no
tasks:
- fail:
msg: This playbook requires g_etcd_group to be set
when: g_etcd_group is not defined
- fail:
msg: This playbook requires g_masters_group to be set
when: g_masters_group is not defined
- fail:
msg: This playbook requires g_nodes_group to be set
when: g_nodes_group is not defined
- name: Evaluate oo_etcd_to_config
add_host:
name: "{{ item }}"
groups: oo_etcd_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_etcd_group] | default([])
- name: Evaluate oo_masters_to_config
add_host:
name: "{{ item }}"
groups: oo_masters_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_masters_group] | default([])
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_nodes_group] | default([])
- name: Evaluate oo_nodes_to_config
add_host:
name: "{{ item }}"
groups: oo_nodes_to_config
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
with_items: groups[g_masters_group] | default([])
when: g_nodeonmaster is defined and g_nodeonmaster == true
- name: Evaluate oo_first_etcd
add_host:
name: "{{ groups[g_etcd_group][0] }}"
groups: oo_first_etcd
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
when: g_etcd_group in groups and (groups[g_etcd_group] | length) > 0
- name: Evaluate oo_first_master
add_host:
name: "{{ groups[g_masters_group][0] }}"
groups: oo_first_master
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_sudo: "{{ g_sudo | default(omit) }}"
when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
- include: ../openshift-etcd/config.yml
- include: ../openshift-master/config.yml
- include: ../openshift-node/config.yml
vars:
osn_cluster_dns_domain: "{{ hostvars[groups.oo_first_master.0].openshift.dns.domain }}"
osn_cluster_dns_ip: "{{ hostvars[groups.oo_first_master.0].openshift.dns.ip }}"

View file

@ -0,0 +1,8 @@
---
- name: Deploy OpenShift Services
hosts: "{{ g_svc_master }}"
connection: ssh
gather_facts: yes
roles:
- openshift_registry
- openshift_router

View file

@ -0,0 +1 @@
../../../filter_plugins

View file

@ -0,0 +1 @@
../../../lookup_plugins

View file

@ -0,0 +1 @@
../../../roles

View file

@ -0,0 +1,13 @@
---
- set_fact: k8s_type="etcd"
- name: Generate etcd instance names(s)
set_fact:
scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: etcd_names_output
with_sequence: count={{ num_etcd }}
- set_fact:
etcd_names: "{{ etcd_names_output.results | default([])
| oo_collect('ansible_facts')
| oo_collect('scratch_name') }}"

View file

@ -0,0 +1,13 @@
---
- set_fact: k8s_type="master"
- name: Generate master instance names(s)
set_fact:
scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ '%05x' | format(1048576 | random) }}"
register: master_names_output
with_sequence: count={{ num_masters }}
- set_fact:
master_names: "{{ master_names_output.results | default([])
| oo_collect('ansible_facts')
| oo_collect('scratch_name') }}"

View file

@ -0,0 +1,15 @@
---
- set_fact: k8s_type=node
- set_fact: sub_host_type="{{ type }}"
- set_fact: number_nodes="{{ count }}"
- name: Generate node instance names(s)
set_fact:
scratch_name: "{{ cluster_id }}-{{ k8s_type }}-{{ sub_host_type }}-{{ '%05x' | format(1048576 | random) }}"
register: node_names_output
with_sequence: count={{ number_nodes }}
- set_fact:
node_names: "{{ node_names_output.results | default([])
| oo_collect('ansible_facts')
| oo_collect('scratch_name') }}"

View file

@ -0,0 +1,12 @@
---
- hosts: oo_hosts_to_update
vars:
openshift_deployment_type: "{{ deployment_type }}"
roles:
- role: rhel_subscribe
when: deployment_type == "enterprise" and
ansible_distribution == "RedHat" and
lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) |
default('no', True) | lower in ['no', 'false']
- openshift_repos
- os_update_latest

View file

@ -0,0 +1,96 @@
---
- name: Set etcd facts needed for generating certs
hosts: oo_etcd_to_config
roles:
- openshift_facts
tasks:
- openshift_facts:
role: "{{ item.role }}"
local_facts: "{{ item.local_facts }}"
with_items:
- role: common
local_facts:
hostname: "{{ openshift_hostname | default(None) }}"
public_hostname: "{{ openshift_public_hostname | default(None) }}"
deployment_type: "{{ openshift_deployment_type }}"
- name: Check status of etcd certificates
stat:
path: "{{ item }}"
with_items:
- /etc/etcd/server.crt
- /etc/etcd/peer.crt
- /etc/etcd/ca.crt
register: g_etcd_server_cert_stat_result
- set_fact:
etcd_server_certs_missing: "{{ g_etcd_server_cert_stat_result.results | map(attribute='stat.exists')
| list | intersect([false])}}"
etcd_cert_subdir: etcd-{{ openshift.common.hostname }}
etcd_cert_config_dir: /etc/etcd
etcd_cert_prefix:
- name: Create temp directory for syncing certs
hosts: localhost
connection: local
sudo: false
gather_facts: no
tasks:
- name: Create local temp directory for syncing certs
local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
register: g_etcd_mktemp
changed_when: False
- name: Configure etcd certificates
hosts: oo_first_etcd
vars:
etcd_generated_certs_dir: /etc/etcd/generated_certs
etcd_needing_server_certs: "{{ hostvars
| oo_select_keys(groups['oo_etcd_to_config'])
| oo_filter_list(filter_attr='etcd_server_certs_missing') }}"
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
roles:
- etcd_certificates
post_tasks:
- name: Create a tarball of the etcd certs
command: >
tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
-C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
args:
creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
with_items: etcd_needing_server_certs
- name: Retrieve the etcd cert tarballs
fetch:
src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
dest: "{{ sync_tmpdir }}/"
flat: yes
fail_on_missing: yes
validate_checksum: yes
with_items: etcd_needing_server_certs
- name: Configure etcd hosts
hosts: oo_etcd_to_config
vars:
sync_tmpdir: "{{ hostvars.localhost.g_etcd_mktemp.stdout }}"
etcd_url_scheme: https
etcd_peer_url_scheme: https
etcd_peers_group: oo_etcd_to_config
pre_tasks:
- name: Ensure certificate directory exists
file:
path: "{{ etcd_cert_config_dir }}"
state: directory
- name: Unarchive the tarball on the etcd host
unarchive:
src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
dest: "{{ etcd_cert_config_dir }}"
when: etcd_server_certs_missing
roles:
- etcd
- name: Delete temporary directory on localhost
hosts: localhost
connection: local
sudo: false
gather_facts: no
tasks:
- file: name={{ g_etcd_mktemp.stdout }} state=absent
changed_when: False

View file

@ -0,0 +1 @@
../../../filter_plugins

View file

@ -0,0 +1 @@
../../../lookup_plugins

View file

@ -0,0 +1 @@
../../../roles/

View file

@ -0,0 +1,18 @@
---
- name: Populate g_service_masters host group if needed
hosts: localhost
gather_facts: no
tasks:
- fail: msg="new_cluster_state is required to be injected in this playbook"
when: new_cluster_state is not defined
- name: Evaluate g_service_etcd
add_host: name={{ item }} groups=g_service_etcd
with_items: oo_host_group_exp | default([])
- name: Change etcd state on etcd instance(s)
hosts: g_service_etcd
connection: ssh
gather_facts: no
tasks:
- service: name=etcd state="{{ new_cluster_state }}"

View file

@ -0,0 +1,233 @@
---
# Play: gather/set master facts and determine whether client certificates
# for an external etcd cluster are missing on each master.  The facts set
# here (etcd_client_certs_missing, etcd_cert_subdir, etcd_cert_config_dir,
# etcd_cert_prefix) drive the cert generation/distribution plays below.
#
# Fix: corrected typo "certificatees" -> "certificates" in a task name.
- name: Set master facts and determine if external etcd certs need to be generated
  hosts: oo_masters_to_config
  pre_tasks:
  - set_fact:
      # etcd client port/hosts only apply when an external etcd group exists
      openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
      openshift_master_etcd_hosts: "{{ hostvars
                                       | oo_select_keys(groups['oo_etcd_to_config']
                                                        | default([]))
                                       | oo_collect('openshift.common.hostname')
                                       | default(none, true) }}"
  roles:
  - openshift_facts
  post_tasks:
  - openshift_facts:
      role: "{{ item.role }}"
      local_facts: "{{ item.local_facts }}"
    with_items:
    - role: common
      local_facts:
        hostname: "{{ openshift_hostname | default(None) }}"
        public_hostname: "{{ openshift_public_hostname | default(None) }}"
        deployment_type: "{{ openshift_deployment_type }}"
    - role: master
      local_facts:
        api_port: "{{ openshift_master_api_port | default(None) }}"
        api_url: "{{ openshift_master_api_url | default(None) }}"
        api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
        public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
        cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
        cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
        cluster_defer_ha: "{{ openshift_master_cluster_defer_ha | default(None) }}"
        console_path: "{{ openshift_master_console_path | default(None) }}"
        console_port: "{{ openshift_master_console_port | default(None) }}"
        console_url: "{{ openshift_master_console_url | default(None) }}"
        console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
        public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
  - name: Check status of external etcd certificates
    stat:
      path: "/etc/openshift/master/{{ item }}"
    with_items:
    - master.etcd-client.crt
    - master.etcd-ca.crt
    register: g_external_etcd_cert_stat_result
  - set_fact:
      # Non-empty list => at least one of the checked files is missing
      etcd_client_certs_missing: "{{ g_external_etcd_cert_stat_result.results
                                     | map(attribute='stat.exists')
                                     | list | intersect([false])}}"
      etcd_cert_subdir: openshift-master-{{ openshift.common.hostname }}
      etcd_cert_config_dir: /etc/openshift/master
      etcd_cert_prefix: master.etcd-
    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
# Play: create a scratch directory on the control host for syncing master
# certificates between hosts; its path is registered as g_master_mktemp.
- name: Create temp directory for syncing certs
  hosts: localhost
  connection: local
  sudo: false
  gather_facts: no
  tasks:
  - name: Create local temp directory for syncing certs
    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
    register: g_master_mktemp
    changed_when: False
# Play: on the first etcd host, generate etcd client certificates for any
# master that reported them missing, tar each master's certs, and fetch the
# tarballs back to the localhost sync directory.
- name: Configure etcd certificates
  hosts: oo_first_etcd
  vars:
    etcd_generated_certs_dir: /etc/etcd/generated_certs
    # Masters whose etcd_client_certs_missing fact is truthy
    etcd_needing_client_certs: "{{ hostvars
                                   | oo_select_keys(groups['oo_masters_to_config'])
                                   | oo_filter_list(filter_attr='etcd_client_certs_missing') }}"
    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
  roles:
  - etcd_certificates
  post_tasks:
  - name: Create a tarball of the etcd certs
    command: >
      tar -czvf {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz
        -C {{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }} .
    args:
      # Idempotence: skip when the tarball already exists
      creates: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
    with_items: etcd_needing_client_certs
  - name: Retrieve the etcd cert tarballs
    fetch:
      src: "{{ etcd_generated_certs_dir }}/{{ item.etcd_cert_subdir }}.tgz"
      dest: "{{ sync_tmpdir }}/"
      flat: yes
      fail_on_missing: yes
      validate_checksum: yes
    with_items: etcd_needing_client_certs
# Play: push the fetched etcd client cert tarballs out to each master and
# lock down file permissions.  Every step is skipped for masters whose certs
# were already present (etcd_client_certs_missing empty/undefined).
- name: Copy the external etcd certs to the masters
  hosts: oo_masters_to_config
  vars:
    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
  tasks:
  - name: Ensure certificate directory exists
    file:
      path: /etc/openshift/master
      state: directory
    when: etcd_client_certs_missing is defined and etcd_client_certs_missing
  - name: Unarchive the tarball on the master
    unarchive:
      src: "{{ sync_tmpdir }}/{{ etcd_cert_subdir }}.tgz"
      dest: "{{ etcd_cert_config_dir }}"
    when: etcd_client_certs_missing is defined and etcd_client_certs_missing
  # Key/cert material: restrict to root with mode 0600
  - file:
      path: "{{ etcd_cert_config_dir }}/{{ item }}"
      owner: root
      group: root
      mode: 0600
    with_items:
    - master.etcd-client.crt
    - master.etcd-client.key
    - master.etcd-ca.crt
    when: etcd_client_certs_missing is defined and etcd_client_certs_missing
# Play: compute the list of certificates each master requires and record
# which of them are missing on disk (master_certs_missing) along with the
# per-master cert subdir/config dir facts used by the plays below.
- name: Determine if master certificates need to be generated
  hosts: oo_masters_to_config
  tasks:
  - set_fact:
      openshift_master_certs_no_etcd:
      - admin.crt
      - master.kubelet-client.crt
      - master.server.crt
      - openshift-master.crt
      - openshift-registry.crt
      - openshift-router.crt
      - etcd.server.crt
      openshift_master_certs_etcd:
      - master.etcd-client.crt
  - set_fact:
      # Require the etcd client cert only when an external etcd group exists
      openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
  - name: Check status of master certificates
    stat:
      path: "/etc/openshift/master/{{ item }}"
    with_items: openshift_master_certs
    register: g_master_cert_stat_result
  - set_fact:
      # Non-empty list => at least one required cert is missing
      master_certs_missing: "{{ g_master_cert_stat_result.results
                                | map(attribute='stat.exists')
                                | list | intersect([false])}}"
      master_cert_subdir: master-{{ openshift.common.hostname }}
      master_cert_config_dir: /etc/openshift/master
# Play: on the first master, generate certificates for the other masters
# that need them, remove generated etcd client certs when an external etcd
# is in use, then tar and fetch each master's cert bundle to the localhost
# sync directory.
- name: Configure master certificates
  hosts: oo_first_master
  vars:
    master_generated_certs_dir: /etc/openshift/generated-configs
    # All masters except the first that reported missing certs
    masters_needing_certs: "{{ hostvars
                               | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
                               | oo_filter_list(filter_attr='master_certs_missing') }}"
    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
  roles:
  - openshift_master_certificates
  post_tasks:
  - name: Remove generated etcd client certs when using external etcd
    file:
      path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
      state: absent
    when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
    # item.0 = master entry, item.1 = cert file name
    with_nested:
    - masters_needing_certs
    - - master.etcd-client.crt
      - master.etcd-client.key
  - name: Create a tarball of the master certs
    command: >
      tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
        -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
    args:
      # Idempotence: skip when the tarball already exists
      creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
    with_items: masters_needing_certs
  - name: Retrieve the master cert tarball from the master
    fetch:
      src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
      dest: "{{ sync_tmpdir }}/"
      flat: yes
      fail_on_missing: yes
      validate_checksum: yes
    with_items: masters_needing_certs
# Play: unpack the cert tarball on each master (except the first, which
# generated its own) and apply the master roles; fluentd is applied only
# when enabled in facts.
- name: Configure master instances
  hosts: oo_masters_to_config
  vars:
    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
    # HA when more than one master is being configured
    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
  pre_tasks:
  - name: Ensure certificate directory exists
    file:
      path: /etc/openshift/master
      state: directory
    when: master_certs_missing and 'oo_first_master' not in group_names
  - name: Unarchive the tarball on the master
    unarchive:
      src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
      dest: "{{ master_cert_config_dir }}"
    when: master_certs_missing and 'oo_first_master' not in group_names
  roles:
  - openshift_master
  - role: fluentd_master
    when: openshift.common.use_fluentd | bool
  post_tasks:
  # Group used below to target deployment-type specific plays
  - name: Create group for deployment type
    group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
    changed_when: False
# Play: first-master-only extras — cluster setup (HA only) and examples.
- name: Additional master configuration
  hosts: oo_first_master
  vars:
    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
    # Space-separated master list consumed by the cluster role
    omc_cluster_hosts: "{{ groups.oo_masters_to_config | join(' ')}}"
  roles:
  - role: openshift_master_cluster
    when: openshift_master_ha | bool
  - openshift_examples
# Additional instance config for online deployments
# (targets the dynamic group created by the group_by task above)
- name: Additional instance config
  hosts: oo_masters_deployment_type_online
  roles:
  - pods
  - os_env_extras
# Play: clean up the localhost scratch directory used to sync master certs.
- name: Delete temporary directory on localhost
  hosts: localhost
  connection: local
  sudo: false
  gather_facts: no
  tasks:
  - file: name={{ g_master_mktemp.stdout }} state=absent
    # Cleanup step; never report a change
    changed_when: False

View file

@ -0,0 +1 @@
../../../filter_plugins

View file

@ -0,0 +1 @@
../../../lookup_plugins

View file

@ -0,0 +1 @@
../../../roles/

View file

@ -0,0 +1,18 @@
---
# Playbook: change the openshift-master service state on a set of masters.
# Callers must inject new_cluster_state and may inject oo_host_group_exp
# (the hosts to act on).
- name: Populate g_service_masters host group if needed
  hosts: localhost
  gather_facts: no
  tasks:
  # Fail fast when the caller forgot to supply the target state
  - fail: msg="new_cluster_state is required to be injected in this playbook"
    when: new_cluster_state is not defined
  - name: Evaluate g_service_masters
    add_host: name={{ item }} groups=g_service_masters
    with_items: oo_host_group_exp | default([])
- name: Change openshift-master state on master instance(s)
  hosts: g_service_masters
  connection: ssh
  gather_facts: no
  tasks:
  - service: name=openshift-master state="{{ new_cluster_state }}"

View file

@ -0,0 +1,142 @@
---
# Play: gather facts on each node and record which node certificates are
# missing (certs_missing) plus the per-node cert/config dir facts used by
# the generation/distribution plays below.
- name: Gather and set facts for node hosts
  hosts: oo_nodes_to_config
  roles:
  - openshift_facts
  tasks:
  # Since the master is generating the node certificates before they are
  # configured, we need to make sure to set the node properties beforehand if
  # we do not want the defaults
  - openshift_facts:
      role: "{{ item.role }}"
      local_facts: "{{ item.local_facts }}"
    with_items:
    - role: common
      local_facts:
        hostname: "{{ openshift_hostname | default(None) }}"
        public_hostname: "{{ openshift_public_hostname | default(None) }}"
        deployment_type: "{{ openshift_deployment_type }}"
    - role: node
      local_facts:
        labels: "{{ openshift_node_labels | default(None) }}"
        annotations: "{{ openshift_node_annotations | default(None) }}"
  - name: Check status of node certificates
    stat:
      path: "/etc/openshift/node/{{ item }}"
    with_items:
    - "system:node:{{ openshift.common.hostname }}.crt"
    - "system:node:{{ openshift.common.hostname }}.key"
    - "system:node:{{ openshift.common.hostname }}.kubeconfig"
    - ca.crt
    - server.key
    - server.crt
    register: stat_result
  - set_fact:
      # Non-empty list => at least one cert/config file is missing
      certs_missing: "{{ stat_result.results | map(attribute='stat.exists')
                         | list | intersect([false])}}"
      node_subdir: node-{{ openshift.common.hostname }}
      config_dir: /etc/openshift/generated-configs/node-{{ openshift.common.hostname }}
      node_cert_dir: /etc/openshift/node
# Play: create a scratch directory on the control host for syncing node
# certificates between hosts; its path is registered as mktemp.
- name: Create temp directory for syncing certs
  hosts: localhost
  connection: local
  sudo: false
  gather_facts: no
  tasks:
  - name: Create local temp directory for syncing certs
    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
    register: mktemp
    changed_when: False
# Play: on the first master, generate node certificates/configs for nodes
# that reported missing certs, then tar and fetch each node's config dir
# back to the localhost sync directory.
- name: Create node certificates
  hosts: oo_first_master
  vars:
    # Nodes whose certs_missing fact is truthy
    nodes_needing_certs: "{{ hostvars
                             | oo_select_keys(groups['oo_nodes_to_config']
                                              | default([]))
                             | oo_filter_list(filter_attr='certs_missing') }}"
    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
  roles:
  - openshift_node_certificates
  post_tasks:
  - name: Create a tarball of the node config directories
    # NOTE(review): --transform rewrites archive paths matching
    # "system:{{ item.node_subdir }}" to "node"; confirm this pattern
    # actually matches the generated file names (they appear to use
    # "system:node:<hostname>" while node_subdir is "node-<hostname>").
    command: >
      tar -czvf {{ item.config_dir }}.tgz
        --transform 's|system:{{ item.node_subdir }}|node|'
        -C {{ item.config_dir }} .
    args:
      # Idempotence: skip when the tarball already exists
      creates: "{{ item.config_dir }}.tgz"
    with_items: nodes_needing_certs
  - name: Retrieve the node config tarballs from the master
    fetch:
      src: "{{ item.config_dir }}.tgz"
      dest: "{{ sync_tmpdir }}/"
      flat: yes
      fail_on_missing: yes
      validate_checksum: yes
    with_items: nodes_needing_certs
# Play: unpack each node's cert/config tarball and apply the node roles;
# fluentd is applied only when enabled in facts.
- name: Configure node instances
  hosts: oo_nodes_to_config
  vars:
    sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
    # API URL taken from the first master's facts
    openshift_node_master_api_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"
  pre_tasks:
  - name: Ensure certificate directory exists
    file:
      path: "{{ node_cert_dir }}"
      state: directory
  # TODO: notify restart openshift-node
  # possibly test service started time against certificate/config file
  # timestamps in openshift-node to trigger notify
  - name: Unarchive the tarball on the node
    unarchive:
      src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
      dest: "{{ node_cert_dir }}"
    when: certs_missing
  roles:
  - openshift_node
  - role: fluentd_node
    when: openshift.common.use_fluentd | bool
  tasks:
  # Group used below to target deployment-type specific plays
  - name: Create group for deployment type
    group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }}
    changed_when: False
# Play: clean up the localhost scratch directory used to sync node certs.
- name: Delete temporary directory on localhost
  hosts: localhost
  connection: local
  sudo: false
  gather_facts: no
  tasks:
  - file: name={{ mktemp.stdout }} state=absent
    # Cleanup step; never report a change
    changed_when: False
# Additional config for online type deployments
# (targets the dynamic group created by the group_by task above)
- name: Additional instance config
  hosts: oo_nodes_deployment_type_online
  gather_facts: no
  roles:
  - os_env_extras
  - os_env_extras_node
# Play: from the first master, compute scheduleable vs unscheduleable node
# hostname lists and apply the openshift_manage_node role.
- name: Set scheduleability
  hosts: oo_first_master
  vars:
    openshift_nodes: "{{ hostvars
                         | oo_select_keys(groups['oo_nodes_to_config'])
                         | oo_collect('openshift.common.hostname') }}"
    # Nodes explicitly marked with openshift_scheduleable: False
    openshift_unscheduleable_nodes: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
                                        | oo_collect('openshift.common.hostname', {'openshift_scheduleable': False}) }}"
  pre_tasks:
  - set_fact:
      # Everything not unscheduleable is scheduleable
      openshift_scheduleable_nodes: "{{ hostvars
                                        | oo_select_keys(groups['oo_nodes_to_config'] | default([]))
                                        | oo_collect('openshift.common.hostname')
                                        | difference(openshift_unscheduleable_nodes) }}"
  roles:
  - openshift_manage_node

View file

@ -0,0 +1 @@
../../../filter_plugins

View file

@ -0,0 +1 @@
../../../lookup_plugins

View file

@ -0,0 +1 @@
../../../roles/

View file

@ -0,0 +1,18 @@
---
# Playbook: change the openshift-node service state on a set of nodes.
# Callers must inject new_cluster_state and may inject oo_host_group_exp
# (the hosts to act on).
- name: Populate g_service_nodes host group if needed
  hosts: localhost
  gather_facts: no
  tasks:
  # Fail fast when the caller forgot to supply the target state
  - fail: msg="new_cluster_state is required to be injected in this playbook"
    when: new_cluster_state is not defined
  - name: Evaluate g_service_nodes
    add_host: name={{ item }} groups=g_service_nodes
    with_items: oo_host_group_exp | default([])
- name: Change openshift-node state on node instance(s)
  hosts: g_service_nodes
  connection: ssh
  gather_facts: no
  tasks:
  - service: name=openshift-node state="{{ new_cluster_state }}"