- Add osbs-on-openshift role

- update osbs-cluster group playbook
- fix inventory now that we're using osbs-control for deploy

Signed-off-by: Adam Miller <admiller@redhat.com>
Adam Miller 2016-10-26 20:47:03 +00:00
parent 7a7f47923f
commit a83b7a20c7
15 changed files with 462 additions and 33 deletions


@@ -1221,38 +1221,6 @@ osbs-master01.phx2.fedoraproject.org
osbs-node01.phx2.fedoraproject.org
osbs-node02.phx2.fedoraproject.org
#####
# This is for the OSBS scale-out deployment work, the following group names are
# unfortunately hard-coded into the openshift-ansible playbooks and roles and
# must be used:
# OSEv3
# masters
# nodes
# etcd
# lb
#
# In an attempt to separate this out as best we can, the descriptive group
# names are used below and those groups are then set as children of the
# hard-coded groups as necessary
[OSEv3:children]
masters
nodes
etcd
lb
[nodes:children]
osbs-masters-stg
osbs-nodes-stg
[masters:children]
osbs-masters-stg
[etcd:children]
osbs-masters-stg
[lb:children]
osbs-masters-stg
[osbs-masters-stg]
osbs-master01.stg.phx2.fedoraproject.org
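
As the comment above notes, the upstream openshift-ansible plays target those hard-coded group names directly, which is why the :children mappings must exist. A minimal illustrative sketch (not the upstream playbooks verbatim, role names are assumptions) of how such plays consume the groups:

    # Illustrative only -- shows why the hard-coded group names must be populated.
    - name: Configure OpenShift masters
      hosts: masters            # resolves to osbs-masters-stg via [masters:children]
      roles:
        - openshift_master      # role name is illustrative

    - name: Configure OpenShift nodes
      hosts: nodes              # resolves to osbs-masters-stg and osbs-nodes-stg
      roles:
        - openshift_node        # role name is illustrative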


@@ -71,7 +71,7 @@
- rsync
- dbus-python
- name: Deploy OpenShift Cluster and OSBS
- name: Deploy OpenShift Cluster
hosts: osbs-control:osbs-control-stg
user: root
gather_facts: True
@@ -94,3 +94,53 @@
when: env == 'staging',
tags: ['openshift-cluster','ansible-ansible-openshift-ansible']
}
- name: Setup OSBS requirements on hosts in the cluster
  hosts: osbs-masters-stg:osbs-nodes-stg
  user: root
  gather_facts: True

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  roles:
    - osbs-common

  tasks:
    - name:

- name: Deploy OSBS on top of OpenShift
  hosts: osbs-masters-stg[0]:osbs-masters[0]
  user: root
  gather_facts: True

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  roles:
    - {
        role: osbs-on-openshift,
        osbs_openshift_home: "/var/lib/origin",
        osbs_namespace: "default",
        osbs_namespace_create: "false",
        osbs_kubeconfig_path: "/etc/origin/master/admin.kubeconfig",
        osbs_environment: {
          KUBECONFIG: "{{ osbs_kubeconfig_path }}"
        },
        osbs_service_accounts: [],
        osbs_readonly_users: [],
        osbs_readonly_groups: [],
        osbs_readwrite_users: [],
        osbs_readwrite_groups: ["system:authenticated"],
        osbs_admin_users: [],
        osbs_admin_groups: [],
        osbs_docker_registry: false,
        osbs_docker_registry_storage: "/opt/openshift-registry",
        when: env == "staging"
      }


@@ -0,0 +1,19 @@
osbs-on-openshift
=================

Role for deploying OSBS on top of a pre-existing [OpenShift](https://openshift.org)
cluster on which we do not have cluster-admin privileges.

- [OpenShift build service](https://github.com/projectatomic/osbs-client/), a
  service for building layered Docker images.

This role is based on the upstream
[ansible-role-osbs-common](https://github.com/projectatomic/ansible-role-osbs-common),
but the `osbs-common` role name was already taken in Fedora Infra by a
pre-existing role that holds common tasks required of all nodes in an OSBS
cluster.

This role is adapted from the
[ansible-osbs](https://github.com/projectatomic/ansible-osbs/)
playbook for deploying OpenShift build service. Please refer to that GitHub
repository for [documentation](https://github.com/projectatomic/ansible-osbs/blob/master/README.md)
and the [issue tracker](https://github.com/projectatomic/ansible-osbs/issues).
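
Example (illustrative)
----------------------

A minimal sketch of applying this role from a play. The hosts pattern and
variable values below are placeholders, not Fedora Infra's actual settings;
see `defaults/main.yml` for the full variable list:

    - name: Deploy OSBS on top of OpenShift
      hosts: my-openshift-master        # placeholder host pattern
      user: root
      roles:
        - role: osbs-on-openshift
          osbs_namespace: default
          osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
          osbs_readwrite_groups:
            - system:authenticated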


@@ -0,0 +1,53 @@
---
osbs_openshift_home: /var/lib/origin
osbs_namespace: default
osbs_namespace_create: false
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
  KUBECONFIG: "{{ osbs_kubeconfig_path }}"
osbs_service_accounts: []
# openshift authorization - which users should be assigned the view (readonly),
# osbs-builder (readwrite), and cluster-admin (admin) roles
# in default configuration, everyone has read/write access
osbs_readonly_users: []
osbs_readonly_groups: []
osbs_readwrite_users: []
osbs_readwrite_groups:
  - system:authenticated
  - system:unauthenticated
osbs_admin_users: []
osbs_admin_groups: []
## development w/ auth proxy:
#osbs_readonly_users: []
#osbs_readonly_groups: []
#osbs_readwrite_users: []
#osbs_readwrite_groups:
# - system:authenticated
#osbs_admin_users: []
#osbs_admin_groups: []
## example production configuration:
#osbs_readonly_users: []
#osbs_readonly_groups:
# - system:authenticated
#osbs_readwrite_groups: []
#osbs_readwrite_users:
# - kojibuilder
# - "{{ ansible_hostname }}"
# - system:serviceaccount:default:default
#osbs_admin_users:
# - foo@EXAMPLE.COM
# - bar@EXAMPLE.COM
#osbs_admin_groups: []
# limit on the number of running pods - undefine or set to -1 to remove limit
#osbs_master_max_pods: 3
osbs_docker_registry: false
osbs_docker_registry_storage: /opt/openshift-registry
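
These defaults can be overridden anywhere Ansible sets variables; a minimal
sketch of a hypothetical group_vars override (the path, group name, and values
are illustrative, not Fedora Infra's actual configuration):

    # group_vars/osbs-masters-stg  (hypothetical file)
    osbs_namespace: osbs-stg          # illustrative value
    osbs_readwrite_groups: []
    osbs_readwrite_users:
      - kojibuilder                   # illustrative user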


@@ -0,0 +1,17 @@
---
- name: restart openshift-master
  service:
    name: "{{ osbs_deployment_type }}-master"
    state: restarted

- name: restart httpd
  service: name=httpd state=restarted

- name: restart firewalld
  service: name=firewalld state=restarted

- name: convert privkey to rsa
  command: openssl rsa -in {{ osbs_proxy_key_file }} -out {{ osbs_proxy_key_file }}

- name: concatenate cert and key
  shell: cat {{ osbs_proxy_cert_file }} {{ osbs_proxy_key_file }} > {{ osbs_proxy_certkey_file }}
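
These handlers only fire when notified; a minimal sketch of a task that could
notify the proxy-certificate handlers (the copy task and its source file are
illustrative, not part of this commit):

    - name: install proxy TLS key (illustrative)
      copy:
        src: osbs-proxy.key             # hypothetical source file
        dest: "{{ osbs_proxy_key_file }}"
      notify:
        - convert privkey to rsa
        - concatenate cert and key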


@@ -0,0 +1,22 @@
# Standards: 1.2
---
galaxy_info:
  author: Martin Milata
  description: OpenShift build service common role - builder of layered Docker images
  company: Red Hat
  issue_tracker_url: https://github.com/projectatomic/ansible-osbs/issues
  license: BSD
  min_ansible_version: 1.9
  platforms:
    - name: EL
      versions:
        - 7
    - name: Fedora
      versions:
        - 24
        - 25
  categories:
    - cloud
    - development
    - packaging
dependencies: []


@@ -0,0 +1,120 @@
---
### openshift service ###

- name: create osbs namespace
  command: >
    oc new-project {{ osbs_namespace }}
  register: new_project
  failed_when: new_project.rc != 0 and ('already exists' not in new_project.stderr)
  changed_when: new_project.rc == 0
  environment: "{{ osbs_environment }}"
  when: osbs_namespace_create

- name: copy service accounts
  template: src=openshift-serviceaccount.yml.j2 dest={{ osbs_openshift_home }}/serviceaccount-{{ item }}.yml
  with_items: "{{ osbs_service_accounts }}"
  register: yaml_sa

- name: import service accounts
  command: >
    oc create
    --namespace={{ osbs_namespace }}
    --filename={{ osbs_openshift_home }}/serviceaccount-{{ item.item }}.yml
  register: service_account_import
  failed_when: service_account_import.rc != 0 and ('already exists' not in service_account_import.stderr)
  environment: "{{ osbs_environment }}"
  with_items: "{{ yaml_sa.results | default([]) }}"
  when: item.changed

- name: copy role bindings
  template: src=openshift-rolebinding.yml.j2 dest={{ osbs_openshift_home }}/rolebinding-{{ item.name }}.yml
  with_items:
    - name: osbs-readonly
      role: view
      users: "{{ osbs_readonly_users }}"
      groups: "{{ osbs_readonly_groups }}"
    - name: osbs-readwrite
      role: edit
      users: "{{ osbs_readwrite_users }}"
      groups: "{{ osbs_readwrite_groups }}"
    - name: osbs-admin
      role: admin
      users: "{{ osbs_admin_users }}"
      groups: "{{ osbs_admin_groups }}"
  register: yaml_rolebindings

- name: import the role bindings
  command: >
    oc replace
    --namespace={{ osbs_namespace }}
    --force=true
    --filename={{ osbs_openshift_home }}/rolebinding-{{ item.item.name }}.yml
  environment: "{{ osbs_environment }}"
  with_items: "{{ yaml_rolebindings.results }}"
  when: item.changed

- name: copy resource quotas
  template: src=openshift-resourcequota.yml.j2 dest={{ osbs_openshift_home }}/resourcequota.yml
  when: osbs_master_max_pods is defined and osbs_master_max_pods >= 0
  register: yaml_resourcequotas
  tags:
    - resourcequotas

- name: import resource quotas
  command: >
    oc replace
    --namespace={{ osbs_namespace }}
    --force=true
    --filename={{ osbs_openshift_home }}/resourcequota.yml
  environment: "{{ osbs_environment }}"
  when: osbs_master_max_pods is defined and osbs_master_max_pods >= 0 and yaml_resourcequotas.changed
  tags:
    - resourcequotas

- name: delete resource quotas
  command: >
    oc delete
    --namespace={{ osbs_namespace }}
    --ignore-not-found=true
    resourcequota concurrentbuilds
  environment: "{{ osbs_environment }}"
  when: osbs_master_max_pods is not defined or osbs_master_max_pods < 0
  tags:
    - resourcequotas

- name: copy cpu limitrange
  template:
    src: openshift-limitrange.yml.j2
    dest: "{{ osbs_openshift_home }}/limitrange.yml"
  when: osbs_master_cpu_limitrange is defined and osbs_master_cpu_limitrange
  register: yaml_limitrange
  tags:
    - limitranges

- name: import cpu limitrange
  command: >
    oc replace
    --namespace={{ osbs_namespace }}
    --force=true
    --filename={{ osbs_openshift_home }}/limitrange.yml
  environment: "{{ osbs_environment }}"
  when: osbs_master_cpu_limitrange is defined and osbs_master_cpu_limitrange and yaml_limitrange.changed
  tags:
    - limitranges

- name: delete cpu limitrange
  command: >
    oc delete
    --namespace={{ osbs_namespace }}
    --ignore-not-found=true
    limitrange cpureq
  environment: "{{ osbs_environment }}"
  when: osbs_master_cpu_limitrange is not defined or not osbs_master_cpu_limitrange
  tags:
    - limitranges

- include: yum_proxy.yml
  when: osbs_yum_proxy_image is defined

- include: registry.yml
  when: osbs_docker_registry is defined and osbs_docker_registry


@@ -0,0 +1,54 @@
---
- name: copy registry service account
  template:
    src: openshift-serviceaccount.yml.j2
    dest: "{{ osbs_openshift_home }}/serviceaccount-{{ item }}.yml"
  with_items:
    - registry
  register: yaml_sa
  tags:
    - oc

- name: import registry service account
  command: >
    oc create
    --namespace=default
    --filename={{ osbs_openshift_home }}/serviceaccount-{{ item.item }}.yml
  register: service_account_import
  failed_when: service_account_import.rc != 0 and ('already exists' not in service_account_import.stderr)
  environment: "{{ osbs_environment }}"
  with_items: "{{ yaml_sa.results | default([]) }}"
  when: item.changed
  tags:
    - oc

- name: make registry serviceaccount privileged
  command: >
    oadm policy
    --namespace=default
    add-scc-to-user
    privileged -z registry
  environment: "{{ osbs_environment }}"
  tags:
    - oc

- name: create registry storage
  file:
    path: "{{ osbs_docker_registry_storage }}"
    owner: 1001
    group: root
    mode: "0770"
    state: directory

- name: set up internal registry
  command: >
    oadm registry
    --namespace=default
    --service-account registry
    --credentials /etc/origin/master/openshift-registry.kubeconfig
    --mount-host {{ osbs_docker_registry_storage }}
  register: create_registry
  changed_when: "'service exists' not in create_registry.stdout"
  environment: "{{ osbs_environment }}"
  tags:
    - oc


@@ -0,0 +1,36 @@
---
- name: copy yum proxy deployment config
  template: src=openshift-yumproxy-dc.yml.j2 dest={{ osbs_openshift_home }}/yumproxy-dc.yml
  register: yaml_dc
  tags:
    - oc
    - yumproxy

- name: import yum proxy deployment config
  command: >
    oc replace
    --force=true
    --namespace={{ osbs_namespace }}
    --filename={{ osbs_openshift_home }}/yumproxy-dc.yml
  when: yaml_dc.changed
  tags:
    - oc
    - yumproxy

- name: copy yum proxy service
  template: src=openshift-yumproxy-svc.yml.j2 dest={{ osbs_openshift_home }}/yumproxy-svc.yml
  register: yaml_svc
  tags:
    - oc
    - yumproxy

- name: import yum proxy service
  command: >
    oc replace
    --force=true
    --namespace={{ osbs_namespace }}
    --filename={{ osbs_openshift_home }}/yumproxy-svc.yml
  when: yaml_svc.changed
  tags:
    - oc
    - yumproxy


@@ -0,0 +1,9 @@
apiVersion: v1
kind: LimitRange
metadata:
  name: cpureq
spec:
  limits:
    - type: Container
      defaultRequest:
        cpu: {{ osbs_master_cpu_limitrange }}


@@ -0,0 +1,7 @@
apiVersion: v1
kind: ResourceQuota
metadata:
  name: concurrentbuilds
spec:
  hard:
    pods: {{ osbs_master_max_pods }}


@@ -0,0 +1,24 @@
apiVersion: v1
kind: RoleBinding
metadata:
  name: {{ item.name }}
roleRef:
  name: {{ item.role }}
{% if item.users == [] %}
userNames: []
{% else %}
userNames:
{% for u in item.users %}
- {{ u }}
{% endfor %}
{% endif %}
{% if item.groups == [] %}
groupNames: []
{% else %}
groupNames:
{% for g in item.groups %}
- {{ g }}
{% endfor %}
{% endif %}
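
For illustration, rendering this template with a hypothetical item of
name: osbs-readwrite, role: edit, empty users, and groups containing only
system:authenticated would produce roughly:

    apiVersion: v1
    kind: RoleBinding
    metadata:
      name: osbs-readwrite
    roleRef:
      name: edit
    userNames: []
    groupNames:
    - system:authenticated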


@@ -0,0 +1,4 @@
apiVersion: v1
kind: ServiceAccount
metadata:
  name: {{ item }}


@@ -0,0 +1,31 @@
apiVersion: v1
kind: DeploymentConfig
metadata:
  name: {{ osbs_yum_proxy_name }}
  labels:
    app: {{ osbs_yum_proxy_name }}
spec:
  replicas: 1
  selector:
    app: {{ osbs_yum_proxy_name }}
    deploymentconfig: {{ osbs_yum_proxy_name }}
  template:
    metadata:
      labels:
        app: {{ osbs_yum_proxy_name }}
        deploymentconfig: {{ osbs_yum_proxy_name }}
    spec:
      containers:
        - name: {{ osbs_yum_proxy_name }}
          image: {{ osbs_yum_proxy_image }}
          ports:
            - containerPort: 3128
              protocol: TCP
          volumeMounts:
            - mountPath: /squid
              name: {{ osbs_yum_proxy_name }}-volume-1
      volumes:
        - emptyDir: {}
          name: {{ osbs_yum_proxy_name }}-volume-1
  triggers:
    - type: ConfigChange


@@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
  name: {{ osbs_yum_proxy_name }}
  labels:
    app: {{ osbs_yum_proxy_name }}
spec:
  ports:
    - name: 3128-tcp
      protocol: TCP
      port: 3128
      targetPort: 3128
  selector:
    app: {{ osbs_yum_proxy_name }}
    deploymentconfig: {{ osbs_yum_proxy_name }}