add osbs orchestrator/worker playbooks/roles

Signed-off-by: Adam Miller <maxamillion@fedoraproject.org>
Signed-off-by: Adam Miller <admiller@redhat.com>
This commit is contained in:
Adam Miller 2017-07-12 16:14:20 -05:00 committed by Adam Miller
parent 74087cff26
commit 037d4931b4
39 changed files with 2066 additions and 53 deletions

View file

@ -226,17 +226,6 @@ csi_relationship: |
To update this text, add the csi_* vars to group_vars/ in ansible.
# docker images required by OpenShift Origin
openshift_required_images:
- "openshift/origin-pod"
# docker images required by OSBS for builds
fedora_required_images:
- "fedora:24"
- "fedora:25"
- "fedora:latest"
#
# say if we want the apache role dependency for mod_wsgi or not
# In some cases we want mod_wsgi and no apache (for python3 httpaio stuff)

View file

@ -21,3 +21,8 @@ koji_url: "koji.fedoraproject.org"
osbs_client_conf_path: /etc/osbs.conf
baseiptables: False
# docker images required by OpenShift Origin
openshift_required_images:
- "openshift/origin-pod"

View file

@ -6,19 +6,5 @@ num_cpus: 2
tcp_ports: [ 80, 443, 8443]
fas_client_groups: sysadmin-releng,fi-apprentice,sysadmin-noc,sysadmin-veteran
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
osbs_url: "osbs.stg.fedoraproject.org"
osbs_koji_username: "kojibuilder_stg"
koji_url: "koji.stg.fedoraproject.org"
osbs_client_conf_path: /etc/osbs.conf
openshift_node_labels: {'region':'infra'}
openshift_schedulable: False

View file

@ -6,18 +6,4 @@ num_cpus: 2
tcp_ports: [ 80, 443, 8443, 10250]
fas_client_groups: sysadmin-releng,fi-apprentice,sysadmin-noc,sysadmin-veteran
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
osbs_url: "osbs.stg.fedoraproject.org"
osbs_koji_username: "kojibuilder_stg"
koji_url: "koji.stg.fedoraproject.org"
osbs_client_conf_path: /etc/osbs.conf
openshift_node_labels: {'region': 'primary', 'zone': 'default'}

View file

@ -0,0 +1,34 @@
---
osbs_manage_firewalld: false
osbs_namespace: "osbs"
# Allow 'oc' command to find its configuration file
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
HOME: "{{ lookup('env', 'HOME') }}"
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
osbs_service_accounts:
- koji
- metrics
osbs_readonly_users:
- "system:serviceaccount:{{ osbs_namespace }}:metrics"
osbs_readonly_groups:
- "system:authenticated"
osbs_readwrite_groups: []
osbs_readwrite_users:
- "{{ ansible_hostname }}"
- "system:serviceaccount:{{ osbs_namespace }}:default"
- "system:serviceaccount:{{ osbs_namespace }}:builder"
osbs_admin_users:
- kevin
- puiterwijk
- maxamillion
- dgilmore
osbs_admin_groups: []
env: prod
osbs_ha_install: true
osbs_nodes: "{{ groups['osbs-orchestrator-' + env + '-nodes'] }}"
#nodeselectors
osbs_orchestrator_default_nodeselector: "orchestrator=true"
osbs_worker_default_nodeselector: "worker=true"

View file

@ -1,2 +1,103 @@
---
baseiptables: False
fas_client_groups: sysadmin-releng,fi-apprentice,sysadmin-noc,sysadmin-veteran
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org"
stable_registry: "registry.stg.fedoraproject.org"
candidate_registry: "candidate-registry.stg.fedoraproject.org"
osbs_url: "osbs.stg.fedoraproject.org"
osbsworker_x86_64_url: "osbsworker-x86-64.stg.fedoraproject.org"
koji_url: "koji.stg.fedoraproject.org"
osbs_builder_user: builder
koji_builder_user: dockerbuilder
osbs_client_conf_path: /etc/osbs.conf
openshift_htpasswd_file: /etc/origin/htpasswd
openshift_ansible_version: openshift-ansible-3.5.97-1
openshift_ansible_ssh_user: root
openshift_ansible_install_examples: false
openshift_ansible_containerized_deploy: false
openshift_auth_profile: osbs
origin_release: v1.5.1
openshift_debug_level: 2
osbs_namespace: "osbs"
osbs_koji_username: "kojibuilder_stg"
osbs_openshift_home: /var/lib/origin
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_generated_config_path: /tmp
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
osbs_is_admin: true
osbs_service_accounts:
- worker
- orchestrator
- metrics
osbs_cpu_limitrange: '200m'
# FIXME
# I'm not sure who all should be admins and we might want some read-only user
# for the purpose of monitoring
osbs_admin_groups: []
osbs_admin_users: []
osbs_readonly_groups: []
osbs_readonly_users: []
osbs_readwrite_groups: []
osbs_readwrite_users: []
osbs_orchestrator: false
osbs_worker_namespace: "worker"
osbs_worker_service_accounts:
- worker
- orchestrator
osbs_worker_clusters:
x86_64:
- name: osbsworker-x86-64
max_concurrent_builds: 12
openshift_url: "https://{{ osbsworker_x86_64_url }}"
verify_ssl: 'false'
artifacts_allowed_domains:
- "{{stable_registry}}"
- "{{candidate_registry}}"
osbs_koji_hub: "https://{{koji_url}}/kojihub"
osbs_koji_root: "https://{{koji_url}}/koji"
osbs_pulp_registry_name: brew-prod
# NOTE(review): these two keys are re-declared with empty defaults further down;
# under YAML last-key-wins the values below were silently ignored. Kept commented
# for reference until the intended value is confirmed.
#osbs_registry_uri: https://{{candidate_registry}}/v2
#osbs_source_registry_uri: http://brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888
osbs_koji_secret_name: koji
osbs_distribution_scope: public
osbs_authoritative_registry: "{{ stable_registry }}"
osbs_registry_api_versions:
- v2
osbs_registry_secret_name: v2-registry-dockercfg
osbs_registry_uri: ''
osbs_source_registry_uri: ''
osbs_build_json_dir: /usr/share/osbs
osbs_sources_command: fedpkg sources
osbs_vendor: Fedora Project
#nodeselectors
osbs_orchestrator_default_nodeselector: "orchestrator=true"
osbs_worker_default_nodeselector: "worker=true"

View file

@ -0,0 +1,10 @@
---
# Define resources for this group of hosts here.
lvm_size: 60000
mem_size: 8192
num_cpus: 2
tcp_ports: [ 80, 443, 8443]
openshift_node_labels: {'region':'infra'}
openshift_schedulable: False

View file

@ -0,0 +1,9 @@
---
# Define resources for this group of hosts here.
lvm_size: 60000
mem_size: 8192
num_cpus: 2
tcp_ports: [ 80, 443, 8443, 10250]
openshift_node_labels: {'region': 'primary', 'zone': 'default'}

View file

@ -1333,16 +1333,16 @@ osbs-master01.phx2.fedoraproject.org
[osbs-masters-stg]
osbs-master01.stg.phx2.fedoraproject.org
[osbsworker-masters-stg]
#osbsworker-x86-64-master01.stg.phx2.fedoraproject.org
[osbsworker-x86-64-masters-stg]
osbsworker-x86-64-master01.stg.phx2.fedoraproject.org
[osbs-nodes-stg]
osbs-node01.stg.phx2.fedoraproject.org
osbs-node02.stg.phx2.fedoraproject.org
[osbsworker-nodes-stg]
#osbsworker-x86-64-node01.stg.phx2.fedoraproject.org
#osbsworker-x86-64-node02.stg.phx2.fedoraproject.org
[osbsworker-x86-64-nodes-stg]
osbsworker-x86-64-node01.stg.phx2.fedoraproject.org
osbsworker-x86-64-node02.stg.phx2.fedoraproject.org
[osbs:children]
osbs-control
@ -1353,8 +1353,16 @@ osbs-masters
osbs-control-stg
osbs-nodes-stg
osbs-masters-stg
osbsworker-nodes-stg
osbsworker-masters-stg
osbsworker-x86-64-nodes-stg
osbsworker-x86-64-masters-stg
[osbs-orchestrators-stg:children]
osbs-nodes-stg
osbs-masters-stg
[osbs-workers-stg:children]
osbsworker-x86-64-nodes-stg
osbsworker-x86-64-masters-stg
[os-control-stg]
os-control01.stg.phx2.fedoraproject.org

View file

@ -1,9 +1,9 @@
# create an osbs server
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=osbs-control-stg:osbs-control"
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=osbs-nodes-stg:osbs-masters-stg:osbs-nodes:osbs-masters:osbsworker-nodes-stg:osbsworker-masters-stg"
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=osbs-control"
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=osbs-nodes:osbs-masters"
- name: make the box be real
hosts: osbs-control:osbs-control-stg:osbs-masters-stg:osbs-nodes-stg:osbs-masters:osbs-nodes:osbsworker-nodes-stg:osbsworker-masters-stg
hosts: osbs-control:osbs-masters:osbs-nodes
user: root
gather_facts: True
@ -738,7 +738,7 @@
- name: Post-Install image stream refresh
hosts: osbs-masters-stg[0]:osbs-masters[0]
hosts: osbs-masters[0]
tags:
- osbs-post-install
vars_files:

View file

@ -0,0 +1,657 @@
# create an osbs server
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=osbs-stg"
- name: make the box be real
hosts: osbs-stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- base
- rkhunter
- nagios_client
- hosts
- fas_client
- collectd/base
- rsyncd
- sudo
tasks:
- include: "{{ tasks_path }}/yumrepos.yml"
- include: "{{ tasks_path }}/2fa_client.yml"
- include: "{{ tasks_path }}/motd.yml"
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
# Prepare the Control host to be able to run ansible-ansible-openshift-ansible
# against the Orchestration and Worker cluster machines
- name: OSBS control hosts pre-req setup
hosts: osbs-control-stg
tags:
- osbs-orchestrator-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: deploy private key to control hosts
copy:
src: "{{private}}/files/osbs/{{env}}/control_key"
dest: "/root/.ssh/id_rsa"
owner: root
mode: 0600
- name: set ansible to use pipelining
ini_file:
dest: /etc/ansible/ansible.cfg
section: ssh_connection
option: pipelining
value: "True"
# This section sets up the SSL Certs for "public facing" which is how Koji will
# interact with the OSBS Orchestration cluster. This is not needed on the worker
# clusters.
- name: Setup orchestrator cluster masters pre-reqs
hosts: osbs-masters-stg
tags:
- osbs-orchestrator-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: ensure origin conf dir exists
file:
path: "/etc/origin"
state: "directory"
- name: create cert dir for openshift public facing REST API SSL
file:
path: "/etc/origin/master/named_certificates"
state: "directory"
- name: install cert for openshift public facing REST API SSL
copy:
src: "{{private}}/files/osbs/{{env}}/osbs-internal.pem"
dest: "/etc/origin/master/named_certificates/{{osbs_url}}.pem"
- name: install key for openshift public facing REST API SSL
copy:
src: "{{private}}/files/osbs/{{env}}/osbs-internal.key"
dest: "/etc/origin/master/named_certificates/{{osbs_url}}.key"
- name: place htpasswd file
copy:
src: "{{private}}/files/httpd/osbs-{{env}}.htpasswd"
dest: "{{ openshift_htpasswd_file }}"
# This installs required pre-reqs and deploys the Controler's public key to all
# machines in both the Orchestrator and Worker clusters in order to allow
# ansible-ansible-openshift-ansible to be run against them
- name: Setup cluster hosts pre-reqs
hosts: osbs-orchestrators-stg:osbs-workers-stg
tags:
- osbs-orchestrator-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- name: restart NetworkManager
service:
name: NetworkManager
state: restarted
roles:
- role: openshift-prerequisites
tasks:
- name: Install necessary packages that openshift-ansible needs
package: name="{{ item }}" state=installed
with_items:
- tar
- rsync
- dbus-python
- NetworkManager
- libselinux-python
- origin
- name: Deploy controller public ssh keys to osbs cluster hosts
authorized_key:
user: root
key: "{{ lookup('file', '{{private}}/files/osbs/{{env}}/control_key.pub') }}"
# This is required for OpenShift built-in SkyDNS inside the overlay network
# of the cluster
- name: ensure NM_CONTROLLED is set to "yes" for osbs cluster
lineinfile:
dest: "/etc/sysconfig/network-scripts/ifcfg-eth0"
line: "NM_CONTROLLED=yes"
notify:
- restart NetworkManager
# This is required for OpenShift built-in SkyDNS inside the overlay network
# of the cluster
- name: ensure NetworkManager is enabled and started
service:
name: NetworkManager
state: started
enabled: yes
- name: cron entry to clean up docker storage
copy:
src: "{{files}}/osbs/cleanup-docker-storage"
dest: "/etc/cron.d/cleanup-docker-storage"
- name: copy docker-storage-setup config
copy:
src: "{{files}}/osbs/docker-storage-setup"
dest: "/etc/sysconfig/docker-storage-setup"
# This keytab needs to be on any system that is going to talk to koji and
# unfortunately, that's all of them.
- name: Deploy kerberose keytab to cluster hosts
hosts: osbs-masters-stg:osbs-nodes-stg:osbsworker-masters-stg:osbsworker-nodes-stg
tags:
- osbs-cluster-prereq
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: keytab/service
owner_user: root
owner_group: root
service: osbs
host: "osbs.stg.fedoraproject.org"
when: env == "staging"
- name: Deploy OpenShift Clusters
hosts: osbs-control-stg
tags:
- osbs-deploy-openshift
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: ansible-ansible-openshift-ansible
cluster_inventory_filename: "orchestrator-cluster-inventory-stg"
openshift_htpasswd_file: "{{ openshift_htpasswd_file }}"
openshift_master_public_api_url: "https://{{ osbs_url }}:8443"
openshift_release: "{{ origin_release }}"
openshift_ansible_path: "/root/openshift-ansible"
openshift_ansible_playbook: "playbooks/byo/config.yml"
openshift_ansible_version: "{{ openshift_ansible_version }}"
openshift_ansible_ssh_user: "{{ openshift_ansible_ssh_user }}"
openshift_ansible_install_examples: "{{ openshift_ansible_install_examples }}"
openshift_ansible_containerized_deploy: "{{ openshift_ansible_containerized_deploy }}"
openshift_cluster_masters_group: "osbs-masters-stg"
openshift_cluster_nodes_group: "osbs-nodes-stg"
openshift_cluster_infra_group: "osbs-masters-stg"
openshift_auth_profile: "{{ openshift_auth_profile }}"
openshift_cluster_url: "{{ osbs_url }}"
openshift_master_ha: false
openshift_debug_level: "{{ openshift_debug_level }}"
openshift_shared_infra: true
openshift_deployment_type: "origin"
openshift_metrics_deploy: true
when: env == 'staging'
tags: ['openshift-cluster','ansible-ansible-openshift-ansible']
- role: ansible-ansible-openshift-ansible
cluster_inventory_filename: "x86-64-worker-cluster-inventory-stg"
openshift_htpasswd_file: "{{ openshift_htpasswd_file }}"
openshift_master_public_api_url: "https://{{ osbsworker_x86_64_url }}:8443"
openshift_release: "{{ origin_release }}"
openshift_ansible_path: "/root/openshift-ansible"
openshift_ansible_playbook: "playbooks/byo/config.yml"
openshift_ansible_version: "openshift-ansible-3.3.57-1"
openshift_ansible_ssh_user: "{{ openshift_ansible_ssh_user }}"
openshift_ansible_install_examples: "{{ openshift_ansible_install_examples }}"
openshift_ansible_containerized_deploy: "{{ openshift_ansible_containerized_deploy }}"
openshift_cluster_masters_group: "osbsworker-masters-stg"
openshift_cluster_nodes_group: "osbsworker-nodes-stg"
openshift_cluster_infra_group: "osbsworker-masters-stg"
openshift_auth_profile: "{{ openshift_auth_profile }}"
openshift_cluster_url: "{{ osbsworker_x86_64_url }}"
openshift_master_ha: false
openshift_debug_level: "{{ openshift_debug_level }}"
openshift_shared_infra: true
openshift_deployment_type: "origin"
openshift_metrics_deploy: true
when: env == 'staging'
tags: ['openshift-cluster','ansible-ansible-openshift-ansible']
- name: Setup OSBS requirements for OpenShift cluster hosts
hosts: osbs-masters-stg:osbs-nodes-stg:osbsworker-masters-stg:osbsworker-nodes-stg
tags:
- osbs-cluster-req
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: osbs-common
osbs_manage_firewalld: false
- role: osbs-atomic-reactor
- role: push-docker
docker_cert_name: "containerbuild"
docker_cert_dir: "/etc/docker/certs.d/{{ candidate_registry }}"
when: env == "staging"
# The images that come out of the builds need to be pushed somwhere
- role: "manage-container-images"
cert_dest_dir: "/etc/docker/certs.d/{{ candidate_registry }}"
cert_src: "{{private}}/files/docker-registry/{{env}}/docker-registry-internal.pem"
key_src: "{{private}}/files/docker-registry/{{env}}/docker-registry-internal.key"
when: env == "staging"
handlers:
- name: restart dnsmasq
service:
name: dnsmasq
state: restarted
tasks:
- name: install fedora dnsmasq specific config
copy:
src: "{{files}}/osbs/fedora-dnsmasq.conf.{{env}}"
dest: "/etc/dnsmasq.d/fedora-dns.conf"
notify:
- restart dnsmasq
- name: setup orchestrator namespace
hosts: osbs-masters-stg[0]
tags:
- osbs-cluster-req
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
koji_pki_dir: /etc/pki/koji
koji_ca_cert_path: "{{koji_pki_dir}}/fedora-server-ca.cert"
koji_cert_path: "{{koji_pki_dir}}/fedora-builder.pem"
koji_builder_user: dockerbuilder
osbs_secret_name: kojisecret
osbs_secret_service_account: "{{ osbs_builder_user }}"
osbs_secret_remote_dir: /var/lib/origin
osbs_secret_can_fail: false
roles:
- role: osbs-namespace
osbs_orchestrator: true
osbs_cpu_limitrange: "{{ osbs_orchestrator_cpu_limitrange }}"
osbs_nodeselector: "{{ osbs_orchestrator_default_nodeselector|default('') }}"
- role: osbs-secret
osbs_namespace: "{{ osbs_worker_namespace }}"
osbs_secret_name: kojisecret
osbs_secret_files:
- source: "{{ secret_repo }}/groups/osbs-{{ env }}/koji/cert"
dest: cert
- source: "{{ secret_repo }}/groups/osbs-{{ env }}/koji/ca"
dest: ca
- source: "{{ secret_repo }}/groups/osbs-{{ env }}/koji/serverca"
dest: serverca
- name: setup worker namespace
hosts: osbsworker-x86-64-masters-stg[0]
tags:
- osbs-cluster-req
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
koji_pki_dir: /etc/pki/koji
koji_ca_cert_path: "{{koji_pki_dir}}/fedora-server-ca.cert"
koji_cert_path: "{{koji_pki_dir}}/fedora-builder.pem"
koji_builder_user: dockerbuilder
osbs_builder_user: builder
osbs_secret_name: kojisecret
osbs_secret_service_account: "{{ osbs_builder_user }}"
osbs_secret_remote_dir: /var/lib/origin
osbs_secret_can_fail: false
roles:
- role: osbs-namespace
osbs_namespace: "{{ osbs_worker_namespace }}"
osbs_service_accounts: "{{ osbs_worker_service_accounts }}"
osbs_nodeselector: "{{ osbs_worker_default_nodeselector|default('') }}"
- role: osbs-secret
osbs_namespace: "{{ osbs_worker_namespace }}"
osbs_secret_name: kojisecret
osbs_secret_files:
- source: "{{ secret_repo }}/groups/osbs-{{ env }}/koji/cert"
dest: cert
- source: "{{ secret_repo }}/groups/osbs-{{ env }}/koji/ca"
dest: ca
- source: "{{ secret_repo }}/groups/osbs-{{ env }}/koji/serverca"
dest: serverca
- name: Setup Koji auth for OpenShift Orchestrator
hosts: osbs-masters-stg[0]
tags:
- osbs-cluster-req
- osbs-master-req
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: set policy for koji builder in openshift for osbs
shell: "oadm policy add-role-to-user -n {{ osbs_namespace }} edit htpasswd_provider:{{ osbs_koji_stg_username }} && touch /etc/origin/koji-builder-policy-added"
args:
creates: "/etc/origin/koji-builder-policy-added"
when: env == "staging"
- name: set policy for koji builder in openshift for atomic-reactor
shell: "oadm policy add-role-to-user -n {{ osbs_namespace }} edit system:serviceaccount:{{osbs_namespace}}:{{osbs_builder_user}} && touch /etc/origin/atomic-reactor-policy-added"
args:
creates: "/etc/origin/atomic-reactor-policy-added"
- name: Manage docker images and image stream
hosts: osbs-masters-stg[0]:osbsworker-x86-64-masters-stg[0]
tags:
- osbs-post-install
- manage-docker-images
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
koji_pki_dir: /etc/pki/koji
koji_ca_cert_path: "{{koji_pki_dir}}/fedora-server-ca.cert"
koji_cert_path: "{{koji_pki_dir}}/fedora-builder.pem"
koji_builder_user: dockerbuilder
tasks:
- name: pull openshift required docker images
shell: "docker pull {{item}}:v{{origin_release}}"
with_items: "{{openshift_required_images}}"
delegate_to: compose-x86-01.phx2.fedoraproject.org
register: docker_pull_openshift_delegated
changed_when: "'Downloaded newer image' in docker_pull_openshift_delegated.stdout"
- name: tag openshift required docker images for our registry
shell: "docker tag {{item}}:v{{origin_release}} {{candidate_registry}}/{{item}}:v{{origin_release}}"
with_items: "{{openshift_required_images}}"
delegate_to: compose-x86-01.phx2.fedoraproject.org
when: docker_pull_openshift_delegated|changed
- name: push openshift required docker images to our registry
shell: "docker push {{candidate_registry}}/{{item}}:v{{origin_release}}"
with_items: "{{openshift_required_images}}"
delegate_to: compose-x86-01.phx2.fedoraproject.org
when: docker_pull_openshift_delegated|changed
- name: create fedora image stream for OpenShift
shell: "echo '{ \"apiVersion\": \"v1\", \"kind\": \"ImageStream\", \"metadata\": { \"name\": \"fedora\" }, \"spec\": { \"dockerImageRepository\": \"{{candidate_registry}}/fedora\" } }' | oc create -f - && touch /etc/origin/fedoraimagestreamcreated"
environment: "{{ osbs_environment }}"
args:
creates: /etc/origin/fedoraimagestreamcreated
- name: post-install osbs tasks
hosts: osbs-masters-stg:osbs-nodes-stg
tags:
- osbs-post-install
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
koji_pki_dir: /etc/pki/koji
koji_ca_cert_path: "{{koji_pki_dir}}/fedora-server-ca.cert"
koji_cert_path: "{{koji_pki_dir}}/fedora-builder.pem"
koji_builder_user: dockerbuilder
osbs_builder_user: builder
handlers:
- name: buildroot container
shell: 'docker rmi buildroot; docker build --no-cache --rm -t buildroot /etc/osbs/buildroot/'
- name: restart docker
service:
name: docker
state: restarted
- name: systemctl daemon-reload
shell: 'systemctl daemon-reload'
roles:
- {
role: osbs-client,
general: {
verbose: 0,
build_json_dir: '/etc/osbs/input/',
openshift_required_version: 1.1.0,
},
default: {
username: "{{ osbs_koji_stg_username }}",
password: "{{ osbs_koji_stg_password }}",
koji_use_kerberos: True,
koji_kerberos_keytab: "FILE:/etc/krb5.osbs_{{osbs_url}}.keytab",
koji_kerberos_principal: "osbs/{{osbs_url}}@{{ipa_realm}}",
openshift_url: 'https://{{osbs_url}}/',
registry_uri: 'https://{{candidate_registry}}/v2',
source_registry_uri: 'https://{{stable_registry}}/v2',
build_host: '{{osbs_url}}',
koji_root: '{{osbs_koji_root}}',
koji_hub: '{{osbs_koji_hub}}',
sources_command: 'fedpkg sources',
build_type: 'prod',
authoritative_registry: '{{stable_registry}}',
vendor: 'Fedora Project',
verify_ssl: true,
use_auth: true,
builder_use_auth: true,
distribution_scope: 'private',
registry_api_versions: 'v2',
builder_openshift_url: 'https://{{osbs_url}}'
},
when: env == "staging"
}
tasks:
- name: copy docker iptables script
copy:
src: "{{files}}/osbs/fix-docker-iptables.{{ env }}"
dest: /usr/local/bin/fix-docker-iptables
mode: 0755
notify:
- restart docker
- name: copy docker service config
copy:
src: "{{files}}/osbs/docker.service"
dest: /etc/systemd/system/docker.service
notify:
- systemctl daemon-reload
- restart docker
- name: set nrpe read access for osbs.conf for nagios monitoring
acl: name={{ osbs_client_conf_path }} entity=nrpe etype=user permissions=r state=present
- name: Create buildroot container conf directory
file:
path: "/etc/osbs/buildroot/"
state: directory
- name: Upload Dockerfile for buildroot container
template:
src: "{{ files }}/osbs/buildroot-Dockerfile-{{env}}.j2"
dest: "/etc/osbs/buildroot/Dockerfile"
mode: 0400
notify:
- buildroot container
- name: Upload internal CA for buildroot
copy:
src: "{{private}}/files/osbs/{{env}}/osbs-internal.pem"
dest: "/etc/osbs/buildroot/ca.crt"
mode: 0400
notify:
- buildroot container
- name: stat infra repofile
stat:
path: "/etc/yum.repos.d/infra-tags.repo"
register: infra_repo_stat
- name: stat /etc/osbs/buildroot/ infra repofile
stat:
path: "/etc/osbs/buildroot/infra-tags.repo"
register: etcosbs_infra_repo_stat
- name: remove old /etc/osbs/buildroot/ infra repofile
file:
path: "/etc/osbs/buildroot/infra-tags.repo"
state: absent
when: etcosbs_infra_repo_stat.stat.exists and infra_repo_stat.stat.checksum != etcosbs_infra_repo_stat.stat.checksum
- name: Copy repofile for buildroot container (because Docker)
copy:
src: "/etc/yum.repos.d/infra-tags.repo"
dest: "/etc/osbs/buildroot/infra-tags.repo"
remote_src: true
notify:
- buildroot container
when: etcosbs_infra_repo_stat.stat.exists == false
- name: stat /etc/ keytab
stat:
path: "/etc/krb5.osbs_{{osbs_url}}.keytab"
register: etc_kt_stat
- name: stat /etc/osbs/buildroot/ keytab
stat:
path: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab"
register: etcosbs_kt_stat
- name: remove old hardlink to /etc/osbs/buildroot/ keytab
file:
path: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab"
state: absent
when: etcosbs_kt_stat.stat.exists and etc_kt_stat.stat.checksum != etcosbs_kt_stat.stat.checksum
- name: Hardlink keytab for buildroot container (because Docker)
file:
src: "/etc/krb5.osbs_{{osbs_url}}.keytab"
dest: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab"
state: hard
notify:
- buildroot container
when: etcosbs_kt_stat.stat.exists == false
- name: pull openshift required docker images
shell: "docker pull {{candidate_registry}}/{{item}}:v{{origin_release}}"
with_items: "{{openshift_required_images}}"
register: docker_pull_openshift
changed_when: "'Downloaded newer image' in docker_pull_openshift.stdout"
- name: tag openshift required docker images locally
shell: "docker tag {{candidate_registry}}/{{item}}:v{{origin_release}} {{item}}:v{{origin_release}}"
with_items: "{{openshift_required_images}}"
when: docker_pull_openshift|changed
- set_fact:
docker_pull_openshift: "{{ docker_pull_openshift }}"
- name: Post-Install image stream refresh
hosts: osbs-masters-stg[0]
tags:
- osbs-post-install
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: refresh fedora image streams
shell: "oc import-image fedora --all"
when: env == "staging" and hostvars[groups["osbs-masters-stg"][0]]["docker_pull_openshift"]|changed
- name: enable nrpe for monitoring (noc01)
iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.5.126.41 state=present jump=ACCEPT
# - name: enable nrpe for monitoring (noc01.stg)
# iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=1#0.5.126.2 state=present jump=ACCEPT

View file

@ -457,8 +457,13 @@ openshift_master_default_subdomain={{openshift_app_subdomain}}
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
# openshift_hosted_metrics_deploy=true
#
# openshift_hosted_metrics_deploy=true
{% if openshift_metrics_deploy is defined %}
{% if openshift_metrics_deploy %}
#
openshift_hosted_metrics_deploy=true
# Storage Options
# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
@ -499,7 +504,9 @@ openshift_master_default_subdomain={{openshift_app_subdomain}}
# Defaults to https://hawkular-metrics.openshift_master_default_subdomain/hawkular/metrics
# Currently, you may only alter the hostname portion of the url, alterting the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
#openshift_hosted_metrics_public_url=https://hawkular-metrics.{{osbs_cluster_url}}/hawkular/metrics
{% endif %}
{% endif %}
# Logging deployment
#

View file

@ -0,0 +1,34 @@
OpenShift prerequisites Role
============================
An Ansible role to manage prerequisites for OSE installation.
It performs the following operations:
* Sets necessary sebools for GlusterFS and NFS.
https://docs.openshift.com/container-platform/3.3/install_config/install/prerequisites.html#prereq-selinux
* Installs python-six package
https://github.com/openshift/openshift-ansible/issues/3020
Role Variables Example
----------------------
# Set up sebools
openshift_sebools:
- name: virt_sandbox_use_fusefs
state: yes
persistent: yes
Example Playbook
----------------
- hosts: all
roles:
- openshift-prerequisites
Dependencies
------------
None.

View file

@ -0,0 +1,14 @@
---
# SELinux booleans
openshift_sebools:
# https://docs.openshift.com/container-platform/3.3/install_config/persistent_storage/persistent_storage_glusterfs.html#selinux
- name: virt_sandbox_use_fusefs
state: yes
persistent: yes
# https://docs.openshift.com/container-platform/3.3/install_config/persistent_storage/persistent_storage_nfs.html#nfs-selinux
- name: virt_use_nfs
state: yes
persistent: yes
- name: virt_sandbox_use_nfs
state: yes
persistent: yes

View file

@ -0,0 +1,13 @@
# Standards: 1.6
---
galaxy_info:
author: Andrej Golis
description: Set up prerequisites for OSE installation
company: Red Hat, Inc.
license: BSD
min_ansible_version: 2.1
platforms:
- name: EL
versions:
- 7
dependencies: []

View file

@ -0,0 +1,23 @@
---
- name: Install python-six package
package:
name: python-six
state: present
- name: Install libselinux-python package
package:
name: libselinux-python
state: present
- name: Configure SELinux
selinux:
state: enforcing
policy: targeted
- name: enable/disable sebools
seboolean:
name: "{{ item.name }}"
state: "{{ item.state }}"
persistent: "{{ item.persistent }}"
with_items: "{{ openshift_sebools }}"
when: openshift_sebools | length > 0

View file

@ -0,0 +1,138 @@
Role Name
=========
Setup an OpenShift namespace as required by OSBS:
- Create namespace, also referred to as project (`osbs_namespace`)
- Create service accounts (`osbs_service_accounts`)
If user is cluster admin (`osbs_is_admin`), the following is also performed:
- Create policy binding
- Create osbs-custom-build role to allow custom builds
- Sets up rolebindings for specified users, groups and service accounts
For orchestrator namespaces (`osbs_orchestrator`):
- reactor-config-secret is generated and stored in `osbs_generated_config_path`
use osbs-secret to import it
- client-config-secret is generated and stored in `osbs_generated_config_path`
use osbs-secret to import it
Requirements
------------
A running instance of OpenShift.
Role Variables
--------------
# Namespace name to be used
osbs_namespace: 'my-namespace'
# Is user running playbook as cluster admin?
osbs_is_admin: true
# Will the namespace be used for orchestrator builds?
osbs_orchestrator: true
# Worker clusters to be used for generating reactor and client config secrets
# in orchestrator workspace
osbs_worker_clusters:
x86_64:
- name: prod-x86_64-on-prem
max_concurrent_builds: 6
openshift_url: https://my-x86_64-on-premise-cluster.redhat.com:8443
- name: prod-x86_64-osd
max_concurrent_builds: 16
openshift_url: https://my-x86_64-osd-cluster.redhat.com:8443
# optional params, and their defaults:
enabled: true # yaml boolean
namespace: worker
use_auth: 'true' # yaml string
verify_ssl: 'true' # yaml string
ppc64le:
- name: prod-ppc64le-on-prem
max_concurrent_builds: 6
openshift_url: https://my-ppc64le-on-premise-cluster.redhat.com:8443
# Service accounts to be created - these accounts will also be bound to
# edit clusterrole and osbs-custom-build role in specified namespace
osbs_service_accounts:
- bot
- ci
# User and groups to be assigned view clusterrole in specified namespace
osbs_readonly_groups:
- group1
- group2
osbs_readonly_users:
- user1
- user2
# Users and groups to be assigned edit clusterrole and osbs-custom-build
# role in specified namespace
osbs_readwrite_groups:
- group1
- group2
osbs_readwrite_users:
- user1
- user2
# Users and groups to be assigned admin clusterrole and osbs-custom-build
# role in specified namespace
osbs_admin_groups:
- group1
- group2
osbs_admin_users:
- user1
- user2
# Koji integration
osbs_koji_secret_name: kojisecret
osbs_koji_hub: https://koji-hub.redhat.com # Empty default value
osbs_koji_root: https://koji-root.redhat.com # Empty default value
# Pulp integration
osbs_pulp_secret_name: pulpsecret
osbs_pulp_registry_name: brew-qa # Empty default value
# Distribution registry integration
osbs_registry_secret_name: v2-registry-dockercfg
osbs_registry_api_versions:
- v1
- v2
osbs_registry_uri: https://distribution-registry.redhat.com/v2 # Empty default value
# Dist-git integration
osbs_sources_command: rhpkg sources
osbs_source_registry_uri: https://source-registry.redhat.com # Empty default value
For a full list, see defaults/main.yml
Dependencies
------------
None.
Example Playbook
----------------
- name: setup worker namespace
hosts: master
roles:
- role: osbs-namespace
osbs_namespace: worker
- name: setup orchestrator namespace
hosts: master
roles:
- role: osbs-namespace
osbs_namespace: orchestrator
osbs_orchestrator: true
License
-------
BSD
Author Information
------------------
Luiz Carvalho <lui@redhat.com>

View file

@ -0,0 +1,41 @@
---
# Defaults for the osbs-namespace role.
# Where OpenShift keeps its state/config on the master.
osbs_openshift_home: /var/lib/origin
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
# Directory where generated reactor/client config secrets are written.
osbs_generated_config_path: /tmp
osbs_environment:
  KUBECONFIG: "{{ osbs_kubeconfig_path }}"
osbs_is_admin: true
osbs_service_accounts: []
# Empty string disables the cpu LimitRange (tasks delete `cpureq` when unset).
osbs_cpu_limitrange: ''
osbs_admin_groups: []
osbs_admin_users: []
osbs_readonly_groups: []
osbs_readonly_users: []
osbs_readwrite_groups: []
osbs_readwrite_users: []
# When true, orchestrator.yml additionally generates the reactor and
# client config secrets.
osbs_orchestrator: false
osbs_worker_clusters: {}
# Koji integration
osbs_koji_secret_name: kojisecret
osbs_distribution_scope: public
osbs_authoritative_registry: registry.access.redhat.com
osbs_koji_hub: ''
osbs_koji_root: ''
# Pulp integration
osbs_pulp_registry_name: ''
osbs_pulp_secret_name: pulpsecret
# Distribution registry integration
osbs_registry_api_versions:
  - v1
  - v2
osbs_registry_secret_name: v2-registry-dockercfg
osbs_registry_uri: ''
osbs_source_registry_uri: ''
osbs_build_json_dir: /usr/share/osbs
osbs_sources_command: rhpkg sources
osbs_vendor: Red Hat, Inc.
# Empty string means no default node selector is patched onto the namespace.
osbs_nodeselector: ''

View file

@ -0,0 +1,12 @@
# Standards: 1.8
---
# Ansible Galaxy metadata for the osbs-namespace role.
galaxy_info:
  author: Luiz Carvalho
  description: setup OpenShift namespace for OSBS usage
  company: Red Hat, Inc.
  license: BSD
  min_ansible_version: 2.1
  platforms:
    # Galaxy expects `platforms` to be a list of mappings; the original
    # flat `name:`/`versions:` mapping was missing the list dash.
    - name: EL
      versions:
        - 7
dependencies: []

View file

@ -0,0 +1,192 @@
---
# Create namespace
- name: create osbs namespace
command: oc new-project {{ osbs_namespace }}
register: new_project
failed_when: new_project.rc != 0 and ('already exists' not in new_project.stderr)
changed_when: new_project.rc == 0
environment: "{{ osbs_environment }}"
tags:
- oc
# Setup service account
- name: copy service accounts
template:
src: openshift-serviceaccount.yml.j2
dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-serviceaccount-{{ item }}.yml"
with_items: "{{ osbs_service_accounts }}"
register: yaml_sa
tags:
- oc
- name: import service accounts
command: >
oc create
--namespace={{ osbs_namespace }}
--filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-serviceaccount-{{ item.item }}.yml
register: service_account_import
failed_when: service_account_import.rc != 0 and ('already exists' not in service_account_import.stderr)
environment: "{{ osbs_environment }}"
with_items: "{{ yaml_sa.results | default([]) }}"
when: item.changed
tags:
- oc
# Setup policy binding
- name: query policybinding
command: oc get policybinding {{ osbs_namespace }}:default --namespace {{ osbs_namespace }}
environment: "{{ osbs_environment }}"
register: policybinding_query
failed_when: policybinding_query.rc != 0 and ('not found' not in policybinding_query.stderr)
changed_when: false
when: osbs_is_admin
tags:
- oc
- name: create a policybinding
command: oc create policybinding {{ osbs_namespace }} --namespace {{ osbs_namespace }}
environment: "{{ osbs_environment }}"
when: "osbs_is_admin and 'not found' in policybinding_query.stderr"
tags:
- oc
# Setup role
- name: copy role
template:
src: role-osbs-custom-build.yml.j2
dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-role-osbs-custom-build.yml"
environment: "{{ osbs_environment }}"
register: yaml_role
when: osbs_is_admin
tags:
- oc
- name: import role
command: >
oc replace
--namespace={{ osbs_namespace }}
--force=true
--filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-role-osbs-custom-build.yml
environment: "{{ osbs_environment }}"
when: yaml_role.changed
tags:
- oc
# Setup role bindings
- name: copy role bindings
template:
src: "openshift-rolebinding.{{ item.yaml_version | default('v2') }}.yml.j2"
dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-rolebinding-{{ item.name }}.yml"
with_items:
- name: osbs-readonly
role: view
yaml_version: v1
users: "{{ osbs_readonly_users }}"
groups: "{{ osbs_readonly_groups }}"
- name: osbs-readwrite
role: edit
yaml_version: v1
users: "{{ osbs_readwrite_users }}"
groups: "{{ osbs_readwrite_groups }}"
- name: osbs-admin
role: admin
yaml_version: v1
users: "{{ osbs_admin_users }}"
groups: "{{ osbs_admin_groups }}"
- name: osbs-custom-build-readwrite
role: osbs-custom-build
yaml_version: v1
role_namespace: "{{ osbs_namespace }}"
users: "{{ osbs_readwrite_users }}"
groups: "{{ osbs_readwrite_groups }}"
- name: osbs-custom-build-admin
role: osbs-custom-build
yaml_version: v1
role_namespace: "{{ osbs_namespace }}"
users: "{{ osbs_admin_users }}"
groups: "{{ osbs_admin_groups }}"
- name: osbs-readwrite-serviceaccounts
role: edit
serviceaccounts: "{{ osbs_service_accounts }}"
- name: osbs-custom-build-serviceaccounts
role: osbs-custom-build
role_namespace: "{{ osbs_namespace }}"
serviceaccounts: "{{ osbs_service_accounts }}"
register: yaml_rolebindings
when: osbs_is_admin
tags:
- oc
- name: import the role bindings
  command: >
    oc replace
    --namespace={{ osbs_namespace }}
    --force=true
    --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-rolebinding-{{ item.item.name }}.yml
  environment: "{{ osbs_environment }}"
  # default([]) keeps this safe when the "copy role bindings" task was
  # skipped (osbs_is_admin false) and registered no results list —
  # matches the pattern already used for service accounts.
  with_items: "{{ yaml_rolebindings.results | default([]) }}"
  when: item.changed
  tags:
  - oc
- name: copy cpu limitrange
template:
src: openshift-limitrange.yml.j2
dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-limitrange.yml"
when: osbs_cpu_limitrange and osbs_is_admin
register: yaml_limitrange
tags:
- oc
- name: import cpu limitrange
command: >
oc replace
--namespace={{ osbs_namespace }}
--force=true
--filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-limitrange.yml
environment: "{{ osbs_environment }}"
when: yaml_limitrange.changed
tags:
- oc
- name: delete cpu limitrange
command: >
oc delete
--namespace={{ osbs_namespace }}
--ignore-not-found=true
limitrange cpureq
environment: "{{ osbs_environment }}"
when: not osbs_cpu_limitrange and osbs_is_admin
tags:
- oc
- name: get nodeselector value
command: >
oc get namespace {{ osbs_namespace }} -o go-template
--template={% raw %}'{{index .metadata.annotations "openshift.io/node-selector"}}'{% endraw %}
environment: "{{ osbs_environment }}"
register: node_selector_value
when: osbs_nodeselector != ''
changed_when: false
tags:
- oc
- name: set default node selector
command: >
oc patch namespace {{ osbs_namespace }}
-p '{"metadata":{"annotations":{"openshift.io/node-selector": "{{ osbs_nodeselector }}"}}}'
environment: "{{ osbs_environment }}"
when: osbs_nodeselector != '' and osbs_nodeselector != node_selector_value.stdout
tags:
- oc
- include: orchestrator.yml
when: osbs_orchestrator

View file

@ -0,0 +1,18 @@
---
# Orchestrator-only tasks: render the reactor and client config secrets on
# the machine running ansible (local_action) into osbs_generated_config_path;
# per the role README they are imported afterwards with the osbs-secret role.

- name: generate reactor config secret
  local_action: >
    template
    src=reactor-config-secret.yml.j2
    dest="{{ osbs_generated_config_path }}/{{ osbs_namespace }}-reactor-config-secret.yml"
  register: yaml_reactor_config_secret
  tags:
  - oc

- name: generate client config secret
  local_action: >
    template
    src=client-config-secret.conf.j2
    dest="{{ osbs_generated_config_path }}/{{ osbs_namespace }}-client-config-secret.conf"
  register: yaml_client_config_secret
  tags:
  - oc

View file

@ -0,0 +1,53 @@
{#- osbs-client config: one INI section per worker cluster.
    NOTE(review): the expected-output test fixture contains an
    artifacts_allowed_domains line per cluster that is not visible in this
    view of the template — confirm it was not lost. -#}
[general]
build_json_dir = {{ osbs_build_json_dir }}
{% for platform, clusters in osbs_worker_clusters.items() %}
{% for cluster in clusters | default([]) %}
[{{ cluster.name }}]
namespace = {{ cluster.namespace | default('worker') }}
openshift_url = {{ cluster.openshift_url }}
token_file = /var/run/secrets/atomic-reactor/{{ cluster.name | replace('_', '-') }}-orchestrator/token
use_auth = {{ cluster.use_auth | default('true') }}
verify_ssl = {{ cluster.verify_ssl | default('true') }}
low_priority_node_selector = {{ cluster.low_priority_node_selector | default('') }}
authoritative_registry = {{ osbs_authoritative_registry }}
distribution_scope = {{ osbs_distribution_scope }}
# Koji integration
{% if osbs_koji_secret_name %}
koji_certs_secret = {{ osbs_koji_secret_name }}
{% endif %}
{% if osbs_koji_hub %}
koji_hub = {{ osbs_koji_hub }}
{% endif %}
{% if osbs_koji_root %}
koji_root = {{ osbs_koji_root }}
{% endif %}
# Pulp integration
{% if osbs_pulp_registry_name %}
pulp_registry_name = {{ osbs_pulp_registry_name }}
{% endif %}
{% if osbs_pulp_secret_name %}
pulp_secret = {{ osbs_pulp_secret_name }}
{% endif %}
# Distribution registry integration
{% if osbs_registry_api_versions %}
registry_api_versions = {{ osbs_registry_api_versions | join(',') }}
{% endif %}
{% if osbs_registry_secret_name %}
registry_secret = {{ osbs_registry_secret_name }}
{% endif %}
{% if osbs_registry_uri %}
registry_uri = {{ osbs_registry_uri }}
{% endif %}
{% if osbs_source_registry_uri %}
source_registry_uri = {{ osbs_source_registry_uri }}
{% endif %}
sources_command = {{ osbs_sources_command }}
vendor = {{ osbs_vendor }}
{% endfor %}
{% endfor %}

View file

@ -0,0 +1,10 @@
apiVersion: v1
kind: LimitRange
metadata:
name: cpureq
namespace: {{ osbs_namespace }}
spec:
limits:
- type: Container
defaultRequest:
cpu: {{ osbs_cpu_limitrange }}

View file

@ -0,0 +1,19 @@
apiVersion: v1
kind: RoleBinding
metadata:
name: {{ item.name }}
namespace: {{ osbs_namespace }}
roleRef:
name: {{ item.role }}
namespace: {{ item.role_namespace | default() }}
userNames:
{% for u in item.users | default([]) %}
- {{ u }}
{% endfor %}
groupNames:
{% for g in item.groups | default([]) %}
- {{ g }}
{% endfor %}

View file

@ -0,0 +1,29 @@
apiVersion: v1
kind: RoleBinding
metadata:
name: {{ item.name }}
namespace: {{ osbs_namespace }}
roleRef:
name: {{ item.role }}
namespace: {{ item.role_namespace | default() }}
subjects:
{% for sa in item.serviceaccounts | default([]) %}
- kind: 'ServiceAccount'
namespace: {{ osbs_namespace }}
name: {{ sa }}
{% endfor %}
{% for u in item.users | default([]) %}
- kind: 'User'
namespace: {{ osbs_namespace }}
name: {{ u }}
{% endfor %}
{% for g in item.groups | default([]) %}
- kind: 'Group'
namespace: {{ osbs_namespace }}
name: {{ g }}
{% endfor %}

View file

@ -0,0 +1,5 @@
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ item }}
namespace: {{ osbs_namespace }}

View file

@ -0,0 +1,13 @@
{#- atomic-reactor config: enabled worker clusters grouped by platform. -#}
---
version: 1
clusters:
{% for platform, clusters in osbs_worker_clusters.items() %}
  {{ platform }}:
{% for cluster in clusters | default([]) %}
  - name: {{ cluster.name }}
    max_concurrent_builds: {{ cluster.max_concurrent_builds }}
    enabled: {{ cluster.enabled | default(true) }}
{% endfor %}
{% endfor %}

View file

@ -0,0 +1,10 @@
apiVersion: v1
kind: Role
metadata:
name: osbs-custom-build
namespace: {{ osbs_namespace }}
rules:
- verbs:
- create
resources:
- builds/custom

View file

@ -0,0 +1,36 @@
apiVersion: v1
kind: ClusterRole
metadata:
name: dedicated-project-admin
rules:
- apiGroups:
- ""
attributeRestrictions: null
resources:
- limitranges
- resourcequotas
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- apiGroups:
- extensions
attributeRestrictions: null
resources:
- daemonsets
verbs:
- create
- delete
- get
- list
- patch
- update
- watch
- resources:
- builds/custom
verbs:
- create

View file

@ -0,0 +1,49 @@
[general]
build_json_dir = /usr/share/osbs
[minimum]
namespace = worker
openshift_url = https://minimum-worker.test.redhat.com
token_file = /var/run/secrets/atomic-reactor/minimum-orchestrator/token
use_auth = true
verify_ssl = false
artifacts_allowed_domains =
authoritative_registry = registry.access.redhat.com
distribution_scope = public
# Koji integration
koji_certs_secret = kojisecret
# Pulp integration
pulp_secret = pulpsecret
# Distribution registry integration
registry_api_versions = v1,v2
registry_secret = v2-registry-dockercfg
sources_command = rhpkg sources
vendor = Red Hat, Inc.
[all_values]
namespace = spam
openshift_url = https://all_values-worker.test.redhat.com
token_file = /var/run/secrets/atomic-reactor/all-values-orchestrator/token
use_auth = false
verify_ssl = false
artifacts_allowed_domains = allowed.domain.com,also-allowed.domain.com
authoritative_registry = registry.access.redhat.com
distribution_scope = public
# Koji integration
koji_certs_secret = kojisecret
# Pulp integration
pulp_secret = pulpsecret
# Distribution registry integration
registry_api_versions = v1,v2
registry_secret = v2-registry-dockercfg
sources_command = rhpkg sources
vendor = Red Hat, Inc.

View file

@ -0,0 +1,17 @@
osbs_worker_clusters:
x86_64:
- name: 'minimum'
max_concurrent_builds: 1
openshift_url: 'https://minimum-worker.test.redhat.com'
verify_ssl: 'false'
- name: 'all_values'
namespace: 'spam'
max_concurrent_builds: 99
openshift_url: 'https://all_values-worker.test.redhat.com'
verify_ssl: 'false'
use_auth: 'false'
artifacts_allowed_domains:
- allowed.domain.com
- also-allowed.domain.com

View file

@ -0,0 +1,2 @@
[masters]
test-host ansible_connection=local ansible_become=false

View file

@ -0,0 +1,313 @@
# Standards: 1.8
---
# Run playbook
# ansible-playbook -i test-inventory test.yml
# During active development, you can re-use the same
# environment setup:
# ansible-playbook -i test-inventory test.yml --skip-tags 'environment-setup'
- name: setup environment
hosts: masters
tasks:
- name: cleanup existing cluster
command: >
oc cluster down
register: cmd_cluster_down
changed_when: cmd_cluster_down.rc == 0
- name: bring up new cluster
command: >
oc cluster up
--version v3.4.1.10
--image registry.access.redhat.com/openshift3/ose
register: cmd_cluster_up
changed_when: cmd_cluster_up.rc == 0
- name: login as admin
command: >
oc login -u system:admin
register: cmd_login_admin
changed_when: cmd_login_admin.rc == 0
- name: cleanup tmp folder
file:
path: tmp
state: absent
- name: setup tmp folder
file:
path: tmp
state: directory
tags:
- environment-setup
- name: setup worker namespace
hosts: masters
roles:
- role: "{{ playbook_dir }}/../."
osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config"
osbs_openshift_home: tmp
osbs_namespace: test-worker
osbs_nodeselector: "worker=true"
osbs_service_accounts:
- orchestrator
- name: test worker namespace
hosts: masters
tasks:
- name: namespace worker created
command: >
oc get project test-worker
changed_when: false
- name: orchestrator service account created in worker namespace
command: >
oc -n test-worker get serviceaccount orchestrator
changed_when: false
- name: policy binding created
command: >
oc -n test-worker get policybinding ':default'
changed_when: false
- name: custom builds roles created
command: >
oc -n test-worker get role osbs-custom-build
changed_when: false
- name: expected rolebindings created in worker namespace
command: >
oc -n test-worker get rolebinding {{ item }}
with_items:
- osbs-admin
- osbs-admin
- osbs-custom-build-admin
- osbs-custom-build-readwrite
- osbs-custom-build-serviceaccounts
- osbs-readonly
- osbs-readwrite
- osbs-readwrite-serviceaccounts
changed_when: false
- name: nodeselector exists
shell: >
oc get namespace test-worker -o json |grep 'node-selector'
register: node_selector_exists
failed_when: "'node-selector' not in node_selector_exists.stdout"
- name: setup orchestrator namespace
hosts: masters
tags:
orchestrator
roles:
- role: "{{ playbook_dir }}/../."
osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config"
osbs_openshift_home: tmp
osbs_generated_config_path: tmp
osbs_namespace: test-orchestrator
osbs_orchestrator: true
- name: test orchestrator namespace
hosts: masters
tags:
orchestrator
tasks:
- name: reactor config secret generated
stat:
path: tmp/test-orchestrator-reactor-config-secret.yml
register: stat_reactor_config_secret
changed_when: false
# Name fixed: the condition fires when the secret file is MISSING,
# the original name said the opposite ("was generated").
- name: fail if reactor config secret was not generated
  fail:
    msg: Reactor config secret file not created!
  when: not stat_reactor_config_secret.stat.exists
- name: client-config-secret was generated properly
command: >
diff {{ playbook_dir }}/files/expected-client-config-secret.conf
{{ playbook_dir }}/tmp/test-orchestrator-client-config-secret.conf
changed_when: false
- name: setup namespace as non admin
hosts: masters
pre_tasks:
- name: Login with non cluster admin account
command: >
oc login -u non-admin -p non-admin
register: cmd_login_non_admin
changed_when: cmd_login_non_admin.rc == 0
roles:
- role: "{{ playbook_dir }}/../."
osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config"
osbs_openshift_home: tmp
osbs_namespace: test-non-admin
osbs_is_admin: false
osbs_service_accounts:
- orchestrator
post_tasks:
- name: Log back in with cluster admin account
command: >
oc login -u system:admin
register: cmd_login_admin
changed_when: cmd_login_admin.rc == 0
- name: test non-admin namespace
hosts: masters
tasks:
- name: namespace non-admin created
command: >
oc get project test-non-admin
changed_when: false
- name: orchestrator service account created in non-admin namespace
command: >
oc -n test-non-admin get serviceaccount orchestrator
changed_when: false
- name: custom builds roles NOT created in non-admin namespace
command: >
oc -n test-non-admin get role osbs-custom-build
register: cmd_role
failed_when: "'No resources found' not in cmd_role.stderr"
changed_when: false
- name: custom rolebindings NOT created in non-admin namespace
command: >
oc -n test-non-admin get rolebinding {{ item }}
register: cmd_rolebinding
failed_when: "'No resources found' not in cmd_rolebinding.stderr"
with_items:
- osbs-admin
- osbs-admin
- osbs-custom-build-admin
- osbs-custom-build-readwrite
- osbs-custom-build-serviceaccounts
- osbs-readonly
- osbs-readwrite
- osbs-readwrite-serviceaccounts
changed_when: false
- name: create limitrange namespace
hosts: masters
roles:
- role: "{{ playbook_dir }}/../."
osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config"
osbs_openshift_home: tmp
osbs_namespace: test-limitrange
osbs_cpu_limitrange: '100m'
- name: test limitrange namespace
hosts: masters
tasks:
- name: namespace limitrange created
command: >
oc get project test-limitrange
changed_when: false
- name: limitrange created
command: >
oc -n test-limitrange get limitrange cpureq
changed_when: false
- name: update limitrange namespace
hosts: masters
roles:
- role: "{{ playbook_dir }}/../."
osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config"
osbs_openshift_home: tmp
osbs_namespace: test-limitrange
# No osbs_cpu_limitrage provided should trigger removal
- name: test updated limitrange namespace
hosts: masters
tasks:
- name: limitrange deleted
command: >
oc -n test-limitrange get limitrange cpureq
register: cmd_limitrange
failed_when: "'No resources found' not in cmd_limitrange.stderr"
changed_when: false
- name: setup policybinding dedicated-admin namespace
hosts: masters
pre_tasks:
- name: login as admin
command: >
oc login -u system:admin
register: cmd_login_admin
changed_when: cmd_login_admin.rc == 0
- name: Create dedicated-poject-admin clusterrole
command: >
oc create -f {{ playbook_dir }}/files/dedicated-project-admin.yaml
register: cmd_create_clusterrole
changed_when: cmd_create_clusterrole.rc == 0
- name: Create the namespace as cluster admin
command: >
oc new-project test-policybinding-dedicated-admin
register: cmd_pre_create_namespace
changed_when: cmd_pre_create_namespace.rc == 0
- name: Create dedicated-admin user
command: >
oc -n test-policybinding-dedicated-admin
create user dedicated-admin
register: cmd_create_user
changed_when: cmd_create_user.rc == 0
- name: Add dedicated-project-admin role to dedicated-admin
command: >
oc -n test-policybinding-dedicated-admin
policy add-role-to-user dedicated-project-admin dedicated-admin
register: cmd_role_dedicated_project_admin
changed_when: cmd_role_dedicated_project_admin.rc == 0
- name: Create policybinding as cluster admin
command: >
oc -n test-policybinding-dedicated-admin
create policybinding test-policybinding-dedicated-admin
register: cmd_pre_create_policybinding
changed_when: cmd_pre_create_policybinding.rc == 0
# This is only needed because the project was created
# by a different user: system:admin.
- name: Give dedicated-admin user project admin access
command: >
oc -n test-policybinding-dedicated-admin
adm policy add-role-to-user admin dedicated-admin
register: cmd_role_project_admin
changed_when: cmd_role_project_admin.rc == 0
- name: Login with non cluster admin account
command: >
oc login -u dedicated-admin -p dedicated-admin
register: cmd_login_dedicated_admin
changed_when: cmd_login_dedicated_admin.rc == 0
roles:
- role: "{{ playbook_dir }}/../."
osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config"
osbs_openshift_home: tmp
osbs_namespace: test-policybinding-dedicated-admin
osbs_is_admin: true
osbs_service_accounts:
- orchestrator
post_tasks:
- name: Log back in with cluster admin account
command: >
oc login -u system:admin
register: cmd_login_admin
changed_when: cmd_login_admin.rc == 0
- name: test policybinding dedicated-admin namespace
hosts: masters
tasks:
- name: custom rolebindings created in dedicated-admin namespace
command: >
oc -n test-policybinding-dedicated-admin get rolebinding {{ item }}
register: cmd_rolebinding
with_items:
- osbs-admin
- osbs-admin
- osbs-custom-build-admin
- osbs-custom-build-readwrite
- osbs-custom-build-serviceaccounts
- osbs-readonly
- osbs-readwrite
- osbs-readwrite-serviceaccounts
changed_when: false

View file

@ -0,0 +1,70 @@
osbs-secret
===========
This role imports various secrets, such as Pulp or Koji certificates, from
filesystem into OpenShift. See the [OSBS
documentation](https://github.com/projectatomic/osbs-client/blob/master/docs/secret.md)
for more information.
This role is part of
[ansible-osbs](https://github.com/projectatomic/ansible-osbs/) playbook for
deploying OpenShift build service. Please refer to that github repository for
[documentation](https://github.com/projectatomic/ansible-osbs/blob/master/README.md)
and [issue tracker](https://github.com/projectatomic/ansible-osbs/issues).
Role Variables
--------------
The role imports the keys from the machine running ansible. You have to provide
`osbs_secret_files` list, which enumerates what files to import. Elements of
the list are dictionaries with two keys: `source` and `dest`. Source is the
location of the file on the machine where ansible is run. Dest is the filename
of the secret.
osbs_secret_files:
- source: /home/user/.pulp/pulp.cer
dest: pulp.cer
- source: /home/user/.pulp/pulp.key
dest: pulp.key
The name of the secret in OpenShift is defined by the `osbs_secret_name`
variable.
osbs_secret_name: pulpsecret
The secret has to be associated with a service account. This service account
can be set by the `osbs_secret_service_account` variable.
osbs_secret_service_account: builder
We need a kubeconfig file on the remote machine in order to talk to OpenShift.
Its location is contained in the `osbs_kubeconfig_path` variable.
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
Example Playbook
----------------
Following playbook imports the keys from my home directory on the machine where
ansible is executed. You may need to run something like this after the current
set of keys expires.
- hosts: builders
roles:
- role: osbs-secret
osbs_secret_name: pulpsecret
osbs_secret_files:
- source: /home/mmilata/.pulp/pulp.cer
dest: pulp.cer
- source: /home/mmilata/.pulp/pulp.key
dest: pulp.key
License
-------
BSD
Author Information
------------------
Martin Milata <mmilata@redhat.com>

View file

@ -0,0 +1,17 @@
---
# Defaults for the osbs-secret role.
# Name and type of the OpenShift secret to create/replace.
osbs_secret_name: pulpsecret
osbs_secret_type: Opaque
# Service account the secret is made mountable for.
osbs_secret_service_account: builder
# Remote directory where the temporary secret resource file is written.
osbs_secret_remote_dir: /var/lib/origin
# When true, missing source files skip the import instead of failing.
osbs_secret_can_fail: false
# Files to embed: `source` is a path on the machine running ansible,
# `dest` is the filename inside the secret.
osbs_secret_files:
- source: /home/user/.pulp/pulp.cer
  dest: pulp.cer
- source: /home/user/.pulp/pulp.key
  dest: pulp.key
osbs_namespace: default
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
  KUBECONFIG: "{{ osbs_kubeconfig_path }}"

View file

@ -0,0 +1,22 @@
---
# Handlers for the osbs-secret role, notified from tasks/main.yml once the
# secret resource file has been (re)generated.

- name: import osbs secret
  command: >
    oc replace
    --namespace={{ osbs_namespace }}
    --force=true
    --filename={{ osbs_secret_remote_dir }}/openshift-secret-{{ inventory_hostname }}-{{ osbs_namespace }}-{{ osbs_secret_name }}.yml
  environment: "{{ osbs_environment }}"
  # Chains into the next handler so the service account can mount the secret.
  notify: allow service account

- name: allow service account
  command: >
    oc secrets
    add serviceaccount/{{ osbs_secret_service_account }} secrets/{{ osbs_secret_name }}
    --for=mount
    --namespace={{ osbs_namespace }}
  environment: "{{ osbs_environment }}"

# Clean up the on-disk resource file after a successful import.
- name: delete secret resource file
  file:
    path: "{{ osbs_secret_remote_dir }}/openshift-secret-{{ inventory_hostname }}-{{ osbs_namespace }}-{{ osbs_secret_name }}.yml"
    state: absent

View file

@ -0,0 +1,21 @@
---
galaxy_info:
author: Martin Milata
description: Import secrets from local filesystem into OpenShift.
company: Red Hat
issue_tracker_url: https://github.com/projectatomic/ansible-osbs/issues
license: BSD
min_ansible_version: 1.2
platforms:
- name: EL
versions:
- 7
- name: Fedora
versions:
- 21
- 22
categories:
- cloud
- development
- packaging
dependencies: []

View file

@ -0,0 +1,40 @@
---
# Check that every source secret file exists on the control machine, then
# template the secret resource file and let handlers import it.

# Start optimistic; flipped to false below if any source file is absent.
- set_fact:
    osbs_secret_files_exist: true
  tags:
  - oc

# first_found falls back to /dev/null, whose file lookup yields '' — so a
# missing (or empty) source file marks the whole set as not present.
- set_fact:
    osbs_secret_files_exist: false
  when: lookup('file', lookup('first_found', [item.source, '/dev/null'])) == ''
  with_items: "{{ osbs_secret_files }}"
  tags:
  - oc

- fail:
    msg: Some of the source secret files do not exist (and osbs_secret_can_fail is false)
  when: not (osbs_secret_files_exist or osbs_secret_can_fail)
  tags:
  - oc

- debug:
    msg: Some of the source secret files do not exist, skipping import
  when: not osbs_secret_files_exist
  tags:
  - oc

- name: create secrets resource file
  template:
    src: openshift-secret.yml.j2
    dest: "{{ osbs_secret_remote_dir }}/openshift-secret-{{ inventory_hostname }}-{{ osbs_namespace }}-{{ osbs_secret_name }}.yml"
    mode: "0600"
  when: osbs_secret_files_exist
  notify:
  - import osbs secret
  - delete secret resource file
  tags:
  - oc

# Run the notified handlers now rather than at the end of the play, so the
# import happens within this role.
- meta: flush_handlers
  tags:
  - oc

View file

@ -0,0 +1,10 @@
{#- Renders an OpenShift Secret with each source file base64-embedded.
    NOTE(review): lookup('file') strips trailing newlines, which may matter
    for some certificate/key payloads — confirm acceptable here. -#}
apiVersion: v1
kind: Secret
metadata:
  name: {{ osbs_secret_name }}
type: {{ osbs_secret_type }}
data:
{% for f in osbs_secret_files %}
  {{ f.dest }}: {{ lookup('file', f.source) | b64encode }}
{% endfor %}