remove old/unused OSBS roles now that osbs-cluster is in prod

Signed-off-by: Adam Miller <admiller@redhat.com>
This commit is contained in:
Adam Miller 2016-12-13 21:15:23 +00:00
parent e50b79325f
commit c438796add
34 changed files with 0 additions and 1658 deletions

View file

@ -1,58 +0,0 @@
install-openshift
=================
Installs OpenShift v3 from various sources. Currently supports installing RPM
from COPR and building and installing the RPM from source code.
This role is part of
[ansible-osbs](https://github.com/projectatomic/ansible-osbs/) playbook for
deploying OpenShift build service. Please refer to that github repository for
[documentation](https://github.com/projectatomic/ansible-osbs/blob/master/README.md)
and [issue tracker](https://github.com/projectatomic/ansible-osbs/issues).
Role Variables
--------------
You need to specify which method of installation you want to use. Valid options
are `copr` (default) and `source`.
install_openshift_method: copr
You must specify particular version that should be installed from the COPR.
Can be in either `version` or `version-release` format.
install_openshift_copr_version: 1.0.5
When building from source, you need to specify the version of the built package.
install_openshift_source_version: 1.0.5
Git commit to build packages from.
install_openshift_source_commit: c66613fded194b10ce4e4e1c473fbfc0a511405b
File name of the tarball to be downloaded from github.
install_openshift_source_archive: openshift-{{ install_openshift_source_commit }}.tar.gz
Directory for rpmbuild.
install_openshift_source_rpmbuild_dir: "{{ ansible_env.HOME }}/rpmbuild"
Example Playbook
----------------
- hosts: builders
roles:
- role: install-openshift
install_openshift_method: copr
License
-------
BSD
Author Information
------------------
Martin Milata <mmilata@redhat.com>

View file

@ -1,26 +0,0 @@
---
# Defaults for the install-openshift role.

# Installation method: fedora (distro/COPR repos), copr, or source.
install_openshift_method: fedora

# copr-specific variables
# Specify the particular version that should be installed from copr.
# Can be either VERSION or VERSION-RELEASE. Quoted so version-looking
# values are always parsed as strings.
install_openshift_copr_version: "1.0.8"

# Variables for source build.
# Version of the built package.
install_openshift_source_version: "1.0.5"

# Git commit to build the package from.
install_openshift_source_commit: c66613fded194b10ce4e4e1c473fbfc0a511405b

# Name of the tarball to download from github; quoted because the value
# contains a Jinja expression.
install_openshift_source_archive: "openshift-{{ install_openshift_source_commit }}.tar.gz"

# rpmbuild working directory on the build host.
install_openshift_source_rpmbuild_dir: "{{ ansible_env.HOME }}/rpmbuild"

# Update the package from copr to the latest version?
osbs_update_packages: false

View file

@ -1,8 +0,0 @@
# Yum repository definition for the maxamillion/fedora-openshift COPR,
# EPEL 7 builds. GPG-checked against the COPR-published public key.
[maxamillion-fedora-openshift]
name=Copr repo for fedora-openshift owned by maxamillion
baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/epel-7-$basearch/
skip_if_unavailable=True
gpgcheck=1
gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
enabled=1
enabled_metadata=1

View file

@ -1,8 +0,0 @@
# Yum/DNF repository definition for the maxamillion/fedora-openshift COPR,
# Fedora builds (uses $releasever). GPG-checked against the COPR public key.
[maxamillion-fedora-openshift]
name=Copr repo for fedora-openshift owned by maxamillion
baseurl=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/fedora-$releasever-$basearch/
skip_if_unavailable=True
gpgcheck=1
gpgkey=https://copr-be.cloud.fedoraproject.org/results/maxamillion/fedora-openshift/pubkey.gpg
enabled=1
enabled_metadata=1

View file

@ -1 +0,0 @@
# Ansible Galaxy role install metadata, written in block style.
install_date: 'Thu Jan 7 17:30:46 2016'
version: master

View file

@ -1,21 +0,0 @@
---
# Ansible Galaxy metadata for the install-openshift role.
# Indentation restored: galaxy_info sub-keys and the platforms/categories
# lists must be nested, not top-level.
galaxy_info:
  author: Martin Milata
  description: Install OpenShift v3 from various sources
  company: Red Hat
  issue_tracker_url: https://github.com/projectatomic/ansible-osbs/issues
  license: BSD
  min_ansible_version: 1.2
  platforms:
    - name: EL
      versions:
        - 7
    - name: Fedora
      versions:
        - 21
        - 22
  categories:
    - cloud
    - development
    - packaging
dependencies: []

View file

@ -1,9 +0,0 @@
---
# Dispatch to the task file matching the selected installation method.
# Exactly one include runs, keyed off install_openshift_method
# (valid values: fedora, copr, source — see defaults/main.yml).
- include: method_fedora.yml
  when: install_openshift_method == 'fedora'

- include: method_copr.yml
  when: install_openshift_method == 'copr'

- include: method_source.yml
  when: install_openshift_method == 'source'

View file

@ -1,18 +0,0 @@
---
# Configure the appropriate COPR repository for the distro family, then
# install the origin packages. The two "setup repository" tasks are mutually
# exclusive via their when: conditions (is_fedora / is_rhel facts).
- name: setup repository
  copy:
    src: maxamillion-fedora-openshift.repo
    dest: /etc/yum.repos.d/maxamillion-fedora-openshift.repo
  when: is_fedora is defined

- name: setup repository
  copy:
    src: maxamillion-epel-openshift.repo
    dest: /etc/yum.repos.d/maxamillion-epel-openshift.repo
  when: is_rhel is defined

# state flips to 'latest' when osbs_update_packages is true.
- name: install openshift
  action: "{{ ansible_pkg_mgr }} name={{ item }} state={{ osbs_update_packages | ternary('latest', 'present') }}"
  with_items:
    - origin-master
    - origin-node

View file

@ -1,6 +0,0 @@
---
# Install the origin packages straight from the configured repositories.
# state flips to 'latest' when osbs_update_packages is true.
- name: install openshift
  action: "{{ ansible_pkg_mgr }} name={{ item }} state={{ osbs_update_packages | ternary('latest', 'present') }}"
  with_items:
    - origin-master
    - origin-node

View file

@ -1,58 +0,0 @@
---
# Build OpenShift RPMs from a github source tarball and install them.

- name: install build packages
  action: "{{ ansible_pkg_mgr }} name={{ item }} state=installed"
  with_items:
    - rpm-build
    - rpmdevtools

# yum-builddep lives in yum-utils; dnf ships builddep as a plugin.
- name: install build packages
  yum: name=yum-utils state=installed
  when: ansible_pkg_mgr == "yum"

- name: delete rpmbuild directories
  file: path={{ install_openshift_source_rpmbuild_dir }}/{{ item }}/ state=absent
  with_items:
    - RPMS
    - SRPMS
    - BUILD
    - BUILDROOT

- name: create rpmbuild directories
  file: path={{ install_openshift_source_rpmbuild_dir }}/{{ item }}/ state=directory recurse=yes
  with_items:
    - SPECS
    - SOURCES

- name: upload SPEC
  template: src=openshift.spec.j2 dest={{ install_openshift_source_rpmbuild_dir }}/SPECS/openshift.spec

- name: install build dependencies
  command: yum-builddep -y {{ install_openshift_source_rpmbuild_dir }}/SPECS/openshift.spec
  when: ansible_pkg_mgr == "yum"

- name: install build dependencies
  command: dnf builddep -y {{ install_openshift_source_rpmbuild_dir }}/SPECS/openshift.spec
  when: ansible_pkg_mgr == "dnf"

- name: download openshift tarball
  get_url:
    url: https://github.com/openshift/origin/archive/{{ install_openshift_source_commit }}/{{ install_openshift_source_archive }}
    dest: "{{ install_openshift_source_rpmbuild_dir }}/SOURCES/{{ install_openshift_source_archive }}"

- name: run build
  command: rpmbuild -bb --clean {{ install_openshift_source_rpmbuild_dir }}/SPECS/openshift.spec

- name: find the RPMs
  command: find {{ install_openshift_source_rpmbuild_dir }}/RPMS/ -type f
  register: find_rpms

# Globs require shell (not command); 'localinstall' is the yum spelling,
# plain 'install' works for dnf.
- name: install the RPMs
  shell: yum -y localinstall {{ install_openshift_source_rpmbuild_dir }}/RPMS/x86_64/*openshift*.rpm
  when: ansible_pkg_mgr == "yum"

- name: install the RPMs
  shell: dnf -y install {{ install_openshift_source_rpmbuild_dir }}/RPMS/x86_64/*openshift*.rpm
  when: ansible_pkg_mgr == "dnf"

- name: link /etc/openshift to /etc/origin
  file: path=/etc/openshift src=/etc/origin state=link

View file

@ -1,337 +0,0 @@
#debuginfo not supported with Go
%global debug_package %{nil}
%global gopath %{_datadir}/gocode
%global import_path github.com/openshift/origin
%global kube_plugin_path /usr/libexec/kubernetes/kubelet-plugins/net/exec/redhat~openshift-ovs-subnet
%global sdn_import_path github.com/openshift/openshift-sdn
# docker_version is the version of docker required by packages
# (was misspelled "docker_verison", which left the docker_version macro
# referenced in the node subpackage Requires undefined/unexpanded)
%global docker_version 1.6.2
# tuned_version is the version of tuned required by packages
%global tuned_version 2.3
# openvswitch_version is the version of openvswitch required by packages
%global openvswitch_version 2.3.1
# %%commit and %%ldflags are intended to be set by tito custom builders provided
# in the rel-eng directory. The values in this spec file will not be kept up to date.
# (percent signs doubled: rpm expands macros even inside comments)
%{!?commit:
%global commit {{ openshift_commit }}
}
%global shortcommit %(c=%{commit}; echo ${c:0:7})
# OpenShift specific ldflags from hack/common.sh os::build:ldflags
%{!?ldflags:
%global ldflags -X github.com/openshift/origin/pkg/version.majorFromGit 1 -X github.com/openshift/origin/pkg/version.minorFromGit 0+ -X github.com/openshift/origin/pkg/version.versionFromGit v1.0.4-366-gc66613f-dirty -X github.com/openshift/origin/pkg/version.commitFromGit {{ openshift_commit | truncate(7, True, '') }} -X k8s.io/kubernetes/pkg/version.gitCommit 44c91b1 -X k8s.io/kubernetes/pkg/version.gitVersion v1.1.0-alpha.0-1605-g44c91b1
}
# Main package metadata. Version is substituted by the Ansible template;
# URL and Source0 are derived from the import_path/commit macros above.
Name: openshift
# Version is not kept up to date and is intended to be set by tito custom
# builders provided in the rel-eng directory of this project
Version: {{ openshift_version }}
Release: 0%{?dist}
Summary: Open Source Platform as a Service by Red Hat
License: ASL 2.0
URL: https://%{import_path}
ExclusiveArch: x86_64
Source0: https://%{import_path}/archive/%{commit}/%{name}-%{commit}.tar.gz
BuildRequires: systemd
BuildRequires: golang >= 1.4
%description
%{summary}
%package master
# master subpackage: systemd unit + sysconfig for the master service
Summary: OpenShift Master
Requires: %{name} = %{version}-%{release}
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
%description master
%{summary}
%package node
# node subpackage: runtime dependencies for a container-running node host
Summary: OpenShift Node
Requires: %{name} = %{version}-%{release}
Requires: docker-io >= %{docker_version}
Requires: tuned-profiles-%{name}-node
Requires: util-linux
Requires: socat
Requires: nfs-utils
Requires(post): systemd
Requires(preun): systemd
Requires(postun): systemd
%description node
%{summary}
%package -n tuned-profiles-%{name}-node
# tuned profiles shipped separately so the node package can require them
Summary: Tuned profiles for OpenShift Node hosts
Requires: tuned >= %{tuned_version}
Requires: %{name} = %{version}-%{release}
%description -n tuned-profiles-%{name}-node
%{summary}
%package clients
# cross-built client binaries for linux/mac/windows
Summary: Openshift Client binaries for Linux, Mac OSX, and Windows
BuildRequires: golang-pkg-darwin-amd64
BuildRequires: golang-pkg-windows-386
%description clients
%{summary}
%package dockerregistry
Summary: Docker Registry v2 for OpenShift
Requires: %{name} = %{version}-%{release}
%description dockerregistry
%{summary}
%package pod
Summary: OpenShift Pod
Requires: %{name} = %{version}-%{release}
%description pod
%{summary}
%prep
# unpack Source0; the directory inside the tarball is origin-<full-commit>
%setup -qn origin-%{commit}
%build
# Don't judge me for this ... it's so bad.
mkdir _build
# Horrid hack because golang loves to just bundle everything
pushd _build
mkdir -p src/github.com/openshift
ln -s $(dirs +1 -l) src/%{import_path}
popd
# Gaming the GOPATH to include the third party bundled libs at build
# time. This is bad and I feel bad.
mkdir _thirdpartyhacks
pushd _thirdpartyhacks
ln -s \
$(dirs +1 -l)/Godeps/_workspace/src/ \
src
popd
# GOPATH layers the fake workspaces above over the system gocode dirs
export GOPATH=$(pwd)/_build:$(pwd)/_thirdpartyhacks:%{buildroot}%{gopath}:%{gopath}
# Build all linux components we care about
for cmd in openshift dockerregistry
do
go install -ldflags "%{ldflags}" %{import_path}/cmd/${cmd}
done
# Build only 'openshift' for other platforms
GOOS=windows GOARCH=386 go install -ldflags "%{ldflags}" %{import_path}/cmd/openshift
GOOS=darwin GOARCH=amd64 go install -ldflags "%{ldflags}" %{import_path}/cmd/openshift
#Build our pod
pushd images/pod/
go build -ldflags "%{ldflags}" pod.go
popd
%install
# stage the linux binaries, cross-built clients, pod binary, unit files,
# sysconfig files, tuned profiles and bash completions into the buildroot
install -d %{buildroot}%{_bindir}
# Install linux components
for bin in openshift dockerregistry
do
echo "+++ INSTALLING ${bin}"
install -p -m 755 _build/bin/${bin} %{buildroot}%{_bindir}/${bin}
done
# Install 'openshift' as client executable for windows and mac
for pkgname in openshift
do
install -d %{buildroot}%{_datadir}/${pkgname}/{linux,macosx,windows}
install -p -m 755 _build/bin/openshift %{buildroot}%{_datadir}/${pkgname}/linux/oc
install -p -m 755 _build/bin/darwin_amd64/openshift %{buildroot}%{_datadir}/${pkgname}/macosx/oc
install -p -m 755 _build/bin/windows_386/openshift.exe %{buildroot}%{_datadir}/${pkgname}/windows/oc.exe
done
#Install openshift pod
install -p -m 755 images/pod/pod %{buildroot}%{_bindir}/
install -d -m 0755 %{buildroot}%{_unitdir}
mkdir -p %{buildroot}%{_sysconfdir}/sysconfig
# oc, oadm and kubectl are symlinks to the single multi-call binary
for cmd in oc oadm; do
ln -s %{_bindir}/%{name} %{buildroot}%{_bindir}/$cmd
done
ln -s %{_bindir}/%{name} %{buildroot}%{_bindir}/kubectl
install -d -m 0755 %{buildroot}%{_sysconfdir}/origin/{master,node}
for pkgname in openshift
do
install -m 0644 rel-eng/${pkgname}-master.service %{buildroot}%{_unitdir}/${pkgname}-master.service
install -m 0644 rel-eng/${pkgname}-node.service %{buildroot}%{_unitdir}/${pkgname}-node.service
install -m 0644 rel-eng/${pkgname}-master.sysconfig %{buildroot}%{_sysconfdir}/sysconfig/${pkgname}-master
install -m 0644 rel-eng/${pkgname}-node.sysconfig %{buildroot}%{_sysconfdir}/sysconfig/${pkgname}-node
install -d -m 0755 %{buildroot}%{_prefix}/lib/tuned/${pkgname}-node-{guest,host}
install -m 0644 tuned/%{name}-node-guest/tuned.conf %{buildroot}%{_prefix}/lib/tuned/${pkgname}-node-guest/tuned.conf
install -m 0644 tuned/%{name}-node-host/tuned.conf %{buildroot}%{_prefix}/lib/tuned/${pkgname}-node-host/tuned.conf
install -d -m 0755 %{buildroot}%{_mandir}/man7
install -m 0644 tuned/man/tuned-profiles-%{name}-node.7 %{buildroot}%{_mandir}/man7/tuned-profiles-${pkgname}-node.7
done
mkdir -p %{buildroot}%{_sharedstatedir}/%{name}
mkdir -p %{buildroot}%{_sharedstatedir}/origin
# Install bash completions
install -d -m 755 %{buildroot}%{_sysconfdir}/bash_completion.d/
install -p -m 644 rel-eng/completions/bash/* %{buildroot}%{_sysconfdir}/bash_completion.d/
# File lists and install-time scriptlets for the main and sub packages.
%files
%defattr(-,root,root,-)
%doc README.md LICENSE
%{_bindir}/openshift
%{_bindir}/oc
%{_bindir}/oadm
%{_bindir}/kubectl
%{_sharedstatedir}/%{name}
%{_sysconfdir}/bash_completion.d/*
%dir %config(noreplace) %{_sysconfdir}/origin
%pre
# If /etc/openshift exists symlink it to /etc/origin
if [ -d "%{_sysconfdir}/openshift" ]; then
ln -s %{_sysconfdir}/openshift %{_sysconfdir}/origin
fi
%files master
%defattr(-,root,root,-)
%{_unitdir}/%{name}-master.service
%config(noreplace) %{_sysconfdir}/sysconfig/%{name}-master
%config(noreplace) /etc/origin/master
%post master
%systemd_post %{basename:openshift-master.service}
%preun master
%systemd_preun %{basename:openshift-master.service}
%postun master
%systemd_postun
%files node
%defattr(-,root,root,-)
%{_unitdir}/%{name}-node.service
%config(noreplace) %{_sysconfdir}/sysconfig/%{name}-node
%config(noreplace) /etc/origin/node
%post node
%systemd_post %{basename:openshift-node.service}
%preun node
%systemd_preun %{basename:openshift-node.service}
%postun node
%systemd_postun
%files -n tuned-profiles-openshift-node
%defattr(-,root,root,-)
%{_prefix}/lib/tuned/openshift-node-host
%{_prefix}/lib/tuned/openshift-node-guest
%{_mandir}/man7/tuned-profiles-openshift-node.7*
%post -n tuned-profiles-openshift-node
# activate the openshift-node tuned profile matching tuned's own
# guest/host recommendation for this machine
recommended=`/usr/sbin/tuned-adm recommend`
if [[ "${recommended}" =~ guest ]] ; then
/usr/sbin/tuned-adm profile openshift-node-guest > /dev/null 2>&1
else
/usr/sbin/tuned-adm profile openshift-node-host > /dev/null 2>&1
fi
%preun -n tuned-profiles-openshift-node
# reset the tuned profile to the recommended profile
# $1 = 0 when we're being removed > 0 during upgrades
if [ "$1" = 0 ]; then
recommended=`/usr/sbin/tuned-adm recommend`
/usr/sbin/tuned-adm profile $recommended > /dev/null 2>&1
fi
%files clients
%{_datadir}/%{name}/linux/oc
%{_datadir}/%{name}/macosx/oc
%{_datadir}/%{name}/windows/oc.exe
%files dockerregistry
%defattr(-,root,root,-)
%{_bindir}/dockerregistry
%files pod
%defattr(-,root,root,-)
%{_bindir}/pod
%changelog
* Wed Aug 19 2015 Martin Milata <mmilata@redhat.com> 0.2-9.ansible-osbs
- Modified for ansible-osbs
* Wed Aug 12 2015 Steve Milner <smilner@redhat.com> 0.2-8
- Master configs will be generated if none are found.
- Node configs will be generated if none are found and master is installed.
- All-In-One services removed.
* Wed Aug 12 2015 Steve Milner <smilner@redhat.com> 0.2-7
- Added new ovs script(s) to file lists.
* Wed Aug 5 2015 Steve Milner <smilner@redhat.com> 0.2-6
- Using _unitdir instead of _prefix for unit data
* Fri Jul 31 2015 Steve Milner <smilner@redhat.com> 0.2-5
- Configuration location now /etc/origin
- Default configs created upon installation
* Tue Jul 28 2015 Steve Milner <smilner@redhat.com> 0.2-4
- Added AEP packages
* Mon Jan 26 2015 Scott Dodson <sdodson@redhat.com> 0.2-3
- Update to 21fb40637c4e3507cca1fcab6c4d56b06950a149
- Split packaging of openshift-master and openshift-node
* Mon Jan 19 2015 Scott Dodson <sdodson@redhat.com> 0.2-2
- new package built with tito
* Fri Jan 09 2015 Adam Miller <admiller@redhat.com> - 0.2-2
- Add symlink for osc command line tooling (merged in from jhonce@redhat.com)
* Wed Jan 07 2015 Adam Miller <admiller@redhat.com> - 0.2-1
- Update to latest upstream release
- Restructured some of the golang deps build setup for restructuring done
upstream
* Thu Oct 23 2014 Adam Miller <admiller@redhat.com> - 0-0.0.9.git562842e
- Add new patches from jhonce for systemd units
* Mon Oct 20 2014 Adam Miller <admiller@redhat.com> - 0-0.0.8.git562842e
- Update to latest master snapshot
* Wed Oct 15 2014 Adam Miller <admiller@redhat.com> - 0-0.0.7.git7872f0f
- Update to latest master snapshot
* Fri Oct 03 2014 Adam Miller <admiller@redhat.com> - 0-0.0.6.gite4d4ecf
- Update to latest Alpha nightly build tag 20141003
* Wed Oct 01 2014 Adam Miller <admiller@redhat.com> - 0-0.0.5.git6d9f1a9
- Switch to consistent naming, patch by jhonce
* Tue Sep 30 2014 Adam Miller <admiller@redhat.com> - 0-0.0.4.git6d9f1a9
- Add systemd and sysconfig entries from jhonce
* Tue Sep 23 2014 Adam Miller <admiller@redhat.com> - 0-0.0.3.git6d9f1a9
- Update to latest upstream.
* Mon Sep 15 2014 Adam Miller <admiller@redhat.com> - 0-0.0.2.git2647df5
- Update to latest upstream.
* Thu Aug 14 2014 Adam Miller <admiller@redhat.com> - 0-0.0.1.gitc3839b8
- First package

View file

@ -1,155 +0,0 @@
osbs-master
===========
Main role for deploying OSBS - [OpenShift build
service](https://github.com/projectatomic/osbs-client/), service for building
layered Docker images.
It performs the necessary configuration of Docker and OpenShift and optionally
opens/closes OpenShift firewall port. It also generates self-signed certificate
that can be used by reverse proxy placed in front of the builder.
This role is part of
[ansible-osbs](https://github.com/projectatomic/ansible-osbs/) playbook for
deploying OpenShift build service. Please refer to that github repository for
[documentation](https://github.com/projectatomic/ansible-osbs/blob/master/README.md)
and [issue tracker](https://github.com/projectatomic/ansible-osbs/issues).
Role Variables
--------------
You may need to configure Docker to connect to registries over plain HTTP, or
HTTPS with self-signed certificate (especially when developing OSBS). You can
provide list of such registries in `osbs_docker_insecure_registries` (empty by
default).
osbs_docker_insecure_registries: []
Expose the OpenShift port to the outside world? Set this to `false` when using
authenticating proxy on the localhost. Has no effect if `osbs_manage_firewalld`
is `false`.
osbs_master_expose_port: true
Set to false if you don't use firewalld or do not want the playbook to modify
it.
osbs_manage_firewalld: true
If you are using authenticating proxy, this role can generate a self-signed certificate that the proxy can use to authenticate itself to OpenShift. The proxy needs the certificate and the key concatenated in one file (`osbs_proxy_cert_file`). OpenShift needs to know the CA of the certificate, which is configured in `osbs_proxy_ca_file` and which is the same as the certificate because it is self-signed.
osbs_proxy_cert_file: /etc/origin/proxy_selfsigned.crt
osbs_proxy_key_file: /etc/origin/proxy_selfsigned.key
osbs_proxy_certkey_file: /etc/httpd/openshift_proxy_certkey.crt
osbs_proxy_ca_file: /etc/origin/proxy_selfsigned.crt
OpenShift authorization policy - which users should be assigned the view
(read-only), osbs-builder (read-write), and cluster-admin (admin) roles. In
default configuration, everyone has read/write access. The authentication is
handled by the proxy - if you are not using it, then everyone connecting from
the outside belongs to the `system:unauthenticated` group.
Default setup:
osbs_readonly_users: []
osbs_readonly_groups: []
osbs_readwrite_users: []
osbs_readwrite_groups:
- system:authenticated
- system:unauthenticated
osbs_admin_users: []
osbs_admin_groups: []
Development with authenticating proxy:
osbs_readonly_users: []
osbs_readonly_groups: []
osbs_readwrite_users: []
osbs_readwrite_groups:
- system:authenticated
osbs_admin_users: []
osbs_admin_groups: []
Example production configuration with only one user starting the builds:
osbs_readonly_users: []
osbs_readonly_groups:
- system:authenticated
osbs_readwrite_groups: []
osbs_readwrite_users:
- kojibuilder
osbs_admin_users:
- foo@EXAMPLE.COM
- bar@EXAMPLE.COM
osbs_admin_groups: []
Limit on the number of running pods.
osbs_master_max_pods: 3
[Image garbage
collection](https://docs.openshift.org/latest/admin_guide/garbage_collection.html#image-garbage-collection)
can be configured with following variables:
osbs_image_gc_high_threshold: 90
osbs_image_gc_low_threshold: 80
Dependencies
------------
OpenShift is expected to be installed on the remote host. This can by
accomplished by the
[install-openshift](https://github.com/projectatomic/ansible-role-install-openshift)
role.
Example Playbook
----------------
Simple development deployment:
- hosts: builders
roles:
- install-openshift
- osbs-master
- atomic-reactor
Deployment behind authentication proxy that only allows the *kojibuilder* user
to start builds (and everyone to view them). Set docker to trust registry on
localhost:5000 even though it uses HTTP.
- hosts: builders
roles:
- install-openshift
- role: osbs-master
osbs_master_expose_port: false
osbs_docker_insecure_registries: [172.42.17.1:5000]
osbs_readonly_users: []
osbs_readonly_groups:
- system:authenticated
- system:unauthenticated
osbs_readwrite_groups: []
osbs_readwrite_users:
- kojibuilder
osbs_admin_users: []
osbs_admin_groups: []
- atomic-reactor
- role: osbs-proxy
osbs_proxy_type: kerberos
osbs_proxy_kerberos_keytab_file: /etc/HTTP-FQDN.EXAMPLE.COM.keytab
osbs_proxy_kerberos_realm: EXAMPLE.COM
osbs_proxy_ssl_cert_file: /etc/fqdn.example.com.crt
osbs_proxy_ssl_key_file: /etc/fqdn.example.com.key
osbs_proxy_ip_whitelist:
- subnet: 192.168.66.0/24
user: kojibuilder
License
-------
BSD
Author Information
------------------
Martin Milata <mmilata@redhat.com>

View file

@ -1,114 +0,0 @@
---
# Defaults for the osbs-master role.
# Indentation restored: the identity-provider and named-certificate maps must
# be nested under their keys, not flattened to top level.

# set the openshift log level
osbs_openshift_loglevel: 0

# docker needs to know which registries are insecure
osbs_docker_insecure_registries: []

# Expose the OpenShift port to the outside world? Set this to false when using
# authenticating proxy on the localhost. Has no effect if osbs_manage_firewalld
# is false.
osbs_master_expose_port: true

# set to false if you don't use firewalld or do not want the playbook to modify it
osbs_manage_firewalld: true

# these will be generated for proxy so it can authenticate itself
osbs_proxy_cert_file: /etc/origin/proxy_selfsigned.crt
osbs_proxy_key_file: /etc/origin/proxy_selfsigned.key
osbs_proxy_certkey_file: /etc/origin/proxy_certkey.crt

# CA cert to validate the proxy certificate against
osbs_proxy_ca_file: /etc/origin/proxy_selfsigned.crt

# openshift authorization - which users should be assigned the view (readonly),
# osbs-builder (readwrite), and cluster-admin (admin) roles
# in default configuration, everyone has read/write access
osbs_readonly_users: []
osbs_readonly_groups: []
osbs_readwrite_users: []
osbs_readwrite_groups:
  - system:authenticated
  - system:unauthenticated
osbs_admin_users: []
osbs_admin_groups: []

## development w/ auth proxy:
#osbs_readonly_users: []
#osbs_readonly_groups: []
#osbs_readwrite_users: []
#osbs_readwrite_groups:
#  - system:authenticated
#osbs_admin_users: []
#osbs_admin_groups: []

## example production configuration:
#osbs_readonly_users: []
#osbs_readonly_groups:
#  - system:authenticated
#osbs_readwrite_groups: []
#osbs_readwrite_users:
#  - kojibuilder
#  - "{{ ansible_hostname }}"
#  - system:serviceaccount:default:default
#osbs_admin_users:
#  - foo@EXAMPLE.COM
#  - bar@EXAMPLE.COM
#osbs_admin_groups: []

# limit on the number of running pods
osbs_master_max_pods: 3

# update packages to their latest version?
osbs_update_packages: false

# directory for exporting v1 images to koji
# if not defined, nothing will happen
#osbs_export_dir: /mnt/image-export

# mount the image export directory
#osbs_export_mount_src: /dev/vda1
#osbs_export_mount_fs: ext4

# configure httpd to publish the directory under following path of default vhost
#osbs_export_http_path: /image-export

# image garbage-collection disk-usage thresholds (percent)
osbs_image_gc_high_threshold: 90
osbs_image_gc_low_threshold: 80

# Specify different identity providers and options needed for the master-config
# template
#
# Currently supported options are:
#   request_header
#   htpasswd_provider
osbs_identity_provider: "request_header"
osbs_identity_request:
  name: request_header
  challenge: true
  login: true
osbs_identity_htpasswd:
  name: htpasswd_provider
  challenge: true
  login: true
  provider_file: /etc/openshift/htpasswd

# This correlates to the section of OpenShift master-config.yaml for custom
# certificate configuration for the public facing API endpoint
#
# https://docs.openshift.org/latest/install_config/certificate_customization.html
#
# Following values:
#   enabled: boolean - used for checking in the config template
#   cert_file: path to cert (.crt) file
#   key_file: path to key (.key) file
#   names: list of names
osbs_named_certificates:
  enabled: false
  cert_file: null
  key_file: null
  names: null

# public API URL; intentionally unset here, supplied by the caller
osbs_public_api_url: null

View file

@ -1,32 +0,0 @@
# systemd unit for the Docker daemon, customized for OSBS:
# forces the systemd cgroup driver and re-applies the local iptables
# policy after every daemon (re)start via ExecStartPost.
[Unit]
Description=Docker Application Container Engine
Documentation=http://docs.docker.com
After=network.target
Wants=docker-storage-setup.service
[Service]
Type=notify
NotifyAccess=all
EnvironmentFile=-/etc/sysconfig/docker
EnvironmentFile=-/etc/sysconfig/docker-storage
EnvironmentFile=-/etc/sysconfig/docker-network
Environment=GOTRACEBACK=crash
ExecStart=/usr/bin/docker daemon \
          --exec-opt native.cgroupdriver=systemd \
          $OPTIONS \
          $DOCKER_STORAGE_OPTIONS \
          $DOCKER_NETWORK_OPTIONS \
          $INSECURE_REGISTRY
# docker restarts wipe/rebuild firewall rules, so restore ours each start
ExecStartPost=/usr/local/bin/fix-docker-iptables
LimitNOFILE=1048576
LimitNPROC=1048576
LimitCORE=infinity
MountFlags=slave
StandardOutput=null
StandardError=null
TimeoutStartSec=0
Restart=on-abnormal
[Install]
WantedBy=multi-user.target

View file

@ -1,54 +0,0 @@
#!/bin/bash -xe
# Restrict container egress from docker0 to an explicit allow-list of
# internal services, rejecting everything else.
# Note: this is done as a script because it needs to be run after
# every docker service restart.
# And just doing an iptables-restore is going to mess up kubernetes'
# NAT table.
# Delete all old rules
iptables --flush FORWARD
# Re-insert some basic rules
# NOTE(review): assumes the DOCKER chain already exists (created by the
# docker daemon) — confirm ordering against the service unit.
iptables -A FORWARD -o docker0 -j DOCKER
iptables -A FORWARD -o docker0 -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
iptables -A FORWARD -i docker0 -o docker0 -j ACCEPT
# Now insert access to allowed boxes
# docker-registry
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.125.56 --dport 443 -j ACCEPT
#koji.fp.o
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.125.61 --dport 80 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.125.61 --dport 443 -j ACCEPT
# pkgs
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.125.44 --dport 80 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.125.44 --dport 443 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.125.44 --dport 9418 -j ACCEPT
# DNS
iptables -A FORWARD -i docker0 -p udp -m udp -d 10.5.126.21 --dport 53 -j ACCEPT
iptables -A FORWARD -i docker0 -p udp -m udp -d 10.5.126.22 --dport 53 -j ACCEPT
# mirrors.fp.o
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.51 --dport 443 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.52 --dport 443 -j ACCEPT
# dl.phx2
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.93 --dport 80 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.93 --dport 443 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.94 --dport 80 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.94 --dport 443 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.95 --dport 80 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.95 --dport 443 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.96 --dport 80 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.96 --dport 443 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.97 --dport 80 -j ACCEPT
iptables -A FORWARD -i docker0 -p tcp -m tcp -d 10.5.126.97 --dport 443 -j ACCEPT
# Docker is CRAZY and forces Google DNS upon us.....
iptables -A FORWARD -i docker0 -p udp -m udp -d 8.8.8.8 --dport 53 -j ACCEPT
iptables -A FORWARD -i docker0 -p udp -m udp -d 8.8.4.4 --dport 53 -j ACCEPT
# everything not explicitly allowed above is rejected
iptables -A FORWARD -j REJECT --reject-with icmp-host-prohibited

View file

@ -1,68 +0,0 @@
#!/bin/bash -xe
# Note: this is done as a script because it needs to be run after
# every docker service restart.
# And just doing an iptables-restore is going to mess up kubernetes'
# NAT table.
# And it gets even better with openshift! It thinks I'm stupid and need
# to be corrected by automatically adding the "allow all" rules back at
# the top as soon as I remove them.
# To circumvent that, we're just adding a new chain for this, as it seems
# that it doesn't do anything with the firewall if we keep its rules in
# place. (it doesn't check the order of its rules, only that they exist)
if [ "`iptables -nL | grep FILTER_FORWARD`" == "" ];
then
iptables -N FILTER_FORWARD
fi
if [ "`iptables -nL | grep 'FILTER_FORWARD all'`" == "" ];
then
iptables -I FORWARD 1 -j FILTER_FORWARD
fi
# Delete all old rules
iptables --flush FILTER_FORWARD
# Re-insert some basic rules
iptables -A FILTER_FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT
iptables -A FILTER_FORWARD --src 10.1.0.0/16 --dst 10.1.0.0/16 -j ACCEPT
# Now insert access to allowed boxes
# docker-registry
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.217 --dport 443 -j ACCEPT
#koji.fp.o
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.87 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.87 --dport 443 -j ACCEPT
# pkgs.stg
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.83 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.83 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.83 --dport 9418 -j ACCEPT
# DNS
iptables -A FILTER_FORWARD -p udp -m udp -d 10.5.126.21 --dport 53 -j ACCEPT
iptables -A FILTER_FORWARD -p udp -m udp -d 10.5.126.22 --dport 53 -j ACCEPT
# mirrors.fp.o
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.51 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.52 --dport 443 -j ACCEPT
# dl.phx2
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.93 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.93 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.94 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.94 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.95 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.95 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.96 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.96 --dport 443 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.97 --dport 80 -j ACCEPT
iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.126.97 --dport 443 -j ACCEPT
# Docker is CRAZY and forces Google DNS upon us.....
iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.8.8 --dport 53 -j ACCEPT
iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.4.4 --dport 53 -j ACCEPT
iptables -A FILTER_FORWARD -j REJECT --reject-with icmp-host-prohibited

View file

@ -1,36 +0,0 @@
# OpenShift ClusterRole for OSBS builders. Indentation restored: metadata
# and the rules list must be nested. Grants read/write on build-related
# resources, read-only on the remaining resource groups, and image-stream
# layer get/update.
apiVersion: v1
kind: ClusterRole
metadata:
  name: osbs-builder
rules:
  - attributeRestrictions: null
    resources:
      - builds/custom
      - pods/exec
      - pods/portforward
      - pods/proxy
      - resourcegroup:exposedkube
      - resourcegroup:exposedopenshift
      - secrets
    verbs:
      - create
      - delete
      - get
      - list
      - update
      - watch
  - attributeRestrictions: null
    resources:
      - resourcegroup:allkube
      - resourcegroup:allkube-status
      - resourcegroup:allopenshift-status
    verbs:
      - get
      - list
      - watch
  - attributeRestrictions: null
    resources:
      - imagestreams/layers
    verbs:
      - get
      - update

View file

@ -1,12 +0,0 @@
---
# Handlers for the osbs-master role: restart a service after its
# configuration changed. Native block YAML instead of key=value strings
# (Ansible best practice).
- name: restart docker
  service:
    name: docker
    state: restarted

- name: restart openshift-master
  service:
    name: origin-master
    state: restarted

- name: restart openshift-node
  service:
    name: origin-node
    state: restarted

- name: restart httpd
  service:
    name: httpd
    state: restarted

View file

@ -1 +0,0 @@
{install_date: 'Thu Jan 7 17:30:46 2016', version: master}

View file

@ -1,21 +0,0 @@
---
# Ansible Galaxy metadata for the osbs-master role.
galaxy_info:
  author: Martin Milata
  description: OpenShift build service - builder of layered Docker images
  company: Red Hat
  issue_tracker_url: https://github.com/projectatomic/ansible-osbs/issues
  license: BSD
  # Quoted: a plain 1.9 would be loaded as a YAML float.
  min_ansible_version: "1.9"
  platforms:
    - name: EL
      versions:
        - 7
    - name: Fedora
      versions:
        - 21
        - 22
  categories:
    - cloud
    - development
    - packaging
dependencies: []

View file

@ -1,56 +0,0 @@
---
# Optional tasks for exporting built image tarballs (NFS mount + HTTP
# export) and installing a garbage-collection cron script. Every task is
# gated on the relevant osbs_export_* variable being defined.

- name: mount the export directory
  mount:
    name: "{{ osbs_export_dir }}"
    src: "{{ osbs_export_mount_src }}"
    fstype: "{{ osbs_export_mount_fs }}"
    state: mounted
  when: osbs_export_mount_src is defined

- name: set owner of the image export directory
  file:
    path: "{{ osbs_export_dir }}"
    state: directory
    owner: apache
    group: apache
  when: osbs_export_http_path is defined

- name: configure selinux labels for image export directory
  # NOTE(review): plain `command` is not idempotent -- this reports
  # "changed" on every run and may fail once the fcontext rule exists;
  # confirm whether `sefcontext` (or an `|| true`) is wanted.
  command: /usr/sbin/semanage fcontext --add --type httpd_sys_content_t "{{ osbs_export_dir }}(/.*)?"
  when: osbs_export_http_path is defined

- name: restore selinux labels for image export directory
  file:
    path: "{{ osbs_export_dir }}"
    state: directory
    setype: httpd_sys_content_t
    recurse: true
  when: osbs_export_http_path is defined

- name: export the mount directory via http
  template:
    src: httpd-osbs_image_export.conf.j2
    dest: "{{ osbs_export_http_conf }}"
  notify:
    - restart httpd
  when: osbs_export_http_path is defined

- name: open http ports
  firewalld:
    service: "{{ item }}"
    state: enabled
    permanent: true
    immediate: true
  with_items:
    - http
    - https
  when: osbs_export_http_path is defined and osbs_manage_firewalld

- name: install dependencies for garbage collection script
  # `action` + ansible_pkg_mgr kept for compatibility with the role's
  # declared min_ansible_version (pre-dates the generic `package` module).
  action: "{{ ansible_pkg_mgr }} name=koji state=installed"
  when: osbs_export_koji_hub is defined

- name: install garbage collection script
  template:
    src: osbs-remove-finished-docker-tarball.py.j2
    dest: /etc/cron.hourly/osbs-remove-finished-docker-tarball.cron
    mode: "0744"
  when: osbs_export_koji_hub is defined

View file

@ -1,132 +0,0 @@
---
### docker service ###

- name: install packages required by osbs
  action: "{{ ansible_pkg_mgr }} name=docker state={{ osbs_update_packages | ternary('latest', 'present') }}"

- name: install openssl for auth proxy cert generation
  # NOTE(review): state=latest upgrades openssl on every newer release;
  # kept as-is to preserve behavior, but `present` is usually intended.
  action: "{{ ansible_pkg_mgr }} name=openssl state=latest"

- name: configure docker
  template:
    src: sysconfig-docker.j2
    dest: /etc/sysconfig/docker
    backup: true
  notify: restart docker

- name: ensure docker is running
  service:
    name: docker
    state: started
    enabled: true
### openshift service ###

- name: open/close openshift port in the firewall
  firewalld:
    port: "{{ osbs_openshift_port }}/tcp"
    state: "{{ osbs_master_expose_port | ternary('enabled', 'disabled') }}"
    permanent: true
    immediate: true
  when: osbs_manage_firewalld

- name: apply modifications to /etc/sysconfig/origin
  template:
    src: "sysconfig-origin-{{ item }}.j2"
    dest: "/etc/sysconfig/origin-{{ item }}"
  with_items:
    - master
    - node

- name: configure openshift master
  template:
    src: master-config.yaml.j2
    dest: /etc/origin/master/master-config.yaml
  notify: restart openshift-master

- name: configure openshift node
  template:
    src: node-config.yaml.j2
    dest: /etc/origin/node/node-config.yaml
  notify: restart openshift-node

# The next three tasks build the cert+key bundle used by the
# authenticating proxy; `creates:` makes them run only once.
- name: generate cert for authenticating proxy - self-signed certificate
  command: >
    openssl req -new -nodes -x509
    -subj "/C=CZ/ST=SelfSigned/L=SelfSigned/O=IT/CN={{ inventory_hostname }}"
    -days 3650
    -keyout {{ osbs_proxy_key_file }}
    -out {{ osbs_proxy_cert_file }}
    -extensions v3_ca
  args:
    creates: "{{ osbs_proxy_cert_file }}"
  register: auth_proxy_cert

- name: generate cert for authenticating proxy - convert privkey to rsa
  command: openssl rsa -in {{ osbs_proxy_key_file }} -out {{ osbs_proxy_key_file }}
  when: auth_proxy_cert.changed

- name: generate cert for authenticating proxy - concatenate cert and key
  shell: cat {{ osbs_proxy_cert_file }} {{ osbs_proxy_key_file }} > {{ osbs_proxy_certkey_file }}
  args:
    creates: "{{ osbs_proxy_certkey_file }}"

# We need to have openshift restarted in order to configure authentication.
- meta: flush_handlers

- name: ensure openshift is running
  service:
    name: "origin-{{ item }}"
    state: started
    enabled: true
  with_items:
    - master
    - node

- name: wait for openshift to start accepting connections
  wait_for:
    port: "{{ osbs_openshift_port }}"
    timeout: 30
- name: copy osbs-builder role definition
  copy:
    src: openshift-role-osbs-builder.yml
    dest: "{{ osbs_openshift_home }}/role-osbs-builder.yml"

- name: import the osbs-builder role
  shell: "oc replace --force=true --filename={{ osbs_openshift_home }}/role-osbs-builder.yml && touch /etc/origin/osbs-builder-role-imported"
  args:
    creates: "/etc/origin/osbs-builder-role-imported"
  environment: "{{ osbs_environment }}"

- name: copy role bindings
  template:
    src: openshift-rolebinding.yml.j2
    dest: "{{ osbs_openshift_home }}/rolebinding-{{ item.name }}.yml"
  with_items:
    - name: osbs-readonly
      role: view
      users: "{{ osbs_readonly_users }}"
      groups: "{{ osbs_readonly_groups }}"
    - name: osbs-readwrite
      role: osbs-builder
      users: "{{ osbs_readwrite_users }}"
      groups: "{{ osbs_readwrite_groups }}"
    - name: osbs-admin
      role: cluster-admin
      users: "{{ osbs_admin_users }}"
      groups: "{{ osbs_admin_groups }}"

- name: import the role bindings
  # NOTE(review): the single `creates` marker is shared by all three loop
  # items, so once it exists the remaining bindings are never imported --
  # confirm this one-shot behavior is intended.
  shell: "oc replace --force=true --filename={{ osbs_openshift_home }}/rolebinding-{{ item }}.yml && touch /etc/origin/role-bindings-imported"
  args:
    creates: "/etc/origin/role-bindings-imported"
  environment: "{{ osbs_environment }}"
  with_items:
    - osbs-readonly
    - osbs-readwrite
    - osbs-admin

- name: copy resource quotas
  template:
    src: openshift-resourcequota.yml.j2
    dest: "{{ osbs_openshift_home }}/resourcequota.yml"

- name: import resource quotas
  shell: "oc replace --force=true --filename={{ osbs_openshift_home }}/resourcequota.yml && touch /etc/origin/resource-quotas-imported"
  args:
    creates: "/etc/origin/resource-quotas-imported"
  environment: "{{ osbs_environment }}"
# Useful when using "oc" to inspect openshift state.
- name: add KUBECONFIG to .bashrc
  lineinfile:
    dest: "{{ ansible_env.HOME }}/.bashrc"
    regexp: "export KUBECONFIG="
    line: "export KUBECONFIG={{ osbs_environment.KUBECONFIG }}"

- include: export.yml
  when: osbs_export_dir is defined

- name: copy docker iptables script
  copy:
    src: "fix-docker-iptables.{{ env }}"
    dest: /usr/local/bin/fix-docker-iptables
    # Quoted so the octal mode is not parsed as a YAML integer.
    mode: "0755"

- name: copy docker service config
  copy:
    src: docker.service
    dest: /etc/systemd/system/docker.service

View file

@ -1,11 +0,0 @@
# atomic-reactor copies tarballs to this directory (via NFS share) and koji
# downloads it from this place. Garbage collection needs to be done on this
# directory.
# Serve the export directory read-only; +Indexes lets koji (and humans)
# browse the per-task subdirectories.
Alias "{{ osbs_export_http_path }}" "{{ osbs_export_dir }}"
<Directory "{{ osbs_export_dir }}">
AllowOverride None
Require all granted
Options +Indexes
</Directory>

View file

@ -1,188 +0,0 @@
# OpenShift Origin master configuration, templated by ansible
# (role osbs-master). Nesting restored to the standard MasterConfig layout.
admissionConfig:
  pluginConfig: null
apiLevels:
  - v1
apiVersion: v1
assetConfig:
  extensionDevelopment: false
  extensionScripts: null
  extensionStylesheets: null
  extensions: null
  loggingPublicURL: ""
  logoutURL: ""
{% if osbs_public_api_url %}
  masterPublicURL: https://{{ osbs_public_api_url }}:8443
{% else %}
  masterPublicURL: https://{{ ansible_default_ipv4.address }}:8443
{% endif %}
  metricsPublicURL: ""
  publicURL: https://{{ ansible_default_ipv4.address }}:8443/console/
  servingInfo:
    bindAddress: 0.0.0.0:8443
    bindNetwork: tcp4
    certFile: master.server.crt
    clientCA: ""
    keyFile: master.server.key
    maxRequestsInFlight: 0
    namedCertificates: null
    requestTimeoutSeconds: 0
controllerLeaseTTL: 0
controllers: '*'
corsAllowedOrigins:
  - 127.0.0.1
  - {{ ansible_default_ipv4.address }}:8443
  - localhost
disabledFeatures: null
dnsConfig:
  bindAddress: 0.0.0.0:53
  bindNetwork: tcp4
etcdClientInfo:
  ca: ca.crt
  certFile: master.etcd-client.crt
  keyFile: master.etcd-client.key
  urls:
    - https://{{ ansible_default_ipv4.address }}:4001
etcdConfig:
  address: {{ ansible_default_ipv4.address }}:4001
  peerAddress: {{ ansible_default_ipv4.address }}:7001
  peerServingInfo:
    bindAddress: 0.0.0.0:7001
    bindNetwork: tcp4
    certFile: etcd.server.crt
    clientCA: ca.crt
    keyFile: etcd.server.key
    namedCertificates: null
  servingInfo:
    bindAddress: 0.0.0.0:4001
    bindNetwork: tcp4
    certFile: etcd.server.crt
    clientCA: ca.crt
    keyFile: etcd.server.key
    namedCertificates: null
  storageDirectory: /var/lib/origin/openshift.local.etcd
etcdStorageConfig:
  kubernetesStoragePrefix: kubernetes.io
  kubernetesStorageVersion: v1
  openShiftStoragePrefix: openshift.io
  openShiftStorageVersion: v1
imageConfig:
  format: openshift/origin-${component}:${version}
  latest: false
imagePolicyConfig:
  disableScheduledImport: false
  maxImagesBulkImportedPerRepository: 5
  maxScheduledImageImportsPerMinute: 60
  scheduledImageImportMinimumIntervalSeconds: 900
kind: MasterConfig
kubeletClientInfo:
  ca: ca.crt
  certFile: master.kubelet-client.crt
  keyFile: master.kubelet-client.key
  port: 10250
kubernetesMasterConfig:
  admissionConfig:
    pluginConfig: null
  apiLevels: null
  apiServerArguments: null
  controllerArguments: null
  disabledAPIGroupVersions: {}
  masterCount: 1
  masterIP: {{ ansible_default_ipv4.address }}
  podEvictionTimeout: 5m
  proxyClientInfo:
    certFile: master.proxy-client.crt
    keyFile: master.proxy-client.key
  schedulerConfigFile: ""
  servicesNodePortRange: 30000-32767
  servicesSubnet: 172.30.0.0/16
  staticNodeNames:
    - {{ inventory_hostname }}
masterClients:
  externalKubernetesKubeConfig: ""
  openshiftLoopbackKubeConfig: openshift-master.kubeconfig
masterPublicURL: https://{{ ansible_default_ipv4.address }}:8443
networkConfig:
  clusterNetworkCIDR: 10.1.0.0/16
  hostSubnetLength: 8
  networkPluginName: ""
  serviceNetworkCIDR: 172.30.0.0/16
oauthConfig:
  assetPublicURL: https://{{ ansible_default_ipv4.address }}:8443/console/
  grantConfig:
    method: auto
  # Exactly one identity provider is emitted depending on
  # osbs_identity_provider ("request_header" or "htpasswd_provider").
  identityProviders:
{% if osbs_identity_provider == "request_header" %}
    - name: {{ osbs_identity_request.name }}
      challenge: {{ osbs_identity_request.challenge }}
      login: {{ osbs_identity_request.login }}
      provider:
        apiVersion: v1
        kind: RequestHeaderIdentityProvider
        clientCA: {{ osbs_proxy_ca_file | default('/etc/origin/master/ca.crt') }}
        headers:
          - X-Remote-User
{% endif %}
{% if osbs_identity_provider == "htpasswd_provider" %}
    - name: {{ osbs_identity_htpasswd.name }}
      challenge: {{ osbs_identity_htpasswd.challenge }}
      login: {{ osbs_identity_htpasswd.login }}
      provider:
        apiVersion: v1
        kind: HTPasswdPasswordIdentityProvider
        file: {{ osbs_identity_htpasswd.provider_file }}
{% endif %}
  masterCA: ca.crt
  masterPublicURL: https://{{ ansible_default_ipv4.address }}:8443
  masterURL: https://{{ ansible_default_ipv4.address }}:8443
  sessionConfig:
    sessionMaxAgeSeconds: 300
    sessionName: ssn
    sessionSecretsFile: ""
  templates: null
  tokenConfig:
    accessTokenMaxAgeSeconds: 86400
    authorizeTokenMaxAgeSeconds: 300
pauseControllers: false
policyConfig:
  bootstrapPolicyFile: policy.json
  openshiftInfrastructureNamespace: openshift-infra
  openshiftSharedResourcesNamespace: openshift
projectConfig:
  defaultNodeSelector: ""
  projectRequestMessage: ""
  projectRequestTemplate: ""
  securityAllocator:
    mcsAllocatorRange: s0:/2
    mcsLabelsPerProject: 5
    uidAllocatorRange: 1000000000-1999999999/10000
routingConfig:
  subdomain: router.default.svc.cluster.local
serviceAccountConfig:
  limitSecretReferences: false
  managedNames:
    - default
    - builder
    - deployer
  masterCA: ca.crt
  privateKeyFile: serviceaccounts.private.key
  publicKeyFiles:
    - serviceaccounts.public.key
servingInfo:
  bindAddress: 0.0.0.0:8443
  bindNetwork: tcp4
  certFile: master.server.crt
  clientCA: ca.crt
  keyFile: master.server.key
  maxRequestsInFlight: 500
  requestTimeoutSeconds: 3600
{% if osbs_named_certificates.enabled %}
  namedCertificates:
    - certFile: {{ osbs_named_certificates.cert_file }}
      keyFile: {{ osbs_named_certificates.key_file }}
      names:
{% for name in osbs_named_certificates.names %}
        - {{ name }}
{% endfor %}
{% else %}
  namedCertificates: null
{% endif %}

View file

@ -1,36 +0,0 @@
# OpenShift Origin node configuration, templated by ansible
# (role osbs-master). Nesting restored to the standard NodeConfig layout.
allowDisabledDocker: false
apiVersion: v1
authConfig:
  authenticationCacheSize: 1000
  authenticationCacheTTL: 5m
  authorizationCacheSize: 1000
  authorizationCacheTTL: 5m
dnsDomain: cluster.local
dnsIP: ""
dockerConfig:
  execHandlerName: native
imageConfig:
  format: openshift/origin-${component}:${version}
  latest: false
iptablesSyncPeriod: 5s
kind: NodeConfig
masterKubeConfig: node.kubeconfig
networkConfig:
  mtu: 1450
  networkPluginName: ""
nodeIP: ""
nodeName: localhost
podManifestConfig: null
servingInfo:
  bindAddress: 0.0.0.0:10250
  bindNetwork: tcp4
  certFile: server.crt
  clientCA: node-client-ca.crt
  keyFile: server.key
  namedCertificates: null
volumeDirectory: /var/lib/origin/openshift.local.volumes
# Kubelet image garbage-collection thresholds (percent disk usage).
kubeletArguments:
  image-gc-high-threshold:
    - "{{ osbs_image_gc_high_threshold }}"
  image-gc-low-threshold:
    - "{{ osbs_image_gc_low_threshold }}"

View file

@ -1,7 +0,0 @@
apiVersion: v1
kind: ResourceQuota
metadata:
  name: concurrentbuilds
spec:
  hard:
    # Caps the number of concurrently running pods, i.e. concurrent builds.
    pods: {{ osbs_master_max_pods }}

View file

@ -1,24 +0,0 @@
# RoleBinding template; `item` supplies name, role, and the user/group
# lists. Empty lists must render as explicit `[]` (not null), hence the
# equality checks below.
apiVersion: v1
kind: RoleBinding
metadata:
  name: {{ item.name }}
roleRef:
  name: {{ item.role }}
{% if item.users == [] %}
userNames: []
{% else %}
userNames:
{% for u in item.users %}
  - {{ u }}
{% endfor %}
{% endif %}
{% if item.groups == [] %}
groupNames: []
{% else %}
groupNames:
{% for g in item.groups %}
  - {{ g }}
{% endfor %}
{% endif %}

View file

@ -1,54 +0,0 @@
#!/usr/bin/python
# This file was installed by ansible from role osbs-master.
#
# Cron garbage collector for the image export directory: for every
# task-<id> subdirectory whose koji build has finished (state is neither
# BUILDING nor unknown), remove the exported docker tarball directory.
import os
import re
import shutil
import syslog

import koji

SERVER = '{{ osbs_export_koji_hub }}'
PARENT = '{{ osbs_export_dir }}'

session = koji.ClientSession(SERVER)
syslog.openlog('remove-finished-docker-tarball')

for task_dir in os.listdir(PARENT):
    if not task_dir.startswith('task-'):
        continue
    # Raw string: '\d' in a plain string is an invalid escape on python 3.
    match = re.match(r'^task-(\d+)$', task_dir)
    if not match:
        continue
    task_id = int(match.group(1))
    if not task_id:
        continue
    # getTaskInfo may return None for unknown tasks; treat that like a
    # task without a parent and skip it.
    task_info = session.getTaskInfo(task_id)
    if not task_info or task_info.get('parent') is None:
        continue
    parent_task_id = task_info['parent']
    builds = session.listBuilds(taskID=parent_task_id)
    for build in builds:
        build['state'] = koji.BUILD_STATES.get(build['state'], 'BADSTATE')
        if build['state'] in ('BUILDING', 'BADSTATE'):
            continue
        image_msg = '%s %s %s' % (task_id, build['build_id'], build['state'])
        full_path = os.path.join(PARENT, task_dir)
        try:
            syslog.syslog('Removing %s (%s)' % (full_path, image_msg))
            shutil.rmtree(full_path)
        # `except E as e` works on python 2.6+ and 3 (original `except E, e`
        # is a python-2-only syntax error on python 3).
        except OSError as exception:
            syslog.syslog('Failed to remove %s (%s): %s' % (full_path,
                                                            image_msg,
                                                            exception))

View file

@ -1,36 +0,0 @@
# Modify these options if you want to change the way the docker daemon runs
OPTIONS='--selinux-enabled'
DOCKER_CERT_PATH=/etc/docker
# If you want to add your own registry to be used for docker search and docker pull use the
# ADD_REGISTRY option to list a set of comma separated registries.
# Note the last registry added will be the first registry searched.
ADD_REGISTRY=''
# If you want to block registries from being used, use the
# BLOCK_REGISTRY option to list a set of comma separated registries, and uncommenting
# it. For example adding docker.io will stop users from downloading images from docker.io
# BLOCK_REGISTRY='--block-registry public'
# If you have a registry secured with https but do not have proper certs distributed, you can
# tell docker to not look for full authorization by adding the registry to the
# INSECURE_REGISTRY line and uncommenting it.
INSECURE_REGISTRY='
{%- for r in osbs_docker_insecure_registries %} --insecure-registry {{ r }}
{%- endfor %}'
# On an SELinux system, if you remove the --selinux-enabled option, you
# also need to turn on the docker_transition_unconfined boolean.
# setsebool -P docker_transition_unconfined 1
# Location used for temporary files, such as those created by
# docker load and build operations. Default is /var/lib/docker/tmp
# Can be overridden by setting the following environment variable.
# DOCKER_TMPDIR=/var/tmp
# Controls the /etc/cron.daily/docker-logrotate cron job status.
# To disable, uncomment the line below.
# LOGROTATE=false
GOTRACEBACK='crash'

View file

@ -1,9 +0,0 @@
OPTIONS="--loglevel=3"
CONFIG_FILE=/etc/origin/master/master-config.yaml
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
# NO_PROXY for your master
#NO_PROXY=master.example.com
#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT

View file

@ -1,9 +0,0 @@
OPTIONS="--loglevel={{osbs_openshift_loglevel}}"
CONFIG_FILE=/etc/origin/master/master-config.yaml
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
# NO_PROXY for your master
#NO_PROXY=master.example.com
#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT

View file

@ -1,22 +0,0 @@
OPTIONS="--loglevel=3"
# /etc/origin/node/ should contain the entire contents of
# /var/lib/origin.local.certificates/node-${node-fqdn} generated by
# running 'atomic-enterprise admin create-node-config' on your master
#
# If your node is running on a separate host you can rsync the contents
# rsync -a root@atomic-enterprise-master:/var/lib/origin/origin.local.certificates/node-`hostname`/ /etc/origin/node
CONFIG_FILE=/etc/origin/node/node-config.yaml
# The $DOCKER_NETWORK_OPTIONS variable is used by sdn plugins to set
# $DOCKER_NETWORK_OPTIONS variable in the /etc/sysconfig/docker-network
# Most plugins include their own defaults within the scripts
# TODO: More elegant solution like this
# https://github.com/coreos/flannel/blob/master/dist/mk-docker-opts.sh
# DOCKER_NETWORK_OPTIONS='-b=lbr0 --mtu=1450'
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
# NO_PROXY for your master
#NO_PROXY=master.example.com
#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT

View file

@ -1,22 +0,0 @@
OPTIONS="--loglevel={{osbs_openshift_loglevel}}"
# /etc/origin/node/ should contain the entire contents of
# /var/lib/origin.local.certificates/node-${node-fqdn} generated by
# running 'atomic-enterprise admin create-node-config' on your master
#
# If your node is running on a separate host you can rsync the contents
# rsync -a root@atomic-enterprise-master:/var/lib/origin/origin.local.certificates/node-`hostname`/ /etc/origin/node
CONFIG_FILE=/etc/origin/node/node-config.yaml
# The $DOCKER_NETWORK_OPTIONS variable is used by sdn plugins to set
# $DOCKER_NETWORK_OPTIONS variable in the /etc/sysconfig/docker-network
# Most plugins include their own defaults within the scripts
# TODO: More elegant solution like this
# https://github.com/coreos/flannel/blob/master/dist/mk-docker-opts.sh
# DOCKER_NETWORK_OPTIONS='-b=lbr0 --mtu=1450'
# Proxy configuration
# Origin uses standard HTTP_PROXY environment variables. Be sure to set
# NO_PROXY for your master
#NO_PROXY=master.example.com
#HTTP_PROXY=http://USER:PASSWORD@IPADDR:PORT
#HTTPS_PROXY=https://USER:PASSWORD@IPADDR:PORT

View file

@ -1,9 +0,0 @@
---
osbs_openshift_home: /var/lib/origin
osbs_openshift_port: 8443
osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig
osbs_environment:
KUBECONFIG: "{{ osbs_kubeconfig_path }}"
osbs_export_http_conf: /etc/httpd/conf.d/osbs_image_export.conf