diff --git a/files/openshift/Gemfile b/files/openshift/Gemfile deleted file mode 100644 index 1a8a78806e..0000000000 --- a/files/openshift/Gemfile +++ /dev/null @@ -1,50 +0,0 @@ -source 'http://rubygems.org' - -gem 'rails', '~> 3.0.13' -gem 'json' -gem 'parseconfig' -gem 'mongo' -gem 'xml-simple' -gem 'rack' -gem 'regin' -gem 'open4' -gem 'stickshift-common' -gem 'stickshift-controller' -gem 'rest-client' -gem 'systemu' - -# Add plugin gems here -gem 'gearchanger-mcollective-plugin' -gem 'uplift-bind-plugin' -gem 'swingshift-mongo-plugin' -gem 'dnsruby' - -# Bundle edge Rails instead: -# gem 'rails', :git => 'git://github.com/rails/rails.git' - -# Use unicorn as the web server -# gem 'unicorn' - -# Deploy with Capistrano -# gem 'capistrano' - -# To use debugger (ruby-debug for Ruby 1.8.7+, ruby-debug19 for Ruby 1.9.2+) -# gem 'ruby-debug' -# gem 'ruby-debug19', :require => 'ruby-debug' - -# Bundle the extra gems: -# gem 'bj' -# gem 'nokogiri' -# gem 'sqlite3-ruby', :require => 'sqlite3' -# gem 'aws-s3', :require => 'aws/s3' - -# Bundle gems for the local environment. Make sure to -# put test-only gems in this group so their generators -# and rake tasks are available in development mode: -group :development, :test do - # The require part from http://tinyurl.com/3pf68ho - gem 'mocha', :require => nil - gem 'cucumber' - gem 'rcov' -end - diff --git a/files/openshift/jenkins.repo b/files/openshift/jenkins.repo deleted file mode 100644 index 5ab0572477..0000000000 --- a/files/openshift/jenkins.repo +++ /dev/null @@ -1,5 +0,0 @@ -[jenkins] -name=Jenkins -baseurl=http://pkg.jenkins-ci.org/redhat -gpgcheck=1 -gpgkey=http://pkg.jenkins-ci.org/redhat/jenkins-ci.org.key diff --git a/files/openshift/mcollective-client.cfg b/files/openshift/mcollective-client.cfg deleted file mode 100644 index 3e8a44f598..0000000000 --- a/files/openshift/mcollective-client.cfg +++ /dev/null @@ -1,18 +0,0 @@ -topicprefix = /topic/ -main_collective = mcollective -collectives = mcollective -libdir = /usr/libexec/mcollective -loglevel = debug -logfile = /var/log/mcollective-client.log - -# Plugins -securityprovider = psk -plugin.psk = unset -connector = qpid -plugin.qpid.host=127.0.0.1 -plugin.qpid.secure=false -plugin.qpid.timeout=5 - -# Facts -factsource = yaml -plugin.yaml = /etc/mcollective/facts.yaml \ No newline at end of file diff --git a/files/openshift/mongo-acct.sh b/files/openshift/mongo-acct.sh deleted file mode 100644 index 15e8973e00..0000000000 --- a/files/openshift/mongo-acct.sh +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -mongo stickshift_broker_dev --eval 'db.addUser("stickshift", "mooo")' -mongo stickshift_broker_dev --eval 'db.auth_user.update({"_id":"admin"}, {"_id":"admin","user":"admin","password":"2a8462d93a13e51387a5e607cbd1139f"} , true)' -echo "Acct setup done on `date`" > /etc/mongo-acct-setup - - - \ No newline at end of file diff --git a/files/openshift/mongodb.conf b/files/openshift/mongodb.conf deleted file mode 100644 index b983014e4c..0000000000 --- a/files/openshift/mongodb.conf +++ /dev/null @@ -1,91 +0,0 @@ -## -### Basic Defaults -## -bind_ip = 127.0.0.1 -port = 27017 -fork = true -pidfilepath = /var/run/mongodb/mongodb.pid -logpath = /var/log/mongodb/mongodb.log -dbpath =/var/lib/mongodb -journal = true - -# Enables periodic logging of CPU utilization and I/O wait -#cpu = true - -# Turn on/off security. Off is currently the default -#noauth = true -auth = true - -# Verbose logging output. 
-#verbose = true - -# Inspect all client data for validity on receipt (useful for -# developing drivers) -#objcheck = true - -# Enable db quota management -#quota = true - -# Set oplogging level where n is -# 0=off (default) -# 1=W -# 2=R -# 3=both -# 7=W+some reads -#oplog = 0 - -# Diagnostic/debugging option -#nocursors = true - -# Ignore query hints -#nohints = true - -# Disable the HTTP interface (Defaults to port+1000). -nohttpinterface = true - -# Turns off server-side scripting. This will result in greatly limited -# functionality -#noscripting = true - -# Turns off table scans. Any query that would do a table scan fails. -#notablescan = true - -# Disable data file preallocation. -#noprealloc = true - -# Specify .ns file size for new databases. -# nssize = - -# Accout token for Mongo monitoring server. -#mms-token = - -# Server name for Mongo monitoring server. -#mms-name = - -# Ping interval for Mongo monitoring server. -#mms-interval = - -# Replication Options - -# in replicated mongo databases, specify here whether this is a slave or master -#slave = true -#source = master.example.com -# Slave only: specify a single database to replicate -#only = master.example.com -# or -#master = true -#source = slave.example.com - -# Address of a server to pair with. -#pairwith = -# Address of arbiter server. -#arbiter = -# Automatically resync if slave data is stale -#autoresync -# Custom size for replication operation log. -#oplogSize = -# Size limit for in-memory storage of op ids. -#opIdMem = - -# smallfiles -smallfiles = true \ No newline at end of file diff --git a/files/openshift/openshift-el6.repo b/files/openshift/openshift-el6.repo deleted file mode 100644 index 87e34a7074..0000000000 --- a/files/openshift/openshift-el6.repo +++ /dev/null @@ -1,5 +0,0 @@ -[openshift] -name=OpenShift -baseurl=http://mirror.openshift.com/pub/crankcase/rhel-6/x86_64/ -enabled=1 -gpgcheck=0 diff --git a/files/openshift/openshift.repo b/files/openshift/openshift.repo deleted file mode 100644 index 7cc3a12366..0000000000 --- a/files/openshift/openshift.repo +++ /dev/null @@ -1,14 +0,0 @@ -{% if inventory_hostname.startswith('osbs') %} -#TODO : remove this after freeze -[rhel7-openshift-3.11] -name = rhel7 openshift 3.11 $basearch -baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.11-rpms/ -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -enabled=0 -{% elif inventory_hostname.startswith('os') %} -[rhel7-openshift-3.11] -name = rhel7 openshift 3.11 $basearch -baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.11-rpms/ -gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -enabled=0 -{% endif %} diff --git a/files/openshift/qpidd.conf b/files/openshift/qpidd.conf deleted file mode 100644 index be2e0b862e..0000000000 --- a/files/openshift/qpidd.conf +++ /dev/null @@ -1,3 +0,0 @@ -cluster-mechanism=DIGEST-MD5 ANONYMOUS -auth=no - diff --git a/files/osbs/buildroot-Dockerfile-production.j2 b/files/osbs/buildroot-Dockerfile-production.j2 deleted file mode 100644 index 147970bf98..0000000000 --- a/files/osbs/buildroot-Dockerfile-production.j2 +++ /dev/null @@ -1,13 +0,0 @@ -FROM registry.fedoraproject.org/fedora:37 -RUN dnf -y install --refresh dnf-plugins-core && dnf -y install moby-engine git python3-setuptools e2fsprogs koji osbs-client\ - python3-osbs-client gssproxy fedpkg python3-docker-squash atomic-reactor python3-atomic-reactor* go-md2man python3-productmd sed\ - python3-gobject python3-libmodulemd 
python3-pdc-client ostree flatpak-module-tools flatpak skopeo && dnf clean all -ADD ./orchestrator_customize.json /usr/share/osbs/orchestrator_customize.json -ADD ./worker_customize.json /usr/share/osbs/worker_customize.json -ADD ./krb5.conf /etc -RUN printf '[libdefaults]\n default_ccache_name = DIR:/tmp/ccache_%%{uid}' >/etc/krb5.conf.d/ccache.conf -ADD ./krb5.osbs_{{osbs_url}}.keytab /etc/ -RUN sed -i -e 's|/var/lib/rpm|/usr/lib/sysimage/rpm|' /usr/lib/python*/site-packages/atomic_reactor/plugins/post_rpmqa.py -ADD ./ca.crt /etc/pki/ca-trust/source/anchors/osbs.ca.crt -RUN update-ca-trust -CMD ["python3", "/usr/bin/atomic-reactor", "--verbose", "inside-build"] diff --git a/files/osbs/buildroot-Dockerfile-staging.j2 b/files/osbs/buildroot-Dockerfile-staging.j2 deleted file mode 100644 index bc156784fb..0000000000 --- a/files/osbs/buildroot-Dockerfile-staging.j2 +++ /dev/null @@ -1,12 +0,0 @@ -FROM registry.fedoraproject.org/fedora:37 -RUN dnf -y install --refresh dnf-plugins-core && dnf -y install moby-engine git python3-setuptools e2fsprogs koji osbs-client\ - python3-osbs-client gssproxy fedpkg python3-docker-squash atomic-reactor python3-atomic-reactor* go-md2man python3-productmd\ - python3-gobject python3-libmodulemd python3-pdc-client ostree flatpak-module-tools flatpak skopeo && dnf clean all -ADD ./orchestrator_customize.json /usr/share/osbs/orchestrator_customize.json -ADD ./worker_customize.json /usr/share/osbs/worker_customize.json -ADD ./krb5.conf /etc -RUN printf '[libdefaults]\n default_ccache_name = DIR:/tmp/ccache_%%{uid}' >/etc/krb5.conf.d/ccache.conf -ADD ./krb5.osbs_{{osbs_url}}.keytab /etc/ -ADD ./ca.crt /etc/pki/ca-trust/source/anchors/osbs.ca.crt -RUN update-ca-trust -CMD ["python3", "/usr/bin/atomic-reactor", "--verbose", "inside-build"] diff --git a/files/osbs/cleanup-docker-storage b/files/osbs/cleanup-docker-storage deleted file mode 100644 index 0419b4e6f9..0000000000 --- a/files/osbs/cleanup-docker-storage +++ /dev/null @@ -1,5 +0,0 @@ -SHELL=/bin/bash -MAILTO=maxamillion@fedoraproject.org - -5 0 * * * root for i in $(docker ps -a | awk '/Exited/ { print $1 }'); do docker rm $i; done && for i in $(docker images -q -f 'dangling=true'); do docker rmi $i; done - diff --git a/files/osbs/cleanup-old-osbs-builds b/files/osbs/cleanup-old-osbs-builds deleted file mode 100644 index 7fed9eb07b..0000000000 --- a/files/osbs/cleanup-old-osbs-builds +++ /dev/null @@ -1,3 +0,0 @@ -SHELL=/bin/bash - -0 0 * * * root oc adm prune builds --keep-complete=0 --keep-failed=0 --keep-younger-than=72h0m0s --orphans --confirm diff --git a/files/osbs/docker-storage-setup b/files/osbs/docker-storage-setup deleted file mode 100644 index 5959fe01e9..0000000000 --- a/files/osbs/docker-storage-setup +++ /dev/null @@ -1 +0,0 @@ -VG="vg-docker" \ No newline at end of file diff --git a/files/osbs/docker-storage-setup.staging b/files/osbs/docker-storage-setup.staging deleted file mode 100644 index e29e2a65b0..0000000000 --- a/files/osbs/docker-storage-setup.staging +++ /dev/null @@ -1 +0,0 @@ -STORAGE_DRIVER="overlay2" \ No newline at end of file diff --git a/files/osbs/docker.custom.service b/files/osbs/docker.custom.service deleted file mode 100644 index 010b78a4f1..0000000000 --- a/files/osbs/docker.custom.service +++ /dev/null @@ -1,8 +0,0 @@ -# Ansible managed - -[Unit] -Wants=iptables.service -After=iptables.service - -[Service] -ExecStartPost=/usr/local/bin/fix-docker-iptables \ No newline at end of file diff --git a/files/osbs/docker.firewall.service b/files/osbs/docker.firewall.service 
deleted file mode 100644 index 3000177f64..0000000000 --- a/files/osbs/docker.firewall.service +++ /dev/null @@ -1,2 +0,0 @@ -[Service] -ExecStartPost=/usr/local/bin/fix-docker-iptables diff --git a/files/osbs/dockercfg-production-secret.j2 b/files/osbs/dockercfg-production-secret.j2 deleted file mode 100644 index 0a3abb4126..0000000000 --- a/files/osbs/dockercfg-production-secret.j2 +++ /dev/null @@ -1 +0,0 @@ -{"auths":{"candidate-registry.fedoraproject.org":{"username":"{{candidate_registry_osbs_prod_username}}","password":"{{candidate_registry_osbs_prod_password}}","email":"","auth":"{{ auth_info_prod | b64encode }}"}}} diff --git a/files/osbs/dockercfg-staging-secret.j2 b/files/osbs/dockercfg-staging-secret.j2 deleted file mode 100644 index 8d42c39326..0000000000 --- a/files/osbs/dockercfg-staging-secret.j2 +++ /dev/null @@ -1 +0,0 @@ -{"auths":{"candidate-registry.stg.fedoraproject.org":{"username":"{{candidate_registry_osbs_stg_username}}","password":"{{candidate_registry_osbs_stg_password}}","email":"","auth":"{{ auth_info_stg | b64encode }}"}}} \ No newline at end of file diff --git a/files/osbs/fedora-dnsmasq-master.conf.production b/files/osbs/fedora-dnsmasq-master.conf.production deleted file mode 100644 index 36fba33685..0000000000 --- a/files/osbs/fedora-dnsmasq-master.conf.production +++ /dev/null @@ -1,693 +0,0 @@ -# Configuration file for dnsmasq. -# -# Format is one option per line, legal options are the same -# as the long options legal on the command line. See -# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details. - -# Listen on this specific port instead of the standard DNS port -# (53). Setting this to zero completely disables DNS function, -# leaving only DHCP and/or TFTP. -#port=5353 - -# The following two options make you a better netizen, since they -# tell dnsmasq to filter out queries which the public DNS cannot -# answer, and which load the servers (especially the root servers) -# unnecessarily. If you have a dial-on-demand link they also stop -# these requests from bringing up the link unnecessarily. - -# Never forward plain names (without a dot or domain part) -#domain-needed -# Never forward addresses in the non-routed address spaces. -#bogus-priv - -# Uncomment these to enable DNSSEC validation and caching: -# (Requires dnsmasq to be built with DNSSEC option.) -#conf-file=/usr/share/dnsmasq/trust-anchors.conf -#dnssec - -# Replies which are not DNSSEC signed may be legitimate, because the domain -# is unsigned, or may be forgeries. Setting this option tells dnsmasq to -# check that an unsigned reply is OK, by finding a secure proof that a DS -# record somewhere between the root and the domain does not exist. -# The cost of setting this is that even queries in unsigned domains will need -# one or more extra DNS queries to verify. -#dnssec-check-unsigned - -# Uncomment this to filter useless windows-originated DNS requests -# which can trigger dial-on-demand links needlessly. -# Note that (amongst other things) this blocks all SRV requests, -# so don't use it if you use eg Kerberos, SIP, XMMP or Google-talk. -# This option only affects forwarding, SRV records originating for -# dnsmasq (via srv-host= lines) are not suppressed by it. -#filterwin2k - -# Change this line if you want dns to get its upstream servers from -# somewhere other that /etc/resolv.conf -#resolv-file= - -# By default, dnsmasq will send queries to any of the upstream -# servers it knows about and tries to favour servers to are known -# to be up. 
Uncommenting this forces dnsmasq to try each query -# with each server strictly in the order they appear in -# /etc/resolv.conf -#strict-order - -# If you don't want dnsmasq to read /etc/resolv.conf or any other -# file, getting its servers from this file instead (see below), then -# uncomment this. -#no-resolv - -# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv -# files for changes and re-read them then uncomment this. -#no-poll - -# Add other name servers here, with domain specs if they are for -# non-public domains. -#server=/localnet/192.168.0.1 - -# Example of routing PTR queries to nameservers: this will send all -# address->name queries for 192.168.3/24 to nameserver 10.1.2.3 -#server=/3.168.192.in-addr.arpa/10.1.2.3 - -# Add local-only domains here, queries in these domains are answered -# from /etc/hosts or DHCP only. -#local=/localnet/ - -# Add domains which you want to force to an IP address here. -# The example below send any host in double-click.net to a local -# web-server. -#address=/double-click.net/127.0.0.1 - -# --address (and --server) work with IPv6 addresses too. -#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83 - -# Add the IPs of all queries to yahoo.com, google.com, and their -# subdomains to the vpn and search ipsets: -#ipset=/yahoo.com/google.com/vpn,search - -# You can control how dnsmasq talks to a server: this forces -# queries to 10.1.2.3 to be routed via eth1 -# server=10.1.2.3@eth1 - -# and this sets the source (ie local) address used to talk to -# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that -# IP on the machine, obviously). -# server=10.1.2.3@192.168.1.1#55 - -# If you want dnsmasq to change uid and gid to something other -# than the default, edit the following lines. -user=dnsmasq -group=dnsmasq - -# If you want dnsmasq to listen for DHCP and DNS requests only on -# specified interfaces (and the loopback) give the name of the -# interface (eg eth0) here. -# Repeat the line for more than one interface. -#interface= -# Listen only on localhost by default -interface=lo -# Or you can specify which interface _not_ to listen on -#except-interface= -# Or which to listen on by address (remember to include 127.0.0.1 if -# you use this.) -#listen-address= -# If you want dnsmasq to provide only DNS service on an interface, -# configure it as shown above, and then use the following line to -# disable DHCP and TFTP on it. -#no-dhcp-interface= - -# Serve DNS and DHCP only to networks directly connected to this machine. -# Any interface= line will override it. -#local-service - -# On systems which support it, dnsmasq binds the wildcard address, -# even when it is listening on only some interfaces. It then discards -# requests that it shouldn't reply to. This has the advantage of -# working even when interfaces come and go and change address. If you -# want dnsmasq to really bind only the interfaces it is listening on, -# uncomment this option. About the only time you may need this is when -# running another nameserver on the same machine. -# -# To listen only on localhost and do not receive packets on other -# interfaces, bind only to lo device. Comment out to bind on single -# wildcard socket. -bind-interfaces - -# If you don't want dnsmasq to read /etc/hosts, uncomment the -# following line. -#no-hosts -# or if you want it to read another file, as well as /etc/hosts, use -# this. 
-#addn-hosts=/etc/banner_add_hosts - -# Set this (and domain: see below) if you want to have a domain -# automatically added to simple names in a hosts-file. -#expand-hosts - -# Set the domain for dnsmasq. this is optional, but if it is set, it -# does the following things. -# 1) Allows DHCP hosts to have fully qualified domain names, as long -# as the domain part matches this setting. -# 2) Sets the "domain" DHCP option thereby potentially setting the -# domain of all systems configured by DHCP -# 3) Provides the domain part for "expand-hosts" -#domain=thekelleys.org.uk - -# Set a different domain for a particular subnet -#domain=wireless.thekelleys.org.uk,192.168.2.0/24 - -# Same idea, but range rather then subnet -#domain=reserved.thekelleys.org.uk,192.68.3.100,192.168.3.200 - -# Uncomment this to enable the integrated DHCP server, you need -# to supply the range of addresses available for lease and optionally -# a lease time. If you have more than one network, you will need to -# repeat this for each network on which you want to supply DHCP -# service. -#dhcp-range=192.168.0.50,192.168.0.150,12h - -# This is an example of a DHCP range where the netmask is given. This -# is needed for networks we reach the dnsmasq DHCP server via a relay -# agent. If you don't know what a DHCP relay agent is, you probably -# don't need to worry about this. -#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h - -# This is an example of a DHCP range which sets a tag, so that -# some DHCP options may be set only for this network. -#dhcp-range=set:red,192.168.0.50,192.168.0.150 - -# Use this DHCP range only when the tag "green" is set. -#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h - -# Specify a subnet which can't be used for dynamic address allocation, -# is available for hosts with matching --dhcp-host lines. Note that -# dhcp-host declarations will be ignored unless there is a dhcp-range -# of some type for the subnet in question. -# In this case the netmask is implied (it comes from the network -# configuration on the machine running dnsmasq) it is possible to give -# an explicit netmask instead. -#dhcp-range=192.168.0.0,static - -# Enable DHCPv6. Note that the prefix-length does not need to be specified -# and defaults to 64 if missing/ -#dhcp-range=1234::2, 1234::500, 64, 12h - -# Do Router Advertisements, BUT NOT DHCP for this subnet. -#dhcp-range=1234::, ra-only - -# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and -# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack -# hosts. Use the DHCPv4 lease to derive the name, network segment and -# MAC address and assume that the host will also have an -# IPv6 address calculated using the SLAAC algorithm. -#dhcp-range=1234::, ra-names - -# Do Router Advertisements, BUT NOT DHCP for this subnet. -# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.) -#dhcp-range=1234::, ra-only, 48h - -# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA -# so that clients can use SLAAC addresses as well as DHCP ones. -#dhcp-range=1234::2, 1234::500, slaac - -# Do Router Advertisements and stateless DHCP for this subnet. Clients will -# not get addresses from DHCP, but they will get other configuration information. -# They will use SLAAC for addresses. -#dhcp-range=1234::, ra-stateless - -# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses -# from DHCPv4 leases. 
-#dhcp-range=1234::, ra-stateless, ra-names - -# Do router advertisements for all subnets where we're doing DHCPv6 -# Unless overridden by ra-stateless, ra-names, et al, the router -# advertisements will have the M and O bits set, so that the clients -# get addresses and configuration from DHCPv6, and the A bit reset, so the -# clients don't use SLAAC addresses. -#enable-ra - -# Supply parameters for specified hosts using DHCP. There are lots -# of valid alternatives, so we will give examples of each. Note that -# IP addresses DO NOT have to be in the range given above, they just -# need to be on the same network. The order of the parameters in these -# do not matter, it's permissible to give name, address and MAC in any -# order. - -# Always allocate the host with Ethernet address 11:22:33:44:55:66 -# The IP address 192.168.0.60 -#dhcp-host=11:22:33:44:55:66,192.168.0.60 - -# Always set the name of the host with hardware address -# 11:22:33:44:55:66 to be "fred" -#dhcp-host=11:22:33:44:55:66,fred - -# Always give the host with Ethernet address 11:22:33:44:55:66 -# the name fred and IP address 192.168.0.60 and lease time 45 minutes -#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m - -# Give a host with Ethernet address 11:22:33:44:55:66 or -# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume -# that these two Ethernet interfaces will never be in use at the same -# time, and give the IP address to the second, even if it is already -# in use by the first. Useful for laptops with wired and wireless -# addresses. -#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60 - -# Give the machine which says its name is "bert" IP address -# 192.168.0.70 and an infinite lease -#dhcp-host=bert,192.168.0.70,infinite - -# Always give the host with client identifier 01:02:02:04 -# the IP address 192.168.0.60 -#dhcp-host=id:01:02:02:04,192.168.0.60 - -# Always give the InfiniBand interface with hardware address -# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the -# ip address 192.168.0.61. The client id is derived from the prefix -# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of -# hex digits of the hardware address. -#dhcp-host=id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61 - -# Always give the host with client identifier "marjorie" -# the IP address 192.168.0.60 -#dhcp-host=id:marjorie,192.168.0.60 - -# Enable the address given for "judge" in /etc/hosts -# to be given to a machine presenting the name "judge" when -# it asks for a DHCP lease. -#dhcp-host=judge - -# Never offer DHCP service to a machine whose Ethernet -# address is 11:22:33:44:55:66 -#dhcp-host=11:22:33:44:55:66,ignore - -# Ignore any client-id presented by the machine with Ethernet -# address 11:22:33:44:55:66. This is useful to prevent a machine -# being treated differently when running under different OS's or -# between PXE boot and OS boot. -#dhcp-host=11:22:33:44:55:66,id:* - -# Send extra options which are tagged as "red" to -# the machine with Ethernet address 11:22:33:44:55:66 -#dhcp-host=11:22:33:44:55:66,set:red - -# Send extra options which are tagged as "red" to -# any machine with Ethernet address starting 11:22:33: -#dhcp-host=11:22:33:*:*:*,set:red - -# Give a fixed IPv6 address and name to client with -# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2 -# Note the MAC addresses CANNOT be used to identify DHCPv6 clients. -# Note also that the [] around the IPv6 address are obligatory. 
-#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5] - -# Ignore any clients which are not specified in dhcp-host lines -# or /etc/ethers. Equivalent to ISC "deny unknown-clients". -# This relies on the special "known" tag which is set when -# a host is matched. -#dhcp-ignore=tag:!known - -# Send extra options which are tagged as "red" to any machine whose -# DHCP vendorclass string includes the substring "Linux" -#dhcp-vendorclass=set:red,Linux - -# Send extra options which are tagged as "red" to any machine one -# of whose DHCP userclass strings includes the substring "accounts" -#dhcp-userclass=set:red,accounts - -# Send extra options which are tagged as "red" to any machine whose -# MAC address matches the pattern. -#dhcp-mac=set:red,00:60:8C:*:*:* - -# If this line is uncommented, dnsmasq will read /etc/ethers and act -# on the ethernet-address/IP pairs found there just as if they had -# been given as --dhcp-host options. Useful if you keep -# MAC-address/host mappings there for other purposes. -#read-ethers - -# Send options to hosts which ask for a DHCP lease. -# See RFC 2132 for details of available options. -# Common options can be given to dnsmasq by name: -# run "dnsmasq --help dhcp" to get a list. -# Note that all the common settings, such as netmask and -# broadcast address, DNS server and default route, are given -# sane defaults by dnsmasq. You very likely will not need -# any dhcp-options. If you use Windows clients and Samba, there -# are some options which are recommended, they are detailed at the -# end of this section. - -# Override the default route supplied by dnsmasq, which assumes the -# router is the same machine as the one running dnsmasq. -#dhcp-option=3,1.2.3.4 - -# Do the same thing, but using the option name -#dhcp-option=option:router,1.2.3.4 - -# Override the default route supplied by dnsmasq and send no default -# route at all. Note that this only works for the options sent by -# default (1, 3, 6, 12, 28) the same line will send a zero-length option -# for all other option numbers. -#dhcp-option=3 - -# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5 -#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5 - -# Send DHCPv6 option. Note [] around IPv6 addresses. -#dhcp-option=option6:dns-server,[1234::77],[1234::88] - -# Send DHCPv6 option for namservers as the machine running -# dnsmasq and another. -#dhcp-option=option6:dns-server,[::],[1234::88] - -# Ask client to poll for option changes every six hours. (RFC4242) -#dhcp-option=option6:information-refresh-time,6h - -# Set option 58 client renewal time (T1). Defaults to half of the -# lease time if not specified. (RFC2132) -#dhcp-option=option:T1,1m - -# Set option 59 rebinding time (T2). Defaults to 7/8 of the -# lease time if not specified. (RFC2132) -#dhcp-option=option:T2,2m - -# Set the NTP time server address to be the same machine as -# is running dnsmasq -#dhcp-option=42,0.0.0.0 - -# Set the NIS domain name to "welly" -#dhcp-option=40,welly - -# Set the default time-to-live to 50 -#dhcp-option=23,50 - -# Set the "all subnets are local" flag -#dhcp-option=27,1 - -# Send the etherboot magic flag and then etherboot options (a string). -#dhcp-option=128,e4:45:74:68:00:00 -#dhcp-option=129,NIC=eepro100 - -# Specify an option which will only be sent to the "red" network -# (see dhcp-range for the declaration of the "red" network) -# Note that the tag: part must precede the option: part. 
-#dhcp-option = tag:red, option:ntp-server, 192.168.1.1 - -# The following DHCP options set up dnsmasq in the same way as is specified -# for the ISC dhcpcd in -# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt -# adapted for a typical dnsmasq installation where the host running -# dnsmasq is also the host running samba. -# you may want to uncomment some or all of them if you use -# Windows clients and Samba. -#dhcp-option=19,0 # option ip-forwarding off -#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s) -#dhcp-option=45,0.0.0.0 # netbios datagram distribution server -#dhcp-option=46,8 # netbios node type - -# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave. -#dhcp-option=252,"\n" - -# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client -# probably doesn't support this...... -#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com - -# Send RFC-3442 classless static routes (note the netmask encoding) -#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8 - -# Send vendor-class specific options encapsulated in DHCP option 43. -# The meaning of the options is defined by the vendor-class so -# options are sent only when the client supplied vendor class -# matches the class given here. (A substring match is OK, so "MSFT" -# matches "MSFT" and "MSFT 5.0"). This example sets the -# mtftp address to 0.0.0.0 for PXEClients. -#dhcp-option=vendor:PXEClient,1,0.0.0.0 - -# Send microsoft-specific option to tell windows to release the DHCP lease -# when it shuts down. Note the "i" flag, to tell dnsmasq to send the -# value as a four-byte integer - that's what microsoft wants. See -# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true -#dhcp-option=vendor:MSFT,2,1i - -# Send the Encapsulated-vendor-class ID needed by some configurations of -# Etherboot to allow is to recognise the DHCP server. -#dhcp-option=vendor:Etherboot,60,"Etherboot" - -# Send options to PXELinux. Note that we need to send the options even -# though they don't appear in the parameter request list, so we need -# to use dhcp-option-force here. -# See http://syslinux.zytor.com/pxe.php#special for details. -# Magic number - needed before anything else is recognised -#dhcp-option-force=208,f1:00:74:7e -# Configuration file name -#dhcp-option-force=209,configs/common -# Path prefix -#dhcp-option-force=210,/tftpboot/pxelinux/files/ -# Reboot time. (Note 'i' to send 32-bit value) -#dhcp-option-force=211,30i - -# Set the boot filename for netboot/PXE. You will only need -# this if you want to boot machines over the network and you will need -# a TFTP server; either dnsmasq's built-in TFTP server or an -# external one. (See below for how to enable the TFTP server.) -#dhcp-boot=pxelinux.0 - -# The same as above, but use custom tftp-server instead machine running dnsmasq -#dhcp-boot=pxelinux,server.name,192.168.1.100 - -# Boot for iPXE. The idea is to send two different -# filenames, the first loads iPXE, and the second tells iPXE what to -# load. The dhcp-match sets the ipxe tag for requests from iPXE. -#dhcp-boot=undionly.kpxe -#dhcp-match=set:ipxe,175 # iPXE sends a 175 option. -#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php - -# Encapsulated options for iPXE. 
All the options are -# encapsulated within option 175 -#dhcp-option=encap:175, 1, 5b # priority code -#dhcp-option=encap:175, 176, 1b # no-proxydhcp -#dhcp-option=encap:175, 177, string # bus-id -#dhcp-option=encap:175, 189, 1b # BIOS drive code -#dhcp-option=encap:175, 190, user # iSCSI username -#dhcp-option=encap:175, 191, pass # iSCSI password - -# Test for the architecture of a netboot client. PXE clients are -# supposed to send their architecture as option 93. (See RFC 4578) -#dhcp-match=peecees, option:client-arch, 0 #x86-32 -#dhcp-match=itanics, option:client-arch, 2 #IA64 -#dhcp-match=hammers, option:client-arch, 6 #x86-64 -#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64 - -# Do real PXE, rather than just booting a single file, this is an -# alternative to dhcp-boot. -#pxe-prompt="What system shall I netboot?" -# or with timeout before first available action is taken: -#pxe-prompt="Press F8 for menu.", 60 - -# Available boot services. for PXE. -#pxe-service=x86PC, "Boot from local disk" - -# Loads /pxelinux.0 from dnsmasq TFTP server. -#pxe-service=x86PC, "Install Linux", pxelinux - -# Loads /pxelinux.0 from TFTP server at 1.2.3.4. -# Beware this fails on old PXE ROMS. -#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4 - -# Use bootserver on network, found my multicast or broadcast. -#pxe-service=x86PC, "Install windows from RIS server", 1 - -# Use bootserver at a known IP address. -#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4 - -# If you have multicast-FTP available, -# information for that can be passed in a similar way using options 1 -# to 5. See page 19 of -# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf - - -# Enable dnsmasq's built-in TFTP server -#enable-tftp - -# Set the root directory for files available via FTP. -#tftp-root=/var/ftpd - -# Do not abort if the tftp-root is unavailable -#tftp-no-fail - -# Make the TFTP server more secure: with this set, only files owned by -# the user dnsmasq is running as will be send over the net. -#tftp-secure - -# This option stops dnsmasq from negotiating a larger blocksize for TFTP -# transfers. It will slow things down, but may rescue some broken TFTP -# clients. -#tftp-no-blocksize - -# Set the boot file name only when the "red" tag is set. -#dhcp-boot=tag:red,pxelinux.red-net - -# An example of dhcp-boot with an external TFTP server: the name and IP -# address of the server are given after the filename. -# Can fail with old PXE ROMS. Overridden by --pxe-service. -#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3 - -# If there are multiple external tftp servers having a same name -# (using /etc/hosts) then that name can be specified as the -# tftp_servername (the third option to dhcp-boot) and in that -# case dnsmasq resolves this name and returns the resultant IP -# addresses in round robin fashion. This facility can be used to -# load balance the tftp load among a set of servers. -#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name - -# Set the limit on DHCP leases, the default is 150 -#dhcp-lease-max=150 - -# The DHCP server needs somewhere on disk to keep its lease database. -# This defaults to a sane location, but if you want to change it, use -# the line below. -#dhcp-leasefile=/var/lib/dnsmasq/dnsmasq.leases - -# Set the DHCP server to authoritative mode. In this mode it will barge in -# and take over the lease for any client which broadcasts on the network, -# whether it has a record of the lease or not. 
This avoids long timeouts -# when a machine wakes up on a new network. DO NOT enable this if there's -# the slightest chance that you might end up accidentally configuring a DHCP -# server for your campus/company accidentally. The ISC server uses -# the same option, and this URL provides more information: -# http://www.isc.org/files/auth.html -#dhcp-authoritative - -# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039. -# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit -# option with a DHCPACK including a Rapid Commit option and fully committed address -# and configuration information. This must only be enabled if either the server is -# the only server for the subnet, or multiple servers are present and they each -# commit a binding for all clients. -#dhcp-rapid-commit - -# Run an executable when a DHCP lease is created or destroyed. -# The arguments sent to the script are "add" or "del", -# then the MAC address, the IP address and finally the hostname -# if there is one. -#dhcp-script=/bin/echo - -# Set the cachesize here. -#cache-size=150 - -# If you want to disable negative caching, uncomment this. -#no-negcache - -# Normally responses which come from /etc/hosts and the DHCP lease -# file have Time-To-Live set as zero, which conventionally means -# do not cache further. If you are happy to trade lower load on the -# server for potentially stale date, you can set a time-to-live (in -# seconds) here. -#local-ttl= - -# If you want dnsmasq to detect attempts by Verisign to send queries -# to unregistered .com and .net hosts to its sitefinder service and -# have dnsmasq instead return the correct NXDOMAIN response, uncomment -# this line. You can add similar lines to do the same for other -# registries which have implemented wildcard A records. -#bogus-nxdomain=64.94.110.11 - -# If you want to fix up DNS results from upstream servers, use the -# alias option. This only works for IPv4. -# This alias makes a result of 1.2.3.4 appear as 5.6.7.8 -#alias=1.2.3.4,5.6.7.8 -# and this maps 1.2.3.x to 5.6.7.x -#alias=1.2.3.0,5.6.7.0,255.255.255.0 -# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40 -#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0 - -# Change these lines if you want dnsmasq to serve MX records. - -# Return an MX record named "maildomain.com" with target -# servermachine.com and preference 50 -#mx-host=maildomain.com,servermachine.com,50 - -# Set the default target for MX records created using the localmx option. -#mx-target=servermachine.com - -# Return an MX record pointing to the mx-target for all local -# machines. -#localmx - -# Return an MX record pointing to itself for all local machines. -#selfmx - -# Change the following lines if you want dnsmasq to serve SRV -# records. These are useful if you want to serve ldap requests for -# Active Directory and other windows-originated DNS requests. -# See RFC 2782. -# You may add multiple srv-host lines. -# The fields are ,,,, -# If the domain part if missing from the name (so that is just has the -# service and protocol sections) then the domain given by the domain= -# config option is used. (Note that expand-hosts does not need to be -# set for this to work.) 
- -# A SRV record sending LDAP for the example.com domain to -# ldapserver.example.com port 389 -#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389 - -# A SRV record sending LDAP for the example.com domain to -# ldapserver.example.com port 389 (using domain=) -#domain=example.com -#srv-host=_ldap._tcp,ldapserver.example.com,389 - -# Two SRV records for LDAP, each with different priorities -#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1 -#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2 - -# A SRV record indicating that there is no LDAP server for the domain -# example.com -#srv-host=_ldap._tcp.example.com - -# The following line shows how to make dnsmasq serve an arbitrary PTR -# record. This is useful for DNS-SD. (Note that the -# domain-name expansion done for SRV records _does_not -# occur for PTR records.) -#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services" - -# Change the following lines to enable dnsmasq to serve TXT records. -# These are used for things like SPF and zeroconf. (Note that the -# domain-name expansion done for SRV records _does_not -# occur for TXT records.) - -#Example SPF. -#txt-record=example.com,"v=spf1 a -all" - -#Example zeroconf -#txt-record=_http._tcp.example.com,name=value,paper=A4 - -# Provide an alias for a "local" DNS name. Note that this _only_ works -# for targets which are names from DHCP or /etc/hosts. Give host -# "bert" another name, bertrand -#cname=bertand,bert - -# For debugging purposes, log each DNS query as it passes through -# dnsmasq. -#log-queries - -# Log lots of extra information about DHCP transactions. -#log-dhcp - -# Include another lot of configuration options. -#conf-file=/etc/dnsmasq.more.conf -#conf-dir=/etc/dnsmasq.d - -# Include all the files in a directory except those ending in .bak -#conf-dir=/etc/dnsmasq.d,.bak - -# Include all files in a directory which end in .conf -#conf-dir=/etc/dnsmasq.d/,*.conf - -# Include all files in /etc/dnsmasq.d except RPM backup files -conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig - -# If a DHCP client claims that its name is "wpad", ignore that. -# This fixes a security hole. see CERT Vulnerability VU#598349 -#dhcp-name-match=set:wpad-ignore,wpad -#dhcp-ignore-names=tag:wpad-ignore - diff --git a/files/osbs/fedora-dnsmasq-master.conf.staging b/files/osbs/fedora-dnsmasq-master.conf.staging deleted file mode 100644 index 36fba33685..0000000000 --- a/files/osbs/fedora-dnsmasq-master.conf.staging +++ /dev/null @@ -1,693 +0,0 @@ -# Configuration file for dnsmasq. -# -# Format is one option per line, legal options are the same -# as the long options legal on the command line. See -# "/usr/sbin/dnsmasq --help" or "man 8 dnsmasq" for details. - -# Listen on this specific port instead of the standard DNS port -# (53). Setting this to zero completely disables DNS function, -# leaving only DHCP and/or TFTP. -#port=5353 - -# The following two options make you a better netizen, since they -# tell dnsmasq to filter out queries which the public DNS cannot -# answer, and which load the servers (especially the root servers) -# unnecessarily. If you have a dial-on-demand link they also stop -# these requests from bringing up the link unnecessarily. - -# Never forward plain names (without a dot or domain part) -#domain-needed -# Never forward addresses in the non-routed address spaces. -#bogus-priv - -# Uncomment these to enable DNSSEC validation and caching: -# (Requires dnsmasq to be built with DNSSEC option.) 
-#conf-file=/usr/share/dnsmasq/trust-anchors.conf -#dnssec - -# Replies which are not DNSSEC signed may be legitimate, because the domain -# is unsigned, or may be forgeries. Setting this option tells dnsmasq to -# check that an unsigned reply is OK, by finding a secure proof that a DS -# record somewhere between the root and the domain does not exist. -# The cost of setting this is that even queries in unsigned domains will need -# one or more extra DNS queries to verify. -#dnssec-check-unsigned - -# Uncomment this to filter useless windows-originated DNS requests -# which can trigger dial-on-demand links needlessly. -# Note that (amongst other things) this blocks all SRV requests, -# so don't use it if you use eg Kerberos, SIP, XMMP or Google-talk. -# This option only affects forwarding, SRV records originating for -# dnsmasq (via srv-host= lines) are not suppressed by it. -#filterwin2k - -# Change this line if you want dns to get its upstream servers from -# somewhere other that /etc/resolv.conf -#resolv-file= - -# By default, dnsmasq will send queries to any of the upstream -# servers it knows about and tries to favour servers to are known -# to be up. Uncommenting this forces dnsmasq to try each query -# with each server strictly in the order they appear in -# /etc/resolv.conf -#strict-order - -# If you don't want dnsmasq to read /etc/resolv.conf or any other -# file, getting its servers from this file instead (see below), then -# uncomment this. -#no-resolv - -# If you don't want dnsmasq to poll /etc/resolv.conf or other resolv -# files for changes and re-read them then uncomment this. -#no-poll - -# Add other name servers here, with domain specs if they are for -# non-public domains. -#server=/localnet/192.168.0.1 - -# Example of routing PTR queries to nameservers: this will send all -# address->name queries for 192.168.3/24 to nameserver 10.1.2.3 -#server=/3.168.192.in-addr.arpa/10.1.2.3 - -# Add local-only domains here, queries in these domains are answered -# from /etc/hosts or DHCP only. -#local=/localnet/ - -# Add domains which you want to force to an IP address here. -# The example below send any host in double-click.net to a local -# web-server. -#address=/double-click.net/127.0.0.1 - -# --address (and --server) work with IPv6 addresses too. -#address=/www.thekelleys.org.uk/fe80::20d:60ff:fe36:f83 - -# Add the IPs of all queries to yahoo.com, google.com, and their -# subdomains to the vpn and search ipsets: -#ipset=/yahoo.com/google.com/vpn,search - -# You can control how dnsmasq talks to a server: this forces -# queries to 10.1.2.3 to be routed via eth1 -# server=10.1.2.3@eth1 - -# and this sets the source (ie local) address used to talk to -# 10.1.2.3 to 192.168.1.1 port 55 (there must be an interface with that -# IP on the machine, obviously). -# server=10.1.2.3@192.168.1.1#55 - -# If you want dnsmasq to change uid and gid to something other -# than the default, edit the following lines. -user=dnsmasq -group=dnsmasq - -# If you want dnsmasq to listen for DHCP and DNS requests only on -# specified interfaces (and the loopback) give the name of the -# interface (eg eth0) here. -# Repeat the line for more than one interface. -#interface= -# Listen only on localhost by default -interface=lo -# Or you can specify which interface _not_ to listen on -#except-interface= -# Or which to listen on by address (remember to include 127.0.0.1 if -# you use this.) 
-#listen-address= -# If you want dnsmasq to provide only DNS service on an interface, -# configure it as shown above, and then use the following line to -# disable DHCP and TFTP on it. -#no-dhcp-interface= - -# Serve DNS and DHCP only to networks directly connected to this machine. -# Any interface= line will override it. -#local-service - -# On systems which support it, dnsmasq binds the wildcard address, -# even when it is listening on only some interfaces. It then discards -# requests that it shouldn't reply to. This has the advantage of -# working even when interfaces come and go and change address. If you -# want dnsmasq to really bind only the interfaces it is listening on, -# uncomment this option. About the only time you may need this is when -# running another nameserver on the same machine. -# -# To listen only on localhost and do not receive packets on other -# interfaces, bind only to lo device. Comment out to bind on single -# wildcard socket. -bind-interfaces - -# If you don't want dnsmasq to read /etc/hosts, uncomment the -# following line. -#no-hosts -# or if you want it to read another file, as well as /etc/hosts, use -# this. -#addn-hosts=/etc/banner_add_hosts - -# Set this (and domain: see below) if you want to have a domain -# automatically added to simple names in a hosts-file. -#expand-hosts - -# Set the domain for dnsmasq. this is optional, but if it is set, it -# does the following things. -# 1) Allows DHCP hosts to have fully qualified domain names, as long -# as the domain part matches this setting. -# 2) Sets the "domain" DHCP option thereby potentially setting the -# domain of all systems configured by DHCP -# 3) Provides the domain part for "expand-hosts" -#domain=thekelleys.org.uk - -# Set a different domain for a particular subnet -#domain=wireless.thekelleys.org.uk,192.168.2.0/24 - -# Same idea, but range rather then subnet -#domain=reserved.thekelleys.org.uk,192.68.3.100,192.168.3.200 - -# Uncomment this to enable the integrated DHCP server, you need -# to supply the range of addresses available for lease and optionally -# a lease time. If you have more than one network, you will need to -# repeat this for each network on which you want to supply DHCP -# service. -#dhcp-range=192.168.0.50,192.168.0.150,12h - -# This is an example of a DHCP range where the netmask is given. This -# is needed for networks we reach the dnsmasq DHCP server via a relay -# agent. If you don't know what a DHCP relay agent is, you probably -# don't need to worry about this. -#dhcp-range=192.168.0.50,192.168.0.150,255.255.255.0,12h - -# This is an example of a DHCP range which sets a tag, so that -# some DHCP options may be set only for this network. -#dhcp-range=set:red,192.168.0.50,192.168.0.150 - -# Use this DHCP range only when the tag "green" is set. -#dhcp-range=tag:green,192.168.0.50,192.168.0.150,12h - -# Specify a subnet which can't be used for dynamic address allocation, -# is available for hosts with matching --dhcp-host lines. Note that -# dhcp-host declarations will be ignored unless there is a dhcp-range -# of some type for the subnet in question. -# In this case the netmask is implied (it comes from the network -# configuration on the machine running dnsmasq) it is possible to give -# an explicit netmask instead. -#dhcp-range=192.168.0.0,static - -# Enable DHCPv6. Note that the prefix-length does not need to be specified -# and defaults to 64 if missing/ -#dhcp-range=1234::2, 1234::500, 64, 12h - -# Do Router Advertisements, BUT NOT DHCP for this subnet. 
-#dhcp-range=1234::, ra-only - -# Do Router Advertisements, BUT NOT DHCP for this subnet, also try and -# add names to the DNS for the IPv6 address of SLAAC-configured dual-stack -# hosts. Use the DHCPv4 lease to derive the name, network segment and -# MAC address and assume that the host will also have an -# IPv6 address calculated using the SLAAC algorithm. -#dhcp-range=1234::, ra-names - -# Do Router Advertisements, BUT NOT DHCP for this subnet. -# Set the lifetime to 46 hours. (Note: minimum lifetime is 2 hours.) -#dhcp-range=1234::, ra-only, 48h - -# Do DHCP and Router Advertisements for this subnet. Set the A bit in the RA -# so that clients can use SLAAC addresses as well as DHCP ones. -#dhcp-range=1234::2, 1234::500, slaac - -# Do Router Advertisements and stateless DHCP for this subnet. Clients will -# not get addresses from DHCP, but they will get other configuration information. -# They will use SLAAC for addresses. -#dhcp-range=1234::, ra-stateless - -# Do stateless DHCP, SLAAC, and generate DNS names for SLAAC addresses -# from DHCPv4 leases. -#dhcp-range=1234::, ra-stateless, ra-names - -# Do router advertisements for all subnets where we're doing DHCPv6 -# Unless overridden by ra-stateless, ra-names, et al, the router -# advertisements will have the M and O bits set, so that the clients -# get addresses and configuration from DHCPv6, and the A bit reset, so the -# clients don't use SLAAC addresses. -#enable-ra - -# Supply parameters for specified hosts using DHCP. There are lots -# of valid alternatives, so we will give examples of each. Note that -# IP addresses DO NOT have to be in the range given above, they just -# need to be on the same network. The order of the parameters in these -# do not matter, it's permissible to give name, address and MAC in any -# order. - -# Always allocate the host with Ethernet address 11:22:33:44:55:66 -# The IP address 192.168.0.60 -#dhcp-host=11:22:33:44:55:66,192.168.0.60 - -# Always set the name of the host with hardware address -# 11:22:33:44:55:66 to be "fred" -#dhcp-host=11:22:33:44:55:66,fred - -# Always give the host with Ethernet address 11:22:33:44:55:66 -# the name fred and IP address 192.168.0.60 and lease time 45 minutes -#dhcp-host=11:22:33:44:55:66,fred,192.168.0.60,45m - -# Give a host with Ethernet address 11:22:33:44:55:66 or -# 12:34:56:78:90:12 the IP address 192.168.0.60. Dnsmasq will assume -# that these two Ethernet interfaces will never be in use at the same -# time, and give the IP address to the second, even if it is already -# in use by the first. Useful for laptops with wired and wireless -# addresses. -#dhcp-host=11:22:33:44:55:66,12:34:56:78:90:12,192.168.0.60 - -# Give the machine which says its name is "bert" IP address -# 192.168.0.70 and an infinite lease -#dhcp-host=bert,192.168.0.70,infinite - -# Always give the host with client identifier 01:02:02:04 -# the IP address 192.168.0.60 -#dhcp-host=id:01:02:02:04,192.168.0.60 - -# Always give the InfiniBand interface with hardware address -# 80:00:00:48:fe:80:00:00:00:00:00:00:f4:52:14:03:00:28:05:81 the -# ip address 192.168.0.61. The client id is derived from the prefix -# ff:00:00:00:00:00:02:00:00:02:c9:00 and the last 8 pairs of -# hex digits of the hardware address. 
-#dhcp-host=id:ff:00:00:00:00:00:02:00:00:02:c9:00:f4:52:14:03:00:28:05:81,192.168.0.61 - -# Always give the host with client identifier "marjorie" -# the IP address 192.168.0.60 -#dhcp-host=id:marjorie,192.168.0.60 - -# Enable the address given for "judge" in /etc/hosts -# to be given to a machine presenting the name "judge" when -# it asks for a DHCP lease. -#dhcp-host=judge - -# Never offer DHCP service to a machine whose Ethernet -# address is 11:22:33:44:55:66 -#dhcp-host=11:22:33:44:55:66,ignore - -# Ignore any client-id presented by the machine with Ethernet -# address 11:22:33:44:55:66. This is useful to prevent a machine -# being treated differently when running under different OS's or -# between PXE boot and OS boot. -#dhcp-host=11:22:33:44:55:66,id:* - -# Send extra options which are tagged as "red" to -# the machine with Ethernet address 11:22:33:44:55:66 -#dhcp-host=11:22:33:44:55:66,set:red - -# Send extra options which are tagged as "red" to -# any machine with Ethernet address starting 11:22:33: -#dhcp-host=11:22:33:*:*:*,set:red - -# Give a fixed IPv6 address and name to client with -# DUID 00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2 -# Note the MAC addresses CANNOT be used to identify DHCPv6 clients. -# Note also that the [] around the IPv6 address are obligatory. -#dhcp-host=id:00:01:00:01:16:d2:83:fc:92:d4:19:e2:d8:b2, fred, [1234::5] - -# Ignore any clients which are not specified in dhcp-host lines -# or /etc/ethers. Equivalent to ISC "deny unknown-clients". -# This relies on the special "known" tag which is set when -# a host is matched. -#dhcp-ignore=tag:!known - -# Send extra options which are tagged as "red" to any machine whose -# DHCP vendorclass string includes the substring "Linux" -#dhcp-vendorclass=set:red,Linux - -# Send extra options which are tagged as "red" to any machine one -# of whose DHCP userclass strings includes the substring "accounts" -#dhcp-userclass=set:red,accounts - -# Send extra options which are tagged as "red" to any machine whose -# MAC address matches the pattern. -#dhcp-mac=set:red,00:60:8C:*:*:* - -# If this line is uncommented, dnsmasq will read /etc/ethers and act -# on the ethernet-address/IP pairs found there just as if they had -# been given as --dhcp-host options. Useful if you keep -# MAC-address/host mappings there for other purposes. -#read-ethers - -# Send options to hosts which ask for a DHCP lease. -# See RFC 2132 for details of available options. -# Common options can be given to dnsmasq by name: -# run "dnsmasq --help dhcp" to get a list. -# Note that all the common settings, such as netmask and -# broadcast address, DNS server and default route, are given -# sane defaults by dnsmasq. You very likely will not need -# any dhcp-options. If you use Windows clients and Samba, there -# are some options which are recommended, they are detailed at the -# end of this section. - -# Override the default route supplied by dnsmasq, which assumes the -# router is the same machine as the one running dnsmasq. -#dhcp-option=3,1.2.3.4 - -# Do the same thing, but using the option name -#dhcp-option=option:router,1.2.3.4 - -# Override the default route supplied by dnsmasq and send no default -# route at all. Note that this only works for the options sent by -# default (1, 3, 6, 12, 28) the same line will send a zero-length option -# for all other option numbers. -#dhcp-option=3 - -# Set the NTP time server addresses to 192.168.0.4 and 10.10.0.5 -#dhcp-option=option:ntp-server,192.168.0.4,10.10.0.5 - -# Send DHCPv6 option. 
Note [] around IPv6 addresses. -#dhcp-option=option6:dns-server,[1234::77],[1234::88] - -# Send DHCPv6 option for namservers as the machine running -# dnsmasq and another. -#dhcp-option=option6:dns-server,[::],[1234::88] - -# Ask client to poll for option changes every six hours. (RFC4242) -#dhcp-option=option6:information-refresh-time,6h - -# Set option 58 client renewal time (T1). Defaults to half of the -# lease time if not specified. (RFC2132) -#dhcp-option=option:T1,1m - -# Set option 59 rebinding time (T2). Defaults to 7/8 of the -# lease time if not specified. (RFC2132) -#dhcp-option=option:T2,2m - -# Set the NTP time server address to be the same machine as -# is running dnsmasq -#dhcp-option=42,0.0.0.0 - -# Set the NIS domain name to "welly" -#dhcp-option=40,welly - -# Set the default time-to-live to 50 -#dhcp-option=23,50 - -# Set the "all subnets are local" flag -#dhcp-option=27,1 - -# Send the etherboot magic flag and then etherboot options (a string). -#dhcp-option=128,e4:45:74:68:00:00 -#dhcp-option=129,NIC=eepro100 - -# Specify an option which will only be sent to the "red" network -# (see dhcp-range for the declaration of the "red" network) -# Note that the tag: part must precede the option: part. -#dhcp-option = tag:red, option:ntp-server, 192.168.1.1 - -# The following DHCP options set up dnsmasq in the same way as is specified -# for the ISC dhcpcd in -# http://www.samba.org/samba/ftp/docs/textdocs/DHCP-Server-Configuration.txt -# adapted for a typical dnsmasq installation where the host running -# dnsmasq is also the host running samba. -# you may want to uncomment some or all of them if you use -# Windows clients and Samba. -#dhcp-option=19,0 # option ip-forwarding off -#dhcp-option=44,0.0.0.0 # set netbios-over-TCP/IP nameserver(s) aka WINS server(s) -#dhcp-option=45,0.0.0.0 # netbios datagram distribution server -#dhcp-option=46,8 # netbios node type - -# Send an empty WPAD option. This may be REQUIRED to get windows 7 to behave. -#dhcp-option=252,"\n" - -# Send RFC-3397 DNS domain search DHCP option. WARNING: Your DHCP client -# probably doesn't support this...... -#dhcp-option=option:domain-search,eng.apple.com,marketing.apple.com - -# Send RFC-3442 classless static routes (note the netmask encoding) -#dhcp-option=121,192.168.1.0/24,1.2.3.4,10.0.0.0/8,5.6.7.8 - -# Send vendor-class specific options encapsulated in DHCP option 43. -# The meaning of the options is defined by the vendor-class so -# options are sent only when the client supplied vendor class -# matches the class given here. (A substring match is OK, so "MSFT" -# matches "MSFT" and "MSFT 5.0"). This example sets the -# mtftp address to 0.0.0.0 for PXEClients. -#dhcp-option=vendor:PXEClient,1,0.0.0.0 - -# Send microsoft-specific option to tell windows to release the DHCP lease -# when it shuts down. Note the "i" flag, to tell dnsmasq to send the -# value as a four-byte integer - that's what microsoft wants. See -# http://technet2.microsoft.com/WindowsServer/en/library/a70f1bb7-d2d4-49f0-96d6-4b7414ecfaae1033.mspx?mfr=true -#dhcp-option=vendor:MSFT,2,1i - -# Send the Encapsulated-vendor-class ID needed by some configurations of -# Etherboot to allow is to recognise the DHCP server. -#dhcp-option=vendor:Etherboot,60,"Etherboot" - -# Send options to PXELinux. Note that we need to send the options even -# though they don't appear in the parameter request list, so we need -# to use dhcp-option-force here. -# See http://syslinux.zytor.com/pxe.php#special for details. 
-# Magic number - needed before anything else is recognised -#dhcp-option-force=208,f1:00:74:7e -# Configuration file name -#dhcp-option-force=209,configs/common -# Path prefix -#dhcp-option-force=210,/tftpboot/pxelinux/files/ -# Reboot time. (Note 'i' to send 32-bit value) -#dhcp-option-force=211,30i - -# Set the boot filename for netboot/PXE. You will only need -# this if you want to boot machines over the network and you will need -# a TFTP server; either dnsmasq's built-in TFTP server or an -# external one. (See below for how to enable the TFTP server.) -#dhcp-boot=pxelinux.0 - -# The same as above, but use custom tftp-server instead machine running dnsmasq -#dhcp-boot=pxelinux,server.name,192.168.1.100 - -# Boot for iPXE. The idea is to send two different -# filenames, the first loads iPXE, and the second tells iPXE what to -# load. The dhcp-match sets the ipxe tag for requests from iPXE. -#dhcp-boot=undionly.kpxe -#dhcp-match=set:ipxe,175 # iPXE sends a 175 option. -#dhcp-boot=tag:ipxe,http://boot.ipxe.org/demo/boot.php - -# Encapsulated options for iPXE. All the options are -# encapsulated within option 175 -#dhcp-option=encap:175, 1, 5b # priority code -#dhcp-option=encap:175, 176, 1b # no-proxydhcp -#dhcp-option=encap:175, 177, string # bus-id -#dhcp-option=encap:175, 189, 1b # BIOS drive code -#dhcp-option=encap:175, 190, user # iSCSI username -#dhcp-option=encap:175, 191, pass # iSCSI password - -# Test for the architecture of a netboot client. PXE clients are -# supposed to send their architecture as option 93. (See RFC 4578) -#dhcp-match=peecees, option:client-arch, 0 #x86-32 -#dhcp-match=itanics, option:client-arch, 2 #IA64 -#dhcp-match=hammers, option:client-arch, 6 #x86-64 -#dhcp-match=mactels, option:client-arch, 7 #EFI x86-64 - -# Do real PXE, rather than just booting a single file, this is an -# alternative to dhcp-boot. -#pxe-prompt="What system shall I netboot?" -# or with timeout before first available action is taken: -#pxe-prompt="Press F8 for menu.", 60 - -# Available boot services. for PXE. -#pxe-service=x86PC, "Boot from local disk" - -# Loads /pxelinux.0 from dnsmasq TFTP server. -#pxe-service=x86PC, "Install Linux", pxelinux - -# Loads /pxelinux.0 from TFTP server at 1.2.3.4. -# Beware this fails on old PXE ROMS. -#pxe-service=x86PC, "Install Linux", pxelinux, 1.2.3.4 - -# Use bootserver on network, found my multicast or broadcast. -#pxe-service=x86PC, "Install windows from RIS server", 1 - -# Use bootserver at a known IP address. -#pxe-service=x86PC, "Install windows from RIS server", 1, 1.2.3.4 - -# If you have multicast-FTP available, -# information for that can be passed in a similar way using options 1 -# to 5. See page 19 of -# http://download.intel.com/design/archives/wfm/downloads/pxespec.pdf - - -# Enable dnsmasq's built-in TFTP server -#enable-tftp - -# Set the root directory for files available via FTP. -#tftp-root=/var/ftpd - -# Do not abort if the tftp-root is unavailable -#tftp-no-fail - -# Make the TFTP server more secure: with this set, only files owned by -# the user dnsmasq is running as will be send over the net. -#tftp-secure - -# This option stops dnsmasq from negotiating a larger blocksize for TFTP -# transfers. It will slow things down, but may rescue some broken TFTP -# clients. -#tftp-no-blocksize - -# Set the boot file name only when the "red" tag is set. -#dhcp-boot=tag:red,pxelinux.red-net - -# An example of dhcp-boot with an external TFTP server: the name and IP -# address of the server are given after the filename. 
-# Can fail with old PXE ROMS. Overridden by --pxe-service. -#dhcp-boot=/var/ftpd/pxelinux.0,boothost,192.168.0.3 - -# If there are multiple external tftp servers having a same name -# (using /etc/hosts) then that name can be specified as the -# tftp_servername (the third option to dhcp-boot) and in that -# case dnsmasq resolves this name and returns the resultant IP -# addresses in round robin fashion. This facility can be used to -# load balance the tftp load among a set of servers. -#dhcp-boot=/var/ftpd/pxelinux.0,boothost,tftp_server_name - -# Set the limit on DHCP leases, the default is 150 -#dhcp-lease-max=150 - -# The DHCP server needs somewhere on disk to keep its lease database. -# This defaults to a sane location, but if you want to change it, use -# the line below. -#dhcp-leasefile=/var/lib/dnsmasq/dnsmasq.leases - -# Set the DHCP server to authoritative mode. In this mode it will barge in -# and take over the lease for any client which broadcasts on the network, -# whether it has a record of the lease or not. This avoids long timeouts -# when a machine wakes up on a new network. DO NOT enable this if there's -# the slightest chance that you might end up accidentally configuring a DHCP -# server for your campus/company accidentally. The ISC server uses -# the same option, and this URL provides more information: -# http://www.isc.org/files/auth.html -#dhcp-authoritative - -# Set the DHCP server to enable DHCPv4 Rapid Commit Option per RFC 4039. -# In this mode it will respond to a DHCPDISCOVER message including a Rapid Commit -# option with a DHCPACK including a Rapid Commit option and fully committed address -# and configuration information. This must only be enabled if either the server is -# the only server for the subnet, or multiple servers are present and they each -# commit a binding for all clients. -#dhcp-rapid-commit - -# Run an executable when a DHCP lease is created or destroyed. -# The arguments sent to the script are "add" or "del", -# then the MAC address, the IP address and finally the hostname -# if there is one. -#dhcp-script=/bin/echo - -# Set the cachesize here. -#cache-size=150 - -# If you want to disable negative caching, uncomment this. -#no-negcache - -# Normally responses which come from /etc/hosts and the DHCP lease -# file have Time-To-Live set as zero, which conventionally means -# do not cache further. If you are happy to trade lower load on the -# server for potentially stale date, you can set a time-to-live (in -# seconds) here. -#local-ttl= - -# If you want dnsmasq to detect attempts by Verisign to send queries -# to unregistered .com and .net hosts to its sitefinder service and -# have dnsmasq instead return the correct NXDOMAIN response, uncomment -# this line. You can add similar lines to do the same for other -# registries which have implemented wildcard A records. -#bogus-nxdomain=64.94.110.11 - -# If you want to fix up DNS results from upstream servers, use the -# alias option. This only works for IPv4. -# This alias makes a result of 1.2.3.4 appear as 5.6.7.8 -#alias=1.2.3.4,5.6.7.8 -# and this maps 1.2.3.x to 5.6.7.x -#alias=1.2.3.0,5.6.7.0,255.255.255.0 -# and this maps 192.168.0.10->192.168.0.40 to 10.0.0.10->10.0.0.40 -#alias=192.168.0.10-192.168.0.40,10.0.0.0,255.255.255.0 - -# Change these lines if you want dnsmasq to serve MX records. 
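Before the MX examples, the alias remapping described above is easiest to see with concrete (hypothetical) numbers:

  alias=10.10.0.0,192.168.100.0,255.255.255.0

With this line in place, an upstream answer of 10.10.0.42 is rewritten to 192.168.100.42 before it reaches the client; as noted above, this only works for IPv4. The MX-record examples follow.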
- -# Return an MX record named "maildomain.com" with target -# servermachine.com and preference 50 -#mx-host=maildomain.com,servermachine.com,50 - -# Set the default target for MX records created using the localmx option. -#mx-target=servermachine.com - -# Return an MX record pointing to the mx-target for all local -# machines. -#localmx - -# Return an MX record pointing to itself for all local machines. -#selfmx - -# Change the following lines if you want dnsmasq to serve SRV -# records. These are useful if you want to serve ldap requests for -# Active Directory and other windows-originated DNS requests. -# See RFC 2782. -# You may add multiple srv-host lines. -# The fields are ,,,, -# If the domain part if missing from the name (so that is just has the -# service and protocol sections) then the domain given by the domain= -# config option is used. (Note that expand-hosts does not need to be -# set for this to work.) - -# A SRV record sending LDAP for the example.com domain to -# ldapserver.example.com port 389 -#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389 - -# A SRV record sending LDAP for the example.com domain to -# ldapserver.example.com port 389 (using domain=) -#domain=example.com -#srv-host=_ldap._tcp,ldapserver.example.com,389 - -# Two SRV records for LDAP, each with different priorities -#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,1 -#srv-host=_ldap._tcp.example.com,ldapserver.example.com,389,2 - -# A SRV record indicating that there is no LDAP server for the domain -# example.com -#srv-host=_ldap._tcp.example.com - -# The following line shows how to make dnsmasq serve an arbitrary PTR -# record. This is useful for DNS-SD. (Note that the -# domain-name expansion done for SRV records _does_not -# occur for PTR records.) -#ptr-record=_http._tcp.dns-sd-services,"New Employee Page._http._tcp.dns-sd-services" - -# Change the following lines to enable dnsmasq to serve TXT records. -# These are used for things like SPF and zeroconf. (Note that the -# domain-name expansion done for SRV records _does_not -# occur for TXT records.) - -#Example SPF. -#txt-record=example.com,"v=spf1 a -all" - -#Example zeroconf -#txt-record=_http._tcp.example.com,name=value,paper=A4 - -# Provide an alias for a "local" DNS name. Note that this _only_ works -# for targets which are names from DHCP or /etc/hosts. Give host -# "bert" another name, bertrand -#cname=bertand,bert - -# For debugging purposes, log each DNS query as it passes through -# dnsmasq. -#log-queries - -# Log lots of extra information about DHCP transactions. -#log-dhcp - -# Include another lot of configuration options. -#conf-file=/etc/dnsmasq.more.conf -#conf-dir=/etc/dnsmasq.d - -# Include all the files in a directory except those ending in .bak -#conf-dir=/etc/dnsmasq.d,.bak - -# Include all files in a directory which end in .conf -#conf-dir=/etc/dnsmasq.d/,*.conf - -# Include all files in /etc/dnsmasq.d except RPM backup files -conf-dir=/etc/dnsmasq.d,.rpmnew,.rpmsave,.rpmorig - -# If a DHCP client claims that its name is "wpad", ignore that. -# This fixes a security hole. 
see CERT Vulnerability VU#598349 -#dhcp-name-match=set:wpad-ignore,wpad -#dhcp-ignore-names=tag:wpad-ignore - diff --git a/files/osbs/fedora-dnsmasq.conf.production b/files/osbs/fedora-dnsmasq.conf.production deleted file mode 100644 index 6f72e5cd7f..0000000000 --- a/files/osbs/fedora-dnsmasq.conf.production +++ /dev/null @@ -1,2 +0,0 @@ -server=/fedoraproject.org/10.3.163.33 -server=/fedoraproject.org/10.3.163.34 diff --git a/files/osbs/fedora-dnsmasq.conf.staging b/files/osbs/fedora-dnsmasq.conf.staging deleted file mode 100644 index 6f72e5cd7f..0000000000 --- a/files/osbs/fedora-dnsmasq.conf.staging +++ /dev/null @@ -1,2 +0,0 @@ -server=/fedoraproject.org/10.3.163.33 -server=/fedoraproject.org/10.3.163.34 diff --git a/files/osbs/fix-docker-iptables.production b/files/osbs/fix-docker-iptables.production deleted file mode 100644 index eb9e6dc548..0000000000 --- a/files/osbs/fix-docker-iptables.production +++ /dev/null @@ -1,90 +0,0 @@ -#!/bin/bash -xe -# Note: this is done as a script because it needs to be run after -# every docker service restart. -# And just doing an iptables-restore is going to mess up kubernetes' -# NAT table. -# And it gets even better with openshift! It thinks I'm stupid and need -# to be corrected by automatically adding the "allow all" rules back at -# the top as soon as I remove them. -# To circumvent that, we're just adding a new chain for this, as it seems -# that it doesn't do anything with the firewall if we keep its rules in -# place. (it doesn't check the order of its rules, only that they exist) - -if [ "`iptables -nL | grep FILTER_FORWARD`" == "" ]; -then - iptables -N FILTER_FORWARD -fi -if [ "`iptables -nL | grep 'FILTER_FORWARD all'`" == "" ]; -then - iptables -I FORWARD 1 -j FILTER_FORWARD - iptables -I FORWARD 2 -j REJECT - iptables -I DOCKER-ISOLATION 1 -j FILTER_FORWARD -fi - -# Delete all old rules -iptables --flush FILTER_FORWARD - -# Re-insert some basic rules -iptables -A FILTER_FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -iptables -A FILTER_FORWARD --src 10.1.0.0/16 --dst 10.1.0.0/16 -j ACCEPT - -# Now insert access to allowed boxes -# docker-registry no cdn -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.119 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.127 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.119 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.127 --dport 443 -j ACCEPT - -# Candidate registry -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.102 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.102 --dport 443 -j ACCEPT - -#koji.fp.o -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.104 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.104 --dport 443 -j ACCEPT - -# pkgs -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.116 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.116 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.169.116 --dport 9418 -j ACCEPT - -# DNS -iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.33 --dport 53 -j ACCEPT -iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.33 --dport 53 -j ACCEPT - -# mirrors.fp.o -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.76 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.77 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.75 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 
10.3.163.74 --dport 443 -j ACCEPT - -# infrastructure.fp.o (infra repos) -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.35 --dport 443 -j ACCEPT - -# Kerberos -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.76 --dport 1088 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.77 --dport 1088 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.75 --dport 1088 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.74 --dport 1088 -j ACCEPT - -# dl.phx2 -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 443 -j ACCEPT - -# Docker is CRAZY and forces Google DNS upon us..... -iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.8.8 --dport 53 -j ACCEPT -iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.4.4 --dport 53 -j ACCEPT - -# aarch64 cluster -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.170.147 --dport 8443 -j ACCEPT - -iptables -A FORWARD -j REJECT --reject-with icmp-host-prohibited - diff --git a/files/osbs/fix-docker-iptables.staging b/files/osbs/fix-docker-iptables.staging deleted file mode 100644 index ccd82b1ec5..0000000000 --- a/files/osbs/fix-docker-iptables.staging +++ /dev/null @@ -1,86 +0,0 @@ -#!/bin/bash -xe -# Note: this is done as a script because it needs to be run after -# every docker service restart. -# And just doing an iptables-restore is going to mess up kubernetes' -# NAT table. -# And it gets even better with openshift! It thinks I'm stupid and need -# to be corrected by automatically adding the "allow all" rules back at -# the top as soon as I remove them. -# To circumvent that, we're just adding a new chain for this, as it seems -# that it doesn't do anything with the firewall if we keep its rules in -# place. 
(it doesn't check the order of its rules, only that they exist) - -if [ "`iptables -nL | grep FILTER_FORWARD`" == "" ]; -then - iptables -N FILTER_FORWARD -fi -if [ "`iptables -nL | grep 'FILTER_FORWARD all'`" == "" ]; -then - iptables -I FORWARD 1 -j FILTER_FORWARD - iptables -I FORWARD 2 -j REJECT - iptables -I DOCKER-ISOLATION 1 -j FILTER_FORWARD -fi - -# Delete all old rules -iptables --flush FILTER_FORWARD - -# Re-insert some basic rules -iptables -A FILTER_FORWARD -m conntrack --ctstate RELATED,ESTABLISHED -j ACCEPT -iptables -A FILTER_FORWARD --src 10.1.0.0/16 --dst 10.1.0.0/16 -j ACCEPT - -# Now insert access to allowed boxes -# osbs -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.166.74 --dport 443 -j ACCEPT - -# docker-registry -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.128.123 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.5.128.124 --dport 443 -j ACCEPT - -#koji.fp.o -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.64 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.64 --dport 443 -j ACCEPT - -# pkgs.stg -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 9418 -j ACCEPT - -# DNS -iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.33 --dport 53 -j ACCEPT -iptables -A FILTER_FORWARD -p udp -m udp -d 10.3.163.34 --dport 53 -j ACCEPT - -# mirrors.fp.o -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.76 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.77 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.75 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.167.74 --dport 443 -j ACCEPT - -# infrastructure.fp.o (infra repos) -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.35 --dport 443 -j ACCEPT - -# dl.phx2 -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.49 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.50 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.51 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.85 --dport 443 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 80 -j ACCEPT -iptables -A FILTER_FORWARD -p tcp -m tcp -d 10.3.163.84 --dport 443 -j ACCEPT - - -# Docker is CRAZY and forces Google DNS upon us..... 
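Since the whole workaround depends on FILTER_FORWARD being evaluated before OpenShift's re-added allow-all rules, a quick sanity check after a docker restart is to list the FORWARD chain with rule numbers (a sketch; output will vary per host):

  iptables -nL FORWARD --line-numbers | head -n 5

FILTER_FORWARD should sit at position 1 with the blanket REJECT right behind it, matching the two -I FORWARD inserts above. The rules for Docker's hard-coded Google DNS follow.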
-iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.8.8 --dport 53 -j ACCEPT -iptables -A FILTER_FORWARD -p udp -m udp -d 8.8.4.4 --dport 53 -j ACCEPT - -# proxy -iptables -A FILTER_FORWARD -p tcp --dst 10.3.166.74 --dport 443 -j ACCEPT - -# Kerberos -iptables -A FILTER_FORWARD -p tcp --dst 10.3.166.74 --dport 1088 -j ACCEPT - - -iptables -A FILTER_FORWARD -j REJECT --reject-with icmp-host-prohibited - diff --git a/files/osbs/orchestrator_customize.json b/files/osbs/orchestrator_customize.json deleted file mode 100644 index 4726511b94..0000000000 --- a/files/osbs/orchestrator_customize.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "disable_plugins": [ - { - "plugin_type": "exit_plugins", - "plugin_name": "import_image" - } - ], - "enable_plugins": [] -} diff --git a/files/osbs/worker_customize.json b/files/osbs/worker_customize.json deleted file mode 100644 index 054fcb2898..0000000000 --- a/files/osbs/worker_customize.json +++ /dev/null @@ -1,13 +0,0 @@ -{ - "disable_plugins": [ - { - "plugin_type": "prebuild_plugins", - "plugin_name": "fetch_maven_artifacts" - }, - { - "plugin_type": "exit_plugins", - "plugin_name": "import_image" - } - ], - "enable_plugins": [] -} diff --git a/inventory/builders b/inventory/builders index e7d97b5a21..172d81e610 100644 --- a/inventory/builders +++ b/inventory/builders @@ -160,7 +160,6 @@ bvmhost-a64-14.iad2.fedoraproject.org bvmhost-a64-15.iad2.fedoraproject.org # These are lenovo emags in IAD2 bvmhost-a64-01.stg.iad2.fedoraproject.org -bvmhost-a64-osbs-01.iad2.fedoraproject.org # ppc bvmhost-p09-01.iad2.fedoraproject.org bvmhost-p09-02.iad2.fedoraproject.org diff --git a/inventory/group_vars/batcave b/inventory/group_vars/batcave index a8f224a1a1..a218feef37 100644 --- a/inventory/group_vars/batcave +++ b/inventory/group_vars/batcave @@ -57,7 +57,6 @@ ipa_client_shell_groups: - sysadmin-messaging - sysadmin-noc - sysadmin-odcs - - sysadmin-osbs - sysadmin-osbuild - sysadmin-openscanhub - sysadmin-qa diff --git a/inventory/group_vars/buildhw b/inventory/group_vars/buildhw index b61d529663..0f7987b052 100644 --- a/inventory/group_vars/buildhw +++ b/inventory/group_vars/buildhw @@ -18,6 +18,4 @@ koji_root: "koji.fedoraproject.org/koji" koji_server_url: "https://koji.fedoraproject.org/kojihub" koji_topurl: "https://kojipkgs.fedoraproject.org/" koji_weburl: "https://koji.fedoraproject.org/koji" -# These variables are for koji-containerbuild/osbs -osbs_url: "osbs.fedoraproject.org" source_registry: "registry.fedoraproject.org" diff --git a/inventory/group_vars/buildvm b/inventory/group_vars/buildvm index b39829206a..9251c93b65 100644 --- a/inventory/group_vars/buildvm +++ b/inventory/group_vars/buildvm @@ -26,8 +26,6 @@ lvm_size: 262144 max_mem_size: "{{ mem_size }}" mem_size: 15360 num_cpus: 6 -# These variables are for koji-containerbuild/osbs -osbs_url: "osbs.fedoraproject.org" source_registry: "registry.fedoraproject.org" virt_install_command: "{{ virt_install_command_one_nic_unsafe }}" volgroup: /dev/BuildGuests diff --git a/inventory/group_vars/buildvm_aarch64 b/inventory/group_vars/buildvm_aarch64 index af4c105a8f..68844a186d 100644 --- a/inventory/group_vars/buildvm_aarch64 +++ b/inventory/group_vars/buildvm_aarch64 @@ -27,8 +27,6 @@ max_cpu: "{{ num_cpus }}" max_mem_size: "{{ mem_size }}" mem_size: 36864 num_cpus: 12 -# These variables are for koji-containerbuild/osbs -osbs_url: "osbs.fedoraproject.org" source_registry: "registry.fedoraproject.org" virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}" volgroup: /dev/vg_guests diff --git 
a/inventory/group_vars/buildvm_aarch64_stg b/inventory/group_vars/buildvm_aarch64_stg index fe7dd98210..8f56530160 100644 --- a/inventory/group_vars/buildvm_aarch64_stg +++ b/inventory/group_vars/buildvm_aarch64_stg @@ -34,8 +34,6 @@ max_mem_size: "{{ mem_size }}" mem_size: 40960 nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3" num_cpus: 5 -# These variables are for koji-containerbuild/osbs -osbs_url: "osbs.stg.fedoraproject.org" source_registry: "registry.stg.fedoraproject.org" # this is to enable nested virt, which we need for some builds virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}" diff --git a/inventory/group_vars/buildvm_ppc64le_stg b/inventory/group_vars/buildvm_ppc64le_stg index 672b9d66da..62f95a8880 100644 --- a/inventory/group_vars/buildvm_ppc64le_stg +++ b/inventory/group_vars/buildvm_ppc64le_stg @@ -34,8 +34,6 @@ max_mem_size: "{{ mem_size }}" mem_size: 10240 nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3" num_cpus: 4 -# These variables are for koji-containerbuild/osbs -osbs_url: "osbs.stg.fedoraproject.org" source_registry: "registry.stg.fedoraproject.org" virt_install_command: "{{ virt_install_command_ppc64le_one_nic_unsafe }}" volgroup: /dev/vg_guests diff --git a/inventory/group_vars/buildvm_stg b/inventory/group_vars/buildvm_stg index 2017e34d5c..db612f4bfd 100644 --- a/inventory/group_vars/buildvm_stg +++ b/inventory/group_vars/buildvm_stg @@ -33,8 +33,6 @@ max_mem_size: "{{ mem_size }}" mem_size: 10240 nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=4" num_cpus: 4 -# These variables are for koji-containerbuild/osbs -osbs_url: "osbs.stg.fedoraproject.org" resolvconf: "resolv.conf/iad2" source_registry: "registry.fedoraproject.org" virt_install_command: "{{ virt_install_command_one_nic_unsafe }}" diff --git a/inventory/group_vars/koji b/inventory/group_vars/koji index 139cfb4fab..1439c01608 100644 --- a/inventory/group_vars/koji +++ b/inventory/group_vars/koji @@ -36,7 +36,6 @@ mem_size: 32768 max_mem_size: 65536 nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=4" num_cpus: 16 -osbs_url: "osbs.fedoraproject.org" primary_auth_source: ipa source_registry: "registry.fedoraproject.org" # for systems that do not match the above - specify the same parameter in diff --git a/inventory/group_vars/koji_stg b/inventory/group_vars/koji_stg index 8b7a69b0a4..00fbdbd62f 100644 --- a/inventory/group_vars/koji_stg +++ b/inventory/group_vars/koji_stg @@ -1,9 +1,5 @@ --- # Define resources for this group of hosts here. -# Add custom iptable rule to allow stage koji to talk to -# osbs-dev.fedorainfracloud.org (will move to stage osbs later, this is for the -# sake of testing). 
-custom_rules: ['-A OUTPUT -p tcp -m tcp -d 209.132.184.60 --dport 8443 -j ACCEPT'] docker_registry: "candidate-registry.stg.fedoraproject.org" # These are consumed by a task in roles/fedmsg/base/main.yml fedmsg_certs: @@ -27,11 +23,9 @@ fedmsg_certs: ipa_client_shell_groups: - fi-apprentice - sysadmin-noc - - sysadmin-osbs - sysadmin-releng - sysadmin-veteran ipa_client_sudo_groups: - - sysadmin-osbs - sysadmin-releng ipa_host_group: kojihub ipa_host_group_desc: Koji Hub hosts @@ -45,7 +39,6 @@ mem_size: 8192 # NOTE -- staging mounts read-only nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3" num_cpus: 8 -osbs_url: "osbs.stg.fedoraproject.org" source_registry: "registry.stg.fedoraproject.org" # for systems that do not match the above - specify the same parameter in # the host_vars/$hostname file diff --git a/inventory/group_vars/nagios b/inventory/group_vars/nagios index 55d09bf91f..a5ee4e9d1d 100644 --- a/inventory/group_vars/nagios +++ b/inventory/group_vars/nagios @@ -72,7 +72,6 @@ iad2_management_hosts: - bvmhost-a64-09.mgmt.iad2.fedoraproject.org. - bvmhost-a64-10.mgmt.iad2.fedoraproject.org. - bvmhost-a64-11.mgmt.iad2.fedoraproject.org. - - bvmhost-a64-osbs-01.mgmt.iad2.fedoraproject.org. - bvmhost-p09-01.mgmt.iad2.fedoraproject.org. - bvmhost-p09-02.mgmt.iad2.fedoraproject.org. - bvmhost-p09-03.mgmt.iad2.fedoraproject.org. diff --git a/inventory/group_vars/odcs b/inventory/group_vars/odcs index e4d13f8f6d..7fcd2a3ac6 100644 --- a/inventory/group_vars/odcs +++ b/inventory/group_vars/odcs @@ -22,7 +22,6 @@ odcs_allowed_clients_users: kevin: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]} # This is token used by CCCC service running on https://jenkins-fedora-infra.apps.ci.centos.org/job/cccc. odcs@service: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]} - osbs@service: {} releng-odcs@service: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]} # Default queues for general ODCS backends. odcs_celery_queues: diff --git a/inventory/group_vars/odcs_stg b/inventory/group_vars/odcs_stg index ee076bea8c..b5321c85f2 100644 --- a/inventory/group_vars/odcs_stg +++ b/inventory/group_vars/odcs_stg @@ -17,7 +17,6 @@ odcs_allowed_clients_users: humaton: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]} jkaluza: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]} mohanboddu: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]} - osbs@service: {} # Default queues for general ODCS backends. odcs_celery_queues: - pungi_composes diff --git a/inventory/group_vars/osbs b/inventory/group_vars/osbs deleted file mode 100644 index 70c666b6f6..0000000000 --- a/inventory/group_vars/osbs +++ /dev/null @@ -1,36 +0,0 @@ ---- -# Define resources for this group of hosts here. 
-baseiptables: False -docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org" -docker_registry: "candidate-registry.fedoraproject.org" -# fedora container images required by buildroot -fedora_required_images: - - "fedora:latest" -ipa_client_shell_groups: - - fi-apprentice - - sysadmin-noc - - sysadmin-osbs - - sysadmin-releng - - sysadmin-veteran -ipa_client_sudo_groups: - - sysadmin-osbs - - sysadmin-releng -ipa_host_group: osbs -ipa_host_group_desc: OpenShift Build Service -koji_url: "koji.fedoraproject.org" -lvm_size: 60000 -mem_size: 8192 -num_cpus: 2 -#openshift_ansible_upgrading: True - -# docker images required by OpenShift Origin -openshift_required_images: - - "openshift/origin-pod" -osbs_client_conf_path: /etc/osbs.conf -osbs_koji_username: "kojibuilder" -osbs_url: "osbs.fedoraproject.org" -package_excludes: "docker*" -primary_auth_source: ipa -source_registry: "registry.fedoraproject.org" -sudoers: "{{ private }}/files/sudo/osbs-sudoers" -tcp_ports: [80, 443, 8443] diff --git a/inventory/group_vars/osbs_aarch64_masters b/inventory/group_vars/osbs_aarch64_masters deleted file mode 100644 index 71a875c6d7..0000000000 --- a/inventory/group_vars/osbs_aarch64_masters +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Define resources for this group of hosts here. -#Docker command delegated host -composer: compose-x86-01.iad2.fedoraproject.org -docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org" -docker_registry: "candidate-registry.fedoraproject.org" -koji_url: "koji.fedoraproject.org" -lvm_size: 60000 -max_cpu: "{{ num_cpus }}" -max_mem_size: "{{ mem_size }}" -mem_size: 8192 -# Nagios configuration -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'infra'} -openshift_schedulable: False -osbs_client_conf_path: /etc/osbs.conf -osbs_conf_readwrite_users: - - "system:serviceaccount:{{ osbs_namespace }}:default" - - "system:serviceaccount:{{ osbs_namespace }}:builder" -osbs_conf_service_accounts: - - koji - - builder -osbs_conf_sources_command: fedpkg sources -osbs_namespace: "osbs-fedora" -osbs_orchestrator_cpu_limitrange: "95m" -osbs_orchestrator_default_nodeselector: "orchestrator=true" -osbs_url: "osbs.fedoraproject.org" -osbs_worker_default_nodeselector: "worker=true" -osbs_worker_namespace: worker -osbs_worker_service_accounts: - - orchestrator - - builder -source_registry: "registry.fedoraproject.org" -tcp_ports: [80, 443, 8443] -virt_install_command: "{{ virt_install_command_aarch64_one_nic }}" diff --git a/inventory/group_vars/osbs_aarch64_masters_stg b/inventory/group_vars/osbs_aarch64_masters_stg deleted file mode 100644 index 87497fb7c1..0000000000 --- a/inventory/group_vars/osbs_aarch64_masters_stg +++ /dev/null @@ -1,42 +0,0 @@ ---- -# Define resources for this group of hosts here. 
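The openshift_node_labels and openshift_schedulable values in the master group above are openshift-ansible inventory knobs: masters are labelled region=infra and left unschedulable so that builds land only on worker nodes (compare the worker=true and orchestrator=true node selectors elsewhere in these files). In a generated cluster-inventory they would surface roughly as follows (host name hypothetical, format assumed from openshift-ansible 3.x):

  [nodes]
  osbs-aarch64-master01.example.org openshift_node_labels="{'region': 'infra'}" openshift_schedulable=false

The staging aarch64 master group continues below.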
-#Docker command delegated host -composer: compose-x86-01.stg.iad2.fedoraproject.org -docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org" -docker_registry: "candidate-registry.stg.fedoraproject.org" -koji_url: "koji.stg.fedoraproject.org" -lvm_size: 60000 -max_cpu: "{{ num_cpus }}" -max_mem_size: "{{ mem_size }}" -mem_size: 8192 -# Nagios configuration -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'infra'} -openshift_schedulable: False -osbs_client_conf_path: /etc/osbs.conf -osbs_conf_readwrite_users: - - "system:serviceaccount:{{ osbs_namespace }}:default" - - "system:serviceaccount:{{ osbs_namespace }}:builder" -osbs_conf_service_accounts: - - koji - - builder -osbs_conf_sources_command: fedpkg sources -osbs_namespace: "osbs-fedora" -osbs_orchestrator_cpu_limitrange: "95m" -osbs_orchestrator_default_nodeselector: "orchestrator=true" -osbs_url: "osbs.stg.fedoraproject.org" -osbs_worker_default_nodeselector: "worker=true" -osbs_worker_namespace: worker -osbs_worker_service_accounts: - - orchestrator - - builder -source_registry: "registry.stg.fedoraproject.org" -tcp_ports: [80, 443, 8443] -virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}" diff --git a/inventory/group_vars/osbs_aarch64_node b/inventory/group_vars/osbs_aarch64_node deleted file mode 100644 index 93bba6917d..0000000000 --- a/inventory/group_vars/osbs_aarch64_node +++ /dev/null @@ -1,17 +0,0 @@ ---- -# Define resources for this group of hosts here. -lvm_size: 60000 -max_cpu: "{{ num_cpus }}" -max_mem_size: "{{ mem_size }}" -mem_size: 8192 -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'primary', 'zone': 'default'} -tcp_ports: [80, 443, 8443, 10250] -virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}" diff --git a/inventory/group_vars/osbs_aarch64_nodes b/inventory/group_vars/osbs_aarch64_nodes deleted file mode 100644 index 93bba6917d..0000000000 --- a/inventory/group_vars/osbs_aarch64_nodes +++ /dev/null @@ -1,17 +0,0 @@ ---- -# Define resources for this group of hosts here. -lvm_size: 60000 -max_cpu: "{{ num_cpus }}" -max_mem_size: "{{ mem_size }}" -mem_size: 8192 -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'primary', 'zone': 'default'} -tcp_ports: [80, 443, 8443, 10250] -virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}" diff --git a/inventory/group_vars/osbs_aarch64_nodes_stg b/inventory/group_vars/osbs_aarch64_nodes_stg deleted file mode 100644 index 93bba6917d..0000000000 --- a/inventory/group_vars/osbs_aarch64_nodes_stg +++ /dev/null @@ -1,17 +0,0 @@ ---- -# Define resources for this group of hosts here. 
-lvm_size: 60000 -max_cpu: "{{ num_cpus }}" -max_mem_size: "{{ mem_size }}" -mem_size: 8192 -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'primary', 'zone': 'default'} -tcp_ports: [80, 443, 8443, 10250] -virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}" diff --git a/inventory/group_vars/osbs_control b/inventory/group_vars/osbs_control deleted file mode 100644 index d9e8269880..0000000000 --- a/inventory/group_vars/osbs_control +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Define resources for this group of hosts here. -aarch_infra_group: "osbs_aarch64_masters" -# Aarch64 variables -aarch_masters_group: "osbs_aarch64_masters" -aarch_nodes_group: "osbs_aarch64_nodes" -cluster_infra_group: "osbs_masters" -cluster_masters_group: "osbs_masters" -cluster_nodes_group: "osbs_nodes" -inventory_filename: "cluster-inventory" -# Variables used in the ansible-ansible-openshift-ansible role in osbs-cluster playbook -osbs_url: "osbs.fedoraproject.org" -sudoers: "{{ private }}/files/sudo/osbs-sudoers" diff --git a/inventory/group_vars/osbs_control_stg b/inventory/group_vars/osbs_control_stg deleted file mode 100644 index 5ac199e9cd..0000000000 --- a/inventory/group_vars/osbs_control_stg +++ /dev/null @@ -1,13 +0,0 @@ ---- -# Define resources for this group of hosts here. - -# Variables used in the ansible-ansible-openshift-ansible role in osbs-cluster playbook -aarch_infra_group: "osbs_aarch64_masters_stg" -# Aarch64 variables -aarch_masters_group: "osbs_aarch64_masters_stg" -aarch_nodes_group: "osbs_aarch64_nodes_stg" -cluster_infra_group: "osbs_masters_stg" -cluster_masters_group: "osbs_masters_stg" -cluster_nodes_group: "osbs_nodes_stg" -inventory_filename: "cluster-inventory-stg" -osbs_url: "osbs.stg.fedoraproject.org" diff --git a/inventory/group_vars/osbs_masters b/inventory/group_vars/osbs_masters deleted file mode 100644 index 67e31ecd63..0000000000 --- a/inventory/group_vars/osbs_masters +++ /dev/null @@ -1,134 +0,0 @@ ---- -# Define resources for this group of hosts here. 
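A note on the osbs_conf_readwrite_users entries in the master groups above (the osbs_masters file below repeats them): they use the fully-qualified Kubernetes service-account subject form, system:serviceaccount:<namespace>:<name>. With osbs_namespace set to osbs-fedora, the two templated entries expand to:

  system:serviceaccount:osbs-fedora:default
  system:serviceaccount:osbs-fedora:builder

that is, the default and builder service accounts of the build namespace are the identities granted read/write access. The osbs_masters definition, including its reactor config map, follows.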
-_osbs_reactor_config_map: - artifacts_allowed_domains: [] - #- download.devel.redhat.com/released - #- download.devel.redhat.com/devel/candidates - - clusters: - aarch64: - - enabled: True - max_concurrent_builds: 1 - name: "aarch64" - x86_64: - - enabled: True - max_concurrent_builds: 2 - name: "x86_64" - clusters_client_config_dir: "/var/run/secrets/atomic-reactor/client-config-secret" - content_versions: - - v2 - flatpak: - base_image: "registry.fedoraproject.org/flatpak-build-base:latest" - metadata: both - group_manifests: True - image_equal_labels: - - ['description', 'io.k8s.description'] - image_labels: - authoritative-source-url: "{{ source_registry }}" - distribution-scope: public - vendor: "Fedora Project" - koji: - auth: - krb_keytab_path: "FILE:/etc/krb5.osbs_{{ osbs_url }}.keytab" - krb_principal: "osbs/{{osbs_url}}@{{ ipa_realm }}" - hub_url: "https://koji{{ env_suffix }}.fedoraproject.org/kojihub" - root_url: "https://koji{{ env_suffix }}.fedoraproject.org/" - odcs: - api_url: "https://odcs{{ env_suffix }}.fedoraproject.org/api/1" - auth: - openidc_dir: "/var/run/secrets/atomic-reactor/odcs-oidc-secret" - default_signing_intent: "unsigned" - signing_intents: - - keys: [] - name: unsigned - openshift: - auth: - enable: True - build_json_dir: /usr/share/osbs - insecure: true - url: "https://{{ osbs_url }}" - platform_descriptors: "{{ osbs_platform_descriptors }}" - prefer_schema1_digest: False - registries: - - auth: - cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg - url: https://candidate-registry.fedoraproject.org/v2 - required_secrets: - - v2-registry-dockercfg - - odcs-oidc-secret - skip_koji_check_for_base_image: True - source_registry: - insecure: True - url: "{{ source_registry }}" - sources_command: "{{ osbs_conf_sources_command }}" - version: 1 - worker_token_secrets: - - x86-64-orchestrator - - aarch64-orchestrator - - client-config-secret -_osbs_scratch_reactor_config_map_overrides: - image_labels: - distribution-scope: private -#Docker command delegated host -composer: compose-x86-01.iad2.fedoraproject.org -docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org" -docker_registry: "candidate-registry.fedoraproject.org" -koji_url: "koji.fedoraproject.org" -lvm_size: 60000 -mem_size: 8192 -# Nagios configuration -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'infra'} -openshift_schedulable: False -osbs_client_conf_path: /etc/osbs.conf -osbs_conf_readwrite_users: - - "system:serviceaccount:{{ osbs_namespace }}:default" - - "system:serviceaccount:{{ osbs_namespace }}:builder" -osbs_conf_service_accounts: - - koji - - builder -osbs_conf_sources_command: fedpkg sources -osbs_conf_worker_clusters: - aarch64: - - max_concurrent_builds: 1 - name: aarch64 - openshift_url: "https://osbs-aarch64-master01.iad2.fedoraproject.org:8443/" - verify_ssl: 'false' - x86_64: - - max_concurrent_builds: 2 - name: x86_64 - openshift_url: "https://osbs.fedoraproject.org/" - verify_ssl: 'false' -osbs_koji_username: "kojibuilder" -osbs_namespace: "osbs-fedora" -osbs_odcs_enabled: true -osbs_orchestrator_cpu_limitrange: "95m" -osbs_orchestrator_default_nodeselector: "orchestrator=true" -osbs_platform_descriptors: - - architecture: amd64 - platform: x86_64 - - architecture: arm64 - platform: aarch64 -osbs_reactor_config_maps: - - data: "{{ _osbs_reactor_config_map }}" - name: reactor-config-map - - data: > - {{ _osbs_reactor_config_map | - - 
combine(_osbs_scratch_reactor_config_map_overrides, recursive=True) }} - name: reactor-config-map-scratch -osbs_url: "osbs.fedoraproject.org" -osbs_worker_default_nodeselector: "worker=true" -osbs_worker_namespace: worker -osbs_worker_service_accounts: - - orchestrator - - builder -source_registry: "registry.fedoraproject.org" -tcp_ports: [80, 443, 8443] diff --git a/inventory/group_vars/osbs_masters_stg b/inventory/group_vars/osbs_masters_stg deleted file mode 100644 index f0c4b414dc..0000000000 --- a/inventory/group_vars/osbs_masters_stg +++ /dev/null @@ -1,129 +0,0 @@ ---- -# Define resources for this group of hosts here. -_osbs_reactor_config_map: - artifacts_allowed_domains: [] - #- download.devel.redhat.com/released - #- download.devel.redhat.com/devel/candidates - - clusters: - aarch64: - - enabled: True - max_concurrent_builds: 1 - name: "aarch64" - x86_64: - - enabled: True - max_concurrent_builds: 2 - name: "x86_64" - clusters_client_config_dir: "/var/run/secrets/atomic-reactor/client-config-secret" - content_versions: - - v2 - flatpak: - base_image: "registry.fedoraproject.org/flatpak-build-base:latest" - metadata: both - group_manifests: True - image_equal_labels: - - ['description', 'io.k8s.description'] - image_labels: - authoritative-source-url: "{{ source_registry }}" - distribution-scope: public - vendor: "Fedora Project" - koji: - auth: - krb_keytab_path: "FILE:/etc/krb5.osbs_{{ osbs_url }}.keytab" - krb_principal: "osbs/{{osbs_url}}@{{ ipa_realm }}" - hub_url: "https://koji{{ env_suffix }}.fedoraproject.org/kojihub" - root_url: "https://koji{{ env_suffix }}.fedoraproject.org/" - odcs: - api_url: "https://odcs{{ env_suffix }}.fedoraproject.org/api/1" - auth: - openidc_dir: "/var/run/secrets/atomic-reactor/odcs-oidc-secret" - default_signing_intent: "unsigned" - signing_intents: - - keys: [] - name: unsigned - openshift: - auth: - enable: True - build_json_dir: /usr/share/osbs - insecure: true - url: "https://{{ osbs_url }}" - platform_descriptors: "{{ osbs_platform_descriptors }}" - prefer_schema1_digest: False - registries: - - auth: - cfg_path: /var/run/secrets/atomic-reactor/v2-registry-dockercfg - insecure: False - url: https://candidate-registry.stg.fedoraproject.org/v2 - required_secrets: - - v2-registry-dockercfg - - odcs-oidc-secret - skip_koji_check_for_base_image: True - source_registry: - insecure: True - url: "{{ source_registry }}" - sources_command: "{{ osbs_conf_sources_command }}" - version: 1 - worker_token_secrets: - - x86-64-orchestrator - - aarch64-orchestrator - - client-config-secret -_osbs_scratch_reactor_config_map_overrides: - image_labels: - distribution-scope: private -#Docker command delegated host -composer: compose-x86-01.stg.iad2.fedoraproject.org -docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org" -docker_registry: "candidate-registry.stg.fedoraproject.org" -koji_url: "koji.stg.fedoraproject.org" -lvm_size: 60000 -mem_size: 8192 -# Nagios configuration -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'infra'} -openshift_schedulable: False -osbs_client_conf_path: /etc/osbs.conf -osbs_conf_readwrite_users: - - "system:serviceaccount:{{ osbs_namespace }}:default" - - "system:serviceaccount:{{ osbs_namespace }}:builder" -osbs_conf_service_accounts: - - koji - - builder -osbs_conf_sources_command: fedpkg sources -osbs_conf_worker_clusters: - x86_64: - - max_concurrent_builds: 2 - name: x86_64 - openshift_url: 
"https://osbs-master01.stg.iad2.fedoraproject.org:8443" - verify_ssl: 'false' -osbs_namespace: "osbs-fedora" -osbs_odcs_enabled: true -osbs_orchestrator_cpu_limitrange: "95m" -osbs_orchestrator_default_nodeselector: "orchestrator=true" -osbs_platform_descriptors: - - architecture: amd64 - platform: x86_64 - - architecture: arm64 - platform: aarch64 -osbs_reactor_config_maps: - - data: "{{ _osbs_reactor_config_map }}" - name: reactor-config-map - - data: > - {{ _osbs_reactor_config_map | - - combine(_osbs_scratch_reactor_config_map_overrides, recursive=True) }} - name: reactor-config-map-scratch -osbs_url: "osbs.stg.fedoraproject.org" -osbs_worker_default_nodeselector: "worker=true" -osbs_worker_namespace: worker -osbs_worker_service_accounts: - - orchestrator - - builder -source_registry: "registry.fedoraproject.org" -tcp_ports: [80, 443, 8443] diff --git a/inventory/group_vars/osbs_nodes b/inventory/group_vars/osbs_nodes deleted file mode 100644 index b0ba8c8835..0000000000 --- a/inventory/group_vars/osbs_nodes +++ /dev/null @@ -1,20 +0,0 @@ ---- -# Define resources for this group of hosts here. -docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org" -docker_registry: "candidate-registry.fedoraproject.org" -koji_url: "koji.fedoraproject.org" -lvm_size: 60000 -mem_size: 8192 -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -osbs_client_conf_path: /etc/osbs.conf -osbs_koji_username: "kojibuilder" -osbs_url: "osbs.fedoraproject.org" -source_registry: "registry.fedoraproject.org" -tcp_ports: [80, 443, 8443, 10250] diff --git a/inventory/group_vars/osbs_nodes_stg b/inventory/group_vars/osbs_nodes_stg deleted file mode 100644 index 618dfb3858..0000000000 --- a/inventory/group_vars/osbs_nodes_stg +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Define resources for this group of hosts here. -lvm_size: 60000 -mem_size: 8192 -nagios_Check_Services: - dhcpd: false - httpd: false - named: false - nrpe: true - sshd: true - swap: false -num_cpus: 2 -openshift_node_labels: {'region': 'primary', 'zone': 'default'} -tcp_ports: [80, 443, 8443, 10250] diff --git a/inventory/group_vars/osbs_stg b/inventory/group_vars/osbs_stg deleted file mode 100644 index 3f06f1a64e..0000000000 --- a/inventory/group_vars/osbs_stg +++ /dev/null @@ -1,32 +0,0 @@ ---- -# Define resources for this group of hosts here. 
-baseiptables: False -docker_cert_dir: "/etc/docker/certs.d/candidate-registry.stg.fedoraproject.org" -docker_registry: "candidate-registry.stg.fedoraproject.org" -# fedora container images required by buildroot -fedora_required_images: - - "fedora:latest" -ipa_client_shell_groups: - - fi-apprentice - - sysadmin-noc - - sysadmin-osbs - - sysadmin-releng - - sysadmin-veteran -ipa_client_sudo_groups: - - sysadmin-osbs - - sysadmin-releng -ipa_host_group: osbs -ipa_host_group_desc: OpenShift Build Service -koji_url: "koji.stg.fedoraproject.org" -lvm_size: 60000 -mem_size: 8192 -num_cpus: 2 -openshift_ansible_upgrading: True -# docker images required by OpenShift Origin -openshift_required_images: - - "openshift/origin-pod" -osbs_client_conf_path: /etc/osbs.conf -osbs_koji_username: "kojibuilder_stg" -osbs_url: "osbs.stg.fedoraproject.org" -source_registry: "registry.fedoraproject.org" -tcp_ports: [80, 443, 8443] diff --git a/inventory/hardware b/inventory/hardware index 20eb8a8e54..0e1bf2fc5a 100644 --- a/inventory/hardware +++ b/inventory/hardware @@ -110,7 +110,6 @@ bvmhost-a64-07.iad2.fedoraproject.org bvmhost-a64-08.iad2.fedoraproject.org bvmhost-a64-09.iad2.fedoraproject.org bvmhost-a64-11.iad2.fedoraproject.org -bvmhost-a64-osbs-01.iad2.fedoraproject.org openqa-a64-worker01.iad2.fedoraproject.org openqa-a64-worker02.iad2.fedoraproject.org openqa-a64-worker03.iad2.fedoraproject.org diff --git a/inventory/host_vars/osbs-aarch64-master01.iad2.fedoraproject.org b/inventory/host_vars/osbs-aarch64-master01.iad2.fedoraproject.org deleted file mode 100644 index 637016827a..0000000000 --- a/inventory/host_vars/osbs-aarch64-master01.iad2.fedoraproject.org +++ /dev/null @@ -1,15 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.170.254 -eth0_ipv4_ip: 10.3.170.147 -host_group: osbs-aarch64-masters -ks_repo: http://10.3.163.35/pub/fedora/linux/releases/33/Everything/aarch64/os/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora-33-aarch64-osbs -lvm_size: 60g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-a64-osbs-01.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-aarch64-node01.iad2.fedoraproject.org b/inventory/host_vars/osbs-aarch64-node01.iad2.fedoraproject.org deleted file mode 100644 index 3752500ef6..0000000000 --- a/inventory/host_vars/osbs-aarch64-node01.iad2.fedoraproject.org +++ /dev/null @@ -1,15 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.170.254 -eth0_ipv4_ip: 10.3.170.148 -host_group: osbs-aarch64-nodes -ks_repo: http://10.3.163.35/pub/fedora/linux/releases/33/Everything/aarch64/os/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora-33-aarch64-osbs -lvm_size: 60g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-a64-osbs-01.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-aarch64-node02.iad2.fedoraproject.org b/inventory/host_vars/osbs-aarch64-node02.iad2.fedoraproject.org deleted file mode 100644 index 1700a996cf..0000000000 --- a/inventory/host_vars/osbs-aarch64-node02.iad2.fedoraproject.org +++ /dev/null @@ -1,15 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.170.254 -eth0_ipv4_ip: 10.3.170.149 -host_group: osbs-aarch64-nodes -ks_repo: http://10.3.163.35/pub/fedora/linux/releases/33/Everything/aarch64/os/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora-33-aarch64-osbs -lvm_size: 60g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 
900 -num_cpus: 4 -vmhost: bvmhost-a64-osbs-01.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-control01.iad2.fedoraproject.org b/inventory/host_vars/osbs-control01.iad2.fedoraproject.org deleted file mode 100644 index 037838f682..0000000000 --- a/inventory/host_vars/osbs-control01.iad2.fedoraproject.org +++ /dev/null @@ -1,13 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.169.254 -eth0_ipv4_ip: 10.3.169.112 -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-iad2 -max_mem_size: 4096 -mem_size: 4096 -nagios_Check_Services: - mail: false - nrpe: false -vmhost: bvmhost-x86-02.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-control01.stg.iad2.fedoraproject.org b/inventory/host_vars/osbs-control01.stg.iad2.fedoraproject.org deleted file mode 100644 index bee6fe5a92..0000000000 --- a/inventory/host_vars/osbs-control01.stg.iad2.fedoraproject.org +++ /dev/null @@ -1,10 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.167.254 -eth0_ipv4_ip: 10.3.167.38 -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-iad2 -max_mem_size: 4096 -mem_size: 4096 -vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-master01.iad2.fedoraproject.org b/inventory/host_vars/osbs-master01.iad2.fedoraproject.org deleted file mode 100644 index d8bf28d99a..0000000000 --- a/inventory/host_vars/osbs-master01.iad2.fedoraproject.org +++ /dev/null @@ -1,14 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.169.254 -eth0_ipv4_ip: 10.3.169.113 -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2 -lvm_size: 120g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-x86-02.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-master01.stg.iad2.fedoraproject.org b/inventory/host_vars/osbs-master01.stg.iad2.fedoraproject.org deleted file mode 100644 index a5e65d7277..0000000000 --- a/inventory/host_vars/osbs-master01.stg.iad2.fedoraproject.org +++ /dev/null @@ -1,15 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.167.254 -eth0_ipv4_ip: 10.3.167.39 -host_group: osbs-stg -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2 -lvm_size: 120g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-node01.iad2.fedoraproject.org b/inventory/host_vars/osbs-node01.iad2.fedoraproject.org deleted file mode 100644 index 9aa0e6fd39..0000000000 --- a/inventory/host_vars/osbs-node01.iad2.fedoraproject.org +++ /dev/null @@ -1,14 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.169.254 -eth0_ipv4_ip: 10.3.169.114 -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2 -lvm_size: 240g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-x86-04.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-node01.stg.iad2.fedoraproject.org b/inventory/host_vars/osbs-node01.stg.iad2.fedoraproject.org deleted file mode 100644 index cc40db4052..0000000000 --- 
a/inventory/host_vars/osbs-node01.stg.iad2.fedoraproject.org +++ /dev/null @@ -1,15 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.167.254 -eth0_ipv4_ip: 10.3.167.40 -host_group: osbs-stg -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2 -lvm_size: 120g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-node02.iad2.fedoraproject.org b/inventory/host_vars/osbs-node02.iad2.fedoraproject.org deleted file mode 100644 index a1eb8c7eb2..0000000000 --- a/inventory/host_vars/osbs-node02.iad2.fedoraproject.org +++ /dev/null @@ -1,14 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.169.254 -eth0_ipv4_ip: 10.3.169.115 -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2 -lvm_size: 240g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-x86-05.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/host_vars/osbs-node02.stg.iad2.fedoraproject.org b/inventory/host_vars/osbs-node02.stg.iad2.fedoraproject.org deleted file mode 100644 index 151c0d5ec1..0000000000 --- a/inventory/host_vars/osbs-node02.stg.iad2.fedoraproject.org +++ /dev/null @@ -1,15 +0,0 @@ ---- -datacenter: iad2 -eth0_ipv4_gw: 10.3.167.254 -eth0_ipv4_ip: 10.3.167.41 -host_group: osbs-stg -ks_repo: http://10.3.163.35/repo/rhel/RHEL7-x86_64/ -ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-7-osbs-iad2 -lvm_size: 120g -max_mem_size: 16384 -mem_size: 16384 -nrpe_procs_crit: 1000 -nrpe_procs_warn: 900 -num_cpus: 4 -vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org -volgroup: /dev/vg_guests diff --git a/inventory/inventory b/inventory/inventory index ca31f4356e..8e453f8279 100644 --- a/inventory/inventory +++ b/inventory/inventory @@ -666,10 +666,6 @@ mm-crawler-dev.stg.iad2.fedoraproject.org odcs-backend01.stg.iad2.fedoraproject.org odcs-frontend01.stg.iad2.fedoraproject.org os-control01.stg.iad2.fedoraproject.org -osbs-control01.stg.iad2.fedoraproject.org -osbs-master01.stg.iad2.fedoraproject.org -osbs-node01.stg.iad2.fedoraproject.org -osbs-node02.stg.iad2.fedoraproject.org pdc-web01.stg.iad2.fedoraproject.org pkgs01.stg.iad2.fedoraproject.org proxy01.stg.iad2.fedoraproject.org @@ -981,45 +977,6 @@ pagure02.fedoraproject.org [pagure_stg] pagure-stg01.fedoraproject.org -[osbs_control] -osbs-control01.iad2.fedoraproject.org - -[osbs_control_stg] -osbs-control01.stg.iad2.fedoraproject.org - -[osbs_nodes] -osbs-node01.iad2.fedoraproject.org -osbs-node02.iad2.fedoraproject.org - -[osbs_masters] -osbs-master01.iad2.fedoraproject.org - -[osbs_aarch64_masters] -osbs-aarch64-master01.iad2.fedoraproject.org - -[osbs_aarch64_nodes] -osbs-aarch64-node01.iad2.fedoraproject.org -osbs-aarch64-node02.iad2.fedoraproject.org - -[osbs:children] -osbs_control -osbs_nodes -osbs_masters -osbs_aarch64_nodes -osbs_aarch64_masters - -[osbs_masters_stg] -osbs-master01.stg.iad2.fedoraproject.org - -[osbs_nodes_stg] -osbs-node01.stg.iad2.fedoraproject.org -osbs-node02.stg.iad2.fedoraproject.org - -[osbs_stg:children] -osbs_control_stg -osbs_masters_stg -osbs_nodes_stg - [ocp:children] os_control ocp_controlplane diff --git a/main.yml b/main.yml index b614d2c3fb..a765182c1a 100644 --- a/main.yml +++ b/main.yml @@ -52,8 +52,6 @@ - import_playbook: /srv/web/infra/ansible/playbooks/groups/odcs.yml - 
import_playbook: /srv/web/infra/ansible/playbooks/groups/openqa-workers.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/openqa.yml -- import_playbook: /srv/web/infra/ansible/playbooks/groups/osbs/deploy-cluster.yml -- import_playbook: /srv/web/infra/ansible/playbooks/groups/osbs/configure-osbs.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/pagure.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/pdc.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/people.yml diff --git a/playbooks/groups/buildhw.yml b/playbooks/groups/buildhw.yml index 8f41448a3f..de859b4897 100644 --- a/playbooks/groups/buildhw.yml +++ b/playbooks/groups/buildhw.yml @@ -14,7 +14,6 @@ pre_tasks: - import_tasks: "{{ tasks_path }}/yumrepos.yml" - - import_tasks: "{{ tasks_path }}/osbs_certs.yml" - name: override nbde_client-network-flush to work around bug copy: diff --git a/playbooks/groups/buildvm.yml b/playbooks/groups/buildvm.yml index bdea1c8165..8f0725a116 100644 --- a/playbooks/groups/buildvm.yml +++ b/playbooks/groups/buildvm.yml @@ -67,18 +67,6 @@ - role: keytab/service kt_location: /etc/kojid/kojid.keytab service: compile - - role: keytab/service - owner_user: root - owner_group: root - service: osbs - host: "osbs.fedoraproject.org" - when: env == "production" - - role: keytab/service - owner_user: root - owner_group: root - service: osbs - host: "osbs.stg.fedoraproject.org" - when: env == "staging" - role: keytab/service owner_user: root owner_group: root @@ -110,106 +98,6 @@ handlers: - import_tasks: "{{ handlers_path }}/restart_services.yml" -- name: configure osbs on koji builders - hosts: buildvm:buildvm_stg - tags: - - osbs - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - pre_tasks: - - import_tasks: "{{ tasks_path }}/osbs_certs.yml" - - import_tasks: "{{ tasks_path }}/osbs_koji_token.yml" - - roles: - - { - role: osbs-client, - when: env == 'staging' and ansible_architecture == 'x86_64', - general: - { - verbose: 0, - build_json_dir: "/usr/share/osbs/", - openshift_required_version: 1.1.0, - }, - default: - { - username: "{{ osbs_koji_stg_username }}", - password: "{{ osbs_koji_stg_password }}", - koji_use_kerberos: True, - koji_kerberos_keytab: "FILE:/etc/krb5.osbs_{{osbs_url}}.keytab", - koji_kerberos_principal: "osbs/{{osbs_url}}@{{ipa_realm}}", - openshift_url: "https://{{ osbs_url }}/", - build_host: "{{ osbs_url }}", - koji_root: "http://{{ koji_root }}", - koji_hub: "https://koji.stg.fedoraproject.org/kojihub", - sources_command: "fedpkg sources", - build_type: "prod", - verify_ssl: true, - use_auth: true, - builder_use_auth: true, - registry_api_versions: "v2", - builder_openshift_url: "https://{{osbs_url}}", - client_config_secret: "client-config-secret", - reactor_config_secret: "reactor-config-secret", - token_secrets: "x86-64-osbs:/var/run/secrets/atomic-reactor/x86-64-orchestrator", - token_file: "/etc/osbs/x86-64-osbs-koji", - namespace: "osbs-fedora", - can_orchestrate: true, - builder_odcs_url: "https://odcs{{ env_suffix }}.fedoraproject.org", - builder_odcs_openidc_secret: "odcs-oidc-secret", - builder_pdc_url: "https://pdc.stg.fedoraproject.org/api/1", - reactor_config_map: "reactor-config-map", - reactor_config_map_scratch: "reactor-config-map-scratch", - build_from: "image:buildroot:latest", - }, - } - - { - role: osbs-client, - when: env == 'production' and ansible_architecture == 
'x86_64', - general: - { - verbose: 0, - build_json_dir: "/usr/share/osbs/", - openshift_required_version: 1.1.0, - }, - default: - { - username: "{{ osbs_koji_prod_username }}", - password: "{{ osbs_koji_prod_password }}", - koji_use_kerberos: True, - koji_kerberos_keytab: "FILE:/etc/krb5.osbs_{{osbs_url}}.keytab", - koji_kerberos_principal: "osbs/{{osbs_url}}@{{ipa_realm}}", - openshift_url: "https://{{ osbs_url }}/", - build_host: "{{ osbs_url }}", - koji_root: "http://{{ koji_root }}", - koji_hub: "https://koji.fedoraproject.org/kojihub", - sources_command: "fedpkg sources", - build_type: "prod", - verify_ssl: true, - use_auth: true, - builder_use_auth: true, - registry_api_versions: "v2", - builder_openshift_url: "https://{{osbs_url}}", - token_secrets: "x86-64-osbs:/var/run/secrets/atomic-reactor/x86-64-orchestrator", - token_file: "/etc/osbs/x86-64-osbs-koji", - namespace: "osbs-fedora", - can_orchestrate: true, - builder_odcs_url: "https://odcs{{ env_suffix }}.fedoraproject.org", - builder_odcs_openidc_secret: "odcs-oidc-secret", - builder_pdc_url: "https://pdc.fedoraproject.org/api/1", - reactor_config_map: "reactor-config-map", - reactor_config_map_scratch: "reactor-config-map-scratch", - build_from: "image:buildroot:latest", - }, - } - handlers: - - import_tasks: "{{ handlers_path }}/restart_services.yml" - - name: configure varnish cache hosts: buildvm-s390x-24.s390.fedoraproject.org:buildvm-s390x-01.stg.s390.fedoraproject.org:buildvm-s390x-14.s390.fedoraproject.org tags: diff --git a/playbooks/groups/koji-hub.yml b/playbooks/groups/koji-hub.yml index 5797232e1c..c739fbd4a5 100644 --- a/playbooks/groups/koji-hub.yml +++ b/playbooks/groups/koji-hub.yml @@ -21,7 +21,6 @@ pre_tasks: - include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README tags: always - - import_tasks: "{{ tasks_path }}/osbs_certs.yml" - import_tasks: "{{ tasks_path }}/yumrepos.yml" roles: diff --git a/playbooks/groups/osbs/configure-osbs.yml b/playbooks/groups/osbs/configure-osbs.yml deleted file mode 100644 index 8d7f37a6a2..0000000000 --- a/playbooks/groups/osbs/configure-osbs.yml +++ /dev/null @@ -1,3 +0,0 @@ -- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/setup-worker-namespace.yml" -- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/setup-orchestrator-namespace.yml" -- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/osbs-post-install.yml" diff --git a/playbooks/groups/osbs/deploy-cluster.yml b/playbooks/groups/osbs/deploy-cluster.yml deleted file mode 100644 index 2ef9bfdd9a..0000000000 --- a/playbooks/groups/osbs/deploy-cluster.yml +++ /dev/null @@ -1,334 +0,0 @@ -# create an osbs server -- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml" - vars: - myhosts: osbs_control -- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml" - vars: - myhosts: osbs_control_stg -- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml" - vars: - myhosts: osbs_nodes:osbs_masters -- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml" - vars: - myhosts: osbs_nodes_stg:osbs_masters_stg -- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml" - vars: - myhosts: osbs_aarch64_nodes_stg:osbs_aarch64_masters_stg:osbs_aarch64_nodes -- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml" - vars: - myhosts: osbs_aarch64_masters - -- name: make the box be real - hosts: 
osbs_control:osbs_masters:osbs_nodes:osbs_control_stg:osbs_masters_stg:osbs_nodes_stg:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:osbs_aarch64_masters:osbs_aarch64_nodes - tags: - - osbs-cluster-prereq - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - pre_tasks: - - include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README - - import_tasks: "{{ tasks_path }}/yumrepos.yml" - - roles: - - base - - rkhunter - - nagios_client - - hosts - - ipa/client - - sudo - - rsyncd - - tasks: - - name: put openshift repo on os- systems - template: src="{{ files }}/openshift/openshift.repo" dest="/etc/yum.repos.d/openshift.repo" - tags: - - config - - packages - - yumrepos - - name: install redhat ca file - package: - name: subscription-manager-rhsm-certificates - state: present - - import_tasks: "{{ tasks_path }}/motd.yml" - - handlers: - - import_tasks: "{{ handlers_path }}/restart_services.yml" - -- name: OSBS control hosts pre-req setup - hosts: osbs_control:osbs_control_stg - tags: - - osbs-cluster-prereq - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - tasks: - - name: deploy private key to control hosts - copy: - src: "{{private}}/files/osbs/{{env}}/control_key" - dest: "/root/.ssh/id_rsa" - owner: root - mode: 0600 - - - name: set ansible to use pipelining - ini_file: - dest: /etc/ansible/ansible.cfg - section: ssh_connection - option: pipelining - value: "True" - -- name: Setup cluster masters pre-reqs - hosts: osbs_masters_stg:osbs_masters:osbs_aarch64_masters_stg:osbs_aarch64_masters - tags: - - osbs-cluster-prereq - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - tasks: - - name: ensure origin conf dir exists - file: - path: "/etc/origin" - state: "directory" - - - name: create cert dir for openshift public facing REST API SSL - file: - path: "/etc/origin/master/named_certificates" - state: "directory" - - - name: install cert for openshift public facing REST API SSL - copy: - src: "{{private}}/files/osbs/{{env}}/osbs-internal.pem" - dest: "/etc/origin/master/named_certificates/{{osbs_url}}.pem" - - - name: install key for openshift public facing REST API SSL - copy: - src: "{{private}}/files/osbs/{{env}}/osbs-internal.key" - dest: "/etc/origin/master/named_certificates/{{osbs_url}}.key" - - - name: place htpasswd file - copy: - src: "{{private}}/files/httpd/osbs-{{env}}.htpasswd" - dest: /etc/origin/master/htpasswd - - -- name: Setup cluster hosts pre-reqs - hosts: osbs_masters_stg:osbs_nodes_stg:osbs_masters:osbs_nodes:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:osbs_aarch64_masters:osbs_aarch64_nodes - tags: - - osbs-cluster-prereq - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - handlers: - - name: restart NetworkManager - service: - name: NetworkManager - state: restarted - - tasks: - - name: Install necessary packages that openshift-ansible needs - package: - state: installed - name: - - tar - - rsync - - python3-dbus - - NetworkManager - - python3-libselinux - - python3-PyYAML - - - name: 
Deploy controller public ssh keys to osbs cluster hosts - authorized_key: - user: root - key: "{{ lookup('file', '{{private}}/files/osbs/{{env}}/control_key.pub') }}" - - name: Create file for eth0 config - file: - path: "/etc/sysconfig/network-scripts/ifcfg-eth0" - state: touch - mode: 0644 - owner: root - group: root - - # This is required for OpenShift built-in SkyDNS inside the overlay network - # of the cluster - - name: ensure NM_CONTROLLED is set to "yes" for osbs cluster - lineinfile: - dest: "/etc/sysconfig/network-scripts/ifcfg-eth0" - line: "NM_CONTROLLED=yes" - notify: - - restart NetworkManager - - # This is required for OpenShift built-in SkyDNS inside the overlay network - # of the cluster - - name: ensure NetworkManager is enabled and started - service: - name: NetworkManager - state: started - enabled: yes - - - name: cron entry to clean up docker storage - copy: - src: "{{files}}/osbs/cleanup-docker-storage" - dest: "/etc/cron.d/cleanup-docker-storage" - - - name: copy docker-storage-setup config - copy: - src: "{{files}}/osbs/docker-storage-setup" - dest: "/etc/sysconfig/docker-storage-setup" - - - name: update ca certificates - command: 'update-ca-trust' - -- name: Deploy kerberos keytab to cluster hosts - hosts: osbs_masters_stg:osbs_nodes_stg:osbs_masters:osbs_nodes:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:osbs_aarch64_masters:osbs_aarch64_nodes - tags: - - osbs-cluster-prereq - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - roles: - - role: keytab/service - owner_user: root - owner_group: root - service: osbs - host: "osbs.fedoraproject.org" - when: env == "production" - - role: keytab/service - owner_user: root - owner_group: root - service: osbs - host: "osbs.stg.fedoraproject.org" - when: env == "staging" - -- name: Deploy OpenShift Cluster x86_64 - hosts: osbs_control:osbs_control_stg - tags: - - osbs-deploy-openshift - - osbs-x86-deploy-openshift - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - roles: - - role: ansible-ansible-openshift-ansible - cluster_inventory_filename: "{{ inventory_filename }}" - openshift_master_public_api_url: "https://{{ osbs_url }}:8443" - openshift_release: "v3.11" - openshift_version: "v3.11" - openshift_pkg_version: "-3.11*" - openshift_ansible_path: "/root/openshift-ansible" - openshift_ansible_pre_playbook: "playbooks/prerequisites.yml" - openshift_ansible_playbook: "playbooks/deploy_cluster.yml" - openshift_ansible_version: "openshift-ansible-3.11.51-1" - openshift_ansible_ssh_user: root - openshift_ansible_install_examples: false - openshift_ansible_containerized_deploy: false - openshift_cluster_masters_group: "{{ cluster_masters_group }}" - openshift_cluster_nodes_group: "{{ cluster_nodes_group }}" - openshift_cluster_infra_group: "{{ cluster_infra_group }}" - openshift_auth_profile: "osbs" - openshift_cluster_url: "{{osbs_url}}" - openshift_master_ha: false - openshift_debug_level: 2 - openshift_shared_infra: true - openshift_deployment_type: "openshift-enterprise" - openshift_ansible_use_crio: false - openshift_ansible_crio_only: false - tags: ['openshift-cluster-x86','ansible-ansible-openshift-ansible'] - -- name: Deploy OpenShift Cluster aarch64 - hosts: osbs_control:osbs_control_stg - tags: - - osbs-deploy-openshift - - 
osbs-aarch-deploy-openshift - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - roles: - - role: ansible-ansible-openshift-ansible - cluster_inventory_filename: "{{ inventory_filename }}" - openshift_htpasswd_file: "/etc/origin/htpasswd" - openshift_master_public_api_url: "https://{{ osbs_url }}:8443" - openshift_release: "v3.11" - openshift_version: "v3.11" - openshift_pkg_version: "-3.11.2" - openshift_ansible_path: "/root/openshift-ansible" - openshift_ansible_pre_playbook: "playbooks/prerequisites.yml" - openshift_ansible_playbook: "playbooks/deploy_cluster.yml" - openshift_ansible_version: "openshift-ansible-3.11.51-1" - openshift_ansible_ssh_user: root - openshift_ansible_install_examples: false - openshift_ansible_containerized_deploy: false - openshift_cluster_masters_group: "{{ aarch_masters_group }}" - openshift_cluster_nodes_group: "{{ aarch_nodes_group }}" - openshift_cluster_infra_group: "{{ aarch_infra_group }}" - openshift_auth_profile: "osbs" - openshift_cluster_url: "{{osbs_url}}" - openshift_master_ha: false - openshift_debug_level: 2 - openshift_shared_infra: true - openshift_deployment_type: "origin" - openshift_ansible_python_interpreter: "/usr/bin/python3" - openshift_ansible_use_crio: false - openshift_ansible_crio_only: false - openshift_arch: "aarch64" - tags: ['openshift-cluster-aarch','ansible-ansible-openshift-ansible'] - -- name: Setup OSBS requirements for OpenShift cluster hosts - hosts: osbs_masters_stg:osbs_nodes_stg:osbs_masters:osbs_nodes - tags: - - osbs-cluster-req - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - tasks: - - name: Ensure /etc/dnsmasq.d/ dir exists - file: path="/etc/dnsmasq.d/" state=directory - name: install fedora dnsmasq specific top-config - copy: - src: "{{files}}/osbs/fedora-dnsmasq-master.conf.{{env}}" - dest: "/etc/dnsmasq.conf" - when: - is_fedora is defined or (ansible_distribution_major_version|int > 8 and ansible_distribution == 'RedHat') - name: install fedora dnsmasq specific sub-config - copy: - src: "{{files}}/osbs/fedora-dnsmasq.conf.{{env}}" - dest: "/etc/dnsmasq.d/fedora-dns.conf" diff --git a/playbooks/groups/osbs/osbs-post-install.yml b/playbooks/groups/osbs/osbs-post-install.yml deleted file mode 100644 index ba6fd7e670..0000000000 --- a/playbooks/groups/osbs/osbs-post-install.yml +++ /dev/null @@ -1,222 +0,0 @@ -- name: post-install master host osbs tasks - hosts: osbs_masters_stg:osbs_masters:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0] - tags: - - osbs-post-install - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - /srv/private/ansible/vars.yml - - /srv/private/ansible/files/openstack/passwords.yml - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - vars: - osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig - osbs_environment: - KUBECONFIG: "{{ osbs_kubeconfig_path }}" - - tasks: - - name: cron entry to clean up old builds - copy: - src: "{{files}}/osbs/cleanup-old-osbs-builds" - dest: "/etc/cron.d/cleanup-old-osbs-builds" - -- name: post-install osbs control tasks - hosts: osbs_control - tags: osbs-post-install - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - /srv/private/ansible/vars.yml - - /srv/private/ansible/files/openstack/passwords.yml - - 
/srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - tasks: - - name: enable nrpe for monitoring (noc01) - iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.3.163.10 state=present jump=ACCEPT - tags: - - iptables - - -- name: post-install node host osbs tasks - hosts: osbs_masters:osbs_masters_stg:osbs_aarch64_masters:osbs_nodes_stg:osbs_nodes:osbs_aarch64_nodes_stg:osbs_aarch64_nodes - tags: - - osbs-post-install - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - /srv/private/ansible/vars.yml - - /srv/private/ansible/files/openstack/passwords.yml - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - vars: - osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig - osbs_environment: - KUBECONFIG: "{{ osbs_kubeconfig_path }}" - - - handlers: - - name: Remove the previous buildroot image - docker_image: - state: absent - name: buildroot - - - name: Build the new buildroot container - docker_image: - path: /etc/osbs/buildroot/ - name: buildroot - nocache: yes - - - name: restart and reload docker service - systemd: - name: docker - state: restarted - daemon_reload: yes - - tasks: - - name: enable nrpe for monitoring (noc01) - iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.3.163.10 state=present jump=ACCEPT - tags: - - iptables - - - name: copy docker iptables script - copy: - src: "{{files}}/osbs/fix-docker-iptables.{{ env }}" - dest: /usr/local/bin/fix-docker-iptables - mode: 0755 - tags: - - iptables - notify: - - restart and reload docker service - - - name: copy docker custom service config - copy: - src: "{{files}}/osbs/docker.firewall.service" - dest: /etc/systemd/system/docker.service.d/firewall.conf - tags: - - docker - notify: - - restart and reload docker service - - - name: copy the osbs customization file - copy: - src: "{{item}}" - dest: "/etc/osbs/buildroot/" - owner: root - mode: 0600 - with_items: - - "{{files}}/osbs/worker_customize.json" - - "{{files}}/osbs/orchestrator_customize.json" - - - name: Create buildroot container conf directory - file: - path: "/etc/osbs/buildroot/" - state: directory - - - name: Upload Dockerfile for buildroot container - template: - src: "{{ files }}/osbs/buildroot-Dockerfile-{{env}}.j2" - dest: "/etc/osbs/buildroot/Dockerfile" - mode: 0400 - notify: - - Remove the previous buildroot image - - Build the new buildroot container - - - name: Upload krb5.conf for buildroot container - template: - src: "{{ roles_path }}/base/templates/krb5.conf.j2" - dest: "/etc/osbs/buildroot/krb5.conf" - mode: 0644 - notify: - - Remove the previous buildroot image - - Build the new buildroot container - - - name: Upload internal CA for buildroot - copy: - src: "{{private}}/files/osbs/{{env}}/osbs-internal.pem" - dest: "/etc/osbs/buildroot/ca.crt" - mode: 0400 - notify: - - Remove the previous buildroot image - - Build the new buildroot container - - - name: stat infra repofile - stat: - path: "/etc/yum.repos.d/infra-tags.repo" - register: infra_repo_stat - - - name: stat /etc/osbs/buildroot/ infra repofile - stat: - path: "/etc/osbs/buildroot/infra-tags.repo" - register: etcosbs_infra_repo_stat - - - name: remove old /etc/osbs/buildroot/ infra repofile - file: - path: "/etc/osbs/buildroot/infra-tags.repo" - state: absent - when: etcosbs_infra_repo_stat.stat.exists and infra_repo_stat.stat.checksum != etcosbs_infra_repo_stat.stat.checksum - - - name: Copy repofile for buildroot container (because Docker) - copy: - src: "/etc/yum.repos.d/infra-tags.repo" - dest: 
"/etc/osbs/buildroot/infra-tags.repo" - remote_src: true - notify: - - Remove the previous buildroot image - - Build the new buildroot container - when: etcosbs_infra_repo_stat.stat.exists == false - - - name: stat /etc/ keytab - stat: - path: "/etc/krb5.osbs_{{osbs_url}}.keytab" - register: etc_kt_stat - - - name: stat /etc/osbs/buildroot/ keytab - stat: - path: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab" - register: etcosbs_kt_stat - - - name: remove old hardlink to /etc/osbs/buildroot/ keytab - file: - path: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab" - state: absent - when: etcosbs_kt_stat.stat.exists and etc_kt_stat.stat.checksum != etcosbs_kt_stat.stat.checksum - - - name: Hardlink keytab for buildroot container (because Docker) - file: - src: "/etc/krb5.osbs_{{osbs_url}}.keytab" - dest: "/etc/osbs/buildroot/krb5.osbs_{{osbs_url}}.keytab" - state: hard - notify: - - Remove the previous buildroot image - - Build the new buildroot container - - when: etcosbs_kt_stat.stat.exists == false - - - name: pull fedora required docker images - command: "docker pull registry.fedoraproject.org/fedora:latest" - register: docker_pull_fedora - changed_when: "'Downloaded newer image' in docker_pull_fedora.stdout" - - - name: enable nrpe for monitoring (noc01) - iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.3.163.10 state=present jump=ACCEPT - - - name: make directory for cni config - file: - path: /etc/cni/net.d/ - state: directory - - - name: Add cni config - copy: - dest: /etc/cni/net.d/80-openshift-network.conf - content: | - { - "cniVersion": "0.2.0", - "name": "openshift-sdn", - "type": "openshift-sdn" - } - - - name: Set ulimit for docker - copy: - dest: /etc/systemd/system/docker.service.d/override.conf - content: | - - [Service] - LimitNOFILE=1048576 - notify: restart and reload docker service - - - diff --git a/playbooks/groups/osbs/rebuild-osbs-buildroot.yml b/playbooks/groups/osbs/rebuild-osbs-buildroot.yml deleted file mode 100644 index c287f7972b..0000000000 --- a/playbooks/groups/osbs/rebuild-osbs-buildroot.yml +++ /dev/null @@ -1,15 +0,0 @@ -# This playbook can be used to update to rebuild the buildroot image of -# OSBS. This is useful when we want to update some dependencies in the image. - -- name: rebuild the osbs buildroot image. - hosts: osbs_nodes:osbs_nodes_stg:osbs_aarch64_nodes_stg:osbs_aarch64_nodes - gather_facts: false - user: root - - tasks: - - - name: Backup the current buildroot - command: "docker tag buildroot:latest buildroot:backup" - - - name: rebuild the buildroot container image. 
- command: "docker build /etc/osbs/buildroot -t buildroot --no-cache --pull" diff --git a/playbooks/groups/osbs/setup-orchestrator-namespace.yml b/playbooks/groups/osbs/setup-orchestrator-namespace.yml deleted file mode 100644 index 5945ed5b89..0000000000 --- a/playbooks/groups/osbs/setup-orchestrator-namespace.yml +++ /dev/null @@ -1,168 +0,0 @@ -- name: Create orchestrator namespace - hosts: osbs_masters_stg[0]:osbs_masters[0] - roles: - - role: osbs-namespace - osbs_orchestrator: true - osbs_worker_clusters: "{{ osbs_conf_worker_clusters }}" - osbs_cpu_limitrange: "{{ osbs_orchestrator_cpu_limitrange }}" - osbs_nodeselector: "{{ osbs_orchestrator_default_nodeselector|default('') }}" - osbs_sources_command: "{{ osbs_conf_sources_command }}" - osbs_readwrite_users: "{{ osbs_conf_readwrite_users }}" - osbs_service_accounts: "{{ osbs_conf_service_accounts }}" - koji_use_kerberos: true - koji_kerberos_keytab: "FILE:/etc/krb5.osbs_{{ osbs_url }}.keytab" - koji_kerberos_principal: "osbs/{{osbs_url}}@{{ ipa_realm }}" - tags: - - osbs-orchestrator-namespace - -- name: setup reactor config secret in orchestrator namespace - hosts: osbs_masters_stg[0]:osbs_masters[0] - roles: - - role: osbs-secret - osbs_secret_name: reactor-config-secret - osbs_secret_files: - - source: "/tmp/{{ osbs_namespace }}-{{ env }}-reactor-config-secret.yml" - dest: config.yaml - tags: - - osbs-orchestrator-namespace - -- name: setup client config secret in orchestrator namespace - hosts: osbs_masters_stg[0]:osbs_masters[0] - roles: - - role: osbs-secret - osbs_secret_name: client-config-secret - osbs_secret_files: - - source: "/tmp/{{ osbs_namespace }}-{{ env }}-client-config-secret.conf" - dest: osbs.conf - tags: - - osbs-orchestrator-namespace - -- name: setup ODCS secret in orchestrator namespace - hosts: osbs_masters_stg[0]:osbs_masters[0] - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - roles: - - role: osbs-secret - osbs_secret_name: odcs-oidc-secret - osbs_secret_files: - - source: "{{ private }}/files/osbs/{{ env }}/odcs-oidc-token" - dest: token - tags: - - osbs-orchestrator-namespace - -- name: Save orchestrator token x86_64 - hosts: osbs_masters_stg[0]:osbs_masters[0] - tasks: - - name: get orchestrator service account token - command: "oc -n {{ osbs_worker_namespace }} sa get-token orchestrator" - register: orchestator_token_x86_64 - - name: save the token locally - local_action: > - copy - content="{{ orchestator_token_x86_64.stdout }}" - dest=/tmp/.orchestator-token-x86_64 - mode=0400 - tags: - - osbs-orchestrator-namespace - -- name: setup orchestrator token for x86_64-osbs - hosts: osbs_masters_stg[0]:osbs_masters[0] - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - roles: - - role: osbs-secret - osbs_secret_name: x86-64-orchestrator - osbs_secret_files: - - source: "/tmp/.orchestator-token-x86_64" - dest: token - - post_tasks: - - name: Delete the temporary secret file - local_action: > - file - state=absent - path="/tmp/.orchestator-token-x86_64" - tags: - - osbs-orchestrator-namespace - -- name: Save orchestrator token aarch64 - hosts: osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0] - tasks: - - name: get orchestrator service account token - command: "oc -n {{ osbs_worker_namespace }} sa get-token orchestrator" - register: orchestator_token_aarch64 - - name: save the token locally - 
local_action: > - copy - content="{{ orchestator_token_aarch64.stdout }}" - dest=/tmp/.orchestator-token-aarch64 - mode=0400 - tags: - - osbs-orchestrator-namespace - -- name: setup orchestrator token for aarch64-osbs - hosts: osbs_masters_stg[0]:osbs_masters[0] - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - roles: - - role: osbs-secret - osbs_secret_can_fail: true - osbs_secret_name: aarch64-orchestrator - osbs_secret_files: - - source: "/tmp/.orchestator-token-aarch64" - dest: token - - post_tasks: - - name: Delete the temporary secret file - local_action: > - file - state=absent - path="/tmp/.orchestator-token-aarch64" - - tags: - - osbs-orchestrator-namespace - -- name: Add dockercfg secret to allow registry push orchestrator - hosts: osbs_masters_stg[0]:osbs_masters[0] - tags: - - osbs-dockercfg-secret - user: root - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - pre_tasks: - - name: Create the username:password string needed by the template - set_fact: - auth_info_prod: "{{candidate_registry_osbs_prod_username}}:{{candidate_registry_osbs_prod_password}}" - auth_info_stg: "{{candidate_registry_osbs_stg_username}}:{{candidate_registry_osbs_stg_password}}" - - - name: Create the dockercfg secret file - local_action: > - template - src="{{ files }}/osbs/dockercfg-{{env}}-secret.j2" - dest="/tmp/.dockercfg{{ env }}" - mode=0400 - - roles: - - role: osbs-secret - osbs_secret_name: "v2-registry-dockercfg" - osbs_secret_type: kubernetes.io/dockercfg - osbs_secret_files: - - source: "/tmp/.dockercfg{{ env }}" - dest: .dockercfg - - post_tasks: - - name: Delete the temporary secret file - local_action: > - file - state=absent - path="/tmp/.dockercfg{{ env }}" diff --git a/playbooks/groups/osbs/setup-worker-namespace.yml b/playbooks/groups/osbs/setup-worker-namespace.yml deleted file mode 100644 index a3290a73d4..0000000000 --- a/playbooks/groups/osbs/setup-worker-namespace.yml +++ /dev/null @@ -1,78 +0,0 @@ -- name: Create worker namespace - hosts: osbs_masters_stg[0]:osbs_masters[0]:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0] - tags: - - osbs-worker-namespace - user: root - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - vars: - osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig - osbs_environment: - KUBECONFIG: "{{ osbs_kubeconfig_path }}" - - roles: - - role: osbs-namespace - osbs_namespace: "{{ osbs_worker_namespace }}" - osbs_service_accounts: "{{ osbs_worker_service_accounts }}" - osbs_nodeselector: "{{ osbs_worker_default_nodeselector|default('') }}" - osbs_sources_command: "{{ osbs_conf_sources_command }}" - -- name: setup ODCS secret in worker namespace - hosts: osbs_masters_stg[0]:osbs_masters[0]:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0] - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - roles: - - role: osbs-secret - osbs_namespace: "{{ osbs_worker_namespace }}" - osbs_secret_name: odcs-oidc-secret - osbs_secret_files: - - source: "{{ private }}/files/osbs/{{ env }}/odcs-oidc-token" - dest: token - tags: - - osbs-worker-namespace - -- name: Add dockercfg secret to allow registry push worker - hosts: 
osbs_masters_stg[0]:osbs_masters[0]:osbs_aarch64_masters_stg[0]:osbs_aarch64_masters[0] - tags: - - osbs-dockercfg-secret - - osbs-worker-namespace - user: root - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - pre_tasks: - - name: Create the username:password string needed by the template - set_fact: - auth_info_prod: "{{candidate_registry_osbs_prod_username}}:{{candidate_registry_osbs_prod_password}}" - auth_info_stg: "{{candidate_registry_osbs_stg_username}}:{{candidate_registry_osbs_stg_password}}" - - - name: Create the dockercfg secret file - local_action: > - template - src="{{ files }}/osbs/dockercfg-{{env}}-secret.j2" - dest="/tmp/.dockercfg{{ env }}" - mode=0400 - - roles: - - role: osbs-secret - osbs_namespace: "{{ osbs_worker_namespace }}" - osbs_secret_name: "v2-registry-dockercfg" - osbs_secret_type: kubernetes.io/dockercfg - osbs_secret_files: - - source: "/tmp/.dockercfg{{ env }}" - dest: .dockercfg - - post_tasks: - - name: Delete the temporary secret file - local_action: > - file - state=absent - path="/tmp/.dockercfg{{ env }}" diff --git a/playbooks/include/proxies-reverseproxy.yml b/playbooks/include/proxies-reverseproxy.yml index 32dfd8908c..98dc085bcd 100644 --- a/playbooks/include/proxies-reverseproxy.yml +++ b/playbooks/include/proxies-reverseproxy.yml @@ -613,11 +613,6 @@ header_scheme: true keephost: true - - role: httpd/reverseproxy - website: osbs.fedoraproject.org - destname: osbs - proxyurl: http://localhost:10047 - - role: httpd/reverseproxy website: registry.fedoraproject.org destname: registry-fedora diff --git a/playbooks/include/proxies-websites.yml b/playbooks/include/proxies-websites.yml index a862c30d38..51ae2afc81 100644 --- a/playbooks/include/proxies-websites.yml +++ b/playbooks/include/proxies-websites.yml @@ -660,12 +660,6 @@ tags: - fedoraloveskde - - role: httpd/website - site_name: osbs.fedoraproject.org - server_aliases: [osbs.stg.fedoraproject.org] - sslonly: true - cert_name: "{{wildcard_cert_name}}" - - role: httpd/website site_name: "provision{{ env_suffix }}.fedoraproject.org" # Zezere needs non-HTTPS for netboot diff --git a/playbooks/manual/noggin-deployment/uninstall_ipa_client.yml b/playbooks/manual/noggin-deployment/uninstall_ipa_client.yml index b3bb0e80aa..a722eb178d 100644 --- a/playbooks/manual/noggin-deployment/uninstall_ipa_client.yml +++ b/playbooks/manual/noggin-deployment/uninstall_ipa_client.yml @@ -1,5 +1,5 @@ - name: Uninstall IPA client - hosts: bodhi_backend_stg:bugzilla2fedmsg_stg:github2fedmsg_stg:ipsilon_stg:mbs_stg:osbs_control_stg:osbs_masters_stg:osbs_nodes_stg:osbs_aarch64_masters_stg:osbs_aarch64_nodes_stg:buildvm_stg:buildvm_ppc64le_stg:buildvm_aarch64_stg:buildvm_armv7_stg:buildvm_s390x_stg + hosts: bodhi_backend_stg:bugzilla2fedmsg_stg:github2fedmsg_stg:ipsilon_stg:mbs_stg:buildvm_stg:buildvm_ppc64le_stg:buildvm_aarch64_stg:buildvm_armv7_stg:buildvm_s390x_stg user: root vars_files: - /srv/web/infra/ansible/vars/global.yml @@ -16,7 +16,6 @@ - import_playbook: "/srv/web/infra/ansible/playbooks/groups/github2fedmsg.yml" - import_playbook: "/srv/web/infra/ansible/playbooks/groups/ipsilon.yml" - import_playbook: "/srv/web/infra/ansible/playbooks/groups/mbs.yml" -- import_playbook: "/srv/web/infra/ansible/playbooks/groups/osbs/deploy-cluster.yml" - import_playbook: "/srv/web/infra/ansible/playbooks/groups/buildvm.yml" diff --git a/roles/ansible-ansible-openshift-ansible/.travis.yml 
b/roles/ansible-ansible-openshift-ansible/.travis.yml deleted file mode 100644 index 36bbf6208c..0000000000 --- a/roles/ansible-ansible-openshift-ansible/.travis.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -language: python -python: "2.7" - -# Use the new container infrastructure -sudo: false - -# Install ansible -addons: - apt: - packages: - - python-pip - -install: - # Install ansible - - pip install ansible - - # Check ansible version - - ansible --version - - # Create ansible.cfg with correct roles_path - - printf '[defaults]\nroles_path=../' >ansible.cfg - -script: - # Basic role syntax check - - ansible-playbook tests/test.yml -i tests/inventory --syntax-check - -notifications: - webhooks: https://galaxy.ansible.com/api/v1/notifications/ \ No newline at end of file diff --git a/roles/ansible-ansible-openshift-ansible/README.rst b/roles/ansible-ansible-openshift-ansible/README.rst deleted file mode 100644 index b06f1efee5..0000000000 --- a/roles/ansible-ansible-openshift-ansible/README.rst +++ /dev/null @@ -1,54 +0,0 @@ -ansible-ansible-openshift-ansible -################################# - -Ansible role to run ansible on a remote "openshift control" host that will run -`openshift-ansible`_ to deploy a cluster. - -This is a Fedora Infrastructure-specific adaptation, as a role, of the original -prototype located in pagure: - - https://pagure.io/ansible-ansible-openshift-ansible/tree/master - -What? Why? ----------- - -The `openshift-ansible`_ playbooks require that various tasks be run on -``localhost`` in order to build their internal abstracted representation of the -inventory list. Running potentially arbitrary code from external sources on a -bastion host (which is what ``localhost`` would be as the ansible control -machine) is often frowned upon. The goal here is to allow for the deployment of -`openshift-ansible`_ via an intermediate host. - -.. note:: - There is a requirement to set up the SSH keys such that the bastion host - can passwordless ssh into the openshift control host and such that the - openshift control host can passwordless ssh into each of the hosts in - the openshift cluster. This is outside the scope of this document. - - -:: - - +---------------+ +-------------------+ - | | | | - | bastion host +----[ansible]----->| openshift control | - | | | | - +---------------+ +---------+---------+ - | - | - [ansible] - | - | - V - +--------------------------------------------------------------------------+ - | | - | openshift cluster | - | | - | +-----------+ +-----------+ +-----------+ | - | | | | | | | | - | | openshift | ...[masters] | openshift | | openshift | ...[nodes] | - | | master | | node | | node | | - | | | | | | | | - | +-----------+ +-----------+ +-----------+ | - | | - +--------------------------------------------------------------------------+ - diff --git a/roles/ansible-ansible-openshift-ansible/defaults/main.yml b/roles/ansible-ansible-openshift-ansible/defaults/main.yml deleted file mode 100644 index f425035e9c..0000000000 --- a/roles/ansible-ansible-openshift-ansible/defaults/main.yml +++ /dev/null @@ -1,87 +0,0 @@ ---- -# defaults file for ansible-ansible-openshift-ansible -# -# -# - -# Auth Profile -# These are Fedora Infra specific auth profiles -# -# Acceptable values: -# osbs - this will configure htpasswd for use with osbs -# fedoraidp - configure for fedora idp -# fedoraidp-stg - configure for fedora idp staging env -openshift_auth_profile: osbs - -# Do we want OpenShift itself to be containerized? 
-# This is a requirement if using Atomic Host -# -# As of v3.5.x this would mean that all our systems would completely go down -# in the event the docker daemon were to restart or crash. -# -# In the future (as of v3.6 devel branch), this is done with system containers -# and won't be bound to the docker daemon. -openshift_ansible_containerized_deploy: false - -# This will co-host the infra nodes with the primary nodes -openshift_shared_infra: false - -# OpenShift Cluster URL -# Example: openshift.fedoraproject.org -openshift_cluster_url: None - -# OpenShift Console and API listening ports -# These default to 8443 in openshift-ansible -openshift_api_port: 8443 -openshift_console_port: 8443 - -# OpenShift Applications Ingress subdomain (OpenShift routes) -openshift_app_subdomain: None - -# Setup native OpenShift Master High Availability (true or false) -openshift_master_ha: false - -# Destination file name for template-generated cluster inventory -cluster_inventory_filename: "cluster-inventory" - -# Ansible user for use with openshift-ansible playbooks -openshift_ansible_ssh_user: root - -# OpenShift Debug level (Default is 2 upstream) -openshift_debug_level: 2 - -# Release required as per the openshift-ansible -openshift_release: "v1.5.0" - -# OpenShift Deployment Type -# Possible options: -# origin -# openshift-enterprise -deployment_type: origin - -# Install the OpenShift App Examples (value should be "true" or "false") -openshift_ansible_install_examples: false - -# Path to clone the openshift-ansible git repo into -openshift_ansible_path: "/root/openshift-ansible" - -# Relative path inside the openshift-ansible git repo of the playbook to execute -# remotely -openshift_ansible_playbook: "playbooks/byo/config.yml" - -# openshift-ansible version tag, this is the git tag of the "release" of the -# openshift-ansible git repo. We need to track OpenShift v1.x to -# openshift-ansible-3.x.y-1 as that's the release/tag standard upstream. -openshift_ansible_version: "openshift-ansible-3.2.35-1" - -# The group names assigned to these variables are used to create the "effective" -# inventory (via a template) that is used to deploy the OpenShift Cluster via -# openshift-ansible (https://github.com/openshift/openshift-ansible). The values -# assigned here must match group names in the current running inventory or the -# remote effective inventory that actually deploys the OpenShift Cluster will be -# empty causing undesired effects. 
-openshift_cluster_masters_group: "openshift-cluster-masters" -openshift_cluster_nodes_group: "openshift-cluster-nodes" -openshift_cluster_infra_group: "openshift-cluster-nodes" - -openshift_arch: "x86_64" diff --git a/roles/ansible-ansible-openshift-ansible/tasks/main.yml b/roles/ansible-ansible-openshift-ansible/tasks/main.yml deleted file mode 100644 index c6c1bf214a..0000000000 --- a/roles/ansible-ansible-openshift-ansible/tasks/main.yml +++ /dev/null @@ -1,81 +0,0 @@ ---- -# tasks file for ansible-ansible-openshift-ansible -# - -- name: Install required packages - package: name="{{ item }}" state=present - with_items: - - ansible - - git - - pyOpenSSL - - ca-certificates - tags: - - ansible-ansible-openshift-ansible - - ansible-ansible-openshift-ansible-config - -- name: git clone the openshift-ansible repo - git: - repo: "https://github.com/openshift/openshift-ansible.git" - dest: "{{ openshift_ansible_path }}" - version: "{{ openshift_ansible_version }}" - tags: - - ansible-ansible-openshift-ansible - - ansible-ansible-openshift-ansible-config - ignore_errors: true - -- name: generate the inventory file (staging) - template: - src: "cluster-inventory-stg.j2" - dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}" - tags: - - ansible-ansible-openshift-ansible - - ansible-ansible-openshift-ansible-config - when: env == 'staging' and inventory_hostname.startswith('os-') - -- name: generate the inventory file (production) (iad2) - template: - src: "cluster-inventory-iad2-prod.j2" - dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}" - tags: - - ansible-ansible-openshift-ansible - - ansible-ansible-openshift-ansible-config - when: env == 'production' and inventory_hostname.startswith('os-') and datacenter == 'iad2' - -- name: generate the inventory file (osbs) - template: - src: "cluster-inventory-osbs.j2" - dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}" - tags: - - ansible-ansible-openshift-ansible - - ansible-ansible-openshift-ansible-config - when: inventory_hostname.startswith('osbs') - -- name: run ansible prereqs playbook - shell: "ansible-playbook {{ openshift_ansible_pre_playbook }} -i {{ cluster_inventory_filename }}" - args: - chdir: "{{ openshift_ansible_path }}" - register: run_ansible_out - when: openshift_ansible_pre_playbook is defined and openshift_ansible_upgrading is defined - tags: - - ansible-ansible-openshift-ansible - -- name: run ansible - shell: "ansible-playbook {{ openshift_ansible_playbook }} -i {{ cluster_inventory_filename }}" - args: - chdir: "{{ openshift_ansible_path }}" - register: run_ansible_out - tags: - - ansible-ansible-openshift-ansible - when: openshift_ansible_upgrading is defined - -- name: display run ansible stdout_lines - debug: - var: run_ansible_out.stdout_lines - tags: - - ansible-ansible-openshift-ansible - -- name: display run ansible stderr - debug: - var: run_ansible_out.stderr - tags: - - ansible-ansible-openshift-ansible diff --git a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-iad2-prod.j2 b/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-iad2-prod.j2 deleted file mode 100644 index a7c549519a..0000000000 --- a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-iad2-prod.j2 +++ /dev/null @@ -1,1176 +0,0 @@ -# This is an example of an OpenShift-Ansible host inventory that provides the -# minimum recommended configuration for production use. 
This includes 3 masters, -# two infra nodes, two compute nodes, and an haproxy load balancer to load -# balance traffic to the API servers. For a truly production environment you -# should use an external load balancing solution that itself is highly available. - -[masters] -{% for host in groups[openshift_cluster_masters_group] %} -{% if hostvars[host].datacenter == 'iad2' %} -{{ host }} -{% endif %} -{% endfor %} - -[etcd] -{% for host in groups[openshift_cluster_masters_group] %} -{% if hostvars[host].datacenter == 'iad2' %} -{{ host }} -{% endif %} -{% endfor %} - -[nodes] -{% for host in groups[openshift_cluster_masters_group] %} -{% if hostvars[host].datacenter == 'iad2' %} -{{ host }} openshift_node_group_name='node-config-master' -{% endif %} -{% endfor %} -{% for host in groups[openshift_cluster_nodes_group] %} -{% if hostvars[host].datacenter == 'iad2' %} -{{ host }} openshift_node_group_name='node-config-compute' -{% endif %} -{% endfor %} -{% for host in groups['os_infra_nodes'] %} -{% if hostvars[host].datacenter == 'iad2' %} -{{ host }} openshift_node_group_name='node-config-infra' -{% endif %} -{% endfor %} - -#[nfs] -#ose3-master1.test.example.com - -#[lb] -#ose3-lb.test.example.com - -# Create an OSEv3 group that contains the masters and nodes groups -[OSEv3:children] -masters -nodes -etcd -#lb -#nfs - -[OSEv3:vars] - -openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}] -############################################################################### -# Common/ Required configuration variables follow # -############################################################################### -# SSH user, this user should allow ssh based auth without requiring a -# password. If using ssh key based auth, then the key should be managed by an -# ssh agent. -ansible_user={{openshift_ansible_ssh_user}} - -# If ansible_user is not root, ansible_become must be set to true and the -# user must be configured for passwordless sudo -#ansible_become=yes - -# Specify the deployment type. Valid values are origin and openshift-enterprise. -#openshift_deployment_type=origin -openshift_deployment_type={{openshift_deployment_type}} - -# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we -# rely on the version running on the first master. Works best for containerized installs where we can usually -# use this to lookup the latest exact version of the container images, which is the tag actually used to configure -# the cluster. For RPM installations we just verify the version detected in your configured repos matches this -# release. -openshift_release={{openshift_release}} - -{% if openshift_master_ha is defined %} -{% if openshift_master_ha %} -# Native high availability cluster method with optional load balancer. -# If no lb group is defined, the installer assumes that a load balancer has -# been preconfigured. For installation the value of -# openshift_master_cluster_hostname must resolve to the load balancer -# or to one or all of the masters defined in the inventory if no load -# balancer is present. 
-openshift_master_cluster_method=native -openshift_master_cluster_hostname={{openshift_internal_cluster_url}} -openshift_master_cluster_public_hostname={{openshift_cluster_url}} -{% endif %} -{% endif %} - -# default subdomain to use for exposed routes, you should have wildcard dns -# for *.apps.test.example.com that points at your infra nodes which will run -# your router -{% if openshift_app_subdomain is defined %} -openshift_master_default_subdomain={{openshift_app_subdomain}} -{% endif %} - -############################################################################### -# Additional configuration variables follow # -############################################################################### - -# Debug level for all OpenShift components (Defaults to 2) -debug_level={{openshift_debug_level}} - -# Specify an exact container image tag to install or configure. -# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.10.0 -openshift_image_tag="v3.11" - -# Specify an exact rpm version to install or configure. -# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_pkg_version=-3.10.0 -openshift_pkg_version="-3.11.216" - -# If using Atomic Host, you may specify system container image registry for the nodes: -#system_images_registry="docker.io" -# when openshift_deployment_type=='openshift-enterprise' -#system_images_registry="registry.access.redhat.com" - -# Manage openshift example imagestreams and templates during install and upgrade -#openshift_install_examples=true -{% if openshift_ansible_install_examples is defined %} -openshift_install_examples={{openshift_ansible_install_examples}} -{% endif %} - -# Configure logoutURL in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url -#openshift_master_logout_url=http://example.com - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_oauth_templates={'login': '/path/to/login-template.html'} -# openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead. -#openshift_master_oauth_template=/path/to/login-template.html - -# Configure imagePolicyConfig in the master config -# See: https://docs.openshift.org/latest/admin_guide/image_policy.html -#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} - -# Configure master API rate limits for external clients -#openshift_master_external_ratelimit_qps=200 -#openshift_master_external_ratelimit_burst=400 -# Configure master API rate limits for loopback clients -#openshift_master_loopback_ratelimit_qps=300 -#openshift_master_loopback_ratelimit_burst=600 - -# Install and run cri-o. 
-#openshift_use_crio=False -#openshift_use_crio_only=False -{% if openshift_ansible_use_crio is defined %} -openshift_use_crio={{ openshift_ansible_use_crio }} -{% endif %} -{% if openshift_ansible_use_crio_only is defined %} -openshift_use_crio_only={{ openshift_ansible_crio_only }} -{% endif %} -# The following two variables are used when openshift_use_crio is True -# and cleans up after builds that pass through docker. When openshift_use_crio is True -# these variables are set to the defaults shown. You may override them here. -# NOTE: You will still need to tag crio nodes with your given label(s)! -# Enable docker garbage collection when using cri-o -#openshift_crio_enable_docker_gc=True -# Node Selectors to run the garbage collection -#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'} - -# Items added, as is, to end of /etc/sysconfig/docker OPTIONS -# Default value: "--log-driver=journald" -#openshift_docker_options="-l warn --ipv6=false" - -# Specify exact version of Docker to configure or upgrade to. -# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. -# docker_version="1.12.1" - -# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. -# Uncomment below to disable; for example if your kernel does not support the -# Docker overlay/overlay2 storage drivers with SELinux enabled. -#openshift_docker_selinux_enabled=False - -# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. -# docker_upgrade=False - -# Specify a list of block devices to be formatted and mounted on the nodes -# during prerequisites.yml. For each hash, "device", "path", "filesystem" are -# required. To add devices only on certain classes of node, redefine -# container_runtime_extra_storage as a group var. -#container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]' - -# Enable etcd debug logging, defaults to false -# etcd_debug=true -# Set etcd log levels by package -# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" - -# Upgrade Hooks -# -# Hooks are available to run custom tasks at various points during a cluster -# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using -# absolute paths, if not the path will be treated as relative to the file where the -# hook is actually used. -# -# Tasks to run before each master is upgraded. -# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml -# -# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible -# upgrade steps, but before we restart system/services. -# openshift_master_upgrade_hook=/usr/share/custom/master.yml -# -# Tasks to run after each master is upgraded and system/services have been restarted. 
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml - -# Cluster Image Source (registry) configuration -# openshift-enterprise default is 'registry.access.redhat.com/openshift3/ose-${component}:${version}' -# origin default is 'docker.io/openshift/origin-${component}:${version}' -#oreg_url=example.com/openshift3/ose-${component}:${version} -# If oreg_url points to a registry other than registry.access.redhat.com we can -# modify image streams to point at that registry by setting the following to true -#openshift_examples_modify_imagestreams=true -# Add insecure and blocked registries to global docker configuration -#openshift_docker_insecure_registries=registry.example.com -#openshift_docker_blocked_registries=registry.hacker.com -# You may also configure additional default registries for docker, however this -# is discouraged. Instead you should make use of fully qualified image names. -#openshift_docker_additional_registries=registry.example.com - -# If oreg_url points to a registry requiring authentication, provide the following: -{% if datacenter != 'iad2' %} -oreg_auth_user="{{ os_prod_registry_user }}" -oreg_auth_password="{{ os_prod_registry_password }}" -{% else %} -oreg_auth_user="{{ os_prod_iad2_registry_user }}" -oreg_auth_password="{{ os_prod_iad2_registry_password }}" -{% endif %} -# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any affect. -# oreg_auth_pass should be generated from running docker login. -# To update registry auth credentials, uncomment the following: -#oreg_auth_credentials_replace=True - -# OpenShift repository configuration -#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] -#openshift_repos_enable_testing=false - -# If the image for etcd needs to be pulled from anywhere else than registry.access.redhat.com, e.g. 
in -# a disconnected and containerized installation, use osm_etcd_image to specify the image to use: -#osm_etcd_image=rhel7/etcd - -# htpasswd auth -#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] -# Defining htpasswd users -#openshift_master_htpasswd_users={'user1': '', 'user2': ''} -# or -#openshift_master_htpasswd_file= - -{% if openshift_auth_profile == "osbs" %} -openshift_master_manage_htpasswd=false -openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift_htpasswd_file }}'}] -{% endif %} - -{% if openshift_auth_profile == "fedoraidp" %} -openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}] -{% endif %} - -{% if openshift_auth_profile == "fedoraidp-stg" %} -openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}] -{% endif %} - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# LDAP auth -#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] -# -# Configure LDAP CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the LDAPPasswordIdentityProvider. 
-# -#openshift_master_ldap_ca= -# or -#openshift_master_ldap_ca_file= - -# OpenID auth -#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] -# -# Configure OpenID CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the OpenIDIdentityProvider. -# -#openshift_master_openid_ca= -# or -#openshift_master_openid_ca_file= - -# Request header auth -#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] -# -# Configure request header CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "clientCA" -# key set within the RequestHeaderIdentityProvider. -# -#openshift_master_request_header_ca= -# or -#openshift_master_request_header_ca_file= - -# CloudForms Management Engine (ManageIQ) App Install -# -# Enables installation of MIQ server. Recommended for dedicated -# clusters only. See roles/openshift_management/README.md for instructions -# and requirements. -#openshift_management_install_management=False - -# Cloud Provider Configuration -# -# Note: You may make use of environment variables rather than store -# sensitive configuration within the ansible inventory. -# For example: -#openshift_cloudprovider_aws_access_key="{ lookup('env','AWS_ACCESS_KEY_ID') }" -#openshift_cloudprovider_aws_secret_key="{ lookup('env','AWS_SECRET_ACCESS_KEY') }" -# -# AWS -#openshift_cloudprovider_kind=aws -# Note: IAM profiles may be used instead of storing API credentials on disk. 
-#openshift_cloudprovider_aws_access_key=aws_access_key_id -#openshift_cloudprovider_aws_secret_key=aws_secret_access_key -# -# Openstack -#openshift_cloudprovider_kind=openstack -#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ -#openshift_cloudprovider_openstack_username=username -#openshift_cloudprovider_openstack_password=password -#openshift_cloudprovider_openstack_domain_id=domain_id -#openshift_cloudprovider_openstack_domain_name=domain_name -#openshift_cloudprovider_openstack_tenant_id=tenant_id -#openshift_cloudprovider_openstack_tenant_name=tenant_name -#openshift_cloudprovider_openstack_region=region -#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id -# -# Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting -#openshift_cloudprovider_openstack_blockstorage_version=v2 -# -# GCE -#openshift_cloudprovider_kind=gce -# Note: When using GCE, openshift_gcp_project and openshift_gcp_prefix must be -# defined. -# openshift_gcp_project is the project-id -#openshift_gcp_project= -# openshift_gcp_prefix is a unique string to identify each openshift cluster. -#openshift_gcp_prefix= -#openshift_gcp_multizone=False -# Note: To enable nested virtualization in gcp use the following variable and url -#openshift_gcp_licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" -# Additional details regarding nested virtualization are available: -# https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances -# -# vSphere -#openshift_cloudprovider_kind=vsphere -#openshift_cloudprovider_vsphere_username=username -#openshift_cloudprovider_vsphere_password=password -#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host -#openshift_cloudprovider_vsphere_datacenter=datacenter -#openshift_cloudprovider_vsphere_datastore=datastore -#openshift_cloudprovider_vsphere_folder=optional_folder_name - - -# Project Configuration -#osm_project_request_message='' -#osm_project_request_template='' -#osm_mcs_allocator_range='s0:/2' -#osm_mcs_labels_per_project=5 -#osm_uid_allocator_range='1000000000-1999999999/10000' - -# Configure additional projects -#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} - -# Enable cockpit -#osm_use_cockpit=true -# -# Set cockpit plugins -#osm_cockpit_plugins=['cockpit-kubernetes'] - -# If an external load balancer is used public hostname should resolve to -# external load balancer address -#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com - -# Configure controller arguments -#osm_controller_args={'resource-quota-sync-period': ['10s']} - -# Configure api server arguments -#osm_api_server_args={'max-requests-inflight': ['400']} - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# Override the default pod eviction timeout -#openshift_master_pod_eviction_timeout=5m - -# Override the default oauth tokenConfig settings: -# openshift_master_access_token_max_seconds=86400 -# openshift_master_auth_token_max_seconds=500 - -# Override master servingInfo.maxRequestsInFlight -#openshift_master_max_requests_inflight=500 - -# Override master and node servingInfo.minTLSVersion and .cipherSuites -# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 -# example cipher suites override, valid cipher suites are 
https://golang.org/pkg/crypto/tls/#pkg-constants -#openshift_master_min_tls_version=VersionTLS12 -#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] -# -#openshift_node_min_tls_version=VersionTLS12 -#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] - -# default storage plugin dependencies to install, by default the ceph and -# glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] - -# OpenShift Router Options -# -# An OpenShift router will be created during install if there are -# nodes present with labels matching the default router selector, -# "node-role.kubernetes.io/infra=true". -# -# Example: -# [nodes] -# node.example.com openshift_node_group_name="node-config-infra" -# -# Router selector (optional) -# Router will only be created if nodes matching this label are present. -# Default value: 'node-role.kubernetes.io/infra=true' -#openshift_hosted_router_selector='node-role.kubernetes.io/infra=true' -# -# Router replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift router selector. -#openshift_hosted_router_replicas=2 -# -# Router force subdomain (optional) -# A router path format to force on all routes used by this router -# (will ignore the route host value) -#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' -# -# Router certificate (optional) -# Provide local certificate paths which will be configured as the -# router's default certificate. -#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} -# -# Manage the OpenShift Router (optional) -#openshift_hosted_manage_router=true -# -# Router sharding support has been added and can be achieved by supplying the correct -# data to the inventory. The variable to house the data is openshift_hosted_routers -# and is in the form of a list. If no data is passed then a default router will be -# created. There are multiple combinations of router sharding. The one described -# below supports routers on separate nodes. 
-#
-#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
-
-# OpenShift Registry Console Options
-# Override the console image prefix:
-# origin default is "cockpit/", enterprise default is "openshift3/"
-#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
-# origin default is "kubernetes", enterprise default is "registry-console"
-#openshift_cockpit_deployer_basename=my-console
-# Override image version, defaults to latest for origin, vX.Y product version for enterprise
-#openshift_cockpit_deployer_version=1.4.1
-
-# Openshift Registry Options
-#
-# An OpenShift registry will be created during install if there are
-# nodes present with labels matching the default registry selector,
-# "node-role.kubernetes.io/infra=true".
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_group_name="node-config-infra"
-#
-# Registry selector (optional)
-# Registry will only be created if nodes matching this label are present.
-# Default value: 'node-role.kubernetes.io/infra=true'
-#openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true'
-#
-# Registry replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift registry selector.
-#openshift_hosted_registry_replicas=2
-#
-# Validity of the auto-generated certificate in days (optional)
-#openshift_hosted_registry_cert_expire_days=730
-#
-# Manage the OpenShift Registry (optional)
-#openshift_hosted_manage_registry=true
-# Manage the OpenShift Registry Console (optional)
-#openshift_hosted_manage_registry_console=true
-#
-# Registry Storage Options
-#
-# NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry". "exports" is
-# the name of the export served by the nfs server. "registry" is
-# the name of a directory inside of "/exports".
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-# nfs_directory must conform to DNS-1123: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry". "exports" is
-# the name of the export served by the nfs server. "registry" is
-# the name of a directory inside of "/exports".
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=nfs.example.com
-# nfs_directory must conform to DNS-1123: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-{% if env == "staging" %}
-openshift_hosted_registry_storage_kind=nfs
-openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-openshift_hosted_registry_storage_host=ntap-iad2-c02-fedora01-nfs01a.iad2.fedoraproject.org
-openshift_hosted_registry_storage_nfs_directory=/
-openshift_hosted_registry_storage_volume_name=openshift-stg-registry
-openshift_hosted_registry_storage_volume_size=10Gi
-{% else %}
-openshift_hosted_registry_storage_kind=nfs
-openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-openshift_hosted_registry_storage_host=ntap-iad2-c02-fedora01-nfs01a.iad2.fedoraproject.org
-openshift_hosted_registry_storage_nfs_directory=/
-openshift_hosted_registry_storage_volume_name=openshift-prod-registry
-openshift_hosted_registry_storage_volume_size=10Gi
-{% endif %}
-#
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# hostPath (local filesystem storage)
-# Suitable for "all-in-one" or proof of concept deployments
-# Must not be used for high-availability and production deployments
-#openshift_hosted_registry_storage_kind=hostpath
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_hostpath_path=/var/lib/openshift_volumes
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# AWS S3
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_encrypt=false
-#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Any S3 service (Minio, ExoScale, ...): Basically the same as above
-# but with regionendpoint configured
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
-#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Additional CloudFront Options. When using CloudFront all three
-# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
-#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
-#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
-# vSphere Volume with vSphere Cloud Provider
-# openshift_hosted_registry_storage_kind=vsphere
-# openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-# openshift_hosted_registry_storage_annotations=['volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume']
-#
-# GCS Storage Bucket
-#openshift_hosted_registry_storage_provider=gcs
-#openshift_hosted_registry_storage_gcs_bucket=bucket01
-#openshift_hosted_registry_storage_gcs_keyfile=test.key
-#openshift_hosted_registry_storage_gcs_rootdirectory=/registry
-
-# Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#
-# By default metrics are not automatically deployed, set this to enable them
-openshift_metrics_install_metrics=true
-openshift_metrics_cassandra_storage_type=emptydir
-openshift_metrics_start_cluster=true
-openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"}
-{% if openshift_metrics_deploy is defined %}
-{% if openshift_metrics_deploy %}
-openshift_hosted_metrics_deploy=true
-{% endif %}
-{% endif %}
-#
-# Storage Options
-# If openshift_metrics_storage_kind is unset then metrics will be stored
-# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
-# Storage options A & B currently support only one cassandra pod which is
-# generally enough for up to 1000 pods. Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics". "exports" is
-# the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics". "exports" is
-# the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_host=nfs.example.com
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.{openshift_master_default_subdomain}/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url; altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
-# Configure the metrics component images
-# Note, these will be modified by oreg_url by default
-#openshift_metrics_cassandra_image="docker.io/openshift/origin-metrics-cassandra:{ openshift_image_tag }"
-#openshift_metrics_hawkular_agent_image="docker.io/openshift/origin-metrics-hawkular-openshift-agent:{ openshift_image_tag }"
-#openshift_metrics_hawkular_metrics_image="docker.io/openshift/origin-metrics-hawkular-metrics:{ openshift_image_tag }"
-#openshift_metrics_schema_installer_image="docker.io/openshift/origin-metrics-schema-installer:{ openshift_image_tag }"
-#openshift_metrics_heapster_image="docker.io/openshift/origin-metrics-heapster:{ openshift_image_tag }"
-# when openshift_deployment_type=='openshift-enterprise'
-#openshift_metrics_cassandra_image="registry.access.redhat.com/openshift3/metrics-cassandra:{ openshift_image_tag }"
-#openshift_metrics_hawkular_agent_image="registry.access.redhat.com/openshift3/metrics-hawkular-openshift-agent:{ openshift_image_tag }"
-#openshift_metrics_hawkular_metrics_image="registry.access.redhat.com/openshift3/metrics-hawkular-metrics:{ openshift_image_tag }"
-#openshift_metrics_schema_installer_image="registry.access.redhat.com/openshift3/metrics-schema-installer:{ openshift_image_tag }"
-#openshift_metrics_heapster_image="registry.access.redhat.com/openshift3/metrics-heapster:{ openshift_image_tag }"
-#
-# StorageClass
-# openshift_storageclass_name=gp3
-# openshift_storageclass_parameters={'type': 'gp3', 'encrypted': 'false'}
-# openshift_storageclass_mount_options=['dir_mode=0777', 'file_mode=0777']
-# openshift_storageclass_reclaim_policy="Delete"
-#
-# PersistentLocalStorage
-# If Persistent Local Storage is wanted, set this boolean to True.
-# This will create all necessary configuration to use persistent storage on nodes.
-#openshift_persistentlocalstorage_enabled=False
-#openshift_persistentlocalstorage_classes=[]
-#openshift_persistentlocalstorage_path=/mnt/local-storage
-#openshift_persistentlocalstorage_provisionner_image=quay.io/external_storage/local-volume-provisioner:v1.0.1
-
-# Logging deployment
-#
-# Currently logging deployment is disabled by default, enable it by setting this
-openshift_logging_install_logging=true
-openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"}
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging". "exports" is
-# the name of the export served by the nfs server. "logging" is
-# the name of a directory inside of "/exports".
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging". "exports" is
-# the name of the export served by the nfs server. "logging" is
-# the name of a directory inside of "/exports".
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_host=nfs.example.com
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-openshift_logging_storage_kind=nfs
-openshift_logging_storage_access_modes=['ReadWriteOnce']
-openshift_logging_storage_host=ntap-iad2-c02-fedora01-nfs01a.iad2.fedoraproject.org
-openshift_logging_storage_nfs_directory=/
-openshift_logging_storage_volume_name=openshift-prod-logging
-openshift_logging_storage_volume_size=100Gi
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_logging_storage_kind=dynamic
-#
-# Option D - none -- Logging will use emptydir volumes which are destroyed when
-# pods are deleted
-#
-# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_logging/README.md
-#
-# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to kibana.{ openshift_master_default_subdomain }
-#openshift_logging_kibana_hostname=logging.apps.example.com
-# Configure the number of elastic search nodes, unless you're using dynamic provisioning
-# this value must be 1
-openshift_logging_es_cluster_size=1
-
-# Prometheus deployment
-#
-# Currently prometheus deployment is disabled by default, enable it by setting this
-#openshift_hosted_prometheus_deploy=true
-#
-# Prometheus storage config
-# By default prometheus uses emptydir storage; if you want persistence you should
-# configure it to use the pvc storage type. Each volume must be ReadWriteOnce.
-#openshift_prometheus_storage_type=emptydir
-#openshift_prometheus_alertmanager_storage_type=emptydir
-#openshift_prometheus_alertbuffer_storage_type=emptydir
-# Use PVCs for persistence
-#openshift_prometheus_storage_type=pvc
-#openshift_prometheus_alertmanager_storage_type=pvc
-#openshift_prometheus_alertbuffer_storage_type=pvc
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. Cannot be changed
-# after deployment.
-#
-# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
-# 172.17.0.0/16. Your installation will fail and/or your configuration change will
-# cause the Pod SDN or Cluster SDN to fail.
-#
-# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
-# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
-# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_cluster_network_cidr: clusterNetworkCIDR
-# openshift_portal_net: serviceNetworkCIDR
-# When installing osm_cluster_network_cidr and openshift_portal_net must be set.
-# Sane examples are provided below.
-#osm_cluster_network_cidr=10.128.0.0/14
-#openshift_portal_net=172.30.0.0/16
-
-# ExternalIPNetworkCIDRs controls what values are acceptable for the
-# service external IP field. If empty, no externalIP may be set. It
-# may contain a list of CIDRs which are checked for access. If a CIDR
-# is prefixed with !, IPs in that CIDR will be rejected. Rejections
-# will be applied first, then the IP checked against one of the
-# allowed CIDRs. You should ensure this range does not overlap with
-# your nodes, pods, or service CIDRs for security reasons.
-#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
-
-# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
-# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
-# be assigned. It may contain a single CIDR that will be allocated from. For
-# security reasons, you should ensure that this range does not overlap with
-# the CIDRs reserved for external IPs, nodes, pods, or services.
-#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-
-# Configure number of bits to allocate to each host's subnet e.g. 9
-# would mean a /23 network on the host.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_host_subnet_length: hostSubnetLength
-# When installing osm_host_subnet_length must be set. A sane example is provided below.
-#osm_host_subnet_length=9
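-#
-# As an illustrative aside (not in the upstream example inventory): the
-# subnet math is 32 - hostSubnetLength, so osm_host_subnet_length=9 gives
-# each node a /23 (32 - 9 = 23), i.e. 2^9 = 512 pod addresses per host.
-
-# Configure master API and console ports.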
-#openshift_master_api_port=8443
-#openshift_master_console_port=8443
-{% if openshift_api_port is defined and openshift_console_port is defined %}
-{% if openshift_api_port and openshift_console_port %}
-openshift_master_api_port={{openshift_api_port}}
-openshift_master_console_port={{openshift_console_port}}
-{% endif %}
-{% endif %}
-
-# set exact RPM version (include - prefix)
-#openshift_pkg_version=-3.9.0
-# you may also specify version and release, i.e.:
-#openshift_pkg_version=-3.9.0-0.126.0.git.0.9351aae.el7
-
-# Configure custom ca certificate
-#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
-#
-# NOTE: CA certificate will not be replaced on existing clusters.
-# This option may only be specified when creating a new cluster or
-# when redeploying cluster certificates with the redeploy-certificates
-# playbook.
-
-# Configure custom named certificates (SNI certificates)
-#
-# https://docs.openshift.org/latest/install_config/certificate_customization.html
-# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
-#
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-#
-# An optional CA may be specified for each named certificate. CAs will
-# be added to the OpenShift CA bundle which allows for the named
-# certificate to be served for internal cluster communication.
-#
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates=true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Add a trusted CA to all pods, copies from the control host, may be multiple
-# certs in one file
-#openshift_additional_ca=/path/to/additional-ca.crt
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_session_encryption_secrets must be of equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. Recommended to use secrets with 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
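-# As a hypothetical illustration (not part of the original template, and
-# assuming openshift_ip is still the openshift-ansible host variable that
-# feeds nodeIP on your release), a node with a dedicated NIC could be
-# pinned per-host in [nodes]:
-#   node1.example.com openshift_ip=10.0.1.5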
-#openshift_set_node_ip=True - -#openshift_node_kubelet_args is deprecated, use node config edits instead - -# Configure logrotate scripts -# See: https://github.com/nickhammond/ansible-logrotate -#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] - -# The OpenShift-Ansible installer will fail when it detects that the -# value of openshift_hostname resolves to an IP address not bound to any local -# interfaces. This mis-configuration is problematic for any pod leveraging host -# networking and liveness or readiness probes. -# Setting this variable to false will override that check. -#openshift_hostname_check=true - -# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail -# in versions >= 3.6 -#openshift_use_dnsmasq=False - -# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf -# This is useful for POC environments where DNS may not actually be available yet or to set -# options like 'strict-order' to alter dnsmasq configuration. -#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf - -# Global Proxy Configuration -# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment -# variables for docker and master services. -# -# Hosts in the openshift_no_proxy list will NOT use any globally -# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains -# (.example.com), hosts (example.com), and IP addresses. -#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT -#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT -#openshift_no_proxy='.hosts.example.com,some-host.com' -# -# Most environments don't require a proxy between openshift masters, nodes, and -# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. -# If all of your hosts share a common domain you may wish to disable this and -# specify that domain above instead. -# -# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and -# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy -# variable (above) and set this value to False -#openshift_generate_no_proxy_hosts=True -# -# These options configure the BuildDefaults admission controller which injects -# configuration into Builds. Proxy related values will default to the global proxy -# config values. You only need to set these if they differ from the global proxy settings. 
-# See BuildDefaults documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=mycorp.com
-#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_no_proxy=mycorp.com
-#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
-#openshift_builddefaults_resources_requests_cpu=100m
-#openshift_builddefaults_resources_requests_memory=256Mi
-#openshift_builddefaults_resources_limits_cpu=1000m
-#openshift_builddefaults_resources_limits_memory=512Mi
-
-# Or you may optionally define your own build defaults configuration serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
-
-# These options configure the BuildOverrides admission controller which injects
-# configuration into Builds.
-# See BuildOverrides documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_buildoverrides_force_pull=true
-#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
-#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}]
-
-# Or you may optionally define your own build overrides configuration serialized as json
-#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildOverridesConfig","forcePull":"true"}}}'
-
-# Enable service catalog
-openshift_enable_service_catalog=true
-
-# Enable template service broker (requires service catalog to be enabled, above)
-template_service_broker_install=true
-
-# Specify an openshift_service_catalog image
-# (defaults for origin and openshift-enterprise, respectively)
-#openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{ openshift_image_tag }"
-openshift_service_catalog_image="registry.access.redhat.com/openshift3/ose-service-catalog:v3.11.216"
-
-# TSB image tag
-template_service_broker_version='v3.11.216'
-
-# Configure one or more namespaces whose templates will be served by the TSB
-openshift_template_service_broker_namespaces=['openshift']
-
-# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=True
-
-# Admission plugin config
-#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
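-#
-# For readability, the single-line admission plugin JSON above is equivalent
-# to this YAML (an illustration only, same content):
-#   ProjectRequestLimit:
-#     configuration:
-#       apiVersion: v1
-#       kind: ProjectRequestLimitConfig
-#       limits:
-#       - selector: {"admin": "true"}
-#       - maxProjects: "1"
-#   PodNodeConstraints:
-#     configuration:
-#       apiVersion: v1
-#       kind: PodNodeConstraintsConfig
-
-# Configure usage of openshift_clock role.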
-openshift_clock_enabled=true
-
-# OpenShift Per-Service Environment Variables
-# Environment variables are added to /etc/sysconfig files for
-# each OpenShift node.
-# API and controllers environment variables are merged in single
-# master environments.
-#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-{% if no_http2 is defined %}
-{% if no_http2 %}
-openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
-openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-{% endif %}
-{% endif %}
-
-# Enable API service auditing
-#openshift_master_audit_config={"enabled": "true"}
-#
-# In case you want more advanced setup for the auditlog you can
-# use this line.
-# The directory in "auditFilePath" will be created if it does not
-# already exist.
-#openshift_master_audit_config={"enabled": "true", "auditFilePath": "/var/lib/origin/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": "14", "maximumFileSizeMegabytes": "500", "maximumRetainedFiles": "5"}
-
-# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
-# by openshift_deployment_type=origin
-#openshift_enable_origin_repo=false
-
-# Validity of the auto-generated OpenShift certificates in days.
-# See also openshift_hosted_registry_cert_expire_days above.
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-#
-# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
-# openshift_master_saconfig_limitsecretreferences=false
-
-# Upgrade Control
-#
-# By default nodes are upgraded in a serial manner one at a time and all failures
-# are fatal, one set of variables for normal nodes, one set of variables for
-# nodes that are part of control plane as the number of hosts may be different
-# in those two groups.
-#openshift_upgrade_nodes_serial=1
-#openshift_upgrade_nodes_max_fail_percentage=0
-#openshift_upgrade_control_plane_nodes_serial=1
-#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
-#
-# You can specify the number of nodes to upgrade at once. We do not currently
-# attempt to verify that you have capacity to drain this many nodes at once
-# so please be careful when specifying these values. You should also verify that
-# the expected number of nodes are all schedulable and ready before starting an
-# upgrade. If it's not possible to drain the requested nodes the upgrade will
-# stall indefinitely until the drain is successful.
-#
-# If you're upgrading more than one node at a time you can specify the maximum
-# percentage of failure within the batch before the upgrade is aborted. Any
-# nodes that do fail are ignored for the rest of the playbook run and you should
-# take care to investigate the failure and return the node to service so that
-# your cluster can return to full health.
-#
-# The observed failure percentage must exceed the configured value before the
-# upgrade aborts. With serial=4, two failures are 50%, so this would fail on
-# two failures:
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
-# whereas this would not (50 is not greater than 50):
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
-#
-# A timeout to wait for nodes to drain pods can be specified to ensure that the
-# upgrade continues even if nodes fail to drain pods in the allowed time. The
-# default value of 0 will wait indefinitely allowing the admin to investigate
-# the root cause and ensuring that disruption budgets are respected. If a
-# timeout of 0 is used there will also be one attempt to re-try draining the
-# node. If a non-zero timeout is specified there will be no attempt to retry.
-#openshift_upgrade_nodes_drain_timeout=0
-#
-# Multiple data migrations take place and if they fail they will fail the upgrade
-# You may wish to disable these or make them non-fatal
-#
-# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal=true
-# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal=false
-
-######################################################################
-# CloudForms/ManageIQ (CFME/MIQ) Configuration
-
-# See the readme for full descriptions and getting started
-# instructions: ../../roles/openshift_management/README.md or go directly to
-# their definitions: ../../roles/openshift_management/defaults/main.yml
-# ../../roles/openshift_management/vars/main.yml
-#
-# Namespace for the CFME project
-#openshift_management_project: openshift-management
-
-# Namespace/project description
-#openshift_management_project_description: CloudForms Management Engine
-
-# Choose 'miq-template' for a podified database install
-# Choose 'miq-template-ext-db' for an external database install
-#
-# If you are using the miq-template-ext-db template then you must add
-# the required database parameters to the
-# openshift_management_template_parameters variable.
-#openshift_management_app_template: miq-template
-
-# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
-#openshift_management_storage_class: nfs
-
-# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
-# netapp appliance, then you must set the hostname here. Leave the
-# value as 'false' if you are not using external NFS.
-#openshift_management_storage_nfs_external_hostname: false
-
-# [OPTIONAL] - If you are using external NFS then you must set the base
-# path to the exports location here.
-#
-# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports
-# that will back the application PV and optionally the database
-# pv. Export paths are defined relative to
-# { openshift_management_storage_nfs_base_dir}
-#
-# LOCAL NFS NOTE:
-#
-# You may also change this value if you want to change the default
-# path used for local NFS exports.
-#openshift_management_storage_nfs_base_dir: /exports
-
-# LOCAL NFS NOTE:
-#
-# You may override the automatically selected LOCAL NFS server by
-# setting this variable. Useful for testing specific task files.
-#openshift_management_storage_nfs_local_hostname: false
-
-# These are the default values for the username and password of the
-# management app. Changing these values in your inventory will not
-# change your username or password. You should only need to change
-# these values in your inventory if you already changed the actual
-# name and password AND are trying to use integration scripts.
-#
-# For example, adding this cluster as a container provider,
-# playbooks/openshift-management/add_container_provider.yml
-#openshift_management_username: admin
-#openshift_management_password: smartvm
-
-# A hash of parameters you want to override or set in the
-# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
-# your inventory file as a simple hash. Acceptable values are defined
-# under the .parameters list in files/miq-template{-ext-db}.yaml
-# Example:
-#
-# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
-#openshift_management_template_parameters: {}
-
-# Firewall configuration
-# You can open additional firewall ports by defining them as a list of service
-# names and ports/port ranges for either masters or nodes.
-#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
-#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
-
-# Service port node range
-#openshift_node_port_range=30000-32767
-
-# Enable unsupported configurations, things that will yield a partially
-# functioning cluster but would not be supported for production use
-#openshift_enable_unsupported_configurations=false
-openshift_enable_unsupported_configurations=True
diff --git a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-osbs.j2 b/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-osbs.j2
deleted file mode 100644
index 9398944c9e..0000000000
--- a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-osbs.j2
+++ /dev/null
@@ -1,882 +0,0 @@
-# This is based on the example inventories provided by the upstream
-# openshift-ansible project available:
-# https://github.com/openshift/openshift-ansible/tree/master/inventory/byo
-
-
-[masters]
-{% for host in groups[openshift_cluster_masters_group] %}
-{% if hostvars[host].datacenter == datacenter %}
-{{ host }}
-{% endif %}
-{% endfor %}
-
-[etcd]
-{% for host in groups[openshift_cluster_masters_group] %}
-{% if hostvars[host].datacenter == datacenter %}
-{{ host }}
-{% endif %}
-{% endfor %}
-
-[nodes]
-{% for host in groups[openshift_cluster_masters_group] %}
-{% if hostvars[host].datacenter == datacenter %}
-{{ host }} openshift_node_group_name='node-config-master'
-{% endif %}
-{% endfor %}
-{% for host in groups[openshift_cluster_nodes_group] %}
-{% if hostvars[host].datacenter == datacenter %}
-{{ host }} openshift_node_group_name='node-config-compute'
-{% endif %}
-{% endfor %}
-
-
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-
-# Add this if using nfs and have defined the nfs group
-#nfs
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true', 'orchestrator=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true', 'node-role.kubernetes.io/infra=true', 'worker=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
-# Disable the service catalog. We don't use it and it needs persistent storage.
-openshift_enable_service_catalog=false
-# Set this because we have nfs which isn't supported
-openshift_enable_unsupported_configurations=true
-# Have upgrader also restart systems in a rolling manner.
-openshift_rolling_restart_mode=system
-# Disable the disk, memory, package version and docker image/storage checks
-openshift_disable_check=disk_availability,package_version,docker_image_availability,memory_availability,docker_storage
-# SSH user, this user should allow ssh based auth without requiring a
-# password. If using ssh key based auth, then the key should be managed by an
-# ssh agent.
-ansible_ssh_user={{openshift_ansible_ssh_user}}
-
-# Specify the deployment type. 
Valid values are origin and openshift-enterprise. -deployment_type={{openshift_deployment_type}} - -# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we -# rely on the version running on the first master. Works best for containerized installs where we can usually -# use this to lookup the latest exact version of the container images, which is the tag actually used to configure -# the cluster. For RPM installations we just verify the version detected in your configured repos matches this -# release. -openshift_release={{openshift_release}} -openshift_version={{openshift_version}} - -# For whatever reason, this keeps hitting a race condition and docker is -# excluded before docker is installed so we're just going to remove it. -openshift_enable_docker_excluder = False - -# OpenShift Containerized deployment or not? -containerized={{openshift_ansible_containerized_deploy}} - -{% if openshift_ansible_ssh_user != "root" %} -# If ansible_ssh_user is not root, ansible_become must be set to true and the -# user must be configured for passwordless sudo -ansible_become=yes -{% endif %} - -{% if openshift_ansible_python_interpreter is defined %} -ansible_python_interpreter={{openshift_ansible_python_interpreter}} -{% endif %} - -# Debug level for all OpenShift components (Defaults to 2) -debug_level={{openshift_debug_level}} - - -# Specify an exact container image tag to install or configure. -# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -openshift_image_tag={{openshift_release}} - -# Specify an exact rpm version to install or configure. -# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. 
-openshift_pkg_version={{openshift_pkg_version}}
-# Install the openshift examples
-{% if openshift_ansible_install_examples is defined %}
-openshift_install_examples={{openshift_ansible_install_examples}}
-{% endif %}
-
-openshift_cluster_monitoring_operator_install = false
-openshift_web_console_install = false
-openshift_console_install = false
-openshift_enable_olm=false
-
-# Configure logoutURL in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
-#openshift_master_logout_url=http://example.com
-
-# Configure extensionScripts in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
-
-# Configure extensionStylesheets in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
-
-# Configure the oauth template in the master config for login page customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#customizing-the-login-page
-#openshift_master_oauth_template=/path/to/login-template.html
-
-# Configure imagePolicyConfig in the master config
-# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
-#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
-
-# Docker Configuration
-# Add additional, insecure, and blocked registries to global docker configuration
-# For enterprise deployment types we ensure that registry.access.redhat.com is
-# included if you do not include it
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Install and run cri-o.
-{% if openshift_ansible_use_crio is defined %}
-openshift_use_crio={{ openshift_ansible_use_crio }}
-{% endif %}
-{% if openshift_ansible_use_crio_only is defined %}
-openshift_use_crio_only={{ openshift_ansible_use_crio_only }}
-{% endif %}
-# The following two variables are used when openshift_use_crio is True
-# and clean up after builds that pass through docker. When openshift_use_crio is True
-# these variables are set to the defaults shown. You may override them here.
-# NOTE: You will still need to tag crio nodes with your given label(s)!
-# Enable docker garbage collection when using cri-o
-#openshift_crio_enable_docker_gc=True
-# Node Selectors to run the garbage collection
-#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
-openshift_crio_docker_gc_node_selector={}
-openshift_crio_systemcontainer_image_override="registry.access.redhat.com/openshift3/cri-o:v3.9"
-
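-# As a hypothetical illustration (not part of the original template), a cri-o
-# node matching the gc selector above could be tagged per-host in [nodes]:
-#   node3.example.com openshift_node_labels="{'runtime': 'cri-o'}"
-
-# Use Docker inside a System Container. Note that this is a tech preview and should
-# not be used to upgrade!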
-# The following options for docker are ignored: -# - docker_version -# - docker_upgrade -# The following options must not be used -# - openshift_docker_options -#openshift_docker_use_system_container=False -# Force the registry to use for the system container. By default the registry -# will be built off of the deployment type and ansible_distribution. Only -# use this option if you are sure you know what you are doing! -#openshift_docker_systemcontainer_image_registry_override="registry.example.com" -# Items added, as is, to end of /etc/sysconfig/docker OPTIONS -# Default value: "--log-driver=journald" -#openshift_docker_options="-l warn --ipv6=false" - -# Specify exact version of Docker to configure or upgrade to. -# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. -# docker_version="1.12.1" - -# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. -# docker_upgrade=False - -# Specify exact version of etcd to configure or upgrade to. -# etcd_version="3.1.0" -# Enable etcd debug logging, defaults to false -# etcd_debug=true -# Set etcd log levels by package -# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" - -# Upgrade Hooks -# -# Hooks are available to run custom tasks at various points during a cluster -# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using -# absolute paths, if not the path will be treated as relative to the file where the -# hook is actually used. -# -# Tasks to run before each master is upgraded. -# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml -# -# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible -# upgrade steps, but before we restart system/services. -# openshift_master_upgrade_hook=/usr/share/custom/master.yml -# -# Tasks to run after each master is upgraded and system/services have been restarted. 
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
-
-
-# Alternate image format string, useful if you've got your own registry mirror
-#oreg_url=example.com/openshift3/ose-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
-# modify image streams to point at that registry by setting the following to true
-#openshift_examples_modify_imagestreams=true
-
-# Additional yum repos to install
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
-
-# Defining htpasswd users
-#openshift_master_htpasswd_users={'user1': '', 'user2': ''}
-# or
-#openshift_master_htpasswd_file=
-
-# OSBS Specific Auth
-{% if openshift_auth_profile == "osbs" %}
-openshift_master_manage_htpasswd=false
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
-{% endif %}
-
-{% if openshift_auth_profile == "fedoraidp" %}
-openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "extraScopes": ["profile", "email", "https://id.fedoraproject.org/scope/groups"], "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
-{% endif %}
-
-{% if openshift_auth_profile == "fedoraidp-stg" %}
-openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
-{% endif %}
-
-
-# If oreg_url points to a registry requiring authentication, provide the following:
-{% if openshift_arch == "aarch64" %}
-oreg_url=quay.io/multi-arch/aarch64-openshift3-ose-${component}:v3.11
-oreg_auth_user="{{ os_multiarch_registry_user }}"
-oreg_auth_password="{{ os_multiarch_registry_password }}"
-oreg_test_login=false
-{% elif env == "staging" %}
-oreg_auth_user="{{ os_stg_registry_user }}"
-oreg_auth_password="{{ os_stg_registry_password }}"
-{% elif datacenter != 'iad2' %}
-oreg_auth_user="{{ os_prod_registry_user }}"
-oreg_auth_password="{{ os_prod_registry_password }}"
-{% else %}
-oreg_auth_user="{{ os_prod_iad2_registry_user }}"
-oreg_auth_password="{{ os_prod_iad2_registry_password }}"
-{% endif %}
-
-# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
-# oreg_auth_password should be generated from running docker login.
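-# For example (illustrative only; the hostname and account are placeholders):
-#   docker login -u myaccount registry.example.com
-# The password or token accepted by that login is the value that belongs in
-# oreg_auth_password above.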
-# To update registry auth credentials, uncomment the following: -#oreg_auth_credentials_replace=True - -# Allow all auth -#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] - -# LDAP auth -#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] -# -# Configure LDAP CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the LDAPPasswordIdentityProvider. -# -#openshift_master_ldap_ca= -# or -#openshift_master_ldap_ca_file= - -# OpenID auth -#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] -# -# Configure OpenID CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "ca" key set -# within the OpenIDIdentityProvider. -# -#openshift_master_openid_ca= -# or -#openshift_master_openid_ca_file= - -# Request header auth -#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] -# -# Configure request header CA certificate -# Specify either the ASCII contents of the certificate or the path to -# the local file that will be copied to the remote host. CA -# certificate contents will be copied to master systems and saved -# within /etc/origin/master/ with a filename matching the "clientCA" -# key set within the RequestHeaderIdentityProvider. -# -#openshift_master_request_header_ca= -# or -#openshift_master_request_header_ca_file= - -{% if openshift_master_ha is defined %} -{% if openshift_master_ha %} -# Native high availability cluster method with optional load balancer. -# If no lb group is defined, the installer assumes that a load balancer has -# been preconfigured. For installation the value of -# openshift_master_cluster_hostname must resolve to the load balancer -# or to one or all of the masters defined in the inventory if no load -# balancer is present. 
-openshift_master_cluster_method=native -openshift_master_cluster_hostname={{openshift_internal_cluster_url}} -openshift_master_cluster_public_hostname={{openshift_cluster_url}} -{% endif %} -{% endif %} - -# Override the default controller lease ttl -#osm_controller_lease_ttl=30 - -# Configure controller arguments -#osm_controller_args={'resource-quota-sync-period': ['10s']} - -# Configure api server arguments -#osm_api_server_args={'max-requests-inflight': ['400']} - -# default subdomain to use for exposed routes -{% if openshift_app_subdomain is defined %} -{% if openshift_app_subdomain %} -openshift_master_default_subdomain={{openshift_app_subdomain}} -{% endif %} -{% endif %} - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# Override the default pod eviction timeout -#openshift_master_pod_eviction_timeout=5m - -# Override the default oauth tokenConfig settings: -# openshift_master_access_token_max_seconds=86400 -# openshift_master_auth_token_max_seconds=500 - -# Override master servingInfo.maxRequestsInFlight -#openshift_master_max_requests_inflight=500 - -# Override master and node servingInfo.minTLSVersion and .cipherSuites -# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 -# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants -#openshift_master_min_tls_version=VersionTLS12 -#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] -# -#openshift_node_min_tls_version=VersionTLS12 -#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] - -# default storage plugin dependencies to install, by default the ceph and -# glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs'] - -# OpenShift Router Options -# -# An OpenShift router will be created during install if there are -# nodes present with labels matching the default router selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Router selector (optional) -# Router will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_router_selector='region=infra' -# -# Router replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift router selector. -#openshift_hosted_router_replicas=2 -# -# Router force subdomain (optional) -# A router path format to force on all routes used by this router -# (will ignore the route host value) -#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' -# -# Router certificate (optional) -# Provide local certificate paths which will be configured as the -# router's default certificate. -#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} -# -# Disable management of the OpenShift Router -openshift_hosted_manage_router=false -# -# Router sharding support has been added and can be achieved by supplying the correct -# data to the inventory. The variable to house the data is openshift_hosted_routers -# and is in the form of a list. If no data is passed then a default router will be -# created. 
There are multiple combinations of router sharding. The one described -# below supports routers on separate nodes. -#openshift_hosted_routers: -#- name: router1 -# stats_port: 1936 -# ports: -# - 80:80 -# - 443:443 -# replicas: 1 -# namespace: default -# serviceaccount: router -# selector: type=router1 -# images: "openshift3/ose-${component}:${version}" -# edits: [] -# certificates: -# certfile: /path/to/certificate/abc.crt -# keyfile: /path/to/certificate/abc.key -# cafile: /path/to/certificate/ca.crt -#- name: router2 -# stats_port: 1936 -# ports: -# - 80:80 -# - 443:443 -# replicas: 1 -# namespace: default -# serviceaccount: router -# selector: type=router2 -# images: "openshift3/ose-${component}:${version}" -# certificates: -# certfile: /path/to/certificate/xyz.crt -# keyfile: /path/to/certificate/xyz.key -# cafile: /path/to/certificate/ca.crt -# edits: -# # ROUTE_LABELS sets the router to listen for routes -# # tagged with the provided values -# - key: spec.template.spec.containers[0].env -# value: -# name: ROUTE_LABELS -# value: "route=external" -# action: append - -# OpenShift Registry Console Options -# Override the console image prefix for enterprise deployments, not used in origin -# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console" -#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ -# Override image version, defaults to latest for origin, matches the product version for enterprise -#openshift_cockpit_deployer_version=1.4.1 - -# Openshift Registry Options -# -# An OpenShift registry will be created during install if there are -# nodes present with labels matching the default registry selector, -# "region=infra". Set openshift_node_labels per node as needed in -# order to label nodes. -# -# Example: -# [nodes] -# node.example.com openshift_node_labels="{'region': 'infra'}" -# -# Registry selector (optional) -# Registry will only be created if nodes matching this label are present. -# Default value: 'region=infra' -#openshift_hosted_registry_selector='region=infra' -# -# Registry replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift registry selector. -#openshift_hosted_registry_replicas=2 -# -# Validity of the auto-generated certificate in days (optional) -#openshift_hosted_registry_cert_expire_days=730 -# -# Disable management of the OpenShift Registry -#openshift_hosted_manage_registry=false - -# Registry Storage Options -# -# NFS Host Group -# An NFS volume will be created with path "nfs_directory/volume_name" -# on the host within the [nfs] host group. For example, the volume -# path using these options would be "/exports/registry" -#openshift_hosted_registry_storage_kind=nfs -#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] -#openshift_hosted_registry_storage_nfs_directory=/exports -#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' -#openshift_hosted_registry_storage_volume_name=registry -#openshift_hosted_registry_storage_volume_size=10Gi -# -# External NFS Host -# NFS volume must already exist with path "nfs_directory/_volume_name" on -# the storage_host. 
For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com
-#openshift_hosted_registry_storage_nfs_directory=/{{ansible_architecture}}
-#openshift_hosted_registry_storage_volume_name=osbs-stg-registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com
-#openshift_hosted_registry_storage_nfs_directory=/{{ansible_architecture}}
-#openshift_hosted_registry_storage_volume_name=osbs-prod-registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# Native GlusterFS Registry Storage
-#openshift_hosted_registry_storage_kind=glusterfs
-#
-# AWS S3
-#
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Any S3 service (Minio, ExoScale, ...): Basically the same as above
-# but with regionendpoint configured
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
-#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Additional CloudFront Options. When using CloudFront all three
-# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
-#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
-#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
-
-# Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#
-# By default metrics are not automatically deployed, set this to enable them
-#
-# openshift_hosted_metrics_deploy=true
-{% if openshift_metrics_deploy is defined %}
-{% if openshift_metrics_deploy %}
-#
-openshift_hosted_metrics_deploy=false
-
-# Storage Options
-# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
-# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
-# Storage options A & B currently support only one cassandra pod which is
-# generally enough for up to 1000 pods. Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics"
-#openshift_hosted_metrics_storage_kind=nfs
-#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_metrics_storage_host=nfs.example.com
-#openshift_hosted_metrics_storage_nfs_directory=/exports
-#openshift_hosted_metrics_storage_volume_name=metrics
-#openshift_hosted_metrics_storage_volume_size=10Gi
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_hosted_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.openshift_master_default_subdomain/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url; altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_hosted_metrics_public_url=https://hawkular-metrics.{{openshift_cluster_url}}/hawkular/metrics
-{% endif %}
-{% endif %}
-
-# Logging deployment
-#
-# Currently logging deployment is disabled by default, enable it by setting this
-#openshift_hosted_logging_deploy=true
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging"
-#openshift_hosted_logging_storage_kind=nfs
-#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_logging_storage_host=nfs.example.com
-#openshift_hosted_logging_storage_nfs_directory=/exports
-#openshift_hosted_logging_storage_volume_name=logging
-#openshift_hosted_logging_storage_volume_size=10Gi
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_hosted_logging_storage_kind=dynamic
-#
-# Option D - none -- Logging will use emptydir volumes which are destroyed when
-# pods are deleted
-#
-# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_logging/README.md
-#
-# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to kibana.openshift_master_default_subdomain
-#openshift_hosted_logging_hostname=logging.apps.example.com
-# Configure the number of elastic search nodes, unless you're using dynamic provisioning
-# this value must be 1
-#openshift_hosted_logging_elasticsearch_cluster_size=1
-# Configure the prefix and version for the component images
-#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
-#openshift_hosted_logging_deployer_version=3.5.0
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. Cannot be changed
-# after deployment.
-#
-# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
-# 172.17.0.0/16. Your installation will fail and/or your configuration change will
-# cause the Pod SDN or Cluster SDN to fail.
-#
-# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
-# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
-# environment variable located in /etc/sysconfig/docker-network.
-#osm_cluster_network_cidr=10.128.0.0/14
-#openshift_portal_net=172.30.0.0/16
-
-# ExternalIPNetworkCIDRs controls what values are acceptable for the
-# service external IP field. If empty, no externalIP may be set. It
-# may contain a list of CIDRs which are checked for access. If a CIDR
-# is prefixed with !, IPs in that CIDR will be rejected. Rejections
-# will be applied first, then the IP checked against one of the
-# allowed CIDRs. You should ensure this range does not overlap with
-# your nodes, pods, or service CIDRs for security reasons.
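-#
-# A hedged illustration of the rejection syntax (values hypothetical): the
-# list ['!10.0.0.0/8', '0.0.0.0/0'] would reject an externalIP inside
-# 10.0.0.0/8 and accept any other address, since rejections are applied first.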
-#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] - -# IngressIPNetworkCIDR controls the range to assign ingress IPs from for -# services of type LoadBalancer on bare metal. If empty, ingress IPs will not -# be assigned. It may contain a single CIDR that will be allocated from. For -# security reasons, you should ensure that this range does not overlap with -# the CIDRs reserved for external IPs, nodes, pods, or services. -#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 - -# Configure number of bits to allocate to each host's subnet e.g. 9 -# would mean a /23 network on the host. -#osm_host_subnet_length=9 - -# Configure master API and console ports. -# These will default to 8443 -{% if openshift_api_port is defined and openshift_console_port is defined %} -{% if openshift_api_port and openshift_console_port %} -openshift_master_api_port={{openshift_api_port}} -openshift_master_console_port={{openshift_console_port}} -{% endif %} -{% endif %} - - -# set RPM version for debugging purposes -#openshift_pkg_version=-3.1.0.0 - -# Configure custom ca certificate -#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} -# -# NOTE: CA certificate will not be replaced with existing clusters. -# This option may only be specified when creating a new cluster or -# when redeploying cluster certificates with the redeploy-certificates -# playbook. - -# Configure custom named certificates (SNI certificates) -# -# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html -# -# NOTE: openshift_master_named_certificates is cached on masters and is an -# additive fact, meaning that each run with a different set of certificates -# will add the newly provided certificates to the cached set of certificates. -# -# An optional CA may be specified for each named certificate. CAs will -# be added to the OpenShift CA bundle which allows for the named -# certificate to be served for internal cluster communication. -# -# If you would like openshift_master_named_certificates to be overwritten with -# the provided value, specify openshift_master_overwrite_named_certificates. -#openshift_master_overwrite_named_certificates=true -# -# Provide local certificate paths which will be deployed to masters -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] -# -# Detected names may be overridden by specifying the "names" key -#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] - -# Session options -#openshift_master_session_name=ssn -#openshift_master_session_max_seconds=3600 - -# An authentication and encryption secret will be generated if secrets -# are not provided. If provided, openshift_master_session_auth_secrets -# and openshift_master_encryption_secrets must be equal length. -# -# Signing secrets, used to authenticate sessions using -# HMAC. Recommended to use secrets with 32 or 64 bytes. -#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] -# -# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 -# characters long, to select AES-128, AES-192, or AES-256. 
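-#
-# As a sketch only (not a command this template runs): a 32-character value
-# suitable for AES-256 can be generated with, e.g.,
-#   openssl rand -base64 24
-# since 24 random bytes encode to exactly 32 base64 characters.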
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] - -# configure how often node iptables rules are refreshed -#openshift_node_iptables_sync_period=5s - -# Configure nodeIP in the node config -# This is needed in cases where node traffic is desired to go over an -# interface other than the default network interface. -#openshift_set_node_ip=True - -# Force setting of system hostname when configuring OpenShift -# This works around issues related to installations that do not have valid dns -# entries for the interfaces attached to the host. -#openshift_set_hostname=True - -# Configure dnsIP in the node config -#openshift_dns_ip=172.30.0.1 - -# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later. -#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']} - -# Configure logrotate scripts -# See: https://github.com/nickhammond/ansible-logrotate -#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] - -# openshift-ansible will wait indefinitely for your input when it detects that the -# value of openshift_hostname resolves to an IP address not bound to any local -# interfaces. This mis-configuration is problematic for any pod leveraging host -# networking and liveness or readiness probes. -# Setting this variable to true will override that check. -#openshift_override_hostname_check=true - -# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq -# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults -# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot -# be used with 1.0 and 3.0. -#openshift_use_dnsmasq=False -# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf -# This is useful for POC environments where DNS may not actually be available yet or to set -# options like 'strict-order' to alter dnsmasq configuration. -#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf - -# Global Proxy Configuration -# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment -# variables for docker and master services. -#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT -#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT -#openshift_no_proxy='.hosts.example.com,some-host.com' -# -# Most environments don't require a proxy between openshift masters, nodes, and -# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. -# If all of your hosts share a common domain you may wish to disable this and -# specify that domain above. -#openshift_generate_no_proxy_hosts=True -# -# These options configure the BuildDefaults admission controller which injects -# configuration into Builds. Proxy related values will default to the global proxy -# config values. You only need to set these if they differ from the global proxy settings. 
-# See BuildDefaults documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_no_proxy=mycorp.com -#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_no_proxy=mycorp.com -#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} -#openshift_builddefaults_resources_requests_cpu=100m -#openshift_builddefaults_resources_requests_memory=256m -#openshift_builddefaults_resources_limits_cpu=1000m -#openshift_builddefaults_resources_limits_memory=512m - -# Or you may optionally define your own build defaults configuration serialized as json -#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' - -# These options configure the BuildOverrides admission controller which injects -# configuration into Builds. -# See BuildOverrides documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_buildoverrides_force_pull=true -#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} - -# Or you may optionally define your own build overrides configuration serialized as json -#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' - -# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default -#openshift_master_dynamic_provisioning_enabled=False - -# Admission plugin config -#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} - -# Configure usage of openshift_clock role. -#openshift_clock_enabled=true - -# OpenShift Per-Service Environment Variables -# Environment variables are added to /etc/sysconfig files for -# each OpenShift service: node, master (api and controllers). -# API and controllers environment variables are merged in single -# master environments. -{% if no_http2 is defined %} -{% if no_http2 %} -openshift_master_api_env_vars={"ENABLE_HTTP2": "true"} -openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} -openshift_node_env_vars={"ENABLE_HTTP2": "true"} -{% endif %} -{% endif %} - -# Enable API service auditing, available as of 3.2 -#openshift_master_audit_config={"enabled": true} - -# Validity of the auto-generated OpenShift certificates in days. -# See also openshift_hosted_registry_cert_expire_days above. 
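-#
-# For context, the values below amount to roughly five years (1825 days) for
-# the CA and two years (730 days) for the node and master certificates.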
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-
-# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
diff --git a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-stg.j2 b/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-stg.j2
deleted file mode 100644
index e694dd6e2b..0000000000
--- a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-stg.j2
+++ /dev/null
@@ -1,1164 +0,0 @@
-# This is an example of an OpenShift-Ansible host inventory that provides the
-# minimum recommended configuration for production use. This includes three masters,
-# two infra nodes, two compute nodes, and an HAProxy load balancer to load
-# balance traffic to the API servers. For a truly production environment you
-# should use an external load balancing solution that itself is highly available.
-
-[masters]
-{% for host in groups[openshift_cluster_masters_group] %}
-{{ host }}
-{% endfor %}
-
-[etcd]
-{% for host in groups[openshift_cluster_masters_group] %}
-{{ host }}
-{% endfor %}
-
-[nodes]
-{% for host in groups[openshift_cluster_masters_group] %}
-{{ host }} openshift_node_group_name='node-config-master'
-{% endfor %}
-{% for host in groups[openshift_cluster_nodes_group] %}
-{{ host }} openshift_node_group_name='node-config-compute'
-{% endfor %}
-{% for host in groups['os_infra_nodes_stg'] %}
-{{ host }} openshift_node_group_name='node-config-infra'
-{% endfor %}
-
-#[nfs]
-#ose3-master1.test.example.com
-
-#[lb]
-#ose3-lb.test.example.com
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-#lb
-#nfs
-
-[OSEv3:vars]
-
-openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
-###############################################################################
-# Common / Required configuration variables follow                           #
-###############################################################################
-# SSH user, this user should allow ssh based auth without requiring a
-# password. If using ssh key based auth, then the key should be managed by an
-# ssh agent.
-ansible_user={{openshift_ansible_ssh_user}}
-
-# If ansible_user is not root, ansible_become must be set to true and the
-# user must be configured for passwordless sudo
-#ansible_become=yes
-
-# Specify the deployment type. Valid values are origin and openshift-enterprise.
-#openshift_deployment_type=origin
-openshift_deployment_type={{openshift_deployment_type}}
-
-# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
-# rely on the version running on the first master.
Works best for containerized installs where we can usually -# use this to lookup the latest exact version of the container images, which is the tag actually used to configure -# the cluster. For RPM installations we just verify the version detected in your configured repos matches this -# release. -openshift_release={{openshift_release}} - -{% if openshift_master_ha is defined %} -{% if openshift_master_ha %} -# Native high availability cluster method with optional load balancer. -# If no lb group is defined, the installer assumes that a load balancer has -# been preconfigured. For installation the value of -# openshift_master_cluster_hostname must resolve to the load balancer -# or to one or all of the masters defined in the inventory if no load -# balancer is present. -openshift_master_cluster_method=native -openshift_master_cluster_hostname={{openshift_internal_cluster_url}} -openshift_master_cluster_public_hostname={{openshift_cluster_url}} -{% endif %} -{% endif %} - -# default subdomain to use for exposed routes, you should have wildcard dns -# for *.apps.test.example.com that points at your infra nodes which will run -# your router -{% if openshift_app_subdomain is defined %} -openshift_master_default_subdomain={{openshift_app_subdomain}} -{% endif %} - -{% if openshift_ansible_python_interpreter is defined %} -ansible_python_interpreter={{openshift_ansible_python_interpreter}} -{% endif %} - -############################################################################### -# Additional configuration variables follow # -############################################################################### - -# Debug level for all OpenShift components (Defaults to 2) -debug_level={{openshift_debug_level}} - -# Specify an exact container image tag to install or configure. -# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_image_tag=v3.10.0 -openshift_image_tag="v3.11" - -# Specify an exact rpm version to install or configure. -# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. -# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. -#openshift_pkg_version=-3.10.0 -openshift_pkg_version="-3.11.216" - -# If using Atomic Host, you may specify system container image registry for the nodes: -#system_images_registry="docker.io" -# when openshift_deployment_type=='openshift-enterprise' -#system_images_registry="registry.access.redhat.com" - -# Manage openshift example imagestreams and templates during install and upgrade -#openshift_install_examples=true -{% if openshift_ansible_install_examples is defined %} -openshift_install_examples={{openshift_ansible_install_examples}} -{% endif %} - -# Configure logoutURL in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url -#openshift_master_logout_url=http://example.com - -# Configure extensions in the master config for console customization -# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files -#openshift_master_oauth_templates={'login': '/path/to/login-template.html'} -# openshift_master_oauth_template is deprecated. 
Use openshift_master_oauth_templates instead.
-#openshift_master_oauth_template=/path/to/login-template.html
-
-# Configure imagePolicyConfig in the master config
-# See: https://docs.openshift.org/latest/admin_guide/image_policy.html
-#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
-
-# Configure master API rate limits for external clients
-#openshift_master_external_ratelimit_qps=200
-#openshift_master_external_ratelimit_burst=400
-# Configure master API rate limits for loopback clients
-#openshift_master_loopback_ratelimit_qps=300
-#openshift_master_loopback_ratelimit_burst=600
-
-# Install and run cri-o.
-#openshift_use_crio=False
-#openshift_use_crio_only=False
-{% if openshift_ansible_use_crio is defined %}
-openshift_use_crio={{ openshift_ansible_use_crio }}
-{% endif %}
-{% if openshift_ansible_use_crio_only is defined %}
-openshift_use_crio_only={{ openshift_ansible_use_crio_only }}
-{% endif %}
-# The following two variables are used when openshift_use_crio is True
-# and clean up after builds that pass through docker. When openshift_use_crio is True
-# these variables are set to the defaults shown. You may override them here.
-# NOTE: You will still need to tag crio nodes with your given label(s)!
-# Enable docker garbage collection when using cri-o
-#openshift_crio_enable_docker_gc=True
-# Node Selectors to run the garbage collection
-#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
-
-# Items added, as-is, to the end of /etc/sysconfig/docker OPTIONS
-# Default value: "--log-driver=journald"
-#openshift_docker_options="-l warn --ipv6=false"
-
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
-# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
-# Uncomment below to disable; for example if your kernel does not support the
-# Docker overlay/overlay2 storage drivers with SELinux enabled.
-#openshift_docker_selinux_enabled=False
-
-# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
-# docker_upgrade=False
-
-# Specify a list of block devices to be formatted and mounted on the nodes
-# during prerequisites.yml. For each hash, "device", "path", "filesystem" are
-# required. To add devices only on certain classes of node, redefine
-# container_runtime_extra_storage as a group var.
-#container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]'
-
-# Enable etcd debug logging, defaults to false
-# etcd_debug=true
-# Set etcd log levels by package
-# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
-
-# Upgrade Hooks
-#
-# Hooks are available to run custom tasks at various points during a cluster
-# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
-# absolute paths; if not, the path will be treated as relative to the file where the
-# hook is actually used.
-#
-# Tasks to run before each master is upgraded.
-# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
-#
-# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
-# upgrade steps, but before we restart system/services.
-# openshift_master_upgrade_hook=/usr/share/custom/master.yml
-#
-# Tasks to run after each master is upgraded and system/services have been restarted.
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
-
-# Cluster Image Source (registry) configuration
-# openshift-enterprise default is 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
-# origin default is 'docker.io/openshift/origin-${component}:${version}'
-#oreg_url=example.com/openshift3/ose-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
-# modify image streams to point at that registry by setting the following to true
-#openshift_examples_modify_imagestreams=true
-# Add insecure and blocked registries to global docker configuration
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# You may also configure additional default registries for docker, however this
-# is discouraged. Instead you should make use of fully qualified image names.
-#openshift_docker_additional_registries=registry.example.com
-
-# If oreg_url points to a registry requiring authentication, provide the following:
-oreg_auth_user="{{ os_stg_registry_user }}"
-oreg_auth_password="{{ os_stg_registry_password }}"
-# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
-# oreg_auth_pass should be generated from running docker login.
-# To update registry auth credentials, uncomment the following:
-#oreg_auth_credentials_replace=True
-
-# OpenShift repository configuration
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-#openshift_repos_enable_testing=false
-
-# If the image for etcd needs to be pulled from anywhere other than registry.access.redhat.com, e.g. in
-# a disconnected and containerized installation, use osm_etcd_image to specify the image to use:
-#osm_etcd_image=rhel7/etcd
-
-# htpasswd auth
-#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
-# Defining htpasswd users
-#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
-# or
-#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
-
-{% if openshift_auth_profile == "osbs" %}
-openshift_master_manage_htpasswd=false
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
-{% endif %}
-
-{% if openshift_auth_profile == "fedoraidp" %}
-openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "extraScopes": ["profile", "email", "https://id.fedoraproject.org/scope/groups"], "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
-{% endif %}
-
-{% if openshift_auth_profile == "fedoraidp-stg" %}
-openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
-{% endif %}
-
-# Allow all auth
-#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
-
-# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-#
-# Configure LDAP CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the LDAPPasswordIdentityProvider.
-#
-#openshift_master_ldap_ca=<ca text>
-# or
-#openshift_master_ldap_ca_file=<path to local ca file to use>
-
-# OpenID auth
-#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
-#
-# Configure OpenID CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the OpenIDIdentityProvider.
-#
-#openshift_master_openid_ca=<ca text>
-# or
-#openshift_master_openid_ca_file=<path to local ca file to use>
-
-# Request header auth
-#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
-#
-# Configure request header CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "clientCA"
-# key set within the RequestHeaderIdentityProvider.
-#
-#openshift_master_request_header_ca=<ca text>
-# or
-#openshift_master_request_header_ca_file=<path to local ca file to use>
-
-# CloudForms Management Engine (ManageIQ) App Install
-#
-# Enables installation of MIQ server. Recommended for dedicated
-# clusters only. See roles/openshift_management/README.md for instructions
-# and requirements.
-#openshift_management_install_management=False
-
-# Cloud Provider Configuration
-#
-# Note: You may make use of environment variables rather than store
-# sensitive configuration within the ansible inventory.
-# For example:
-#openshift_cloudprovider_aws_access_key="{ lookup('env','AWS_ACCESS_KEY_ID') }"
-#openshift_cloudprovider_aws_secret_key="{ lookup('env','AWS_SECRET_ACCESS_KEY') }"
-#
-# AWS
-#openshift_cloudprovider_kind=aws
-# Note: IAM profiles may be used instead of storing API credentials on disk.
-#openshift_cloudprovider_aws_access_key=aws_access_key_id -#openshift_cloudprovider_aws_secret_key=aws_secret_access_key -# -# Openstack -#openshift_cloudprovider_kind=openstack -#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ -#openshift_cloudprovider_openstack_username=username -#openshift_cloudprovider_openstack_password=password -#openshift_cloudprovider_openstack_domain_id=domain_id -#openshift_cloudprovider_openstack_domain_name=domain_name -#openshift_cloudprovider_openstack_tenant_id=tenant_id -#openshift_cloudprovider_openstack_tenant_name=tenant_name -#openshift_cloudprovider_openstack_region=region -#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id -# -# Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting -#openshift_cloudprovider_openstack_blockstorage_version=v2 -# -# GCE -#openshift_cloudprovider_kind=gce -# Note: When using GCE, openshift_gcp_project and openshift_gcp_prefix must be -# defined. -# openshift_gcp_project is the project-id -#openshift_gcp_project= -# openshift_gcp_prefix is a unique string to identify each openshift cluster. -#openshift_gcp_prefix= -#openshift_gcp_multizone=False -# Note: To enable nested virtualization in gcp use the following variable and url -#openshift_gcp_licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" -# Additional details regarding nested virtualization are available: -# https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances -# -# vSphere -#openshift_cloudprovider_kind=vsphere -#openshift_cloudprovider_vsphere_username=username -#openshift_cloudprovider_vsphere_password=password -#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host -#openshift_cloudprovider_vsphere_datacenter=datacenter -#openshift_cloudprovider_vsphere_datastore=datastore -#openshift_cloudprovider_vsphere_folder=optional_folder_name - - -# Project Configuration -#osm_project_request_message='' -#osm_project_request_template='' -#osm_mcs_allocator_range='s0:/2' -#osm_mcs_labels_per_project=5 -#osm_uid_allocator_range='1000000000-1999999999/10000' - -# Configure additional projects -#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} - -# Enable cockpit -#osm_use_cockpit=true -# -# Set cockpit plugins -#osm_cockpit_plugins=['cockpit-kubernetes'] - -# If an external load balancer is used public hostname should resolve to -# external load balancer address -#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com - -# Configure controller arguments -#osm_controller_args={'resource-quota-sync-period': ['10s']} - -# Configure api server arguments -#osm_api_server_args={'max-requests-inflight': ['400']} - -# additional cors origins -#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] - -# default project node selector -#osm_default_node_selector='region=primary' - -# Override the default pod eviction timeout -#openshift_master_pod_eviction_timeout=5m - -# Override the default oauth tokenConfig settings: -# openshift_master_access_token_max_seconds=86400 -# openshift_master_auth_token_max_seconds=500 - -# Override master servingInfo.maxRequestsInFlight -#openshift_master_max_requests_inflight=500 - -# Override master and node servingInfo.minTLSVersion and .cipherSuites -# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 -# example cipher suites override, valid cipher suites are 
https://golang.org/pkg/crypto/tls/#pkg-constants -#openshift_master_min_tls_version=VersionTLS12 -#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] -# -#openshift_node_min_tls_version=VersionTLS12 -#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] - -# default storage plugin dependencies to install, by default the ceph and -# glusterfs plugin dependencies will be installed, if available. -#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] - -# OpenShift Router Options -# -# An OpenShift router will be created during install if there are -# nodes present with labels matching the default router selector, -# "node-role.kubernetes.io/infra=true". -# -# Example: -# [nodes] -# node.example.com openshift_node_group_name="node-config-infra" -# -# Router selector (optional) -# Router will only be created if nodes matching this label are present. -# Default value: 'node-role.kubernetes.io/infra=true' -#openshift_hosted_router_selector='node-role.kubernetes.io/infra=true' -# -# Router replicas (optional) -# Unless specified, openshift-ansible will calculate the replica count -# based on the number of nodes matching the openshift router selector. -#openshift_hosted_router_replicas=2 -# -# Router force subdomain (optional) -# A router path format to force on all routes used by this router -# (will ignore the route host value) -#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' -# -# Router certificate (optional) -# Provide local certificate paths which will be configured as the -# router's default certificate. -#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} -# -# Manage the OpenShift Router (optional) -#openshift_hosted_manage_router=true -# -# Router sharding support has been added and can be achieved by supplying the correct -# data to the inventory. The variable to house the data is openshift_hosted_routers -# and is in the form of a list. If no data is passed then a default router will be -# created. There are multiple combinations of router sharding. The one described -# below supports routers on separate nodes. 
-#
-#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
-
-# OpenShift Registry Console Options
-# Override the console image prefix:
-# origin default is "cockpit/", enterprise default is "openshift3/"
-#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
-# origin default is "kubernetes", enterprise default is "registry-console"
-#openshift_cockpit_deployer_basename=my-console
-# Override image version, defaults to latest for origin, vX.Y product version for enterprise
-#openshift_cockpit_deployer_version=1.4.1
-
-# Openshift Registry Options
-#
-# An OpenShift registry will be created during install if there are
-# nodes present with labels matching the default registry selector,
-# "node-role.kubernetes.io/infra=true".
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_group_name="node-config-infra"
-#
-# Registry selector (optional)
-# Registry will only be created if nodes matching this label are present.
-# Default value: 'node-role.kubernetes.io/infra=true'
-#openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true'
-#
-# Registry replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift registry selector.
-#openshift_hosted_registry_replicas=2
-#
-# Validity of the auto-generated certificate in days (optional)
-#openshift_hosted_registry_cert_expire_days=730
-#
-# Manage the OpenShift Registry (optional)
-#openshift_hosted_manage_registry=true
-# Manage the OpenShift Registry Console (optional)
-#openshift_hosted_manage_registry_console=true
-#
-# Registry Storage Options
-#
-# NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry". "exports" is
-# the name of the export served by the nfs server. "registry" is
-# the name of a directory inside of "/exports".
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-# nfs_directory must conform to DNS-1123 subdomain rules: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry". "exports" is
-# the name of the export served by the nfs server. "registry" is
-# the name of a directory inside of "/exports".
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=nfs.example.com
-# nfs_directory must conform to DNS-1123 subdomain rules: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-openshift_hosted_registry_storage_kind=nfs
-openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-openshift_hosted_registry_storage_host=ntap-iad2-c02-fedora01-nfs01a
-openshift_hosted_registry_storage_nfs_directory=/
-openshift_hosted_registry_storage_volume_name=openshift-stg-registry
-openshift_hosted_registry_storage_volume_size=10Gi
-#
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# hostPath (local filesystem storage)
-# Suitable for "all-in-one" or proof of concept deployments
-# Must not be used for high-availability and production deployments
-#openshift_hosted_registry_storage_kind=hostpath
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_hostpath_path=/var/lib/openshift_volumes
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# AWS S3
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_encrypt=false
-#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Any S3 service (Minio, ExoScale, ...): Basically the same as above
-# but with regionendpoint configured
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
-#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Additional CloudFront Options. When using CloudFront all three
-# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
-#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
-#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
-# vSphere Volume with vSphere Cloud Provider
-# openshift_hosted_registry_storage_kind=vsphere
-# openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-# openshift_hosted_registry_storage_annotations=['volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume']
-#
-# GCS Storage Bucket
-#openshift_hosted_registry_storage_provider=gcs
-#openshift_hosted_registry_storage_gcs_bucket=bucket01
-#openshift_hosted_registry_storage_gcs_keyfile=test.key
-#openshift_hosted_registry_storage_gcs_rootdirectory=/registry
-
-# Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#
-# By default metrics are not automatically deployed, set this to enable them
-openshift_metrics_install_metrics=true
-openshift_metrics_cassandra_storage_type=emptydir
-openshift_metrics_start_cluster=true
-openshift_metrics_cassandra_nodeselector={"node-role.kubernetes.io/infra":"true"}
-{% if openshift_metrics_deploy is defined %}
-{% if openshift_metrics_deploy %}
-openshift_hosted_metrics_deploy=true
-{% endif %}
-{% endif %}
-#
-# Storage Options
-# If openshift_metrics_storage_kind is unset then metrics will be stored
-# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
-# Storage options A & B currently support only one cassandra pod which is
-# generally enough for up to 1000 pods. Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics". "exports" is
-# the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics". "exports" is
-# the name of the export served by the nfs server. "metrics" is
-# the name of a directory inside of "/exports".
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_host=nfs.example.com
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.{openshift_master_default_subdomain}/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url; altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
-# Configure the metrics component images # Note, these will be modified by oreg_url by default
-#openshift_metrics_cassandra_image="docker.io/openshift/origin-metrics-cassandra:{ openshift_image_tag }"
-#openshift_metrics_hawkular_agent_image="docker.io/openshift/origin-metrics-hawkular-openshift-agent:{ openshift_image_tag }"
-#openshift_metrics_hawkular_metrics_image="docker.io/openshift/origin-metrics-hawkular-metrics:{ openshift_image_tag }"
-#openshift_metrics_schema_installer_image="docker.io/openshift/origin-metrics-schema-installer:{ openshift_image_tag }"
-#openshift_metrics_heapster_image="docker.io/openshift/origin-metrics-heapster:{ openshift_image_tag }"
-# when openshift_deployment_type=='openshift-enterprise'
-#openshift_metrics_cassandra_image="registry.access.redhat.com/openshift3/metrics-cassandra:{ openshift_image_tag }"
-#openshift_metrics_hawkular_agent_image="registry.access.redhat.com/openshift3/metrics-hawkular-openshift-agent:{ openshift_image_tag }"
-#openshift_metrics_hawkular_metrics_image="registry.access.redhat.com/openshift3/metrics-hawkular-metrics:{ openshift_image_tag }"
-#openshift_metrics_schema_installer_image="registry.access.redhat.com/openshift3/metrics-schema-installer:{ openshift_image_tag }"
-#openshift_metrics_heapster_image="registry.access.redhat.com/openshift3/metrics-heapster:{ openshift_image_tag }"
-#
-# StorageClass
-# openshift_storageclass_name=gp3
-# openshift_storageclass_parameters={'type': 'gp3', 'encrypted': 'false'}
-# openshift_storageclass_mount_options=['dir_mode=0777', 'file_mode=0777']
-# openshift_storageclass_reclaim_policy="Delete"
-#
-# PersistentLocalStorage
-# If Persistent Local Storage is wanted, this boolean can be set to True.
-# This will create all necessary configuration to use persistent storage on nodes.
-#openshift_persistentlocalstorage_enabled=False
-#openshift_persistentlocalstorage_classes=[]
-#openshift_persistentlocalstorage_path=/mnt/local-storage
-#openshift_persistentlocalstorage_provisionner_image=quay.io/external_storage/local-volume-provisioner:v1.0.1
-
-# Logging deployment
-#
-# Currently logging deployment is disabled by default, enable it by setting this
-openshift_logging_install_logging=true
-openshift_logging_es_nodeselector={"node-role.kubernetes.io/infra":"true"}
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging". "exports" is
-# the name of the export served by the nfs server. "logging" is
-# the name of a directory inside of "/exports".
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/_volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging". "exports" is
-# the name of the export served by the nfs server. "logging" is
-# the name of a directory inside of "/exports".
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_host=nfs.example.com
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-openshift_logging_storage_kind=nfs
-openshift_logging_storage_access_modes=['ReadWriteOnce']
-openshift_logging_storage_host=ntap-iad2-c02-fedora01-nfs01a
-openshift_logging_storage_nfs_directory=/openshift-stg-logging
-openshift_logging_storage_volume_name=logging
-openshift_logging_storage_volume_size=100Gi
-openshift_logging_storage_nfs_options='*(rw,root_squash)'
-openshift_logging_storage_labels={'storage': 'logging'}
-openshift_logging_elasticsearch_storage_type=pvc
-openshift_logging_es_pvc_size=10Gi
-openshift_logging_es_pvc_storage_class_name=''
-openshift_logging_es_pvc_dynamic=true
-openshift_logging_es_pvc_prefix=logging
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_logging_storage_kind=dynamic
-#
-# Option D - none -- Logging will use emptydir volumes which are destroyed when
-# pods are deleted
-#
-# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_logging/README.md
-#
-# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to kibana.{ openshift_master_default_subdomain }
-#openshift_logging_kibana_hostname=logging.apps.example.com
-# Configure the number of Elasticsearch nodes; unless you're using dynamic provisioning
-# this value must be 1
-openshift_logging_es_cluster_size=1
-
-# Prometheus deployment
-#
-# Currently prometheus deployment is disabled by default, enable it by setting this
-#openshift_hosted_prometheus_deploy=true
-#
-# Prometheus storage config
-# By default prometheus uses emptydir storage; if you want persistence you should
-# configure it to use the pvc storage type. Each volume must be ReadWriteOnce.
-#openshift_prometheus_storage_type=emptydir
-#openshift_prometheus_alertmanager_storage_type=emptydir
-#openshift_prometheus_alertbuffer_storage_type=emptydir
-# Use PVCs for persistence
-#openshift_prometheus_storage_type=pvc
-#openshift_prometheus_alertmanager_storage_type=pvc
-#openshift_prometheus_alertbuffer_storage_type=pvc
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. Cannot be changed
-# after deployment.
-#
-# WARNING: Do not pick subnets that overlap with the default Docker bridge subnet of
-# 172.17.0.0/16. Your installation will fail and/or your configuration change will
-# cause the Pod SDN or Cluster SDN to fail.
-#
-# WORKAROUND: If you must use an overlapping subnet, you can configure a non-conflicting
-# docker0 CIDR range by adding '--bip=192.168.2.1/24' to the DOCKER_NETWORK_OPTIONS
-# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_cluster_network_cidr: clusterNetworkCIDR
-# openshift_portal_net: serviceNetworkCIDR
-# When installing osm_cluster_network_cidr and openshift_portal_net must be set.
-# Sane examples are provided below.
-#osm_cluster_network_cidr=10.128.0.0/14
-#openshift_portal_net=172.30.0.0/16
-
-# ExternalIPNetworkCIDRs controls what values are acceptable for the
-# service external IP field. If empty, no externalIP may be set. It
-# may contain a list of CIDRs which are checked for access. If a CIDR
-# is prefixed with !, IPs in that CIDR will be rejected. Rejections
-# will be applied first, then the IP checked against one of the
-# allowed CIDRs. You should ensure this range does not overlap with
-# your nodes, pods, or service CIDRs for security reasons.
-#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
-
-# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
-# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
-# be assigned. It may contain a single CIDR that will be allocated from. For
-# security reasons, you should ensure that this range does not overlap with
-# the CIDRs reserved for external IPs, nodes, pods, or services.
-#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-
-# Configure the number of bits to allocate to each host's subnet, e.g. 9
-# would mean a /23 network on the host.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_host_subnet_length: hostSubnetLength
-# When installing osm_host_subnet_length must be set. A sane example is provided below.
-#osm_host_subnet_length=9
-
-# Configure master API and console ports.
-#openshift_master_api_port=8443
-#openshift_master_console_port=8443
-{% if openshift_api_port is defined and openshift_console_port is defined %}
-{% if openshift_api_port and openshift_console_port %}
-openshift_master_api_port={{openshift_api_port}}
-openshift_master_console_port={{openshift_console_port}}
-{% endif %}
-{% endif %}
-
-# set exact RPM version (include - prefix)
-#openshift_pkg_version=-3.9.0
-# you may also specify version and release, i.e.:
-#openshift_pkg_version=-3.9.0-0.126.0.git.0.9351aae.el7
-
-# Configure a custom CA certificate
-#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
-#
-# NOTE: The CA certificate will not be replaced on existing clusters.
-# This option may only be specified when creating a new cluster or
-# when redeploying cluster certificates with the redeploy-certificates
-# playbook.
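As a quick cross-check of the SDN sizing guidance above, here is a small standalone sketch (not part of the deleted inventory; the CIDR values are the sample ones from the comments) that verifies the example cluster and service networks avoid the default docker0 bridge subnet and shows how osm_host_subnet_length maps to a per-host prefix:

    import ipaddress

    docker_bridge = ipaddress.ip_network("172.17.0.0/16")  # default docker0 subnet
    cluster_cidr = ipaddress.ip_network("10.128.0.0/14")   # sample osm_cluster_network_cidr
    service_cidr = ipaddress.ip_network("172.30.0.0/16")   # sample openshift_portal_net

    for label, net in (("cluster", cluster_cidr), ("service", service_cidr)):
        # Overlapping the docker bridge is the failure mode the WARNING above describes
        if net.overlaps(docker_bridge):
            raise SystemExit(f"{label} CIDR {net} overlaps docker0 {docker_bridge}")

    # osm_host_subnet_length=9 leaves 32 - 9 = 23 network bits, i.e. a /23 per node
    host_subnet_length = 9
    print(f"per-host subnet: /{32 - host_subnet_length}")

Running a check like this before an install catches the overlap problem while it is still cheap to fix; after deployment the cluster and service CIDRs cannot be changed.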
-
-# Configure custom named certificates (SNI certificates)
-#
-# https://docs.openshift.org/latest/install_config/certificate_customization.html
-# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
-#
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-#
-# An optional CA may be specified for each named certificate. CAs will
-# be added to the OpenShift CA bundle which allows for the named
-# certificate to be served for internal cluster communication.
-#
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates=true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Add a trusted CA to all pods, copies from the control host, may be multiple
-# certs in one file
-#openshift_additional_ca=/path/to/additional-ca.crt
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_session_encryption_secrets must be of equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. It is recommended to use secrets of 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
-#openshift_set_node_ip=True
-
-#openshift_node_kubelet_args is deprecated, use node config edits instead
-
-# Configure logrotate scripts
-# See: https://github.com/nickhammond/ansible-logrotate
-#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-
-# The OpenShift-Ansible installer will fail when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
-# interfaces. This misconfiguration is problematic for any pod leveraging host
-# networking and liveness or readiness probes.
-# Setting this variable to false will override that check.
-#openshift_hostname_check=true
-
-# openshift_use_dnsmasq is deprecated.
This must be true, or installs will fail -# in versions >= 3.6 -#openshift_use_dnsmasq=False - -# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf -# This is useful for POC environments where DNS may not actually be available yet or to set -# options like 'strict-order' to alter dnsmasq configuration. -#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf - -# Global Proxy Configuration -# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment -# variables for docker and master services. -# -# Hosts in the openshift_no_proxy list will NOT use any globally -# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains -# (.example.com), hosts (example.com), and IP addresses. -#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT -#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT -#openshift_no_proxy='.hosts.example.com,some-host.com' -# -# Most environments don't require a proxy between openshift masters, nodes, and -# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. -# If all of your hosts share a common domain you may wish to disable this and -# specify that domain above instead. -# -# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and -# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy -# variable (above) and set this value to False -#openshift_generate_no_proxy_hosts=True -# -# These options configure the BuildDefaults admission controller which injects -# configuration into Builds. Proxy related values will default to the global proxy -# config values. You only need to set these if they differ from the global proxy settings. -# See BuildDefaults documentation at -# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html -#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_no_proxy=mycorp.com -#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT -#openshift_builddefaults_git_no_proxy=mycorp.com -#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] -#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} -#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} -#openshift_builddefaults_resources_requests_cpu=100m -#openshift_builddefaults_resources_requests_memory=256Mi -#openshift_builddefaults_resources_limits_cpu=1000m -#openshift_builddefaults_resources_limits_memory=512Mi - -# Or you may optionally define your own build defaults configuration serialized as json -#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' - -# These options configure the BuildOverrides admission controller which injects -# configuration into Builds. 
-# See BuildOverrides documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_buildoverrides_force_pull=true
-#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
-#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}]
-
-# Or you may optionally define your own build overrides configuration serialized as json
-#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-
-# Enable service catalog
-openshift_enable_service_catalog=true
-
-# Enable template service broker (requires service catalog to be enabled, above)
-template_service_broker_install=true
-
-# Specify an openshift_service_catalog image
-# (defaults for origin and openshift-enterprise, respectively)
-#openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{ openshift_image_tag }"
-openshift_service_catalog_image="registry.access.redhat.com/openshift3/ose-service-catalog:v3.11.216"
-
-# TSB image tag
-template_service_broker_version='v3.11.216'
-
-# Configure one or more namespaces whose templates will be served by the TSB
-openshift_template_service_broker_namespaces=['openshift']
-
-# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=True
-
-# Admission plugin config
-#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
-
-# Configure usage of openshift_clock role.
-openshift_clock_enabled=true
-
-# OpenShift Per-Service Environment Variables
-# Environment variables are added to /etc/sysconfig files for
-# each OpenShift node.
-# API and controllers environment variables are merged in single
-# master environments.
-#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-{% if no_http2 is defined %}
-{% if no_http2 %}
-openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
-openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-{% endif %}
-{% endif %}
-
-# Enable API service auditing
-#openshift_master_audit_config={"enabled": "true"}
-#
-# If you want a more advanced setup for the audit log you can
-# use this line.
-# The directory in "auditFilePath" will be created if it does not
-# exist
-#openshift_master_audit_config={"enabled": "true", "auditFilePath": "/var/lib/origin/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": "14", "maximumFileSizeMegabytes": "500", "maximumRetainedFiles": "5"}
-
-# Enable origin repos that point at CentOS PAAS SIG, defaults to true, only used
-# by openshift_deployment_type=origin
-#openshift_enable_origin_repo=false
-
-# Validity of the auto-generated OpenShift certificates in days.
-# See also openshift_hosted_registry_cert_expire_days above.
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-#
-# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
-# openshift_master_saconfig_limitsecretreferences=false
-
-# Upgrade Control
-#
-# By default nodes are upgraded in a serial manner one at a time and all failures
-# are fatal, one set of variables for normal nodes, one set of variables for
-# nodes that are part of control plane as the number of hosts may be different
-# in those two groups.
-#openshift_upgrade_nodes_serial=1
-#openshift_upgrade_nodes_max_fail_percentage=0
-#openshift_upgrade_control_plane_nodes_serial=1
-#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
-#
-# You can specify the number of nodes to upgrade at once. We do not currently
-# attempt to verify that you have capacity to drain this many nodes at once
-# so please be careful when specifying these values. You should also verify that
-# the expected number of nodes are all schedulable and ready before starting an
-# upgrade. If it's not possible to drain the requested nodes the upgrade will
-# stall indefinitely until the drain is successful.
-#
-# If you're upgrading more than one node at a time you can specify the maximum
-# percentage of failure within the batch before the upgrade is aborted. Any
-# nodes that do fail are ignored for the rest of the playbook run and you should
-# take care to investigate the failure and return the node to service so that
-# your cluster can return to full capacity.
-#
-# The failure percentage must exceed this value before the upgrade aborts; this would fail on two failures
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
-# whereas this would not
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
-#
-# A timeout to wait for nodes to drain pods can be specified to ensure that the
-# upgrade continues even if nodes fail to drain pods in the allowed time. The
-# default value of 0 will wait indefinitely allowing the admin to investigate
-# the root cause and ensuring that disruption budgets are respected. If
-# a timeout of 0 is used there will also be one attempt to re-try draining the
-# node. If a non-zero timeout is specified there will be no attempt to retry.
-#openshift_upgrade_nodes_drain_timeout=0
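To make the failure-percentage rule above concrete, here is an illustrative sketch (the helper function is hypothetical, not part of openshift-ansible): a batch aborts only when its observed failure percentage strictly exceeds the configured maximum.

    def upgrade_batch_aborts(failed_nodes: int, serial: int, max_fail_percentage: int) -> bool:
        """Abort only if the failure rate exceeds (not merely reaches) the limit."""
        return (failed_nodes / serial) * 100 > max_fail_percentage

    # Two failures in a batch of four is a 50% failure rate:
    assert upgrade_batch_aborts(2, 4, 49)       # 50 > 49, so the upgrade aborts
    assert not upgrade_batch_aborts(2, 4, 50)   # 50 is not > 50, so it continues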
-#
-# Multiple data migrations take place and if they fail they will fail the upgrade.
-# You may wish to disable these or make them non-fatal
-#
-# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal=true
-# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal=false
-
-######################################################################
-# CloudForms/ManageIQ (CFME/MIQ) Configuration
-
-# See the readme for full descriptions and getting started
-# instructions: ../../roles/openshift_management/README.md or go directly to
-# their definitions: ../../roles/openshift_management/defaults/main.yml
-# ../../roles/openshift_management/vars/main.yml
-#
-# Namespace for the CFME project
-#openshift_management_project: openshift-management
-
-# Namespace/project description
-#openshift_management_project_description: CloudForms Management Engine
-
-# Choose 'miq-template' for a podified database install
-# Choose 'miq-template-ext-db' for an external database install
-#
-# If you are using the miq-template-ext-db template then you must add
-# the required database parameters to the
-# openshift_management_template_parameters variable.
-#openshift_management_app_template: miq-template
-
-# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
-#openshift_management_storage_class: nfs
-
-# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
-# NetApp appliance, then you must set the hostname here. Leave the
-# value as 'false' if you are not using external NFS.
-#openshift_management_storage_nfs_external_hostname: false
-
-# [OPTIONAL] - If you are using external NFS then you must set the base
-# path to the exports location here.
-#
-# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports
-# that will back the application PV and optionally the database
-# pv. Export path definitions, relative to
-# { openshift_management_storage_nfs_base_dir}
-#
-# LOCAL NFS NOTE:
-#
-# You may also change this value if you want to change the default
-# path used for local NFS exports.
-#openshift_management_storage_nfs_base_dir: /exports
-
-# LOCAL NFS NOTE:
-#
-# You may override the automatically selected LOCAL NFS server by
-# setting this variable. Useful for testing specific task files.
-#openshift_management_storage_nfs_local_hostname: false
-
-# These are the default values for the username and password of the
-# management app. Changing these values in your inventory will not
-# change your username or password. You should only need to change
-# these values in your inventory if you already changed the actual
-# name and password AND are trying to use integration scripts.
-#
-# For example, adding this cluster as a container provider,
-# playbooks/openshift-management/add_container_provider.yml
-#openshift_management_username: admin
-#openshift_management_password: smartvm
-
-# A hash of parameters you want to override or set in the
-# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
-# your inventory file as a simple hash. Acceptable values are defined
-# under the .parameters list in files/miq-template{-ext-db}.yaml
-# Example:
-#
-# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
-#openshift_management_template_parameters: {}
-
-# Firewall configuration
-# You can open additional firewall ports by defining them as a list
of service -# names and ports/port ranges for either masters or nodes. -#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}] -#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}] - -# Service port node range -#openshift_node_port_range=30000-32767 - -# Enable unsupported configurations, things that will yield a partially -# functioning cluster but would not be supported for production use -#openshift_enable_unsupported_configurations=false -openshift_enable_unsupported_configurations=True diff --git a/roles/haproxy/templates/haproxy.cfg b/roles/haproxy/templates/haproxy.cfg index 853382aa8d..0f87c19bf9 100644 --- a/roles/haproxy/templates/haproxy.cfg +++ b/roles/haproxy/templates/haproxy.cfg @@ -235,14 +235,6 @@ backend pdc-backend timeout server 3600000 timeout connect 3600000 -frontend osbs-frontend - bind 0.0.0.0:10047 - default_backend osbs-backend - -backend osbs-backend - balance hdr(appserver) - server osbs-master01 osbs-master01:8443 check inter 10s rise 1 fall 2 check ssl verify none - frontend oci-registry-frontend bind 0.0.0.0:10048 default_backend oci-registry-backend diff --git a/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf b/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf index 24b8ddeaa7..c0d822d36d 100644 --- a/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf +++ b/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf @@ -16,8 +16,6 @@ RewriteCond %{HTTP:VIA} !cloudfront RewriteCond %{SERVER_NAME} !^registry-no-cdn\.fedoraproject\.org$ # We don't want some methods to go to the cdn so we can update it RewriteCond %{REQUEST_METHOD} !^(PATCH|POST|PUT|DELETE|HEAD)$ -# osbs hosts shouldn't use the cdn -RewriteCond %{REMOTE_HOST} !^osbs-*$ # builders shouldn't use the cdn for flatpak building. RewriteCond expr "! -R '10.3.169.0/24'" RewriteCond expr "! -R '10.3.170.0/24'" diff --git a/roles/koji_builder/tasks/main.yml b/roles/koji_builder/tasks/main.yml index f528f5ed16..3cf14a6d39 100644 --- a/roles/koji_builder/tasks/main.yml +++ b/roles/koji_builder/tasks/main.yml @@ -278,16 +278,6 @@ tags: - koji_builder -# non-bkernel x86_64 builders run container_build, which needs osbs -- name: special pkgs for the x86_64 builders - package: - state: present - name: - - python3-osbs-client.noarch - when: "ansible_architecture == 'x86_64' and not inventory_hostname.startswith('bkernel')" - tags: - - koji_builder - # Before, the builders had the "apache" role. 
This is a temporary play to remove the httpd daemon everywhere - name: Uninstall httpd package: name=httpd diff --git a/roles/nagios_client/tasks/main.yml b/roles/nagios_client/tasks/main.yml index 592051c948..7998dba30e 100644 --- a/roles/nagios_client/tasks/main.yml +++ b/roles/nagios_client/tasks/main.yml @@ -56,7 +56,6 @@ - check_readonly_fs - check_lock_file_age - check_testcloud - - check_osbs_api.py - check_ipa_replication - check_redis_queue.sh - check_timestamp_from_file @@ -160,7 +159,6 @@ - check_lock_file_age.cfg - check_basset.cfg - check_fmn.cfg - - check_osbs.cfg - check_testcloud.cfg - check_mirrorlist_docker_proxy.cfg - check_mirrorlist_cache.cfg diff --git a/roles/nagios_client/templates/check_osbs.cfg.j2 b/roles/nagios_client/templates/check_osbs.cfg.j2 deleted file mode 100644 index 1bd7e2fcc9..0000000000 --- a/roles/nagios_client/templates/check_osbs.cfg.j2 +++ /dev/null @@ -1 +0,0 @@ -command[check_osbs_api]={{ libdir }}/nagios/plugins/check_osbs_api.py diff --git a/roles/nagios_server/files/nagios/services/iad2_internal/osbs.cfg b/roles/nagios_server/files/nagios/services/iad2_internal/osbs.cfg deleted file mode 100644 index 15db42ce1c..0000000000 --- a/roles/nagios_server/files/nagios/services/iad2_internal/osbs.cfg +++ /dev/null @@ -1,7 +0,0 @@ -define service { - host_name osbs-master01.iad2.fedoraproject.org - service_description Check OSBS API endpoint paths - check_command check_by_nrpe!check_osbs_api - max_check_attempts 5 - use defaulttemplate -} diff --git a/roles/nagios_server/tasks/main.yml b/roles/nagios_server/tasks/main.yml index 40ae40a8e8..0a3d1b3306 100644 --- a/roles/nagios_server/tasks/main.yml +++ b/roles/nagios_server/tasks/main.yml @@ -191,7 +191,6 @@ - locking.cfg - mailman.cfg - nrpe.cfg - - osbs.cfg - pgsql.cfg tags: - nagios_config @@ -213,7 +212,6 @@ - locking.cfg - mailman.cfg - nrpe.cfg - - osbs.cfg - pgsql.cfg - rabbitmq.cfg tags: diff --git a/roles/odcs/base/defaults/main.yml b/roles/odcs/base/defaults/main.yml index f35d98f2f4..be0b59a89a 100644 --- a/roles/odcs/base/defaults/main.yml +++ b/roles/odcs/base/defaults/main.yml @@ -6,7 +6,6 @@ odcs_pdc_develop: True odcs_target_dir: /srv/odcs odcs_target_dir_url: http://{{ inventory_hostname }}/composes odcs_allowed_clients_groups: {"sysadmin-odcs": {}, "pungi-devel": {}, "packager": {"source_types": ["module"]}} -odcs_allowed_clients_users: {"osbs@service": {}} odcs_admin_groups: ["sysadmin-odcs", "pungi-devel"] odcs_admin_users: [] odcs_raw_config_urls: {} diff --git a/roles/osbs-client/defaults/main.yml b/roles/osbs-client/defaults/main.yml deleted file mode 100644 index d31bca1c06..0000000000 --- a/roles/osbs-client/defaults/main.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -# defaults file for osbs-client -# - -# config file path - -osbs_client_conf_path: /etc/osbs.conf - -# Settings for the [general] section of the osbs.conf file -general: - verbose: 1 - build_json_dir: /usr/share/osbs/ - openshift_required_version: 1.0.8 - -# Settings for the [default] section of the osbs.conf file -default: - username: "" - password: "" - openshift_url: https://osbs.localdomain:8443/ - koji_root: http://koji.fedoraproject.org/koji - koji_hub: http://koji.fedoraproject.org/kojihub - sources_command: fedpkg sources - build_type: prod - registry_uri: https://osbs.localdomain:5000/v2 - source_registry_uri: https://osbs.localdomain:5000/v2 - vendor: Fedora Project - build_host: osbs.localdomain - verify_ssl: false - use_auth: false - builder_use_auth: true - registry_api_versions: v2 - builder_openshift_url: 
https://172.17.0.1:8443/ - koji_certs_secret: "" - koji_use_kerberos: false - koji_kerberos_keytab: "" - koji_kerberos_principal: "" - use_kerberos: false - kerberos_keytab: "" - kerberos_principal: "" - registry_secret_name: "" - builder_odcs_url: "" - builder_odcs_insecure: true - builder_odcs_openidc_secret: "" - builder_pdc_url: "" - builder_pdc_insecure: true - flatpak_base_image: "" diff --git a/roles/osbs-client/files/README b/roles/osbs-client/files/README deleted file mode 100644 index 6f8b02c0cf..0000000000 --- a/roles/osbs-client/files/README +++ /dev/null @@ -1,7 +0,0 @@ -The site-customize file here additionally disables or enables plugins on top of -the default set. - -The default set ships with osbs-client and can be found here: -https://github.com/projectatomic/osbs-client/blob/master/inputs/prod_inner.json - -See also: https://github.com/projectatomic/osbs-client/blob/master/docs/build_process.md diff --git a/roles/osbs-client/files/osbs-orchestrator-customize.json b/roles/osbs-client/files/osbs-orchestrator-customize.json deleted file mode 100644 index 6548858b37..0000000000 --- a/roles/osbs-client/files/osbs-orchestrator-customize.json +++ /dev/null @@ -1,11 +0,0 @@ -{ - "disable_plugins": [ - { - "plugin_type": "exit_plugins", - "plugin_name": "import_image" - } - ], - - "enable_plugins": [ - ] -} diff --git a/roles/osbs-client/files/osbs-site-customize.json b/roles/osbs-client/files/osbs-site-customize.json deleted file mode 100644 index 17b33fbb1f..0000000000 --- a/roles/osbs-client/files/osbs-site-customize.json +++ /dev/null @@ -1,44 +0,0 @@ -{ - "disable_plugins": [ - { - "plugin_type": "postbuild_plugins", - "plugin_name": "pulp_push" - }, - { - "plugin_type": "postbuild_plugins", - "plugin_name": "pulp_sync" - }, - { - "plugin_type": "postbuild_plugins", - "plugin_name": "pulp_pull" - }, - { - "plugin_type": "prebuild_plugins", - "plugin_name": "resolve_module_compose" - }, - { - "plugin_type": "prebuild_plugins", - "plugin_name": "flatpak_create_dockerfile" - }, - { - "plugin_type": "prepublish_plugins", - "plugin_name": "flatpak_create_oci" - }, - { - "plugin_type": "postbuild_plugins", - "plugin_name": "import_image" - } - ], - - "enable_plugins": [ - { - "plugin_type": "postbuild_plugins", - "plugin_name": "tag_and_push", - "plugin_args": { - "registries": { - "{{REGISTRY_URI}}": { "insecure": false } - } - } - } - ] -} diff --git a/roles/osbs-client/tasks/main.yml b/roles/osbs-client/tasks/main.yml deleted file mode 100644 index 8dcb80d386..0000000000 --- a/roles/osbs-client/tasks/main.yml +++ /dev/null @@ -1,12 +0,0 @@ ---- -# tasks file for osbs-client - -- name: install osbs-client package - action: "{{ ansible_pkg_mgr }} name=osbs-client state=present" - tags: - - osbs-client - -- name: apply osbs-client templated config - template: src=osbs.conf.j2 dest={{ osbs_client_conf_path }} mode=0640 - tags: - - osbs-client \ No newline at end of file diff --git a/roles/osbs-client/templates/osbs.conf.j2 b/roles/osbs-client/templates/osbs.conf.j2 deleted file mode 100644 index e193c08418..0000000000 --- a/roles/osbs-client/templates/osbs.conf.j2 +++ /dev/null @@ -1,170 +0,0 @@ -[general] -verbose= {{ general.verbose }} -build_json_dir = {{ general.build_json_dir }} -openshift_required_version = {{ general.openshift_required_version }} - -[default] -{% if default.username is defined %} -username = {{ default.username }} -{% endif %} -{% if default.password is defined %} -password = {{ default.password }} -{% endif %} -{% if default.koji_certs_secret != "" %} 
-koji_certs_secret = {{ default.koji_certs_secret }} -{% endif %} -{% if default.koji_use_kerberos is defined %} -koji_use_kerberos = {{ default.koji_use_kerberos }} -{% endif %} -{% if default.koji_kerberos_keytab is defined %} -koji_kerberos_keytab = {{ default.koji_kerberos_keytab }} -{% endif %} -{% if default.koji_kerberos_principal is defined %} -koji_kerberos_principal = {{ default.koji_kerberos_principal }} -{% endif %} -{% if default.use_kerberos is defined %} -use_kerberos = {{ default.use_kerberos }} -{% endif %} -{% if default.kerberos_keytab is defined %} -kerberos_keytab = {{ default.kerberos_keytab }} -{% endif %} -{% if default.kerberos_principal is defined %} -kerberos_principal = {{ default.kerberos_principal }} -{% endif %} -{% if default.token_file is defined %} -token_file = {{ default.token_file }} -{% endif %} - - -{% if default.can_orchestrate is defined %} -# Orchestrator/Worker Architecture split additions -can_orchestrate = {{ default.can_orchestrate }} -{% endif %} -{% if default.namespace is defined %} -namespace = {{ default.namespace }} -{% endif %} -{% if default.client_config_secret is defined %} -client_config_secret = {{ default.client_config_secret }} -{% endif %} -{% if default.reactor_config_secret is defined %} -reactor_config_secret = {{ default.reactor_config_secret }} -{% endif %} -{% if default.token_secrets is defined %} -token_secrets = {{ default.token_secrets }} -{% endif %} - -openshift_url = {{ default.openshift_url }} -koji_root = {{ default.koji_root }} -koji_hub = {{ default.koji_hub }} -sources_command = {{ default.sources_command }} -build_type = {{ default.build_type }} -registry_uri = {{ default.registry_uri }} -source_registry_uri = {{ default.source_registry_uri }} -vendor = {{ default.vendor }} -build_host = {{ default.build_host }} -verify_ssl = {{ default.verify_ssl }} -use_auth = {{ default.use_auth }} -builder_use_auth = {{ default.builder_use_auth }} -registry_api_versions = {{ default.registry_api_versions }} -{% if default.registry_secret_name %} -registry_secret = {{ default.registry_secret_name }} -{% endif %} -builder_openshift_url = {{ default.builder_openshift_url }} - -{% if default.builder_odcs_url %} -odcs_url = {{ default.builder_odcs_url }} -odcs_insecure = {{ default.builder_odcs_insecure }} -odcs_openidc_secret = {{ default.builder_odcs_openidc_secret }} -{% endif %} - -{% if default.reactor_config_map is defined %} -reactor_config_map = {{ default.reactor_config_map }} -{% endif %} - -{% if default.build_from is defined %} -build_from = {{ default.build_from }} -{% endif %} - -[scratch] -scratch = true -{% if default.username is defined %} -username = {{ default.username }} -{% endif %} -{% if default.password is defined %} -password = {{ default.password }} -{% endif %} -{% if default.koji_certs_secret != "" %} -koji_certs_secret = {{ default.koji_certs_secret }} -{% endif %} -{% if default.koji_use_kerberos is defined %} -koji_use_kerberos = {{ default.koji_use_kerberos }} -{% endif %} -{% if default.koji_kerberos_keytab is defined %} -koji_kerberos_keytab = {{ default.koji_kerberos_keytab }} -{% endif %} -{% if default.koji_kerberos_principal is defined %} -koji_kerberos_principal = {{ default.koji_kerberos_principal }} -{% endif %} -{% if default.use_kerberos is defined %} -use_kerberos = {{ default.use_kerberos }} -{% endif %} -{% if default.kerberos_keytab is defined %} -kerberos_keytab = {{ default.kerberos_keytab }} -{% endif %} -{% if default.kerberos_principal is defined %} -kerberos_principal = {{ 
default.kerberos_principal }} -{% endif %} -{% if default.token_file is defined %} -token_file = {{ default.token_file }} -{% endif %} - - -{% if default.can_orchestrate is defined %} -# Orchestrator/Worker Architecture split additions -can_orchestrate = {{ default.can_orchestrate }} -{% endif %} -{% if default.namespace is defined %} -namespace = {{ default.namespace }} -{% endif %} -{% if default.client_config_secret is defined %} -client_config_secret = {{ default.client_config_secret }} -{% endif %} -{% if default.reactor_config_secret is defined %} -reactor_config_secret = {{ default.reactor_config_secret }} -{% endif %} -{% if default.token_secrets is defined %} -token_secrets = {{ default.token_secrets }} -{% endif %} - -openshift_url = {{ default.openshift_url }} -koji_root = {{ default.koji_root }} -koji_hub = {{ default.koji_hub }} -sources_command = {{ default.sources_command }} -build_type = {{ default.build_type }} -registry_uri = {{ default.registry_uri }} -source_registry_uri = {{ default.source_registry_uri }} -vendor = {{ default.vendor }} -build_host = {{ default.build_host }} -verify_ssl = {{ default.verify_ssl }} -use_auth = {{ default.use_auth }} -builder_use_auth = {{ default.builder_use_auth }} -registry_api_versions = {{ default.registry_api_versions }} -{% if default.registry_secret_name %} -registry_secret = {{ default.registry_secret_name }} -{% endif %} -builder_openshift_url = {{ default.builder_openshift_url }} -unique_tag_only = true - -{% if default.builder_odcs_url %} -odcs_url = {{ default.builder_odcs_url }} -odcs_insecure = {{ default.builder_odcs_insecure }} -odcs_openidc_secret = {{ default.builder_odcs_openidc_secret }} -{% endif %} - -{% if default.reactor_config_map_scratch is defined %} -reactor_config_map = {{ default.reactor_config_map_scratch }} -{% endif %} - -{% if default.build_from is defined %} -build_from = {{ default.build_from }} -{% endif %} diff --git a/roles/osbs-namespace/README.md b/roles/osbs-namespace/README.md deleted file mode 100644 index b650ec1d91..0000000000 --- a/roles/osbs-namespace/README.md +++ /dev/null @@ -1,165 +0,0 @@ -osbs-namespace -============== - -Setup an OpenShift namespace as required by OSBS: -- Create namespace, also referred to as project (`osbs_namespace`) -- Create service accounts (`osbs_service_accounts`) - -If user is cluster admin (`osbs_is_admin`), the following is also performed: -- Create policy binding -- Create osbs-custom-build role to allow custom builds -- Sets up rolebindings for specified users, groups and service accounts - -For orchestrator namespaces (`osbs_orchestrator`): -- reactor-config-secret is generated and stored in `osbs_generated_config_path` - use osbs-secret to import it -- client-config-secret is generated and stored in `osbs_generated_config_path` - use osbs-secret to import it - -Requirements ------------- - -A running instance of OpenShift. - -Role Variables --------------- - - # Namespace name to be used - osbs_namespace: 'my-namespace' - # Is user running playbook as cluster admin? - osbs_is_admin: true - # Will the namespace be used for orchestrator builds? 
- osbs_orchestrator: true
-
- # Worker clusters to be used for generating reactor and client config secrets
- # in orchestrator namespace
- osbs_worker_clusters:
- x86_64:
- - name: prod-first-x86_64
- max_concurrent_builds: 6
- openshift_url: https://my-first-x86_64-cluster.fedoraproject.org:8443
- - name: prod-second-x86_64
- max_concurrent_builds: 16
- openshift_url: https://my-second-x86_64-cluster.fedoraproject.org
- # optional params, and their defaults:
- enabled: true # yaml boolean
- namespace: worker
- use_auth: 'true' # yaml string
- verify_ssl: 'true' # yaml string
-
- ppc64le:
- - name: prod-ppc64le
- max_concurrent_builds: 6
- openshift_url: https://my-ppc64le-cluster.fedoraproject.org:8443
-
- # Reactor config maps to be created in orchestrator namespace
- osbs_reactor_config_maps:
- - name: reactor-config-map
- # See config.json schema in atomic-reactor project for details:
- # https://github.com/projectatomic/atomic-reactor/blob/master/atomic_reactor/schemas/config.json
- data:
- clusters:
- x86_64:
- - enabled: true
- max_concurrent_builds: 10
- name: x86_64-on-premise
- version: 1
-
- # Service accounts to be created - these accounts will also be bound to
- # edit clusterrole and osbs-custom-build role in specified namespace
- osbs_service_accounts:
- - bot
- - ci
-
- # Users and groups to be assigned view clusterrole in specified namespace
- osbs_readonly_groups:
- - group1
- - group2
- osbs_readonly_users:
- - user1
- - user2
-
- # Users and groups to be assigned edit clusterrole and osbs-custom-build
- # role in specified namespace
- osbs_readwrite_groups:
- - group1
- - group2
- osbs_readwrite_users:
- - user1
- - user2
-
- # Users and groups to be assigned admin clusterrole and osbs-custom-build
- # role in specified namespace
- osbs_admin_groups:
- - group1
- - group2
- osbs_admin_users:
- - user1
- - user2
-
- # Users and groups to be assigned cluster-reader clusterrole cluster-wide
- osbs_cluster_reader_groups:
- - group1
- - group2
- osbs_cluster_reader_users:
- - user1
- - user2
-
- # Koji integration
- osbs_koji_hub: https://koji.fedoraproject.org/kojihub # Empty default value
- osbs_koji_root: https://koji.fedoraproject.org # Empty default value
-
- # Pulp integration
- osbs_pulp_secret_name: pulpsecret
- osbs_pulp_registry_name: brew-qa # Empty default value
-
- # Distribution registry integration
- osbs_registry_secret_name: v2-registry-dockercfg
- osbs_registry_api_versions:
- - v1
- - v2
- osbs_registry_uri: https://distribution.registry.fedoraproject.org/v2 # Empty default value
-
- # Dist-git integration
- osbs_sources_command: fedpkg sources
- osbs_source_registry_uri: https://source.registry.fedoraproject.org # Empty default value
-
- # Pruning
- osbs_prune: false
- osbs_prune_schedule: '0 0 */8 * *'
- osbs_prune_secret: ''
- osbs_prune_image: ''
- osbs_prune_commands: ["/prune.sh"]
-
-For a full list, see defaults/main.yml
-
-Dependencies
-------------
-
-None.
- -Example Playbook ----------------- - - - name: setup worker namespace - hosts: master - roles: - - role: osbs-namespace - osbs_namespace: worker - - - name: setup orchestrator namespace - hosts: master - roles: - - role: osbs-namespace - osbs_namespace: orchestrator - osbs_orchestrator: true - -License -------- - -BSD - -Author Information ------------------- - -Luiz Carvalho diff --git a/roles/osbs-namespace/defaults/main.yml b/roles/osbs-namespace/defaults/main.yml deleted file mode 100644 index 8b66ed57e5..0000000000 --- a/roles/osbs-namespace/defaults/main.yml +++ /dev/null @@ -1,59 +0,0 @@ ---- -osbs_openshift_home: /var/lib/origin -osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig -osbs_generated_config_path: /tmp - -osbs_environment: - KUBECONFIG: "{{ osbs_kubeconfig_path }}" - -osbs_is_admin: true - -osbs_service_accounts: [] - -osbs_cpu_limitrange: '' - -osbs_admin_groups: [] -osbs_admin_users: [] -osbs_cluster_reader_groups: [] -osbs_cluster_reader_users: [] -osbs_readonly_groups: [] -osbs_readonly_users: [] -osbs_readwrite_groups: [] -osbs_readwrite_users: [] - -osbs_orchestrator: false -osbs_worker_clusters: {} - -osbs_koji_hub: '' -osbs_koji_root: '' -osbs_pulp_registry_name: '' -osbs_pulp_secret_name: '' -osbs_registry_api_versions: -- v1 -- v2 -osbs_registry_secret_name: "" -osbs_registry_uri: '' -osbs_source_registry_uri: '' -osbs_build_json_dir: /usr/share/osbs -osbs_sources_command: fedpkg sources -osbs_vendor: "" -osbs_nodeselector: '' -osbs_buildroot_repository: '' -osbs_buildroot_imagestream: '' -osbs_insecure_repository: true -osbs_buildroot_imagestream_live_tag: '' - -osbs_prune: false -osbs_prune_schedule: '0 0 */8 * *' -osbs_prune_service_account: '' -osbs_prune_image: '' -osbs_prune_commands: ["/prune.sh"] -osbs_serviceaccount_pruner: '' - -osbs_odcs_enabled: false -osbs_odcs_signing_intents: {} -osbs_odcs_default_signing_intent: null -osbs_odcs_api_url: '' -osbs_odcs_auth_ssl_certs_dir: /usr/share/osbs - -osbs_reactor_config_maps: [] diff --git a/roles/osbs-namespace/filter_plugins/__init__.py b/roles/osbs-namespace/filter_plugins/__init__.py deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/roles/osbs-namespace/filter_plugins/with_isolated_workers.py b/roles/osbs-namespace/filter_plugins/with_isolated_workers.py deleted file mode 100644 index 8e8daa4d45..0000000000 --- a/roles/osbs-namespace/filter_plugins/with_isolated_workers.py +++ /dev/null @@ -1,56 +0,0 @@ -""" -Copyright (c) 2018 Red Hat, Inc -All rights reserved. - -This software may be modified and distributed under the terms -of the BSD license. See the LICENSE file for details. -""" -from copy import deepcopy -import re - - -# Negative regex used to exclude characters that are not allowed -# in naming a kubernetes resource -INVALID_KUBERNETES_NAME_CHARS = re.compile(r'[^a-z0-9\.-]+') - - -class FilterModule(object): - def filters(self): - return {'with_isolated_workers': do_with_isolated_workers} - - -def do_with_isolated_workers(reactor_configs): - """Generate reactor configs for each worker cluster - - :param reactor_configs: list, each dict should contain a name and - a data key. 
The value of the name key is used to name the config map object
- and the value of the data key is a reactor config
-
- :return: a new list of reactor configs that contains new reactor configs
- for each worker cluster in addition to the original reactor configs
- """
- all_configs = list(reactor_configs)
-
- for config in reactor_configs:
- clusters = config.get('data', {}).get('clusters', {})
- for arch, workers_info in clusters.items():
- for worker_info in workers_info:
- worker_info = deepcopy(worker_info)
- worker_info['enabled'] = True
-
- worker_config = deepcopy(config)
-
- name = _clean_kubernetes_name(config['name'] + '-' + worker_info['name'])
- worker_config['name'] = name
-
- worker_config['data']['clusters'] = {arch: [worker_info]}
-
- all_configs.append(worker_config)
-
- return all_configs
-
-
-def _clean_kubernetes_name(name):
- name = name.lower()
- name = INVALID_KUBERNETES_NAME_CHARS.sub('-', name)
- return name
diff --git a/roles/osbs-namespace/meta/main.yml b/roles/osbs-namespace/meta/main.yml
deleted file mode 100644
index 2f6d20271d..0000000000
--- a/roles/osbs-namespace/meta/main.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-# Standards: 1.8
-galaxy_info:
- author: Luiz Carvalho
- description: Setup OpenShift namespace for OSBS usage
- company: Red Hat, Inc.
- license: BSD
- min_ansible_version: 2.1
- platforms:
- name: EL
- versions:
- - 7
-dependencies: []
diff --git a/roles/osbs-namespace/operations/README.md b/roles/osbs-namespace/operations/README.md
deleted file mode 100644
index 11a5f47d02..0000000000
--- a/roles/osbs-namespace/operations/README.md
+++ /dev/null
@@ -1,76 +0,0 @@
-ansible-role-osbs-namespace/operations
-======================================
-
-Collection of common maintenance operations for an OpenShift cluster.
-By default, all tasks in this sub-role are disabled. Use the control
-booleans to enable the desired operations.
-
-Requirements
-------------
-
-A running instance of OpenShift.
-
-Role Variables
--------------
-
-
- # Update docker daemon on each OpenShift node.
- # It's highly recommended to use `serial: 1` in your playbook.
- osbs_upgrade_docker: false
- # Docker version to update to.
- osbs_docker_version:
-
- # Update OpenShift node labels.
- osbs_update_node_labels: false
- # A list of labels to be applied to each OpenShift node.
- osbs_node_labels: []
- # A list of all predefined node selector labels
- osbs_managed_node_labels:
- - "auto_build=true"
-
- # Disable a node to make it safe to perform
- # operations such as restarting docker daemon
- # or any other risky maintenance
- osbs_disable_node: true
- # Then to re-enable node:
- osbs_enable_node: true
-
- # Override default systemd unit files
- osbs_systemd_override: true
-
-See `operations/defaults/main.yml` for a comprehensive list of all
-available variables.
-
-Dependencies
------------
-
-None.
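Looking back at the with_isolated_workers filter plugin deleted above, a short usage sketch of its input/output contract may help (the sample data is illustrative; it reuses do_with_isolated_workers exactly as defined in the deleted module):

    configs = [{
        "name": "reactor-config-map",
        "data": {"clusters": {"x86_64": [
            {"name": "prod-first-x86_64", "max_concurrent_builds": 6, "enabled": False},
        ]}},
    }]
    result = do_with_isolated_workers(configs)
    # The original config map is kept, and one copy per worker is appended with
    # that worker forced to enabled=True and the name sanitized for kubernetes
    # (the underscore in "x86_64" becomes a dash):
    assert result[0] is configs[0]
    assert result[1]["name"] == "reactor-config-map-prod-first-x86-64"
    assert result[1]["data"]["clusters"] == {"x86_64": [
        {"name": "prod-first-x86_64", "max_concurrent_builds": 6, "enabled": True},
    ]}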
- -Example Playbook ----------------- - - - name: update docker - hosts: nodes - roles: - - role: ansible-role-osbs-namespace/operations - osbs_upgrade_docker: true - osbs_docker_version: docker-1.12.6-61.git85d7426.el7 - - - name: node maintenance - hosts: nodes - roles: - - role: ansible-role-osbs-namespace/operations - osbs_disable_node: true - - role: my-maintenance-role - - role: ansible-role-osbs-namespace/operations - osbs_enable_node: true - -License -------- - -BSD - -Author Information ------------------- - -Luiz Carvalho diff --git a/roles/osbs-namespace/operations/defaults/main.yml b/roles/osbs-namespace/operations/defaults/main.yml deleted file mode 100644 index cc6ee236af..0000000000 --- a/roles/osbs-namespace/operations/defaults/main.yml +++ /dev/null @@ -1,28 +0,0 @@ ---- -osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig -osbs_environment: - KUBECONFIG: "{{ osbs_kubeconfig_path }}" - -osbs_disable_node: false -osbs_enable_node: false -osbs_upgrade_docker: false -osbs_update_node_labels: false -osbs_node_labels: [] -osbs_managed_node_labels: -- "auto_build=true" - -# Retry for about 2 hours -osbs_wait_active_pods_retries: 240 -osbs_wait_active_pods_delay: 30 # seconds - -# Wait for about 5 minutes -osbs_wait_node_ready_retries: 30 -osbs_wait_node_ready_delay: 10 - -osbs_buildroot_do_tag: false -osbs_buildroot_imagestream_live_tag: '' -osbs_buildroot_imagestream: '' -osbs_buildroot_imagestream_post_build_tag: '' - -osbs_systemd_override: false -osbs_systemd_limit_nofile: 131072 diff --git a/roles/osbs-namespace/operations/handlers/main.yml b/roles/osbs-namespace/operations/handlers/main.yml deleted file mode 100644 index 726a27045c..0000000000 --- a/roles/osbs-namespace/operations/handlers/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- name: restart atomic-openshift-node - service: - name: atomic-openshift-node - state: restarted - daemon_reload: yes diff --git a/roles/osbs-namespace/operations/meta/main.yml b/roles/osbs-namespace/operations/meta/main.yml deleted file mode 100644 index dace97f01a..0000000000 --- a/roles/osbs-namespace/operations/meta/main.yml +++ /dev/null @@ -1,12 +0,0 @@ -# Standards: 1.8 -galaxy_info: - author: Luiz Carvalho - description: Collection of common maintenance operations for OpenShift - company: Red Hat, Inc. 
- license: BSD - min_ansible_version: 2.1 - platforms: - - name: EL - versions: - - 7 -dependencies: [] diff --git a/roles/osbs-namespace/operations/tasks/disable-node.yml b/roles/osbs-namespace/operations/tasks/disable-node.yml deleted file mode 100644 index 09ba1260ec..0000000000 --- a/roles/osbs-namespace/operations/tasks/disable-node.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Mark node as unschedulable - command: > - oadm manage-node {{ inventory_hostname }} --schedulable=false - -- name: Evacuate infra pods - command: > - oadm manage-node {{ inventory_hostname }} --evacuate - --pod-selector={{ item }} - with_items: - - "deploymentconfig=router" - - "deploymentconfig=registry-console" - - "deploymentconfig=docker-registry" - -- name: Wait until no more pods are running on the node - register: active_pods_result - shell: > - oadm manage-node {{ inventory_hostname }} --list-pods | - grep -v 'READY' | awk '{print $2}' | grep -v '0/' - until: active_pods_result.rc == 1 - failed_when: active_pods_result.rc > 1 - changed_when: false # read-only command - environment: "{{ osbs_environment }}" - retries: "{{ osbs_wait_active_pods_retries }}" - delay: "{{ osbs_wait_active_pods_delay }}" diff --git a/roles/osbs-namespace/operations/tasks/enable-node.yml b/roles/osbs-namespace/operations/tasks/enable-node.yml deleted file mode 100644 index 75fb3b5841..0000000000 --- a/roles/osbs-namespace/operations/tasks/enable-node.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- name: Mark node as schedulable - command: > - oadm manage-node {{ inventory_hostname }} --schedulable=true - -- name: Wait for node to be Ready - register: node_status - shell: > - oc get node {{ inventory_hostname }} --no-headers=true | - awk '{print $2}' - until: "'Ready' in node_status.stdout_lines" - changed_when: false # read-only command - environment: "{{ osbs_environment }}" - retries: "{{ osbs_wait_node_ready_retries }}" - delay: "{{ osbs_wait_node_ready_delay }}" diff --git a/roles/osbs-namespace/operations/tasks/main.yml b/roles/osbs-namespace/operations/tasks/main.yml deleted file mode 100644 index 29d8bc3e94..0000000000 --- a/roles/osbs-namespace/operations/tasks/main.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -- include: disable-node.yml - when: osbs_disable_node - -- include: upgrade-docker.yml - when: osbs_upgrade_docker - -- include: update-node-selector-labels.yml - when: osbs_update_node_labels - -- include: tag-buildroot.yml - when: osbs_buildroot_do_tag - -- include: enable-node.yml - when: osbs_enable_node - -- include: override-systemd.yml - when: osbs_systemd_override diff --git a/roles/osbs-namespace/operations/tasks/override-systemd.yml b/roles/osbs-namespace/operations/tasks/override-systemd.yml deleted file mode 100644 index c898879721..0000000000 --- a/roles/osbs-namespace/operations/tasks/override-systemd.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Set LimitNOFILE in atomic-openshift-node - lineinfile: - path: /etc/systemd/system/atomic-openshift-node.service - regexp: '^LimitNOFILE=' - line: 'LimitNOFILE={{ osbs_systemd_limit_nofile }}' - backup: yes - notify: - - restart atomic-openshift-node - -- meta: flush_handlers diff --git a/roles/osbs-namespace/operations/tasks/tag-buildroot.yml b/roles/osbs-namespace/operations/tasks/tag-buildroot.yml deleted file mode 100644 index b21ff0b09c..0000000000 --- a/roles/osbs-namespace/operations/tasks/tag-buildroot.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# Tag the desired build image with the post build tag (“staged” for stage, “released” for prod) -- name: tag
buildroot imagestream - command: > - oc tag --namespace={{ osbs_namespace }} - {{ osbs_buildroot_imagestream }}:{{ osbs_buildroot_imagestream_live_tag }} - {{ osbs_buildroot_imagestream }}:{{ osbs_buildroot_imagestream_post_build_tag }} - environment: "{{ osbs_environment }}" - when: - - osbs_buildroot_imagestream != '' - - osbs_buildroot_imagestream_live_tag != '' - - osbs_buildroot_imagestream_post_build_tag != '' - tags: - - oc diff --git a/roles/osbs-namespace/operations/tasks/update-node-selector-labels.yml b/roles/osbs-namespace/operations/tasks/update-node-selector-labels.yml deleted file mode 100644 index bb256bbcf7..0000000000 --- a/roles/osbs-namespace/operations/tasks/update-node-selector-labels.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# delete all predefined node selector labels which are not needed anymore -- name: Remove managed node labels - command: > - oc label node {{ inventory_hostname }} {{ item | regex_replace('=.+') }}- - with_items: "{{ osbs_managed_node_labels | difference(osbs_node_labels) }}" - register: delete_label - changed_when: ('not found' not in delete_label.stderr) - environment: "{{ osbs_environment }}" - tags: - - oc - -# set specified node selector labels -- name: Apply node labels - command: > - oc label node {{ inventory_hostname }} {{ item }} - with_items: "{{ osbs_node_labels | intersect(osbs_managed_node_labels) }}" - register: add_label - changed_when: ('labeled' in add_label.stdout) - failed_when: (('labeled' not in add_label.stdout) and ('already has a value' not in add_label.stderr)) - environment: "{{ osbs_environment }}" - tags: - - oc diff --git a/roles/osbs-namespace/operations/tasks/upgrade-docker.yml b/roles/osbs-namespace/operations/tasks/upgrade-docker.yml deleted file mode 100644 index 9464b3b28b..0000000000 --- a/roles/osbs-namespace/operations/tasks/upgrade-docker.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -- include: disable-node.yml - -- name: Update docker package - yum: - name: "{{ osbs_docker_version }}" - update_cache: true - state: present - -- name: Restart docker service - systemd: - state: restarted - name: docker - -- include: enable-node.yml diff --git a/roles/osbs-namespace/tasks/main.yml b/roles/osbs-namespace/tasks/main.yml deleted file mode 100644 index 6f6cc49fa8..0000000000 --- a/roles/osbs-namespace/tasks/main.yml +++ /dev/null @@ -1,214 +0,0 @@ ---- -# Query namespace -- name: query osbs namespace - command: oc get project {{ osbs_namespace }} - register: namespace_result - failed_when: namespace_result.rc != 0 and ('not found' not in namespace_result.stderr) - changed_when: false - tags: - - oc - -# Create namespace -- name: create osbs namespace - command: oc new-project {{ osbs_namespace }} - register: new_project - failed_when: new_project.rc != 0 and ('already exists' not in new_project.stderr) - changed_when: new_project.rc == 0 - environment: "{{ osbs_environment }}" - when: "'not found' in namespace_result.stderr" - tags: - - oc - -# Setup service account -- name: copy service accounts - template: - src: openshift-serviceaccount.yml.j2 - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-serviceaccount-{{ item }}.yml" - with_items: "{{ osbs_service_accounts }}" - register: yaml_sa - tags: - - oc - -- name: import service accounts - command: > - oc create - --namespace={{ osbs_namespace }} - --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-serviceaccount-{{ item.item }}.yml - register: service_account_import - failed_when: service_account_import.rc 
!= 0 and ('already exists' not in service_account_import.stderr) - environment: "{{ osbs_environment }}" - with_items: "{{ yaml_sa.results | default([]) }}" - when: item.changed - tags: - - oc - -# Setup role bindings -- name: copy role bindings - template: - src: "openshift-rolebinding.{{ item.yaml_version | default('v2') }}.yml.j2" - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-rolebinding-{{ item.name }}.yml" - with_items: - - - name: osbs-readonly - role: view - yaml_version: v1 - users: "{{ osbs_readonly_users }}" - groups: "{{ osbs_readonly_groups }}" - - - name: osbs-readwrite - role: edit - yaml_version: v1 - users: "{{ osbs_readwrite_users }}" - groups: "{{ osbs_readwrite_groups }}" - - - name: osbs-admin - role: admin - yaml_version: v1 - users: "{{ osbs_admin_users }}" - groups: "{{ osbs_admin_groups }}" - - - name: osbs-custom-build-readwrite - role: system:build-strategy-custom - yaml_version: v1 - users: "{{ osbs_readwrite_users }}" - groups: "{{ osbs_readwrite_groups }}" - - - name: osbs-custom-build-admin - role: system:build-strategy-custom - yaml_version: v1 - users: "{{ osbs_admin_users }}" - groups: "{{ osbs_admin_groups }}" - - - name: osbs-readwrite-serviceaccounts - role: edit - serviceaccounts: "{{ osbs_service_accounts }}" - - - name: osbs-custom-build-serviceaccounts - role: system:build-strategy-custom - serviceaccounts: "{{ osbs_service_accounts }}" - - - name: osbs-cluster-reader - role: cluster-reader - yaml_version: v1 - type: ClusterRoleBinding - users: "{{ osbs_cluster_reader_users }}" - groups: "{{ osbs_cluster_reader_groups }}" - - register: yaml_rolebindings - when: osbs_is_admin - tags: - - oc - -- name: import the role bindings - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-rolebinding-{{ item.item.name }}.yml - environment: "{{ osbs_environment }}" - with_items: "{{ yaml_rolebindings.results }}" - when: yaml_rolebindings.changed and item.changed - tags: - - oc - -- name: copy pruner role binding - template: - src: "openshift-rolebinding.{{ item.yaml_version | default('v2') }}.yml.j2" - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-rolebinding-{{ item.name }}.yml" - with_items: - - - name: osbs-pruner-serviceaccounts - role: system:image-pruner - type: ClusterRoleBinding - serviceaccounts: ["{{ osbs_serviceaccount_pruner }}"] - - register: yaml_rolebindings_pruner - when: osbs_is_admin and osbs_serviceaccount_pruner - tags: - - oc - -- name: import pruner role bindings - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-rolebinding-{{ item.item.name }}.yml - environment: "{{ osbs_environment }}" - with_items: "{{ yaml_rolebindings_pruner.results }}" - when: yaml_rolebindings_pruner.changed and item.changed - tags: - - oc - -- name: copy cpu limitrange - template: - src: openshift-limitrange.yml.j2 - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-limitrange.yml" - when: osbs_cpu_limitrange and osbs_is_admin - register: yaml_limitrange - tags: - - oc - -- name: import cpu limitrange - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-limitrange.yml - environment: "{{ osbs_environment }}" - when: yaml_limitrange.changed - tags: - - 
oc - -- name: delete cpu limitrange - command: > - oc delete - --namespace={{ osbs_namespace }} - --ignore-not-found=true - limitrange cpureq - environment: "{{ osbs_environment }}" - when: not osbs_cpu_limitrange and osbs_is_admin - tags: - - oc - -- name: get nodeselector value - command: > - oc get namespace {{ osbs_namespace }} -o go-template - --template={% raw %}'{{index .metadata.annotations "openshift.io/node-selector"}}'{% endraw %} - environment: "{{ osbs_environment }}" - register: node_selector_value - when: osbs_nodeselector != '' - changed_when: false - tags: - - oc - -- name: set default node selector - command: > - oc patch namespace {{ osbs_namespace }} - -p '{"metadata":{"annotations":{"openshift.io/node-selector": "{{ osbs_nodeselector }}"}}}' - environment: "{{ osbs_environment }}" - when: osbs_nodeselector != '' and osbs_nodeselector != node_selector_value.stdout - tags: - - oc - -- name: copy prune cronjob yaml - template: - src: openshift-prune-cronjob.yml.j2 - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-prune-cronjob.yml" - register: yaml_cronjob - when: osbs_prune - tags: - - oc - -- name: import prune cronjob yaml - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-prune-cronjob.yml - environment: "{{ osbs_environment }}" - when: osbs_prune and yaml_cronjob.changed - tags: - - oc - -- include: orchestrator.yml - when: osbs_orchestrator diff --git a/roles/osbs-namespace/tasks/orchestrator.yml b/roles/osbs-namespace/tasks/orchestrator.yml deleted file mode 100644 index 302248646e..0000000000 --- a/roles/osbs-namespace/tasks/orchestrator.yml +++ /dev/null @@ -1,73 +0,0 @@ ---- -- name: generate reactor config secret - local_action: > - template - src=reactor-config-secret.yml.j2 - dest="{{ osbs_generated_config_path }}/{{ osbs_namespace }}-{{ env }}-reactor-config-secret.yml" - register: yaml_reactor_config_secret - tags: - - oc - -- name: generate client config secret - local_action: > - template - src=client-config-secret.conf.j2 - dest="{{ osbs_generated_config_path }}/{{ osbs_namespace }}-{{ env }}-client-config-secret.conf" - register: yaml_client_config_secret - tags: - - oc - -- name: generate reactor config maps - template: - src: reactor-config-map.yml.j2 - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-{{ item.name }}.yml" - with_items: "{{ osbs_reactor_config_maps | with_isolated_workers }}" - register: yaml_reactor_config_maps - tags: - - oc - -- name: create reactor config maps - shell: > - oc --namespace={{ osbs_namespace }} create configmap {{ item.item.name }} \ - --from-file='config.yaml'={{ item.dest }} --dry-run -o yaml | \ - oc --namespace={{ osbs_namespace }} replace --force -f - - when: item.changed - with_items: "{{ yaml_reactor_config_maps.results }}" - tags: - - oc - -# Setup imagestream -- name: copy imagestream - template: - src: osbs-buildroot-imagestream.yml.j2 - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-osbs-buildroot-imagestream.yml" - environment: "{{ osbs_environment }}" - when: osbs_buildroot_repository != '' and osbs_buildroot_imagestream != '' - register: yaml_imagestream - tags: - - oc - -- name: create imagestream - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-osbs-buildroot-imagestream.yml - environment: "{{ 
osbs_environment }}" - when: yaml_imagestream.changed - tags: - - oc - -- name: update imagestream tag - command: > - oc import-image {{ osbs_buildroot_imagestream }}:{{ osbs_buildroot_imagestream_live_tag }} - --namespace={{ osbs_namespace }} - --from {{ osbs_buildroot_repository }}:{{ osbs_buildroot_imagestream_live_tag }} - {{ " --insecure" if osbs_insecure_repository else ''}} - environment: "{{ osbs_environment }}" - when: osbs_buildroot_repository != '' and osbs_buildroot_imagestream != '' and osbs_buildroot_imagestream_live_tag != '' - register: imagestream_tag_updated - changed_when: ('The import completed successfully.' in imagestream_tag_updated.stdout) - failed_when: ('The import completed successfully.' not in imagestream_tag_updated.stdout) - tags: - - oc diff --git a/roles/osbs-namespace/templates/client-config-secret.conf.j2 b/roles/osbs-namespace/templates/client-config-secret.conf.j2 deleted file mode 100644 index 63836c2294..0000000000 --- a/roles/osbs-namespace/templates/client-config-secret.conf.j2 +++ /dev/null @@ -1,52 +0,0 @@ -[general] -build_json_dir = {{ osbs_build_json_dir }} - -{% for platform, clusters in osbs_worker_clusters.items() %} -{% for cluster in clusters | default([]) %} -[{{ cluster.name }}] -namespace = {{ cluster.namespace | default('worker') }} -openshift_url = {{ cluster.openshift_url }} -token_file = /var/run/secrets/atomic-reactor/{{ cluster.name | replace('_', '-') }}-orchestrator/token -use_auth = {{ cluster.use_auth | default('true') }} -verify_ssl = {{ cluster.verify_ssl | default('true') }} -{% if cluster.auto_build_node_selector | default('') %} -auto_build_node_selector = {{ cluster.auto_build_node_selector }} -{% endif %} - - -# Koji integration -{% if osbs_koji_hub %} -koji_hub = {{ osbs_koji_hub }} -{% endif %} -{% if osbs_koji_root %} -koji_root = {{ osbs_koji_root }} -{% endif %} - -# Pulp integration -{% if osbs_pulp_registry_name %} -pulp_registry_name = {{ osbs_pulp_registry_name }} -{% endif %} -{% if osbs_pulp_secret_name %} -pulp_secret = {{ osbs_pulp_secret_name }} -{% endif %} - -# Distribution registry integration -{% if osbs_registry_api_versions %} -registry_api_versions = {{ osbs_registry_api_versions | join(',') }} -{% endif %} -{% if osbs_registry_secret_name %} -registry_secret = {{ osbs_registry_secret_name }} -{% endif %} -{% if osbs_registry_uri %} -registry_uri = {{ osbs_registry_uri }} -{% endif %} - -{% if osbs_source_registry_uri %} -source_registry_uri = {{ osbs_source_registry_uri }} -{% endif %} -sources_command = {{ osbs_sources_command }} -{% if osbs_vendor %} -vendor = {{ osbs_vendor }} -{% endif %} -{% endfor %} -{% endfor %} diff --git a/roles/osbs-namespace/templates/openshift-limitrange.yml.j2 b/roles/osbs-namespace/templates/openshift-limitrange.yml.j2 deleted file mode 100644 index 0c15dcee2b..0000000000 --- a/roles/osbs-namespace/templates/openshift-limitrange.yml.j2 +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: LimitRange -metadata: - name: cpureq - namespace: {{ osbs_namespace }} -spec: - limits: - - type: Container - defaultRequest: - cpu: {{ osbs_cpu_limitrange }} diff --git a/roles/osbs-namespace/templates/openshift-prune-cronjob.yml.j2 b/roles/osbs-namespace/templates/openshift-prune-cronjob.yml.j2 deleted file mode 100644 index d0a4978460..0000000000 --- a/roles/osbs-namespace/templates/openshift-prune-cronjob.yml.j2 +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: batch/v2alpha1 -kind: CronJob -metadata: - name: build-pruner -spec: - schedule: "{{ osbs_prune_schedule }}" - jobTemplate: - 
spec: - template: - spec: - serviceAccountName: "{{ osbs_prune_serviceaccount }}" - containers: - - name: build-pruner - image: "{{ osbs_prune_image }}" - - {% if osbs_prune_commands %}command: {{ osbs_prune_commands | to_yaml }}{% endif %} - - restartPolicy: Never diff --git a/roles/osbs-namespace/templates/openshift-rolebinding.v1.yml.j2 b/roles/osbs-namespace/templates/openshift-rolebinding.v1.yml.j2 deleted file mode 100644 index e601c6d354..0000000000 --- a/roles/osbs-namespace/templates/openshift-rolebinding.v1.yml.j2 +++ /dev/null @@ -1,19 +0,0 @@ -apiVersion: v1 -kind: {{ item.type | default("RoleBinding") }} -metadata: - name: {{ item.name }} - namespace: {{ osbs_namespace }} - -roleRef: - name: {{ item.role }} - namespace: {{ item.role_namespace | default() }} - -userNames: -{% for u in item.users | default([]) %} -- {{ u }} -{% endfor %} - -groupNames: -{% for g in item.groups | default([]) %} -- {{ g }} -{% endfor %} diff --git a/roles/osbs-namespace/templates/openshift-rolebinding.v2.yml.j2 b/roles/osbs-namespace/templates/openshift-rolebinding.v2.yml.j2 deleted file mode 100644 index 833255cc95..0000000000 --- a/roles/osbs-namespace/templates/openshift-rolebinding.v2.yml.j2 +++ /dev/null @@ -1,29 +0,0 @@ -apiVersion: v1 -kind: {{ item.type | default("RoleBinding") }} -metadata: - name: {{ item.name }} - namespace: {{ osbs_namespace }} - -roleRef: - name: {{ item.role }} - namespace: {{ item.role_namespace | default() }} - -subjects: - -{% for sa in item.serviceaccounts | default([]) %} -- kind: 'ServiceAccount' - namespace: {{ osbs_namespace }} - name: {{ sa }} -{% endfor %} - -{% for u in item.users | default([]) %} -- kind: 'User' - namespace: {{ osbs_namespace }} - name: {{ u }} -{% endfor %} - -{% for g in item.groups | default([]) %} -- kind: 'Group' - namespace: {{ osbs_namespace }} - name: {{ g }} -{% endfor %} diff --git a/roles/osbs-namespace/templates/openshift-serviceaccount.yml.j2 b/roles/osbs-namespace/templates/openshift-serviceaccount.yml.j2 deleted file mode 100644 index b4487391db..0000000000 --- a/roles/osbs-namespace/templates/openshift-serviceaccount.yml.j2 +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ item }} - namespace: {{ osbs_namespace }} diff --git a/roles/osbs-namespace/templates/osbs-buildroot-imagestream.yml.j2 b/roles/osbs-namespace/templates/osbs-buildroot-imagestream.yml.j2 deleted file mode 100644 index 38b1d776b6..0000000000 --- a/roles/osbs-namespace/templates/osbs-buildroot-imagestream.yml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: ImageStream -metadata: - annotations: - openshift.io/image.insecureRepository: "{{ osbs_insecure_repository }}" - name: {{ osbs_buildroot_imagestream }} - namespace: {{ osbs_namespace }} -spec: - dockerImageRepository: {{ osbs_buildroot_repository }} diff --git a/roles/osbs-namespace/templates/reactor-config-map.yml.j2 b/roles/osbs-namespace/templates/reactor-config-map.yml.j2 deleted file mode 100644 index a2c4331005..0000000000 --- a/roles/osbs-namespace/templates/reactor-config-map.yml.j2 +++ /dev/null @@ -1,3 +0,0 @@ -# {{ item.name }} ---- -{{ item.data | to_nice_yaml }} diff --git a/roles/osbs-namespace/templates/reactor-config-secret.yml.j2 b/roles/osbs-namespace/templates/reactor-config-secret.yml.j2 deleted file mode 100644 index 3b7e442f0c..0000000000 --- a/roles/osbs-namespace/templates/reactor-config-secret.yml.j2 +++ /dev/null @@ -1,23 +0,0 @@ ---- -version: 1 -clusters: - -{% for platform, clusters in osbs_worker_clusters.items() %} - {{ 
platform }}: -{% for cluster in clusters | default([]) %} - - name: {{ cluster.name }} - max_concurrent_builds: {{ cluster.max_concurrent_builds }} - enabled: {{ cluster.enabled | default(true) }} - -{% endfor %} -{% endfor %} - -{% if osbs_odcs_enabled %} -odcs: - api_url: {{ osbs_odcs_api_url }} - auth: - ssl_certs_dir: {{ osbs_odcs_auth_ssl_certs_dir }} - signing_intents: - {{ osbs_odcs_signing_intents | to_yaml | indent(4) }} - default_signing_intent: {{ osbs_odcs_default_signing_intent }} -{% endif %} diff --git a/roles/osbs-namespace/templates/role-osbs-custom-build.yml.j2 b/roles/osbs-namespace/templates/role-osbs-custom-build.yml.j2 deleted file mode 100644 index 2a4b7f2bce..0000000000 --- a/roles/osbs-namespace/templates/role-osbs-custom-build.yml.j2 +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Role -metadata: - name: osbs-custom-build - namespace: {{ osbs_namespace }} -rules: - - verbs: - - create - resources: - - builds/custom diff --git a/roles/osbs-namespace/tests/files/dedicated-project-admin.yaml b/roles/osbs-namespace/tests/files/dedicated-project-admin.yaml deleted file mode 100644 index 6cebcc1977..0000000000 --- a/roles/osbs-namespace/tests/files/dedicated-project-admin.yaml +++ /dev/null @@ -1,36 +0,0 @@ -apiVersion: v1 -kind: ClusterRole -metadata: - name: dedicated-project-admin -rules: -- apiGroups: - - "" - attributeRestrictions: null - resources: - - limitranges - - resourcequotas - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- apiGroups: - - extensions - attributeRestrictions: null - resources: - - daemonsets - verbs: - - create - - delete - - get - - list - - patch - - update - - watch -- resources: - - builds/custom - verbs: - - create diff --git a/roles/osbs-namespace/tests/files/expected-client-config-secret.conf b/roles/osbs-namespace/tests/files/expected-client-config-secret.conf deleted file mode 100644 index 7be6958f67..0000000000 --- a/roles/osbs-namespace/tests/files/expected-client-config-secret.conf +++ /dev/null @@ -1,49 +0,0 @@ -[general] -build_json_dir = /usr/share/osbs - -[minimum] -namespace = worker -openshift_url = https://minimum-worker.test.fedoraproject.org -token_file = /var/run/secrets/atomic-reactor/minimum-orchestrator/token -use_auth = true -verify_ssl = false - - -# Koji integration -koji_certs_secret = kojisecret - -# Pulp integration -pulp_secret = pulpsecret - -# Distribution registry integration -registry_api_versions = v1,v2 -registry_secret = v2-registry-dockercfg - -sources_command = fedpkg sources -vendor = Fedora Project -[all_values] -namespace = spam -openshift_url = https://all_values-worker.test.fedoraproject.org -token_file = /var/run/secrets/atomic-reactor/all-values-orchestrator/token -use_auth = false -verify_ssl = false -auto_build_node_selector = auto_build=true - -# Koji integration -koji_certs_secret = kojisecret - -# Pulp integration -pulp_secret = pulpsecret - -# Distribution registry integration -registry_api_versions = v1,v2 -registry_secret = v2-registry-dockercfg - -sources_command = fedpkg sources -vendor = Fedora Project - -[platform:x86_64] -architecture = amd64 - -[platform:aarch64] -architecture = arm64 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-ppc64le-on-premise.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-ppc64le-on-premise.yml deleted file mode 100644 index 03dfd093ce..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-ppc64le-on-premise.yml +++ /dev/null @@ -1,15 +0,0 @@ -# 
reactor-config-map-ppc64le-on-premise ---- -artifacts_allowed_domains: -- example.com/beta -- example.com/released -clusters: - ppc64le: - - enabled: true - max_concurrent_builds: 11 - name: ppc64le-on-premise -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: public - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-ppc64le-on-premise.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-ppc64le-on-premise.yml deleted file mode 100644 index fe1a81d07e..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-ppc64le-on-premise.yml +++ /dev/null @@ -1,13 +0,0 @@ -# reactor-config-map-scratch-ppc64le-on-premise ---- -artifacts_allowed_domains: [] -clusters: - ppc64le: - - enabled: true - max_concurrent_builds: 11 - name: ppc64le-on-premise -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: private - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-aws.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-aws.yml deleted file mode 100644 index a8e1196a93..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-aws.yml +++ /dev/null @@ -1,13 +0,0 @@ -# reactor-config-map-scratch-x86-64-aws ---- -artifacts_allowed_domains: [] -clusters: - x86_64: - - enabled: true - max_concurrent_builds: 20 - name: x86_64-aws -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: private - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-azure.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-azure.yml deleted file mode 100644 index 32b099e9d0..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-azure.yml +++ /dev/null @@ -1,13 +0,0 @@ -# reactor-config-map-scratch-x86-64-azure ---- -artifacts_allowed_domains: [] -clusters: - x86_64: - - enabled: true - max_concurrent_builds: 30 - name: x86_64-azure -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: private - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-on-premise.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-on-premise.yml deleted file mode 100644 index 34e4efd881..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch-x86-64-on-premise.yml +++ /dev/null @@ -1,13 +0,0 @@ -# reactor-config-map-scratch-x86-64-on-premise ---- -artifacts_allowed_domains: [] -clusters: - x86_64: - - enabled: true - max_concurrent_builds: 10 - name: x86_64-on-premise -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: private - vendor: Example, Inc. 
-version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch.yml deleted file mode 100644 index 897d146f33..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-scratch.yml +++ /dev/null @@ -1,23 +0,0 @@ -# reactor-config-map-scratch ---- -artifacts_allowed_domains: [] -clusters: - ppc64le: - - enabled: true - max_concurrent_builds: 11 - name: ppc64le-on-premise - x86_64: - - enabled: true - max_concurrent_builds: 10 - name: x86_64-on-premise - - enabled: false - max_concurrent_builds: 20 - name: x86_64-aws - - enabled: false - max_concurrent_builds: 30 - name: x86_64-azure -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: private - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-aws.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-aws.yml deleted file mode 100644 index 2d6f356f85..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-aws.yml +++ /dev/null @@ -1,15 +0,0 @@ -# reactor-config-map-x86-64-aws ---- -artifacts_allowed_domains: -- example.com/beta -- example.com/released -clusters: - x86_64: - - enabled: true - max_concurrent_builds: 20 - name: x86_64-aws -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: public - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-azure.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-azure.yml deleted file mode 100644 index 2d686cc989..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-azure.yml +++ /dev/null @@ -1,15 +0,0 @@ -# reactor-config-map-x86-64-azure ---- -artifacts_allowed_domains: -- example.com/beta -- example.com/released -clusters: - x86_64: - - enabled: true - max_concurrent_builds: 30 - name: x86_64-azure -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: public - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-on-premise.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-on-premise.yml deleted file mode 100644 index c583323b69..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map-x86-64-on-premise.yml +++ /dev/null @@ -1,15 +0,0 @@ -# reactor-config-map-x86-64-on-premise ---- -artifacts_allowed_domains: -- example.com/beta -- example.com/released -clusters: - x86_64: - - enabled: true - max_concurrent_builds: 10 - name: x86_64-on-premise -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: public - vendor: Example, Inc. 
-version: 1 diff --git a/roles/osbs-namespace/tests/files/expected-reactor-config-map.yml b/roles/osbs-namespace/tests/files/expected-reactor-config-map.yml deleted file mode 100644 index f671afa261..0000000000 --- a/roles/osbs-namespace/tests/files/expected-reactor-config-map.yml +++ /dev/null @@ -1,25 +0,0 @@ -# reactor-config-map ---- -artifacts_allowed_domains: -- example.com/beta -- example.com/released -clusters: - ppc64le: - - enabled: true - max_concurrent_builds: 11 - name: ppc64le-on-premise - x86_64: - - enabled: true - max_concurrent_builds: 10 - name: x86_64-on-premise - - enabled: false - max_concurrent_builds: 20 - name: x86_64-aws - - enabled: false - max_concurrent_builds: 30 - name: x86_64-azure -image_labels: - authoritative-source-url: registry.example.com - distribution-scope: public - vendor: Example, Inc. -version: 1 diff --git a/roles/osbs-namespace/tests/group_vars/masters.yml b/roles/osbs-namespace/tests/group_vars/masters.yml deleted file mode 100644 index 17fa93d4c1..0000000000 --- a/roles/osbs-namespace/tests/group_vars/masters.yml +++ /dev/null @@ -1,67 +0,0 @@ - -osbs_worker_clusters: - x86_64: - - name: 'minimum' - max_concurrent_builds: 1 - openshift_url: 'https://minimum-worker.test.fedoraproject.org' - verify_ssl: 'false' - - - name: 'all_values' - namespace: 'spam' - max_concurrent_builds: 99 - openshift_url: 'https://all_values-worker.test.fedoraproject.org' - verify_ssl: 'false' - use_auth: 'false' - artifacts_allowed_domains: - - allowed.domain.com - - also-allowed.domain.com - auto_build_node_selector: 'auto_build=true' - -_reactor_config_map: - version: 1 - - clusters: - - x86_64: - - name: x86_64-on-premise - max_concurrent_builds: 10 - enabled: True - - - name: x86_64-aws - max_concurrent_builds: 20 - enabled: False - - - name: x86_64-azure - max_concurrent_builds: 30 - enabled: False - - ppc64le: - - name: ppc64le-on-premise - max_concurrent_builds: 11 - enabled: True - - artifacts_allowed_domains: - - example.com/beta - - example.com/released - - image_labels: - vendor: "Example, Inc." 
- authoritative-source-url: registry.example.com - distribution-scope: public - -_scratch_reactor_config_map_overrides: - - artifacts_allowed_domains: [] - - image_labels: - distribution-scope: private - -osbs_reactor_config_maps: - -- name: reactor-config-map - data: "{{ _reactor_config_map }}" - -- name: reactor-config-map-scratch - data: > - {{ _reactor_config_map | - combine(_scratch_reactor_config_map_overrides, recursive=True) }} diff --git a/roles/osbs-namespace/tests/pre-oc-cluster.sh b/roles/osbs-namespace/tests/pre-oc-cluster.sh deleted file mode 100755 index 009a7c36b8..0000000000 --- a/roles/osbs-namespace/tests/pre-oc-cluster.sh +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash -set -xeuo pipefail - -# Script inspired by: -# https://github.com/radanalyticsio/radanalyticsio.github.io/blob/master/.travis.yml - -TEST_DIR=`pwd` -ORIGIN_DIR=$TEST_DIR/../origin -OC_VERSION='v3.7.0' -OC_RELEASE_NAME='openshift-origin-client-tools-v3.7.0-7ed6862-linux-64bit' - -# Add required insecure container registry -sudo sed -i -e 's/sock/sock --insecure-registry 172.30.0.0\/16/' /etc/default/docker -sudo cat /etc/default/docker -sudo service docker restart - -# Download and setup oc binary -sudo mkdir -p $ORIGIN_DIR -sudo chmod -R 766 $ORIGIN_DIR -sudo curl -L \ - https://github.com/openshift/origin/releases/download/${OC_VERSION}/${OC_RELEASE_NAME}.tar.gz | \ - sudo tar -C $ORIGIN_DIR -xz ${OC_RELEASE_NAME}/oc -sudo cp $ORIGIN_DIR/${OC_RELEASE_NAME}/oc /bin/ -sudo chmod +x /bin/oc - -oc version - -# Below cmd is important to get oc working in ubuntu -sudo docker run -v /:/rootfs -ti --rm \ - --entrypoint=/bin/bash \ - --privileged openshift/origin:v3.7.0 \ - -c "mv /rootfs/bin/findmnt /rootfs/bin/findmnt.backup" - -# Avoid error from travis wrapper script with unbound variable: -# https://github.com/travis-ci/travis-ci/issues/5434 -set +u diff --git a/roles/osbs-namespace/tests/test-inventory b/roles/osbs-namespace/tests/test-inventory deleted file mode 100644 index 7ba4950f9a..0000000000 --- a/roles/osbs-namespace/tests/test-inventory +++ /dev/null @@ -1,2 +0,0 @@ -[masters] -test-host ansible_connection=local ansible_become=false diff --git a/roles/osbs-namespace/tests/test.yml b/roles/osbs-namespace/tests/test.yml deleted file mode 100644 index eaf70b57f2..0000000000 --- a/roles/osbs-namespace/tests/test.yml +++ /dev/null @@ -1,385 +0,0 @@ -# Standards: 1.8 ---- -# Run playbook -# ansible-playbook -i test-inventory test.yml -# During active development, you can re-use the same -# environment setup: -# ansible-playbook -i test-inventory test.yml --skip-tags 'environment-setup' -- name: setup environment - hosts: masters - tasks: - - name: cleanup existing cluster - command: > - oc cluster down - register: cmd_cluster_down - changed_when: cmd_cluster_down.rc == 0 - - - name: bring up new cluster - command: > - oc cluster up - --image {{ osbs_test_ocp_image | default('registry.access.redhat.com/openshift3/ose') }} - --version {{ osbs_test_ocp_version | default('v3.7') }} - register: cmd_cluster_up - changed_when: cmd_cluster_up.rc == 0 - - - name: login as admin - command: > - oc login -u system:admin - register: cmd_login_admin - changed_when: cmd_login_admin.rc == 0 - - - name: cleanup tmp folder - file: - path: tmp - state: absent - - - name: setup tmp folder - file: - path: tmp - state: directory - - tags: - - environment-setup - -- name: setup worker namespace - hosts: masters - roles: - - role: "{{ playbook_dir }}/../." 
- osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config" - osbs_openshift_home: tmp - osbs_namespace: test-worker - osbs_nodeselector: "worker=true" - osbs_service_accounts: - - orchestrator - -- name: test worker namespace - hosts: masters - tasks: - - name: namespace worker created - command: > - oc get project test-worker - changed_when: false - - - name: orchestrator service account created in worker namespace - command: > - oc -n test-worker get serviceaccount orchestrator - changed_when: false - - - name: expected rolebindings created in worker namespace - command: > - oc -n test-worker get rolebinding {{ item }} - with_items: - - osbs-admin - - osbs-custom-build-serviceaccounts - - osbs-readonly - - osbs-readwrite - - osbs-readwrite-serviceaccounts - changed_when: false - - - name: nodeselector exists - shell: > - oc get namespace test-worker -o json |grep 'node-selector' - register: node_selector_exists - failed_when: "'node-selector' not in node_selector_exists.stdout" - -- name: setup orchestrator namespace - hosts: masters - tags: - orchestrator - roles: - - role: "{{ playbook_dir }}/../." - osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config" - osbs_openshift_home: tmp - osbs_generated_config_path: tmp - osbs_namespace: test-orchestrator - osbs_orchestrator: true - -- name: test orchestrator namespace - hosts: masters - tags: - orchestrator - tasks: - - name: reactor config secret generated - stat: - path: tmp/test-orchestrator-reactor-config-secret.yml - register: stat_reactor_config_secret - changed_when: false - - - name: fail if reactor config secret was not generated - fail: - msg: Reactor config secret file not created! - when: not stat_reactor_config_secret.stat.exists - - - name: client-config-secret was generated properly - command: > - diff {{ playbook_dir }}/files/expected-client-config-secret.conf - {{ playbook_dir }}/tmp/test-orchestrator-client-config-secret.conf - changed_when: false - - - name: reactor config maps were generated properly - command: > - diff {{ playbook_dir }}/files/expected-{{ item }}.yml - {{ playbook_dir }}/tmp/test-host-test-orchestrator-{{ item }}.yml - changed_when: false - with_items: - - reactor-config-map-ppc64le-on-premise - - reactor-config-map-scratch-ppc64le-on-premise - - reactor-config-map-scratch-x86-64-aws - - reactor-config-map-scratch-x86-64-azure - - reactor-config-map-scratch-x86-64-on-premise - - reactor-config-map-scratch - - reactor-config-map-x86-64-aws - - reactor-config-map-x86-64-azure - - reactor-config-map-x86-64-on-premise - - reactor-config-map - register: cmd_diff_config_maps - - - name: reactor config maps were created - command: oc -n test-orchestrator get configmaps {{ item.item }} - changed_when: false - with_items: "{{ cmd_diff_config_maps.results }}" - -- name: setup namespace as non-admin - hosts: masters - pre_tasks: - - name: Login with non cluster admin account - command: > - oc login -u non-admin -p non-admin - register: cmd_login_non_admin - changed_when: cmd_login_non_admin.rc == 0 - roles: - - role: "{{ playbook_dir }}/../."
- osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config" - osbs_openshift_home: tmp - osbs_namespace: test-non-admin - osbs_is_admin: false - osbs_service_accounts: - - orchestrator - post_tasks: - - name: Log back in with cluster admin account - command: > - oc login -u system:admin - register: cmd_login_admin - changed_when: cmd_login_admin.rc == 0 - tags: - - wip - -- name: test non-admin namespace - hosts: masters - tasks: - - name: namespace non-admin created - command: > - oc get project test-non-admin - changed_when: false - - - name: orchestrator service account created in non-admin namespace - command: > - oc -n test-non-admin get serviceaccount orchestrator - changed_when: false - - - name: custom builds roles NOT created in non-admin namespace - command: > - oc -n test-non-admin get role osbs-custom-build - register: cmd_role - failed_when: ('No resources found' not in cmd_role.stderr) and ('NotFound' not in cmd_role.stderr) - changed_when: false - - - name: custom rolebindings NOT created in non-admin namespace - command: > - oc -n test-non-admin get rolebinding {{ item }} - register: cmd_rolebinding - failed_when: ('No resources found' not in cmd_rolebinding.stderr) and ('NotFound' not in cmd_rolebinding.stderr) - with_items: - - osbs-admin - - osbs-custom-build-serviceaccounts - - osbs-readonly - - osbs-readwrite - - osbs-readwrite-serviceaccounts - changed_when: false - tags: - - wip - -- name: create limitrange namespace - hosts: masters - roles: - - role: "{{ playbook_dir }}/../." - osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config" - osbs_openshift_home: tmp - osbs_namespace: test-limitrange - osbs_cpu_limitrange: '100m' - -- name: test limitrange namespace - hosts: masters - tasks: - - name: namespace limitrange created - command: > - oc get project test-limitrange - changed_when: false - - - name: limitrange created - command: > - oc -n test-limitrange get limitrange cpureq - changed_when: false - -- name: update limitrange namespace - hosts: masters - roles: - - role: "{{ playbook_dir }}/../." 
- osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config" - osbs_openshift_home: tmp - osbs_namespace: test-limitrange - # No osbs_cpu_limitrange provided, which should trigger removal - -- name: test updated limitrange namespace - hosts: masters - tasks: - - name: limitrange deleted - command: > - oc -n test-limitrange get limitrange cpureq - register: cmd_limitrange - failed_when: ('No resources found' not in cmd_limitrange.stderr) and ('NotFound' not in cmd_limitrange.stderr) - changed_when: false - -- name: setup policybinding dedicated-admin namespace - hosts: masters - pre_tasks: - - name: login as admin - command: > - oc login -u system:admin - register: cmd_login_admin - changed_when: cmd_login_admin.rc == 0 - - name: Create dedicated-project-admin clusterrole - command: > - oc create -f {{ playbook_dir }}/files/dedicated-project-admin.yaml - register: cmd_create_clusterrole - changed_when: cmd_create_clusterrole.rc == 0 - - name: Create the namespace as cluster admin - command: > - oc new-project test-policybinding-dedicated-admin - register: cmd_pre_create_namespace - changed_when: cmd_pre_create_namespace.rc == 0 - - name: Create dedicated-admin user - command: > - oc -n test-policybinding-dedicated-admin - create user dedicated-admin - register: cmd_create_user - changed_when: cmd_create_user.rc == 0 - - name: Add dedicated-project-admin role to dedicated-admin - command: > - oc -n test-policybinding-dedicated-admin - policy add-role-to-user dedicated-project-admin dedicated-admin - register: cmd_role_dedicated_project_admin - changed_when: cmd_role_dedicated_project_admin.rc == 0 - - name: Create policybinding as cluster admin - command: > - oc -n test-policybinding-dedicated-admin - create policybinding test-policybinding-dedicated-admin - register: cmd_pre_create_policybinding - changed_when: cmd_pre_create_policybinding.rc == 0 - # This is only needed because the project was created - # by a different user: system:admin. - - name: Give dedicated-admin user project admin access - command: > - oc -n test-policybinding-dedicated-admin - adm policy add-role-to-user admin dedicated-admin - register: cmd_role_project_admin - changed_when: cmd_role_project_admin.rc == 0 - - name: Login with non cluster admin account - command: > - oc login -u dedicated-admin -p dedicated-admin - register: cmd_login_dedicated_admin - changed_when: cmd_login_dedicated_admin.rc == 0 - roles: - - role: "{{ playbook_dir }}/../."
- osbs_kubeconfig_path: "{{ lookup('env','HOME') }}/.kube/config" - osbs_openshift_home: tmp - osbs_namespace: test-users-and-groups - osbs_nodeselector: "worker=true" - osbs_admin_groups: - - admin-group - osbs_admin_users: - - admin-user - osbs_cluster_reader_groups: - - cluster-reader-group - osbs_cluster_reader_users: - - cluster-reader-user - osbs_readonly_groups: - - readonly-group - osbs_readonly_users: - - readonly-user - osbs_readwrite_groups: - - readwrite-group - osbs_readwrite_users: - - readwrite-user - -- name: test users and groups namespace - hosts: masters - vars: - osbs_users_groups_info: - - role_name: osbs-admin - type: rolebinding - expected: User Groupadmin-user admin-group - - role_name: osbs-readonly - type: rolebinding - expected: User Groupreadonly-user readonly-group - - role_name: osbs-readwrite - type: rolebinding - expected: User Groupreadwrite-user readwrite-group - - role_name: osbs-cluster-reader - type: clusterrolebinding - expected: User Groupcluster-reader-user cluster-reader-group - tasks: - - name: query rolebindings - command: > - oc -n test-users-and-groups get {{ item.type }} {{ item.role_name }} - -o jsonpath='{.subjects[*].kind}{.subjects[*].name}' - register: osbs_rolebindings - changed_when: false - with_items: "{{ osbs_users_groups_info }}" - - - name: verify rolebindings - fail: - msg: "{{ item.1.type }} {{ item.1.role_name }} not as expected" - when: "item.0.stdout != item.1.expected" - with_together: - - "{{ osbs_rolebindings.results }}" - - "{{ osbs_users_groups_info }}" diff --git a/roles/osbs-secret/README.md b/roles/osbs-secret/README.md deleted file mode 100644 index e59fab6a79..0000000000 --- a/roles/osbs-secret/README.md +++ /dev/null @@ -1,70 +0,0 @@ -osbs-secret -=========== - -This role imports various secrets, such as Pulp or Koji certificates, from the -filesystem into OpenShift. See the [OSBS -documentation](https://github.com/projectatomic/osbs-client/blob/master/docs/secret.md) -for more information. - -This role is part of the -[ansible-osbs](https://github.com/projectatomic/ansible-osbs/) playbook for -deploying the OpenShift build service. Please refer to that GitHub repository for -[documentation](https://github.com/projectatomic/ansible-osbs/blob/master/README.md) -and the [issue tracker](https://github.com/projectatomic/ansible-osbs/issues). - -Role Variables -------------- - -The role imports the keys from the machine running ansible. You have to provide -the `osbs_secret_files` list, which enumerates what files to import. Elements of -the list are dictionaries with two keys: `source` and `dest`. Source is the -location of the file on the machine where ansible is run. Dest is the filename -of the secret. - - osbs_secret_files: - - source: /home/user/.pulp/pulp.cer - dest: pulp.cer - - source: /home/user/.pulp/pulp.key - dest: pulp.key - -The name of the secret in OpenShift is defined by the `osbs_secret_name` -variable. - - osbs_secret_name: pulpsecret - -The secret has to be associated with a service account. This service account -can be set by the `osbs_secret_service_account` variable. - - osbs_secret_service_account: builder - -We need a kubeconfig file on the remote machine in order to talk to OpenShift. -Its location is given by the `osbs_kubeconfig_path` variable. - - osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig - -Example Playbook ---------------- - -The following playbook imports the keys from my home directory on the machine where -ansible is executed.
You may need to run something like this after the current -set of keys expires. - - - hosts: builders - roles: - - role: osbs-secret - osbs_secret_name: pulpsecret - osbs_secret_files: - - source: /home/mmilata/.pulp/pulp.cer - dest: pulp.cer - - source: {{ pulp_secret_local_dir }}/pulp.key - dest: pulp.key - -License -------- - -BSD - -Author Information ------------------- - -Martin Milata <mmilata@redhat.com> diff --git a/roles/osbs-secret/defaults/main.yml b/roles/osbs-secret/defaults/main.yml deleted file mode 100644 index 0e317c61fc..0000000000 --- a/roles/osbs-secret/defaults/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -osbs_secret_name: pulpsecret -osbs_secret_type: Opaque -osbs_secret_service_account: builder -osbs_secret_remote_dir: /var/lib/origin -osbs_secret_can_fail: false - -osbs_secret_files: -- source: /home/user/.pulp/pulp.cer - dest: pulp.cer -- source: /home/user/.pulp/pulp.key - dest: pulp.key - -osbs_namespace: default -osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig -osbs_environment: - KUBECONFIG: "{{ osbs_kubeconfig_path }}" diff --git a/roles/osbs-secret/handlers/main.yml b/roles/osbs-secret/handlers/main.yml deleted file mode 100644 index b3c0f8a4fa..0000000000 --- a/roles/osbs-secret/handlers/main.yml +++ /dev/null @@ -1,22 +0,0 @@ ---- -- name: import osbs secret - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_secret_remote_dir }}/openshift-secret-{{ inventory_hostname }}-{{ osbs_namespace }}-{{ osbs_secret_name }}.yml - environment: "{{ osbs_environment }}" - notify: allow service account - -- name: allow service account - command: > - oc secrets - add serviceaccount/{{ osbs_secret_service_account }} secrets/{{ osbs_secret_name }} - --for=mount - --namespace={{ osbs_namespace }} - environment: "{{ osbs_environment }}" - -- name: delete secret resource file - file: - path: "{{ osbs_secret_remote_dir }}/openshift-secret-{{ inventory_hostname }}-{{ osbs_namespace }}-{{ osbs_secret_name }}.yml" - state: absent diff --git a/roles/osbs-secret/meta/main.yml b/roles/osbs-secret/meta/main.yml deleted file mode 100644 index c8d34fb335..0000000000 --- a/roles/osbs-secret/meta/main.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -galaxy_info: - author: Martin Milata - description: Import secrets from local filesystem into OpenShift. 
- company: Red Hat - issue_tracker_url: https://github.com/projectatomic/ansible-osbs/issues - license: BSD - min_ansible_version: 1.2 - platforms: - - name: EL - versions: - - 7 - - name: Fedora - versions: - - 21 - - 22 - categories: - - cloud - - development - - packaging -dependencies: [] diff --git a/roles/osbs-secret/tasks/main.yml b/roles/osbs-secret/tasks/main.yml deleted file mode 100644 index c1428406bd..0000000000 --- a/roles/osbs-secret/tasks/main.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -- set_fact: - osbs_secret_files_exist: true - tags: - - oc - -- set_fact: - osbs_secret_files_exist: false - when: lookup('file', lookup('first_found', [item.source, '/dev/null'])) == '' - with_items: "{{ osbs_secret_files }}" - tags: - - oc - -- fail: - msg: Some of the source secret files do not exist (and osbs_secret_can_fail is false) - when: not (osbs_secret_files_exist or osbs_secret_can_fail) - tags: - - oc - -- debug: - msg: Some of the source secret files do not exist, skipping import - when: not osbs_secret_files_exist - tags: - - oc - -- name: create secrets resource file - template: - src: openshift-secret.yml.j2 - dest: "{{ osbs_secret_remote_dir }}/openshift-secret-{{ inventory_hostname }}-{{ osbs_namespace }}-{{ osbs_secret_name }}.yml" - mode: "0600" - when: osbs_secret_files_exist - notify: - - import osbs secret - - delete secret resource file - tags: - - oc - -- meta: flush_handlers - tags: - - oc diff --git a/roles/osbs-secret/templates/openshift-secret.yml.j2 b/roles/osbs-secret/templates/openshift-secret.yml.j2 deleted file mode 100644 index 2b08c05b12..0000000000 --- a/roles/osbs-secret/templates/openshift-secret.yml.j2 +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Secret -metadata: - name: {{ osbs_secret_name }} -type: {{ osbs_secret_type }} -data: -{% for f in osbs_secret_files %} - {{ f.dest }}: {{ lookup('file', f.source) | b64encode }} -{% endfor %} - diff --git a/roles/web-data-analysis/files/run-daily-awstats.sh b/roles/web-data-analysis/files/run-daily-awstats.sh index d985fde63f..6554ffd6c2 100644 --- a/roles/web-data-analysis/files/run-daily-awstats.sh +++ b/roles/web-data-analysis/files/run-daily-awstats.sh @@ -44,7 +44,7 @@ TREEDIR=${LOGDIR}/${YEAR}/${MONTH}/${DAY} AWSTATS=/usr/share/awstats/wwwroot/cgi-bin/awstats.pl HTMLDOC=/usr/bin/htmldoc -SITES="admin.fedoraproject.org apps.fedoraproject.org ask.fedoraproject.org badges.fedoraproject.org bodhi.fedoraproject.org budget.fedoraproject.org bugz.fedoraproject.org cloud.fedoraproject.org codecs.fedoraproject.org communityblog.fedoraproject.org copr.fedoraproject.org developer.fedoraproject.org developers.fedoraproject.org docs.fedoraproject.org docs-old.fedoraproject.org download.fedoraproject.org fas.fedoraproject.org fedora.my fedoracommunity.org fedoramagazine.org fedoraproject.com fedoraproject.org flocktofedora.net flocktofedora.org fonts.fedoraproject.org fpaste.org fudcon.fedoraproject.org get.fedoraproject.org getfedora.org help.fedoraproject.org id.fedoraproject.org it.fedoracommunity.org join.fedoraproject.org k12linux.org kde.fedoraproject.org l10n.fedoraproject.org labs.fedoraproject.org lists.fedorahosted.org lists.fedoraproject.org meetbot-raw.fedoraproject.org meetbot.fedoraproject.org mirrors.fedoraproject.org nightly.fedoraproject.org osbs.fedoraproject.org paste.fedoraproject.org pdc.fedoraproject.org people.fedoraproject.org port389.org qa.fedoraproject.org redirect.fedoraproject.org registry.fedoraproject.org smolts.org spins.fedoraproject.org src.fedoraproject.org 
start.fedoraproject.org store.fedoraproject.org translate.fedoraproject.org uk.fedoracommunity.org fedoraloveskde.org" +SITES="admin.fedoraproject.org apps.fedoraproject.org ask.fedoraproject.org badges.fedoraproject.org bodhi.fedoraproject.org budget.fedoraproject.org bugz.fedoraproject.org cloud.fedoraproject.org codecs.fedoraproject.org communityblog.fedoraproject.org copr.fedoraproject.org developer.fedoraproject.org developers.fedoraproject.org docs.fedoraproject.org docs-old.fedoraproject.org download.fedoraproject.org fas.fedoraproject.org fedora.my fedoracommunity.org fedoramagazine.org fedoraproject.com fedoraproject.org flocktofedora.net flocktofedora.org fonts.fedoraproject.org fpaste.org fudcon.fedoraproject.org get.fedoraproject.org getfedora.org help.fedoraproject.org id.fedoraproject.org it.fedoracommunity.org join.fedoraproject.org k12linux.org kde.fedoraproject.org l10n.fedoraproject.org labs.fedoraproject.org lists.fedorahosted.org lists.fedoraproject.org meetbot-raw.fedoraproject.org meetbot.fedoraproject.org mirrors.fedoraproject.org nightly.fedoraproject.org paste.fedoraproject.org pdc.fedoraproject.org people.fedoraproject.org port389.org qa.fedoraproject.org redirect.fedoraproject.org registry.fedoraproject.org smolts.org spins.fedoraproject.org src.fedoraproject.org start.fedoraproject.org store.fedoraproject.org translate.fedoraproject.org uk.fedoracommunity.org fedoraloveskde.org" pushd ${CONFDIR} for SITE in ${SITES}; do diff --git a/tasks/osbs_certs.yml b/tasks/osbs_certs.yml deleted file mode 100644 index c48b40570c..0000000000 --- a/tasks/osbs_certs.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- name: put the osbs certs in place - copy: - src: "{{private}}/files/osbs/{{env}}/osbs-internal.pem" - dest: "/etc/pki/ca-trust/source/anchors/osbs-internal.pem" - owner: root - mode: 0400 - when: env == "staging" - notify: - - update ca-trust diff --git a/tasks/osbs_koji_token.yml b/tasks/osbs_koji_token.yml deleted file mode 100644 index f219337a99..0000000000 --- a/tasks/osbs_koji_token.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- - -- name: make sure that /etc/osbs directory exists - file: path=/etc/osbs state=directory - -- name: put the koji token file in place - copy: - src: "{{ private }}/files/osbs/{{ env }}/x86-64-osbs-koji" - dest: "/etc/osbs/x86-64-osbs-koji" - owner: root - mode: 0400 diff --git a/tasks/virt_instance_create.yml b/tasks/virt_instance_create.yml index 9a60d96452..8c6d34198b 100644 --- a/tasks/virt_instance_create.yml +++ b/tasks/virt_instance_create.yml @@ -47,24 +47,6 @@ - /root/.ssh/known_hosts when: inventory_hostname not in result.list_vms -- name: (osbs-control01.stg) make sure there is no old ssh host key for the host still around - known_hosts: path={{item}} host={{ inventory_hostname }} state=absent - ignore_errors: True - with_items: - - /root/.ssh/known_hosts - - /etc/ssh/ssh_known_hosts - when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters_stg']+groups['osbs_nodes_stg'] - delegate_to: osbs-control01.stg.{{ datacenter }}.fedoraproject.org - -- name: (osbs-control01) make sure there is no old ssh host key for the host still around - known_hosts: path={{item}} host={{ inventory_hostname }} state=absent - ignore_errors: True - with_items: - - /root/.ssh/known_hosts - - /etc/ssh/ssh_known_hosts - when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters']+groups['osbs_nodes'] - delegate_to: osbs-control01.{{ datacenter }}.fedoraproject.org - - name: wait for ssh on 
the vm to start back local_action: wait_for delay=10 host={{ inventory_hostname }} port=22 state=started timeout=1200 when: inventory_hostname not in result.list_vms @@ -82,24 +64,6 @@ - /root/.ssh/known_hosts when: inventory_hostname not in result.list_vms -- name: (osbs-control01.stg) add new ssh host key - known_hosts: path={{item}} key="{{ hostkey.stdout }}" host={{ inventory_hostname }} state=present - ignore_errors: True - with_items: - - /root/.ssh/known_hosts - - /etc/ssh/ssh_known_hosts - when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters_stg']+groups['osbs_nodes_stg'] - delegate_to: osbs-control01.stg.{{ datacenter }}.fedoraproject.org - -- name: (osbs-control01) add new ssh host key - known_hosts: path={{item}} key="{{ hostkey.stdout }}" host={{ inventory_hostname }} state=present - ignore_errors: True - with_items: - - /root/.ssh/known_hosts - - /etc/ssh/ssh_known_hosts - when: inventory_hostname not in result.list_vms and inventory_hostname in groups['osbs_masters']+groups['osbs_nodes'] - delegate_to: osbs-control01.{{ datacenter }}.fedoraproject.org - - name: gather facts setup: check_mode: no
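The osbs-secret role removed earlier in this diff builds its OpenShift Secret by base64-encoding each listed file into the manifest rendered from openshift-secret.yml.j2, then feeding that manifest to `oc replace` and mounting the secret into the service account. A minimal Python sketch of that encoding step, using the role's illustrative default paths (assumed to exist on disk; this is a sketch of the transformation, not part of the role itself):

    import base64
    from pathlib import Path

    # Inputs mirroring osbs-secret's defaults (illustrative values only).
    osbs_secret_name = 'pulpsecret'
    osbs_secret_type = 'Opaque'
    osbs_secret_files = [
        {'source': '/home/user/.pulp/pulp.cer', 'dest': 'pulp.cer'},
        {'source': '/home/user/.pulp/pulp.key', 'dest': 'pulp.key'},
    ]

    # Render the same YAML shape the openshift-secret.yml.j2 template
    # produced: each file's bytes land base64-encoded under its 'dest' key.
    manifest = [
        'apiVersion: v1',
        'kind: Secret',
        'metadata:',
        '  name: %s' % osbs_secret_name,
        'type: %s' % osbs_secret_type,
        'data:',
    ]
    for f in osbs_secret_files:
        encoded = base64.b64encode(Path(f['source']).read_bytes()).decode('ascii')
        manifest.append('  %s: %s' % (f['dest'], encoded))
    print('\n'.join(manifest))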