diff --git a/.mailmap b/.mailmap new file mode 100644 index 0000000000..af8eb2f941 --- /dev/null +++ b/.mailmap @@ -0,0 +1,5 @@ +Rick Elrod +Rick Elrod Ricky Elrod +Rick Elrod Ricky Elrod + +# ... others go here ... diff --git a/files/common/mock b/files/common/mock index e307ff7216..347fc0c820 100644 --- a/files/common/mock +++ b/files/common/mock @@ -1,6 +1,8 @@ #%PAM-1.0 auth sufficient pam_rootok.so auth sufficient pam_succeed_if.so user ingroup mock use_uid quiet +account sufficient pam_succeed_if.so user ingroup packager use_uid quiet +auth sufficient pam_succeed_if.so user ingroup packager use_uid quiet # Uncomment the following line to implicitly trust users in the "wheel" group. #auth sufficient pam_wheel.so trust use_uid # Uncomment the following line to require a user to be in the "wheel" group. @@ -10,6 +12,4 @@ account sufficient pam_succeed_if.so user ingroup mock use_uid quie account include system-auth password include system-auth session include system-auth -account sufficient pam_succeed_if.so user ingroup packager use_uid quiet -auth sufficient pam_succeed_if.so user ingroup packager use_uid quiet session optional pam_xauth.so diff --git a/files/openshift/openshift.repo b/files/openshift/openshift.repo index 5a37adbdcb..af21a47a88 100644 --- a/files/openshift/openshift.repo +++ b/files/openshift/openshift.repo @@ -5,11 +5,17 @@ baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7 gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release enabled=1 {% elif inventory_hostname.startswith('os') %} +[rhel7-openshift-3.10] +name = rhel7 openshift 3.10 $basearch +baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.10-rpms/ +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release +enabled=1 + [rhel7-openshift-3.9] name = rhel7 openshift 3.9 $basearch baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.9-rpms/ 
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release -enabled=1 +enabled=0 # 3.8 is needed to upgrade from 3.7 to 3.9 [rhel7-openshift-3.8] diff --git a/files/osbs/buildroot-Dockerfile-production.j2 b/files/osbs/buildroot-Dockerfile-production.j2 index b577681fc4..988a18068a 100644 --- a/files/osbs/buildroot-Dockerfile-production.j2 +++ b/files/osbs/buildroot-Dockerfile-production.j2 @@ -1,8 +1,7 @@ FROM registry.fedoraproject.org/fedora -ADD ./infra-tags.repo /etc/yum.repos.d/infra-tags.repo -RUN dnf -y install --refresh dnf-plugins-core && dnf -y install docker git python-setuptools e2fsprogs koji python-backports-lzma osbs-client\ - python-osbs-client gssproxy fedpkg python-docker-squash atomic-reactor python-atomic-reactor* go-md2man python2-productmd python3-productmd\ - libmodulemd python2-gobject python3-gobject python2-modulemd python3-modulemd python2-pdc-client python3-pdc-client ostree flatpak skopeo +RUN dnf -y install --refresh dnf-plugins-core && dnf -y install docker git python3-setuptools e2fsprogs koji osbs-client\ + python3-osbs-client gssproxy fedpkg python3-docker-squash atomic-reactor python3-atomic-reactor* go-md2man python3-productmd\ + python3-gobject python3-modulemd python3-pdc-client ostree flatpak-module-tools flatpak skopeo && dnf clean all ADD ./orchestrator_customize.json /usr/share/osbs/orchestrator_customize.json ADD ./worker_customize.json /usr/share/osbs/worker_customize.json ADD ./krb5.conf /etc @@ -10,4 +9,4 @@ RUN printf '[libdefaults]\n default_ccache_name = DIR:/tmp/ccache_%%{uid}' >/etc ADD ./krb5.osbs_{{osbs_url}}.keytab /etc/ ADD ./ca.crt /etc/pki/ca-trust/source/anchors/osbs.ca.crt RUN update-ca-trust -CMD ["python2", "/usr/bin/atomic-reactor", "--verbose", "inside-build"] \ No newline at end of file +CMD ["python3", "/usr/bin/atomic-reactor", "--verbose", "inside-build"] diff --git a/files/osbs/buildroot-Dockerfile-staging.j2 b/files/osbs/buildroot-Dockerfile-staging.j2 index 135e4a5581..988a18068a 100644 --- 
a/files/osbs/buildroot-Dockerfile-staging.j2 +++ b/files/osbs/buildroot-Dockerfile-staging.j2 @@ -1,8 +1,7 @@ FROM registry.fedoraproject.org/fedora -ADD ./infra-tags.repo /etc/yum.repos.d/infra-tags.repo RUN dnf -y install --refresh dnf-plugins-core && dnf -y install docker git python3-setuptools e2fsprogs koji osbs-client\ python3-osbs-client gssproxy fedpkg python3-docker-squash atomic-reactor python3-atomic-reactor* go-md2man python3-productmd\ - libmodulemd python3-gobject python3-modulemd python3-pdc-client ostree flatpak skopeo && dnf clean all + python3-gobject python3-modulemd python3-pdc-client ostree flatpak-module-tools flatpak skopeo && dnf clean all ADD ./orchestrator_customize.json /usr/share/osbs/orchestrator_customize.json ADD ./worker_customize.json /usr/share/osbs/worker_customize.json ADD ./krb5.conf /etc diff --git a/files/osbs/orchestrator_customize.json b/files/osbs/orchestrator_customize.json index e8a69077dd..4726511b94 100644 --- a/files/osbs/orchestrator_customize.json +++ b/files/osbs/orchestrator_customize.json @@ -3,13 +3,7 @@ { "plugin_type": "exit_plugins", "plugin_name": "import_image" - }, - { - "plugin_type": "prebuild_plugins", - "plugin_name": "flatpak_create_dockerfile" } ], - - "enable_plugins": [ - ] -} \ No newline at end of file + "enable_plugins": [] +} diff --git a/files/osbs/worker_customize.json b/files/osbs/worker_customize.json index 5acab8544d..e47abdc18e 100644 --- a/files/osbs/worker_customize.json +++ b/files/osbs/worker_customize.json @@ -3,13 +3,7 @@ { "plugin_type": "prebuild_plugins", "plugin_name": "fetch_maven_artifacts" - }, - { - "plugin_type": "prebuild_plugins", - "plugin_name": "flatpak_create_dockerfile" } ], - - "enable_plugins": [ - ] -} \ No newline at end of file + "enable_plugins": [] +} diff --git a/inventory/backups b/inventory/backups index c6aa3d3d32..dfe122e765 100644 --- a/inventory/backups +++ b/inventory/backups @@ -22,6 +22,7 @@ copr-keygen.cloud.fedoraproject.org 
#copr-dist-git.fedorainfracloud.org value01.phx2.fedoraproject.org taiga.fedorainfracloud.org +tang01.phx2.fedoraproject.org taskotron01.qa.fedoraproject.org nuancier01.phx2.fedoraproject.org magazine2.fedorainfracloud.org diff --git a/inventory/builders b/inventory/builders index 301bd38488..1654f406df 100644 --- a/inventory/builders +++ b/inventory/builders @@ -77,8 +77,9 @@ buildvm-aarch64-19.arm.fedoraproject.org buildvm-aarch64-20.arm.fedoraproject.org buildvm-aarch64-21.arm.fedoraproject.org buildvm-aarch64-22.arm.fedoraproject.org -buildvm-aarch64-23.arm.fedoraproject.org -buildvm-aarch64-24.arm.fedoraproject.org +# These two have been dropped to allow for osbs builders. +#buildvm-aarch64-23.arm.fedoraproject.org +#buildvm-aarch64-24.arm.fedoraproject.org [buildvm-armv7] buildvm-armv7-01.arm.fedoraproject.org @@ -232,8 +233,8 @@ buildvm-ppc64le-18.ppc.fedoraproject.org buildvm-ppc64le-19.ppc.fedoraproject.org [bkernel] -bkernel01.phx2.fedoraproject.org -bkernel02.phx2.fedoraproject.org +bkernel03.phx2.fedoraproject.org +bkernel04.phx2.fedoraproject.org # # These are misc diff --git a/inventory/cloud b/inventory/cloud index 04e09200ba..24e7f51d46 100644 --- a/inventory/cloud +++ b/inventory/cloud @@ -10,14 +10,16 @@ commops.fedorainfracloud.org communityblog.fedorainfracloud.org copr-be.cloud.fedoraproject.org copr-be-dev.cloud.fedoraproject.org -copr-dist-git-dev.fedorainfracloud.org +copr-be-stg.fedorainfracloud.org copr-dist-git.fedorainfracloud.org +copr-dist-git-dev.fedorainfracloud.org +copr-dist-git-stg.fedorainfracloud.org copr-fe.cloud.fedoraproject.org copr-fe-dev.cloud.fedoraproject.org copr-keygen.cloud.fedoraproject.org copr-keygen-dev.cloud.fedoraproject.org +copr-keygen-stg.fedorainfracloud.org developer.fedorainfracloud.org -eclipse.fedorainfracloud.org elastic-dev.fedorainfracloud.org el6-test.fedorainfracloud.org el7-test.fedorainfracloud.org diff --git a/inventory/group_vars/all b/inventory/group_vars/all index 555fb58762..8c8d881145 100644 
--- a/inventory/group_vars/all +++ b/inventory/group_vars/all @@ -45,6 +45,9 @@ custom_rules: [] nat_rules: [] custom6_rules: [] +# defaults for hw installs +install_noc: none + # defaults for virt installs ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-7 ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL7-x86_64/ @@ -261,7 +264,7 @@ createrepo: True # Nagios global variables nagios_Check_Services: - monitor: true + mail: true nrpe: true sshd: true named: false diff --git a/inventory/group_vars/bastion b/inventory/group_vars/bastion index a321809379..1e8e32c923 100644 --- a/inventory/group_vars/bastion +++ b/inventory/group_vars/bastion @@ -23,7 +23,7 @@ custom_rules: [ # TODO - remove modularity-wg membership here once it is not longer needed: # https://fedorahosted.org/fedora-infrastructure/ticket/5363 -fas_client_groups: sysadmin-ask,sysadmin-atomic,sysadmin-web,sysadmin-main,sysadmin-cvs,sysadmin-build,sysadmin-noc,sysadmin-releng,sysadmin-dba,sysadmin-hosted,sysadmin-tools,sysadmin-spin,sysadmin-cloud,fi-apprentice,sysadmin-badges,sysadmin-troubleshoot,sysadmin-qa,sysadmin-centos,sysadmin-ppc,sysadmin-koschei,sysadmin-secondary,sysadmin-fedimg,sysadmin-veteran,sysadmin-mbs,modularity-wg,pungi-devel,sysadmin-upstreamfirst +fas_client_groups: sysadmin-ask,sysadmin-atomic,sysadmin-web,sysadmin-main,sysadmin-cvs,sysadmin-noc,sysadmin-releng,sysadmin-dba,sysadmin-hosted,sysadmin-tools,sysadmin-spin,sysadmin-cloud,fi-apprentice,sysadmin-badges,sysadmin-troubleshoot,sysadmin-qa,sysadmin-centos,sysadmin-ppc,sysadmin-koschei,sysadmin-secondary,sysadmin-fedimg,sysadmin-veteran,sysadmin-mbs,modularity-wg,pungi-devel,sysadmin-upstreamfirst,sysadmin-releasemonitoring # # This is a postfix gateway. This will pick up gateway postfix config in base @@ -55,3 +55,6 @@ csi_relationship: | - All incoming SMTP from phx2 and VPN, as well as outgoing SMTP, pass or are filtered here. - Bastion does not accept any mail outside phx2/vpn. 
+nagios_Check_Services: + nrpe: true + mail: false diff --git a/inventory/group_vars/batcave b/inventory/group_vars/batcave index e1d7c820c4..55b873de89 100644 --- a/inventory/group_vars/batcave +++ b/inventory/group_vars/batcave @@ -8,7 +8,7 @@ tcp_ports: [ 80, 443 ] # Neeed for rsync from log01 for logs. custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ] -fas_client_groups: sysadmin-ask,sysadmin-atomic,sysadmin-build,sysadmin-cvs,sysadmin-main,sysadmin-web,sysadmin-noc,sysadmin-hosted,sysadmin-releng,sysadmin-qa,sysadmin-tools,sysadmin-cloud,sysadmin-bot,sysadmin-centos,sysadmin-koschei,sysadmin-datanommer,sysadmin-fedimg,fi-apprentice,sysadmin-regcfp,sysadmin-badges,sysadmin-mbs,sysadmin-veteran,sysadmin-coreos,sysadmin-upstreamfirst +fas_client_groups: sysadmin-ask,sysadmin-atomic,sysadmin-cvs,sysadmin-main,sysadmin-web,sysadmin-noc,sysadmin-hosted,sysadmin-releng,sysadmin-qa,sysadmin-tools,sysadmin-cloud,sysadmin-bot,sysadmin-centos,sysadmin-koschei,sysadmin-datanommer,sysadmin-fedimg,fi-apprentice,sysadmin-regcfp,sysadmin-badges,sysadmin-mbs,sysadmin-veteran,sysadmin-coreos,sysadmin-upstreamfirst,sysadmin-releasemonitoring ansible_base: /srv/web/infra freezes: false diff --git a/inventory/group_vars/builders b/inventory/group_vars/builders index c22c3870a9..f286a2fb97 100644 --- a/inventory/group_vars/builders +++ b/inventory/group_vars/builders @@ -5,3 +5,4 @@ nagios_Check_Services: nrpe: false swap: false + mail: false diff --git a/inventory/group_vars/builders-stg b/inventory/group_vars/builders-stg index c22c3870a9..f286a2fb97 100644 --- a/inventory/group_vars/builders-stg +++ b/inventory/group_vars/builders-stg @@ -5,3 +5,4 @@ nagios_Check_Services: nrpe: false swap: false + mail: false diff --git a/inventory/group_vars/cloud b/inventory/group_vars/cloud index daa307cee8..705a2186e4 100644 --- a/inventory/group_vars/cloud +++ b/inventory/group_vars/cloud @@ -1,5 
+1,6 @@ --- nagios_Check_Services: + mail: false nrpe: false swap: false datacenter: cloud diff --git a/inventory/group_vars/copr-back-dev b/inventory/group_vars/copr-back-dev new file mode 100644 index 0000000000..e8946170e9 --- /dev/null +++ b/inventory/group_vars/copr-back-dev @@ -0,0 +1,29 @@ +--- +_lighttpd_conf_src: "lighttpd/lighttpd_dev.conf" + +copr_nova_auth_url: "https://fedorainfracloud.org:5000/v2.0" +copr_nova_tenant_id: "a6ff2158641c439a8426d7facab45437" +copr_nova_tenant_name: "coprdev" +copr_nova_username: "copr" + +copr_builder_image_name: "builder-f24" +copr_builder_flavor_name: "ms2.builder" +copr_builder_network_name: "coprdev-net" +copr_builder_key_name: "buildsys" +copr_builder_security_groups: "ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev" + +fedmsg_enabled: "true" + +do_sign: "true" + +spawn_in_advance: "false" +frontend_base_url: "http://copr-fe-dev.cloud.fedoraproject.org" + +# These variables are pushed into /etc/system_identification by the base role. +# Groups and individual hosts should override them with specific info. 
+# See http://infrastructure.fedoraproject.org/csi/security-policy/ + +csi_security_category: Moderate +csi_primary_contact: "msuchy (mirek), clime, frostyx, dturecek IRC #fedora-admin, #fedora-buildsys" +csi_purpose: Provide the testing environment of copr's backend +csi_relationship: This host is the testing environment for the cloud infrastructure of copr's backend diff --git a/inventory/group_vars/copr-back-stg b/inventory/group_vars/copr-back-stg index e8946170e9..f514be113b 100644 --- a/inventory/group_vars/copr-back-stg +++ b/inventory/group_vars/copr-back-stg @@ -1,4 +1,6 @@ --- +resolvconf: "resolv.conf/cloud" + _lighttpd_conf_src: "lighttpd/lighttpd_dev.conf" copr_nova_auth_url: "https://fedorainfracloud.org:5000/v2.0" @@ -17,7 +19,7 @@ fedmsg_enabled: "true" do_sign: "true" spawn_in_advance: "false" -frontend_base_url: "http://copr-fe-dev.cloud.fedoraproject.org" +frontend_base_url: "https://copr.stg.fedoraproject.org" # These variables are pushed into /etc/system_identification by the base role. # Groups and individual hosts should override them with specific info. 
diff --git a/inventory/group_vars/copr-dev b/inventory/group_vars/copr-dev new file mode 100644 index 0000000000..4c222bc6dd --- /dev/null +++ b/inventory/group_vars/copr-dev @@ -0,0 +1,19 @@ +--- +devel: true +#_forward-src: "{{ files }}/copr/forward-dev" +_forward_src: "forward_dev" + +# don't forget to update ip in ./copr-keygen-stg, due to custom firewall rules + +copr_backend_ips: ["172.25.32.232", "172.25.157.237"] +keygen_host: "172.25.32.238" + +resolvconf: "resolv.conf/cloud" + +backend_base_url: "http://copr-be-dev.cloud.fedoraproject.org" +postfix_maincf: "postfix/main.cf/main.cf.copr" + +frontend_base_url: "http://copr-fe-dev.cloud.fedoraproject.org" +dist_git_base_url: "copr-dist-git-dev.fedorainfracloud.org" + +ansible_ifcfg_blacklist: true diff --git a/inventory/group_vars/copr-dist-git b/inventory/group_vars/copr-dist-git index 29d3b4cc35..e165d75b91 100644 --- a/inventory/group_vars/copr-dist-git +++ b/inventory/group_vars/copr-dist-git @@ -1,5 +1,4 @@ --- -tcp_ports: [22, 80] +tcp_ports: [22, 80, 443] datacenter: cloud freezes: false -custom_rules: ['-A INPUT -p tcp -m tcp --dport 443 -j ACCEPT'] diff --git a/inventory/group_vars/copr-dist-git-dev b/inventory/group_vars/copr-dist-git-dev new file mode 100644 index 0000000000..28b1b79cb2 --- /dev/null +++ b/inventory/group_vars/copr-dist-git-dev @@ -0,0 +1,6 @@ +--- +tcp_ports: [22, 80] +datacenter: cloud +freezes: false +devel: true +custom_rules: ['-A INPUT -p tcp -m tcp --dport 443 -j ACCEPT'] diff --git a/inventory/group_vars/copr-dist-git-stg b/inventory/group_vars/copr-dist-git-stg index 28b1b79cb2..502e4fc16a 100644 --- a/inventory/group_vars/copr-dist-git-stg +++ b/inventory/group_vars/copr-dist-git-stg @@ -1,6 +1,6 @@ --- -tcp_ports: [22, 80] +resolvconf: "resolv.conf/cloud" + +tcp_ports: [22, 80, 443] datacenter: cloud freezes: false -devel: true -custom_rules: ['-A INPUT -p tcp -m tcp --dport 443 -j ACCEPT'] diff --git a/inventory/group_vars/copr-front-dev 
b/inventory/group_vars/copr-front-dev new file mode 100644 index 0000000000..27a5e4194b --- /dev/null +++ b/inventory/group_vars/copr-front-dev @@ -0,0 +1,9 @@ +--- +copr_frontend_public_hostname: "copr-fe-dev.cloud.fedoraproject.org" + +csi_security_category: Low +csi_primary_contact: "msuchy (mirek), clime, frostyx, dturecek IRC #fedora-admin, #fedora-buildsys" +csi_purpose: Provide the testing environment of copr's frontend +csi_relationship: This host is the testing environment for copr's web interface + +copr_mbs_cli_login: Y29wcg==##vtvvikhcjncwkfkdcssv diff --git a/inventory/group_vars/copr-front-stg b/inventory/group_vars/copr-front-stg index 27a5e4194b..b74c2a88db 100644 --- a/inventory/group_vars/copr-front-stg +++ b/inventory/group_vars/copr-front-stg @@ -1,9 +1,33 @@ --- -copr_frontend_public_hostname: "copr-fe-dev.cloud.fedoraproject.org" +# Define resources for this group of hosts here. +lvm_size: 10000 +mem_size: 2048 +num_cpus: 1 +# for systems that do not match the above - specify the same parameter in +# the host_vars/$hostname file + +copr_frontend_public_hostname: "copr.stg.fedoraproject.org" + +copruser_db_password: "{{ copruser_db_password_stg }}" + +tcp_ports: [ 80 ] + +custom_rules: [ + # Need for rsync from log01 for logs. 
+ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', + '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT', + ] + +fas_client_groups: sysadmin-copr,fi-apprentice,sysadmin-noc,sysadmin-veteran + +freezes: false + +# For the MOTD csi_security_category: Low -csi_primary_contact: "msuchy (mirek), clime, frostyx, dturecek IRC #fedora-admin, #fedora-buildsys" -csi_purpose: Provide the testing environment of copr's frontend -csi_relationship: This host is the testing environment for copr's web interface - -copr_mbs_cli_login: Y29wcg==##vtvvikhcjncwkfkdcssv +csi_primary_contact: Fedora admins - admin@fedoraproject.org +csi_purpose: Copr community build service +csi_relationship: | + This machine depends on: + - PostgreSQL DB server + - bastion (for mail relay) diff --git a/inventory/group_vars/copr-keygen-dev b/inventory/group_vars/copr-keygen-dev new file mode 100644 index 0000000000..cea9d8bfa3 --- /dev/null +++ b/inventory/group_vars/copr-keygen-dev @@ -0,0 +1,13 @@ +--- +copr_hostbase: copr-keygen-dev +tcp_ports: [] + +# http + signd dest ports +custom_rules: [ '-A INPUT -p tcp -m tcp -s 172.25.32.232 --dport 80 -j ACCEPT', + '-A INPUT -p tcp -m tcp -s 172.25.157.237 --dport 80 -j ACCEPT', + '-A INPUT -p tcp -m tcp -s 172.25.32.232 --dport 5167 -j ACCEPT', + '-A INPUT -p tcp -m tcp -s 172.25.157.237 --dport 5167 -j ACCEPT'] + +datacenter: cloud + +freezes: false diff --git a/inventory/group_vars/copr-keygen-stg b/inventory/group_vars/copr-keygen-stg index cea9d8bfa3..082582668b 100644 --- a/inventory/group_vars/copr-keygen-stg +++ b/inventory/group_vars/copr-keygen-stg @@ -1,12 +1,14 @@ --- -copr_hostbase: copr-keygen-dev +resolvconf: "resolv.conf/cloud" + +copr_hostbase: copr-keygen-stg tcp_ports: [] # http + signd dest ports -custom_rules: [ '-A INPUT -p tcp -m tcp -s 172.25.32.232 --dport 80 -j ACCEPT', - '-A INPUT -p tcp -m tcp -s 172.25.157.237 --dport 80 -j ACCEPT', - '-A INPUT -p tcp -m tcp -s 172.25.32.232 --dport 5167 -j ACCEPT', - '-A 
INPUT -p tcp -m tcp -s 172.25.157.237 --dport 5167 -j ACCEPT'] +custom_rules: ['-A INPUT -p tcp -m tcp -s 172.25.33.9 --dport 80 -j ACCEPT', + '-A INPUT -p tcp -m tcp -s 172.25.151.227 --dport 80 -j ACCEPT', + '-A INPUT -p tcp -m tcp -s 172.25.33.9 --dport 5167 -j ACCEPT', + '-A INPUT -p tcp -m tcp -s 172.25.151.227 --dport 5167 -j ACCEPT'] datacenter: cloud diff --git a/inventory/group_vars/copr-stg b/inventory/group_vars/copr-stg index 4c222bc6dd..4f5be7d34d 100644 --- a/inventory/group_vars/copr-stg +++ b/inventory/group_vars/copr-stg @@ -5,15 +5,11 @@ _forward_src: "forward_dev" # don't forget to update ip in ./copr-keygen-stg, due to custom firewall rules -copr_backend_ips: ["172.25.32.232", "172.25.157.237"] -keygen_host: "172.25.32.238" +copr_backend_ips: ["172.25.33.9", "172.25.151.227"] +keygen_host: "172.25.33.12" -resolvconf: "resolv.conf/cloud" +backend_base_url: "http://copr-be-stg.fedorainfracloud.org" +frontend_base_url: "https://copr.stg.fedoraproject.org" +dist_git_base_url: "copr-dist-git-stg.fedorainfracloud.org" -backend_base_url: "http://copr-be-dev.cloud.fedoraproject.org" -postfix_maincf: "postfix/main.cf/main.cf.copr" - -frontend_base_url: "http://copr-fe-dev.cloud.fedoraproject.org" -dist_git_base_url: "copr-dist-git-dev.fedorainfracloud.org" - -ansible_ifcfg_blacklist: true +ansible_ifcfg_blacklist: true diff --git a/inventory/group_vars/faf-stg b/inventory/group_vars/faf-stg index fda1fc1650..8a062493c6 100644 --- a/inventory/group_vars/faf-stg +++ b/inventory/group_vars/faf-stg @@ -6,6 +6,7 @@ tcp_ports: [ 80, 443 ] sudoers: "{{ private }}/files/sudo/arm-retrace-sudoers" nagios_Check_Services: + mail: false nrpe: false swap: false diff --git a/inventory/group_vars/nagios b/inventory/group_vars/nagios index 9b922aab2d..b0fc314ef4 100644 --- a/inventory/group_vars/nagios +++ b/inventory/group_vars/nagios @@ -74,8 +74,6 @@ phx2_management_hosts: - cn-x86-64-02-01.mgmt.fedoraproject.org - cn-x86-64-02-02.mgmt.fedoraproject.org - 
cloud-fx02.mgmt.fedoraproject.org - - download01.mgmt.fedoraproject.org - - download02.mgmt.fedoraproject.org - download03.mgmt.fedoraproject.org - download04.mgmt.fedoraproject.org - download05.mgmt.fedoraproject.org @@ -129,8 +127,6 @@ phx2_management_hosts: # to test ping against. No http/https # phx2_management_limited: - - bkernel01.mgmt.fedoraproject.org - - bkernel02.mgmt.fedoraproject.org - fed-cloud-ppc01.mgmt.fedoraproject.org - fed-cloud-ppc02.mgmt.fedoraproject.org - moonshot01-ilo.mgmt.fedoraproject.org @@ -142,8 +138,6 @@ phx2_management_limited: - qa07.mgmt.fedoraproject.org - sign-vault03.mgmt.fedoraproject.org - sign-vault04.mgmt.fedoraproject.org - - virthost-comm02.mgmt.fedoraproject.org - - virthost14.mgmt.fedoraproject.org phx2_management_slowping: - ppc8-01-fsp.mgmt.fedoraproject.org diff --git a/inventory/group_vars/newcloud b/inventory/group_vars/newcloud index 45b6b607b6..7c44caf35c 100644 --- a/inventory/group_vars/newcloud +++ b/inventory/group_vars/newcloud @@ -11,7 +11,7 @@ ansible_ifcfg_whitelist: ['eth1'] baseiptables: false ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q cloud-noc01.cloud.fedoraproject.org"' nagios_Check_Services: - monitor: false + mail: false nrpe: false sshd: false swap: false diff --git a/inventory/group_vars/docker-registry b/inventory/group_vars/oci-registry similarity index 50% rename from inventory/group_vars/docker-registry rename to inventory/group_vars/oci-registry index 0bb0d792e4..c6d6efaaca 100644 --- a/inventory/group_vars/docker-registry +++ b/inventory/group_vars/oci-registry @@ -1,6 +1,4 @@ --- -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7-docker-reg -ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/ fas_client_groups: sysadmin-releng @@ -8,7 +6,12 @@ sudoers: "{{ private }}/files/sudo/00releng-sudoers" tcp_ports: [ 5000, - # This is for the gluster server - 6996] + # These ports all required for gluster + 111, 24007, 24008, 24009, 24010, 24011, + 49152, 49153, 49154, 49155, + ] + 
+# gluster +udp_ports: [111] registry_gluster_username_prod: registry-prod diff --git a/inventory/group_vars/docker-registry-stg b/inventory/group_vars/oci-registry-stg similarity index 75% rename from inventory/group_vars/docker-registry-stg rename to inventory/group_vars/oci-registry-stg index 13aacb4f1b..680732381c 100644 --- a/inventory/group_vars/docker-registry-stg +++ b/inventory/group_vars/oci-registry-stg @@ -1,7 +1,4 @@ --- -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7-docker-reg -ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/ - fas_client_groups: sysadmin-releng,fi-apprentice,sysadmin-veteran sudoers: "{{ private }}/files/sudo/00releng-sudoers" diff --git a/inventory/group_vars/openqa b/inventory/group_vars/openqa index 7654a57186..f6a4988f5e 100644 --- a/inventory/group_vars/openqa +++ b/inventory/group_vars/openqa @@ -44,8 +44,7 @@ tcp_ports: [80, 2049] # These people get told when something goes wrong. fedmsg_error_recipients: - adamwill@fedoraproject.org -- tflink@fedoraproject.org -- pschindl@fedoraproject.org +- lruzicka@fedoraproject.org # These are consumed by a task in roles/fedmsg/base/main.yml fedmsg_certs: @@ -69,6 +68,14 @@ fedmsg_certs: - openqa.jobs.restart - openqa.job.update.result - openqa.job.done +- service: ci + owner: root + group: geekotest + can_send: + - ci.productmd-compose.test.queued + - ci.productmd-compose.test.running + - ci.productmd-compose.test.complete + - ci.productmd-compose.test.error # we need this to log with fedmsg-logger fedmsg_active: True diff --git a/inventory/group_vars/openqa-stg b/inventory/group_vars/openqa-stg index 971a34887f..eb63b3e05f 100644 --- a/inventory/group_vars/openqa-stg +++ b/inventory/group_vars/openqa-stg @@ -48,8 +48,7 @@ tcp_ports: [80, 2049] # These people get told when something goes wrong. 
fedmsg_error_recipients: - adamwill@fedoraproject.org -- tflink@fedoraproject.org -- pschindl@fedoraproject.org +- lruzicka@fedoraproject.org # These are consumed by a task in roles/fedmsg/base/main.yml fedmsg_certs: diff --git a/inventory/group_vars/os b/inventory/group_vars/os index 92656a93d5..325ae29082 100644 --- a/inventory/group_vars/os +++ b/inventory/group_vars/os @@ -3,3 +3,4 @@ host_group: os baseiptables: False no_http2: True nm_controlled_resolv: True +openshift_ansible_upgrading: True diff --git a/inventory/group_vars/os-masters b/inventory/group_vars/os-masters index 4f9891d8ad..16298ca729 100644 --- a/inventory/group_vars/os-masters +++ b/inventory/group_vars/os-masters @@ -6,3 +6,4 @@ swap: false nagios_Check_Services: swap: false nrpe: false + mail: false diff --git a/inventory/group_vars/os-masters-stg b/inventory/group_vars/os-masters-stg index 3b850a08b4..661aafd42b 100644 --- a/inventory/group_vars/os-masters-stg +++ b/inventory/group_vars/os-masters-stg @@ -6,3 +6,4 @@ os_app_url: app.os.stg.fedoraproject.org nagios_Check_Services: swap: false nrpe: false + mail: false diff --git a/inventory/group_vars/os-nodes b/inventory/group_vars/os-nodes index 4f9891d8ad..16298ca729 100644 --- a/inventory/group_vars/os-nodes +++ b/inventory/group_vars/os-nodes @@ -6,3 +6,4 @@ swap: false nagios_Check_Services: swap: false nrpe: false + mail: false diff --git a/inventory/group_vars/os-nodes-stg b/inventory/group_vars/os-nodes-stg index 3b850a08b4..661aafd42b 100644 --- a/inventory/group_vars/os-nodes-stg +++ b/inventory/group_vars/os-nodes-stg @@ -6,3 +6,4 @@ os_app_url: app.os.stg.fedoraproject.org nagios_Check_Services: swap: false nrpe: false + mail: false diff --git a/inventory/group_vars/os-stg b/inventory/group_vars/os-stg index 7e31e1985c..ae4a2ed9f1 100644 --- a/inventory/group_vars/os-stg +++ b/inventory/group_vars/os-stg @@ -3,3 +3,5 @@ host_group: os baseiptables: False no_http2: False nm_controlled_resolv: True +# Only set this when upgrading 
+#openshift_ansible_upgrading: True diff --git a/inventory/group_vars/osbs-masters b/inventory/group_vars/osbs-masters index 74baed899b..ad44511010 100644 --- a/inventory/group_vars/osbs-masters +++ b/inventory/group_vars/osbs-masters @@ -132,7 +132,7 @@ _osbs_reactor_config_map: required_secrets: - kojisecret - v2-registry-dockercfg - # - odcs-oidc-secret + - odcs-oidc-secret worker_token_secrets: - x86-64-orchestrator diff --git a/inventory/group_vars/pkgs b/inventory/group_vars/pkgs index 6510e6214f..0aeb444155 100644 --- a/inventory/group_vars/pkgs +++ b/inventory/group_vars/pkgs @@ -17,7 +17,7 @@ wsgi_fedmsg_service: pagure wsgi_procs: 6 wsgi_threads: 6 -fas_client_groups: sysadmin-main,sysadmin-cvs,sysadmin-build,sysadmin-noc,sysadmin-veteran +fas_client_groups: sysadmin-main,sysadmin-cvs,sysadmin-noc,sysadmin-veteran fas_client_restricted_app: PAGURE_CONFIG=/etc/pagure/pagure_hook.cfg HOME=/srv/git /usr/share/gitolite3/gitolite-shell %(username)s fas_client_admin_app: PAGURE_CONFIG=/etc/pagure/pagure_hook.cfg HOME=/srv/git /usr/share/gitolite3/gitolite-shell -s %(username)s fas_client_ssh_groups: "@cvs,sysadmin-main,sysadmin-cvs,sysadmin-releng,sysadmin-noc,sysadmin-veteran" diff --git a/inventory/group_vars/retrace-stg b/inventory/group_vars/retrace-stg index 5701a48b07..1546b890bc 100644 --- a/inventory/group_vars/retrace-stg +++ b/inventory/group_vars/retrace-stg @@ -7,5 +7,6 @@ sudoers: "{{ private }}/files/sudo/arm-retrace-sudoers" root_auth_users: msuchy nagios_Check_Services: + mail: false nrpe: false swap: false diff --git a/inventory/group_vars/sign-vault b/inventory/group_vars/sign-vault index 8b63ff97a2..6ca3adcbd3 100644 --- a/inventory/group_vars/sign-vault +++ b/inventory/group_vars/sign-vault @@ -3,3 +3,9 @@ freezes: true postfix_group: sign host_group: sign ansible_ifcfg_blacklist: true +nagios_Check_Services: + mail: false + nrpe: false + sshd: false + swap: false + ping: true diff --git a/inventory/group_vars/smtp-mm 
b/inventory/group_vars/smtp-mm index 5026f332f7..ac51a7938a 100644 --- a/inventory/group_vars/smtp-mm +++ b/inventory/group_vars/smtp-mm @@ -14,3 +14,7 @@ fas_client_groups: sysadmin-noc,sysadmin-tools,fi-apprentice,sysadmin-veteran postfix_transport_filename: transports.mm-smtp postfix_group: smtp-mm vpn: true + +nagios_Check_Services: + nrpe: true + mail: false diff --git a/inventory/group_vars/tang b/inventory/group_vars/tang new file mode 100644 index 0000000000..8e4850365c --- /dev/null +++ b/inventory/group_vars/tang @@ -0,0 +1,23 @@ +--- +nm: 255.255.255.0 +gw: 10.5.128.254 +dns: 10.5.126.21 + +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28 +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ + +host_backup_targets: ['/var/db/tang'] + +datacenter: phx2 + +# Define resources for this group of hosts here. +lvm_size: 20000 +mem_size: 4096 +num_cpus: 2 + +# for systems that do not match the above - specify the same parameter in +# the host_vars/$hostname file + +tcp_ports: [80] + +fas_client_groups: sysadmin-main diff --git a/inventory/host_vars/batcave13.rdu2.fedoraproject.org b/inventory/host_vars/batcave13.rdu2.fedoraproject.org index dcbd9c85bc..54dddc5115 100644 --- a/inventory/host_vars/batcave13.rdu2.fedoraproject.org +++ b/inventory/host_vars/batcave13.rdu2.fedoraproject.org @@ -26,6 +26,7 @@ postfix_group: vpn vpn: true nagios_Check_Services: + mail: false nrpe: false sshd: false swap: false diff --git a/inventory/host_vars/bkernel03.phx2.fedoraproject.org b/inventory/host_vars/bkernel03.phx2.fedoraproject.org index 963a4e7581..65fefce9ab 100644 --- a/inventory/host_vars/bkernel03.phx2.fedoraproject.org +++ b/inventory/host_vars/bkernel03.phx2.fedoraproject.org @@ -1,4 +1,4 @@ --- gw: 10.5.125.254 eth0_ip: 10.5.125.81 -eth1_ip: 10.5.127.133 +eth1_ip: 10.5.127.129 diff --git a/inventory/host_vars/bkernel04.phx2.fedoraproject.org b/inventory/host_vars/bkernel04.phx2.fedoraproject.org index ae72e8530b..1c0fb8dc01 100644 --- 
a/inventory/host_vars/bkernel04.phx2.fedoraproject.org +++ b/inventory/host_vars/bkernel04.phx2.fedoraproject.org @@ -1,4 +1,4 @@ --- gw: 10.5.125.254 eth0_ip: 10.5.125.82 -eth1_ip: 10.5.127.134 +eth1_ip: 10.5.127.144 diff --git a/inventory/host_vars/branched-composer.phx2.fedoraproject.org b/inventory/host_vars/branched-composer.phx2.fedoraproject.org index 7cf1effb61..8658b53e88 100644 --- a/inventory/host_vars/branched-composer.phx2.fedoraproject.org +++ b/inventory/host_vars/branched-composer.phx2.fedoraproject.org @@ -34,3 +34,8 @@ fedmsg_certs: - compose.branched.rsync.complete - compose.branched.rsync.start - compose.branched.start + - compose.29.start + - compose.29.complete + - compose.29.rsync.start + - compose.29.rsync.complete + diff --git a/inventory/host_vars/certgetter01.phx2.fedoraproject.org b/inventory/host_vars/certgetter01.phx2.fedoraproject.org index 00bd41fde6..21a44dc613 100644 --- a/inventory/host_vars/certgetter01.phx2.fedoraproject.org +++ b/inventory/host_vars/certgetter01.phx2.fedoraproject.org @@ -3,8 +3,8 @@ nm: 255.255.255.0 gw: 10.5.126.254 dns: 10.5.126.21 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-26 -ks_repo: http://10.5.126.23/pub/fedora/linux/releases/26/Server/x86_64/os/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28 +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ volgroup: /dev/vg_guests eth0_ip: 10.5.126.237 diff --git a/inventory/host_vars/cloud-noc01.cloud.fedoraproject.org b/inventory/host_vars/cloud-noc01.cloud.fedoraproject.org index 69c19d7777..2305cebee2 100644 --- a/inventory/host_vars/cloud-noc01.cloud.fedoraproject.org +++ b/inventory/host_vars/cloud-noc01.cloud.fedoraproject.org @@ -12,16 +12,16 @@ freezes: false resolvconf: "{{ files }}/resolv.conf/cloud-noc01.cloud.fedoraproject.org" tcp_ports: ['22'] -custom_rules: [ '-A INPUT -i eth0 -p tcp -m tcp -s 209.132.184.0/24 --dport 67 -j ACCEPT', - '-A INPUT -i eth0 -p tcp -m tcp -s 209.132.184.0/24 --dport 68 -j ACCEPT', - 
'-A INPUT -i eth0 -p tcp -m tcp -s 209.132.184.0/24 --dport 69 -j ACCEPT', - '-A INPUT -i eth0 -p udp -m udp -s 209.132.184.0/24 --dport 67 -j ACCEPT', - '-A INPUT -i eth0 -p udp -m udp -s 209.132.184.0/24 --dport 68 -j ACCEPT', - '-A INPUT -i eth0 -p udp -m udp -s 209.132.184.0/24 --dport 69 -j ACCEPT', - '-A INPUT -i eth1 -p tcp -m tcp -s 172.23.0.0/23 --dport 67 -j ACCEPT', - '-A INPUT -i eth1 -p tcp -m tcp -s 172.23.0.0/23 --dport 68 -j ACCEPT', - '-A INPUT -i eth1 -p tcp -m tcp -s 172.23.0.0/23 --dport 69 -j ACCEPT', - '-A INPUT -i eth1 -p udp -m udp -s 172.23.0.0/23 --dport 67 -j ACCEPT', - '-A INPUT -i eth1 -p udp -m udp -s 172.23.0.0/23 --dport 68 -j ACCEPT', - '-A INPUT -i eth1 -p udp -m udp -s 172.23.0.0/23 --dport 69 -j ACCEPT' ] +custom_rules: [ '-A INPUT -i br0 -p tcp -m tcp -s 209.132.184.0/24 --dport 67 -j ACCEPT', + '-A INPUT -i br0 -p tcp -m tcp -s 209.132.184.0/24 --dport 68 -j ACCEPT', + '-A INPUT -i br0 -p tcp -m tcp -s 209.132.184.0/24 --dport 69 -j ACCEPT', + '-A INPUT -i br0 -p udp -m udp -s 209.132.184.0/24 --dport 67 -j ACCEPT', + '-A INPUT -i br0 -p udp -m udp -s 209.132.184.0/24 --dport 68 -j ACCEPT', + '-A INPUT -i br0 -p udp -m udp -s 209.132.184.0/24 --dport 69 -j ACCEPT', + '-A INPUT -i br1 -p tcp -m tcp -s 172.23.0.0/23 --dport 67 -j ACCEPT', + '-A INPUT -i br1 -p tcp -m tcp -s 172.23.0.0/23 --dport 68 -j ACCEPT', + '-A INPUT -i br1 -p tcp -m tcp -s 172.23.0.0/23 --dport 69 -j ACCEPT', + '-A INPUT -i br1 -p udp -m udp -s 172.23.0.0/23 --dport 67 -j ACCEPT', + '-A INPUT -i br1 -p udp -m udp -s 172.23.0.0/23 --dport 68 -j ACCEPT', + '-A INPUT -i br1 -p udp -m udp -s 172.23.0.0/23 --dport 69 -j ACCEPT' ] diff --git a/inventory/host_vars/compose-iot-01.phx2.fedoraproject.org b/inventory/host_vars/compose-iot-01.phx2.fedoraproject.org index 4bfa20d70d..efc4b6c1be 100644 --- a/inventory/host_vars/compose-iot-01.phx2.fedoraproject.org +++ b/inventory/host_vars/compose-iot-01.phx2.fedoraproject.org @@ -35,3 +35,5 @@ fedmsg_certs: - 
pungi.compose.ostree - compose.29.complete - compose.29.start + - compose.29.rsync.start + - compose.29.rsync.complete diff --git a/inventory/host_vars/copr-be-stg.fedorainfracloud.org b/inventory/host_vars/copr-be-stg.fedorainfracloud.org new file mode 100644 index 0000000000..f000c0d26c --- /dev/null +++ b/inventory/host_vars/copr-be-stg.fedorainfracloud.org @@ -0,0 +1,26 @@ +--- +instance_type: m1.xlarge +image: "{{ fedora27_x86_64 }}" +keypair: fedora-admin-20130801 +security_group: web-80-anywhere-persistent,web-443-anywhere-persistent,ssh-anywhere-persistent,default,allow-nagios-persistent,fedmsg-relay-persistent +zone: nova +hostbase: copr-be-stg- +public_ip: 209.132.184.44 +root_auth_users: msuchy pingou frostyx dturecek clime +description: copr dispatcher and repo server - stg instance +tcp_ports: ['22', '80', '443', '2003', '4001'] +# volumes: copr-be-stg-data +volumes: [ {volume_id: 'a3325e22-bdc0-4eeb-bb73-45365ddb7a01', device: '/dev/vdc'} ] + +inventory_tenant: persistent +# name of machine in OpenStack +inventory_instance_name: copr-be-stg +cloud_networks: + # persistent-net + - net-id: "67b77354-39a4-43de-b007-bb813ac5c35f" + # coprdev-net + - net-id: "a440568f-b90a-46af-8ca6-d8fa743a7e7a" + +# Copr vars +copr_hostbase: copr-be-stg +_copr_be_conf: copr-be.conf-stg diff --git a/inventory/host_vars/copr-dist-git-stg.fedorainfracloud.org b/inventory/host_vars/copr-dist-git-stg.fedorainfracloud.org new file mode 100644 index 0000000000..d02c129dfb --- /dev/null +++ b/inventory/host_vars/copr-dist-git-stg.fedorainfracloud.org @@ -0,0 +1,22 @@ +--- +instance_type: ms1.small +image: "{{ fedora27_x86_64 }}" +keypair: fedora-admin-20130801 +security_group: web-80-anywhere-persistent,ssh-anywhere-persistent,default,all-icmp-persistent +zone: nova +hostbase: copr-dist-git-stg- +public_ip: 209.132.184.57 +root_auth_users: ryanlerch pingou msuchy dturecek frostyx clime +description: dist-git for copr service - stg instance +tcp_ports: [22, 80] +# volumes: 
copr-dist-git-stg +volumes: [ {volume_id: '0cb506b9-3931-47fa-b6d3-a0ad2614f221', device: '/dev/vdc'} ] +inventory_tenant: persistent +# name of machine in OpenStack +inventory_instance_name: copr-dist-git-stg +cloud_networks: + # persistent-net + - net-id: "67b77354-39a4-43de-b007-bb813ac5c35f" + +# Copr vars +copr_hostbase: copr-dist-git-stg diff --git a/inventory/host_vars/copr-dist-git.fedorainfracloud.org b/inventory/host_vars/copr-dist-git.fedorainfracloud.org index e88f14097e..7e427a7f5d 100644 --- a/inventory/host_vars/copr-dist-git.fedorainfracloud.org +++ b/inventory/host_vars/copr-dist-git.fedorainfracloud.org @@ -6,7 +6,7 @@ security_group: web-80-anywhere-persistent,ssh-anywhere-persistent,default,all-i zone: nova hostbase: copr-dist-git public_ip: 209.132.184.163 -root_auth_users: msuchy asamalik clime frostyx +root_auth_users: msuchy clime frostyx description: dist-git for copr service - prod instance tcp_ports: [22, 80] # volumes: copr-dist-git, copr-dist-git-log diff --git a/inventory/host_vars/copr-fe.cloud.fedoraproject.org b/inventory/host_vars/copr-fe.cloud.fedoraproject.org index 0f296321a2..b2ff09faac 100644 --- a/inventory/host_vars/copr-fe.cloud.fedoraproject.org +++ b/inventory/host_vars/copr-fe.cloud.fedoraproject.org @@ -9,7 +9,7 @@ security_group: web-80-anywhere-persistent,web-443-anywhere-persistent,ssh-anywh zone: nova hostbase: copr-fe- public_ip: 209.132.184.54 -root_auth_users: msuchy asamalik clime frostyx +root_auth_users: msuchy clime frostyx description: copr frontend server - prod instance tcp_ports: [22, 80, 443] volumes: [ {volume_id: '8f790db7-8294-4d2b-8bae-7af5961ce0f8', device: '/dev/vdc'} ] diff --git a/inventory/host_vars/copr-frontend01.stg.phx2.fedoraproject.org b/inventory/host_vars/copr-frontend01.stg.phx2.fedoraproject.org new file mode 100644 index 0000000000..f45bf57fce --- /dev/null +++ b/inventory/host_vars/copr-frontend01.stg.phx2.fedoraproject.org @@ -0,0 +1,12 @@ +--- +nm: 255.255.255.0 +gw: 10.5.128.254 
+dns: 10.5.126.21 + +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28 +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ + +volgroup: /dev/vg_guests +eth0_ip: 10.5.128.49 +vmhost: virthost02.stg.phx2.fedoraproject.org +datacenter: phx2 diff --git a/inventory/host_vars/copr-frontend02.stg.phx2.fedoraproject.org b/inventory/host_vars/copr-frontend02.stg.phx2.fedoraproject.org new file mode 100644 index 0000000000..25af190ed6 --- /dev/null +++ b/inventory/host_vars/copr-frontend02.stg.phx2.fedoraproject.org @@ -0,0 +1,12 @@ +--- +nm: 255.255.255.0 +gw: 10.5.128.254 +dns: 10.5.126.21 + +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28 +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ + +volgroup: /dev/vg_virthost16 +eth0_ip: 10.5.128.50 +vmhost: virthost05.stg.phx2.fedoraproject.org +datacenter: phx2 diff --git a/inventory/host_vars/copr-keygen-stg.fedorainfracloud.org b/inventory/host_vars/copr-keygen-stg.fedorainfracloud.org new file mode 100644 index 0000000000..e97eae9c0f --- /dev/null +++ b/inventory/host_vars/copr-keygen-stg.fedorainfracloud.org @@ -0,0 +1,22 @@ +--- +instance_type: ms1.small +image: "{{ fedora27_x86_64 }}" +keypair: fedora-admin-20130801 +# todo: remove some security groups ? 
+security_group: web-80-anywhere-persistent,web-443-anywhere-persistent,ssh-anywhere-persistent,default,all-icmp-persistent +zone: nova +hostbase: copr-keygen-stg- +public_ip: 209.132.184.56 +root_auth_users: msuchy clime frostyx dturecek +volumes: [ {volume_id: '5424ff3c-b1c6-4291-a0ed-2d30924f4f88', device: '/dev/vdc'} ] +description: copr keygen and sign host - stg instance + +inventory_tenant: persistent +# name of machine in OpenStack +inventory_instance_name: copr-keygen-stg +cloud_networks: + # persistent-net + - net-id: "67b77354-39a4-43de-b007-bb813ac5c35f" + +# Copr vars +copr_hostbase: copr-keygen-stg diff --git a/inventory/host_vars/db-koji01.stg.phx2.fedoraproject.org b/inventory/host_vars/db-koji01.stg.phx2.fedoraproject.org index 96d287f38e..e22412338d 100644 --- a/inventory/host_vars/db-koji01.stg.phx2.fedoraproject.org +++ b/inventory/host_vars/db-koji01.stg.phx2.fedoraproject.org @@ -7,8 +7,8 @@ eth0_ip: 10.5.128.98 vmhost: bvirthost01.stg.phx2.fedoraproject.org datacenter: phx2 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-27 -ks_repo: http://10.5.126.23/pub/fedora/linux/releases/27/Server/x86_64/os/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28 +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ # This is a generic list, monitored by collectd databases: diff --git a/inventory/host_vars/download-rdu01.fedoraproject.org b/inventory/host_vars/download-rdu01.fedoraproject.org index 9cb0cbe8a4..b191d87481 100644 --- a/inventory/host_vars/download-rdu01.fedoraproject.org +++ b/inventory/host_vars/download-rdu01.fedoraproject.org @@ -13,3 +13,8 @@ eth1_ip: 172.31.1.1 eth1_nm: 255.255.255.0 public_ip: 209.132.190.4 + +nagios_Check_Services: + mail: false + nrpe: false + ping: true diff --git a/inventory/host_vars/download01.phx2.fedoraproject.org b/inventory/host_vars/download01.phx2.fedoraproject.org index 0b4585f104..470bc545ee 100644 --- a/inventory/host_vars/download01.phx2.fedoraproject.org +++ 
b/inventory/host_vars/download01.phx2.fedoraproject.org @@ -1,4 +1,34 @@ --- +nm: 255.255.255.0 gw: 10.5.126.254 +dns: 10.5.126.21 + +ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-7 +ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL7-x86_64/ + +vmhost: virthost01.phx2.fedoraproject.org +volgroup: /dev/vg_guests +# +# We need this to install with 2 nics +# +virt_install_command: "{{ virt_install_command_two_nic }}" + eth0_ip: 10.5.126.93 eth1_ip: 10.5.127.101 +main_bridge: br0 +nfs_bridge: br1 + +datacenter: phx2 + +tcp_ports: [80, 443, 873] +rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}" + +nrpe_procs_warn: 1200 +nrpe_procs_crit: 1400 + +mem_size: 16384 +max_mem_size: 20480 +lvm_size: 20000 +num_cpus: 8 + +vpn: false diff --git a/inventory/host_vars/download02.phx2.fedoraproject.org b/inventory/host_vars/download02.phx2.fedoraproject.org index 03ff674206..a82c89030f 100644 --- a/inventory/host_vars/download02.phx2.fedoraproject.org +++ b/inventory/host_vars/download02.phx2.fedoraproject.org @@ -1,4 +1,34 @@ --- +nm: 255.255.255.0 gw: 10.5.126.254 +dns: 10.5.126.21 + +ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-7 +ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL7-x86_64/ + +vmhost: virthost02.phx2.fedoraproject.org +volgroup: /dev/vg_guests +# +# We need this to install with 2 nics +# +virt_install_command: "{{ virt_install_command_two_nic }}" + eth0_ip: 10.5.126.94 eth1_ip: 10.5.127.102 +main_bridge: br0 +nfs_bridge: br1 + +datacenter: phx2 + +tcp_ports: [80, 443, 873] +rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}" + +nrpe_procs_warn: 1200 +nrpe_procs_crit: 1400 + +mem_size: 16384 +max_mem_size: 20480 +lvm_size: 20000 +num_cpus: 8 + +vpn: false diff --git a/inventory/host_vars/eclipse.fedorainfracloud.org b/inventory/host_vars/eclipse.fedorainfracloud.org deleted file mode 100644 index 7ffc7ff6ca..0000000000 --- a/inventory/host_vars/eclipse.fedorainfracloud.org +++ 
/dev/null @@ -1,18 +0,0 @@ ---- -image: "{{ fedora23_x86_64 }}" -instance_type: m1.small -keypair: fedora-admin-20130801 -security_group: ssh-anywhere-persistent,web-80-anywhere-persistent,web-443-anywhere-persistent,default,all-icmp-persistent -zone: nova -tcp_ports: [22, 80, 443] - -inventory_tenant: persistent -inventory_instance_name: eclipse -hostbase: eclipse -public_ip: 209.132.184.121 -root_auth_users: mbooth sopotc akurtakov -description: eclipse help for fedora eclipse addons - -cloud_networks: - # persistent-net - - net-id: "67b77354-39a4-43de-b007-bb813ac5c35f" diff --git a/inventory/host_vars/fas3-01.stg.phx2.fedoraproject.org b/inventory/host_vars/fas3-01.stg.phx2.fedoraproject.org index 8ea089496d..032f6906ac 100644 --- a/inventory/host_vars/fas3-01.stg.phx2.fedoraproject.org +++ b/inventory/host_vars/fas3-01.stg.phx2.fedoraproject.org @@ -12,6 +12,7 @@ vmhost: virthost04.stg.phx2.fedoraproject.org datacenter: phx2 nagios_Check_Services: + mail: false nrpe: false swap: false diff --git a/inventory/host_vars/fed-cloud01.cloud.fedoraproject.org b/inventory/host_vars/fed-cloud01.cloud.fedoraproject.org index 3589a63061..ce54999840 100644 --- a/inventory/host_vars/fed-cloud01.cloud.fedoraproject.org +++ b/inventory/host_vars/fed-cloud01.cloud.fedoraproject.org @@ -1,4 +1,5 @@ --- nagios_Check_Services: + mail: false nrpe: false swap: false diff --git a/inventory/host_vars/fed-cloud02.cloud.fedoraproject.org b/inventory/host_vars/fed-cloud02.cloud.fedoraproject.org index 3589a63061..ce54999840 100644 --- a/inventory/host_vars/fed-cloud02.cloud.fedoraproject.org +++ b/inventory/host_vars/fed-cloud02.cloud.fedoraproject.org @@ -1,4 +1,5 @@ --- nagios_Check_Services: + mail: false nrpe: false swap: false diff --git a/inventory/host_vars/ns13.rdu2.fedoraproject.org b/inventory/host_vars/ns13.rdu2.fedoraproject.org index 42d5ae003e..db8de347ea 100644 --- a/inventory/host_vars/ns13.rdu2.fedoraproject.org +++ b/inventory/host_vars/ns13.rdu2.fedoraproject.org @@ 
-28,6 +28,7 @@ ansible_ssh_common_args: '-o ProxyCommand="ssh -W %h:%p -q root@bastion13.fedora nagios_Check_Services: nrpe: false + mail: false sshd: false swap: false ping: false diff --git a/inventory/host_vars/docker-candidate-registry01.phx2.fedoraproject.org b/inventory/host_vars/oci-candidate-registry01.phx2.fedoraproject.org similarity index 65% rename from inventory/host_vars/docker-candidate-registry01.phx2.fedoraproject.org rename to inventory/host_vars/oci-candidate-registry01.phx2.fedoraproject.org index bd87883da8..b3b2a45845 100644 --- a/inventory/host_vars/docker-candidate-registry01.phx2.fedoraproject.org +++ b/inventory/host_vars/oci-candidate-registry01.phx2.fedoraproject.org @@ -2,8 +2,8 @@ nm: 255.255.255.0 gw: 10.5.125.254 dns: 10.5.126.21 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7-docker-reg -ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28-docker-reg +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ volgroup: /dev/vg_guests eth0_ip: 10.5.125.57 vmhost: bvirthost01.phx2.fedoraproject.org diff --git a/inventory/host_vars/docker-candidate-registry01.stg.phx2.fedoraproject.org b/inventory/host_vars/oci-candidate-registry01.stg.phx2.fedoraproject.org similarity index 69% rename from inventory/host_vars/docker-candidate-registry01.stg.phx2.fedoraproject.org rename to inventory/host_vars/oci-candidate-registry01.stg.phx2.fedoraproject.org index e833527ea2..e7a3d5e905 100644 --- a/inventory/host_vars/docker-candidate-registry01.stg.phx2.fedoraproject.org +++ b/inventory/host_vars/oci-candidate-registry01.stg.phx2.fedoraproject.org @@ -2,8 +2,8 @@ nm: 255.255.255.0 gw: 10.5.128.254 dns: 10.5.126.21 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-27-docker-reg -ks_repo: http://10.5.126.23/pub/fedora/linux/releases/27/Server/x86_64/os/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28-docker-reg +ks_repo: 
http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ volgroup: /dev/vg_guests eth0_ip: 10.5.128.122 vmhost: virthost04.stg.phx2.fedoraproject.org diff --git a/inventory/host_vars/docker-registry03.phx2.fedoraproject.org b/inventory/host_vars/oci-registry01.phx2.fedoraproject.org similarity index 60% rename from inventory/host_vars/docker-registry03.phx2.fedoraproject.org rename to inventory/host_vars/oci-registry01.phx2.fedoraproject.org index db421414ba..c72afc5082 100644 --- a/inventory/host_vars/docker-registry03.phx2.fedoraproject.org +++ b/inventory/host_vars/oci-registry01.phx2.fedoraproject.org @@ -2,10 +2,10 @@ nm: 255.255.255.0 gw: 10.5.125.254 dns: 10.5.126.21 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7-docker-reg -ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28-docker-reg +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ volgroup: /dev/vg_guests -eth0_ip: 10.5.125.78 +eth0_ip: 10.5.125.77 vmhost: bvirthost04.phx2.fedoraproject.org datacenter: phx2 diff --git a/inventory/host_vars/docker-registry01.stg.phx2.fedoraproject.org b/inventory/host_vars/oci-registry01.stg.phx2.fedoraproject.org similarity index 69% rename from inventory/host_vars/docker-registry01.stg.phx2.fedoraproject.org rename to inventory/host_vars/oci-registry01.stg.phx2.fedoraproject.org index 351e7b0428..57bb8eacab 100644 --- a/inventory/host_vars/docker-registry01.stg.phx2.fedoraproject.org +++ b/inventory/host_vars/oci-registry01.stg.phx2.fedoraproject.org @@ -2,8 +2,8 @@ nm: 255.255.255.0 gw: 10.5.128.254 dns: 10.5.126.21 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-27-docker-reg -ks_repo: http://10.5.126.23/pub/fedora/linux/releases/27/Server/x86_64/os/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28-docker-reg +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ volgroup: /dev/vg_guests eth0_ip: 10.5.128.123 vmhost: 
virthost04.stg.phx2.fedoraproject.org diff --git a/inventory/host_vars/docker-registry02.phx2.fedoraproject.org b/inventory/host_vars/oci-registry02.phx2.fedoraproject.org similarity index 60% rename from inventory/host_vars/docker-registry02.phx2.fedoraproject.org rename to inventory/host_vars/oci-registry02.phx2.fedoraproject.org index 0f13c692d8..1d6c44c915 100644 --- a/inventory/host_vars/docker-registry02.phx2.fedoraproject.org +++ b/inventory/host_vars/oci-registry02.phx2.fedoraproject.org @@ -2,10 +2,10 @@ nm: 255.255.255.0 gw: 10.5.125.254 dns: 10.5.126.21 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7-docker-reg -ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28-docker-reg +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ volgroup: /dev/vg_guests -eth0_ip: 10.5.125.77 +eth0_ip: 10.5.125.78 vmhost: bvirthost01.phx2.fedoraproject.org datacenter: phx2 diff --git a/inventory/host_vars/docker-registry02.stg.phx2.fedoraproject.org b/inventory/host_vars/oci-registry02.stg.phx2.fedoraproject.org similarity index 69% rename from inventory/host_vars/docker-registry02.stg.phx2.fedoraproject.org rename to inventory/host_vars/oci-registry02.stg.phx2.fedoraproject.org index 446f9f6015..04cb1a4bcc 100644 --- a/inventory/host_vars/docker-registry02.stg.phx2.fedoraproject.org +++ b/inventory/host_vars/oci-registry02.stg.phx2.fedoraproject.org @@ -2,8 +2,8 @@ nm: 255.255.255.0 gw: 10.5.128.254 dns: 10.5.126.21 -ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-27-docker-reg -ks_repo: http://10.5.126.23/pub/fedora/linux/releases/27/Server/x86_64/os/ +ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-28-docker-reg +ks_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ volgroup: /dev/vg_guests eth0_ip: 10.5.128.124 vmhost: virthost01.stg.phx2.fedoraproject.org diff --git a/inventory/host_vars/osbs-control01.phx2.fedoraproject.org 
b/inventory/host_vars/osbs-control01.phx2.fedoraproject.org index c9788e04e7..664fdec6b3 100644 --- a/inventory/host_vars/osbs-control01.phx2.fedoraproject.org +++ b/inventory/host_vars/osbs-control01.phx2.fedoraproject.org @@ -13,3 +13,7 @@ datacenter: phx2 mem_size: 4096 max_mem_size: 4096 + +nagios_Check_Services: + nrpe: false + mail: false diff --git a/inventory/host_vars/relay-stg.ci.centos.org b/inventory/host_vars/relay-stg.ci.centos.org index d24ffdb3d5..50248bcf68 100644 --- a/inventory/host_vars/relay-stg.ci.centos.org +++ b/inventory/host_vars/relay-stg.ci.centos.org @@ -62,7 +62,7 @@ fedmsg_prefix: org.centos fedmsg_env: stg nagios_Check_Services: - monitor: false + mail: false nrpe: false sshd: false swap: false diff --git a/inventory/host_vars/relay.ci.centos.org b/inventory/host_vars/relay.ci.centos.org index 8c98190eb6..2648a8d162 100644 --- a/inventory/host_vars/relay.ci.centos.org +++ b/inventory/host_vars/relay.ci.centos.org @@ -62,7 +62,7 @@ fedmsg_prefix: org.centos fedmsg_env: prod nagios_Check_Services: - monitor: false + mail: false nrpe: false sshd: false swap: false diff --git a/inventory/host_vars/sign-vault05.phx2.fedoraproject.org b/inventory/host_vars/sign-vault05.phx2.fedoraproject.org new file mode 100644 index 0000000000..640cf9e319 --- /dev/null +++ b/inventory/host_vars/sign-vault05.phx2.fedoraproject.org @@ -0,0 +1,10 @@ +--- +gw: 10.5.125.254 +eth0_ip: 10.5.125.83 + +install_noc: noc01.phx2.fedoraproject.org +install_mac: D0:94:66:45:87:C1 +# Inside this, expect /vmlinuz and /initrd.img +install_binpath: /uefi/x86_64/f28 +install_ks: http://10.5.126.23/repo/rhel/ks/buildhw-f28 +install_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ diff --git a/inventory/host_vars/sign-vault06.phx2.fedoraproject.org b/inventory/host_vars/sign-vault06.phx2.fedoraproject.org new file mode 100644 index 0000000000..b2a9e8d90b --- /dev/null +++ b/inventory/host_vars/sign-vault06.phx2.fedoraproject.org @@ -0,0 +1,10 @@ +--- 
+gw: 10.5.125.254 +eth0_ip: 10.5.125.84 + +install_noc: noc01.phx2.fedoraproject.org +install_mac: D0:94:66:45:A1:62 +# Inside this, expect /vmlinuz and /initrd.img +install_binpath: /uefi/x86_64/f28 +install_ks: http://10.5.126.23/repo/rhel/ks/buildhw-f28 +install_repo: http://10.5.126.23/pub/fedora/linux/releases/28/Server/x86_64/os/ diff --git a/inventory/host_vars/tang01.phx2.fedoraproject.org b/inventory/host_vars/tang01.phx2.fedoraproject.org new file mode 100644 index 0000000000..e76277b4e5 --- /dev/null +++ b/inventory/host_vars/tang01.phx2.fedoraproject.org @@ -0,0 +1,4 @@ +--- +volgroup: /dev/vg_guests +eth0_ip: 10.5.126.3 +vmhost: virthost12.phx2.fedoraproject.org diff --git a/inventory/host_vars/tang02.phx2.fedoraproject.org b/inventory/host_vars/tang02.phx2.fedoraproject.org new file mode 100644 index 0000000000..4a39e14829 --- /dev/null +++ b/inventory/host_vars/tang02.phx2.fedoraproject.org @@ -0,0 +1,4 @@ +--- +volgroup: /dev/vg_guests +eth0_ip: 10.5.126.4 +vmhost: virthost14.phx2.fedoraproject.org diff --git a/inventory/host_vars/undercloud02.cloud.fedoraproject.org b/inventory/host_vars/undercloud02.cloud.fedoraproject.org index 7e7d3b0e27..1adcb27f86 100644 --- a/inventory/host_vars/undercloud02.cloud.fedoraproject.org +++ b/inventory/host_vars/undercloud02.cloud.fedoraproject.org @@ -17,7 +17,7 @@ vmhost: cloud-noc01.cloud.fedoraproject.org datacenter: newcloud nagios_Check_Services: - monitor: false + mail: false nrpe: false sshd: false swap: false diff --git a/inventory/host_vars/virthost-rdu01.fedoraproject.org b/inventory/host_vars/virthost-rdu01.fedoraproject.org index 70bf538d09..50b5998788 100644 --- a/inventory/host_vars/virthost-rdu01.fedoraproject.org +++ b/inventory/host_vars/virthost-rdu01.fedoraproject.org @@ -13,3 +13,7 @@ br1_nm: 255.255.255.0 vpn: true public_ip: 209.132.190.11 + +nagios_Check_Services: + nrpe: false + mail: false diff --git a/inventory/host_vars/virthost01.stg.phx2.fedoraproject.org 
b/inventory/host_vars/virthost01.stg.phx2.fedoraproject.org index ff86a10d32..89831a9add 100644 --- a/inventory/host_vars/virthost01.stg.phx2.fedoraproject.org +++ b/inventory/host_vars/virthost01.stg.phx2.fedoraproject.org @@ -8,3 +8,10 @@ br0_ip: 10.5.128.40 br0_nm: 255.255.255.0 br1_ip: 10.5.127.202 br1_nm: 255.255.255.0 + +install_noc: noc01.phx2.fedoraproject.org +install_mac: 24-6E-96-B1-C7-F4 +# Inside this, expect /vmlinuz and /initrd.img +install_binpath: /uefi/x86_64/el7 +install_ks: http://10.5.126.23/repo/rhel/ks/hardware-rhel-7-08disk +install_repo: http://10.5.126.23/http://10.5.126.23/repo/rhel/RHEL7-x86_64/ diff --git a/inventory/inventory b/inventory/inventory index be90c8653d..c86a45d613 100644 --- a/inventory/inventory +++ b/inventory/inventory @@ -229,7 +229,6 @@ mdapi01.phx2.fedoraproject.org mdapi01.stg.phx2.fedoraproject.org [minimal] -bkernel03.phx2.fedoraproject.org bkernel04.phx2.fedoraproject.org [modernpaste] @@ -260,6 +259,8 @@ sign-bridge01.stg.phx2.fedoraproject.org #sign-vault03.phx2.fedoraproject.org #sign-vault04.phx2.fedoraproject.org #sign-vault01.stg.phx2.fedoraproject.org +sign-vault05.phx2.fedoraproject.org +sign-vault06.phx2.fedoraproject.org [autocloud-web] autocloud-web01.phx2.fedoraproject.org @@ -329,6 +330,8 @@ badges-web01.stg.phx2.fedoraproject.org blockerbugs01.stg.phx2.fedoraproject.org bodhi-backend01.stg.phx2.fedoraproject.org busgateway01.stg.phx2.fedoraproject.org +copr-frontend01.stg.phx2.fedoraproject.org +copr-frontend02.stg.phx2.fedoraproject.org datagrepper01.stg.phx2.fedoraproject.org elections01.stg.phx2.fedoraproject.org fedocal01.stg.phx2.fedoraproject.org @@ -344,7 +347,6 @@ download02.phx2.fedoraproject.org download03.phx2.fedoraproject.org download04.phx2.fedoraproject.org download05.phx2.fedoraproject.org -download06.phx2.fedoraproject.org [download-ibiblio] download-ib01.fedoraproject.org @@ -361,7 +363,8 @@ download05.phx2.fedoraproject.org #download-rdu01.fedoraproject.org [download-phx2-virtual] 
-download06.phx2.fedoraproject.org +download01.phx2.fedoraproject.org +download02.phx2.fedoraproject.org [download:children] @@ -553,6 +556,10 @@ qa12.qa.fedoraproject.org qa13.qa.fedoraproject.org qa14.qa.fedoraproject.org +[tang] +tang01.phx2.fedoraproject.org +tang02.phx2.fedoraproject.org + [torrent] torrent02.fedoraproject.org @@ -751,17 +758,22 @@ buildvm-s390x-01.stg.s390.fedoraproject.org busgateway01.stg.phx2.fedoraproject.org composer.stg.phx2.fedoraproject.org copr-be-dev.cloud.fedoraproject.org +copr-be-stg.fedorainfracloud.org copr-dist-git-dev.fedorainfracloud.org +copr-dist-git-stg.fedorainfracloud.org copr-fe-dev.cloud.fedoraproject.org +copr-frontend01.stg.phx2.fedoraproject.org +copr-frontend02.stg.phx2.fedoraproject.org copr-keygen-dev.cloud.fedoraproject.org +copr-keygen-stg.fedorainfracloud.org datagrepper01.stg.phx2.fedoraproject.org db-fas01.stg.phx2.fedoraproject.org db-koji01.stg.phx2.fedoraproject.org db01.stg.phx2.fedoraproject.org db03.stg.phx2.fedoraproject.org -docker-candidate-registry01.stg.phx2.fedoraproject.org -docker-registry01.stg.phx2.fedoraproject.org -docker-registry02.stg.phx2.fedoraproject.org +oci-candidate-registry01.stg.phx2.fedoraproject.org +oci-registry01.stg.phx2.fedoraproject.org +oci-registry02.stg.phx2.fedoraproject.org elections01.stg.phx2.fedoraproject.org fas01.stg.phx2.fedoraproject.org fedimg01.stg.phx2.fedoraproject.org @@ -860,6 +872,8 @@ proxy10.phx2.fedoraproject.org proxy101.phx2.fedoraproject.org proxy110.phx2.fedoraproject.org openqa-stg01.qa.fedoraproject.org +tang01.phx2.fedoraproject.org +tang02.phx2.fedoraproject.org [statscache:children] statscache-web @@ -1210,8 +1224,6 @@ java-deptools.fedorainfracloud.org developer.fedorainfracloud.org # fedimg-dev development instance fedimg-dev.fedorainfracloud.org -# eclipse help center - ticket 5293 -eclipse.fedorainfracloud.org # iddev iddev.fedorainfracloud.org # commops - ticket 5380 @@ -1291,15 +1303,6 @@ bvirthost buildvmhost virthost-comm 
-[copr-front-stg] -copr-fe-dev.cloud.fedoraproject.org - -[copr-back-stg] -copr-be-dev.cloud.fedoraproject.org - -[copr-keygen-stg] -copr-keygen-dev.cloud.fedoraproject.org - [copr-keygen] copr-keygen.cloud.fedoraproject.org @@ -1312,9 +1315,31 @@ copr-be.cloud.fedoraproject.org [copr-dist-git] copr-dist-git.fedorainfracloud.org -[copr-dist-git-stg] +[copr-front-dev] +copr-fe-dev.cloud.fedoraproject.org + +[copr-back-dev] +copr-be-dev.cloud.fedoraproject.org + +[copr-keygen-dev] +copr-keygen-dev.cloud.fedoraproject.org + +[copr-dist-git-dev] copr-dist-git-dev.fedorainfracloud.org +[copr-front-stg] +copr-frontend01.stg.phx2.fedoraproject.org +copr-frontend02.stg.phx2.fedoraproject.org + +[copr-back-stg] +copr-be-stg.fedorainfracloud.org + +[copr-keygen-stg] +copr-keygen-stg.fedorainfracloud.org + +[copr-dist-git-stg] +copr-dist-git-stg.fedorainfracloud.org + [copr:children] copr-front copr-back @@ -1327,6 +1352,12 @@ copr-back-stg copr-keygen-stg copr-dist-git-stg +[copr-dev:children] +copr-front-dev +copr-back-dev +copr-keygen-dev +copr-dist-git-dev + [pagure] pagure01.fedoraproject.org @@ -1438,28 +1469,32 @@ os-control [ci] ci-cc-rdu01.fedoraproject.org -# Docker (docker-distribution) registries -[docker-registry] -docker-registry02.phx2.fedoraproject.org -docker-registry03.phx2.fedoraproject.org -docker-candidate-registry01.phx2.fedoraproject.org +# registries +[oci-registry] +oci-registry01.phx2.fedoraproject.org +oci-registry02.phx2.fedoraproject.org +oci-candidate-registry01.phx2.fedoraproject.org -[docker-registry-gluster-stg] -docker-registry01.stg.phx2.fedoraproject.org -docker-registry02.stg.phx2.fedoraproject.org +[oci-registry-gluster] +oci-registry01.phx2.fedoraproject.org +oci-registry02.phx2.fedoraproject.org -[docker-registry-stg] -docker-registry01.stg.phx2.fedoraproject.org -docker-registry02.stg.phx2.fedoraproject.org -docker-candidate-registry01.stg.phx2.fedoraproject.org +[oci-registry-gluster-stg] +oci-registry01.stg.phx2.fedoraproject.org 
+oci-registry02.stg.phx2.fedoraproject.org + +[oci-registry-stg] +oci-registry01.stg.phx2.fedoraproject.org +oci-registry02.stg.phx2.fedoraproject.org +oci-candidate-registry01.stg.phx2.fedoraproject.org ## Not the candidate just the top registry [moby-registry] -docker-registry02.phx2.fedoraproject.org +oci-registry01.phx2.fedoraproject.org ## Not the candidate just the top registry [moby-registry-stg] -docker-registry01.stg.phx2.fedoraproject.org +oci-registry01.stg.phx2.fedoraproject.org [webservers:children] proxies diff --git a/master.yml b/master.yml index 45677193cf..036b3056ce 100644 --- a/master.yml +++ b/master.yml @@ -36,9 +36,10 @@ - import_playbook: /srv/web/infra/ansible/playbooks/groups/copr-backend.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/copr-dist-git.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/copr-frontend.yml +- import_playbook: /srv/web/infra/ansible/playbooks/groups/copr-frontend-cloud.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/copr-keygen.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/datagrepper.yml -- import_playbook: /srv/web/infra/ansible/playbooks/groups/docker-registry.yml +- import_playbook: /srv/web/infra/ansible/playbooks/groups/oci-registry.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/dns.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/download.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/elections.yml @@ -98,6 +99,7 @@ - import_playbook: /srv/web/infra/ansible/playbooks/groups/statscache.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/sundries.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/tagger.yml +- import_playbook: /srv/web/infra/ansible/playbooks/groups/tang.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/taskotron.yml - import_playbook: /srv/web/infra/ansible/playbooks/groups/taskotron-client-hosts.yml - import_playbook: 
/srv/web/infra/ansible/playbooks/groups/torrent.yml @@ -117,6 +119,7 @@ - import_playbook: /srv/web/infra/ansible/playbooks/openshift-apps/waiverdb.yml - import_playbook: /srv/web/infra/ansible/playbooks/openshift-apps/coreos.yml # These need work to finish and complete and are all stg currently. +#- import_playbook: /srv/web/infra/ansible/playbooks/openshift-apps/koschei.yml #- import_playbook: /srv/web/infra/ansible/playbooks/openshift-apps/modernpaste.yml #- import_playbook: /srv/web/infra/ansible/playbooks/openshift-apps/rats.yml #- import_playbook: /srv/web/infra/ansible/playbooks/openshift-apps/release-monitoring.yml @@ -132,7 +135,6 @@ - import_playbook: /srv/web/infra/ansible/playbooks/hosts/commops.fedorainfracloud.org.yml - import_playbook: /srv/web/infra/ansible/playbooks/hosts/data-analysis01.phx2.fedoraproject.org.yml - import_playbook: /srv/web/infra/ansible/playbooks/hosts/developer.fedorainfracloud.org.yml -- import_playbook: /srv/web/infra/ansible/playbooks/hosts/eclipse.fedorainfracloud.org.yml - import_playbook: /srv/web/infra/ansible/playbooks/hosts/elastic-dev.fedorainfracloud.org.yml - import_playbook: /srv/web/infra/ansible/playbooks/hosts/fas2-dev.fedorainfracloud.org.yml - import_playbook: /srv/web/infra/ansible/playbooks/hosts/fas3-dev.fedorainfracloud.org.yml diff --git a/playbooks/groups/bodhi-backend.yml b/playbooks/groups/bodhi-backend.yml index 23b056d7b7..22c66701a8 100644 --- a/playbooks/groups/bodhi-backend.yml +++ b/playbooks/groups/bodhi-backend.yml @@ -64,10 +64,10 @@ service: bodhi host: "bodhi.stg.fedoraproject.org" when: env == "staging" - - role: manage-container-images + - role: push-container-registry cert_dest_dir: "/etc/docker/certs.d/registry{{ env_suffix }}.fedoraproject.org" - cert_src: "{{private}}/files/docker-registry/{{env}}/docker-registry-internal.pem" - key_src: "{{private}}/files/docker-registry/{{env}}/docker-registry-internal.key" + cert_src: 
"{{private}}/files/docker-registry/{{env}}/pki/issued/containerstable.crt" + key_src: "{{private}}/files/docker-registry/{{env}}/pki/private/containerstable.key" certs_group: apache diff --git a/playbooks/groups/certgetter.yml b/playbooks/groups/certgetter.yml index 65c2e97a70..95290922d0 100644 --- a/playbooks/groups/certgetter.yml +++ b/playbooks/groups/certgetter.yml @@ -21,8 +21,10 @@ - { role: openvpn/client, when: env != "staging" } - tasks: + pre_tasks: - import_tasks: "{{ tasks_path }}/yumrepos.yml" + + tasks: - import_tasks: "{{ tasks_path }}/2fa_client.yml" - import_tasks: "{{ tasks_path }}/motd.yml" diff --git a/playbooks/groups/copr-backend.yml b/playbooks/groups/copr-backend.yml index 67fe7d8772..4b9a03e312 100644 --- a/playbooks/groups/copr-backend.yml +++ b/playbooks/groups/copr-backend.yml @@ -1,6 +1,5 @@ - name: check/create instance - #hosts: copr-back - hosts: copr-back:copr-back-stg + hosts: copr-back-dev:copr-back-stg:copr-back user: root gather_facts: False @@ -13,7 +12,7 @@ - import_tasks: "{{ tasks_path }}/persistent_cloud.yml" - name: cloud basic setup - hosts: copr-back:copr-back-stg + hosts: copr-back-dev:copr-back-stg:copr-back user: root gather_facts: True vars_files: @@ -28,7 +27,7 @@ hostname: name="{{copr_hostbase}}.cloud.fedoraproject.org" - name: provision instance - hosts: copr-back:copr-back-stg + hosts: copr-back-dev:copr-back-stg:copr-back user: root gather_facts: True diff --git a/playbooks/groups/copr-dist-git.yml b/playbooks/groups/copr-dist-git.yml index fd6224cb5a..658c7aa442 100644 --- a/playbooks/groups/copr-dist-git.yml +++ b/playbooks/groups/copr-dist-git.yml @@ -1,5 +1,5 @@ - name: check/create instance - hosts: copr-dist-git-stg:copr-dist-git + hosts: copr-dist-git-dev:copr-dist-git-stg:copr-dist-git user: root gather_facts: False @@ -13,7 +13,7 @@ - import_tasks: "{{ tasks_path }}/persistent_cloud.yml" - name: cloud basic setup - hosts: copr-dist-git-stg:copr-dist-git + hosts: 
copr-dist-git-dev:copr-dist-git-stg:copr-dist-git user: root gather_facts: True vars_files: @@ -27,7 +27,7 @@ hostname: name="{{copr_hostbase}}.fedorainfracloud.org" - name: provision instance - hosts: copr-dist-git-stg:copr-dist-git + hosts: copr-dist-git-dev:copr-dist-git-stg:copr-dist-git user: root gather_facts: True diff --git a/playbooks/hosts/eclipse.fedorainfracloud.org.yml b/playbooks/groups/copr-frontend-cloud.yml similarity index 54% rename from playbooks/hosts/eclipse.fedorainfracloud.org.yml rename to playbooks/groups/copr-frontend-cloud.yml index a6213b3bcd..b1ccfa9fca 100644 --- a/playbooks/hosts/eclipse.fedorainfracloud.org.yml +++ b/playbooks/groups/copr-frontend-cloud.yml @@ -1,35 +1,42 @@ - name: check/create instance - hosts: eclipse.fedorainfracloud.org + hosts: copr-front-dev:copr-front + # hosts: copr-front gather_facts: False vars_files: - /srv/web/infra/ansible/vars/global.yml - - /srv/private/ansible/vars.yml + - "/srv/private/ansible/vars.yml" - /srv/web/infra/ansible/vars/fedora-cloud.yml - /srv/private/ansible/files/openstack/passwords.yml tasks: - import_tasks: "{{ tasks_path }}/persistent_cloud.yml" - handlers: - - import_tasks: "{{ handlers_path }}/restart_services.yml" - -- name: setup all the things - hosts: eclipse.fedorainfracloud.org +- name: cloud basic setup + hosts: copr-front-dev:copr-front + # hosts: copr-front gather_facts: True vars_files: - /srv/web/infra/ansible/vars/global.yml - - /srv/private/ansible/vars.yml - - /srv/private/ansible/files/openstack/passwords.yml - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - pre_tasks: - - import_tasks: "{{ tasks_path }}/yumrepos.yml" - - roles: - - basessh + - "/srv/private/ansible/vars.yml" tasks: - import_tasks: "{{ tasks_path }}/cloud_setup_basic.yml" + - import_tasks: "{{ tasks_path }}/yumrepos.yml" - name: set hostname (required by some services, at least postfix need it) - hostname: name="{{inventory_hostname}}" + hostname: 
name="{{copr_hostbase}}.cloud.fedoraproject.org" + +- name: provision instance + hosts: copr-front:copr-front-dev + # hosts: copr-front + gather_facts: True + + vars_files: + - /srv/web/infra/ansible/vars/global.yml + - "/srv/private/ansible/vars.yml" + - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml + + roles: + - base + - copr/frontend-cloud + - nagios_client diff --git a/playbooks/groups/copr-frontend.yml b/playbooks/groups/copr-frontend.yml index f669bbc15d..7a2028d382 100644 --- a/playbooks/groups/copr-frontend.yml +++ b/playbooks/groups/copr-frontend.yml @@ -1,34 +1,9 @@ -- name: check/create instance - hosts: copr-front-stg:copr-front - # hosts: copr-front - gather_facts: False +--- +- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=copr-front-stg" - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/fedora-cloud.yml - - /srv/private/ansible/files/openstack/passwords.yml - - tasks: - - import_tasks: "{{ tasks_path }}/persistent_cloud.yml" - -- name: cloud basic setup - hosts: copr-front-stg:copr-front - # hosts: copr-front - gather_facts: True - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - tasks: - - import_tasks: "{{ tasks_path }}/cloud_setup_basic.yml" - - import_tasks: "{{ tasks_path }}/yumrepos.yml" - - name: set hostname (required by some services, at least postfix need it) - hostname: name="{{copr_hostbase}}.cloud.fedoraproject.org" - -- name: provision instance - hosts: copr-front:copr-front-stg - # hosts: copr-front +- name: provision copr frontend + hosts: copr-front-stg + user: root gather_facts: True vars_files: @@ -36,7 +11,25 @@ - "/srv/private/ansible/vars.yml" - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml + pre_tasks: + - import_tasks: "{{ tasks_path }}/yumrepos.yml" + roles: - - base - - copr/frontend - - nagios_client + - base + - rkhunter + - nagios_client + 
- hosts + - fas_client + - collectd/base + - { role: openvpn/client, when: env != "staging" } + - { role: sudo, sudoers: "{{ private }}/files/sudo/copr-sudoers" } + - redis + - mod_wsgi + - copr/frontend + + tasks: + - import_tasks: "{{ tasks_path }}/2fa_client.yml" + - import_tasks: "{{ tasks_path }}/motd.yml" + + handlers: + - import_tasks: "{{ handlers_path }}/restart_services.yml" diff --git a/playbooks/groups/copr-keygen.yml b/playbooks/groups/copr-keygen.yml index 4ec2e5afe4..f0f82127e8 100644 --- a/playbooks/groups/copr-keygen.yml +++ b/playbooks/groups/copr-keygen.yml @@ -1,6 +1,5 @@ - name: check/create instance - hosts: copr-keygen-stg:copr-keygen - #hosts: copr-keygen + hosts: copr-keygen-dev:copr-keygen-stg:copr-keygen gather_facts: False vars_files: @@ -21,8 +20,7 @@ when: facts is failed - name: cloud basic setup - hosts: copr-keygen-stg:copr-keygen - # hosts: copr-keygen + hosts: copr-keygen-dev:copr-keygen-stg:copr-keygen gather_facts: True vars_files: - /srv/web/infra/ansible/vars/global.yml @@ -35,8 +33,7 @@ hostname: name="{{copr_hostbase}}.cloud.fedoraproject.org" - name: provision instance - hosts: copr-keygen:copr-keygen-stg - #hosts: copr-keygen + hosts: copr-keygen-dev:copr-keygen-stg:copr-keygen gather_facts: True vars_files: diff --git a/playbooks/groups/docker-registry.yml b/playbooks/groups/oci-registry.yml similarity index 70% rename from playbooks/groups/docker-registry.yml rename to playbooks/groups/oci-registry.yml index a35e6e2f2c..5dd4b10d15 100644 --- a/playbooks/groups/docker-registry.yml +++ b/playbooks/groups/oci-registry.yml @@ -1,8 +1,8 @@ # create an osbs server -- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=docker-registry:docker-registry-stg" +- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=oci-registry:oci-registry-stg" - name: make the box be real - hosts: docker-registry:docker-registry-stg + hosts: oci-registry:oci-registry-stg user: root 
gather_facts: True @@ -35,8 +35,8 @@ - name: set up gluster on stg hosts: - - docker-registry01.stg.phx2.fedoraproject.org - - docker-registry02.stg.phx2.fedoraproject.org + - oci-registry01.stg.phx2.fedoraproject.org + - oci-registry02.stg.phx2.fedoraproject.org user: root gather_facts: True @@ -47,16 +47,16 @@ roles: - role: gluster/consolidated - gluster_brick_dir: /srv/glusterfs/ + gluster_brick_dir: /srv/glusterfs gluster_mount_dir: /srv/docker/ gluster_brick_name: registry - gluster_server_group: docker-registry-gluster-stg + gluster_server_group: oci-registry-gluster-stg tags: gluster - name: set up gluster on prod hosts: - - docker-registry02.phx2.fedoraproject.org - - docker-registry03.phx2.fedoraproject.org + - oci-registry01.phx2.fedoraproject.org + - oci-registry02.phx2.fedoraproject.org user: root gather_facts: True @@ -66,28 +66,15 @@ - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml roles: - - role: gluster/server - glusterservername: gluster - username: "{{ registry_gluster_username_prod }}" - password: "{{ registry_gluster_password_prod }}" - owner: root - group: root - datadir: /srv/glusterfs/registry - - - role: gluster/client - glusterservername: gluster - servers: - - docker-registry02.phx2.fedoraproject.org - - docker-registry03.phx2.fedoraproject.org - username: "{{ registry_gluster_username_prod }}" - password: "{{ registry_gluster_password_prod }}" - owner: root - group: root - mountdir: "/srv/docker" - + - role: gluster/consolidated + gluster_brick_dir: /srv/glusterfs + gluster_mount_dir: /srv/docker/ + gluster_brick_name: registry + gluster_server_group: oci-registry-gluster + tags: gluster - name: setup docker distribution registry - hosts: docker-registry:docker-registry-stg + hosts: oci-registry:oci-registry-stg vars_files: - /srv/web/infra/ansible/vars/global.yml - /srv/private/ansible/vars.yml @@ -122,8 +109,6 @@ # Setup compose-x86-01 push docker images to registry - { role: push-docker, - docker_cert_name: 
"containerstable", - docker_cert_dir: "/etc/docker/certs.d/registry.stg.fedoraproject.org", candidate_registry: "candidate-registry.stg.fedoraproject.org", candidate_registry_osbs_username: "{{candidate_registry_osbs_stg_username}}", candidate_registry_osbs_password: "{{candidate_registry_osbs_stg_password}}", @@ -132,8 +117,6 @@ } - { role: push-docker, - docker_cert_name: "containerstable", - docker_cert_dir: "/etc/docker/certs.d/registry.fedoraproject.org", candidate_registry: "candidate-registry.fedoraproject.org", candidate_registry_osbs_username: "{{candidate_registry_osbs_prod_username}}", candidate_registry_osbs_password: "{{candidate_registry_osbs_prod_password}}", diff --git a/playbooks/groups/odcs.yml b/playbooks/groups/odcs.yml index 36c639c00c..ea821fa672 100644 --- a/playbooks/groups/odcs.yml +++ b/playbooks/groups/odcs.yml @@ -58,14 +58,14 @@ roles: - role: gluster/consolidated - gluster_brick_dir: /srv/glusterfs/ + gluster_brick_dir: /srv/glusterfs gluster_mount_dir: /srv/odcs gluster_brick_name: odcs gluster_server_group: odcs-stg tags: gluster when: env == 'staging' - role: gluster/consolidated - gluster_brick_dir: /srv/glusterfs/ + gluster_brick_dir: /srv/glusterfs gluster_mount_dir: /srv/odcs gluster_brick_name: odcs gluster_server_group: odcs diff --git a/playbooks/groups/os-cluster.yml b/playbooks/groups/os-cluster.yml index 7f85649310..f0d20f4ee3 100644 --- a/playbooks/groups/os-cluster.yml +++ b/playbooks/groups/os-cluster.yml @@ -103,11 +103,11 @@ - { role: ansible-ansible-openshift-ansible, cluster_inventory_filename: "cluster-inventory-stg", - openshift_release: "v3.9", + openshift_release: "v3.10", openshift_ansible_path: "/root/openshift-ansible", openshift_ansible_pre_playbook: "playbooks/prerequisites.yml", openshift_ansible_playbook: "playbooks/deploy_cluster.yml", - openshift_ansible_version: "openshift-ansible-3.9.30-1", + openshift_ansible_version: "openshift-ansible-3.10.38-1", openshift_ansible_ssh_user: root, 
openshift_ansible_install_examples: false, openshift_ansible_containerized_deploy: false, @@ -132,11 +132,11 @@ - { role: ansible-ansible-openshift-ansible, cluster_inventory_filename: "cluster-inventory", - openshift_release: "v3.9", + openshift_release: "v3.10", openshift_ansible_path: "/root/openshift-ansible", openshift_ansible_pre_playbook: "playbooks/prerequisites.yml", openshift_ansible_playbook: "playbooks/deploy_cluster.yml", - openshift_ansible_version: "openshift-ansible-3.9.30-1", + openshift_ansible_version: "openshift-ansible-3.10.35-1", openshift_ansible_ssh_user: root, openshift_ansible_install_examples: false, openshift_ansible_containerized_deploy: false, diff --git a/playbooks/groups/osbs-cluster.yml b/playbooks/groups/osbs-cluster.yml index b7ffd2dd50..6030bfbb1c 100644 --- a/playbooks/groups/osbs-cluster.yml +++ b/playbooks/groups/osbs-cluster.yml @@ -270,46 +270,6 @@ - "/srv/private/ansible/vars.yml" - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - pre_tasks: - - name: Make sure python2-docker-py is not installed - dnf: - name: python2-docker-py - state: absent - - roles: - - { - role: osbs-common, - osbs_manage_firewalld: false, - } - - { - role: push-docker, - candidate_registry: "{{docker_registry}}", - candidate_registry_osbs_username: "{{candidate_registry_osbs_stg_username}}", - candidate_registry_osbs_password: "{{candidate_registry_osbs_stg_password}}", - when: env == "staging" - } - - { - role: push-docker, - candidate_registry: "{{docker_registry}}", - candidate_registry_osbs_username: "{{candidate_registry_osbs_prod_username}}", - candidate_registry_osbs_password: "{{candidate_registry_osbs_prod_password}}", - when: env == "production" - } - - { - role: "manage-container-images", - cert_dest_dir: "/etc/docker/certs.d/candidate-registry{{ env_suffix }}.fedoraproject.org", - cert_src: "{{private}}/files/docker-registry/{{env}}/docker-registry-internal.pem", - key_src: 
"{{private}}/files/docker-registry/{{env}}/docker-registry-internal.key", - when: env == "staging" - } - - - handlers: - - name: restart dnsmasq - service: - name: dnsmasq - state: restarted - tasks: - name: Ensures /etc/dnsmasq.d/ dir exists file: path="/etc/dnsmasq.d/" state=directory @@ -372,7 +332,6 @@ osbs_secret_files: - source: "{{ private }}/files/osbs/{{ env }}/odcs-oidc-token" dest: token - when: env == "staging" tags: - osbs-worker-namespace @@ -446,7 +405,6 @@ osbs_secret_files: - source: "{{ private }}/files/osbs/{{ env }}/odcs-oidc-token" dest: token - when: env == "staging" tags: - osbs-orchestrator-namespace @@ -504,7 +462,8 @@ - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml pre_tasks: - - set_fact: + - name: Create the username:password string needed by the template + set_fact: auth_info_prod: "{{candidate_registry_osbs_prod_username}}:{{candidate_registry_osbs_prod_password}}" auth_info_stg: "{{candidate_registry_osbs_stg_username}}:{{candidate_registry_osbs_stg_password}}" @@ -542,7 +501,8 @@ - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml pre_tasks: - - set_fact: + - name: Create the username:password string needed by the template + set_fact: auth_info_prod: "{{candidate_registry_osbs_prod_username}}:{{candidate_registry_osbs_prod_password}}" auth_info_stg: "{{candidate_registry_osbs_stg_username}}:{{candidate_registry_osbs_stg_password}}" @@ -588,36 +548,7 @@ koji_builder_user: dockerbuilder osbs_builder_user: builder - - handlers: - - name: oc secrets new - command: "oc secrets new koji cert={{ koji_cert_path }} ca={{ koji_ca_cert_path }} serverca={{ koji_ca_cert_path }}" - environment: "{{ osbs_environment }}" - notify: oc secrets add - - - name: oc secrets add - command: "oc secrets add serviceaccount/{{ osbs_builder_user }} secrets/koji --for=mount" - environment: "{{ osbs_environment }}" - tasks: - - name: Ensure koji dockerbuilder cert path exists - file: - path: "{{ koji_pki_dir }}" - state: "directory" - mode: 
0400 - - - name: Add koji dockerbuilder cert for Content Generator import - copy: - src: "{{private}}/files/koji/containerbuild.pem" - dest: "{{ koji_cert_path }}" - notify: oc secrets new - - - name: Add koji dockerbuilder ca cert for Content Generator import - copy: - src: "{{private}}/files/koji/buildercerts/fedora-ca.cert" - dest: "{{ koji_ca_cert_path }}" - notify: oc secrets new - - name: cron entry to clean up old builds copy: src: "{{files}}/osbs/cleanup-old-osbs-builds" @@ -706,7 +637,7 @@ src: "{{item}}" dest: "/etc/osbs/buildroot/" owner: root - mode: 600 + mode: 0600 with_items: - "{{files}}/osbs/worker_customize.json" - "{{files}}/osbs/orchestrator_customize.json" @@ -803,26 +734,5 @@ register: docker_pull_fedora changed_when: "'Downloaded newer image' in docker_pull_fedora.stdout" - - name: register origin_version_out rpm query - command: "rpm -q origin --qf '%{Version}'" - register: origin_version_out - check_mode: no - changed_when: False - - -- name: Post-Install image stream refresh - hosts: osbs-masters[0]:osbs-masters-stg[0] - tags: - - osbs-post-install - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - /srv/private/ansible/vars.yml - - /srv/private/ansible/files/openstack/passwords.yml - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - tasks: - name: enable nrpe for monitoring (noc01) iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.5.126.41 state=present jump=ACCEPT - -# - name: enable nrpe for monitoring (noc01.stg) -# iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=1#0.5.126.2 state=present jump=ACCEPT diff --git a/playbooks/groups/postgresql-server.yml b/playbooks/groups/postgresql-server.yml index 8d6cb1fe50..a9f2abbeaa 100644 --- a/playbooks/groups/postgresql-server.yml +++ b/playbooks/groups/postgresql-server.yml @@ -7,7 +7,7 @@ # Once the instance exists, configure it. 
- name: configure postgresql server system - hosts: db-datanommer02.phx2.fedoraproject.org:db-qa01.qa.fedoraproject.org:db-koji01.phx2.fedoraproject.org:db-fas01.stg.phx2.fedoraproject.org:db-fas01.phx2.fedoraproject.org:db01.phx2.fedoraproject.org:db01.stg.phx2.fedoraproject.org:db-qa02.qa.fedoraproject.org:db-koji01.stg.phx2.fedoraproject.or:db-qa03.qa.fedoraproject.org + hosts: db-datanommer02.phx2.fedoraproject.org:db-qa01.qa.fedoraproject.org:db-koji01.phx2.fedoraproject.org:db-fas01.stg.phx2.fedoraproject.org:db-fas01.phx2.fedoraproject.org:db01.phx2.fedoraproject.org:db01.stg.phx2.fedoraproject.org:db-qa02.qa.fedoraproject.org:db-koji01.stg.phx2.fedoraproject.org:db-qa03.qa.fedoraproject.org user: root gather_facts: True diff --git a/playbooks/groups/releng-compose.yml b/playbooks/groups/releng-compose.yml index 33a5e82c0c..b6adc0211c 100644 --- a/playbooks/groups/releng-compose.yml +++ b/playbooks/groups/releng-compose.yml @@ -54,25 +54,31 @@ tags: - releng - { - role: "manage-container-images", + role: "push-container-registry", cert_dest_dir: "/etc/docker/certs.d/registry.stg.fedoraproject.org", cert_src: "{{private}}/files/docker-registry/{{env}}/docker-registry-internal.pem", key_src: "{{private}}/files/docker-registry/{{env}}/docker-registry-internal.key", when: env == "staging" } + - { + role: "push-container-registry", + cert_dest_dir: "/etc/docker/certs.d/registry.fedoraproject.org", + cert_src: "{{private}}/files/docker-registry/{{env}}/pki/issued/containerstable.crt", + key_src: "{{private}}/files/docker-registry/{{env}}/pki/private/containerstable.key", + when: env == "production" + } + - { + role: push-docker, + candidate_registry: "candidate-registry.stg.fedoraproject.org", + candidate_registry_osbs_username: "{{candidate_registry_osbs_stg_username}}", + candidate_registry_osbs_password: "{{candidate_registry_osbs_stg_password}}", + when: env == "staging" + } - { role: push-docker, candidate_registry: "candidate-registry.fedoraproject.org", 
candidate_registry_osbs_username: "{{candidate_registry_osbs_prod_username}}", candidate_registry_osbs_password: "{{candidate_registry_osbs_prod_password}}", - docker_cert_dir: "/etc/docker/certs.d/candidate-registry.fedoraproject.org", - when: env == "production" - } - - { - role: "manage-container-images", - cert_dest_dir: "/etc/docker/certs.d/registry.fedoraproject.org", - cert_src: "{{private}}/files/koji/containerstable.cert.pem", - key_src: "{{private}}/files/koji/containerstable.key.pem", when: env == "production" } diff --git a/playbooks/groups/tang.yml b/playbooks/groups/tang.yml new file mode 100644 index 0000000000..8c722cd94d --- /dev/null +++ b/playbooks/groups/tang.yml @@ -0,0 +1,31 @@ +- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=tang" + +- name: make the box be real + hosts: tang + user: root + gather_facts: True + + vars_files: + - /srv/web/infra/ansible/vars/global.yml + - "/srv/private/ansible/vars.yml" + - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml + + pre_tasks: + - import_tasks: "{{ tasks_path }}/yumrepos.yml" + + roles: + - base + - rkhunter + - nagios_client + - hosts + - fas_client + - rsyncd + - sudo + - tang + + tasks: + - import_tasks: "{{ tasks_path }}/2fa_client.yml" + - import_tasks: "{{ tasks_path }}/motd.yml" + + handlers: + - import_tasks: "{{ handlers_path }}/restart_services.yml" diff --git a/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml b/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml index 437d3bbc8c..e54fc7a776 100644 --- a/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml +++ b/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml @@ -1142,15 +1142,15 @@ remote_ip_prefix: "0.0.0.0/0" with_items: "{{all_tenants}}" - - name: "Create 'docker-registry-5000-anywhere' security group" + - name: "Create 'oci-registry-5000-anywhere' security group" neutron_sec_group: login_username: "admin" login_password: "{{ ADMIN_PASS }}" login_tenant_name: "admin" 
auth_url: "https://{{controller_publicname}}:35357/v2.0" state: "present" - name: 'docker-registry-5000-anywhere-{{item}}' - description: "allow docker-registry-5000 from anywhere" + name: 'oci-registry-5000-anywhere-{{item}}' + description: "allow oci-registry-5000 from anywhere" tenant_name: "{{item}}" rules: - direction: "ingress" diff --git a/playbooks/include/proxies-redirects.yml b/playbooks/include/proxies-redirects.yml index 17e5c1d6f9..21b1583857 100644 --- a/playbooks/include/proxies-redirects.yml +++ b/playbooks/include/proxies-redirects.yml @@ -189,6 +189,8 @@ shortname: copr website: copr.fedoraproject.org target: https://copr.fedorainfracloud.org/ + when: env != "staging" + tags: copr - role: httpd/redirect shortname: join-fedora @@ -760,3 +762,12 @@ website: cloud.fedoraproject.org path: /fedora-atomic-latest.x86_64.qcow2 target: https://download.fedoraproject.org/pub/fedora/linux/releases/22/Cloud/x86_64/Images/Fedora-Cloud-Atomic-22-20150521.x86_64.qcow2 + + # Team Silverblue + - role: httpd/redirect + shortname: docsteamsilverblue + website: docs.teamsilverblue.org + path: / + target: https://docs.fedoraproject.org/en-US/fedora-silverblue/ + tags: + - docs.teamsilverblue.org diff --git a/playbooks/include/proxies-reverseproxy.yml b/playbooks/include/proxies-reverseproxy.yml index 104d2e8321..2ed1a09d6c 100644 --- a/playbooks/include/proxies-reverseproxy.yml +++ b/playbooks/include/proxies-reverseproxy.yml @@ -36,6 +36,15 @@ localpath: /api remotepath: /api proxyurl: https://copr.fedorainfracloud.org + when: env != "staging" + tags: copr + + - role: httpd/reverseproxy + website: copr.fedoraproject.org + destname: copr + proxyurl: http://localhost:10070 + when: env == "staging" + tags: copr - role: httpd/reverseproxy website: nagios.fedoraproject.org @@ -293,6 +302,14 @@ remotepath: /koschei proxyurl: "{{ varnish_url }}" + - role: httpd/reverseproxy + website: koschei.fedoraproject.org + destname: koschei + # haproxy entry for os-nodes-frontend + 
proxyurl: http://localhost:10065 + keephost: true + tags: koschei + - role: httpd/reverseproxy website: apps.fedoraproject.org destname: mdapi @@ -727,6 +744,15 @@ keephost: true tags: silverblue + - role: httpd/reverseproxy + website: stg.release-monitoring.org + destname: stg.release-monitoring + # haproxy entry for os-nodes-frontend + proxyurl: http://localhost:10065 + keephost: true + tags: release-monitoring.org + when: env == "staging" + - role: httpd/reverseproxy website: data-analysis.fedoraproject.org destname: awstats diff --git a/playbooks/include/proxies-websites.yml b/playbooks/include/proxies-websites.yml index 731bf9e288..8013c539ec 100644 --- a/playbooks/include/proxies-websites.yml +++ b/playbooks/include/proxies-websites.yml @@ -137,7 +137,6 @@ - www.projectofedora.org - www.getfedora.com - getfedora.com - - www.getfedora.org - fedoraplayground.org - fedoraplayground.com @@ -292,9 +291,10 @@ - role: httpd/website site_name: copr.fedoraproject.org - ssl: true sslonly: true + server_aliases: [copr.stg.fedoraproject.org] cert_name: "{{wildcard_cert_name}}" + tags: copr - role: httpd/website site_name: bugz.fedoraproject.org @@ -417,6 +417,14 @@ tags: - whatcanidoforfedora.org + - role: httpd/website + site_name: docs.teamsilverblue.org + ssl: true + sslonly: true + certbot: true + tags: + - docs.teamsilverblue.org + - role: httpd/website site_name: fedoramagazine.org server_aliases: [www.fedoramagazine.org stg.fedoramagazine.org] @@ -843,6 +851,12 @@ server_aliases: [greenwave.stg.fedoraproject.org] cert_name: "{{wildcard_cert_name}}" + - role: httpd/website + site_name: koschei.fedoraproject.org + sslonly: true + server_aliases: [koschei.stg.fedoraproject.org] + cert_name: "{{wildcard_cert_name}}" + - role: httpd/website site_name: waiverdb.fedoraproject.org sslonly: true @@ -867,6 +881,14 @@ server_aliases: [silverblue.stg.fedoraproject.org] cert_name: "{{wildcard_cert_name}}" + - role: httpd/website + site_name: stg.release-monitoring.org + 
sslonly: true + certbot: true + tags: + - release-monitoring.org + when: env == "staging" + # fedorahosted is retired. We have the site here so we can redirect it. - role: httpd/website @@ -896,6 +918,17 @@ ssl: true sslonly: true certbot: true + certbot_addhost: pkgs02.phx2.fedoraproject.org tags: - pkgs.fedoraproject.org when: env == "production" and "phx2" in inventory_hostname + + - role: httpd/website + site_name: pkgs.stg.fedoraproject.org + ssl: true + sslonly: true + certbot: true + certbot_addhost: pkgs01.stg.phx2.fedoraproject.org + tags: + - pkgs.fedoraproject.org + when: env == "staging" and "phx2" in inventory_hostname diff --git a/playbooks/manual/sign-vault.yml b/playbooks/manual/sign-vault.yml index 9a54454211..8b33245d6b 100644 --- a/playbooks/manual/sign-vault.yml +++ b/playbooks/manual/sign-vault.yml @@ -36,7 +36,7 @@ - base - rkhunter - serial-console - - sigul/server +# - sigul/server tasks: - import_tasks: "{{ tasks_path }}/yumrepos.yml" diff --git a/playbooks/manual/upgrade/bodhi.yml b/playbooks/manual/upgrade/bodhi.yml index 57e3518386..59a1cae7bc 100644 --- a/playbooks/manual/upgrade/bodhi.yml +++ b/playbooks/manual/upgrade/bodhi.yml @@ -15,7 +15,7 @@ fail: msg: "There are composes in progress." 
any_errors_fatal: true - when: "composes.stdout != '{\"composes\": []}'" + when: "composes.stdout != '{\"composes\": []}' and env != 'staging'" - name: push packages out hosts: bodhi-backend:bodhi-backend-stg:bodhi2:bodhi2-stg diff --git a/playbooks/manual/upgrade/copr.yml b/playbooks/manual/upgrade/copr.yml new file mode 100644 index 0000000000..2e87172b07 --- /dev/null +++ b/playbooks/manual/upgrade/copr.yml @@ -0,0 +1,34 @@ +- name: upgrade copr packages + hosts: copr-front-stg + tasks: + - name: clean dnf metadata + command: dnf clean all + args: + warn: False + - name: create dnf metadata cache + command: dnf makecache + args: + warn: False + - name: list installed copr packages + dnf: + list: "copr*" + disablerepo: "*" + register: copr_packages + - name: update copr packages + dnf: + name: "{{ item.name }}" + state: latest + register: copr_upgrade + with_items: "{{ copr_packages.results }}" + - name: stop httpd + service: name="httpd" state=stopped + when: copr_upgrade.changed + - name: run db migration + become: yes + become_user: copr-fe + command: alembic-3 upgrade head + args: + chdir: /usr/share/copr/coprs_frontend/ + when: copr_upgrade.changed + - name: start httpd + service: name="httpd" state=started diff --git a/playbooks/openshift-apps/bodhi.yml b/playbooks/openshift-apps/bodhi.yml index fa6aff5862..cf1366a076 100644 --- a/playbooks/openshift-apps/bodhi.yml +++ b/playbooks/openshift-apps/bodhi.yml @@ -51,13 +51,13 @@ app: bodhi template: buildconfig.yml objectname: buildconfig.yml - bodhi_version: 3.8.0-1.fc27 + bodhi_version: 3.9.0-1.fc27 when: env == "staging" - role: openshift/object app: bodhi template: buildconfig.yml objectname: buildconfig.yml - bodhi_version: 3.8.0-1.fc27 + bodhi_version: 3.9.0-1.fc27 when: env != "staging" - role: openshift/start-build app: bodhi diff --git a/playbooks/openshift-apps/koschei.yml b/playbooks/openshift-apps/koschei.yml new file mode 100644 index 0000000000..da53100241 --- /dev/null +++ 
b/playbooks/openshift-apps/koschei.yml @@ -0,0 +1,37 @@ +- name: provision koschei + hosts: os-masters-stg[0] + user: root + gather_facts: False + + vars_files: + - /srv/web/infra/ansible/vars/global.yml + - "/srv/private/ansible/vars.yml" + - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml + + roles: + - role: openshift/project + app: koschei + description: koschei + appowners: + - mizdebsk + - role: openshift/imagestream + app: koschei + imagename: koschei-web + - role: openshift/object + app: koschei + template: buildconfig.yml + objectname: buildconfig.yml + - role: openshift/object + app: koschei + template: service.yml + objectname: service.yml + - role: openshift/object + app: koschei + template: deploymentconfig.yml + objectname: deploymentconfig.yml + - role: openshift/route + app: koschei + routename: koschei-web + host: "koschei{{ env_suffix }}.fedoraproject.org" + serviceport: web + servicename: koschei-web diff --git a/playbooks/openshift-apps/release-monitoring.yml b/playbooks/openshift-apps/release-monitoring.yml index 8aaadf6da7..ffe869b35d 100644 --- a/playbooks/openshift-apps/release-monitoring.yml +++ b/playbooks/openshift-apps/release-monitoring.yml @@ -14,6 +14,7 @@ description: release-monitoring appowners: - jcline + - zlopez - role: openshift/object app: release-monitoring file: imagestream.yml @@ -44,3 +45,7 @@ - role: openshift/rollout app: release-monitoring dcname: release-monitoring-web + - role: openshift/object + app: release-monitoring + file: cron.yml + objectname: cron.yml diff --git a/roles/anitya/frontend/templates/anitya.cfg b/roles/anitya/frontend/templates/anitya.cfg index e947dcf431..c9bda4fdc0 100644 --- a/roles/anitya/frontend/templates/anitya.cfg +++ b/roles/anitya/frontend/templates/anitya.cfg @@ -18,6 +18,7 @@ ANITYA_WEB_ADMINS = [ 'http://ralph.id.fedoraproject.org/', 'http://pingou.id.fedoraproject.org/', 'http://jcline.id.fedoraproject.org/', + 'http://zlopez.id.fedoraproject.org/', 
'http://tibbs.id.fedoraproject.org/', 'http://carlwgeorge.id.fedoraproject.org/', ] diff --git a/roles/ansible-ansible-openshift-ansible/tasks/main.yml b/roles/ansible-ansible-openshift-ansible/tasks/main.yml index 989bd0a391..6beb1f7a58 100644 --- a/roles/ansible-ansible-openshift-ansible/tasks/main.yml +++ b/roles/ansible-ansible-openshift-ansible/tasks/main.yml @@ -20,21 +20,50 @@ tags: - ansible-ansible-openshift-ansible - ansible-ansible-openshift-ansible-config + ignore_errors: true -- name: generate the inventory file +- debug: + var: os_app_url + +- debug: + var: openshift_app_subdomain + +- debug: + var: openshift_master_default_subdomain + +- name: generate the inventory file (staging) + template: + src: "cluster-inventory-stg.j2" + dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}" + tags: + - ansible-ansible-openshift-ansible + - ansible-ansible-openshift-ansible-config + when: env == 'staging' and inventory_hostname.startswith('os-') + +- name: generate the inventory file (production) + template: + src: "cluster-inventory-prod.j2" + dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}" + tags: + - ansible-ansible-openshift-ansible + - ansible-ansible-openshift-ansible-config + when: env == 'production' and inventory_hostname.startswith('os-') + +- name: generate the inventory file (osbs) template: src: "cluster-inventory.j2" dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}" tags: - ansible-ansible-openshift-ansible - ansible-ansible-openshift-ansible-config + when: inventory_hostname.startswith('osbs-') - name: run ansible prereqs playbook shell: "ansible-playbook {{ openshift_ansible_pre_playbook }} -i {{ cluster_inventory_filename }}" args: chdir: "{{ openshift_ansible_path }}" register: run_ansible_out - when: openshift_ansible_pre_playbook is defined + when: openshift_ansible_pre_playbook is defined and not openshift_ansible_upgrading is defined tags: - ansible-ansible-openshift-ansible @@ 
-45,6 +74,7 @@ register: run_ansible_out tags: - ansible-ansible-openshift-ansible + when: not openshift_ansible_upgrading is defined - name: display run ansible stdout_lines debug: diff --git a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-prod.j2 b/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-prod.j2 new file mode 100644 index 0000000000..31e8db2c46 --- /dev/null +++ b/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-prod.j2 @@ -0,0 +1,1148 @@ +# This is an example of an OpenShift-Ansible host inventory that provides the +# minimum recommended configuration for production use. This includes 3 masters, +# two infra nodes, two compute nodes, and an haproxy load balancer to load +# balance traffic to the API servers. For a truly production environment you +# should use an external load balancing solution that itself is highly available. + +[masters] +{% for host in groups[openshift_cluster_masters_group] %} +{{ host }} +{% endfor %} + +[etcd] +{% for host in groups[openshift_cluster_masters_group] %} +{{ host }} +{% endfor %} + +[nodes] +{% for host in groups[openshift_cluster_masters_group] %} +{{ host }} openshift_node_group_name='node-config-master' +{% endfor %} +{% for host in groups[openshift_cluster_nodes_group] %} +{{ host }} openshift_node_group_name='node-config-compute' +{% endfor %} + +#[nfs] +#ose3-master1.test.example.com + +#[lb] +#ose3-lb.test.example.com + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +#lb +#nfs + +[OSEv3:vars] + +openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}] 
+############################################################################### +# Common/ Required configuration variables follow # +############################################################################### +# SSH user, this user should allow ssh based auth without requiring a +# password. If using ssh key based auth, then the key should be managed by an +# ssh agent. +ansible_user={{openshift_ansible_ssh_user}} + +# If ansible_user is not root, ansible_become must be set to true and the +# user must be configured for passwordless sudo +#ansible_become=yes + +# Specify the deployment type. Valid values are origin and openshift-enterprise. +#openshift_deployment_type=origin +openshift_deployment_type={{openshift_deployment_type}} + +# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we +# rely on the version running on the first master. Works best for containerized installs where we can usually +# use this to lookup the latest exact version of the container images, which is the tag actually used to configure +# the cluster. For RPM installations we just verify the version detected in your configured repos matches this +# release. +openshift_release={{openshift_release}} + +{% if openshift_master_ha is defined %} +{% if openshift_master_ha %} +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. 
+openshift_master_cluster_method=native +openshift_master_cluster_hostname={{openshift_internal_cluster_url}} +openshift_master_cluster_public_hostname={{openshift_cluster_url}} +{% endif %} +{% endif %} + +# default subdomain to use for exposed routes, you should have wildcard dns +# for *.apps.test.example.com that points at your infra nodes which will run +# your router +{% if openshift_app_subdomain is defined %} +openshift_master_default_subdomain={{openshift_app_subdomain}} +{% endif %} + +############################################################################### +# Additional configuration variables follow # +############################################################################### + +# Debug level for all OpenShift components (Defaults to 2) +debug_level={{openshift_debug_level}} + +# Specify an exact container image tag to install or configure. +# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. +# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. +#openshift_image_tag=v3.10.0 +openshift_image_tag={{openshift_release}} + +# Specify an exact rpm version to install or configure. +# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. +# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. 
+#openshift_pkg_version=-3.10.0 +openshift_pkg_version="-3.10.14" + +# If using Atomic Host, you may specify system container image registry for the nodes: +#system_images_registry="docker.io" +# when openshift_deployment_type=='openshift-enterprise' +#system_images_registry="registry.access.redhat.com" + +# Manage openshift example imagestreams and templates during install and upgrade +#openshift_install_examples=true +{% if openshift_ansible_install_examples is defined %} +openshift_install_examples={{openshift_ansible_install_examples}} +{% endif %} + +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_templates={'login': '/path/to/login-template.html'} +# openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead. +#openshift_master_oauth_template=/path/to/login-template.html + +# Configure imagePolicyConfig in the master config +# See: https://docs.openshift.org/latest/admin_guide/image_policy.html +#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true} + +# Configure master API rate limits for external clients +#openshift_master_external_ratelimit_qps=200 +#openshift_master_external_ratelimit_burst=400 +# Configure master API rate limits for loopback clients +#openshift_master_loopback_ratelimit_qps=300 +#openshift_master_loopback_ratelimit_burst=600 + +# Install and run cri-o. 
+#openshift_use_crio=False +#openshift_use_crio_only=False +{% if openshift_ansible_use_crio is defined %} +openshift_use_crio={{ openshift_ansible_use_crio }} +{% endif %} +{% if openshift_ansible_use_crio_only is defined %} +openshift_use_crio_only={{ openshift_ansible_crio_only }} +{% endif %} +# The following two variables are used when openshift_use_crio is True +# and cleans up after builds that pass through docker. When openshift_use_crio is True +# these variables are set to the defaults shown. You may override them here. +# NOTE: You will still need to tag crio nodes with your given label(s)! +# Enable docker garbage collection when using cri-o +#openshift_crio_enable_docker_gc=True +# Node Selectors to run the garbage collection +#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'} + +# Items added, as is, to end of /etc/sysconfig/docker OPTIONS +# Default value: "--log-driver=journald" +#openshift_docker_options="-l warn --ipv6=false" + +# Specify exact version of Docker to configure or upgrade to. +# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10. +# docker_version="1.12.1" + +# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. +# Uncomment below to disable; for example if your kernel does not support the +# Docker overlay/overlay2 storage drivers with SELinux enabled. +#openshift_docker_selinux_enabled=False + +# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. +# docker_upgrade=False + +# Specify a list of block devices to be formatted and mounted on the nodes +# during prerequisites.yml. For each hash, "device", "path", "filesystem" are +# required. To add devices only on certain classes of node, redefine +# container_runtime_extra_storage as a group var. 
+#container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]' + +# Enable etcd debug logging, defaults to false +# etcd_debug=true +# Set etcd log levels by package +# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" + +# Upgrade Hooks +# +# Hooks are available to run custom tasks at various points during a cluster +# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using +# absolute paths, if not the path will be treated as relative to the file where the +# hook is actually used. +# +# Tasks to run before each master is upgraded. +# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml +# +# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible +# upgrade steps, but before we restart system/services. +# openshift_master_upgrade_hook=/usr/share/custom/master.yml +# +# Tasks to run after each master is upgraded and system/services have been restarted. +# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml + +# Cluster Image Source (registry) configuration +# openshift-enterprise default is 'registry.access.redhat.com/openshift3/ose-${component}:${version}' +# origin default is 'docker.io/openshift/origin-${component}:${version}' +#oreg_url=example.com/openshift3/ose-${component}:${version} +# If oreg_url points to a registry other than registry.access.redhat.com we can +# modify image streams to point at that registry by setting the following to true +#openshift_examples_modify_imagestreams=true +# Add insecure and blocked registries to global docker configuration +#openshift_docker_insecure_registries=registry.example.com +#openshift_docker_blocked_registries=registry.hacker.com +# You may also configure additional default registries for docker, however this +# is discouraged. Instead you should make use of fully qualified image names. 
+#openshift_docker_additional_registries=registry.example.com + +# If oreg_url points to a registry requiring authentication, provide the following: +#oreg_auth_user=some_user +#oreg_auth_password='my-pass' +# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any affect. +# oreg_auth_pass should be generated from running docker login. +# To update registry auth credentials, uncomment the following: +#oreg_auth_credentials_replace: True + +# OpenShift repository configuration +#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}] +#openshift_repos_enable_testing=false + +# If the image for etcd needs to be pulled from anywhere else than registry.access.redhat.com, e.g. in +# a disconnected and containerized installation, use osm_etcd_image to specify the image to use: +#osm_etcd_image=rhel7/etcd + +# htpasswd auth +#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] +# Defining htpasswd users +#openshift_master_htpasswd_users={'user1': '', 'user2': ''} +# or +#openshift_master_htpasswd_file= + +{% if openshift_auth_profile == "osbs" %} +openshift_master_manage_htpasswd=false +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift_htpasswd_file }}'}] +{% endif %} + +{% if openshift_auth_profile == "fedoraidp" %} +openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "extraScopes": ["profile", "email", 
"https://id.fedoraproject.org/scope/groups"], "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}] +{% endif %} + +{% if openshift_auth_profile == "fedoraidp-stg" %} +openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}] +{% endif %} + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] +# +# Configure LDAP CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the LDAPPasswordIdentityProvider. 
+# +#openshift_master_ldap_ca= +# or +#openshift_master_ldap_ca_file= + +# OpenID auth +#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] +# +# Configure OpenID CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the OpenIDIdentityProvider. +# +#openshift_master_openid_ca= +# or +#openshift_master_openid_ca_file= + +# Request header auth +#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] +# +# Configure request header CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "clientCA" +# key set within the RequestHeaderIdentityProvider. 
+# +#openshift_master_request_header_ca= +# or +#openshift_master_request_header_ca_file= + +# CloudForms Management Engine (ManageIQ) App Install +# +# Enables installation of MIQ server. Recommended for dedicated +# clusters only. See roles/openshift_management/README.md for instructions +# and requirements. +#openshift_management_install_management=False + +# Cloud Provider Configuration +# +# Note: You may make use of environment variables rather than store +# sensitive configuration within the ansible inventory. +# For example: +#openshift_cloudprovider_aws_access_key="{ lookup('env','AWS_ACCESS_KEY_ID') }" +#openshift_cloudprovider_aws_secret_key="{ lookup('env','AWS_SECRET_ACCESS_KEY') }" +# +# AWS +#openshift_cloudprovider_kind=aws +# Note: IAM profiles may be used instead of storing API credentials on disk. +#openshift_cloudprovider_aws_access_key=aws_access_key_id +#openshift_cloudprovider_aws_secret_key=aws_secret_access_key +# +# Openstack +#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ +#openshift_cloudprovider_openstack_username=username +#openshift_cloudprovider_openstack_password=password +#openshift_cloudprovider_openstack_domain_id=domain_id +#openshift_cloudprovider_openstack_domain_name=domain_name +#openshift_cloudprovider_openstack_tenant_id=tenant_id +#openshift_cloudprovider_openstack_tenant_name=tenant_name +#openshift_cloudprovider_openstack_region=region +#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id +# +# Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting +#openshift_cloudprovider_openstack_blockstorage_version=v2 +# +# GCE +#openshift_cloudprovider_kind=gce +# Note: When using GCE, openshift_gcp_project and openshift_gcp_prefix must be +# defined. 
+# openshift_gcp_project is the project-id +#openshift_gcp_project= +# openshift_gcp_prefix is a unique string to identify each openshift cluster. +#openshift_gcp_prefix= +#openshift_gcp_multizone=False +# Note: To enable nested virtualization in gcp use the following variable and url +#openshift_gcp_licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" +# Additional details regarding nested virtualization are available: +# https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances +# +# vSphere +#openshift_cloudprovider_kind=vsphere +#openshift_cloudprovider_vsphere_username=username +#openshift_cloudprovider_vsphere_password=password +#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host +#openshift_cloudprovider_vsphere_datacenter=datacenter +#openshift_cloudprovider_vsphere_datastore=datastore +#openshift_cloudprovider_vsphere_folder=optional_folder_name + + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Configure additional projects +#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# If an external load balancer is used public hostname should resolve to +# external load balancer address +#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com + +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# Override the default pod 
eviction timeout +#openshift_master_pod_eviction_timeout=5m + +# Override the default oauth tokenConfig settings: +# openshift_master_access_token_max_seconds=86400 +# openshift_master_auth_token_max_seconds=500 + +# Override master servingInfo.maxRequestsInFlight +#openshift_master_max_requests_inflight=500 + +# Override master and node servingInfo.minTLSVersion and .cipherSuites +# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 +# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants +#openshift_master_min_tls_version=VersionTLS12 +#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] +# +#openshift_node_min_tls_version=VersionTLS12 +#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] + +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "node-role.kubernetes.io/infra=true". +# +# Example: +# [nodes] +# node.example.com openshift_node_group_name="node-config-infra" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'node-role.kubernetes.io/infra=true' +#openshift_hosted_router_selector='node-role.kubernetes.io/infra=true' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. 
+#openshift_hosted_router_replicas=2 +# +# Router force subdomain (optional) +# A router path format to force on all routes used by this router +# (will ignore the route host value) +#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. +#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} +# +# Manage the OpenShift Router (optional) +#openshift_hosted_manage_router=true +# +# Router sharding support has been added and can be achieved by supplying the correct +# data to the inventory. The variable to house the data is openshift_hosted_routers +# and is in the form of a list. If no data is passed then a default router will be +# created. There are multiple combinations of router sharding. The one described +# below supports routers on separate nodes. 
+# +#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] + +# OpenShift Registry Console Options +# Override the console image prefix: +# origin default is "cockpit/", enterprise default is "openshift3/" +#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ +# origin default is "kubernetes", enterprise default is "registry-console" +#openshift_cockpit_deployer_basename=my-console +# Override image version, defaults to latest for origin, vX.Y product version for enterprise +#openshift_cockpit_deployer_version=1.4.1 + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "node-role.kubernetes.io/infra=true". +# +# Example: +# [nodes] +# node.example.com openshift_node_group_name="node-config-infra" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. 
+# Default value: 'node-role.kubernetes.io/infra=true' +#openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true' +# +# Registry replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift registry selector. +#openshift_hosted_registry_replicas=2 +# +# Validity of the auto-generated certificate in days (optional) +#openshift_hosted_registry_cert_expire_days=730 +# +# Manage the OpenShift Registry (optional) +#openshift_hosted_manage_registry=true +# Manage the OpenShift Registry Console (optional) +#openshift_hosted_manage_registry_console=true +# +# Registry Storage Options +# +# NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/registry". "exports" is +# is the name of the export served by the nfs server. "registry" is +# the name of a directory inside of "/exports". +#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +# +# External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/registry". "exports" is +# is the name of the export served by the nfs server. "registry" is +# the name of a directory inside of "/exports". 
+#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +#openshift_hosted_registry_storage_host=nfs.example.com +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +{% if env == "staging" %} +openshift_hosted_registry_storage_kind=nfs +openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com +openshift_hosted_registry_storage_nfs_directory=/ +openshift_hosted_registry_storage_volume_name=openshift-stg-registry +openshift_hosted_registry_storage_volume_size=10Gi +{% else %} +openshift_hosted_registry_storage_kind=nfs +openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com +openshift_hosted_registry_storage_nfs_directory=/ +openshift_hosted_registry_storage_volume_name=openshift-prod-registry +openshift_hosted_registry_storage_volume_size=10Gi +{% endif %} +# +# Openstack +# Volume must already exist. 
+#openshift_hosted_registry_storage_kind=openstack +#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] +#openshift_hosted_registry_storage_openstack_filesystem=ext4 +#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 +#openshift_hosted_registry_storage_volume_size=10Gi +# +# hostPath (local filesystem storage) +# Suitable for "all-in-one" or proof of concept deployments +# Must not be used for high-availability and production deployments +#openshift_hosted_registry_storage_kind=hostpath +#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] +#openshift_hosted_registry_storage_hostpath_path=/var/lib/openshift_volumes +#openshift_hosted_registry_storage_volume_size=10Gi +# +# AWS S3 +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_encrypt=false +#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id +#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id +#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true +# +# Any S3 service (Minio, ExoScale, ...): Basically the same as above +# but with regionendpoint configured +# S3 bucket must already exist. 
+#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_accesskey=access_key_id +#openshift_hosted_registry_storage_s3_secretkey=secret_access_key +#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/ +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true +# +# Additional CloudFront Options. When using CloudFront all three +# of the followingg variables must be defined. +#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/ +#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem +#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid +# vSphere Volume with vSphere Cloud Provider +# openshift_hosted_registry_storage_kind=vsphere +# openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] +# openshift_hosted_registry_storage_annotations=['volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume'] +# +# GCS Storage Bucket +#openshift_hosted_registry_storage_provider=gcs +#openshift_hosted_registry_storage_gcs_bucket=bucket01 +#openshift_hosted_registry_storage_gcs_keyfile=test.key +#openshift_hosted_registry_storage_gcs_rootdirectory=/registry + +# Metrics deployment +# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html +# +# By default metrics are not automatically deployed, set this to enable them +#openshift_metrics_install_metrics=true +{% if openshift_metrics_deploy is defined %} +{% if openshift_metrics_deploy %} +openshift_hosted_metrics_deploy=true +{% endif %} +{% endif %} +# +# Storage 
Options +# If openshift_metrics_storage_kind is unset then metrics will be stored +# in an EmptyDir volume and will be deleted when the cassandra pod terminates. +# Storage options A & B currently support only one cassandra pod which is +# generally enough for up to 1000 pods. Additional volumes can be created +# manually after the fact and metrics scaled per the docs. +# +# Option A - NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/metrics". "exports" is +# is the name of the export served by the nfs server. "metrics" is +# the name of a directory inside of "/exports". +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/metrics". "exports" is +# is the name of the export served by the nfs server. "metrics" is +# the name of a directory inside of "/exports". +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} +# +# Option C - Dynamic -- If openshift supports dynamic volume provisioning for +# your cloud platform use this. 
+#openshift_metrics_storage_kind=dynamic +# +# Other Metrics Options -- Common items you may wish to reconfigure, for the complete +# list of options please see roles/openshift_metrics/README.md +# +# Override metricsPublicURL in the master config for cluster metrics +# Defaults to https://hawkular-metrics.{openshift_master_default_subdomain}/hawkular/metrics +# Currently, you may only alter the hostname portion of the url, altering the +# `/hawkular/metrics` path will break installation of metrics. +#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com +# Configure the metrics component images # Note, these will be modified by oreg_url by default +#openshift_metrics_cassandra_image="docker.io/openshift/origin-metrics-cassandra:{ openshift_image_tag }" +#openshift_metrics_hawkular_agent_image="docker.io/openshift/origin-metrics-hawkular-openshift-agent:{ openshift_image_tag }" +#openshift_metrics_hawkular_metrics_image="docker.io/openshift/origin-metrics-hawkular-metrics:{ openshift_image_tag }" +#openshift_metrics_schema_installer_image="docker.io/openshift/origin-metrics-schema-installer:{ openshift_image_tag }" +#openshift_metrics_heapster_image="docker.io/openshift/origin-metrics-heapster:{ openshift_image_tag }" +# when openshift_deployment_type=='openshift-enterprise' +#openshift_metrics_cassandra_image="registry.access.redhat.com/openshift3/metrics-cassandra:{ openshift_image_tag }" +#openshift_metrics_hawkular_agent_image="registry.access.redhat.com/openshift3/metrics-hawkular-openshift-agent:{ openshift_image_tag }" +#openshift_metrics_hawkular_metrics_image="registry.access.redhat.com/openshift3/metrics-hawkular-metrics:{ openshift_image_tag }" +#openshift_metrics_schema_installer_image="registry.access.redhat.com/openshift3/metrics-schema-installer:{ openshift_image_tag }" +#openshift_metrics_heapster_image="registry.access.redhat.com/openshift3/metrics-heapster:{ openshift_image_tag }" +# +# StorageClass +# openshift_storageclass_name=gp2 
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'} +# openshift_storageclass_mount_options=['dir_mode=0777', 'file_mode=0777'] +# openshift_storageclass_reclaim_policy="Delete" +# +# PersistentLocalStorage +# If Persistent Local Storage is wanted, this boolean can be defined to True. +# This will create all necessary configuration to use persistent storage on nodes. +#openshift_persistentlocalstorage_enabled=False +#openshift_persistentlocalstorage_classes=[] +#openshift_persistentlocalstorage_path=/mnt/local-storage +#openshift_persistentlocalstorage_provisionner_image=quay.io/external_storage/local-volume-provisioner:v1.0.1 + +# Logging deployment +# +# Currently logging deployment is disabled by default, enable it by setting this +#openshift_logging_install_logging=true +# +# Logging storage config +# Option A - NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/logging". "exports" is +# the name of the export served by the nfs server. "logging" is +# the name of a directory inside of "/exports". +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/logging". "exports" is +# the name of the export served by the nfs server. "logging" is +# the name of a directory inside of "/exports". 
+#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option C - Dynamic -- If openshift supports dynamic volume provisioning for +# your cloud platform use this. +#openshift_logging_storage_kind=dynamic +# +# Option D - none -- Logging will use emptydir volumes which are destroyed when +# pods are deleted +# +# Other Logging Options -- Common items you may wish to reconfigure, for the complete +# list of options please see roles/openshift_logging/README.md +# +# Configure loggingPublicURL in the master config for aggregate logging, defaults +# to kibana.{ openshift_master_default_subdomain } +#openshift_logging_kibana_hostname=logging.apps.example.com +# Configure the number of elastic search nodes, unless you're using dynamic provisioning +# this value must be 1 +#openshift_logging_es_cluster_size=1 + +# Prometheus deployment +# +# Currently prometheus deployment is disabled by default, enable it by setting this +#openshift_hosted_prometheus_deploy=true +# +# Prometheus storage config +# By default prometheus uses emptydir storage, if you want to persist you should +# configure it to use pvc storage type. Each volume must be ReadWriteOnce. 
+#openshift_prometheus_storage_type=emptydir +#openshift_prometheus_alertmanager_storage_type=emptydir +#openshift_prometheus_alertbuffer_storage_type=emptydir +# Use PVCs for persistence +#openshift_prometheus_storage_type=pvc +#openshift_prometheus_alertmanager_storage_type=pvc +#openshift_prometheus_alertbuffer_storage_type=pvc + +# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') +# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' + +# Disable the OpenShift SDN plugin +# openshift_use_openshift_sdn=False + +# Configure SDN cluster network and kubernetes service CIDR blocks. These +# network blocks should be private and should not conflict with network blocks +# in your infrastructure that pods may require access to. Can not be changed +# after deployment. +# +# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of +# 172.17.0.0/16. Your installation will fail and/or your configuration change will +# cause the Pod SDN or Cluster SDN to fail. +# +# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting +# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS +# environment variable located in /etc/sysconfig/docker-network. +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_cluster_network_cidr: clusterNetworkCIDR +# openshift_portal_net: serviceNetworkCIDR +# When installing osm_cluster_network_cidr and openshift_portal_net must be set. +# Sane examples are provided below. +#osm_cluster_network_cidr=10.128.0.0/14 +#openshift_portal_net=172.30.0.0/16 + +# ExternalIPNetworkCIDRs controls what values are acceptable for the +# service external IP field. If empty, no externalIP may be set. It +# may contain a list of CIDRs which are checked for access. If a CIDR +# is prefixed with !, IPs in that CIDR will be rejected. 
Rejections +# will be applied first, then the IP checked against one of the +# allowed CIDRs. You should ensure this range does not overlap with +# your nodes, pods, or service CIDRs for security reasons. +#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] + +# IngressIPNetworkCIDR controls the range to assign ingress IPs from for +# services of type LoadBalancer on bare metal. If empty, ingress IPs will not +# be assigned. It may contain a single CIDR that will be allocated from. For +# security reasons, you should ensure that this range does not overlap with +# the CIDRs reserved for external IPs, nodes, pods, or services. +#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 + +# Configure number of bits to allocate to each host's subnet e.g. 9 +# would mean a /23 network on the host. +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_host_subnet_length: hostSubnetLength +# When installing osm_host_subnet_length must be set. A sane example is provided below. +#osm_host_subnet_length=9 + +# Configure master API and console ports. +#openshift_master_api_port=8443 +#openshift_master_console_port=8443 +{% if openshift_api_port is defined and openshift_console_port is defined %} +{% if openshift_api_port and openshift_console_port %} +openshift_master_api_port={{openshift_api_port}} +openshift_master_console_port={{openshift_console_port}} +{% endif %} +{% endif %} + +# set exact RPM version (include - prefix) +#openshift_pkg_version=-3.9.0 +# you may also specify version and release, ie: +#openshift_pkg_version=-3.9.0-0.126.0.git.0.9351aae.el7 + +# Configure custom ca certificate +#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} +# +# NOTE: CA certificate will not be replaced with existing clusters. 
+# This option may only be specified when creating a new cluster or +# when redeploying cluster certificates with the redeploy-certificates +# playbook. + +# Configure custom named certificates (SNI certificates) +# +# https://docs.openshift.org/latest/install_config/certificate_customization.html +# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html +# +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# +# An optional CA may be specified for each named certificate. CAs will +# be added to the OpenShift CA bundle which allows for the named +# certificate to be served for internal cluster communication. +# +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates=true +# +# Provide local certificate paths which will be deployed to masters +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] +# +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] +# +# Add a trusted CA to all pods, copies from the control host, may be multiple +# certs in one file +#openshift_additional_ca=/path/to/additional-ca.crt + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. 
+# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. +#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_set_node_ip=True + +#openshift_node_kubelet_args is deprecated, use node config edits instead + +# Configure logrotate scripts +# See: https://github.com/nickhammond/ansible-logrotate +#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] + +# The OpenShift-Ansible installer will fail when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to false will override that check. +#openshift_hostname_check=true + +# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail +# in versions >= 3.6 +#openshift_use_dnsmasq=False + +# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf +# This is useful for POC environments where DNS may not actually be available yet or to set +# options like 'strict-order' to alter dnsmasq configuration. 
+#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf + +# Global Proxy Configuration +# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment +# variables for docker and master services. +# +# Hosts in the openshift_no_proxy list will NOT use any globally +# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains +# (.example.com), hosts (example.com), and IP addresses. +#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT +#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT +#openshift_no_proxy='.hosts.example.com,some-host.com' +# +# Most environments don't require a proxy between openshift masters, nodes, and +# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. +# If all of your hosts share a common domain you may wish to disable this and +# specify that domain above instead. +# +# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and +# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy +# variable (above) and set this value to False +#openshift_generate_no_proxy_hosts=True +# +# These options configure the BuildDefaults admission controller which injects +# configuration into Builds. Proxy related values will default to the global proxy +# config values. You only need to set these if they differ from the global proxy settings. 
+# See BuildDefaults documentation at +# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html +#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_no_proxy=mycorp.com +#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_git_no_proxy=mycorp.com +#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] +#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} +#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} +#openshift_builddefaults_resources_requests_cpu=100m +#openshift_builddefaults_resources_requests_memory=256Mi +#openshift_builddefaults_resources_limits_cpu=1000m +#openshift_builddefaults_resources_limits_memory=512Mi + +# Or you may optionally define your own build defaults configuration serialized as json +#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' + +# These options configure the BuildOverrides admission controller which injects +# configuration into Builds. 
+# See BuildOverrides documentation at +# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html +#openshift_buildoverrides_force_pull=true +#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] +#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} +#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} +#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}] + +# Or you may optionally define your own build overrides configuration serialized as json +#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' + +# Enable service catalog +#openshift_enable_service_catalog=true + +# Enable template service broker (requires service catalog to be enabled, above) +#template_service_broker_install=true + +# Specify an openshift_service_catalog image +# (defaults for origin and openshift-enterprise, respectively) +#openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{ openshift_image_tag }" +#openshift_service_catalog_image="registry.access.redhat.com/openshift3/ose-service-catalog:{ openshift_image_tag }" + +# TSB image tag +#template_service_broker_version='v3.9' + +# Configure one or more namespaces whose templates will be served by the TSB +#openshift_template_service_broker_namespaces=['openshift'] + +# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default +#openshift_master_dynamic_provisioning_enabled=True + +# Admission plugin config +#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} + +# Configure usage of 
openshift_clock role. +#openshift_clock_enabled=true + +# OpenShift Per-Service Environment Variables +# Environment variables are added to /etc/sysconfig files for +# each OpenShift node. +# API and controllers environment variables are merged in single +# master environments. +#openshift_node_env_vars={"ENABLE_HTTP2": "true"} +{% if no_http2 is defined %} +{% if no_http2 %} +openshift_master_api_env_vars={"ENABLE_HTTP2": "true"} +openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} +openshift_node_env_vars={"ENABLE_HTTP2": "true"} +{% endif %} +{% endif %} + +# Enable API service auditing +#openshift_master_audit_config={"enabled": "true"} +# +# In case you want more advanced setup for the auditlog you can +# use this line. +# The directory in "auditFilePath" will be created if it does not +# exist +#openshift_master_audit_config={"enabled": "true", "auditFilePath": "/var/lib/origin/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": "14", "maximumFileSizeMegabytes": "500", "maximumRetainedFiles": "5"} + +# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used +# by openshift_deployment_type=origin +#openshift_enable_origin_repo=false + +# Validity of the auto-generated OpenShift certificates in days. +# See also openshift_hosted_registry_cert_expire_days above. +# +#openshift_ca_cert_expire_days=1825 +#openshift_node_cert_expire_days=730 +#openshift_master_cert_expire_days=730 + +# Validity of the auto-generated external etcd certificates in days. +# Controls validity for etcd CA, peer, server and client certificates. 
+# +#etcd_ca_default_days=1825 +# +# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference +# openshift_master_saconfig_limitsecretreferences=false + +# Upgrade Control +# +# By default nodes are upgraded in a serial manner one at a time and all failures +# are fatal, one set of variables for normal nodes, one set of variables for +# nodes that are part of control plane as the number of hosts may be different +# in those two groups. +#openshift_upgrade_nodes_serial=1 +#openshift_upgrade_nodes_max_fail_percentage=0 +#openshift_upgrade_control_plane_nodes_serial=1 +#openshift_upgrade_control_plane_nodes_max_fail_percentage=0 +# +# You can specify the number of nodes to upgrade at once. We do not currently +# attempt to verify that you have capacity to drain this many nodes at once +# so please be careful when specifying these values. You should also verify that +# the expected number of nodes are all schedulable and ready before starting an +# upgrade. If it's not possible to drain the requested nodes the upgrade will +# stall indefinitely until the drain is successful. +# +# If you're upgrading more than one node at a time you can specify the maximum +# percentage of failure within the batch before the upgrade is aborted. Any +# nodes that do fail are ignored for the rest of the playbook run and you should +# take care to investigate the failure and return the node to service so that +# your cluster remains healthy. +# +# The percentage must exceed the value, this would fail on two failures +# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 +# where as this would not +# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 +# +# A timeout to wait for nodes to drain pods can be specified to ensure that the +# upgrade continues even if nodes fail to drain pods in the allowed time. 
The +# default value of 0 will wait indefinitely allowing the admin to investigate +# the root cause and ensuring that disruption budgets are respected. If +# a timeout of 0 is used there will also be one attempt to re-try draining the +# node. If a non zero timeout is specified there will be no attempt to retry. +#openshift_upgrade_nodes_drain_timeout=0 +# +# Multiple data migrations take place and if they fail they will fail the upgrade +# You may wish to disable these or make them non fatal +# +# openshift_upgrade_pre_storage_migration_enabled=true +# openshift_upgrade_pre_storage_migration_fatal=true +# openshift_upgrade_post_storage_migration_enabled=true +# openshift_upgrade_post_storage_migration_fatal=false + +###################################################################### +# CloudForms/ManageIQ (CFME/MIQ) Configuration + +# See the readme for full descriptions and getting started +# instructions: ../../roles/openshift_management/README.md or go directly to +# their definitions: ../../roles/openshift_management/defaults/main.yml +# ../../roles/openshift_management/vars/main.yml +# +# Namespace for the CFME project +#openshift_management_project: openshift-management + +# Namespace/project description +#openshift_management_project_description: CloudForms Management Engine + +# Choose 'miq-template' for a podified database install +# Choose 'miq-template-ext-db' for an external database install +# +# If you are using the miq-template-ext-db template then you must add +# the required database parameters to the +# openshift_management_template_parameters variable. +#openshift_management_app_template: miq-template + +# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. +#openshift_management_storage_class: nfs + +# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a +# netapp appliance, then you must set the hostname here. Leave the +# value as 'false' if you are not using external NFS. 
+#openshift_management_storage_nfs_external_hostname: false + +# [OPTIONAL] - If you are using external NFS then you must set the base +# path to the exports location here. +# +# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports +# that will back the application PV and optionally the database +# pv. Export path definitions, relative to +# { openshift_management_storage_nfs_base_dir} +# +# LOCAL NFS NOTE: +# +# You may also change this value if you want to change the default +# path used for local NFS exports. +#openshift_management_storage_nfs_base_dir: /exports + +# LOCAL NFS NOTE: +# +# You may override the automatically selected LOCAL NFS server by +# setting this variable. Useful for testing specific task files. +#openshift_management_storage_nfs_local_hostname: false + +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. +# +# For example, adding this cluster as a container provider, +# playbooks/openshift-management/add_container_provider.yml +#openshift_management_username: admin +#openshift_management_password: smartvm + +# A hash of parameters you want to override or set in the +# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in +# your inventory file as a simple hash. Acceptable values are defined +# under the .parameters list in files/miq-template{-ext-db}.yaml +# Example: +# +# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} +#openshift_management_template_parameters: {} + +# Firewall configuration +# You can open additional firewall ports by defining them as a list of service +# names and ports/port ranges for either masters or nodes. 
+#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}] +#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}] + +# Service port node range +#openshift_node_port_range=30000-32767 + +# Enable unsupported configurations, things that will yield a partially +# functioning cluster but would not be supported for production use +#openshift_enable_unsupported_configurations=false +openshift_enable_unsupported_configurations=True diff --git a/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-stg.j2 b/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-stg.j2 new file mode 100644 index 0000000000..31e8db2c46 --- /dev/null +++ b/roles/ansible-ansible-openshift-ansible/templates/cluster-inventory-stg.j2 @@ -0,0 +1,1148 @@ +# This is an example of an OpenShift-Ansible host inventory that provides the +# minimum recommended configuration for production use. This includes 3 masters, +# two infra nodes, two compute nodes, and an haproxy load balancer to load +# balance traffic to the API servers. For a truly production environment you +# should use an external load balancing solution that itself is highly available. 
+ +[masters] +{% for host in groups[openshift_cluster_masters_group] %} +{{ host }} +{% endfor %} + +[etcd] +{% for host in groups[openshift_cluster_masters_group] %} +{{ host }} +{% endfor %} + +[nodes] +{% for host in groups[openshift_cluster_masters_group] %} +{{ host }} openshift_node_group_name='node-config-master' +{% endfor %} +{% for host in groups[openshift_cluster_nodes_group] %} +{{ host }} openshift_node_group_name='node-config-compute' +{% endfor %} + +#[nfs] +#ose3-master1.test.example.com + +#[lb] +#ose3-lb.test.example.com + +# Create an OSEv3 group that contains the masters and nodes groups +[OSEv3:children] +masters +nodes +etcd +#lb +#nfs + +[OSEv3:vars] + +openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}] +############################################################################### +# Common/ Required configuration variables follow # +############################################################################### +# SSH user, this user should allow ssh based auth without requiring a +# password. If using ssh key based auth, then the key should be managed by an +# ssh agent. +ansible_user={{openshift_ansible_ssh_user}} + +# If ansible_user is not root, ansible_become must be set to true and the +# user must be configured for passwordless sudo +#ansible_become=yes + +# Specify the deployment type. Valid values are origin and openshift-enterprise. +#openshift_deployment_type=origin +openshift_deployment_type={{openshift_deployment_type}} + +# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we +# rely on the version running on the first master. 
Works best for containerized installs where we can usually +# use this to lookup the latest exact version of the container images, which is the tag actually used to configure +# the cluster. For RPM installations we just verify the version detected in your configured repos matches this +# release. +openshift_release={{openshift_release}} + +{% if openshift_master_ha is defined %} +{% if openshift_master_ha %} +# Native high availability cluster method with optional load balancer. +# If no lb group is defined, the installer assumes that a load balancer has +# been preconfigured. For installation the value of +# openshift_master_cluster_hostname must resolve to the load balancer +# or to one or all of the masters defined in the inventory if no load +# balancer is present. +openshift_master_cluster_method=native +openshift_master_cluster_hostname={{openshift_internal_cluster_url}} +openshift_master_cluster_public_hostname={{openshift_cluster_url}} +{% endif %} +{% endif %} + +# default subdomain to use for exposed routes, you should have wildcard dns +# for *.apps.test.example.com that points at your infra nodes which will run +# your router +{% if openshift_app_subdomain is defined %} +openshift_master_default_subdomain={{openshift_app_subdomain}} +{% endif %} + +############################################################################### +# Additional configuration variables follow # +############################################################################### + +# Debug level for all OpenShift components (Defaults to 2) +debug_level={{openshift_debug_level}} + +# Specify an exact container image tag to install or configure. +# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed. +# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. 
+#openshift_image_tag=v3.10.0 +openshift_image_tag={{openshift_release}} + +# Specify an exact rpm version to install or configure. +# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed. +# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up. +#openshift_pkg_version=-3.10.0 +openshift_pkg_version="-3.10.14" + +# If using Atomic Host, you may specify system container image registry for the nodes: +#system_images_registry="docker.io" +# when openshift_deployment_type=='openshift-enterprise' +#system_images_registry="registry.access.redhat.com" + +# Manage openshift example imagestreams and templates during install and upgrade +#openshift_install_examples=true +{% if openshift_ansible_install_examples is defined %} +openshift_install_examples={{openshift_ansible_install_examples}} +{% endif %} + +# Configure logoutURL in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url +#openshift_master_logout_url=http://example.com + +# Configure extensions in the master config for console customization +# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files +#openshift_master_oauth_templates={'login': '/path/to/login-template.html'} +# openshift_master_oauth_template is deprecated. Use openshift_master_oauth_templates instead. 
+#openshift_master_oauth_template=/path/to/login-template.html
+
+# Configure imagePolicyConfig in the master config
+# See: https://docs.openshift.org/latest/admin_guide/image_policy.html
+#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
+
+# Configure master API rate limits for external clients
+#openshift_master_external_ratelimit_qps=200
+#openshift_master_external_ratelimit_burst=400
+# Configure master API rate limits for loopback clients
+#openshift_master_loopback_ratelimit_qps=300
+#openshift_master_loopback_ratelimit_burst=600
+
+# Install and run cri-o.
+#openshift_use_crio=False
+#openshift_use_crio_only=False
+{% if openshift_ansible_use_crio is defined %}
+openshift_use_crio={{ openshift_ansible_use_crio }}
+{% endif %}
+{% if openshift_ansible_use_crio_only is defined %}
+openshift_use_crio_only={{ openshift_ansible_use_crio_only }}
+{% endif %}
+# The following two variables are used when openshift_use_crio is True
+# and cleans up after builds that pass through docker. When openshift_use_crio is True
+# these variables are set to the defaults shown. You may override them here.
+# NOTE: You will still need to tag crio nodes with your given label(s)!
+# Enable docker garbage collection when using cri-o
+#openshift_crio_enable_docker_gc=True
+# Node Selectors to run the garbage collection
+#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
+
+# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
+# Default value: "--log-driver=journald"
+#openshift_docker_options="-l warn --ipv6=false"
+
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.12.1"
+
+# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True. 
+# Uncomment below to disable; for example if your kernel does not support the +# Docker overlay/overlay2 storage drivers with SELinux enabled. +#openshift_docker_selinux_enabled=False + +# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone. +# docker_upgrade=False + +# Specify a list of block devices to be formatted and mounted on the nodes +# during prerequisites.yml. For each hash, "device", "path", "filesystem" are +# required. To add devices only on certain classes of node, redefine +# container_runtime_extra_storage as a group var. +#container_runtime_extra_storage='[{"device":"/dev/vdc","path":"/var/lib/origin/openshift.local.volumes","filesystem":"xfs","options":"gquota"}]' + +# Enable etcd debug logging, defaults to false +# etcd_debug=true +# Set etcd log levels by package +# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG" + +# Upgrade Hooks +# +# Hooks are available to run custom tasks at various points during a cluster +# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using +# absolute paths, if not the path will be treated as relative to the file where the +# hook is actually used. +# +# Tasks to run before each master is upgraded. +# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml +# +# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible +# upgrade steps, but before we restart system/services. +# openshift_master_upgrade_hook=/usr/share/custom/master.yml +# +# Tasks to run after each master is upgraded and system/services have been restarted. 
+# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
+
+# Cluster Image Source (registry) configuration
+# openshift-enterprise default is 'registry.access.redhat.com/openshift3/ose-${component}:${version}'
+# origin default is 'docker.io/openshift/origin-${component}:${version}'
+#oreg_url=example.com/openshift3/ose-${component}:${version}
+# If oreg_url points to a registry other than registry.access.redhat.com we can
+# modify image streams to point at that registry by setting the following to true
+#openshift_examples_modify_imagestreams=true
+# Add insecure and blocked registries to global docker configuration
+#openshift_docker_insecure_registries=registry.example.com
+#openshift_docker_blocked_registries=registry.hacker.com
+# You may also configure additional default registries for docker, however this
+# is discouraged. Instead you should make use of fully qualified image names.
+#openshift_docker_additional_registries=registry.example.com
+
+# If oreg_url points to a registry requiring authentication, provide the following:
+#oreg_auth_user=some_user
+#oreg_auth_password='my-pass'
+# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
+# oreg_auth_pass should be generated from running docker login.
+# To update registry auth credentials, uncomment the following:
+#oreg_auth_credentials_replace: True
+
+# OpenShift repository configuration
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
+#openshift_repos_enable_testing=false
+
+# If the image for etcd needs to be pulled from anywhere else than registry.access.redhat.com, e.g. 
in +# a disconnected and containerized installation, use osm_etcd_image to specify the image to use: +#osm_etcd_image=rhel7/etcd + +# htpasswd auth +#openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}] +# Defining htpasswd users +#openshift_master_htpasswd_users={'user1': '', 'user2': ''} +# or +#openshift_master_htpasswd_file= + +{% if openshift_auth_profile == "osbs" %} +openshift_master_manage_htpasswd=false +openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift_htpasswd_file }}'}] +{% endif %} + +{% if openshift_auth_profile == "fedoraidp" %} +openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "extraScopes": ["profile", "email", "https://id.fedoraproject.org/scope/groups"], "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}] +{% endif %} + +{% if openshift_auth_profile == "fedoraidp-stg" %} +openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}] 
+{% endif %} + +# Allow all auth +#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}] + +# LDAP auth +#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}] +# +# Configure LDAP CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the LDAPPasswordIdentityProvider. +# +#openshift_master_ldap_ca= +# or +#openshift_master_ldap_ca_file= + +# OpenID auth +#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}] +# +# Configure OpenID CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "ca" key set +# within the OpenIDIdentityProvider. 
+# +#openshift_master_openid_ca= +# or +#openshift_master_openid_ca_file= + +# Request header auth +#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}] +# +# Configure request header CA certificate +# Specify either the ASCII contents of the certificate or the path to +# the local file that will be copied to the remote host. CA +# certificate contents will be copied to master systems and saved +# within /etc/origin/master/ with a filename matching the "clientCA" +# key set within the RequestHeaderIdentityProvider. +# +#openshift_master_request_header_ca= +# or +#openshift_master_request_header_ca_file= + +# CloudForms Management Engine (ManageIQ) App Install +# +# Enables installation of MIQ server. Recommended for dedicated +# clusters only. See roles/openshift_management/README.md for instructions +# and requirements. +#openshift_management_install_management=False + +# Cloud Provider Configuration +# +# Note: You may make use of environment variables rather than store +# sensitive configuration within the ansible inventory. +# For example: +#openshift_cloudprovider_aws_access_key="{ lookup('env','AWS_ACCESS_KEY_ID') }" +#openshift_cloudprovider_aws_secret_key="{ lookup('env','AWS_SECRET_ACCESS_KEY') }" +# +# AWS +#openshift_cloudprovider_kind=aws +# Note: IAM profiles may be used instead of storing API credentials on disk. 
+#openshift_cloudprovider_aws_access_key=aws_access_key_id +#openshift_cloudprovider_aws_secret_key=aws_secret_access_key +# +# Openstack +#openshift_cloudprovider_kind=openstack +#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/ +#openshift_cloudprovider_openstack_username=username +#openshift_cloudprovider_openstack_password=password +#openshift_cloudprovider_openstack_domain_id=domain_id +#openshift_cloudprovider_openstack_domain_name=domain_name +#openshift_cloudprovider_openstack_tenant_id=tenant_id +#openshift_cloudprovider_openstack_tenant_name=tenant_name +#openshift_cloudprovider_openstack_region=region +#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id +# +# Note: If you're getting a "BS API version autodetection failed" when provisioning cinder volumes you may need this setting +#openshift_cloudprovider_openstack_blockstorage_version=v2 +# +# GCE +#openshift_cloudprovider_kind=gce +# Note: When using GCE, openshift_gcp_project and openshift_gcp_prefix must be +# defined. +# openshift_gcp_project is the project-id +#openshift_gcp_project= +# openshift_gcp_prefix is a unique string to identify each openshift cluster. 
+#openshift_gcp_prefix= +#openshift_gcp_multizone=False +# Note: To enable nested virtualization in gcp use the following variable and url +#openshift_gcp_licenses="https://www.googleapis.com/compute/v1/projects/vm-options/global/licenses/enable-vmx" +# Additional details regarding nested virtualization are available: +# https://cloud.google.com/compute/docs/instances/enable-nested-virtualization-vm-instances +# +# vSphere +#openshift_cloudprovider_kind=vsphere +#openshift_cloudprovider_vsphere_username=username +#openshift_cloudprovider_vsphere_password=password +#openshift_cloudprovider_vsphere_host=vcenter_host or vsphere_host +#openshift_cloudprovider_vsphere_datacenter=datacenter +#openshift_cloudprovider_vsphere_datastore=datastore +#openshift_cloudprovider_vsphere_folder=optional_folder_name + + +# Project Configuration +#osm_project_request_message='' +#osm_project_request_template='' +#osm_mcs_allocator_range='s0:/2' +#osm_mcs_labels_per_project=5 +#osm_uid_allocator_range='1000000000-1999999999/10000' + +# Configure additional projects +#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}} + +# Enable cockpit +#osm_use_cockpit=true +# +# Set cockpit plugins +#osm_cockpit_plugins=['cockpit-kubernetes'] + +# If an external load balancer is used public hostname should resolve to +# external load balancer address +#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com + +# Configure controller arguments +#osm_controller_args={'resource-quota-sync-period': ['10s']} + +# Configure api server arguments +#osm_api_server_args={'max-requests-inflight': ['400']} + +# additional cors origins +#osm_custom_cors_origins=['foo.example.com', 'bar.example.com'] + +# default project node selector +#osm_default_node_selector='region=primary' + +# Override the default pod eviction timeout +#openshift_master_pod_eviction_timeout=5m + +# Override the default oauth tokenConfig settings: +# 
openshift_master_access_token_max_seconds=86400 +# openshift_master_auth_token_max_seconds=500 + +# Override master servingInfo.maxRequestsInFlight +#openshift_master_max_requests_inflight=500 + +# Override master and node servingInfo.minTLSVersion and .cipherSuites +# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12 +# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants +#openshift_master_min_tls_version=VersionTLS12 +#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] +# +#openshift_node_min_tls_version=VersionTLS12 +#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...'] + +# default storage plugin dependencies to install, by default the ceph and +# glusterfs plugin dependencies will be installed, if available. +#osn_storage_plugin_deps=['ceph','glusterfs','iscsi'] + +# OpenShift Router Options +# +# An OpenShift router will be created during install if there are +# nodes present with labels matching the default router selector, +# "node-role.kubernetes.io/infra=true". +# +# Example: +# [nodes] +# node.example.com openshift_node_group_name="node-config-infra" +# +# Router selector (optional) +# Router will only be created if nodes matching this label are present. +# Default value: 'node-role.kubernetes.io/infra=true' +#openshift_hosted_router_selector='node-role.kubernetes.io/infra=true' +# +# Router replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift router selector. 
+#openshift_hosted_router_replicas=2 +# +# Router force subdomain (optional) +# A router path format to force on all routes used by this router +# (will ignore the route host value) +#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com' +# +# Router certificate (optional) +# Provide local certificate paths which will be configured as the +# router's default certificate. +#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"} +# +# Manage the OpenShift Router (optional) +#openshift_hosted_manage_router=true +# +# Router sharding support has been added and can be achieved by supplying the correct +# data to the inventory. The variable to house the data is openshift_hosted_routers +# and is in the form of a list. If no data is passed then a default router will be +# created. There are multiple combinations of router sharding. The one described +# below supports routers on separate nodes. 
+# +#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}] + +# OpenShift Registry Console Options +# Override the console image prefix: +# origin default is "cockpit/", enterprise default is "openshift3/" +#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/ +# origin default is "kubernetes", enterprise default is "registry-console" +#openshift_cockpit_deployer_basename=my-console +# Override image version, defaults to latest for origin, vX.Y product version for enterprise +#openshift_cockpit_deployer_version=1.4.1 + +# Openshift Registry Options +# +# An OpenShift registry will be created during install if there are +# nodes present with labels matching the default registry selector, +# "node-role.kubernetes.io/infra=true". +# +# Example: +# [nodes] +# node.example.com openshift_node_group_name="node-config-infra" +# +# Registry selector (optional) +# Registry will only be created if nodes matching this label are present. 
+# Default value: 'node-role.kubernetes.io/infra=true' +#openshift_hosted_registry_selector='node-role.kubernetes.io/infra=true' +# +# Registry replicas (optional) +# Unless specified, openshift-ansible will calculate the replica count +# based on the number of nodes matching the openshift registry selector. +#openshift_hosted_registry_replicas=2 +# +# Validity of the auto-generated certificate in days (optional) +#openshift_hosted_registry_cert_expire_days=730 +# +# Manage the OpenShift Registry (optional) +#openshift_hosted_manage_registry=true +# Manage the OpenShift Registry Console (optional) +#openshift_hosted_manage_registry_console=true +# +# Registry Storage Options +# +# NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/registry". "exports" is +# is the name of the export served by the nfs server. "registry" is +# the name of a directory inside of "/exports". +#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)' +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +# +# External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/registry". "exports" is +# is the name of the export served by the nfs server. "registry" is +# the name of a directory inside of "/exports". 
+#openshift_hosted_registry_storage_kind=nfs +#openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +#openshift_hosted_registry_storage_host=nfs.example.com +# nfs_directory must conform to DNS-1123 subdomain must consist of lower case +# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character +#openshift_hosted_registry_storage_nfs_directory=/exports +#openshift_hosted_registry_storage_volume_name=registry +#openshift_hosted_registry_storage_volume_size=10Gi +{% if env == "staging" %} +openshift_hosted_registry_storage_kind=nfs +openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com +openshift_hosted_registry_storage_nfs_directory=/ +openshift_hosted_registry_storage_volume_name=openshift-stg-registry +openshift_hosted_registry_storage_volume_size=10Gi +{% else %} +openshift_hosted_registry_storage_kind=nfs +openshift_hosted_registry_storage_access_modes=['ReadWriteMany'] +openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com +openshift_hosted_registry_storage_nfs_directory=/ +openshift_hosted_registry_storage_volume_name=openshift-prod-registry +openshift_hosted_registry_storage_volume_size=10Gi +{% endif %} +# +# Openstack +# Volume must already exist. 
+#openshift_hosted_registry_storage_kind=openstack +#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] +#openshift_hosted_registry_storage_openstack_filesystem=ext4 +#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57 +#openshift_hosted_registry_storage_volume_size=10Gi +# +# hostPath (local filesystem storage) +# Suitable for "all-in-one" or proof of concept deployments +# Must not be used for high-availability and production deployments +#openshift_hosted_registry_storage_kind=hostpath +#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce'] +#openshift_hosted_registry_storage_hostpath_path=/var/lib/openshift_volumes +#openshift_hosted_registry_storage_volume_size=10Gi +# +# AWS S3 +# S3 bucket must already exist. +#openshift_hosted_registry_storage_kind=object +#openshift_hosted_registry_storage_provider=s3 +#openshift_hosted_registry_storage_s3_encrypt=false +#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id +#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id +#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key +#openshift_hosted_registry_storage_s3_bucket=bucket_name +#openshift_hosted_registry_storage_s3_region=bucket_region +#openshift_hosted_registry_storage_s3_chunksize=26214400 +#openshift_hosted_registry_storage_s3_rootdirectory=/registry +#openshift_hosted_registry_pullthrough=true +#openshift_hosted_registry_acceptschema2=true +#openshift_hosted_registry_enforcequota=true +# +# Any S3 service (Minio, ExoScale, ...): Basically the same as above +# but with regionendpoint configured +# S3 bucket must already exist. 
+#openshift_hosted_registry_storage_kind=object
+#openshift_hosted_registry_storage_provider=s3
+#openshift_hosted_registry_storage_s3_accesskey=access_key_id
+#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
+#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
+#openshift_hosted_registry_storage_s3_bucket=bucket_name
+#openshift_hosted_registry_storage_s3_region=bucket_region
+#openshift_hosted_registry_storage_s3_chunksize=26214400
+#openshift_hosted_registry_storage_s3_rootdirectory=/registry
+#openshift_hosted_registry_pullthrough=true
+#openshift_hosted_registry_acceptschema2=true
+#openshift_hosted_registry_enforcequota=true
+#
+# Additional CloudFront Options. When using CloudFront all three
+# of the following variables must be defined.
+#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
+#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
+#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
+# vSphere Volume with vSphere Cloud Provider
+# openshift_hosted_registry_storage_kind=vsphere
+# openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
+# openshift_hosted_registry_storage_annotations=['volume.beta.kubernetes.io/storage-provisioner: kubernetes.io/vsphere-volume']
+#
+# GCS Storage Bucket
+#openshift_hosted_registry_storage_provider=gcs
+#openshift_hosted_registry_storage_gcs_bucket=bucket01
+#openshift_hosted_registry_storage_gcs_keyfile=test.key
+#openshift_hosted_registry_storage_gcs_rootdirectory=/registry
+
+# Metrics deployment
+# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
+#
+# By default metrics are not automatically deployed, set this to enable them
+#openshift_metrics_install_metrics=true
+{% if openshift_metrics_deploy is defined %}
+{% if openshift_metrics_deploy %}
+openshift_hosted_metrics_deploy=true
+{% endif %}
+{% endif %}
+#
+# Storage 
Options +# If openshift_metrics_storage_kind is unset then metrics will be stored +# in an EmptyDir volume and will be deleted when the cassandra pod terminates. +# Storage options A & B currently support only one cassandra pod which is +# generally enough for up to 1000 pods. Additional volumes can be created +# manually after the fact and metrics scaled per the docs. +# +# Option A - NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/metrics". "exports" is +# is the name of the export served by the nfs server. "metrics" is +# the name of a directory inside of "/exports". +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_nfs_options='*(rw,root_squash)' +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/metrics". "exports" is +# is the name of the export served by the nfs server. "metrics" is +# the name of a directory inside of "/exports". +#openshift_metrics_storage_kind=nfs +#openshift_metrics_storage_access_modes=['ReadWriteOnce'] +#openshift_metrics_storage_host=nfs.example.com +#openshift_metrics_storage_nfs_directory=/exports +#openshift_metrics_storage_volume_name=metrics +#openshift_metrics_storage_volume_size=10Gi +#openshift_metrics_storage_labels={'storage': 'metrics'} +# +# Option C - Dynamic -- If openshift supports dynamic volume provisioning for +# your cloud platform use this. 
+#openshift_metrics_storage_kind=dynamic
+#
+# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
+# list of options please see roles/openshift_metrics/README.md
+#
+# Override metricsPublicURL in the master config for cluster metrics
+# Defaults to https://hawkular-metrics.{openshift_master_default_subdomain}/hawkular/metrics
+# Currently, you may only alter the hostname portion of the url, altering the
+# `/hawkular/metrics` path will break installation of metrics.
+#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
+# Configure the metrics component images # Note, these will be modified by oreg_url by default
+#openshift_metrics_cassandra_image="docker.io/openshift/origin-metrics-cassandra:{ openshift_image_tag }"
+#openshift_metrics_hawkular_agent_image="docker.io/openshift/origin-metrics-hawkular-openshift-agent:{ openshift_image_tag }"
+#openshift_metrics_hawkular_metrics_image="docker.io/openshift/origin-metrics-hawkular-metrics:{ openshift_image_tag }"
+#openshift_metrics_schema_installer_image="docker.io/openshift/origin-metrics-schema-installer:{ openshift_image_tag }"
+#openshift_metrics_heapster_image="docker.io/openshift/origin-metrics-heapster:{ openshift_image_tag }"
+# when openshift_deployment_type=='openshift-enterprise'
+#openshift_metrics_cassandra_image="registry.access.redhat.com/openshift3/metrics-cassandra:{ openshift_image_tag }"
+#openshift_metrics_hawkular_agent_image="registry.access.redhat.com/openshift3/metrics-hawkular-openshift-agent:{ openshift_image_tag }"
+#openshift_metrics_hawkular_metrics_image="registry.access.redhat.com/openshift3/metrics-hawkular-metrics:{ openshift_image_tag }"
+#openshift_metrics_schema_installer_image="registry.access.redhat.com/openshift3/metrics-schema-installer:{ openshift_image_tag }"
+#openshift_metrics_heapster_image="registry.access.redhat.com/openshift3/metrics-heapster:{ openshift_image_tag }"
+#
+# StorageClass
+# openshift_storageclass_name=gp2 
+# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'} +# openshift_storageclass_mount_options=['dir_mode=0777', 'file_mode=0777'] +# openshift_storageclass_reclaim_policy="Delete" +# +# PersistentLocalStorage +# If Persistent Local Storage is wanted, this boolean can be defined to True. +# This will create all necessary configuration to use persistent storage on nodes. +#openshift_persistentlocalstorage_enabled=False +#openshift_persistentlocalstorage_classes=[] +#openshift_persistentlocalstorage_path=/mnt/local-storage +#openshift_persistentlocalstorage_provisionner_image=quay.io/external_storage/local-volume-provisioner:v1.0.1 + +# Logging deployment +# +# Currently logging deployment is disabled by default, enable it by setting this +#openshift_logging_install_logging=true +# +# Logging storage config +# Option A - NFS Host Group +# An NFS volume will be created with path "nfs_directory/volume_name" +# on the host within the [nfs] host group. For example, the volume +# path using these options would be "/exports/logging". "exports" is +# is the name of the export served by the nfs server. "logging" is +# the name of a directory inside of "/exports". +#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_nfs_options='*(rw,root_squash)' +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option B - External NFS Host +# NFS volume must already exist with path "nfs_directory/_volume_name" on +# the storage_host. For example, the remote volume path using these +# options would be "nfs.example.com:/exports/logging". "exports" is +# is the name of the export served by the nfs server. "logging" is +# the name of a directory inside of "/exports". 
+#openshift_logging_storage_kind=nfs +#openshift_logging_storage_access_modes=['ReadWriteOnce'] +#openshift_logging_storage_host=nfs.example.com +#openshift_logging_storage_nfs_directory=/exports +#openshift_logging_storage_volume_name=logging +#openshift_logging_storage_volume_size=10Gi +#openshift_logging_storage_labels={'storage': 'logging'} +# +# Option C - Dynamic -- If openshift supports dynamic volume provisioning for +# your cloud platform use this. +#openshift_logging_storage_kind=dynamic +# +# Option D - none -- Logging will use emptydir volumes which are destroyed when +# pods are deleted +# +# Other Logging Options -- Common items you may wish to reconfigure, for the complete +# list of options please see roles/openshift_logging/README.md +# +# Configure loggingPublicURL in the master config for aggregate logging, defaults +# to kibana.{ openshift_master_default_subdomain } +#openshift_logging_kibana_hostname=logging.apps.example.com +# Configure the number of elastic search nodes, unless you're using dynamic provisioning +# this value must be 1 +#openshift_logging_es_cluster_size=1 + +# Prometheus deployment +# +# Currently prometheus deployment is disabled by default, enable it by setting this +#openshift_hosted_prometheus_deploy=true +# +# Prometheus storage config +# By default prometheus uses emptydir storage, if you want to persist you should +# configure it to use pvc storage type. Each volume must be ReadWriteOnce. 
+#openshift_prometheus_storage_type=emptydir +#openshift_prometheus_alertmanager_storage_type=emptydir +#openshift_prometheus_alertbuffer_storage_type=emptydir +# Use PVCs for persistence +#openshift_prometheus_storage_type=pvc +#openshift_prometheus_alertmanager_storage_type=pvc +#openshift_prometheus_alertbuffer_storage_type=pvc + +# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet') +# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant' + +# Disable the OpenShift SDN plugin +# openshift_use_openshift_sdn=False + +# Configure SDN cluster network and kubernetes service CIDR blocks. These +# network blocks should be private and should not conflict with network blocks +# in your infrastructure that pods may require access to. Can not be changed +# after deployment. +# +# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of +# 172.17.0.0/16. Your installation will fail and/or your configuration change will +# cause the Pod SDN or Cluster SDN to fail. +# +# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting +# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS +# environment variable located in /etc/sysconfig/docker-network. +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_cluster_network_cidr: clusterNetworkCIDR +# openshift_portal_net: serviceNetworkCIDR +# When installing osm_cluster_network_cidr and openshift_portal_net must be set. +# Sane examples are provided below. +#osm_cluster_network_cidr=10.128.0.0/14 +#openshift_portal_net=172.30.0.0/16 + +# ExternalIPNetworkCIDRs controls what values are acceptable for the +# service external IP field. If empty, no externalIP may be set. It +# may contain a list of CIDRs which are checked for access. If a CIDR +# is prefixed with !, IPs in that CIDR will be rejected. 
Rejections +# will be applied first, then the IP checked against one of the +# allowed CIDRs. You should ensure this range does not overlap with +# your nodes, pods, or service CIDRs for security reasons. +#openshift_master_external_ip_network_cidrs=['0.0.0.0/0'] + +# IngressIPNetworkCIDR controls the range to assign ingress IPs from for +# services of type LoadBalancer on bare metal. If empty, ingress IPs will not +# be assigned. It may contain a single CIDR that will be allocated from. For +# security reasons, you should ensure that this range does not overlap with +# the CIDRs reserved for external IPs, nodes, pods, or services. +#openshift_master_ingress_ip_network_cidr=172.46.0.0/16 + +# Configure number of bits to allocate to each host's subnet e.g. 9 +# would mean a /23 network on the host. +# When upgrading or scaling up the following must match whats in your master config! +# Inventory: master yaml field +# osm_host_subnet_length: hostSubnetLength +# When installing osm_host_subnet_length must be set. A sane example is provided below. +#osm_host_subnet_length=9 + +# Configure master API and console ports. +#openshift_master_api_port=8443 +#openshift_master_console_port=8443 +{% if openshift_api_port is defined and openshift_console_port is defined %} +{% if openshift_api_port and openshift_console_port %} +openshift_master_api_port={{openshift_api_port}} +openshift_master_console_port={{openshift_console_port}} +{% endif %} +{% endif %} + +# set exact RPM version (include - prefix) +#openshift_pkg_version=-3.9.0 +# you may also specify version and release, ie: +#openshift_pkg_version=-3.9.0-0.126.0.git.0.9351aae.el7 + +# Configure custom ca certificate +#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'} +# +# NOTE: CA certificate will not be replaced with existing clusters. 
+# This option may only be specified when creating a new cluster or +# when redeploying cluster certificates with the redeploy-certificates +# playbook. + +# Configure custom named certificates (SNI certificates) +# +# https://docs.openshift.org/latest/install_config/certificate_customization.html +# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html +# +# NOTE: openshift_master_named_certificates is cached on masters and is an +# additive fact, meaning that each run with a different set of certificates +# will add the newly provided certificates to the cached set of certificates. +# +# An optional CA may be specified for each named certificate. CAs will +# be added to the OpenShift CA bundle which allows for the named +# certificate to be served for internal cluster communication. +# +# If you would like openshift_master_named_certificates to be overwritten with +# the provided value, specify openshift_master_overwrite_named_certificates. +#openshift_master_overwrite_named_certificates=true +# +# Provide local certificate paths which will be deployed to masters +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}] +# +# Detected names may be overridden by specifying the "names" key +#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}] +# +# Add a trusted CA to all pods, copies from the control host, may be multiple +# certs in one file +#openshift_additional_ca=/path/to/additional-ca.crt + +# Session options +#openshift_master_session_name=ssn +#openshift_master_session_max_seconds=3600 + +# An authentication and encryption secret will be generated if secrets +# are not provided. If provided, openshift_master_session_auth_secrets +# and openshift_master_encryption_secrets must be equal length. 
+# +# Signing secrets, used to authenticate sessions using +# HMAC. Recommended to use secrets with 32 or 64 bytes. +#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] +# +# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32 +# characters long, to select AES-128, AES-192, or AES-256. +#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO'] + +# configure how often node iptables rules are refreshed +#openshift_node_iptables_sync_period=5s + +# Configure nodeIP in the node config +# This is needed in cases where node traffic is desired to go over an +# interface other than the default network interface. +#openshift_set_node_ip=True + +#openshift_node_kubelet_args is deprecated, use node config edits instead + +# Configure logrotate scripts +# See: https://github.com/nickhammond/ansible-logrotate +#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}] + +# The OpenShift-Ansible installer will fail when it detects that the +# value of openshift_hostname resolves to an IP address not bound to any local +# interfaces. This mis-configuration is problematic for any pod leveraging host +# networking and liveness or readiness probes. +# Setting this variable to false will override that check. +#openshift_hostname_check=true + +# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail +# in versions >= 3.6 +#openshift_use_dnsmasq=False + +# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf +# This is useful for POC environments where DNS may not actually be available yet or to set +# options like 'strict-order' to alter dnsmasq configuration. 
+#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf + +# Global Proxy Configuration +# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment +# variables for docker and master services. +# +# Hosts in the openshift_no_proxy list will NOT use any globally +# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains +# (.example.com), hosts (example.com), and IP addresses. +#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT +#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT +#openshift_no_proxy='.hosts.example.com,some-host.com' +# +# Most environments don't require a proxy between openshift masters, nodes, and +# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list. +# If all of your hosts share a common domain you may wish to disable this and +# specify that domain above instead. +# +# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and +# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy +# variable (above) and set this value to False +#openshift_generate_no_proxy_hosts=True +# +# These options configure the BuildDefaults admission controller which injects +# configuration into Builds. Proxy related values will default to the global proxy +# config values. You only need to set these if they differ from the global proxy settings. 
+# See BuildDefaults documentation at +# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html +#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_no_proxy=mycorp.com +#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT +#openshift_builddefaults_git_no_proxy=mycorp.com +#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] +#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'} +#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'} +#openshift_builddefaults_resources_requests_cpu=100m +#openshift_builddefaults_resources_requests_memory=256Mi +#openshift_builddefaults_resources_limits_cpu=1000m +#openshift_builddefaults_resources_limits_memory=512Mi + +# Or you may optionally define your own build defaults configuration serialized as json +#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}' + +# These options configure the BuildOverrides admission controller which injects +# configuration into Builds. 
+# See BuildOverrides documentation at +# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html +#openshift_buildoverrides_force_pull=true +#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}] +#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'} +#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'} +#openshift_buildoverrides_tolerations=[{'key':'mykey1','value':'myvalue1','effect':'NoSchedule','operator':'Equal'}] + +# Or you may optionally define your own build overrides configuration serialized as json +#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}' + +# Enable service catalog +#openshift_enable_service_catalog=true + +# Enable template service broker (requires service catalog to be enabled, above) +#template_service_broker_install=true + +# Specify an openshift_service_catalog image +# (defaults for origin and openshift-enterprise, respectively) +#openshift_service_catalog_image="docker.io/openshift/origin-service-catalog:{ openshift_image_tag }" +#openshift_service_catalog_image="registry.access.redhat.com/openshift3/ose-service-catalog:{ openshift_image_tag }" + +# TSB image tag +#template_service_broker_version='v3.9' + +# Configure one or more namespaces whose templates will be served by the TSB +#openshift_template_service_broker_namespaces=['openshift'] + +# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default +#openshift_master_dynamic_provisioning_enabled=True + +# Admission plugin config +#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}} + +# Configure usage of 
openshift_clock role. +#openshift_clock_enabled=true + +# OpenShift Per-Service Environment Variables +# Environment variables are added to /etc/sysconfig files for +# each OpenShift node. +# API and controllers environment variables are merged in single +# master environments. +#openshift_node_env_vars={"ENABLE_HTTP2": "true"} +{% if no_http2 is defined %} +{% if no_http2 %} +openshift_master_api_env_vars={"ENABLE_HTTP2": "true"} +openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"} +openshift_node_env_vars={"ENABLE_HTTP2": "true"} +{% endif %} +{% endif %} + +# Enable API service auditing +#openshift_master_audit_config={"enabled": "true"} +# +# In case you want more advanced setup for the auditlog you can +# use this line. +# The directory in "auditFilePath" will be created if it does not +# exist +#openshift_master_audit_config={"enabled": "true", "auditFilePath": "/var/lib/origin/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": "14", "maximumFileSizeMegabytes": "500", "maximumRetainedFiles": "5"} + +# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used +# by openshift_deployment_type=origin +#openshift_enable_origin_repo=false + +# Validity of the auto-generated OpenShift certificates in days. +# See also openshift_hosted_registry_cert_expire_days above. +# +#openshift_ca_cert_expire_days=1825 +#openshift_node_cert_expire_days=730 +#openshift_master_cert_expire_days=730 + +# Validity of the auto-generated external etcd certificates in days. +# Controls validity for etcd CA, peer, server and client certificates. 
+# +#etcd_ca_default_days=1825 +# +# ServiceAccountConfig:LimitSecretRefences rejects pods that reference secrets their service accounts do not reference +# openshift_master_saconfig_limitsecretreferences=false + +# Upgrade Control +# +# By default nodes are upgraded in a serial manner one at a time and all failures +# are fatal, one set of variables for normal nodes, one set of variables for +# nodes that are part of control plane as the number of hosts may be different +# in those two groups. +#openshift_upgrade_nodes_serial=1 +#openshift_upgrade_nodes_max_fail_percentage=0 +#openshift_upgrade_control_plane_nodes_serial=1 +#openshift_upgrade_control_plane_nodes_max_fail_percentage=0 +# +# You can specify the number of nodes to upgrade at once. We do not currently +# attempt to verify that you have capacity to drain this many nodes at once +# so please be careful when specifying these values. You should also verify that +# the expected number of nodes are all schedulable and ready before starting an +# upgrade. If it's not possible to drain the requested nodes the upgrade will +# stall indefinitely until the drain is successful. +# +# If you're upgrading more than one node at a time you can specify the maximum +# percentage of failure within the batch before the upgrade is aborted. Any +# nodes that do fail are ignored for the rest of the playbook run and you should +# take care to investigate the failure and return the node to service so that +# your cluster. +# +# The percentage must exceed the value, this would fail on two failures +# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49 +# where as this would not +# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50 +# +# A timeout to wait for nodes to drain pods can be specified to ensure that the +# upgrade continues even if nodes fail to drain pods in the allowed time. 
The +# default value of 0 will wait indefinitely allowing the admin to investigate +# the root cause and ensuring that disruption budgets are respected. If the +# a timeout of 0 is used there will also be one attempt to re-try draining the +# node. If a non zero timeout is specified there will be no attempt to retry. +#openshift_upgrade_nodes_drain_timeout=0 +# +# Multiple data migrations take place and if they fail they will fail the upgrade +# You may wish to disable these or make them non fatal +# +# openshift_upgrade_pre_storage_migration_enabled=true +# openshift_upgrade_pre_storage_migration_fatal=true +# openshift_upgrade_post_storage_migration_enabled=true +# openshift_upgrade_post_storage_migration_fatal=false + +###################################################################### +# CloudForms/ManageIQ (CFME/MIQ) Configuration + +# See the readme for full descriptions and getting started +# instructions: ../../roles/openshift_management/README.md or go directly to +# their definitions: ../../roles/openshift_management/defaults/main.yml +# ../../roles/openshift_management/vars/main.yml +# +# Namespace for the CFME project +#openshift_management_project: openshift-management + +# Namespace/project description +#openshift_management_project_description: CloudForms Management Engine + +# Choose 'miq-template' for a podified database install +# Choose 'miq-template-ext-db' for an external database install +# +# If you are using the miq-template-ext-db template then you must add +# the required database parameters to the +# openshift_management_template_parameters variable. +#openshift_management_app_template: miq-template + +# Allowed options: nfs, nfs_external, preconfigured, cloudprovider. +#openshift_management_storage_class: nfs + +# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a +# netapp appliance, then you must set the hostname here. Leave the +# value as 'false' if you are not using external NFS. 
+#openshift_management_storage_nfs_external_hostname: false + +# [OPTIONAL] - If you are using external NFS then you must set the base +# path to the exports location here. +# +# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports +# that will back the application PV and optionally the database +# pv. Export path definitions, relative to +# { openshift_management_storage_nfs_base_dir} +# +# LOCAL NFS NOTE: +# +# You may may also change this value if you want to change the default +# path used for local NFS exports. +#openshift_management_storage_nfs_base_dir: /exports + +# LOCAL NFS NOTE: +# +# You may override the automatically selected LOCAL NFS server by +# setting this variable. Useful for testing specific task files. +#openshift_management_storage_nfs_local_hostname: false + +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. +# +# For example, adding this cluster as a container provider, +# playbooks/openshift-management/add_container_provider.yml +#openshift_management_username: admin +#openshift_management_password: smartvm + +# A hash of parameters you want to override or set in the +# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in +# your inventory file as a simple hash. Acceptable values are defined +# under the .parameters list in files/miq-template{-ext-db}.yaml +# Example: +# +# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'} +#openshift_management_template_parameters: {} + +# Firewall configuration +# You can open additional firewall ports by defining them as a list. of service +# names and ports/port ranges for either masters or nodes. 
+#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}] +#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}] + +# Service port node range +#openshift_node_port_range=30000-32767 + +# Enable unsupported configurations, things that will yield a partially +# functioning cluster but would not be supported for production use +#openshift_enable_unsupported_configurations=false +openshift_enable_unsupported_configurations=True diff --git a/roles/ansible-server/templates/ansible.cfg.j2 b/roles/ansible-server/templates/ansible.cfg.j2 index bff0fd5a43..d197372ffe 100644 --- a/roles/ansible-server/templates/ansible.cfg.j2 +++ b/roles/ansible-server/templates/ansible.cfg.j2 @@ -462,7 +462,7 @@ pipelining = True # file systems that require special treatment when dealing with security context # the default behaviour that copies the existing context or uses the user default # needs to be changed to use the file system dependent context. -#special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p +special_context_filesystems=nfs,vboxsf,fuse,ramfs,9p,fuse.glusterfs # Set this to yes to allow libvirt_lxc connections to work without SELinux. 
#libvirt_lxc_noseclabel = yes diff --git a/roles/basessh/tasks/main.yml b/roles/basessh/tasks/main.yml index 0b60d87aec..3b8166aeb5 100644 --- a/roles/basessh/tasks/main.yml +++ b/roles/basessh/tasks/main.yml @@ -25,7 +25,7 @@ - /root/.ssh/known_hosts when: birthday is defined -- name: make sure linselinux-python is installed +- name: make sure libselinux-python is installed package: name=libselinux-python state=present tags: - basessh @@ -128,7 +128,19 @@ - sshd - base -# TODO: Get expired certificates, and add them to certs_to_sign +# Renew if last mod was more than 10 months ago +- name: Get soon-to-expire certificates to sign + set_fact: + certs_to_sign: "{{certs_to_sign}} + [ '{{item.item.path}}' ]" + with_items: "{{ssh_cert_files.results}}" + when: "item.stat.exists and item.stat.mtime|int < (lookup('pipe', 'date +%s')|int - 25920000)" + tags: + - basessh + - sshd_cert + - sshd_config + - config + - sshd + - base - set_fact: pubkeydir: "/tmp/sshkeysign/{{inventory_hostname}}" diff --git a/roles/batcave/files/namespace.conf b/roles/batcave/files/namespace.conf new file mode 100644 index 0000000000..04b8bce01e --- /dev/null +++ b/roles/batcave/files/namespace.conf @@ -0,0 +1,28 @@ +# /etc/security/namespace.conf +# +# See /usr/share/doc/pam-*/txts/README.pam_namespace for more information. +# +# Uncommenting the following three lines will polyinstantiate +# /tmp, /var/tmp and user's home directories. /tmp and /var/tmp will +# be polyinstantiated based on the MLS level part of the security context as well as user +# name, Polyinstantion will not be performed for user root and adm for directories +# /tmp and /var/tmp, whereas home directories will be polyinstantiated for all users. +# The user name and context is appended to the instance prefix. +# +# Note that instance directories do not have to reside inside the +# polyinstantiated directory. 
In the examples below, instances of /tmp +# will be created in /tmp-inst directory, where as instances of /var/tmp +# and users home directories will reside within the directories that +# are being polyinstantiated. +# +# Instance parent directories must exist for the polyinstantiation +# mechanism to work. By default, they should be created with the mode +# of 000. pam_namespace module will enforce this mode unless it +# is explicitly called with an argument to ignore the mode of the +# instance parent. System administrators should use this argument with +# caution, as it will reduce security and isolation achieved by +# polyinstantiation. +# +#/tmp /tmp-inst/ level root,adm +#/var/tmp /var/tmp-inst/ level root,adm +#$HOME $HOME/$USER.inst/ level diff --git a/roles/batcave/tasks/main.yml b/roles/batcave/tasks/main.yml index 2dccc4d29c..d4abe539b5 100644 --- a/roles/batcave/tasks/main.yml +++ b/roles/batcave/tasks/main.yml @@ -129,6 +129,7 @@ - httpd_can_network_connect - httpd_use_nfs - httpd_can_network_relay + - polyinstantiation_enabled tags: - batcave - config @@ -166,27 +167,6 @@ - batcave - config -# -# Script to sync ssh keys from fas to openshift instances. -# - -- name: setup python module for openshift sync script - copy: src=oshift_mod.py dest=/usr/local/bin/oshift_mod.py mode=0644 - tags: - - batcave - - config - -- name: setup setup sync-openshift-keys config - template: src=sync-openshift-keys.conf.j2 dest=/etc/sync-openshift-keys.conf mode=0600 - tags: - - batcave - - config - -- name: setup setup sync-openshift-keys script - copy: src=sync-openshift-keys.py dest=/usr/local/bin/sync-openshift-keys.py mode=0755 - tags: - - batcave - - config # The zodbot server must allow TCP on whatever port zodbot is listening on # for this to work (currently TCP port 5050). 
@@ -507,3 +487,23 @@ - koji - batcave +- name: create some tmp dirs + file: path=/tmp-inst mode=000 owner=root group=root state=directory + tags: + - config + - batcave + - selinux + +- name: create some tmp dirs + file: path=/var/tmp-inst mode=000 owner=root group=root state=directory + tags: + - config + - batcave + - selinux + +- name: put in place namespace.conf file + copy: src=namespace.conf dest=/etc/security/namespace.conf mode=644 owner=root group=root + tags: + - config + - batcave + - selinux diff --git a/roles/bodhi2/backend/files/new-updates-sync b/roles/bodhi2/backend/files/new-updates-sync index 3bcfbea131..e625f50853 100755 --- a/roles/bodhi2/backend/files/new-updates-sync +++ b/roles/bodhi2/backend/files/new-updates-sync @@ -14,13 +14,32 @@ logger = logging.getLogger('updates-sync') SOURCE = '/mnt/koji/compose/updates/' +RAWHIDESOURCE = '/mnt/koji/compose/rawhide/' FEDORADEST = '/pub/fedora/linux/updates/' FEDORAMODDEST = '/pub/fedora/linux/modular/updates/' FEDORAALTDEST = '/pub/fedora-secondary/updates/' +RAWHIDEDEST = '/pub/fedora/linux/development/' +RAWHIDEALTDEST = '/pub/fedora-secondary/development/' EPELDEST = '/pub/epel/' ATOMICSOURCE = '/mnt/koji/compose/atomic/repo/' ATOMICDEST = '/mnt/koji/atomic/repo/' -RELEASES = {'f29': {'topic': 'fedora', +RELEASES = {'rawhide': {'topic': 'fedora', + 'version': 'rawhide', + 'modules': ['fedora', 'fedora-secondary'], + 'repos': {'rawhide': { + 'from': 'latest-Fedora-Rawhide', + 'ostrees': [{'ref': 'fedora/rawhide/%(arch)s/atomic-host', + 'dest': ATOMICDEST, + 'arches': ['x86_64', 'ppc64le', 'aarch64']}, + {'ref': 'fedora/rawhide/x86_64/silverblue', + 'dest': ATOMICDEST}], + 'to': [{'arches': ['x86_64', 'armhfp', 'aarch64', 'source'], + 'dest': os.path.join(RAWHIDEDEST, 'rawhide', 'Everything')}, + {'arches': ['i386', 'ppc64le', 's390x'], + 'dest': os.path.join(RAWHIDEALTDEST, 'rawhide', 'Everything')} + ]}}, + }, + 'f29': {'topic': 'fedora', 'version': '29', 'modules': ['fedora', 'fedora-secondary'], 
'repos': {'updates': { @@ -28,10 +47,10 @@ RELEASES = {'f29': {'topic': 'fedora', 'ostrees': [{'ref': 'fedora/29/%(arch)s/updates/atomic-host', 'dest': ATOMICDEST, 'arches': ['x86_64', 'ppc64le', 'aarch64']}, - {'ref': 'fedora/29/x86_64/updates/workstation', + {'ref': 'fedora/29/x86_64/updates/silverblue', 'dest': ATOMICDEST}, # Hack around for the fact that ostree on f25 doesn't know links - {'ref': 'fedora/29/x86_64/workstation', + {'ref': 'fedora/29/x86_64/silverblue', 'dest': ATOMICDEST}], 'to': [{'arches': ['x86_64', 'armhfp', 'aarch64', 'source'], 'dest': os.path.join(FEDORADEST, '29', 'Everything')}, @@ -43,7 +62,7 @@ RELEASES = {'f29': {'topic': 'fedora', 'ostrees': [{'ref': 'fedora/29/%(arch)s/testing/atomic-host', 'dest': ATOMICDEST, 'arches': ['x86_64', 'ppc64le', 'aarch64']}, - {'ref': 'fedora/29/x86_64/testing/workstation', + {'ref': 'fedora/29/x86_64/testing/silverblue', 'dest': ATOMICDEST}], 'to': [{'arches': ['x86_64', 'aarch64', 'armhfp', 'source'], 'dest': os.path.join(FEDORADEST, 'testing', '29', 'Everything')}, @@ -275,9 +294,14 @@ def to_human(num_bytes): def sync_single_repo_arch(release, repo, arch, dest_path): - source_path = os.path.join(SOURCE, - RELEASES[release]['repos'][repo]['from'], - 'compose', 'Everything', arch) + if repo == 'rawhide': + source_path = os.path.join(RAWHIDESOURCE, + RELEASES[release]['repos'][repo]['from'], + 'compose', 'Everything', arch) + else: + source_path = os.path.join(SOURCE, + RELEASES[release]['repos'][repo]['from'], + 'compose', 'Everything', arch) maindir = 'tree' if arch == 'source' else 'os' @@ -340,9 +364,17 @@ def sync_single_repo(release, repo): def determine_last_link(release, repo): - source_path = os.path.join(SOURCE, - RELEASES[release]['repos'][repo]['from']) - target = os.readlink(source_path) + if repo == 'rawhide': + source_path = os.path.join(RAWHIDESOURCE, + RELEASES[release]['repos'][repo]['from']) + #Since latest-Fedora-Rawhide is a symlink pointing to just the + #compose dir rather than 
its full path, we need the absolute path + #of the compose rather than relative path + target = os.path.realpath(source_path) + else: + source_path = os.path.join(SOURCE, + RELEASES[release]['repos'][repo]['from']) + target = os.readlink(source_path) logger.info('Release %s, repo %s, target %s', release, repo, target) RELEASES[release]['repos'][repo]['from'] = target return target diff --git a/roles/bodhi2/backend/tasks/main.yml b/roles/bodhi2/backend/tasks/main.yml index f9cf08b6d4..2f9521231f 100644 --- a/roles/bodhi2/backend/tasks/main.yml +++ b/roles/bodhi2/backend/tasks/main.yml @@ -177,7 +177,7 @@ # bodhi2/backend/files/koji-sync-listener.py # This cronjob runs only once a day. The listener script runs reactively. cron: name="owner-sync" minute="15" hour="4" user="root" - job="/usr/local/bin/lock-wrapper owner-sync '/usr/local/bin/owner-sync-pagure f30 f30-container f30-modular f29 f28 f27 f29-container f28-container f27-container f28-docker f27-docker f29-modular f28-modular f27-modular epel7 dist-6E-epel module-package-list modular" + job="/usr/local/bin/lock-wrapper owner-sync '/usr/local/bin/owner-sync-pagure f30 f30-container f30-modular f29 f28 f27 f29-container f28-container f27-container f28-docker f27-docker f29-modular f28-modular f27-modular epel7 dist-6E-epel module-package-list modular'" cron_file=update-koji-owner when: inventory_hostname.startswith('bodhi-backend01') and env == "production" tags: diff --git a/roles/bodhi2/backend/templates/pungi.rpm.conf.j2 b/roles/bodhi2/backend/templates/pungi.rpm.conf.j2 index fd1e775a34..4c436dbb1d 100644 --- a/roles/bodhi2/backend/templates/pungi.rpm.conf.j2 +++ b/roles/bodhi2/backend/templates/pungi.rpm.conf.j2 @@ -203,7 +203,11 @@ image_build = { 'image-build': { 'format': [('qcow2', 'qcow2'), ('raw-xz', 'raw.xz')], 'name': 'Fedora-AtomicHost', - 'version': '!VERSION_FROM_VERSION' + [% if request.name == 'stable' %] + # Use a different version string for the updates vs updates-testing + # runs so that 
NVRs don't conflict + 'version': '!VERSION_FROM_VERSION' + [% endif %] 'release': '!RELEASE_FROM_DATE_RESPIN' 'kickstart': 'fedora-atomic.ks', 'distro': 'Fedora-22', @@ -225,7 +229,11 @@ image_build = { 'image-build': { 'format': [('vagrant-libvirt','vagrant-libvirt.box'), ('vagrant-virtualbox','vagrant-virtualbox.box')], 'name': 'Fedora-AtomicHost-Vagrant', - 'version': '!VERSION_FROM_VERSION' + [% if request.name == 'stable' %] + # Use a different version string for the updates vs updates-testing + # runs so that NVRs don't conflict + 'version': '!VERSION_FROM_VERSION' + [% endif %] 'release': '!RELEASE_FROM_DATE_RESPIN' 'kickstart': 'fedora-atomic-vagrant.ks', 'distro': 'Fedora-22', diff --git a/roles/copr/backend/files/lighttpd/lighttpd_dev.conf b/roles/copr/backend/files/lighttpd/lighttpd_dev.conf index ba3d6b7482..1bf908e09f 100644 --- a/roles/copr/backend/files/lighttpd/lighttpd_dev.conf +++ b/roles/copr/backend/files/lighttpd/lighttpd_dev.conf @@ -456,7 +456,7 @@ server.upload-dirs = ( "/var/tmp" ) ## custom includes like vhosts. 
## #include "conf.d/config.conf" -include_shell "cat /etc/lighttpd/vhosts.d/*.conf" +#include_shell "cat /etc/lighttpd/vhosts.d/*.conf" ## ####################################################################### diff --git a/roles/copr/backend/files/provision/files/mock/fedora-rawhide-i386.cfg b/roles/copr/backend/files/provision/files/mock/fedora-rawhide-i386.cfg new file mode 100644 index 0000000000..0912b5e1b1 --- /dev/null +++ b/roles/copr/backend/files/provision/files/mock/fedora-rawhide-i386.cfg @@ -0,0 +1,48 @@ +config_opts['root'] = 'fedora-rawhide-i386' +config_opts['target_arch'] = 'i686' +config_opts['legal_host_arches'] = ('i386', 'i586', 'i686', 'x86_64') +config_opts['chroot_setup_cmd'] = 'install @buildsys-build' +config_opts['dist'] = 'rawhide' # only useful for --resultdir variable subst +config_opts['extra_chroot_dirs'] = [ '/run/lock', ] +config_opts['releasever'] = '30' + +config_opts['package_manager'] = 'dnf' + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= +install_weak_deps=0 +metadata_expire=0 +best=1 + +# repos + +[fedora] +name=fedora +metalink=https://mirrors.fedoraproject.org/metalink?repo=rawhide&arch=$basearch +gpgkey=file:///usr/share/distribution-gpg-keys/fedora/RPM-GPG-KEY-fedora-$releasever-primary file:///usr/share/distribution-gpg-keys/fedora/RPM-GPG-KEY-fedora-29-primary +gpgcheck=1 +skip_if_unavailable=False + +[local] +name=local +baseurl=https://kojipkgs.fedoraproject.org/repos/rawhide/latest/i386 +cost=2000 +enabled=0 +skip_if_unavailable=False + +[fedora-debuginfo] +name=Fedora Rawhide - i386 - Debug +metalink=https://mirrors.fedoraproject.org/metalink?repo=rawhide-debug&arch=$basearch +enabled=0 +skip_if_unavailable=False +""" diff --git a/roles/copr/backend/files/provision/files/mock/fedora-rawhide-ppc64le.cfg 
b/roles/copr/backend/files/provision/files/mock/fedora-rawhide-ppc64le.cfg new file mode 100644 index 0000000000..9661c7120a --- /dev/null +++ b/roles/copr/backend/files/provision/files/mock/fedora-rawhide-ppc64le.cfg @@ -0,0 +1,48 @@ +config_opts['root'] = 'fedora-rawhide-ppc64le' +config_opts['target_arch'] = 'ppc64le' +config_opts['legal_host_arches'] = ('ppc64le',) +config_opts['chroot_setup_cmd'] = 'install @buildsys-build' +config_opts['dist'] = 'rawhide' # only useful for --resultdir variable subst +config_opts['extra_chroot_dirs'] = [ '/run/lock', ] +config_opts['releasever'] = '30' + +config_opts['package_manager'] = 'dnf' + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=1 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= +install_weak_deps=0 +metadata_expire=0 +best=1 + +# repos + +[fedora] +name=fedora +metalink=https://mirrors.fedoraproject.org/metalink?repo=rawhide&arch=$basearch +gpgkey=file:///usr/share/distribution-gpg-keys/fedora/RPM-GPG-KEY-fedora-$releasever-primary file:///usr/share/distribution-gpg-keys/fedora/RPM-GPG-KEY-fedora-29-primary +gpgcheck=1 +skip_if_unavailable=False + +[local] +name=local +baseurl=https://kojipkgs.fedoraproject.org/repos/rawhide/latest/ppc64le/ +cost=2000 +enabled=0 +skip_if_unavailable=False + +[fedora-debuginfo] +name=Fedora Rawhide - ppc64le - Debug +metalink=https://mirrors.fedoraproject.org/metalink?repo=rawhide-debug&arch=$basearch +enabled=0 +skip_if_unavailable=False +""" diff --git a/roles/copr/backend/files/provision/files/mock/fedora-rawhide-x86_64.cfg b/roles/copr/backend/files/provision/files/mock/fedora-rawhide-x86_64.cfg new file mode 100644 index 0000000000..cd2edfe037 --- /dev/null +++ b/roles/copr/backend/files/provision/files/mock/fedora-rawhide-x86_64.cfg @@ -0,0 +1,48 @@ +config_opts['root'] = 'fedora-rawhide-x86_64' +config_opts['target_arch'] = 'x86_64' +config_opts['legal_host_arches'] = 
('x86_64',) +config_opts['chroot_setup_cmd'] = 'install @buildsys-build' +config_opts['dist'] = 'rawhide' # only useful for --resultdir variable subst +config_opts['extra_chroot_dirs'] = [ '/run/lock', ] +config_opts['releasever'] = '30' + +config_opts['package_manager'] = 'dnf' + +config_opts['yum.conf'] = """ +[main] +keepcache=1 +debuglevel=2 +reposdir=/dev/null +logfile=/var/log/yum.log +retries=20 +obsoletes=1 +gpgcheck=0 +assumeyes=1 +syslog_ident=mock +syslog_device= +install_weak_deps=0 +metadata_expire=0 +best=1 + +# repos + +[fedora] +name=fedora +metalink=https://mirrors.fedoraproject.org/metalink?repo=rawhide&arch=$basearch +gpgkey=file:///usr/share/distribution-gpg-keys/fedora/RPM-GPG-KEY-fedora-$releasever-primary file:///usr/share/distribution-gpg-keys/fedora/RPM-GPG-KEY-fedora-29-primary +gpgcheck=1 +skip_if_unavailable=False + +[local] +name=local +baseurl=https://kojipkgs.fedoraproject.org/repos/rawhide/latest/x86_64/ +cost=2000 +enabled=0 +skip_if_unavailable=False + +[fedora-debuginfo] +name=Fedora Rawhide - x86_64 - Debug +metalink=https://mirrors.fedoraproject.org/metalink?repo=rawhide-debug&arch=$basearch +enabled=0 +skip_if_unavailable=False +""" diff --git a/roles/copr/base/files/forward b/roles/copr/base/files/forward index 2e1b36e41d..6026f0decc 100644 --- a/roles/copr/base/files/forward +++ b/roles/copr/base/files/forward @@ -3,3 +3,5 @@ kfenzi@redhat.com nb@fedoraproject.org sgallagh@redhat.com tcallawa@redhat.com +clime@redhat.com +jkadlcik@redhat.com diff --git a/roles/copr/base/files/forward_dev b/roles/copr/base/files/forward_dev index e25b03e22b..dd2c4e6759 100644 --- a/roles/copr/base/files/forward_dev +++ b/roles/copr/base/files/forward_dev @@ -1,2 +1,3 @@ msuchy+coprmachine@redhat.com -asamalik@redhat.com +clime@redhat.com +jkadlcik@redhat.com diff --git a/roles/copr/frontend/files/DigiCertCA.crt b/roles/copr/frontend-cloud/files/DigiCertCA.crt similarity index 100% rename from roles/copr/frontend/files/DigiCertCA.crt rename to 
roles/copr/frontend-cloud/files/DigiCertCA.crt diff --git a/roles/copr/frontend-cloud/files/banner-include.html b/roles/copr/frontend-cloud/files/banner-include.html new file mode 100644 index 0000000000..2b539819d1 --- /dev/null +++ b/roles/copr/frontend-cloud/files/banner-include.html @@ -0,0 +1,8 @@ +
+

+ Warning! This is a development server. +

+

+ Production instance: https://copr.fedoraproject.org/ +

+
diff --git a/roles/copr/frontend-cloud/files/crond b/roles/copr/frontend-cloud/files/crond new file mode 100644 index 0000000000..25483f778e --- /dev/null +++ b/roles/copr/frontend-cloud/files/crond @@ -0,0 +1,4 @@ +# Settings for the CRON daemon. +# CRONDARGS= : any extra command-line startup arguments for crond +CRONDARGS= +MAILTO=sysadmin-copr-members@fedoraproject.org diff --git a/roles/copr/frontend/files/httpd/welcome.conf b/roles/copr/frontend-cloud/files/httpd/welcome.conf similarity index 100% rename from roles/copr/frontend/files/httpd/welcome.conf rename to roles/copr/frontend-cloud/files/httpd/welcome.conf diff --git a/roles/copr/frontend/files/pg/pg_hba.conf b/roles/copr/frontend-cloud/files/pg/pg_hba.conf similarity index 100% rename from roles/copr/frontend/files/pg/pg_hba.conf rename to roles/copr/frontend-cloud/files/pg/pg_hba.conf diff --git a/roles/copr/frontend/files/robots.txt b/roles/copr/frontend-cloud/files/robots.txt similarity index 100% rename from roles/copr/frontend/files/robots.txt rename to roles/copr/frontend-cloud/files/robots.txt diff --git a/roles/copr/frontend/handlers/main.yml b/roles/copr/frontend-cloud/handlers/main.yml similarity index 100% rename from roles/copr/frontend/handlers/main.yml rename to roles/copr/frontend-cloud/handlers/main.yml diff --git a/roles/copr/frontend/meta/main.yml b/roles/copr/frontend-cloud/meta/main.yml similarity index 100% rename from roles/copr/frontend/meta/main.yml rename to roles/copr/frontend-cloud/meta/main.yml diff --git a/roles/copr/frontend/tasks/install_certs.yml b/roles/copr/frontend-cloud/tasks/install_certs.yml similarity index 100% rename from roles/copr/frontend/tasks/install_certs.yml rename to roles/copr/frontend-cloud/tasks/install_certs.yml diff --git a/roles/copr/frontend-cloud/tasks/main.yml b/roles/copr/frontend-cloud/tasks/main.yml new file mode 100644 index 0000000000..353495fabf --- /dev/null +++ b/roles/copr/frontend-cloud/tasks/main.yml @@ -0,0 +1,138 @@ +--- +- 
import_tasks: "mount_fs.yml" + +- command: "ls -dZ /var/lib/pgsql" + register: pgsql_ls + +- name: update selinux context for postgress db dir if it's wrong + command: "restorecon -vvRF /var/lib/pgsql" + when: pgsql_ls.stdout is defined and 'postgresql_db_t' not in pgsql_ls.stdout + +- name: install copr-frontend and copr-selinux + dnf: state=latest name={{ item }} + with_items: + - copr-frontend + - copr-selinux + tags: + - packages + + # we install python-alembic because https://bugzilla.redhat.com/show_bug.cgi?id=1536058 +- name: install additional pkgs for copr-frontend + dnf: state=present pkg={{ item }} + with_items: + - "bash-completion" + - "mod_ssl" + - redis + - pxz + - python3-alembic + tags: + - packages + +- name: install copr configs + template: src="copr.conf" dest=/etc/copr/copr.conf mode=600 + notify: + - reload httpd + tags: + - config + +- name: enable and start redis # TODO: .service in copr-backend should depend on redis + service: name=redis enabled=yes state=started + +- name: enable and start pagure-events + service: name=pagure-events enabled=yes state=started + +- name: copy apache files to conf.d + copy: src="httpd/{{ item }}" dest="/etc/httpd/conf.d/{{ item }}" + with_items: + - "welcome.conf" + tags: + - config + +- name: copy crond conf + copy: src="crond" dest="/etc/sysconfig/crond" + +- name: copy apache files to conf.d (templates) + template: src="httpd/{{ item }}" dest="/etc/httpd/conf.d/{{ item }}" + with_items: + - "coprs.conf" + tags: + - config + +# https://bugzilla.redhat.com/show_bug.cgi?id=1535689 +- name: Allow execmem for Apache + seboolean: + name: httpd_execmem + state: yes + persistent: yes + +- import_tasks: "psql_setup.yml" + +- name: upgrade db to head + command: alembic-3 upgrade head + become: yes + become_user: copr-fe + args: + chdir: /usr/share/copr/coprs_frontend/ + +- name: set up admins + command: ./manage.py alter_user --admin {{ item }} + become: yes + become_user: copr-fe + args: + chdir: 
/usr/share/copr/coprs_frontend/ + ignore_errors: yes + with_items: + - msuchy + - sgallagh + - spot + - nb + - kevin + +- name: install ssl certificates for production + import_tasks: "install_certs.yml" + when: not devel + tags: + - config + +- name: letsencrypt cert + include_role: name=certbot + when: devel + tags: + - config + +- name: Check that cert file exists + stat: + path: "/etc/letsencrypt/live/{{ copr_frontend_public_hostname }}/cert.pem" + register: stat_cert + +- name: Should admin run certbot? + fail: + msg: Please see roles/certbot/README step (2) and manually run certbot + when: + - stat_cert.stat.exists == False + - devel + +- name: install copr-frontend ssl vhost + template: src="httpd/coprs_ssl.conf.j2" dest="/etc/httpd/conf.d/coprs_ssl.conf" + tags: + - config + +- name: enable services + service: state=started enabled=yes name={{ item }} + with_items: + - httpd + +- name: set dev banner for dev instance + when: devel + copy: src=banner-include.html dest=/var/lib/copr/ + +- name: disallow robots on dev instance + when: devel + copy: src=robots.txt dest=/var/www/html/ + +- name: rebuild indexes + command: ./manage.py update_indexes + become: yes + become_user: copr-fe + args: + chdir: /usr/share/copr/coprs_frontend/ diff --git a/roles/copr/frontend/tasks/mount_fs.yml b/roles/copr/frontend-cloud/tasks/mount_fs.yml similarity index 100% rename from roles/copr/frontend/tasks/mount_fs.yml rename to roles/copr/frontend-cloud/tasks/mount_fs.yml diff --git a/roles/copr/frontend/tasks/psql_setup.yml b/roles/copr/frontend-cloud/tasks/psql_setup.yml similarity index 100% rename from roles/copr/frontend/tasks/psql_setup.yml rename to roles/copr/frontend-cloud/tasks/psql_setup.yml diff --git a/roles/copr/frontend-cloud/templates/copr.conf b/roles/copr/frontend-cloud/templates/copr.conf new file mode 100644 index 0000000000..b66f1514d1 --- /dev/null +++ b/roles/copr/frontend-cloud/templates/copr.conf @@ -0,0 +1,81 @@ +# Directory and files where is stored 
Copr database files +DATA_DIR = '/var/lib/copr/data' +DATABASE = '/var/lib/copr/data/copr.db' +OPENID_STORE = '/var/lib/copr/data/openid_store' +WHOOSHEE_DIR = '/var/lib/copr/data/whooshee' +WHOOSHEE_MIN_STRING_LEN = 2 +WHOOSHEE_WRITER_TIMEOUT = 10 + +SECRET_KEY = '{{ copr_secret_key }}' +BACKEND_PASSWORD = '{{ copr_backend_password }}' +BACKEND_BASE_URL = '{{ backend_base_url }}' + +# restrict access to a set of users +#USE_ALLOWED_USERS = False +#ALLOWED_USERS = ['bonnie', 'clyde'] + +SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://copr-fe:{{ copr_database_password }}@/coprdb' + +# Token length, defaults to 30 (max 255) +#API_TOKEN_LENGTH = 30 + +# Expiration of API token in days +#API_TOKEN_EXPIRATION = 180 + +# logging options +#SEND_LOGS_TO = ['root@localhost'] +#LOGGING_LEVEL = logging.ERROR + +DEBUG = False +SQLALCHEMY_ECHO = False + +CSRF_ENABLED = True +WTF_CSRF_ENABLED = True + +# send emails when user's perms change in project? +SEND_EMAILS = True + +PUBLIC_COPR_HOSTNAME = "{{ copr_frontend_public_hostname }}" + +LOG_FILENAME = "/var/log/copr-frontend/frontend.log" +LOG_DIR = "/var/log/copr-frontend/" + +# to accept stat events from logstash +INTRANET_IPS = {{ copr_backend_ips }} + +REPO_GPGCHECK = {% if devel %} 0 {% else %} 1 {% endif %} + +{% if env == 'staging' %} +PUBLIC_COPR_BASE_URL = "http://copr-fe-dev.cloud.fedoraproject.org" +{% else %} +PUBLIC_COPR_BASE_URL = "https://copr.fedorainfracloud.org" +{% endif %} + +{% if env == 'staging' %} +# Staging URLs for fedmenu +FEDMENU_URL = "https://apps.stg.fedoraproject.org/fedmenu/" +FEDMENU_DATA_URL = "https://apps.stg.fedoraproject.org/js/data.js" +{% else %} +# Production URLs for fedmenu +FEDMENU_URL = "https://apps.fedoraproject.org/fedmenu/" +FEDMENU_DATA_URL = "https://apps.fedoraproject.org/js/data.js" +{% endif %} + +# todo: check that ansible variable is used correctly +{% if env == 'staging' %} +ENFORCE_PROTOCOL_FOR_BACKEND_URL = "http" +ENFORCE_PROTOCOL_FOR_FRONTEND_URL = "http" +{% else 
%} +ENFORCE_PROTOCOL_FOR_BACKEND_URL = "https" +ENFORCE_PROTOCOL_FOR_FRONTEND_URL = "https" +{% endif %} + +DIST_GIT_URL="https://{{ dist_git_base_url }}/cgit" +DIST_GIT_CLONE_URL="https://{{ dist_git_base_url }}/git" +COPR_DIST_GIT_LOGS_URL = "https://{{ dist_git_base_url }}/per-task-logs" +MBS_URL = "http://localhost/module/1/module-builds/" + +# no need to filter cla_* groups, they are already filtered by fedora openid +BLACKLISTED_GROUPS = ['fedorabugs', 'packager', 'provenpackager'] + +DEFER_BUILD_SECONDS = 300 diff --git a/roles/copr/frontend/files/httpd/coprs.conf b/roles/copr/frontend-cloud/templates/httpd/coprs.conf similarity index 50% rename from roles/copr/frontend/files/httpd/coprs.conf rename to roles/copr/frontend-cloud/templates/httpd/coprs.conf index 054d507e06..453144a8ac 100644 --- a/roles/copr/frontend/files/httpd/coprs.conf +++ b/roles/copr/frontend-cloud/templates/httpd/coprs.conf @@ -3,17 +3,18 @@ LoadModule wsgi_module modules/mod_wsgi.so WSGISocketPrefix /var/run/wsgi Alias /robots.txt /var/www/html/robots.txt +WSGIDaemonProcess 127.0.0.1 user=copr-fe group=copr-fe threads=15 display-name=other maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess api user=copr-fe group=copr-fe threads=15 display-name=api maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess backend user=copr-fe group=copr-fe threads=15 display-name=backend maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess stats user=copr-fe group=copr-fe threads=15 display-name=stats maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess tmp user=copr-fe group=copr-fe threads=15 display-name=tmp maximum-requests=8000 graceful-timeout=20 +WSGIScriptAlias / /usr/share/copr/coprs_frontend/application + ServerName copr.fedorainfracloud.org ServerAlias copr-fe.cloud.fedoraproject.org WSGIPassAuthorization On - WSGIDaemonProcess 127.0.0.1 user=copr-fe group=copr-fe threads=15 display-name=other maximum-requests=8000 graceful-timeout=20 - WSGIDaemonProcess api 
user=copr-fe group=copr-fe threads=15 display-name=api maximum-requests=8000 graceful-timeout=20 - WSGIDaemonProcess backend user=copr-fe group=copr-fe threads=15 display-name=backend maximum-requests=8000 graceful-timeout=20 - WSGIDaemonProcess stats user=copr-fe group=copr-fe threads=15 display-name=stats maximum-requests=8000 graceful-timeout=20 - WSGIDaemonProcess tmp user=copr-fe group=copr-fe threads=15 display-name=tmp maximum-requests=8000 graceful-timeout=20 - WSGIScriptAlias / /usr/share/copr/coprs_frontend/application WSGIProcessGroup 127.0.0.1 @@ -27,6 +28,14 @@ Alias /robots.txt /var/www/html/robots.txt +{% if devel %} + + RewriteEngine on + RewriteRule ^/\.well-known/(.*) /srv/web/acme-challenge/.well-known/$1 [L] + RewriteRule "^/?(.*)" "https://%{HTTP_HOST}/$1" [L,R=301,NE] + +{% endif %} + ExtendedStatus On diff --git a/roles/copr/frontend/templates/httpd/coprs_ssl.conf.j2 b/roles/copr/frontend-cloud/templates/httpd/coprs_ssl.conf.j2 similarity index 73% rename from roles/copr/frontend/templates/httpd/coprs_ssl.conf.j2 rename to roles/copr/frontend-cloud/templates/httpd/coprs_ssl.conf.j2 index 5d4612efcc..846d8d85dd 100644 --- a/roles/copr/frontend/templates/httpd/coprs_ssl.conf.j2 +++ b/roles/copr/frontend-cloud/templates/httpd/coprs_ssl.conf.j2 @@ -1,3 +1,6 @@ +Listen 443 https + + SSLEngine on SSLProtocol {{ ssl_protocols }} @@ -6,11 +9,17 @@ SSLHonorCipherOrder on Header always add Strict-Transport-Security "max-age=31536000; preload" + {% if not devel %} SSLCertificateFile /etc/pki/tls/certs/copr.fedorainfracloud.org.crt SSLCertificateKeyFile /etc/pki/tls/private/copr.fedorainfracloud.org.key SSLCertificateChainFile /etc/pki/tls/certs/copr.fedorainfracloud.org.intermediate.crt + {% else %} + SSLCertificateFile /etc/letsencrypt/live/{{ copr_frontend_public_hostname }}/cert.pem + SSLCertificateKeyFile /etc/letsencrypt/live/{{ copr_frontend_public_hostname }}/privkey.pem + SSLCertificateChainFile /etc/letsencrypt/live/{{ 
copr_frontend_public_hostname }}/fullchain.pem + {% endif %} - ServerName copr.fedorainfracloud.org + ServerName {{ copr_frontend_public_hostname }} WSGIPassAuthorization On WSGIScriptAlias / /usr/share/copr/coprs_frontend/application @@ -54,13 +63,18 @@ SSLHonorCipherOrder on Header always add Strict-Transport-Security "max-age=31536000; preload" + {% if not devel %} SSLCertificateFile /etc/pki/tls/certs/copr.fedorainfracloud.org.crt SSLCertificateKeyFile /etc/pki/tls/private/copr.fedorainfracloud.org.key SSLCertificateChainFile /etc/pki/tls/certs/copr.fedorainfracloud.org.intermediate.crt + {% else %} + SSLCertificateFile /etc/letsencrypt/live/{{ copr_frontend_public_hostname }}/cert.pem + SSLCertificateKeyFile /etc/letsencrypt/live/{{ copr_frontend_public_hostname }}/privkey.pem + SSLCertificateChainFile /etc/letsencrypt/live/{{ copr_frontend_public_hostname }}/fullchain.pem + {% endif %} + {% if not devel %} ServerAlias copr.fedoraproject.org - Redirect 302 / https://copr.fedorainfracloud.org/ + {% endif %} - - diff --git a/roles/copr/frontend/files/banner-include.html b/roles/copr/frontend/files/banner-include.html index 2b539819d1..78ada661ed 100644 --- a/roles/copr/frontend/files/banner-include.html +++ b/roles/copr/frontend/files/banner-include.html @@ -1,6 +1,6 @@

- Warning! This is a development server. + Warning! This is a staging server.

Production instance: https://copr.fedoraproject.org/ diff --git a/roles/copr/frontend/tasks/main.yml b/roles/copr/frontend/tasks/main.yml index 250f8215ca..c93fb6951e 100644 --- a/roles/copr/frontend/tasks/main.yml +++ b/roles/copr/frontend/tasks/main.yml @@ -1,55 +1,36 @@ --- -- import_tasks: "mount_fs.yml" - -- command: "ls -dZ /var/lib/pgsql" - register: pgsql_ls - -- name: update selinux context for postgress db dir if it's wrong - command: "restorecon -vvRF /var/lib/pgsql" - when: pgsql_ls.stdout is defined and 'postgresql_db_t' not in pgsql_ls.stdout - -- name: install copr-frontend and copr-selinux - dnf: state=latest name={{ item }} +- name: install copr-frontend packages + package: name={{ item }} state=present with_items: - copr-frontend - copr-selinux + # workaround for https://bugzilla.redhat.com/show_bug.cgi?id=1622513 + - python3-requests tags: + - copr - packages - # we install python-alembic because https://bugzilla.redhat.com/show_bug.cgi?id=1536058 -- name: install additional pkgs for copr-frontend - dnf: state=present pkg={{ item }} - with_items: - - "bash-completion" - - "mod_ssl" - - redis - - pxz - - python3-alembic - tags: - - packages - -- name: install a newer version of xstatic-jquery-ui-common - command: dnf install -y https://kojipkgs.fedoraproject.org//packages/python-XStatic-jquery-ui/1.12.0.1/2.fc26/noarch/xstatic-jquery-ui-common-1.12.0.1-2.fc26.noarch.rpm - - name: install copr configs - template: src="copr.conf" dest=/etc/copr/copr.conf mode=600 + template: src=copr.conf dest=/etc/copr/copr.conf mode=600 notify: - reload httpd tags: + - copr - config -- name: enable and start redis # TODO: .service in copr-backend should depend on redis - service: name=redis enabled=yes state=started - -- name: enable and start pagure-events - service: name=pagure-events enabled=yes state=started - -- name: copy apache files to conf.d - copy: src="httpd/{{ item }}" dest="/etc/httpd/conf.d/{{ item }}" - with_items: - - "welcome.conf" - - 
"coprs.conf" +- name: copy apache files to conf.d (templates) + template: src=httpd.conf dest=/etc/httpd/conf.d/coprs.conf + notify: + - reload httpd tags: + - copr + - config + +- name: set staging banner for staging instance + when: env == 'staging' + copy: src=banner-include.html dest=/var/lib/copr/ + tags: + - copr - config # https://bugzilla.redhat.com/show_bug.cgi?id=1535689 @@ -58,21 +39,22 @@ name: httpd_execmem state: yes persistent: yes - -- name: install copr-frontend ssl vhost for production - template: src="httpd/coprs_ssl.conf.j2" dest="/etc/httpd/conf.d/copr_ssl.conf" - when: not devel tags: - - config + - copr + - selinux -- import_tasks: "psql_setup.yml" +- name: enable and start httpd + service: name=httpd state=started enabled=yes + tags: + - copr + - service -- name: upgrade db to head - command: alembic-3 upgrade head - become: yes - become_user: copr-fe - args: - chdir: /usr/share/copr/coprs_frontend/ +- name: enable and start pagure-events + service: name=pagure-events enabled=yes state=started + when: not 'pagure-events.service is missing in latest copr-frontend rpm in f28 repos' + tags: + - copr + - service - name: set up admins command: ./manage.py alter_user --admin {{ item }} @@ -80,7 +62,7 @@ become_user: copr-fe args: chdir: /usr/share/copr/coprs_frontend/ - ignore_errors: yes + when: False with_items: - msuchy - sgallagh @@ -94,22 +76,4 @@ become_user: copr-fe args: chdir: /usr/share/copr/coprs_frontend/ - -- name: install ssl certificates for production - import_tasks: "install_certs.yml" - when: not devel - tags: - - config - -- name: enable services - service: state=started enabled=yes name={{ item }} - with_items: - - httpd - -- name: set dev banner for dev instance - when: devel - copy: src=banner-include.html dest=/var/lib/copr/ - -- name: disallow robots on dev instance - when: devel - copy: src=robots.txt dest=/var/www/html/ + when: False diff --git a/roles/copr/frontend/templates/copr.conf 
b/roles/copr/frontend/templates/copr.conf index b66f1514d1..2207ce99db 100644 --- a/roles/copr/frontend/templates/copr.conf +++ b/roles/copr/frontend/templates/copr.conf @@ -14,7 +14,7 @@ BACKEND_BASE_URL = '{{ backend_base_url }}' #USE_ALLOWED_USERS = False #ALLOWED_USERS = ['bonnie', 'clyde'] -SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://copr-fe:{{ copr_database_password }}@/coprdb' +SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://copruser:{{ copruser_db_password }}@db01/copr' # Token length, defaults to 30 (max 255) #API_TOKEN_LENGTH = 30 @@ -43,32 +43,16 @@ LOG_DIR = "/var/log/copr-frontend/" # to accept stat events from logstash INTRANET_IPS = {{ copr_backend_ips }} -REPO_GPGCHECK = {% if devel %} 0 {% else %} 1 {% endif %} +REPO_GPGCHECK = 1 -{% if env == 'staging' %} -PUBLIC_COPR_BASE_URL = "http://copr-fe-dev.cloud.fedoraproject.org" -{% else %} -PUBLIC_COPR_BASE_URL = "https://copr.fedorainfracloud.org" -{% endif %} +PUBLIC_COPR_BASE_URL = "https://{{ copr_frontend_public_hostname }}" -{% if env == 'staging' %} -# Staging URLs for fedmenu -FEDMENU_URL = "https://apps.stg.fedoraproject.org/fedmenu/" -FEDMENU_DATA_URL = "https://apps.stg.fedoraproject.org/js/data.js" -{% else %} -# Production URLs for fedmenu -FEDMENU_URL = "https://apps.fedoraproject.org/fedmenu/" -FEDMENU_DATA_URL = "https://apps.fedoraproject.org/js/data.js" -{% endif %} +# URLs for fedmenu +FEDMENU_URL = "https://apps{{ env_suffix }}.fedoraproject.org/fedmenu/" +FEDMENU_DATA_URL = "https://apps{{ env_suffix }}.fedoraproject.org/js/data.js" -# todo: check that ansible variable is used correctly -{% if env == 'staging' %} -ENFORCE_PROTOCOL_FOR_BACKEND_URL = "http" -ENFORCE_PROTOCOL_FOR_FRONTEND_URL = "http" -{% else %} ENFORCE_PROTOCOL_FOR_BACKEND_URL = "https" ENFORCE_PROTOCOL_FOR_FRONTEND_URL = "https" -{% endif %} DIST_GIT_URL="https://{{ dist_git_base_url }}/cgit" DIST_GIT_CLONE_URL="https://{{ dist_git_base_url }}/git" diff --git a/roles/copr/frontend/templates/httpd.conf 
b/roles/copr/frontend/templates/httpd.conf new file mode 100644 index 0000000000..eaed6e248e --- /dev/null +++ b/roles/copr/frontend/templates/httpd.conf @@ -0,0 +1,27 @@ +WSGIDaemonProcess 127.0.0.1 user=copr-fe group=copr-fe threads=15 display-name=other maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess api user=copr-fe group=copr-fe threads=15 display-name=api maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess backend user=copr-fe group=copr-fe threads=15 display-name=backend maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess stats user=copr-fe group=copr-fe threads=15 display-name=stats maximum-requests=8000 graceful-timeout=20 +WSGIDaemonProcess tmp user=copr-fe group=copr-fe threads=15 display-name=tmp maximum-requests=8000 graceful-timeout=20 +WSGIScriptAlias / /usr/share/copr/coprs_frontend/application + +ServerName {{ inventory_hostname }} +WSGIPassAuthorization On + + + WSGIProcessGroup 127.0.0.1 + + + + WSGIApplicationGroup %{GLOBAL} + Require all granted + + + + StartServers 8 + MinSpareServers 8 + MaxSpareServers 20 + MaxClients 50 + MaxRequestsPerChild 10000 + + diff --git a/roles/copr/keygen/selinux/copr_rules.mod b/roles/copr/keygen/selinux/copr_rules.mod index b65038ad84..4fcd337ab8 100644 Binary files a/roles/copr/keygen/selinux/copr_rules.mod and b/roles/copr/keygen/selinux/copr_rules.mod differ diff --git a/roles/copr/keygen/selinux/copr_rules.pp b/roles/copr/keygen/selinux/copr_rules.pp index 0642cc91e6..0c09d8d38a 100644 Binary files a/roles/copr/keygen/selinux/copr_rules.pp and b/roles/copr/keygen/selinux/copr_rules.pp differ diff --git a/roles/copr/keygen/selinux/copr_rules.te b/roles/copr/keygen/selinux/copr_rules.te index 42d15bbd61..46116ead3b 100644 --- a/roles/copr/keygen/selinux/copr_rules.te +++ b/roles/copr/keygen/selinux/copr_rules.te @@ -6,7 +6,8 @@ require { class sock_file getattr; class sock_file unlink; class sock_file write; + class sock_file create; } #============= httpd_t ============== -allow 
httpd_t httpd_var_lib_t:sock_file { getattr unlink write }; +allow httpd_t httpd_var_lib_t:sock_file { getattr unlink write create }; diff --git a/roles/dhcp_server/files/dhcpd.conf.cloud-noc01.cloud.fedoraproject.org b/roles/dhcp_server/files/dhcpd.conf.cloud-noc01.cloud.fedoraproject.org index 16511d0b67..4a2789e139 100644 --- a/roles/dhcp_server/files/dhcpd.conf.cloud-noc01.cloud.fedoraproject.org +++ b/roles/dhcp_server/files/dhcpd.conf.cloud-noc01.cloud.fedoraproject.org @@ -31,6 +31,30 @@ shared-network cloud { option routers 172.23.1.254; + host rhev01 { + hardware ethernet 48:4D:7E:05:4E:F4; + fixed-address 172.23.1.5; + option host-name "rhev01.fedoraproject.org"; + next-server 172.23.1.1; + filename "pxelinux.0"; + } + + host rhev02 { + hardware ethernet 48:4D:7E:05:4F:E2; + fixed-address 172.23.1.6; + option host-name "rhev02.fedoraproject.org"; + next-server 172.23.1.1; + filename "pxelinux.0"; + } + + host rhev03 { + hardware ethernet 48:4D:7E:05:4F:5C; + fixed-address 172.23.1.7; + option host-name "rhev03.fedoraproject.org"; + next-server 172.23.1.1; + filename "pxelinux.0"; + } + # Transitional host arm03-packager00-mgmt { hardware ethernet fc:2f:40:1b:64:4e; diff --git a/roles/dhcp_server/files/dhcpd.conf.dhcp01.phx2.fedoraproject.org b/roles/dhcp_server/files/dhcpd.conf.dhcp01.phx2.fedoraproject.org index b26b1e21f9..c05d1608d1 100644 --- a/roles/dhcp_server/files/dhcpd.conf.dhcp01.phx2.fedoraproject.org +++ b/roles/dhcp_server/files/dhcpd.conf.dhcp01.phx2.fedoraproject.org @@ -36,23 +36,6 @@ subnet 10.5.125.0 netmask 255.255.255.0 { filename "pxelinux.0"; } - host bkernel01 { - hardware ethernet 6c:ae:8b:1e:fd:82; - fixed-address 10.5.125.51; - option host-name "bkernel01"; - next-server 10.5.126.41; - filename "pxelinux.0"; - } - - host bkernel02 { - hardware ethernet 6c:ae:8b:1e:fd:6a; - fixed-address 10.5.125.52; - option host-name "bkernel02"; - next-server 10.5.126.41; - filename "pxelinux.0"; - } - - host bkernel03 { hardware ethernet 
D0:94:66:45:8C:0F; fixed-address 10.5.125.81; diff --git a/roles/dhcp_server/files/dhcpd.conf.noc01.phx2.fedoraproject.org b/roles/dhcp_server/files/dhcpd.conf.noc01.phx2.fedoraproject.org index 83800a4f99..c875b6d399 100644 --- a/roles/dhcp_server/files/dhcpd.conf.noc01.phx2.fedoraproject.org +++ b/roles/dhcp_server/files/dhcpd.conf.noc01.phx2.fedoraproject.org @@ -41,7 +41,7 @@ subnet 10.5.125.0 netmask 255.255.255.0 { fixed-address 10.5.125.83; option host-name "sign-vault05"; next-server 10.5.126.41; - filename "pxelinux.0"; + filename "uefi/grubx64.efi"; } host sign-vault06 { @@ -49,32 +49,15 @@ subnet 10.5.125.0 netmask 255.255.255.0 { fixed-address 10.5.125.84; option host-name "sign-vault06"; next-server 10.5.126.41; - filename "pxelinux.0"; + filename "uefi/grubx64.efi"; } - host bkernel01 { - hardware ethernet 6c:ae:8b:1e:fd:82; - fixed-address 10.5.125.51; - option host-name "bkernel01"; - next-server 10.5.126.41; - filename "pxelinux.0"; - } - - host bkernel02 { - hardware ethernet 6c:ae:8b:1e:fd:6a; - fixed-address 10.5.125.52; - option host-name "bkernel02"; - next-server 10.5.126.41; - filename "pxelinux.0"; - } - - host bkernel03 { hardware ethernet D0:94:66:45:8C:0F; fixed-address 10.5.125.81; option host-name "bkernel03"; next-server 10.5.126.41; - filename "pxelinux.0"; + filename "uefi/grubx64.efi"; } host bkernel04 { @@ -82,7 +65,7 @@ subnet 10.5.125.0 netmask 255.255.255.0 { fixed-address 10.5.125.82; option host-name "bkernel04"; next-server 10.5.126.41; - filename "pxelinux.0"; + filename "uefi/grubx64.efi"; } host bvirthost01 { @@ -335,8 +318,18 @@ subnet 10.5.126.0 netmask 255.255.255.0 { fixed-address 10.5.126.142; next-server 10.5.126.41; option host-name "virthost02"; + filename "pxelinux.0"; } + host virthost03 { + hardware ethernet f0:1f:af:e1:d9:d8; + fixed-address 10.5.126.143; + next-server 10.5.126.41; + option host-name "virthost03"; + filename "uefi/grubx64.efi"; + } + + host virthost06 { hardware ethernet 18:66:da:f7:7a:58; 
fixed-address 10.5.126.146; diff --git a/roles/distgit/pagure/templates/pagure.cfg b/roles/distgit/pagure/templates/pagure.cfg index 32dd8dccd3..d0f34b4dac 100644 --- a/roles/distgit/pagure/templates/pagure.cfg +++ b/roles/distgit/pagure/templates/pagure.cfg @@ -266,6 +266,9 @@ ALLOW_DELETE_BRANCH = False ALLOWED_PREFIX = ['rpms', 'modules', 'container', 'tests'] EXCLUDE_GROUP_INDEX = ['packager'] EMAIL_ON_WATCHCOMMITS = False +PRIVATE_PROJECTS = False +FEDMSG_NOTIFICATIONS = True +PR_TARGET_MATCHING_BRANCH = True DISABLED_PLUGINS = ['IRC', 'Pagure tickets', 'Read the Doc', 'Fedmsg'] diff --git a/roles/distgit/tasks/main.yml b/roles/distgit/tasks/main.yml index 344ca5d147..1247adaf04 100644 --- a/roles/distgit/tasks/main.yml +++ b/roles/distgit/tasks/main.yml @@ -347,7 +347,6 @@ notify: - reload httpd - # -- Lookaside Cache ------------------------------------- # This is the annex to Dist Git, where we host source tarballs. - name: install the Lookaside Cache httpd configs diff --git a/roles/distgit/templates/lookaside-upload.conf b/roles/distgit/templates/lookaside-upload.conf index 62cd125d7b..20aeb316e4 100644 --- a/roles/distgit/templates/lookaside-upload.conf +++ b/roles/distgit/templates/lookaside-upload.conf @@ -39,7 +39,7 @@ Alias /robots.txt /var/www/robots-src.txt RewriteEngine on - RewriteRule "^/.well-known/acme-challenge/(.*)$" "http://src{{ env_suffix }}.fedoraproject.org/$1" + RewriteRule "^/.well-known/acme-challenge/(.*)$" "http://src{{ env_suffix }}.fedoraproject.org/.well-known/acme-challenge/$1" RewriteRule "^/(.*)$" "https://src{{ env_suffix }}.fedoraproject.org/$1" RewriteRule "^/login/$" "https://src{{ env_suffix }}.fedoraproject.org/login/" @@ -54,10 +54,9 @@ Alias /robots.txt /var/www/robots-src.txt SSLEngine on - SSLCertificateFile conf/pkgs.fedoraproject.org_key_and_cert.pem - SSLCertificateKeyFile conf/pkgs.fedoraproject.org_key_and_cert.pem - SSLCACertificateFile conf/cacert.pem - SSLCARevocationFile /etc/pki/tls/crl.pem + 
SSLCertificateFile /etc/pki/tls/certs/pkgs{{ env_suffix }}.fedoraproject.org.cert + SSLCertificateKeyFile /etc/pki/tls/private/pkgs{{ env_suffix }}.fedoraproject.org.key + SSLCertificateChainFile /etc/pki/tls/certs/pkgs{{ env_suffix }}.fedoraproject.org.intermediate.cert SSLProtocol {{ ssl_protocols }} SSLCipherSuite {{ ssl_ciphers }} diff --git a/roles/easyfix/gather/templates/gather_easyfix.py b/roles/easyfix/gather/templates/gather_easyfix.py index d0074282a8..4ebd4c9b7b 100755 --- a/roles/easyfix/gather/templates/gather_easyfix.py +++ b/roles/easyfix/gather/templates/gather_easyfix.py @@ -247,6 +247,27 @@ def main(): project.name, ticket['id']) ticketobj.status = ticket['status'] tickets.append(ticketobj) + elif project.name.startswith('gitlab.com:'): + # https://docs.gitlab.com/ee/api/issues.html#list-project-issues + project.name = project.name.split('gitlab.com:')[1] + project.url = 'https://gitlab.com/%s/' % (project.name) + project.site = 'gitlab.com' + url = 'https://gitlab.com/api/v4/projects/%s/issues' \ + '?state=opened&labels=%s' % (urllib2.quote(project.name, + safe=''), + project.tag) + stream = urllib2.urlopen(url) + output = stream.read() + jsonobj = json.loads(output) + if jsonobj: + for ticket in jsonobj: + ticket_num = ticket_num + 1 + ticketobj = Ticket() + ticketobj.id = ticket['id'] + ticketobj.title = ticket['title'] + ticketobj.url = ticket['web_url'] + ticketobj.status = ticket['state'] + tickets.append(ticketobj) project.tickets = tickets bzbugs = gather_bugzilla_easyfix() diff --git a/roles/fas_client/files/aliases.template b/roles/fas_client/files/aliases.template index 045b2e52d2..626bcb65f5 100644 --- a/roles/fas_client/files/aliases.template +++ b/roles/fas_client/files/aliases.template @@ -166,7 +166,7 @@ security: security-private@lists.fedoraproject.org secalert: security-private@lists.fedoraproject.org # Infrastructure security officer -infra-security: puiterwijk +infra-security: puiterwijk,kevin,smooge,codeblock webmaster: 
websites@lists.fedoraproject.org logo: rlerch@redhat.com,duffy@redhat.com diff --git a/roles/fas_client/tasks/main.yml b/roles/fas_client/tasks/main.yml index 22836c1b9b..ce67889b45 100644 --- a/roles/fas_client/tasks/main.yml +++ b/roles/fas_client/tasks/main.yml @@ -8,7 +8,7 @@ # fas-clients is in the infrastructure repo. # nss_db is needed to store user/group info. # -- name: install package needed for fas-client (yum) +- name: install package needed for fas-client package: state=present name={{ item }} with_items: - fas-clients diff --git a/roles/fedmsg/irc/templates/ircbot.py b/roles/fedmsg/irc/templates/ircbot.py index a89c80637b..669548421d 100644 --- a/roles/fedmsg/irc/templates/ircbot.py +++ b/roles/fedmsg/irc/templates/ircbot.py @@ -109,9 +109,9 @@ config = dict( make_terse=True, {% if env == 'staging' %} - nickname='commopsbot-s', + nickname='commops-bot-s', {% else %} - nickname='commopsbot', + nickname='commops-bot', {% endif %} channel='fedora-commops', filters=dict( @@ -128,9 +128,9 @@ config = dict( make_terse=True, {% if env == 'staging' %} - nickname='commopswatch-s', + nickname='commops-watch-s', {% else %} - nickname='commopswatch', + nickname='commops-watch', {% endif %} channel='fedora-commops', filters=dict( @@ -140,7 +140,7 @@ config = dict( body=['^((?!fedora-commops).)*$'], ), ), - # A third one! 
for that commops crew that watches for the admin user to post on planet + # A third one to listen for new Community Blog posts dict( network='chat.freenode.net', port=6667, @@ -148,9 +148,9 @@ config = dict( make_terse=True, {% if env == 'staging' %} - nickname='commopsplanet-s', + nickname='fm-commblog-s', {% else %} - nickname='commopslanet', + nickname='fm-commblog', {% endif %} channel='fedora-commops', filters=dict( diff --git a/roles/fedora-web/candidate-registry/tasks/main.yml b/roles/fedora-web/candidate-registry/tasks/main.yml index 26616cc8a0..60621934cc 100644 --- a/roles/fedora-web/candidate-registry/tasks/main.yml +++ b/roles/fedora-web/candidate-registry/tasks/main.yml @@ -1,22 +1,3 @@ -- name: Copy over the Fedora Server CA cert - copy: src="{{ private }}/files/fedora-ca.cert" dest=/etc/pki/httpd/fedora-server-ca.cert - owner=root group=root mode=0644 - notify: - - reload proxyhttpd - tags: - - fedora-web - - fedora-web/candidate-registry - -- name: Copy over the registry CA - copy: src="{{private}}/files/docker-registry/{{env}}/docker-registry-ca.pem" - dest="/etc/pki/httpd/registry-ca-{{env}}.cert" - owner=root group=root mode=0644 - notify: - - reload proxyhttpd - tags: - - fedora-web - - fedora-web/candidate-registry - - name: Copy over the registry passwd copy: src="{{private}}/files/docker-registry/{{env}}/candidate-htpasswd" dest=/etc/httpd/conf.d/candidate-registry.fedoraproject.org/passwd owner=root group=root mode=0644 diff --git a/roles/fedora-web/registry/files/passwd-production b/roles/fedora-web/registry/files/passwd-production index acc4e47062..edba1bff2e 100644 --- a/roles/fedora-web/registry/files/passwd-production +++ b/roles/fedora-web/registry/files/passwd-production @@ -1 +1 @@ -/C=US/ST=North Carolina/O=Fedora Project/OU=Fedora Builders/CN=containerstable/emailAddress=buildsys@fedoraproject.org:xxj31ZMTZzkVA +/CN=containerstable:xxj31ZMTZzkVA diff --git a/roles/fedora-web/registry/tasks/main.yml 
b/roles/fedora-web/registry/tasks/main.yml index 1476b1961a..cab50ae756 100644 --- a/roles/fedora-web/registry/tasks/main.yml +++ b/roles/fedora-web/registry/tasks/main.yml @@ -36,7 +36,7 @@ - fedora-web/registry - name: Copy over the registry CA - copy: src="{{private}}/files/docker-registry/{{env}}/docker-registry-ca.pem" + copy: src="{{private}}/files/docker-registry/{{env}}/pki/ca.crt" dest="/etc/pki/httpd/registry-ca-{{env}}.cert" owner=root group=root mode=0644 notify: diff --git a/roles/gluster/consolidated/tasks/main.yml b/roles/gluster/consolidated/tasks/main.yml index b35c9ec109..584911efcd 100644 --- a/roles/gluster/consolidated/tasks/main.yml +++ b/roles/gluster/consolidated/tasks/main.yml @@ -15,12 +15,14 @@ command: gluster peer probe {{ item }} with_items: '{{groups[gluster_server_group]}}' ignore_errors: true + changed_when: false tags: - gluster - name: Servers discover each other, pass two. command: gluster peer probe {{ item }} with_items: '{{groups[gluster_server_group]}}' + changed_when: false ignore_errors: true tags: - gluster diff --git a/roles/gnome_backups/files/backup.sh b/roles/gnome_backups/files/backup.sh index 94381b64cd..d41ebd3142 100644 --- a/roles/gnome_backups/files/backup.sh +++ b/roles/gnome_backups/files/backup.sh @@ -9,7 +9,6 @@ MACHINES='signal.gnome.org webapps2.gnome.org blogs.gnome.org palette.gnome.org - git.gnome.org webapps.gnome.org cloud.gnome.org bastion.gnome.org @@ -31,7 +30,8 @@ MACHINES='signal.gnome.org scale.gnome.org sdkbuilder.gnome.org webapps3.gnome.org - gitlab.gnome.org' + gitlab.gnome.org + oscp-master01.gnome.org' BACKUP_DIR='/gnome_backups' diff --git a/roles/gnome_backups/files/ssh_config b/roles/gnome_backups/files/ssh_config index e6ed7f9d59..630a6521c1 100644 --- a/roles/gnome_backups/files/ssh_config +++ b/roles/gnome_backups/files/ssh_config @@ -3,6 +3,11 @@ Host puppetmaster01.gnome.org cloud.gnome.org webapps3.gnome.org IdentityFile /usr/local/etc/gnome_backup_id.rsa ProxyCommand ssh -W %h:%p 
bastion.gnome.org -F /usr/local/etc/gnome_ssh_config +Host oscp-master01.gnome.org + User root + IdentityFile /usr/local/etc/gnome_backup_id.rsa + ProxyCommand ssh -W %h:%p gesture.gnome.org -F /usr/local/etc/gnome_ssh_config + Host *.gnome.org pentagon.gimp.org User root IdentityFile /usr/local/etc/gnome_backup_id.rsa diff --git a/roles/gnome_backups/tasks/main.yml b/roles/gnome_backups/tasks/main.yml index 39d17d1ee7..db309eee79 100644 --- a/roles/gnome_backups/tasks/main.yml +++ b/roles/gnome_backups/tasks/main.yml @@ -31,7 +31,7 @@ - view.gnome.org - puppetmaster01.gnome.org - palette.gnome.org - - git.gnome.org + - oscp-master01.gnome.org - webapps.gnome.org - socket.gnome.org - bugzilla.gnome.org diff --git a/roles/haproxy/templates/haproxy.cfg b/roles/haproxy/templates/haproxy.cfg index 839862f201..ac866d03d0 100644 --- a/roles/haproxy/templates/haproxy.cfg +++ b/roles/haproxy/templates/haproxy.cfg @@ -366,20 +366,15 @@ backend osbs-backend balance hdr(appserver) server osbs-master01 osbs-master01:8443 check inter 10s rise 1 fall 2 check ssl verify none -# This is silly, but basically, stg has registry01/02, prod has registry02/03 -frontend docker-registry-frontend +frontend oci-registry-frontend bind 0.0.0.0:10048 - default_backend docker-registry-backend + default_backend oci-registry-backend -backend docker-registry-backend +backend oci-registry-backend balance hdr(appserver) -{% if env == "staging" %} - server docker-registry01 docker-registry01:5000 check inter 10s rise 1 fall 2 -{% endif %} - server docker-registry02 docker-registry02:5000 check inter 10s rise 1 fall 2 -{% if env == "production" %} - server docker-registry03 docker-registry03:5000 check inter 10s rise 1 fall 2 -{% endif %} + server oci-registry01 oci-registry01:5000 check inter 10s rise 1 fall 2 + server oci-registry02 oci-registry02:5000 check inter 10s rise 1 fall 2 + {% if env == "staging" %} frontend retrace-frontend @@ -445,13 +440,13 @@ backend krb5-backend # server ipa02 
ipa02:88 weight 1 maxconn 16384 {% endif %} -frontend docker-candidate-registry-frontend +frontend oci-candidate-registry-frontend bind 0.0.0.0:10054 - default_backend docker-candidate-registry-backend + default_backend oci-candidate-registry-backend -backend docker-candidate-registry-backend +backend oci-candidate-registry-backend balance hdr(appserver) - server docker-candidate-registry01 docker-candidate-registry01:5000 check inter 10s rise 1 fall 2 + server oci-candidate-registry01 oci-candidate-registry01:5000 check inter 10s rise 1 fall 2 frontend modernpaste-frontend bind 0.0.0.0:10055 @@ -596,6 +591,23 @@ backend freshmaker-backend server freshmaker-frontend01 freshmaker-frontend01:80 check inter 20s rise 2 fall 3 option httpchk GET /api/1/builds/ +frontend copr-frontend + bind 0.0.0.0:10070 + default_backend copr-backend + +backend copr-backend + balance hdr(appserver) +{% if env == "production" %} + server copr-frontend01 copr-frontend01:80 check inter 10s rise 1 fall 2 + server copr-frontend02 copr-frontend02:80 check inter 10s rise 1 fall 2 +{% else %} + # XXX mizdebsk 2018-08-28: kill this conditional after F29 beta freeze is litfed + cookie SERVERID insert indirect nocache + server copr-frontend01 copr-frontend01:80 check inter 10s rise 1 fall 2 cookie copr-frontend01 + server copr-frontend02 copr-frontend02:80 check inter 10s rise 1 fall 2 cookie copr-frontend02 +{% endif %} + option httpchk GET /api_3/ + # Apache doesn't handle the initial connection here like the other proxy # entries. This proxy also doesn't use the http mode like the others. 
# stunnel should be sitting on port 9939 (public) and redirecting diff --git a/roles/hosts/files/blockerbugs01.stg.phx2.fedoraproject.org-hosts b/roles/hosts/files/blockerbugs01.stg.phx2.fedoraproject.org-hosts new file mode 100644 index 0000000000..c5f9bb6fac --- /dev/null +++ b/roles/hosts/files/blockerbugs01.stg.phx2.fedoraproject.org-hosts @@ -0,0 +1,10 @@ +127.0.0.1 localhost localhost.localdomain localhost4 localhost4.localdomain4 +::1 localhost localhost.localdomain localhost6 localhost6.localdomain6 + +10.5.128.177 proxy01.phx2.fedoraproject.org proxy01 proxy02 proxy03 proxy04 proxy05 proxy06 proxy07 proxy08 proxy09 proxy10 proxy11 proxy12 proxy13 proxy14 fedoraproject.org admin.fedoraproject.org admin.stg.fedoraproject.org apps.fedoraproject.org apps.stg.fedoraproject.org +10.5.126.23 infrastructure.fedoraproject.org +10.5.128.175 pkgs.fedoraproject.org +10.5.128.148 memcached01.stg.phx2.fedoraproject.org memcached01 memcached02 memcached03 memcached04 +10.5.128.120 db01.stg.phx2.fedoraproject.org db-ask db-koji01 db-github2fedmsg tagger_dbdb-summershum nuancier_db db-notifs db-kerneltest db-pps +10.5.128.129 fas01.stg.phx2.fedoraproject.org fas01.phx2.fedoraproject.org fas1 fas2 fas01 fas02 fas03 fas-all +209.132.183.72 bugzilla.redhat.com partner-bugzilla.redhat.com diff --git a/roles/hosts/files/sign-hosts b/roles/hosts/files/sign-hosts index 40bbd15766..d00369967c 100644 --- a/roles/hosts/files/sign-hosts +++ b/roles/hosts/files/sign-hosts @@ -4,8 +4,8 @@ # # Here for historical reasons due to cert names. 
# -10.5.125.75 sign-vault1 -10.5.125.71 sign-bridge1 +10.5.125.75 sign-vault1 sign-vault.phx2.fedoraproject.org +10.5.125.71 sign-bridge1 sign-bridge.phx2.fedoraproject.org # # Need to be able to talk to various kojis # diff --git a/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf b/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf index b291c0ad73..0e4ef7524e 100644 --- a/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf +++ b/roles/httpd/reverseproxy/templates/reversepassproxy.registry-generic.conf @@ -12,7 +12,7 @@ RewriteRule ^/v2/(.*)/blobs/([a-zA-Z0-9:]*) https://cdn.registry.fedoraproject.o # This is terible, but Docker. RewriteCond %{REQUEST_METHOD} ^(PATCH|POST|PUT|DELETE)$ -RewriteRule ^/v2/(.*)$ http://docker-registry02:5000/v2/$1 [P,L] +RewriteRule ^/v2/(.*)$ http://oci-registry02:5000/v2/$1 [P,L] RewriteRule ^/v2/(.*)$ http://localhost:6081/v2/$1 [P,L] DocumentRoot /srv/web/registry-index/ diff --git a/roles/java-deptools/files/cron b/roles/java-deptools/files/cron index 1269f67557..779ac8801b 100644 --- a/roles/java-deptools/files/cron +++ b/roles/java-deptools/files/cron @@ -3,6 +3,6 @@ set -e log=/var/log/java-deptools/backend.log cd /var/lib/java-deptools/repos/ date >$log -java-deptools-repogen f29 f28 f27 &>>$log +java-deptools-repogen $(curl -sXPOST -d 'getBuildTargets' https://koji.fedoraproject.org/kojihub | sed -n 's,^\(f[2-9][0-9]\)$,\1,;T;p' | sort -ru) &>>$log date >>$log echo 'Repo regeneration successful' >>$log diff --git a/roles/koji_builder/tasks/main.yml b/roles/koji_builder/tasks/main.yml index 5392290d05..921daa8cc7 100644 --- a/roles/koji_builder/tasks/main.yml +++ b/roles/koji_builder/tasks/main.yml @@ -214,6 +214,16 @@ tags: - koji_builder +# non-bkernel x86_64 builders run container_build, which needs osbs +- name: special pkgs for the x86_64 builders + package: state=present pkg={{ item }} + with_items: + - python2-osbs-client.noarch + - 
python3-osbs-client.noarch + when: "ansible_architecture == 'x86_64' and not inventory_hostname.startswith('bkernel')" + tags: + - koji_builder + # Before, the builders had the "apache" role. This is a temporary play to remove the httpd daemon everywhere - name: Uninstall httpd package: name=httpd diff --git a/roles/koji_builder/templates/kojid.conf b/roles/koji_builder/templates/kojid.conf index d582eced1d..3c28a3372e 100644 --- a/roles/koji_builder/templates/kojid.conf +++ b/roles/koji_builder/templates/kojid.conf @@ -138,7 +138,7 @@ plugins = runroot {% else %} -{% if ansible_architecture == 'x86_64' %} +{% if ansible_architecture == 'x86_64' and not inventory_hostname.startswith('bkernel') %} plugins = builder_containerbuild {% else %} plugins = diff --git a/roles/koschei/frontend/templates/config-frontend.cfg.j2 b/roles/koschei/frontend/templates/config-frontend.cfg.j2 index d5ab5612d6..79028342a1 100644 --- a/roles/koschei/frontend/templates/config-frontend.cfg.j2 +++ b/roles/koschei/frontend/templates/config-frontend.cfg.j2 @@ -2,7 +2,6 @@ # configuration in /usr/share/koschei/config.cfg. It is a python file expecting # assignment to config dictionary which will be recursively merged with the # default one. 
-{% set env_prefix = ".stg" if env == "staging" else "" %} config = { "database_config": { "host": "{{ koschei_pgsql_hostname }}", @@ -56,7 +55,7 @@ config = { }, }, "pagure": { - "api_url": "https://src{{ env_prefix }}.fedoraproject.org/api/0", + "api_url": "https://src{{ env_suffix }}.fedoraproject.org/api/0", }, "frontend": { "builds_per_page": 8, @@ -70,11 +69,11 @@ config = { }, "links": [ {"name": "Packages", - "url": "https://apps{{ env_prefix }}.fedoraproject.org/packages/{package.name}"}, + "url": "https://apps{{ env_suffix }}.fedoraproject.org/packages/{package.name}"}, {"name": "Bodhi", - "url": "https://bodhi{{ env_prefix }}.fedoraproject.org/updates?packages={package.name}"}, + "url": "https://bodhi{{ env_suffix }}.fedoraproject.org/updates?packages={package.name}"}, {"name": "Dist-git", - "url": "https://src{{ env_prefix }}.fedoraproject.org/rpms/{package.name}"}, + "url": "https://src{{ env_suffix }}.fedoraproject.org/rpms/{package.name}"}, {"name": "Bugzilla", "url": "https://{{ koschei_bugzilla }}/buglist.cgi?product={package.collection.bugzilla_product}&component={package.name}"}, {"name": "Koji", diff --git a/roles/letsencrypt/tasks/main.yml b/roles/letsencrypt/tasks/main.yml index 20b121c60b..d4cdffb959 100644 --- a/roles/letsencrypt/tasks/main.yml +++ b/roles/letsencrypt/tasks/main.yml @@ -68,3 +68,45 @@ - reload proxyhttpd tags: - letsencrypt + +- name: Install the certificate (additional host) + copy: > + dest=/etc/pki/tls/certs/{{site_name}}.cert + content="{{certbot_certificate.stdout}}" + owner=root + group=root + mode=0644 + notify: + - reload proxyhttpd + tags: + - letsencrypt + delegate_to: "{{ certbot_addhost }}" + when: certbot_addhost is defined + +- name: Install the intermediate/chain certificate (additional host) + copy: > + dest=/etc/pki/tls/certs/{{site_name}}.intermediate.cert + content="{{certbot_chain.stdout}}" + owner=root + group=root + mode=0644 + notify: + - reload proxyhttpd + tags: + - letsencrypt + delegate_to: "{{ 
certbot_addhost }}" + when: certbot_addhost is defined + +- name: Install the key (additional host) + copy: > + dest=/etc/pki/tls/private/{{site_name}}.key + content="{{certbot_key.stdout}}" + owner=root + group=root + mode=0600 + notify: + - reload proxyhttpd + tags: + - letsencrypt + delegate_to: "{{ certbot_addhost }}" + when: certbot_addhost is defined diff --git a/roles/mailman/files/mailman-template-list-member-generic-footer.txt b/roles/mailman/files/mailman-template-list-member-generic-footer.txt index 9d8f9765c6..657147962f 100644 --- a/roles/mailman/files/mailman-template-list-member-generic-footer.txt +++ b/roles/mailman/files/mailman-template-list-member-generic-footer.txt @@ -3,4 +3,4 @@ $display_name mailing list -- $listname To unsubscribe send an email to ${short_listname}-leave@${domain} Fedora Code of Conduct: https://getfedora.org/code-of-conduct.html List Guidelines: https://fedoraproject.org/wiki/Mailing_list_guidelines -List Archives: ${hyperkitty_url} +List Archives: https://${domain}/archives/list/${listname} diff --git a/roles/manage-container-images/defaults/main.yml b/roles/manage-container-images/defaults/main.yml deleted file mode 100644 index c1f21c78bf..0000000000 --- a/roles/manage-container-images/defaults/main.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -# defaults file for manage-container-images -# - -certs_group: "releng-team" \ No newline at end of file diff --git a/roles/manage-container-images/tasks/main.yml b/roles/manage-container-images/tasks/main.yml deleted file mode 100644 index 069ef82e0f..0000000000 --- a/roles/manage-container-images/tasks/main.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -# tasks file for push-docker -# -#- name: install necessary packages -# package: -# name: "{{item}}" -# state: present -# with_items: -# - skopeo -# -#- name: ensure cert dir exists -# file: -# path: "{{cert_dest_dir}}" -# state: directory -# -#- name: install docker client cert for registry -# copy: -# src: "{{cert_src}}" -# dest: 
"{{cert_dest_dir}}/client.cert" -# owner: root -# group: "{{ certs_group }}" -# mode: 0640 -# -#- name: install docker client key for registry -# copy: -# src: "{{key_src}}" -# dest: "{{cert_dest_dir}}/client.key" -# group: "{{ certs_group }}" -# mode: 0640 diff --git a/roles/mbs/common/files/default-modules.production/platform-f30.yaml b/roles/mbs/common/files/default-modules.production/platform-f30.yaml new file mode 100644 index 0000000000..d40fb4c60a --- /dev/null +++ b/roles/mbs/common/files/default-modules.production/platform-f30.yaml @@ -0,0 +1,27 @@ +data: + description: Fedora 30 traditional base + license: + module: [MIT] + name: platform + profiles: + buildroot: + rpms: [bash, bzip2, coreutils, cpio, diffutils, fedora-release, findutils, gawk, + grep, gzip, info, make, patch, redhat-rpm-config, rpm-build, sed, shadow-utils, + tar, unzip, util-linux, which, xz] + srpm-buildroot: + rpms: [bash, fedora-release, fedpkg-minimal, gnupg2, redhat-rpm-config, rpm-build, + shadow-utils] + stream: f30 + summary: Fedora 30 traditional base + context: 00000000 + version: 5 + xmd: + mbs: + buildrequires: {} + commit: f30 + requires: {} + koji_tag: module-f30-build + mse: TRUE +document: modulemd +version: 1 + diff --git a/roles/mbs/common/files/default-modules.staging/platform-f30.yaml b/roles/mbs/common/files/default-modules.staging/platform-f30.yaml new file mode 100644 index 0000000000..d40fb4c60a --- /dev/null +++ b/roles/mbs/common/files/default-modules.staging/platform-f30.yaml @@ -0,0 +1,27 @@ +data: + description: Fedora 30 traditional base + license: + module: [MIT] + name: platform + profiles: + buildroot: + rpms: [bash, bzip2, coreutils, cpio, diffutils, fedora-release, findutils, gawk, + grep, gzip, info, make, patch, redhat-rpm-config, rpm-build, sed, shadow-utils, + tar, unzip, util-linux, which, xz] + srpm-buildroot: + rpms: [bash, fedora-release, fedpkg-minimal, gnupg2, redhat-rpm-config, rpm-build, + shadow-utils] + stream: f30 + summary: Fedora 30 
traditional base + context: 00000000 + version: 5 + xmd: + mbs: + buildrequires: {} + commit: f30 + requires: {} + koji_tag: module-f30-build + mse: TRUE +document: modulemd +version: 1 + diff --git a/roles/mbs/common/tasks/main.yml b/roles/mbs/common/tasks/main.yml index 431453488f..97f6017338 100644 --- a/roles/mbs/common/tasks/main.yml +++ b/roles/mbs/common/tasks/main.yml @@ -107,16 +107,23 @@ owner: root group: root mode: 0775 + tags: + - mbs + - mbs/common - name: copy default modules to /etc/module-build-service/default-modules copy: src={{ item }} dest=/etc/module-build-service/default-modules with_fileglob: - default-modules.{{ env }}/*.yaml + tags: + - mbs + - mbs/common - name: import default-modules command: /usr/bin/mbs-manager import_module /etc/module-build-service/default-modules/{{ item | basename }} with_fileglob: - default-modules.{{ env }}/*.yaml when: mbs_import_default_modules | default(True) - - + tags: + - mbs + - mbs/common diff --git a/roles/mbs/common/templates/config.py b/roles/mbs/common/templates/config.py index 61c23601fa..90e099e3fb 100644 --- a/roles/mbs/common/templates/config.py +++ b/roles/mbs/common/templates/config.py @@ -35,7 +35,7 @@ class BaseConfiguration(object): PDC_URL = 'http://modularity.fedorainfracloud.org:8080/rest_api/v1' PDC_INSECURE = True PDC_DEVELOP = True - SCMURLS = ["git://pkgs.stg.fedoraproject.org/modules/"] + SCMURLS = ["git+https://src.fedoraproject.org/modules/"] # How often should we resort to polling, in seconds # Set to zero to disable polling @@ -48,12 +48,12 @@ class BaseConfiguration(object): # Old name https://pagure.io/fm-orchestrator/issue/574 NUM_CONSECUTIVE_BUILDS = 5 - RPMS_DEFAULT_REPOSITORY = 'git://pkgs.fedoraproject.org/rpms/' + RPMS_DEFAULT_REPOSITORY = 'git+https://src.fedoraproject.org/rpms/' RPMS_ALLOW_REPOSITORY = False - RPMS_DEFAULT_CACHE = 'http://pkgs.fedoraproject.org/repo/pkgs/' + RPMS_DEFAULT_CACHE = 'https://src.fedoraproject.org/repo/pkgs/' RPMS_ALLOW_CACHE = False - 
MODULES_DEFAULT_REPOSITORY = 'git://pkgs.fedoraproject.org/modules/' + MODULES_DEFAULT_REPOSITORY = 'git+https://src.fedoraproject.org/modules/' MODULES_ALLOW_REPOSITORY = False # Available backends are: console, file, journal. @@ -135,11 +135,10 @@ class ProdConfiguration(BaseConfiguration): {% if env == 'staging' %} KOJI_PROFILE = 'staging' KOJI_ARCHES = ['x86_64', 'i686'] - KOJI_REPOSITORY_URL = 'http://kojipkgs.stg.fedoraproject.org/repos' + KOJI_REPOSITORY_URL = 'https://kojipkgs.stg.fedoraproject.org/repos' MESSAGING_TOPIC_PREFIX = ['org.fedoraproject.stg'] PDC_URL = 'https://pdc.stg.fedoraproject.org/rest_api/v1' - SCMURLS = ['git://pkgs.stg.fedoraproject.org/modules/', - 'git+https://src.stg.fedoraproject.org/modules/', + SCMURLS = ['git+https://src.stg.fedoraproject.org/modules/', 'https://src.stg.fedoraproject.org/modules/', 'https://src.stg.fedoraproject.org/git/modules/'] @@ -151,11 +150,10 @@ class ProdConfiguration(BaseConfiguration): # https://fedoraproject.org/wiki/Changes/DiscontinuePPC64 'platform:f28': ['aarch64', 'armv7hl', 'i686', 'ppc64', 'ppc64le', 'x86_64', 's390x'], } - KOJI_REPOSITORY_URL = 'http://kojipkgs.fedoraproject.org/repos' + KOJI_REPOSITORY_URL = 'https://kojipkgs.fedoraproject.org/repos' MESSAGING_TOPIC_PREFIX = ['org.fedoraproject.prod'] PDC_URL = 'https://pdc.fedoraproject.org/rest_api/v1' - SCMURLS = ['git://pkgs.fedoraproject.org/modules/', - 'git+https://src.fedoraproject.org/modules/', + SCMURLS = ['git+https://src.fedoraproject.org/modules/', 'https://src.fedoraproject.org/modules/', 'https://src.fedoraproject.org/git/modules/'] {% endif %} diff --git a/roles/modernpaste/templates/modern-paste.conf b/roles/modernpaste/templates/modern-paste.conf index 2c55e79d8e..b9c4737b45 100644 --- a/roles/modernpaste/templates/modern-paste.conf +++ b/roles/modernpaste/templates/modern-paste.conf @@ -112,6 +112,8 @@ RewriteEngine on {% if env != 'staging' %} RewriteRule login / [L,R] RewriteRule archive /login/ [L,R] +RewriteRule 
api/paste/recent /login/ [L,R] +RewriteRule api/paste/top /login/ [L,R] {% endif %} RewriteCond %{HTTP_USER_AGENT} ^fpaste\/0\.3.*$ [OR] diff --git a/roles/nagios_server/files/nagios/services/ssl.cfg b/roles/nagios_server/files/nagios/services/ssl.cfg index 16b6fe281a..38dc24eb4c 100644 --- a/roles/nagios_server/files/nagios/services/ssl.cfg +++ b/roles/nagios_server/files/nagios/services/ssl.cfg @@ -26,13 +26,6 @@ define service { use defaulttemplate } -define service { - hostgroup_name pkgs - service_description https-Pkgs-cert - check_command check_ssl_cert!pkgs.fedoraproject.org!60 - use defaulttemplate -} - define service { hostgroup_name proxies service_description https-whatcanidoforfedora-cert diff --git a/roles/nagios_server/files/selinux/nagios_nrpe.mod b/roles/nagios_server/files/selinux/nagios_nrpe.mod new file mode 100644 index 0000000000..80aff88beb Binary files /dev/null and b/roles/nagios_server/files/selinux/nagios_nrpe.mod differ diff --git a/roles/nagios_server/files/selinux/nagios_nrpe.pp b/roles/nagios_server/files/selinux/nagios_nrpe.pp new file mode 100644 index 0000000000..857c18b557 Binary files /dev/null and b/roles/nagios_server/files/selinux/nagios_nrpe.pp differ diff --git a/roles/nagios_server/files/selinux/nagios_nrpe.te b/roles/nagios_server/files/selinux/nagios_nrpe.te new file mode 100644 index 0000000000..098dd49488 --- /dev/null +++ b/roles/nagios_server/files/selinux/nagios_nrpe.te @@ -0,0 +1,32 @@ +module nagios_nrpe 1.0; + +require { + type nagios_t; + type nagios_checkdisk_plugin_t; + type nagios_unconfined_plugin_t; + type nrpe_t; + type system_mail_t; + class process { noatsecure rlimitinh siginh }; + class tcp_socket { read write }; +} + +#============= nagios_checkdisk_plugin_t ============== +# src="nagios_checkdisk_plugin_t" tgt="nrpe_t" class="tcp_socket", perms="{ read write }" +# comm="check_disk" exe="" path="socket:[270138836]" +allow nagios_checkdisk_plugin_t nrpe_t:tcp_socket { read write }; + +#============= 
nagios_t ============== +# src="nagios_t" tgt="nagios_unconfined_plugin_t" class="process", perms="{ noatsecure rlimitinh siginh }" +# comm="check_ping" exe="" path="" +allow nagios_t nagios_unconfined_plugin_t:process { noatsecure rlimitinh siginh }; +# src="nagios_t" tgt="system_mail_t" class="process", perms="{ noatsecure rlimitinh siginh }" +# comm="sendmail" exe="" path="" +allow nagios_t system_mail_t:process { noatsecure rlimitinh siginh }; + +#============= nrpe_t ============== +# src="nrpe_t" tgt="nagios_checkdisk_plugin_t" class="process", perms="{ noatsecure rlimitinh siginh }" +# comm="check_disk" exe="" path="" +allow nrpe_t nagios_checkdisk_plugin_t:process { noatsecure rlimitinh siginh }; +# src="nrpe_t" tgt="nagios_unconfined_plugin_t" class="process", perms="{ noatsecure rlimitinh siginh }" +# comm="check_swap" exe="" path="" +allow nrpe_t nagios_unconfined_plugin_t:process { noatsecure rlimitinh siginh }; diff --git a/roles/nagios_server/tasks/main.yml b/roles/nagios_server/tasks/main.yml index fc25dc0511..e91a075395 100644 --- a/roles/nagios_server/tasks/main.yml +++ b/roles/nagios_server/tasks/main.yml @@ -468,3 +468,15 @@ when: selinux_module is changed tags: - nagios_server + +- name: Copy over our custom selinux module + copy: src=selinux/nagios_nrpe.pp dest=/usr/local/share/nagios-policy/nagios_nrpe.pp + register: selinux_module2 + tags: + - nagios_server + +- name: Install our custom selinux module + command: semodule -i /usr/local/share/nagios-policy/nagios_nrpe.pp + when: selinux_module2 is changed + tags: + - nagios_server diff --git a/roles/nagios_server/templates/nagios/hostgroups/nomail.cfg.j2 b/roles/nagios_server/templates/nagios/hostgroups/nomail.cfg.j2 index 881b950617..53df68536c 100644 --- a/roles/nagios_server/templates/nagios/hostgroups/nomail.cfg.j2 +++ b/roles/nagios_server/templates/nagios/hostgroups/nomail.cfg.j2 @@ -1,6 +1,6 @@ define hostgroup { hostgroup_name nomail - alias No Mail - members *, !status, !registry-cdn, 
!phx2-gw, !ibiblio-gw, !cloud-gw, !bodhost-gw, !coloamer-gw, !dedicated-gw, !host1plus-gw, !internetx-gw, !osuosl-gw, !rdu-gw, !rdu-cc-gw, !tummy-gw, !download-rdu01.fedoraproject.org, !virthost-rdu01.fedoraproject.org, !osbs-control01.phx2.fedoraproject.org, {% for host in groups['bastion']|sort %}!{{host}}, {% endfor %}{% for host in groups['smtp-mm']|sort %}!{{host}}, {% endfor %} {% for host in groups['builders']|sort %}!{{host}},{% endfor %} {% for host in groups['builders-stg']|sort %}!{{host}},{% endfor %} {% for host in groups['cloud']|sort %}!{{host}}, {% endfor %} {% for host in vars['phx2_management_limited']|sort %}!{{host}},{% endfor %} {% for host in vars['phx2_management_hosts']|sort %}!{{host}}{% if not loop.last %},{% endif %} {% endfor %} + alias Detect For 0 Mail In Queue + members {% for host in groups['all']|sort %}{% if hostvars[host].nagios_Check_Services['nrpe'] == true and hostvars[host].nagios_Check_Services['mail'] == true%}{{host}}, {% endif %}{% endfor %} } diff --git a/roles/openqa/dispatcher/tasks/main.yml b/roles/openqa/dispatcher/tasks/main.yml index b27537df33..070931cc45 100644 --- a/roles/openqa/dispatcher/tasks/main.yml +++ b/roles/openqa/dispatcher/tasks/main.yml @@ -153,7 +153,7 @@ - config - name: Write wikitcms token file for fedmsg - copy: src={{ wikitcms_token }} dest=/usr/share/fedmsg/.openidc/oidc_wikitcms.json owner=root group=fedmsg mode=0640 + copy: src={{ wikitcms_token }} dest=/usr/share/fedmsg/.openidc/oidc_wikitcms.json owner=root group=fedmsg mode=0660 when: "wikitcms_token is defined" tags: - config diff --git a/roles/openshift-apps/greenwave/templates/configmap.yml b/roles/openshift-apps/greenwave/templates/configmap.yml index dd297c4898..05041da763 100644 --- a/roles/openshift-apps/greenwave/templates/configmap.yml +++ b/roles/openshift-apps/greenwave/templates/configmap.yml @@ -97,6 +97,7 @@ data: --- !Policy id: "taskotron_release_critical_tasks_for_testing" product_versions: + - fedora-29 - fedora-28 - 
fedora-27 - fedora-26 @@ -114,6 +115,7 @@ data: --- !Policy id: "taskotron_release_critical_tasks_for_stable" product_versions: + - fedora-29 - fedora-28 - fedora-27 - fedora-26 @@ -131,6 +133,7 @@ data: --- !Policy id: "no_requirements_testing" product_versions: + - fedora-29-modular - fedora-28-modular - fedora-epel-7 - fedora-epel-6 @@ -142,6 +145,7 @@ data: --- !Policy id: "no_requirements_for_stable" product_versions: + - fedora-29-modular - fedora-28-modular - fedora-epel-7 - fedora-epel-6 @@ -155,6 +159,7 @@ data: # http://fedoraproject.org/wiki/CI id: "atomic_ci_pipeline_results" product_versions: + - fedora-29 - fedora-28 - fedora-27 - fedora-26 @@ -174,6 +179,7 @@ data: # http://fedoraproject.org/wiki/CI id: "atomic_ci_pipeline_results_stable" product_versions: + - fedora-29 - fedora-28 - fedora-27 - fedora-26 diff --git a/roles/openshift-apps/greenwave/templates/deploymentconfig.yml b/roles/openshift-apps/greenwave/templates/deploymentconfig.yml index 5d3188cfe3..82f93e709a 100644 --- a/roles/openshift-apps/greenwave/templates/deploymentconfig.yml +++ b/roles/openshift-apps/greenwave/templates/deploymentconfig.yml @@ -144,9 +144,7 @@ spec: spec: containers: - name: greenwave-memcached - # XXX: change it to registry.fedoraproject.org/f26/memcached once the - # image gets promoted from candidate-registry to registry. - image: candidate-registry.fedoraproject.org/f26/memcached + image: registry.fedoraproject.org/f28/memcached ports: - containerPort: 11211 resources: diff --git a/roles/openshift-apps/greenwave/templates/imagestream.yml b/roles/openshift-apps/greenwave/templates/imagestream.yml index 9c564644fb..bd35237806 100644 --- a/roles/openshift-apps/greenwave/templates/imagestream.yml +++ b/roles/openshift-apps/greenwave/templates/imagestream.yml @@ -20,5 +20,9 @@ spec: name: quay.io/factory2/greenwave:latest {% else %} # This is 'prod' tag is maintained by hand. 
- name: quay.io/factory2/greenwave:prod + #name: quay.io/factory2/greenwave:prod + # But, pin it to 0.9.4 until we can resolve + # https://github.com/fedora-infra/bodhi/issues/2554 + # https://pagure.io/greenwave/issue/287 + name: quay.io/factory2/greenwave:0.9.4 {% endif %} diff --git a/roles/openshift-apps/koschei/templates/buildconfig.yml b/roles/openshift-apps/koschei/templates/buildconfig.yml new file mode 100644 index 0000000000..39ddfc555e --- /dev/null +++ b/roles/openshift-apps/koschei/templates/buildconfig.yml @@ -0,0 +1,27 @@ +apiVersion: v1 +items: +- apiVersion: v1 + kind: BuildConfig + metadata: + labels: + build: koschei-web + name: koschei-web + spec: + runPolicy: Serial + source: + dockerfile: |- + FROM registry.fedoraproject.org/fedora-minimal:28 + RUN microdnf install koschei-frontend-fedora + EXPOSE 80 + CMD httpd -DFOREGROUND + type: Dockerfile + strategy: + type: Docker + dockerStrategy: + noCache: false + output: + to: + kind: ImageStreamTag + name: koschei-web:latest +kind: List +metadata: {} diff --git a/roles/openshift-apps/koschei/templates/deploymentconfig.yml b/roles/openshift-apps/koschei/templates/deploymentconfig.yml new file mode 100644 index 0000000000..125f8fa61a --- /dev/null +++ b/roles/openshift-apps/koschei/templates/deploymentconfig.yml @@ -0,0 +1,55 @@ +apiVersion: v1 +items: +- apiVersion: v1 + kind: DeploymentConfig + metadata: + labels: + app: koschei + service: web + name: koschei-web + spec: + replicas: 1 + selector: + deploymentconfig: koschei-web + strategy: + activeDeadlineSeconds: 21600 + recreateParams: + timeoutSeconds: 600 + resources: {} + rollingParams: + intervalSeconds: 1 + maxSurge: 25% + maxUnavailable: 25% + timeoutSeconds: 600 + updatePeriodSeconds: 1 + type: Rolling + template: + metadata: + creationTimestamp: null + labels: + app: koschei-web + deploymentconfig: koschei-web + spec: + containers: + - name: koschei-web + image: docker-registry.default.svc:5000/koschei/koschei-web:latest + ports: + - 
containerPort: 80 + resources: {} + volumeMounts: {} + readinessProbe: + timeoutSeconds: 10 + initialDelaySeconds: 5 + httpGet: + path: / + port: 80 + livenessProbe: + timeoutSeconds: 10 + initialDelaySeconds: 30 + httpGet: + path: / + port: 80 + volumes: {} + triggers: {} +kind: List +metadata: {} diff --git a/roles/openshift-apps/koschei/templates/service.yml b/roles/openshift-apps/koschei/templates/service.yml new file mode 100644 index 0000000000..cbb579534e --- /dev/null +++ b/roles/openshift-apps/koschei/templates/service.yml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: koschei-web + labels: + app: koschei + service: web + namespace: koschei +spec: + ports: + - name: web + port: 80 + targetPort: 80 + selector: + deploymentconfig: koschei-web diff --git a/roles/openshift-apps/release-monitoring/files/buildconfig.yml b/roles/openshift-apps/release-monitoring/files/buildconfig.yml index e66e49391c..24903d4fa4 100644 --- a/roles/openshift-apps/release-monitoring/files/buildconfig.yml +++ b/roles/openshift-apps/release-monitoring/files/buildconfig.yml @@ -41,7 +41,7 @@ items: dnf clean all -y RUN git clone https://github.com/release-monitoring/anitya.git && \ pushd anitya && \ - git checkout 0.12.0 && \ + git checkout 0.12.1 && \ pushd docs && \ sphinx-build-3 -b html . 
_build/html && \ mkdir -p ../anitya/static/docs/ && \ diff --git a/roles/openshift-apps/release-monitoring/files/cron.yml b/roles/openshift-apps/release-monitoring/files/cron.yml index e597332a01..7eac536cb5 100644 --- a/roles/openshift-apps/release-monitoring/files/cron.yml +++ b/roles/openshift-apps/release-monitoring/files/cron.yml @@ -3,6 +3,7 @@ kind: CronJob metadata: name: anitya spec: + concurrencyPolicy: Forbid schedule: "10 */12 * * *" jobTemplate: spec: @@ -13,10 +14,18 @@ spec: spec: containers: - name: release-monitoring-web - image: release-monitoring/release-monitoring-web:latest - image: perl - command: ["/usr/share/anitya/anitya_cron.py"] + image: docker-registry.default.svc:5000/release-monitoring/release-monitoring-web:latest + command: ["/usr/local/bin/anitya_cron.py"] env: - name: ANITYA_WEB_CONFIG - value: /etc/anitya/anitya.cfg + value: /etc/anitya/anitya.toml + volumeMounts: + - mountPath: /etc/anitya + name: config-volume + readOnly: true restartPolicy: OnFailure + volumes: + - configMap: + defaultMode: 420 + name: release-monitoring-configmap + name: config-volume diff --git a/roles/openshift-apps/release-monitoring/templates/route.yml b/roles/openshift-apps/release-monitoring/templates/route.yml index 97f90a67e4..a9f5b6c082 100644 --- a/roles/openshift-apps/release-monitoring/templates/route.yml +++ b/roles/openshift-apps/release-monitoring/templates/route.yml @@ -6,7 +6,7 @@ metadata: app: release-monitoring spec: {% if env == 'staging' %} - host: release-monitoring.app.os.stg.fedoraproject.org + host: stg.release-monitoring.org {% else %} host: release-monitoring.org {% endif %} diff --git a/roles/openshift-apps/silverblue/templates/buildconfig.yml b/roles/openshift-apps/silverblue/templates/buildconfig.yml index 4d71b2d35d..485ba44ea2 100644 --- a/roles/openshift-apps/silverblue/templates/buildconfig.yml +++ b/roles/openshift-apps/silverblue/templates/buildconfig.yml @@ -12,7 +12,7 @@ spec: source: type: Git git: - uri: 
https://github.com/teamsilverblue/silverblue-site.git + uri: https://github.com/fedora-silverblue/silverblue-site.git strategy: type: Source sourceStrategy: diff --git a/roles/openshift/project/templates/role-appowners.yml b/roles/openshift/project/templates/role-appowners.yml index 36554c791b..a335224c58 100644 --- a/roles/openshift/project/templates/role-appowners.yml +++ b/roles/openshift/project/templates/role-appowners.yml @@ -1,11 +1,5 @@ apiVersion: v1 -{% if env == "staging" %} kind: Role -{% else %} -# Namespace-local roles did not work until openshift 3.6 -# https://github.com/openshift/origin/issues/14078 -kind: ClusterRole -{% endif %} metadata: annotations: openshift.io/description: An application owner. Can view everything but ConfigMaps. @@ -13,7 +7,7 @@ metadata: namespace: "{{ app }}" rules: - apiGroups: - - "" + - "*" attributeRestrictions: null resources: - endpoints @@ -34,23 +28,65 @@ rules: - update {% endif %} - apiGroups: - - "" + - "*" attributeRestrictions: null resources: + - appliedclusterresourcequotas - bindings + - buildconfigs + - buildconfigs/webhooks + - buildlogs + - builds + - builds/log + - deploymentconfigs + - deploymentconfigs/log + - deploymentconfigs/scale + - deploymentconfigs/status + - deployments + - deployments/scale - events + - horizontalpodautoscalers + - imagestreamimages + - imagestreammappings + - imagestreams + - imagestreams/status + - imagestreamtags + - jobs - limitranges - namespaces - namespaces/status - pods/log - pods/status + - processedtemplates + - replicasets + - replicasets/scale - replicationcontrollers/status - resourcequotas - resourcequotas/status + - resourcequotausages + - routes + - routes/status + - statefulsets + - templateconfigs + - templates verbs: - get - list - watch +- apiGroups: + - "*" + attributeRestrictions: null + resources: + - buildconfigs/instantiate + verbs: + - create +- apiGroups: + - "*" + attributeRestrictions: null + resources: + - projects + verbs: + - get - apiGroups: - 
autoscaling attributeRestrictions: null @@ -72,19 +108,12 @@ rules: - list - watch - apiGroups: - - "*" + - build.openshift.io attributeRestrictions: null resources: - - deployments - - deployments/scale - - horizontalpodautoscalers - - jobs - - replicasets - - replicasets/scale + - jenkins verbs: - - get - - list - - watch + - view - apiGroups: - extensions attributeRestrictions: null @@ -94,143 +123,3 @@ rules: - get - list - watch -- apiGroups: - - apps - attributeRestrictions: null - resources: - - statefulsets - verbs: - - get - - list - - watch -- apiGroups: - - "*" - attributeRestrictions: null - resources: - - buildconfigs - - buildconfigs/webhooks - - builds - verbs: - - get - - list - - watch -- apiGroups: - - "*" - attributeRestrictions: null - resources: - - builds/log - verbs: - - get - - list - - watch -- apiGroups: - - build.openshift.io - attributeRestrictions: null - resources: - - jenkins - verbs: - - view -- apiGroups: - - "*" - attributeRestrictions: null - resources: - - deploymentconfigs - - deploymentconfigs/scale - verbs: - - get - - list - - watch -- apiGroups: - - "*" - attributeRestrictions: null - resources: - - deploymentconfigs/log - - deploymentconfigs/status - verbs: - - get - - list - - watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - imagestreamimages - - imagestreammappings - - imagestreams - - imagestreamtags - verbs: - - get - - list - - watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - imagestreams/status - verbs: - - get - - list - - watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - projects - verbs: - - get -- apiGroups: - - "" - attributeRestrictions: null - resources: - - appliedclusterresourcequotas - verbs: - - get - - list - - watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - routes - verbs: - - get - - list - - watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - routes/status - verbs: - - get - - list - 
- watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - processedtemplates - - templateconfigs - - templates - verbs: - - get - - list - - watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - buildlogs - verbs: - - get - - list - - watch -- apiGroups: - - "" - attributeRestrictions: null - resources: - - resourcequotausages - verbs: - - get - - list - - watch diff --git a/roles/openvpn/base/tasks/main.yml b/roles/openvpn/base/tasks/main.yml index 3037fb4141..9a0ba8db1c 100644 --- a/roles/openvpn/base/tasks/main.yml +++ b/roles/openvpn/base/tasks/main.yml @@ -20,7 +20,7 @@ when: ansible_distribution_major_version|int > 7 and ansible_cmdline.ostree is not defined - name: Install certificate and key (rhel6 and fedora24 and older) - copy: src={{ private }}/files/vpn/openvpn/keys/ca.crt + copy: src={{ private }}/files/vpn/pki/ca.crt dest=/etc/openvpn/ca.crt owner=root group=root mode=0600 tags: @@ -33,7 +33,7 @@ when: ansible_distribution_major_version|int < 25 - name: Install certificate and key (rhel7 or fedora) for client - copy: src={{ private }}/files/vpn/openvpn/keys/ca.crt + copy: src={{ private }}/files/vpn/pki/ca.crt dest=/etc/openvpn/client/ca.crt owner=root group=root mode=0600 tags: @@ -46,7 +46,7 @@ when: ( ansible_distribution_major_version|int != 6 and ansible_distribution_major_version|int != 24 ) and ansible_cmdline.ostree is not defined - name: Install certificate and key (rhel7 or fedora) for server - copy: src={{ private }}/files/vpn/openvpn/keys/ca.crt + copy: src={{ private }}/files/vpn/pki/ca.crt dest=/etc/openvpn/server/ca.crt owner=root group=root mode=0600 tags: diff --git a/roles/openvpn/client/tasks/main.yml b/roles/openvpn/client/tasks/main.yml index ba2d215e49..27c150d16a 100644 --- a/roles/openvpn/client/tasks/main.yml +++ b/roles/openvpn/client/tasks/main.yml @@ -27,10 +27,10 @@ - { file: client.conf, dest: /etc/openvpn/client/openvpn.conf, mode: '0644' } - - { file: "{{ private 
}}/files/vpn/openvpn/keys/{{ inventory_hostname }}.crt", + - { file: "{{ private }}/files/vpn/pki/issued/{{ inventory_hostname }}.crt", dest: "/etc/openvpn/client/client.crt", mode: '0600' } - - { file: "{{ private }}/files/vpn/openvpn/keys/{{ inventory_hostname }}.key", + - { file: "{{ private }}/files/vpn/pki/private/{{ inventory_hostname }}.key", dest: "/etc/openvpn/client/client.key", mode: '0600' } tags: @@ -50,10 +50,10 @@ - { file: client.conf, dest: /etc/openvpn/openvpn.conf, mode: '0644' } - - { file: "{{ private }}/files/vpn/openvpn/keys/{{ inventory_hostname }}.crt", + - { file: "{{ private }}/files/vpn/pki/issued/{{ inventory_hostname }}.crt", dest: "/etc/openvpn/client.crt", mode: '0600' } - - { file: "{{ private }}/files/vpn/openvpn/keys/{{ inventory_hostname }}.key", + - { file: "{{ private }}/files/vpn/pki/private/{{ inventory_hostname }}.key", dest: "/etc/openvpn/client.key", mode: '0600' } tags: diff --git a/roles/openvpn/server/files/ccd/docker-registry01.phx2.fedoraproject.org b/roles/openvpn/server/files/ccd/docker-registry01.phx2.fedoraproject.org deleted file mode 100644 index c2ccfa0d1b..0000000000 --- a/roles/openvpn/server/files/ccd/docker-registry01.phx2.fedoraproject.org +++ /dev/null @@ -1,2 +0,0 @@ -# ifconfig-push actualIP PtPIP -ifconfig-push 192.168.1.48 192.168.0.48 diff --git a/roles/openvpn/server/files/ccd/docker-candidate-registry01.phx2.fedoraproject.org b/roles/openvpn/server/files/ccd/oci-candidate-registry01.phx2.fedoraproject.org similarity index 100% rename from roles/openvpn/server/files/ccd/docker-candidate-registry01.phx2.fedoraproject.org rename to roles/openvpn/server/files/ccd/oci-candidate-registry01.phx2.fedoraproject.org diff --git a/roles/openvpn/server/files/ccd/docker-registry02.phx2.fedoraproject.org b/roles/openvpn/server/files/ccd/oci-registry01.phx2.fedoraproject.org similarity index 100% rename from roles/openvpn/server/files/ccd/docker-registry02.phx2.fedoraproject.org rename to 
roles/openvpn/server/files/ccd/oci-registry01.phx2.fedoraproject.org diff --git a/roles/openvpn/server/files/ccd/docker-registry03.phx2.fedoraproject.org b/roles/openvpn/server/files/ccd/oci-registry02.phx2.fedoraproject.org similarity index 100% rename from roles/openvpn/server/files/ccd/docker-registry03.phx2.fedoraproject.org rename to roles/openvpn/server/files/ccd/oci-registry02.phx2.fedoraproject.org diff --git a/roles/openvpn/server/tasks/main.yml b/roles/openvpn/server/tasks/main.yml index dd760b103c..2d763bd3ae 100644 --- a/roles/openvpn/server/tasks/main.yml +++ b/roles/openvpn/server/tasks/main.yml @@ -27,16 +27,16 @@ - { file: server.conf, dest: /etc/openvpn/server/openvpn.conf, mode: '0644' } - - { file: "{{ private }}/files/vpn/openvpn/keys/crl.pem", + - { file: "{{ private }}/files/vpn/pki/crl.pem", dest: /etc/openvpn/server/crl.pem, mode: '0644' } - - { file: "{{ private }}/files/vpn/openvpn/keys/server.crt", + - { file: "{{ private }}/files/vpn/pki/issued/bastion.fedoraproject.org.crt", dest: /etc/openvpn/server/server.crt, mode: '0644' } - - { file: "{{ private }}/files/vpn/openvpn/keys/server.key", + - { file: "{{ private }}/files/vpn/pki/private/bastion.fedoraproject.org.key", dest: /etc/openvpn/server/server.key, mode: '0600' } - - { file: "{{ private }}/files/vpn/openvpn/keys/dh2048.pem", + - { file: "{{ private }}/files/vpn/dh2048.pem", dest: /etc/openvpn/server/dh2048.pem, mode: '0644' } tags: diff --git a/roles/osbs-common/defaults/main.yml b/roles/osbs-common/defaults/main.yml deleted file mode 100644 index 0436b5c48b..0000000000 --- a/roles/osbs-common/defaults/main.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -# set hostname of the machine -#hostname: example.org - -# set to false if you don't use firewalld or do not want the playbook to modify it -osbs_manage_firewalld: true diff --git a/roles/osbs-common/tasks/main.yml b/roles/osbs-common/tasks/main.yml deleted file mode 100644 index 73fa498d69..0000000000 --- 
a/roles/osbs-common/tasks/main.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- name: set hostname - hostname: name={{ hostname }} - when: hostname is defined - -- name: install basic packages - action: "{{ ansible_pkg_mgr }} name={{ item }} state=present" - with_items: - - vim - - tmux - - wget - - git - - net-tools - - tree - -- name: install yum-utils when using yum - package: name=yum-utils state=present - when: ansible_pkg_mgr == "yum" - -- name: enable rhel7 repos - command: yum-config-manager --enable {{ item }} - with_items: - - rhel-7-server-optional-rpms - - rhel-7-server-extras-rpms - when: ansible_distribution == 'RedHat' and ansible_distribution_major_version == '7' - -- name: enable epel7 - package: name={{ epel7_url }} state=present - when: ansible_os_family == 'RedHat' and ansible_distribution_major_version == '7' - -- name: install firewalld - action: "{{ ansible_pkg_mgr }} name=firewalld state=present" - when: osbs_manage_firewalld - -- name: enable firewalld - service: name=firewalld state=started enabled=yes - when: osbs_manage_firewalld diff --git a/roles/osbs-common/vars/main.yml b/roles/osbs-common/vars/main.yml deleted file mode 100644 index 948958a46b..0000000000 --- a/roles/osbs-common/vars/main.yml +++ /dev/null @@ -1,2 +0,0 @@ ---- -epel7_url: http://download.fedoraproject.org/pub/epel/7/x86_64/e/epel-release-7-5.noarch.rpm diff --git a/roles/osbs-on-openshift/README.md b/roles/osbs-on-openshift/README.md deleted file mode 100644 index 1ccfb2dc7d..0000000000 --- a/roles/osbs-on-openshift/README.md +++ /dev/null @@ -1,19 +0,0 @@ -osbs-on-openshift -================= - -Role for deploying OSBS on top of a pre-existing [OpenShift](https://openshift.org) -cluster where we do not have cluster admin. - -- [OpenShift build service](https://github.com/projectatomic/osbs-client/), -service for building layered Docker images. 
- -This role is based on -[ansible-role-osbs-common](https://github.com/projectatomic/ansible-role-osbs-common) -upstream but the `osbs-common` role in Fedora Infra was pre-existing and used as -a location for common tasks required of all nodes in an osbs cluster. - -This role is part of -[ansible-osbs](https://github.com/projectatomic/ansible-osbs/) -playbook for deploying OpenShift build service. Please refer to that github -repository for [documentation](https://github.com/projectatomic/ansible-osbs/blob/master/README.md) -and [issue tracker](https://github.com/projectatomic/ansible-osbs/issues). diff --git a/roles/osbs-on-openshift/defaults/main.yml b/roles/osbs-on-openshift/defaults/main.yml deleted file mode 100644 index 818a49d248..0000000000 --- a/roles/osbs-on-openshift/defaults/main.yml +++ /dev/null @@ -1,53 +0,0 @@ ---- -osbs_openshift_home: /var/lib/origin - -osbs_namespace: default -osbs_namespace_create: false - -osbs_kubeconfig_path: /etc/origin/master/admin.kubeconfig - -osbs_environment: - KUBECONFIG: "{{ osbs_kubeconfig_path }}" - -osbs_service_accounts: [] - -# openshift authorization - which users should be assigned the view (readonly), -# osbs-builder (readwrite), and cluster-admin (admin) roles -# in default configuration, everyone has read/write access -osbs_readonly_users: [] -osbs_readonly_groups: [] -osbs_readwrite_users: [] -osbs_readwrite_groups: -- system:authenticated -- system:unauthenticated -osbs_admin_users: [] -osbs_admin_groups: [] - -## development w/ auth proxy: -#osbs_readonly_users: [] -#osbs_readonly_groups: [] -#osbs_readwrite_users: [] -#osbs_readwrite_groups: -# - system:authenticated -#osbs_admin_users: [] -#osbs_admin_groups: [] - -## example production configuration: -#osbs_readonly_users: [] -#osbs_readonly_groups: -# - system:authenticated -#osbs_readwrite_groups: [] -#osbs_readwrite_users: -# - kojibuilder -# - "{{ ansible_hostname }}" -# - system:serviceaccount:default:default -#osbs_admin_users: -# - 
foo@EXAMPLE.COM -# - bar@EXAMPLE.COM -#osbs_admin_groups: [] - -# limit on the number of running pods - undefine or set to -1 to remove limit -#osbs_master_max_pods: 3 - -osbs_docker_registry: false -osbs_docker_registry_storage: /opt/openshift-registry diff --git a/roles/osbs-on-openshift/handlers/main.yml b/roles/osbs-on-openshift/handlers/main.yml deleted file mode 100644 index 54df6fc488..0000000000 --- a/roles/osbs-on-openshift/handlers/main.yml +++ /dev/null @@ -1,17 +0,0 @@ ---- -- name: restart openshift-master - service: - name: "{{ osbs_deployment_type }}-master" - state: restarted - -- name: restart httpd - service: name=httpd state=restarted - -- name: restart firewalld - service: name=firewalld state=restarted - -- name: convert privkey to rsa - command: openssl rsa -in {{ osbs_proxy_key_file }} -out {{ osbs_proxy_key_file }} - -- name: concatenate cert and key - shell: cat {{ osbs_proxy_cert_file }} {{ osbs_proxy_key_file }} > {{ osbs_proxy_certkey_file }} diff --git a/roles/osbs-on-openshift/meta/main.yml b/roles/osbs-on-openshift/meta/main.yml deleted file mode 100644 index ba52c1124c..0000000000 --- a/roles/osbs-on-openshift/meta/main.yml +++ /dev/null @@ -1,22 +0,0 @@ -# Standards: 1.2 ---- -galaxy_info: - author: Martin Milata - description: OpenShift build service common role - builder of layered Docker images - company: Red Hat - issue_tracker_url: https://github.com/projectatomic/ansible-osbs/issues - license: BSD - min_ansible_version: 1.9 - platforms: - - name: EL - versions: - - 7 - - name: Fedora - versions: - - 24 - - 25 - categories: - - cloud - - development - - packaging -dependencies: [] diff --git a/roles/osbs-on-openshift/tasks/main.yml b/roles/osbs-on-openshift/tasks/main.yml deleted file mode 100644 index 228008a873..0000000000 --- a/roles/osbs-on-openshift/tasks/main.yml +++ /dev/null @@ -1,141 +0,0 @@ ---- -### openshift service ### - -- name: create osbs namespace - command: > - oc new-project {{ osbs_namespace }} - register: 
new_project - failed_when: new_project.rc != 0 and ('already exists' not in new_project.stderr) - changed_when: new_project.rc == 0 - environment: "{{osbs_environment}}" - when: osbs_namespace_create - -- name: copy service accounts - template: src=openshift-serviceaccount.yml.j2 dest={{ osbs_openshift_home }}/serviceaccount-{{ item }}.yml - with_items: "{{ osbs_service_accounts }}" - register: yaml_sa - -- name: import service accounts - command: > - oc create - --namespace={{ osbs_namespace }} - --filename={{ osbs_openshift_home }}/serviceaccount-{{ item.item }}.yml - register: service_account_import - failed_when: service_account_import.rc != 0 and ('already exists' not in service_account_import.stderr) - environment: "{{osbs_environment}}" - with_items: "{{ yaml_sa.results | default([]) }}" - when: item.changed - -- name: copy role bindings - template: src=openshift-rolebinding.yml.j2 dest={{ osbs_openshift_home }}/rolebinding-{{ item.name }}.yml - with_items: - - name: osbs-readonly - role: view - users: "{{ osbs_readonly_users }}" - groups: "{{ osbs_readonly_groups }}" - - name: osbs-readwrite - role: edit - users: "{{ osbs_readwrite_users }}" - groups: "{{ osbs_readwrite_groups }}" - - name: osbs-admin - role: admin - users: "{{ osbs_admin_users }}" - groups: "{{ osbs_admin_groups }}" - register: yaml_rolebindings - -- name: import the role bindings - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/rolebinding-{{ item.item.name }}.yml - environment: "{{osbs_environment}}" - with_items: "{{ yaml_rolebindings.results }}" - when: item.changed - -- name: copy resource quotas - template: src=openshift-resourcequota.yml.j2 dest={{ osbs_openshift_home }}/resourcequota.yml - when: osbs_master_max_pods is defined and osbs_master_max_pods >= 0 - register: yaml_resourcequotas - tags: - - resourcequotas - -- name: import resource quotas - command: > - oc replace - --namespace={{ osbs_namespace }} - 
--force=true - --filename={{ osbs_openshift_home }}/resourcequota.yml - environment: "{{osbs_environment}}" - when: osbs_master_max_pods is defined and osbs_master_max_pods >= 0 and yaml_resourcequotas.changed - tags: - - resourcequotas - -- name: delete resource quotas - command: > - oc delete - --namespace={{ osbs_namespace }} - --ignore-not-found=true - resourcequota concurrentbuilds - environment: "{{osbs_environment}}" - when: osbs_master_max_pods is not defined or osbs_master_max_pods < 0 - tags: - - resourcequotas - -- name: copy cpu limitrange - template: - src: openshift-limitrange.yml.j2 - dest: "{{ osbs_openshift_home }}/limitrange.yml" - when: osbs_master_cpu_limitrange is defined and osbs_master_cpu_limitrange - register: yaml_limitrange - tags: - - limitranges - -- name: import cpu limitrange - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/limitrange.yml - environment: "{{osbs_environment}}" - when: osbs_master_cpu_limitrange is defined and osbs_master_cpu_limitrange and yaml_limitrange.changed - tags: - - limitranges - -- name: delete cpu limitrange - command: > - oc delete - --namespace={{ osbs_namespace }} - --ignore-not-found=true - limitrange cpureq - environment: "{{osbs_environment}}" - when: osbs_master_cpu_limitrange is not defined or not osbs_master_cpu_limitrange - tags: - - limitranges - -# Setup custom build role -- name: copy custom build role - template: - src: role-osbs-custom-build.yml.j2 - dest: "{{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-role-osbs-custom-build.yml" - environment: "{{ osbs_environment }}" - register: yaml_role - tags: - - oc - -- name: import custom build role - command: > - oc replace - --namespace={{ osbs_namespace }} - --force=true - --filename={{ osbs_openshift_home }}/{{ inventory_hostname }}-{{ osbs_namespace }}-role-osbs-custom-build.yml - environment: "{{ osbs_environment }}" - when: yaml_role.changed - tags: - - 
oc - -- import_tasks: yum_proxy.yml - when: osbs_yum_proxy_image is defined - -- import_tasks: registry.yml - when: osbs_docker_registry is defined and osbs_docker_registry diff --git a/roles/osbs-on-openshift/tasks/registry.yml b/roles/osbs-on-openshift/tasks/registry.yml deleted file mode 100644 index e56aaa8e73..0000000000 --- a/roles/osbs-on-openshift/tasks/registry.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: copy registry service account - template: - src: openshift-serviceaccount.yml.j2 - dest: "{{ osbs_openshift_home }}/serviceaccount-{{ item }}.yml" - with_items: - - registry - register: yaml_sa - tags: - - oc - -- name: import registry service account - command: > - oc create - --namespace=default - --filename={{ osbs_openshift_home }}/serviceaccount-{{ item.item }}.yml - register: service_account_import - failed_when: service_account_import.rc != 0 and ('already exists' not in service_account_import.stderr) - environment: "{{osbs_environment}}" - with_items: "{{ yaml_sa.results | default([]) }}" - when: item.changed - tags: - - oc - -- name: make registry serviceaccount privileged - command: > - oadm policy - --namespace=default - add-scc-to-user - privileged -z registry - environment: "{{osbs_environment}}" - tags: - - oc - -- name: create registry storage - file: - path: "{{ osbs_docker_registry_storage }}" - owner: 1001 - group: root - mode: "0770" - state: directory - -- name: set up internal registry - command: > - oadm registry - --namespace=default - --service-account registry - --credentials /etc/origin/master/openshift-registry.kubeconfig - --mount-host {{ osbs_docker_registry_storage }} - register: create_registry - changed_when: "'service exists' not in create_registry.stdout" - environment: "{{osbs_environment}}" - tags: - - oc diff --git a/roles/osbs-on-openshift/tasks/yum_proxy.yml b/roles/osbs-on-openshift/tasks/yum_proxy.yml deleted file mode 100644 index be2940852b..0000000000 --- a/roles/osbs-on-openshift/tasks/yum_proxy.yml +++ 
/dev/null @@ -1,36 +0,0 @@ ---- -- name: copy yum proxy deployment config - template: src=openshift-yumproxy-dc.yml.j2 dest={{ osbs_openshift_home }}/yumproxy-dc.yml - register: yaml_dc - tags: - - oc - - yumproxy - -- name: import yum proxy deployment config - command: > - oc replace - --force=true - --namespace={{ osbs_namespace }} - --filename={{ osbs_openshift_home }}/yumproxy-dc.yml - when: yaml_dc.changed - tags: - - oc - - yumproxy - -- name: copy yum proxy service - template: src=openshift-yumproxy-svc.yml.j2 dest={{ osbs_openshift_home }}/yumproxy-svc.yml - register: yaml_svc - tags: - - oc - - yumproxy - -- name: import yum proxy service - command: > - oc replace - --force=true - --namespace={{ osbs_namespace }} - --filename={{ osbs_openshift_home }}/yumproxy-svc.yml - when: yaml_svc.changed - tags: - - oc - - yumproxy diff --git a/roles/osbs-on-openshift/templates/openshift-limitrange.yml.j2 b/roles/osbs-on-openshift/templates/openshift-limitrange.yml.j2 deleted file mode 100644 index 4beb22c5d1..0000000000 --- a/roles/osbs-on-openshift/templates/openshift-limitrange.yml.j2 +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v1 -kind: LimitRange -metadata: - name: cpureq -spec: - limits: - - type: Container - defaultRequest: - cpu: {{ osbs_master_cpu_limitrange }} diff --git a/roles/osbs-on-openshift/templates/openshift-resourcequota.yml.j2 b/roles/osbs-on-openshift/templates/openshift-resourcequota.yml.j2 deleted file mode 100644 index cc62a6017f..0000000000 --- a/roles/osbs-on-openshift/templates/openshift-resourcequota.yml.j2 +++ /dev/null @@ -1,7 +0,0 @@ -apiVersion: v1 -kind: ResourceQuota -metadata: - name: concurrentbuilds -spec: - hard: - pods: {{ osbs_master_max_pods }} diff --git a/roles/osbs-on-openshift/templates/openshift-rolebinding.yml.j2 b/roles/osbs-on-openshift/templates/openshift-rolebinding.yml.j2 deleted file mode 100644 index 12174cc36c..0000000000 --- a/roles/osbs-on-openshift/templates/openshift-rolebinding.yml.j2 +++ /dev/null @@ -1,24 
+0,0 @@ -apiVersion: v1 -kind: RoleBinding -metadata: - name: {{ item.name }} -roleRef: - name: {{ item.role }} - -{% if item.users == [] %} -userNames: [] -{% else %} -userNames: -{% for u in item.users %} -- {{ u }} -{% endfor %} -{% endif %} - -{% if item.groups == [] %} -groupNames: [] -{% else %} -groupNames: -{% for g in item.groups %} -- {{ g }} -{% endfor %} -{% endif %} diff --git a/roles/osbs-on-openshift/templates/openshift-serviceaccount.yml.j2 b/roles/osbs-on-openshift/templates/openshift-serviceaccount.yml.j2 deleted file mode 100644 index 931e249f9d..0000000000 --- a/roles/osbs-on-openshift/templates/openshift-serviceaccount.yml.j2 +++ /dev/null @@ -1,4 +0,0 @@ -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ item }} diff --git a/roles/osbs-on-openshift/templates/openshift-yumproxy-dc.yml.j2 b/roles/osbs-on-openshift/templates/openshift-yumproxy-dc.yml.j2 deleted file mode 100644 index a8dd047b72..0000000000 --- a/roles/osbs-on-openshift/templates/openshift-yumproxy-dc.yml.j2 +++ /dev/null @@ -1,31 +0,0 @@ -apiVersion: v1 -kind: DeploymentConfig -metadata: - name: {{ osbs_yum_proxy_name }} - labels: - app: {{ osbs_yum_proxy_name }} -spec: - replicas: 1 - selector: - app: {{ osbs_yum_proxy_name }} - deploymentconfig: {{ osbs_yum_proxy_name }} - template: - metadata: - labels: - app: {{ osbs_yum_proxy_name }} - deploymentconfig: {{ osbs_yum_proxy_name }} - spec: - containers: - - name: {{ osbs_yum_proxy_name }} - image: {{ osbs_yum_proxy_image }} - ports: - - containerPort: 3128 - protocol: TCP - volumeMounts: - - mountPath: /squid - name: {{ osbs_yum_proxy_name }}-volume-1 - volumes: - - emptyDir: {} - name: {{ osbs_yum_proxy_name }}-volume-1 - triggers: - - type: ConfigChange diff --git a/roles/osbs-on-openshift/templates/openshift-yumproxy-svc.yml.j2 b/roles/osbs-on-openshift/templates/openshift-yumproxy-svc.yml.j2 deleted file mode 100644 index 930297631f..0000000000 --- a/roles/osbs-on-openshift/templates/openshift-yumproxy-svc.yml.j2 
+++ /dev/null @@ -1,15 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ osbs_yum_proxy_name }} - labels: - app: {{ osbs_yum_proxy_name }} -spec: - ports: - - name: 3128-tcp - protocol: TCP - port: 3128 - targetPort: 3128 - selector: - app: {{ osbs_yum_proxy_name }} - deploymentconfig: {{ osbs_yum_proxy_name }} diff --git a/roles/osbs-on-openshift/templates/role-osbs-custom-build.yml.j2 b/roles/osbs-on-openshift/templates/role-osbs-custom-build.yml.j2 deleted file mode 100644 index 7beaba0ec7..0000000000 --- a/roles/osbs-on-openshift/templates/role-osbs-custom-build.yml.j2 +++ /dev/null @@ -1,10 +0,0 @@ -apiVersion: v1 -kind: Role -metadata: - name: osbs-custom-build - namespace: {{ osbs_namespace }} -rules: - - verbs: - - create - resources: - - builds/custom \ No newline at end of file diff --git a/roles/pagure/frontend/tasks/main.yml b/roles/pagure/frontend/tasks/main.yml index 0412771f0c..59b925b782 100644 --- a/roles/pagure/frontend/tasks/main.yml +++ b/roles/pagure/frontend/tasks/main.yml @@ -23,6 +23,15 @@ - pagure - packages +- name: install needed packages + package: name={{ item }} state=present + when: env == 'pagure-staging' + with_items: + - pagure-theme-pagureio + tags: + - pagure + - packages + - name: Initialize postgres if necessary command: /usr/bin/postgresql-setup initdb creates=/var/lib/pgsql/data diff --git a/roles/pagure/frontend/templates/pagure.cfg b/roles/pagure/frontend/templates/pagure.cfg index abcbcb3c56..3db2cf708d 100644 --- a/roles/pagure/frontend/templates/pagure.cfg +++ b/roles/pagure/frontend/templates/pagure.cfg @@ -306,3 +306,6 @@ GITOLITE_CELERY_QUEUE = 'gitolite_queue' FAST_CELERY_QUEUE = 'fast_workers' MEDIUM_CELERY_QUEUE = 'medium_workers' SLOW_CELERY_QUEUE = 'slow_workers' +PRIVATE_PROJECTS = False +FEDMSG_NOTIFICATIONS = True +THEME = 'pagureio' diff --git a/roles/push-container-registry/defaults/main.yml b/roles/push-container-registry/defaults/main.yml new file mode 100644 index 0000000000..6c059deb19 --- 
/dev/null +++ b/roles/push-container-registry/defaults/main.yml @@ -0,0 +1,5 @@ +--- +# defaults file for push-container-registry role +# + +certs_group: "releng-team" diff --git a/roles/push-container-registry/tasks/main.yml b/roles/push-container-registry/tasks/main.yml new file mode 100644 index 0000000000..3b5fa6cc58 --- /dev/null +++ b/roles/push-container-registry/tasks/main.yml @@ -0,0 +1,41 @@ +--- +# tasks file for push-container-registry +# This role install skopeo and the certificates +# needed to push container images to our production registry. +# Note : push to the candidate-registry is done using docker login +# see the push-docker role. + +- name: install necessary packages + package: + name: "{{item}}" + state: present + with_items: + - skopeo + tags: + - push-container-registry + +- name: ensure cert dir exists + file: + path: "{{cert_dest_dir}}" + state: directory + tags: + - push-container-registry + +- name: install client cert for registry + copy: + src: "{{cert_src}}" + dest: "{{cert_dest_dir}}/client.cert" + owner: root + group: "{{ certs_group }}" + mode: 0640 + tags: + - push-container-registry + +- name: install client key for registry + copy: + src: "{{key_src}}" + dest: "{{cert_dest_dir}}/client.key" + group: "{{ certs_group }}" + mode: 0640 + tags: + - push-container-registry diff --git a/roles/push-docker/tasks/main.yml b/roles/push-docker/tasks/main.yml index 5e7cbaabe9..dc9433d1b6 100644 --- a/roles/push-docker/tasks/main.yml +++ b/roles/push-docker/tasks/main.yml @@ -1,34 +1,12 @@ --- -# tasks file for push-docker -# +# tasks file for push-docker role +# This role is used to login to a registry using the +# docker client. 
+ - name: install docker and python-docker package: name="{{ item }}" state=present with_items: - docker - - python-docker - -- name: ensure docker daemon cert dir exists - file: - path: "{{docker_cert_dir}}" - state: directory - -#- name: install docker client cert for registry -# copy: -# src: "{{private}}/files/koji/{{docker_cert_name}}.cert.pem" -# dest: "{{docker_cert_dir}}/client.cert" -# owner: root -# group: "releng-team" -# mode: 0640 -# when: docker_cert_name is defined -# -#- name: install docker client key for registry -# copy: -# src: "{{private}}/files/koji/{{docker_cert_name}}.key.pem" -# dest: "{{docker_cert_dir}}/client.key" -# owner: root -# group: "releng-team" -# mode: 0640 -# when: docker_cert_name is defined - name: start and enable docker service: name=docker state=started enabled=yes diff --git a/roles/rabbitmq_cluster/files/enabled_plugins b/roles/rabbitmq_cluster/files/enabled_plugins index 352dfc4de1..6a9f28b93b 100644 --- a/roles/rabbitmq_cluster/files/enabled_plugins +++ b/roles/rabbitmq_cluster/files/enabled_plugins @@ -1 +1 @@ -[rabbitmq_management]. +[rabbitmq_management,rabbitmq_auth_mechanism_ssl]. 
diff --git a/roles/rabbitmq_cluster/templates/rabbitmq.config b/roles/rabbitmq_cluster/templates/rabbitmq.config index a12d27edd1..3fa421d26e 100644 --- a/roles/rabbitmq_cluster/templates/rabbitmq.config +++ b/roles/rabbitmq_cluster/templates/rabbitmq.config @@ -3,6 +3,7 @@ [ %% We do not want plain TCP, only TLS {tcp_listeners, []}, + {ssl_cert_login_from, common_name}, %% Here goes TLS {ssl_listeners, [5671]}, {ssl_options, [{cacertfile, "/etc/rabbitmq/ca.crt"}, diff --git a/roles/releng/files/twoweek-updates b/roles/releng/files/twoweek-updates index 32a69fb6bc..3cfae4f11f 100644 --- a/roles/releng/files/twoweek-updates +++ b/roles/releng/files/twoweek-updates @@ -1,6 +1,8 @@ #Fedora 28 two-week updates nightly compose -MAILTO=releng-cron@lists.fedoraproject.org -15 8 * * * root TMPDIR=`mktemp -d /tmp/twoweekF28.XXXXXX` && pushd $TMPDIR && git clone -n https://pagure.io/pungi-fedora.git && cd pungi-fedora && git checkout f28 && LANG=en_US.UTF-8 ./twoweek-nightly.sh RC-$(date "+\%Y\%m\%d").0 && popd && rm -rf $TMPDIR +# XXX: this has been moved to the updates compose: +# https://kojipkgs.fedoraproject.org/compose/updates/f28-updates/ +#MAILTO=releng-cron@lists.fedoraproject.org +#15 8 * * * root TMPDIR=`mktemp -d /tmp/twoweekF28.XXXXXX` && pushd $TMPDIR && git clone -n https://pagure.io/pungi-fedora.git && cd pungi-fedora && git checkout f28 && LANG=en_US.UTF-8 ./twoweek-nightly.sh RC-$(date "+\%Y\%m\%d").0 && popd && rm -rf $TMPDIR #Fedora 27 two-week updates nightly compose #MAILTO=releng-cron@lists.fedoraproject.org diff --git a/roles/robosignatory/files/robosignatory.production.py b/roles/robosignatory/files/robosignatory.production.py index d4c8277bad..99e7dd7f72 100644 --- a/roles/robosignatory/files/robosignatory.production.py +++ b/roles/robosignatory/files/robosignatory.production.py @@ -98,6 +98,18 @@ config = { "key": "fedora-infra", "keyid": "47dd8ef9" }, + { + "from": "f29-infra-candidate", + "to": "f29-infra-stg", + "key": "fedora-infra", + "keyid": 
"47dd8ef9" + }, + { + "from": "f30-infra-candidate", + "to": "f30-infra-stg", + "key": "fedora-infra", + "keyid": "47dd8ef9" + }, # Gated rawhide and branched { @@ -210,6 +222,18 @@ config = { }, 'robosignatory.ostree_refs': { + 'fedora/30/x86_64/iot': { + 'directory': '/mnt/fedora_koji/koji/compose/iot/repo/', + 'key': 'fedora-30' + }, + 'fedora/30/aarch64/iot': { + 'directory': '/mnt/fedora_koji/koji/compose/iot/repo/', + 'key': 'fedora-30' + }, + 'fedora/30/armhfp/iot': { + 'directory': '/mnt/fedora_koji/koji/compose/iot/repo/', + 'key': 'fedora-30' + }, 'fedora/29/x86_64/iot': { 'directory': '/mnt/fedora_koji/koji/compose/iot/repo/', 'key': 'fedora-29' @@ -322,10 +346,6 @@ config = { 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', 'key': 'fedora-28' }, - 'fedora/rawhide/x86_64/atomic-host': { - 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', - 'key': 'fedora-29' - }, 'fedora/28/x86_64/workstation': { 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', 'key': 'fedora-28' @@ -338,17 +358,69 @@ config = { 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', 'key': 'fedora-28' }, - 'fedora/rawhide/ppc64le/atomic-host': { + 'fedora/29/x86_64/atomic-host': { 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', 'key': 'fedora-29' }, + 'fedora/29/ppc64le/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/aarch64/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/x86_64/updates/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/ppc64le/updates/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/aarch64/updates/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/x86_64/testing/atomic-host': { + 'directory': 
'/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/ppc64le/testing/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/aarch64/testing/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/x86_64/silverblue': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/x86_64/updates/silverblue': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/29/x86_64/testing/silverblue': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-29' + }, + 'fedora/rawhide/x86_64/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-30' + }, + 'fedora/rawhide/ppc64le/atomic-host': { + 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', + 'key': 'fedora-30' + }, 'fedora/rawhide/aarch64/atomic-host': { 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', - 'key': 'fedora-29' + 'key': 'fedora-30' }, - 'fedora/rawhide/x86_64/workstation': { + 'fedora/rawhide/x86_64/silverblue': { 'directory': '/mnt/fedora_koji/koji/compose/atomic/repo/', - 'key': 'fedora-29' + 'key': 'fedora-30' }, } } diff --git a/roles/robosignatory/files/sigul.production.conf b/roles/robosignatory/files/sigul.production.conf index 6524668d2b..6b4865e079 100644 --- a/roles/robosignatory/files/sigul.production.conf +++ b/roles/robosignatory/files/sigul.production.conf @@ -1,7 +1,7 @@ [client] -bridge-hostname: sign-bridge1 -server-hostname: sign-vault1 -client-cert-nickname: sigul-client-cert +bridge-hostname: sign-bridge.phx2.fedoraproject.org +server-hostname: sign-vault.phx2.fedoraproject.org +client-cert-nickname: autopen user-name: autopen [koji] diff --git a/roles/serial-console/tasks/main.yml b/roles/serial-console/tasks/main.yml index a333188fb8..a203439399 100644 --- 
a/roles/serial-console/tasks/main.yml +++ b/roles/serial-console/tasks/main.yml @@ -2,7 +2,7 @@ # This role sets up serial console on ttyS0 # - name: check for grub serial setup - command: cat /etc/grub2.cfg + command: cat /etc/grub2-efi.cfg register: serial check_mode: no changed_when: '1 != 1' diff --git a/roles/sigul/bridge/files/koji-arm.conf b/roles/sigul/bridge/files/koji-arm.conf deleted file mode 100644 index 2341f04c82..0000000000 --- a/roles/sigul/bridge/files/koji-arm.conf +++ /dev/null @@ -1,22 +0,0 @@ -[koji] -realm = FEDORAPROJECT.ORG - -;configuration for koji cli tool - -;url of XMLRPC server -server = https://arm.koji.fedoraproject.org/kojihub - -;url of web interface -weburl = https://arm.koji.fedoraproject.org/koji - -;url of package download site -topurl = https://armpkgs.fedoraproject.org/ - -;path to the koji top directory -;topdir = /mnt/koji -serverca = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem - -authtype = kerberos -principal = sigul/secondary-bridge01.phx2.fedoraproject.org@FEDORAPROJECT.ORG -keytab = /etc/krb5.sigul_secondary-bridge01.phx2.fedoraproject.org.keytab -krb_rdns = false diff --git a/roles/sigul/bridge/files/koji-ppc.conf b/roles/sigul/bridge/files/koji-ppc.conf deleted file mode 100644 index 1c5ac6d458..0000000000 --- a/roles/sigul/bridge/files/koji-ppc.conf +++ /dev/null @@ -1,22 +0,0 @@ -[koji] -realm = FEDORAPROJECT.ORG - -;configuration for koji cli tool - -;url of XMLRPC server -server = https://ppc.koji.fedoraproject.org/kojihub - -;url of web interface -weburl = https://ppc.koji.fedoraproject.org/koji - -;url of package download site -topurl = https://ppc.koji.fedoraproject.org/ - -;path to the koji top directory -;topdir = /mnt/koji -serverca = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem - -authtype = kerberos -principal = sigul/secondary-bridge01.phx2.fedoraproject.org@FEDORAPROJECT.ORG -keytab = /etc/krb5.sigul_secondary-bridge01.phx2.fedoraproject.org.keytab -krb_rdns = false diff --git 
a/roles/sigul/bridge/files/koji-s390.conf b/roles/sigul/bridge/files/koji-s390.conf deleted file mode 100644 index d96564d864..0000000000 --- a/roles/sigul/bridge/files/koji-s390.conf +++ /dev/null @@ -1,22 +0,0 @@ -[koji] -realm = FEDORAPROJECT.ORG - -;configuration for koji cli tool - -;url of XMLRPC server -server = https://s390.koji.fedoraproject.org/kojihub - -;url of web interface -weburl = https://s390.koji.fedoraproject.org/koji - -;url of package download site -topurl = https://s390pkgs.fedoraproject.org/ - -;path to the koji top directory -;topdir = /mnt/koji -serverca = /etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem - -authtype = kerberos -principal = sigul/secondary-bridge01.phx2.fedoraproject.org@FEDORAPROJECT.ORG -keytab = /etc/krb5.sigul_secondary-bridge01.phx2.fedoraproject.org.keytab -krb_rdns = false diff --git a/roles/sigul/bridge/tasks/main.yml b/roles/sigul/bridge/tasks/main.yml index 57fb7b2e64..632d0c889a 100644 --- a/roles/sigul/bridge/tasks/main.yml +++ b/roles/sigul/bridge/tasks/main.yml @@ -2,25 +2,27 @@ package: state=present name=sigul-bridge tags: - packages + - sigul + - sigul/bridge - name: Setup sigul bridge.conf template: src=bridge.conf.j2 dest=/etc/sigul/bridge.conf owner=sigul group=sigul mode=0640 tags: - config + - sigul + - sigul/bridge - name: Setup primary koji config file template: src=koji-primary.conf.j2 dest=/etc/koji-primary.conf owner=root group=root mode=644 when: inventory_hostname.startswith('sign') and env == "production" + tags: + - sigul + - sigul/bridge - name: Setup primary stg koji config file copy: src=koji-primary.stg.conf dest=/etc/koji-primary.conf owner=root group=root mode=644 when: inventory_hostname.startswith('sign') and env == "staging" - -- name: Setup secondary koji config files - copy: src={{ item }} dest=/etc/{{ item }} owner=root group=root mode=644 - with_items: - - koji-arm.conf - - koji-ppc.conf - - koji-s390.conf - when: inventory_hostname.startswith('secondary') + tags: + - sigul + - 
sigul/bridge diff --git a/roles/sigul/bridge/templates/bridge.conf.j2 b/roles/sigul/bridge/templates/bridge.conf.j2 index 4912be5320..ce561a71ec 100644 --- a/roles/sigul/bridge/templates/bridge.conf.j2 +++ b/roles/sigul/bridge/templates/bridge.conf.j2 @@ -2,13 +2,7 @@ # [bridge] # Nickname of the bridge's certificate in the NSS database specified below -{% if env == "staging" %} -bridge-cert-nickname: sigul-bridge-cert -{% elif inventory_hostname.startswith('sign') %} -bridge-cert-nickname: sign-bridge1 - Fedora Project -{% else %} -bridge-cert-nickname: secondary-signer - Fedora Project -{% endif %} +bridge-cert-nickname: sign-bridge.phx2.fedoraproject.org # Maximum accepted total size of all RPM payloads stored on disk for one request max-rpms-payload-size: 70737418240 @@ -27,16 +21,9 @@ fas-password: {{ fedoraDummyUserPassword }} {% endif %} [koji] -{% if inventory_hostname.startswith('sign') %} koji-instances: primary koji-config-primary: /etc/koji-primary.conf koji-config: /etc/koji-primary.conf -{% else %} -koji-instances: ppc s390 arm -koji-config-ppc: /etc/koji-ppc.conf -koji-config-s390: /etc/koji-s390.conf -koji-config-arm: /etc/koji-arm.conf -{% endif %} [daemon] # The user to run as diff --git a/roles/sigul/server/templates/server.conf.j2 b/roles/sigul/server/templates/server.conf.j2 index f642ebc402..fc2ce8ff6e 100644 --- a/roles/sigul/server/templates/server.conf.j2 +++ b/roles/sigul/server/templates/server.conf.j2 @@ -3,13 +3,8 @@ [server] # Host name of the publically acessible bridge to clients -{% if inventory_hostname.startswith('sign') %} -bridge-hostname: sign-bridge1 -server-cert-nickname: sign-vault1 - Fedora Project -{% else %} -bridge-hostname: secondary-signer -server-cert-nickname: secondary-signer-server -{% endif %} +bridge-hostname: sign-bridge.phx2.fedoraproject.org +server-cert-nickname: sigul-vault-cert # Port on which the bridge expects server connections bridge-port: 44333 diff --git a/roles/tang/tasks/main.yml 
b/roles/tang/tasks/main.yml new file mode 100644 index 0000000000..d1d40dfab8 --- /dev/null +++ b/roles/tang/tasks/main.yml @@ -0,0 +1,10 @@ +- name: install tang + package: name=tang state=present + tags: + - tang + - packages + +- name: Enable and start tang + systemd: name=tangd.socket enabled=yes state=started + tags: + - tang diff --git a/roles/taskotron/imagefactory-client/templates/config.ini.j2 b/roles/taskotron/imagefactory-client/templates/config.ini.j2 index 4c72a120e4..226e7293a6 100644 --- a/roles/taskotron/imagefactory-client/templates/config.ini.j2 +++ b/roles/taskotron/imagefactory-client/templates/config.ini.j2 @@ -1,4 +1,4 @@ [default] imgfac_base_url={{imagefactory_baseurl}} -rawhide=29 +rawhide=30 diff --git a/roles/taskotron/imagefactory/tasks/main.yml b/roles/taskotron/imagefactory/tasks/main.yml index 67deaa2e27..f987c17bc1 100644 --- a/roles/taskotron/imagefactory/tasks/main.yml +++ b/roles/taskotron/imagefactory/tasks/main.yml @@ -85,6 +85,7 @@ with_items: - 27 - 28 + - 29 - rawhide - name: Create cronjob to report failed builds diff --git a/roles/taskotron/imagefactory/templates/config_server.ini.j2 b/roles/taskotron/imagefactory/templates/config_server.ini.j2 index 0d4e1c2914..b4d04f33a9 100644 --- a/roles/taskotron/imagefactory/templates/config_server.ini.j2 +++ b/roles/taskotron/imagefactory/templates/config_server.ini.j2 @@ -1,6 +1,6 @@ [default] imgfac_base_url=http://127.0.0.1:8075/imagefactory -rawhide=29 +rawhide=30 mail_from={{deployment_type}}.imagefactory@qa.fedoraproject.org mail_to=jskladan@redhat.com tflink@redhat.com diff --git a/roles/taskotron/taskotron-client/templates/taskotron.yaml.j2 b/roles/taskotron/taskotron-client/templates/taskotron.yaml.j2 index 1e0645f9ea..784bb190d0 100644 --- a/roles/taskotron/taskotron-client/templates/taskotron.yaml.j2 +++ b/roles/taskotron/taskotron-client/templates/taskotron.yaml.j2 @@ -92,14 +92,6 @@ resultsdb_frontend: {{ resultsdb_external_url }} ## Please make sure the URL doesn't have 
a trailing slash. execdb_server: {{ execdb_external_url }} -## URL of taskotron buildmaster, to construct log URLs from. -## Please make sure the URL doesn't have a trailing slash. -taskotron_master: {{ taskotron_master }} - -## URL of artifacts base directory, to construct artifacts URLs from. -## Please make sure the URL doesn't have a trailing slash. -artifacts_baseurl: {{ artifacts_base_url }} - {% if deployment_type in ['dev'] %} ## URL of VAULT server API interface, which stores secrets. ## Please make sure the URL doesn't have a trailing slash. @@ -114,6 +106,16 @@ vault_username: {{ vault_api_username }} ## Password for vault server vault_password: {{ vault_api_password }} {% endif %} + +## URL of taskotron buildmaster, to construct log URLs from. +## Please make sure the URL doesn't have a trailing slash. +taskotron_master: {{ taskotron_master }} + +## URL of artifacts base directory, to construct artifacts URLs from. The final +## URL will be $artifacts_baseurl/. +## Please make sure the URL doesn't have a trailing slash. +artifacts_baseurl: {{ artifacts_base_url }} + ## Whether to cache downloaded files to speed up subsequent downloads. If True, ## files will be downloaded to a common directory specified by "cachedir". At ## the moment, Taskotron only supports Koji RPM downloads to be cached. @@ -154,7 +156,7 @@ imagesdir: {{ imagesdir }} force_imageurl: False ## Url of an image to download and use for disposable client, if force_imageurl was set -#imageurl: +#imageurl: http://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2 ## Default distro/release/flavor/arch for the disposable images discovery #default_disposable_distro: fedora @@ -179,6 +181,10 @@ minion_repos: - copr:kparal/taskotron-dev {% endif %} +## If one or more minions repos fail to be added (e.g. not accessible), should +## we abort the execution or ignore the error and continue? 
+## [default: False] +minion_repos_ignore_errors: True ## ==== LOGGING section ==== ## This section contains configuration of logging. diff --git a/roles/tftp_server/tasks/main.yml b/roles/tftp_server/tasks/main.yml index df6d157e48..6b1fc27cf9 100644 --- a/roles/tftp_server/tasks/main.yml +++ b/roles/tftp_server/tasks/main.yml @@ -40,3 +40,10 @@ synchronize: src="{{ bigfiles }}/tftpboot/" dest=/var/lib/tftpboot/ tags: - tftp_server + +- name: generate custom configs + template: src=grubhost.cfg.j2 dest="/var/lib/tftpboot/uefi/{{ hostvars[item].install_mac }}" + with_items: "{{ groups['all'] }}" + when: "hostvars[item].install_noc == inventory_hostname" + tags: + - tftp_server diff --git a/roles/tftp_server/templates/grubhost.cfg.j2 b/roles/tftp_server/templates/grubhost.cfg.j2 new file mode 100644 index 0000000000..b70cbf7b40 --- /dev/null +++ b/roles/tftp_server/templates/grubhost.cfg.j2 @@ -0,0 +1,28 @@ +set default="0" + +function load_video { + if [ x$feature_all_video_module = xy ]; then + insmod all_video + else + insmod efi_gop + insmod efi_uga + insmod ieee1275_fb + insmod vbe + insmod vga + insmod video_bochs + insmod video_cirrus + fi +} + +load_video +set gfxpayload=keep +insmod gzio +insmod part_gpt +insmod ext2 + +set timeout=5 + +menuentry 'Install {{ item }}' --class red --class gnu-linux --class gnu --class os { + linux {{ hostvars[item].install_binpath }}/vmlinuz ip=dhcp biosdevname=0 ksdevice=eth0 net.ifnames=0 ks={{ hostvars[item].install_ks }} inst.repo={{ hostvars[item].install_repo }} nomodeset + initrd {{ hostvars[item].install_binpath }}/initrd.img +}