Inventory group/host variables: Sort yaml

This was done using yq (
https://mikefarah.gitbook.io/yq/operators/sort-keys )

Doing things this way makes it much easier to see if a variable is set
in a file or if two hosts differ in what variables they set. Hopefully
we can keep things sorted moving forward.

Basically this means just sort a-z anything you add to any host or group
variable and it will be in the right place.

Additionally, this enforces 'normal' indent rules for all the variable
files which we should also try and obey. 2 spaces for first level, 3 for
next, etc. When in doubt you can run yq on it.

This should cause NO actual variable changes, it's all just readability
fixing for humans, ansible parses it exactly the same.

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
This commit is contained in:
Kevin Fenzi 2021-11-16 13:27:57 -08:00
parent f6f29ded92
commit 580cd252c5
769 changed files with 12115 additions and 15787 deletions

View file

@ -1,6 +1,6 @@
---
ansible_ifcfg_blocklist: true
freezes: false
host_group: cloud
sudoers: "{{ private }}/files/sudo/arm-packager-sudoers"
sudoers_main: nopasswd
host_group: cloud
ansible_ifcfg_blocklist: true

View file

@ -5,321 +5,56 @@
# Background/reference about external repos pulled in:
# https://pagure.io/fedora-infrastructure/issue/5476
#
# IPA settings
additional_host_keytabs: []
ansible_base: /srv/web/infra
# Path to the openshift-ansible checkout as external git repo brought into
# Fedora Infra
openshift_ansible: /srv/web/infra/openshift-ansible/
#
# END: Ansible roles_path variables
#######
freezes: true
# most of our systems are in IAD2
datacenter: iad2
preferred_dc: iad2
postfix_group: "none"
# usually we do not want to enable nested virt, only on some virthosts
nested: false
# most of our systems are 64bit.
# Used to install various nagios scripts and the like.
libdir: /usr/lib64
# Most EL systems need default EPEL repos.
# Some systems (notably fed-cloud*) need to get their own
# EPEL files because EPEL overrides packages in their core repos.
use_default_epel: true
# example of ports for default iptables
# tcp_ports: [ 22, 80, 443 ]
# udp_ports: [ 110, 1024, 2049 ]
# multiple lines can be handled as below
# custom_rules: [ '-A INPUT -p tcp -m tcp --dport 8888 -j ACCEPT',
# '-A INPUT -p tcp -m tcp --dport 8889 -j ACCEPT' ]
# We default these to empty
udp_ports: []
tcp_ports: []
custom_rules: []
nat_rules: []
custom6_rules: []
# defaults for hw installs
install_noc: none
# defaults for virt installs
ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-7
ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL7-x86_64/
mem_size: 4096
num_cpus: 2
lvm_size: 20000
# on MOST infra systems, the interface connected to the infra network
# is eth0. but not on quite ALL systems. e.g. on s390 boxes it's enc900,
# on openqa-ppc64le-01.qa it's eth2 for some reason, and on qa01.qa and
# qa02.qa it's em3. currently this only affects whether GATEWAY, DOMAIN
# and DNS1/DNS2 lines are put into ifcfg-(device).
ansible_ifcfg_infra_net_devices: [ 'eth0', 'enc900' ]
# Default netmask. All of our iad2 nets are /24's. Almost all of our
# non-iad2 sites are less than a /24.
eth0_nm: 255.255.255.0
eth1_nm: 255.255.255.0
eth1_ip: 10.0.0.10
br0_nm: 255.255.255.0
br1_nm: 255.255.255.0
nm: 255.255.255.0
# Default to managing the network, we want to not do this on select
# hosts (like cloud nodes)
ansible_ifcfg_blocklist: false
# List of interfaces to explicitly disable
ansible_ifcfg_disabled: []
# on MOST infra systems, the interface connected to the infra network
# is eth0. but not on quite ALL systems. e.g. on s390 boxes it's enc900,
# on openqa-ppc64le-01.qa it's eth2 for some reason, and on qa01.qa and
# qa02.qa it's em3. currently this only affects whether GATEWAY, DOMAIN
# and DNS1/DNS2 lines are put into ifcfg-(device).
ansible_ifcfg_infra_net_devices: ['eth0', 'enc900']
#
# The default virt-install works for rhel7 or fedora with 1 nic
# Autodetect python version
#
virt_install_command: "{{ virt_install_command_one_nic }}"
main_bridge: br0
nfs_bridge: br1
mac_address: RANDOM
mac_address1: RANDOM
virt_install_command_pxe_rhcos: virt-install -n {{ inventory_hostname }}
--vcpus {{ num_cpus }},maxvcpus={{ num_cpus }}
--cpu host
--memory {{ mem_size }}
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--nographics
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--hvm --accelerate
--autostart --wait=-1
--extra-args "ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:ens2:none hostname={{ inventory_hostname }} nameserver={{ dns }} console=ttyS0 nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url={{ rhcos_install_rootfs_url }} coreos.inst.ignition_url={{ rhcos_ignition_file_url }}"
--os-variant rhel7
--location {{ rhcos_install_url }}
virt_install_command_one_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ipv4 }}::{{ eth0_ipv4_gw }}:{{ eth0_ipv4_nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
virt_install_command_two_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
--autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns1 }}
ip={{ eth0_ipv4 }}::{{ eth0_ipv4_gw }}:{{ eth0_ipv4_nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
virt_install_command_two_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
--autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_ppc64le_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_ppc64le_two_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
--autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_aarch64_one_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole
virt_install_command_aarch64_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole
virt_install_command_aarch64_2nd_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole
virt_install_command_aarch64_two_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
--autostart --noautoconsole --rng /dev/random
virt_install_command_armv7_one_nic: virt-install -n {{ inventory_hostname }} --arch armv7l
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }}
--autostart --noautoconsole --rng /dev/random
virt_install_command_armv7_one_nic_unsafe: virt-install -n {{ inventory_hostname }} --arch armv7l
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }}
--autostart --noautoconsole --rng /dev/random --qemu-commandline="-machine highmem=off"
virt_install_command_s390x_one_nic: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole --rng /dev/random --cpu host
virt_install_command_s390x_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
hostname={{ inventory_hostname }} nameserver={{ dns }}
ip={{ eth0_ipv4 }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
--network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
--autostart --noautoconsole --rng /dev/random --cpu host
virt_install_command_rhel6: virt-install -n {{ inventory_hostname }}
--memory={{ mem_size }},maxmemory={{ max_mem_size }}
--disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
--vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
"inst.ksdevice=eth0 inst.ks={{ ks_url }} ip={{ eth0_ip }} netmask={{ nm }}
gateway={{ gw }} dns={{ dns }} console=tty0 console=ttyS0
hostname={{ inventory_hostname }}"
--network=bridge=br0 --autostart --noautoconsole --watchdog default
max_mem_size: "{{ mem_size * 5 }}"
max_cpu: "{{ num_cpus * 5 }}"
# This is the wildcard certname for our proxies. It has a different name for
# the staging group and is used in the proxies.yml playbook.
wildcard_cert_name: wildcard-2020.fedoraproject.org
wildcard_crt_file: wildcard-2020.fedoraproject.org.cert
wildcard_key_file: wildcard-2020.fedoraproject.org.key
wildcard_int_file: wildcard-2020.fedoraproject.org.intermediate.cert
# This is the openshift wildcard cert. Until it exists set it equal to wildcard
os_wildcard_cert_name: wildcard-2021.app.os.fedoraproject.org
os_wildcard_crt_file: wildcard-2021.app.os.fedoraproject.org.cert
os_wildcard_key_file: wildcard-2021.app.os.fedoraproject.org.key
os_wildcard_int_file: wildcard-2021.app.os.fedoraproject.org.intermediate.cert
# This is the openshift wildcard cert for ocp
ocp_wildcard_cert_name: wildcard-2021.apps.ocp.fedoraproject.org
ocp_wildcard_cert_file: wildcard-2021.apps.ocp.fedoraproject.org.cert
ocp_wildcard_key_file: wildcard-2021.apps.ocp.fedoraproject.org.key
ocp_wildcard_int_file: wildcard-2021.apps.ocp.fedoraproject.org.intermediate.cert
# This is the mirrors.centos.org certs
mirrors_centos_org_cert_name: mirrors.centos.org
mirrors_centos_org_cert_file: mirrors.centos.org.cert
mirrors_centos_org_key_file: mirrors.centos.org.key
# Everywhere, always, we should sign messages and validate signatures.
# However, we allow individual hosts and groups to override this. Use this very
# carefully.. and never in production (good for testing stuff in staging).
fedmsg_sign_messages: True
fedmsg_validate_signatures: True
# By default, nodes get no fedmsg certs. They need to declare them explicitly.
fedmsg_certs: []
# By default, fedmsg should not log debug info. Groups can override this.
fedmsg_loglevel: INFO
# By default, fedmsg sends error logs to sysadmin-datanommer-members@fp.o.
fedmsg_error_recipients:
- sysadmin-datanommer-members@fedoraproject.org
# By default, fedmsg hosts are in passive mode. External hosts are typically
# active.
fedmsg_active: False
# Other defaults for fedmsg environments
fedmsg_prefix: org.fedoraproject
fedmsg_env: prod
# Amount of time to wait for connections after a socket is first established.
fedmsg_post_init_sleep: 1.0
# A special flag that, when set to true, will disconnect the host from the
# global fedmsg-relay instance and set it up with its own local one. You can
# temporarily set this to true for a specific host to do some debugging -- so
# you can *replay real messages from the datagrepper history without having
# those broadcast to the rest of the bus*.
fedmsg_debug_loopback: False
ansible_python_interpreter: auto
# Set variable if we want to use our global iptables defaults
# Some things need to set their own.
baseiptables: True
# by default set become to false here We can override it as needed.
# Note that if become is true, you need to unset requiretty for
# ssh controlpersist to work.
become: false
br0_nm: 255.255.255.0
br1_nm: 255.255.255.0
# assume collectd apache
collectd_apache: true
# true or false if we are or are not a copr build virthost.
# Default to false
copr_build_virthost: false
# assume createrepo is true and this builder has the koji nfs mount to do that
createrepo: True
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Unspecified
csi_relationship: |
Unspecified.
* What hosts/services does this rely on?
* What hosts/services rely on this?
To update this text, add the csi_* vars to group_vars/ in ansible.
# This vars get shoved into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Unspecified
custom6_rules: []
custom_rules: []
# most of our systems are in IAD2
datacenter: iad2
# These are used to:
# 1) configure mod_wsgi
# 2) open iptables rules for fedmsg (per wsgi thread)
@ -330,130 +65,187 @@ fedmsg_debug_loopback: False
# By default, nodes don't backup any dbs on them unless they declare it.
dbs_to_backup: []
# by default the number of procs we allow before we whine
nrpe_procs_warn: 250
nrpe_procs_crit: 300
# by default, the number of emails in queue before we whine
nrpe_check_postfix_queue_warn: 2
nrpe_check_postfix_queue_crit: 5
dns1: "10.3.163.33"
dns2: "10.3.163.34"
# env is staging or production, we default it to production here.
env: production
env_prefix: ""
env_suffix: ""
env_short: prod
# nfs mount options, override at the group/host level
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
# by default set become to false here We can override it as needed.
# Note that if become is true, you need to unset requiretty for
# ssh controlpersist to work.
become: false
# default the root_auth_users to nothing.
# This should be set for cloud instances in their host or group vars.
root_auth_users: ''
# This vars get shoved into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Unspecified
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Unspecified
csi_relationship: |
Unspecified.
* What hosts/services does this rely on?
* What hosts/services rely on this?
To update this text, add the csi_* vars to group_vars/ in ansible.
env_suffix: ""
# Default netmask. All of our iad2 nets are /24's. Almost all of our
# non-iad2 sites are less than a /24.
eth0_nm: 255.255.255.0
eth1_ip: 10.0.0.10
eth1_nm: 255.255.255.0
# By default, fedmsg hosts are in passive mode. External hosts are typically
# active.
fedmsg_active: False
# By default, nodes get no fedmsg certs. They need to declare them explicitly.
fedmsg_certs: []
# A special flag that, when set to true, will disconnect the host from the
# global fedmsg-relay instance and set it up with its own local one. You can
# temporarily set this to true for a specific host to do some debugging -- so
# you can *replay real messages from the datagrepper history without having
# those broadcast to the rest of the bus*.
fedmsg_debug_loopback: False
fedmsg_env: prod
# By default, fedmsg sends error logs to sysadmin-datanommer-members@fp.o.
fedmsg_error_recipients:
- sysadmin-datanommer-members@fedoraproject.org
# By default, fedmsg should not log debug info. Groups can override this.
fedmsg_loglevel: INFO
# Amount of time to wait for connections after a socket is first established.
fedmsg_post_init_sleep: 1.0
# Other defaults for fedmsg environments
fedmsg_prefix: org.fedoraproject
# Everywhere, always, we should sign messages and validate signatures.
# However, we allow individual hosts and groups to override this. Use this very
# carefully.. and never in production (good for testing stuff in staging).
fedmsg_sign_messages: True
fedmsg_validate_signatures: True
#
# say if we want the apache role dependency for mod_wsgi or not
# In some cases we want mod_wsgi and no apache (for python3 httpaio stuff)
#
wsgi_wants_apache: true
# IPA settings
additional_host_keytabs: []
# END: Ansible roles_path variables
#######
freezes: true
# defaults for hw installs
install_noc: none
ipa_admin_password: "{{ ipa_prod_admin_password }}"
ipa_realm: FEDORAPROJECT.ORG
ipa_server: ipa01.iad2.fedoraproject.org
ipa_server_nodes:
- ipa01.iad2.fedoraproject.org
- ipa02.iad2.fedoraproject.org
#- ipa03.iad2.fedoraproject.org
ipa_realm: FEDORAPROJECT.ORG
ipa_admin_password: "{{ ipa_prod_admin_password }}"
primary_auth_source: ipa
# Normal default sshd port is 22
sshd_port: 22
# This enables/disables the SSH "keyhelper" used by Pagure for verifying users'
# SSH keys from the Pagure database
sshd_keyhelper: false
# List of names under which the host is available
ssh_hostnames: []
# assume collectd apache
collectd_apache: true
# assume vpn is false
vpn: False
# assume createrepo is true and this builder has the koji nfs mount to do that
createrepo: True
ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL7-x86_64/
# defaults for virt installs
ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-7
# most of our systems are 64bit.
# Used to install various nagios scripts and the like.
libdir: /usr/lib64
lvm_size: 20000
mac_address: RANDOM
mac_address1: RANDOM
main_bridge: br0
max_cpu: "{{ num_cpus * 5 }}"
max_mem_size: "{{ mem_size * 5 }}"
mem_size: 4096
mirrors_centos_org_cert_file: mirrors.centos.org.cert
# This is the mirrors.centos.org certs
mirrors_centos_org_cert_name: mirrors.centos.org
mirrors_centos_org_key_file: mirrors.centos.org.key
nagios_Can_Connect: true
# Nagios global variables
nagios_Check_Services:
mail: true
nrpe: true
sshd: true
named: false
dhcpd: false
httpd: false
swap: true
mail: true
named: false
nrpe: true
ping: true
raid: false
nagios_Can_Connect: true
# Set variable if we want to use our global iptables defaults
# Some things need to set their own.
baseiptables: True
sshd: true
swap: true
nat_rules: []
# usually we do not want to enable nested virt, only on some virthosts
nested: false
nfs_bridge: br1
# nfs mount options, override at the group/host level
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
nm: 255.255.255.0
# Most of our machines have manual resolv.conf files
# These settings are for machines where NM is supposed to control resolv.conf.
nm_controlled_resolv: False
dns1: "10.3.163.33"
dns2: "10.3.163.34"
nrpe_check_postfix_queue_crit: 5
# by default, the number of emails in queue before we whine
nrpe_check_postfix_queue_warn: 2
nrpe_procs_crit: 300
# by default the number of procs we allow before we whine
nrpe_procs_warn: 250
num_cpus: 2
# ocp4 is only set true in some proxy roles
ocp4: false
ocp_wildcard_cert_file: wildcard-2021.apps.ocp.fedoraproject.org.cert
# This is the openshift wildcard cert for ocp
ocp_wildcard_cert_name: wildcard-2021.apps.ocp.fedoraproject.org
ocp_wildcard_int_file: wildcard-2021.apps.ocp.fedoraproject.org.intermediate.cert
ocp_wildcard_key_file: wildcard-2021.apps.ocp.fedoraproject.org.key
# Path to the openshift-ansible checkout as external git repo brought into
# Fedora Infra
openshift_ansible: /srv/web/infra/openshift-ansible/
# This is the openshift wildcard cert. Until it exists set it equal to wildcard
os_wildcard_cert_name: wildcard-2021.app.os.fedoraproject.org
os_wildcard_crt_file: wildcard-2021.app.os.fedoraproject.org.cert
os_wildcard_int_file: wildcard-2021.app.os.fedoraproject.org.intermediate.cert
os_wildcard_key_file: wildcard-2021.app.os.fedoraproject.org.key
postfix_group: "none"
# This is a list of services that need to wait for VPN to be up before getting started.
postvpnservices: []
# true or false if we are or are not a copr build virthost.
# Default to false
copr_build_virthost: false
preferred_dc: iad2
primary_auth_source: ipa
#
# Set a redirectmatch variable we can use to disable some redirectmatches
# like the prerelease to final ones.
#
redirectmatch_enabled: True
# default the root_auth_users to nothing.
# This should be set for cloud instances in their host or group vars.
root_auth_users: ''
# List of names under which the host is available
ssh_hostnames: []
# This enables/disables the SSH "keyhelper" used by Pagure for verifying users'
# SSH keys from the Pagure database
sshd_keyhelper: false
# Normal default sshd port is 22
sshd_port: 22
#
# sshd can run a internal sftp server, we need this on some hosts, but
# not on most of them, so default to false
sshd_sftp: false
tcp_ports: []
# example of ports for default iptables
# tcp_ports: [ 22, 80, 443 ]
# udp_ports: [ 110, 1024, 2049 ]
# multiple lines can be handled as below
# custom_rules: [ '-A INPUT -p tcp -m tcp --dport 8888 -j ACCEPT',
# '-A INPUT -p tcp -m tcp --dport 8889 -j ACCEPT' ]
# We default these to empty
udp_ports: []
# Most EL systems need default EPEL repos.
# Some systems (notably fed-cloud*) need to get their own
# EPEL files because EPEL overrides packages in their core repos.
use_default_epel: true
#
# Autodetect python version
# The default virt-install works for rhel7 or fedora with 1 nic
#
ansible_python_interpreter: auto
virt_install_command: "{{ virt_install_command_one_nic }}"
virt_install_command_aarch64_2nd_nic: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole
virt_install_command_aarch64_one_nic: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole
virt_install_command_aarch64_one_nic_unsafe: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole
virt_install_command_aarch64_two_nic: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }} --autostart --noautoconsole --rng /dev/random
virt_install_command_armv7_one_nic: virt-install -n {{ inventory_hostname }} --arch armv7l --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0 hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }} --autostart --noautoconsole --rng /dev/random
virt_install_command_armv7_one_nic_unsafe: virt-install -n {{ inventory_hostname }} --arch armv7l --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0 hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }} --autostart --noautoconsole --rng /dev/random --qemu-commandline="-machine highmem=off"
virt_install_command_one_nic: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0 hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ipv4 }}::{{ eth0_ipv4_gw }}:{{ eth0_ipv4_nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
virt_install_command_one_nic_unsafe: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0 hostname={{ inventory_hostname }} nameserver={{ dns1 }} ip={{ eth0_ipv4 }}::{{ eth0_ipv4_gw }}:{{ eth0_ipv4_nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
virt_install_command_ppc64le_one_nic_unsafe: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0 hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_ppc64le_two_nic_unsafe: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0 hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }} --autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_pxe_rhcos: virt-install -n {{ inventory_hostname }} --vcpus {{ num_cpus }},maxvcpus={{ num_cpus }} --cpu host --memory {{ mem_size }} --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --nographics --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --hvm --accelerate --autostart --wait=-1 --extra-args "ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:ens2:none hostname={{ inventory_hostname }} nameserver={{ dns }} console=ttyS0 nomodeset rd.neednet=1 coreos.inst=yes coreos.inst.install_dev=vda coreos.live.rootfs_url={{ rhcos_install_rootfs_url }} coreos.inst.ignition_url={{ rhcos_ignition_file_url }}" --os-variant rhel7 --location {{ rhcos_install_url }}
virt_install_command_rhel6: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x "inst.ksdevice=eth0 inst.ks={{ ks_url }} ip={{ eth0_ip }} netmask={{ nm }} gateway={{ gw }} dns={{ dns }} console=tty0 console=ttyS0 hostname={{ inventory_hostname }}" --network=bridge=br0 --autostart --noautoconsole --watchdog default
virt_install_command_s390x_one_nic: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole --rng /dev/random --cpu host
virt_install_command_s390x_one_nic_unsafe: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ipv4 }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --autostart --noautoconsole --rng /dev/random --cpu host
virt_install_command_two_nic: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }} --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0 hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }} --autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_two_nic_unsafe: virt-install -n {{ inventory_hostname }} --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x 'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0 hostname={{ inventory_hostname }} nameserver={{ dns }} ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none' --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }} --network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }} --autostart --noautoconsole --watchdog default --rng /dev/random
# assume vpn is false
vpn: False
# This is the wildcard certname for our proxies. It has a different name for
# the staging group and is used in the proxies.yml playbook.
wildcard_cert_name: wildcard-2020.fedoraproject.org
wildcard_crt_file: wildcard-2020.fedoraproject.org.cert
wildcard_int_file: wildcard-2020.fedoraproject.org.intermediate.cert
wildcard_key_file: wildcard-2020.fedoraproject.org.key
#
# say if we want the apache role dependency for mod_wsgi or not
# In some cases we want mod_wsgi and no apache (for python3 httpaio stuff)
#
wsgi_wants_apache: true
# set no x-forward header by default
x_forward: false
# ocp4 is only set true in some proxy roles
ocp4: false

View file

@ -1,39 +1,32 @@
---
# Define resources for this group of hosts here.
lvm_size: 30000
mem_size: 2048
num_cpus: 2
ansible_ifcfg_allowlist:
- eth0
- eth1
csi_primary_contact: Release Engineering - rel-eng@lists.fedoraproject.org
csi_purpose: Automatically sign Rawhide and Branched packages
csi_relationship: |
This host will run the robosignatory application which should automatically sign
builds. It listens to koji over fedora-messaging for notifications of new builds,
and then asks sigul, the signing server, to sign the rpms and store the new rpm
header back in Koji.
# For the MOTD
csi_security_category: High
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
# Make connections from signing bridges stateless, they break sigul connections
# https://bugzilla.redhat.com/show_bug.cgi?id=1283364
custom_rules: ['-A INPUT --proto tcp --sport 44334 --source 10.3.169.120 -j ACCEPT']
ansible_ifcfg_allowlist:
- eth0
- eth1
fedmsg_error_recipients: []
host_group: autosign
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
ipa_host_group: autosign
ipa_host_group_desc: Hosts signing content automatically
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
fedmsg_error_recipients: []
lvm_size: 30000
mem_size: 2048
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
# For the MOTD
csi_security_category: High
csi_primary_contact: Release Engineering - rel-eng@lists.fedoraproject.org
csi_purpose: Automatically sign Rawhide and Branched packages
csi_relationship: |
This host will run the robosignatory application which should automatically sign
builds. It listens to koji over fedora-messaging for notifications of new builds,
and then asks sigul, the signing server, to sign the rpms and store the new rpm
header back in Koji.
num_cpus: 2

View file

@ -2,5 +2,4 @@
# Make connections from signing bridges stateless, they break sigul connections
# https://bugzilla.redhat.com/show_bug.cgi?id=1283364
custom_rules: ['-A INPUT --proto tcp --sport 44334 --source 10.3.169.120 -j ACCEPT']
host_group: autosign

View file

@ -1,10 +1,10 @@
---
primary_auth_source: ipa
ipa_client_shell_groups:
- sysadmin-badges
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-badges
ipa_host_group: badges
ipa_host_group_desc: Hosts running the Badges application
ipa_client_shell_groups:
- sysadmin-badges
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-badges
primary_auth_source: ipa

View file

@ -1,60 +1,51 @@
---
lvm_size: 20000
mem_size: 16384
num_cpus: 2
freezes: false
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 3000, 3001, 3002, 3003,
3004, 3005, 3006, 3007 ]
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-badges-members@fedoraproject.org
fedmsg_hub_auto_restart: True
fedmsg_hub_memory_limit_mb: "{{ (mem_size / 2) | int }}"
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: fedbadges
owner: root
group: fedmsg
can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
# For the MOTD
csi_security_category: Low
csi_primary_contact: Badges admins - sysadmin-badges-members@fedoraproject.org
csi_purpose: Run fedmsg-hub with the fedbadges plugin to award badges (+ some crons)
csi_relationship: |
fedbadges integrates many different services..
fedbadges integrates many different services..
* The fedbadges fedmsg-hub plugin relies on:
* the fedmsg bus, to deliver messages
* pkgdb, for queries about who owns what packages
* fas, to lookup what irc nick corresponds to what fas user.
* db-datanommer for the fedmsg history
* db01, for storing badge awards
* The fedbadges fedmsg-hub plugin relies on:
* the fedmsg bus, to deliver messages
* pkgdb, for queries about who owns what packages
* fas, to lookup what irc nick corresponds to what fas user.
* db-datanommer for the fedmsg history
* db01, for storing badge awards
* badges-web01 will be expecting to display badges entered into the tahrir
db on db01. So, if badges stop showing up there, the problem is likely
here.
* badges-web01 will be expecting to display badges entered into the tahrir
db on db01. So, if badges stop showing up there, the problem is likely
here.
* Locally, of note there exists:
* a git repo of badge rules and images to be synced here by ansible
to /usr/share/badges/
* a local file cache in /var/tmp/fedbadges-cache.dbm (not memcached, atm)
* Furthermore, there are a ton of cronjobs for awarding badges in
/usr/share/badges/cronjobs/ that depends on all sorts of third parties
(flickr, google+, libravatar, etc..).
* Locally, of note there exists:
* a git repo of badge rules and images to be synced here by ansible
to /usr/share/badges/
* a local file cache in /var/tmp/fedbadges-cache.dbm (not memcached, atm)
* Furthermore, there are a ton of cronjobs for awarding badges in
/usr/share/badges/cronjobs/ that depends on all sorts of third parties
(flickr, google+, libravatar, etc..).
# For the MOTD
csi_security_category: Low
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
group: fedmsg
owner: root
service: fedbadges
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-badges-members@fedoraproject.org
fedmsg_hub_auto_restart: True
fedmsg_hub_memory_limit_mb: "{{ (mem_size / 2) | int }}"
freezes: false
lvm_size: 20000
mem_size: 16384
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007]

View file

@ -1,59 +1,51 @@
---
# Define resources for this group of hosts here.
lvm_size: 20000
mem_size: 8192
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 3000, 3001, 3002, 3003,
3004, 3005, 3006, 3007 ]
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-badges-members@fedoraproject.org
fedmsg_hub_auto_restart: True
fedmsg_hub_memory_limit_mb: "{{ (mem_size / 2) | int }}"
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: fedbadges
owner: root
group: fedmsg
can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
# For the MOTD
csi_security_category: Low
csi_primary_contact: Badges admins - sysadmin-badges-members@fedoraproject.org
csi_purpose: Run fedmsg-hub with the fedbadges plugin to award badges (+ some crons)
csi_relationship: |
fedbadges integrates many different services..
fedbadges integrates many different services..
* The fedbadges fedmsg-hub plugin relies on:
* the fedmsg bus, to deliver messages
* pkgdb, for queries about who owns what packages
* fas, to lookup what irc nick corresponds to what fas user.
* db-datanommer for the fedmsg history
* db01, for storing badge awards
* The fedbadges fedmsg-hub plugin relies on:
* the fedmsg bus, to deliver messages
* pkgdb, for queries about who owns what packages
* fas, to lookup what irc nick corresponds to what fas user.
* db-datanommer for the fedmsg history
* db01, for storing badge awards
* badges-web01 will be expecting to display badges entered into the tahrir
db on db01. So, if badges stop showing up there, the problem is likely
here.
* badges-web01 will be expecting to display badges entered into the tahrir
db on db01. So, if badges stop showing up there, the problem is likely
here.
* Locally, of note there exists:
* a git repo of badge rules and images to be synced here by ansible
to /usr/share/badges/
* a local file cache in /var/tmp/fedbadges-cache.dbm (not memcached, atm)
* Furthermore, there are a ton of cronjobs for awarding badges in
/usr/share/badges/cronjobs/ that depends on all sorts of third parties
(flickr, google+, libravatar, etc..).
* Locally, of note there exists:
* a git repo of badge rules and images to be synced here by ansible
to /usr/share/badges/
* a local file cache in /var/tmp/fedbadges-cache.dbm (not memcached, atm)
* Furthermore, there are a ton of cronjobs for awarding badges in
/usr/share/badges/cronjobs/ that depends on all sorts of third parties
(flickr, google+, libravatar, etc..).
# For the MOTD
csi_security_category: Low
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
group: fedmsg
owner: root
service: fedbadges
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-badges-members@fedoraproject.org
fedmsg_hub_auto_restart: True
fedmsg_hub_memory_limit_mb: "{{ (mem_size / 2) | int }}"
lvm_size: 20000
mem_size: 8192
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007]

View file

@ -1,11 +1,11 @@
---
ipa_client_shell_groups:
- sysadmin-badges
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-badges
- sysadmin-noc
- sysadmin-veteran
ipa_host_group: badges
ipa_host_group_desc: Hosts running the Badges application
ipa_client_shell_groups:
- sysadmin-badges
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-badges
- sysadmin-noc
- sysadmin-veteran

View file

@ -1,9 +1,48 @@
---
csi_primary_contact: Badges admins - sysadmin-badges-members@fedoraproject.org
csi_purpose: Run the 'tahrir' mod_wsgi app to display badges.fedoraproject.org
csi_relationship: |
The apache/mod_wsgi app is the only thing really running here
* This host relies on:
* db01 for its database of badge awards (and users, etc..)
* a collection of .pngs in /usr/share/badges/pngs put there by ansible
* memcached!
* Conversely, a few things rely on this site:
* We have a mediawiki plugin that hits a JSON endpoint to display badges.
It should be resilient, but issues in the badges app may cascade into
mediawiki issues in the event of faults.
* fedora-mobile (the android app) queries the JSON api here.
* zodbot has a .badges <username> command that queries the JSON api here.
* openbadges.org may call back to this app to verify that badge assertions
are really certified by us (this will happen anytime someone exports
their fedora badges to the mozilla universe via the tahrir web
interface, but may also happen later in the future to ensure we did not
revoke such and such badge).
# For the MOTD
csi_security_category: Low
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
- fedbadges.person.login.first
group: tahrir
owner: root
service: tahrir
freezes: false
lvm_size: 20000
mem_size: 6144
num_cpus: 2
freezes: false
tcp_ports: [80]
# Definining these vars has a number of effects
# 1) mod_wsgi is configured to use the vars for its own setup
# 2) iptables opens enough ports for all threads for fedmsg
@ -11,49 +50,3 @@ freezes: false
wsgi_fedmsg_service: tahrir
wsgi_procs: 2
wsgi_threads: 2
tcp_ports: [ 80 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: tahrir
owner: root
group: tahrir
can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
- fedbadges.person.login.first
# For the MOTD
csi_security_category: Low
csi_primary_contact: Badges admins - sysadmin-badges-members@fedoraproject.org
csi_purpose: Run the 'tahrir' mod_wsgi app to display badges.fedoraproject.org
csi_relationship: |
The apache/mod_wsgi app is the only thing really running here
* This host relies on:
* db01 for its database of badge awards (and users, etc..)
* a collection of .pngs in /usr/share/badges/pngs put there by ansible
* memcached!
* Conversely, a few things rely on this site:
* We have a mediawiki plugin that hits a JSON endpoint to display badges.
It should be resilient, but issues in the badges app may cascade into
mediawiki issues in the event of faults.
* fedora-mobile (the android app) queries the JSON api here.
* zodbot has a .badges <username> command that queries the JSON api here.
* openbadges.org may call back to this app to verify that badge assertions
are really certified by us (this will happen anytime someone exports
their fedora badges to the mozilla universe via the tahrir web
interface, but may also happen later in the future to ensure we did not
revoke such and such badge).

View file

@ -1,9 +1,48 @@
---
# Define resources for this group of hosts here.
csi_primary_contact: Badges admins - sysadmin-badges-members@fedoraproject.org
csi_purpose: Run the 'tahrir' mod_wsgi app to display badges.fedoraproject.org
csi_relationship: |
The apache/mod_wsgi app is the only thing really running here
* This host relies on:
* db01 for its database of badge awards (and users, etc..)
* a collection of .pngs in /usr/share/badges/pngs put there by ansible
* memcached!
* Conversely, a few things rely on this site:
* We have a mediawiki plugin that hits a JSON endpoint to display badges.
It should be resilient, but issues in the badges app may cascade into
mediawiki issues in the event of faults.
* fedora-mobile (the android app) queries the JSON api here.
* zodbot has a .badges <username> command that queries the JSON api here.
* openbadges.org may call back to this app to verify that badge assertions
are really certified by us (this will happen anytime someone exports
their fedora badges to the mozilla universe via the tahrir web
interface, but may also happen later in the future to ensure we did not
revoke such and such badge).
# For the MOTD
csi_security_category: Low
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
- fedbadges.person.login.first
group: tahrir
owner: root
service: tahrir
lvm_size: 20000
mem_size: 2048
num_cpus: 2
tcp_ports: [80]
# Definining these vars has a number of effects
# 1) mod_wsgi is configured to use the vars for its own setup
# 2) iptables opens enough ports for all threads for fedmsg
@ -11,48 +50,3 @@ num_cpus: 2
wsgi_fedmsg_service: tahrir
wsgi_procs: 2
wsgi_threads: 2
tcp_ports: [ 80 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: tahrir
owner: root
group: tahrir
can_send:
- fedbadges.badge.award
- fedbadges.person.rank.advance
- fedbadges.person.login.first
# For the MOTD
csi_security_category: Low
csi_primary_contact: Badges admins - sysadmin-badges-members@fedoraproject.org
csi_purpose: Run the 'tahrir' mod_wsgi app to display badges.fedoraproject.org
csi_relationship: |
The apache/mod_wsgi app is the only thing really running here
* This host relies on:
* db01 for its database of badge awards (and users, etc..)
* a collection of .pngs in /usr/share/badges/pngs put there by ansible
* memcached!
* Conversely, a few things rely on this site:
* We have a mediawiki plugin that hits a JSON endpoint to display badges.
It should be resilient, but issues in the badges app may cascade into
mediawiki issues in the event of faults.
* fedora-mobile (the android app) queries the JSON api here.
* zodbot has a .badges <username> command that queries the JSON api here.
* openbadges.org may call back to this app to verify that badge assertions
are really certified by us (this will happen anytime someone exports
their fedora badges to the mozilla universe via the tahrir web
interface, but may also happen later in the future to ensure we did not
revoke such and such badge).

View file

@ -1,22 +1,13 @@
---
# Define resources for this group of hosts here.
custom_rules: [
# fas01, fas02
'-A INPUT -p tcp -m tcp -s 10.5.126.25 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.126.26 --dport 80 -j ACCEPT',
# wiki01, wiki02
'-A INPUT -p tcp -m tcp -s 10.5.126.63 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.126.73 --dport 80 -j ACCEPT',
# os-node*
'-A INPUT -p tcp -m tcp -s 10.5.126.248 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.126.164 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.126.165 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.126.166 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.126.167 --dport 80 -j ACCEPT']
lvm_size: 30000
mem_size: 4096
num_cpus: 2
custom_rules: [
# fas01, fas02
'-A INPUT -p tcp -m tcp -s 10.5.126.25 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.126.26 --dport 80 -j ACCEPT',
# wiki01, wiki02
'-A INPUT -p tcp -m tcp -s 10.5.126.63 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.126.73 --dport 80 -j ACCEPT',
# os-node*
'-A INPUT -p tcp -m tcp -s 10.5.126.248 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.126.164 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.126.165 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.126.166 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.126.167 --dport 80 -j ACCEPT',
]
primary_auth_source: ipa

View file

@ -1,17 +1,12 @@
---
# Define resources for this group of hosts here.
custom_rules: [
# fas01.stg
'-A INPUT -p tcp -m tcp -s 10.5.128.129 --dport 80 -j ACCEPT',
# wiki01.stg
'-A INPUT -p tcp -m tcp -s 10.5.128.188 --dport 80 -j ACCEPT',
# os-node*.stg
'-A INPUT -p tcp -m tcp -s 10.5.128.104 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.128.105 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.128.106 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.5.128.107 --dport 80 -j ACCEPT']
lvm_size: 20000
mem_size: 4096
num_cpus: 2
custom_rules: [
# fas01.stg
'-A INPUT -p tcp -m tcp -s 10.5.128.129 --dport 80 -j ACCEPT',
# wiki01.stg
'-A INPUT -p tcp -m tcp -s 10.5.128.188 --dport 80 -j ACCEPT',
# os-node*.stg
'-A INPUT -p tcp -m tcp -s 10.5.128.104 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.128.105 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.128.106 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 10.5.128.107 --dport 80 -j ACCEPT',
]

View file

@ -1,72 +1,5 @@
---
# Define resources for this group of hosts here.
lvm_size: 20000
mem_size: 8192
num_cpus: 4
#
# allow incoming openvpn and smtp
#
tcp_ports: [ 22, 25, 1194 ]
udp_ports: [ 1194 ]
#
# drop incoming traffic from less trusted vpn hosts
# allow ntp from internal RH 10 nets
#
custom_rules: [
'-A INPUT -s 192.168.100/24 -j REJECT --reject-with icmp-host-prohibited',
'-A INPUT -s 10.0.0.0/8 -p udp -m udp --dport 123 -j ACCEPT',
]
primary_auth_source: ipa
# allow a bunch of sysadmin groups here so they can access internal stuff
ipa_host_group: bastion
ipa_host_group_desc: Bastion hosts
ipa_client_shell_groups:
- pungi-devel
- sysadmin-analysis
- sysadmin-dba
- sysadmin-ppc
- sysadmin-secondary
- sysadmin-spin
- sysadmin-troubleshoot
- sysadmin-qa
- sysadmin-kernel
ipa_client_shell_groups_inherit_from:
- batcave
fasjson_url: https://fasjson.fedoraproject.org/
#
# This is a postfix gateway. This will pick up gateway postfix config in base
#
postfix_group: gateway
postfix_transport_filename: transports.gateway
#
# Set this to get fasclient cron to make the aliases file
#
fas_aliases: true
#
# Set this to get fasjson-client cron to make the aliases file
#
fasjson_aliases: false
#
# Sometimes there are lots of postfix processes
#
nrpe_procs_warn: 1100
nrpe_procs_crit: 1200
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: sysadmin-main admin@fedoraproject.org
csi_purpose: SSH proxy to access infrastructure not exposed to the web
csi_relationship: |
@ -75,9 +8,58 @@ csi_relationship: |
- All incoming SMTP from iad2 and VPN, as well as outgoing SMTP,
pass or are filtered here.
- Bastion does not accept any mail outside phx2/vpn.
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
#
# drop incoming traffic from less trusted vpn hosts
# allow ntp from internal RH 10 nets
#
custom_rules: ['-A INPUT -s 192.168.100/24 -j REJECT --reject-with icmp-host-prohibited', '-A INPUT -s 10.0.0.0/8 -p udp -m udp --dport 123 -j ACCEPT']
#
# Set this to get fasclient cron to make the aliases file
#
fas_aliases: true
#
# Set this to get fasjson-client cron to make the aliases file
#
fasjson_aliases: false
fasjson_url: https://fasjson.fedoraproject.org/
ipa_client_shell_groups:
- pungi-devel
- sysadmin-analysis
- sysadmin-dba
- sysadmin-ppc
- sysadmin-secondary
- sysadmin-spin
- sysadmin-troubleshoot
- sysadmin-qa
- sysadmin-kernel
ipa_client_shell_groups_inherit_from:
- batcave
# allow a bunch of sysadmin groups here so they can access internal stuff
ipa_host_group: bastion
ipa_host_group_desc: Bastion hosts
lvm_size: 20000
mem_size: 8192
nagios_Check_Services:
nrpe: true
mail: false
# needed for rhel8
nrpe: true
nrpe_procs_crit: 1200
#
# Sometimes there are lots of postfix processes
#
nrpe_procs_warn: 1100
num_cpus: 4
#
# This is a postfix gateway. This will pick up gateway postfix config in base
#
postfix_group: gateway
postfix_transport_filename: transports.gateway
primary_auth_source: ipa
#
# allow incoming openvpn and smtp
#
tcp_ports: [22, 25, 1194]
udp_ports: [1194]

View file

@ -1,59 +1,16 @@
---
# Define resources for this group of hosts here.
lvm_size: 20000
mem_size: 8192
num_cpus: 4
#
# allow incoming openvpn and smtp
#
tcp_ports: [ 22, 25, 1194 ]
udp_ports: [ 1194 ]
#
# drop incoming traffic from less trusted vpn hosts
# allow ntp from internal RH 10 nets
#
custom_rules: [
'-A INPUT -s 192.168.100/24 -j REJECT --reject-with icmp-host-prohibited',
'-A INPUT -s 10.0.0.0/8 -p udp -m udp --dport 123 -j ACCEPT',
]
#
# allow a bunch of sysadmin groups here so they can access internal stuff
#
ipa_host_group: bastion
ipa_host_group_desc: Bastion hosts
bastion_ipa_client_shell_groups:
- pungi-devel
- sysadmin-analysis
- sysadmin-dba
- sysadmin-ppc
- sysadmin-secondary
- sysadmin-spin
- sysadmin-troubleshoot
# this only works if the `batcave_stg` group and at least one host in it is defined
# batcave_ipa_client_shell_groups: "{{ hostvars[groups['batcave_stg'][0]]['ipa_client_shell_groups'] | default([]) }}"
batcave_ipa_client_shell_groups: []
bastion_ipa_client_shell_groups:
- pungi-devel
- sysadmin-analysis
- sysadmin-dba
- sysadmin-ppc
- sysadmin-secondary
- sysadmin-spin
- sysadmin-troubleshoot
ipa_client_shell_groups: "{{ (bastion_ipa_client_shell_groups + batcave_ipa_client_shell_groups) | sort | unique }}"
#
# Set this to get fasjson-client cron to make the aliases file
#
fasjson_aliases: true
#
# Sometimes there are lots of postfix processes
#
nrpe_procs_warn: 1100
nrpe_procs_crit: 1200
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: sysadmin-main admin@fedoraproject.org
csi_purpose: SSH proxy to access STAGING infrastructure not exposed to the web
csi_relationship: |
@ -62,9 +19,38 @@ csi_relationship: |
- All incoming SMTP from iad2 and VPN, as well as outgoing SMTP,
pass or are filtered here.
- Bastion does not accept any mail outside phx2/vpn.
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
#
# drop incoming traffic from less trusted vpn hosts
# allow ntp from internal RH 10 nets
#
custom_rules: ['-A INPUT -s 192.168.100/24 -j REJECT --reject-with icmp-host-prohibited', '-A INPUT -s 10.0.0.0/8 -p udp -m udp --dport 123 -j ACCEPT']
#
# Set this to get fasjson-client cron to make the aliases file
#
fasjson_aliases: true
ipa_client_shell_groups: "{{ (bastion_ipa_client_shell_groups + batcave_ipa_client_shell_groups) | sort | unique }}"
#
# allow a bunch of sysadmin groups here so they can access internal stuff
#
ipa_host_group: bastion
ipa_host_group_desc: Bastion hosts
lvm_size: 20000
mem_size: 8192
nagios_Check_Services:
nrpe: true
mail: false
# needed for rhel8
nrpe: true
nrpe_procs_crit: 1200
#
# Sometimes there are lots of postfix processes
#
nrpe_procs_warn: 1100
num_cpus: 4
#
# allow incoming openvpn and smtp
#
tcp_ports: [22, 25, 1194]
udp_ports: [1194]

View file

@ -1,83 +1,76 @@
---
lvm_size: 500000
mem_size: 24576
num_cpus: 10
tcp_ports: [ 80, 443, 8442, 8443 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
primary_auth_source: ipa
ipa_host_group: batcave
ipa_host_group_desc: The Bat Cave
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-ask
- sysadmin-badges
- sysadmin-bot
- sysadmin-centos
- sysadmin-cloud
- sysadmin-copr
- sysadmin-coreos
- sysadmin-cvs
- sysadmin-datanommer
- sysadmin-debuginfod
- sysadmin-fedimg
- sysadmin-koschei
- sysadmin-libravatar
- sysadmin-mbs
- sysadmin-messaging
- sysadmin-noc
- sysadmin-odcs
- sysadmin-osbs
- sysadmin-qa
- sysadmin-retrace
- sysadmin-releasemonitoring
- sysadmin-releng
- sysadmin-tools
- sysadmin-upstreamfirst
- sysadmin-veteran
- sysadmin-web
ansible_base: /srv/web/infra
freezes: true
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- ansible.playbook.complete
- ansible.playbook.start
- logger.log
- service: scm
owner: root
group: sysadmin
can_send:
- infragit.receive
# For the MOTD
csi_security_category: High
csi_primary_contact: admin@fedoraproject.org / sysadmin-main-members
csi_purpose: Central management host for ansible
csi_relationship: |
From the batcave batman ventures out to fight crime and protect gotham city!
From the batcave batman ventures out to fight crime and protect gotham city!
batcave is the central management host for ansible.
It also is the infrastructure.fedoraproject.org website with various content.
It houses a number of infrastructure git repos.
batcave is the central management host for ansible.
It also is the infrastructure.fedoraproject.org website with various content.
It houses a number of infrastructure git repos.
* This host relies on:
The virthost it's hosted on (virthost22)
* This host relies on:
The virthost it's hosted on (virthost22)
* Things that rely on this host:
Things that access rhel/fedora/infra rpm repos, including builders and infra hosts.
If this host is down, ansible runs cannot be made to update other hosts.
If this host is down, crime may go up in gotham city.
nrpe_procs_warn: 900
* Things that rely on this host:
Things that access rhel/fedora/infra rpm repos, including builders and infra hosts.
If this host is down, ansible runs cannot be made to update other hosts.
If this host is down, crime may go up in gotham city.
# For the MOTD
csi_security_category: High
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
fedmsg_certs:
- can_send:
- ansible.playbook.complete
- ansible.playbook.start
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- infragit.receive
group: sysadmin
owner: root
service: scm
freezes: true
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-ask
- sysadmin-badges
- sysadmin-bot
- sysadmin-centos
- sysadmin-cloud
- sysadmin-copr
- sysadmin-coreos
- sysadmin-cvs
- sysadmin-datanommer
- sysadmin-debuginfod
- sysadmin-fedimg
- sysadmin-koschei
- sysadmin-libravatar
- sysadmin-mbs
- sysadmin-messaging
- sysadmin-noc
- sysadmin-odcs
- sysadmin-osbs
- sysadmin-qa
- sysadmin-retrace
- sysadmin-releasemonitoring
- sysadmin-releng
- sysadmin-tools
- sysadmin-upstreamfirst
- sysadmin-veteran
- sysadmin-web
ipa_host_group: batcave
ipa_host_group_desc: The Bat Cave
lvm_size: 500000
mem_size: 24576
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
nrpe_procs_crit: 1000
vpn: true
nrpe_procs_warn: 900
num_cpus: 10
primary_auth_source: ipa
sshd_sftp: true
tcp_ports: [80, 443, 8442, 8443]
vpn: true

View file

@ -1,13 +1,11 @@
---
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_weburl: "https://koji.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
ipa_host_group: kojibuilder-kernel
ipa_host_group_desc: Koji Build hosts for kernel builds
# Both of these default to sysadmin-main in the ipa/client role
ipa_client_shell_groups: []
ipa_client_sudo_groups: []
ipa_host_group: kojibuilder-kernel
ipa_host_group_desc: Koji Build hosts for kernel builds
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"

View file

@ -1,48 +1,39 @@
---
lvm_size: 30000
mem_size: 4096
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443, 8888 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
primary_auth_source: ipa
ipa_host_group: blockerbugs
ipa_host_group_desc: Blocker bug tracking service
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-qa
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-qa
# This gets overridden by whichever node we want to run special cronjobs.
master_blockerbugs_node: False
host_group: blockerbugs
blockerbugs_secret_key: "{{ stg_blockerbugs_secret_key }}"
blockerbugs_url: 'https://qa.fedoraproject.org/blockerbugs/'
blockerbugs_bugzilla_url: 'https://bugzilla.redhat.com/'
blockerbugs_bodhi_url: 'https://bodhi.fedoraproject.org/'
blockerbugs_pagure_url: 'https://pagure.io/'
blockerbugs_pagure_repo_token_secret: "{{ blockerbugs_pagure_repo_token }}"
blockerbugs_pagure_repo_webhook_key_secret: "{{ blockerbugs_pagure_repo_webhook_key }}"
blockerbugs_bugzilla_url: 'https://bugzilla.redhat.com/'
blockerbugs_db_host: "{{ blockerbugs_db_host_machine }}"
############################################################
# blockerbugs db details
############################################################
blockerbugs_db_host_machine: db01.iad2.fedoraproject.org
blockerbugs_db_host: "{{ blockerbugs_db_host_machine }}"
blockerbugs_db_port: 5432
blockerbugs_db_name: blockerbugs
blockerbugs_db_password: "{{ prod_blockerbugs_db_password }}"
blockerbugs_db_port: 5432
# these aren't right but they're just placeholders for now
blockerbugs_db_user: "{{ prod_blockerbugs_db_user }}"
blockerbugs_db_password: "{{ prod_blockerbugs_db_password }}"
blockerbugs_pagure_repo_token_secret: "{{ blockerbugs_pagure_repo_token }}"
blockerbugs_pagure_repo_webhook_key_secret: "{{ blockerbugs_pagure_repo_webhook_key }}"
blockerbugs_pagure_url: 'https://pagure.io/'
blockerbugs_secret_key: "{{ stg_blockerbugs_secret_key }}"
blockerbugs_url: 'https://qa.fedoraproject.org/blockerbugs/'
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
host_group: blockerbugs
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-qa
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-qa
ipa_host_group: blockerbugs
ipa_host_group_desc: Blocker bug tracking service
lvm_size: 30000
# This gets overridden by whichever node we want to run special cronjobs.
master_blockerbugs_node: False
mem_size: 4096
num_cpus: 2
primary_auth_source: ipa
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [80, 443, 8888]

View file

@ -1,44 +1,36 @@
---
lvm_size: 30000
mem_size: 4096
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443, 8888 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
ipa_host_group: blockerbugs
ipa_host_group_desc: Blocker bug tracking service
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-qa
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-qa
# This gets overridden by whichever node we want to run special cronjobs.
master_blockerbugs_node: False
blockerbugs_secret_key: "{{ stg_blockerbugs_secret_key }}"
blockerbugs_url: 'https://qa.stg.fedoraproject.org/blockerbugs/'
blockerbugs_bugzilla_url: 'https://bugzilla.stage.redhat.com/'
blockerbugs_bodhi_url: 'https://bodhi.stg.fedoraproject.org/'
blockerbugs_pagure_url: 'https://stg.pagure.io/'
blockerbugs_pagure_repo_token_secret: "{{ blockerbugs_stg_pagure_repo_token }}"
blockerbugs_pagure_repo_webhook_key_secret: "{{ blockerbugs_stg_pagure_repo_webhook_key }}"
blockerbugs_bugzilla_url: 'https://bugzilla.stage.redhat.com/'
blockerbugs_db_host: "{{ blockerbugs_db_host_machine }}"
############################################################
# blockerbugs db details
############################################################
blockerbugs_db_host_machine: db01.stg.iad2.fedoraproject.org
blockerbugs_db_host: "{{ blockerbugs_db_host_machine }}"
blockerbugs_db_port: 5432
blockerbugs_db_name: blockerbugs
blockerbugs_db_user: "{{ stg_blockerbugs_db_user }}"
blockerbugs_db_password: "{{ stg_blockerbugs_db_password }}"
blockerbugs_db_port: 5432
blockerbugs_db_user: "{{ stg_blockerbugs_db_user }}"
blockerbugs_pagure_repo_token_secret: "{{ blockerbugs_stg_pagure_repo_token }}"
blockerbugs_pagure_repo_webhook_key_secret: "{{ blockerbugs_stg_pagure_repo_webhook_key }}"
blockerbugs_pagure_url: 'https://stg.pagure.io/'
blockerbugs_secret_key: "{{ stg_blockerbugs_secret_key }}"
blockerbugs_url: 'https://qa.stg.fedoraproject.org/blockerbugs/'
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-qa
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-qa
ipa_host_group: blockerbugs
ipa_host_group_desc: Blocker bug tracking service
lvm_size: 30000
# This gets overridden by whichever node we want to run special cronjobs.
master_blockerbugs_node: False
mem_size: 4096
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [80, 443, 8888]

View file

@ -1,50 +1,36 @@
---
# common items for the releng-* boxes
lvm_size: 100000
mem_size: 16384
num_cpus: 16
nm: 255.255.255.0
gw: 10.5.125.254
dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7
ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
virt_install_command: "{{ virt_install_command_two_nic }}"
# Do not use testing repositories on production
testing: False
# Make connections from signing bridges stateless, they break sigul connections
# https://bugzilla.redhat.com/show_bug.cgi?id=1283364
custom_rules: ['-A INPUT --proto tcp --sport 44334 --source 10.5.125.71 -j ACCEPT']
# With 16 cpus, theres a bunch more kernel threads
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
host_group: bodhi2
bodhi_message_queue_name: "bodhi{{ env_suffix }}_composer"
# Define the topics that our fedora-messaging queue should be subscribed to.
bodhi_message_routing_keys:
- "org.fedoraproject.*.bodhi.composer.start"
## XXX -- note that the fedmsg_certs declaration does not happen here, but
# happens instead at the inventory/host_vars/ level s
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
# Make connections from signing bridges stateless, they break sigul connections
# https://bugzilla.redhat.com/show_bug.cgi?id=1283364
custom_rules: ['-A INPUT --proto tcp --sport 44334 --source 10.5.125.71 -j ACCEPT']
dns: 10.5.126.21
gw: 10.5.125.254
host_group: bodhi2
ipa_client_shell_groups:
- sysadmin-bodhi
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-bodhi
- sysadmin-releng
ipa_host_group: bodhi
ipa_host_group_desc: Bodhi update service
ipa_client_shell_groups:
- sysadmin-bodhi
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-bodhi
- sysadmin-releng
## XXX - note that the csi_ stuff is kept at the host_vars/ level.
ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7
lvm_size: 100000
mem_size: 16384
## XXX -- note that the fedmsg_certs declaration does not happen here, but
# happens instead at the inventory/host_vars/ level s
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
nm: 255.255.255.0
nrpe_procs_crit: 1000
# With 16 cpus, theres a bunch more kernel threads
nrpe_procs_warn: 900
num_cpus: 16
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
# Do not use testing repositories on production
testing: False
virt_install_command: "{{ virt_install_command_two_nic }}"

View file

@ -1,59 +1,52 @@
---
# common items for the releng-* boxes
lvm_size: 100000
mem_size: 4096
num_cpus: 2
nm: 255.255.255.0
gw: 10.5.126.254
dns: 10.5.126.21
# Use the infra-testing repo
testing: True
# Make connections from signing bridges stateless, they break sigul connections
# https://bugzilla.redhat.com/show_bug.cgi?id=1283364
# this is sign-bridge01.iad2 ip 10.3.169.120
custom_rules: ['-A INPUT --proto tcp --sport 44334 --source 10.3.169.120 -j ACCEPT']
# With 16 cpus, theres a bunch more kernel threads
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
host_group: bodhi2
bodhi_message_queue_name: "bodhi{{ env_suffix }}_composer"
# Define the topics that our fedora-messaging queue should be subscribed to.
bodhi_message_routing_keys:
- "org.fedoraproject.*.bodhi.composer.start"
ipa_host_group: bodhi
ipa_host_group_desc: Bodhi update service
ipa_client_shell_groups:
- sysadmin-bodhi
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-bodhi
- sysadmin-releng
# For the MOTD
csi_security_category: Moderate
csi_primary_contact: Releng Admins sysadmin-releng-members@fedoraproject.org
csi_purpose: Run the Bodhi masher.
csi_relationship: |
The mashing of repos here happens as part of the 'fedmsg-hub' daemon. Check
logs with 'journalctl -u fedmsg-hub'. Check the bodhi masher docs/code for
more detail on what it does:
https://github.com/fedora-infra/bodhi/blob/develop/bodhi/consumers/masher.py
The mashing of repos here happens as part of the 'fedmsg-hub' daemon. Check
logs with 'journalctl -u fedmsg-hub'. Check the bodhi masher docs/code for
more detail on what it does:
https://github.com/fedora-infra/bodhi/blob/develop/bodhi/consumers/masher.py
* This host relies on:
* db01 for its database, which is shares with the bodhi2 frontend nodes.
* An NFS mount of koji data in /mnt/koji/
* The fedmsg bus for triggering mashes.
* XMLRPC calls to koji for tagging and untagging updates.
* bugzilla for posting comments about status changes
* the wiki for getting information about QA "Test Cases"
* taksotron (resultsdb) for getting status-check results (gating updates).
* This host relies on:
* db01 for its database, which is shares with the bodhi2 frontend nodes.
* An NFS mount of koji data in /mnt/koji/
* The fedmsg bus for triggering mashes.
* XMLRPC calls to koji for tagging and untagging updates.
* bugzilla for posting comments about status changes
* the wiki for getting information about QA "Test Cases"
* taksotron (resultsdb) for getting status-check results (gating updates).
* No other systems rely directly on this host. Everything depends on it
indirectly for the creation of new updates repos (which get synced out to
the master mirror for distribution.
* No other systems rely directly on this host. Everything depends on it
indirectly for the creation of new updates repos (which get synced out to
the master mirror for distribution.
# For the MOTD
csi_security_category: Moderate
# Make connections from signing bridges stateless, they break sigul connections
# https://bugzilla.redhat.com/show_bug.cgi?id=1283364
# this is sign-bridge01.iad2 ip 10.3.169.120
custom_rules: ['-A INPUT --proto tcp --sport 44334 --source 10.3.169.120 -j ACCEPT']
dns: 10.5.126.21
gw: 10.5.126.254
host_group: bodhi2
ipa_client_shell_groups:
- sysadmin-bodhi
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-bodhi
- sysadmin-releng
ipa_host_group: bodhi
ipa_host_group_desc: Bodhi update service
lvm_size: 100000
mem_size: 4096
nm: 255.255.255.0
nrpe_procs_crit: 1000
# With 16 cpus, theres a bunch more kernel threads
nrpe_procs_warn: 900
num_cpus: 2
# Use the infra-testing repo
testing: True

View file

@ -2,15 +2,14 @@
# nagios items
# We don't use nrpe to check any of the builders
# Nor do we check swap there.
nagios_Check_Services:
nrpe: false
swap: false
mail: false
primary_auth_source: ipa
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
ipa_host_group: kojibuilder
ipa_host_group_desc: Koji Build hosts
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
nagios_Check_Services:
mail: false
nrpe: false
swap: false
primary_auth_source: ipa

View file

@ -2,15 +2,14 @@
# nagios items
# We don't use nrpe to check any of the builders
# Nor do we check swap there.
nagios_Check_Services:
nrpe: false
swap: false
mail: false
primary_auth_source: ipa
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
ipa_host_group: kojibuilder
ipa_host_group_desc: Koji Build hosts
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
nagios_Check_Services:
mail: false
nrpe: false
swap: false
primary_auth_source: ipa

View file

@ -1,27 +1,23 @@
---
host_group: kojibuilder
freezes: true
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_weburl: "https://koji.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project.
csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
docker_registry: "candidate-registry.fedoraproject.org"
freezes: true
host_group: kojibuilder
koji_hub: "koji.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.fedoraproject.org/koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
koji_root: "koji.fedoraproject.org/koji"
koji_hub: "koji.fedoraproject.org/kojihub"

View file

@ -1,27 +1,5 @@
---
# common items for the buildvm-* koji builders
volgroup: /dev/BuildGuests
lvm_size: 262144
mem_size: 15360
max_mem_size: "{{ mem_size }}"
num_cpus: 6
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/x86_64/os/
nm: 255.255.255.0
gw: 10.5.125.254
dns: 10.3.163.33
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_weburl: "https://koji.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
csi_relationship: |
@ -29,10 +7,28 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
dns: 10.3.163.33
docker_registry: "candidate-registry.fedoraproject.org"
gw: 10.5.125.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub: "koji.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.fedoraproject.org/koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/x86_64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
lvm_size: 262144
max_mem_size: "{{ mem_size }}"
mem_size: 15360
nm: 255.255.255.0
num_cpus: 6
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
koji_root: "koji.fedoraproject.org/koji"
koji_hub: "koji.fedoraproject.org/kojihub"
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"
volgroup: /dev/BuildGuests

View file

@ -1,29 +1,5 @@
---
# common items for the buildvm-aarch64* koji builders
volgroup: /dev/vg_guests
lvm_size: 140000
mem_size: 40960
max_mem_size: "{{ mem_size }}"
num_cpus: 5
max_cpu: "{{ num_cpus }}"
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/aarch64/os/
nm: 255.255.255.0
gw: 10.3.170.254
dns: 10.3.163.33
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_weburl: "https://koji.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
csi_relationship: |
@ -31,10 +7,29 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
dns: 10.3.163.33
docker_registry: "candidate-registry.fedoraproject.org"
gw: 10.3.170.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub: "koji.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.fedoraproject.org/koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/aarch64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
lvm_size: 140000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 40960
nm: 255.255.255.0
num_cpus: 5
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
koji_root: "koji.fedoraproject.org/koji"
koji_hub: "koji.fedoraproject.org/kojihub"
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View file

@ -1,32 +1,6 @@
---
# common items for the buildvm-* koji builders
volgroup: /dev/vg_guests
lvm_size: 140000
mem_size: 40960
max_mem_size: "{{ mem_size }}"
num_cpus: 5
max_cpu: "{{ num_cpus }}"
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/aarch64/os/
nm: 255.255.255.0
gw: 10.3.167.254
dns: 10.3.163.33
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
datacenter: staging
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
createrepo: True
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders (staging).
csi_relationship: |
@ -34,15 +8,36 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
datacenter: staging
dns: 10.3.163.33
docker_registry: "candidate-registry.stg.fedoraproject.org"
gw: 10.3.167.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub: "koji.stg.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
# this is to enable nested virt, which we need for some builds
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/aarch64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
lvm_size: 140000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 40960
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
nm: 255.255.255.0
num_cpus: 5
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_hub: "koji.stg.fedoraproject.org/kojihub"
createrepo: True
# this is to enable nested virt, which we need for some builds
virt_install_command: "{{ virt_install_command_aarch64_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View file

@ -1,32 +1,5 @@
---
# common items for the buildvm-aarmv7* koji builders
volgroup: /dev/vg_guests
lvm_size: 140000
mem_size: 40960
max_mem_size: "{{ mem_size }}"
num_cpus: 5
max_cpu: "{{ num_cpus }}"
ks_url: http://10.3.163.35/repo/rhel/ks/buildvm-fedora-34-armv7
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/armhfp/os/
nm: 255.255.255.0
gw: 10.3.170.254
dns: 10.3.163.33
# This is reverted so that eth1 gets br0 and eth0 gets br1
# This seems some kind of bug where in the guest kernel the devices are swapped around
# when compared to the host.
virt_install_command: "{{ virt_install_command_armv7_one_nic_unsafe }}"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_weburl: "https://koji.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
csi_relationship: |
@ -34,10 +7,32 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
dns: 10.3.163.33
docker_registry: "candidate-registry.fedoraproject.org"
gw: 10.3.170.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub: "koji.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.fedoraproject.org/koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/armhfp/os/
ks_url: http://10.3.163.35/repo/rhel/ks/buildvm-fedora-34-armv7
lvm_size: 140000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 40960
nm: 255.255.255.0
num_cpus: 5
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
koji_root: "koji.fedoraproject.org/koji"
koji_hub: "koji.fedoraproject.org/kojihub"
# This is reverted so that eth1 gets br0 and eth0 gets br1
# This seems some kind of bug where in the guest kernel the devices are swapped around
# when compared to the host.
virt_install_command: "{{ virt_install_command_armv7_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View file

@ -1,32 +1,6 @@
---
# common items for the buildvm-* koji builders
volgroup: /dev/vg_guests
lvm_size: 140000
mem_size: 40960
max_mem_size: "{{ mem_size }}"
num_cpus: 5
max_cpu: "{{ num_cpus }}"
ks_url: http://10.3.163.35/repo/rhel/ks/buildvm-fedora-34-armv7
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/armhfp/os/
nm: 255.255.255.0
gw: 10.3.167.254
dns: 10.3.163.33
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
datacenter: staging
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
createrepo: True
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders (staging).
csi_relationship: |
@ -34,15 +8,36 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
datacenter: staging
dns: 10.3.163.33
docker_registry: "candidate-registry.stg.fedoraproject.org"
gw: 10.3.167.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub: "koji.stg.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
# this is to enable nested virt, which we need for some builds
virt_install_command: "{{ virt_install_command_armv7_one_nic_unsafe }}"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/armhfp/os/
ks_url: http://10.3.163.35/repo/rhel/ks/buildvm-fedora-34-armv7
lvm_size: 140000
max_cpu: "{{ num_cpus }}"
max_mem_size: "{{ mem_size }}"
mem_size: 40960
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
nm: 255.255.255.0
num_cpus: 5
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_hub: "koji.stg.fedoraproject.org/kojihub"
createrepo: True
# this is to enable nested virt, which we need for some builds
virt_install_command: "{{ virt_install_command_armv7_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View file

@ -1,41 +1,5 @@
---
# common items for the buildvm-* koji builders
volgroup: /dev/vg_guests
lvm_size: 600000
mem_size: 20480
max_mem_size: 20480
num_cpus: 8
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/34/Server/ppc64le/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
ipa_server: ipa01.iad2.fedoraproject.org
nm: 255.255.255.0
gw: 10.3.171.254
dns: 10.3.163.33
datacenter: iad2
#
# The ppc virthosts have different bridge names for the main and nfs bridges.
#
main_bridge: br0
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_weburl: "https://koji.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of virtual machines to build packages for the Fedora project. This group builds packages for ppcle architecture.
csi_relationship: |
@ -43,3 +7,31 @@ csi_relationship: |
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
* virtual instances
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
datacenter: iad2
dns: 10.3.163.33
gw: 10.3.171.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
ipa_server: ipa01.iad2.fedoraproject.org
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/34/Server/ppc64le/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
lvm_size: 600000
#
# The ppc virthosts have different bridge names for the main and nfs bridges.
#
main_bridge: br0
max_mem_size: 20480
mem_size: 20480
nm: 255.255.255.0
num_cpus: 8
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View file

@ -1,33 +1,6 @@
---
# common items for the buildvm-* koji builders
volgroup: /dev/vg_guests
lvm_size: 150000
mem_size: 10240
max_mem_size: "{{ mem_size }}"
num_cpus: 4
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/34/Server/ppc64le/os/
nm: 255.255.255.0
gw: 10.3.167.254
dns: 10.3.163.33
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
datacenter: staging
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
main_bridge: br0
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
createrepo: True
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders (staging).
csi_relationship: |
@ -35,14 +8,35 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
datacenter: staging
dns: 10.3.163.33
docker_registry: "candidate-registry.stg.fedoraproject.org"
gw: 10.3.167.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
koji_hub: "koji.stg.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
virt_install_command: "{{ virt_install_command_ppc64le_one_nic_unsafe }}"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/34/Server/ppc64le/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
lvm_size: 150000
main_bridge: br0
max_mem_size: "{{ mem_size }}"
mem_size: 10240
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
nm: 255.255.255.0
num_cpus: 4
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_hub: "koji.stg.fedoraproject.org/kojihub"
createrepo: True
virt_install_command: "{{ virt_install_command_ppc64le_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View file

@ -1,28 +1,5 @@
---
lvm_size: 102400
mem_size: 10240
max_mem_size: "{{ mem_size }}"
num_cpus: 3
varnish_group: s390kojipkgs
vmhost: buildvmhost-s390x-01.s390.fedoraproject.org
gw: 10.16.0.254
main_bridge: vmbr
volgroup: /dev/fedora_linux_lpar_1
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/34/Server/s390x/os/
dns: 10.3.163.33
nm: 255.255.255.0
virt_install_command: "{{ virt_install_command_s390x_one_nic_unsafe }}"
createrepo: False
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_weburl: "https://koji.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
csi_relationship: |
@ -30,3 +7,23 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
dns: 10.3.163.33
gw: 10.16.0.254
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/34/Server/s390x/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
lvm_size: 102400
main_bridge: vmbr
max_mem_size: "{{ mem_size }}"
mem_size: 10240
nm: 255.255.255.0
num_cpus: 3
varnish_group: s390kojipkgs
virt_install_command: "{{ virt_install_command_s390x_one_nic_unsafe }}"
vmhost: buildvmhost-s390x-01.s390.fedoraproject.org
volgroup: /dev/fedora_linux_lpar_1

View file

@ -1,17 +1,6 @@
---
ansible_ifcfg_blocklist: True
createrepo: False
host_group: kojibuilder
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/35/Server/s390x/os/
virt_install_command: "{{ virt_install_command_s390x_one_nic_unsafe }}"
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
csi_relationship: |
@ -19,3 +8,12 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
host_group: kojibuilder
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora-secondary/releases/35/Server/s390x/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
virt_install_command: "{{ virt_install_command_s390x_one_nic_unsafe }}"

View file

@ -1,34 +1,5 @@
---
# common items for the buildvm-* koji builders
volgroup: /dev/vg_guests
lvm_size: 150000
mem_size: 10240
max_mem_size: "{{ mem_size }}"
num_cpus: 4
dns: 10.3.163.33
gw: 10.3.167.254
nm: 255.255.255.0
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/x86_64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm_fedora
resolvconf: "resolv.conf/iad2"
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"
ipa_server: ipa01.stg.iad2.fedoraproject.org
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
datacenter: staging
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=4"
koji_hub_nfs: "fedora_koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders (staging).
csi_relationship: |
@ -36,10 +7,36 @@ csi_relationship: |
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
csi_security_category: High
datacenter: staging
dns: 10.3.163.33
docker_registry: "candidate-registry.stg.fedoraproject.org"
gw: 10.3.167.254
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
host_group: kojibuilder
ipa_server: ipa01.stg.iad2.fedoraproject.org
koji_hub: "koji.stg.fedoraproject.org/kojihub"
koji_hub_nfs: "fedora_koji"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.stg.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/34/Server/x86_64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm_fedora
lvm_size: 150000
max_mem_size: "{{ mem_size }}"
mem_size: 10240
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=4"
nm: 255.255.255.0
num_cpus: 4
# These variables are for koji-containerbuild/osbs
osbs_url: "osbs.stg.fedoraproject.org"
resolvconf: "resolv.conf/iad2"
source_registry: "registry.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_hub: "koji.stg.fedoraproject.org/kojihub"
virt_install_command: "{{ virt_install_command_one_nic_unsafe }}"
volgroup: /dev/vg_guests

View file

@ -1,17 +1,15 @@
---
nrpe_procs_warn: 1500
nrpe_procs_crit: 1600
virthost: true
nested: True
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of virtual machines to build packages for the Fedora project. This playbook is for the provisioning of a physical host for buildvm's.
csi_relationship: |
* Relies on ansible, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Builder vm's are hosted on hosts created with this playbook.
* Relies on ansible, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Builder vm's are hosted on hosts created with this playbook.
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
nested: True
nrpe_procs_crit: 1600
nrpe_procs_warn: 1500
virthost: true

View file

@ -1,34 +1,29 @@
---
# Define resources for this group of hosts here.
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
ipa_client_shell_groups:
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
ipa_host_group: busgateway
ipa_host_group_desc: Bridge between fedmsg and fedora-messaging
lvm_size: 20000
mem_size: 8192
num_cpus: 2
primary_auth_source: ipa
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [
3999, # The fedmsg-relay republishes here. Listeners need to connect.
9941, # The fedmsg-relay listens here. Ephemeral producers connect.
3998, # The fedmsg-relay listens here. VPN producers connect.
9940, # The fedmsg-gateway republishes here. Proxies need to connect.
9919, # The websocket server publishes here. Proxies need to connect.
tcp_ports: [3999, # The fedmsg-relay republishes here. Listeners need to connect.
9941, # The fedmsg-relay listens here. Ephemeral producers connect.
3998, # The fedmsg-relay listens here. VPN producers connect.
9940, # The fedmsg-gateway republishes here. Proxies need to connect.
9919, # The websocket server publishes here. Proxies need to connect.
]
primary_auth_source: ipa
ipa_host_group: busgateway
ipa_host_group_desc: Bridge between fedmsg and fedora-messaging
ipa_client_shell_groups:
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log

View file

@ -1,32 +1,27 @@
---
# Define resources for this group of hosts here.
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
ipa_client_shell_groups:
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
ipa_host_group: busgateway
ipa_host_group_desc: Bridge between fedmsg and fedora-messaging
lvm_size: 20000
mem_size: 4096
num_cpus: 1
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [
3999, # The fedmsg-relay republishes here. Listeners need to connect.
9941, # The fedmsg-relay listens here. Ephemeral producers connect.
9940, # The fedmsg-gateway republishes here. Proxies need to connect.
9919, # The websocket server publishes here. Proxies need to connect.
tcp_ports: [3999, # The fedmsg-relay republishes here. Listeners need to connect.
9941, # The fedmsg-relay listens here. Ephemeral producers connect.
9940, # The fedmsg-gateway republishes here. Proxies need to connect.
9919, # The websocket server publishes here. Proxies need to connect.
]
ipa_host_group: busgateway
ipa_host_group_desc: Bridge between fedmsg and fedora-messaging
ipa_client_shell_groups:
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log

View file

@ -1,5 +1,5 @@
---
virthost: true
nrpe_procs_warn: 1400
nrpe_procs_crit: 1500
nested: true
nrpe_procs_crit: 1500
nrpe_procs_warn: 1400
virthost: true

View file

@ -1,15 +1,11 @@
---
# Define resources for this group of hosts here.
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
lvm_size: 20000
mem_size: 2048
num_cpus: 2
primary_auth_source: ipa
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
primary_auth_source: ipa
tcp_ports: [80, 443]

View file

@ -1,13 +1,10 @@
---
# Define resources for this group of hosts here.
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
lvm_size: 20000
mem_size: 2048
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
tcp_ports: [80, 443]

View file

@ -1,16 +1,15 @@
# This var should never be set for more than one machine
checkcompose_prod: true
checkcompose_env_suffix:
checkcompose_env: production
# The checkcompose settings below cause system(s) in this group to
# send out check-compose reports. This could cause duplicate reports
# if additional systems were added to this group.
checkcompose_emailfrom: rawhide@fedoraproject.org
checkcompose_emailto: "test@lists.fedoraproject.org devel@lists.fedoraproject.org"
checkcompose_env: production
checkcompose_env_suffix:
checkcompose_greenwaveurl: https://greenwave.fedoraproject.org
checkcompose_prod: true
checkcompose_smtp: bastion.iad2.fedoraproject.org
checkcompose_subvariant_emails:
AtomicHost:
error: ["dusty@dustymabe.com", "walters@verbum.org", "atomic@lists.fedoraproject.org"]
checkcompose_smtp: bastion.iad2.fedoraproject.org
checkcompose_url: "https://{{ external_hostname }}"
checkcompose_greenwaveurl: https://greenwave.fedoraproject.org

View file

@ -1,15 +1,13 @@
# we need this for our fedora-messaging consumer as it is not allowed
# to create queues on the infra AMQP broker, by broker config
checkcompose_amqp_passive: true
# fedora-messaging compose report sender settings
checkcompose_amqp_url: "amqps://openqa{{ checkcompose_env_suffix }}:@rabbitmq{{ checkcompose_env_suffix }}.fedoraproject.org/%2Fpubsub"
checkcompose_amqp_cacert: /etc/fedora-messaging/cacert{{ checkcompose_env_suffix }}.pem
checkcompose_amqp_key: /etc/pki/fedora-messaging/openqa{{ checkcompose_env_suffix }}-key.pem
checkcompose_amqp_cert: /etc/pki/fedora-messaging/openqa{{ checkcompose_env_suffix }}-cert.pem
checkcompose_amqp_queue: "openqa{{ checkcompose_env_suffix }}_checkcomp"
checkcompose_amqp_routing_keys: ["org.fedoraproject.{{ deployment_type }}.openqa.job.done"]
checkcompose_amqp_key: /etc/pki/fedora-messaging/openqa{{ checkcompose_env_suffix }}-key.pem
# fedora-messaging email error reporting settings
checkcompose_amqp_mailto: ["adamwill@fedoraproject.org", "lruzicka@fedoraproject.org"]
checkcompose_amqp_passive: true
checkcompose_amqp_queue: "openqa{{ checkcompose_env_suffix }}_checkcomp"
checkcompose_amqp_routing_keys: ["org.fedoraproject.{{ deployment_type }}.openqa.job.done"]
checkcompose_amqp_smtp: bastion
# fedora-messaging compose report sender settings
checkcompose_amqp_url: "amqps://openqa{{ checkcompose_env_suffix }}:@rabbitmq{{ checkcompose_env_suffix }}.fedoraproject.org/%2Fpubsub"

View file

@ -1,6 +1,5 @@
checkcompose_prod: false
checkcompose_env_suffix: .stg
checkcompose_env: staging
checkcompose_url: "https://{{ external_hostname }}"
checkcompose_env_suffix: .stg
checkcompose_greenwaveurl: https://greenwave-web-greenwave.app.os.stg.fedoraproject.org
checkcompose_prod: false
checkcompose_url: "https://{{ external_hostname }}"

View file

@ -1,7 +1,7 @@
---
ansible_ifcfg_blocklist: true
datacenter: cloud
nagios_Check_Services:
mail: false
nrpe: false
swap: false
datacenter: cloud
ansible_ifcfg_blocklist: true

View file

@ -6,15 +6,14 @@
# Disable ethX ifcfg, let amazon handle these via DHCP.
ansible_ifcfg_blocklist: true
datacenter: aws
nagios_Check_Services:
mail: false
nrpe: false
sshd: false
named: false
dhcpd: false
httpd: false
swap: false
mail: false
named: false
nrpe: false
ping: false
raid: false
sshd: false
swap: false

View file

@ -1,10 +1,7 @@
---
freezes: false
use_default_epel: false
collectd_apache: false
freezes: false
nagios_Check_Services:
nrpe: true
swap: true
use_default_epel: false

View file

@ -1,30 +1,22 @@
---
devel: false
_forward_src: "forward"
ansible_ifcfg_blocklist: true
backend_base_url: "https://download.copr.fedorainfracloud.org"
builders:
# max|max_spawn|max_prealloc
aws:
aarch64: [20, 10, 10]
armhfp: [20, 5, 5]
x86_64: [100, 20, 30]
copr_aws_region: us-east-1
# don't forget to update ip in ./copr-keygen, due to custom firewall rules
# eth0, eth1
copr_backend_ips: ["172.25.33.79", "172.25.82.25"]
keygen_host: "172.25.33.75"
resolvconf: "resolv.conf/cloud"
backend_base_url: "https://download.copr.fedorainfracloud.org"
postfix_maincf: "postfix/main.cf/main.cf.copr"
frontend_base_url: "https://copr.fedorainfracloud.org"
dist_git_base_url: "copr-dist-git.fedorainfracloud.org"
ansible_ifcfg_blocklist: true
copr_aws_region: us-east-1
datacenter: cloud
builders:
# max|max_spawn|max_prealloc
aws:
x86_64: [100,20,30]
armhfp: [20,5,5]
aarch64: [20,10,10]
devel: false
dist_git_base_url: "copr-dist-git.fedorainfracloud.org"
frontend_base_url: "https://copr.fedorainfracloud.org"
keygen_host: "172.25.33.75"
postfix_maincf: "postfix/main.cf/main.cf.copr"
resolvconf: "resolv.conf/cloud"

View file

@ -1 +1,2 @@
# Put here configuration for all copr instances (production, devel, ...)

View file

@ -1,76 +1,7 @@
---
devel: false
datacenter: aws
copr_messaging: true
_forward_src: "forward"
# don't forget to update ip in ./copr-keygen, due to custom firewall rules
# eth0, eth1
copr_backend_ips: ["52.44.175.77", "172.30.2.203"]
keygen_host: "54.83.48.73"
backend_base_url: "https://download.copr.fedorainfracloud.org"
postfix_group: copr
frontend_base_url: "https://copr.fedorainfracloud.org"
dist_git_base_url: "copr-dist-git.fedorainfracloud.org"
ansible_ifcfg_blocklist: true
copr_aws_region: us-east-1
services_disabled: false
nm_controlled_resolv: True
builders:
# max|spawn_concurrently|prealloc
aws:
x86_64: [20, 4, 4]
aarch64: [8, 2, 2]
aws_spot:
x86_64: [40, 8, 8]
aarch64: [30, 4, 6]
x86_hypervisor_01:
x86_64: [20, 4, 20]
x86_hypervisor_02:
x86_64: [20, 4, 20]
x86_hypervisor_03:
x86_64: [20, 4, 20]
x86_hypervisor_04:
x86_64: [20, 4, 20]
ppc64le_hypervisor_01:
ppc64le: [15, 4, 15]
# There's the ppc64le-test machine, so keep 2 builders less.
ppc64le_hypervisor_02:
ppc64le: [13, 4, 13]
copr_builder_images:
hypervisor:
x86_64: copr-builder-x86_64-20211012_115536
ppc64le: copr-builder-ppc64le-20211012_120530
aws:
x86_64: ami-0baeeebc194e64780
aarch64: ami-068c2760406b9e3c9
aws_arch_subnets:
x86_64:
- subnet-0995f6a466849f4c3
- subnet-08cadf5a14b530ac4
- subnet-07b0b3168a353e3ee
- subnet-09c74a3e6420a206b
- subnet-01d4e967ab5e78005
- subnet-05437ac82d63b6ef5
# Your requested instance type (a1.xlarge) is not supported in your requested Availability Zone (us-east-1a).
# Your requested instance type (a1.xlarge) is not supported in your requested Availability Zone (us-east-1d).
# Your requested instance type (a1.xlarge) is not supported in your requested Availability Zone (us-east-1f).
@ -78,5 +9,54 @@ aws_arch_subnets:
- subnet-0995f6a466849f4c3
- subnet-08cadf5a14b530ac4
- subnet-07b0b3168a353e3ee
x86_64:
- subnet-0995f6a466849f4c3
- subnet-08cadf5a14b530ac4
- subnet-07b0b3168a353e3ee
- subnet-09c74a3e6420a206b
- subnet-01d4e967ab5e78005
- subnet-05437ac82d63b6ef5
backend_base_url: "https://download.copr.fedorainfracloud.org"
builders:
# max|spawn_concurrently|prealloc
aws:
aarch64: [8, 2, 2]
x86_64: [20, 4, 4]
aws_spot:
aarch64: [30, 4, 6]
x86_64: [40, 8, 8]
ppc64le_hypervisor_01:
ppc64le: [15, 4, 15]
# There's the ppc64le-test machine, so keep 2 builders less.
ppc64le_hypervisor_02:
ppc64le: [13, 4, 13]
x86_hypervisor_01:
x86_64: [20, 4, 20]
x86_hypervisor_02:
x86_64: [20, 4, 20]
x86_hypervisor_03:
x86_64: [20, 4, 20]
x86_hypervisor_04:
x86_64: [20, 4, 20]
copr_aws_region: us-east-1
# don't forget to update ip in ./copr-keygen, due to custom firewall rules
# eth0, eth1
copr_backend_ips: ["52.44.175.77", "172.30.2.203"]
copr_builder_images:
aws:
aarch64: ami-068c2760406b9e3c9
x86_64: ami-0baeeebc194e64780
hypervisor:
ppc64le: copr-builder-ppc64le-20211012_120530
x86_64: copr-builder-x86_64-20211012_115536
copr_messaging: true
datacenter: aws
devel: false
dist_git_base_url: "copr-dist-git.fedorainfracloud.org"
frontend_base_url: "https://copr.fedorainfracloud.org"
keygen_host: "54.83.48.73"
nm_controlled_resolv: True
postfix_group: copr
rpm_vendor_copr_name: Fedora Copr
services_disabled: false

View file

@ -1,39 +1,31 @@
---
# copr_builder_image_name: "Fedora-Cloud-Base-20141203-21"
copr_builder_flavor_name: "ms2.builder"
copr_builder_images:
aarch64: copr-builder-20200120_133457
aws:
aarch64: ami-0acfbfbed95798259 # copr-builder-aarch64-f31-20200421_133814
x86_64: ami-09a4c035460759858 # copr-builder-x86_64-f31-20200421_131242
ppc64le: copr-builder-ppc64le-f31-20200117_132023
x86_64: copr-builder-x86_64-f31-20200117_120726
copr_builder_key_name: "buildsys"
copr_builder_network_name: "copr-net"
copr_builder_security_groups: "ssh-anywhere-copr,default,ssh-from-persistent-copr"
copr_nova_auth_url: "https://fedorainfracloud.org:5000/v2.0"
copr_nova_tenant_id: "5d99f099b7fe4b0387f0352f6301ba6a"
copr_nova_tenant_name: "copr"
copr_nova_username: "copr"
# copr_builder_image_name: "Fedora-Cloud-Base-20141203-21"
copr_builder_flavor_name: "ms2.builder"
copr_builder_network_name: "copr-net"
copr_builder_key_name: "buildsys"
copr_builder_security_groups: "ssh-anywhere-copr,default,ssh-from-persistent-copr"
copr_builder_images:
x86_64: copr-builder-x86_64-f31-20200117_120726
ppc64le: copr-builder-ppc64le-f31-20200117_132023
aarch64: copr-builder-20200120_133457
aws:
x86_64: ami-09a4c035460759858 # copr-builder-x86_64-f31-20200421_131242
aarch64: ami-0acfbfbed95798259 # copr-builder-aarch64-f31-20200421_133814
nrpe_procs_warn: 2200
nrpe_procs_crit: 2500
do_sign: "true"
spawn_in_advance: "true"
frontend_base_url: "https://copr.fedorainfracloud.org"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the backend for copr (3rd party packages)
csi_relationship: |
- Backend: Management of copr cloud infrastructure (OpenStack).
- Small frontend with copr's public stats
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
do_sign: "true"
frontend_base_url: "https://copr.fedorainfracloud.org"
nrpe_procs_crit: 2500
nrpe_procs_warn: 2200
spawn_in_advance: "true"

View file

@ -1,55 +1,41 @@
---
description: copr dispatcher and repo server
_copr_be_conf: copr-be.conf
# There is no python2 on F30
# what is the main backend service name
copr_backend_target: copr-backend.target
# copr_builder_image_name: "Fedora-Cloud-Base-20141203-21"
copr_builder_flavor_name: "ms2.builder"
copr_builder_key_name: "buildsys"
copr_builder_network_name: "copr-net"
copr_builder_security_groups: "ssh-anywhere-copr,default,ssh-from-persistent-copr"
# Copr vars
copr_hostbase: copr-be
copr_nova_auth_url: "https://fedorainfracloud.org:5000/v2.0"
copr_nova_tenant_id: "5d99f099b7fe4b0387f0352f6301ba6a"
copr_nova_tenant_name: "copr"
copr_nova_username: "copr"
# copr_builder_image_name: "Fedora-Cloud-Base-20141203-21"
copr_builder_flavor_name: "ms2.builder"
copr_builder_network_name: "copr-net"
copr_builder_key_name: "buildsys"
copr_builder_security_groups: "ssh-anywhere-copr,default,ssh-from-persistent-copr"
nrpe_procs_warn: 2200
nrpe_procs_crit: 2500
do_sign: "true"
spawn_in_advance: "true"
frontend_base_url: "https://copr.fedorainfracloud.org"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the backend for copr (3rd party packages)
csi_relationship: |
- Backend: Management of copr cloud infrastructure (OpenStack).
- Small frontend with copr's public stats
root_auth_users: msuchy pingou frostyx praiskup schlupov
tcp_ports: [ 22, 80, 443 ]
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
description: copr dispatcher and repo server
do_sign: "true"
frontend_base_url: "https://copr.fedorainfracloud.org"
host_backup_targets: ['/var/lib/copr/public_html/results']
# consumed by roles/messaging/base
messaging:
certificates:
- key: copr
username: copr
app_name: Copr build system
# Copr vars
copr_hostbase: copr-be
host_backup_targets: ['/var/lib/copr/public_html/results']
_copr_be_conf: copr-be.conf
# There is no python2 on F30
# what is the main backend service name
copr_backend_target: copr-backend.target
- app_name: Copr build system
key: copr
username: copr
nrpe_procs_crit: 2500
nrpe_procs_warn: 2200
root_auth_users: msuchy pingou frostyx praiskup schlupov
spawn_in_advance: "true"
tcp_ports: [22, 80, 443]

View file

@ -1,44 +1,36 @@
---
copr_builder_flavor_name: "ms2.builder"
copr_builder_image_name: "builder-f24"
copr_builder_images:
aarch64: copr-builder-20200120_133457
aws:
aarch64: ami-0acfbfbed95798259 # copr-builder-aarch64-f31-20200421_133814
x86_64: ami-09a4c035460759858 # copr-builder-x86_64-f31-20200421_131242
ppc64le: copr-builder-ppc64le-f31-20200117_132023
x86_64: copr-builder-x86_64-f31-20200117_120726
copr_builder_key_name: "buildsys"
copr_builder_network_name: "coprdev-net"
copr_builder_security_groups: "ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev"
copr_nova_auth_url: "https://fedorainfracloud.org:5000/v2.0"
copr_nova_tenant_id: "a6ff2158641c439a8426d7facab45437"
copr_nova_tenant_name: "coprdev"
copr_nova_username: "copr"
copr_builder_image_name: "builder-f24"
copr_builder_flavor_name: "ms2.builder"
copr_builder_network_name: "coprdev-net"
copr_builder_key_name: "buildsys"
copr_builder_security_groups: "ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev"
copr_builder_images:
x86_64: copr-builder-x86_64-f31-20200117_120726
ppc64le: copr-builder-ppc64le-f31-20200117_132023
aarch64: copr-builder-20200120_133457
aws:
x86_64: ami-09a4c035460759858 # copr-builder-x86_64-f31-20200421_131242
aarch64: ami-0acfbfbed95798259 # copr-builder-aarch64-f31-20200421_133814
do_sign: "true"
spawn_in_advance: "false"
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the testing environment of copr's backend
csi_relationship: This host is the testing environment for the cloud infrastructure of copr's backend
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
do_sign: "true"
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: lighttpd
certificates:
copr-be-dev.cloud.fedoraproject.org:
domains:
- copr-be-dev.cloud.fedoraproject.org
challenge_dir: /var/lib/copr/public_html
domains:
- copr-be-dev.cloud.fedoraproject.org
mail: copr-devel@lists.fedorahosted.org
predefined_deploy_script: lighttpd
spawn_in_advance: "false"

View file

@ -1,58 +1,44 @@
---
description: copr dispatcher and repo server - dev instance
_copr_be_conf: copr-be.conf-dev
# what is the main backend service name
copr_backend_target: copr-backend.target
copr_builder_flavor_name: "ms2.builder"
copr_builder_image_name: "builder-f24"
copr_builder_key_name: "buildsys"
copr_builder_network_name: "coprdev-net"
copr_builder_security_groups: "ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev"
# Copr vars
copr_hostbase: copr-be-dev
copr_nova_auth_url: "https://fedorainfracloud.org:5000/v2.0"
copr_nova_tenant_id: "a6ff2158641c439a8426d7facab45437"
copr_nova_tenant_name: "coprdev"
copr_nova_username: "copr"
copr_builder_image_name: "builder-f24"
copr_builder_flavor_name: "ms2.builder"
copr_builder_network_name: "coprdev-net"
copr_builder_key_name: "buildsys"
copr_builder_security_groups: "ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev"
do_sign: "true"
spawn_in_advance: "false"
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the testing environment of copr's backend
csi_relationship: This host is the testing environment for the cloud infrastructure of copr's backend
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
datacenter: aws
description: copr dispatcher and repo server - dev instance
do_sign: "true"
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: lighttpd
certificates:
copr-be-dev.cloud.fedoraproject.org:
domains:
- copr-be-dev.cloud.fedoraproject.org
challenge_dir: /var/lib/copr/public_html
domains:
- copr-be-dev.cloud.fedoraproject.org
mail: copr-devel@lists.fedorahosted.org
root_auth_users: msuchy pingou frostyx praiskup schlupov
tcp_ports: [ 22, 80, 443 ]
predefined_deploy_script: lighttpd
# consumed by roles/messaging/base
messaging:
certificates:
- key: copr
username: copr
app_name: Copr build system
# Copr vars
copr_hostbase: copr-be-dev
_copr_be_conf: copr-be.conf-dev
datacenter: aws
# what is the main backend service name
copr_backend_target: copr-backend.target
- app_name: Copr build system
key: copr
username: copr
root_auth_users: msuchy pingou frostyx praiskup schlupov
spawn_in_advance: "false"
tcp_ports: [22, 80, 443]

View file

@ -1,28 +1,21 @@
---
resolvconf: "resolv.conf/cloud"
copr_builder_flavor_name: "ms2.builder"
copr_builder_image_name: "builder-f24"
copr_builder_key_name: "buildsys"
copr_builder_network_name: "coprdev-net"
copr_builder_security_groups: "ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev"
copr_nova_auth_url: "https://fedorainfracloud.org:5000/v2.0"
copr_nova_tenant_id: "a6ff2158641c439a8426d7facab45437"
copr_nova_tenant_name: "coprdev"
copr_nova_username: "copr"
copr_builder_image_name: "builder-f24"
copr_builder_flavor_name: "ms2.builder"
copr_builder_network_name: "coprdev-net"
copr_builder_key_name: "buildsys"
copr_builder_security_groups: "ssh-anywhere-coprdev,default,ssh-from-persistent-coprdev"
do_sign: "true"
spawn_in_advance: "false"
frontend_base_url: "https://copr.stg.fedoraproject.org"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the testing environment of copr's backend
csi_relationship: This host is the testing environment for the cloud infrastructure of copr's backend
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
do_sign: "true"
frontend_base_url: "https://copr.stg.fedoraproject.org"
resolvconf: "resolv.conf/cloud"
spawn_in_advance: "false"

View file

@ -1,7 +1,6 @@
---
tcp_ports: [22, 5432]
csi_security_category: Low
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the testing environment of copr's db
csi_relationship: This host is the testing environment for copr's database
csi_security_category: Low
tcp_ports: [22, 5432]

View file

@ -1,31 +1,23 @@
---
devel: true
#_forward-src: "{{ files }}/copr/forward-dev"
_forward_src: "forward_dev"
ansible_ifcfg_blocklist: true
backend_base_url: "https://download.copr-dev.fedorainfracloud.org"
builders:
# max|max_spawn|max_prealloc
aws:
aarch64: [5, 2, 2]
armhfp: [3, 1, 1]
x86_64: [20, 5, 5]
copr_aws_region: us-east-1
# don't forget to update ip in ./copr-keygen-stg, due to custom firewall rules
# eth0, eth1
copr_backend_ips: ["172.25.33.80", "172.25.144.254"]
keygen_host: "172.25.33.73"
resolvconf: "resolv.conf/cloud"
backend_base_url: "https://download.copr-dev.fedorainfracloud.org"
postfix_maincf: "postfix/main.cf/main.cf.copr"
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
devel: true
dist_git_base_url: "copr-dist-git-dev.fedorainfracloud.org"
ansible_ifcfg_blocklist: true
copr_aws_region: us-east-1
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
keygen_host: "172.25.33.73"
postfix_maincf: "postfix/main.cf/main.cf.copr"
resolvconf: "resolv.conf/cloud"
services_disabled: true
builders:
# max|max_spawn|max_prealloc
aws:
x86_64: [20,5,5]
armhfp: [3,1,1]
aarch64: [5,2,2]

View file

@ -1,76 +1,7 @@
---
devel: true
copr_messaging: true
datacenter: aws
_forward_src: "forward_dev"
# don't forget to update ip in ./copr-keygen-stg, due to custom firewall rules
# eth0, eth1
copr_backend_ips: ["18.208.10.131", "172.30.2.207"]
keygen_host: "54.225.23.248"
backend_base_url: "https://download.copr-dev.fedorainfracloud.org"
postfix_group: copr
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
dist_git_base_url: "copr-dist-git-dev.fedorainfracloud.org"
ansible_ifcfg_blocklist: true
copr_aws_region: us-east-1
services_disabled: false
nm_controlled_resolv: True
builders:
# max|spawn_concurrently|prealloc
aws:
x86_64: [4, 1, 1]
aarch64: [2, 1, 1]
aws_spot:
x86_64: [5, 2, 2]
aarch64: [5, 2, 1]
x86_hypervisor_01:
x86_64: [2,1,1]
x86_hypervisor_02:
x86_64: [2, 1, 1]
x86_hypervisor_03:
x86_64: [2, 1, 1]
x86_hypervisor_04:
x86_64: [2, 1, 1]
ppc64le_hypervisor_01:
ppc64le: [2, 1, 1]
ppc64le_hypervisor_02:
ppc64le: [2, 1, 1]
copr_builder_images:
hypervisor:
x86_64: copr-builder-x86_64-20211012_115536
ppc64le: copr-builder-ppc64le-20211012_120530
aws:
x86_64: ami-0baeeebc194e64780
aarch64: ami-068c2760406b9e3c9
aws_arch_subnets:
x86_64:
- subnet-0995f6a466849f4c3
- subnet-08cadf5a14b530ac4
- subnet-07b0b3168a353e3ee
- subnet-09c74a3e6420a206b
- subnet-01d4e967ab5e78005
- subnet-05437ac82d63b6ef5
# Your requested instance type (a1.xlarge) is not supported in your requested Availability Zone (us-east-1a).
# Your requested instance type (a1.xlarge) is not supported in your requested Availability Zone (us-east-1d).
# Your requested instance type (a1.xlarge) is not supported in your requested Availability Zone (us-east-1f).
@ -78,5 +9,53 @@ aws_arch_subnets:
- subnet-0995f6a466849f4c3
- subnet-08cadf5a14b530ac4
- subnet-07b0b3168a353e3ee
x86_64:
- subnet-0995f6a466849f4c3
- subnet-08cadf5a14b530ac4
- subnet-07b0b3168a353e3ee
- subnet-09c74a3e6420a206b
- subnet-01d4e967ab5e78005
- subnet-05437ac82d63b6ef5
backend_base_url: "https://download.copr-dev.fedorainfracloud.org"
builders:
# max|spawn_concurrently|prealloc
aws:
aarch64: [2, 1, 1]
x86_64: [4, 1, 1]
aws_spot:
aarch64: [5, 2, 1]
x86_64: [5, 2, 2]
ppc64le_hypervisor_01:
ppc64le: [2, 1, 1]
ppc64le_hypervisor_02:
ppc64le: [2, 1, 1]
x86_hypervisor_01:
x86_64: [2, 1, 1]
x86_hypervisor_02:
x86_64: [2, 1, 1]
x86_hypervisor_03:
x86_64: [2, 1, 1]
x86_hypervisor_04:
x86_64: [2, 1, 1]
copr_aws_region: us-east-1
# don't forget to update ip in ./copr-keygen-stg, due to custom firewall rules
# eth0, eth1
copr_backend_ips: ["18.208.10.131", "172.30.2.207"]
copr_builder_images:
aws:
aarch64: ami-068c2760406b9e3c9
x86_64: ami-0baeeebc194e64780
hypervisor:
ppc64le: copr-builder-ppc64le-20211012_120530
x86_64: copr-builder-x86_64-20211012_115536
copr_messaging: true
datacenter: aws
devel: true
dist_git_base_url: "copr-dist-git-dev.fedorainfracloud.org"
frontend_base_url: "https://copr-fe-dev.cloud.fedoraproject.org"
keygen_host: "54.225.23.248"
nm_controlled_resolv: True
postfix_group: copr
rpm_vendor_copr_name: Fedora Copr (devel)
services_disabled: false

View file

@ -1,14 +1,13 @@
---
tcp_ports: [22, 80, 443]
datacenter: cloud
freezes: false
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: httpd
certificates:
copr-dist-git.fedorainfracloud.org:
domains:
- copr-dist-git.fedorainfracloud.org
challenge_dir: /var/www/html
domains:
- copr-dist-git.fedorainfracloud.org
mail: copr-devel@lists.fedorahosted.org
predefined_deploy_script: httpd
tcp_ports: [22, 80, 443]

View file

@ -1,14 +1,13 @@
---
tcp_ports: [22, 80, 443]
datacenter: aws
freezes: false
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: httpd
certificates:
copr-dist-git.fedorainfracloud.org:
domains:
- copr-dist-git.fedorainfracloud.org
challenge_dir: /var/www/html
domains:
- copr-dist-git.fedorainfracloud.org
mail: copr-devel@lists.fedorahosted.org
predefined_deploy_script: httpd
tcp_ports: [22, 80, 443]

View file

@ -1,15 +1,14 @@
---
tcp_ports: [22, 80, 443]
datacenter: cloud
freezes: false
devel: true
freezes: false
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: httpd
certificates:
copr-dist-git-dev.fedorainfracloud.org:
domains:
- copr-dist-git-dev.fedorainfracloud.org
challenge_dir: /var/www/html
domains:
- copr-dist-git-dev.fedorainfracloud.org
mail: copr-devel@lists.fedorahosted.org
predefined_deploy_script: httpd
tcp_ports: [22, 80, 443]

View file

@ -1,15 +1,14 @@
---
tcp_ports: [22, 80, 443]
datacenter: aws
freezes: false
devel: true
freezes: false
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: httpd
certificates:
copr-dist-git-dev.fedorainfracloud.org:
domains:
- copr-dist-git-dev.fedorainfracloud.org
challenge_dir: /var/www/html
domains:
- copr-dist-git-dev.fedorainfracloud.org
mail: copr-devel@lists.fedorahosted.org
predefined_deploy_script: httpd
tcp_ports: [22, 80, 443]

View file

@ -1,6 +1,5 @@
---
resolvconf: "resolv.conf/cloud"
tcp_ports: [22, 80, 443]
datacenter: cloud
freezes: false
resolvconf: "resolv.conf/cloud"
tcp_ports: [22, 80, 443]

View file

@ -1,34 +1,27 @@
---
tcp_ports: [22, 80, 443]
copr_fe_homedir: /usr/share/copr/coprs_frontend
copr_frontend_public_hostname: "copr.fedorainfracloud.org"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
copr_kerberos_auth_enabled: false
copr_messaging_queue: "a9b74258-21c6-4e79-ba65-9e858dc84a2b"
copr_pagure_events:
io.pagure.prod.pagure: "https://pagure.io/"
org.fedoraproject.prod.pagure: "https://src.fedoraproject.org/"
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide a publicly accessible frontend for 3rd party packages (copr)
csi_relationship: |
- This host provides the frontend part of copr only.
  - It's the point of contact between end users and the copr build system (backend, package signer)
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Moderate
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: httpd
certificates:
copr.fedorainfracloud.org:
domains:
- copr.fedorainfracloud.org
challenge_dir: /var/www/html
domains:
- copr.fedorainfracloud.org
mail: copr-devel@lists.fedorahosted.org
copr_pagure_events:
io.pagure.prod.pagure: "https://pagure.io/"
org.fedoraproject.prod.pagure: "https://src.fedoraproject.org/"
copr_messaging_queue: "a9b74258-21c6-4e79-ba65-9e858dc84a2b"
copr_fe_homedir: /usr/share/copr/coprs_frontend
copr_kerberos_auth_enabled: false
predefined_deploy_script: httpd
tcp_ports: [22, 80, 443]

View file

@ -1,37 +1,29 @@
---
tcp_ports: [22, 80, 443]
copr_frontend_public_hostname: "copr-fe-dev.cloud.fedoraproject.org"
csi_security_category: Low
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the testing environment of copr's frontend
csi_relationship: This host is the testing environment for copr's web interface
copr_mbs_cli_login: Y29wcg==##vtvvikhcjncwkfkdcssv
# consumed by roles/copr/certbot
letsencrypt:
predefined_deploy_script: httpd
certificates:
copr-fe-dev.cloud.fedoraproject.org:
domains:
- copr-fe-dev.cloud.fedoraproject.org
challenge_dir: /var/www/html
mail: copr-devel@lists.fedorahosted.org
allowlist_emails:
- msuchy@redhat.com
- praiskup@redhat.com
- jkadlcik@redhat.com
- schlupov@redhat.com
copr_fe_homedir: /usr/share/copr/coprs_frontend
copr_frontend_public_hostname: "copr-fe-dev.cloud.fedoraproject.org"
copr_kerberos_auth_enabled: false
copr_mbs_cli_login: Y29wcg==##vtvvikhcjncwkfkdcssv
copr_messaging_queue: "c8e11df7-e863-4ca4-99b9-d37c6663c7f7"
copr_pagure_events:
io.pagure.prod.pagure: "https://pagure.io/"
org.fedoraproject.prod.pagure: "https://src.fedoraproject.org/"
io.pagure.stg.pagure: "https://stg.pagure.io"
copr_messaging_queue: "c8e11df7-e863-4ca4-99b9-d37c6663c7f7"
copr_fe_homedir: /usr/share/copr/coprs_frontend
copr_kerberos_auth_enabled: false
org.fedoraproject.prod.pagure: "https://src.fedoraproject.org/"
csi_primary_contact: "msuchy (mirek), frostyx, praiskup IRC #fedora-admin, #fedora-buildsys"
csi_purpose: Provide the testing environment of copr's frontend
csi_relationship: This host is the testing environment for copr's web interface
csi_security_category: Low
# consumed by roles/copr/certbot
letsencrypt:
certificates:
copr-fe-dev.cloud.fedoraproject.org:
challenge_dir: /var/www/html
domains:
- copr-fe-dev.cloud.fedoraproject.org
mail: copr-devel@lists.fedorahosted.org
predefined_deploy_script: httpd
tcp_ports: [22, 80, 443]

View file

@ -1,33 +1,26 @@
---
virthost: true
vpn: true
primary_auth_source: ipa
ipa_host_group: vmhost-copr
ipa_host_group_desc: VM hosts for COPR
ipa_client_shell_groups:
- sysadmin-copr
ipa_client_sudo_groups:
- sysadmin-copr
nrpe_procs_warn: 1400
nrpe_procs_crit: 1500
postfix_group: copr
postfix_maincf: "postfix/main.cf/main.cf.copr"
freezes: false
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Host guest virtual machines.
csi_relationship: |
- Guests on this host will be inaccessible if the host is down.
- This host will be required by any application with a virtual machine running on it, therefore, if this host is down those applications will be impacted.
- Guests on this host will be inaccessible if the host is down.
- This host will be required by any application with a virtual machine running on it, therefore, if this host is down those applications will be impacted.
csi_security_category: High
dist_git_base_url: https://example.com/unset
freezes: false
frontend_base_url: https://exmaple.com/unset
ipa_client_shell_groups:
- sysadmin-copr
ipa_client_sudo_groups:
- sysadmin-copr
ipa_host_group: vmhost-copr
ipa_host_group_desc: VM hosts for COPR
nagios_Check_Services:
raid: true
nrpe_procs_crit: 1500
nrpe_procs_warn: 1400
postfix_group: copr
postfix_maincf: "postfix/main.cf/main.cf.copr"
primary_auth_source: ipa
rpm_vendor_copr_name: unset vendor
frontend_base_url: https://exmaple.com/unset
dist_git_base_url: https://example.com/unset
virthost: true
vpn: true

View file

@ -1,12 +1,6 @@
---
tcp_ports: [22]
# http + signd dest ports
custom_rules: [ '-A INPUT -p tcp -m tcp -s 172.25.33.79 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.25.82.25 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.25.33.79 --dport 5167 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.25.82.25 --dport 5167 -j ACCEPT']
custom_rules: ['-A INPUT -p tcp -m tcp -s 172.25.33.79 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.25.82.25 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.25.33.79 --dport 5167 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.25.82.25 --dport 5167 -j ACCEPT']
datacenter: cloud
freezes: false
tcp_ports: [22]

View file

@ -1,14 +1,7 @@
---
copr_hostbase: copr-keygen
tcp_ports: [22]
# http + signd dest ports
custom_rules: [ '-A INPUT -p tcp -m tcp -s 52.44.175.77 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 52.44.175.77 --dport 5167 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.30.2.203 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.30.2.203 --dport 5167 -j ACCEPT']
custom_rules: ['-A INPUT -p tcp -m tcp -s 52.44.175.77 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 52.44.175.77 --dport 5167 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.30.2.203 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.30.2.203 --dport 5167 -j ACCEPT']
datacenter: aws
freezes: false
tcp_ports: [22]

View file

@ -1,13 +1,7 @@
---
copr_hostbase: copr-keygen-dev
tcp_ports: []
# http + signd dest ports
custom_rules: [ '-A INPUT -p tcp -m tcp -s 172.25.33.80 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.25.33.80 --dport 5167 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.25.144.254 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.25.144.254 --dport 5167 -j ACCEPT']
custom_rules: ['-A INPUT -p tcp -m tcp -s 172.25.33.80 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.25.33.80 --dport 5167 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.25.144.254 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.25.144.254 --dport 5167 -j ACCEPT']
datacenter: cloud
freezes: false
tcp_ports: []

View file

@ -1,14 +1,7 @@
---
copr_hostbase: copr-keygen-dev
tcp_ports: [22]
# http + signd dest ports
custom_rules: [ '-A INPUT -p tcp -m tcp -s 18.208.10.131 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 18.208.10.131 --dport 5167 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.30.2.207 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.30.2.207 --dport 5167 -j ACCEPT']
custom_rules: ['-A INPUT -p tcp -m tcp -s 18.208.10.131 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 18.208.10.131 --dport 5167 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.30.2.207 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.30.2.207 --dport 5167 -j ACCEPT']
datacenter: aws
freezes: false
tcp_ports: [22]

View file

@ -1,15 +1,8 @@
---
resolvconf: "resolv.conf/cloud"
copr_hostbase: copr-keygen-stg
tcp_ports: []
# http + signd dest ports
custom_rules: ['-A INPUT -p tcp -m tcp -s 172.25.33.49 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 209.132.184.44 --dport 80 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 172.25.33.49 --dport 5167 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 209.132.184.44 --dport 5167 -j ACCEPT']
custom_rules: ['-A INPUT -p tcp -m tcp -s 172.25.33.49 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 209.132.184.44 --dport 80 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 172.25.33.49 --dport 5167 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 209.132.184.44 --dport 5167 -j ACCEPT']
datacenter: cloud
freezes: false
resolvconf: "resolv.conf/cloud"
tcp_ports: []

View file

@ -1,15 +1,11 @@
---
devel: false
#_forward-src: "{{ files }}/copr/forward-dev"
_forward_src: "forward_dev"
# don't forget to update ip in ./copr-keygen-stg, due to custom firewall rules
copr_backend_ips: ["172.25.33.49", "209.132.184.44"]
keygen_host: "172.25.33.51"
backend_base_url: "https://copr-be-stg.fedorainfracloud.org"
frontend_base_url: "https://copr.stg.fedoraproject.org"
dist_git_base_url: "copr-dist-git-stg.fedorainfracloud.org"
ansible_ifcfg_blocklist: true
backend_base_url: "https://copr-be-stg.fedorainfracloud.org"
# don't forget to update ip in ./copr-keygen-stg, due to custom firewall rules
copr_backend_ips: ["172.25.33.49", "209.132.184.44"]
devel: false
dist_git_base_url: "copr-dist-git-stg.fedorainfracloud.org"
frontend_base_url: "https://copr.stg.fedoraproject.org"
keygen_host: "172.25.33.51"

View file

@ -1,30 +1,22 @@
---
# Define resources for this group of hosts here.
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
deployment_type: prod
freezes: false
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
ipa_host_group: datagrepper
ipa_host_group_desc: Service to grep through historical message bus data
lvm_size: 20000
mem_size: 8192
num_cpus: 2
primary_auth_source: ipa
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443, 6996 ]
# Needed for rsync from log01 for logs.
custom_rules: [
'-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT',
]
primary_auth_source: ipa
ipa_host_group: datagrepper
ipa_host_group_desc: Service to grep through historical message bus data
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
freezes: false
deployment_type: prod
tcp_ports: [80, 443, 6996]

View file

@ -1,24 +1,20 @@
---
# Define resources for this group of hosts here.
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
freezes: false
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
ipa_host_group: datagrepper
ipa_host_group_desc: Service to grep through historical message bus data
lvm_size: 20000
mem_size: 2048
num_cpus: 1
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443, 6996 ]
# Needed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
ipa_host_group: datagrepper
ipa_host_group_desc: Service to grep through historical message bus data
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
freezes: false
tcp_ports: [80, 443, 6996]

View file

@ -1,9 +1,9 @@
---
ipa_client_shell_groups:
- sysadmin-dba
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-dba
ipa_host_group: dbserver
ipa_host_group_desc: Database server hosts
ipa_client_shell_groups:
- sysadmin-dba
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-dba

View file

@ -1,9 +1,9 @@
---
ipa_client_shell_groups:
- sysadmin-dba
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-dba
ipa_host_group: dbserver
ipa_host_group_desc: Database server hosts
ipa_client_shell_groups:
- sysadmin-dba
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-dba

View file

@ -1,28 +1,23 @@
---
# Define resources for this group of hosts here.
lvm_size: 500000
mem_size: 8192
max_mem_size: 16384
num_cpus: 4
csi_primary_contact: "#fedora-admin"
csi_purpose: Provides debuginfod services
csi_relationship: |
- This server provides a debuginfod server to allow downloading debuginfod
csi_security_category: Low
deployment_type: prod
tcp_ports: [ 8002 ]
primary_auth_source: ipa
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-veteran
- sysadmin-debuginfod
ipa_client_sudo_groups:
- sysadmin-debuginfod
ipa_host_group: debuginfod
ipa_host_group_desc: debuginfod servers
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-veteran
- sysadmin-debuginfod
ipa_client_sudo_groups:
- sysadmin-debuginfod
csi_security_category: Low
csi_primary_contact: "#fedora-admin"
csi_purpose: Provides debuginfod services
csi_relationship: |
- This server provides a debuginfod server to allow downloading debuginfod
lvm_size: 500000
max_mem_size: 16384
mem_size: 8192
num_cpus: 4
primary_auth_source: ipa
tcp_ports: [8002]

View file

@ -1,28 +1,23 @@
---
# Define resources for this group of hosts here.
lvm_size: 500000
mem_size: 8192
max_mem_size: 16384
num_cpus: 4
csi_primary_contact: "#fedora-admin"
csi_purpose: Provides debuginfod services
csi_relationship: |
- This server provides a debuginfod server to allow downloading debuginfod
csi_security_category: Low
deployment_type: stg
tcp_ports: [ 8002 ]
primary_auth_source: ipa
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-veteran
- sysadmin-debuginfod
ipa_client_sudo_groups:
- sysadmin-debuginfod
ipa_host_group: debuginfod
ipa_host_group_desc: debuginfod servers
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-veteran
- sysadmin-debuginfod
ipa_client_sudo_groups:
- sysadmin-debuginfod
csi_security_category: Low
csi_primary_contact: "#fedora-admin"
csi_purpose: Provides debuginfod services
csi_relationship: |
- This server provides a debuginfod server to allow downloading debuginfod
lvm_size: 500000
max_mem_size: 16384
mem_size: 8192
num_cpus: 4
primary_auth_source: ipa
tcp_ports: [8002]

View file

@ -1,16 +1,14 @@
---
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
virthost: true
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of virtual machines to build packages for the Fedora project. This playbook is for the provisioning of a physical host for buildvm's.
csi_relationship: |
* Relies on ansible, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Builder vm's are hosted on hosts created with this playbook.
* Relies on ansible, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Builder vm's are hosted on hosts created with this playbook.
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should ovveride them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
virthost: true

View file

@ -1,30 +1,23 @@
---
# Define resources for this group of hosts here.
lvm_size: 30000
mem_size: 2048
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
udp_ports: [ 53 ]
tcp_ports: [ 53 ]
primary_auth_source: ipa
ipa_host_group: dns
ipa_host_group_desc: DNS servers
ipa_client_shell_groups:
- sysadmin-dns
ipa_client_sudo_groups:
- sysadmin-dns
nrpe_procs_warn: 300
nrpe_procs_crit: 500
sudoers: "{{ private }}/files/sudo/sysadmin-dns"
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Domain Name Service
csi_security_category: High
ipa_client_shell_groups:
- sysadmin-dns
ipa_client_sudo_groups:
- sysadmin-dns
ipa_host_group: dns
ipa_host_group_desc: DNS servers
lvm_size: 30000
mem_size: 2048
nagios_has_named: true
nrpe_procs_crit: 500
nrpe_procs_warn: 300
num_cpus: 2
primary_auth_source: ipa
sudoers: "{{ private }}/files/sudo/sysadmin-dns"
tcp_ports: [53]
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
udp_ports: [53]

View file

@ -1,98 +1,96 @@
---
primary_auth_source: ipa
dl_tier1:
- 10.0.0.0/8 # Red Hat Internal
- 128.171.104.148 # mirror.ancl.hawaii.edu.
- 129.101.198.59 # University of Idaho
- 129.21.171.98 # kirby.main.ad.rit.edu.
- 129.7.128.189 # pubmirror1.math.uh.edu.
- 129.7.128.190 # pubmirror2.math.uh.edu.
- 129.7.128.191 # pubmirror3.math.uh.edu.
- 130.225.254.116 # dotsrc.org
- 130.239.17.3 # its-ehm.its.umu.se.
- 137.138.120.188 # cern
- 137.138.148.168 # cern
- 137.138.44.122 # cern
- 140.247.173.57 # pool-computing-servers.seas.harvard.edu.
- 147.75.101.1 # ams.edge.kernel.org
- 147.75.197.195 # ewr.edge.kernel.org
- 147.75.69.165 # sjc.edge.kernel.org
- 147.75.95.133 # kernel.org apac
- 149.11.118.8/29 # Red Hat CZ
- 152.19.134.145 # vm6.fedora.ibiblio.org.
- 152.19.134.195 # vm15.fedora.ibiblio.org.
- 154.45.192.0/29 # Red Hat CZ New
- 166.78.229.128 # rackspace new infra
- 182.255.111.7 # dksn-k4.cdn.aarnet.edu.au.
- 188.184.104.133 # cern
- 188.184.116.38 # cern
- 195.220.108.108 # mandril.creatis.insa-lyon.fr.
- 198.129.224.34 # linux-src.es.net.
- 199.6.1.170 # isc.org
- 200.17.202.1/28 # ufpr.br
- 202.158.214.12 # bne-a-vms1.retain.aarnet.edu.au.
- 204.152.191.36 # isc.org
- 208.96.144.68 # University of Southern Indiana
- 208.96.144.90 # University of Southern Indiana
- 208.89.87.36 # Mirror.dst.ca
- 213.175.37.8/29 # ??
- 66.187.233.206 # Red Hat BOS
- 71.19.151.18 # prgmr.com / nb.zone
- 72.4.120.222 # rackspace old infra
- 91.209.10.253 # Red Hat CZ New Newer
- 125.16.200.50 # Red Hat PNQ
- 2001:388:1:4066:225:90ff:fec7:777e # ??
- 2001:4DE8:C0FD::/48 # ??
- 2001:878:346::116 # dotsrc.org
- 2001:978:2:81::1:0/112 # ??
- 2604:1380:3000:1500::1 # kernel.org apac
- 2620:52:3:1:dead:beef:cafe:fed1 # download-cc-rdu01's ipv6 address
- archive.linux.duke.edu # 152.3.102.53
- 152.3.68.159 # new archive.linux.duke.edu
- auslistsdr01.us.dell.com # 143.166.224.62
- auslistsprd01.us.dell.com # 143.166.82.43
- download-ib01.fedoraproject.org # 152.19.134.145
- download-cc-rdu01.fedoraproject.org # 8.43.85.72
- fedora.c3sl.ufpr.br # 200.236.31.8
- frisal.switch.ch # 130.59.113.36
- ftp.heanet.ie # 193.1.193.64
- ftp.linux.cz # 147.251.48.205
- 2001:718:801:230::cd # ftp.linux.cz ipv6 address
- ftp.nrc.ca # 132.246.2.21
- jobbot1.ibiblio.org # 152.19.134.30
- elba.hrz.tu-chemnitz.de # 134.109.228.48 / 2001:638:911:b0e:134:109:228:48
- waterloo.hrz.tu-chemnitz.de # 134.109.228.1 / 2001:638:911:b0e:134:109:228:1
- lists.us.dell.com # 143.166.82.43
- mirror.gtlib.gatech.edu # 128.61.111.11
- mirror.hiwaay.net # 216.180.99.217
- mirror.liquidtelecom.com # 197.155.77.1
- mirror.prgmr.com # 71.19.148.193
- mirror.speedpartner.de # 91.184.32.5
- mirrors.mit.edu # 18.7.29.125
- mirrors.pdx.kernel.org # 198.145.21.9 / 2001:19d0:306:6:0:1994:3:14
- mirrors.rit.edu # 129.21.171.72
- mirrors.sfo.kernel.org # 149.20.37.36 / 2001:4f8:4:6f:0:1994:3:14
- mirrors.xmission.com # 198.60.22.13
- nrt.edge.kernel.org # 147.75.95.133 / 2604:1380:3000:1500::1
- odysseus.fi.muni.cz # 147.251.48.205
- odysseus.linux.cz # 147.251.48.205
- rhlx01.hs-esslingen.de # 129.143.116.10
- rsyncer.ftp.heanet.ie # 193.1.219.88
- sagres.c3sl.ufpr.br # 200.236.31.1
- scrye.com # 75.148.32.185
- sfo-korg-mirror.kernel.org # 149.20.37.36 / 2001:4f8:4:6f:0:1994:3:14
- sinclair.wpi.edu # 130.215.32.86
- mirr-web-p-u01.wpi.edu # 130.215.32.92 / 2607:f5c0:8040:a081::80
- solar-one.mit.edu # 18.7.29.123
- speculum.rbc.ru # 80.68.250.217
- torrent01.fedoraproject.org # 152.19.134.141
- torrent02.fedoraproject.org # 152.19.134.148
- ultra.linux.cz # 195.113.15.27
- wpi.edu # 130.215.36.26
- zaphod.gtlib.gatech.edu # 128.61.111.12
ipa_host_group: download
ipa_host_group_desc: Download servers
nagios_Check_Services:
swap: false
dl_tier1:
- 10.0.0.0/8 # Red Hat Internal
- 128.171.104.148 # mirror.ancl.hawaii.edu.
- 129.101.198.59 # University of Idaho
- 129.21.171.98 # kirby.main.ad.rit.edu.
- 129.7.128.189 # pubmirror1.math.uh.edu.
- 129.7.128.190 # pubmirror2.math.uh.edu.
- 129.7.128.191 # pubmirror3.math.uh.edu.
- 130.225.254.116 # dotsrc.org
- 130.239.17.3 # its-ehm.its.umu.se.
- 137.138.120.188 # cern
- 137.138.148.168 # cern
- 137.138.44.122 # cern
- 140.247.173.57 # pool-computing-servers.seas.harvard.edu.
- 147.75.101.1 # ams.edge.kernel.org
- 147.75.197.195 # ewr.edge.kernel.org
- 147.75.69.165 # sjc.edge.kernel.org
- 147.75.95.133 # kernel.org apac
- 149.11.118.8/29 # Red Hat CZ
- 152.19.134.145 # vm6.fedora.ibiblio.org.
- 152.19.134.195 # vm15.fedora.ibiblio.org.
- 154.45.192.0/29 # Red Hat CZ New
- 166.78.229.128 # rackspace new infra
- 182.255.111.7 # dksn-k4.cdn.aarnet.edu.au.
- 188.184.104.133 # cern
- 188.184.116.38 # cern
- 195.220.108.108 # mandril.creatis.insa-lyon.fr.
- 198.129.224.34 # linux-src.es.net.
- 199.6.1.170 # isc.org
- 200.17.202.1/28 # ufpr.br
- 202.158.214.12 # bne-a-vms1.retain.aarnet.edu.au.
- 204.152.191.36 # isc.org
- 208.96.144.68 # University of Southern Indiana
- 208.96.144.90 # University of Southern Indiana
- 208.89.87.36 # Mirror.dst.ca
- 213.175.37.8/29 # ??
- 66.187.233.206 # Red Hat BOS
- 71.19.151.18 # prgmr.com / nb.zone
- 72.4.120.222 # rackspace old infra
- 91.209.10.253 # Red Hat CZ New Newer
- 125.16.200.50 # Red Hat PNQ
- 2001:388:1:4066:225:90ff:fec7:777e # ??
- 2001:4DE8:C0FD::/48 # ??
- 2001:878:346::116 # dotsrc.org
- 2001:978:2:81::1:0/112 # ??
- 2604:1380:3000:1500::1 # kernel.org apac
- 2620:52:3:1:dead:beef:cafe:fed1 # download-cc-rdu01's ipv6 address
- archive.linux.duke.edu # 152.3.102.53
- 152.3.68.159 # new archive.linux.duke.edu
- auslistsdr01.us.dell.com # 143.166.224.62
- auslistsprd01.us.dell.com # 143.166.82.43
- download-ib01.fedoraproject.org # 152.19.134.145
- download-cc-rdu01.fedoraproject.org # 8.43.85.72
- fedora.c3sl.ufpr.br # 200.236.31.8
- frisal.switch.ch # 130.59.113.36
- ftp.heanet.ie # 193.1.193.64
- ftp.linux.cz # 147.251.48.205
- 2001:718:801:230::cd # ftp.linux.cz ipv6 address
- ftp.nrc.ca # 132.246.2.21
- jobbot1.ibiblio.org # 152.19.134.30
- elba.hrz.tu-chemnitz.de # 134.109.228.48 / 2001:638:911:b0e:134:109:228:48
- waterloo.hrz.tu-chemnitz.de # 134.109.228.1 / 2001:638:911:b0e:134:109:228:1
- lists.us.dell.com # 143.166.82.43
- mirror.gtlib.gatech.edu # 128.61.111.11
- mirror.hiwaay.net # 216.180.99.217
- mirror.liquidtelecom.com # 197.155.77.1
- mirror.prgmr.com # 71.19.148.193
- mirror.speedpartner.de # 91.184.32.5
- mirrors.mit.edu # 18.7.29.125
- mirrors.pdx.kernel.org # 198.145.21.9 / 2001:19d0:306:6:0:1994:3:14
- mirrors.rit.edu # 129.21.171.72
- mirrors.sfo.kernel.org # 149.20.37.36 / 2001:4f8:4:6f:0:1994:3:14
- mirrors.xmission.com # 198.60.22.13
- nrt.edge.kernel.org # 147.75.95.133 / 2604:1380:3000:1500::1
- odysseus.fi.muni.cz # 147.251.48.205
- odysseus.linux.cz # 147.251.48.205
- rhlx01.hs-esslingen.de # 129.143.116.10
- rsyncer.ftp.heanet.ie # 193.1.219.88
- sagres.c3sl.ufpr.br # 200.236.31.1
- scrye.com # 75.148.32.185
- sfo-korg-mirror.kernel.org # 149.20.37.36 / 2001:4f8:4:6f:0:1994:3:14
- sinclair.wpi.edu # 130.215.32.86
- mirr-web-p-u01.wpi.edu # 130.215.32.92 / 2607:f5c0:8040:a081::80
- solar-one.mit.edu # 18.7.29.123
- speculum.rbc.ru # 80.68.250.217
- torrent01.fedoraproject.org # 152.19.134.141
- torrent02.fedoraproject.org # 152.19.134.148
- ultra.linux.cz # 195.113.15.27
- wpi.edu # 130.215.36.26
- zaphod.gtlib.gatech.edu # 128.61.111.12
primary_auth_source: ipa

View file

@ -1,14 +1,11 @@
---
blocked_ips: []
datacenter: iad2
tcp_ports: [80, 443, 873]
rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}"
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
dns: 10.3.163.33
host_group: download-iad2
# nfs mount options, overrides the all/default
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,actimeo=600,nfsvers=3"
blocked_ips: [ ]
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}"
tcp_ports: [80, 443, 873]

View file

@ -1,7 +1,6 @@
---
datacenter: ibiblio
tcp_ports: [80, 443, 873]
rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}"
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}"
tcp_ports: [80, 443, 873]

View file

@ -1,11 +1,10 @@
---
datacenter: rdu
tcp_ports: [80, 443, 873]
rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}"
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
virt_install_command: "{{ virt_install_command_rhel6 }}"
ansible_ifcfg_blocklist: true
datacenter: rdu
# nfs mount options, overrides the all/default
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,actimeo=600,nfsvers=3"
nrpe_procs_crit: 1000
nrpe_procs_warn: 900
rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}"
tcp_ports: [80, 443, 873]
virt_install_command: "{{ virt_install_command_rhel6 }}"

View file

@ -1,42 +1,34 @@
---
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fedimg.image.test
- fedimg.image.upload
- fedimg.image.copy
- fedimg.image.publish
group: fedmsg
owner: root
service: fedimg
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-fedimg-members@fedoraproject.org
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
ipa_host_group: fedimg
lvm_size: 20000
mem_size: 6144
num_cpus: 2
testing: False
primary_auth_source: ipa
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [
# These are all for outgoing fedmsg.
3000, 3001, 3002, 3003, 3004, 3005, 3006,
3007, 3008, 3009, 3010, 3011, 3012, 3013,
]
primary_auth_source: ipa
ipa_host_group: fedimg
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-fedimg-members@fedoraproject.org
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: fedimg
owner: root
group: fedmsg
can_send:
- fedimg.image.test
- fedimg.image.upload
- fedimg.image.copy
- fedimg.image.publish
# These are all for outgoing fedmsg.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013]
testing: False

View file

@ -1,44 +1,35 @@
---
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fedimg.image.test
- fedimg.image.upload
- fedimg.image.copy
- fedimg.image.publish
group: fedmsg
owner: root
service: fedimg
fedmsg_debug_loopback: True
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-fedimg-members@fedoraproject.org
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
ipa_host_group: fedimg
lvm_size: 20000
mem_size: 6144
num_cpus: 2
# Use infrastructure-tags-stg repo
testing: True
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [
# These are all for outgoing fedmsg.
3000, 3001, 3002, 3003, 3004, 3005, 3006,
3007, 3008, 3009, 3010, 3011, 3012, 3013,
]
ipa_host_group: fedimg
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
fedmsg_debug_loopback: True
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-fedimg-members@fedoraproject.org
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: fedimg
owner: root
group: fedmsg
can_send:
- fedimg.image.test
- fedimg.image.upload
- fedimg.image.copy
- fedimg.image.publish
# These are all for outgoing fedmsg.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013]
# Use infrastructure-tags-stg repo
testing: True

View file

@ -1,17 +1,15 @@
# we need this for our fedora-messaging consumer as it is not allowed
# to create queues on the infra AMQP broker, by broker config
fedora_nightlies_amqp_passive: true
# fedora-messaging job scheduler settings
fedora_nightlies_amqp_url: "amqps://openqa:@rabbitmq.fedoraproject.org/%2Fpubsub"
fedora_nightlies_amqp_cacert: /etc/fedora-messaging/cacert.pem
fedora_nightlies_amqp_key: /etc/pki/fedora-messaging/openqa-key.pem
fedora_nightlies_amqp_cert: /etc/pki/fedora-messaging/openqa-cert.pem
fedora_nightlies_amqp_queue: "openqa_fedora_nightlies"
fedora_nightlies_amqp_routing_keys: ["org.fedoraproject.prod.openqa.job.done", "org.fedoraproject.prod.pungi.compose.status.change"]
fedora_nightlies_amqp_html_file: /usr/share/openqa/public/nightlies.html
fedora_nightlies_amqp_data_file: /usr/share/openqa/public/nightlies.json
fedora_nightlies_amqp_html_file: /usr/share/openqa/public/nightlies.html
fedora_nightlies_amqp_key: /etc/pki/fedora-messaging/openqa-key.pem
# fedora-messaging email error reporting settings
fedora_nightlies_amqp_mailto: ["adamwill@fedoraproject.org"]
fedora_nightlies_amqp_passive: true
fedora_nightlies_amqp_queue: "openqa_fedora_nightlies"
fedora_nightlies_amqp_routing_keys: ["org.fedoraproject.prod.openqa.job.done", "org.fedoraproject.prod.pungi.compose.status.change"]
fedora_nightlies_amqp_smtp: bastion
# fedora-messaging job scheduler settings
fedora_nightlies_amqp_url: "amqps://openqa:@rabbitmq.fedoraproject.org/%2Fpubsub"

View file

@ -1,9 +1,66 @@
---
# Define resources for this group of hosts here.
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- github.commit_comment
- github.create
- github.delete
- github.fork
- github.gollum
- github.issue.assigned
- github.issue.closed
- github.issue.comment
- github.issue.edited
- github.issue.labeled
- github.issue.milestone
- github.issue.opened
- github.issue.reopened
- github.issue.unassigned
- github.issue.unlabeled
- github.label
- github.member
- github.page_build
- github.pull_request.assigned
- github.pull_request.closed
- github.pull_request.edited
- github.pull_request.labeled
- github.pull_request.opened
- github.pull_request_review
- github.pull_request_review_comment
- github.pull_request.review_requested
- github.pull_request.synchronize
- github.pull_request.unlabeled
- github.push
- github.release
- github.repository_vulnerability_alert
- github.star
- github.status
- github.team_add
- github.webhook
group: apache
owner: root
service: github2fedmsg
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-veteran
ipa_host_group: github2fedmsg
ipa_host_group_desc: Bridge select GitHub repo events into bus messages
lvm_size: 20000
mem_size: 2048
num_cpus: 2
primary_auth_source: ipa
tcp_ports: [80]
# for fedora-messaging
username: "github2fedmsg{{ env_suffix }}"
# Definining these vars has a number of effects
# 1) mod_wsgi is configured to use the vars for its own setup
# 2) iptables opens enough ports for all threads for fedmsg
@ -11,66 +68,3 @@ num_cpus: 2
wsgi_fedmsg_service: github2fedmsg
wsgi_procs: 2
wsgi_threads: 2
tcp_ports: [ 80 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
primary_auth_source: ipa
ipa_host_group: github2fedmsg
ipa_host_group_desc: Bridge select GitHub repo events into bus messages
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-veteran
# for fedora-messaging
username: "github2fedmsg{{ env_suffix }}"
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: github2fedmsg
owner: root
group: apache
can_send:
- github.commit_comment
- github.create
- github.delete
- github.fork
- github.gollum
- github.issue.assigned
- github.issue.closed
- github.issue.comment
- github.issue.edited
- github.issue.labeled
- github.issue.milestone
- github.issue.opened
- github.issue.reopened
- github.issue.unassigned
- github.issue.unlabeled
- github.label
- github.member
- github.page_build
- github.pull_request.assigned
- github.pull_request.closed
- github.pull_request.edited
- github.pull_request.labeled
- github.pull_request.opened
- github.pull_request_review
- github.pull_request_review_comment
- github.pull_request.review_requested
- github.pull_request.synchronize
- github.pull_request.unlabeled
- github.push
- github.release
- github.repository_vulnerability_alert
- github.star
- github.status
- github.team_add
- github.webhook

View file

@ -1,9 +1,65 @@
---
# Define resources for this group of hosts here.
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
deployment_type: stg
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- github.commit_comment
- github.create
- github.delete
- github.fork
- github.gollum
- github.issue.assigned
- github.issue.closed
- github.issue.comment
- github.issue.edited
- github.issue.labeled
- github.issue.milestone
- github.issue.opened
- github.issue.reopened
- github.issue.unassigned
- github.issue.unlabeled
- github.label
- github.member
- github.page_build
- github.pull_request.assigned
- github.pull_request.closed
- github.pull_request.edited
- github.pull_request.labeled
- github.pull_request.opened
- github.pull_request_review
- github.pull_request_review_comment
- github.pull_request.review_requested
- github.pull_request.synchronize
- github.pull_request.unlabeled
- github.push
- github.release
- github.repository_vulnerability_alert
- github.star
- github.status
- github.team_add
- github.webhook
group: apache
owner: root
service: github2fedmsg
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-veteran
ipa_host_group: github2fedmsg
ipa_host_group_desc: Bridge select GitHub repo events into bus messages
lvm_size: 20000
mem_size: 4096
num_cpus: 1
tcp_ports: [80]
# for fedora-messaging
username: "github2fedmsg{{ env_suffix }}"
# Definining these vars has a number of effects
# 1) mod_wsgi is configured to use the vars for its own setup
# 2) iptables opens enough ports for all threads for fedmsg
@ -11,65 +67,3 @@ num_cpus: 1
wsgi_fedmsg_service: github2fedmsg
wsgi_procs: 2
wsgi_threads: 2
tcp_ports: [ 80 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
ipa_host_group: github2fedmsg
ipa_host_group_desc: Bridge select GitHub repo events into bus messages
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-veteran
# for fedora-messaging
username: "github2fedmsg{{ env_suffix }}"
deployment_type: stg
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: github2fedmsg
owner: root
group: apache
can_send:
- github.commit_comment
- github.create
- github.delete
- github.fork
- github.gollum
- github.issue.assigned
- github.issue.closed
- github.issue.comment
- github.issue.edited
- github.issue.labeled
- github.issue.milestone
- github.issue.opened
- github.issue.reopened
- github.issue.unassigned
- github.issue.unlabeled
- github.label
- github.member
- github.page_build
- github.pull_request.assigned
- github.pull_request.closed
- github.pull_request.edited
- github.pull_request.labeled
- github.pull_request.opened
- github.pull_request_review
- github.pull_request_review_comment
- github.pull_request.review_requested
- github.pull_request.synchronize
- github.pull_request.unlabeled
- github.push
- github.release
- github.repository_vulnerability_alert
- github.star
- github.status
- github.team_add
- github.webhook

View file

@ -1,7 +1,7 @@
freezes: False
csi_purpose: GNOME Infrastructure Backups facility
csi_relationship: |
Provides rdiff-backup based backups to all the GNOME Infrastructure
machines and services
- This machine mainly relies on the Red Hat sponsored NetApp assigned
to the GNOME Project where all the backups do reside
Provides rdiff-backup based backups to all the GNOME Infrastructure
machines and services
- This machine mainly relies on the Red Hat sponsored NetApp assigned
to the GNOME Project where all the backups do reside
freezes: False

View file

@ -2,9 +2,8 @@
# XXX - this is not really a group of real hosts.
# Instead, it represents an application in openshift.
# See playbooks/openshift-apps/greenwave.yml
fedmsg_certs:
- service: greenwave
can_send:
- logger.log
- greenwave.decision.update
- can_send:
- logger.log
- greenwave.decision.update
service: greenwave

View file

@ -2,11 +2,9 @@
# XXX - this is not really a group of real hosts.
# Instead, it represents an application in openshift.
# See playbooks/openshift-apps/greenwave.yml
fedmsg_env: stg
fedmsg_certs:
- service: greenwave
can_send:
- logger.log
- greenwave.decision.update
- can_send:
- logger.log
- greenwave.decision.update
service: greenwave
fedmsg_env: stg

View file

@ -1,29 +1,21 @@
---
# Define resources for this group of hosts here.
lvm_size: 30000
mem_size: 8192
num_cpus: 4
tcp_ports: [ 80, 88, 389, 443, 464, 636 ]
udp_ports: [ 88, 464 ]
custom_rules: [
'-A INPUT -p udp -m udp -s 10.3.0.0/16 --dport 53 -j ACCEPT'
]
primary_auth_source: ipa
custom_rules: ['-A INPUT -p udp -m udp -s 10.3.0.0/16 --dport 53 -j ACCEPT']
host_backup_targets: ['/var/lib/ipa/backup', '/var/log/dirsrv/slapd-FEDORAPROJECT-ORG']
ipa_client_shell_groups:
- sysadmin-accounts
ipa_client_sudo_groups:
- sysadmin-accounts
ipa_dm_password: "{{ ipa_prod_dm_password }}"
ipa_host_group: ipa
ipa_host_group_desc: IPA service
ipa_client_shell_groups:
- sysadmin-accounts
ipa_client_sudo_groups:
- sysadmin-accounts
nrpe_procs_warn: 300
nrpe_procs_crit: 500
ipa_initial: false
ipa_dm_password: "{{ ipa_prod_dm_password }}"
ipa_ldap_socket: ldapi://%2fvar%2frun%2fslapd-FEDORAPROJECT-ORG.socket
host_backup_targets: ['/var/lib/ipa/backup', '/var/log/dirsrv/slapd-FEDORAPROJECT-ORG']
lvm_size: 30000
mem_size: 8192
nrpe_procs_crit: 500
nrpe_procs_warn: 300
num_cpus: 4
primary_auth_source: ipa
tcp_ports: [80, 88, 389, 443, 464, 636]
udp_ports: [88, 464]

View file

@ -1,21 +1,17 @@
---
# Define resources for this group of hosts here.
lvm_size: 30000
mem_size: 8192
num_cpus: 4
tcp_ports: [ 80, 88, 389, 443, 464, 636 ]
udp_ports: [ 88, 464 ]
ipa_client_shell_groups:
- sysadmin-accounts
ipa_client_sudo_groups:
- sysadmin-accounts
ipa_dm_password: "{{ ipa_stg_dm_password }}"
ipa_host_group: ipa
ipa_host_group_desc: IPA service
ipa_client_shell_groups:
- sysadmin-accounts
ipa_client_sudo_groups:
- sysadmin-accounts
nrpe_procs_warn: 300
nrpe_procs_crit: 500
ipa_dm_password: "{{ ipa_stg_dm_password }}"
ipa_ldap_socket: ldapi://%2fvar%2frun%2fslapd-STG-FEDORAPROJECT-ORG.socket
lvm_size: 30000
mem_size: 8192
nrpe_procs_crit: 500
nrpe_procs_warn: 300
num_cpus: 4
tcp_ports: [80, 88, 389, 443, 464, 636]
udp_ports: [88, 464]

View file

@ -1,16 +1,12 @@
---
# Define resources for this group of hosts here.
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
ipa_host_group: ipsilon
ipa_host_group_desc: Ipsilon SSO application
lvm_size: 20000
mem_size: 4096
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
ipa_host_group: ipsilon
ipa_host_group_desc: Ipsilon SSO application
tcp_ports: [80, 443]

View file

@ -1,16 +1,12 @@
---
# Define resources for this group of hosts here.
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
ipa_host_group: ipsilon
ipa_host_group_desc: Ipsilon SSO application
lvm_size: 20000
mem_size: 4096
num_cpus: 2
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
proxy_tcp_ports: [ 80, 443 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
ipa_host_group: ipsilon
ipa_host_group_desc: Ipsilon SSO application
proxy_tcp_ports: [80, 443]

View file

@ -1,12 +1,10 @@
---
custom_rules: ['-A INPUT -p tcp -m tcp -s 192.168.122.0/24 --dport 2049 -j ACCEPT']
freezes: false
resolvconf: "{{ files }}/resolv.conf/iad2"
ipa_client_shell_groups:
- sysadmin-kernel
ipa_client_sudo_groups:
- sysadmin-kernel
ipa_host_group: kernel_qa
ipa_host_group_desc: kernel test machines
ipa_client_shell_groups:
- sysadmin-kernel
ipa_client_sudo_groups:
- sysadmin-kernel
custom_rules: [ '-A INPUT -p tcp -m tcp -s 192.168.122.0/24 --dport 2049 -j ACCEPT' ]
resolvconf: "{{ files }}/resolv.conf/iad2"

View file

@ -1,9 +1,30 @@
---
# Define resources for this group of hosts here.
# Neeed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- kerneltest.release.edit
- kerneltest.release.new
- kerneltest.upload.new
group: apache
owner: root
service: kerneltest
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-veteran
ipa_host_group: kerneltest
lvm_size: 20000
mem_size: 4096
num_cpus: 2
primary_auth_source: ipa
tcp_ports: [80]
# Definining these vars has a number of effects
# 1) mod_wsgi is configured to use the vars for its own setup
# 2) iptables opens enough ports for all threads for fedmsg
@ -11,29 +32,3 @@ num_cpus: 2
wsgi_fedmsg_service: kerneltest
wsgi_procs: 2
wsgi_threads: 1
tcp_ports: [ 80 ]
# Neeed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
primary_auth_source: ipa
ipa_host_group: kerneltest
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-veteran
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: kerneltest
owner: root
group: apache
can_send:
- kerneltest.release.edit
- kerneltest.release.new
- kerneltest.upload.new

View file

@ -1,56 +1,47 @@
---
# Define resources for this group of hosts here.
lvm_size: 30000
mem_size: 32768
num_cpus: 16
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443, 111, 2049,
# These 8 ports are used by fedmsg. One for each wsgi thread.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007]
udp_ports: [ 111, 2049 ]
custom_rules: [
# Needed for keepalived
'-A INPUT -d 224.0.0.0/8 -j ACCEPT',
'-A INPUT -p vrrp -j ACCEPT',
]
primary_auth_source: ipa
ipa_host_group: kojihub
ipa_host_group_desc: Koji Hub hosts
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
# Needed for keepalived
'-A INPUT -d 224.0.0.0/8 -j ACCEPT', '-A INPUT -p vrrp -j ACCEPT']
docker_registry: "candidate-registry.fedoraproject.org"
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: koji
owner: root
group: apache
can_send:
- buildsys.build.state.change
- buildsys.package.list.change
- buildsys.repo.done
- buildsys.repo.init
- buildsys.rpm.sign
- buildsys.tag
- buildsys.task.state.change
- buildsys.untag
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
virt_install_command: "{{ virt_install_command_two_nic }}"
osbs_url: "osbs.fedoraproject.org"
docker_registry: "candidate-registry.fedoraproject.org"
source_registry: "registry.fedoraproject.org"
koji_root: "koji.fedoraproject.org/koji"
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- buildsys.build.state.change
- buildsys.package.list.change
- buildsys.repo.done
- buildsys.repo.init
- buildsys.rpm.sign
- buildsys.tag
- buildsys.task.state.change
- buildsys.untag
group: apache
owner: root
service: koji
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
- sysadmin-releng
ipa_host_group: kojihub
ipa_host_group_desc: Koji Hub hosts
koji_hub: "koji.fedoraproject.org/kojihub"
koji_root: "koji.fedoraproject.org/koji"
lvm_size: 30000
mem_size: 32768
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
num_cpus: 16
osbs_url: "osbs.fedoraproject.org"
primary_auth_source: ipa
source_registry: "registry.fedoraproject.org"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [80, 443, 111, 2049,
# These 8 ports are used by fedmsg. One for each wsgi thread.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007]
udp_ports: [111, 2049]
virt_install_command: "{{ virt_install_command_two_nic }}"

View file

@ -1,66 +1,55 @@
---
# Define resources for this group of hosts here.
lvm_size: 250000
mem_size: 8192
num_cpus: 8
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443, 111, 2049,
# These 8 ports are used by fedmsg. One for each wsgi thread.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007]
udp_ports: [ 111, 2049 ]
ipa_host_group: kojihub
ipa_host_group_desc: Koji Hub hosts
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-osbs
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-osbs
- sysadmin-releng
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- service: shell
owner: root
group: sysadmin
can_send:
- logger.log
- service: koji
owner: root
group: apache
can_send:
- buildsys.build.state.change
- buildsys.package.list.change
- buildsys.repo.done
- buildsys.repo.init
- buildsys.rpm.sign
- buildsys.tag
- buildsys.task.state.change
- buildsys.untag
# NOTE -- staging mounts read-only
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
docker_registry: "candidate-registry.stg.fedoraproject.org"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_hub: "koji.stg.fedoraproject.org/kojihub"
# Add custom iptable rule to allow stage koji to talk to
# osbs-dev.fedorainfracloud.org (will move to stage osbs later, this is for the
# sake of testing).
custom_rules: [
'-A OUTPUT -p tcp -m tcp -d 209.132.184.60 --dport 8443 -j ACCEPT'
]
custom_rules: ['-A OUTPUT -p tcp -m tcp -d 209.132.184.60 --dport 8443 -j ACCEPT']
docker_registry: "candidate-registry.stg.fedoraproject.org"
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- buildsys.build.state.change
- buildsys.package.list.change
- buildsys.repo.done
- buildsys.repo.init
- buildsys.rpm.sign
- buildsys.tag
- buildsys.task.state.change
- buildsys.untag
group: apache
owner: root
service: koji
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-osbs
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-osbs
- sysadmin-releng
ipa_host_group: kojihub
ipa_host_group_desc: Koji Hub hosts
koji_hub: "koji.stg.fedoraproject.org/kojihub"
koji_root: "koji.stg.fedoraproject.org/koji"
koji_server_url: "https://koji.stg.fedoraproject.org/kojihub"
koji_topurl: "https://kojipkgs.fedoraproject.org/"
koji_weburl: "https://koji.stg.fedoraproject.org/koji"
lvm_size: 250000
mem_size: 8192
# NOTE -- staging mounts read-only
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
num_cpus: 8
osbs_url: "osbs.stg.fedoraproject.org"
source_registry: "registry.stg.fedoraproject.org"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [80, 443, 111, 2049,
# These 8 ports are used by fedmsg. One for each wsgi thread.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007]
udp_ports: [111, 2049]

View file

@ -1,47 +1,39 @@
---
# Define resources for this group of hosts here.
lvm_size: 50000
mem_size: 98304
max_mem_size: 98304
num_cpus: 16
custom_rules: [
# Need for rsync from log01 for logs.
'-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT',
]
tcp_ports: [80, 8080]
primary_auth_source: ipa
ipa_host_group: kojipkgs
ipa_host_group_desc: Koji Packages hosts
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-releng
varnish_group: kojipkgs
# For the MOTD
csi_security_category: Moderate
csi_primary_contact: Fedora admins - admin@fedoraproject.org
csi_purpose: Cache packages from koji for builders and others
csi_relationship: |
There are a few things running here:
There are a few things running here:
- apache web server and varnish caching proxy.
- apache web server and varnish caching proxy.
- This host relies on:
- koji nfs storage
- proxy01/10 to proxy requests to it.
- Things that rely on this host:
- all koji builders/buildsystem
- koschei
- external users downloading packages from koji.
- This host relies on:
- koji nfs storage
- proxy01/10 to proxy requests to it.
- Things that rely on this host:
- all koji builders/buildsystem
- koschei
- external users downloading packages from koji.
# For the MOTD
csi_security_category: Moderate
custom_rules: [
# Need for rsync from log01 for logs.
'-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-releng
ipa_host_group: kojipkgs
ipa_host_group_desc: Koji Packages hosts
lvm_size: 50000
max_mem_size: 98304
mem_size: 98304
nagios_Check_Services:
swap: false
num_cpus: 16
primary_auth_source: ipa
tcp_ports: [80, 8080]
varnish_group: kojipkgs

View file

@ -1,13 +1,13 @@
---
primary_auth_source: ipa
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-analysis
- sysadmin-logs
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-analysis
- sysadmin-logs
ipa_host_group: logging
ipa_host_group_desc: Logging hosts
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-analysis
- sysadmin-logs
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-analysis
- sysadmin-logs
primary_auth_source: ipa

Some files were not shown because too many files have changed in this diff Show more