---
#######
# BEGIN: Ansible roles_path variables
#
# Background/reference about external repos pulled in:
# https://pagure.io/fedora-infrastructure/issue/5476
#
ansible_base: /srv/web/infra
# Path to the openshift-ansible checkout as external git repo brought into
# Fedora Infra
openshift_ansible: /srv/web/infra/openshift-ansible/
#
# END: Ansible roles_path variables
#######
freezes: true
# most of our systems are in IAD2
datacenter: iad2
preferred_dc: iad2
postfix_group: "none"
# usually we do not want to enable nested virt, only on some virthosts
nested: false
# most of our systems are 64bit.
# Used to install various nagios scripts and the like.
libdir: /usr/lib64
# Most EL systems need default EPEL repos.
# Some systems (notably fed-cloud*) need to get their own
# EPEL files because EPEL overrides packages in their core repos.
use_default_epel: true
# example of ports for default iptables
# tcp_ports: [ 22, 80, 443 ]
# udp_ports: [ 110, 1024, 2049 ]
# multiple lines can be handled as below
# custom_rules: [ '-A INPUT -p tcp -m tcp --dport 8888 -j ACCEPT',
# '-A INPUT -p tcp -m tcp --dport 8889 -j ACCEPT' ]
# We default these to empty
udp_ports: []
tcp_ports: []
custom_rules: []
nat_rules: []
custom6_rules: []
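# For illustration only (hypothetical rules, nothing sets these by default):
# a host doing NAT or opening a port over IPv6 might override these with
# something like
# nat_rules: [ '-A POSTROUTING -o eth0 -j MASQUERADE' ]
# custom6_rules: [ '-A INPUT -p tcp -m tcp --dport 8888 -j ACCEPT' ]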
# defaults for hw installs
install_noc: none
# defaults for virt installs
ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-7
ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL7-x86_64/
mem_size: 4096
num_cpus: 2
lvm_size: 20000
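# These are only starting points; a beefier guest would override them in its
# host_vars, for example (hypothetical values):
# mem_size: 16384
# num_cpus: 8
# lvm_size: 100000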
# on MOST infra systems, the interface connected to the infra network
# is eth0. but not on quite ALL systems. e.g. on s390 boxes it's enc900,
# on openqa-ppc64le-01.qa it's eth2 for some reason, and on qa01.qa and
# qa02.qa it's em3. currently this only affects whether GATEWAY, DOMAIN
# and DNS1/DNS2 lines are put into ifcfg-(device).
ansible_ifcfg_infra_net_devices: [ 'eth0', 'enc900' ]
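# For example (hypothetical host_vars entry), a host whose infra interface is
# em3 could extend the list:
# ansible_ifcfg_infra_net_devices: [ 'eth0', 'enc900', 'em3' ]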
# Default netmask. All of our iad2 nets are /24's. Almost all of our
# non-iad2 sites are smaller than a /24.
eth0_nm: 255.255.255.0
eth1_nm: 255.255.255.0
eth1_ip: 10.0.0.10
br0_nm: 255.255.255.0
br1_nm: 255.255.255.0
nm: 255.255.255.0
# Default to managing the network; we do not want to do this on select
# hosts (like cloud nodes)
ansible_ifcfg_blocklist: false
# List of interfaces to explicitly disable
ansible_ifcfg_disabled: []
#
# The default virt-install works for rhel7 or fedora with 1 nic
#
virt_install_command: "{{ virt_install_command_one_nic }}"
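# A guest that also needs the NFS-facing nic can point this at the two nic
# variant from its host/group vars, for example:
# virt_install_command: "{{ virt_install_command_two_nic }}"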
main_bridge: br0
nfs_bridge: br1
mac_address: RANDOM
mac_address1: RANDOM
virt_install_command_one_nic: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
virt_install_command_two_nic: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
  ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
  --autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole --watchdog default --rng /dev/random --cpu host
virt_install_command_two_nic_unsafe: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
  --autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_ppc64le_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_ppc64le_two_nic_unsafe: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyS0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
  ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
  --autostart --noautoconsole --watchdog default --rng /dev/random
virt_install_command_aarch64_one_nic: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole
virt_install_command_aarch64_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole
virt_install_command_aarch64_2nd_nic: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole
virt_install_command_aarch64_two_nic: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none
  ip={{ eth1_ip }}:::{{ nm }}:{{ inventory_hostname_short }}-nfs:eth1:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --network=bridge={{ nfs_bridge }},model=virtio,mac={{ mac_address1 }}
  --autostart --noautoconsole --rng /dev/random
virt_install_command_armv7_one_nic: virt-install -n {{ inventory_hostname }} --arch armv7l
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }}
  --autostart --noautoconsole --rng /dev/random
virt_install_command_armv7_one_nic_unsafe: virt-install -n {{ inventory_hostname }} --arch armv7l
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }} console=tty0 console=ttyAMA0
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }}
  --autostart --noautoconsole --rng /dev/random --qemu-commandline="-machine highmem=off"
virt_install_command_s390x_one_nic: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole --rng /dev/random --cpu host
virt_install_command_s390x_one_nic_unsafe: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }} --memballoon virtio
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }},cache=unsafe,io=threads
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  'net.ifnames=0 inst.ksdevice=eth0 inst.ks={{ ks_url }}
  hostname={{ inventory_hostname }} nameserver={{ dns }}
  ip={{ eth0_ip }}::{{ gw }}:{{ nm }}:{{ inventory_hostname }}:eth0:none'
  --network bridge={{ main_bridge }},model=virtio,mac={{ mac_address }}
  --autostart --noautoconsole --rng /dev/random --cpu host
virt_install_command_rhel6: virt-install -n {{ inventory_hostname }}
  --memory={{ mem_size }},maxmemory={{ max_mem_size }}
  --disk bus=virtio,path={{ volgroup }}/{{ inventory_hostname }}
  --vcpus={{ num_cpus }},maxvcpus={{ max_cpu }} -l {{ ks_repo }} -x
  "inst.ksdevice=eth0 inst.ks={{ ks_url }} ip={{ eth0_ip }} netmask={{ nm }}
  gateway={{ gw }} dns={{ dns }} console=tty0 console=ttyS0
  hostname={{ inventory_hostname }}"
  --network=bridge=br0 --autostart --noautoconsole --watchdog default
max_mem_size: "{{ mem_size * 5 }}"
max_cpu: "{{ num_cpus * 5 }}"
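# With the defaults above (mem_size: 4096, num_cpus: 2) that works out to a
# ceiling of 20480 MiB of memory and 10 vcpus for ballooning/hotplug.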
# This is the wildcard certname for our proxies. It has a different name for
# the staging group and is used in the proxies.yml playbook.
wildcard_cert_name: wildcard-2020.fedoraproject.org
wildcard_crt_file: wildcard-2020.fedoraproject.org.cert
wildcard_key_file: wildcard-2020.fedoraproject.org.key
wildcard_int_file: wildcard-2020.fedoraproject.org.intermediate.cert
# This is the openshift wildcard cert. Until it exists, set it equal to the wildcard cert above.
os_wildcard_cert_name: wildcard-2021.app.os.fedoraproject.org
os_wildcard_crt_file: wildcard-2021.app.os.fedoraproject.org.cert
os_wildcard_key_file: wildcard-2021.app.os.fedoraproject.org.key
os_wildcard_int_file: wildcard-2021.app.os.fedoraproject.org.intermediate.cert
# Everywhere, always, we should sign messages and validate signatures.
# However, we allow individual hosts and groups to override this. Use this very
# carefully... and never in production (good for testing stuff in staging).
fedmsg_sign_messages: True
fedmsg_validate_signatures: True
# By default, nodes get no fedmsg certs. They need to declare them explicitly.
fedmsg_certs: []
# By default, fedmsg should not log debug info. Groups can override this.
fedmsg_loglevel: INFO
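# For example, a group chasing a fedmsg problem could temporarily set:
# fedmsg_loglevel: DEBUG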
# By default, fedmsg sends error logs to sysadmin-datanommer-members@fp.o.
fedmsg_error_recipients:
- sysadmin-datanommer-members@fedoraproject.org
# By default, fedmsg hosts are in passive mode. External hosts are typically
# active.
fedmsg_active: False
# Other defaults for fedmsg environments
fedmsg_prefix: org.fedoraproject
fedmsg_env: prod
# Amount of time to wait for connections after a socket is first established.
fedmsg_post_init_sleep: 1.0
# A special flag that, when set to true, will disconnect the host from the
# global fedmsg-relay instance and set it up with its own local one. You can
# temporarily set this to true for a specific host to do some debugging -- so
# you can *replay real messages from the datagrepper history without having
# those broadcast to the rest of the bus*.
fedmsg_debug_loopback: False
# These are used to:
# 1) configure mod_wsgi
# 2) open iptables rules for fedmsg (per wsgi thread)
# 3) declare enough fedmsg endpoints for the service
#wsgi_fedmsg_service: bodhi
#wsgi_procs: 4
#wsgi_threads: 4
# By default, nodes don't backup any dbs on them unless they declare it.
dbs_to_backup: []
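# For example (hypothetical database name), a db server would declare its own list:
# dbs_to_backup:
# - koji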
# by default the number of procs we allow before we whine
nrpe_procs_warn: 250
nrpe_procs_crit: 300
# by default, the number of emails in queue before we whine
nrpe_check_postfix_queue_warn: 2
nrpe_check_postfix_queue_crit: 5
# env is staging or production, we default it to production here.
env: production
env_prefix: ""
env_suffix: ""
env_short: prod
# nfs mount options, override at the group/host level
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
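# For example, a host that needs write access could override this with
# something like:
# nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"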
# by default set become to false here. We can override it as needed.
# Note that if become is true, you need to unset requiretty for
# ssh controlpersist to work.
become: false
# default the root_auth_users to nothing.
# This should be set for cloud instances in their host or group vars.
root_auth_users: ''
# These vars get shoved into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: Unspecified
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Unspecified
csi_relationship: |
  Unspecified.
  * What hosts/services does this rely on?
  * What hosts/services rely on this?
  To update this text, add the csi_* vars to group_vars/ in ansible.
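# A filled-in example (hypothetical values) for a group might look like:
# csi_security_category: Moderate
# csi_primary_contact: Fedora Admins - admin@fedoraproject.org
# csi_purpose: Run the wiki application
# csi_relationship: |
#   * Relies on the database servers and the proxies.
#   * The proxies rely on this host to answer wiki requests.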
#
# say if we want the apache role dependency for mod_wsgi or not
# In some cases we want mod_wsgi and no apache (for python3 httpaio stuff)
#
wsgi_wants_apache: true
# IPA settings
additional_host_keytabs: []
ipa_server: ipa01.iad2.fedoraproject.org
ipa_server_nodes:
- ipa01.iad2.fedoraproject.org
- ipa02.iad2.fedoraproject.org
#- ipa03.iad2.fedoraproject.org
ipa_realm: FEDORAPROJECT.ORG
ipa_admin_password: "{{ ipa_prod_admin_password }}"
primary_auth_source: ipa
# Normal default sshd port is 22
sshd_port: 22
# This enables/disables the SSH "keyhelper" used by Pagure for verifying users'
# SSH keys from the Pagure database
sshd_keyhelper: false
# List of names under which the host is available
ssh_hostnames: []
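# For example (illustrative only), a host reachable under an extra service
# name might set:
# ssh_hostnames: [ 'pkgs.fedoraproject.org' ]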
# assume collectd apache
collectd_apache: true
# assume vpn is false
vpn: False
# assume createrepo is true and this builder has the koji nfs mount to do that
createrepo: True
# Nagios global variables
nagios_Check_Services:
  mail: true
  nrpe: true
  sshd: true
  named: false
  dhcpd: false
  httpd: false
  swap: true
  ping: true
  raid: false
nagios_Can_Connect: true
# Set variable if we want to use our global iptables defaults
# Some things need to set their own.
baseiptables: True
# Most of our machines have manual resolv.conf files
# These settings are for machines where NM is supposed to control resolv.conf.
nm_controlled_resolv: False
dns1: "10.3.163.33"
dns2: "10.3.163.34"
# This is a list of services that need to wait for VPN to be up before getting started.
postvpnservices: []
# true or false if we are or are not a copr build virthost.
# Default to false
copr_build_virthost: false
#
# Set a redirectmatch variable we can use to disable some redirectmatches
# like the prerelease to final ones.
#
redirectmatch_enabled: True
#
# sshd can run an internal sftp server. We need this on some hosts, but
# not on most of them, so default to false.
sshd_sftp: false
#
# Autodetect python version
#
ansible_python_interpreter: auto
# set no x-forward header by default
x_forward: false