# This is based on the example inventories provided by the upstream
# openshift-ansible project available:
# https://github.com/openshift/openshift-ansible/tree/master/inventory/byo

[masters]
{% for host in groups[openshift_cluster_masters_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }}
{% endif %}
{% endfor %}

[etcd]
{% for host in groups[openshift_cluster_masters_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }}
{% endif %}
{% endfor %}

[nodes]
{% for host in groups[openshift_cluster_masters_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }} openshift_node_group_name='node-config-master'
{% endif %}
{% endfor %}
{% for host in groups[openshift_cluster_nodes_group] %}
{% if hostvars[host].datacenter == datacenter %}
{{ host }} openshift_node_group_name='node-config-compute'
{% endif %}
{% endfor %}

# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd

# Add this if using nfs and have defined the nfs group
#nfs

# Set variables common for all OSEv3 hosts
[OSEv3:vars]
openshift_node_groups=[{'name': 'node-config-master', 'labels': ['node-role.kubernetes.io/master=true', 'orchestrator=true']}, {'name': 'node-config-infra', 'labels': ['node-role.kubernetes.io/infra=true',]}, {'name': 'node-config-compute', 'labels': ['node-role.kubernetes.io/compute=true', 'node-role.kubernetes.io/infra=true', 'worker=true'], 'edits': [{ 'key': 'kubeletArguments.pods-per-core','value': ['20']}]}]
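# (For readability, the one-liner above defines three node groups:
#   node-config-master  -> node-role.kubernetes.io/master=true, orchestrator=true
#   node-config-infra   -> node-role.kubernetes.io/infra=true
#   node-config-compute -> compute, infra and worker labels, plus a
#                          kubeletArguments.pods-per-core edit of '20')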
# Disable the service catalog. We don't use it and it needs persistent storage.
openshift_enable_service_catalog=false
# Set this because we have nfs which isn't supported
openshift_enable_unsupported_configurations=true
# Have the upgrader also restart systems in a rolling manner.
openshift_rolling_restart_mode=system
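# For reference, the other supported value here is "services", which restarts
# only the relevant services instead of rebooting the host, e.g.:
#openshift_rolling_restart_mode=services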
# Disable the disk and package version tests
openshift_disable_check=disk_availability,package_version,docker_image_availability,memory_availability,docker_storage
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
ansible_ssh_user={{openshift_ansible_ssh_user}}

# Specify the deployment type. Valid values are origin and openshift-enterprise.
deployment_type={{openshift_deployment_type}}

# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
openshift_release={{openshift_release}}
openshift_version={{openshift_version}}

# For whatever reason, this keeps hitting a race condition and docker is
# excluded before docker is installed, so we're just going to remove it.
openshift_enable_docker_excluder=False

# OpenShift Containerized deployment or not?
containerized={{openshift_ansible_containerized_deploy}}

{% if openshift_ansible_ssh_user != "root" %}
# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
ansible_become=yes
{% endif %}

{% if openshift_ansible_python_interpreter is defined %}
ansible_python_interpreter={{openshift_ansible_python_interpreter}}
{% endif %}

# Debug level for all OpenShift components (Defaults to 2)
debug_level={{openshift_debug_level}}

# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
openshift_image_tag={{openshift_release}}

# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
openshift_pkg_version={{openshift_pkg_version}}
# Install the openshift examples
{% if openshift_ansible_install_examples is defined %}
openshift_install_examples={{openshift_ansible_install_examples}}
{% endif %}

openshift_cluster_monitoring_operator_install=false
openshift_web_console_install=false
openshift_console_install=false
openshift_enable_olm=false

# Configure logoutURL in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
#openshift_master_logout_url=http://example.com

# Configure extensionScripts in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']

# Configure extensionStylesheets in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']

# Configure extensions in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]

# Configure a custom oauth login page template in the master config
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html

# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}

# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
# included if you do not include it
#openshift_docker_additional_registries=registry.example.com
#openshift_docker_insecure_registries=registry.example.com
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
# Install and run cri-o.
{% if openshift_ansible_use_crio is defined %}
openshift_use_crio={{ openshift_ansible_use_crio }}
{% endif %}
{% if openshift_ansible_use_crio_only is defined %}
openshift_use_crio_only={{ openshift_ansible_use_crio_only }}
{% endif %}
# The following two variables are used when openshift_use_crio is True
# and clean up after builds that pass through docker. When openshift_use_crio is True
# these variables are set to the defaults shown. You may override them here.
# NOTE: You will still need to tag crio nodes with your given label(s)!
# Enable docker garbage collection when using cri-o
#openshift_crio_enable_docker_gc=True
# Node Selectors to run the garbage collection
#openshift_crio_docker_gc_node_selector={'runtime': 'cri-o'}
openshift_crio_docker_gc_node_selector={}
openshift_crio_systemcontainer_image_override="registry.access.redhat.com/openshift3/cri-o:v3.9"

# Use Docker inside a System Container. Note that this is a tech preview and should
# not be used to upgrade!
# The following options for docker are ignored:
# - docker_version
# - docker_upgrade
# The following options must not be used
# - openshift_docker_options
#openshift_docker_use_system_container=False
# Force the registry to use for the system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"

# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"

# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False

# Specify exact version of etcd to configure or upgrade to.
# etcd_version="3.1.0"
# Enable etcd debug logging, defaults to false
# etcd_debug=true
# Set etcd log levels by package
# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"

# Upgrade Hooks
#
# Hooks are available to run custom tasks at various points during a cluster
# upgrade. Each hook should point to a file with Ansible tasks defined. We suggest
# using absolute paths; otherwise the path will be treated as relative to the file
# where the hook is actually used.
#
# Tasks to run before each master is upgraded.
# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
#
# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
# upgrade steps, but before we restart system/services.
# openshift_master_upgrade_hook=/usr/share/custom/master.yml
#
# Tasks to run after each master is upgraded and system/services have been restarted.
# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml

# Alternate image format string, useful if you've got your own registry mirror
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true

# Additional yum repos to install
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]

# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
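# A pre-hashed entry for the users/file above can be generated locally with
# the htpasswd utility (from httpd-tools), for example:
#   htpasswd -nb user1 changeme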

# OSBS Specific Auth
{% if openshift_auth_profile == "osbs" %}
openshift_master_manage_htpasswd=false
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider'}]
{% endif %}

{% if openshift_auth_profile == "fedoraidp" %}
openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_prod_client_secret}}", "extraScopes": ["profile", "email", "https://id.fedoraproject.org/scope/groups"], "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
{% endif %}

{% if openshift_auth_profile == "fedoraidp-stg" %}
openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
{% endif %}

# If oreg_url points to a registry requiring authentication, provide the following:
{% if openshift_arch == "aarch64" %}
oreg_url=quay.io/multi-arch/aarch64-openshift3-ose-${component}:v3.11
oreg_auth_user="{{ os_multiarch_registry_user }}"
oreg_auth_password="{{ os_multiarch_registry_password }}"
oreg_test_login=false
{% elif env == "staging" %}
oreg_auth_user="{{ os_stg_registry_user }}"
oreg_auth_password="{{ os_stg_registry_password }}"
{% elif datacenter != 'iad2' %}
oreg_auth_user="{{ os_prod_registry_user }}"
oreg_auth_password="{{ os_prod_registry_password }}"
{% else %}
oreg_auth_user="{{ os_prod_iad2_registry_user }}"
oreg_auth_password="{{ os_prod_iad2_registry_password }}"
{% endif %}

# NOTE: oreg_url must be defined by the user for oreg_auth_* to have any effect.
# oreg_auth_password should be generated from running docker login.
# To update registry auth credentials, uncomment the following:
#oreg_auth_credentials_replace=True
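# (docker login normally records the credentials it negotiates in
# ~/.docker/config.json; the password/token stored there is what the
# oreg_auth_password values above should carry.)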

# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]

# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
#
# Configure LDAP CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the LDAPPasswordIdentityProvider.
#
#openshift_master_ldap_ca=<ca text>
# or
#openshift_master_ldap_ca_file=<path to local ca file to use>

# OpenID auth
#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
#
# Configure OpenID CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the OpenIDIdentityProvider.
#
#openshift_master_openid_ca=<ca text>
# or
#openshift_master_openid_ca_file=<path to local ca file to use>

# Request header auth
#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
#
# Configure request header CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "clientCA"
# key set within the RequestHeaderIdentityProvider.
#
#openshift_master_request_header_ca=<ca text>
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>

{% if openshift_master_ha is defined %}
{% if openshift_master_ha %}
# Native high availability cluster method with optional load balancer.
# If no lb group is defined, the installer assumes that a load balancer has
# been preconfigured. For installation the value of
# openshift_master_cluster_hostname must resolve to the load balancer
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{openshift_internal_cluster_url}}
openshift_master_cluster_public_hostname={{openshift_cluster_url}}
{% endif %}
{% endif %}

# Override the default controller lease ttl
#osm_controller_lease_ttl=30

# Configure controller arguments
#osm_controller_args={'resource-quota-sync-period': ['10s']}

# Configure api server arguments
#osm_api_server_args={'max-requests-inflight': ['400']}

# default subdomain to use for exposed routes
{% if openshift_app_subdomain is defined %}
{% if openshift_app_subdomain %}
openshift_master_default_subdomain={{openshift_app_subdomain}}
{% endif %}
{% endif %}

# additional cors origins
#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']

# default project node selector
#osm_default_node_selector='region=primary'

# Override the default pod eviction timeout
#openshift_master_pod_eviction_timeout=5m

# Override the default oauth tokenConfig settings:
# openshift_master_access_token_max_seconds=86400
# openshift_master_auth_token_max_seconds=500

# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500

# Override master and node servingInfo.minTLSVersion and .cipherSuites
# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
#openshift_master_min_tls_version=VersionTLS12
#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
#
#openshift_node_min_tls_version=VersionTLS12
#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']

# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']

# OpenShift Router Options
#
# An OpenShift router will be created during install if there are
# nodes present with labels matching the default router selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Router selector (optional)
# Router will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_router_selector='region=infra'
#
# Router replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift router selector.
#openshift_hosted_router_replicas=2
#
# Router force subdomain (optional)
# A router path format to force on all routes used by this router
# (will ignore the route host value)
#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
#
# Router certificate (optional)
# Provide local certificate paths which will be configured as the
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
# Disable management of the OpenShift Router
openshift_hosted_manage_router=false
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
#openshift_hosted_routers:
#- name: router1
#  stats_port: 1936
#  ports:
#  - 80:80
#  - 443:443
#  replicas: 1
#  namespace: default
#  serviceaccount: router
#  selector: type=router1
#  images: "openshift3/ose-${component}:${version}"
#  edits: []
#  certificates:
#    certfile: /path/to/certificate/abc.crt
#    keyfile: /path/to/certificate/abc.key
#    cafile: /path/to/certificate/ca.crt
#- name: router2
#  stats_port: 1936
#  ports:
#  - 80:80
#  - 443:443
#  replicas: 1
#  namespace: default
#  serviceaccount: router
#  selector: type=router2
#  images: "openshift3/ose-${component}:${version}"
#  certificates:
#    certfile: /path/to/certificate/xyz.crt
#    keyfile: /path/to/certificate/xyz.key
#    cafile: /path/to/certificate/ca.crt
#  edits:
#  # ROUTE_LABELS sets the router to listen for routes
#  # tagged with the provided values
#  - key: spec.template.spec.containers[0].env
#    value:
#      name: ROUTE_LABELS
#      value: "route=external"
#    action: append

# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
# Override image version, defaults to latest for origin, matches the product version for enterprise
#openshift_cockpit_deployer_version=1.4.1

# Openshift Registry Options
#
# An OpenShift registry will be created during install if there are
# nodes present with labels matching the default registry selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Registry selector (optional)
# Registry will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_registry_selector='region=infra'
#
# Registry replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift registry selector.
#openshift_hosted_registry_replicas=2
#
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
# Disable management of the OpenShift Registry
#openshift_hosted_manage_registry=false

# Registry Storage Options
#
# NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
#
# External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com
#openshift_hosted_registry_storage_nfs_directory=/{{ansible_architecture}}
#openshift_hosted_registry_storage_volume_name=osbs-stg-registry
#openshift_hosted_registry_storage_volume_size=10Gi
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=ntap-phx2-c01-fedora01-nfs.storage.phx2.redhat.com
#openshift_hosted_registry_storage_nfs_directory=/{{ansible_architecture}}
#openshift_hosted_registry_storage_volume_name=osbs-prod-registry
#openshift_hosted_registry_storage_volume_size=10Gi
# Openstack
# Volume must already exist.
#openshift_hosted_registry_storage_kind=openstack
#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_registry_storage_openstack_filesystem=ext4
#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
#openshift_hosted_registry_storage_volume_size=10Gi
#
# Native GlusterFS Registry Storage
#openshift_hosted_registry_storage_kind=glusterfs
#
# AWS S3
#
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Any S3 service (Minio, ExoScale, ...): Basically the same as above
# but with regionendpoint configured
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_accesskey=access_key_id
#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Additional CloudFront Options. When using CloudFront all three
# of the following variables must be defined.
#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid

# Metrics deployment
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
#
# openshift_hosted_metrics_deploy=true
{% if openshift_metrics_deploy is defined %}
{% if openshift_metrics_deploy %}
#
openshift_hosted_metrics_deploy=false

# Storage Options
# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
# manually after the fact and metrics scaled per the docs.
#
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
#openshift_hosted_metrics_storage_kind=nfs
#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
#openshift_hosted_metrics_storage_kind=nfs
#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_metrics_storage_host=nfs.example.com
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_hosted_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
#
# Override metricsPublicURL in the master config for cluster metrics
# Defaults to https://hawkular-metrics.openshift_master_default_subdomain/hawkular/metrics
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.{{openshift_cluster_url}}/hawkular/metrics
{% endif %}
{% endif %}

# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
#openshift_hosted_logging_deploy=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_host=nfs.example.com
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_hosted_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
#
# Other Logging Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.openshift_master_default_subdomain
#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the number of elastic search nodes, unless you're using dynamic provisioning
# this value must be 1
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
#openshift_hosted_logging_deployer_version=3.5.0

# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'

# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False

# Configure SDN cluster network and kubernetes service CIDR blocks. These
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
#
# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
# 172.17.0.0/16. Your installation will fail and/or your configuration change will
# cause the Pod SDN or Cluster SDN to fail.
#
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16

# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
# is prefixed with !, IPs in that CIDR will be rejected. Rejections
# will be applied first, then the IP checked against one of the
# allowed CIDRs. You should ensure this range does not overlap with
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']

# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
# be assigned. It may contain a single CIDR that will be allocated from. For
# security reasons, you should ensure that this range does not overlap with
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16

# Configure number of bits to allocate to each host's subnet e.g. 9
# would mean a /23 network on the host.
#osm_host_subnet_length=9
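# Worked example: with the default osm_cluster_network_cidr of 10.128.0.0/14
# and osm_host_subnet_length=9, each host receives a /23 (32 - 9 = 23),
# i.e. 512 addresses for its pods.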

# Configure master API and console ports.
# These will default to 8443
{% if openshift_api_port is defined and openshift_console_port is defined %}
{% if openshift_api_port and openshift_console_port %}
openshift_master_api_port={{openshift_api_port}}
openshift_master_console_port={{openshift_console_port}}
{% endif %}
{% endif %}

# set RPM version for debugging purposes
#openshift_pkg_version=-3.1.0.0

# Configure custom ca certificate
#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
#
# NOTE: CA certificate will not be replaced with existing clusters.
# This option may only be specified when creating a new cluster or
# when redeploying cluster certificates with the redeploy-certificates
# playbook.

# Configure custom named certificates (SNI certificates)
#
# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
#
# NOTE: openshift_master_named_certificates is cached on masters and is an
# additive fact, meaning that each run with a different set of certificates
# will add the newly provided certificates to the cached set of certificates.
#
# An optional CA may be specified for each named certificate. CAs will
# be added to the OpenShift CA bundle which allows for the named
# certificate to be served for internal cluster communication.
#
# If you would like openshift_master_named_certificates to be overwritten with
# the provided value, specify openshift_master_overwrite_named_certificates.
#openshift_master_overwrite_named_certificates=true
#
# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
#
# Detected names may be overridden by specifying the "names" key
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]

# Session options
#openshift_master_session_name=ssn
#openshift_master_session_max_seconds=3600

# An authentication and encryption secret will be generated if secrets
# are not provided. If provided, openshift_master_session_auth_secrets
# and openshift_master_encryption_secrets must be equal length.
#
# Signing secrets, used to authenticate sessions using
# HMAC. Recommended to use secrets with 32 or 64 bytes.
#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
#
# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
# characters long, to select AES-128, AES-192, or AES-256.
#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
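# Suitable random secrets can be generated locally, for example:
#   openssl rand -base64 24   (yields a 32-character string)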

# configure how often node iptables rules are refreshed
#openshift_node_iptables_sync_period=5s

# Configure nodeIP in the node config
# This is needed in cases where node traffic is desired to go over an
# interface other than the default network interface.
#openshift_set_node_ip=True

# Force setting of system hostname when configuring OpenShift
# This works around issues related to installations that do not have valid dns
# entries for the interfaces attached to the host.
#openshift_set_hostname=True

# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1

# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}

# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]

# openshift-ansible will wait indefinitely for your input when it detects that the
# value of openshift_hostname resolves to an IP address not bound to any local
# interfaces. This mis-configuration is problematic for any pod leveraging host
# networking and liveness or readiness probes.
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true

# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
# to True for Origin 1.2 and OSE 3.2; False for 1.1 / 3.1 installs. This cannot
# be used with 1.0 and 3.0.
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
# options like 'strict-order' to alter dnsmasq configuration.
#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf

# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
#
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
# specify that domain above.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
# configuration into Builds. Proxy related values will default to the global proxy
# config values. You only need to set these if they differ from the global proxy settings.
# See BuildDefaults documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_no_proxy=mycorp.com
#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
#openshift_builddefaults_resources_requests_cpu=100m
#openshift_builddefaults_resources_requests_memory=256m
#openshift_builddefaults_resources_limits_cpu=1000m
#openshift_builddefaults_resources_limits_memory=512m

# Or you may optionally define your own build defaults configuration serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'

# These options configure the BuildOverrides admission controller which injects
# configuration into Builds.
# See BuildOverrides documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_buildoverrides_force_pull=true
#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}

# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'

# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False

# Admission plugin config
#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}

# Configure usage of openshift_clock role.
#openshift_clock_enabled=true

# OpenShift Per-Service Environment Variables
# Environment variables are added to /etc/sysconfig files for
# each OpenShift service: node, master (api and controllers).
# API and controllers environment variables are merged in single
# master environments.
{% if no_http2 is defined %}
{% if no_http2 %}
openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
openshift_node_env_vars={"ENABLE_HTTP2": "true"}
{% endif %}
{% endif %}

# Enable API service auditing, available as of 3.2
#openshift_master_audit_config={"enabled": true}

# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
#openshift_ca_cert_expire_days=1825
#openshift_node_cert_expire_days=730
#openshift_master_cert_expire_days=730

# Validity of the auto-generated external etcd certificates in days.
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825

# NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
# However, in order to ensure that your masters are not burdened with running pods you should
# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
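# e.g., in the [nodes] section:
# master.example.com openshift_schedulable=False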