Merge branch 'master' of /git/ansible

Conflicts:
	inventory/group_vars/packages-stg
Nick Bebout 2017-06-14 16:43:41 +00:00
commit e40ac701b8
245 changed files with 5089 additions and 1792 deletions

@@ -1,89 +0,0 @@
# -*- coding: utf-8 -*-

from retask.task import Task
from retask.queue import Queue

import autocloud

from autocloud.models import init_model, ComposeJobDetails
from autocloud.producer import publish_to_fedmsg

import datetime
import logging

log = logging.getLogger("fedmsg")


def produce_jobs(infox):
    """ Queue the jobs into jobqueue
    :args infox: list of dictionaries contains the image url and the buildid
    """
    jobqueue = Queue('jobqueue')
    jobqueue.connect()

    family_mapping = {
        'Cloud_Base': 'b',
        'Atomic': 'a'
    }

    session = init_model()
    timestamp = datetime.datetime.now()
    for info in infox:
        image_name = info['path'].split('/')[-1].split(info['arch'])[0]
        jd = ComposeJobDetails(
            arch=info['arch'],
            compose_id=info['compose']['id'],
            created_on=timestamp,
            family=family_mapping[info['subvariant']],
            image_url=info['absolute_path'],
            last_updated=timestamp,
            release=info['compose']['release'],
            status='q',
            subvariant=info['subvariant'],
            user='admin',
            image_format=info['format'],
            image_type=info['type'],
            image_name=image_name,
        )
        session.add(jd)
        session.commit()

        job_details_id = jd.id
        log.info('Save {jd_id} to database'.format(jd_id=job_details_id))
        info.update({'job_id': jd.id})

        task = Task(info)
        jobqueue.enqueue(task)
        log.info('Enqueue {jd_id} to redis'.format(jd_id=job_details_id))

        publish_to_fedmsg(topic='image.queued',
                          compose_url=info['absolute_path'],
                          compose_id=info['compose']['id'],
                          image_name=image_name,
                          status='queued',
                          job_id=info['job_id'],
                          release=info['compose']['release'],
                          family=jd.family.value,
                          type=info['type'])
    session.close()


def is_valid_image(image_url):
    if autocloud.VIRTUALBOX:
        supported_image_ext = ('.vagrant-virtualbox.box',)
    else:
        supported_image_ext = ('.qcow2', '.vagrant-libvirt.box')

    if image_url.endswith(supported_image_ext):
        return True

    return False


def get_image_name(image_name):
    if 'vagrant' in image_name.lower():
        if autocloud.VIRTUALBOX:
            image_name = '{image_name}-Virtualbox'.format(
                image_name=image_name)
        else:
            image_name = '{image_name}-Libvirt'.format(image_name=image_name)
    return image_name
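
For context, a hedged consumer-side sketch of how these helpers were used from the other end of the Redis queue, as they existed before this commit removed the module. worker() is an illustrative name, not code from the repository, and it assumes retask's blocking Queue.wait() API and the info dict layout built by produce_jobs().

# Hypothetical worker loop draining the 'jobqueue' that produce_jobs() fills.
from retask.queue import Queue

from autocloud.utils import get_image_name, is_valid_image  # pre-removal layout


def worker():
    jobqueue = Queue('jobqueue')
    jobqueue.connect()
    while True:
        task = jobqueue.wait()   # blocks until produce_jobs() enqueues something
        info = task.data         # the same dict produce_jobs() stored
        if not is_valid_image(info['absolute_path']):
            continue             # skip image formats this deployment does not test
        name = get_image_name(info['path'].split('/')[-1])
        print('testing {0} (job {1})'.format(name, info['job_id']))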

@@ -9,7 +9,7 @@ from sqlalchemy import exc
import autocloud
from autocloud.models import init_model, ComposeDetails
from autocloud.models import init_model, ComposeDetails, ComposeJobDetails
from autocloud.producer import publish_to_fedmsg
from autocloud.utils import is_valid_image, produce_jobs
@@ -37,6 +37,8 @@ class AutoCloudConsumer(fedmsg.consumers.FedmsgConsumer):
    config_key = 'autocloud.consumer.enabled'

    def __init__(self, *args, **kwargs):
        self.supported_archs = [arch for arch, _ in ComposeJobDetails.ARCH_TYPES]
        log.info("Autocloud Consumer is ready for action.")
        super(AutoCloudConsumer, self).__init__(*args, **kwargs)
@@ -79,9 +81,7 @@ class AutoCloudConsumer(fedmsg.consumers.FedmsgConsumer):
            compose_image = compose_images[variant]

            for arch, payload in compose_image.iteritems():
                # aarch64 is not supported so filter if the arch is
                # 'aarch64'
                if arch == 'aarch64':
                if arch not in self.supported_archs:
                    continue
                for item in payload:
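
Net effect of the two hunks above: the consumer now derives its whitelist from ComposeJobDetails.ARCH_TYPES instead of hard-coding the single excluded arch, so aarch64 (added to the model later in this commit) is picked up automatically. A minimal sketch of the new filter logic, with a made-up payload dict standing in for the compose message:

# ARCH_TYPES mirrors autocloud.models.ComposeJobDetails; the payload dict is invented.
ARCH_TYPES = [('i386', 'i386'), ('x86_64', 'x86_64'), ('aarch64', 'aarch64')]
supported_archs = [arch for arch, _ in ARCH_TYPES]

compose_image = {'x86_64': ['img-a'], 'aarch64': ['img-b'], 'ppc64': ['img-c']}
for arch, payload in compose_image.items():
    if arch not in supported_archs:
        continue  # ppc64 is still skipped; aarch64 is now processed
    print(arch, payload)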

@@ -1,122 +0,0 @@
# -*- coding: utf-8 -*-

import datetime

from sqlalchemy import Column, Integer, String, DateTime, Text
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm import scoped_session

from sqlalchemy_utils import ChoiceType

import autocloud

Base = declarative_base()


class JobDetails(Base):
    __tablename__ = 'job_details'

    STATUS_TYPES = (
        ('s', 'Success'),
        ('f', 'Failed'),
        ('a', 'Aborted'),
        ('r', 'Running'),
        ('q', 'Queued')
    )

    IMAGE_FAMILY_TYPES = (
        ('b', 'Base'),
        ('a', 'Atomic')
    )

    ARCH_TYPES = (
        ('i386', 'i386'),
        ('x86_64', 'x86_64')
    )

    id = Column(Integer, primary_key=True)
    taskid = Column(String(255), nullable=False)
    status = Column(ChoiceType(STATUS_TYPES))
    family = Column(ChoiceType(IMAGE_FAMILY_TYPES))
    arch = Column(ChoiceType(ARCH_TYPES))
    release = Column(String(255))
    output = Column(Text, nullable=False, default='')
    created_on = Column(DateTime, default=datetime.datetime.utcnow)
    last_updated = Column(DateTime, default=datetime.datetime.utcnow)
    user = Column(String(255), nullable=False)


class ComposeDetails(Base):
    __tablename__ = 'compose_details'

    STATUS_TYPES = (
        ('c', 'Complete'),
        ('q', 'Queued'),
        ('r', 'Running'),
    )

    id = Column(Integer, primary_key=True)
    date = Column(DateTime, nullable=False)
    compose_id = Column(String(255), nullable=False, unique=True)
    respin = Column(Integer, nullable=False)
    type = Column(String(255), nullable=False)
    passed = Column(Integer, nullable=True, default=0)
    failed = Column(Integer, nullable=True, default=0)
    status = Column(ChoiceType(STATUS_TYPES))
    created_on = Column(DateTime, default=datetime.datetime.utcnow)
    last_updated = Column(DateTime, default=datetime.datetime.utcnow)
    location = Column(String(255), nullable=False)


class ComposeJobDetails(Base):
    __tablename__ = 'compose_job_details'

    STATUS_TYPES = (
        ('s', 'Success'),
        ('f', 'Failed'),
        ('a', 'Aborted'),
        ('r', 'Running'),
        ('q', 'Queued')
    )

    IMAGE_FAMILY_TYPES = (
        ('b', u'Base'),
        ('a', u'Atomic')
    )

    ARCH_TYPES = (
        ('i386', 'i386'),
        ('x86_64', 'x86_64'),
        ('aarch64', 'aarch64')
    )

    id = Column(Integer, primary_key=True)
    arch = Column(ChoiceType(ARCH_TYPES))
    compose_id = Column(String(255), nullable=False)
    created_on = Column(DateTime, default=datetime.datetime.utcnow)
    family = Column(ChoiceType(IMAGE_FAMILY_TYPES))
    image_url = Column(String(255), nullable=False)
    last_updated = Column(DateTime, default=datetime.datetime.utcnow)
    output = Column(Text, nullable=False, default='')
    release = Column(String(255))
    status = Column(ChoiceType(STATUS_TYPES))
    subvariant = Column(String(255), nullable=False)
    user = Column(String(255), nullable=False)
    image_format = Column(String(255), nullable=False)
    image_type = Column(String(255), nullable=False)
    image_name = Column(String(255), nullable=False)


def create_tables():
    # Create an engine that stores data in the local directory
    engine = create_engine(autocloud.SQLALCHEMY_URI)
    # Create all tables in the engine. This is equivalent to "Create Table"
    # statements in raw SQL.
    Base.metadata.create_all(engine)


def init_model():
    engine = create_engine(autocloud.SQLALCHEMY_URI)
    scopedsession = scoped_session(sessionmaker(bind=engine))
    return scopedsession
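
A hedged usage sketch for create_tables() and init_model(). The SQLite URI, compose id, and field values are illustrative assumptions, not production configuration:

# Illustrative only: point autocloud at a throwaway SQLite database,
# create the schema, and record one queued aarch64 job.
import autocloud
autocloud.SQLALCHEMY_URI = 'sqlite:////tmp/autocloud-test.db'  # assumed URI for the sketch

from autocloud.models import ComposeJobDetails, create_tables, init_model

create_tables()         # issues CREATE TABLE for all three models
session = init_model()  # thread-local scoped_session
job = ComposeJobDetails(
    arch='aarch64',
    compose_id='Fedora-26-20170614.n.0',  # made-up compose id
    family='a',                           # 'a' maps to Atomic in IMAGE_FAMILY_TYPES
    image_url='https://example.org/image.qcow2',
    release='26',
    status='q',                           # 'q' maps to Queued in STATUS_TYPES
    subvariant='Atomic',
    user='admin',
    image_format='qcow2',
    image_type='raw',
    image_name='Fedora-Atomic-26',
)
session.add(job)
session.commit()
session.close()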

@@ -178,6 +178,10 @@ RewriteRule ^/fedora-badges/report https://pagure.io/Fedora-Badges/issues [R=301]
RewriteRule ^/fedora-badges/ticket/(.*) https://pagure.io/Fedora-Badges/issue/$1 [R=301]
RewriteRule ^/fedora-badges https://pagure.io/Fedora-Badges [R=301]
RewriteRule ^/bind-dyndb-ldap/wiki https://docs.pagure.org/bind-dyndb-ldap/ [R=301]
RewriteRule ^/bind-dyndb-ldap/wiki/ https://docs.pagure.org/bind-dyndb-ldap/ [R=301]
RewriteRule ^/bind-dyndb-ldap/wiki/(.*) https://docs.pagure.org/bind-dyndb-ldap/$1.html [R=301]
RewriteRule ^/bind-dyndb-ldap/wiki/(.*)/ https://docs.pagure.org/bind-dyndb-ldap/$1.html [R=301]
RewriteRule ^/bind-dyndb-ldap/report https://pagure.io/bind-dyndb-ldap/issues [R=301]
RewriteRule ^/bind-dyndb-ldap/ticket/(.*) https://pagure.io/bind-dyndb-ldap/issue/$1 [R=301]
RewriteRule ^/bind-dyndb-ldap/changeset/(.*) https://pagure.io/bind-dyndb-ldap/c/$1 [R=301]

@@ -0,0 +1,17 @@
[Unit]
Description=loopabull worker #%i
After=network.target
Documentation=https://github.com/maxamillion/loopabull
[Service]
ExecStart=/usr/bin/loopabull $CONFIG_FILE
User=root
Group=root
Restart=on-failure
Type=simple
EnvironmentFile=-/etc/sysconfig/loopabull
Restart=on-failure
PrivateTmp=yes
[Install]
WantedBy=multi-user.target

@@ -0,0 +1 @@
config = { "rabbitmq.serializer.enabled": True }

@@ -0,0 +1,9 @@
[rhel7-openshift-3.4]
name = rhel7 openshift 3.4 $basearch
baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.4-rpms/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release
[rhel7-openshift-3.5]
name = rhel7 openshift 3.5 $basearch
baseurl=http://infrastructure.fedoraproject.org/repo/rhel/rhel7/$basearch/rhel-7-openshift-3.5-rpms/
gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-redhat-release

@@ -56,7 +56,7 @@
- name: restart openvpn (Fedora)
when: ansible_distribution == "Fedora"
action: service name=openvpn@openvpn state=restarted
action: service name=openvpn-client@openvpn state=restarted
#notify:
#- fix openvpn routing
@@ -68,7 +68,7 @@
- name: restart openvpn (RHEL7)
when: ansible_distribution == "RedHat" and ansible_distribution_major_version|int == 7
action: service name=openvpn@openvpn state=restarted
action: service name=openvpn-client@openvpn state=restarted
#notify:
#- fix openvpn routing

@@ -17,11 +17,12 @@ db-koji01.phx2.fedoraproject.org
#copr-be.cloud.fedoraproject.org
copr-fe.cloud.fedoraproject.org
copr-keygen.cloud.fedoraproject.org
copr-dist-git.fedorainfracloud.org
#copr-dist-git.fedorainfracloud.org
value01.phx2.fedoraproject.org
taiga.fedorainfracloud.org
taskotron01.qa.fedoraproject.org
nuancier01.phx2.fedoraproject.org
piwik.fedorainfracloud.org
#magazine.fedorainfracloud.org
magazine2.fedorainfracloud.org
communityblog.fedorainfracloud.org
upstreamfirst.fedorainfracloud.org

@@ -1,4 +1,3 @@
[buildvm]
buildvm-01.phx2.fedoraproject.org
buildvm-02.phx2.fedoraproject.org
@@ -169,24 +168,6 @@ buildhw-aarch64-01.arm.fedoraproject.org
buildhw-aarch64-02.arm.fedoraproject.org
buildhw-aarch64-03.arm.fedoraproject.org
[dell-fx-build]
# dell-fx01-01.phx2.fedoraproject.org
# dell-fx01-02.phx2.fedoraproject.org
# dell-fx01-03.phx2.fedoraproject.org
# dell-fx01-04.phx2.fedoraproject.org
# dell-fx01-05.phx2.fedoraproject.org
# dell-fx01-06.phx2.fedoraproject.org
# dell-fx01-07.phx2.fedoraproject.org
# dell-fx01-08.phx2.fedoraproject.org
# dell-fx02-01.phx2.fedoraproject.org
# dell-fx02-02.phx2.fedoraproject.org
# dell-fx02-03.phx2.fedoraproject.org
# dell-fx02-04.phx2.fedoraproject.org
# dell-fx02-05.phx2.fedoraproject.org
# dell-fx02-06.phx2.fedoraproject.org
# dell-fx02-07.phx2.fedoraproject.org
# dell-fx02-08.phx2.fedoraproject.org
#
# These are primary koji builders.
#
@@ -259,9 +240,6 @@ arm01
arm02
arm04
#
# These are secondary arch builders.
#
[arm01]
# 01 is in use as retrace instance
arm01-builder00.arm.fedoraproject.org
@@ -286,6 +264,7 @@ arm01-builder18.arm.fedoraproject.org
arm01-builder19.arm.fedoraproject.org
arm01-builder20.arm.fedoraproject.org
arm01-builder21.arm.fedoraproject.org
# These two are used in staging
#arm01-builder22.arm.fedoraproject.org
#arm01-builder23.arm.fedoraproject.org

@@ -81,3 +81,4 @@ twisted-fedora25-2.fedorainfracloud.org
twisted-rhel7-1.fedorainfracloud.org
twisted-rhel7-2.fedorainfracloud.org
waiverdb-dev.fedorainfracloud.org
upstreamfirst.fedorainfracloud.org

@@ -138,6 +138,12 @@ wildcard_crt_file: wildcard-2017.fedoraproject.org.cert
wildcard_key_file: wildcard-2017.fedoraproject.org.key
wildcard_int_file: wildcard-2017.fedoraproject.org.intermediate.cert
# This is the openshift wildcard cert. Until it exists set it equal to wildcard
os_wildcard_cert_name: wildcard-2017.fedoraproject.org
os_wildcard_crt_file: wildcard-2017.fedoraproject.org.cert
os_wildcard_key_file: wildcard-2017.fedoraproject.org.key
os_wildcard_int_file: wildcard-2017.fedoraproject.org.intermediate.cert
# Everywhere, always, we should sign messages and validate signatures.
# However, we allow individual hosts and groups to override this. Use this very
# carefully.. and never in production (good for testing stuff in staging).
@@ -266,4 +272,6 @@ nagios_Check_Services:
  dhcpd: false
  httpd: false
# Set variable if we want to use our global iptables defaults
# Some things need to set their own.
baseiptables: True

@@ -1,26 +1,2 @@
---
host_group: kojibuilder
fas_client_groups: sysadmin-releng,sysadmin-secondary
sudoers: "{{ private }}/files/sudo/buildsecondary-sudoers"
gw: 10.5.78.254
kojipkgs_url: armpkgs.fedoraproject.org
kojihub_url: arm.koji.fedoraproject.org/kojihub
kojihub_scheme: https
koji_hub_nfs: "fedora_arm/data"
koji_server_url: "https://arm.koji.fedoraproject.org/kojihub"
koji_weburl: "https://arm.koji.fedoraproject.org/koji"
koji_topurl: "https://armpkgs.fedoraproject.org/"
# These variables are pushed into /etc/system_identification by the base role.
# Groups and individual hosts should override them with specific info.
# See http://infrastructure.fedoraproject.org/csi/security-policy/
csi_security_category: High
csi_primary_contact: Fedora Admins - admin@fedoraproject.org
csi_purpose: Koji service employs a set of machines to build packages for the Fedora project. This group builds packages for aarch64 architecture.
csi_relationship: |
    * Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
    * Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
    * Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"

@@ -17,7 +17,7 @@ virt_install_command: "{{ virt_install_command_one_nic }} --graphics none"
# the host_vars/$hostname file
host_group: kojibuilder
fas_client_groups: sysadmin-releng,sysadmin-secondary
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"
koji_hub_nfs: "fedora_ppc/data"
koji_server_url: "https://ppc.koji.fedoraproject.org/kojihub"

@@ -17,7 +17,7 @@ virt_install_command: "{{ virt_install_command_one_nic }} --graphics none"
# the host_vars/$hostname file
host_group: kojibuilder
fas_client_groups: sysadmin-releng,sysadmin-secondary
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"
koji_hub_nfs: "fedora_ppc/data"
koji_server_url: "https://ppc.koji.fedoraproject.org/kojihub"

inventory/group_vars/ci (new file)
@@ -0,0 +1,100 @@
---
############################################################
# general information
############################################################
# common items for the releng-* boxes
lvm_size: 50000
mem_size: 4096
num_cpus: 4
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [ 80, 443, "{{ resultsdb_db_port }}" ]
fas_client_groups: sysadmin-qa,sysadmin-noc,sysadmin-web
nrpe_procs_warn: 250
nrpe_procs_crit: 300
external_hostname: resultsdb.ci.centos.org
deployment_type: prod
freezes: false
#
# PostgreSQL configuration
#
shared_buffers: "32MB"
effective_cache_size: "512MB"
############################################################
# resultsdb details
############################################################
# the db_host_machine bits are so that delegation continues to work, even if
# that db is localhost relative to resultsdb
resultsdb_db_host_machine: ci-cc-rdu01.fedoraproject.org
resultsdb_db_host: "{{ resultsdb_db_host_machine }}"
resultsdb_db_port: 5432
resultsdb_endpoint: 'resultsdb_api'
resultsdb_db_name: resultsdb
resultsdb_db_user: "{{ ci_resultsdb_db_user }}"
resultsdb_db_password: "{{ ci_resultsdb_db_password }}"
resultsdb_secret_key: "{{ ci_resultsdb_secret_key }}"
allowed_hosts:
- 10.5.124
- 10.5.131
############################################################
# resultsdb-frontend details
############################################################
resultsdb_fe_endpoint: "resultsdb"
resultsdb_frontend_secret_key: "{{ ci_resultsdb_frontend_secret_key }}"
###########################################################
# execdb details
###########################################################
execdb_db_host_machine: ci-cc-rdu01.fedoraproject.org
execdb_db_host: "{{ execdb_db_host_machine }}"
execdb_db_port: 5432
execdb_endpoint: 'execdb'
execdb_db_name: execdb
execdb_db_user: "{{ ci_execdb_db_user }}"
execdb_db_password: "{{ ci_execdb_db_password }}"
execdb_secret_key: "{{ ci_execdb_secret_key }}"
###########################################################
# ccsdb details
###########################################################
ccsdb_db_host_machine: ci-cc-rdu01.fedoraproject.org
ccsdb_db_host: "{{ ccsdb_db_host_machine }}"
ccsdb_db_port: 5432
ccsdb_endpoint: 'ccsdb'
ccsdb_db_name: ccsdb
ccsdb_db_user: "{{ ci_ccsdb_db_user }}"
ccsdb_db_password: "{{ ci_ccsdb_db_password }}"
ccsdb_secret_key: "{{ ci_ccsdb_secret_key }}"
############################################################
# fedmsg details
############################################################
fedmsg_active: False
fedmsg_cert_prefix: ci.resultsdb
fedmsg_certs:
- service: shell
  owner: root
  group: sysadmin
  can_send:
  - logger.log
- service: resultsdb
  owner: root
  group: apache
  can_send:
  - taskotron.result.new

@@ -7,7 +7,6 @@ copr_nova_tenant_name: "copr"
copr_nova_username: "copr"
# copr_builder_image_name: "Fedora-Cloud-Base-20141203-21"
copr_builder_image_name: "builder-f24"
copr_builder_flavor_name: "ms2.builder"
copr_builder_network_name: "copr-net"
copr_builder_key_name: "buildsys"

@@ -15,6 +15,8 @@ tcp_ports: [
# TODO, restrict this down to just sysadmin-releng
fas_client_groups: sysadmin-datanommer,sysadmin-releng,sysadmin-fedimg,fi-apprentice,sysadmin-noc,sysadmin-veteran
fedmsg_debug_loopback: True
# These people get told when something goes wrong.
fedmsg_error_recipients:
- sysadmin-fedimg-members@fedoraproject.org

@@ -0,0 +1,5 @@
---
freezes: false
mem_size: 4096
num_cpus: 2
tcp_ports: [22, 80, 443]

@@ -3,3 +3,4 @@ host_group: openstack-compute
nrpe_procs_warn: 1100
nrpe_procs_crit: 1200
ansible_ifcfg_blacklist: true
baseiptables: False

inventory/group_vars/os (new file)
@@ -0,0 +1,3 @@
---
host_group: os
baseiptables: False

@@ -0,0 +1,4 @@
---
os_url: os.stg.fedoraproject.org
os_app_url: app.os.stg.fedoraproject.org

@@ -0,0 +1,4 @@
---
os_url: os.stg.fedoraproject.org
os_app_url: app.os.stg.fedoraproject.org

@@ -0,0 +1,4 @@
---
os_url: os.stg.fedoraproject.org
os_app_url: app.os.stg.fedoraproject.org

@@ -0,0 +1,3 @@
---
host_group: os
baseiptables: False

@@ -19,3 +19,5 @@ osbs_koji_username: "kojibuilder"
koji_url: "koji.fedoraproject.org"
osbs_client_conf_path: /etc/osbs.conf
baseiptables: False

@@ -0,0 +1,2 @@
---
baseiptables: False

@@ -16,7 +16,7 @@ tcp_ports: [ 80, 443,
# Needed for rsync from log01 for logs.
custom_rules: [ '-A INPUT -p tcp -m tcp -s 10.5.126.13 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT' ]
fas_client_groups: sysadmin-noc,sysadmin-web,sysadmin-veteran
fas_client_groups: sysadmin-noc,sysadmin-web,fi-apprentice,sysadmin-veteran
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:

@@ -36,10 +36,10 @@ buildmaster_template: ci.master.cfg.j2
buildmaster_endpoint: buildmaster
buildslave_ssh_pubkey: ''
buildslave_port: 9989
buildmaster_dir: /home/buildmaster/master
buildslave_dir: /home/buildslave/slave
buildmaster_dir: /srv/buildmaster/master
buildslave_dir: /srv/buildslave/slave
buildslave_poll_interval: 1800
buildmaster_home: /home/buildmaster
buildmaster_home: /srv/buildmaster
buildmaster_user: buildmaster
# build details

@@ -10,6 +10,11 @@ wildcard_cert_file: wildcard-2017.stg.fedoraproject.org.cert
wildcard_key_file: wildcard-2017.stg.fedoraproject.org.key
wildcard_int_file: wildcard-2017.stg.fedoraproject.org.intermediate.cert
# This is the openshift wildcard cert for stg
os_wildcard_cert_name: wildcard-2017.app.os.stg.fedoraproject.org
os_wildcard_cert_file: wildcard-2017.app.os.stg.fedoraproject.org.cert
os_wildcard_key_file: wildcard-2017.app.os.stg.fedoraproject.org.key
os_wildcard_int_file: wildcard-2017.stg.fedoraproject.org.intermediate.cert
# This only does anything if the host is not RHEL6
collectd_graphite: True

@@ -29,7 +29,7 @@ grokmirror_repos:
- { name: fedoraqa/abicheck, url: 'https://pagure.io/task-abicheck.git'}
- { name: fedoraqa/rpmgrill, url: 'https://bitbucket.org/fedoraqa/task-rpmgrill.git'}
- { name: fedoraqa/simpledocker, url: 'https://bitbucket.org/fedoraqa/task-simpledocker.git'}
- { name: fedoraqa/python-versions, url: 'https://github.com/fedora-python/task-python-versions'}
- { name: fedoraqa/python-versions, url: 'https://github.com/fedora-python/taskotron-python-versions'}
- { name: fedoraqa/check_modulemd, url: 'https://github.com/fedora-modularity/check_modulemd'}
- { name: fedoraqa/rpmdeplint, url: 'https://pagure.io/taskotron/task-rpmdeplint.git'}
- { name: fedoraqa/rpmlint-scratch, url: 'https://bitbucket.org/fedoraqa/task-rpmlint-scratch.git'}

@@ -23,7 +23,7 @@ grokmirror_repos:
- { name: fedoraqa/dockerautotest, url: 'https://bitbucket.org/fedoraqa/task-dockerautotest.git'}
- { name: fedoraqa/abicheck, url: 'https://pagure.io/task-abicheck.git'}
- { name: fedoraqa/rpmgrill, url: 'https://bitbucket.org/fedoraqa/task-rpmgrill.git'}
- { name: fedoraqa/python-versions, url: 'https://github.com/fedora-python/task-python-versions'}
- { name: fedoraqa/python-versions, url: 'https://github.com/fedora-python/taskotron-python-versions'}
- { name: fedoraqa/check_modulemd, url: 'https://github.com/fedora-modularity/check_modulemd'}
- { name: fedoraqa/upstream-atomic, url: 'https://pagure.io/taskotron/task-upstream-atomic.git'}
- { name: fedoraqa/fedora-cloud-tests, url: 'https://pagure.io/taskotron/task-fedora-cloud-tests.git'}

@@ -29,7 +29,7 @@ grokmirror_repos:
- { name: fedoraqa/dockerautotest, url: 'https://bitbucket.org/fedoraqa/task-dockerautotest.git'}
- { name: fedoraqa/abicheck, url: 'https://pagure.io/task-abicheck.git'}
- { name: fedoraqa/rpmgrill, url: 'https://bitbucket.org/fedoraqa/task-rpmgrill.git'}
- { name: fedoraqa/python-versions, url: 'https://github.com/fedora-python/task-python-versions'}
- { name: fedoraqa/python-versions, url: 'https://github.com/fedora-python/taskotron-python-versions'}
- { name: fedoraqa/check_modulemd, url: 'https://github.com/fedora-modularity/check_modulemd'}
- { name: fedoraqa/rpmdeplint, url: 'https://pagure.io/taskotron/task-rpmdeplint.git'}
- { name: fedoraqa/rpmlint-scratch, url: 'https://bitbucket.org/fedoraqa/task-rpmlint-scratch.git'}

@@ -4,7 +4,10 @@ mem_size: 1024
num_cpus: 2
tcp_ports: [ 80, 443 ]
custom_rules: [ '-A INPUT -p tcp -m tcp -s 209.132.184.0/24 --dport 53 -j ACCEPT', '-A INPUT -p udp -m udp -s 209.132.184.0/24 --dport 53 -j ACCEPT' ]
custom_rules: [ '-A INPUT -p tcp -m tcp -s 209.132.184.0/24 --dport 53 -j ACCEPT',
'-A INPUT -p udp -m udp -s 209.132.184.0/24 --dport 53 -j ACCEPT',
'-A INPUT -p tcp -m tcp -s 209.132.181.0/24 --dport 53 -j ACCEPT',
'-A INPUT -p udp -m udp -s 209.132.181.0/24 --dport 53 -j ACCEPT' ]
fas_client_groups: sysadmin-dns
freezes: false

@@ -16,3 +16,4 @@ ks_url: http://10.5.126.23/repo/rhel/ks/buildvm-fedora-25
ks_repo: http://10.5.126.23/pub/fedora/linux/releases/25/Everything/x86_64/os/
virt_install_command: "{{ virt_install_command_one_nic }}"
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"

@@ -0,0 +1,16 @@
---
nrpe_procs_warn: 900
nrpe_procs_crit: 1000
datacenter: rdu-cc
eth0_ip: 8.43.85.69
eth0_nm: 255.255.255.0
gw: 8.43.85.254
nm: 255.255.255.0
dns: 8.8.8.8
postfix_group: vpn
vpn: true
volgroup: /dev/vg_guests
vmhost: virthost-cc-rdu01.fedoraproject.org
deployment_type: prod
ks_url: http://209.132.181.6/repo/rhel/ks/kvm-fedora-25-ext
ks_repo: http://209.132.181.6/pub/fedora/linux/releases/25/Server/x86_64/os/

@@ -18,3 +18,5 @@ kojihub_scheme: https
koji_server_url: "https://ppc.koji.fedoraproject.org/kojihub"
koji_weburl: "https://ppc.koji.fedoraproject.org/koji"
koji_topurl: "https://ppcpkgs.fedoraproject.org/"
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"

@@ -18,3 +18,5 @@ kojihub_scheme: https
koji_server_url: "https://arm.koji.fedoraproject.org/kojihub"
koji_weburl: "https://arm.koji.fedoraproject.org/koji"
koji_topurl: "https://armpkgs.fedoraproject.org/"
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"

@@ -20,3 +20,5 @@ kojihub_scheme: https
koji_server_url: "https://s390.koji.fedoraproject.org/kojihub"
koji_weburl: "https://s390.koji.fedoraproject.org/koji"
koji_topurl: "https://s390pkgs.fedoraproject.org/"
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"

@@ -22,7 +22,7 @@ dbs_to_backup:
- koji
fas_client_groups: sysadmin-dba,sysadmin-noc,sysadmin-releng,sysadmin-veteran
sudoers: "{{ private }}/files/sudo/00releng-sudoers"
sudoers: "{{ private }}/files/sudo/sysadmin-secondary-sudoers"
# These are normally group variables, but in this case db servers are often different
lvm_size: 500000

@@ -2,5 +2,3 @@
gw: 10.5.126.254
eth0_ip: 10.5.126.96
eth1_ip: 10.5.127.104
# This is a tier1 only host
rsyncd_conf: "rsyncd.conf.download-{{ datacenter }}-tier1"

@@ -4,3 +4,9 @@ nrpe_procs_warn: 900
nrpe_procs_crit: 1000
host_group: openstack-compute
ansible_ifcfg_blacklist: true
nagios_Check_Services:
  nrpe: true
  sshd: true
baseiptables: False

@@ -6,9 +6,7 @@ dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-25
ks_repo: http://10.5.126.23/pub/fedora/linux/releases/25/Server/x86_64/os/
mem_size: 4096
volgroup: /dev/vg_virthost03
eth0_ip: 10.5.126.230
vmhost: virthost03.phx2.fedoraproject.org
datacenter: phx2
tcp_ports: [22, 80, 443]

@@ -6,9 +6,7 @@ dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-fedora-25
ks_repo: http://10.5.126.23/pub/fedora/linux/releases/25/Server/x86_64/os/
mem_size: 4096
volgroup: /dev/vg_virthost01
eth0_ip: 10.5.126.238
vmhost: virthost01.phx2.fedoraproject.org
datacenter: phx2
tcp_ports: [22, 80, 443]

@@ -8,5 +8,5 @@ ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
volgroup: /dev/vg_guests
eth0_ip: 10.5.128.100
vmhost: virthost04.phx2.fedoraproject.org
vmhost: virthost05.phx2.fedoraproject.org
datacenter: phx2

@@ -2,11 +2,11 @@
nm: 255.255.255.0
gw: 10.5.128.254
dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-atomic-rhel-7
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7
ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
volgroup: /dev/vg_guests
eth0_ip: 10.5.128.101
vmhost: virthost11.phx2.fedoraproject.org
vmhost: virthost05.phx2.fedoraproject.org
datacenter: phx2
host_group: os-stg

@@ -2,11 +2,11 @@
nm: 255.255.255.0
gw: 10.5.128.254
dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-atomic-host-rhel-7
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7
ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
volgroup: /dev/vg_guests
eth0_ip: 10.5.128.102
vmhost: virthost04.phx2.fedoraproject.org
vmhost: virthost05.phx2.fedoraproject.org
datacenter: phx2
host_group: os-stg

@@ -2,11 +2,11 @@
nm: 255.255.255.0
gw: 10.5.128.254
dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-atomic-host-rhel-7
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7
ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
volgroup: /dev/vg_guests
eth0_ip: 10.5.128.103
vmhost: virthost04.phx2.fedoraproject.org
vmhost: virthost05.phx2.fedoraproject.org
datacenter: phx2
host_group: os-stg

@@ -2,11 +2,11 @@
nm: 255.255.255.0
gw: 10.5.128.254
dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-atomic-host-rhel-7
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7
ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
volgroup: /dev/vg_guests
eth0_ip: 10.5.128.104
vmhost: virthost11.phx2.fedoraproject.org
vmhost: virthost05.phx2.fedoraproject.org
datacenter: phx2
host_group: os-nodes-stg

@@ -2,11 +2,11 @@
nm: 255.255.255.0
gw: 10.5.128.254
dns: 10.5.126.21
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-atomic-host-7
ks_url: http://10.5.126.23/repo/rhel/ks/kvm-rhel-7
ks_repo: http://10.5.126.23/repo/rhel/RHEL7-x86_64/
volgroup: /dev/vg_guests
eth0_ip: 10.5.128.105
vmhost: virthost04.phx2.fedoraproject.org
vmhost: virthost05.phx2.fedoraproject.org
datacenter: phx2
host_group: os-nodes-stg

@@ -0,0 +1,165 @@
---
############################################################
# Persistent Cloud
############################################################
instance_type: m1.medium
image: CentOS-7-x86_64-GenericCloud-1503
keypair: fedora-admin-20130801
zone: nova
inventory_tenant: persistent
inventory_instance_name: upstreamfirst
hostbase: upstreamfirst
public_ip: 209.132.184.153
root_auth_users: tflink roshi
description: upstream-first pagure server
security_group: ssh-anywhere-persistent,web-443-anywhere-persistent,web-80-anywhere-persistent,default,all-icmp-persistent,mail-25-anywhere-persistent,allow-nagios-persistent,fedmsg-relay-persistent,pagure-ports
volumes:
- volume_id: 81c1cb3e-5fb0-4abd-a252-b0102f1378de
  device: /dev/vdc
cloud_networks:
# persistent-net
- net-id: "67b77354-39a4-43de-b007-bb813ac5c35f"
############################################################
# General configuration
############################################################
tcp_ports: [ 22, 25, 80, 443, 9418,
# Used for the eventsource server
8088,
# This is for the pagure public fedmsg relay
9940]
external_hostname: 'upstreamfirst.fedorainfracloud.org'
############################################################
# Backup
############################################################
dbs_to_backup:
- postgres
- pagure
host_backup_targets:
- '/backups'
- '/srv/git'
############################################################
# PostgreSQL configuration
############################################################
shared_buffers: "2GB"
effective_cache_size: "6GB"
############################################################
# Pagure Config
############################################################
new_pagure_db_admin_user: "{{ upstreamfirst_pagure_db_admin_user }}"
new_pagure_db_admin_pass: "{{ upstreamfirst_pagure_db_admin_pass }}"
new_pagure_db_user: "{{ upstreamfirst_pagure_db_user }}"
new_pagure_db_pass: "{{ upstreamfirst_pagure_db_pass }}"
# there are two db hosts here to work around the pg_hba that's in postgres_server
# we need to delegate postgres admin commands to a host that is remote from where
# this playbook is run but have to use localhost for the application to run in the
# case where we're using a local postgres instance
new_pagure_db_host: "127.0.0.1"
new_pagure_db_command_host: "{{ inventory_hostname }}"
new_pagure_db_name: "{{ upstreamfirst_pagure_db_name }}"
new_pagure_secret_key: "{{ upstreamfirst_pagure_db_admin_user }}"
new_pagure_secret_salt_email: "{{ upstreamfirst_pagure_secret_salt_email }}"
pagure_admin_email: 'tflink@fedoraproject.org'
pagure_ssh_host_pubkey: 'ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQC/bYFmX8pthJHcM2J85+mmN8pGJ/EJMcsdwoazihcooIBONcUazYF/BVV5/3nK7H3shq2nLR7vmdd2NuFHOPNsaAMK6nlADEg2tsKMC3UHHnwo1/iIO21pvf7+w2KIKCNIhiYA70W1aIxFBMZ7oo0VXjZ19PBwg6huAh0CBrLBP+XU4QN6LgLd87T5qMN/7g/QVqDforeoL8NUSQXMfzYNbxXPdRvMc5vbEMS/QNu5I8Ycu6FDqChnWc5Qd2orVCNreEMKwkgW27+FTpxzAnq3avotb0Cv1WuZjd8q402ldvp+ELcS8WHc+Mx41KaR//QTlSIYeX4OlcX/pl6C+Sdz'
# ssh-keygen -lf /etc/ssh/ssh_host_rsa_key.pub
pagure_ssh_host_fingerprint: '2048 6b:d8:48:27:5a:11:d1:14:e0:c1:91:23:45:c7:fb:6d (RSA)'
# awk '{print $2}' /etc/ssh/ssh_host_rsa_key.pub | base64 -d | sha256sum -b | awk '{print $1}' | xxd -r -p | base64
pagure_ssh_host_sha256: 'SHA256:ggRdzg+ugyR6WIzeiuyASAdEHf+HG5yZqJJIu/YTtHI='
new_pagure_admin_groups: ['sysadmin-main', 'sysadmin-qa']
pagure_instance_name: "Upstream First Pagure"
pagure_theme_static_dir: "/var/www/upstreamfirst-paguretheme/static"
pagure_theme_template_dir: "/var/www/upstreamfirst-paguretheme/templates"
stunnel_service: "eventsource"
stunnel_source_port: 8088
stunnel_destination_port: 8080
# not doing anything with fedmsg right now
## These are consumed by a task in roles/fedmsg/base/main.yml
#fedmsg_certs:
#- service: shell
# owner: root
# group: sysadmin
# can_send:
# - logger.log
#- service: pagure
# owner: git
# group: apache
# can_send:
# - pagure.issue.assigned.added
# - pagure.issue.assigned.reset
# - pagure.issue.comment.added
# - pagure.issue.dependency.added
# - pagure.issue.dependency.removed
# - pagure.issue.edit
# - pagure.issue.new
# - pagure.issue.tag.added
# - pagure.issue.tag.removed
# - pagure.project.edit
# - pagure.project.forked
# - pagure.project.new
# - pagure.project.tag.edited
# - pagure.project.tag.removed
# - pagure.project.user.added
# - pagure.pull-request.closed
# - pagure.pull-request.comment.added
# - pagure.pull-request.flag.added
# - pagure.pull-request.flag.updated
# - pagure.request.assigned.added
# - pagure.pull-request.new
#
#fedmsg_prefix: io.pagure
#fedmsg_env: stg
fas_client_groups: sysadmin-noc,sysadmin-web,sysadmin-qa
freezes: false
#env: pagure-staging
#postfix_group: vpn.pagure-stg
# Configuration for the git-daemon/server
git_group: git
git_port: 9418
git_server: /usr/libexec/git-core/git-daemon
git_server_args: --export-all --syslog --inetd --verbose
git_basepath: /srv/git/repositories
git_daemon_user: git
# For the MOTD
csi_security_category: Low
csi_primary_contact: Fedora admins - admin@fedoraproject.org
csi_purpose: Stage testcases being submitted upstream to Fedora
csi_relationship: |
    There are a few things running here:
    - The apache/mod_wsgi app for pagure
    - This host relies on:
    - A postgres db server running locally
    - Things that rely on this host:
    - nothing currently
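
The pagure_ssh_host_sha256 comment above records the shell pipeline used to derive the fingerprint; the following is a rough Python equivalent, assuming the same key path. Run on that host, it should print the value recorded above.

# Recompute an OpenSSH SHA256 host-key fingerprint, mirroring the
# "awk ... | base64 -d | sha256sum -b | ... | base64" pipeline quoted above.
import base64
import hashlib

with open('/etc/ssh/ssh_host_rsa_key.pub') as f:
    key_blob = base64.b64decode(f.read().split()[1])  # field 2 is the base64 key blob

digest = hashlib.sha256(key_blob).digest()
print('SHA256:' + base64.b64encode(digest).decode())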

@@ -12,6 +12,7 @@ hostbase: waverdb-dev
public_ip: 209.132.184.51
root_auth_users: mjia
description: waverdb development instance
deployment_type: dev
cloud_networks:
# persistent-net

@@ -1,9 +1,3 @@
# dummies until the multiple inventory group import issue is fixed in
# ansible
[builders]
[bkernel]
[buildvmhost]
[beaker]
beaker01.qa.fedoraproject.org
@@ -266,6 +260,9 @@ autocloud-backend01.stg.phx2.fedoraproject.org
autocloud-backend02.stg.phx2.fedoraproject.org
[autosign]
#
# autosign01 does not listen to ssh by default
#
#autosign01.phx2.fedoraproject.org
[autosign-stg]
@@ -345,7 +342,6 @@ download-phx2
download-ibiblio
download-rdu2
[elections]
elections01.phx2.fedoraproject.org
elections02.phx2.fedoraproject.org
@@ -372,7 +368,6 @@ hotness01.stg.phx2.fedoraproject.org
[kerneltest]
kerneltest01.phx2.fedoraproject.org
#kerneltest02.phx2.fedoraproject.org
[kerneltest-stg]
kerneltest01.stg.phx2.fedoraproject.org
@@ -444,7 +439,6 @@ iddev.fedorainfracloud.org
dhcp01.phx2.fedoraproject.org
[nagios]
#noc01.phx2.fedoraproject.org
noc02.fedoraproject.org
[nagios-new]
@@ -919,7 +913,6 @@ zanata2fedmsg01.stg.phx2.fedoraproject.org
#[zanata2fedmsg]
#zanata2fedmsg01.phx2.fedoraproject.org
# This is a convenience group listing the hosts that live on the QA network that
# are allowed to send inbound fedmsg messages to our production fedmsg bus.
# See also:
@@ -938,7 +931,6 @@ openqa01.qa.fedoraproject.org
resultsdb-stg01.qa.fedoraproject.org
openqa-stg01.qa.fedoraproject.org
# assorted categories of fedmsg services, for convenience
[fedmsg-hubs:children]
autocloud-backend
@@ -1159,6 +1151,8 @@ respins.fedorainfracloud.org
waiverdb-dev.fedorainfracloud.org
# hubs-dev
hubs-dev.fedorainfracloud.org
# upstreamfirst - ticket 6066
upstreamfirst.fedorainfracloud.org
#
# These are in the new cloud
@@ -1330,18 +1324,36 @@ osbs-master01.stg.phx2.fedoraproject.org
osbs-node01.stg.phx2.fedoraproject.org
osbs-node02.stg.phx2.fedoraproject.org
[osbs:children]
osbs-control
osbs-nodes
osbs-masters
[osbs-stg:children]
osbs-control-stg
osbs-nodes-stg
osbs-masters-stg
[os-control-stg]
os-control01.stg.phx2.fedoraproject.org
[os-master-stg]
[os-masters-stg]
os-master01.stg.phx2.fedoraproject.org
os-master02.stg.phx2.fedoraproject.org
os-master03.stg.phx2.fedoraproject.org
[os-node-stg]
[os-nodes-stg]
os-node01.stg.phx2.fedoraproject.org
os-node02.stg.phx2.fedoraproject.org
[os-stg:children]
os-nodes-stg
os-masters-stg
os-control-stg
[ci]
ci-cc-rdu01.fedoraproject.org
# Docker (docker-distribution) registries
[docker-registry]
docker-registry01.phx2.fedoraproject.org
@@ -1352,6 +1364,14 @@ docker-registry01.stg.phx2.fedoraproject.org
docker-registry02.stg.phx2.fedoraproject.org
docker-candidate-registry01.stg.phx2.fedoraproject.org
## Not the candidate just the top registry
[moby-registry]
docker-registry01.phx2.fedoraproject.org
## Not the candidate just the top registry
[moby-registry-stg]
docker-registry01.phx2.fedoraproject.org
[webservers:children]
proxies
ipsilon

@@ -33,6 +33,7 @@
- include: /srv/web/infra/ansible/playbooks/groups/buildvm.yml
- include: /srv/web/infra/ansible/playbooks/groups/bugyou.yml
- include: /srv/web/infra/ansible/playbooks/groups/busgateway.yml
- include: /srv/web/infra/ansible/playbooks/groups/ci.yml
- include: /srv/web/infra/ansible/playbooks/groups/copr-backend.yml
- include: /srv/web/infra/ansible/playbooks/groups/copr-dist-git.yml
- include: /srv/web/infra/ansible/playbooks/groups/copr-frontend.yml
@@ -107,10 +108,8 @@
- include: /srv/web/infra/ansible/playbooks/groups/summershum.yml
- include: /srv/web/infra/ansible/playbooks/groups/sundries.yml
- include: /srv/web/infra/ansible/playbooks/groups/tagger.yml
- include: /srv/web/infra/ansible/playbooks/groups/taskotron.yml
- include: /srv/web/infra/ansible/playbooks/groups/taskotron-client-hosts.yml
- include: /srv/web/infra/ansible/playbooks/groups/taskotron-prod.yml
- include: /srv/web/infra/ansible/playbooks/groups/taskotron-dev.yml
- include: /srv/web/infra/ansible/playbooks/groups/taskotron-stg.yml
- include: /srv/web/infra/ansible/playbooks/groups/torrent.yml
- include: /srv/web/infra/ansible/playbooks/groups/twisted-buildbots.yml
- include: /srv/web/infra/ansible/playbooks/groups/unbound.yml
@@ -145,10 +144,11 @@
- include: /srv/web/infra/ansible/playbooks/hosts/insim.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/lists-dev.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/kolinahr.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/magazine.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/magazine2.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/modernpaste.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/modularity.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/piwik.fedorainfracloud.org.yml
#- include: /srv/web/infra/ansible/playbooks/hosts/regcfp.fedorainfracloud.org.yml
#- include: /srv/web/infra/ansible/playbooks/hosts/regcfp2.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/respins.fedorainfracloud.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/shogun-ca.cloud.fedoraproject.org.yml
- include: /srv/web/infra/ansible/playbooks/hosts/shumgrepper-dev.fedorainfracloud.org.yml

@@ -50,6 +50,7 @@
- name: make sure httpd is running
service: name=httpd state=started enabled=yes
when: not inventory_hostname_short == "buildvm-s390x-07"
- name: make sure kojid is running
service: name=kojid state=started enabled=yes

@@ -3,10 +3,10 @@
# NOTE: make sure there is room/space for this server on the vmhost
# NOTE: most of these vars_path come from group_vars/mirrorlist or from hostvars
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=taskotron-dev"
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=ci-cc-rdu01.fedoraproject.org"
- name: make the box be real
hosts: taskotron-dev
hosts: ci
user: root
gather_facts: True
@@ -24,7 +24,11 @@
- { role: collectd/base, tags: ['collectd_base'] }
- { role: dnf-automatic, tags: ['dnfautomatic'] }
- { role: sudo, tags: ['sudo'] }
- { role: openvpn/client,
when: deployment_type == "prod", tags: ['openvpn_client'] }
- postgresql_server
- apache
# - { role: fedmsg/base }
tasks:
# this is how you include other task lists
@@ -35,8 +39,8 @@
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
- name: configure taskotron master
hosts: taskotron-dev
- name: configure resultsdb production
hosts: ci
user: root
gather_facts: True
@@ -46,14 +50,11 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- { role: taskotron/grokmirror, tags: ['grokmirror'] }
# - { role: taskotron/cgit, tags: ['cgit'] }
- { role: taskotron/buildmaster, tags: ['buildmaster'] }
- { role: taskotron/buildmaster-configure, tags: ['buildmasterconfig'] }
- { role: taskotron/taskotron-trigger, tags: ['trigger'] }
- { role: taskotron/taskotron-frontend, tags: ['frontend'] }
- { role: taskotron/taskotron-proxy, tags: ['taskotronproxy'] }
- { role: taskotron/ssl-taskotron, tags: ['ssltaskotron'] }
# - { role: taskotron/resultsdb-fedmsg, tags: ['resultsdb-fedmsg']}
- { role: taskotron/resultsdb-backend, tags: ['resultsdb-be'] }
- { role: taskotron/resultsdb-frontend, tags: ['resultsdb-fe'] }
- { role: taskotron/execdb, tags: ['execdb'] }
- { role: ccsdb, tags: ['ccsdb'] }
handlers:
- include: "{{ handlers_path }}/restart_services.yml"

@@ -40,16 +40,36 @@
- "/srv/private/ansible/vars.yml"
- "/srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml"
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: git clone the releng-automation playbook repo
git:
repo: "https://pagure.io/releng-automation.git"
dest: "/usr/local/loopabull-playbooks"
- name: ensure ~/.ssh dir exists
file:
path: "/home/root/.ssh/"
state: directory
- name: place loopabull_ociimage user private keys
copy:
src: "{{ private }}/files/loopabull/keys/{{ env }}_ociimage"
dest: "/home/root/.ssh/id_rsa.loopabull_ociimage"
mode: 0600
- name: Install required packages
package:
name: python-fedmsg-rabbitmq-serializer
state: latest
roles:
- rabbitmq
- fedmsg/base
- fedmsg/hub
- {
role: loopabull,
plugin: fedmsg,
loglevel: info,
plugin: fedmsgrabbitmq,
routing_keys: [
"org.fedoraproject.prod.buildsys.build.state.change"
],
@@ -59,3 +79,22 @@
}
- name: Post Loopabull install configuration
hosts: loopabull-stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- "/srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml"
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: Enable fedmsg-rabbitmq-serializer
copy:
src: "{{files}}/loopabull/serializer.py"
dest: "/etc/fedmsg.d/serializer.py"
notify: restart fedmsg-hub

@@ -1,6 +1,5 @@
# create an os server
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=os-control-stg:os-control"
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=os-nodes-stg:os-masters-stg:os-nodes:os-masters"
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=os-control-stg:os-control:os-nodes-stg:os-masters-stg:os-nodes:os-masters"
- name: make the box be real
hosts: os-control:os-control-stg:os-masters-stg:os-nodes-stg:os-masters:os-nodes
@@ -23,6 +22,18 @@
- sudo
tasks:
- name: put openshift 3.4 repo on os- systems
copy: src="{{ files }}/openshift/openshift.repo" dest="/etc/yum.repos.d/openshift.repo"
tags:
- config
- packages
- yumrepos
- name: Deploy controller public ssh keys to osbs cluster hosts
authorized_key:
user: root
key: "{{ lookup('file', '{{private}}/files/os/{{env}}/control_key.pub') }}"
- include: "{{ tasks_path }}/yumrepos.yml"
- include: "{{ tasks_path }}/2fa_client.yml"
- include: "{{ tasks_path }}/motd.yml"
@@ -43,6 +54,13 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: fix hosts file
copy:
src: "{{roles_path}}/hosts/files/os-hosts"
dest: "/etc/hosts"
owner: root
mode: 0644
- name: deploy private key to control hosts
copy:
src: "{{private}}/files/os/{{env}}/control_key"
@@ -57,10 +75,10 @@
option: pipelining
value: "True"
- name: Setup cluster masters pre-reqs
hosts: os-masters-stg:os-masters
- name: Deploy OpenShift cluster
hosts: os-control-stg
tags:
- os-cluster-prereq
- os-cluster-deploy
user: root
gather_facts: True
@@ -69,90 +87,44 @@
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: ensure origin conf dir exists
file:
path: "/etc/origin"
state: "directory"
roles:
- {
role: ansible-ansible-openshift-ansible,
cluster_inventory_filename: "cluster-inventory-stg",
openshift_release: "v3.5",
openshift_ansible_path: "/root/openshift-ansible",
openshift_ansible_playbook: "playbooks/byo/config.yml",
openshift_ansible_version: "openshift-ansible-3.5.70-1",
openshift_ansible_ssh_user: root,
openshift_ansible_install_examples: true,
openshift_ansible_containerized_deploy: false,
openshift_cluster_masters_group: "os-masters-stg",
openshift_cluster_nodes_group: "os-nodes-stg",
openshift_cluster_infra_group: "os-nodes-stg",
openshift_auth_profile: "fedoraidp-stg",
openshift_master_ha: true,
openshift_debug_level: 2,
openshift_deployment_type: "openshift-enterprise",
openshift_cluster_url: "{{ os_url}}",
openshift_app_subdomain: "{{ os_app_url }}",
openshift_internal_cluster_url: "os-masters{{ env_suffix }}.phx2.fedoraproject.org",
openshift_api_port: 443,
openshift_console_port: 443,
openshift_shared_infra: true,
when: env == 'staging',
tags: ['openshift-cluster','ansible-ansible-openshift-ansible']
}
- name: create cert dir for openshift public facing REST API SSL
file:
path: "/etc/origin/master/named_certificates"
state: "directory"
- name: install cert for openshift public facing REST API SSL
copy:
src: "{{private}}/files/os/{{env}}/os-internal.pem"
dest: "/etc/origin/master/named_certificates/{{os}}.pem"
- name: install key for openshift public facing REST API SSL
copy:
src: "{{private}}/files/os/{{env}}/os-internal.key"
dest: "/etc/origin/master/named_certificates/{{os}}.key"
- name: place htpasswd file
copy:
src: "{{private}}/files/httpd/os-{{env}}.htpasswd"
dest: /etc/origin/htpasswd
- name: Setup cluster hosts pre-reqs
hosts: os-masters-stg:os-nodes-stg:os-masters:os-nodes
- name: Post-Install setup
hosts: os-stg:os
tags:
- os-cluster-prereq
user: root
gather_facts: True
- os-post-install
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- name: restart NetworkManager
service:
name: NetworkManager
state: restarted
tasks:
- name: Install necessary packages that openshift-ansible needs
package: name="{{ item }}" state=installed
with_items:
- tar
- rsync
- dbus-python
- NetworkManager
- libselinux-python
- origin
- name: Deploy controller public ssh keys to os cluster hosts
authorized_key:
user: root
key: "{{ lookup('file', '{{private}}/files/os/{{env}}/control_key.pub') }}"
# This is required for OpenShift built-in SkyDNS inside the overlay network
# of the cluster
- name: ensure NM_CONTROLLED is set to "yes" for os cluster
lineinfile:
dest: "/etc/sysconfig/network-scripts/ifcfg-eth0"
line: "NM_CONTROLLED=yes"
notify:
- restart NetworkManager
# This is required for OpenShift built-in SkyDNS inside the overlay network
# of the cluster
- name: ensure NetworkManager is enabled and started
service:
name: NetworkManager
state: started
enabled: yes
- name: cron entry to clean up docker storage
copy:
src: "{{files}}/os/cleanup-docker-storage"
dest: "/etc/cron.d/cleanup-docker-storage"
- name: copy docker-storage-setup config
copy:
src: "{{files}}/os/docker-storage-setup"
dest: "/etc/sysconfig/docker-storage-setup"
- name: enable nrpe for monitoring (noc01)
iptables: action=insert chain=INPUT destination_port=5666 protocol=tcp source=10.5.126.41 state=present jump=ACCEPT

@@ -205,8 +205,18 @@
openshift_ansible_path: "/root/openshift-ansible",
openshift_ansible_playbook: "playbooks/byo/config.yml",
openshift_ansible_version: "openshift-ansible-3.3.57-1",
openshift_ansible_ssh_user: root,
openshift_ansible_install_examples: false,
openshift_ansible_containerized_deploy: false,
openshift_cluster_masters_group: "osbs-masters-stg",
openshift_cluster_nodes_group: "osbs-nodes-stg",
openshift_cluster_infra_group: "osbs-masters-stg",
openshift_auth_profile: "osbs",
openshift_cluster_url: "{{osbs_url}}",
openshift_master_ha: false,
openshift_debug_level: 2,
openshift_shared_infra: true,
openshift_deployment_type: "origin",
when: env == 'staging',
tags: ['openshift-cluster','ansible-ansible-openshift-ansible']
}
@@ -219,8 +229,18 @@
openshift_ansible_path: "/root/openshift-ansible",
openshift_ansible_playbook: "playbooks/byo/config.yml",
openshift_ansible_version: "openshift-ansible-3.3.57-1",
openshift_ansible_ssh_user: root,
openshift_ansible_install_examples: false,
openshift_ansible_containerized_deploy: false,
openshift_cluster_masters_group: "osbs-masters",
openshift_cluster_nodes_group: "osbs-nodes",
openshift_cluster_infra_group: "osbs-masters",
openshift_auth_profile: "osbs",
openshift_cluster_url: "{{osbs_url}}",
openshift_master_ha: false,
openshift_debug_level: 2,
openshift_shared_infra: true,
openshift_deployment_type: "origin",
when: env == 'production',
tags: ['openshift-cluster','ansible-ansible-openshift-ansible']
}

@@ -75,8 +75,8 @@
- role: apache
- role: httpd/certificate
name: wildcard-2014.fedorapeople.org
SSLCertificateChainFile: wildcard-2014.fedorapeople.org.intermediate.cert
name: wildcard-2017.fedorapeople.org
SSLCertificateChainFile: wildcard-2017.fedorapeople.org.intermediate.cert
- people

@@ -57,8 +57,8 @@
- include: "{{ handlers_path }}/restart_services.yml"
- name: configure qa uildbot CI
hosts: qa-prod:qa-stg
- name: configure qa buildbot CI
hosts: qa-stg
user: root
gather_facts: True
@@ -108,10 +108,11 @@
tags:
- qastaticsites
roles:
- { role: taskotron/imagefactory-client,
when: deployment_type != "qa-stg", tags: ['imagefactoryclient'] }
# don't need this if buildbot is not enabled
# roles:
# - { role: taskotron/imagefactory-client,
# when: deployment_type != "qa-stg", tags: ['imagefactoryclient'] }
#
handlers:
- include: "{{ handlers_path }}/restart_services.yml"

@@ -42,6 +42,9 @@
- role: loopabull/target
loopabull_role: koji
when: "env == 'staging' and inventory_hostname == 'composer.stg.phx2.fedoraproject.org'"
- role: loopabull/target
loopabull_role: ociimage
when: "env == 'staging' and inventory_hostname == 'composer.stg.phx2.fedoraproject.org'"
- { role: nfs/client, when: "'releng-stg' not in group_names", mnt_dir: '/mnt/fedora_koji', nfs_src_dir: "{{ koji_hub_nfs }}" }
- { role: nfs/client, when: "'releng-compose' in group_names", mnt_dir: '/pub', nfs_src_dir: 'fedora_ftp/fedora.redhat.com/pub' }
- { role: nfs/client, when: "'releng-secondary' in group_names", mnt_dir: '/pub/fedora-secondary', nfs_src_dir: 'fedora_ftp/fedora.redhat.com/pub/fedora-secondary' }
@@ -65,6 +68,7 @@
when: env == "staging"
}
tasks:
# this is how you include other task lists
- include: "{{ tasks_path }}/2fa_client.yml"

@@ -14,6 +14,9 @@
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
pre_tasks:
- include: "{{ tasks_path }}/yumrepos.yml"
roles:
- base
- rkhunter
@@ -26,7 +29,6 @@
- { role: openvpn/client, when: datacenter != "phx2" }
tasks:
- include: "{{ tasks_path }}/yumrepos.yml"
- include: "{{ tasks_path }}/2fa_client.yml"
- include: "{{ tasks_path }}/motd.yml"

@@ -1,58 +0,0 @@
---
# create a new taskotron production server
# NOTE: make sure there is room/space for this server on the vmhost
# NOTE: most of these vars_path come from group_vars/mirrorlist or from hostvars
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=taskotron-prod"
- name: make the box be real
hosts: taskotron-prod
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- { role: base, tags: ['base'] }
- { role: rkhunter, tags: ['rkhunter'] }
- { role: nagios_client, tags: ['nagios_client'] }
- { role: hosts, tags: ['hosts']}
- { role: fas_client, tags: ['fas_client'] }
- { role: collectd/base, tags: ['collectd_base'] }
- { role: sudo, tags: ['sudo'] }
- { role: openvpn/client,
when: env != "staging", tags: ['openvpn_client'] }
- apache
tasks:
# this is how you include other task lists
- include: "{{ tasks_path }}/yumrepos.yml"
- include: "{{ tasks_path }}/2fa_client.yml"
- include: "{{ tasks_path }}/motd.yml"
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
- name: configure taskotron master
hosts: taskotron-prod
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- { role: taskotron/grokmirror, tags: ['grokmirror'] }
# - { role: taskotron/cgit, tags: ['cgit'] }
- { role: taskotron/buildmaster, tags: ['buildmaster'] }
- { role: taskotron/buildmaster-configure, tags: ['buildmasterconfig'] }
- { role: taskotron/taskotron-trigger, tags: ['trigger'] }
- { role: taskotron/taskotron-frontend, tags: ['frontend'] }
handlers:
- include: "{{ handlers_path }}/restart_services.yml"

@@ -1,56 +0,0 @@
---
# create a new taskotron staging server
# NOTE: make sure there is room/space for this server on the vmhost
# NOTE: most of these vars_path come from group_vars/mirrorlist or from hostvars
- include: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=taskotron-stg"
- name: make the box be real
hosts: taskotron-stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- { role: base, tags: ['base'] }
- { role: rkhunter, tags: ['rkhunter'] }
- { role: nagios_client, tags: ['nagios_client'] }
- { role: hosts, tags: ['hosts']}
- { role: fas_client, tags: ['fas_client'] }
- { role: collectd/base, tags: ['collectd_base'] }
- { role: sudo, tags: ['sudo'] }
- apache
tasks:
# this is how you include other task lists
- include: "{{ tasks_path }}/yumrepos.yml"
- include: "{{ tasks_path }}/2fa_client.yml"
- include: "{{ tasks_path }}/motd.yml"
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
- name: configure taskotron master
hosts: taskotron-stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- { role: taskotron/grokmirror, tags: ['grokmirror'] }
# - { role: taskotron/cgit, tags: ['cgit'] }
- { role: taskotron/buildmaster, tags: ['buildmaster'] }
- { role: taskotron/buildmaster-configure, tags: ['buildmasterconfig'] }
- { role: taskotron/taskotron-trigger, tags: ['trigger'] }
- { role: taskotron/taskotron-frontend, tags: ['frontend'] }
handlers:
- include: "{{ handlers_path }}/restart_services.yml"

@@ -38,8 +38,7 @@
- git: repo=https://pagure.io/fedora-hubs.git
dest=/srv/git/fedora-hubs
version=develop
register: git_result
changed_when: "git_result.after|default('after') != git_result.before|default('before')"
ignore_errors: true
- file: dest=/etc/fedmsg.d/ state=directory
- name: copy around a number of files we want
command: cp {{item.src}} {{item.dest}}

@@ -1,55 +0,0 @@
- name: check/create instance
hosts: magazine.fedorainfracloud.org
gather_facts: False
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/web/infra/ansible/vars/fedora-cloud.yml
- /srv/private/ansible/files/openstack/passwords.yml
tasks:
- include: "{{ tasks_path }}/persistent_cloud.yml"
- name: setup all the things
hosts: magazine.fedorainfracloud.org
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/private/ansible/files/openstack/passwords.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
pre_tasks:
- include: "{{ tasks_path }}/cloud_setup_basic.yml"
- name: set hostname (required by some services, at least postfix need it)
hostname: name="{{inventory_hostname}}"
tasks:
- name: add packages
yum: state=present name={{ item }}
with_items:
- httpd
- php
- php-mysql
- mariadb-server
- mariadb
- mod_ssl
- php-mcrypt
- php-mbstring
- wget
- unzip
- postfix
- name: enable httpd service
service: name=httpd enabled=yes state=started
- name: configure postfix for ipv4 only
raw: postconf -e inet_protocols=ipv4
- name: enable local postfix service
service: name=postfix enabled=yes state=started
roles:
- nagios_client
- mariadb_server

@@ -0,0 +1,70 @@
- name: check/create instance
hosts: upstreamfirst.fedorainfracloud.org
gather_facts: False
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/web/infra/ansible/vars/fedora-cloud.yml
- /srv/private/ansible/files/openstack/passwords.yml
tasks:
- include: "{{ tasks_path }}/persistent_cloud.yml"
- name: do base configuration
hosts: upstreamfirst.fedorainfracloud.org
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- base
- rkhunter
- nagios_client
- hosts
- fas_client
- sudo
- collectd/base
- postgresql_server
- certbot
tasks:
- include: "{{ tasks_path }}/yumrepos.yml"
- include: "{{ tasks_path }}/2fa_client.yml"
- include: "{{ tasks_path }}/motd.yml"
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
- name: deploy pagure
hosts: upstreamfirst.fedorainfracloud.org
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- "{{ vars_path }}/{{ ansible_distribution }}.yml"
# pre_tasks:
# - name: install fedmsg-relay
# yum: pkg=fedmsg-relay state=present
# tags:
# - pagure
# - pagure/fedmsg
# - name: and start it
# service: name=fedmsg-relay state=started
# tags:
# - pagure
# - pagure/fedmsg
#
roles:
- pagure/upstreamfirst-frontend
# - pagure/fedmsg
handlers:
- include: "{{ handlers_path }}/restart_services.yml"


@ -25,6 +25,9 @@
- name: set hostname (required by some services, at least postfix needs it)
hostname: name="{{inventory_hostname}}"
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
roles:
- nginx
- waiverdb


@ -30,6 +30,12 @@
- role: httpd/certificate
name: wildcard-2017.stg.fedoraproject.org
SSLCertificateChainFile: wildcard-2017.stg.fedoraproject.org.intermediate.cert
when: env == "staging"
- role: httpd/certificate
name: wildcard-2017.app.os.stg.fedoraproject.org
SSLCertificateChainFile: wildcard-2017.app.os.stg.fedoraproject.org.intermediate.cert
when: env == "staging"
- role: httpd/certificate
name: fedoramagazine.org


@ -25,6 +25,12 @@
path: /community
target: https://apps.fedoraproject.org/packages
- role: httpd/redirect
name: nagios
website: admin.fedoraproject.org
path: /nagios
target: https://nagios.fedoraproject.org/nagios/
- role: httpd/redirect
name: docs
website: fedoraproject.org


@ -37,13 +37,6 @@
remotepath: /api
proxyurl: https://copr.fedorainfracloud.org
- role: httpd/reverseproxy
website: admin.fedoraproject.org
destname: nagios
localpath: /nagios
remotepath: /nagios
proxyurl: http://noc01
- role: httpd/reverseproxy
website: nagios.fedoraproject.org
destname: nagios
@ -615,6 +608,18 @@
proxyurl: http://localhost:10062
keephost: true
- role: httpd/reverseproxy
website: os.fedoraproject.org
destname: os
proxyurl: http://localhost:10064
keephost: true
- role: httpd/reverseproxy
website: app.os.fedoraproject.org
destname: app.os
proxyurl: http://localhost:10065
keephost: true
- role: httpd/reverseproxy
website: data-analysis.fedoraproject.org
destname: awstats


@ -556,6 +556,19 @@
sslonly: true
cert_name: "{{wildcard_cert_name}}"
- role: httpd/website
name: os.fedoraproject.org
server_aliases: [os.stg.fedoraproject.org]
sslonly: true
cert_name: "{{wildcard_cert_name}}"
- role: httpd/website
name: app.os.fedoraproject.org
server_aliases: ["*.app.os.fedoraproject.org", "*.app.os.stg.fedoraproject.org"]
sslonly: true
cert_name: "{{os_wildcard_cert_name}}"
SSLCertificateChainFile: wildcard-2017.app.os.stg.fedoraproject.org.intermediate.cert
- role: httpd/website
name: registry.fedoraproject.org
server_aliases: [registry.stg.fedoraproject.org]


@ -0,0 +1,22 @@
- name: reload the frontend
hosts: pagure:pagure-stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- include: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: ask puiterwijk if he would like to capture debug info before restarting.
pause: seconds=30 prompt="Restarting pagure, abort if you want to get puiterwijk's attention first."
- name: Reload apache...
service: name="httpd" state=reloaded
post_tasks:
- name: tell nagios to unshush w.r.t. apache
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.phx2.fedoraproject.org
ignore_errors: true
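# A hypothetical companion task (not in this playbook): the unsilence above
# implies nagios was silenced earlier; with the same nagios module that would
# look like
#   - name: tell nagios to shush w.r.t. apache
#     nagios: action=silence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
#     delegate_to: noc01.phx2.fedoraproject.org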


@ -13,6 +13,7 @@
- include: "{{ handlers_path }}/restart_services.yml"
tasks:
- service: name=webui-qcluster state=stopped
- service: name=httpd state=stopped
- service: name=mailman3 state=stopped


@ -18,12 +18,11 @@
yum: name="python-fmn*" state=latest
when: not testing
- name: yum update FMN packages from testing repo
yum: pkg={{ item }} state=latest enablerepo=infrastructure-testing
with_items:
- python-fmn
- python-fmn-sse
- python-fmn-web
yum: pkg=python-fmn state=latest enablerepo=infrastructure-testing
when: testing
- name: yum update FMN packages from testing repo
yum: pkg=python-fmn state=latest enablerepo=epel-testing
when: env == "staging"
- name: verify the frontend and stop it
hosts: notifs-web:notifs-web-stg
@ -80,10 +79,10 @@
- fmn-worker@4
- name: Upgrade the database
command: /usr/bin/alembic -c /usr/share/fmn.lib/alembic.ini upgrade head
command: /usr/bin/alembic -c /usr/share/fmn/alembic.ini upgrade head
when: env != "staging"
args:
chdir: /usr/share/fmn.lib/
chdir: /usr/share/fmn/
- name: Re-start the workers and the backend
service: name={{ item }} state=started
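# Illustrative follow-up (an assumption, not part of this playbook): the
# result of the alembic upgrade above could be confirmed with a task like
#   - name: show the current FMN schema revision
#     command: /usr/bin/alembic -c /usr/share/fmn/alembic.ini current
#     args:
#       chdir: /usr/share/fmn/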


@ -39,11 +39,6 @@
update_cache=yes
when: testing
- name: Create new tables in the database
command: /usr/bin/python2 /usr/share/pagure/pagure_createdb.py
environment:
PAGURE_CONFIG: /etc/pagure/pagure.cfg
- name: Upgrade the database
command: /usr/bin/alembic -c /etc/pagure/alembic.ini upgrade head
args:
@ -51,12 +46,20 @@
environment:
PAGURE_CONFIG: /etc/pagure/pagure.cfg
- name: call createdb
command: /usr/bin/python2 /usr/share/pagure/pagure_createdb.py
environment:
PAGURE_CONFIG: /etc/pagure/pagure.cfg
post_tasks:
- service: name="httpd" state=restarted
- service: name="pagure_ev" state=restarted
- service: name="pagure_ci" state=restarted
- service: name="pagure_webhook" state=restarted
- service: name="pagure_milter" state=restarted
- service: name="pagure_worker" state=restarted
- service: name="pagure_logcom" state=restarted
- service: name="pagure_loadjson" state=restarted
- name: tell nagios to unshush w.r.t. the frontend
nagios:
action=unsilence


@ -1,12 +1,66 @@
---
# defaults file for ansible-ansible-openshift-ansible
#
#
#
# Auth Profile
# These are Fedora Infra specific auth profiles
#
# Acceptable values:
# osbs - this will configure htpasswd for use with osbs
# fedoraidp - configure for fedora idp
# fedoraidp-stg - configure for fedora idp staging env
openshift_auth_profile: osbs
# Do we want OpenShift itself to be containerized?
# This is a requirement if using Atomic Host
#
# As of v3.5.x this would mean that all our systems would completely go down
# in the event the docker daemon were to restart or crash.
#
# In the future (as of v3.6 devel branch), this is done with system containers
# and won't be bound to the docker daemon.
openshift_ansible_containerized_deploy: false
# This will co-host the infra nodes with the primary nodes
openshift_shared_infra: false
# OpenShift Cluster URL
# Example: openshift.fedoraproject.org
openshift_cluster_url: None
# OpenShift Console and API listening ports
# These default to 8443 in openshift-ansible
openshift_api_port: 8443
openshift_console_port: 8443
# OpenShift Applications Ingress subdomain (OpenShift routes)
openshift_app_subdomain: None
# Setup native OpenShift Master High Availability (true or false)
openshift_master_ha: false
# Destination file name for template-generated cluster inventory
cluster_inventory_filename: "cluster-inventory"
# Ansible user for use with openshift-ansible playbooks
openshift_ansible_ssh_user: root
# OpenShift Debug level (Default is 2 upstream)
openshift_debug_level: 2
# Release required as per the openshift-ansible
openshift_release: "v1.2"
openshift_release: "v1.5.0"
# OpenShift Deployment Type
# Possible options:
# origin
# openshift-enterprise
deployment_type: origin
# Install the OpenShift App Examples (value should be "true" or "false")
openshift_ansible_install_examples: false
# Path to clone the openshift-ansible git repo into
openshift_ansible_path: "/root/openshift-ansible"
@ -28,4 +82,5 @@ openshift_ansible_version: "openshift-ansible-3.2.35-1"
# empty causing undesired effects.
openshift_cluster_masters_group: "openshift-cluster-masters"
openshift_cluster_nodes_group: "openshift-cluster-nodes"
openshift_cluster_infra_group: "openshift-cluster-nodes"
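# Illustrative override (hostnames and placement are assumptions, not taken
# from this repo): a concrete cluster would replace the None/false defaults
# above from its inventory or group_vars, e.g.
#   openshift_cluster_url: os.example.com
#   openshift_app_subdomain: app.os.example.com
#   openshift_master_ha: true
#   openshift_auth_profile: fedoraidp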


@ -20,19 +20,19 @@
src: "cluster-inventory.j2"
dest: "{{ openshift_ansible_path }}/{{ cluster_inventory_filename }}"
- name: run ansible
shell: "ansible-playbook {{ openshift_ansible_playbook }} -i {{ cluster_inventory_filename }}"
args:
chdir: "{{ openshift_ansible_path }}"
register: run_ansible_out
#- name: run ansible
# shell: "ansible-playbook {{ openshift_ansible_playbook }} -i {{ cluster_inventory_filename }}"
# args:
# chdir: "{{ openshift_ansible_path }}"
# register: run_ansible_out
- name: display run ansible stdout_lines
debug:
var: run_ansible_out.stdout_lines
#- name: display run ansible stdout_lines
# debug:
# var: run_ansible_out.stdout_lines
- name: display run ansible stderr
debug:
var: run_ansible_out.stderr
#- name: display run ansible stderr
# debug:
# var: run_ansible_out.stderr
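# With the task above commented out, the equivalent manual step (illustrative,
# reconstructed from the commented shell task) is to run, from
# {{ openshift_ansible_path }}:
#   ansible-playbook {{ openshift_ansible_playbook }} -i {{ cluster_inventory_filename }}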


@ -1,17 +1,777 @@
# This is based on the example inventories provided by the upstream
# openshift-ansible project available:
# https://github.com/openshift/openshift-ansible/tree/master/inventory/byo
# Create an OSEv3 group that contains the masters and nodes groups
[OSEv3:children]
masters
nodes
etcd
lb
# Add this if using nfs and you have defined the nfs group
#nfs
# Set variables common for all OSEv3 hosts
[OSEv3:vars]
ansible_ssh_user=root
debug_level=2
deployment_type=origin
openshift_release={{ openshift_release }}
# SSH user, this user should allow ssh based auth without requiring a
# password. If using ssh key based auth, then the key should be managed by an
# ssh agent.
ansible_ssh_user={{openshift_ansible_ssh_user}}
# OpenShift Containerized deployment or not?
containerized={{openshift_ansible_containerized_deploy}}
{% if openshift_ansible_ssh_user != "root" %}
# If ansible_ssh_user is not root, ansible_become must be set to true and the
# user must be configured for passwordless sudo
ansible_become=yes
{% endif %}
# Debug level for all OpenShift components (Defaults to 2)
debug_level={{openshift_debug_level}}
# Specify the deployment type. Valid values are origin and openshift-enterprise.
deployment_type={{openshift_deployment_type}}
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
# release.
openshift_release={{openshift_release}}
# Specify an exact container image tag to install or configure.
# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_image_tag=v3.5.0
# Specify an exact rpm version to install or configure.
# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
#openshift_pkg_version=-3.5.0
# Install the openshift examples
{% if openshift_ansible_install_examples is defined %}
openshift_install_examples={{openshift_ansible_install_examples}}
{% endif %}
# Configure logoutURL in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
#openshift_master_logout_url=http://example.com
# Configure extensionScripts in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
# Configure extensionStylesheets in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
# Configure extensions in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
# Configure extensions in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
#openshift_master_oauth_template=/path/to/login-template.html
# Configure imagePolicyConfig in the master config
# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
# Docker Configuration
# Add additional, insecure, and blocked registries to global docker configuration
# For enterprise deployment types we ensure that registry.access.redhat.com is
# included if you do not include it
#openshift_docker_additional_registries=registry.example.com
#openshift_docker_insecure_registries=registry.example.com
#openshift_docker_blocked_registries=registry.hacker.com
# Disable pushing to dockerhub
#openshift_docker_disable_push_dockerhub=True
# Use Docker inside a System Container. Note that this is a tech preview and should
# not be used to upgrade!
# The following options for docker are ignored:
# - docker_version
# - docker_upgrade
# The following options must not be used
# - openshift_docker_options
#openshift_docker_use_system_container=False
# Force the registry used for the system container. By default the registry
# will be built off of the deployment type and ansible_distribution. Only
# use this option if you are sure you know what you are doing!
#openshift_docker_systemcontainer_image_registry_override="registry.example.com"
# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
# Specify exact version of Docker to configure or upgrade to.
# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
# docker_version="1.12.1"
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
# Specify exact version of etcd to configure or upgrade to.
# etcd_version="3.1.0"
# Enable etcd debug logging, defaults to false
# etcd_debug=true
# Set etcd log levels by package
# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
# Upgrade Hooks
#
# Hooks are available to run custom tasks at various points during a cluster
# upgrade. Each hook should point to a file with Ansible tasks defined. Suggest using
# absolute paths; if not, the path will be treated as relative to the file where the
# hook is actually used.
#
# Tasks to run before each master is upgraded.
# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
#
# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
# upgrade steps, but before we restart system/services.
# openshift_master_upgrade_hook=/usr/share/custom/master.yml
#
# Tasks to run after each master is upgraded and system/services have been restarted.
# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
# Alternate image format string, useful if you've got your own registry mirror
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
#openshift_examples_modify_imagestreams=true
# Additional yum repos to install
#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
# Defining htpasswd users
#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
# or
#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
# OSBS Specific Auth
{% if openshift_auth_profile == "osbs" %}
openshift_master_manage_htpasswd=false
openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '{{ openshift_htpasswd_file }}'}]
openshift_master_public_api_url={{ openshift_master_public_api_url }}
{% endif %}
{% if openshift_auth_profile == "fedoraidp" %}
openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_client_secret}}", "extraScopes": ["profile", "email", "https://id.fedoraproject.org/scope/groups"], "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
{% endif %}
{% if openshift_auth_profile == "fedoraidp-stg" %}
openshift_master_identity_providers=[{"name": "fedoraidp", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "openshift", "client_secret": "{{openshift_stg_client_secret}}", "claims": {"id": ["sub"], "preferredUsername": ["sub"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://id{{env_suffix}}.fedoraproject.org/openidc/Authorization", "token": "https://id{{env_suffix}}.fedoraproject.org/openidc/Token", "userInfo": "https://id{{env_suffix}}.fedoraproject.org/openidc/UserInfo"}}]
{% endif %}
# Allow all auth
#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
# LDAP auth
#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
#
# Configure LDAP CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the LDAPPasswordIdentityProvider.
#
#openshift_master_ldap_ca=<ca text>
# or
#openshift_master_ldap_ca_file=<path to local ca file to use>
# OpenID auth
#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
#
# Configure OpenID CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "ca" key set
# within the OpenIDIdentityProvider.
#
#openshift_master_openid_ca=<ca text>
# or
#openshift_master_openid_ca_file=<path to local ca file to use>
# Request header auth
#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
#
# Configure request header CA certificate
# Specify either the ASCII contents of the certificate or the path to
# the local file that will be copied to the remote host. CA
# certificate contents will be copied to master systems and saved
# within /etc/origin/master/ with a filename matching the "clientCA"
# key set within the RequestHeaderIdentityProvider.
#
#openshift_master_request_header_ca=<ca text>
# or
#openshift_master_request_header_ca_file=<path to local ca file to use>
{% if openshift_master_ha is defined %}
{% if openshift_master_ha %}
# Native high availability cluster method with optional load balancer.
# If no lb group is defined, the installer assumes that a load balancer has
# been preconfigured. For installation the value of
# openshift_master_cluster_hostname must resolve to the load balancer
# or to one or all of the masters defined in the inventory if no load
# balancer is present.
openshift_master_cluster_method=native
openshift_master_cluster_hostname={{openshift_internal_cluster_url}}
openshift_master_cluster_public_hostname={{openshift_cluster_url}}
{% endif %}
{% endif %}
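# Illustrative rendering when openshift_master_ha is true (hostnames are
# placeholders, not values from this repo):
# openshift_master_cluster_method=native
# openshift_master_cluster_hostname=internal.os.example.com
# openshift_master_cluster_public_hostname=os.example.com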
# Override the default controller lease ttl
#osm_controller_lease_ttl=30
# Configure controller arguments
#osm_controller_args={'resource-quota-sync-period': ['10s']}
# Configure api server arguments
#osm_api_server_args={'max-requests-inflight': ['400']}
# default subdomain to use for exposed routes
{% if openshift_app_subdomain is defined %}
{% if openshift_app_subdomain %}
openshift_master_default_subdomain={{openshift_app_subdomain}}
{% endif %}
{% endif %}
# additional cors origins
#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
# default project node selector
#osm_default_node_selector='region=primary'
# Override the default pod eviction timeout
#openshift_master_pod_eviction_timeout=5m
# Override the default oauth tokenConfig settings:
# openshift_master_access_token_max_seconds=86400
# openshift_master_auth_token_max_seconds=500
# Override master servingInfo.maxRequestsInFlight
#openshift_master_max_requests_inflight=500
# Override master and node servingInfo.minTLSVersion and .cipherSuites
# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
#openshift_master_min_tls_version=VersionTLS12
#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
#
#openshift_node_min_tls_version=VersionTLS12
#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
#osn_storage_plugin_deps=['ceph','glusterfs']
# OpenShift Router Options
#
# An OpenShift router will be created during install if there are
# nodes present with labels matching the default router selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Router selector (optional)
# Router will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_router_selector='region=infra'
#
# Router replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift router selector.
#openshift_hosted_router_replicas=2
#
# Router force subdomain (optional)
# A router path format to force on all routes used by this router
# (will ignore the route host value)
#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
#
# Router certificate (optional)
# Provide local certificate paths which will be configured as the
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
# Disable management of the OpenShift Router
#openshift_hosted_manage_router=false
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
# and is in the form of a list. If no data is passed then a default router will be
# created. There are multiple combinations of router sharding. The one described
# below supports routers on separate nodes.
#openshift_hosted_routers:
#- name: router1
# stats_port: 1936
# ports:
# - 80:80
# - 443:443
# replicas: 1
# namespace: default
# serviceaccount: router
# selector: type=router1
# images: "openshift3/ose-${component}:${version}"
# edits: []
# certificates:
# certfile: /path/to/certificate/abc.crt
# keyfile: /path/to/certificate/abc.key
# cafile: /path/to/certificate/ca.crt
#- name: router2
# stats_port: 1936
# ports:
# - 80:80
# - 443:443
# replicas: 1
# namespace: default
# serviceaccount: router
# selector: type=router2
# images: "openshift3/ose-${component}:${version}"
# certificates:
# certfile: /path/to/certificate/xyz.crt
# keyfile: /path/to/certificate/xyz.key
# cafile: /path/to/certificate/ca.crt
# edits:
# # ROUTE_LABELS sets the router to listen for routes
# # tagged with the provided values
# - key: spec.template.spec.containers[0].env
# value:
# name: ROUTE_LABELS
# value: "route=external"
# action: append
# OpenShift Registry Console Options
# Override the console image prefix for enterprise deployments, not used in origin
# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
# Override image version, defaults to latest for origin, matches the product version for enterprise
#openshift_cockpit_deployer_version=1.4.1
# Openshift Registry Options
#
# An OpenShift registry will be created during install if there are
# nodes present with labels matching the default registry selector,
# "region=infra". Set openshift_node_labels per node as needed in
# order to label nodes.
#
# Example:
# [nodes]
# node.example.com openshift_node_labels="{'region': 'infra'}"
#
# Registry selector (optional)
# Registry will only be created if nodes matching this label are present.
# Default value: 'region=infra'
#openshift_hosted_registry_selector='region=infra'
#
# Registry replicas (optional)
# Unless specified, openshift-ansible will calculate the replica count
# based on the number of nodes matching the openshift registry selector.
#openshift_hosted_registry_replicas=2
#
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
# Disable management of the OpenShift Registry
#openshift_hosted_manage_registry=false
# Registry Storage Options
#
# NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
#
# External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/registry"
#openshift_hosted_registry_storage_kind=nfs
#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
#openshift_hosted_registry_storage_host=nfs.example.com
#openshift_hosted_registry_storage_nfs_directory=/exports
#openshift_hosted_registry_storage_volume_name=registry
#openshift_hosted_registry_storage_volume_size=10Gi
#
# Openstack
# Volume must already exist.
#openshift_hosted_registry_storage_kind=openstack
#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_registry_storage_openstack_filesystem=ext4
#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
#openshift_hosted_registry_storage_volume_size=10Gi
#
# Native GlusterFS Registry Storage
#openshift_hosted_registry_storage_kind=glusterfs
#
# AWS S3
#
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Any S3 service (Minio, ExoScale, ...): Basically the same as above
# but with regionendpoint configured
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
#openshift_hosted_registry_storage_s3_accesskey=access_key_id
#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
#openshift_hosted_registry_storage_s3_bucket=bucket_name
#openshift_hosted_registry_storage_s3_region=bucket_region
#openshift_hosted_registry_storage_s3_chunksize=26214400
#openshift_hosted_registry_storage_s3_rootdirectory=/registry
#openshift_hosted_registry_pullthrough=true
#openshift_hosted_registry_acceptschema2=true
#openshift_hosted_registry_enforcequota=true
#
# Additional CloudFront Options. When using CloudFront, all three
# of the following variables must be defined.
#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
# Metrics deployment
# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
#
# By default metrics are not automatically deployed, set this to enable them
# openshift_hosted_metrics_deploy=true
#
# Storage Options
# If openshift_hosted_metrics_storage_kind is unset then metrics will be stored
# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
# Storage options A & B currently support only one cassandra pod which is
# generally enough for up to 1000 pods. Additional volumes can be created
# manually after the fact and metrics scaled per the docs.
#
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/metrics"
#openshift_hosted_metrics_storage_kind=nfs
#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/metrics"
#openshift_hosted_metrics_storage_kind=nfs
#openshift_hosted_metrics_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_metrics_storage_host=nfs.example.com
#openshift_hosted_metrics_storage_nfs_directory=/exports
#openshift_hosted_metrics_storage_volume_name=metrics
#openshift_hosted_metrics_storage_volume_size=10Gi
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_hosted_metrics_storage_kind=dynamic
#
# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_metrics/README.md
#
# Override metricsPublicURL in the master config for cluster metrics
# Defaults to https://hawkular-metrics.openshift_master_default_subdomain/hawkular/metrics
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
# Logging deployment
#
# Currently logging deployment is disabled by default, enable it by setting this
#openshift_hosted_logging_deploy=true
#
# Logging storage config
# Option A - NFS Host Group
# An NFS volume will be created with path "nfs_directory/volume_name"
# on the host within the [nfs] host group. For example, the volume
# path using these options would be "/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_nfs_options='*(rw,root_squash)'
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option B - External NFS Host
# NFS volume must already exist with path "nfs_directory/_volume_name" on
# the storage_host. For example, the remote volume path using these
# options would be "nfs.example.com:/exports/logging"
#openshift_hosted_logging_storage_kind=nfs
#openshift_hosted_logging_storage_access_modes=['ReadWriteOnce']
#openshift_hosted_logging_storage_host=nfs.example.com
#openshift_hosted_logging_storage_nfs_directory=/exports
#openshift_hosted_logging_storage_volume_name=logging
#openshift_hosted_logging_storage_volume_size=10Gi
#
# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
# your cloud platform use this.
#openshift_hosted_logging_storage_kind=dynamic
#
# Option D - none -- Logging will use emptydir volumes which are destroyed when
# pods are deleted
#
# Other Logging Options -- Common items you may wish to reconfigure, for the complete
# list of options please see roles/openshift_logging/README.md
#
# Configure loggingPublicURL in the master config for aggregate logging, defaults
# to kibana.openshift_master_default_subdomain
#openshift_hosted_logging_hostname=logging.apps.example.com
# Configure the number of elastic search nodes; unless you're using dynamic
# provisioning, this value must be 1
#openshift_hosted_logging_elasticsearch_cluster_size=1
# Configure the prefix and version for the component images
#openshift_hosted_logging_deployer_prefix=registry.example.com:8888/openshift3/
#openshift_hosted_logging_deployer_version=3.5.0
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False
# Configure SDN cluster network and kubernetes service CIDR blocks. These
# network blocks should be private and should not conflict with network blocks
# in your infrastructure that pods may require access to. Can not be changed
# after deployment.
#
# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
# 172.17.0.0/16. Your installation will fail and/or your configuration change will
# cause the Pod SDN or Cluster SDN to fail.
#
# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
# environment variable located in /etc/sysconfig/docker-network.
#osm_cluster_network_cidr=10.128.0.0/14
#openshift_portal_net=172.30.0.0/16
# ExternalIPNetworkCIDRs controls what values are acceptable for the
# service external IP field. If empty, no externalIP may be set. It
# may contain a list of CIDRs which are checked for access. If a CIDR
# is prefixed with !, IPs in that CIDR will be rejected. Rejections
# will be applied first, then the IP checked against one of the
# allowed CIDRs. You should ensure this range does not overlap with
# your nodes, pods, or service CIDRs for security reasons.
#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
# be assigned. It may contain a single CIDR that will be allocated from. For
# security reasons, you should ensure that this range does not overlap with
# the CIDRs reserved for external IPs, nodes, pods, or services.
#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
# Configure number of bits to allocate to each host's subnet e.g. 9
# would mean a /23 network on the host.
#osm_host_subnet_length=9
# Configure master API and console ports.
# These will default to 8443
{% if openshift_api_port is defined and openshift_console_port is defined %}
{% if openshift_api_port and openshift_console_port %}
openshift_master_api_port={{openshift_api_port}}
openshift_master_console_port={{openshift_console_port}}
{% endif %}
{% endif %}
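# Illustrative rendering with this role's defaults (openshift_api_port and
# openshift_console_port both default to 8443):
# openshift_master_api_port=8443
# openshift_master_console_port=8443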
# set RPM version for debugging purposes
#openshift_pkg_version=-3.1.0.0
# Configure custom ca certificate
#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
#
# NOTE: CA certificate will not be replaced with existing clusters.
# This option may only be specified when creating a new cluster or
# when redeploying cluster certificates with the redeploy-certificates
# playbook.
# Configure custom named certificates (SNI certificates)
#
# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
#
# NOTE: openshift_master_named_certificates is cached on masters and is an
# additive fact, meaning that each run with a different set of certificates
# will add the newly provided certificates to the cached set of certificates.
#
# An optional CA may be specified for each named certificate. CAs will
# be added to the OpenShift CA bundle which allows for the named
# certificate to be served for internal cluster communication.
#
# If you would like openshift_master_named_certificates to be overwritten with
# the provided value, specify openshift_master_overwrite_named_certificates.
#openshift_master_overwrite_named_certificates=true
#
# Provide local certificate paths which will be deployed to masters
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
#
# Detected names may be overridden by specifying the "names" key
#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
# Session options
#openshift_master_session_name=ssn
#openshift_master_session_max_seconds=3600
# An authentication and encryption secret will be generated if secrets
# are not provided. If provided, openshift_master_session_auth_secrets
# and openshift_master_encryption_secrets must be equal length.
#
# Signing secrets, used to authenticate sessions using
# HMAC. Recommended to use secrets with 32 or 64 bytes.
#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
#
# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
# characters long, to select AES-128, AES-192, or AES-256.
#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
# configure how often node iptables rules are refreshed
#openshift_node_iptables_sync_period=5s
# Configure nodeIP in the node config
# This is needed in cases where node traffic is desired to go over an
# interface other than the default network interface.
#openshift_set_node_ip=True
# Force setting of system hostname when configuring OpenShift
# This works around issues related to installations that do not have valid dns
# entries for the interfaces attached to the host.
#openshift_set_hostname=True
# Configure dnsIP in the node config
#openshift_dns_ip=172.30.0.1
# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['90'], 'image-gc-low-threshold': ['80']}
# Configure logrotate scripts
# See: https://github.com/nickhammond/ansible-logrotate
#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
# openshift-ansible will wait indefinitely for your input when it detects that the
# value of openshift_hostname resolves to an IP address not bound to any local
# interfaces. This mis-configuration is problematic for any pod leveraging host
# networking and liveness or readiness probes.
# Setting this variable to true will override that check.
#openshift_override_hostname_check=true
# Configure dnsmasq for cluster dns, switch the host's local resolver to use dnsmasq
# and configure node's dnsIP to point at the node's local dnsmasq instance. Defaults
# to True for Origin 1.2 and OSE 3.2. False for 1.1 / 3.1 installs, this cannot
# be used with 1.0 and 3.0.
#openshift_use_dnsmasq=False
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
# options like 'strict-order' to alter dnsmasq configuration.
#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
# Global Proxy Configuration
# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
# variables for docker and master services.
#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
#openshift_no_proxy='.hosts.example.com,some-host.com'
#
# Most environments don't require a proxy between openshift masters, nodes, and
# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
# If all of your hosts share a common domain you may wish to disable this and
# specify that domain above.
#openshift_generate_no_proxy_hosts=True
#
# These options configure the BuildDefaults admission controller which injects
# configuration into Builds. Proxy related values will default to the global proxy
# config values. You only need to set these if they differ from the global proxy settings.
# See BuildDefaults documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_no_proxy=mycorp.com
#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
#openshift_builddefaults_git_no_proxy=mycorp.com
#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
#openshift_builddefaults_resources_requests_cpu=100m
#openshift_builddefaults_resources_requests_memory=256m
#openshift_builddefaults_resources_limits_cpu=1000m
#openshift_builddefaults_resources_limits_memory=512m
# Or you may optionally define your own build defaults configuration serialized as json
#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
# These options configure the BuildOverrides admission controller which injects
# configuration into Builds.
# See BuildOverrides documentation at
# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
#openshift_buildoverrides_force_pull=true
#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
# Admission plugin config
#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
# Configure usage of openshift_clock role.
#openshift_clock_enabled=true
# OpenShift Per-Service Environment Variables
# Environment variables are added to /etc/sysconfig files for
# each OpenShift service: node, master (api and controllers).
# API and controllers environment variables are merged in single
# master environments.
#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
# Enable API service auditing, available as of 3.2
#openshift_master_audit_config={"enabled": true}
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
#openshift_ca_cert_expire_days=1825
#openshift_node_cert_expire_days=730
#openshift_master_cert_expire_days=730
# Validity of the auto-generated external etcd certificates in days.
# Controls validity for etcd CA, peer, server and client certificates.
#
#etcd_ca_default_days=1825
# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
# However, in order to ensure that your masters are not burdened with running pods, you should
# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
[masters]
{% for host in groups[openshift_cluster_masters_group] %}
@ -23,15 +783,43 @@ openshift_master_public_api_url={{ openshift_master_public_api_url }}
{{ host }}
{% endfor %}
[lb]
{% for host in groups[openshift_cluster_masters_group] %}
{{ host }}
{% endfor %}
{% if openshift_shared_infra is defined %}
{% if openshift_shared_infra %}
[nodes]
{% for host in groups[openshift_cluster_masters_group] %}
{{ host }} openshift_node_labels="{'region':'infra'}" openshift_schedulable=False
{{ host }} openshift_schedulable=False
{% endfor %}
{% for host in groups[openshift_cluster_nodes_group] %}
{{ host }} openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
{% endfor %}
{% else %}
[nodes]
{% for host in groups[openshift_cluster_infra_group] %}
{{ host }} openshift_node_labels="{'region':'infra'}"
{% endfor %}
{% for host in groups[openshift_cluster_masters_group] %}
{{ host }} openshift_schedulable=False
{% endfor %}
{% for host in groups[openshift_cluster_nodes_group] %}
{{ host }} openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
{% endfor %}
{% endfor %}
{% endif %}
{% else %}
[nodes]
{% for host in groups[openshift_cluster_infra_group] %}
{{ host }} openshift_node_labels="{'region':'infra'}"
{% endfor %}
{% for host in groups[openshift_cluster_masters_group] %}
{{ host }} openshift_schedulable=False
{% endfor %}
{% for host in groups[openshift_cluster_nodes_group] %}
{{ host }} openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
{% endfor %}
{% endif %}
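# Illustrative rendering of the non-shared-infra branch above (host names are
# placeholders; assume infra01 in the infra group, master01 in the masters
# group and node01 in the nodes group):
# [nodes]
# infra01 openshift_node_labels="{'region':'infra'}"
# master01 openshift_schedulable=False
# node01 openshift_node_labels="{'region': 'primary', 'zone': 'default'}"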


@ -140,15 +140,13 @@
- autocloud/backend
#
# Install hotfix to add the architecture to aarch64
# Install hotfix to ignore new architectures
# See PR - https://github.com/kushaldas/autocloud/pull/56/
#
- name: hotfix - copy over models.py to autocloud/models.py
- name: hotfix - copy over consumer files
copy: src='{{ files }}/{{ item.src }}' dest={{ item.dest }}
with_items:
- { src: 'hotfix/autocloud/models.py', dest: '/usr/lib/python2.7/site-packages/autocloud' }
- { src: 'hotfix/autocloud/consumer.py', dest: '/usr/lib/python2.7/site-packages/autocloud' }
- { src: 'hotfix/autocloud/__init__.py', dest: '/usr/lib/python2.7/site-packages/autocloud/utils' }
notify:
- restart fedmsg-hub
tags:


@ -66,18 +66,3 @@
- autocloud
- autocloud/frontend
- selinux
#
# Install hotfix to add the architecture to aarch64
# See PR - https://github.com/kushaldas/autocloud/pull/56/
#
- name: hotfix - copy over models.py to autocloud/models.py
copy: src='{{ files }}/{{ item.src }}' dest={{ item.dest }}
with_items:
- { src: 'hotfix/autocloud/models.py', dest: '/usr/lib/python2.7/site-packages/autocloud' }
- { src: 'hotfix/autocloud/__init__.py', dest: '/usr/lib/python2.7/site-packages/autocloud/utils' }
notify:
- restart fedmsg-hub
tags:
- autocloud
- hotfix


@ -0,0 +1,687 @@
# "false"
# Global Postfix configuration file. This file lists only a subset
# of all parameters. For the syntax, and for a complete parameter
# list, see the postconf(5) manual page (command: "man 5 postconf").
#
# For common configuration examples, see BASIC_CONFIGURATION_README
# and STANDARD_CONFIGURATION_README. To find these documents, use
# the command "postconf html_directory readme_directory", or go to
# http://www.postfix.org/.
#
# For best results, change no more than 2-3 parameters at a time,
# and test if Postfix still works after every change.
# SOFT BOUNCE
#
# The soft_bounce parameter provides a limited safety net for
# testing. When soft_bounce is enabled, mail will remain queued that
# would otherwise bounce. This parameter disables locally-generated
# bounces, and prevents the SMTP server from rejecting mail permanently
# (by changing 5xx replies into 4xx replies). However, soft_bounce
# is no cure for address rewriting mistakes or mail routing mistakes.
#
#soft_bounce = no
# LOCAL PATHNAME INFORMATION
#
# The queue_directory specifies the location of the Postfix queue.
# This is also the root directory of Postfix daemons that run chrooted.
# See the files in examples/chroot-setup for setting up Postfix chroot
# environments on different UNIX systems.
#
queue_directory = /var/spool/postfix
# The command_directory parameter specifies the location of all
# postXXX commands.
#
command_directory = /usr/sbin
# The daemon_directory parameter specifies the location of all Postfix
# daemon programs (i.e. programs listed in the master.cf file). This
# directory must be owned by root.
#
daemon_directory = /usr/libexec/postfix
# QUEUE AND PROCESS OWNERSHIP
#
# The mail_owner parameter specifies the owner of the Postfix queue
# and of most Postfix daemon processes. Specify the name of a user
# account THAT DOES NOT SHARE ITS USER OR GROUP ID WITH OTHER ACCOUNTS
# AND THAT OWNS NO OTHER FILES OR PROCESSES ON THE SYSTEM. In
# particular, don't specify nobody or daemon. PLEASE USE A DEDICATED
# USER.
#
mail_owner = postfix
# The default_privs parameter specifies the default rights used by
# the local delivery agent for delivery to external file or command.
# These rights are used in the absence of a recipient user context.
# DO NOT SPECIFY A PRIVILEGED USER OR THE POSTFIX OWNER.
#
#default_privs = nobody
# INTERNET HOST AND DOMAIN NAMES
#
# The myhostname parameter specifies the internet hostname of this
# mail system. The default is to use the fully-qualified domain name
# from gethostname(). $myhostname is used as a default value for many
# other configuration parameters.
#
#myhostname = host.domain.tld
#myhostname = virtual.domain.tld
# The mydomain parameter specifies the local internet domain name.
# The default is to use $myhostname minus the first component.
# $mydomain is used as a default value for many other configuration
# parameters.
#
#mydomain = domain.tld
# SENDING MAIL
#
# The myorigin parameter specifies the domain that locally-posted
# mail appears to come from. The default is to append $myhostname,
# which is fine for small sites. If you run a domain with multiple
# machines, you should (1) change this to $mydomain and (2) set up
# a domain-wide alias database that aliases each user to
# user@that.users.mailhost.
#
# For the sake of consistency between sender and recipient addresses,
# myorigin also specifies the default domain name that is appended
# to recipient addresses that have no @domain part.
#
#myorigin = $myhostname
#myorigin = $mydomain
mydomain = fedoraproject.org
myorigin = fedoraproject.org
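# Illustrative check (not part of the stock comments): the effective values of
# the two settings above can be confirmed on the host with
#   postconf mydomain myorigin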
# RECEIVING MAIL
# The inet_interfaces parameter specifies the network interface
# addresses that this mail system receives mail on. By default,
# the software claims all active interfaces on the machine. The
# parameter also controls delivery of mail to user@[ip.address].
#
# See also the proxy_interfaces parameter, for network addresses that
# are forwarded to us via a proxy or network address translator.
#
# Note: you need to stop/start Postfix when this parameter changes.
#
#inet_interfaces = all
#inet_interfaces = $myhostname
#inet_interfaces = $myhostname, localhost
inet_interfaces = all
# The proxy_interfaces parameter specifies the network interface
# addresses that this mail system receives mail on by way of a
# proxy or network address translation unit. This setting extends
# the address list specified with the inet_interfaces parameter.
#
# You must specify your proxy/NAT addresses when your system is a
# backup MX host for other domains, otherwise mail delivery loops
# will happen when the primary MX host is down.
#
#proxy_interfaces =
#proxy_interfaces = 1.2.3.4
# The mydestination parameter specifies the list of domains that this
# machine considers itself the final destination for.
#
# These domains are routed to the delivery agent specified with the
# local_transport parameter setting. By default, that is the UNIX
# compatible delivery agent that looks up all recipients in /etc/passwd
# and /etc/aliases or their equivalent.
#
# The default is $myhostname + localhost.$mydomain. On a mail domain
# gateway, you should also include $mydomain.
#
# Do not specify the names of virtual domains - those domains are
# specified elsewhere (see VIRTUAL_README).
#
# Do not specify the names of domains that this machine is backup MX
# host for. Specify those names via the relay_domains settings for
# the SMTP server, or use permit_mx_backup if you are lazy (see
# STANDARD_CONFIGURATION_README).
#
# The local machine is always the final destination for mail addressed
# to user@[the.net.work.address] of an interface that the mail system
# receives mail on (see the inet_interfaces parameter).
#
# Specify a list of host or domain names, /file/name or type:table
# patterns, separated by commas and/or whitespace. A /file/name
# pattern is replaced by its contents; a type:table is matched when
# a name matches a lookup key (the right-hand side is ignored).
# Continue long lines by starting the next line with whitespace.
#
# See also below, section "REJECTING MAIL FOR UNKNOWN LOCAL USERS".
#
mydestination = $myhostname, localhost.$mydomain, fedora.redhat.com, localhost
#mydestination = $myhostname, localhost.$mydomain, localhost, $mydomain
#mydestination = $myhostname, localhost.$mydomain, localhost, $mydomain,
# mail.$mydomain, www.$mydomain, ftp.$mydomain
# REJECTING MAIL FOR UNKNOWN LOCAL USERS
#
# The local_recipient_maps parameter specifies optional lookup tables
# with all names or addresses of users that are local with respect
# to $mydestination, $inet_interfaces or $proxy_interfaces.
#
# If this parameter is defined, then the SMTP server will reject
# mail for unknown local users. This parameter is defined by default.
#
# To turn off local recipient checking in the SMTP server, specify
# local_recipient_maps = (i.e. empty).
#
# The default setting assumes that you use the default Postfix local
# delivery agent for local delivery. You need to update the
# local_recipient_maps setting if:
#
# - You define $mydestination domain recipients in files other than
# /etc/passwd, /etc/aliases, or the $virtual_alias_maps files.
# For example, you define $mydestination domain recipients in
# the $virtual_mailbox_maps files.
#
# - You redefine the local delivery agent in master.cf.
#
# - You redefine the "local_transport" setting in main.cf.
#
# - You use the "luser_relay", "mailbox_transport", or "fallback_transport"
# feature of the Postfix local delivery agent (see local(8)).
#
# Details are described in the LOCAL_RECIPIENT_README file.
#
# Beware: if the Postfix SMTP server runs chrooted, you probably have
# to access the passwd file via the proxymap service, in order to
# overcome chroot restrictions. The alternative, having a copy of
# the system passwd file in the chroot jail, is just not practical.
#
# The right-hand side of the lookup tables is conveniently ignored.
# In the left-hand side, specify a bare username, an @domain.tld
# wild-card, or specify a user@domain.tld address.
#
#local_recipient_maps = unix:passwd.byname $alias_maps
#local_recipient_maps = proxy:unix:passwd.byname $alias_maps
#local_recipient_maps =
# The unknown_local_recipient_reject_code specifies the SMTP server
# response code when a recipient domain matches $mydestination or
# ${proxy,inet}_interfaces, while $local_recipient_maps is non-empty
# and the recipient address or address local-part is not found.
#
# The default setting is 550 (reject mail) but it is safer to start
# with 450 (try again later) until you are certain that your
# local_recipient_maps settings are OK.
#
unknown_local_recipient_reject_code = 550
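# Per the advice above, a cautious rollout could temporarily use the
# softer "try again later" code instead:
#
#unknown_local_recipient_reject_code = 450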
# TRUST AND RELAY CONTROL
# The mynetworks parameter specifies the list of "trusted" SMTP
# clients that have more privileges than "strangers".
#
# In particular, "trusted" SMTP clients are allowed to relay mail
# through Postfix. See the smtpd_recipient_restrictions parameter
# in postconf(5).
#
# You can specify the list of "trusted" network addresses by hand
# or you can let Postfix do it for you (which is the default).
#
# By default (mynetworks_style = subnet), Postfix "trusts" SMTP
# clients in the same IP subnetworks as the local machine.
# On Linux, this works correctly only with interfaces specified
# with the "ifconfig" command.
#
# Specify "mynetworks_style = class" when Postfix should "trust" SMTP
# clients in the same IP class A/B/C networks as the local machine.
# Don't do this with a dialup site - it would cause Postfix to "trust"
# your entire provider's network. Instead, specify an explicit
# mynetworks list by hand, as described below.
#
# Specify "mynetworks_style = host" when Postfix should "trust"
# only the local machine.
#
#mynetworks_style = class
#mynetworks_style = subnet
#mynetworks_style = host
# Alternatively, you can specify the mynetworks list by hand, in
# which case Postfix ignores the mynetworks_style setting.
#
# Specify an explicit list of network/netmask patterns, where the
# mask specifies the number of bits in the network part of a host
# address.
#
# You can also specify the absolute pathname of a pattern file instead
# of listing the patterns here. Specify type:table for table-based lookups
# (the value on the table right-hand side is not used).
#
#mynetworks = 168.100.189.0/28, 127.0.0.0/8
#mynetworks = $config_directory/mynetworks
#mynetworks = hash:/etc/postfix/network_table
# The relay_domains parameter restricts what destinations this system will
# relay mail to. See the smtpd_recipient_restrictions description in
# postconf(5) for detailed information.
#
# By default, Postfix relays mail
# - from "trusted" clients (IP address matches $mynetworks) to any destination,
# - from "untrusted" clients to destinations that match $relay_domains or
# subdomains thereof, except addresses with sender-specified routing.
# The default relay_domains value is $mydestination.
#
# In addition to the above, the Postfix SMTP server by default accepts mail
# that Postfix is final destination for:
# - destinations that match $inet_interfaces or $proxy_interfaces,
# - destinations that match $mydestination
# - destinations that match $virtual_alias_domains,
# - destinations that match $virtual_mailbox_domains.
# These destinations do not need to be listed in $relay_domains.
#
# Specify a list of hosts or domains, /file/name patterns or type:name
# lookup tables, separated by commas and/or whitespace. Continue
# long lines by starting the next line with whitespace. A file name
# is replaced by its contents; a type:name table is matched when a
# (parent) domain appears as lookup key.
#
# NOTE: Postfix will not automatically forward mail for domains that
# list this system as their primary or backup MX host. See the
# permit_mx_backup restriction description in postconf(5).
#
#relay_domains = $mydestination
# INTERNET OR INTRANET
# The relayhost parameter specifies the default host to send mail to
# when no entry is matched in the optional transport(5) table. When
# no relayhost is given, mail is routed directly to the destination.
#
# On an intranet, specify the organizational domain name. If your
# internal DNS uses no MX records, specify the name of the intranet
# gateway host instead.
#
# In the case of SMTP, specify a domain, host, host:port, [host]:port,
# [address] or [address]:port; the form [host] turns off MX lookups.
#
# If you're connected via UUCP, see also the default_transport parameter.
#
#relayhost = $mydomain
#relayhost = [gateway.my.domain]
#relayhost = [mailserver.isp.tld]
#relayhost = uucphost
#relayhost = [an.ip.add.ress]
#relayhost = bastion
# REJECTING UNKNOWN RELAY USERS
#
# The relay_recipient_maps parameter specifies optional lookup tables
# with all addresses in the domains that match $relay_domains.
#
# If this parameter is defined, then the SMTP server will reject
# mail for unknown relay users. This feature is off by default.
#
# The right-hand side of the lookup tables is conveniently ignored.
# In the left-hand side, specify an @domain.tld wild-card, or specify
# a user@domain.tld address.
#
#relay_recipient_maps = hash:/etc/postfix/relay_recipients
# INPUT RATE CONTROL
#
# The in_flow_delay configuration parameter implements mail input
# flow control. This feature is turned on by default, although it
# still needs further development (it's disabled on SCO UNIX due
# to an SCO bug).
#
# A Postfix process will pause for $in_flow_delay seconds before
# accepting a new message, when the message arrival rate exceeds the
# message delivery rate. With the default 100 SMTP server process
# limit, this limits the mail inflow to 100 messages a second more
# than the number of messages delivered per second.
#
# Specify 0 to disable the feature. Valid delays are 0..10.
#
#in_flow_delay = 1s
# ADDRESS REWRITING
#
# The ADDRESS_REWRITING_README document gives information about
# address masquerading or other forms of address rewriting including
# username->Firstname.Lastname mapping.
masquerade_domains = redhat.com
masquerade_exceptions = root apache
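# For illustration only (the host name below is hypothetical): with the
# two settings above, a sender address user@build01.redhat.com is
# rewritten to user@redhat.com, while root@build01.redhat.com and
# apache@build01.redhat.com are left untouched.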
# ADDRESS REDIRECTION (VIRTUAL DOMAIN)
#
# The VIRTUAL_README document gives information about the many forms
# of domain hosting that Postfix supports.
# "USER HAS MOVED" BOUNCE MESSAGES
#
# See the discussion in the ADDRESS_REWRITING_README document.
# TRANSPORT MAP
#
# See the discussion in the ADDRESS_REWRITING_README document.
# ALIAS DATABASE
#
# The alias_maps parameter specifies the list of alias databases used
# by the local delivery agent. The default list is system dependent.
#
# On systems with NIS, the default is to search the local alias
# database, then the NIS alias database. See aliases(5) for syntax
# details.
#
# If you change the alias database, run "postalias /etc/aliases" (or
# wherever your system stores the mail alias file), or simply run
# "newaliases" to build the necessary DBM or DB file.
#
# It will take a minute or so before changes become visible. Use
# "postfix reload" to eliminate the delay.
#
#alias_maps = dbm:/etc/aliases
alias_maps = hash:/etc/aliases
#alias_maps = hash:/etc/aliases, nis:mail.aliases
#alias_maps = netinfo:/aliases
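# A minimal sketch of an /etc/aliases entry (the target address is
# hypothetical); after editing, rebuild the database as described above:
#
#   root:   admin@example.com
#
#   $ newaliases        # or: postalias /etc/aliases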
# The alias_database parameter specifies the alias database(s) that
# are built with "newaliases" or "sendmail -bi". This is a separate
# configuration parameter, because alias_maps (see above) may specify
# tables that are not necessarily all under control by Postfix.
#
#alias_database = dbm:/etc/aliases
#alias_database = dbm:/etc/mail/aliases
alias_database = hash:/etc/aliases
#alias_database = hash:/etc/aliases, hash:/opt/majordomo/aliases
# ADDRESS EXTENSIONS (e.g., user+foo)
#
# The recipient_delimiter parameter specifies the separator between
# user names and address extensions (user+foo). See canonical(5),
# local(8), relocated(5) and virtual(5) for the effects this has on
# aliases, canonical, virtual, relocated and .forward file lookups.
# Basically, the software tries user+foo and .forward+foo before
# trying user and .forward.
#
recipient_delimiter = +
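# Concretely: with the "+" delimiter above, mail for bob+fedora@$mydomain
# first tries the alias (or .forward+fedora file) for "bob+fedora" and
# then falls back to plain "bob"; the extension is exported to commands
# run by local(8) in the EXTENSION environment variable.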
# DELIVERY TO MAILBOX
#
# The home_mailbox parameter specifies the optional pathname of a
# mailbox file relative to a user's home directory. The default
# mailbox file is /var/spool/mail/user or /var/mail/user. Specify
# "Maildir/" for qmail-style delivery (the / is required).
#
#home_mailbox = Mailbox
#home_mailbox = Maildir/
# The mail_spool_directory parameter specifies the directory where
# UNIX-style mailboxes are kept. The default setting depends on the
# system type.
#
#mail_spool_directory = /var/mail
#mail_spool_directory = /var/spool/mail
# The mailbox_command parameter specifies the optional external
# command to use instead of mailbox delivery. The command is run as
# the recipient with proper HOME, SHELL and LOGNAME environment settings.
# Exception: delivery for root is done as $default_user.
#
# Other environment variables of interest: USER (recipient username),
# EXTENSION (address extension), DOMAIN (domain part of address),
# and LOCAL (the address localpart).
#
# Unlike other Postfix configuration parameters, the mailbox_command
# parameter is not subjected to $parameter substitutions. This is to
# make it easier to specify shell syntax (see example below).
#
# Avoid shell meta characters because they will force Postfix to run
# an expensive shell process. Procmail alone is expensive enough.
#
# IF YOU USE THIS TO DELIVER MAIL SYSTEM-WIDE, YOU MUST SET UP AN
# ALIAS THAT FORWARDS MAIL FOR ROOT TO A REAL USER.
#
#mailbox_command = /usr/bin/procmail
#mailbox_command = /some/where/procmail -a "$EXTENSION"
# The mailbox_transport specifies the optional transport in master.cf
# to use after processing aliases and .forward files. This parameter
# has precedence over the mailbox_command, fallback_transport and
# luser_relay parameters.
#
# Specify a string of the form transport:nexthop, where transport is
# the name of a mail delivery transport defined in master.cf. The
# :nexthop part is optional. For more details see the sample transport
# configuration file.
#
# NOTE: if you use this feature for accounts not in the UNIX password
# file, then you must update the "local_recipient_maps" setting in
# the main.cf file, otherwise the SMTP server will reject mail for
# non-UNIX accounts with "User unknown in local recipient table".
#
#mailbox_transport = lmtp:unix:/var/lib/imap/socket/lmtp
# If you use the cyrus-imapd IMAP server, deliver local mail to the IMAP
# server with LMTP (Local Mail Transfer Protocol); this is preferred
# over the older cyrus deliver program. Set the mailbox_transport
# as below:
#
# mailbox_transport = lmtp:unix:/var/lib/imap/socket/lmtp
#
# The efficiency of LMTP delivery for cyrus-imapd can be enhanced via
# these settings.
#
# local_destination_recipient_limit = 300
# local_destination_concurrency_limit = 5
#
# Of course you should adjust these settings as appropriate for the
# capacity of the hardware you are using. The recipient limit setting
# can be used to take advantage of the single instance message store
# capability of Cyrus. The concurrency limit can be used to control
# how many simultaneous LMTP sessions will be permitted to the Cyrus
# message store.
#
# To use the old cyrus deliver program you have to set:
#mailbox_transport = cyrus
# The fallback_transport specifies the optional transport in master.cf
# to use for recipients that are not found in the UNIX passwd database.
# This parameter has precedence over the luser_relay parameter.
#
# Specify a string of the form transport:nexthop, where transport is
# the name of a mail delivery transport defined in master.cf. The
# :nexthop part is optional. For more details see the sample transport
# configuration file.
#
# NOTE: if you use this feature for accounts not in the UNIX password
# file, then you must update the "local_recipient_maps" setting in
# the main.cf file, otherwise the SMTP server will reject mail for
# non-UNIX accounts with "User unknown in local recipient table".
#
#fallback_transport = lmtp:unix:/var/lib/imap/socket/lmtp
#fallback_transport =
#transport_maps = hash:/etc/postfix/transport
# The luser_relay parameter specifies an optional destination address
# for unknown recipients. By default, mail for unknown@$mydestination,
# unknown@[$inet_interfaces] or unknown@[$proxy_interfaces] is returned
# as undeliverable.
#
# The following expansions are done on luser_relay: $user (recipient
# username), $shell (recipient shell), $home (recipient home directory),
# $recipient (full recipient address), $extension (recipient address
# extension), $domain (recipient domain), $local (entire recipient
# localpart), $recipient_delimiter. Specify ${name?value} or
# ${name:value} to expand value only when $name does (does not) exist.
#
# luser_relay works only for the default Postfix local delivery agent.
#
# NOTE: if you use this feature for accounts not in the UNIX password
# file, then you must specify "local_recipient_maps =" (i.e. empty) in
# the main.cf file, otherwise the SMTP server will reject mail for
# non-UNIX accounts with "User unknown in local recipient table".
#
#luser_relay = $user@other.host
#luser_relay = $local@other.host
#luser_relay = admin+$local
# JUNK MAIL CONTROLS
#
# The controls listed here are only a very small subset. The file
# SMTPD_ACCESS_README provides an overview.
# The header_checks parameter specifies an optional table with patterns
# that each logical message header is matched against, including
# headers that span multiple physical lines.
#
# By default, these patterns also apply to MIME headers and to the
# headers of attached messages. With older Postfix versions, MIME and
# attached message headers were treated as body text.
#
# For details, see "man header_checks".
#
header_checks = regexp:/etc/postfix/header_checks
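# A minimal sketch of what regexp entries in that table look like (the
# patterns are illustrative, not the real contents of the file):
#
#   /^Subject: make money fast/    REJECT inappropriate content
#   /^X-Mailer: SpamBlaster/       DISCARD
#
# Unlike hash: tables, regexp: tables are read as plain text, so no
# "postmap" run is needed after editing; "postfix reload" picks them up.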
# FAST ETRN SERVICE
#
# Postfix maintains per-destination logfiles with information about
# deferred mail, so that mail can be flushed quickly with the SMTP
# "ETRN domain.tld" command, or by executing "sendmail -qRdomain.tld".
# See the ETRN_README document for a detailed description.
#
# The fast_flush_domains parameter controls what destinations are
# eligible for this service. By default, they are all domains that
# this server is willing to relay mail to.
#
#fast_flush_domains = $relay_domains
# SHOW SOFTWARE VERSION OR NOT
#
# The smtpd_banner parameter specifies the text that follows the 220
# code in the SMTP server's greeting banner. Some people like to see
# the mail version advertised. By default, Postfix shows no version.
#
# You MUST specify $myhostname at the start of the text. That is an
# RFC requirement. Postfix itself does not care.
#
#smtpd_banner = $myhostname ESMTP $mail_name
#smtpd_banner = $myhostname ESMTP $mail_name ($mail_version)
# PARALLEL DELIVERY TO THE SAME DESTINATION
#
# How many parallel deliveries to the same user or domain? With local
# delivery, it does not make sense to do massively parallel delivery
# to the same user, because mailbox updates must happen sequentially,
# and expensive pipelines in .forward files can cause disasters when
# too many are run at the same time. With SMTP deliveries, 10
# simultaneous connections to the same domain could be sufficient to
# raise eyebrows.
#
# Each message delivery transport has its XXX_destination_concurrency_limit
# parameter. The default is $default_destination_concurrency_limit for
# most delivery transports. For the local delivery agent the default is 2.
#local_destination_concurrency_limit = 2
#default_destination_concurrency_limit = 20
# DEBUGGING CONTROL
#
# The debug_peer_level parameter specifies the increment in verbose
# logging level when an SMTP client or server host name or address
# matches a pattern in the debug_peer_list parameter.
#
debug_peer_level = 2
# The debug_peer_list parameter specifies an optional list of domain
# or network patterns, /file/name patterns or type:name tables. When
# an SMTP client or server host name or address matches a pattern,
# increase the verbose logging level by the amount specified in the
# debug_peer_level parameter.
#
#debug_peer_list = 127.0.0.1
#debug_peer_list = some.domain
# The debugger_command specifies the external command that is executed
# when a Postfix daemon program is run with the -D option.
#
# Use "command .. & sleep 5" so that the debugger can attach before
# the process marches on. If you use an X-based debugger, be sure to
# set up your XAUTHORITY environment variable before starting Postfix.
#
debugger_command =
PATH=/bin:/usr/bin:/usr/local/bin:/usr/X11R6/bin
xxgdb $daemon_directory/$process_name $process_id & sleep 5
# If you can't use X, use this to capture the call stack when a
# daemon crashes. The result is in a file in the configuration
# directory, and is named after the process name and the process ID.
#
# debugger_command =
# PATH=/bin:/usr/bin:/usr/local/bin; export PATH; (echo cont;
# echo where) | gdb $daemon_directory/$process_name $process_id 2>&1
# >$config_directory/$process_name.$process_id.log & sleep 5
#
# Another possibility is to run gdb under a detached screen session.
# To attach to the screen session, su root and run "screen -r
# <id_string>" where <id_string> uniquely matches one of the detached
# sessions (from "screen -list").
#
# debugger_command =
# PATH=/bin:/usr/bin:/sbin:/usr/sbin; export PATH; screen
# -dmS $process_name gdb $daemon_directory/$process_name
# $process_id & sleep 1
# INSTALL-TIME CONFIGURATION INFORMATION
#
# The following parameters are used when installing a new Postfix version.
#
# sendmail_path: The full pathname of the Postfix sendmail command.
# This is the Sendmail-compatible mail posting interface.
#
sendmail_path = /usr/sbin/sendmail.postfix
# newaliases_path: The full pathname of the Postfix newaliases command.
# This is the Sendmail-compatible command to build alias databases.
#
newaliases_path = /usr/bin/newaliases.postfix
# mailq_path: The full pathname of the Postfix mailq command. This
# is the Sendmail-compatible mail queue listing command.
#
mailq_path = /usr/bin/mailq.postfix
# setgid_group: The group for mail submission and queue management
# commands. This must be a group name with a numerical group ID that
# is not shared with other accounts, not even with the Postfix account.
#
setgid_group = postdrop
# html_directory: The location of the Postfix HTML documentation.
#
html_directory = no
# manpage_directory: The location of the Postfix on-line manual pages.
#
manpage_directory = /usr/share/man
# sample_directory: The location of the Postfix sample configuration files.
# This parameter is obsolete as of Postfix 2.1.
#
sample_directory = /usr/share/doc/postfix-2.4.5/samples
# readme_directory: The location of the Postfix README files.
#
readme_directory = /usr/share/doc/postfix-2.4.5/README_FILES
# add this to new postfix to get it to add proper message-id and other
# headers to outgoing emails via the gateway.
message_size_limit = 20971520
#inet_protocols = ipv4
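# After editing this file, the effective value of any parameter can be
# checked with "postconf <name>" (e.g. "postconf mydestination"), and
# "postconf -n" lists everything that differs from the built-in defaults;
# apply changes with "postfix reload".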


@ -233,7 +233,7 @@
- iptables/iptables.{{ host_group }}
- iptables/iptables.{{ env }}
- iptables/iptables
when: not inventory_hostname.startswith(('fed-cloud','osbs'))
when: baseiptables == true
notify:
- restart iptables
- reload libvirtd
@ -248,6 +248,7 @@
- iptables
- service
- base
when: baseiptables == true
- name: ip6tables
template: src={{ item }} dest=/etc/sysconfig/ip6tables mode=0600 backup=yes
@ -257,7 +258,7 @@
- iptables/ip6tables.{{ host_group }}
- iptables/ip6tables.{{ env }}
- iptables/ip6tables
when: not inventory_hostname.startswith('fed-cloud09')
when: baseiptables == true
notify:
- restart ip6tables
- reload libvirtd
@ -272,6 +273,7 @@
- ip6tables
- service
- base
when: baseiptables == true
- name: enable journald persistence
file: path=/var/log/journal state=directory
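The hunks above replace per-host name checks with a single baseiptables
switch. A minimal sketch of how such a default could be declared (the
placement in group_vars and the override values are assumptions, not
part of this diff):

    # group_vars/all -- hypothetical placement: on by default
    baseiptables: true

    # group_vars/fed-cloud, group_vars/osbs -- hosts that manage
    # their own firewall rules opt out
    baseiptables: false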


@ -30,6 +30,9 @@
# kojipkgs
-A OUTPUT -p tcp -m tcp -d 10.5.125.36 --dport 80 -j ACCEPT
-A OUTPUT -p tcp -m tcp -d 10.5.125.36 --dport 443 -j ACCEPT
{% if host in groups['buildvm-s390x'] %}
-A OUTPUT -p tcp -m tcp -d 10.16.0.17 --dport 80 -j ACCEPT
{% endif %}
#koji.fp.o
-A OUTPUT -p tcp -m tcp -d 10.5.125.63 --dport 80 -j ACCEPT


@ -33,6 +33,16 @@
- config
- bodhi
- name: Configure alembic
template:
src: alembic.ini
dest: /etc/bodhi/alembic.ini
owner: bodhi
group: root
tags:
- config
- bodhi
- name: setup basic /etc/bodhi/ contents (production)
template: >
src="production.ini.j2"


@ -0,0 +1,59 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = /usr/share/bodhi/alembic
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# Don't bother, this is obtained from the Bodhi config file
sqlalchemy.url = sqlite:///bodhi.db
# Set to true to acquire the global DDL lock for BDR
# See http://bdr-project.org/docs/stable/ddl-replication-advice.html
{% if env == 'staging' %}
bdr = true
{% else %}
bdr = false
{% endif %}
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
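With this alembic.ini in place, pending schema migrations would
typically be applied with the stock alembic CLI (invocation shown as a
sketch; it is not part of this diff):

    alembic -c /etc/bodhi/alembic.ini upgrade head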


@ -397,7 +397,7 @@ debugtoolbar.hosts = 127.0.0.1 ::1
##
## Database
##
sqlalchemy.url = postgresql://bodhi2:{{ bodhi2PasswordSTG }}@db-bodhi/bodhi2
sqlalchemy.url = postgresql://bodhi2:{{ bodhi2PasswordSTG }}@pgbdr.stg.phx2.fedoraproject.org/bodhi2
##
## Templates


@ -0,0 +1,74 @@
---
- name: install ccsdb and its dependencies
yum: name={{ item }} state=present
with_items:
- ccsdb
- mod_wsgi
- python-psycopg2
- libsemanage-python
when: ansible_distribution_major_version|int < 22
- name: install ccsdb and its dependencies
dnf: name={{ item }} state=present enablerepo={{ extra_enablerepos }}
with_items:
- ccsdb
- mod_wsgi
- python-psycopg2
- libsemanage-python
when: ansible_distribution_major_version|int > 21 and ansible_cmdline.ostree is not defined
- name: ensure database is created
delegate_to: "{{ ccsdb_db_host_machine }}"
become_user: postgres
become: true
postgresql_db: db={{ ccsdb_db_name }}
- name: ensure ccsdb db user has access to database
delegate_to: "{{ ccsdb_db_host_machine }}"
become_user: postgres
become: true
postgresql_user: db={{ ccsdb_db_name }}
user={{ ccsdb_db_user }}
password={{ ccsdb_db_password }}
role_attr_flags=NOSUPERUSER
- name: ensure selinux lets httpd talk to postgres
seboolean: name=httpd_can_network_connect_db persistent=yes state=yes
- name: create the /etc/ccsdb folder
file: state=directory
path=/etc/ccsdb
owner=root group=root mode=0755
- name: generate ccsdb config
template: src=ccsdb.cfg dest=/etc/ccsdb/ccsdb.cfg
owner=root group=root mode=0644
notify:
- reload httpd
- name: generate ccsdb apache config
template: src=ccsdb.conf dest=/etc/httpd/conf.d/ccsdb.conf
owner=root group=root mode=0644
notify:
- reload httpd
- name: create the /usr/share/ccsdb folder
file: state=directory
path=/usr/share/ccsdb
owner=root group=root mode=0755
- name: install the wsgi file
template: src=ccsdb.wsgi dest=/usr/share/ccsdb/ccsdb.wsgi
owner=root group=root mode=0644
notify:
- reload httpd
- name: initialize ccsdb database
shell: CCSDB_CONFIG=/etc/ccsdb/ccsdb.cfg ccsdb-cli init_db
- name: Start and enable the different services required
service: name={{ item }} enabled=yes state=started
with_items:
- httpd
- fedmsg-hub
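Note that the init_db step above re-runs on every play. A common way to
make it idempotent is to guard it with a marker file (sketch only; the
marker path is hypothetical):

    - name: initialize ccsdb database
      shell: CCSDB_CONFIG=/etc/ccsdb/ccsdb.cfg ccsdb-cli init_db && touch /etc/ccsdb/.db-initialized
      args:
        creates: /etc/ccsdb/.db-initialized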


@ -0,0 +1,7 @@
SECRET_KEY = '{{ ccsdb_secret_key }}'
SQLALCHEMY_DATABASE_URI = 'postgresql://{{ ccsdb_db_user }}:{{ ccsdb_db_password }}@{{ ccsdb_db_host }}:{{ ccsdb_db_port }}/{{ ccsdb_db_name }}'
FILE_LOGGING = False
LOGFILE = '/var/log/ccsdb/ccsdb.log'
SYSLOG_LOGGING = False
STREAM_LOGGING = True
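# For illustration, with hypothetical variable values the template above
# renders to something like:
#
#   SQLALCHEMY_DATABASE_URI = 'postgresql://ccsdb:s3cret@db01.example.com:5432/ccsdb'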


@ -0,0 +1,27 @@
WSGIDaemonProcess ccsdb user=apache group=apache threads=5
WSGIScriptAlias /{{ ccsdb_endpoint }} /usr/share/ccsdb/ccsdb.wsgi
WSGISocketPrefix run/wsgi
# this isn't the best way to force SSL but it works for now
#RewriteEngine On
#RewriteCond %{HTTPS} !=on
#RewriteRule ^/ccsdb/admin/?(.*) https://%{SERVER_NAME}/$1 [R,L]
<Directory /usr/share/ccsdb>
WSGIProcessGroup ccsdb
WSGIApplicationGroup %{GLOBAL}
WSGIScriptReloading On
<IfModule mod_authz_core.c>
# Apache 2.4
<RequireAny>
Require method GET
Require ip 127.0.0.1 ::1{% for host in allowed_hosts %} {{ host }}{% endfor %}
</RequireAny>
</IfModule>
<IfModule !mod_authz_core.c>
Order allow,deny
Allow from all
</IfModule>
</Directory>


@ -0,0 +1,4 @@
import os
os.environ['CCSDB_CONFIG'] = '/etc/ccsdb/ccsdb.cfg'
from ccsdb.app import _app as application


@ -1,95 +0,0 @@
---
- name: check/create instance
hosts: localhost
user: copr
gather_facts: False
vars_files:
- nova_cloud_vars.yml
vars:
- security_group: builder
- image_id: cba0c766-84ac-4048-b0f5-6d4000af62f8
- OS_USERNAME_OLD: msuchy
- OS_AUTH_URL_OLD: http://172.23.0.2:5000/v2.0
# todo: remove after transition to new cloud
tasks:
- name: generate builder name
local_action: command echo "Copr builder {{ 999999999 | random }}"
register: vm_name
- name: spin it up
local_action: nova_compute auth_url={{OS_AUTH_URL_OLD}} flavor_id=6 image_id={{ image_id }} key_name=buildsys login_password={{OS_PASSWORD_OLD}} login_tenant_name={{OS_TENANT_NAME}} login_username={{OS_USERNAME_OLD}} security_groups={{security_group}} wait=yes name="{{vm_name.stdout}}"
register: nova
# should be able to use nova.private_ip, but it does not work with Fedora Cloud.
- debug: msg="IP={{ nova.info.addresses.vlannet_3[0].addr }}"
- debug: msg="vm_name={{vm_name.stdout}}"
- name: add it to the special group
local_action: add_host hostname={{ nova.info.addresses.vlannet_3[0].addr }} groupname=builder_temp_group
- name: wait for the host to be hot
local_action: wait_for host={{ nova.info.addresses.vlannet_3[0].addr }} port=22 delay=5 timeout=600
- hosts: builder_temp_group
user: root
gather_facts: True
vars:
- files: files/
tasks:
- name: edit hostname to be instance name
shell: hostname `curl -s http://169.254.169.254/2009-04-04/meta-data/instance-id`
- name: install pkgs
yum: state=present pkg={{ item }}
with_items:
- rsync
- openssh-clients
- libselinux-python
- libsemanage-python
- name: add repos
copy: src={{ files }}/{{ item }} dest=/etc/yum.repos.d/{{ item }}
with_items:
- epel6.repo
- name: install additional pkgs
yum: state=present pkg={{ item }}
with_items:
- mock
- createrepo
- yum-utils
- pyliblzma
- name: make sure newest rpm
yum: name={{ item }} state=latest
with_items:
- rpm
- glib2
- ca-certificates
#- yum: name=mock enablerepo=epel-testing state=latest
- name: mockbuilder user
user: name=mockbuilder groups=mock
- name: mockbuilder .ssh
file: state=directory path=/home/mockbuilder/.ssh mode=0700 owner=mockbuilder group=mockbuilder
- name: mockbuilder authorized_keys
authorized_key: user=mockbuilder key='{{ lookup('file', '/home/copr/provision/files/buildsys.pub') }}'
- name: put updated mock configs into /etc/mock
template: src={{ files }}/mock/{{ item }} dest=/etc/mock
with_items:
- site-defaults.cfg
- lineinfile: dest=/etc/mock/fedora-rawhide-x86_64.cfg line="config_opts['package_manager'] = 'dnf'" state=absent
- lineinfile: dest=/etc/mock/fedora-rawhide-i386.cfg line="config_opts['package_manager'] = 'dnf'" state=absent
- lineinfile: dest=/etc/security/limits.conf line="* soft nofile 10240" insertafter=EOF
- lineinfile: dest=/etc/security/limits.conf line="* hard nofile 10240" insertafter=EOF


@ -11,6 +11,7 @@
keypair: buildsys
max_spawn_time: 600
spawning_vm_user: "fedora"
image_name: "copr-builder-f26-x86_64-beta"
tasks:
- name: generate builder name
@ -61,5 +62,5 @@
- nss-softokn-freebl.i686
# DNF module will not resolve the deps, we must install deps manually!
- name: install i686 version of nosync for multilib building
dnf: name=https://kojipkgs.fedoraproject.org//packages/nosync/1.0/5.fc24/i686/nosync-1.0-5.fc24.i686.rpm state=present
dnf: name=https://kojipkgs.fedoraproject.org/packages/nosync/1.0/6.fc26/i686/nosync-1.0-6.fc26.i686.rpm state=present
when: prepare_base_image is defined


@ -3,7 +3,7 @@
gather_facts: False
vars_files:
- nova_cloud_vars_ppc64le.yml
- nova_cloud_vars.yml
vars:
# _OS_AUTH_OPTS: "--os-auth-url {{OS_AUTH_URL}} --os-username {{OS_USERNAME}} --os-password {{OS_PASSWORD}} --os-tenant-name {{OS_TENANT_NAME}} --os-tenant-id {{OS_TENANT_ID}} "
@ -11,6 +11,7 @@
keypair: buildsys
max_spawn_time: 600
spawning_vm_user: "fedora"
image_name: "copr-builder-f26-ppc64le-beta"
tasks:
- name: generate builder name
@ -41,7 +42,10 @@
#prepare_base_image: True
tasks:
- include: "provision_builder_tasks_ppc64le.yml"
- name: enable swap on the 100GB /dev/vda volume for the tmpfs mock plugin
command: swapon /dev/vda
- include: "provision_builder_tasks.yml"
- name: disable offloading
command: ethtool -K eth0 tso off gro off gso off


@ -1,45 +0,0 @@
- name: check/create instance
hosts: 127.0.0.1
gather_facts: False
tasks:
- name: add hypervisor
local_action: add_host hostname=rh-power2.fit.vutbr.cz groupname=spinup_vm_group
- name: spinup vm
hosts: spinup_vm_group
gather_facts: False
user: msuchy
tasks:
- name: spin up VM
shell: /home/msuchy/bin/get-one-vm.sh
register: get_one
- debug: msg="{{ get_one.stdout }}"
- set_fact: builder_ip="{{ get_one.stdout|extract_ip_from_stdout() }}"
- name: wait for the host to be hot
local_action: wait_for host={{ builder_ip }} port=22 delay=1 timeout=600
- name: add builder ip to the special group
local_action: add_host hostname={{ builder_ip }} groupname=builder_temp_group
- name: provision builder
hosts: builder_temp_group
gather_facts: True
user: root
vars:
# pass this options if you need to create new base image from snapshot
#prepare_base_image: True
tasks:
- include: "provision_builder_tasks.yml"
- name: disable offloading
command: ethtool -K eth0 tso off gro off gso off
- yum: state=latest enablerepo="updates-testing" name=mock


@ -1,11 +0,0 @@
[Copr]
name=Copr
failovermethod=priority
baseurl=https://209.132.184.48/results/@copr/copr/fedora-$releasever-x86_64/
https://copr-be.cloud.fedoraproject.org/results/@copr/copr/fedora-$releasever-x86_64/
https://172.25.32.109/results/@copr/copr/fedora-$releasever-x86_64/
enabled=1
gpgcheck=1
gpgkey=https://copr-be.cloud.fedoraproject.org/results/@copr/copr/pubkey.gpg
skip_if_unavailable=1


@ -1,24 +0,0 @@
config_opts['root'] = 'custom-1-i386'
config_opts['target_arch'] = 'i686'
config_opts['legal_host_arches'] = ('i386', 'i586', 'i686', 'x86_64')
config_opts['chroot_setup_cmd'] = ''
config_opts['extra_chroot_dirs'] = [ '/run/lock', ]
config_opts['package_manager'] = 'dnf'
config_opts['yum.conf'] = """
[main]
keepcache=1
debuglevel=2
reposdir=/dev/null
logfile=/var/log/dnf.log
retries=20
obsoletes=1
gpgcheck=0
assumeyes=1
syslog_ident=mock
syslog_device=
install_weak_deps=0
metadata_expire=0
mdpolicy=group:primary
"""


@ -1,24 +0,0 @@
config_opts['root'] = 'custom-1-ppc64le'
config_opts['target_arch'] = 'ppc64le'
config_opts['legal_host_arches'] = ('ppc64le',)
config_opts['chroot_setup_cmd'] = ''
config_opts['extra_chroot_dirs'] = [ '/run/lock', ]
config_opts['package_manager'] = 'dnf'
config_opts['yum.conf'] = """
[main]
keepcache=1
debuglevel=2
reposdir=/dev/null
logfile=/var/log/dnf.log
retries=20
obsoletes=1
gpgcheck=0
assumeyes=1
syslog_ident=mock
syslog_device=
install_weak_deps=0
metadata_expire=0
mdpolicy=group:primary
"""
