removing taskotron roles

This commit is contained in:
Tim Flink 2020-06-02 13:04:35 -06:00 committed by tflink
parent 43140ff50f
commit 8aa9f03b36
91 changed files with 0 additions and 4435 deletions

View file

@@ -1,42 +0,0 @@
# Tasks that create, configure, validate and run the buildbot master.
# NOTE(review): list-item indentation restored — it was flattened by diff
# extraction, which makes the YAML unparseable. Module args keep the
# original key=value form to stay faithful to the deleted file.
- name: create master
  become: true
  become_user: "{{ buildmaster_user }}"
  command: creates={{ buildmaster_dir }} buildbot create-master -r {{ buildmaster_dir }}

- name: upload master config
  become: true
  become_user: "{{ buildmaster_user }}"
  template: src={{ item }} dest={{ buildmaster_dir }}/master.cfg owner={{ buildmaster_user }} group={{ buildmaster_user }}
  with_first_found:
    - '{{ buildmaster_template }}.{{ deployment_type }}'
    - '{{ buildmaster_template }}'

- name: check master config
  become: true
  become_user: "{{ buildmaster_user }}"
  command: buildbot checkconfig {{ buildmaster_dir }}

#- name: upgrade master
#  command: buildbot upgrade-master {{ buildmaster_dir }}

- name: generate buildmaster service file
  template: src=buildmaster.service.j2 dest=/lib/systemd/system/buildmaster.service owner=root group=root mode=0644
  register: buildmaster_service

- name: reload systemd
  command: systemctl daemon-reload
  when: buildmaster_service.changed

- name: start and enable buildmaster service
  service: name=buildmaster enabled=yes state={{ (buildmaster_service.changed) | ternary('restarted','started') }}
  # Workaround for https://pagure.io/taskotron/issue/139
  # Otherwise, Ansible playbook will fail if it has to wait for another buildmaster.service start try
  register: result
  until: result is succeeded
  delay: 70
  retries: 3

- name: reconfig master
  become: true
  become_user: "{{ buildmaster_user }}"
  command: buildbot reconfig {{ buildmaster_dir }}

View file

@@ -1,24 +0,0 @@
# systemd unit for the buildbot master, templated by Ansible.
[Unit]
Description=Buildmaster for taskbot
After=network.target
# Workaround for buildmaster not starting on system reboot
# https://pagure.io/taskotron/issue/139
StartLimitIntervalSec=15m
StartLimitBurst=5
[Service]
Type=forking
# disabled because of https://pagure.io/taskotron/issue/236
#PIDFile={{ buildmaster_dir }}/twistd.pid
ExecStart=/bin/buildbot start {{ buildmaster_dir }}
ExecStop=/bin/buildbot stop {{ buildmaster_dir }}
ExecReload=/bin/buildbot reconfig {{ buildmaster_dir }}
User={{ buildmaster_user }}
Group={{ buildmaster_user }}
# Workaround for buildmaster not starting on system reboot
# https://pagure.io/taskotron/issue/139
Restart=on-failure
RestartSec=30
[Install]
WantedBy=multi-user.target

View file

@@ -1,257 +0,0 @@
# -*- python -*-
# ex: set syntax=python:
# This is a sample buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory.
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
####### BUILDSLAVES
# The 'slaves' list defines the set of recognized buildslaves. Each element is
# a BuildSlave object, specifying a unique slave name and password. The same
# slave name and password must be configured on the slave.
from buildbot.buildslave import BuildSlave
from buildbot.buildslave import openstack
c['slaves'] = [
{% for buildslave in buildslaves %}
BuildSlave("{{ buildslave }}", "{{ qa_stg_buildslave_password }}"),
{% endfor %}
]
{% if deployment_type == 'qadevel-prod' %}
c['slaves'].append(
openstack.OpenStackLatentBuildSlave(
"{{ qadevel_prod_buildslave_user }}",
"{{ qadevel_prod_buildslave_password }}",
flavor=3,
image="5345b501-9264-4198-a185-eb5a2282fed0",
os_username="{{ taskotron_openstack_username }}",
os_password="{{ taskotron_openstack_password }}",
os_tenant_name="{{ taskotron_openstack_tenant_name }}",
os_auth_url="https://fed-cloud02.cloud.fedoraproject.org:5001/v2.0"
)
)
{% endif %}
# 'slavePortnum' defines the TCP port to listen on for connections from slaves.
# This must match the value configured into the buildslaves (with their
# --master option)
c['slavePortnum'] = {{ buildslave_port }}
####### CHANGESOURCES
# the 'change_source' setting tells the buildmaster how it should find out
# about source code changes. Here we point to the buildbot clone of pyflakes.
from buildbot.changes.gitpoller import GitPoller
interval = {{ buildslave_poll_interval }}
c['change_source'] = []
{#
{% if deployment_type == 'qa-prod' %}
#}
c['change_source'].append(GitPoller(
'https://pagure.io/taskotron/taskotron-trigger.git',
workdir='gitpoller-workdir-trigger', branch='develop',
pollinterval=interval,
project='trigger'))
c['change_source'].append(GitPoller(
'https://pagure.io/taskotron/libtaskotron.git',
workdir='gitpoller-workdir-libtaskotron', branch='develop',
pollinterval=interval,
project='libtaskotron'))
c['change_source'].append(GitPoller(
'https://pagure.io/taskotron/resultsdb.git',
workdir='gitpoller-workdir-resultsdb', branch='develop',
pollinterval=interval,
project='resultsdb'))
c['change_source'].append(GitPoller(
'https://pagure.io/taskotron/resultsdb_api.git',
workdir='gitpoller-workdir-resultsdb_api', branch='develop',
pollinterval=interval,
project='resultsdb_api'))
{#
{% endif %}
#}
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes. In this
# case, just kick off a 'runtests' build
from buildbot.schedulers.basic import SingleBranchScheduler
from buildbot.schedulers.forcesched import ForceScheduler
from buildbot.changes import filter
c['schedulers'] = []
{# {% if deployment_type == 'qadevel-prod' %} #}
c['schedulers'].append(SingleBranchScheduler(
name="trigger-scheduler",
change_filter=filter.ChangeFilter(project='trigger', branch='develop'),
treeStableTimer=None,
builderNames=["trigger-builder"]))
c['schedulers'].append(SingleBranchScheduler(
name="libtaskotron-scheduler",
change_filter=filter.ChangeFilter(project='libtaskotron', branch='develop'),
treeStableTimer=None,
builderNames=["libtaskotron-builder"]))
c['schedulers'].append(SingleBranchScheduler(
name="resultsdb-scheduler",
change_filter=filter.ChangeFilter(project='resultsdb', branch='develop'),
treeStableTimer=None,
builderNames=["resultsdb-builder"]))
#c['schedulers'].append(ForceScheduler(
# name="openstack-force",
# builderNames=["openstack-builder"]))
{#
{% endif %}
{% if deployment_type == 'qa-stg' %}
c['schedulers'].append(SingleBranchScheduler(
name="libtaskotron",
builderNames=['libtaskotron-builder'],
treeStableTimer=None,
change_filter=filter.ChangeFilter(project='libtaskotron',
branch='develop')))
c['schedulers'].append(ForceScheduler(
name="force",
builderNames=["libtaskotron-builder"]))
{% endif %}
#}
####### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which slaves can execute them. Note that any particular build will
# only take place on one slave.
from buildbot.process.factory import BuildFactory
from buildbot.steps.source.git import Git
from buildbot.steps.shell import ShellCommand
from buildbot.process.properties import Property, Interpolate
from buildbot.steps.transfer import DirectoryUpload
def create_test_factory(repo_name, func=True):
factory = BuildFactory()
# factory.addStep(Git(repourl=Interpolate('{{ repo_base }}/%s.git' % repo_name),
# mode='full', env={'GIT_SSL_NO_VERIFY': 'yes'},
# method='clobber'))
factory.addStep(Git(repourl=Interpolate('https://bitbucket.org/tflink/%s.git' % repo_name),
mode='full',method='clobber'))
factory.addStep(ShellCommand(command=['doit', 'envtype=ci', 'test'], descriptionDone = ['run tests']))
factory.addStep(ShellCommand(command=['doit', 'basedir=/srv/content/', 'envtype=ci', 'chainbuild'], descriptionDone=['Chainbuild RPMs']))
factory.addStep(ShellCommand(command=['doit', 'basedir=/srv/content/', 'envtype=ci', 'buildtype=release', 'releasedocs'], descriptionDone=['Build Documentation']))
factory.addStep(ShellCommand(command=['doit', 'basedir=/srv/content/', 'envtype=ci', 'buildtype=release', 'updatelatest'], descriptionDone=['Update Symlinks']))
return factory
trigger_factory = create_test_factory('taskotron-trigger')
libtaskotron_factory = create_test_factory('libtaskotron-docs')
resultsdb_factory = create_test_factory('resultsdb')
resultsdb_api_factory = create_test_factory('resultsdb_api')
from buildbot.config import BuilderConfig
c['builders'] = []
{#
{% if deployment_type == 'qa-prod' %}
#}
c['builders'].append(
BuilderConfig(name="trigger-builder",
slavenames=[{% for buildslave in buildslaves %}"{{ buildslave }}",{% endfor %}],
factory=trigger_factory))
c['builders'].append(
BuilderConfig(name="libtaskotron-builder",
slavenames=[{% for buildslave in buildslaves %}"{{ buildslave }}",{% endfor %}],
factory=libtaskotron_factory))
c['builders'].append(
BuilderConfig(name="resultsdb-builder",
slavenames=[{% for buildslave in buildslaves %}"{{ buildslave }}",{% endfor %}],
factory=resultsdb_factory))
c['builders'].append(
BuilderConfig(name="resultsdb_api-builder",
slavenames=[{% for buildslave in buildslaves %}"{{ buildslave }}",{% endfor %}],
factory=resultsdb_api_factory))
#c['builders'].append(
# BuilderConfig(name="openstack-builder",
# slavenames=["{{ qa_stg_buildslave_user }}"],
# factory=trigger_factory))
{#
{% endif %}
{% if deployment_type == 'qa-stg' %}
c['builders'].append(
BuilderConfig(name="libtaskotron-builder",
slavenames=[{% for buildslave in buildslaves %}"{{ buildslave }}",{% endfor %}],
factory=libtaskotron_factory))
{% endif %}
#}
####### STATUS TARGETS
# 'status' is a list of Status Targets. The results of each build will be
# pushed to these targets. buildbot/status/*.py has a variety to choose from,
# including web pages, email senders, and IRC bots.
#
c['status'] = []
from buildbot.status import html
from buildbot.status.web import authz, auth
authz_cfg=authz.Authz(
# change any of these to True to enable; see the manual for more
# options
{% if deployment_type == 'qa-stg' %}
auth=auth.BasicAuth([("{{ qa_stg_buildbot_master_user }}","{{ qa_stg_buildbot_master_password }}")]),
{% endif %}
{% if deployment_type == 'qa-prod' %}
auth=auth.BasicAuth([("{{ qa_prod_prod_buildbot_master_user }}","{{ qa_prod_prod_buildbot_master_password }}")]),
{% endif %}
gracefulShutdown = False,
forceBuild = 'auth', # use this to test your slave once it is set up
forceAllBuilds = False,
pingBuilder = False,
stopBuild = False,
stopAllBuilds = False,
cancelPendingBuild = 'auth',
)
c['status'].append(html.WebStatus(http_port=8010, authz=authz_cfg))
####### PROJECT IDENTITY
# the 'title' string will appear at the top of this buildbot
# installation's html.WebStatus home page (linked to the
# 'titleURL') and is embedded in the title of the waterfall HTML page.
c['title'] = "Taskotron CI"
c['titleURL'] = "http://{{ external_hostname }}/{{buildmaster_endpoint}}/"
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server (usually the html.WebStatus page) is visible. This
# typically uses the port number set in the Waterfall 'status' entry, but
# with an externally-visible host name which the buildbot cannot figure out
# without some help.
c['buildbotURL'] = "http://{{ external_hostname }}/{{buildmaster_endpoint}}/"
####### DB URL
c['db'] = {
# This specifies what database buildbot uses to store its state. You can leave
# this at its default for all but the largest installations.
'db_url' : "sqlite:///state.sqlite",
}

View file

@@ -1,489 +0,0 @@
# -*- python -*-
# ex: set filetype=python:
from buildbot.plugins import *
# This is a buildmaster config file. It must be installed as
# 'master.cfg' in your buildmaster's base directory.
# This is the dictionary that the buildmaster pays attention to. We also use
# a shorter alias to save typing.
c = BuildmasterConfig = {}
c['buildbotNetUsageData'] = 'basic'
####### WORKERS
# The 'workers' list defines the set of recognized workers. Each element is
# a Worker object, specifying a unique worker name and password. The same
# worker name and password must be configured on the worker.
c['workers'] = [
{% for buildslave in buildslaves %}
{% if deployment_type in ['dev', 'stg', 'prod'] %}
worker.Worker("{{ buildslave }}", "{{ buildslave_password }}"),
{% elif deployment_type == 'local' %}
worker.Worker("{{ buildslave }}", "{{ local_buildslave_password }}"),
{% endif %}
{% endfor %}
]
# 'protocols' contains information about protocols which master will use for
# communicating with workers. You must define at least 'port' option that workers
# could connect to your master with this protocol.
# 'port' must match the value configured into the workers (with their
# --master option)
c['protocols'] = {'pb': {'port': {{ buildslave_port }} }}
####### SCHEDULERS
# Configure the Schedulers, which decide how to react to incoming changes. In this
# case, just kick off a 'runtests' build
import buildbot.schedulers.basic
class JobScheduler(buildbot.schedulers.basic.BaseBasicScheduler):
def __init__(self, name, **kwargs):
buildbot.schedulers.basic.BaseBasicScheduler.__init__(self, name, **kwargs)
def getChangeFilter(self, branch, branches, change_filter, categories):
return util.ChangeFilter.fromSchedulerConstructorArgs(
change_filter=change_filter, categories=categories)
def getTimerNameForChange(self, changes):
return "only"
def getChangeClassificationsForTimer(self, sched_id, timer_name):
return self.master.db.schedulers.getChangeClassifications(sched_id)
c['schedulers'] = []
c['schedulers'].append(JobScheduler(
name="jobsched-noarch",
builderNames=["all"],
treeStableTimer=None,
change_filter=util.ChangeFilter(project='rpmcheck',
category='noarch')))
c['schedulers'].append(JobScheduler(
name="jobsched-i386",
builderNames=['i386'],
treeStableTimer=None,
change_filter=util.ChangeFilter(project='rpmcheck',
category='i386')))
c['schedulers'].append(JobScheduler(
name="jobsched-x86_64",
builderNames=['x86_64'],
treeStableTimer=None,
change_filter=util.ChangeFilter(project='rpmcheck',
category='x86_64')))
c['schedulers'].append(schedulers.ForceScheduler(
name="force",
builderNames=["all", 'x86_64', 'i386']))
c['schedulers'].append(schedulers.ForceScheduler(
name="rpmcheck",
builderNames=["all", 'x86_64', 'i386'],
properties=[
util.StringParameter(
name='taskname',
default='rpmlint',
size=256,
),
util.StringParameter(
name='item',
default='',
size=256,
),
util.StringParameter(
name='item_type',
default='koji_build',
size=256,
),
util.StringParameter(
name='uuid',
default='',
size=256,
),
util.StringParameter(
name='arch',
default='x86_64',
size=128,
),
]))
####### RESOURCE LOCKS
#
# This is a set of resource locks to make sure that we don't have too many things
# going on on each slave at one time.
build_lock = util.WorkerLock("worker_builds",
maxCount=1)
####### BUILDERS
# The 'builders' list defines the Builders, which tell Buildbot how to perform a build:
# what steps, and which workers can execute them. Note that any particular build will
# only take place on one worker.
factory = util.BuildFactory()
{% if deployment_type in ['local'] %}
# clean out /var/tmp/taskotron (see T253)
factory.addStep(steps.ShellCommand(
command="rm -rf /var/tmp/taskotron/*",
name="rm_tmp",
descriptionDone=['Clean tmp'],
))
# clean the log (see T230)
factory.addStep(steps.ShellCommand(
command=["rm", "-f", "/var/log/taskotron/taskotron.log"],
name="rm_log",
descriptionDone=['Clean log'],
))
{% endif %}
{% if deployment_type in ['dev', 'stg', 'prod'] %}
# clean out /var/tmp/taskotron (see T253)
factory.addStep(steps.ShellCommand(
command=util.Interpolate("rm -rf /var/tmp/taskotron/%(prop:workername)s/*"),
name="rm_tmp",
descriptionDone='Clean tmp',
))
{% endif %}
# prevent duplicated buildbot jobs
# (see https://pagure.io/taskotron/issue/273 )
factory.addStep(steps.ShellCommand(
command=util.Interpolate(
'mkdir /var/lib/taskotron/artifacts/%(prop:uuid)s/ || '
'( echo Multiple jobs with same UUID detected, aborting execution!; '
' echo See https://pagure.io/taskotron/issue/273 ; '
' exit 1 )'
),
descriptionDone='Create artifacs dir on slave',
haltOnFailure=True,
))
# check out the source
factory.addStep(steps.Git(
repourl=util.Property('git_repo', default=util.Interpolate(
'{{ grokmirror_user }}@{{ buildmaster }}:/var/lib/git/mirror/fedoraqa/%(prop:taskname)s/')),
branch=util.Property('git_branch', default='{{ grokmirror_default_branch }}'),
mode='full',
method='clobber',
shallow=True,
descriptionDone='Clone task',
))
# run the runner
factory.addStep(steps.ShellCommand(
command=["runtask",
'-i', util.Interpolate('%(prop:item)s'),
'-t', util.Interpolate('%(prop:item_type)s'),
'-a', util.Interpolate('%(prop:arch)s'),
'-j', util.Interpolate('%(prop:buildername)s/%(prop:buildnumber)s'),
'--uuid', util.Interpolate('%(prop:uuid)s'),
'.',
],
descriptionDone=[
util.Interpolate('%(prop:taskname)s on '),
util.Interpolate('%(prop:item)s (%(prop:arch)s)')
],
name='runtask',
timeout=20*60,
{% if deployment_type in ['dev', 'stg', 'prod'] %}
sigtermTime=5*60,
lazylogfiles=True,
logfiles={
'taskotron.log': {'filename': util.Interpolate('/var/lib/taskotron/artifacts/%(prop:uuid)s/taskotron/taskotron.log')},
'heartbeat.log': {'filename': util.Interpolate('/var/lib/taskotron/artifacts/%(prop:uuid)s/taskotron/heartbeat.log')},
},
{% elif deployment_type in ['local'] %}
logfiles={
'taskotron.log': {'filename': '/var/log/taskotron/taskotron.log'},
},
{% endif %}
))
# make sure minion is removed
factory.addStep(steps.ShellCommand(
command=util.Interpolate('testcloud instance remove --force taskotron-%(prop:uuid)s; true'),
descriptionDone='Remove minion',
))
# create artifacts dir on master
# and also prevent duplicated buildbot jobs
# (see https://pagure.io/taskotron/issue/273 )
factory.addStep(steps.MasterShellCommand(
command=util.Interpolate(
'mkdir -m 0755 {{ public_artifacts_dir }}/%(prop:uuid)s/ || '
'( echo Multiple jobs with same UUID detected, aborting execution!; '
' echo See https://pagure.io/taskotron/issue/273 ; '
' exit 1 )'
),
descriptionDone='Create artifacs dir on master',
haltOnFailure=True,
))
# copy artifacts to master
factory.addStep(steps.DirectoryUpload(
workersrc=util.Interpolate('/var/lib/taskotron/artifacts/%(prop:uuid)s/'),
masterdest=util.Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s/'),
descriptionDone='Copy artifacts',
))
{% if deployment_type in ['local'] %}
# copy taskotron log to master
factory.addStep(steps.FileUpload(
workersrc='/var/log/taskotron/taskotron.log',
masterdest=util.Interpolate('{{ public_artifacts_dir }}/%(prop:uuid)s/taskotron.log'),
mode=0644,
))
{% endif %}
# save stdio from runtask step
# FIXME: worked with buildbot 0.8, later buildbots changed URLs
# factory.addStep(steps.MasterShellCommand(
# command=['curl', '-o', util.Interpolate('/srv/taskotron/artifacts/%(prop:uuid)s/taskotron/stdio.log'),
# util.Interpolate('http://127.0.0.1/taskmaster/builders/%(prop:buildername)s/builds/'
# '%(prop:buildnumber)s/steps/runtask/logs/stdio/text')
# ],
# descriptionDone='Save runtask stdio log',
# ))
# ensure all artifacts are readable
factory.addStep(steps.MasterShellCommand(
command=['chmod', '-R', 'o+r', util.Interpolate('/srv/taskotron/artifacts/%(prop:uuid)s/')],
descriptionDone='Set file permissions',
))
# gzip artifacts
factory.addStep(steps.MasterShellCommand(
command=util.Interpolate('find {{ public_artifacts_dir }}/%(prop:uuid)s/ -type f -exec gzip {} \;'),
descriptionDone='Compress artifacs',
))
# render current time when needed
import datetime
from buildbot.process.properties import renderer
@renderer
def today(props):
return datetime.datetime.now().strftime("%Y%m%d")
# prevent duplicated buildbot jobs
# (see https://pagure.io/taskotron/issue/273 )
factory.addStep(steps.MasterShellCommand(
command=util.Interpolate(
'! test -d {{ public_artifacts_dir }}/%(kw:today)s/%(prop:uuid)s/ || '
'( echo Multiple jobs with same UUID detected, aborting execution!; '
' echo See https://pagure.io/taskotron/issue/273 ; '
' exit 1 )',
today=today,
),
descriptionDone='Check duplicate jobs',
haltOnFailure=True,
))
# move the artifacts to the correct dir on the master
factory.addStep(steps.MasterShellCommand(
command=util.Interpolate(
'mkdir -p -m 0755 {{ public_artifacts_dir }}/%(kw:today)s && '
'mkdir -p -m 0755 {{ public_artifacts_dir }}/all && '
'mv {{ public_artifacts_dir }}/%(prop:uuid)s/ {{ public_artifacts_dir }}/%(kw:today)s/ && '
'ln -s {{ public_artifacts_dir }}/%(kw:today)s/%(prop:uuid)s {{ public_artifacts_dir }}/all/',
today=today,
),
descriptionDone='Move artifacs',
))
c['builders'] = []
c['builders'].append(util.BuilderConfig(
name="x86_64",
workernames=[
{% for buildslave in x86_64_buildslaves %}
"{{ buildslave }}",
{% endfor %}
],
factory=factory,
locks=[
build_lock.access('counting')
],
collapseRequests=False,
))
c['builders'].append(util.BuilderConfig(
name="i386",
workernames=[
{% for buildslave in i386_buildslaves %}
"{{ buildslave }}",
{% endfor %}
],
factory=factory,
locks=[
build_lock.access('counting')
],
collapseRequests=False,
))
c['builders'].append(util.BuilderConfig(
name="all",
workernames=[
{% for buildslave in buildslaves %}
"{{ buildslave }}",
{% endfor %}
],
factory=factory,
locks=[
build_lock.access('counting')
],
collapseRequests=False,
))
####### BUILDBOT SERVICES
# 'services' is a list of BuildbotService items like reporter targets. The
# status of each build will be pushed to these targets. buildbot/reporters/*.py
# has a variety to choose from, like IRC bots.
c['services'] = []
## Email notifications
from buildbot.plugins import reporters
{% raw %}
email_template = u'''
<h4>Build status: {{ summary }}</h4>
<table cellspacing="10">
<tr><td>Task:</td><td><b>{{ build['properties']['taskname'][0] }}</b></td></tr>
<tr><td>Item:</td><td><b>{{ build['properties']['item'][0] }}</b></td></tr>
<tr><td>Item type:</td><td><b>{{ build['properties']['item_type'][0] }}</b></td></tr>
<tr><td>Arch:</td><td><b>{{ build['properties']['arch'][0] }}</b></td></tr>
<tr><td>Worker:</td><td><b>{{ workername }}</b></td></tr>
<tr><td>Artifacts:</td><td><a href="{{ artifacts_url }}/{{ build['properties']['uuid'][0] }}">
{{ artifacts_url }}/{{ build['properties']['uuid'][0] }}</a></td></tr>
<tr><td>Complete logs:</td><td><a href="{{ build_url }}">{{ build_url }}</a></td></tr>
</table>
{% for step in build['steps'] if statuses[step.results] in ["warnings", "failure", "exception"] %}
<p><b>Step: {{ step['name'] }}: {{ statuses[step['results']] }}</b></p>
{% for log in step['logs'] %}
<p>Last 40 lines of <i>{{ log['name'] }}</i>:</p>
{# buildbot returns an extra first character for each line (a bug?) denoting its color #}
<pre>
{% for line in log['content']['content'].split('\n')[-40:] %}{{ line[1:] }}
{% endfor %}
</pre>
<hr/>
{% endfor %}
{% endfor %}
'''
{% endraw %}
html_message_formatter = reporters.MessageFormatter(
template=email_template,
template_type='html',
wantProperties=True,
wantSteps=True,
wantLogs=True,
ctx=dict(
statuses=util.Results,
artifacts_url="https://{{ external_hostname }}/artifacts/all",
),
)
mn = reporters.MailNotifier(
fromaddr='taskotron@fedoraproject.org',
sendToInterestedUsers=False,
subject="%(projectName)s %(result)s on {{ deployment_type }} %(builder)s",
mode=('failing', 'exception', 'warnings'),
extraRecipients=['qa-taskotron-admin-members@fedoraproject.org'],
relayhost="bastion.phx2.fedoraproject.org",
messageFormatter=html_message_formatter)
c['services'].append(mn)
## ExecDB's push-notifications
sp = reporters.HttpStatusPush(
serverUrl="{{ execdb_statuspush }}",
wantProperties=True,
wantSteps=True,
)
c['services'].append(sp)
####### PROJECT IDENTITY
# the 'title' string will appear at the top of this buildbot installation's
# home pages (linked to the 'titleURL').
c['title'] = "Taskotron"
c['titleURL'] = "https://{{ external_hostname }}/{{buildmaster_endpoint}}/"
# the 'buildbotURL' string should point to the location where the buildbot's
# internal web server is visible. This typically uses the port number set in
# the 'www' entry below, but with an externally-visible host name which the
# buildbot cannot figure out without some help.
c['buildbotURL'] = "https://{{ external_hostname }}/{{buildmaster_endpoint}}/"
####### WEB
c['www'] = {
'port': 8010,
'change_hook_dialects': {
'base': True,
},
'auth': util.UserPasswordAuth({
{% if deployment_type == 'dev' %}
"{{ dev_buildbot_user }}": "{{ dev_buildbot_password }}",
{% elif deployment_type == 'stg' %}
"{{ stg_buildbot_user }}": "{{ stg_buildbot_password }}",
{% elif deployment_type == 'prod' %}
"{{ prod_buildbot_user }}": "{{ prod_buildbot_password }}",
{% elif deployment_type == 'local' %}
"{{ local_buildbot_user }}": "{{ local_buildbot_password }}",
{% endif %}
}),
'authz': util.Authz(
allowRules=[
util.AnyControlEndpointMatcher(role="admins"),
],
roleMatchers=[
{% if deployment_type == 'dev' %}
util.RolesFromUsername(roles=['admins'], usernames=["{{ dev_buildbot_user }}"]),
{% elif deployment_type == 'stg' %}
util.RolesFromUsername(roles=['admins'], usernames=["{{ stg_buildbot_user }}"]),
{% elif deployment_type == 'prod' %}
util.RolesFromUsername(roles=['admins'], usernames=["{{ prod_buildbot_user }}"]),
{% elif deployment_type == 'local' %}
util.RolesFromUsername(roles=['admins'], usernames=["{{ local_buildbot_user }}"]),
{% endif %}
]
),
}
####### DB URL
c['db'] = {
# This specifies what database buildbot uses to store its state.
{% if deployment_type in ['dev', 'stg', 'prod'] %}
'db_url' : "postgresql://{{ buildmaster_db_user }}:{{ buildmaster_db_password }}@{{ buildmaster_db_host }}/{{ buildmaster_db_name }}",
{% elif deployment_type == 'local' %}
'db_url' : "postgresql://{{ local_buildmaster_db_user }}:{{ local_buildmaster_db_password }}@127.0.0.1/{{ buildmaster_db_name }}",
{% endif %}
}

View file

@@ -1,2 +0,0 @@
---
extra_enablerepos: ''

View file

@@ -1,11 +0,0 @@
module allow-home-pid-files 1.0;
require {
type user_home_t;
type init_t;
class file { open read unlink };
}
#============= init_t ==============
allow init_t user_home_t:file { open read unlink };

View file

@@ -1,82 +0,0 @@
---
# Install and configure the taskotron buildmaster host: packages, the
# buildmaster user and home directory, SELinux bits, the httpd reverse-proxy
# config and the postgres database/users.
# NOTE(review): list-item indentation restored — it was flattened by diff
# extraction, which makes the YAML unparseable. Module args keep the
# original key=value form to stay faithful to the deleted file.
- name: start httpd (provided in the apache role)
  service: name=httpd state=started

- name: ensure packages required for buildmaster are installed
  dnf: name={{ item }} state=present
  with_items:
    - buildbot-master
    - buildbot-www
    - python2-libsemanage
    - python3-psycopg2
    - policycoreutils-python-utils
    - python3-treq
  when: ansible_cmdline.ostree is not defined

- name: ensure additional packages required for CI buildmaster are installed
  dnf: name={{ item }} state=present
  when: deployment_type in ['qa-prod', 'qa-stg']
  with_items:
    - python-novaclient

- name: add the buildmaster user
  user: name={{ buildmaster_user }} home={{ buildmaster_home }}

- name: ensure buildmaster user can use home directory (old)
  file: path="{{ buildmaster_home }}" state=directory owner=buildmaster group=buildmaster mode=0775 setype=user_home_t
  when: deployment_type == 'local'

- name: ensure buildmaster user can use home directory (new)
  file: path="{{ buildmaster_home }}" state=directory owner=buildmaster group=buildmaster mode=0775
  when: deployment_type in ['dev', 'stg', 'prod']

- name: set the selinux fcontext type for the buildmaster_home to var_lib_t
  command: semanage fcontext -a -t var_lib_t "{{ buildmaster_home }}(/.*)?"
  when: deployment_type in ['qa-stg']

- name: ensure correct fcontext for buildmaster home (new)
  file: path="{{ buildmaster_home }}" setype=var_lib_t owner=buildmaster group=buildmaster mode=0775 state=directory
  when: deployment_type in ['dev', 'stg', 'prod']

#- name: make sure the selinux fcontext is restored
#  command: restorecon -R "{{ buildmaster_home }}"
#  when: deployment_type in ['dev', 'stg', 'prod', 'qa-stg']

- name: allow httpd tcp connections with selinux
  seboolean: name=httpd_can_network_connect state=true persistent=yes

- name: copy httpd config
  template: src=buildmaster.conf.j2 dest=/etc/httpd/conf.d/buildmaster.conf owner=root group=root
  notify:
    - reload httpd

- name: ensure buildmaster database is created
  delegate_to: "{{ buildmaster_db_host }}"
  become: true
  become_user: postgres
  postgresql_db: db={{ buildmaster_db_name }}

- name: ensure dev db user has access to dev database
  when: deployment_type in ['dev', 'stg']
  delegate_to: "{{ buildmaster_db_host }}"
  become: true
  become_user: postgres
  postgresql_user: db={{ buildmaster_db_name }} user={{ buildmaster_db_user }} password={{ buildmaster_db_password }} role_attr_flags=NOSUPERUSER

- name: ensure prod db user has access to prod database
  when: deployment_type == 'prod'
  delegate_to: "{{ buildmaster_db_host }}"
  become: true
  become_user: postgres
  postgresql_user: db={{ buildmaster_db_name }} user={{ prod_buildmaster_db_user }} password={{ prod_buildmaster_db_password }} role_attr_flags=NOSUPERUSER

- name: ensure local db user has access to local database
  when: deployment_type == 'local'
  delegate_to: "{{ buildmaster_db_host }}"
  become: true
  become_user: postgres
  postgresql_user: db={{ buildmaster_db_name }} user={{ local_buildmaster_db_user }} password={{ local_buildmaster_db_password }} role_attr_flags=NOSUPERUSER

- name: put robots.txt at web root of external hostname
  when: robots_path is defined
  template: src=robots.txt.j2 dest={{ robots_path }}/robots.txt owner=apache group=apache mode=0644

View file

@@ -1,16 +0,0 @@
<Location /{{ buildmaster_endpoint }}/ >
ProxyPass "http://127.0.0.1:8010/"
ProxyPassReverse "http://127.0.0.1:8010/"
ProxyPreserveHost On
</Location>
# The /bm/ws endpoint must be defined after the /bm endpoint, because with
# apache the latest matching configuration is used
<Location /{{ buildmaster_endpoint }}/ws >
ProxyPass "ws://127.0.0.1:8010/ws"
ProxyPassReverse "ws://127.0.0.1:8010/ws"
ProxyPreserveHost On
</Location>

View file

@@ -1,6 +0,0 @@
User-agent: *
Disallow: /{{ buildmaster_endpoint }}/
Disallow: /{{ resultsdb_fe_endpoint }}/
Disallow: /resultsdb_api/
Disallow: /execdb/
Disallow: /artifacts/

View file

@@ -1,6 +0,0 @@
---
# Default variables for the buildslave role.
# NOTE(review): nesting under `slaves:` restored — it was flattened by diff
# extraction; as shown, `home`/`dir` would not parse as part of the list item.
taskotron_admin_email: root@localhost.localdomain
# Each entry defines a worker account: unix user, home directory and the
# buildbot worker base directory.
slaves:
  - user: ""
    home: ""
    dir: ""

View file

@@ -1,142 +0,0 @@
- name: create slave
become: true
become_user: "{{ slave_user }}"
command: creates={{ slave_dir }} buildslave create-slave {{ slave_dir }} {{ buildmaster }} buildslave passwd
when: deployment_type in ['local', 'qa-stg']
- name: generate slave config
become: true
become_user: "{{ slave_user }}"
template: src={{ item }} dest={{ slave_dir }}/buildbot.tac mode=0600 owner={{ slave_user }} group={{ slave_user }}
with_first_found:
- 'buildbot.tac.j2.{{ deployment_type }}'
- buildbot.tac.j2
when: deployment_type in ['local', 'qa-stg']
- name: generate slave info
become: true
become_user: "{{ slave_user }}"
template: src={{ item }}.j2 dest={{ slave_dir }}/info/{{ item }} mode=0644 owner={{ slave_user }} group={{ slave_user }}
with_items:
- admin
- host
when: deployment_type in ['local', 'qa-stg']
- name: create slave ssh directory
when: buildslave_public_sshkey_file is defined and (deployment_type in ['local', 'qa-stg'])
file: path={{ slave_home }}/.ssh owner={{ slave_user }} group={{ slave_user }} mode=0700 state=directory
#when: buildslave_public_sshkey_file is defined and (deployment_type in ['prod', 'stg', 'local', 'qa-stg'])
- name: install slave ssh private key
when: buildslave_public_sshkey_file is defined and (deployment_type in ['local', 'qa-stg'])
copy: src={{ private }}/files/taskotron/{{ buildslave_private_sshkey_file }} dest={{ slave_home }}/.ssh/id_rsa owner={{ slave_user }} group={{ slave_user }} mode=0600
- name: install slave ssh public key
when: buildslave_public_sshkey_file is defined and (deployment_type in ['local', 'qa-stg'])
copy: src={{ private }}/files/taskotron/{{ buildslave_public_sshkey_file }} dest={{ slave_home }}/.ssh/id_rsa.pub owner={{ slave_user }} group={{ slave_user }} mode=0644
- name: make sure master is in known_hosts
when: buildslave_public_sshkey_file is defined and (deployment_type in ['local', 'qa-stg'])
lineinfile: dest=/home/{{ slave_user }}/.ssh/known_hosts regexp='{{ buildmaster }}' line='{{ buildmaster }} {{ buildmaster_pubkey }}' create=yes owner={{ slave_user }} group={{ slave_user }}
- name: generate buildslave service file
  # systemd unit files are configuration, not executables: use 0644 like
  # the dev/stg/prod variant of this task below (was 0744, which only
  # added a meaningless executable bit).
  template: src={{ item }} dest=/lib/systemd/system/buildslave.service owner=root group=root mode=0644
  with_first_found:
    - 'buildslave.service.j2.{{ deployment_type }}'
    - buildslave.service.j2
  when: deployment_type in ['local', 'qa-stg']
  register: buildslave_service
- name: reload systemd
  # pick up the (possibly changed) unit file
  command: systemctl daemon-reload
  when: deployment_type in ['local', 'qa-stg'] and buildslave_service.changed

- name: start and enable buildslave service
  # restart only when the unit file changed, otherwise just ensure started
  service: name=buildslave enabled=yes state={{ (buildslave_service.changed) | ternary('restarted','started') }}
  when: deployment_type in ['local', 'qa-stg']
# Tasks below handle the multi-worker deployments (dev/stg/prod):
# one buildbot worker per entry of the `slaves` list, each with its own
# user, home and worker directory.
- name: create slave
  become: true
  become_user: '{{ item.user }}'
  # The "buildslave"/"passwd" name/password given here are only used to
  # scaffold the directory; presumably the generated buildbot.tac below
  # overwrites them with the real credentials -- TODO confirm.
  command: creates={{ item.dir }} buildbot-worker create-worker {{ item.dir }} {{ buildmaster }} buildslave passwd
  with_items:
    - '{{ slaves }}'

- name: figure out which buildbot.tac.j2 file to use
  set_fact: buildbot_tac_j2_file={{ item | basename }}
  # this unfortunately searches in files/
  with_first_found:
    - '../templates/buildbot.tac.j2.{{ deployment_type }}'
    - '../templates/buildbot.tac.j2'

- name: generate slave config
  become: true
  become_user: '{{ item.user }}'
  template: src={{ buildbot_tac_j2_file }} dest={{ item.dir }}/buildbot.tac mode=0600 owner={{ item.user }} group={{ slaves_group }}
  with_items:
    - '{{ slaves }}'
  when: deployment_type in ['dev', 'stg', 'prod']

- name: generate slave admin info
  become: true
  become_user: '{{ item.user }}'
  template: src=admin.j2 dest={{ item.dir }}/info/admin mode=0644 owner={{ item.user }} group={{ slaves_group }}
  with_items:
    - '{{ slaves }}'
  when: deployment_type in ['dev', 'stg', 'prod']

- name: generate slave host info
  become: true
  become_user: '{{ item.user }}'
  template: src=host.j2 dest={{ item.dir }}/info/host mode=0644 owner={{ item.user }} group={{ slaves_group }}
  with_items:
    - '{{ slaves }}'
  when: deployment_type in ['dev', 'stg', 'prod']

- name: create slave ssh directory
  file: path={{ item.home }}/.ssh owner={{ item.user }} group={{ slaves_group }} mode=0700 state=directory
  with_items:
    - '{{ slaves }}'
  when: buildslave_public_sshkey_file is defined and deployment_type in ['dev', 'stg', 'prod']

- name: install slave ssh private key
  copy: src={{ private }}/files/taskotron/{{ buildslave_private_sshkey_file }} dest={{ item.home }}/.ssh/id_rsa owner={{ item.user }} group={{ slaves_group }} mode=0600
  with_items:
    - '{{ slaves }}'
  when: buildslave_private_sshkey_file is defined and deployment_type in ['dev', 'stg', 'prod']

- name: install slave ssh public key
  copy: src={{ private }}/files/taskotron/{{ buildslave_public_sshkey_file }} dest={{ item.home }}/.ssh/id_rsa.pub owner={{ item.user }} group={{ slaves_group }} mode=0644
  with_items:
    - '{{ slaves }}'
  when: buildslave_public_sshkey_file is defined and deployment_type in ['dev', 'stg', 'prod']

- name: make sure master is in known_hosts
  # pre-seed the master's host key so ssh from the worker is non-interactive
  lineinfile: dest={{ item.home }}/.ssh/known_hosts regexp='{{ buildmaster }}' line='{{ buildmaster }} {{ buildmaster_pubkey }}' create=yes owner={{ item.user }} group={{ slaves_group }}
  with_items:
    - '{{ slaves }}'
  when: buildslave_public_sshkey_file is defined and deployment_type in ['dev', 'stg', 'prod']

- name: ensure correct fcontext for buildslave dir
  # NOTE(review): this uses {{ item.home }}/slave while other tasks use
  # {{ item.dir }} -- presumably item.dir == item.home + '/slave'; confirm
  # against the inventory's `slaves` definition.
  file: path={{ item.home }}/slave setype=var_lib_t owner={{ item.user }} group={{ slaves_group }} mode=0700 state=directory
  with_items:
    - '{{ slaves }}'
  when: deployment_type in ['dev', 'stg', 'prod']

- name: generate buildslave service file
  # templated unit (buildslave@.service) -- one instance per worker user
  template: src={{ item }} dest=/lib/systemd/system/buildslave@.service owner=root group=root mode=0644
  with_first_found:
    - 'buildslave@.service.j2.{{ deployment_type }}'
    - buildslave@.service.j2
  when: deployment_type in ['dev', 'stg', 'prod']
  register: buildslave_service

- name: reload systemd
  command: systemctl daemon-reload
  when: deployment_type in ['dev', 'stg', 'prod'] and buildslave_service.changed

- name: start and enable buildslave services
  # one buildslave@<user> instance per worker; restart only on unit change
  service: name=buildslave@{{ item.user }} enabled=yes state={{ (buildslave_service.changed) | ternary('restarted','started') }}
  with_items:
    - '{{ slaves }}'
  when: deployment_type in ['dev', 'stg', 'prod']

View file

@ -1 +0,0 @@
Taskotron Admin <{{ taskotron_admin_email }}>

View file

@ -1,53 +0,0 @@
# buildbot.tac -- twistd application configuration for a buildbot worker.
# This file is a Jinja2 template rendered by Ansible; the {% ... %} blocks
# select per-deployment values at render time.
import os

from buildbot_worker.bot import Worker
from twisted.application import service

# Worker base directory: fixed path for single-worker deployments,
# per-worker path (item.dir) for multi-worker deployments.
{% if deployment_type in ['local', 'qa-stg'] %}
basedir = '/home/buildslave/slave'
{% endif %}
{% if deployment_type in ['dev', 'stg', 'prod'] %}
basedir = '{{ item.dir }}'
{% endif %}

# twistd.log rotation: rotate at ~10 MB, keep 10 old files
rotateLength = 10000000
maxRotatedFiles = 10

# if this is a relocatable tac file, get the directory containing the TAC
if basedir == '.':
    import os.path
    basedir = os.path.abspath(os.path.dirname(__file__))

# note: this line is matched against to check that this is a worker
# directory; do not edit it.
application = service.Application('buildbot-worker')

from twisted.python.logfile import LogFile
from twisted.python.log import ILogObserver, FileLogObserver
logfile = LogFile.fromFullPath(
    os.path.join(basedir, "twistd.log"), rotateLength=rotateLength,
    maxRotatedFiles=maxRotatedFiles)
application.setComponent(ILogObserver, FileLogObserver(logfile).emit)

# Master connection parameters; 9989 is the standard buildbot worker port.
buildmaster_host = '{{ buildmaster }}'
port = 9989
# Worker name/password: a shared name for single-worker deployments,
# the per-worker unix user name for multi-worker deployments.
{% if deployment_type in ['local', 'qa-stg', 'qa-prod'] %}
workername = '{{ buildslave_name }}'
passwd = '{{ buildslave_password }}'
{% endif %}
{% if deployment_type in ['dev', 'stg', 'prod'] %}
workername = '{{ item.user }}'
passwd = '{{ buildslave_password }}'
{% endif %}
keepalive = 600
umask = 0o0022
maxdelay = 300
numcpus = None
allow_shutdown = None
maxretries = None

s = Worker(buildmaster_host, port, workername, passwd, basedir,
           keepalive, umask=umask, maxdelay=maxdelay,
           numcpus=numcpus, allow_shutdown=allow_shutdown,
           maxRetries=maxretries)
s.setServiceParent(application)

View file

@ -1,15 +0,0 @@
# systemd unit for the single buildbot worker used by the local/qa-stg
# deployments (fixed path /home/buildslave/slave).
[Unit]
Description=Buildslave for taskbot
After=network.target

[Service]
# buildbot-worker daemonizes itself, hence Type=forking
Type=forking
# disabled because of https://pagure.io/taskotron/issue/236
#PIDFile=/home/buildslave/slave/twistd.pid
ExecStart=/usr/bin/buildbot-worker start /home/buildslave/slave/
ExecStop=/usr/bin/buildbot-worker stop /home/buildslave/slave/
User=buildslave
Group=buildslave

[Install]
WantedBy=multi-user.target

View file

@ -1,15 +0,0 @@
# Templated systemd unit (buildslave@<user>.service) for multi-worker
# deployments: %i is the worker's unix user name.
[Unit]
Description=Buildslave for taskotron
After=network.target

[Service]
# buildbot-worker daemonizes itself, hence Type=forking
Type=forking
# disabled because of https://pagure.io/taskotron/issue/236
#PIDFile=/home/%i/slave/twistd.pid
ExecStart=/usr/bin/buildbot-worker start /srv/buildslaves/%i/slave/
ExecStop=/usr/bin/buildbot-worker stop /srv/buildslaves/%i/slave/
User=%i
Group={{ slaves_group }}

[Install]
WantedBy=multi-user.target

View file

@ -1 +0,0 @@
Buildslave running on {{ inventory_hostname }}

View file

@ -1,3 +0,0 @@
---
extra_enablerepos: ''
slaves: ''

View file

@ -1,75 +0,0 @@
---
- name: ensure packages required for buildslave are installed
  dnf: name={{ item }} state=present
  with_items:
    - buildbot-worker
    - git
    - policycoreutils-python-utils
  # rpm-ostree hosts do not use dnf for package management, so skip there
  when: ansible_cmdline.ostree is not defined
- name: ensure packages required for CI buildslave are installed
  dnf: name={{ item }} state=present
  # `koji` was listed twice in this list; the duplicate entry is removed
  # (dnf deduplicates anyway, but duplicate keys/items are a maintenance trap).
  with_items:
    - testcloud
    - mash
    - mock
    - koji
    - python-fedora
    - python-doit
    - python2-rpmfluff
    - rpmlint
    - bodhi-client
    - python-virtualenv
    - gcc
    - libcurl-devel
    - rpm-build
    - pylint
    - python-pep8
    - moreutils
  # CI-only tooling; skip on rpm-ostree hosts where dnf is unavailable
  when: (deployment_type in ['qa-stg']) and (ansible_cmdline.ostree is not defined)
- name: set the selinux fcontext type for the buildmaster_home to var_lib_t
  # NOTE(review): the task name says buildmaster_home but the path used is
  # slave_dir -- confirm which was intended
  command: semanage fcontext -a -t var_lib_t "{{ slave_dir }}(/.*)?"
  when: deployment_type in ['qa-stg']

- name: add the buildslave user for taskotron
  user: name=buildslave
  when: deployment_type in ['local']

- name: add the buildslave user for ci
  # CI workers need mock group membership to run mock builds
  user: name=buildslave groups=mock
  when: deployment_type in ['qa-stg']

- name: ensure needed groups exist
  group: name={{ item }}
  when: deployment_type in ['dev', 'stg', 'prod']
  with_items:
    - testcloud
    - taskotron

- name: ensure needed groups exist
  group: name={{ slaves_group }}
  when: slaves_group is defined and (deployment_type in ['dev', 'stg', 'prod'])

- name: create buildslaves home
  # parent directory under which each worker user's home is created
  file:
    path: "{{ buildslaves_home }}"
    state: directory
    mode: 0755
  when: slaves is defined and (deployment_type in ['dev', 'stg', 'prod'])

- name: add buildslave users
  user: name={{ item.user }} group={{ slaves_group }} groups=testcloud,taskotron home={{ item.home }}
  with_items: "{{ slaves }}"
  when: slaves is defined and (deployment_type in ['dev', 'stg', 'prod'])

- name: set the selinux fcontext type for the buildslave dir to var_lib_t
  command: semanage fcontext -a -t var_lib_t "{{ item.dir }}"
  with_items: "{{ slaves }}"
  when: slaves is defined and deployment_type in ['qa-prod', 'qa-stg']

- name: make sure the selinux fcontext is restored
  # apply the fcontext rules registered above to files already on disk
  command: restorecon -R "{{ item.home }}"
  with_items: "{{ slaves }}"
  when: slaves is defined and deployment_type in ['dev', 'stg', 'prod', 'qa-prod', 'qa-stg']

View file

@ -1,2 +0,0 @@
---
extra_enablerepos: ''

View file

@ -1,49 +0,0 @@
---
- name: start httpd (provided in the apache role)
  service: name=httpd state=started

- name: ensure packages required for execdb are installed (dnf)
  dnf: name={{ item }} state=present
  with_items:
    - execdb
    - python3-mod_wsgi
    - python3-psycopg2
    - python3-libsemanage
  # rpm-ostree hosts do not use dnf; skip there
  when: ansible_cmdline.ostree is not defined

- name: ensure database is created
  # the database may live on another machine; run psql tasks there as postgres
  delegate_to: "{{ execdb_db_host_machine }}"
  become_user: postgres
  become: true
  postgresql_db: db={{ execdb_db_name }}

- name: ensure execdb db user has access to database
  delegate_to: "{{ execdb_db_host_machine }}"
  become_user: postgres
  become: true
  postgresql_user: db={{ execdb_db_name }} user={{ execdb_db_user }} password={{ execdb_db_password }} role_attr_flags=NOSUPERUSER

- name: ensure selinux lets httpd talk to postgres
  seboolean: name=httpd_can_network_connect_db persistent=yes state=yes

- name: generate execdb config
  template: src=settings.py.j2 dest=/etc/execdb/settings.py owner=root group=root mode=0644
  notify:
    - reload httpd

- name: generate execdb apache config
  template: src=execdb.conf.j2 dest=/etc/httpd/conf.d/execdb.conf owner=root group=root mode=0644
  notify:
    - reload httpd

- name: generate alembic.ini
  template: src=alembic.ini.j2 dest=/usr/share/execdb/alembic.ini owner=root group=root mode=0644

# NOTE(review): the three shell tasks below run on every play; presumably
# execdb's init/upgrade commands are themselves idempotent -- confirm.
- name: initialize execdb database
  shell: PROD='true' execdb init_db

- name: initialize alembic
  shell: PROD='true' execdb init_alembic

- name: upgrade execdb database via alembic
  shell: PROD='true' execdb upgrade_db

View file

@ -1,73 +0,0 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = driver://user:pass@localhost/dbname
[alembic-packaged]
# path to migration scripts on a packaged install
script_location = /usr/share/execdb/alembic
# Do not quote the URL: INI parsers (e.g. Python configparser, which
# alembic uses) keep quote characters as part of the value, yielding a
# broken connection string.  The [alembic] example above is unquoted too.
sqlalchemy.url = postgresql+psycopg2://{{ execdb_db_user }}:{{ execdb_db_password }}@{{ execdb_db_host }}:{{ execdb_db_port }}/{{ execdb_db_name }}
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

View file

@ -1,34 +0,0 @@
WSGIDaemonProcess execdb user=apache group=apache threads=5
WSGIScriptAlias /{{ execdb_endpoint }} /usr/share/execdb/execdb.wsgi
WSGISocketPrefix run/wsgi

# this isn't the best way to force SSL but it works for now
#RewriteEngine On
#RewriteCond %{HTTPS} !=on
#RewriteRule ^/execdb/admin/?(.*) https://%{SERVER_NAME}/$1 [R,L]

<Directory /usr/share/execdb>
    WSGIProcessGroup execdb
    WSGIApplicationGroup %{GLOBAL}
    WSGIScriptReloading On

    <IfModule mod_authz_core.c>
        # Apache 2.4
        <RequireAny>
            Require method GET
            Require ip 127.0.0.1 ::1{% for host in allowed_hosts %} {{ host }}{% endfor %}
        </RequireAny>
    </IfModule>
    # Apache 2.2 fallback.  Fixed typo: this previously read
    # "!mod_auth_core.c" (no such module), so the block was ALSO active on
    # Apache 2.4 and "Allow from all" could defeat the RequireAny
    # restriction above when mod_access_compat was loaded.
    <IfModule !mod_authz_core.c>
        Order allow,deny
        Allow from all
    </IfModule>
</Directory>

#Alias /execdb/static /var/www/execdb/execdb/static
#<Directory /var/www/execdb/execdb/static>
#Order allow,deny
#Allow from all
#</Directory>

View file

@ -1,11 +0,0 @@
# execdb settings.py -- rendered by Ansible into /etc/execdb/settings.py.
SECRET_KEY = '{{ execdb_secret_key }}'
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{{ execdb_db_user }}:{{ execdb_db_password }}@{{ execdb_db_host }}:{{ execdb_db_port }}/{{ execdb_db_name }}'
# log to stream only; file and syslog logging are disabled here
FILE_LOGGING = False
LOGFILE = '/var/log/execdb/execdb.log'
SYSLOG_LOGGING = False
STREAM_LOGGING = True
# external URLs shown by the execdb UI
BUILDBOT_FRONTPAGE_URL = 'https://{{ external_hostname }}/taskmaster'
RESULTSDB_FRONTPAGE_URL = 'https://{{ external_hostname }}/resultsdb'
ARTIFACTS_BASE_URL = 'https://{{ external_hostname }}/artifacts/all'

View file

@ -1,2 +0,0 @@
---
extra_enablerepos: ''

View file

@ -1,63 +0,0 @@
---
# Install and configure grokmirror, which mirrors the git repos listed in
# grokmirror_repos.  Three install tasks cover EL7, Fedora and EL8+.
- name: ensure grokmirror packages are installed (yum)
  package:
    state: present
    name:
      - git
      - python-grokmirror
  when: ansible_distribution_major_version|int <= 7 and ansible_distribution == 'RedHat'

- name: ensure grokmirror packages are installed (dnf)
  dnf: name={{ item }} state=present
  with_items:
    - git
    - python-grokmirror
  when: ansible_distribution_major_version|int >= 29 and ansible_distribution == 'Fedora' and ansible_cmdline.ostree is not defined

- name: ensure grokmirror packages are installed (dnf)
  dnf: name={{ item }} state=present
  with_items:
    - git
    - python-grokmirror
  when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined

- name: create grokmirror user
  user: name={{ grokmirror_user }}

- name: add buildslave cert to grokmirror for cloning
  # lets build workers clone from this mirror over ssh
  authorized_key: user={{ grokmirror_user }} key="{{ buildslave_ssh_pubkey }}"

- name: create grokmirror conf dir
  # NOTE(review): mode 1755 sets the sticky bit on these directories --
  # presumably intentional; confirm
  file: path=/etc/grokmirror/ state=directory owner=root group=root mode=1755

- name: create grokmirror root directory
  file: path={{ grokmirror_basedir }} state=directory owner={{ grokmirror_user }} group={{ grokmirror_user }} mode=1755

- name: create log directory for grokmirror
  file: path=/var/log/grokmirror state=directory owner={{ grokmirror_user }} group={{ grokmirror_user }} mode=1775

- name: create directory for grokmirror locks
  file: path=/var/lock/grokmirror state=directory owner={{ grokmirror_user }} group={{ grokmirror_user }} mode=1755

- name: clone initial git repos
  # bare clone with update=no: only clones when missing, never updates
  git: repo={{ item.url }} bare=yes dest={{ grokmirror_basedir }}/{{ item.name }} update=no
  become: true
  become_user: "{{ grokmirror_user }}"
  with_items: "{{ grokmirror_repos }}"

- name: set up default branch
  command: chdir={{ grokmirror_basedir }}/{{ item.name }} git symbolic-ref HEAD refs/heads/{{ grokmirror_default_branch }}
  become: true
  become_user: "{{ grokmirror_user }}"
  with_items: "{{ grokmirror_repos }}"

- name: generate grokmirror config
  template: src=repos.conf.j2 dest=/etc/grokmirror/repos.conf owner={{ grokmirror_user }} group={{ grokmirror_user }} mode=0644

- name: install cron jobs to update repos
  template:
    src: grokmirror-update-repos.cron.j2
    dest: /etc/cron.d/grokmirror-update-repos.cron
    owner: root
    group: root
    mode: 0644
View file

@ -1,6 +0,0 @@
### Update grokmirror repos
{% for item in grokmirror_repos %}
# pull repo for {{ item.name }}
@hourly {{ grokmirror_user }} cd {{ grokmirror_basedir }}/{{ item.name }} && git fetch origin {{ grokmirror_default_branch }}:{{ grokmirror_default_branch }}
{% endfor %}

View file

@ -1,95 +0,0 @@
# You can pull from multiple grok mirrors, just create
# a separate section for each mirror. The name can be anything.
[bitbucket.org]
# The host part of the mirror you're pulling from.
#site = git://git.kernel.org
site = https://bitbucket.org/
#
# Where the grok manifest is published. The following protocols
# are supported at this time:
# http:// or https:// using If-Modified-Since http header
# file:// (when manifest file is on NFS, for example)
#manifest = http://git.kernel.org/manifest.js.gz
manifest = file://{{ grokmirror_basedir }}/manifest.js.gz
#
# Where are we going to put the mirror on our disk?
#toplevel = /var/lib/git/mirror
toplevel = {{ grokmirror_basedir }}
#
# Where do we store our own manifest? Usually in the toplevel.
#mymanifest = /var/lib/git/mirror/manifest.js.gz
mymanifest = {{ grokmirror_basedir }}/manifest.local.js.gz
#
# Write out projects.list that can be used by gitweb or cgit.
# Leave blank if you don't want a projects.list.
#projectslist = /var/lib/git/mirror/projects.list
projectslist = {{ grokmirror_basedir }}/projects.list
#
# When generating projects.list, start at this subpath instead
# of at the toplevel. Useful when mirroring kernel or when generating
# multiple gitweb/cgit configurations for the same tree.
#projectslist_trimtop = /pub/scm/
#projectslist_trimtop = {{ grokmirror_basedir }}
#
# When generating projects.list, also create entries for symlinks.
# Otherwise we assume they are just legacy and keep them out of
# web interfaces.
#projectslist_symlinks = yes
projectslist_symlinks = no
#
# A simple hook to execute whenever a repository is modified.
# It passes the full path to the git repository modified as the only
# argument.
#post_update_hook = /usr/local/bin/make-git-fairies-appear
post_update_hook =
#
# If owner is not specified in the manifest, who should be listed
# as the default owner in tools like gitweb or cgit?
#default_owner = Grokmirror User
default_owner = Grokmirror User
#
# Where do we put the logs?
#log = /var/log/mirror/kernelorg.log
log = /var/log/grokmirror/bitbucket-gitmirror.log
#
# Log level can be "info" or "debug"
#loglevel = info
loglevel = info
#
# To prevent multiple grok-pull instances from running at the same
# time, we first obtain an exclusive lock.
#lock = /var/lock/mirror/kernelorg.lock
lock = /var/lock/grokmirror/bitbucket.lock
#
# Use shell-globbing to list the repositories you would like to mirror.
# If you want to mirror everything, just say "*". Separate multiple entries
# with newline plus tab. Examples:
#
# mirror everything:
#include = *
#
# mirror just the main kernel sources:
#include = /pub/scm/linux/kernel/git/torvalds/linux.git
# /pub/scm/linux/kernel/git/stable/linux-stable.git
# /pub/scm/linux/kernel/git/next/linux-next.git
#
# mirror just git:
#include = /pub/scm/git/*
#include = /fedoraqa/task-rpmlint
# /fedoraqa/task-examplebodhi
# /fedoraqa/task-examplelong
# /fedoraqa/depcheck-mk-2
include = {% for repo in grokmirror_repos %}
/{{ repo.name }}
{% endfor %}
#include = rpmlint
# examplebodhi
# examplelong
# depcheck
#
# https://tflink@bitbucket.org/fedoraqa/task-rpmlint.git
# This is processed after the include. If you want to exclude some specific
# entries from an all-inclusive globbing above. E.g., to exclude all linux-2.4
# git sources:
#exclude = */linux-2.4*
exclude =

View file

@ -1,2 +0,0 @@
---
extra_enablerepos: ''

View file

@ -1,43 +0,0 @@
---
# Client side of the imagefactory setup: downloads pre-built base images
# for disposable test clients and keeps them pruned via cron.
- name: ensure packages required for imagefactory are installed (dnf)
  dnf: name={{ item }} state=present
  with_items:
    - python-libguestfs
    - libguestfs-tools
    - qemu-img
    - git

- name: create directory for git clone
  file: path=/var/lib/fedoraqa state=directory owner=root group=root mode=1755

- name: create directory for configuration
  file: path=/etc/taskotron_base_images state=directory owner=root group=root mode=0755

- name: create directory for disposable clients images
  # group-writable area shared with the build workers
  file: path={{ imagesdir }} state=directory owner=root group={{ slaves_group }} mode=1755

- name: Check out our imagefactory tools
  git:
    repo: https://pagure.io/taskotron/base_images.git
    dest: '/var/lib/fedoraqa/base_images'
    update: yes
    version: 'master'
  register: git_result
  # report "changed" only when the checkout actually moved
  changed_when: "git_result.after|default('after') != git_result.before|default('before')"

- name: Generate configuration
  template: src={{ item }} dest=/etc/taskotron_base_images/config.ini owner=root group=root mode=0644
  with_first_found:
    - config.ini.j2.{{ deployment_type }}
    - config.ini.j2

- name: Download/update images
  command: '/var/lib/fedoraqa/base_images/process_for_taskotron.py --taskotrondir {{imagesdir}}'

- name: Install cron job to download new ImageFactory images and clean up old ones
  template:
    src: imagefactory-client.cron.j2
    dest: /etc/cron.d/imagefactory-client.cron
    owner: root
    group: root
    mode: 0644

View file

@ -1,3 +0,0 @@
[default]
imgfac_base_url={{imagefactory_baseurl}}
rawhide=32

View file

@ -1,5 +0,0 @@
# Download new images from ImageFactory
@hourly root /var/lib/fedoraqa/base_images/process_for_taskotron.py --taskotrondir {{imagesdir}} > /dev/null
# Clean up images downloaded from ImageFactory
@daily root /var/lib/fedoraqa/base_images/prune_images/prune_images.py directory --keep 3 {{imagesdir}} > /dev/null

View file

@ -1,4 +0,0 @@
---
extra_enablerepos: ''
imagefactory_build_releases:
- 30

View file

@ -1,43 +0,0 @@
#!/bin/bash
# Kill all imagefactory build processes which took longer than specified
# (3 hours by default)

DEFAULT_TIMEOUT=180

# print usage
if [ "$1" = '--help' ] || [ "$1" = '-h' ]; then
    echo "Usage: $0 [TIMEOUT]"
    echo -n 'Kill all imagefactory build processes which took longer than '
    echo "TIMEOUT (in minutes, $DEFAULT_TIMEOUT by default)."
    exit 1
fi

# command line prefix identifying an imagefactory build VM
PROCESS_ID='/usr/bin/qemu-system-x86_64 -machine accel=kvm -name guest=factory-build'

TIMEOUT=${1:-$DEFAULT_TIMEOUT}
# reject a non-numeric TIMEOUT instead of feeding it into arithmetic,
# where it would produce a confusing evaluation error
if ! [[ "$TIMEOUT" =~ ^[0-9]+$ ]]; then
    echo "Error: TIMEOUT must be a positive integer (got '$TIMEOUT')" >&2
    exit 2
fi
# convert to seconds
TIMEOUT=$(( TIMEOUT * 60 ))

while true; do
    # get PID of the oldest (pgrep -o) running matched process
    PID=$(pgrep -o -f "$PROCESS_ID")
    if [ -z "$PID" ]; then
        echo 'No (more) processes match, exiting'
        exit
    fi
    AGE=$(ps -o etimes= -p "$PID")
    if (( AGE >= TIMEOUT )); then
        echo "Process matched, killing: $(ps -p $PID --no-headers -o pid,args)"
        kill -s TERM "$PID"
        # wait a while and see if the process is really terminated, otherwise
        # force kill it
        sleep 3
        if [ "$PID" = "$(pgrep -o -f "$PROCESS_ID")" ]; then
            echo "Process $PID wasn't terminated, force killing it"
            kill -s KILL "$PID"
            # if even SIGKILL did not remove it, bail out rather than loop
            # forever retrying the same unkillable process
            sleep 1
            if [ "$PID" = "$(pgrep -o -f "$PROCESS_ID")" ]; then
                echo "Error: process $PID survived SIGKILL, giving up" >&2
                exit 3
            fi
        fi
    else
        # pgrep -o returns the oldest match; if it is too young, all are
        echo 'Some processes match, but they are not old enough, exiting'
        exit
    fi
done

View file

@ -1,7 +0,0 @@
[Unit]
Description=Kill hanging imagefactory build processes
[Service]
Type=oneshot
ExecStart=/usr/local/bin/imagefactory-kill-zombie
TimeoutStartSec=300

View file

@ -1,10 +0,0 @@
[Unit]
Description=Kill hanging imagefactory build processes regularly
[Timer]
OnCalendar=daily
RandomizedDelaySec=1h
Persistent=true
[Install]
WantedBy=timers.target

View file

@ -1,23 +0,0 @@
{
"debug": 0,
"no_ssl": 1,
"no_oauth": 1,
"imgdir": "/var/lib/imagefactory/images",
"ec2_ami_type": "ebs",
"rhevm_image_format": "qcow2",
"openstack_image_format": "qcow2",
"clients": {
"mock-key": "mock-secret"
},
"proxy_ami_id": "ami-id",
"max_concurrent_local_sessions": 2,
"max_concurrent_ec2_sessions": 4,
"ec2-32bit-util": "m1.small",
"ec2-64bit-util": "m1.large",
"image_manager": "file",
"image_manager_args": { "storage_path": "/var/lib/imagefactory/storage" },
"tdl_require_root_pw": 0,
"jeos_config": [ "/etc/imagefactory/jeos_images/" ],
"output": "json"
}

View file

@ -1,13 +0,0 @@
# Workaround for https://github.com/redhat-imaging/imagefactory/issues/417
[Unit]
Requires=libvirtd.service
After=libvirtd.service
[Service]
Type=exec
ExecStart=/usr/bin/imagefactoryd --foreground --debug
#ExecStop=/usr/bin/killall imagefactoryd
PIDFile=/run/imagefactoryd.pid
[Install]
WantedBy=multi-user.target

View file

@ -1,21 +0,0 @@
[paths]
output_dir = /var/lib/libvirt/images
data_dir = /var/lib/oz
screenshot_dir = /var/lib/oz/screenshots
# sshprivkey = /etc/oz/id_rsa-icicle-gen
[libvirt]
uri = qemu:///system
image_type = raw
# type = kvm
# bridge_name = virbr0
# cpus = 1
memory = 2048
[cache]
original_media = no
modified_media = no
jeos = no
[icicle]
safe_generation = no

View file

@ -1,83 +0,0 @@
---
# Server side of the imagefactory setup: runs imagefactoryd, builds base
# images, and cleans up hung builds via a timer.
- name: ensure packages required for imagefactory are installed (dnf)
  dnf: name={{ item }} state=present
  with_items:
    - imagefactory
    - imagefactory-plugins
    - imagefactory-plugins-TinMan
    - git

- name: copy imagefactory.conf config file
  copy: src=imagefactory.conf dest=/etc/imagefactory/imagefactory.conf owner=root group=root mode=0644

- name: copy oz.cfg config file
  copy: src=oz.cfg dest=/etc/oz/oz.cfg owner=root group=root mode=0644

- name: copy imagefactory-kill-zombie script
  copy:
    src: imagefactory-kill-zombie
    dest: /usr/local/bin
    owner: root
    group: root
    mode: '0744'

- name: copy imagefactory-kill-zombie service
  copy:
    src: imagefactory-kill-zombie.service
    dest: /etc/systemd/system
  register: imagefactory_kill_zombie_service

- name: copy imagefactory-kill-zombie timer
  copy:
    src: imagefactory-kill-zombie.timer
    dest: /etc/systemd/system
  register: imagefactory_kill_zombie_timer

# Workaround for https://github.com/redhat-imaging/imagefactory/issues/417
- name: fix issues in imagefactoryd.service
  copy:
    src: imagefactoryd.service
    dest: /etc/systemd/system/imagefactoryd.service
  register: imagefactory_service

- name: reload systemd
  # pick up any of the unit files copied above
  command: systemctl daemon-reload
  when: imagefactory_service.changed or imagefactory_kill_zombie_service.changed or imagefactory_kill_zombie_timer.changed

- name: enable imagefactory
  service: name=imagefactoryd state=started enabled=yes

- name: enable imagefactory-kill-zombie.timer
  service:
    name: imagefactory-kill-zombie.timer
    state: started
    enabled: yes

- name: create directory for git clone
  file: path=/var/lib/fedoraqa state=directory owner=root group=root mode=1755

- name: Check out our imagefactory tools
  git:
    repo: https://pagure.io/taskotron/base_images.git
    dest: '/var/lib/fedoraqa/base_images'
    update: yes
    version: 'master'
  register: git_result
  # report "changed" only when the checkout actually moved
  changed_when: "git_result.after|default('after') != git_result.before|default('before')"

- name: create directory for configuration
  file: path=/etc/taskotron_base_images state=directory owner=root group=root mode=0755

- name: Generate configuration
  template: src={{ item }} dest=/etc/taskotron_base_images/config_server.ini owner=root group=root mode=0644
  with_first_found:
    - config_server.ini.j2.{{ deployment_type }}
    - config_server.ini.j2

- name: Install cron job to build new images (and other tasks) in ImageFactory
  template:
    src: imagefactory-server.cron.j2
    dest: /etc/cron.d/imagefactory-server.cron
    owner: root
    group: root
    mode: 0644

View file

@ -1,10 +0,0 @@
[default]
imgfac_base_url=http://127.0.0.1:8075/imagefactory
rawhide=32
mail_from={{deployment_type}}.imagefactory@qa.fedoraproject.org
mail_to=jskladan@redhat.com tflink@redhat.com
mail_subject=Images that failed to build for Taskotron in ImageFactory on {{deployment_type}}
smtp_server=bastion.phx2.fedoraproject.org
deployment_type={{deployment_type}}

View file

@ -1,12 +0,0 @@
### ImageFactory server jobs
{% for release in imagefactory_build_releases %}
# Build new Fedora {{ release }} taskotron_cloud image in ImageFactory
@daily root /var/lib/fedoraqa/base_images/trigger_build/trigger_build.py build --config /etc/taskotron_base_images/config_server.ini --release {{ release }} --arch x86_64 --flavor="taskotron_cloud" --disksize="30G" > /dev/null
{% endfor %}
# Report failed builds in ImageFactory
@daily root /var/lib/fedoraqa/base_images/report_failures/report_failures.py email --config /etc/taskotron_base_images/config_server.ini > /dev/null
# Prune old builds in ImageFactory
@daily root /var/lib/fedoraqa/base_images/prune_images/prune_images.py imgfac-old --config /etc/taskotron_base_images/config_server.ini --keep 3 > /dev/null

View file

@ -1,5 +0,0 @@
---
http_port: 80
https_port: 443
public_hostname: localhost.localdomain
extra_enablerepos: ''

View file

@ -1,2 +0,0 @@
RewriteEngine on
RewriteRule "^/.well-known/acme-challenge/(.*)$" "http://fedoraproject.org/.well-known/acme-challenge/$1"

View file

@ -1,49 +0,0 @@
---
# Install mod_ssl and deploy the httpd SSL configuration.  Three install
# tasks cover EL7, Fedora and EL8+.
- name: ensure ssl related packages are installed (yum)
  package:
    state: present
    name:
      - mod_ssl
  when: ansible_distribution_major_version|int <= 7 and ansible_distribution == 'RedHat'

- name: ensure ssl related packages are installed (dnf)
  dnf: name={{ item }} state=present
  with_items:
    - mod_ssl
  when: ansible_distribution_major_version|int >= 29 and ansible_distribution == 'Fedora' and ansible_cmdline.ostree is not defined

- name: ensure ssl related packages are installed (dnf)
  dnf: name={{ item }} state=present
  with_items:
    - mod_ssl
  when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined

#- name: copy ssl key
#  copy: src={{ private }}/files/taskotron/certs/{{ inventory_hostname }}/{{ inventory_hostname }}.key dest=/etc/pki/tls/private/{{ inventory_hostname }}.key
#
#- name: copy ssl cert
#  copy: src={{ private }}/files/taskotron/certs/{{ inventory_hostname }}/{{ inventory_hostname }}.crt dest=/etc/pki/tls/certs/{{ inventory_hostname }}.crt

- name: redirect letsencrypt to fedora proxies so certgetter can get this cert.
  copy: src=certgetter.conf dest=/etc/httpd/conf.d/certgetter.conf owner=root group=root mode=0644
  tags:
    - httpd
  notify:
    - reload httpd

- name: generate rhel httpd ssl config
  template: src=ssl.conf.rhel.j2 dest=/etc/httpd/conf.d/ssl.conf owner=root group=root mode=0644
  when: is_rhel is defined
  tags:
    - httpd
  notify:
    - reload httpd

- name: generate fedora httpd ssl config
  template: src=ssl.conf.j2 dest=/etc/httpd/conf.d/ssl.conf owner=root group=root mode=0644
  when: is_fedora is defined
  tags:
    - httpd
  notify:
    - reload httpd

View file

@ -1,221 +0,0 @@
LoadModule ssl_module modules/mod_ssl.so
#
# When we also provide SSL we have to listen to the
# the HTTPS port in addition.
#
Listen 443 https
##
## SSL Global Context
##
## All SSL configuration in this context applies both to
## the main server and all SSL-enabled virtual hosts.
##
# Pass Phrase Dialog:
# Configure the pass phrase gathering process.
# The filtering dialog program (`builtin' is a internal
# terminal dialog) has to provide the pass phrase on stdout.
SSLPassPhraseDialog exec:/usr/libexec/httpd-ssl-pass-dialog
# Inter-Process Session Cache:
# Configure the SSL Session Cache: First the mechanism
# to use and second the expiring timeout (in seconds).
SSLSessionCache shmcb:/run/httpd/sslcache(512000)
SSLSessionCacheTimeout 300
# Pseudo Random Number Generator (PRNG):
# Configure one or more sources to seed the PRNG of the
# SSL library. The seed data should be of good random quality.
# WARNING! On some platforms /dev/random blocks if not enough entropy
# is available. This means you then cannot use the /dev/random device
# because it would lead to very long connection times (as long as
# it requires to make more entropy available). But usually those
# platforms additionally provide a /dev/urandom device which doesn't
# block. So, if available, use this one instead. Read the mod_ssl User
# Manual for more details.
SSLRandomSeed startup file:/dev/urandom 256
SSLRandomSeed connect builtin
#SSLRandomSeed startup file:/dev/random 512
#SSLRandomSeed connect file:/dev/random 512
#SSLRandomSeed connect file:/dev/urandom 512
#
# Use "SSLCryptoDevice" to enable any supported hardware
# accelerators. Use "openssl engine -v" to list supported
# engine names. NOTE: If you enable an accelerator and the
# server does not start, consult the error logs and ensure
# your accelerator is functioning properly.
#
SSLCryptoDevice builtin
#SSLCryptoDevice ubsec
##
## SSL Virtual Host Context
##
<VirtualHost _default_:443>
# General setup for the virtual host, inherited from global configuration
#DocumentRoot "/var/www/html"
#ServerName www.example.com:443
ServerName {{public_hostname}}:443
# Use separate log files for the SSL virtual host; note that LogLevel
# is not inherited from httpd.conf.
ErrorLog logs/ssl_error_log
TransferLog logs/ssl_access_log
LogLevel warn
# SSL Engine Switch:
# Enable/Disable SSL for this virtual host.
SSLEngine on
# SSL Protocol support:
# List the enabled protocol levels with which clients will be able to
# connect. Disable SSLv2 access by default:
SSLProtocol {{ ssl_protocols }}
# SSL Cipher Suite:
# List the ciphers that the client is permitted to negotiate.
# See the mod_ssl documentation for a complete list.
SSLCipherSuite {{ ssl_ciphers }}
# Speed-optimized SSL Cipher configuration:
# If speed is your main concern (on busy HTTPS servers e.g.),
# you might want to force clients to specific, performance
# optimized ciphers. In this case, prepend those ciphers
# to the SSLCipherSuite list, and enable SSLHonorCipherOrder.
# Caveat: by giving precedence to RC4-SHA and AES128-SHA
# (as in the example below), most connections will no longer
# have perfect forward secrecy - if the server's key is
# compromised, captures of past or future traffic must be
# considered compromised, too.
#SSLCipherSuite RC4-SHA:AES128-SHA:HIGH:MEDIUM:!aNULL:!MD5
#SSLHonorCipherOrder on
# Server Certificate:
# Point SSLCertificateFile at a PEM encoded certificate. If
# the certificate is encrypted, then you will be prompted for a
# pass phrase. Note that a kill -HUP will prompt again. A new
# certificate can be generated using the genkey(1) command.
SSLCertificateFile /etc/pki/tls/certs/{{ public_hostname }}.cert
# Server Private Key:
# If the key is not combined with the certificate, use this
# directive to point at the key file. Keep in mind that if
# you've both a RSA and a DSA private key you can configure
# both in parallel (to also allow the use of DSA ciphers, etc.)
SSLCertificateKeyFile /etc/pki/tls/private/{{ public_hostname }}.key
# Server Certificate Chain:
# Point SSLCertificateChainFile at a file containing the
# concatenation of PEM encoded CA certificates which form the
# certificate chain for the server certificate. Alternatively
# the referenced file can be the same as SSLCertificateFile
# when the CA certificates are directly appended to the server
# certificate for convenience.
#SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt
# Certificate Authority (CA):
# Set the CA certificate verification path where to find CA
# certificates for client authentication or alternatively one
# huge file containing all of them (file must be PEM encoded)
#SSLCACertificateFile /etc/pki/tls/certs/ca-bundle.crt
# Client Authentication (Type):
# Client certificate verification type and depth. Types are
# none, optional, require and optional_no_ca. Depth is a
# number which specifies how deeply to verify the certificate
# issuer chain before deciding the certificate is not valid.
#SSLVerifyClient require
#SSLVerifyDepth 10
# Access Control:
# With SSLRequire you can do per-directory access control based
# on arbitrary complex boolean expressions containing server
# variable checks and other lookup directives. The syntax is a
# mixture between C and Perl. See the mod_ssl documentation
# for more details.
#<Location />
#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \
# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \
# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \
# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \
# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \
# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/
#</Location>
# SSL Engine Options:
# Set various options for the SSL engine.
# o FakeBasicAuth:
# Translate the client X.509 into a Basic Authorisation. This means that
# the standard Auth/DBMAuth methods can be used for access control. The
# user name is the `one line' version of the client's X.509 certificate.
# Note that no password is obtained from the user. Every entry in the user
# file needs this password: `xxj31ZMTZzkVA'.
# o ExportCertData:
# This exports two additional environment variables: SSL_CLIENT_CERT and
# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
# server (always existing) and the client (only existing when client
# authentication is used). This can be used to import the certificates
# into CGI scripts.
# o StdEnvVars:
# This exports the standard SSL/TLS related `SSL_*' environment variables.
# Per default this exportation is switched off for performance reasons,
# because the extraction step is an expensive operation and is usually
# useless for serving static content. So one usually enables the
# exportation for CGI and SSI requests only.
# o StrictRequire:
# This denies access when "SSLRequireSSL" or "SSLRequire" applied even
# under a "Satisfy any" situation, i.e. when it applies access is denied
# and no other module can change it.
# o OptRenegotiate:
# This enables optimized SSL connection renegotiation handling when SSL
# directives are used in per-directory context.
#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire
<Files ~ "\.(cgi|shtml|phtml|php3?)$">
SSLOptions +StdEnvVars
</Files>
<Directory "/var/www/cgi-bin">
SSLOptions +StdEnvVars
</Directory>
# SSL Protocol Adjustments:
# The safe and default but still SSL/TLS standard compliant shutdown
# approach is that mod_ssl sends the close notify alert but doesn't wait for
# the close notify alert from client. When you need a different shutdown
# approach you can use one of the following variables:
# o ssl-unclean-shutdown:
# This forces an unclean shutdown when the connection is closed, i.e. no
# SSL close notify alert is sent or allowed to be received. This violates
# the SSL/TLS standard but is needed for some brain-dead browsers. Use
# this when you receive I/O errors because of the standard approach where
# mod_ssl sends the close notify alert.
# o ssl-accurate-shutdown:
# This forces an accurate shutdown when the connection is closed, i.e. a
# SSL close notify alert is sent and mod_ssl waits for the close notify
# alert of the client. This is 100% SSL/TLS standard compliant, but in
# practice often causes hanging connections with brain-dead browsers. Use
# this only for browsers where you know that their SSL implementation
# works correctly.
# Notice: Most problems of broken clients are also related to the HTTP
# keep-alive facility, so you usually additionally want to disable
# keep-alive for those clients, too. Use variable "nokeepalive" for this.
# Similarly, one has to force some clients to use HTTP/1.0 to workaround
# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and
# "force-response-1.0" for this.
BrowserMatch "MSIE [2-5]" \
nokeepalive ssl-unclean-shutdown \
downgrade-1.0 force-response-1.0
# Per-Server Logging:
# The home of a custom SSL log file. Use this when you want a
# compact non-error SSL logfile on a virtual host basis.
CustomLog logs/ssl_request_log \
"%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b"
</VirtualHost>

View file

@ -1,227 +0,0 @@
#
# This is the Apache server configuration file providing SSL support.
# It contains the configuration directives to instruct the server how to
# serve pages over an https connection. For detailed information about these
# directives see <URL:http://httpd.apache.org/docs/2.2/mod/mod_ssl.html>
#
# Do NOT simply read the instructions in here without understanding
# what they do. They're here only as hints or reminders. If you are unsure
# consult the online docs. You have been warned.
#
LoadModule ssl_module modules/mod_ssl.so
#
# When we also provide SSL we have to listen to the
# the HTTPS port in addition.
#
Listen 443
##
## SSL Global Context
##
## All SSL configuration in this context applies both to
## the main server and all SSL-enabled virtual hosts.
##
# Pass Phrase Dialog:
# Configure the pass phrase gathering process.
# The filtering dialog program (`builtin' is an internal
# terminal dialog) has to provide the pass phrase on stdout.
SSLPassPhraseDialog builtin
# Inter-Process Session Cache:
# Configure the SSL Session Cache: First the mechanism
# to use and second the expiring timeout (in seconds).
SSLSessionCache shmcb:/var/cache/mod_ssl/scache(512000)
SSLSessionCacheTimeout 300
# Semaphore:
# Configure the path to the mutual exclusion semaphore the
# SSL engine uses internally for inter-process synchronization.
SSLMutex default
# Pseudo Random Number Generator (PRNG):
# Configure one or more sources to seed the PRNG of the
# SSL library. The seed data should be of good random quality.
# WARNING! On some platforms /dev/random blocks if not enough entropy
# is available. This means you then cannot use the /dev/random device
# because it would lead to very long connection times (as long as
# it requires to make more entropy available). But usually those
# platforms additionally provide a /dev/urandom device which doesn't
# block. So, if available, use this one instead. Read the mod_ssl User
# Manual for more details.
SSLRandomSeed startup file:/dev/urandom 256
SSLRandomSeed connect builtin
#SSLRandomSeed startup file:/dev/random 512
#SSLRandomSeed connect file:/dev/random 512
#SSLRandomSeed connect file:/dev/urandom 512
#
# Use "SSLCryptoDevice" to enable any supported hardware
# accelerators. Use "openssl engine -v" to list supported
# engine names. NOTE: If you enable an accelerator and the
# server does not start, consult the error logs and ensure
# your accelerator is functioning properly.
#
SSLCryptoDevice builtin
#SSLCryptoDevice ubsec
##
## SSL Virtual Host Context
##
NameVirtualHost *:443
<VirtualHost _default_:443>
# General setup for the virtual host, inherited from global configuration
#DocumentRoot "/var/www/html"
ServerName {{ public_hostname }}:443
# Use separate log files for the SSL virtual host; note that LogLevel
# is not inherited from httpd.conf.
ErrorLog logs/ssl_error_log
TransferLog logs/ssl_access_log
LogLevel warn
# SSL Engine Switch:
# Enable/Disable SSL for this virtual host.
SSLEngine on
# SSL Protocol support:
# List the enabled protocol levels with which clients will be able to
# connect. Disable SSLv2 access by default:
SSLProtocol {{ ssl_protocols }}
# SSL Cipher Suite:
# List the ciphers that the client is permitted to negotiate.
# See the mod_ssl documentation for a complete list.
SSLCipherSuite {{ ssl_ciphers }}
# Server Certificate:
# Point SSLCertificateFile at a PEM encoded certificate. If
# the certificate is encrypted, then you will be prompted for a
# pass phrase. Note that a kill -HUP will prompt again. A new
# certificate can be generated using the genkey(1) command.
#SSLCertificateFile /etc/pki/tls/certs/localhost.crt
SSLCertificateFile /etc/pki/tls/certs/{{ inventory_hostname }}.crt
# Server Private Key:
# If the key is not combined with the certificate, use this
# directive to point at the key file. Keep in mind that if
# you've both a RSA and a DSA private key you can configure
# both in parallel (to also allow the use of DSA ciphers, etc.)
#SSLCertificateKeyFile /etc/pki/tls/private/localhost.key
SSLCertificateKeyFile /etc/pki/tls/private/{{ inventory_hostname }}.key
# Server Certificate Chain:
# Point SSLCertificateChainFile at a file containing the
# concatenation of PEM encoded CA certificates which form the
# certificate chain for the server certificate. Alternatively
# the referenced file can be the same as SSLCertificateFile
# when the CA certificates are directly appended to the server
# certificate for convenience.
#SSLCertificateChainFile /etc/pki/tls/certs/server-chain.crt
# Certificate Authority (CA):
# Set the CA certificate verification path where to find CA
# certificates for client authentication or alternatively one
# huge file containing all of them (file must be PEM encoded)
#SSLCACertificateFile /etc/pki/tls/certs/ca-bundle.crt
# Client Authentication (Type):
# Client certificate verification type and depth. Types are
# none, optional, require and optional_no_ca. Depth is a
# number which specifies how deeply to verify the certificate
# issuer chain before deciding the certificate is not valid.
#SSLVerifyClient require
#SSLVerifyDepth 10
# Access Control:
# With SSLRequire you can do per-directory access control based
# on arbitrary complex boolean expressions containing server
# variable checks and other lookup directives. The syntax is a
# mixture between C and Perl. See the mod_ssl documentation
# for more details.
#<Location />
#SSLRequire ( %{SSL_CIPHER} !~ m/^(EXP|NULL)/ \
# and %{SSL_CLIENT_S_DN_O} eq "Snake Oil, Ltd." \
# and %{SSL_CLIENT_S_DN_OU} in {"Staff", "CA", "Dev"} \
# and %{TIME_WDAY} >= 1 and %{TIME_WDAY} <= 5 \
# and %{TIME_HOUR} >= 8 and %{TIME_HOUR} <= 20 ) \
# or %{REMOTE_ADDR} =~ m/^192\.76\.162\.[0-9]+$/
#</Location>
# SSL Engine Options:
# Set various options for the SSL engine.
# o FakeBasicAuth:
# Translate the client X.509 into a Basic Authorisation. This means that
# the standard Auth/DBMAuth methods can be used for access control. The
# user name is the `one line' version of the client's X.509 certificate.
# Note that no password is obtained from the user. Every entry in the user
# file needs this password: `xxj31ZMTZzkVA'.
# o ExportCertData:
# This exports two additional environment variables: SSL_CLIENT_CERT and
# SSL_SERVER_CERT. These contain the PEM-encoded certificates of the
# server (always existing) and the client (only existing when client
# authentication is used). This can be used to import the certificates
# into CGI scripts.
# o StdEnvVars:
# This exports the standard SSL/TLS related `SSL_*' environment variables.
# Per default this exportation is switched off for performance reasons,
# because the extraction step is an expensive operation and is usually
# useless for serving static content. So one usually enables the
# exportation for CGI and SSI requests only.
# o StrictRequire:
# This denies access when "SSLRequireSSL" or "SSLRequire" applied even
# under a "Satisfy any" situation, i.e. when it applies access is denied
# and no other module can change it.
# o OptRenegotiate:
# This enables optimized SSL connection renegotiation handling when SSL
# directives are used in per-directory context.
#SSLOptions +FakeBasicAuth +ExportCertData +StrictRequire
<Files ~ "\.(cgi|shtml|phtml|php3?)$">
SSLOptions +StdEnvVars
</Files>
<Directory "/var/www/cgi-bin">
SSLOptions +StdEnvVars
</Directory>
# SSL Protocol Adjustments:
# The safe and default but still SSL/TLS standard compliant shutdown
# approach is that mod_ssl sends the close notify alert but doesn't wait for
# the close notify alert from client. When you need a different shutdown
# approach you can use one of the following variables:
# o ssl-unclean-shutdown:
# This forces an unclean shutdown when the connection is closed, i.e. no
# SSL close notify alert is sent or allowed to be received. This violates
# the SSL/TLS standard but is needed for some brain-dead browsers. Use
# this when you receive I/O errors because of the standard approach where
# mod_ssl sends the close notify alert.
# o ssl-accurate-shutdown:
# This forces an accurate shutdown when the connection is closed, i.e. a
# SSL close notify alert is sent and mod_ssl waits for the close notify
# alert of the client. This is 100% SSL/TLS standard compliant, but in
# practice often causes hanging connections with brain-dead browsers. Use
# this only for browsers where you know that their SSL implementation
# works correctly.
# Notice: Most problems of broken clients are also related to the HTTP
# keep-alive facility, so you usually additionally want to disable
# keep-alive for those clients, too. Use variable "nokeepalive" for this.
# Similarly, one has to force some clients to use HTTP/1.0 to workaround
# their broken HTTP/1.1 implementation. Use variables "downgrade-1.0" and
# "force-response-1.0" for this.
SetEnvIf User-Agent ".*MSIE.*" \
nokeepalive ssl-unclean-shutdown \
downgrade-1.0 force-response-1.0
# Per-Server Logging:
# The home of a custom SSL log file. Use this when you want a
# compact non-error SSL logfile on a virtual host basis.
CustomLog logs/ssl_request_log \
"%t %h %{SSL_PROTOCOL}x %{SSL_CIPHER}x \"%r\" %b"
</VirtualHost>

View file

@ -1,2 +0,0 @@
---
extra_enablerepos: ''

View file

@ -1,16 +0,0 @@
<network connections='1'>
<name>default</name>
<uuid>45d7b450-f5ef-4c8c-8eee-f80885076fd1</uuid>
<forward mode='nat'>
<nat>
<port start='1024' end='65535'/>
</nat>
</forward>
<bridge name='virbr0' stp='on' delay='0'/>
<mac address='52:54:00:9a:fc:16'/>
<ip address='192.168.0.1' netmask='255.255.0.0'>
<dhcp>
<range start='192.168.0.3' end='192.168.255.254'/>
</dhcp>
</ip>
</network>

View file

@ -1 +0,0 @@
options kvm_intel nested=1

View file

@ -1,12 +0,0 @@
# Local SELinux policy module: allows processes running in the
# unconfined_service_t domain (the buildslave systemd services) to
# transition into svirt_t, the domain libvirt/qemu guests run in,
# so slaves can spawn disposable client VMs.
module service-virt-transition 1.0;
require {
# the source domain (systemd service) and target domain (libvirt guest)
type unconfined_service_t;
type svirt_t;
# the only permission granted: a domain transition on process execution
class process transition;
}
#============= unconfined_service_t ==============
allow unconfined_service_t svirt_t:process transition;

View file

@ -1,8 +0,0 @@
# Configure tmpfiles for Taskotron use. Read 'man 5 tmpfiles.d' to understand
# how it works exactly.
#Type Path Mode UID GID Age Argument
# Make sure /var/tmp/taskotron is ignored and never removed. Contents can be.
X /var/tmp/taskotron - - - 1w -

View file

@ -1,80 +0,0 @@
## A Taskotron configuration file representing available YUM repositories inside
## Fedora infrastructure.
## This file is in a ConfigParser syntax, very similar to INI syntax known from
## Windows.
## There is a guide describing how to update this file after important Fedora
## release events, please see:
## https://fedoraproject.org/wiki/How_to_update_yumrepoinfo.conf_in_Taskotron
[DEFAULT]
# URLs to yum repos
# Please note: 'download.fedoraproject.org' is a redirect that points you to a
# random mirror every time. For faster and more reliable operation, pick a close
# mirror from https://mirrors.fedoraproject.org and use it in 'baseurl' and 'baseurl_altarch'.
baseurl = http://download.fedoraproject.org/pub/fedora/linux
baseurl_altarch = http://download.fedoraproject.org/pub/fedora-secondary
goldurl = %(baseurl)s/releases/%(path)s/Everything/%(arch)s/os
updatesurl = %(baseurl)s/updates/%(path)s/Everything/%(arch)s
rawhideurl = %(baseurl)s/%(path)s/Everything/%(arch)s/os
# list of primary and alternate architectures. That decides whether baseurl or baseurl_altarch
# is used when determining path.
primary_arches = armhfp, x86_64
alternate_arches = aarch64, i386, ppc64, ppc64le, s390x
# a direct parent of this repo, essential for operations (dependencies)
parent =
# koji tag matching the repo
tag =
# release_status can be one of: obsolete, stable, branched or rawhide
# for non-top-parent repos this is an empty string
release_status =
# Rawhide
[rawhide]
url = %(rawhideurl)s
path = development/rawhide
tag = f29
release_status = rawhide
# Fedora 28
[f28]
url = %(goldurl)s
path = 28
tag = f28
release_status = stable
[f28-updates]
url = %(updatesurl)s
path = 28
parent = f28
tag = f28-updates
[f28-updates-testing]
url = %(updatesurl)s
path = testing/28
parent = f28-updates
tag = f28-updates-testing
# Fedora 27
[f27]
url = %(goldurl)s
path = 27
tag = f27
release_status = stable
[f27-updates]
url = %(updatesurl)s
path = 27
parent = f27
updatesurl = %(baseurl)s/updates/%(path)s/%(arch)s
tag = f27-updates
[f27-updates-testing]
url = %(updatesurl)s
path = testing/27
parent = f27-updates
updatesurl = %(baseurl)s/updates/%(path)s/%(arch)s
tag = f27-updates-testing

View file

@ -1,125 +0,0 @@
---
# Provision a Taskotron client host: install libtaskotron and its
# dependencies, render the Taskotron/testcloud configuration files,
# prepare disposable-client (libvirt) support and load the custom
# SELinux policy the buildslaves need to use qemu-kvm.

- name: install tmpfiles.d Taskotron config
  copy:
    src: tmpfiles-taskotron.conf
    dest: /etc/tmpfiles.d/taskotron.conf
    owner: root
    group: root
    # quoted so YAML does not parse the octal mode as an integer
    mode: "0600"
  when: deployment_type != 'local'

- name: ensure packages required for libtaskotron are installed
  dnf:
    # dnf accepts the whole list in one transaction -- no per-item loop needed
    name:
      - PyYAML
      - libtaskotron
    state: present
  when: deployment_type == 'local'

- name: ensure packages required for libtaskotron are installed
  dnf:
    name:
      - PyYAML
      - libtaskotron
      - testcloud
      - selinux-policy-devel
    state: present
  when: deployment_type in ['dev', 'stg', 'prod']

# see what happens when we don't have these
#- name: ensure packages required for taskotron tasks are installed
#  dnf:
#    name:
#      - python-solv
#      - python-librepo
#      - rpmlint
#    state: present
#  when: ansible_cmdline.ostree is not defined

- name: generate taskotron.yaml config file
  template:
    src: "{{ item }}"
    dest: /etc/taskotron/taskotron.yaml
    owner: root
    group: root
    mode: "0644"
  with_first_found:
    - "taskotron.yaml.j2.{{ deployment_type }}"
    - "taskotron.yaml.j2"

- name: generate namespaces.yaml config file
  template:
    src: "{{ item }}"
    dest: /etc/taskotron/namespaces.yaml
    owner: root
    group: root
    mode: "0644"
  with_first_found:
    - "namespaces.yaml.j2.{{ deployment_type }}"
    - "namespaces.yaml.j2"

- name: generate testcloud config file
  template:
    src: "{{ item }}"
    dest: /etc/testcloud/settings.py
    owner: root
    group: root
    mode: "0644"
  with_first_found:
    - "settings.py.testcloud.j2.{{ deployment_type }}"
    - "settings.py.testcloud.j2"

- name: upload yumrepoinfo.conf
  copy:
    src: "{{ item }}"
    dest: /etc/taskotron/yumrepoinfo.conf
    owner: root
    group: root
    mode: "0644"
  with_first_found:
    - "yumrepoinfo.conf.{{ deployment_type }}"
    - "yumrepoinfo.conf"

- name: check that baseurl= line exists in yumrepoinfo.conf
  command: grep -q '^baseurl =' /etc/taskotron/yumrepoinfo.conf
  changed_when: false

- name: check that baseurl_altarch= line exists in yumrepoinfo.conf
  command: grep -q '^baseurl_altarch =' /etc/taskotron/yumrepoinfo.conf
  changed_when: false

- name: set infra repo for baseurl in yumrepoinfo.conf
  lineinfile:
    path: /etc/taskotron/yumrepoinfo.conf
    regexp: '^baseurl ='
    line: 'baseurl = http://infrastructure.fedoraproject.org/pub/fedora/linux'

- name: set infra repo for baseurl_altarch in yumrepoinfo.conf
  lineinfile:
    path: /etc/taskotron/yumrepoinfo.conf
    regexp: '^baseurl_altarch ='
    line: 'baseurl_altarch = http://infrastructure.fedoraproject.org/pub/fedora-secondary'

- name: make sure there's no download.fp.o leftover in yumrepoinfo.conf
  command: grep -qE '^[^#].*/download.fedoraproject.org/' /etc/taskotron/yumrepoinfo.conf
  changed_when: false
  register: result
  # rc 1 means grep found no match, which is the desired state here
  failed_when: result.rc != 1

- name: create /var/log/taskotron for task logfiles
  file:
    path: /var/log/taskotron
    state: directory
    owner: "{{ slave_user }}"
    group: "{{ slave_user }}"
    mode: "1755"
  when: deployment_type == 'local'

- name: ensure buildslaves group exists
  group:
    name: "{{ slaves_group }}"
  when: deployment_type in ['dev', 'stg', 'prod']

- name: create directory for disposable clients images
  file:
    path: "{{ imagesdir }}"
    state: directory
    owner: root
    group: "{{ slaves_group }}"
    mode: "1755"
  when: deployment_type in ['dev', 'stg', 'prod']

- name: create /var/log/taskotron for task logfiles
  file:
    path: /var/log/taskotron
    state: directory
    owner: root
    group: "{{ slaves_group }}"
    mode: "1775"
  when: deployment_type in ['dev', 'stg', 'prod']

- name: upload custom selinux policy to allow buildslaves to use qemu-kvm
  copy:
    src: service-virt-transition.te
    dest: /root/service-virt-transition.te
    owner: root
    group: root
    mode: "0644"
  when: deployment_type in ['dev', 'stg', 'prod']

- name: compile selinux policy
  command: make -f /usr/share/selinux/devel/Makefile
  args:
    chdir: /root/
    # skip recompilation once the policy package exists
    creates: /root/service-virt-transition.pp
  when: deployment_type in ['dev', 'stg', 'prod']

- name: load custom selinux policy for qemu-kvm from buildslaves
  command: semodule -i /root/service-virt-transition.pp
  args:
    chdir: /root/
  when: deployment_type in ['dev', 'stg', 'prod']

- name: start and enable libvirt services
  service:
    name: "{{ item }}"
    state: started
    enabled: true
  loop:
    - libvirtd
    - virtlogd

- name: copy custom libvirt network config
  copy:
    src: default.xml
    dest: /etc/libvirt/qemu/networks/default.xml
    owner: root
    group: root
    mode: "0600"
  when: deployment_type in ['dev', 'stg', 'prod']

- name: hotfix libtaskotron to set longer timeout for testcloud, this should be temporary fix though :/
  # NOTE(review): hardcodes the python3.7 site-packages path; this silently
  # stops matching after a Python version bump -- confirm on target hosts.
  replace:
    dest: /usr/lib/python3.7/site-packages/libtaskotron/ext/disposable/vm.py
    regexp: ', timeout=60\):'
    replace: ', timeout=120):'
    backup: true
  when: deployment_type in ['dev']

- name: setup nested virt on virthosts with nested=true variable
  copy:
    src: kvm_intel.conf
    dest: /etc/modprobe.d/kvm_intel.conf
  # 'nested == true' only matched a real boolean; also accept truthy strings
  # from inventory ("true", "yes") and skip cleanly when nested is undefined
  when: nested | default(false) | bool

View file

@ -1,25 +0,0 @@
# A list of namespaces that any task is allowed to post a result into
namespaces_safe:
- scratch
# A list of git repos that are allowed to post a result into a particular namespace
namespaces_whitelist:
dist:
# due to a bug in libtaskotron, list all repos instead of prefix
#- {{ grokmirror_user }}@{{ buildmaster }}:/var/lib/git/mirror/fedoraqa/
- {{ grokmirror_user }}@{{ buildmaster }}:/var/lib/git/mirror/fedoraqa/abicheck/
- {{ grokmirror_user }}@{{ buildmaster }}:/var/lib/git/mirror/fedoraqa/python-versions/
- {{ grokmirror_user }}@{{ buildmaster }}:/var/lib/git/mirror/fedoraqa/rpmdeplint/
- {{ grokmirror_user }}@{{ buildmaster }}:/var/lib/git/mirror/fedoraqa/rpmgrill/
- {{ grokmirror_user }}@{{ buildmaster }}:/var/lib/git/mirror/fedoraqa/rpmlint/
pkg:
# The directory used to be called rpms-checks
- git://pkgs.fedoraproject.org/rpms-checks/
# But it got renamed to test-rpms here
# https://pagure.io/fedora-infrastructure/issue/5570
- git://pkgs.fedoraproject.org/test-rpms/
# Also, modules are a thing
# https://pagure.io/fedora-infrastructure/issue/5571
- git://pkgs.fedoraproject.org/test-modules/
- git://pkgs.fedoraproject.org/test-docker/

View file

@ -1,73 +0,0 @@
# Commented out default values with details are displayed below. If you want
# to change the values, make sure this file is available in one of the three
# supported config locations:
# - conf/settings.py in the git checkout
# - ~/.config/testcloud/settings.py
# - /etc/testcloud/settings.py
#DOWNLOAD_PROGRESS = True
#LOG_FILE = None
## Directories for data and cached downloaded images ##
#DATA_DIR = "/var/lib/testcloud/"
#STORE_DIR = "/var/lib/testcloud/backingstores"
## Data for cloud-init ##
#PASSWORD = 'passw0rd'
#HOSTNAME = 'testcloud'
#META_DATA = """instance-id: iid-123456
#local-hostname: %s
#"""
## Read http://cloudinit.readthedocs.io/en/latest/topics/examples.html to see
## what options you can use here.
# cloud-init user-data template; the '%s' placeholder below is filled in
# with the configured root password before the instance boots.
# NOTE(review): 'ssh-authorized-keys' is the legacy dashed spelling; newer
# cloud-init documents 'ssh_authorized_keys' -- confirm the target cloud-init
# version still accepts the dashed form before changing it.
USER_DATA = """#cloud-config
users:
- default
- name: root
password: %s
chpasswd: { expire: False }
ssh-authorized-keys:
- {{ buildslave_pubkey }}
runcmd:
- [dnf, update, libtaskotron, python-resultsdb_api]
"""
#ATOMIC_USER_DATA = """#cloud-config
#password: %s
#chpasswd: { expire: False }
#ssh_pwauth: True
#runcmd:
# - [ sh, -c, 'echo -e "ROOT_SIZE=4G\nDATA_SIZE=10G" > /etc/sysconfig/docker-storage-setup']
#"""
## Extra cmdline args for the qemu invocation ##
## Customize as needed :)
#CMD_LINE_ARGS = []
# The timeout, in seconds, to wait for an instance to boot before
# failing the boot process. Setting this to 0 disables waiting and
# returns immediately after starting the boot process.
BOOT_TIMEOUT = 90
# ram size, in MiB
RAM = 6144
# Desired size, in GiB of instance disks. 0 leaves disk capacity
# identical to source image
DISK_SIZE = 40
# Number of retries when stopping of instance fails (host is busy)
#STOP_RETRIES = 3
# Waiting time between stop retries, in seconds
#STOP_RETRY_WAIT = 1
# Desired VM type: False = BIOS, True = UEFI
UEFI = True

View file

@ -1,218 +0,0 @@
## Main configuration file for Taskotron
## The file is in YAML syntax, read more about it at:
## http://en.wikipedia.org/wiki/Yaml
## libtaskotron docs live at:
## https://docs.qa.fedoraproject.org/libtaskotron/latest/
## ==== GENERAL section ====
## Config profile selection.
## There are two major config profiles in Taskotron - development and
## production.
## Development profile is used for developing libtaskotron, developing checks
## based on libtaskotron and local execution of these checks.
## Production profile is used for deploying Taskotron as a service on a
## server, periodically executing the checks and reporting results to relevant
## result consumers.
## The default profile is 'development'. If you want to switch to the
## 'production' profile, uncomment the following line.
## You can also switch a profile temporarily by using TASKOTRON_PROFILE=name
## environment variable, it has a higher priority. All other options set in this
## file still apply of course.
## [choices: production, development; default: development]
profile: production
## Task execution mode. The tasks can be executed locally (on the current
## machine) or remotely in a disposable virtual machine. Local execution is
## dangerous with destructive or untrusted tasks, remote execution requires
## some additional setup (see Taskotron documentation).
## Remote execution is done through libvirt, it creates a throwaway virtual
## machine from a specified disk image, executes the task and deletes the
## machine.
## Local execution is the default mode for development profile and remote
## execution for production profile.
## [choices: local, libvirt; default: local for development, libvirt for production]
{% if deployment_type in ['dev', 'stg', 'prod'] %}
runtask_mode: libvirt
{% elif deployment_type in ['local'] %}
runtask_mode: local
{% endif %}
## Supported machine architectures. This is mostly used by generic,
## arch-independent tasks to determine which arches to test and report against.
## You can still run an arch-specific task on any other arch using the command
## line.
#supported_arches: ['x86_64', 'armhfp']
## ==== SCHEDULING section ====
## This section holds options related to the scheduling and execution system,
## currently we use Buildbot
## name of step in buildbot that executes the task
buildbot_task_step: 'runtask'
## ==== REPORTING section ====
## This section controls which result reports you want to send after the test
## execution is complete.
## Whether to send test results to the configured ResultsDB server.
## [default: True for production, False for development]
report_to_resultsdb: True
## ==== RESOURCES section ====
## This section specifies access details to various external services.
##
## Note: Try to keep custom URL addresses without a trailing slash. Otherwise
## the rendered URLs might end up containing double slashes, which some
## application servers don't handle gracefully (e.g. Flask's internal app
## server werkzeug).
## URL of Koji instance used for querying about new builds
koji_url: '{{ kojihub_url }}'
## URL of repository of all the RPM packages built in Koji
pkg_url: '{{ kojipkg_url }}'
## Whether to use staging Bodhi instance instead of production (the
## default one).
#bodhi_staging: false
## URL of ResultsDB server API interface, which can store all test results.
## Please make sure the URL doesn't have a trailing slash.
resultsdb_server: '{{ resultsdb_server }}'
## URL of ResultsDB frontend, which displays results from ResultsDB.
## Please make sure the URL doesn't have a trailing slash.
resultsdb_frontend: '{{ resultsdb_external_url }}'
## URL of ExecDB server API interface, which tracks task execution status.
## Please make sure the URL doesn't have a trailing slash.
execdb_server: '{{ execdb_external_url }}'
{% if deployment_type in ['dev'] %}
## URL of VAULT server API interface, which stores secrets.
## Please make sure the URL doesn't have a trailing slash.
vault_server: '{{ vault_api_server }}'
## Use vault to grab secrets
vault_enabled: true
## Username for vault server
vault_username: '{{ vault_api_username }}'
## Password for vault server
vault_password: '{{ vault_api_password }}'
{% endif %}
## URL of taskotron buildmaster, to construct log URLs from.
## Please make sure the URL doesn't have a trailing slash.
taskotron_master: '{{ taskotron_master }}'
## URL of artifacts base directory, to construct artifacts URLs from. The final
## URL will be $artifacts_baseurl/<uuid>.
## Please make sure the URL doesn't have a trailing slash.
artifacts_baseurl: '{{ artifacts_base_url }}'
## Whether to cache downloaded files to speed up subsequent downloads. If True,
## files will be downloaded to a common directory specified by "cachedir". At
## the moment, Taskotron only supports Koji RPM downloads to be cached.
## [default: False for production, True for development]
#download_cache_enabled: false
## ==== PATHS section ====
## Location of various pieces of the project.
## The location of log files for Taskotron
#logdir: /var/log/taskotron
## The location of task files (git checkout) when running in disposable clients mode
#client_taskdir: /var/tmp/taskotron/taskdir
## The location of temporary files for Taskotron
#tmpdir: /var/tmp/taskotron
## The location of artifacts produced by checks
#artifactsdir: /var/lib/taskotron/artifacts
## The location of cached files downloaded by Taskotron
#cachedir: /var/cache/taskotron
## The location of images for disposable clients
## File names need to adhere to the naming standard of:
## YYMMDD_HHMM-fedora-RELEASE-FLAVOR-ARCH.(qcow2|raw|img)
## For example:
## 160301_1030-fedora-25-taskotron_cloud-x86_64.img
## Variables disposable_(release|flavor|arch) set in this config file
## define what kind of image is looked for.
## The newest (by YYMMDD_HHMM) image of the respective R-F-A is used.
imagesdir: '{{ imagesdir }}'
## If set to False, latest image from imagesdir will be used instead of one at imageurl
## [default: True]
force_imageurl: false
## Url of an image to download and use for disposable client, if force_imageurl was set
#imageurl: http://download.fedoraproject.org/pub/fedora/linux/releases/27/CloudImages/x86_64/images/Fedora-Cloud-Base-27-1.6.x86_64.qcow2
## Default distro/release/flavor/arch for the disposable images discovery
#default_disposable_distro: fedora
## Quoted so YAML keeps it a string, not the integer 30.
default_disposable_release: '30'
#default_disposable_flavor: taskotron_cloud
#default_disposable_arch: x86_64
## Number of retries when disposable client fails to boot within timeout
#spawn_vm_retries: 3
## Additional DNF repos to set up on the minion.
## You can use a special syntax 'copr:reponame' to enable Fedora COPR repo on
## minion by running 'dnf copr enable reponame' on it.
minion_repos:
- https://fedorapeople.org/groups/qa/taskotron-repos/taskotron-production-override/taskotron-production-override.repo
- https://infrastructure.fedoraproject.org/cgit/ansible.git/plain/files/common/fedora-infra-tags.repo
{% if deployment_type == 'stg' %}
- https://fedorapeople.org/groups/qa/taskotron-repos/taskotron-stg-override/taskotron-stg-override.repo
- https://infrastructure.fedoraproject.org/cgit/ansible.git/plain/files/common/fedora-infra-tags-stg.repo
{% endif %}
{% if deployment_type == 'dev' %}
- copr:kparal/taskotron-dev
{% endif %}
## If one or more minions repos fail to be added (e.g. not accessible), should
## we abort the execution or ignore the error and continue?
## [default: False]
minion_repos_ignore_errors: true
## ==== LOGGING section ====
## This section contains configuration of logging.
## Configuration of logging level. Here can be configured which messages
## will be logged. You can specify different level for logging to standard
## output (option log_level_stream) and logging to file (log_level_file).
## Possible values can be found here:
## https://docs.python.org/3/library/logging.html#logging-levels
{% if deployment_type == 'dev' %}
log_level_stream: DEBUG
{% elif deployment_type in ['stg', 'prod', 'local'] %}
log_level_stream: INFO
{% endif %}
#log_level_file: DEBUG
## If True, logging to file will be enabled.
## [default: True for production, False for development]
#log_file_enabled: true
## ==== SECRETS section ====
## All login credentials and other secrets are here. If you add some secret
## here, make sure you make this file readable just for the right user accounts.
## SSH private key location. Used for remote task execution, when connecting to
## VMs and remote machines. If your systems are not configured for automatic
## connection (private keys located in SSH's standard search path), specify a
## path to the private key here. An empty value (the default) means to rely on
## the standard search path only.
#ssh_privkey: /path/to/private.key

View file

@ -1,5 +0,0 @@
---
# Variables used by the frontend landing page — presumably consumed by the
# landingpage template in this role; verify against the template before reuse.
# jQuery build loaded alongside fedmenu, pinned to the copy hosted by Fedora apps.
jquery_url: 'https://apps.fedoraproject.org/fedmenu/js/jquery-1.11.2.min.js'
# fedmenu script (the common Fedora application menu widget).
fedmenu_url: 'https://apps.fedoraproject.org/fedmenu/js/fedmenu.js'
# Data file that fedmenu reads its application list from.
fedmenu_data_url: 'https://apps.fedoraproject.org/js/data.js'
# Extra repos to enable; empty string means none.
extra_enablerepos: ''

View file

@ -1,206 +0,0 @@
/* Overpass web font in three weights (regular 400, bold 700, light 300).
 * Each face prefers a locally installed copy and falls back to the TTF
 * files bundled under ../fonts/. */
@font-face {
  font-family: "Overpass";
  src: local("Overpass Regular"),
       local(Overpass),
       local(Overpass-Regular),
       url(../fonts/Overpass-Regular.ttf);
  font-style: normal;
  font-weight: 400;
}
@font-face {
  font-family: "Overpass";
  /* original listed local(Overpass-Bold) twice; duplicate removed */
  src: local("Overpass Bold"),
       local(Overpass-Bold),
       url(../fonts/Overpass-Bold.ttf);
  font-style: normal;
  font-weight: 700;
}
@font-face {
  font-family: "Overpass";
  src: local("Overpass Light"),
       local(Overpass-Light),
       url(../fonts/Overpass-Light.ttf);
  font-style: normal;
  font-weight: 300;
}
/* Page-wide defaults: Overpass for all text, near-white background.
 * NOTE(review): no generic font-family fallback (e.g. sans-serif) is given;
 * left as-is to preserve the original rendering behavior. */
body {
  font-family: "Overpass";
  background-color: #fefefe;
  font-size: 16pt; /* terminating ';' was missing in the original */
}
/* ==== Page background and layout helpers ==== */
.bg {
  background-image: linear-gradient(#e8e8e8, #fefefe);
}
.fullpage {
  height: 100vh;
  display: flex;
  justify-content: center;
  align-items: center;
}

/* ==== Navigation bar ==== */
.navbar {
  margin-bottom: 0px;
  border: none;
  background: none;
}
.navbar-dark {
  background-color: #535452;
}
.navbar-nav > li > a {
  text-transform: uppercase;
  color: #e8e8e8 !important;
  font-size: 10pt;
}
.navbar-nav > li > a:hover {
  color: #639a81 !important;
}
.navbar-brand {
  cursor: pointer;
}
.dropdown-menu {
  background-color: #535452;
  text-transform: uppercase;
}
.dropdown-menu a {
  color: #e8e8e8 !important;
}
.dropdown-menu a:hover {
  color: #535452 !important;
}

/* ==== Deployment-type badges (green/red/orange) ==== */
.production {
  text-transform: uppercase;
  color: #639a81 !important;
  font-size: 10pt;
}
.development {
  text-transform: uppercase;
  color: #cc6168 !important;
  font-size: 10pt;
}
.staging {
  text-transform: uppercase;
  color: #cc8f60 !important;
  font-size: 10pt;
}

/* ==== Hero / jumbotron area ==== */
.jumbotron {
  background: none;
}
.logo {
  width: 1000px;
}
.punchline {
  font-size: 34pt !important;
  font-weight: 700 !important;
  text-transform: uppercase;
  color: #535452;
}

/* ==== Fixed-height vertical spacers ==== */
.vfill-40 {
  margin: 0px;
  padding: 0px;
  height: 40px;
}
.vfill-80 {
  margin: 0px;
  padding: 0px;
  height: 80px;
}
.vfill-120 {
  margin: 0px;
  padding: 0px;
  height: 120px;
}

/* ==== Buttons ==== */
.btn-primary {
  background-color: rgba(0,0,0,0);
  border-color: #639a81 !important;
  border-width: 2px;
  color: #639a81;
  margin: 0px 5px 5px 10px;
  padding: 11px 16px 8px 16px;
  border-radius: 23px;
  text-transform: uppercase;
  font-size: 12pt;
  font-weight: 600 !important;
}
.btn-primary:hover {
  background-color: #639a81;
  border-color: #639a81;
  color: white;
}

/* ==== Hover fade effect (hover.css-style) ==== */
.hvr-fade {
  display: inline-block;
  vertical-align: middle;
  -webkit-transform: translateZ(0);
  transform: translateZ(0);
  box-shadow: 0 0 1px rgba(0, 0, 0, 0);
  -webkit-backface-visibility: hidden;
  backface-visibility: hidden;
  -moz-osx-font-smoothing: grayscale;
  overflow: hidden;
  -webkit-transition-duration: 0.3s;
  transition-duration: 0.3s;
  -webkit-transition-property: color, background-color;
  transition-property: color, background-color;
}
.hvr-fade:hover, .hvr-fade:focus, .hvr-fade:active {
  background-color: #639a81 !important;
  color: white;
}

/* ==== Content sections ==== */
.vertical-center {
  display: flex;
  align-items: center;
}
.featurette {
  font-size: 72pt;
  margin-top: 0.1em;
  line-height: 0px;
}
hr {
  margin-top: 40px;
  margin-bottom: 40px;
}
h2 {
  font-size: 18pt !important;
  text-transform: uppercase;
  font-weight: 700;
}
.text-muted {
  font-weight: 300;
}

/* ==== Footer ==== */
footer {
  background: linear-gradient(#fefefe, #e8e8e8);
}
small {
  font-size: 9pt;
}

Binary file not shown.

Before

Width:  |  Height:  |  Size: 470 B

View file

@ -1,66 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="26.568575mm"
height="33.02mm"
viewBox="0 0 94.14062 117"
id="svg4510"
version="1.1"
inkscape:version="0.91 r13725"
sodipodi:docname="taskotron-box.svg">
<defs
id="defs4512" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.979899"
inkscape:cx="99.735278"
inkscape:cy="-24.881749"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1869"
inkscape:window-height="1016"
inkscape:window-x="1971"
inkscape:window-y="277"
inkscape:window-maximized="1" />
<metadata
id="metadata4515">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-358.64397,-196.71935)">
<path
style="display:inline;opacity:1;fill:#639a81;fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
d="m 405.71428,196.71935 -46.57031,26.8867 47.26367,26.45901 0,4.4062 -47.76367,-26.74021 0,58.8125 47.07031,27.1758 47.07031,-27.1758 0,-18.336 -47.07031,27.1758 -24.57813,-14.1914 0,-16.4785 24.57813,14.1914 47.07031,-27.1758 0,-27.834 -47.07031,-27.1757 z"
id="path5251"
inkscape:connector-curvature="0" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 2.3 KiB

View file

@ -1,131 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="152.8407mm"
height="33.020004mm"
viewBox="0 0 541.56153 117.00001"
id="svg3548"
version="1.1"
inkscape:version="0.91 r13725"
sodipodi:docname="taskotron-dev.svg">
<defs
id="defs3550" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.4142136"
inkscape:cx="164.34191"
inkscape:cy="137.69462"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1869"
inkscape:window-height="1016"
inkscape:window-x="1971"
inkscape:window-y="277"
inkscape:window-maximized="1" />
<metadata
id="metadata3553">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title />
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-107.01628,-333.22243)">
<g
id="g5313"
transform="translate(-144.75325,-51.724965)">
<path
d="m 574.31747,465.93712 c 0,-4.6652 -0.022,-10.1319 -0.022,-10.1319 l 13.2483,0 0,-24.6367 -20.5845,-0.043 c 0,0 0,34.0123 -0.011,34.8116 -0.117,-0.019 -4.8333,0 -4.8333,0 l -5.1537,-5.1537 0,-34.5838 5.2826,-5.2631 29.8084,0.021 5.0859,5.1537 0,39.7375 c -9.0794,0.1723 -17.0225,0.088 -22.8202,0.088 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="path4798"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccccccccccccccc" />
<g
style="fill:#535452;fill-opacity:1"
id="g4800"
transform="translate(24.19388,-514.91484)">
<path
sodipodi:nodetypes="ccccccc"
inkscape:connector-curvature="0"
d="m 517.42038,980.86302 -22.7564,-22.63636 22.7564,-22.36364 12.22779,0 -22.43625,22.36364 22.43625,22.63636 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:0.77410464"
id="path4802" />
<rect
y="935.86304"
x="490.38315"
height="45"
width="10.232907"
id="rect4804"
style="opacity:1;fill:#535452;fill-opacity:1;stroke:none;stroke-width:4;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:4;stroke-opacity:0.77410464" />
</g>
<path
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 653.48878,420.94862 0,44.9746 10.16999,0 -0.01,-34.9258 20.1621,-0.01 c 0,0 0.044,4.433 0.019,7.334 l -14.69141,0 0,10.0449 -0.02,0 17.56641,17.5801 12.2285,0 -17.3047,-17.5625 12.25,0 0,-22.3223 -5.04691,-5.1152 -35.32619,0 z"
id="path4806"
inkscape:connector-curvature="0" />
<path
inkscape:connector-curvature="0"
id="path4808"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 793.33107,465.94842 -10.0046,0 -20.2029,-27.4325 0,27.4325 -10.0047,0 0,-45 10.0047,0 20.2029,27.6351 0,-27.6351 10.0046,0 0,45 0,0 z" />
<path
inkscape:connector-curvature="0"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:-2.09637547px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 369.16458,420.94842 0,10.1816 15.3399,0 0,34.8184 10.1816,0 0,-34.8184 15.3399,0 0,-10.1816 -40.8614,0 z"
id="path4810" />
<path
inkscape:connector-curvature="0"
id="path4812"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 505.88197,460.73002 -5.1506,5.2184 -35.1732,0 0,-10.1657 30.2937,0 0,-7.2515 -30.2937,0 0,-22.3645 5.15061,-5.2183 35.17319,0 0,10.0979 -30.2937,0 0,7.3192 30.2937,0 0,22.3645 0,0 z" />
<path
d="m 456.82298,426.25422 0,39.6941 -35.1806,0 -5.1434,-5.2208 0,-22.3602 22.88889,0.016 0,10.1489 -12.86439,-0.01 0.015,7.2564 20.2521,-0.012 0.001,-24.7152 -30.2929,-0.012 0,-10.0943 35.17441,0 5.14959,5.2201 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:0px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:0.74159491px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="path4814"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cccccccccccccccc" />
<path
sodipodi:nodetypes="ccccccccccccccc"
inkscape:connector-curvature="0"
id="path4816"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 726.99267,465.94502 c 0,-4.6652 0.022,-10.1319 0.022,-10.1319 l -13.24829,0 0,-24.6367 20.5846,-0.043 c 0,0 0,34.0124 0.011,34.8116 0.11699,-0.019 4.8333,0 4.8333,0 l 5.15369,-5.1536 0,-34.5839 -5.24749,-5.2591 -29.8435,0.017 -5.0859,5.1536 0,39.7376 c 9.0793,0.1722 17.0225,0.088 22.8201,0.088 z" />
<path
id="path4818"
d="m 605.50407,420.94842 0,10.1816 15.3399,0 0,34.8184 10.1816,0 0,-34.8184 15.3398,0 0,-10.1816 -40.8613,0 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:-2.09637547px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
inkscape:connector-curvature="0" />
</g>
<path
style="display:inline;opacity:1;fill:#cc6168;fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
d="m 154.08659,333.22244 -46.57031,26.8867 47.26367,26.45901 0,4.4062 -47.76367,-26.74021 0,58.8125 47.07031,27.1758 47.07031,-27.1758 0,-18.336 -47.07031,27.1758 -24.57813,-14.1914 0,-16.4785 24.57813,14.1914 47.07031,-27.1758 0,-27.834 -47.07031,-27.1757 z"
id="path5251"
inkscape:connector-curvature="0" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 8.6 KiB

View file

@ -1,131 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="152.8407mm"
height="33.020004mm"
viewBox="0 0 541.56153 117.00001"
id="svg3548"
version="1.1"
inkscape:version="0.91 r13725"
sodipodi:docname="taskotron.svg">
<defs
id="defs3550" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1.979899"
inkscape:cx="254.20628"
inkscape:cy="13.732789"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1869"
inkscape:window-height="1016"
inkscape:window-x="1971"
inkscape:window-y="277"
inkscape:window-maximized="1" />
<metadata
id="metadata3553">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-107.01628,-333.22243)">
<g
id="g5313"
transform="translate(-144.75325,-51.724965)">
<path
d="m 574.31747,465.93712 c 0,-4.6652 -0.022,-10.1319 -0.022,-10.1319 l 13.2483,0 0,-24.6367 -20.5845,-0.043 c 0,0 0,34.0123 -0.011,34.8116 -0.117,-0.019 -4.8333,0 -4.8333,0 l -5.1537,-5.1537 0,-34.5838 5.2826,-5.2631 29.8084,0.021 5.0859,5.1537 0,39.7375 c -9.0794,0.1723 -17.0225,0.088 -22.8202,0.088 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="path4798"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccccccccccccccc" />
<g
style="fill:#535452;fill-opacity:1"
id="g4800"
transform="translate(24.19388,-514.91484)">
<path
sodipodi:nodetypes="ccccccc"
inkscape:connector-curvature="0"
d="m 517.42038,980.86302 -22.7564,-22.63636 22.7564,-22.36364 12.22779,0 -22.43625,22.36364 22.43625,22.63636 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:0.77410464"
id="path4802" />
<rect
y="935.86304"
x="490.38315"
height="45"
width="10.232907"
id="rect4804"
style="opacity:1;fill:#535452;fill-opacity:1;stroke:none;stroke-width:4;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:4;stroke-opacity:0.77410464" />
</g>
<path
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 653.48878,420.94862 0,44.9746 10.16999,0 -0.01,-34.9258 20.1621,-0.01 c 0,0 0.044,4.433 0.019,7.334 l -14.69141,0 0,10.0449 -0.02,0 17.56641,17.5801 12.2285,0 -17.3047,-17.5625 12.25,0 0,-22.3223 -5.04691,-5.1152 -35.32619,0 z"
id="path4806"
inkscape:connector-curvature="0" />
<path
inkscape:connector-curvature="0"
id="path4808"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 793.33107,465.94842 -10.0046,0 -20.2029,-27.4325 0,27.4325 -10.0047,0 0,-45 10.0047,0 20.2029,27.6351 0,-27.6351 10.0046,0 0,45 0,0 z" />
<path
inkscape:connector-curvature="0"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:-2.09637547px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 369.16458,420.94842 0,10.1816 15.3399,0 0,34.8184 10.1816,0 0,-34.8184 15.3399,0 0,-10.1816 -40.8614,0 z"
id="path4810" />
<path
inkscape:connector-curvature="0"
id="path4812"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 505.88197,460.73002 -5.1506,5.2184 -35.1732,0 0,-10.1657 30.2937,0 0,-7.2515 -30.2937,0 0,-22.3645 5.15061,-5.2183 35.17319,0 0,10.0979 -30.2937,0 0,7.3192 30.2937,0 0,22.3645 0,0 z" />
<path
d="m 456.82298,426.25422 0,39.6941 -35.1806,0 -5.1434,-5.2208 0,-22.3602 22.88889,0.016 0,10.1489 -12.86439,-0.01 0.015,7.2564 20.2521,-0.012 0.001,-24.7152 -30.2929,-0.012 0,-10.0943 35.17441,0 5.14959,5.2201 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:0px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:0.74159491px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="path4814"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cccccccccccccccc" />
<path
sodipodi:nodetypes="ccccccccccccccc"
inkscape:connector-curvature="0"
id="path4816"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 726.99267,465.94502 c 0,-4.6652 0.022,-10.1319 0.022,-10.1319 l -13.24829,0 0,-24.6367 20.5846,-0.043 c 0,0 0,34.0124 0.011,34.8116 0.11699,-0.019 4.8333,0 4.8333,0 l 5.15369,-5.1536 0,-34.5839 -5.24749,-5.2591 -29.8435,0.017 -5.0859,5.1536 0,39.7376 c 9.0793,0.1722 17.0225,0.088 22.8201,0.088 z" />
<path
id="path4818"
d="m 605.50407,420.94842 0,10.1816 15.3399,0 0,34.8184 10.1816,0 0,-34.8184 15.3398,0 0,-10.1816 -40.8613,0 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:-2.09637547px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
inkscape:connector-curvature="0" />
</g>
<path
style="display:inline;opacity:1;fill:#639a81;fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
d="m 154.08659,333.22244 -46.57031,26.8867 47.26367,26.45901 0,4.4062 -47.76367,-26.74021 0,58.8125 47.07031,27.1758 47.07031,-27.1758 0,-18.336 -47.07031,27.1758 -24.57813,-14.1914 0,-16.4785 24.57813,14.1914 47.07031,-27.1758 0,-27.834 -47.07031,-27.1757 z"
id="path5251"
inkscape:connector-curvature="0" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 8.6 KiB

View file

@ -1,131 +0,0 @@
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<!-- Created with Inkscape (http://www.inkscape.org/) -->
<svg
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:cc="http://creativecommons.org/ns#"
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:svg="http://www.w3.org/2000/svg"
xmlns="http://www.w3.org/2000/svg"
xmlns:sodipodi="http://sodipodi.sourceforge.net/DTD/sodipodi-0.dtd"
xmlns:inkscape="http://www.inkscape.org/namespaces/inkscape"
width="152.8407mm"
height="33.020004mm"
viewBox="0 0 541.56153 117.00001"
id="svg3548"
version="1.1"
inkscape:version="0.91 r13725"
sodipodi:docname="taskotron-stg.svg">
<defs
id="defs3550" />
<sodipodi:namedview
id="base"
pagecolor="#ffffff"
bordercolor="#666666"
borderopacity="1.0"
inkscape:pageopacity="0.0"
inkscape:pageshadow="2"
inkscape:zoom="1"
inkscape:cx="412.10946"
inkscape:cy="-3.3890212"
inkscape:document-units="px"
inkscape:current-layer="layer1"
showgrid="false"
fit-margin-top="0"
fit-margin-left="0"
fit-margin-right="0"
fit-margin-bottom="0"
inkscape:window-width="1869"
inkscape:window-height="1016"
inkscape:window-x="1971"
inkscape:window-y="277"
inkscape:window-maximized="1" />
<metadata
id="metadata3553">
<rdf:RDF>
<cc:Work
rdf:about="">
<dc:format>image/svg+xml</dc:format>
<dc:type
rdf:resource="http://purl.org/dc/dcmitype/StillImage" />
<dc:title></dc:title>
</cc:Work>
</rdf:RDF>
</metadata>
<g
inkscape:label="Layer 1"
inkscape:groupmode="layer"
id="layer1"
transform="translate(-107.01628,-333.22243)">
<g
id="g5313"
transform="translate(-144.75325,-51.724965)">
<path
d="m 574.31747,465.93712 c 0,-4.6652 -0.022,-10.1319 -0.022,-10.1319 l 13.2483,0 0,-24.6367 -20.5845,-0.043 c 0,0 0,34.0123 -0.011,34.8116 -0.117,-0.019 -4.8333,0 -4.8333,0 l -5.1537,-5.1537 0,-34.5838 5.2826,-5.2631 29.8084,0.021 5.0859,5.1537 0,39.7375 c -9.0794,0.1723 -17.0225,0.088 -22.8202,0.088 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="path4798"
inkscape:connector-curvature="0"
sodipodi:nodetypes="ccccccccccccccc" />
<g
style="fill:#535452;fill-opacity:1"
id="g4800"
transform="translate(24.19388,-514.91484)">
<path
sodipodi:nodetypes="ccccccc"
inkscape:connector-curvature="0"
d="m 517.42038,980.86302 -22.7564,-22.63636 22.7564,-22.36364 12.22779,0 -22.43625,22.36364 22.43625,22.63636 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:0.77410464"
id="path4802" />
<rect
y="935.86304"
x="490.38315"
height="45"
width="10.232907"
id="rect4804"
style="opacity:1;fill:#535452;fill-opacity:1;stroke:none;stroke-width:4;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:4;stroke-opacity:0.77410464" />
</g>
<path
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 653.48878,420.94862 0,44.9746 10.16999,0 -0.01,-34.9258 20.1621,-0.01 c 0,0 0.044,4.433 0.019,7.334 l -14.69141,0 0,10.0449 -0.02,0 17.56641,17.5801 12.2285,0 -17.3047,-17.5625 12.25,0 0,-22.3223 -5.04691,-5.1152 -35.32619,0 z"
id="path4806"
inkscape:connector-curvature="0" />
<path
inkscape:connector-curvature="0"
id="path4808"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 793.33107,465.94842 -10.0046,0 -20.2029,-27.4325 0,27.4325 -10.0047,0 0,-45 10.0047,0 20.2029,27.6351 0,-27.6351 10.0046,0 0,45 0,0 z" />
<path
inkscape:connector-curvature="0"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:-2.09637547px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 369.16458,420.94842 0,10.1816 15.3399,0 0,34.8184 10.1816,0 0,-34.8184 15.3399,0 0,-10.1816 -40.8614,0 z"
id="path4810" />
<path
inkscape:connector-curvature="0"
id="path4812"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 505.88197,460.73002 -5.1506,5.2184 -35.1732,0 0,-10.1657 30.2937,0 0,-7.2515 -30.2937,0 0,-22.3645 5.15061,-5.2183 35.17319,0 0,10.0979 -30.2937,0 0,7.3192 30.2937,0 0,22.3645 0,0 z" />
<path
d="m 456.82298,426.25422 0,39.6941 -35.1806,0 -5.1434,-5.2208 0,-22.3602 22.88889,0.016 0,10.1489 -12.86439,-0.01 0.015,7.2564 20.2521,-0.012 0.001,-24.7152 -30.2929,-0.012 0,-10.0943 35.17441,0 5.14959,5.2201 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:0px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:0.74159491px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
id="path4814"
inkscape:connector-curvature="0"
sodipodi:nodetypes="cccccccccccccccc" />
<path
sodipodi:nodetypes="ccccccccccccccc"
inkscape:connector-curvature="0"
id="path4816"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:1.63800144px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
d="m 726.99267,465.94502 c 0,-4.6652 0.022,-10.1319 0.022,-10.1319 l -13.24829,0 0,-24.6367 20.5846,-0.043 c 0,0 0,34.0124 0.011,34.8116 0.11699,-0.019 4.8333,0 4.8333,0 l 5.15369,-5.1536 0,-34.5839 -5.24749,-5.2591 -29.8435,0.017 -5.0859,5.1536 0,39.7376 c 9.0793,0.1722 17.0225,0.088 22.8201,0.088 z" />
<path
id="path4818"
d="m 605.50407,420.94842 0,10.1816 15.3399,0 0,34.8184 10.1816,0 0,-34.8184 15.3398,0 0,-10.1816 -40.8613,0 z"
style="font-style:normal;font-variant:normal;font-weight:normal;font-stretch:normal;font-size:67.56756592px;line-height:125%;font-family:loaded;-inkscape-font-specification:loaded;letter-spacing:-2.09637547px;word-spacing:0px;fill:#535452;fill-opacity:1;stroke:none;stroke-width:1px;stroke-linecap:butt;stroke-linejoin:miter;stroke-opacity:1"
inkscape:connector-curvature="0" />
</g>
<path
style="display:inline;opacity:1;fill:#cc8f60;fill-opacity:1;stroke:none;stroke-width:2;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:4;stroke-dasharray:none;stroke-dashoffset:0;stroke-opacity:1"
d="m 154.08659,333.22244 -46.57031,26.8867 47.26367,26.45901 0,4.4062 -47.76367,-26.74021 0,58.8125 47.07031,27.1758 47.07031,-27.1758 0,-18.336 -47.07031,27.1758 -24.57813,-14.1914 0,-16.4785 24.57813,14.1914 47.07031,-27.1758 0,-27.834 -47.07031,-27.1757 z"
id="path5251"
inkscape:connector-curvature="0" />
</g>
</svg>

Before

Width:  |  Height:  |  Size: 8.6 KiB

View file

@ -1,39 +0,0 @@
---
# Tasks to set up the taskotron-frontend static site served by httpd.
# Package installation is split three ways: yum-era RHEL (<= 7), Fedora,
# and dnf-era RHEL; rpm-ostree hosts are never modified here.
- name: ensure packages required for taskotron-frontend are installed (yum)
  package:
    state: present
    name:
      - httpd
  when: ansible_distribution_major_version|int <= 7 and ansible_distribution == 'RedHat'

- name: ensure packages required for taskotron-frontend are installed (dnf, Fedora)
  dnf:
    state: present
    name:
      - httpd
  when: ansible_distribution_major_version|int >= 29 and ansible_distribution == 'Fedora' and ansible_cmdline.ostree is not defined

- name: ensure packages required for taskotron-frontend are installed (dnf, RHEL)
  dnf:
    state: present
    name:
      - httpd
  when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined

- name: create dirs for static files
  # mode "1755": sticky bit so non-owners cannot delete each other's files
  file:
    path: /var/www/html/{{ item }}
    state: directory
    owner: apache
    group: apache
    mode: "1755"
  loop:
    - 'static/'
    - 'static/img'
    - 'static/css'
    - 'static/fonts'

- name: copy static bits
  copy:
    src: static/
    dest: /var/www/html/static
    owner: apache
    group: apache
    mode: "0644"

#- name: copy fonts
#  copy: src={{ bigfiles }}/fonts dest=/var/www/html/static/ owner=apache group=apache mode=0644

- name: generate landing page
  # first template found wins: deployment-specific template overrides default
  template:
    src: "{{ item }}"
    dest: /var/www/html/index.html
    owner: apache
    group: apache
    mode: "0644"
  with_first_found:
    - 'landingpage.html.j2.{{ deployment_type }}'
    - 'landingpage.html.j2'

View file

@ -1,184 +0,0 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<meta http-equiv="X-UA-Compatible" content="IE=edge">
<meta name="viewport" content="width=device-width, initial-scale=1">
<!-- The above 3 meta tags *must* come first in the head; any other head content must come *after* these tags -->
<meta name="description" content="Landing page for the Fedora QA Taskotron instance.">
<meta name="author" content="Fedora QA">
<link href="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/css/bootstrap.min.css" rel="stylesheet">
<link rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.4.0/css/font-awesome.min.css">
<link rel="stylesheet" href="static/css/style.css">
<title>Taskotron {{ landingpage_title }}</title>
<link rel="icon" type="image/png" href="static/img/favicon.png">
</head>
<body>
<nav class="navbar navbar-default navbar-static-top navbar-dark">
<div class="container-fluid">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false" aria-controls="navbar">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<div class="dropdown">
<a class="navbar-brand {{landingpage_title|lower}} dropdown-toggle" id="dropdownMenu1" data-toggle="dropdown">
{{landingpage_title}} Instance
<span class="caret"></span>
</a>
<ul class="dropdown-menu" style="top: 30px;">
{% for instance in instances %}
<li><a href="{{instance.url}}">{{instance.name}} Instance</a></li>
{% endfor %}
</ul>
</div>
</div>
<div id="navbar" class="navbar-collapse collapse navbar-dark">
<ul class="nav navbar-nav navbar-right">
<li><a href="https://fedoraproject.org/wiki/Taskotron">Wiki</a></li>
<li><a href="https://pagure.io/taskotron">Issue Tracker</a></li>
<li><a href="https://fedoraproject.org/wiki/Taskotron/Tasks">Information about Tasks</a></li>
</ul>
</div>
</div>
</nav>
<div class="bg fullpage">
<div class="jumbotron text-center">
<div class="container">
<div class="vfill-40"></div>
<img src="static/img/taskotron-{{landingpage_instance}}.svg" class="img-responsive center-block logo">
<div class="vfill-120"></div>
<p class="punchline">automated task execution framework</p>
<div class="vfill-120"></div>
<p>
<a class="btn btn-lg btn-primary hvr-fade" href="/{{ resultsdb_fe_endpoint }}/results" role="button">Browse Task Results</a>
<a class="btn btn-lg btn-primary hvr-fade" href="/taskmaster/#/builders" role="button">Recently Executed Tasks</a>
<a class="btn btn-lg btn-primary hvr-fade" href="https://qa.fedoraproject.org/docs/libtaskotron/latest" role="button">Documentation</a>
</p>
</div>
</div>
</div>
<div class="container">
<div class="row vertical-center">
<div class="col-md-9">
<h2>Trivial local execution</h2><p class="text-muted">No need to replicate the production environment with all its servers and configurations, the check authors can easily run and develop their checks on their local machine with no unnecessary software setup hassle.</p>
</div>
<div class="col-md-3">
<div class="featurette text-right" style="color: #cc8f60;">
<i class="fa fa-desktop"></i></div>
</div>
</div>
<hr>
<div class="row vertical-center">
<div class="col-md-3">
<div class="featurette text-left" style="color: #639a81;">
<i class="fa fa-bar-chart"></i></div>
</div>
<div class="col-md-9">
<h2>Simple check management</h2><p class="text-muted">Package maintainers in full control of their package-related checks, no hurdles.</p>
</div>
</div>
<hr>
<div class="row vertical-center">
<div class="col-md-9">
<h2>Support for package-related checks</h2><p class="text-muted">Can this new build of firefox package be safely updated? Do the functional tests pass for this new build of openssh?</p>
</div>
<div class="col-md-3">
<div class="featurette text-right" style="color: #cc8f60;">
<i class="fa fa-cubes"></i></div>
</div>
</div>
<hr>
<div class="row vertical-center">
<div class="col-md-3">
<div class="featurette text-left" style="color: #639a81;">
<i class="fa fa-globe"></i></div>
</div>
<div class="col-md-9">
<h2>Support for distribution-wide checks</h2><p class="text-muted">Can this set of packages be pushed to stable? Is this new system compose installable?</p>
</div>
</div>
<hr>
<div class="row vertical-center">
<div class="col-md-9">
<h2>Event-based</h2><p class="text-muted">Where applicable only the simplest interaction between services is used - passing messages through a message bus - for both check triggering and result reporting. No hardcoded tie-ins to specific services.</p>
</div>
<div class="col-md-3">
<div class="featurette text-right" style="color: #cc8f60;">
<i class="fa fa-paper-plane"></i></div>
</div>
</div>
<hr>
<div class="row vertical-center">
<div class="col-md-3">
<div class="featurette text-left" style="color: #639a81;">
<i class="fa fa-puzzle-piece"></i></div>
</div>
<div class="col-md-9">
<h2>Decoupled design</h2><p class="text-muted">Comprised of loosely-coupled standalone units (tools, libraries) so that important logical functions are separated and one unit can be replaced with a different unit with similar functionality.</p>
</div>
</div>
<hr>
<div class="row vertical-center">
<div class="col-md-9">
<h2>Open source rockstar</h2><p class="text-muted">Taskotron is under active development. If you're interested in contributing, check out our <a href="{{ taskotron_docs_url }}/devguide.html">development guide</a> and the tickets and tasks on our <a href="https://pagure.io/taskotron">project page</a><!--, or pick some <a href="https://phab.qa.fedoraproject.org/project/board/12/">easy to handle tasks</a> -->.</p>
</div>
<div class="col-md-3">
<div class="featurette text-right" style="color: #cc8f60;">
<i class="fa fa-code-fork"></i></div>
</div>
</div>
</div>
<footer class="fullpage">
<div class="container text-center">
<div class="punchline">Get in touch!</div>
<div class="vfill-120"></div>
<p class="text-muted">Email on <a href="https://lists.fedoraproject.org/archives/list/qa-devel@lists.fedoraproject.org/">The Fedora qa-devel list</a></p>
<p class="text-muted">In #fedora-qa on <a href="https://webchat.freenode.net/">Freenode IRC</a></p>
<p class="text-muted">Check the <a href="https://pagure.io/taskotron">Pagure</a></p>
<div class="vfill-120"></div>
<img src="static/img/taskotron-box.svg">
</div>
</footer>
</body>
<script src="https://ajax.googleapis.com/ajax/libs/jquery/1.11.3/jquery.min.js"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/3.3.5/js/bootstrap.min.js"></script>
<script src="https://apps.fedoraproject.org/fedmenu/js/fedmenu.js"></script>
<script>
fedmenu({
'url': 'https://apps.fedoraproject.org/js/data.js',
'mimeType': 'application/javascript',
'position': 'bottom-right',
});
</script>
</html>

View file

@ -1,3 +0,0 @@
---
# Retention in days for the cleanup cron (values feed `find -mtime +N`).
# Kept as quoted strings so they substitute verbatim into the cron template.
artifacts_max_life: '120'
buildmaster_max_life: '120'

View file

@ -1,35 +0,0 @@
---
# Tasks to publish taskotron artifacts over httpd and keep them pruned.
- name: start httpd (provided in the apache role)
  service:
    name: httpd
    state: started

- name: ensure packages required for buildmaster are installed (dnf, Fedora)
  dnf:
    state: present
    name:
      - tmpwatch
  when: ansible_distribution_major_version|int >= 29 and ansible_distribution == 'Fedora' and ansible_cmdline.ostree is not defined

- name: ensure packages required for buildmaster are installed (dnf, RHEL)
  dnf:
    state: present
    name:
      - tmpwatch
  when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined

- name: create artifacts directory
  # setype httpd_sys_content_t lets httpd serve the tree under SELinux
  file:
    path: "{{ item }}"
    state: directory
    owner: buildmaster
    group: buildmaster
    mode: "0775"
    setype: httpd_sys_content_t
  loop:
    - /srv/taskotron
    - /srv/taskotron/artifacts

- name: copy artifacts httpd config
  # first template found wins: deployment-specific config overrides default
  template:
    src: "{{ item }}"
    dest: /etc/httpd/conf.d/artifacts.conf
    owner: root
    group: root
  with_first_found:
    - artifacts.conf.j2.{{ deployment_type }}
    - artifacts.conf.j2
  notify:
    - reload httpd

- name: allow httpd to read artifacts on nfs
  seboolean:
    name: httpd_use_nfs
    state: true
    persistent: true

- name: copy cronjob for cleaning old taskotron artifacts and buildmaster logs
  template:
    src: taskotron-clean.cron.j2
    dest: /etc/cron.d/taskotron-clean.cron
    owner: root
    group: root
    mode: "0644"

View file

@ -1,62 +0,0 @@
Alias /artifacts {{ public_artifacts_dir }}
<Directory "{{ public_artifacts_dir }}">
Options +Indexes
IndexOptions +NameWidth=*
IndexOptions FancyIndexing
IndexOrderDefault Ascending Name
AllowOverride None
Require all granted
</Directory>
<Directory "{{ public_artifacts_dir }}/all">
Options -Indexes
</Directory>
<DirectoryMatch "^{{ public_artifacts_dir }}/all/(.+)/">
Options +Indexes
IndexOptions +NameWidth=*
AllowOverride None
Require all granted
</DirectoryMatch>
ExtFilterDefine gz-to-plain mode=output \
intype=application/x-gzip outtype=text/plain \
cmd="/bin/gunzip -c -"
ExtFilterDefine gz-to-html mode=output \
intype=application/x-gzip outtype=text/html \
cmd="/bin/gunzip -c -"
ExtFilterDefine gz-to-css mode=output \
intype=application/x-gzip outtype=text/css \
cmd="/bin/gunzip -c -"
<DirectoryMatch "^{{ public_artifacts_dir }}/all/(.+)/">
RewriteEngine on
RewriteCond "{{ public_artifacts_dir }}/all/$1/$2.gz" -f
RewriteCond "{{ public_artifacts_dir }}/all/$1/$2" !-f
RewriteRule "^{{ public_artifacts_dir }}/all/(.+)/(.*)$" "{{ public_artifacts_dir }}/all/$1/$2.gz"
# mod_deflate doesn't work as expected for some reason
# use custom filter instead
<FilesMatch "(?!html)">
SetOutputFilter gz-to-plain
</FilesMatch>
<FilesMatch "(?!css)">
SetOutputFilter gz-to-css
</FilesMatch>
<FilesMatch "(\.html\.gz|\.html)$">
SetOutputFilter gz-to-html
</FilesMatch>
# keep the mod_deflate for reference though
#AddEncoding x-gzip .gz
#SetOutputFilter INFLATE
#ForceType text/plain
</DirectoryMatch>

View file

@ -1,4 +0,0 @@
0 0 * * * {{ buildmaster_user }} find {{ public_artifacts_dir }} -type d -mtime +{{ artifacts_max_life }} | xargs -r rm -rf
10 0 * * * {{ buildmaster_user }} find {{ buildmaster_dir }}/x86_64/ -mtime +{{ buildmaster_max_life }} | xargs -r rm -rf
20 0 * * * {{ buildmaster_user }} find {{ buildmaster_dir }}/i386/ -mtime +{{ buildmaster_max_life }} | xargs -r rm -rf
30 0 * * * {{ buildmaster_user }} find {{ buildmaster_dir }}/all/ -mtime +{{ buildmaster_max_life }} | xargs -r rm -rf

View file

@ -1,2 +0,0 @@
---
extra_enablerepos: ''

View file

@ -1,44 +0,0 @@
# this was designed to be used in dev where there is no global proxy
# this way, we can get away with a single ip and ssl cert instead of
# one for taskotron and one for resultsdb. The url scheme also stays
# closer to stg/prod
---
- name: start httpd (provided in the apache role)
  service:
    name: httpd
    state: started

- name: ensure packages required for proxying are installed (yum)
  package:
    state: present
    name:
      - libsemanage-python
  when: ansible_distribution_major_version|int <= 7 and ansible_distribution == 'RedHat'

- name: ensure packages required for proxying are installed (dnf, Fedora)
  dnf:
    state: present
    name:
      - libsemanage-python
  when: ansible_distribution_major_version|int >= 29 and ansible_distribution == 'Fedora' and ansible_cmdline.ostree is not defined

- name: ensure packages required for proxying are installed (dnf, RHEL)
  dnf:
    state: present
    name:
      - libsemanage-python
  when: ansible_distribution_major_version|int > 7 and ansible_distribution == 'RedHat' and ansible_cmdline.ostree is not defined

- name: allow httpd tcp connections with selinux
  # required so mod_proxy may open outbound connections to the backends
  seboolean:
    name: httpd_can_network_connect
    state: true
    persistent: true

- name: copy resultsdb proxy httpd config
  template:
    src: resultsdb.conf.j2
    dest: /etc/httpd/conf.d/resultsdb.conf
    owner: root
    group: root
  notify:
    - reload httpd

- name: copy execdb proxy httpd config
  template:
    src: execdb.conf.j2
    dest: /etc/httpd/conf.d/execdb.conf
    owner: root
    group: root
  notify:
    - reload httpd

- name: copy vault proxy httpd config
  template:
    src: vault.conf.j2
    dest: /etc/httpd/conf.d/vault.conf
    owner: root
    group: root
  notify:
    - reload httpd

View file

@ -1,5 +0,0 @@
<Location /{{ execdb_endpoint }}/ >
ProxyPass {{ execdb_server }}/
ProxyPassReverse {{ execdb_server }}/
RequestHeader add X-Script-Name /{{ execdb_endpoint }}/
</Location>

View file

@ -1,11 +0,0 @@
<Location /{{ resultsdb_fe_endpoint }}/ >
ProxyPass {{ resultsdb_frontend_url }}
ProxyPassReverse {{ resultsdb_frontend_url }}
RequestHeader add X-Script-Name /{{ resultsdb_fe_endpoint }}/
</Location>
<Location /{{ resultsdb_api_endpoint }}/ >
ProxyPass {{ resultsdb_host }}
ProxyPassReverse {{ resultsdb_host }}
RequestHeader add X-Script-Name /{{ resultsdb_api_endpoint }}/
</Location>

View file

@ -1,5 +0,0 @@
<Location /{{ vault_endpoint }}/ >
ProxyPass {{ vault_server }}/
ProxyPassReverse {{ vault_server }}/
RequestHeader add X-Script-Name /{{ vault_endpoint }}/
</Location>

View file

@ -1,6 +0,0 @@
---
# Default paths used by the taskotron-trigger role.
trigger_joblog_file: /var/log/taskotron-trigger/jobs.csv
trigger_critpath_file: /var/lib/taskotron-trigger/critpath_whitelist
trigger_cache_dir: /var/lib/taskotron-trigger/cache
trigger_rules_template_path: /etc/taskotron/trigger_rules.yml
# extra repos to enable; empty by default
extra_enablerepos: ''

View file

@ -1,109 +0,0 @@
import re
import datetime
import socket
from optparse import OptionParser
from urllib.parse import urljoin
import resultsdb_api
# taken from http://docs.resultsdb20.apiary.io
# Outcomes counted as success in the summary report.
OKAYISH = ["PASSED", "INFO"]
# Outcomes counted as failure; any outcome not in OKAYISH is tallied as failed.
FAILISH = ["FAILED", "NEEDS_INSPECTION"]
def main(resultsdb_url, frontend_url, timeparam):
    """
    Download results from resultdb for selected time span, return them
    prettyprinted in string.

    :param str resultsdb_url: URL of resultsdb instance
    :param str frontend_url: URL of resultsdb frontend
    :param str timeparam: two ISO 8601 values separated by commas for time span
    :return: prettyprinted summary of checks
    """
    api = resultsdb_api.ResultsDBapi(resultsdb_url)

    # Page through the API until an empty page comes back.
    all_results = []
    page_num = 0
    while True:
        chunk = api.get_results(since=timeparam, page=page_num)["data"]
        if not chunk:
            break
        all_results.extend(chunk)
        page_num += 1

    # Tally pass/fail counts, overall and per test case.
    num_passed = 0
    num_failed = 0
    passed_per_check = {}
    failed_per_check = {}
    totals = {}
    for res in all_results:
        check_name = res["testcase"]["name"]
        if res["outcome"] in OKAYISH:
            num_passed += 1
            passed_per_check[check_name] = passed_per_check.get(check_name, 0) + 1
        else:
            num_failed += 1
            failed_per_check[check_name] = failed_per_check.get(check_name, 0) + 1
        totals[check_name] = totals.get(check_name, 0) + 1

    from_time, to_time = timeparam.split(",")

    # Assemble the report as a list of fragments, joined at the end.
    parts = ["libtaskotron results\n====================\n"]
    parts.append("Generated on: %s\n" % socket.gethostname())
    parts.append("From: %s\n" % from_time)
    parts.append("To: %s\n\n" % to_time)

    parts.append("Executed checks:\n----------------\n")
    for check_name in sorted(totals):
        n_failed = failed_per_check.get(check_name, 0)
        pct_failed = int(round(n_failed * 100.0 / totals[check_name]))
        parts.append("%s: %d (%d %% failed)\n"
                     % (check_name, totals[check_name], pct_failed))
    parts.append("\nTotal: %d executed, %d failed\n\n"
                 % (num_passed + num_failed, num_failed))

    # One frontend query URL per failing testcase, capped at 1000 results.
    parts.append("Links to failed checks:\n-----------------------\n")
    for check_name in sorted(failed_per_check):
        cap = min(failed_per_check[check_name], 1000)
        link = urljoin(frontend_url,
                       "results?outcome=%s&since=%s,%s&testcase_name=%s&limit=%d"
                       % (",".join(FAILISH), from_time, to_time, check_name, cap))
        parts.append("%s: %s\n" % (check_name, link))

    return "".join(parts)
if __name__ == "__main__":
    # Command-line entry point: parse options, normalize the time argument
    # into a "from,to" ISO 8601 span, then print the summary report.
    parser = OptionParser(usage="usage: %prog -u [URL] -f [FRONTEND] "
                                "-t [TIMESPAN]")
    parser.add_option("-u", "--url", dest="resultsdb_url",
                      help="url of resultsdb instance")
    parser.add_option("-f", "--frontend", dest="frontend_url",
                      help="url of resultsdb frontend")
    parser.add_option("-t", "--time", dest="time", help="time span - either "
                      "one number or time and date in ISO 8601 format. "
                      "When given simple number X, it generates report "
                      "for last X hours, starting from now. When given "
                      "one ISO 8601 formatted time, it generates report "
                      "starting from that time on. For time span, use "
                      "two ISO 8601 formatted times, separated by comma.")
    (opts, _) = parser.parse_args()
    # all three options are mandatory
    if not opts.resultsdb_url or not opts.time or not opts.frontend_url:
        parser.error("resultsdb url, frontend url and time span arguments"
                     " required")
    # matches YYYY-MM-DD with optional THH:MM[:SS[.frac]] suffix
    iso_regex = re.compile(
        r"^\d{4}-\d{2}-\d{2}(T\d{2}:\d{2}(:\d{2}(\.\d+)?)?)?$")
    m = re.search(r"^(?P<first>.*),(?P<second>.*)$", opts.time)
    if m:  # both values (from and to) as arguments
        if not re.match(iso_regex, m.group('first')):
            parser.error("First time string not in YYYY-MM-DDTHH:MM:SS format")
        if not re.match(iso_regex, m.group('second')):
            parser.error("Second time string not in YYYY-MM-DDTHH:MM:SS format")
        time_span = opts.time
    else:
        time_now = datetime.datetime.now()
        if re.match(r"^\d+$", opts.time):  # only last X hours as argument
            time_param = time_now - datetime.timedelta(hours=int(opts.time))
            time_span = time_param.isoformat() + "," + time_now.isoformat()
        else:  # one ISO 8601 time argument
            if not re.match(iso_regex, opts.time):
                parser.error("First time string not in YYYY-MM-DDTHH:MM:SS "
                             "format")
            time_span = opts.time + "," + time_now.isoformat()
    output = main(opts.resultsdb_url, opts.frontend_url, time_span)
    # end=' ' avoids doubling the trailing newline already in the report
    print(output, end=' ')

View file

@ -1,42 +0,0 @@
# Setup fedmsg logging.
# See the following for constraints on this format http://bit.ly/Xn1WDn
# Log line layout shared by all handlers below.
bare_format = "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"

# dictConfig-style (version 1) logging configuration consumed by fedmsg.
# Both the fedmsg and moksha loggers write to the rotating file handler;
# the console handler is defined but not attached to any logger here.
config = dict(
    logging=dict(
        version=1,
        formatters=dict(
            bare={
                "datefmt": "%Y-%m-%d %H:%M:%S",
                "format": bare_format
            },
        ),
        handlers=dict(
            console={
                "class": "logging.StreamHandler",
                "formatter": "bare",
                "level": "INFO",
                "stream": "ext://sys.stdout",
            },
            filelog={
                "class": "logging.handlers.RotatingFileHandler",
                "formatter": "bare",
                "level": "INFO",
                "filename": "/var/log/fedmsg/taskotron-trigger.log",
                "mode": "a",
            }
        ),
        loggers=dict(
            fedmsg={
                "level": "INFO",
                # propagate=False keeps records out of the root logger
                "propagate": False,
                "handlers": ["filelog"],
            },
            moksha={
                "level": "INFO",
                "propagate": False,
                "handlers": ["filelog"],
            },
        ),
    ),
)

View file

@ -1,47 +0,0 @@
---
# Tasks to install and configure taskotron-trigger (a fedmsg consumer)
# plus the fetch_activity daily report script.
- name: ensure packages required for taskotron-trigger are installed
  dnf:
    state: present
    name:
      - fedmsg
      - fedmsg-hub
      - taskotron-trigger
      - python3-resultsdb_api
  when: ansible_cmdline.ostree is not defined

- name: generate trigger configuration
  # first template found wins: deployment-specific template overrides default
  template:
    src: "{{ item }}"
    dest: /etc/taskotron/trigger.cfg
    owner: root
    group: root
    mode: "0744"
  with_first_found:
    - trigger.cfg.j2.{{ deployment_type }}
    - trigger.cfg.j2
  notify:
    - restart fedmsg-hub-3

- name: generate trigger rules
  template:
    src: "{{ item }}"
    dest: /etc/taskotron/trigger_rules.yml
    owner: root
    group: root
    mode: "0744"
  with_first_found:
    - trigger_rules.yml.j2.{{ deployment_type }}
    - trigger_rules.yml.j2
  notify:
    - restart fedmsg-hub-3

- name: copy fedmsg logging configuration
  copy:
    src: logging.py
    dest: /etc/fedmsg.d/logging.py
    owner: root
    group: root
    mode: "0744"
  notify:
    - restart fedmsg-hub-3

- name: install fetch_activity
  copy:
    src: fetch_activity.py
    dest: /root/fetch_activity.py
    owner: root
    group: root
    mode: "0644"

- name: create a dir for fetch_activity logs
  file:
    dest: /root/fetch_activity_logs
    mode: "0755"
    owner: root
    group: root
    state: directory

- name: copy fetch_activity cron job
  template:
    src: fetch_activity.cron.j2
    dest: /etc/cron.d/fetch_activity.cron
    owner: root
    group: root
    mode: "0644"
  notify:
    - restart crond

- name: copy tmpfiles.d for trigger distgit cache
  template:
    src: tmpfiles.d.trigger.conf.j2
    dest: /etc/tmpfiles.d/trigger.conf
    owner: root
    group: root
    mode: "0644"

- name: start and enable fedmsg-hub-3 service
  service:
    name: fedmsg-hub-3
    enabled: true
    state: started

View file

@ -1 +0,0 @@
0 0 * * * root python3 /root/fetch_activity.py -u {{ resultsdb_url }} -f {{ resultsdb_external_url }} -t 24 2>&1 | tee /root/fetch_activity_logs/$(date "+\%Y\%m\%d" -d "last day")_{{ deployment_type }}_activity | mailx -r taskotron@fedoraproject.org -s 'Taskotron status: {{ deployment_type|upper }} from '$(date "+\%Y-\%m-\%d" -d "last day") sysadmin-qa-members@fedoraproject.org

View file

@ -1 +0,0 @@
d /var/lib/taskotron-trigger/cache 0755 fedmsg fedmsg 12h

View file

@ -1,23 +0,0 @@
[buildbot]
url = http://127.0.0.1:8010/change_hook
[trigger]
valid_arches = x86_64,armhfp
koji_url = http://koji.fedoraproject.org/kojihub
datagrepper_url = https://apps.fedoraproject.org/datagrepper/raw
execdb_server = {{ execdb_server }}
runner_type = BuildbotRunner
job_logging = True
joblog_file = {{ trigger_joblog_file }}
fuse_delay = 900 ; 15 minutes in seconds
git_cache_dir = {{ trigger_cache_dir }}
rules_template = {{ trigger_rules_template_path }}
{# enable this if you want taskotron-stg to react to just stg fedmsgs
{% if deployment_type in ['stg'] %}
deployment_type = stg
{% endif%}
#}
[koji_build_completed]
; critpath_filepath = {{ trigger_critpath_file }}
critpath_filepath =

View file

@ -1,42 +0,0 @@
---
- when:
message_type: KojiBuildPackageCompleted
do:
- tasks:
- rpmlint
- tasks:
- python-versions
- rpmgrill
arches:
- noarch
- when:
message_type: KojiBuildPackageCompleted
do:
- tasks:
- abicheck
# disable rpmdeplint temporarily because of https://github.com/fedora-infra/bodhi/issues/3944
# - when:
# message_type: KojiTagChanged
# tag:
# $and:
# - $regex: '/^f[0-9]{2}-updates(-testing)?-pending$$/'
# - $not:
# $regex: '/^f33-.*/' # Rawhide CI gating not supported yet
# do:
# - tasks:
# - rpmdeplint
{% if deployment_type in ['dev'] %}
{# This is sufficient to run in a single environment only #}
- when:
message_type: PagureGitReceive
repo:
$regex: '/taskotron\/.+/'
do:
- discover: {repo: 'https://pagure.io/taskotron/task-dockerbuild.git'}
arches:
- x86_64
match_host_arch: True
{% endif %}

View file

@ -1,2 +0,0 @@
---
extra_enablerepos: ''

View file

@ -1,58 +0,0 @@
---
# Tasks to deploy the vault web app under httpd (mod_wsgi) with a
# postgres backend and OpenID Connect registration against iddev.
- name: start httpd (provided in the apache role)
  service:
    name: httpd
    state: started

- name: ensure packages required for vault are installed (dnf)
  dnf:
    state: present
    name:
      - vault
      - python3-mod_wsgi
      - python3-psycopg2
      - python3-flask-oidc
      - python3-libsemanage
  when: ansible_cmdline.ostree is not defined

- name: ensure database is created
  delegate_to: "{{ vault_db_host_machine }}"
  become_user: postgres
  become: true
  postgresql_db:
    db: "{{ vault_db_name }}"

- name: ensure vault db user has access to database
  delegate_to: "{{ vault_db_host_machine }}"
  become_user: postgres
  become: true
  postgresql_user:
    db: "{{ vault_db_name }}"
    user: "{{ vault_db_user }}"
    password: "{{ vault_db_password }}"
    role_attr_flags: NOSUPERUSER

- name: ensure selinux lets httpd talk to postgres
  seboolean:
    name: httpd_can_network_connect_db
    persistent: true
    state: true

- name: register with iddev
  # NOTE(review): hardcoded python3.7 site-packages path breaks on any
  # other Python version -- consider resolving it dynamically.
  # 'creates' makes this a one-shot task.
  command: python3 /usr/lib/python3.7/site-packages/flask_oidc/registration_util.py https://iddev.fedorainfracloud.org {{ vault_public_url }}
  args:
    chdir: /etc/vault
    creates: /etc/vault/client_secrets.json
  notify:
    - reload httpd

- name: generate vault config
  template:
    src: settings.py.j2
    dest: /etc/vault/settings.py
    owner: root
    group: root
    mode: "0644"
  notify:
    - reload httpd

- name: generate vault apache config
  template:
    src: vault.conf.j2
    dest: /etc/httpd/conf.d/vault.conf
    owner: root
    group: root
    mode: "0644"
  notify:
    - reload httpd

- name: generate alembic.ini
  template:
    src: alembic.ini.j2
    dest: /usr/share/vault/alembic.ini
    owner: root
    group: root
    mode: "0644"

- name: initialize vault database
  shell: PROD='true' vault init_db

- name: initialize alembic
  shell: PROD='true' vault init_alembic

- name: upgrade vault database via alembic
  shell: PROD='true' vault upgrade_db

View file

@ -1,73 +0,0 @@
# A generic, single database configuration.
[alembic]
# path to migration scripts
script_location = alembic
# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s
# max length of characters to apply to the
# "slug" field
#truncate_slug_length = 40
# set to 'true' to run the environment during
# the 'revision' command, regardless of autogenerate
# revision_environment = false
# set to 'true' to allow .pyc and .pyo files without
# a source .py file to be detected as revisions in the
# versions/ directory
# sourceless = false
# version location specification; this defaults
# to alembic/versions. When using multiple version
# directories, initial revisions must be specified with --version-path
# version_locations = %(here)s/bar %(here)s/bat alembic/versions
# the output encoding used when revision files
# are written from script.py.mako
# output_encoding = utf-8
sqlalchemy.url = driver://user:pass@localhost/dbname
[alembic-packaged]
# path to migration scripts on a packaged install
script_location = /usr/share/vault/alembic
# no quotes: ini parsers keep quote characters as part of the value,
# which would make the URL invalid (cf. the unquoted url in [alembic])
sqlalchemy.url = postgresql+psycopg2://{{ vault_db_user }}:{{ vault_db_password }}@{{ vault_db_host }}:{{ vault_db_port }}/{{ vault_db_name }}
# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic
[handlers]
keys = console
[formatters]
keys = generic
[logger_root]
level = WARN
handlers = console
qualname =
[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine
[logger_alembic]
level = INFO
handlers =
qualname = alembic
[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic
[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S

View file

@ -1,14 +0,0 @@
# Flask settings for the vault app (rendered by Ansible from settings.py.j2).
SECRET_KEY = '{{ vault_secret_key }}'
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://{{ vault_db_user }}:{{ vault_db_password }}@{{ vault_db_host }}:{{ vault_db_port }}/{{ vault_db_name }}'
# logging destinations: stream only; file/syslog disabled
FILE_LOGGING = False
LOGFILE = '/var/log/vault/vault.log'
SYSLOG_LOGGING = False
STREAM_LOGGING = True
MASTERKEY = '{{vault_masterkey}}'
# OpenID Connect client configuration (written by the registration task)
OIDC_CLIENT_SECRETS = '/etc/vault/client_secrets.json'
OIDC_ID_TOKEN_COOKIE_SECURE = True
OVERWRITE_REDIRECT_URI = '{{vault_public_url}}/oidc_callback'
OIDC_SCOPES = ['openid', 'email', 'profile', 'https://id.fedoraproject.org/scope/groups', 'https://id.fedoraproject.org/scope/cla', ]

View file

@ -1,35 +0,0 @@
WSGIDaemonProcess vault user=apache group=apache threads=5
WSGIScriptAlias /{{ vault_endpoint }} /usr/share/vault/vault.wsgi
WSGISocketPrefix run/wsgi
WSGIPassAuthorization On
# this isn't the best way to force SSL but it works for now
#RewriteEngine On
#RewriteCond %{HTTPS} !=on
#RewriteRule ^/vault/admin/?(.*) https://%{SERVER_NAME}/$1 [R,L]
<Directory /usr/share/vault>
WSGIProcessGroup vault
WSGIApplicationGroup %{GLOBAL}
WSGIScriptReloading On
<IfModule mod_authz_core.c>
# Apache 2.4
<RequireAny>
Require method GET
Require ip 127.0.0.1 ::1{% for host in allowed_hosts %} {{ host }}{% endfor %}
</RequireAny>
</IfModule>
<IfModule !mod_auth_core.c>
Order allow,deny
Allow from all
</IfModule>
</Directory>
#Alias /vault/static /var/www/vault/vault/static
#<Directory /var/www/vault/vault/static>
#Order allow,deny
#Allow from all
#</Directory>