Invent openqa_onebox_test group to try and set up one-box openQA

This is an attempt to add a group that'll set up openqa-x86-worker05
as a one-box openQA instance which doesn't report results. This
is intended to help with debugging
https://bugzilla.redhat.com/show_bug.cgi?id=2009585 : the idea is
to have a disposable openQA instance where we don't care if all
the tests fail, so we can try out various things to resolve that
bug on it. Debugging on lab is awkward because we do actually
care about lab's results, especially since it's the only instance
testing Rawhide updates.

This requires quite a lot of surgery to get around various
assumptions in the existing groups and plays (no reporting of
results, no NFS...) and make the box its own postgresql server.

Signed-off-by: Adam Williamson <awilliam@redhat.com>
This commit is contained in:
Adam Williamson 2022-06-07 14:46:51 -07:00
parent d227dac859
commit edb6f5c172
4 changed files with 151 additions and 1 deletions

View file

@ -0,0 +1,87 @@
# NOTE(review): much of this file (comments included) appears to be
# copied from the openQA staging group vars; the "staging" references
# below describe that original group, not the one-box test group — confirm
# This setup is a bit out of the ordinary for a staging system. As the openqa
# scheduler needs up-to-date information from the wiki and koji, it doesn't
# work well as a proper staging host.
#
# We decided to keep it a pseudo-staging host that will get new versions of
# openqa first and will be a staging host for all intents and purposes but from
# a firewall rule POV, it will be a staging-friendly production host
#
# it should still be using the stg proxies and the external hostname will still
# be stg.fedoraproject.org
# this is to enable nested virt, which we need for disk image creation
virt_install_command: "{{ virt_install_command_one_nic }} --cpu=host-passthrough,+vmx"
deployment_type: stg
# this won't really work, there's not going to be any way to get at
# the webui from outside the box, but we gotta set it to something
external_hostname: openqa.oneboxtest.fedoraproject.org
# makes sure it sends stg not prod fedmsgs
fedmsg_env: stg
freezes: false
gw: 10.3.174.254
# we need this bigger on stg to handle Rawhide updates, if we enable
# Rawhide update testing in prod we can just move this to servers_common
openqa_assetsize_updates: 260
openqa_compose_arches: x86_64
openqa_dbname: openqa-stg
openqa_dbpassword: "{{ stg_openqa_dbpassword }}"
openqa_dbuser: openqastg
openqa_env: staging
openqa_env_prefix: stg-
# this is because openqa staging isn't really a staging host
# we don't want to set env_suffix to stg on it because that may
# break some other plays, but we do need the env suffix for the
# fedora-messaging bits, so let's make our own
openqa_env_suffix: .stg
openqa_key: "{{ stg_openqa_apikey }}"
# install openQA from updates-testing - this is staging, we live
# ON THE EDGE (radical guitar riff)
openqa_repo: updates-testing
openqa_secret: "{{ stg_openqa_apisecret }}"
openqa_update_arches: ['x86_64']
openqa_webapi_plugins: FedoraUpdateRestart
# copied from openqa_servers_common, we don't want everything from there though
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc
- sysadmin-qa
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-qa
ipa_host_group: openqa-servers
ipa_host_group_desc: OpenQA servers
# fedora-messaging email error reporting settings
openqa_amqp_mailto: ["adamwill@fedoraproject.org", "lruzicka@fedoraproject.org"]
# we need this for all our fedora-messaging consumers as they are not
# allowed to create queues on the infra AMQP broker, by broker config
openqa_amqp_passive: true
openqa_amqp_publisher_exchange: "amq.topic"
openqa_amqp_publisher_cacertfile: "/etc/fedora-messaging/{{ openqa_env_prefix }}cacert.pem"
openqa_amqp_publisher_certfile: "/etc/pki/fedora-messaging/openqa{{ openqa_env_suffix }}-cert.pem"
openqa_amqp_publisher_keyfile: "/etc/pki/fedora-messaging/openqa{{ openqa_env_suffix }}-key.pem"
openqa_amqp_scheduler_cert: /etc/pki/fedora-messaging/openqa-cert.pem
openqa_amqp_scheduler_key: /etc/pki/fedora-messaging/openqa-key.pem
openqa_amqp_scheduler_queue: "openqa{{ openqa_env_suffix }}_scheduler"
openqa_amqp_scheduler_routing_keys: ["org.fedoraproject.prod.pungi.compose.status.change", "org.fedoraproject.prod.bodhi.update.request.testing", "org.fedoraproject.prod.bodhi.update.edit", "org.fedoraproject.prod.bodhi.update.status.testing.koji-build-group.build.complete", "org.fedoraproject.prod.coreos.build.state.change"]
openqa_amqp_scheduler_url: "amqps://openqa:@rabbitmq.fedoraproject.org/%2Fpubsub"
openqa_amqp_smtp: bastion
openqa_assetsize: 500
openqa_email: adamwill@fedoraproject.org
openqa_fullname: Adam Williamson
openqa_hostname: localhost
openqa_nickname: adamwill
openqa_userid: http://adamwill.id.fedoraproject.org/
primary_auth_source: ipa
# http and NFS
# NOTE(review): port 2049 (NFS) may be unneeded on the one-box since the
# design drops NFS (openqa_nfs_worker is false below) — confirm
tcp_ports: [80, 2049]
# from openqa_lab_workers
openqa_nfs_worker: false
openqa_workers: 4
openqa_createhdds_branch: rawhide-updates
# let's not do this
# NOTE(review): presumably an empty publisher URL disables AMQP result
# publishing (the "doesn't report results" goal) — confirm in the role
openqa_amqp_publisher_url: ""

View file

@ -67,3 +67,7 @@ sudoers: "{{ private }}/files/sudo/qavirt-sudoers"
# $ENV{QEMUPORT} = ($options{instance}) * 10 + 20002;
# so for worker 1 it's 20012, for worker 2 it's 20022, etc etc
tcp_ports: ['20013', '20023', '20033', '20043', '20053', '20063', '20073', '20083', '20093', '20103', '20113', '20123', '20133', '20143', '20153', '20163', '20173', '20183', '20193', '20203', '20213', '20223', '20233', '20243', '20253', '20263', '20273', '20283', '20293', '20303']
# ONLY FOR WHEN THIS IS ONEBOX_TEST. we make it its own pgsql server.
# I hope delegating to self works...
# NOTE(review): points the DB host at this same machine; presumably the
# postgresql_server role also runs here (one-box playbook) — confirm
openqa_dbhost: openqa-x86-worker05.iad2.fedoraproject.org

View file

@ -469,9 +469,13 @@ openqa-a64-worker01.iad2.fedoraproject.org
openqa-p09-worker01.iad2.fedoraproject.org
openqa-p09-worker02.iad2.fedoraproject.org
openqa-x86-worker04.iad2.fedoraproject.org
openqa-x86-worker05.iad2.fedoraproject.org
# temporarily switched to openqa_onebox_test for debugging
#openqa-x86-worker05.iad2.fedoraproject.org
openqa-x86-worker06.iad2.fedoraproject.org
[openqa_onebox_test]
openqa-x86-worker05.iad2.fedoraproject.org
# the workers that can run networked jobs. each server should have *one* of these per arch
[openqa_tap_workers]
# prod
@ -481,6 +485,8 @@ openqa-a64-worker02.iad2.fedoraproject.org
openqa-x86-worker04.iad2.fedoraproject.org
openqa-a64-worker01.iad2.fedoraproject.org
openqa-p09-worker01.iad2.fedoraproject.org
# onebox
openqa-x86-worker05.iad2.fedoraproject.org
# the workers that run createhdds to create the base disk images. Again,
# only one per arch per instance should be present.
@ -492,6 +498,8 @@ openqa-a64-worker02.iad2.fedoraproject.org
openqa-x86-worker04.iad2.fedoraproject.org
openqa-a64-worker01.iad2.fedoraproject.org
openqa-p09-worker01.iad2.fedoraproject.org
# onebox
openqa-x86-worker05.iad2.fedoraproject.org
# common group for variables shared between all openQA boxes
[openqa_all_common:children]

View file

@ -0,0 +1,51 @@
---
# Playbook for the disposable one-box openQA test instance
# (openqa_onebox_test group): base setup, then openQA server +
# dispatcher + worker on a single host with its own local postgresql.
#
# NOTE(review): this import creates guests for the openqa / openqa_lab
# groups, not openqa_onebox_test — confirm whether the one-box guest is
# created elsewhere or whether myhosts should be updated
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml myhosts=openqa:openqa_lab"

- name: setup base openQA host
  hosts: openqa_onebox_test
  user: root
  gather_facts: true

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  pre_tasks:
    - include_vars: dir=/srv/web/infra/ansible/vars/all/ ignore_files=README
    - import_tasks: "{{ tasks_path }}/yumrepos.yml"

  roles:
    - { role: base, tags: ['base'] }
    - { role: rkhunter, tags: ['rkhunter'] }
    - { role: nagios_client, tags: ['nagios_client'] }
    - { role: hosts, tags: ['hosts'] }
    - ipa/client
    - { role: collectd/base, tags: ['collectd_base'] }
    - { role: sudo, tags: ['sudo'] }
    - apache

  tasks:
    - import_tasks: "{{ tasks_path }}/motd.yml"

  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"

- name: configure openQA
  hosts: openqa_onebox_test
  user: root
  gather_facts: true

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "/srv/private/ansible/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  # we need this box to be its own pgsql server...
  roles:
    - { role: postgresql_server, tags: ['postgresql_server'] }
    - { role: openqa/server, tags: ['openqa_server'] }
    - { role: openqa/dispatcher, tags: ['openqa_dispatcher'] }
    - { role: openqa/worker, tags: ['openqa_worker'] }

  handlers:
    - import_tasks: "{{ handlers_path }}/restart_services.yml"