odcs: retire service (infra 12192)

Time to retire ODCS. ELN is moved off and that was the last thing using
it. Thanks for all the service, ODCS!

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
This commit is contained in:
Kevin Fenzi 2024-09-23 11:57:50 -07:00 committed by kevin
parent f72ff64029
commit e3e2cb1d93
60 changed files with 14 additions and 2328 deletions

View file

@ -54,7 +54,6 @@ ipa_client_shell_groups:
- sysadmin-libravatar
- sysadmin-messaging
- sysadmin-noc
- sysadmin-odcs
- sysadmin-osbuild
- sysadmin-openscanhub
- sysadmin-qa

View file

@ -1,69 +0,0 @@
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-odcs
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-odcs
- sysadmin-releng
ipa_host_group: odcs
ipa_host_group_desc: On Demand Compose Service
odcs_allowed_clients_groups:
eln-sig: {"raw_config_keys": ["eln", "cccc", "eln_jwboyer"], "source_types": ["tag", "module", "build", "raw_config"]}
packager: {"source_types": ["module"]}
pungi-devel: {}
sysadmin-odcs: {}
odcs_allowed_clients_users:
humaton: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
jnsamyak: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
patrikp: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
jkaluza: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
mohanboddu: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
kevin: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
# This is token used by CCCC service running on https://jenkins-fedora-infra.apps.ci.centos.org/job/cccc.
odcs@service: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
releng-odcs@service: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
# Default queues for general ODCS backends.
odcs_celery_queues:
- pungi_composes
- cleanup
odcs_celery_router_config:
cleanup_task: odcs.server.celery_tasks.run_cleanup
default_queue: pungi_composes
routing_rules:
odcs.server.celery_tasks.generate_pungi_compose:
cccc_raw_pungi_composes:
source: "cccc.*"
source_type: 5 # "raw_config"
eln_raw_pungi_composes:
source: "eln.*"
source_type: 5 # "raw_config"
releng_pungi_composes:
owner: ["mohanboddu", "humaton"]
source_type: [1, 2, 6, 7] # "tag", "module", "build", "pungi_compose"
releng_raw_pungi_composes:
source: "releng_.*"
source_type: 5 # "raw_config"
# Configs executed on releng backends must have "releng_" prefix.
odcs_raw_config_urls:
cccc:
config_filename: cccc.conf
url: https://pagure.io/fedora-ci/cccc-merged-configs.git
eln:
config_filename: eln.conf
url: https://pagure.io/pungi-fedora.git
eln_jwboyer:
config_filename: eln.conf
url: https://pagure.io/forks/jwboyer/pungi-fedora.git
releng_compose_ci:
config_filename: compose_ci.conf
raw_config_wrapper: /etc/odcs/custom_compose_raw_config_wrapper.conf
url: https://pagure.io/fedora-ci/compose-ci-pipeline.git
releng_fmc:
config_filename: fedora-minimal-compose.conf
url: https://pagure.io/releng/fmc.git
releng_jkaluza_test_config:
config_filename: test.conf
url: https://pagure.io/jkaluza-test-compose.git
primary_auth_source: ipa
odcs_expected_backend_number: "{{ groups['odcs_backend'] | length }}"

View file

@ -1,46 +0,0 @@
---
csi_primary_contact: Factory 2 factory2-members@fedoraproject.org
csi_purpose: Run the on-demand-compose-service backend scheduler.
csi_relationship: |
There is an odcs backend process running here.
The process is called `odcs-backend`.
This host:
- relies on db01 for its database of activity (what composes have been
requested and what state are they in?)
- Uses pungi to compose repos of content.
- It also *provides* an nfs share used by odcs-frontend01.
# For the MOTD
csi_security_category: Low
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- odcs.compose.state-changed
group: fedmsg
owner: odcs
service: odcs
# These people get told when something goes wrong.
fedmsg_error_recipients:
- ralph@fedoraproject.org
- jkaluza@fedoraproject.org
- cqi@fedoraproject.org
- qwan@fedoraproject.org
fedmsg_hub_auto_restart: False
freezes: true
lvm_size: 200000
mem_size: 4096
nagios_Check_Services:
odcs-celery-backend: true
# NOTE -- read-only mount of /mnt/fedora_koji here.
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
num_cpus: 2
odcs_target_dir_url: https://odcs.fedoraproject.org/composes
tcp_ports: [3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007,
# These ports all required for gluster
111, 24007, 24008, 24009, 24010, 24011, 49152, 49153, 49154, 49155]
# Also for gluster.
udp_ports: [111]

View file

@ -1,5 +0,0 @@
fmc_queue_name: "fmc{{ env_suffix }}_composer"
# Define the topics that our fedora-messaging queue should be subscribed to.
fmc_routing_keys:
- "org.fedoraproject.prod.buildsys.rpm.sign"
odcs_celery_queues: ["releng_raw_pungi_composes", "releng_pungi_composes", "cleanup", "eln_raw_pungi_composes", "cccc_raw_pungi_composes"]

View file

@ -1,44 +0,0 @@
---
csi_primary_contact: Factory 2 factory2-members@fedoraproject.org
csi_purpose: Run the on-demand-compose-service backend scheduler.
csi_relationship: |
There is an odcs backend process running here.
The process is called `odcs-backend`.
This host:
- relies on db01 for its database of activity (what composes have been
requested and what state are they in?)
- Uses pungi to compose repos of content.
- It also *provides* an nfs share used by odcs-frontend01.
# For the MOTD
csi_security_category: Low
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
datacenter: iad2
# These people get told when something goes wrong.
fedmsg_error_recipients:
- ralph@fedoraproject.org
- jkaluza@fedoraproject.org
- cqi@fedoraproject.org
- qwan@fedoraproject.org
fedmsg_hub_auto_restart: False
# Set this to True for the F28 release and onwards.
freezes: false
lvm_size: 40000
mem_size: 2048
nagios_Check_Services:
odcs-celery-backend: true
# NOTE -- read-only mount of /mnt/fedora_koji here.
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
num_cpus: 2
odcs_allowed_source_types: ["tag", "module"]
# Handle all Celery queues on single staging backend.
odcs_celery_queues: ["releng_raw_pungi_composes", "releng_pungi_composes", "cleanup", "eln_raw_pungi_composes", "pungi_composes", "cccc_raw_pungi_composes"]
odcs_target_dir_url: https://odcs.stg.fedoraproject.org/composes
tcp_ports: [3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007,
# These ports all required for gluster
111, 24007, 24008, 24009, 24010, 24011, 49152, 49153, 49154, 49155]
# Also for gluster.
udp_ports: [111]

View file

@ -1,38 +0,0 @@
---
csi_primary_contact: Factory 2 factory2-members@fedoraproject.org
csi_purpose: Run the on-demand-compose-service frontend API.
csi_relationship: |
The apache/mod_wsgi app is the only thing really running here
This host:
- relies on db01 for its database of activity (what composes have been
requested and what state are they in?)
- It also mounts an nfs share provided by odcs-backend01.
- It provides http access to the compose contents on that nfs share.
# For the MOTD
csi_security_category: Low
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- odcs.compose.state-changed
group: apache
owner: apache
service: odcs
freezes: true
lvm_size: 30000
mem_size: 2048
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
num_cpus: 2
odcs_target_dir_url: https://odcs.fedoraproject.org/composes
tcp_ports: [80,
# These ports all required for gluster
111, 24007, 24008, 24009, 24010, 24011, 49152, 49153, 49154, 49155]
# Also for gluster.
udp_ports: [111]
virt_install_command: "{{ virt_install_command_two_nic }}"
# These vars are used to configure mod_wsgi
wsgi_procs: 2
wsgi_threads: 2

View file

@ -1,32 +0,0 @@
---
csi_primary_contact: Factory 2 factory2-members@fedoraproject.org
csi_purpose: Run the on-demand-compose-service frontend API.
csi_relationship: |
The apache/mod_wsgi app is the only thing really running here
This host:
- relies on db01 for its database of activity (what composes have been
requested and what state are they in?)
- It also mounts an nfs share provided by odcs-backend01.
- It provides http access to the compose contents on that nfs share.
# For the MOTD
csi_security_category: Low
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
# Set this to True for the F28 release and onwards.
freezes: false
lvm_size: 20000
mem_size: 2048
nfs_mount_opts: "ro,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
num_cpus: 2
odcs_allowed_source_types: ["tag", "module"]
odcs_target_dir_url: https://odcs.stg.fedoraproject.org/composes
tcp_ports: [80,
# These ports all required for gluster
111, 24007, 24008, 24009, 24010, 24011, 49152, 49153, 49154, 49155]
# Also for gluster.
udp_ports: [111]
# These vars are used to configure mod_wsgi
wsgi_procs: 2
wsgi_threads: 2

View file

@ -1,54 +0,0 @@
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-odcs
- sysadmin-releng
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-odcs
- sysadmin-releng
ipa_host_group: odcs
ipa_host_group_desc: On Demand Compose Service
odcs_allowed_clients_groups:
eln-sig: {"raw_config_keys": ["eln", "cccc"], "source_types": ["tag", "module", "build", "raw_config"]}
packager: {"source_types": ["module"]}
pungi-devel: {}
sysadmin-odcs: {}
odcs_allowed_clients_users:
humaton: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
jkaluza: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
mohanboddu: {"source_types": ["tag", "module", "build", "raw_config"], "target_dirs": ["private"]}
# Default queues for general ODCS backends.
odcs_celery_queues:
- pungi_composes
- cleanup
odcs_celery_router_config:
cleanup_task: odcs.server.celery_tasks.run_cleanup
default_queue: pungi_composes
routing_rules:
odcs.server.celery_tasks.generate_pungi_compose:
cccc_raw_pungi_composes:
source: "cccc.*"
source_type: 5 # "raw_config"
eln_raw_pungi_composes:
source: "eln.*"
source_type: 5 # "raw_config"
releng_pungi_composes:
owner: ["jkaluza", "mohanboddu", "humaton"]
source_type: [1, 2, 6, 7] # "tag", "module", "build", "pungi_compose"
releng_raw_pungi_composes:
source: "releng_.*"
source_type: 5 # "raw_config"
# Configs executed on releng backends must have "releng_" prefix.
odcs_raw_config_urls:
cccc:
config_filename: cccc.conf
url: https://pagure.io/fedora-ci/cccc-merged-configs.git
eln:
config_filename: eln.conf
url: https://pagure.io/pungi-fedora.git
releng_fmc:
config_filename: fedora-minimal-compose.conf
url: https://pagure.io/releng/fmc.git
releng_jkaluza_test_config:
config_filename: test.conf
url: https://pagure.io/jkaluza-test-compose.git

View file

@ -53,7 +53,7 @@ openqa_amqp_publisher_url: "{{ openqa_amqp_this_url }}"
# messages on prod rabbitmq. Only the queue names differs.
openqa_amqp_scheduler_cert: "{{ openqa_amqp_prod_certfile }}"
openqa_amqp_scheduler_key: "{{ openqa_amqp_prod_keyfile }}"
openqa_amqp_scheduler_routing_keys: ["org.fedoraproject.prod.pungi.compose.status.change", "org.fedoraproject.prod.bodhi.update.status.testing.koji-build-group.build.complete", "org.fedoraproject.prod.coreos.build.state.change", "org.fedoraproject.prod.odcs.compose.state-changed"]
openqa_amqp_scheduler_routing_keys: ["org.fedoraproject.prod.pungi.compose.status.change", "org.fedoraproject.prod.bodhi.update.status.testing.koji-build-group.build.complete", "org.fedoraproject.prod.coreos.build.state.change"]
openqa_amqp_scheduler_url: "{{ openqa_amqp_prod_url }}"
# fedora-messaging reporter settings (for both wiki/resultsdb reporters)

View file

@ -16,7 +16,6 @@ databases:
- mailman
- mirrormanager2
- notifications
- odcs
- pagure
- resultsdb
- tahrir
@ -39,7 +38,6 @@ dbs_to_backup:
- mailman
- mirrormanager2
- notifications
- odcs
- pagure
- resultsdb
- tahrir

View file

@ -1,11 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.130
ks_repo: http://10.3.163.35/pub/fedora/linux/releases/36/Server/x86_64/os/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-fedora
lvm_size: 200000
mem_size: 65536
num_cpus: 16
vmhost: bvmhost-x86-02.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View file

@ -1,9 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.110
ks_repo: http://10.3.163.35/repo/rhel/RHEL8-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-8-iad2
virt_install_command: "{{ virt_install_command_one_nic }}"
vmhost: bvmhost-x86-03.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View file

@ -1,8 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.167.254
eth0_ipv4_ip: 10.3.167.36
ks_repo: http://10.3.163.35/repo/rhel/RHEL8-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-8-iad2
vmhost: bvmhost-x86-01.stg.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View file

@ -1,9 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.111
ks_repo: http://10.3.163.35/repo/rhel/RHEL8-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-8-iad2
virt_install_command: "{{ virt_install_command_one_nic }}"
vmhost: bvmhost-x86-04.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View file

@ -1,8 +0,0 @@
---
datacenter: iad2
eth0_ipv4_gw: 10.3.167.254
eth0_ipv4_ip: 10.3.167.37
ks_repo: http://10.3.163.35/repo/rhel/RHEL8-x86_64/
ks_url: http://10.3.163.35/repo/rhel/ks/kvm-rhel-8-iad2
vmhost: bvmhost-x86-05.stg.iad2.fedoraproject.org
volgroup: /dev/vg_guests

View file

@ -311,33 +311,6 @@ ns05.fedoraproject.org
ns01.iad2.fedoraproject.org
ns02.iad2.fedoraproject.org
[odcs_frontend]
odcs-frontend01.iad2.fedoraproject.org
[odcs_frontend_stg]
odcs-frontend01.stg.iad2.fedoraproject.org
[odcs_backend:children]
odcs_backend_general
odcs_backend_releng
[odcs_backend_general]
odcs-backend01.iad2.fedoraproject.org
[odcs_backend_releng]
odcs-backend-releng01.iad2.fedoraproject.org
[odcs_backend_stg]
odcs-backend01.stg.iad2.fedoraproject.org
[odcs_stg:children]
odcs_frontend_stg
odcs_backend_stg
[odcs:children]
odcs_frontend
odcs_backend
[openqa]
openqa01.iad2.fedoraproject.org
@ -582,8 +555,6 @@ ipsilon01.stg.iad2.fedoraproject.org
koji01.stg.iad2.fedoraproject.org
mailman01.stg.iad2.fedoraproject.org
memcached01.stg.iad2.fedoraproject.org
odcs-backend01.stg.iad2.fedoraproject.org
odcs-frontend01.stg.iad2.fedoraproject.org
os-control01.stg.iad2.fedoraproject.org
pkgs01.stg.iad2.fedoraproject.org
proxy01.stg.iad2.fedoraproject.org
@ -1044,7 +1015,6 @@ mailman
memcached
nagios_iad2
oci_registry
odcs
openqa
openqa_lab
openqa_lab_workers
@ -1076,7 +1046,6 @@ ipsilon_stg
koji_stg
memcached_stg
oci_registry_stg
odcs_stg
pkgs_stg
proxies_stg
rabbitmq_stg

View file

@ -45,7 +45,6 @@
- import_playbook: /srv/web/infra/ansible/playbooks/groups/noc.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/os-control.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/oci-registry.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/odcs.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/openqa-workers.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/openqa.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/pagure.yml

View file

@ -59,16 +59,6 @@
nfs_src_dir: 'fedora_ostree_content_stg/ostree'
when: env == 'staging' and 'runroot' in group_names
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs'
when: env != 'staging' and 'runroot' in group_names
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs_stg'
when: env == 'staging' and 'runroot' in group_names
- role: nfs/client
mnt_dir: '/mnt/koji/compose/ostree'
nfs_src_dir: 'fedora_ostree_content_stg/compose/ostree'
@ -84,13 +74,6 @@
- role: keytab/service
kt_location: /etc/kojid/kojid.keytab
service: compile
- role: keytab/service
owner_user: root
owner_group: root
service: innercompose
host: "odcs{{ env_suffix }}.fedoraproject.org"
kt_location: /etc/kojid/secrets/odcs_inner.keytab
when: env == "staging"
# push built Flatpaks to candidate registry
- role: login-registry
candidate_registry: "candidate-registry.fedoraproject.org"

View file

@ -49,16 +49,6 @@
nfs_src_dir: 'fedora_ostree_content_stg/ostree'
when: env == 'staging' and 'runroot' in group_names and not inventory_hostname.startswith('buildvm-s390x')
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs'
when: env != 'staging' and 'runroot' in group_names and not inventory_hostname.startswith('buildvm-s390x')
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs_stg'
when: env == 'staging' and 'runroot' in group_names and not inventory_hostname.startswith('buildvm-s390x')
- role: nfs/client
mnt_dir: '/mnt/koji/compose/ostree'
nfs_src_dir: 'fedora_ostree_content_stg/compose/ostree'
@ -67,13 +57,6 @@
- role: keytab/service
kt_location: /etc/kojid/kojid.keytab
service: compile
- role: keytab/service
owner_user: root
owner_group: root
service: innercompose
host: "odcs{{ env_suffix }}.fedoraproject.org"
kt_location: /etc/kojid/secrets/odcs_inner.keytab
when: env == "staging"
- role: btrfs
btrfs_balance_period: weekly
# push built Flatpaks to candidate registry
@ -152,14 +135,5 @@
tags:
- sshfs
- name: Add /srv/odcs sshfs
mount: path="/srv/odcs"
state=present
fstype=fuse.sshfs
src="root@koji01{{ env_suffix }}.iad2.fedoraproject.org:/srv/odcs"
opts="noauto,_netdev,ServerAliveInterval=20,IdentityFile=/etc/primary-s390x-sshfs"
tags:
- sshfs
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"

View file

@ -41,8 +41,7 @@
- rsyncd
- { role: nfs/client, when: datacenter == "iad2" or datacenter == "rdu", mnt_dir: '/srv/pub', nfs_src_dir: 'fedora_ftp/fedora.redhat.com/pub' }
- { role: nfs/client, when: datacenter == "iad2" or datacenter == "rdu", mnt_dir: '/srv/pub/archive', nfs_src_dir: 'fedora_ftp_archive' }
- { role: nfs/client, when: datacenter == "iad2", mnt_dir: '/mnt/fedora_koji', nfs_src_dir: 'fedora_koji' } # needed for internal sync and odcs
- { role: nfs/client, when: datacenter == "iad2", mnt_dir: '/mnt/odcs', nfs_src_dir: 'fedora_odcs' } # needed for internal sync
- { role: nfs/client, when: datacenter == "iad2", mnt_dir: '/mnt/fedora_koji', nfs_src_dir: 'fedora_koji' } # needed for internal sync
- sudo
pre_tasks:

View file

@ -99,11 +99,6 @@
nfs_src_dir: 'fedora_ostree_content/compose/ostree'
when: env != 'staging'
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs'
when: env != 'staging'
# In staging, we mount fedora_koji as read only (see nfs_mount_opts)
- role: nfs/client
mnt_dir: '/mnt/fedora_koji_prod'

View file

@ -1,162 +0,0 @@
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: "odcs:odcs_stg"
- name: make the box be real
hosts: odcs:odcs_stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
pre_tasks:
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
roles:
- base
- rkhunter
- nagios_client
- zabbix/zabbix_agent
- hosts
- ipa/client
- rsyncd
- sudo
- collectd/base
tasks:
- import_tasks: "{{ tasks_path }}/motd.yml"
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: Set up odcs frontend service
hosts: odcs_frontend:odcs_frontend_stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- "{{ vars_path }}/{{ ansible_distribution }}.yml"
roles:
# openvpn on the prod frontend nodes
- {role: openvpn/client, when: env != "staging"}
- role: rabbit/user
username: "odcs{{ env_suffix }}"
sent_topics: ^org\.fedoraproject\.{{ env_short }}\.odcs\..*
- mod_wsgi
- role: nfs/client
mnt_dir: '/mnt/fedora_koji'
nfs_src_dir: 'fedora_koji'
when: env != 'staging'
- role: nfs/client
mnt_dir: '/mnt/fedora_koji_prod'
nfs_src_dir: 'fedora_koji'
when: env == 'staging'
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs_stg'
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
when: env == 'staging'
mount_stg: true
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs'
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
when: env != 'staging'
- odcs/frontend
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: Set up /mnt/koji on both the frontend and backend
hosts: odcs:odcs_stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: nfs/client
mnt_dir: '/mnt/fedora_koji'
nfs_src_dir: 'fedora_koji'
when: env != 'staging'
# In staging, we mount fedora_koji as read only (see nfs_mount_opts)
- role: nfs/client
mnt_dir: '/mnt/fedora_koji_prod'
nfs_src_dir: 'fedora_koji'
when: env == 'staging'
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs_stg'
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
mount_stg: true
when: env == 'staging'
- role: nfs/client
mnt_dir: '/srv/odcs'
nfs_src_dir: 'fedora_odcs'
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
when: env != 'staging'
post_tasks:
- file: src=/mnt/fedora_koji/koji dest=/mnt/koji state=link
tags: nfs/client
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: Set up odcs backend service
hosts: odcs_backend:odcs_backend_stg
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: odcs/backend
- role: keytab/service
service: odcs
owner_user: odcs
owner_group: odcs
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: Configure ODCS releng backends
hosts: odcs_backend_releng
user: root
gather_facts: True
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: keytab/service
service: odcs
# - role: rabbit/queue
#username: "fmc{{ env_suffix }}"
#queue_name: "{{ fmc_queue_name }}"
#routing_keys: "{{ fmc_routing_keys }}"
#thresholds:
# warning: 100
# critical: 1000
#sent_topics: ^org\.fedoraproject\.{{ env_short }}\.odcs\..*
# - role: fmc
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"

View file

@ -894,3 +894,10 @@
target: https://fedoraproject.org/wiki/Infrastructure/pdc-retirement
tags:
- pdc
- role: httpd/redirect
shortname: 00-old-odcs
website: odcs.fedoraproject.org
target: https://dl.fedoraproject.org/pub/eln/1/
tags:
- odcs

View file

@ -736,13 +736,6 @@
tags:
- zezere
- role: httpd/reverseproxy
website: odcs.fedoraproject.org
destname: odcs
proxyurl: http://localhost:10066
tags:
- odcs
- role: httpd/reverseproxy
website: greenwave.fedoraproject.org
destname: greenwave

View file

@ -129,8 +129,8 @@
- name: restart kojira
service: name=kojira state=started
- name: remount NFS on builders, composers, bodhi and odcs
hosts: builders_stg:releng_compose_stg:bodhi_backend_stg:odcs_backend_stg:odcs_frontend_stg
- name: remount NFS on builders, composers, bodhi
hosts: builders_stg:releng_compose_stg:bodhi_backend_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml

View file

@ -1,120 +0,0 @@
- name: push packages out to frontend
hosts: odcs_frontend:odcs_frontend_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
testing: False
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: clean all metadata
command: dnf clean all
check_mode: no
- name: update odcs and pungi packages from main repo
package: name={{item}} state=latest
when: not testing
with_items:
- odcs
- pungi
- libmodulemd
- name: push packages out to backend
hosts: odcs_backend:odcs_backend_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
testing: False
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: clean all metadata
command: dnf clean all
check_mode: no
- name: update odcs and pungi packages from main repo
package: name={{item}} state=latest
when: not testing
with_items:
- odcs
- odcs-client
- pungi
- libmodulemd
- name: verify the frontend and stop it
hosts: odcs_frontend:odcs_frontend_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush w.r.t. the frontend
nagios: action=downtime minutes=15 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
roles:
- odcs/base
- odcs/frontend
post_tasks:
- service: name="httpd" state=stopped
- service: name="odcs-celery-beat" state=stopped
- name: verify the backend, stop it, and then upgrade the db
hosts: odcs_backend:odcs_backend_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
odcs_migrate_db: True
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: tell nagios to shush w.r.t. the backend
nagios: action=downtime minutes=15 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
roles:
- odcs/base
- odcs/backend
post_tasks:
- name: tell nagios to unshush w.r.t. the backend
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
- name: restart the frontend
hosts: odcs_frontend:odcs_frontend_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- service: name="httpd" state=started
- service: name="odcs-celery-beat" state=started
post_tasks:
- name: tell nagios to unshush w.r.t. the frontend
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

View file

@ -1,5 +1,4 @@
Alias /pub /srv/pub
Alias /odcs /mnt/odcs
DocumentRoot /srv/web
@ -18,22 +17,6 @@ DocumentRoot /srv/web
</IfModule>
</Directory>
<Directory /mnt/odcs>
HeaderName /HEADER.html
ReadmeName /FOOTER.html
Options Indexes FollowSymLinks
<IfModule mod_authz_core.c>
# Apache 2.4
Require all granted
IndexOptions NameWidth=* FancyIndexing
</IfModule>
<IfModule !mod_authz_core.c>
# Apache 2.2
Order allow,deny
Allow from all
</IfModule>
</Directory>
<Directory /srv/pub>
HeaderName /HEADER.html
ReadmeName /FOOTER.html

View file

@ -56,7 +56,6 @@
- endpoints-external-composer.py
- endpoints-github2fedmsg.py
- endpoints-kerneltest.py
- endpoints-odcs-backend.py
- pkgdb.py
tags:
- config

View file

@ -1,71 +0,0 @@
---
- name: Install fmc and fedora-messaging rpm
package:
name:
- fedora-messaging
# fmc rpm is built in infra tags
- fmc
state: present
when: inventory_hostname.startswith('odcs-backend-releng')
tags:
- fmc
- name: Place fedora-minimal-compose.toml /etc/fedora-messaging/
template:
src: fedora-minimal-compose.toml
dest: /etc/fedora-messaging/fedora-minimal-compose.toml
owner: root
group: root
mode: 644
when: inventory_hostname.startswith('odcs-backend-releng')
tags:
- fmc
- name: Start and enable the fmc services
service:
name: fm-consumer@fedora-minimal-compose
state: started
enabled: yes
when: inventory_hostname.startswith('odcs-backend-releng')
tags:
- fmc
- name: Create /etc/pki/fedora-messaging
file:
dest: /etc/pki/fedora-messaging
mode: 0775
owner: root
group: root
state: directory
tags:
- fmc
- name: Deploy the fedora-messaging CA
copy:
src: "{{ private }}/files/rabbitmq/{{env}}/pki/ca.crt"
dest: /etc/pki/fedora-messaging/cacert.pem
mode: 0644
owner: root
group: root
tags:
- fmc
- name: Deploy the fedora-messaging cert
copy:
src: "{{ private }}/files/rabbitmq/{{env}}/pki/issued/fmc{{env_suffix}}.crt"
dest: /etc/pki/fedora-messaging/fmc-cert.pem
mode: 0644
owner: root
group: root
tags:
- fmc
- name: Deploy the fedora-messaging key
copy:
src: "{{ private }}/files/rabbitmq/{{env}}/pki/private/fmc{{env_suffix}}.key"
dest: /etc/pki/fedora-messaging/fmc-key.pem
mode: 0600
owner: root
group: root
tags:
- fmc

View file

@ -1,87 +0,0 @@
# A basic configuration for Fedora's message broker
#
# This file is in the TOML format.
amqp_url = "amqps://bodhi{{ env_suffix }}:@rabbitmq{{ env_suffix }}.fedoraproject.org/%2Fpubsub"
callback = "fedora_minimal_compose:Consumer"
passive_declares = true
[tls]
ca_cert = "/etc/pki/fedora-messaging/cacert.pem"
keyfile = "/etc/pki/fedora-messaging/fmc-key.pem"
certfile = "/etc/pki/fedora-messaging/fmc-cert.pem"
[client_properties]
app = "fmc"
app_url = "https://pagure.io/releng/fmc"
[exchanges."amq.topic"]
type = "topic"
durable = true
auto_delete = false
arguments = {}
# Queue names *must* be in the normal UUID format: run "uuidgen" and use the
# output as your queue name. If your queue is not exclusive, anyone can connect
# and consume from it, causing you to miss messages, so do not share your queue
# name. Any queues that are not auto-deleted on disconnect are garbage-collected
# after approximately one hour.
#
# If you require a stronger guarantee about delivery, please talk to Fedora's
# Infrastructure team.
[queues."{{ fmc_queue_name }}"]
durable = false
auto_delete = true
exclusive = true
arguments = {}
[[bindings]]
queue = "{{ fmc_queue_name }}"
exchange = "amq.topic"
routing_keys = {{ fmc_routing_keys }}
[consumer_config]
rawhide = {{FedoraRawhideNumber}}
odcs_url = 'https://odcs.fedoraproject.org'
odcs_file = '/etc/releng-odcs-oidc-token'
packages_list = ['anaconda', 'authconfig', 'python-blivet', 'pyparted', 'parted', 'pykickstart', 'blivet-gui', 'libblockdev', 'e2fsprogs', 'dosfstools', 'grub2', 'shim-signed', 'libselinux']
# When we have the ability to run one compose for multiple builds, use the following list
# packages_list = ['anaconda', 'authconfig', 'python-blivet', 'pyparted', 'parted', 'pykickstart', 'blivet-gui', 'libblockdev', 'e2fsprogs', 'dosfstools', 'grub2', 'shim-signed', 'libselinux', 'chrony', 'fcoe-utils', 'hfsplus-tools', 'firewalld', 'realmd', 'yelp', 'libtimezonemap', 'kernel', 'systemd', 'dracut', 'plymouth', 'gtk3', 'python3']
koji_url = 'https://koji.fedoraproject.org/kojihub'
principal = 'releng@FEDORAPROJECT.ORG'
keytab = '/etc/krb5.releng.keytab'
[qos]
prefetch_size = 0
prefetch_count = 25
[log_config]
level = "DEBUG"
version = 1
disable_existing_loggers = true
[log_config.formatters.simple]
format = "%(asctime)s %(levelname)s %(name)s - %(message)s"
[log_config.handlers.console]
class = "logging.StreamHandler"
formatter = "simple"
stream = "ext://sys.stdout"
[log_config.loggers.fedora_messaging]
level = "INFO"
propagate = false
handlers = ["console"]
[log_config.loggers.twisted]
level = "INFO"
propagate = false
handlers = ["console"]
[log_config.loggers.pika]
level = "WARNING"
propagate = false
handlers = ["console"]
[log_config.root]
level = "ERROR"
handlers = ["console"]

View file

@ -333,15 +333,6 @@ backend kojipkgs-backend
option httpchk GET /
{% endif %}
frontend odcs-frontend
bind 0.0.0.0:10066
default_backend odcs-backend
backend odcs-backend
balance hdr(appserver)
server odcs-frontend01 odcs-frontend01:80 check inter 20s rise 2 fall 3
option httpchk GET /api/1/composes/
{% if datacenter == "iad2" %}
# These ports are for proxying rabbitmq (AMQP) protocol through.
# At this moment, internal- and public-rabbitmq both point to the exact same set of

View file

@ -13,7 +13,7 @@ global enabled=allow
[provider_config]
global enabled=openid,saml2,openidc
openidc enabled extensions=fedora-account,beaker,waiverdb,odcs,wiki,src,kerneltest
openidc enabled extensions=fedora-account,beaker,waiverdb,wiki,src,kerneltest
{% if env == 'staging' %}
openidc subject salt={{ ipsilon_stg_openidc_subject_salt }}

View file

@ -15,7 +15,7 @@ default_mounts = /mnt/koji,/mnt/fedora_koji_prod/koji,/etc/kojid/secrets
; Each extra_mount needs to start with some of these prefixes. Other paths are
; not allowed for mounting. Only absolute paths are allowed here, no
; wildcards.
safe_roots = /mnt/koji/compose,/mnt/koji/atomic-cd,/srv/odcs/
safe_roots = /mnt/koji/compose,/mnt/koji/atomic-cd
; path substitutions is tuple per line, delimited by comma, order is
; important.
@ -42,12 +42,6 @@ path = /mnt/koji
fstype = bind
options = bind
[path2]
mountpoint = /srv/odcs
path = /srv/odcs
fstype = bind
options = bind
{% if env == 'staging' %}
; we need to mount the production split volume here.
[path3]

View file

@ -115,14 +115,6 @@ define service {
use websitetemplate
}
define service {
hostgroup_name proxies
service_description http-odcs
check_command check_website_ssl!odcs.fedoraproject.org!/api/1/composes/!result_repofile
max_check_attempts 8
use websitetemplate
}
# Some openshift apps
define service {
hostgroup_name proxies

View file

@ -334,7 +334,6 @@ command[check_fedmsg_gateway_proc]=/usr/lib64/nagios/plugins/check_procs -c 1:1
command[check_fedmsg_irc_proc]=/usr/lib64/nagios/plugins/check_procs -c 1:1 -C 'fedmsg-irc' -u fedmsg
command[check_fedmsg_tweet_proc]=/usr/lib64/nagios/plugins/check_procs -c 1:1 -C 'fedmsg-tweet' -u fedmsg
command[check_fedmsg_composer_proc]=/usr/lib64/nagios/plugins/check_procs -c 1:1 -C 'fedmsg-hub-3' -u apache
command[check_fedmsg_odcs_celery_proc]=/usr/lib64/nagios/plugins/check_procs -c 1:1 -C 'odcs-celery-backend ' -u odcs
command[check_haproxy_conns]=/usr/lib64/nagios/plugins/check_haproxy_conns.py
command[check_redis_proc]=/usr/lib64/nagios/plugins/check_procs -c 1:1 -C 'redis-server' -u redis
command[check_openvpn_link]=/usr/lib64/nagios/plugins/check_ping -H 192.168.1.41 -w 375.0,20% -c 500,60%

View file

@ -1,3 +0,0 @@
---
# Whether the deploy should run "odcs-manager upgradedb" (database schema
# migration). Off by default; enable explicitly when rolling out a release
# that ships new Alembic revisions.
odcs_migrate_db: false

View file

@ -1,3 +0,0 @@
---
# Role dependencies: pull in the shared ODCS base role (packages, users,
# common config) before this role's own tasks run.
dependencies:
  - role: odcs/base

View file

@ -1,235 +0,0 @@
---
- name: Install the httpd package, just to get the apache group on the system.
package:
pkg: httpd
state: present
tags:
- packages
- odcs
- odcs/backend
- name: Install support files for ISO creation
package:
name:
- libisoburn
- xorriso
- name: Make sure httpd isn't running.
service:
name: httpd
state: stopped
tags:
- odcs
- odcs/backend
- name: Make sure odcs-celery-backend isn't running.
service:
name: odcs-celery-backend
state: stopped
tags:
- odcs
- odcs/backend
- name: ensure Pungi cache files have right ownership
file:
path: "{{ item }}"
owner: odcs-server
group: odcs-server
state: touch
with_items:
- /var/tmp/pungi_cache_file.dbm.rw.lock
- /var/tmp/pungi_cache_file.dbm
tags:
- odcs
- odcs/backend
- name: generate the ODCS koji config
template:
src: etc/koji.conf.d/odcs.conf.j2
dest: /etc/koji.conf.d/odcs.conf
owner: odcs-server
group: odcs-server
mode: 0440
notify:
- restart odcs-celery-backend
tags:
- odcs
- odcs/backend
- name: add the odcs-server user to the apache group
user:
name: odcs-server
group: apache
groups: apache
append: true
notify:
- restart odcs-celery-backend
tags:
- odcs
- odcs/backend
- name: migrate the database
command: "{{ item }}"
with_items:
- odcs-manager upgradedb
become: yes
become_user: odcs-server
when: odcs_migrate_db
notify:
- restart odcs-celery-backend
tags:
- odcs
- odcs/backend
- name: Configure the odcs virtual host
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
rabbitmq_vhost:
name: /odcs
state: present
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
- name: Configure the HA policy for the odcs queues
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
rabbitmq_policy:
name: HA
apply_to: queues
pattern: .*
tags:
ha-mode: all
ha-sync-mode: automatic # Auto sync queues to new cluster members
ha-sync-batch-size: 10000 # Larger is faster, but must finish in 1 net_ticktime
vhost: /odcs
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
- name: Add a policy to limit queues to 1GB and remove after a month of no use
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
rabbitmq_policy:
apply_to: queues
name: pubsub_sweeper
state: present
pattern: ".*"
tags:
# Unused queues are killed after 1000 * 60 * 60 * 31 milliseconds (~a month)
expires: 111600000
# Queues can use at most 1GB of storage
max-length-bytes: 1073741824
vhost: /odcs
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
- name: Create the odcs-admin user for the odcs vhost (prod)
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
rabbitmq_user:
user: odcs-admin
password: "{{ (env == 'production')|ternary(rabbitmq_odcs_admin_password_production, rabbitmq_odcs_admin_password_staging) }}"
vhost: /odcs
configure_priv: .*
read_priv: .*
write_priv: .*
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
- name: Dump the admin password in a file for administrative operations
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
copy:
dest: /root/.odcs-rabbitmqpass
content: "{{ (env == 'production')|ternary(rabbitmq_odcs_admin_password_production, rabbitmq_odcs_admin_password_staging) }}"
mode: 0600
owner: root
group: root
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
- name: Grant the admin user access to the odcs vhost
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
rabbitmq_user:
user: admin
vhost: /odcs
configure_priv: .*
read_priv: .*
write_priv: .*
tags: administrator
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
- name: Grant the nagios-monitoring user access to the odcs vhost
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
rabbitmq_user:
user: nagios-monitoring
vhost: /odcs
configure_priv: "^$"
read_priv: "^$"
write_priv: "^$"
tags: monitoring
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
# Create a user with:
- name: Create a user for odcs access
run_once: true
delegate_to: "rabbitmq01{{ env_suffix }}.{{ datacenter }}.fedoraproject.org"
rabbitmq_user:
user: "odcs-private-queue{{ env_suffix }}"
vhost: /odcs
configure_priv: .*
write_priv: .*
read_priv: .*
state: present
tags:
- rabbitmq_cluster
- config
- odcs
- odcs/backend
- name: set releng user keytab
copy:
src: "{{private}}/files/keytabs/{{env}}/releng"
dest: /etc/krb5.releng.keytab
owner: odcs-server
group: odcs-server
mode: 0640
when: inventory_hostname.startswith('odcs-backend-releng')
- name: set releng odcs oidc token
copy:
src: "{{private}}/files/releng/{{env}}/releng-odcs-oidc-token"
dest: /etc/releng-odcs-oidc-token
when: inventory_hostname.startswith('odcs-backend-releng')
- name: Set MAILTO cronvar
cronvar:
name: MAILTO
value: releng-cron@lists.fedoraproject.org
when: inventory_hostname.startswith('odcs-backend-releng')
- name: Set cronjob to execute ELN periodic compose
cron:
name: "ELN periodic compose"
minute: "0"
hour: "*/4"
job: 'odcs --token=/etc/releng-odcs-oidc-token create-raw-config --compose-type=production --label=Alpha-0.`date "+\%s"` eln eln'
when: inventory_hostname.startswith('odcs-backend-releng')

View file

@ -1,19 +0,0 @@
# Koji client profiles used by the ODCS backend (selected via KOJI_PROFILE in
# the ODCS app config). Kerberos auth; the releng principal/keytab pair is
# only rendered on the releng backend hosts.
[odcs_production]
server = https://koji.fedoraproject.org/kojihub
weburl = https://koji.fedoraproject.org/koji
topurl = https://kojipkgs.fedoraproject.org/
authtype = kerberos
krb_rdns = false
{% if inventory_hostname.startswith('odcs-backend-releng') %}
principal = releng@FEDORAPROJECT.ORG
keytab = /etc/krb5.releng.keytab
{% endif %}
# Staging profile authenticates with the host's default credentials (no
# principal/keytab pinned here).
[odcs_stg]
server = https://koji.stg.fedoraproject.org/kojihub
weburl = https://koji.stg.fedoraproject.org/koji
topurl = https://kojipkgs.stg.fedoraproject.org/
authtype = kerberos
krb_rdns = false

View file

@ -1,51 +0,0 @@
---
# Defaults for the odcs/backend role. All of these are meant to be overridden
# from inventory group_vars; most are rendered into /etc/odcs/config.py by
# etc/odcs/config.py.j2.
# Install the latest ODCS packages during the run when true.
odcs_upgrade: false
# Append '?sslmode=require' to the SQLAlchemy database URI when true.
odcs_force_postgres_ssl: false
odcs_pdc_insecure: false
odcs_pdc_develop: true
# Where finished composes land, and the URL they are served from.
odcs_target_dir: /srv/odcs
odcs_target_dir_url: http://{{ inventory_hostname }}/composes
odcs_allowed_clients_groups: {"sysadmin-odcs": {}, "pungi-devel": {}, "packager": {"source_types": ["module"]}}
odcs_admin_groups: ["sysadmin-odcs", "pungi-devel"]
odcs_admin_users: []
odcs_raw_config_urls: {}
# Pungi runroot settings (rendered into PUNGI_RUNROOT_* config keys).
odcs_pungi_runroot_enabled: false
odcs_pungi_parent_runroot_channel: compose
odcs_pungi_parent_runroot_packages: ["pungi"]
odcs_pungi_parent_runroot_mounts: []
odcs_pungi_parent_runroot_weight: 3.5
odcs_pungi_parent_runroot_tag: f27-build
odcs_pungi_parent_runroot_arch: x86_64
odcs_pungi_runroot_target_dir: /mnt/koji/compose/odcs
odcs_pungi_runroot_target_dir_url: http://kojipkgs.fedoraproject.org/compose/odcs
# Messaging (fedora-messaging producer) settings.
odcs_messaging_backend: fedora-messaging
odcs_messaging_certificate: "/THIS/FILE/PROBABLY/DOESNT/EXIST"
odcs_messaging_private_key: "/THIS/FILE/PROBABLY/DOESNT/EXIST"
odcs_messaging_broker_urls: []
odcs_messaging_ca_cert: /etc/ssl/certs/ca-bundle.trust.crt
odcs_allowed_clients: {"users": [], "groups": []}
odcs_admins: {"users": [], "groups": []}
odcs_allowed_source_types: ["tag", "module"]
odcs_allowed_flags: null
odcs_allowed_arches: ['aarch64', 'armhfp', 'i386', 'ppc64le', 'x86_64', 's390x']
odcs_allowed_results: null
odcs_allowed_sources: null
odcs_arches: ["x86_64"]
odcs_sigkeys: []
odcs_messaging_producer_broker_urls: []
odcs_messaging_consumer_broker_urls: []
# Celery broker connection pieces (null = not set here; see config.py.j2).
odcs_celery_broker_username: null
odcs_celery_broker_password: null
odcs_celery_broker_host: null
odcs_celery_broker_port: null
odcs_celery_broker_vhost: rabbitmq
odcs_celery_queues: ["pungi_composes", "cleanup"]
odcs_celery_router_config: null
odcs_runroot_ssh_hostnames: {"x86_64": "localhost"}
# The default is 24 hours
odcs_seconds_to_live: 86400
# The default is 72 hours
odcs_max_seconds_to_live: 259200
odcs_celery_concurrency: 2
odcs_extra_target_dirs: {"private": "/srv/odcs/private"}
odcs_expected_backend_number: 0

View file

@ -1,12 +0,0 @@
# Rotate the ODCS backend celery worker logs: daily (or at 50M), keeping 7
# rotations, compressed one cycle late so the most recent rotation stays
# readable. copytruncate is used because the celery workers keep their log
# file descriptors open and are not signalled to reopen them.
/var/log/odcs-backend/*.log {
missingok
notifempty
compress
delaycompress
copytruncate
daily
dateext
rotate 7
size 50M
}

View file

@ -1,40 +0,0 @@
# Wrapper config for real raw config file.
# The goal of this wrapper is to ensure that certain config options cannot be
# changed by the submitted raw config: everything from the submitted raw_config
# is imported first, then the pinned values below override it.
# This Raw Config wrapper allows overriding certain raw config configuration
# options.
from raw_config import *
# Pinned values the submitted raw config must not change.
koji_profile = 'odcs_production'
link_type = 'symlink'
createrepo_deltas = False
# Shared dogpile cache for pkgset data; expires after 4 hours (14400 s).
dogpile_cache_backend = "dogpile.cache.dbm"
dogpile_cache_arguments = {
"filename": "/var/tmp/pungi_cache_file.dbm",
}
dogpile_cache_expiration_time = 14400
buildinstall_use_guestmount = True
# Allow overriding pkgset_koji_builds from ODCS client.
# compose["builds"] is a space-separated NVR list supplied by the client.
{%- if compose["builds"] %}
pkgset_koji_builds = [
{%- for build in compose["builds"].split(" ") %}
'{{ build }}',
{%- endfor %}
]
{%- endif %}
# Allow overriding sigkeys from ODCS client.
# compose["sigkeys"] is a space-separated key-ID list supplied by the client.
{%- if compose["sigkeys"] %}
sigkeys = [
{%- for sigkey in compose["sigkeys"].split(" ") %}
'{{ sigkey }}',
{%- endfor%}
]
{%- endif %}

View file

@ -1,182 +0,0 @@
# Automatically generated by ODCS.
# PRODUCT INFO
release_name = '{{ config.release_name }}'
release_short = '{{ config.release_short }}'
release_version = '{{ config.release_version }}'
release_is_layered = False
# GENERAL SETTINGS
bootable = {{ config.bootable }}
variants_file='variants.xml'
{%- if config.sigkeys %}
sigkeys = [
{%- for sigkey in config.sigkeys %}
'{{ sigkey }}',
{%- endfor%}
]
{%- else %}
sigkeys = [None]
{%- endif %}
hashed_directories = True
# RUNROOT settings
{%- if config.bootable %}
runroot = True
{%- else %}
runroot = False
{%- endif %}
# PKGSET
{%- if config.pkgset_source == 'repos' %}
pkgset_source = 'repos'
pkgset_repos = {
{%- for arch in config.arches %}
'{{ arch }}': ['{{ config.source }}',],
{%- endfor %}
}
{%- elif config.pkgset_source == 'koji' %}
pkgset_source = 'koji'
{%- if config.koji_tag %}
pkgset_koji_tag = '{{ config.koji_tag }}'
{%- else %}
pkgset_koji_tag = ""
{%- endif %}
{%- if config.koji_module_tags %}
pkgset_koji_module_tag = [
{%- for tag in config.koji_module_tags %}
'{{ tag }}',
{%- endfor %}
]
{%- endif %}
{%- if config.module_defaults_url %}
module_defaults_dir = {
"scm": "git",
"repo": '{{ config.module_defaults_url[0] }}',
"branch": '{{ config.module_defaults_url[1] }}',
"dir": ".",
}
{%- endif %}
pkgset_koji_inherit = {{ config.pkgset_koji_inherit }}
pkgset_koji_builds = [
{%- for build in config.builds %}
'{{ build }}',
{%- endfor %}
]
{%- endif %}
{%- if config.scratch_modules %}
mbs_api_url = "{{ config.mbs_api_url }}"
pkgset_scratch_modules = {
'^Temporary$': [
{%- for nsvc in config.scratch_modules %}
'{{ nsvc }}',
{%- endfor %}
]
}
{%- endif %}
{%- if config.scratch_build_tasks %}
pkgset_koji_scratch_tasks = [
{%- for task_id in config.scratch_build_tasks %}
'{{ task_id }}',
{%- endfor %}
]
{%- endif %}
{%- if config.source_type_str in ["tag", "build"] and not config.packages %}
# In case no package is requested, include all of them.
additional_packages = [
('^Temporary$', {
'*': [
'*',
],
}),
]
{%- endif %}
filter_system_release_packages = False
multilib = [
('^.*$', {
{%- for multilib_arch in config.multilib_arches %}
'{{ multilib_arch }}': {{ config.multilib_method }}
{%- endfor%}
}),
]
# GATHER
gather_source = '{{ config.gather_source }}'
gather_method = '{{ config.gather_method }}'
{%- if config.comps_file %}
comps_file = '{{ config.comps_file }}'
{%- endif %}
check_deps = {{ config.check_deps }}
greedy_method = 'build'
gather_lookaside_repos = [
('^.*$', {
'*': [
{%- for lookaside_repo in config.lookaside_repos %}
'{{ lookaside_repo }}',
{%- endfor %}
]
}),
]
# CREATEREPO
createrepo_c = True
createrepo_checksum = 'sha256'
# CHECKSUMS
media_checksums = ['sha256']
create_jigdo = False
# BUILDINSTALL
{%- if config.bootable %}
buildinstall_method="lorax"
buildinstall_topdir="/mnt/koji/compose/odcs"
{%- endif %}
skip_phases = [
{%- if "iso" not in config.results %}
"createiso",
{%- endif %}
{%- if "boot.iso" not in config.results %}
"buildinstall",
{%- endif %}
"live_media",
"live_images",
"ostree"]
link_type = 'symlink'
translate_paths = [
]
koji_profile = '{{ config.koji_profile }}'
dogpile_cache_backend = "dogpile.cache.dbm"
dogpile_cache_arguments = {
"filename": "/var/tmp/pungi_cache_file.dbm",
}
dogpile_cache_expiration_time = 14400
repoclosure_strictness = [('.*', {'*': 'off'})]
{%- if config.include_devel_modules %}
include_devel_modules = { "Temporary": [
{%- for ns in config.include_devel_modules %}
'{{ ns }}',
{%- endfor%}
]}
{%- endif %}

View file

@ -1,3 +0,0 @@
# systemd-tmpfiles entries: create the runtime (pidfile) and log directories
# for the ODCS celery backend, mode 0755, owned by odcs-server.
# NOTE(review): the trailing '-' is the Group field, meaning "tmpfiles
# default" rather than the odcs-server group — confirm that is intended.
d /var/run/odcs-backend 0755 odcs-server -
d /var/log/odcs-backend 0755 odcs-server -

View file

@ -1,14 +0,0 @@
---
# Service-restart handlers notified by the odcs role tasks after config or
# unit-file changes.
# NOTE(review): leading indentation of these handler entries appears to have
# been stripped by the diff rendering; bytes are preserved here as-is.
- name: restart fedmsg-hub-3
service:
name: fedmsg-hub-3
state: restarted
- name: restart odcs-celery-backend
service:
name: odcs-celery-backend
state: restarted
- name: restart odcs-celery-beat
service:
name: odcs-celery-beat
state: restarted

View file

@ -1,380 +0,0 @@
---
# install packages and generate shared configuration files
- name: install the packages required for ODCS
package:
pkg: "{{ item }}"
with_items:
- python3-psycopg2
- python3-odcs-common
- python3-celery
- odcs
- httpd
- make
- libxml2
- intltool
tags:
- odcs
- name: install the latest ODCS packagess
package:
pkg: "{{ item }}"
with_items:
- python3-odcs-common
- odcs
- odcs-client
when: odcs_upgrade
tags:
- odcs
- name: Enable the mod_auth_openidc module on rhel8
copy:
dest: /etc/dnf/modules.d/mod_auth_openidc.module
content: |
[mod_auth_openidc]
name=mod_auth_openidc
stream=2.3
profiles=
state=enabled
when: datacenter == "iad2"
# install required packages for frontend here, as we may
# need to reload httpd in next task when host is frontend
- name: install the packages required for ODCS frontend
package:
pkg: "{{ item }}"
state: present
with_items:
- mod_auth_openidc
- python3-mod_wsgi
when: inventory_hostname.startswith('odcs-frontend')
tags:
- odcs
- odcs/frontend
- name: install the packages required for ODCS backend
package:
pkg: "{{ item }}"
state: present
with_items:
- koji
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: create odcs-server group
group:
name: odcs-server
gid: 64321
state: present
- name: create odcs-server user
user:
name: odcs-server
uid: 64321
group: odcs-server
- name: create ODCS_TARGET_DIR
file:
path: "{{ item }}"
state: directory
owner: odcs-server
group: odcs-server
mode: 0775
# recurse: yes
follow: no
with_items:
- "{{ odcs_target_dir }}"
tags:
- odcs
- odcs/frontend
- name: create ODCS_TARGET_DIR private
file:
path: "{{ item }}"
state: directory
owner: odcs-server
group: odcs-server
mode: 0770
# recurse: yes
follow: no
with_items:
- "{{ odcs_target_dir }}/private"
tags:
- odcs
- odcs/frontend
- name: ensure ODCS service directories have right ownership
file:
path: "{{ item }}"
state: directory
owner: odcs-server
group: odcs-server
recurse: yes
follow: no
with_items:
- /var/run/odcs-backend
- /var/log/odcs-backend
tags:
- odcs
- odcs/backend
# this app config is shared by backend and frontend, but has different
# owner groups on backend and frontend, and notify different handlers,
# we can have vars set for frontend and backend seperately to do that,
# but it looks a little weird to have such special vars in
# inventory/group_vars/odcs-*, also we don't want to repeat the same
# required vars in frontend and backend, so just have 2 tasks in base
# to keep it simple.
- name: generate the ODCS app config for frontend
template:
src: etc/odcs/config.py.j2
dest: /etc/odcs/config.py
owner: odcs-server
group: apache
mode: 0440
notify:
- restart apache
when: inventory_hostname.startswith('odcs-frontend')
tags:
- odcs
- odcs/frontend
- name: generate the ODCS app config for backend
template:
src: etc/odcs/config.py.j2
dest: /etc/odcs/config.py
owner: odcs-server
group: odcs-server
mode: 0440
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: generate the ODCS raw_config_wrapper config for backend
template:
src: etc/odcs/raw_config_wrapper.conf.j2
dest: /etc/odcs/raw_config_wrapper.conf
owner: odcs-server
group: odcs-server
mode: 0440
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: generate the ODCS runroot_koji config for backend
template:
src: etc/odcs/runroot_koji.conf.j2
dest: /etc/odcs/runroot_koji.conf
owner: odcs-server
group: odcs-server
mode: 0440
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: Make sure the /etc/fedmsg.d/odcs.py file (provided by rpm) is absent.
file:
path: /etc/fedmsg.d/odcs.py
state: absent
tags:
- odcs
- odcs/backend
- name: copy the ODCS pungi config template to backend
copy:
src: "{{ roles_path }}/odcs/base/files/pungi.conf"
dest: /etc/odcs/pungi.conf
owner: odcs-server
group: odcs-server
mode: 0640
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: copy the odcs-celery-backend.service file.
template:
src: "etc/systemd/system/odcs-celery-backend.service.j2"
dest: /etc/systemd/system/odcs-celery-backend.service
owner: odcs-server
group: odcs-server
mode: 0640
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: copy the odcs-celery-beat.service file.
template:
src: "etc/systemd/system/odcs-celery-beat.service.j2"
dest: /etc/systemd/system/odcs-celery-beat.service
owner: odcs-server
group: odcs-server
mode: 0640
notify:
- restart odcs-celery-beat
when: inventory_hostname.startswith('odcs-frontend')
tags:
- odcs
- odcs/frontend
- name: Reload systemd daemon
systemd:
daemon_reload: yes
tags:
- odcs
- odcs/backend
- odcs/frontend
- name: enable ODCS backend (odcs-celery-backend)
service:
name: odcs-celery-backend
enabled: yes
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: enable ODCS beat (odcs-celery-beat)
service:
name: odcs-celery-beat
enabled: yes
when: inventory_hostname.startswith('odcs-frontend')
tags:
- odcs
- odcs/frontend
- name: copy the fedora-messaging.toml.j2 file.
template:
src: "fedora-messaging.toml.j2"
dest: /etc/fedora-messaging/config.toml
owner: odcs-server
group: odcs-server
mode: 0640
tags:
- odcs
- odcs/frontend
- odcs/backend
- name: copy the odcs-backend.conf tmpfiles.d file.
copy:
src: "{{ roles_path }}/odcs/base/files/tmpfiles.d/odcs-backend.conf"
dest: /etc/tmpfiles.d/odcs-backend.conf
owner: odcs-server
group: odcs-server
mode: 0640
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: configure logrotate
copy:
src: "{{ roles_path }}/odcs/base/files/logrotate.d/odcs"
dest: /etc/logrotate.d/odcs
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: Run systemd-tmpfiles --create
command: systemd-tmpfiles --create
args:
creates: /var/run/odcs-backend
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend
- name: copy the odcs rabbitmq private queue crt.
copy:
src: "{{private}}/files/rabbitmq/{{env}}/pki/issued/odcs-private-queue{{env_suffix}}.crt"
dest: /etc/odcs/odcs-private-queue.crt
owner: odcs-server
group: apache
mode: 0640
tags:
- odcs
- odcs/backend
- odcs/frontend
- name: copy the odcs rabbitmq private queue key.
copy:
src: "{{private}}/files/rabbitmq/{{env}}/pki/private/odcs-private-queue{{env_suffix}}.key"
dest: /etc/odcs/odcs-private-queue.key
owner: odcs-server
group: apache
mode: 0640
tags:
- odcs
- odcs/backend
- odcs/frontend
- name: copy the odcs rabbitmq crt.
copy:
src: "{{private}}/files/rabbitmq/{{env}}/pki/issued/odcs{{env_suffix}}.crt"
dest: /etc/odcs/odcs-rabbitmq.crt
owner: odcs-server
group: apache
mode: 0640
tags:
- odcs
- odcs/backend
- odcs/frontend
- name: copy the odcs rabbitmq key.
copy:
src: "{{private}}/files/rabbitmq/{{env}}/pki/private/odcs{{env_suffix}}.key"
dest: /etc/odcs/odcs-rabbitmq.key
owner: odcs-server
group: apache
mode: 0640
tags:
- odcs
- odcs/backend
- odcs/frontend
- name: copy the odcs rabbitmq CA cert.
copy:
src: "{{private}}/files/rabbitmq/{{env}}/pki/ca.crt"
dest: /etc/odcs/ca.crt
owner: odcs-server
group: apache
mode: 0640
tags:
- odcs
- odcs/backend
- odcs/frontend
- name: copy the custom_compose_raw_config_wrapper.conf file.
copy:
src: "{{ roles_path }}/odcs/base/files/odcs/custom_compose_raw_config_wrapper.conf"
dest: /etc/odcs/custom_compose_raw_config_wrapper.conf
owner: odcs-server
group: odcs-server
mode: 0640
notify:
- restart odcs-celery-backend
when: inventory_hostname.startswith('odcs-backend')
tags:
- odcs
- odcs/backend

View file

@ -1,172 +0,0 @@
from os import path
confdir = path.abspath(path.dirname(__file__))
# use parent dir as dbdir else fallback to current dir
dbdir = path.abspath(path.join(confdir, '..')) if confdir.endswith('conf') \
else confdir
class BaseConfiguration(object):
# Make this random (used to generate session keys)
SECRET_KEY = '74d9e9f9cd40e66fc6c4c2e9987dce48df3ce98542529fd0'
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(path.join(
dbdir, 'odcs.db'))
SQLALCHEMY_TRACK_MODIFICATIONS = False
HOST = '127.0.0.1'
PORT = 5005
DEBUG = False
# Global network-related values, in seconds
NET_TIMEOUT = 120
NET_RETRY_INTERVAL = 30
# Available backends are: console, file, journal.
LOG_BACKEND = 'journal'
# Path to log file when LOG_BACKEND is set to "file".
LOG_FILE = 'odcs.log'
# Available log levels are: debug, info, warn, error.
LOG_LEVEL = 'info'
SSL_ENABLED = False
class DevConfiguration(BaseConfiguration):
DEBUG = True
LOG_BACKEND = 'console'
LOG_LEVEL = 'debug'
# Global network-related values, in seconds
NET_TIMEOUT = 5
NET_RETRY_INTERVAL = 1
class TestConfiguration(BaseConfiguration):
LOG_BACKEND = 'console'
LOG_LEVEL = 'debug'
DEBUG = True
SQLALCHEMY_DATABASE_URI = 'sqlite:///{0}'.format(
path.join(dbdir, 'tests', 'test_odcs.db'))
# Global network-related values, in seconds
NET_TIMEOUT = 3
NET_RETRY_INTERVAL = 1
class ProdConfiguration(BaseConfiguration):
AUTH_BACKEND = 'openidc'
AUTH_OPENIDC_REQUIRED_SCOPES = [
'openid',
'https://id.fedoraproject.org/scope/groups',
'https://pagure.io/odcs/new-compose',
'https://pagure.io/odcs/renew-compose',
'https://pagure.io/odcs/delete-compose',
]
OIDC_BASE_NAMESPACE = 'https://pagure.io/odcs/'
{% if env == 'staging' %}
AUTH_OPENIDC_USERINFO_URI = 'https://id.stg.fedoraproject.org/openidc/UserInfo'
SECRET_KEY = "{{ odcs_stg_secret_key }}"
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://odcs:{{odcs_stg_db_password}}@db-odcs/odcs{{ '?sslmode=require' if odcs_force_postgres_ssl else '' }}'
KOJI_PROFILE = 'odcs_stg'
MBS_URL ='http://mbs.stg.fedoraproject.org/module-build-service'
KOJI_KRB_PRINCIPAL = 'odcs/odcs.stg.fedoraproject.org@STG.FEDORAPROJECT.ORG'
{% else %}
AUTH_OPENIDC_USERINFO_URI = 'https://id.fedoraproject.org/openidc/UserInfo'
SECRET_KEY = "{{ odcs_prod_secret_key }}"
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://odcs:{{odcs_prod_db_password}}@db-odcs/odcs{{ '?sslmode=require' if odcs_force_postgres_ssl else '' }}'
KOJI_PROFILE = 'odcs_production'
MBS_URL = 'http://mbs.fedoraproject.org/module-build-service'
KOJI_KRB_PRINCIPAL = 'odcs/odcs.stg.fedoraproject.org@STG.FEDORAPROJECT.ORG'
{% endif %}
KOJI_KRB_KEYTAB = '/etc/krb5.odcs_odcs{{env_suffix}}.fedoraproject.org.keytab'
KOJI_KRB_CCACHE = None
TARGET_DIR = "{{ odcs_target_dir }}"
TARGET_DIR_URL = "{{ odcs_target_dir_url }}"
EXTRA_TARGET_DIRS = {{ odcs_extra_target_dirs }}
ALLOWED_SOURCE_TYPES = {{ odcs_allowed_source_types }}
RAW_CONFIG_URLS = {{ odcs_raw_config_urls }}
ALLOWED_CLIENTS = {
'groups': {{ odcs_allowed_clients_groups }},
'users': {{ odcs_allowed_clients_users }},
}
ADMINS = {
'groups': {{ odcs_admin_groups }},
'users': {{ odcs_admin_users }},
}
SIGKEYS = {{ odcs_sigkeys }}
MESSAGING_BACKEND = "{{ odcs_messaging_backend }}"
MESSAGING_BROKER_URLS = {{ odcs_messaging_producer_broker_urls }}
MESSAGING_CERT_FILE = "/etc/ssl/odcs/umb-client.crt"
MESSAGING_KEY_FILE = "/etc/ssl/odcs/umb-client.key"
MESSAGING_CA_CERT = "{{ odcs_messaging_ca_cert }}"
{% if env == "staging" %}
MESSAGING_TOPIC_PREFIX = "org.fedoraproject.stg."
{% else %}
MESSAGING_TOPIC_PREFIX = "org.fedoraproject.prod."
{% endif %}
MESSAGING_TOPIC = "odcs.compose.state-changed"
INTERNAL_MESSAGING_TOPIC = "odcs.internal.msg"
PUNGI_KOJI_ARGS = ['--no-latest-link']
PUNGI_TIMEOUT=10800
PUNGI_RUNROOT_ENABLED = {{ odcs_pungi_runroot_enabled }}
PUNGI_PARENT_RUNROOT_CHANNEL = "{{ odcs_pungi_parent_runroot_channel }}"
PUNGI_PARENT_RUNROOT_PACKAGES = {{ odcs_pungi_parent_runroot_packages }}
PUNGI_PARENT_RUNROOT_MOUNTS = {{ odcs_pungi_parent_runroot_mounts }}
PUNGI_PARENT_RUNROOT_WEIGHT = {{ odcs_pungi_parent_runroot_weight }}
PUNGI_PARENT_RUNROOT_TAG = "{{ odcs_pungi_parent_runroot_tag }}"
PUNGI_PARENT_RUNROOT_ARCH = "{{ odcs_pungi_parent_runroot_arch }}"
PUNGI_RUNROOT_TARGET_DIR = "{{ odcs_pungi_runroot_target_dir }}"
PUNGI_RUNROOT_TARGET_DIR_URL = "{{ odcs_pungi_runroot_target_dir_url }}"
{% if odcs_allowed_source_types %}
ALLOWED_SOURCE_TYPES = {{ odcs_allowed_source_types }}
{% endif %}
{% if odcs_allowed_flags %}
ALLOWED_FLAGS = {{ odcs_allowed_flags }}
{% endif %}
{% if odcs_allowed_arches %}
ALLOWED_ARCHES = {{ odcs_allowed_arches }}
{% endif %}
{% if odcs_allowed_results %}
ALLOWED_RESULTS = {{ odcs_allowed_results }}
{% endif %}
{% if odcs_allowed_sources %}
ALLOWED_SOURCES = {{ odcs_allowed_sources }}
{% endif %}
CELERY_BROKER_URL = "amqps://odcs-private-queue{{ env_suffix }}@rabbitmq01{{ env_suffix }}.iad2.fedoraproject.org//odcs"
CELERY_CONFIG = {
'certfile': "/etc/odcs/odcs-private-queue.crt",
'keyfile': "/etc/odcs/odcs-private-queue.key",
'ca_certs': "/etc/odcs/ca.crt",
'broker_login_method': "EXTERNAL",
}
{% if odcs_celery_router_config %}
CELERY_ROUTER_CONFIG = {{ odcs_celery_router_config }}
{% endif %}
EXPECTED_BACKEND_NUMBER = {{ odcs_expected_backend_number }}

View file

@ -1,17 +0,0 @@
# Runroot koji configuration: a thin wrapper over the submitted raw_config
# that pins the koji profile per environment and forces symlinking.
from raw_config import *
{% if env == 'staging' %}
koji_profile = 'odcs_stg'
{% else %}
koji_profile = 'odcs_production'
{% endif %}
# We cannot use hardlinks, because ODCS uses a different volume and copying
# just takes a lot of storage and time.
link_type = 'symlink'
{% if env == 'staging' %}
# Staging is used only for testing, so allow only x86_64 composes
tree_arches = ['x86_64']
{% endif %}

View file

@ -1,20 +0,0 @@
# Koji profiles used from inside the runroot ("innercompose"), authenticating
# with the odcs_inner keytab mounted at /etc/kojid/secrets.
# NOTE(review): the [odcs_stg] topurl points at the production kojipkgs host
# (no .stg), unlike the other staging profile in this role — confirm whether
# that was intentional.
[odcs_stg]
server = https://koji.stg.fedoraproject.org/kojihub
weburl = https://koji.stg.fedoraproject.org/koji
topurl = https://kojipkgs.fedoraproject.org/
anon_retry = true
krb_rdns = false
authtype = kerberos
principal = innercompose/odcs.stg.fedoraproject.org@STG.FEDORAPROJECT.ORG
keytab = /etc/kojid/secrets/odcs_inner.keytab
[odcs_production]
server = https://koji.fedoraproject.org/kojihub
weburl = https://koji.fedoraproject.org/koji
topurl = https://kojipkgs.fedoraproject.org/
anon_retry = true
krb_rdns = false
authtype = kerberos
principal = innercompose/odcs.fedoraproject.org@FEDORAPROJECT.ORG
keytab = /etc/kojid/secrets/odcs_inner.keytab

View file

@ -1,24 +0,0 @@
# Unit template for the ODCS celery worker pool ("celery multi"); the
# concurrency and queue list are filled in from the role's variables.
[Unit]
Description=ODCS Celery backend service
After=network.target remote-fs.target nss-lookup.target
[Service]
# "celery multi start" daemonizes, hence Type=forking; %% escapes systemd's
# specifier expansion so celery sees literal %n / %I.
Type=forking
User=odcs-server
Group=odcs-server
WorkingDirectory=/tmp
ExecStart=/bin/sh -c '/usr/bin/celery-3 multi start worker \
-A odcs.server.celery_tasks --pidfile=/var/run/odcs-backend/%%n.pid \
--max-tasks-per-child=10 \
--logfile=/var/log/odcs-backend/%%n%%I.log --loglevel=INFO --concurrency={{ odcs_celery_concurrency }} -Q {{ odcs_celery_queues|join(',') }}'
ExecStop=/bin/sh -c '/usr/bin/celery-3 multi stopwait worker \
--pidfile=/var/run/odcs-backend/%%n.pid'
ExecReload=/bin/sh -c '/usr/bin/celery-3 multi restart worker \
-A odcs.server.celery_tasks --pidfile=/var/run/odcs-backend/%%n.pid \
--max-tasks-per-child=10 \
--logfile=/var/log/odcs-backend/%%n%%I.log --loglevel=INFO --concurrency={{ odcs_celery_concurrency }} -Q {{ odcs_celery_queues|join(',') }}'
[Install]
WantedBy=multi-user.target

View file

@ -1,15 +0,0 @@
# Unit for the celery beat scheduler that drives ODCS's periodic tasks.
[Unit]
Description=ODCS Celery beat service
After=network.target remote-fs.target nss-lookup.target
[Service]
# beat runs in the foreground, so plain Type=simple is enough.
Type=simple
User=odcs-server
Group=odcs-server
WorkingDirectory=/tmp
ExecStart=/bin/sh -c '/usr/bin/celery-3 -A odcs.server.celery_tasks beat --loglevel=debug'
[Install]
WantedBy=multi-user.target

View file

@ -1,21 +0,0 @@
# A sample configuration for fedora-messaging. This file is in the TOML format.
# For complete details on all configuration options, see the documentation.
# The password portion of the URL is empty; combined with the [tls] key/cert
# below this presumably relies on TLS client-certificate auth — verify against
# the broker's user configuration.
amqp_url = "amqps://odcs{{ env_suffix }}:@rabbitmq{{ env_suffix }}.fedoraproject.org/%2Fpubsub"
# The topic_prefix configuration value will add a prefix to the topics of every sent message.
# This is used for migrating from fedmsg, and should not be used afterwards.
{% if env == "staging" %}
topic_prefix = "org.fedoraproject.stg"
{% else %}
topic_prefix = "org.fedoraproject.prod"
{% endif %}
[tls]
ca_cert = "/etc/odcs/ca.crt"
keyfile = "/etc/odcs/odcs-rabbitmq.key"
certfile = "/etc/odcs/odcs-rabbitmq.crt"
[client_properties]
app = "odcs"

View file

@ -1,7 +0,0 @@
---
# Defaults for the odcs/frontend role; override from inventory group_vars.
# Run "odcs-manager upgradedb" during the deploy when true.
odcs_migrate_db: false
# URL path prefix for the WSGI app ('' serves ODCS at the vhost root).
odcs_endpoint: ''
odcs_allowed_named_hosts: []
odcs_allowed_hosts: []
# Emit the HTTP -> HTTPS rewrite rules in the Apache vhost when true.
odcs_force_ssl: false
# Compose output directory served by the frontend.
odcs_target_dir: /srv/odcs

View file

@ -1,3 +0,0 @@
---
dependencies:
- { role: odcs/base }

View file

@ -1,87 +0,0 @@
---
- name: modify selinux so that httpd can serve data from NFS shares if needed
seboolean:
name: "{{item}}"
state: yes
persistent: yes
when: "'enabled' in ansible_selinux.status"
with_items:
# For requesting UserInfo from ipsilon.
- httpd_execmem
# For accessing /srv/odcs/
- httpd_use_nfs
tags:
- odcs
- odcs/frontend
- selinux
- name: Add apache user to odcs-server group.
user:
name: apache
groups: odcs-server
append: yes
notify:
- reload apache
tags:
- odcs
- odcs/frontend
- name: generate the ODCS Apache config
template:
src: etc/httpd/conf.d/odcs.conf.j2
dest: /etc/httpd/conf.d/odcs.conf
owner: apache
group: apache
mode: 0440
notify:
- reload apache
tags:
- odcs
- odcs/frontend
- name: Disable PrivateTmp=true in httpd.service.
lineinfile:
path: /usr/lib/systemd/system/httpd.service
regexp: '^PrivateTmp'
line: 'PrivateTmp=false'
notify:
- reload systemd
- restart apache
tags:
- odcs
- odcs/frontend
- name: ensure selinux lets httpd talk to postgres
seboolean: name={{item}} state=yes persistent=yes
with_items:
- httpd_can_network_connect_db
- httpd_can_network_connect
when: "'enabled' in ansible_selinux.status"
tags:
- odcs
- odcs/frontend
- selinux
- name: make httpd logs world readable
file:
name: /var/log/httpd
state: directory
mode: 0755
tags:
- odcs
- odcs/frontend
# This will initialize Alembic if the database is empty, and migrate to the
# latest revision
- name: migrate the database
command: "{{ item }}"
with_items:
- odcs-manager upgradedb
become: yes
become_user: odcs-server
when: odcs_migrate_db
tags:
- odcs
- odcs/frontend

View file

@ -1,73 +0,0 @@
{% if odcs_force_ssl %}
# Force SSL
RewriteEngine On
RewriteCond %{HTTPS} off
RewriteRule (.*) https://%{HTTP_HOST}%{REQUEST_URI}
{% endif %}
WSGIDaemonProcess odcs user=odcs-server group=apache processes={{wsgi_procs}} threads={{wsgi_threads}}
WSGIScriptAlias /{{ odcs_endpoint }} /usr/share/odcs/odcs.wsgi
{% if env == 'staging' %}
OIDCOAuthClientID {{ odcs_stg_oidc_client_id }}
OIDCOAuthClientSecret {{ odcs_stg_oidc_client_secret }}
OIDCOAuthIntrospectionEndpoint https://id.stg.fedoraproject.org/openidc/TokenInfo
{% else %}
OIDCOAuthClientID {{ odcs_prod_oidc_client_id }}
OIDCOAuthClientSecret {{ odcs_prod_oidc_client_secret }}
OIDCOAuthIntrospectionEndpoint https://id.fedoraproject.org/openidc/TokenInfo
{% endif %}
OIDCOAuthIntrospectionEndpointAuth client_secret_post
OIDCOAuthIntrospectionEndpointParams token_type_hint=Bearer
<Directory /usr/share/odcs>
WSGIProcessGroup odcs
WSGIApplicationGroup %{GLOBAL}
<RequireAll>
{% if odcs_allowed_named_hosts or odcs_allowed_hosts %}
<RequireAny>
Require method GET POST DELETE
{% if odcs_allowed_named_hosts %}
{{ 'Require host ' ~ odcs_allowed_named_hosts|join(' ') }}
{% endif %}
{% if odcs_allowed_hosts %}
{{ 'Require ip ' ~ odcs_allowed_hosts|join(' ') }}
{% endif %}
</RequireAny>
Require all granted
{% else %}
AuthType oauth20
<Limit GET HEAD OPTIONS>
Require all granted
</Limit>
<Limit POST PATCH PUT DELETE>
Require valid-user
</Limit>
{% endif %}
</RequireAll>
</Directory>
Alias "/composes" "{{ odcs_target_dir }}"
<Directory {{ odcs_target_dir }}>
{% if odcs_allowed_named_hosts or odcs_allowed_hosts %}
<RequireAny>
Require method GET
{% if odcs_allowed_named_hosts %}
{{ 'Require host ' ~ odcs_allowed_named_hosts|join(' ') }}
{% endif %}
{% if odcs_allowed_hosts -%}
{{ 'Require ip ' ~ odcs_allowed_hosts|join(' ') }}
{% endif %}
</RequireAny>
{% else %}
Require all granted
{% endif %}
Options +Indexes
</Directory>
<Directory "{{ odcs_target_dir }}/private">
Require all denied
</Directory>

View file

@ -783,38 +783,6 @@ nuancier.stg:
done: false
link:
time:
odcs-private-queue.stg:
path: odcs-private-queue.stg.crt
user: t0xic0der
certstat:
cstarted:
cstopped:
daystobt: 0
daystodd: 0
issuauth:
serialno:
stopdate:
strtdate:
notistat:
done: false
link:
time:
odcs.stg:
path: odcs.stg.crt
user: t0xic0der
certstat:
cstarted:
cstopped:
daystobt: 0
daystodd: 0
issuauth:
serialno:
stopdate:
strtdate:
notistat:
done: false
link:
time:
openqa.stg:
path: openqa.stg.crt
user: t0xic0der

View file

@ -8,7 +8,6 @@ data:
redis_url: ${REDIS_URL}
redis_password: ${REDIS_PASSWORD}
koji_config: fedora
odcs_uri: https://odcs{{ env_suffix }}.fedoraproject.org/
deltas_dir: ${OUTPUT_DIR}/deltas/
icons_dir: ${OUTPUT_DIR}/icons/
{% if env == 'staging' %}

View file

@ -96,7 +96,7 @@ refuse options = checksum
[ fedora-eln ]
comment = Fedora ELN composes
path = /./mnt/odcs
path = /srv/pub/eln/1/
{% if inventory_hostname in groups['download_tier1'] %}
hosts allow = {% for host in vars['dl_tier1'] %}{{host}},{% endfor %}
{% endif %}

View file

@ -11,7 +11,6 @@ scp db01.iad2.fedoraproject.org:/backups/koschei-$(date +%F).dump.xz /srv/web/in
scp db01.iad2.fedoraproject.org:/backups/bodhi2-$(date +%F).dump.xz /srv/web/infra/db-dumps/bodhi2.dump.xz
scp db01.iad2.fedoraproject.org:/backups/anitya-public-$(date +%F).dump.xz /srv/web/infra/db-dumps/anitya.dump.xz
scp db01.iad2.fedoraproject.org:/backups/mailman-$(date +%F).dump.xz /srv/web/infra/db-dumps/mailman.dump.xz
scp db01.iad2.fedoraproject.org:/backups/odcs-$(date +%F).dump.xz /srv/web/infra/db-dumps/odcs.dump.xz
scp db01.iad2.fedoraproject.org:/backups/hyperkitty-$(date +%F).dump.xz /srv/web/infra/db-dumps/hyperkitty.dump.xz
scp db01.iad2.fedoraproject.org:/backups/resultsdb-$(date +%F).dump.xz /srv/web/infra/db-dumps/resultsdb.dump.xz
scp db01.iad2.fedoraproject.org:/backups/waiverdb-$(date +%F).dump.xz /srv/web/infra/db-dumps/waiverdb.dump.xz