[Waiting for fedmsg EOL] Sunset github2fedmsg and fedmsg #2454

Merged
ryanlerch merged 1 commit from fedmsg_sunset into main 2025-02-13 10:09:41 +00:00
199 changed files with 17 additions and 11255 deletions

@@ -14,8 +2,6 @@ exclude_paths:
- scripts
# Ignore playbooks with strange error happening in <unicode string>
- playbooks/groups/copr-db.yml
# Exclude playbook that contains only include_playbook
- playbooks/fedmsgupdate.yml
# Some playbooks contains hardcoded paths, so exclude them from check
- playbooks/list-vms-per-host.yml
- playbooks/set_root_auth_keys.yml

@@ -2,9 +2,7 @@
global
log 127.0.0.1 local0 warning
# Set this to 4096 + 16384
# 16384 for the fedmsg gateway and 4096 for everybody else.
maxconn 20480
maxconn 4096
chroot /var/lib/haproxy
user haproxy
group haproxy

@@ -1,39 +0,0 @@
import operator
def invert_fedmsg_policy(groups, vars, env):
""" Given hostvars that map hosts -> topics, invert that
and return a dict that maps topics -> hosts.
Really, returns a list of tuples -- not a dict.
"""
if env == 'staging':
hosts = groups['staging'] + groups['staging_friendly']
else:
hosts = [h for h in groups['all'] if h not in groups['staging']]
inverted = {}
for host in hosts:
prefix = '.'.join([vars[host]['fedmsg_prefix'],
vars[host]['fedmsg_env']])
fqdn = vars[host].get('fedmsg_fqdn', host)
for cert in vars[host]['fedmsg_certs']:
for topic in cert.get('can_send', []):
key = prefix + '.' + topic
inverted[key] = inverted.get(key, [])
inverted[key].append(cert['service'] + '-' + fqdn)
result = list(inverted.items())
# Sort things so they come out in a reliable order (idempotence)
[inverted[key].sort() for key in inverted]
result.sort(key=operator.itemgetter(0))
return result
class FilterModule(object):
def filters(self):
return {
"invert_fedmsg_policy": invert_fedmsg_policy,
}
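For reference, the removed filter inverted the per-host "which topics may this cert sign" declarations into a topic -> allowed-signers map. A minimal sketch with made-up hostvars:

groups = {"all": ["host1"], "staging": [], "staging_friendly": []}
hostvars = {"host1": {"fedmsg_prefix": "org.fedoraproject", "fedmsg_env": "prod",
                      "fedmsg_certs": [{"service": "shell", "can_send": ["logger.log"]}]}}
invert_fedmsg_policy(groups, hostvars, "production")
# -> [("org.fedoraproject.prod.logger.log", ["shell-host1"])]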

@@ -17,27 +17,6 @@
- name: Restart crond
action: service name=crond state=restarted
- name: Restart fedmsg-gateway
ansible.builtin.command: /usr/local/bin/conditional-restart.sh fedmsg-gateway
- name: Restart fedmsg-hub
ansible.builtin.command: /usr/local/bin/conditional-restart.sh {{ item }}
with_items:
- fedmsg-hub
- fedmsg-hub-3
- name: Restart fedmsg-hub-3
ansible.builtin.command: /usr/local/bin/conditional-restart.sh {{ item }}
with_items:
- fedmsg-hub
- fedmsg-hub-3
- name: Restart fedmsg-irc
ansible.builtin.command: /usr/local/bin/conditional-restart.sh fedmsg-irc
- name: Restart fedmsg-relay
ansible.builtin.command: /usr/local/bin/conditional-restart.sh fedmsg-relay
- name: Restart fm-consumer@koji_sync_listener
action: service name=fm-consumer@koji_sync_listener state=restarted enabled=yes

@@ -91,13 +91,6 @@ nft_custom6_rules: []
nft_custom_rules: []
# most of our systems are in IAD2
datacenter: iad2
# These are used to:
# 1) configure mod_wsgi
# 2) open iptables rules for fedmsg (per wsgi thread)
# 3) declare enough fedmsg endpoints for the service
#wsgi_fedmsg_service: bodhi
#wsgi_procs: 4
#wsgi_threads: 4
# Datanommer
datanommer_db_hostname: db-datanommer02
@@ -120,33 +113,6 @@ env_suffix: ""
eth0_ipv4_nm: 24
eth1_ip: 10.0.0.10
eth1_nm: 255.255.255.0
# By default, fedmsg hosts are in passive mode. External hosts are typically
# active.
fedmsg_active: False
# By default, nodes get no fedmsg certs. They need to declare them explicitly.
fedmsg_certs: []
# A special flag that, when set to true, will disconnect the host from the
# global fedmsg-relay instance and set it up with its own local one. You can
# temporarily set this to true for a specific host to do some debugging -- so
# you can *replay real messages from the datagrepper history without having
# those broadcast to the rest of the bus*.
fedmsg_debug_loopback: False
fedmsg_env: prod
# By default, fedmsg sends error logs to sysadmin-datanommer-members@fp.o.
fedmsg_error_recipients:
- sysadmin-datanommer-members@fedoraproject.org
# By default, fedmsg should not log debug info. Groups can override this.
fedmsg_loglevel: INFO
# Amount of time to wait for connections after a socket is first established.
fedmsg_post_init_sleep: 1.0
# Other defaults for fedmsg environments
fedmsg_prefix: org.fedoraproject
# Everywhere, always, we should sign messages and validate signatures.
# However, we allow individual hosts and groups to override this. Use this very
# carefully.. and never in production (good for testing stuff in staging).
fedmsg_sign_messages: True
fedmsg_validate_signatures: True
#
# END: Ansible roles_path variables
#######
freezes: true
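For context, the fedmsg_* defaults above fed the fedmsg configuration under /etc/fedmsg.d/ via roles/fedmsg/base. A rough sketch of the rendered result, assuming the standard fedmsg option names (the actual template is not part of this hunk):

config = dict(
    topic_prefix="org.fedoraproject",  # fedmsg_prefix
    environment="prod",                # fedmsg_env
    active=False,                      # fedmsg_active: passive hosts bind, active hosts connect out
    post_init_sleep=1.0,               # fedmsg_post_init_sleep
    sign_messages=True,                # fedmsg_sign_messages
    validate_signatures=True,          # fedmsg_validate_signatures
)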

@@ -11,7 +11,6 @@ ansible_ifcfg_allowlist:
# https://bugzilla.redhat.com/show_bug.cgi?id=1283364
custom_rules: ['-A INPUT --proto tcp --sport 44334 --source 10.3.169.120 -j ACCEPT']
nft_custom_rules: ['add rule ip filter INPUT ip saddr 10.3.169.120 tcp sport 44334 counter accept']
fedmsg_error_recipients: []
host_group: autosign
ipa_client_shell_groups:
- sysadmin-releng

@@ -10,19 +10,6 @@ nft_custom_rules:
# This host is externally reachable
#
external: true
fedmsg_certs:
- can_send:
- ansible.playbook.complete
- ansible.playbook.start
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- infragit.receive
group: sysadmin
owner: root
service: scm
freezes: true
ipa_client_shell_groups:
- fi-apprentice

@@ -19,8 +19,6 @@ ipa_host_group: bodhi
ipa_host_group_desc: Bodhi update service
lvm_size: 100000
mem_size: 16384
## XXX -- note that the fedmsg_certs declaration does not happen here, but
# happens instead at the inventory/host_vars/ level
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,sec=sys,nfsvers=3"
nrpe_procs_crit: 1000
# With 16 cpus, there's a bunch more kernel threads

@@ -17,6 +17,6 @@ source_registry: "registry.fedoraproject.org"
notes: |
Koji service employs a set of machines to build packages for the Fedora project.
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Relies on koji-hub, Packages, PkgDB, apache, fedora messaging, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new

@@ -27,6 +27,6 @@ notes: |
Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
* VMs built on top of buildvmhost
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Relies on koji-hub, Packages, PkgDB, apache, fedora messaging, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new

@@ -27,6 +27,6 @@ volgroup: /dev/vg_guests
notes: |
Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
* VMs built on top of buildvmhost
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Relies on koji-hub, Packages, PkgDB, apache, fedora messaging, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new

@@ -30,7 +30,7 @@ volgroup: /dev/vg_virt_buildvm_ppc64le_iscsi
notes: |
Koji service employs a set of virtual machines to build packages for the Fedora project. This group builds packages for ppcle architecture.
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Relies on koji-hub, Packages, PkgDB, apache, fedora messaging, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new
* virtual instances

@@ -27,6 +27,6 @@ volgroup: /dev/vg_guests
notes: |
Koji service employs a set of machines to build packages for the Fedora project. This playbook builds vm builders.
* VMs built on top of a s390x LPAR
* Relies on koji-hub, Packages, PkgDB, apache, fedmsg, fas, virthost, and is monitored by nagios
* Relies on koji-hub, Packages, PkgDB, apache, fedora messaging, fas, virthost, and is monitored by nagios
* Several services rely on the builders, including koschei, Bodhi, Tagger, SCM, Darkserver.
* Produces automated builds of packages for the architecture listed. Builders can be scaled by adding new

@@ -1,30 +0,0 @@
---
# Define resources for this group of hosts here.
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
ipa_client_shell_groups:
- sysadmin-datanommer
- sysadmin-noc
- sysadmin-veteran
ipa_client_sudo_groups:
- sysadmin-datanommer
- sysadmin-noc
ipa_host_group: busgateway
ipa_host_group_desc: Bridge between fedmsg and fedora-messaging
lvm_size: 20000
mem_size: 8192
num_cpus: 2
primary_auth_source: ipa
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [3999, # The fedmsg-relay republishes here. Listeners need to connect.
9941, # The fedmsg-relay listens here. Ephemeral producers connect.
3998, # The fedmsg-relay listens here. VPN producers connect.
9940, # The fedmsg-gateway republishes here. Proxies need to connect.
9919, # The websocket server publishes here. Proxies need to connect.
]
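For context, port 9940 above was the public firehose: consumers attached a ZeroMQ SUB socket to the gateway and filtered by topic prefix. A minimal sketch, assuming the historical public endpoint name:

import json
import zmq

ctx = zmq.Context()
sub = ctx.socket(zmq.SUB)
sub.connect("tcp://hub.fedoraproject.org:9940")      # fedmsg-gateway republish port
sub.setsockopt_string(zmq.SUBSCRIBE, "org.fedoraproject.prod.")
while True:
    topic, payload = sub.recv_multipart()            # fedmsg frames: [topic, JSON body]
    print(topic.decode("utf-8"), json.loads(payload))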

@@ -1,74 +0,0 @@
---
# Define resources for this group of hosts here.
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
nft_custom_rules:
- 'add rule ip filter INPUT ip saddr 10.3.163.39 tcp dport 873 counter accept'
- 'add rule ip filter INPUT ip saddr 192.168.1.59 tcp dport 873 counter accept'
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- github.commit_comment
- github.create
- github.delete
- github.fork
- github.gollum
- github.issue.assigned
- github.issue.closed
- github.issue.comment
- github.issue.edited
- github.issue.labeled
- github.issue.milestone
- github.issue.opened
- github.issue.reopened
- github.issue.unassigned
- github.issue.unlabeled
- github.label
- github.member
- github.page_build
- github.pull_request.assigned
- github.pull_request.closed
- github.pull_request.edited
- github.pull_request.labeled
- github.pull_request.opened
- github.pull_request_review
- github.pull_request_review_comment
- github.pull_request.review_requested
- github.pull_request.synchronize
- github.pull_request.unlabeled
- github.push
- github.release
- github.repository_vulnerability_alert
- github.star
- github.status
- github.team_add
- github.webhook
group: apache
owner: root
service: github2fedmsg
ipa_client_shell_groups:
- sysadmin-noc
- sysadmin-veteran
ipa_host_group: github2fedmsg
ipa_host_group_desc: Bridge select GitHub repo events into bus messages
lvm_size: 20000
mem_size: 2048
num_cpus: 2
primary_auth_source: ipa
tcp_ports: [80]
# for fedora-messaging
username: "github2fedmsg{{ env_suffix }}"
user_sent_topics: ^org\.fedoraproject\.{{ env_short }}\.github\..*
# Defining these vars has a number of effects
# 1) mod_wsgi is configured to use the vars for its own setup
# 2) iptables opens enough ports for all threads for fedmsg
# 3) roles/fedmsg/base/ declares enough fedmsg endpoints for all threads
wsgi_fedmsg_service: github2fedmsg
wsgi_procs: 2
wsgi_threads: 2
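For reference, user_sent_topics above constrains which topics this service's RabbitMQ user may publish. Assuming env_short renders to "prod", the pattern behaves like:

import re

pattern = re.compile(r"^org\.fedoraproject\.prod\.github\..*")
bool(pattern.match("org.fedoraproject.prod.github.push"))         # True
bool(pattern.match("org.fedoraproject.prod.pagure.git.receive"))  # False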

@@ -1,9 +0,0 @@
---
# XXX - this is not really a group of real hosts.
# Instead, it represents an application in openshift.
# See playbooks/openshift-apps/greenwave.yml
fedmsg_certs:
- can_send:
- logger.log
- greenwave.decision.update
service: greenwave

@@ -8,25 +8,6 @@ nft_custom_rules:
- 'add rule ip filter INPUT ip daddr 224.0.0.0/8 counter accept'
- 'add rule ip filter INPUT ip protocol vrrp counter accept'
docker_registry: "candidate-registry.fedoraproject.org"
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- buildsys.build.state.change
- buildsys.package.list.change
- buildsys.repo.done
- buildsys.repo.init
- buildsys.rpm.sign
- buildsys.tag
- buildsys.task.state.change
- buildsys.untag
group: apache
owner: root
service: koji
ipa_client_shell_groups:
- sysadmin-releng
ipa_client_sudo_groups:
@@ -44,8 +25,6 @@ primary_auth_source: ipa
source_registry: "registry.fedoraproject.org"
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [80, 443, 111, 2049,
# These 8 ports are used by fedmsg. One for each wsgi thread.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007]
tcp_ports: [80, 443, 111, 2049]
udp_ports: [111, 2049]
virt_install_command: "{{ virt_install_command_two_nic }}"

@@ -1,18 +1,6 @@
---
# common items for the releng-* boxes
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- mailman.receive
group: mailman
owner: mailman
service: mailman
ipa_client_shell_groups:
- sysadmin-tools
ipa_client_sudo_groups:
@@ -66,9 +54,7 @@ num_cpus: 4
# Postfix main.cf
postfix_group: mailman
primary_auth_source: ipa
tcp_ports: [25, 80, 443,
# For outgoing fedmsg
3000, 3001, 3002, 3003]
tcp_ports: [25, 80, 443]
# mailman role variables
mailman_rest_api_username: "{{ mailman_rest_api_user }}"

@@ -15,19 +15,6 @@ exclude_iad2_hostgroups:
- zabbix_stg
- zabbix
- logdetective
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- nagios.host.state.change
- nagios.service.state.change
group: nagios
owner: root
service: nagios
#iad2_management_slowping:
# - ppc8-01-fsp.mgmt.fedoraproject.org
# - ppc8-02-fsp.mgmt.fedoraproject.org

@@ -1,29 +1,7 @@
---
# Define resources for this group of hosts here.
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- # The shell cert needs to be allowed to send these too so it can do alembic
# upgrades that trigger messages.
can_send:
- fmn.filter.update
- fmn.preference.update
- fmn.rule.update
- fmn.confirmation.update
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fmn.filter.update
- fmn.preference.update
- fmn.rule.update
- fmn.confirmation.update
group: fedmsg
owner: root
service: fmn
# For performance measurement.. for now. This can be removed whenever.
fedmsg_loglevel: DEBUG
lvm_size: 65536
max_mem_size: "{{ mem_size }}"
mem_size: 24576

@@ -1,21 +1,6 @@
---
# Define resources for this group of hosts here.
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- fmn.filter.update
- fmn.preference.update
- fmn.rule.update
- fmn.confirmation.update
group: apache
owner: root
service: fmn
lvm_size: 20000
mem_size: 1024
num_cpus: 2

@@ -16,8 +16,6 @@ deployment_type: stg
# this won't really work, there's not going to be any way to get at
# the webui from outside the box, but we gotta set it to something
external_hostname: openqa.oneboxtest.fedoraproject.org
# makes sure it sends stg not prod fedmsgs
fedmsg_env: stg
freezes: false
gw: 10.3.174.254

@@ -16,46 +16,6 @@ nft_custom_rules:
db_backup_dir: ['/backups']
dbs_to_backup: ['pagure']
env: pagure
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- pagure.git.receive
- pagure.issue.assigned.added
- pagure.issue.assigned.reset
- pagure.issue.comment.added
- pagure.issue.comment.edited
- pagure.issue.dependency.added
- pagure.issue.dependency.removed
- pagure.issue.drop
- pagure.issue.edit
- pagure.issue.new
- pagure.issue.tag.added
- pagure.issue.tag.removed
- pagure.project.deleted
- pagure.project.edit
- pagure.project.forked
- pagure.project.group.added
- pagure.project.new
- pagure.project.tag.edited
- pagure.project.tag.removed
- pagure.project.user.access.updated
- pagure.project.user.added
- pagure.pull-request.closed
- pagure.pull-request.comment.added
- pagure.pull-request.flag.added
- pagure.pull-request.flag.updated
- pagure.pull-request.new
- pagure.request.assigned.added
group: apache
owner: git
service: pagure
fedmsg_env: prod
fedmsg_prefix: io.pagure
freezes: true
host_backup_targets: ['/srv/git', '/var/www/releases']
ipa_client_shell_groups:
@@ -81,9 +41,7 @@ stunnel_source_port: 8088
# the host_vars/$hostname file
tcp_ports: [22, 25, 80, 443, 8442, 8443, 8444, 8445,
# Used for the eventsource
8088,
# This is for the pagure public fedmsg relay
9940]
8088]
vpn: true
notes: |

@@ -7,17 +7,6 @@ clamscan_paths:
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']
nft_custom_rules: ['add rule ip filter INPUT ip saddr 192.168.1.59 tcp dport 873 counter accept']
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- planet.post.new
group: planet-user
owner: root
service: planet
git_basepath: /
git_daemon_user: nobody
git_port: 9418

@@ -24,58 +24,6 @@ clamscan_paths:
# This host is externally reachable
#
external: true
fedmsg_active: True
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- git.branch
- git.mass_branch.complete
- git.mass_branch.start
- logger.log
- pagure.git.receive
group: sysadmin
owner: root
service: shell
- can_send:
- git.receive
- pagure.git.receive
group: packager
owner: root
service: scm
- can_send:
- git.lookaside.new
group: apache
owner: root
service: lookaside
- can_send:
- pagure.git.receive
- pagure.issue.assigned.added
- pagure.issue.assigned.reset
- pagure.issue.comment.added
- pagure.issue.dependency.added
- pagure.issue.dependency.removed
- pagure.issue.edit
- pagure.issue.new
- pagure.issue.tag.added
- pagure.issue.tag.removed
- pagure.project.edit
- pagure.project.forked
- pagure.project.group.added
- pagure.project.new
- pagure.project.tag.edited
- pagure.project.tag.removed
- pagure.project.user.added
- pagure.project.user.removed
- pagure.pull-request.closed
- pagure.pull-request.comment.added
- pagure.pull-request.comment.edited
- pagure.pull-request.flag.added
- pagure.pull-request.flag.updated
- pagure.pull-request.new
- pagure.request.assigned.added
group: apache
owner: pagure
service: pagure
ipa_client_shell_groups:
- packager
- sysadmin-cvs

@@ -11,10 +11,6 @@ custom_rules: [
'-A INPUT -p tcp -m tcp -s 127.0.0.1 --dport 6081 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 127.0.0.1 --dport 6082 -j ACCEPT',
# also allow varnish from internal for purge requests
'-A INPUT -p tcp -m tcp -s 192.168.1.0/24 --dport 6081 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 10.3.163.0/24 --dport 6081 -j ACCEPT',
# Allow happinesspackets.fedorainfracloud.org to talk to inbound fedmsg relay.
'-A INPUT -p tcp -m tcp --dport 9941 -s 209.132.184.58 -j ACCEPT',
# Allow openqa01 to talk to the inbound fedmsg relay.
'-A INPUT -p tcp -m tcp --dport 9941 -s 10.3.174.0/24 -j ACCEPT',
'-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.120 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.121 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.122 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.123 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.124 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.125 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.126 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.65 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.127 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.128 -j ACCEPT', '-A INPUT -p tcp -m tcp --dport 22623 -s 10.3.163.129 -j ACCEPT']
nft_custom_rules:
# Needed for rsync from log01 for logs.
@@ -27,10 +23,6 @@ nft_custom_rules:
# also allow varnish from internal for purge requests
- 'add rule ip filter INPUT ip saddr 192.168.1.0/24 tcp dport 6081 counter accept'
- 'add rule ip filter INPUT ip saddr 10.3.163.0/24 tcp dport 6081 counter accept'
# Allow happinesspackets.fedorainfracloud.org to talk to inbound fedmsg relay.
- 'add rule ip filter INPUT ip saddr 209.132.184.58 tcp dport 9941 counter accept'
# Allow openqa01 to talk to the inbound fedmsg relay.
- 'add rule ip filter INPUT ip saddr 10.3.174.0/24 tcp dport 9941 counter accept'
- 'add rule ip filter INPUT ip saddr 10.3.163.120 tcp dport 22623 counter accept'
- 'add rule ip filter INPUT ip saddr 10.3.163.121 tcp dport 22623 counter accept'
- 'add rule ip filter INPUT ip saddr 10.3.163.122 tcp dport 22623 counter accept'
@@ -92,12 +84,6 @@ tcp_ports: [
15671,
# This is for TOTP
8443,
# For fedmsg websocket server over stunnel
9939,
# For fedmsg raw zeromq socket (outbound)
9940,
# 9941 is closed generally, is for the inbound fedmsg and is covered in
# custom_rules
]
varnish_group: proxies
zabbix_templates:

@@ -1,32 +1,6 @@
---
# common items for the releng-* boxes
dns: 10.3.163.33
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: root
owner: root
service: shell
- can_send:
- pungi.compose.phase.start
- pungi.compose.phase.stop
- pungi.compose.status.change
- pungi.compose.createiso.targets
- pungi.compose.createiso.imagefail
- pungi.compose.createiso.imagedone
- pungi.compose.ostree
- compose.27.complete
- compose.27.start
- compose.28.complete
- compose.28.rsync.complete
- compose.28.rsync.start
- compose.28.start
- compose.29.complete
- compose.29.start
group: masher
owner: root
service: releng
freezes: true
host_group: releng
ipa_client_shell_groups:

@@ -6,38 +6,6 @@ nft_custom_rules:
- 'add rule ip filter INPUT ip saddr 10.5.78.11 tcp dport 2049 counter accept'
- 'add rule ip filter INPUT ip saddr 10.5.78.11 tcp dport 5432 counter accept'
env: production
# Since retrace is on the qa network, it needs to actively connect to our
# inbound relay.
fedmsg_active: True
fedmsg_cert_prefix: faf
# Declare fedmsg certs that should be put in /etc/pki/fedmsg/
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: retrace
owner: root
service: shell
- can_send:
- faf.report.threshold1
- faf.report.threshold10
- faf.report.threshold100
- faf.report.threshold1000
- faf.report.threshold1000
- faf.report.threshold10000
- faf.report.threshold100000
- faf.report.threshold1000000
- faf.problem.threshold1
- faf.problem.threshold10
- faf.problem.threshold100
- faf.problem.threshold1000
- faf.problem.threshold1000
- faf.problem.threshold10000
- faf.problem.threshold100000
- faf.problem.threshold1000000
group: faf
owner: root
service: faf
freezes: false
ipa_client_shell_groups:
- retrace

@@ -18,32 +18,6 @@ nft_custom_rules:
# batcave01 also needs access to announce commits.
- 'add rule ip filter INPUT ip saddr 10.3.163.35 tcp dport 5050 counter accept'
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
# cookies!
- irc.karma
# standard meetbot stuff
- meetbot.meeting.complete
- meetbot.meeting.start
- meetbot.meeting.topic.update
# meetbot line items
- meetbot.meeting.item.agreed
- meetbot.meeting.item.accepted
- meetbot.meeting.item.rejected
- meetbot.meeting.item.action
- meetbot.meeting.item.info
- meetbot.meeting.item.idea
- meetbot.meeting.item.help
- meetbot.meeting.item.link
group: daemon
owner: root
service: supybot
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-mote
@@ -61,13 +35,10 @@ num_cpus: 2
primary_auth_source: ipa
# for systems that do not match the above - specify the same parameter in
# the host_vars/$hostname file
tcp_ports: [80, 443,
# These 16 ports are used by fedmsg. One for each wsgi thread.
3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015]
tcp_ports: [80, 443]
notes: |
Hosts services which help facilitate communication over IRC and related mediums.
There are a couple things running here.
* zodbot, a supybot instance. See the zodbot SOP for more info.
* fedmsg-irc, our fedmsg to IRC relay. 'journalctl -u fedmsg-irc'
* mote, a webapp running behind httpd that serves meetbot log files.

@@ -1,9 +0,0 @@
---
# XXX - this is not really a group of real hosts.
# Instead, it represents an application in openshift.
# See playbooks/openshift-apps/waiverdb.yml
fedmsg_certs:
- can_send:
- logger.log
- waiverdb.waiver.new
service: waiverdb

@@ -1,19 +1,6 @@
---
# Define resources for this group of hosts here.
deployment_type: prod
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
- can_send:
- logger.log
group: sysadmin
owner: root
service: shell
- can_send:
- wiki.article.edit
- wiki.upload.complete
group: apache
owner: root
service: mediawiki
ipa_client_shell_groups:
- fi-apprentice
- sysadmin-noc

@@ -1,78 +1,11 @@
---
# These set a config value in /etc/fedmsg.d/, see roles/bodhi2/base/
# These set a config value, see roles/bodhi2/base/
bodhi_masher_enabled: True
bodhi_signed_handler_enabled: False
bodhi_updates_handler_enabled: False
csi_primary_contact: Releng Admins sysadmin-releng-members@fedoraproject.org
csi_purpose: Run the Bodhi masher.
csi_relationship: |
The mashing of repos here happens as part of the 'fedmsg-hub' daemon. Check
logs with 'journalctl -u fedmsg-hub'. Check the bodhi masher docs/code for
more detail on what it does:
https://github.com/fedora-infra/bodhi/blob/develop/bodhi/consumers/masher.py
* This host relies on:
* db01 for its database, which it shares with the bodhi2 frontend nodes.
* An NFS mount of koji data in /mnt/koji/
* The fedmsg bus for triggering mashes.
* XMLRPC calls to koji for tagging and untagging updates.
* bugzilla for posting comments about status changes
* the wiki for getting information about QA "Test Cases"
* taskotron (resultsdb) for getting status-check results (gating updates).
* No other systems rely directly on this host. Everything depends on it
indirectly for the creation of new updates repos (which get synced out to
the master mirror for distribution).
# For the MOTD
csi_security_category: Medium
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.101
# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
# These are certs for pungi
- can_send:
# new school pungi-koji stuff (ask dgilmore)
- pungi.compose.phase.start
- pungi.compose.phase.stop
- pungi.compose.status.change
- pungi.compose.createiso.targets
- pungi.compose.ostree
- releng.atomic.twoweek.begin
- releng.atomic.twoweek.complete
group: sysadmin-releng
owner: apache
service: releng
# These are certs for the masher to publish its own messages as it progresses.
- can_send:
- bodhi.mashtask.complete
- bodhi.mashtask.mashing
- bodhi.mashtask.start
- bodhi.mashtask.sync.done
- bodhi.mashtask.sync.wait
- bodhi.ostree.compose.start
- bodhi.ostree.compose.fail
- bodhi.ostree.compose.finish
- bodhi.errata.publish
- bodhi.update.eject
- bodhi.update.complete.testing
- bodhi.update.complete.stable
- bodhi.update.request.testing
- bodhi.update.request.stable
- bodhi.update.request.batched
- bodhi.update.karma.threshold.reach
- bodhi.buildroot_override.untag
- bodhi.update.comment
- bodhi.update.requirements_met.stable
group: apache
owner: root
service: bodhi
- can_send:
- bodhi.updates.epel.sync
- bodhi.updates.fedora.sync
group: ftpsync
owner: root
service: ftpsync
ks_repo: https://infrastructure.fedoraproject.org/pub/fedora/linux/releases/40/Server/x86_64/os/
ks_url: https://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-fedora
max_mem_size: 98304

@@ -1,38 +1,6 @@
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.121
fedmsg_certs:
- can_send:
- pungi.compose.phase.start
- pungi.compose.phase.stop
- pungi.compose.status.change
- pungi.compose.createiso.targets
- pungi.compose.createiso.imagefail
- pungi.compose.createiso.imagedone
- pungi.compose.ostree
- compose.branched.complete
- compose.branched.mash.complete
- compose.branched.mash.start
- compose.branched.image.complete
- compose.branched.image.start
- compose.branched.pungify.complete
- compose.branched.pungify.start
- compose.branched.rsync.complete
- compose.branched.rsync.start
- compose.branched.start
- compose.bikeshed.complete
- compose.bikeshed.mash.complete
- compose.bikeshed.mash.start
- compose.bikeshed.image.complete
- compose.bikeshed.image.start
- compose.bikeshed.pungify.complete
- compose.bikeshed.pungify.start
- compose.bikeshed.rsync.complete
- compose.bikeshed.rsync.start
- compose.bikeshed.start
group: sysadmin-releng
owner: root
service: releng
freezes: true
koji_hub_nfs: "fedora_koji"
kojihub_scheme: https

@@ -2,23 +2,6 @@
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.122
fedmsg_certs:
- can_send:
# new school pungi-koji stuff (ask dgilmore)
- pungi.compose.phase.start
- pungi.compose.phase.stop
- pungi.compose.status.change
- pungi.compose.createiso.targets
- pungi.compose.createiso.imagefail
- pungi.compose.createiso.imagedone
- pungi.compose.ostree
- compose.29.complete
- compose.29.start
- compose.29.rsync.start
- compose.29.rsync.complete
group: sysadmin-releng
owner: root
service: releng
# This VM is the compose host for IoT Edition
freezes: false
koji_hub_nfs: "fedora_koji"

@@ -1,38 +1,6 @@
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.123
fedmsg_certs:
- can_send:
- pungi.compose.phase.start
- pungi.compose.phase.stop
- pungi.compose.status.change
- pungi.compose.createiso.targets
- pungi.compose.createiso.imagefail
- pungi.compose.createiso.imagedone
- pungi.compose.ostree
- compose.rawhide.complete
- compose.rawhide.mash.complete
- compose.rawhide.mash.start
- compose.rawhide.image.complete
- compose.rawhide.image.start
- compose.rawhide.pungify.complete
- compose.rawhide.pungify.start
- compose.rawhide.rsync.complete
- compose.rawhide.rsync.start
- compose.rawhide.start
- compose.bikeshed.complete
- compose.bikeshed.mash.complete
- compose.bikeshed.mash.start
- compose.bikeshed.image.complete
- compose.bikeshed.image.start
- compose.bikeshed.pungify.complete
- compose.bikeshed.pungify.start
- compose.bikeshed.rsync.complete
- compose.bikeshed.rsync.start
- compose.bikeshed.start
group: sysadmin-releng
owner: root
service: releng
# rawhide is never frozen, so the compose box should not be frozen either; that way we can make needed changes
freezes: false
koji_hub_nfs: "fedora_koji"

@@ -1,46 +1,6 @@
datacenter: iad2
eth0_ipv4_gw: 10.3.169.254
eth0_ipv4_ip: 10.3.169.124
fedmsg_certs:
- can_send:
# two-week-atomic stuff (ask maxamillion)
- releng.atomic.twoweek.begin
- releng.atomic.twoweek.complete
# new school pungi-koji stuff (ask dgilmore)
- pungi.compose.phase.start
- pungi.compose.phase.stop
- pungi.compose.status.change
- pungi.compose.createiso.targets
- pungi.compose.createiso.imagefail
- pungi.compose.createiso.imagedone
- pungi.compose.ostree
# traditional old school compose stuff
- compose.branched.complete
- compose.branched.mash.complete
- compose.branched.mash.start
- compose.branched.image.complete
- compose.branched.image.start
- compose.branched.pungify.complete
- compose.branched.pungify.start
- compose.branched.rsync.complete
- compose.branched.rsync.start
- compose.branched.start
- compose.epelbeta.complete
- compose.rawhide.complete
- compose.rawhide.mash.complete
- compose.rawhide.mash.start
- compose.rawhide.image.complete
- compose.rawhide.image.start
- compose.rawhide.pungify.complete
- compose.rawhide.pungify.start
- compose.rawhide.rsync.complete
- compose.rawhide.rsync.start
- compose.rawhide.start
- compose.29.start
- compose.29.complete
group: sysadmin-releng
owner: root
service: releng
koji_hub_nfs: "fedora_koji"
kojihub_scheme: https
kojihub_url: koji.fedoraproject.org/kojihub

@@ -10,7 +10,6 @@ eth0_ipv4_nm: 25
eth0_ipv6_ip: "2600:2701:4000:5211:dead:beef:00a7:9475"
eth0_ipv6_gw: "2600:2701:4000:5211::1"
eth0_ipv6_nm: 104
fedmsg_fqdn: people01.vpn.fedoraproject.org
freezes: false
#host_backup_targets: ['/srv/web']

@@ -87,15 +87,9 @@ vmhost-x86-cc05.rdu-cc.fedoraproject.org
ibiblio02.fedoraproject.org
ibiblio05.fedoraproject.org
[busgateway]
busgateway01.iad2.fedoraproject.org
[flatpak_cache]
flatpak-cache01.iad2.fedoraproject.org
[github2fedmsg]
github2fedmsg01.iad2.fedoraproject.org
[mailman]
mailman01.iad2.fedoraproject.org
@@ -653,33 +647,6 @@ wiki01.stg.iad2.fedoraproject.org
wiki01.iad2.fedoraproject.org
wiki02.iad2.fedoraproject.org
# assorted categories of fedmsg services, for convenience
[fedmsg_hubs:children]
busgateway
pkgs
[fedmsg_ircs:children]
value
[fedmsg_relays:children]
busgateway
[fedmsg_gateways:children]
busgateway
proxies
[fedmsg_services:children]
fedmsg_hubs
fedmsg_ircs
fedmsg_relays
fedmsg_gateways
# These are groups that are using the python34 fedmsg stack.
[python34_fedmsg:children]
mailman
## END fedmsg services
#[cloud_hardware]
#virthost-aarch64-os01.fedorainfracloud.org
#virthost-aarch64-os02.fedorainfracloud.org
@@ -968,7 +935,6 @@ buildvm
buildvm_aarch64
buildvm_ppc64le
bkernel
busgateway
bvirthost
certgetter
dbserver
@@ -976,7 +942,6 @@ debuginfod
dns_iad2
download_iad2
flatpak_cache
github2fedmsg
ipa
ipsilon
koji

@@ -17,7 +17,6 @@
- import_playbook: /srv/web/infra/ansible/playbooks/groups/bodhi-backend.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/buildhw.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/buildvm.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/busgateway.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/certgetter.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/copr-backend.yml
# - import_playbook: /srv/web/infra/ansible/playbooks/groups/copr-db.yml
@@ -30,7 +29,6 @@
- import_playbook: /srv/web/infra/ansible/playbooks/groups/dns.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/download.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/flatpak-cache.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/github2fedmsg.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/ipa.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/ipsilon.yml
- import_playbook: /srv/web/infra/ansible/playbooks/groups/koji-hub.yml

@@ -1,19 +0,0 @@
---
#
# Use this playbook to run over all the playbooks that have fedmsg.d in them.
#
# Call it with -t fedmsgdupdate to only run that one play.
#
- include_playbook: /srv/web/infra/ansible/playbooks/hosts/copr-be.cloud.fedoraproject.org.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/ask.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/bodhi.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/badges-backend.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/badges-web.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/busgateway.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/gallery.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/koji-hub.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/mailman.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/packages.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/releng.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/wiki.yml
- include_playbook: /srv/web/infra/ansible/playbooks/groups/value.yml

@@ -1,58 +0,0 @@
---
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: "busgateway"
- name: Dole out the generic configuration
hosts: busgateway
user: root
gather_facts: true
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- base
- rkhunter
- nagios_client
- zabbix/zabbix_agent
- hosts
- {role: openvpn/client, when: env != "staging"}
- ipa/client
- collectd/base
- fedmsg/base
- sudo
pre_tasks:
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: Dole out the service-specific config
hosts: busgateway
user: root
gather_facts: true
roles:
- role: fedmsg/hub
enable_websocket_server: true
- role: fedmsg/relay
- role: fedmsg/gateway
- role: collectd/fedmsg-service
process: fedmsg-hub
- role: collectd/fedmsg-service
process: fedmsg-relay
- role: collectd/fedmsg-service
process: fedmsg-gateway
- role: collectd/fedmsg-activation
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- "{{ vars_path }}/{{ ansible_distribution }}.yml"
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"

@@ -1,57 +0,0 @@
# create a new github2fedmsg server
# NOTE: should be used with --limit most of the time
# NOTE: make sure there is room/space for this server on the vmhost
# NOTE: most of these vars_path come from group_vars/github2fedmsg* or from hostvars
---
- import_playbook: "/srv/web/infra/ansible/playbooks/include/virt-create.yml"
vars:
myhosts: "github2fedmsg"
- name: Make the box be real
hosts: github2fedmsg
user: root
gather_facts: true
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- base
- rkhunter
- nagios_client
- zabbix/zabbix_agent
- hosts
- {role: openvpn/client,
when: env != "staging"}
- ipa/client
- collectd/base
- rsyncd
- sudo
- mod_wsgi
pre_tasks:
- import_tasks: "{{ tasks_path }}/yumrepos.yml"
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
- name: Deploy service-specific config
hosts: github2fedmsg
user: root
gather_facts: true
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
roles:
- github2fedmsg
- fedmsg/base
- {role: rabbit/user, when: deployment_type == "stg"}

@@ -64,7 +64,6 @@
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- gitolite/check_fedmsg_hooks
- {role: git/make_checkout_seed, when: env != "staging"}
- git/hooks
- git/checks

@@ -37,8 +37,6 @@
- supybot
- sudo
- rsyncd
- role: collectd/fedmsg-service
process: fedmsg-irc
- {role: nfs/client,
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3",
mnt_dir: '/srv/',

@@ -29,17 +29,6 @@
extensions:
- .ico
- role: fedmsg/crl
website: fedoraproject.org
path: /fedmsg
when: env != "staging"
- role: fedmsg/gateway/slave
stunnel_service: "websockets"
stunnel_source_port: 9939
stunnel_destination_port: 9938
when: env != "staging"
- role: httpd/fingerprints
website: admin.fedoraproject.org

@@ -128,14 +128,6 @@
proxyurl: http://value02
when: env == "staging"
- role: httpd/reverseproxy
website: apps.fedoraproject.org
destname: github2fedmsg
localpath: /github2fedmsg
remotepath: /github2fedmsg
header_scheme: true
proxyurl: http://localhost:10037
- role: httpd/reverseproxy
website: apps.fedoraproject.org
destname: fedora-notifications

@@ -14,7 +14,6 @@
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/bodhi-backend.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/bugzilla2fedmsg.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/github2fedmsg.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/ipsilon.yml"
- import_playbook: "/srv/web/infra/ansible/playbooks/groups/buildvm.yml"

@@ -1,95 +0,0 @@
# Restart fedmsg services
#
# The grande list of "what is running and where" is maintained here manually.
# By running this playbook, you're not actually guaranteed that all services
# everywhere will be restarted. As stuff changes over time, this playbook will
# need to be periodically updated with new things.
---
- name: Restart fedmsg-gateway instances
hosts: fedmsg_gateways
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Bounce the fedmsg-gateway service
service: name=fedmsg-gateway state=restarted
- name: Restart fedmsg-relay instances
hosts: fedmsg_relays
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Bounce the fedmsg-relay service
service: name=fedmsg-relay state=restarted
- name: Restart fedmsg-irc instances
hosts: fedmsg_ircs
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Bounce the fedmsg-irc service
service: name=fedmsg-irc state=restarted
- name: Tell nagios to be quiet about FMN for the moment
hosts: notifs_backend
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Schedule a 25 minute downtime. give notifs backend time to start up.
nagios: action=downtime minutes=25 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
# - name: Bounce the fmn-digests service
# service: name=fmn-digests@1 state=restarted
- name: Restart fedmsg-hub instances
hosts: fedmsg_hubs
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Bounce the fedmsg-hub service
service: name=fedmsg-hub state=restarted
- name: Restart moksha-hub instances
hosts: moksha_hubs
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Bounce the moksha-hub service
service: name=moksha-hub state=restarted

@@ -105,8 +105,6 @@
- /srv/web/infra/ansible/vars/global.yml
- /srv/private/ansible/vars.yml
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- service: name="fedmsg-hub" state=started
post_tasks:
- name: tell nagios to unshush
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}

@@ -1,58 +0,0 @@
---
- name: Push packages out
hosts:
- fedmsg-hubs
- fedmsg-relays
- fedmsg-ircs
- fedmsg-gateways
- moksha-hubs
- datagrepper
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
testing: false
packages:
- fedmsg
- python2-fedmsg-meta-fedora-infrastructure
- python-moksha-hub
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: Clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
ansible.builtin.command: yum clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
check_mode: no
- name: Yum update fedmsg packages from the main repo
ansible.builtin.package: name={{item}} state=latest
when: not testing
with_items: "{{packages}}"
- name: Yum update fedmsg packages from testing repo
ansible.builtin.package: name={{item}} state=latest enablerepo=infrastructure-tags-stg
when: testing
with_items: "{{packages}}"
# Restart all the backend daemons
# - import_tasks: "{{tasks_path}}../restart-fedmsg-services.yml"
# Also restart the frontend web services
- name: Bounce apache
hosts: datagrepper:datagrepper_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- service: name="httpd" state=restarted

@@ -1,54 +0,0 @@
---
- name: Push packages out
hosts: packages:packages_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
testing: false
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
tasks:
- name: Clean all metadata {%if testing%}(with infrastructure-testing on){%endif%}
ansible.builtin.command: dnf clean all {%if testing%} --enablerepo=infrastructure-tags-stg {%endif%}
check_mode: no
- name: Update fedora-packages packages from main repo
ansible.builtin.package: name="fedora-packages" state=latest
when: not testing
- name: Update fedora-packages packages from testing repo
dnf: name="fedora-packages" state=latest enablerepo=infrastructure-tags-stg
when: testing
- name: Verify the config and restart it
hosts: packages:packages_stg
user: root
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
handlers:
- import_tasks: "{{ handlers_path }}/restart_services.yml"
pre_tasks:
- name: Tell nagios to shush
nagios: action=downtime minutes=60 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true
roles:
- role: packages3/web
post_tasks:
- service: name="fedmsg-hub" state=stopped
- service: name="httpd" state=stopped
# TODO -- do any DB maintenance steps we need to do here..
- service: name="httpd" state=started
- service: name="fedmsg-hub" state=started
- name: Tell nagios to unshush
nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
delegate_to: noc01.iad2.fedoraproject.org
ignore_errors: true

@@ -1,171 +0,0 @@
# Create the RabbitMQ users
---
- name: Setup RabbitMQ
hosts: rabbitmq[0]
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
tasks:
- name: Create the RabbitMQ user
community.rabbitmq.rabbitmq_user:
user: "messaging-bridge{{ env_suffix }}.fedoraproject.org"
vhost: /pubsub
read_priv: "((a|z)mq\\.topic|amqp_to_zmq|amqp_bridge_verify_missing)"
write_priv: "((a|z)mq\\.topic|amqp_to_zmq|amqp_bridge_verify_missing)"
configure_priv: "^$"
tags:
- config
- rabbitmq_cluster
- name: Create the RabbitMQ queue amqp_to_zmq
community.rabbitmq.rabbitmq_queue:
name: amqp_to_zmq
vhost: /pubsub
login_user: admin
login_password: "{{ (env == 'production') | ternary(rabbitmq_admin_password_production, rabbitmq_admin_password_staging) }}"
tags:
- config
- rabbitmq_cluster
- name: Create the RabbitMQ queue for verify-missing
community.rabbitmq.rabbitmq_queue:
name: amqp_bridge_verify_missing
vhost: /pubsub
durable: true
auto_delete: false
message_ttl: 60000
login_user: admin
login_password: "{{ (env == 'production') | ternary(rabbitmq_admin_password_production, rabbitmq_admin_password_staging) }}"
tags:
- config
- rabbitmq_cluster
- name: Create the amqp-to-zmq bindings
community.rabbitmq.rabbitmq_binding:
name: amq.topic
destination: amqp_to_zmq
destination_type: queue
vhost: /pubsub
login_user: admin
login_password: "{{ (env == 'production') | ternary(rabbitmq_admin_password_production, rabbitmq_admin_password_staging) }}"
tags:
- config
- rabbitmq_cluster
- name: Create the verify-missing bindings
community.rabbitmq.rabbitmq_binding:
name: "{{ item }}"
destination: amqp_bridge_verify_missing
destination_type: queue
vhost: /pubsub
login_user: admin
login_password: "{{ (env == 'production') | ternary(rabbitmq_admin_password_production, rabbitmq_admin_password_staging) }}"
with_items:
- amq.topic
- zmq.topic
tags:
- config
- rabbitmq_cluster
# Now create the app
- name: Make the app be real
hosts: os_control[0]:os_control_stg[0]
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
roles:
- role: openshift/project
project_app: messaging-bridges
project_description: "ZeroMQ <-> AMQP bridges"
project_appowners:
- abompard
- kevin
tags:
- appowners
- role: openshift/secret-file
secret_file_app: messaging-bridges
secret_file_secret_name: fedmsg-key
secret_file_key: fedmsg-fedmsg-migration-tools.key
secret_file_privatefile: "fedmsg-certs/keys/fedmsg-migration-tools{{env_suffix}}.fedoraproject.org.key"
- role: openshift/secret-file
secret_file_app: messaging-bridges
secret_file_secret_name: fedmsg-cert
secret_file_key: fedmsg-fedmsg-migration-tools.crt
secret_file_privatefile: "fedmsg-certs/keys/fedmsg-migration-tools{{env_suffix}}.fedoraproject.org.crt"
- role: openshift/secret-file
secret_file_app: messaging-bridges
secret_file_secret_name: rabbitmq-ca
secret_file_key: rabbitmq-ca.crt
secret_file_privatefile: "rabbitmq/{{env}}/pki/ca.crt"
- role: openshift/secret-file
secret_file_app: messaging-bridges
secret_file_secret_name: rabbitmq-key
secret_file_key: rabbitmq-fedmsg-migration-tools.key
secret_file_privatefile: "rabbitmq/{{env}}/pki/private/messaging-bridge{{env_suffix}}.fedoraproject.org.key"
- role: openshift/secret-file
secret_file_app: messaging-bridges
secret_file_secret_name: rabbitmq-cert
secret_file_key: rabbitmq-fedmsg-migration-tools.crt
secret_file_privatefile: "rabbitmq/{{env}}/pki/issued/messaging-bridge{{env_suffix}}.fedoraproject.org.crt"
- role: openshift/object
object_app: messaging-bridges
object_file: imagestream.yml
object_objectname: imagestream.yml
- role: openshift/object
object_app: messaging-bridges
object_template: buildconfig.yml.j2
object_objectname: buildconfig.yml
- role: openshift/start-build
start_build_app: messaging-bridges
start_build_buildname: messaging-bridges-build
tags:
- never
- build
- role: openshift/object
object_app: messaging-bridges
object_template: configmap.yml.j2
object_objectname: configmap.yml
- role: openshift/object
object_app: messaging-bridges
object_file: service.yml
object_objectname: service.yml
- role: openshift/object
object_app: messaging-bridges
object_template: deploymentconfig.yml
object_objectname: deploymentconfig.yml
- role: openshift/rollout
rollout_app: messaging-bridges
rollout_dcname: amqp-to-zmq
tags:
- never
- rollout
- role: openshift/rollout
rollout_app: messaging-bridges
rollout_dcname: zmq-to-amqp
tags:
- never
- rollout
- role: openshift/rollout
rollout_app: messaging-bridges
rollout_dcname: verify-missing
tags:
- never
- rollout
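For reference, the /pubsub plumbing this (now-removed) play managed could be inspected on a rabbitmq host with standard rabbitmqctl commands, e.g.:

rabbitmqctl list_queues -p /pubsub name messages
rabbitmqctl list_bindings -p /pubsub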

@@ -1,124 +0,0 @@
---
- name: Make the app be real
hosts: os_control_stg[0]:os_control
user: root
gather_facts: false
vars_files:
- /srv/web/infra/ansible/vars/global.yml
- "/srv/private/ansible/vars.yml"
- /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml
vars:
- ocp4: true
roles:
- role: openshift/project
project_app: monitor-dashboard
project_description: POC for the dashboards
project_appowners:
- nphilipp
- asaleh
# - role: openshift/object
# object_app: monitor-dashboard
# object_template: configmap.yml.j2
# object_objectname: configmap.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: notifiers.yaml
# object_objectname: notifiers.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: dashboard_provision_config.yml
# object_objectname: dashboard_provision_config.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_template: dashboard_config.yml.j2
# object_objectname: dashboard_config.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_template: buildconfig.yml.j2
# object_objectname: buildconfig.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: imagestream.yml
# object_objectname: imagestream.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_template: datagrepper_configmap.yml.j2
# object_objectname: datagrepper_configmap.yml
- role: openshift/object
object_app: monitor-dashboard
object_file: service.yml
object_objectname: service.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: datagrepper_service.yml
# object_objectname: datagrepper_service.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: postgrest_service.yml
# object_objectname: postgrest_service.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: prest_service.yml
# object_objectname: prest_service.yml
- role: openshift/object
object_app: monitor-dashboard
object_file: route_serviceaccount.yml
object_objectname: route_serviceaccount.yml
# Route already present, will figure updating later
- role: openshift/object
object_app: monitor-dashboard
object_file: route.yml
object_objectname: route.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: datagrepper_route.yml
# object_objectname: datagrepper_route.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: postgrest_route.yml
# object_objectname: postgrest_route.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_file: prest_route.yml
# object_objectname: prest_route.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_template: postgrest_deploymentconfig.yml.j2
# object_objectname: postgrest_deploymentconfig.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_template: prest_deploymentconfig.yml.j2
# object_objectname: prest_deploymentconfig.yml
# - role: openshift/object
# object_app: monitor-dashboard
# object_template: datagrepper_deploymentconfig.yml.j2
# object_objectname: datagrepper_deploymentconfig.yml
- role: openshift/object
object_app: monitor-dashboard
object_template: deploymentconfig.yml.j2
object_objectname: deploymentconfig.yml

@@ -128,9 +128,6 @@ faf_web_throttle_burst: 1
# fedora-messaging config #
##############################
faf_fedmsg_server:
# The topic_prefix configuration value will add a prefix to the topics of every sent message.
# This is used for migrating from fedmsg, and should not be used afterwards.
faf_fedmsg_topic_prefix:
# [TLS]
faf_fedmsg_ca_cert:

@@ -7,10 +7,6 @@ amqp_url = "amqp://{{ faf_fedmsg_server }}?connection_attempts=3&retry_delay=5"
passive_declares = true
# The topic_prefix configuration value will add a prefix to the topics of every sent message.
# This is used for migrating from fedmsg, and should not be used afterwards.
topic_prefix = {{ faf_fedmsg_topic_prefix }}
[tls]
ca_cert = {{ faf_fedmsg_ca_cert }}
keyfile = {{ faf_fedmsg_keyfile }}

@@ -65,12 +65,6 @@
# Reject all further connections from less secure vpn
-A INPUT -s 192.168.100.0/24 -j REJECT --reject-with icmp-host-prohibited
{% endif %}
# if the host declares a fedmsg-enabled wsgi app, open ports for it
{% if wsgi_fedmsg_service is defined %}
{% for i in range(wsgi_procs * wsgi_threads) %}
-A INPUT -p tcp -m tcp --dport 30{{ '%02d' % i }} -j ACCEPT
{% endfor %}
{% endif %}
# if the host/group defines incoming tcp_ports - allow them
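For a concrete sense of what the removed block rendered to: with wsgi_procs: 2 and wsgi_threads: 2 (the github2fedmsg values above), the loop emitted one rule per wsgi thread:

-A INPUT -p tcp -m tcp --dport 3000 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 3001 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 3002 -j ACCEPT
-A INPUT -p tcp -m tcp --dport 3003 -j ACCEPT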

@@ -65,12 +65,6 @@
# Reject all further connections from less secure vpn
-A INPUT -s 192.168.100.0/24 -j REJECT --reject-with icmp-host-prohibited
{% endif %}
# if the host declares a fedmsg-enabled wsgi app, open ports for it
{% if wsgi_fedmsg_service is defined %}
{% for i in range(wsgi_procs * wsgi_threads) %}
-A INPUT -p tcp -m tcp --dport 30{{ '%02d' % i }} -j ACCEPT
{% endfor %}
{% endif %}
# smtp rules: we want to allow vpn and our internal networks and mimecast
-A INPUT -s 192.168.100.0/24 -m tcp -p tcp --dport 25 -j ACCEPT

@@ -48,13 +48,6 @@
{% endfor %}
{% endif %}
# if the host declares a fedmsg-enabled wsgi app, open ports for it
{% if wsgi_fedmsg_service is defined %}
{% for i in range(wsgi_procs * wsgi_threads) %}
-A INPUT -p tcp -m tcp --dport 30{{ '%02d' % i }} -j ACCEPT
{% endfor %}
{% endif %}
# if the host/group defines incoming tcp_ports - allow them
{% if tcp_ports is defined %}

@@ -65,12 +65,6 @@ add rule ip filter INPUT ip saddr 192.168.100.0/24 udp dport 464 counter accept
# Reject all further connections from less secure vpn
add rule ip filter INPUT ip saddr 192.168.100.0/24 counter reject with icmp type host-prohibited
{% endif %}
# if the host declares a fedmsg-enabled wsgi app, open ports for it
{% if wsgi_fedmsg_service is defined %}
{% for i in range(wsgi_procs * wsgi_threads) %}
add rule ip filter INPUT tcp dport 30{{ '%02d' % i }} counter accept
{% endfor %}
{% endif %}
# if the host/group defines incoming tcp_ports - allow them

View file

@@ -50,13 +50,6 @@ add rule ip filter INPUT ip saddr {{ hostvars[host]['eth0_ip'] }} counter reject
{% endfor %}
{% endif %}
# if the host declares a fedmsg-enabled wsgi app, open ports for it
{% if wsgi_fedmsg_service is defined %}
{% for i in range(wsgi_procs * wsgi_threads) %}
add rule ip filter INPUT tcp dport 30{{ '%02d' % i }} counter accept
{% endfor %}
{% endif %}
# if the host/group defines incoming tcp_ports - allow them
{% if tcp_ports is defined %}

View file

@@ -5,14 +5,6 @@
# Broker address
amqp_url = "amqps://batcave{{ env_suffix }}:@rabbitmq{{ env_suffix }}.fedoraproject.org/%2Fpubsub"
# The topic_prefix configuration value will add a prefix to the topics of every sent message.
# This is used for migrating from fedmsg, and should not be used afterwards.
{% if env == "staging" %}
topic_prefix = "org.fedoraproject.stg"
{% else %}
topic_prefix = "org.fedoraproject.prod"
{% endif %}
[tls]
ca_cert = "/etc/pki/rabbitmq/batcave.ca"
keyfile = "/etc/pki/rabbitmq/batcave.key"

View file

@@ -1,2 +0,0 @@
fedmsg_wallboard count:GAUGE:0:U

View file

@@ -1,6 +0,0 @@
LoadPlugin exec
TypesDB "/usr/share/collectd/fedmsg-types.db"
<Plugin exec>
Exec "fedmsg" "/usr/bin/fedmsg-collectd" "--collectd-interval" "10"
</Plugin>

View file

@@ -1,4 +0,0 @@
LoadPlugin exec
<Plugin exec>
Exec "fedmsg" "/usr/local/bin/fedmsg-map"
</Plugin>

View file

@@ -1,143 +0,0 @@
#!/usr/bin/env python
""" Utility to scan a fedmsg setup for port availability.
Reports what percentage of fedmsg endpoints are bound and ready.
"""
import base64
import collections
import multiprocessing.pool
import socket
import sys
import time
import fedmsg.config
config = fedmsg.config.load_config()
timeout = 0.2
expected = '/wAAAAAAAAABfw=='
for_collectd = 'verbose' not in sys.argv
active = collections.defaultdict(list)
inactive = collections.defaultdict(list)
def info(content="\n"):
if not for_collectd:
sys.stdout.write(content)
sys.stdout.flush()
def scan_one(item):
name, endpoint = item
if not endpoint.startswith('tcp://'):
raise ValueError("Don't know how to deal with %r" % endpoint)
endpoint = endpoint[len('tcp://'):].split(':')
connection = None
try:
connection = socket.create_connection(endpoint, timeout)
actual = base64.b64encode(connection.recv(10))
if actual != expected:
inactive[name].append((
endpoint, "%r is not %r" % (actual, expected)))
info("F")
else:
active[name].append((endpoint, "all active"))
info(".")
except socket.error as e:
inactive[name].append((endpoint, str(e)))
info("F")
if connection:
connection.close()
def scan_all():
global active
global inactive
del active
del inactive
active = collections.defaultdict(list)
inactive = collections.defaultdict(list)
items = [(name, addr)
for name, endpoints in config['endpoints'].items()
for addr in endpoints]
# There is likely overhead in creating and destroying this thing, but we have
# memory leaks to track down.
pool = multiprocessing.pool.ThreadPool(25)
pool.map(scan_one, items)
pool.close()
info()
if 'verbose' in sys.argv:
import pprint
pprint.pprint(dict(active))
pprint.pprint(dict(inactive))
header = "".join([
"name".center(29),
"active".rjust(8),
"inactive".rjust(9),
"percent".rjust(9),
"reason".center(32),
])
info()
info(header + "\n")
info("-" * len(header) + "\n")
active_n_total, inactive_n_total = 0, 0
for name in sorted(config['endpoints']):
active_n = len(active[name])
inactive_n = len(inactive[name])
active_n_total += active_n
inactive_n_total += inactive_n
total = active_n + inactive_n
percent = ""
if total:
percent = "%%%0.1f" % (100 * float(active_n) / total)
reasons = set([reason for _, reason in inactive[name]])
info(name.rjust(29))
info(str(active_n).rjust(8))
info(str(inactive_n).rjust(9))
info(percent.rjust(9))
info(", ".join(reasons).rjust(32) + "\n")
info("-" * len(header) + "\n")
info(" total active: %i\n" % active_n_total)
info("total inactive: %i\n" % inactive_n_total)
value = 100 * float(active_n_total) / (active_n_total + inactive_n_total)
info("percent active: %%%0.1f\n" % value)
return value
if not for_collectd:
scan_all()
else:
interval = 5
host = socket.getfqdn()
while True:
start = time.time()
value = scan_all()
stop = timestamp = time.time()
delta = stop - start
output = (
"PUTVAL "
"{host}/fedmsg/percent "
"interval={interval} "
"{timestamp}:{value}"
).format(
host=host,
interval=interval,
timestamp=int(timestamp),
value="%0.1f" % value)
print(output)
if interval - delta > 0:
time.sleep(interval - delta)
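The collectd mode of the script above boils down to printing one PUTVAL record per interval. A minimal sketch of that output line, with a hypothetical host and value:

# Sketch of the PUTVAL record the loop above prints (host and value are hypothetical).
host = "busgateway01.iad2.fedoraproject.org"
interval = 5
timestamp = 1500000000
value = 97.5
print(("PUTVAL {host}/fedmsg/percent interval={interval} {timestamp}:{value}")
      .format(host=host, interval=interval, timestamp=int(timestamp), value="%0.1f" % value))
# -> PUTVAL busgateway01.iad2.fedoraproject.org/fedmsg/percent interval=5 1500000000:97.5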

View file

@@ -1,122 +0,0 @@
#!/usr/bin/env python
""" fedmsg-service-collectd.py -- produce collectd stats on fedmsg daemons """
import json
import os
import pprint
import socket
import sys
import time
import zmq
#hostname = socket.gethostname().split('.')[0]
hostname = socket.gethostname() # Use FQDN, I guess..
def print_consumer(service, consumer):
timestamp = int(time.time())
print "PUTVAL %s/%s/queue_length-%s interval=5 %i:%i" % (
hostname,
service,
'%s_backlog' % consumer['name'],
timestamp,
consumer['backlog']
)
print "PUTVAL %s/%s/gauge-%s interval=5 %i:%i" % (
hostname,
service,
'%s_exceptions' % consumer['name'],
timestamp,
consumer['exceptions']
)
# These got introduced in a later version of moksha, so not every host has them
if 'headcount_in' in consumer:
print "PUTVAL %s/%s/gauge-%s interval=5 %i:%i" % (
hostname,
service,
'%s_in' % consumer['name'],
timestamp,
consumer['headcount_in']
)
if 'headcount_out' in consumer:
print "PUTVAL %s/%s/gauge-%s interval=5 %i:%i" % (
hostname,
service,
'%s_out' % consumer['name'],
timestamp,
consumer['headcount_out']
)
# And these got introduced even later
if 'times' in consumer:
maxval = 1000 * max(consumer['times'] or [0])
minval = 1000 * min(consumer['times'] or [0])
avgval = 0
if consumer['times']:
avgval = 1000 * sum(consumer['times']) / len(consumer['times'])
print "PUTVAL %s/%s/response_time-%s interval=5 %i:%i" % (
hostname,
service,
'%s_min' % consumer['name'],
timestamp,
minval,
)
print "PUTVAL %s/%s/response_time-%s interval=5 %i:%i" % (
hostname,
service,
'%s_max' % consumer['name'],
timestamp,
maxval,
)
print "PUTVAL %s/%s/response_time-%s interval=5 %i:%i" % (
hostname,
service,
'%s_avg' % consumer['name'],
timestamp,
avgval,
)
def print_producer(service, producer):
timestamp = int(time.time())
print "PUTVAL %s/%s/gauge-%s interval=5 %i:%i" % (
hostname,
service,
'%s_exceptions' % producer['name'],
timestamp,
producer['exceptions']
)
if __name__ == '__main__':
service = "{{ process }}"
fname = '/var/run/fedmsg/monitoring-%s.socket' % service
if not os.path.exists(fname):
print "UNKNOWN - %s does not exist" % fname
sys.exit(3)
connect_to = "ipc:///%s" % fname
ctx = zmq.Context()
s = ctx.socket(zmq.SUB)
s.connect(connect_to)
s.setsockopt(zmq.SUBSCRIBE, '')
try:
while True:
msg = s.recv()
msg = json.loads(msg)
for consumer in msg['consumers']:
if consumer['initialized']:
print_consumer(service, consumer)
for producer in msg['producers']:
if producer['initialized']:
print_producer(service, producer)
except KeyboardInterrupt:
pass

View file

@@ -1,4 +0,0 @@
LoadPlugin exec
<Plugin exec>
Exec "{{user}}" "/usr/local/bin/fedmsg-service-collectd-{{ process }}"
</Plugin>

View file

@@ -1,3 +0,0 @@
---
user: fedmsg
process: fedmsg-hub

View file

@@ -1,14 +0,0 @@
[Unit]
Description=stunnel
After=network.target
Documentation=https://infrastructure.fedoraproject.org/infra/docs/fedmsg-websocket.txt
[Service]
ExecStart=/usr/bin/stunnel /etc/stunnel/stunnel.conf
Type=forking
User=root
Group=root
Restart=on-failure
[Install]
WantedBy=multi-user.target

View file

@@ -35,38 +35,6 @@
state: yes
persistent: yes
# Set-up stunnel for the event source server
# - name: Install stunnel service definition
# ansible.builtin.copy: src=stunnel.service
# dest=/usr/lib/systemd/system/stunnel.service
# owner=root group=root mode=0755
# notify:
# - Reload systemd
# - Restart stunnel
# tags:
# - pagure
# - stunnel
# - name: Ensure old stunnel init file is gone
# ansible.builtin.file: dest=/etc/init.d/stunnel/stunnel.init state=absent
# tags:
# - pagure
# - stunnel
# - config
# - name: Install stunnel.conf
# ansible.builtin.template: src={{ item.file }}
# dest={{ item.dest }}
# owner=root group=root mode=0600
# with_items:
# - { file: stunnel-conf.j2, dest: /etc/stunnel/stunnel.conf }
# notify: Restart stunnel
# tags:
# - pagure
# - stunnel
# - config
# Set-up Pagure
- name: Create a group pagure we can use
@@ -372,7 +340,6 @@
with_items:
- httpd
- postfix
# - stunnel
- redis
- pagure_ev
- pagure_logcom

View file

@@ -3,14 +3,6 @@
amqp_url = "amqps://pagure{{ env_suffix }}:@rabbitmq{{ env_suffix }}.fedoraproject.org/%2Fpubsub"
# The topic_prefix configuration value will add a prefix to the topics of every sent message.
# This is used for migrating from fedmsg, and should not be used afterwards.
{% if env == "staging" %}
topic_prefix = "org.fedoraproject.stg"
{% else %}
topic_prefix = "org.fedoraproject.prod"
{% endif %}
[tls]
ca_cert = "/etc/pki/rabbitmq/pagurecert/src.fp.o.ca"
keyfile = "/etc/pki/rabbitmq/pagurecert/src.fp.o.key"

View file

@@ -1,27 +0,0 @@
## WARNING ##
# This file is a replacement for the *regular* relay.py file we ship to all
# fedora infra prod/stg hosts.
# If you are seeing this file on a host, that is because it has
# fedmsg_debug_loopback set to 'true' in our ansible config. That should be an
# exceptional thing used only for debugging.
#
# Things to know:
#
# * No 'fedmsg-logger' statements on this host will make it to the real bus
# * They will only be broadcast locally (on this box)
# * No messages from the main fedmsg-relay on our bus will make it to this box.
# * Other messages from persistent fedmsg services will make it here.
#
# You can use this to test services locally with 'fedmsg-dg-replay'. Messages
# rebroadcast by that command will be replayed locally, to only this host.
config = dict(
endpoints={
"relay_outbound": [
"tcp://127.0.0.1:3999",
],
},
relay_inbound=[
"tcp://127.0.0.1:9941",
],
)

View file

@@ -1,21 +0,0 @@
module fedmsg 1.1;
require {
type anon_inodefs_t;
type httpd_t;
class file write;
}
require {
type ptmx_t;
type httpd_t;
class chr_file getattr;
}
#============= httpd_t ==============
# For basic port binding
allow httpd_t anon_inodefs_t:file write;
# So that psutil can work from /etc/fedmsg.d/logging.py
allow httpd_t ptmx_t:chr_file getattr;

View file

@@ -1,107 +0,0 @@
config = dict(
# Set this to dev if you're hacking on fedmsg or an app locally.
# Set to stg or prod if running in the Fedora Infrastructure.
environment="{{ fedmsg_env }}",
# Most hosts will be "false" here indicating that if they publish messages,
# they will passively bind to ports and have other consuming services
# actively connect to them. If this flag is set to True, then the service
# will actively connect out to a fedmsg-relay to have its messages forwarded
# on.
active={{fedmsg_active}},
{% if fedmsg_cert_prefix is defined %}
# Most fedmsg enabled services dynamically guess their cert_prefix by
# looking at the namespace of the python code that they're running in. For
# instance, bodhi's code is in the 'bodhi' python module, so fedmsg grabs
# that and uses it for its cert prefix. Some code, however, runs in an
# oddly-namespaced module, and so we allow the option here to override that
# at the host level.
cert_prefix='{{fedmsg_cert_prefix}}',
{% endif %}
{% if not (ansible_hostname.startswith('busgateway') or ansible_hostname.startswith('bodhi-backend')) %}
# These options provide a place for hub processes to write out their last
# processed message. This lets them read it in at startup and figure out
# what kind of backlog they have to deal with.
{% if env == 'staging' %}
# But we have it turned off in staging.
#status_directory="/var/run/fedmsg/status",
{% else %}
status_directory="/var/run/fedmsg/status",
{% endif %}
# This is the URL of a datagrepper instance that we can query for backlog.
{% if env == 'staging' %}
# But we have it turned off in staging.
#datagrepper_url="https://apps.stg.fedoraproject.org/datagrepper/raw",
{% else %}
datagrepper_url="https://apps.fedoraproject.org/datagrepper/raw",
{% endif %}
{% endif %}
# This used to be set to 1 for safety, but it turns out it was
# excessive. It is the number of seconds that fedmsg should sleep
# after it has initialized, but before it begins to try and send any
# messages. If set to a non-zero value, this will slow down one-off
# fedmsg scripts like the git post-receive hook and pkgdb2branch.
# If we are experiencing message-loss problems, one of the first things
# to try should be to turn this number up to a non-zero value. '1' should
# be more than sufficient.
post_init_sleep={{fedmsg_post_init_sleep}},
# This is the number of milliseconds to wait before timing out on
# connections.. notably to the fedmsg-relay in the event that it has
# crashed.
zmq_linger=2000,
# Default is 0
high_water_mark=0,
io_threads=1,
# We almost always want the fedmsg-hub to be sending messages with zmq as
# opposed to amqp or stomp. The only exception will be the bugzilla
# amqp<->zmq bridge service.
zmq_enabled=True,
# When subscribing to messages, we want to allow splats ('*') so we tell the
# hub to not be strict when comparing message topics to subscription
# topics.
zmq_strict=False,
# See the following
# - http://tldp.org/HOWTO/TCP-Keepalive-HOWTO/overview.html
# - http://api.zeromq.org/3-2:zmq-setsockopt
zmq_tcp_keepalive=1,
zmq_tcp_keepalive_cnt=3,
zmq_tcp_keepalive_idle=60,
zmq_tcp_keepalive_intvl=5,
# See the following
# - https://github.com/fedora-infra/fedmsg/issues/305
# - http://api.zeromq.org/3-2:zmq-setsockopt
zmq_reconnect_ivl=100,
zmq_reconnect_ivl_max=500,
)
# This option adds an IPC socket by which we can monitor hub health.
try:
import os
import psutil
pid = os.getpid()
proc = [p for p in psutil.process_iter() if p.pid == pid][0]
# proc.name is a method on modern versions of psutil.
name = proc.name
if callable(name):
name = name()
config['moksha.monitoring.socket'] = \
'ipc:///var/run/fedmsg/monitoring-%s.socket' % name
config['moksha.monitoring.socket.mode'] = '770'
except (OSError, ImportError):
# We run into issues when trying to import psutil from mod_wsgi on rhel7
# but this feature is of no concern in that context, so just fail quietly.
# https://github.com/jmflinuxtx/kerneltest-harness/pull/17#issuecomment-48007837
pass

View file

@@ -1,13 +0,0 @@
# This tells our nodes to also pull messages from anitya.
# Among those nodes is our public gateway which means that it will forward them
# back out to the public on "our bus".
config = dict(
{% if env != 'staging' %}
endpoints={
"anitya-public-relay": [
"tcp://anitya-frontend01.fedoraproject.org:9940",
],
},
{% endif %}
)

View file

@@ -1,26 +0,0 @@
{% if datacenter == 'iad2' %}
{% if env == 'staging' %}
suffix = 'stg.iad2.fedoraproject.org'
{% else %}
suffix = 'iad2.fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% else %}
{% if env == 'staging' %}
suffix = 'stg.fedoraproject.org'
{% else %}
suffix = 'fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% endif %}
config = dict(
endpoints={
"bugzilla2fedmsg.bugzilla2fedmsg01": [
"tcp://bugzilla2fedmsg01.%s:3000" % suffix,
"tcp://bugzilla2fedmsg01.%s:3001" % suffix,
"tcp://bugzilla2fedmsg01.%s:3002" % suffix,
"tcp://bugzilla2fedmsg01.%s:3003" % suffix,
],
},
)

View file

@@ -1,8 +0,0 @@
config = dict(
endpoints = {
"fedora-infrastructure": [
"tcp://hub.fedoraproject.org:9940",
],
}
)

View file

@@ -1,26 +0,0 @@
{% if datacenter == 'iad2' %}
{% if env == 'staging' %}
suffix = 'stg.iad2.fedoraproject.org'
{% else %}
suffix = 'iad2.fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% else %}
{% if env == 'staging' %}
suffix = 'stg.fedoraproject.org'
{% else %}
suffix = 'fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% endif %}
config = dict(
endpoints={
"fedbadges.badges-backend01": [
"tcp://badges-backend01.%s:3000" % suffix,
"tcp://badges-backend01.%s:3001" % suffix,
"tcp://badges-backend01.%s:3002" % suffix,
"tcp://badges-backend01.%s:3003" % suffix,
],
},
)

View file

@@ -1,20 +0,0 @@
{% if env == 'staging' %}
suffix = 'stg.iad2.fedoraproject.org'
{% else %}
suffix = 'iad2.fedoraproject.org'
{% endif %}
# REMEMBER -- if you change these, you need to also change the iptables rules in
# inventory/groups/fedimg and inventory/groups/fedimg-stg
primary_threads = 4
atomic_threads = 2
NUM_FEDIMG_PORTS = 2 * ((primary_threads + atomic_threads) + 1)
config = dict(
endpoints={
"fedimg.fedimg01": [
"tcp://fedimg01.%s:30%0.2i" % (suffix, i)
for i in range(NUM_FEDIMG_PORTS)
],
},
)
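For the record, the port arithmetic above (values taken from the removed file itself, production suffix assumed) worked out to 14 endpoints:

# Worked example of the removed fedimg endpoint math (prod suffix assumed).
primary_threads = 4
atomic_threads = 2
NUM_FEDIMG_PORTS = 2 * ((primary_threads + atomic_threads) + 1)  # 2 * (6 + 1) = 14
endpoints = ["tcp://fedimg01.iad2.fedoraproject.org:30%0.2i" % i
             for i in range(NUM_FEDIMG_PORTS)]
# -> tcp://fedimg01.iad2.fedoraproject.org:3000 through :3013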

View file

@@ -1,26 +0,0 @@
{% if datacenter == 'iad2' %}
{% if env == 'staging' %}
suffix = 'stg.iad2.fedoraproject.org'
{% else %}
suffix = 'iad2.fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% else %}
{% if env == 'staging' %}
suffix = 'stg.fedoraproject.org'
{% else %}
suffix = 'fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% endif %}
config = dict(
endpoints={
"hotness.hotness01": [
"tcp://hotness01.%s:3000" % suffix,
"tcp://hotness01.%s:3001" % suffix,
"tcp://hotness01.%s:3002" % suffix,
"tcp://hotness01.%s:3003" % suffix,
],
},
)

View file

@@ -1,37 +0,0 @@
{% if datacenter == 'iad2' %}
{% if env == 'staging' %}
suffix = 'stg.iad2.fedoraproject.org'
{% else %}
suffix = 'iad2.fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% else %}
{% if env == 'staging' %}
suffix = 'stg.fedoraproject.org'
{% else %}
suffix = 'fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% endif %}
config = dict(
endpoints={
# We need an endpoint for each of the mailman pieces that pick up and
# import our plugin. So far that's the 'archiver' and the 'pipeline'.
"mailman.mailman01": [
"tcp://mailman01.%s:3000" % suffix,
"tcp://mailman01.%s:3001" % suffix,
# On Nov 19th, we started getting tracebacks that mailman3 had run
# out of endpoints. We're not sure what changed that required
# additional entries here, but we're adding them to try and make the
# error go away.
# One idea is that the mailman REST server grew the ability to list
# the archivers. Postorius calls that, and it (under the hood)
# tries to initialize all the archivers, which would inadvertently
# try to force our fedmsg plugin to establish a socket.
"tcp://mailman01.%s:3002" % suffix,
"tcp://mailman01.%s:3003" % suffix,
],
},
)

View file

@@ -1,47 +0,0 @@
{% if datacenter == 'iad2' %}
{% if env == 'staging' %}
suffix = 'stg.iad2.fedoraproject.org'
{% else %}
suffix = 'iad2.fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% else %}
{% if env == 'staging' %}
suffix = 'stg.fedoraproject.org'
{% else %}
suffix = 'fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% endif %}
config = dict(
# This is a dict of possible addresses from which fedmsg can send
# messages. fedmsg.init(...) requires that a 'name' argument be passed
# to it which corresponds with one of the keys in this dict.
endpoints = {
# For message producers, fedmsg will try to guess the
# name of its calling module to determine which endpoint definition
# to use. This can be overridden by explicitly providing the name in
# the initial call to fedmsg.init(...).
# This used to be on value01 and value03.. but now we just have one
"supybot.value02": [
"tcp://value02.%s:3000" % suffix,
],
# koji is not listed here since it publishes to the fedmsg-relay
# Dynamically generate endpoint declarations from our wsgi app vars.
# Eventually, replace *all* fedmsg endpoint definitions with this one loop
{% for host in groups['all']|sort %}
{% if 'wsgi_fedmsg_service' in hostvars[host] and env == hostvars[host]['env'] %}
"{{hostvars[host]['wsgi_fedmsg_service']}}.{{host.split('.')|first}}": [
{% for i in range(hostvars[host]['wsgi_procs'] * hostvars[host]['wsgi_threads']) %}
"tcp://{{host}}:30{{'%02d' % i}}",
{% endfor %}
],
{% endif %}
{% endfor %}
},
)
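Purely as an illustration, for a hypothetical host app01.iad2.fedoraproject.org carrying wsgi_fedmsg_service: myapp with wsgi_procs: 2 and wsgi_threads: 2, that loop would have rendered an entry roughly like:

# Hypothetical rendering of the removed per-wsgi-app endpoint loop
# (host name and service name are made up for illustration).
endpoints = {
    "myapp.app01": [
        "tcp://app01.iad2.fedoraproject.org:3000",
        "tcp://app01.iad2.fedoraproject.org:3001",
        "tcp://app01.iad2.fedoraproject.org:3002",
        "tcp://app01.iad2.fedoraproject.org:3003",
    ],
}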

View file

@@ -1,254 +0,0 @@
# Setup fedmsg logging.
# All of these modules are just used by the ContextInjector below.
import inspect
import hashlib
import logging
import os
import socket
import traceback
psutil = None
try:
import psutil
except (OSError, ImportError):
# We run into issues when trying to import psutil from inside mod_wsgi on
# rhel7. If we hit that here, then just fail quietly.
# https://github.com/jmflinuxtx/kerneltest-harness/pull/17#issuecomment-48007837
pass
radio_silence = """
*** %i instances of this error seen. No more mail will be sent. ***
""".strip()
seen_errors = {} # This could be a default dict.
error_limit = 100
class ContextInjector(logging.Filter):
""" Logging filter that adds context to log records.
Filters are typically used to "filter" log records. They declare a filter
method that can return True or False. Only records with 'True' will
actually be logged.
Here, we somewhat abuse the concept of a filter. We for the most part
return true, but we use the opportunity to hang important contextual
information on the log record to later be used by the logging Formatter. We
don't normally want to see all this stuff in normal log records, but we *do*
want to see it when we are emailed error messages. Seeing an error, but not
knowing which host it comes from, is not that useful.
After we've seen an error ~100 times, we stop sending email to avoid choking
the world.
http://docs.python.org/2/howto/logging-cookbook.html#filters-contextual
"""
def filter(self, record):
current_process = ContextInjector.get_current_process()
current_hostname = socket.gethostname()
record.host = current_hostname
record.proc = current_process
record.pid = current_process.pid
record.proc_name = current_process.name
record.command_line = current_process.cmdline
# These are callables on more modern versions of psutil.
if callable(record.proc_name):
record.proc_name = record.proc_name()
if callable(record.command_line):
record.command_line = record.command_line()
record.command_line = " ".join(record.command_line)
record.callstack = self.format_callstack()
record.farewell = ""
key = hashlib.sha256(record.callstack.encode("utf-8")).hexdigest()
if not key in seen_errors:
seen_errors[key] = 0
if seen_errors[key] > error_limit:
return False
seen_errors[key] += 1
if seen_errors[key] > error_limit:
record.farewell = radio_silence % error_limit
msg_id = ""
try:
msg = self.get_msg_object()
if isinstance(msg, dict):
if 'msg_id' in msg:
msg_id = msg['msg_id']
elif 'msg' in msg:
if 'msg_id' in msg['msg']:
msg_id = msg['msg']['msg_id']
except:
pass
record.msg_id = msg_id
return True
@staticmethod
def get_msg_object():
""" Return the current request object
This is insane.
There is no way to know the actual fedmsg message at this point in
the code, so we're crawling our way down the call stack until we
find the first place with a 'msg' local instance variable and
attempt to extract the msg_id from it (later).
Please forgive me (and Ralph, the original author of this code).
"""
for frame in (f[0] for f in reversed(inspect.stack())):
if 'msg' in frame.f_locals:
return frame.f_locals['msg']
# This code is reached if there's no Request. Most common case is trac-admin
return None
@staticmethod
def format_callstack():
for i, frame in enumerate(f[0] for f in inspect.stack()):
if not '__name__' in frame.f_globals:
continue
modname = frame.f_globals['__name__'].split('.')[0]
if modname != "logging":
break
def _format_frame(frame):
if type(frame) not in [tuple, list]:
# It must be FrameSummary, convert it to tuples.
frame = (frame.filename, frame.lineno, frame.name, "")
return ' File "%s", line %i in %s\n %s' % (frame)
stack = traceback.extract_stack()
stack = stack[:-i]
return "\n".join([_format_frame(frame) for frame in stack])
@staticmethod
def get_current_process():
mypid = os.getpid()
if not psutil:
raise OSError("Could not import psutil for %r" % mypid)
for proc in psutil.process_iter():
if proc.pid == mypid:
return proc
# This should be impossible.
raise ValueError("Could not find process %r" % mypid)
@classmethod
def __json__(cls):
""" We need to be jsonifiable for "fedmsg-config" """
return {'name': 'ContextInjector'}
hefty_format = """Message
-------
[%(asctime)s][%(name)10s %(levelname)7s]
```python
%(message)s
```
%(farewell)s
Process Details
---------------
- host: %(host)s
- PID: %(pid)s
- name: %(proc_name)s
- command: %(command_line)s
- msg_id: %(msg_id)s
Callstack that led to the logging statement
--------------------------------------------
```python
%(callstack)s
```
"""
# See the following for constraints on this format http://bit.ly/Xn1WDn
config = dict(
logging=dict(
version=1,
formatters=dict(
bare={
"datefmt": "%Y-%m-%d %H:%M:%S",
{% if inventory_hostname.startswith(('bodhi-backend01')) %}
# Here we use a different format just so we can include
# threadName. We set the threadName in the bodhi masher to some
# useful strings that will tell us which branch is doing what.
# See https://github.com/fedora-infra/bodhi/commit/66d94094
"format": "[%(asctime)s][%(name)10s %(levelname)7s]%(threadName)s %(message)s"
{% else %}
"format": "[%(asctime)s][%(name)10s %(levelname)7s] %(message)s"
{% endif %}
},
hefty={
"datefmt": "%Y-%m-%d %H:%M:%S",
"format": hefty_format,
},
),
filters=dict(
context={
# This "()" syntax in the stdlib doesn't seem to be documented
# anywhere. I had to read
# /usr/lib64/python2.7/logging/config.py to figure it out.
"()": ContextInjector,
},
),
handlers=dict(
console={
"class": "logging.StreamHandler",
"formatter": "bare",
"level": "{{fedmsg_loglevel}}",
"stream": "ext://sys.stdout",
},
mailer={
"class": "logging.handlers.SMTPHandler",
"formatter": "hefty",
"filters": ["context"],
"level": "ERROR",
{% if datacenter == 'iad2' %}
"mailhost": "bastion.iad2.fedoraproject.org",
{% else %}
"mailhost": "bastion.vpn.fedoraproject.org",
{% endif %}
"fromaddr": "fedmsg@fedoraproject.org",
"toaddrs": [
{% for recipient in fedmsg_error_recipients %}
"{{recipient}}",
{% endfor %}
],
"subject": "fedmsg error log %s" % socket.gethostname(),
},
),
loggers=dict(
fedmsg={
"level": "{{fedmsg_loglevel}}",
"propagate": False,
"handlers": ["console", "mailer"],
},
moksha={
"level": "{{fedmsg_loglevel}}",
"propagate": False,
"handlers": ["console", "mailer"],
},
),
# Root handler that deals with log messages that are not handled by the
# loggers explicitly configured in the `loggers` dict.
root={
"level": "{{fedmsg_loglevel}}",
"handlers": ["console", "mailer"],
},
),
)

View file

@@ -1,30 +0,0 @@
config = dict(
routing_nitpicky=False,
routing_policy={
# The gist here is that only messages signed by the
# bodhi-app0{1,2,3,4,5,6,7,8} certificates may bear the
# "org.fedoraproject.prod.bodhi.update.request.stable" topic, or else
# they fail validation and are either dropped or marked as invalid
# (depending on the consumer's wishes).
#
# There is another option that we do not set. If `routing_nitpicky` is
# set to True, then a given message's topic *must* appear in this list
# in order for it to pass validation. For instance, we have
# routing_nitpicky set to False by default and no
# "org.fedoraproject.prod.logger.log" topics appear in this policy,
# therefore, any message bearing that topic and *any* certificate signed
# by our CA may pass validation.
#
# ** policy dynamically generated from inventory vars
# See ansible/filter_plugins/fedmsg.py for this inversion filter.
{% for topic, certs in groups | invert_fedmsg_policy(hostvars, env) %}
"{{topic}}": [
{% for cert in certs %}
"{{ cert }}",
{% endfor %}
"fedmsg-migration-tools{{env_suffix}}.fedoraproject.org",
],
{% endfor %}
},
)
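Rendered, each policy entry mapped a topic to the "service-fqdn" certificate names allowed to publish it, plus the migration bridge cert. A sketch of one prod entry, with the signer host chosen purely for illustration:

# Sketch of a single rendered routing_policy entry (signer hostname is illustrative).
routing_policy = {
    "org.fedoraproject.prod.bodhi.update.request.stable": [
        "bodhi-bodhi-backend01.iad2.fedoraproject.org",
        "fedmsg-migration-tools.fedoraproject.org",
    ],
}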

View file

@@ -1,45 +0,0 @@
{% if datacenter == 'iad2' %}
{% if env == 'staging' %}
suffix = 'stg.iad2.fedoraproject.org'
{% else %}
suffix = 'iad2.fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% else %}
{% if env == 'staging' %}
suffix = 'stg.fedoraproject.org'
{% else %}
suffix = 'fedoraproject.org'
vpn_suffix = 'vpn.fedoraproject.org'
{% endif %}
{% endif %}
# This is just an extension of fedmsg.d/endpoints.py. This dict
# will get merged in with the other.
config = dict(
endpoints={
# This is the output side of the relay to which all other
# services can listen.
"relay_outbound": [
# Messages emerge here
"tcp://busgateway01.%s:3999" % suffix,
],
},
# This is the address of an active->passive relay. It is used for the
# fedmsg-logger command which requires another service with a stable
# listening address for it to send messages to.
# It is also used by the git-hook, for the same reason.
# It is also used by the mediawiki php plugin which, due to the oddities of
# php, can't maintain a single passive-bind endpoint of its own.
relay_inbound=[
# Primarily, scripts from inside iad2 connect here.
# Furthermore, scripts from outside (coprs, secondary arch koji) connect
# here via haproxy.
"tcp://busgateway01.%s:9941" % suffix,
# Scripts from the vpn (people03, anitya) connect here
"tcp://busgateway01.vpn.fedoraproject.org:3998",
],
)

View file

@@ -1,42 +0,0 @@
config = dict(
sign_messages={{fedmsg_sign_messages}},
validate_signatures={{fedmsg_validate_signatures}},
ssldir="/etc/pki/fedmsg",
{% if env == 'staging' %}
ca_cert_location="https://stg.fedoraproject.org/fedmsg/ca.crt",
crl_location="https://stg.fedoraproject.org/fedmsg/crl.pem",
{% else %}
ca_cert_location="https://fedoraproject.org/fedmsg/ca.crt",
crl_location="https://fedoraproject.org/fedmsg/crl.pem",
{% endif %}
crl_cache="/var/run/fedmsg/crl.pem",
crl_cache_expiry=86400, # Daily
certnames=dict([
# This is the set of certs for this host, dynamically generated from the
# ``fedmsg_certs`` host vars
{% for cert in fedmsg_certs %}
("{{cert.get('alias', cert['service'])}}.{{inventory_hostname_short}}", "{{cert['service']}}-{{fedmsg_fqdn | default(inventory_hostname)}}"),
{% endfor %}
] + [
("shell.anitya-frontend01", "shell-anitya-frontend01.vpn.fedoraproject.org"),
("anitya.anitya-frontend01", "anitya-anitya-frontend01.vpn.fedoraproject.org"),
("shell.anitya-backend01", "shell-anitya-backend01.vpn.fedoraproject.org"),
("anitya.anitya-backend01", "anitya-anitya-backend01.vpn.fedoraproject.org"),
# FAF/retrace is on the qa network and talks to an inbound relay.
("shell.retrace01", "shell-retrace01.qa.fedoraproject.org"),
("faf.retrace01", "faf-retrace01.qa.fedoraproject.org"),
# This is for the copr backend, which is a little different. The
# "cert-prefix" is just "copr", and is hardcoded in
# backend/dispatcher.py. The hostname is also a little different,
# since it is an openstack node. This might be a little fragile. :/
# See https://github.com/fedora-infra/fedmsg/issues/199 for the plan.
("copr.dhcp-client03", "copr-copr-be.cloud.fedoraproject.org"),
("copr.copr-be-i-00000407", "copr-copr-be.cloud.fedoraproject.org"),
("copr.copr-be", "copr-copr-be.cloud.fedoraproject.org"),
]),
)

View file

@@ -1,6 +0,0 @@
Alias {{path}} /srv/web/fedmsg
<Directory /srv/web/fedmsg>
Options Indexes
AllowOverride None
</Directory>

View file

@@ -1,57 +0,0 @@
From 922c6f390525faa9a9d65a501a44d8081940194a Mon Sep 17 00:00:00 2001
From: Patrick Uiterwijk <puiterwijk@redhat.com>
Date: Thu, 1 Dec 2016 11:56:34 +0000
Subject: [PATCH] Only check for stomp messages after we decoded any ZMQMessage
Signed-off-by: Patrick Uiterwijk <puiterwijk@redhat.com>
---
fedmsg/consumers/__init__.py | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/fedmsg/consumers/__init__.py b/fedmsg/consumers/__init__.py
index 88324b1..a12a587 100644
--- a/fedmsg/consumers/__init__.py
+++ b/fedmsg/consumers/__init__.py
@@ -204,16 +204,23 @@ def _make_query(page=1):
def validate(self, message):
""" This needs to raise an exception, caught by moksha. """
- # If we're not validating, then everything is valid.
- # If this is turned on globally, our child class can override it.
- if not self.validate_signatures:
- return
-
if hasattr(message, '__json__'):
message = message.__json__()
if isinstance(message['body'], basestring):
message['body'] = json.loads(message['body'])
+ # Massage STOMP messages into a more compatible format.
+ if 'topic' not in message['body']:
+ message['body'] = {
+ 'topic': message.get('topic'),
+ 'msg': message['body'],
+ }
+
+ # If we're not validating, then everything is valid.
+ # If this is turned on globally, our child class can override it.
+ if not self.validate_signatures:
+ return
+
# We assume these match inside fedmsg.crypto, so we should enforce it.
if not message['topic'] == message['body']['topic']:
raise RuntimeWarning("Topic envelope mismatch.")
@@ -223,13 +230,6 @@ def validate(self, message):
def _consume(self, message):
- # Massage STOMP messages into a more compatible format.
- if 'topic' not in message['body']:
- message['body'] = {
- 'topic': message.get('topic'),
- 'msg': message['body'],
- }
-
try:
self.validate(message)
except RuntimeWarning as e:

View file

@@ -1,2 +0,0 @@
[Service]
LimitNOFILE=160000

View file

@@ -1,7 +0,0 @@
config = {
# Tell fedmsg-gateway where its special outgoing port is.
'fedmsg.consumers.gateway.port': 9940,
# Set this number to near, but not quite the fs.file-limit. Try 160000.
'fedmsg.consumers.gateway.high_water_mark': 160000,
}

View file

@@ -1,3 +0,0 @@
# This is empty on purpose. See:
# https://fedorahosted.org/fedora-infrastructure/ticket/4995
config = {}

View file

@@ -1,57 +0,0 @@
From 922c6f390525faa9a9d65a501a44d8081940194a Mon Sep 17 00:00:00 2001
From: Patrick Uiterwijk <puiterwijk@redhat.com>
Date: Thu, 1 Dec 2016 11:56:34 +0000
Subject: [PATCH] Only check for stomp messages after we decoded any ZMQMessage
Signed-off-by: Patrick Uiterwijk <puiterwijk@redhat.com>
---
fedmsg/consumers/__init__.py | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/fedmsg/consumers/__init__.py b/fedmsg/consumers/__init__.py
index 88324b1..a12a587 100644
--- a/fedmsg/consumers/__init__.py
+++ b/fedmsg/consumers/__init__.py
@@ -204,16 +204,23 @@ def _make_query(page=1):
def validate(self, message):
""" This needs to raise an exception, caught by moksha. """
- # If we're not validating, then everything is valid.
- # If this is turned on globally, our child class can override it.
- if not self.validate_signatures:
- return
-
if hasattr(message, '__json__'):
message = message.__json__()
if isinstance(message['body'], basestring):
message['body'] = json.loads(message['body'])
+ # Massage STOMP messages into a more compatible format.
+ if 'topic' not in message['body']:
+ message['body'] = {
+ 'topic': message.get('topic'),
+ 'msg': message['body'],
+ }
+
+ # If we're not validating, then everything is valid.
+ # If this is turned on globally, our child class can override it.
+ if not self.validate_signatures:
+ return
+
# We assume these match inside fedmsg.crypto, so we should enforce it.
if not message['topic'] == message['body']['topic']:
raise RuntimeWarning("Topic envelope mismatch.")
@@ -223,13 +230,6 @@ def validate(self, message):
def _consume(self, message):
- # Massage STOMP messages into a more compatible format.
- if 'topic' not in message['body']:
- message['body'] = {
- 'topic': message.get('topic'),
- 'msg': message['body'],
- }
-
try:
self.validate(message)
except RuntimeWarning as e:

View file

@@ -1,3 +0,0 @@
# This is empty on purpose. See:
# https://fedorahosted.org/fedora-infrastructure/ticket/4995
config = {}

View file

@@ -1,14 +0,0 @@
[Unit]
Description=stunnel
After=network.target
Documentation=https://infrastructure.fedoraproject.org/infra/docs/fedmsg-websocket.txt
[Service]
ExecStart=/usr/bin/stunnel /etc/stunnel/stunnel.conf
Type=forking
User=root
Group=root
Restart=on-failure
[Install]
WantedBy=multi-user.target

View file

@@ -1,39 +0,0 @@
config = {
# Tell fedmsg-gateway where its special outgoing port is. haproxy will
# locally connect back to us here
'fedmsg.consumers.gateway.port': 9942,
# Set this number to near, but not quite the fs.file-limit. Try 160000.
'fedmsg.consumers.gateway.high_water_mark': 160000,
# Here is the address of the master gateway on busgateway01. We'll get all
# our messages from here and forward them out on fedmsg.consumers.gateway.port
'endpoints': {
'fedmsg-gateway-master': [
'tcp://busgateway01:9940',
],
}
}
# This option adds an IPC socket by which we can monitor hub health.
try:
import os
import psutil
pid = os.getpid()
proc = [p for p in psutil.process_iter() if p.pid == pid][0]
# proc.name is a method on modern versions of psutil.
name = proc.name
if callable(name):
name = name()
config['moksha.monitoring.socket'] = \
'ipc:///var/run/fedmsg/monitoring-%s.socket' % name
config['moksha.monitoring.socket.mode'] = '770'
except (OSError, ImportError):
# We run into issues when trying to import psutil from mod_wsgi on rhel7
# but this feature is of no concern in that context, so just fail quietly.
# https://github.com/jmflinuxtx/kerneltest-harness/pull/17#issuecomment-48007837
pass

View file

@@ -1,8 +0,0 @@
cert = /etc/pki/tls/certs/wildcard-2024.fedoraproject.org.combined.cert
key = /etc/pki/tls/private/wildcard-2024.fedoraproject.org.key
pid = /var/run/stunnel.pid
[{{ stunnel_service }}]
accept = {{ stunnel_source_port }}
connect = {{ stunnel_destination_port }}

View file

@@ -1,10 +0,0 @@
# This is false by default. You have to override it when including the role.
---
enable_websocket_server: false
# Absolute memory limit in megabytes for fedmsg-hub and fedmsg-hub-3
# services. Zero means infinity (no limit).
fedmsg_hub_memory_limit_mb: 0
# Restart fedmsg-hub.service on failure.
fedmsg_hub_auto_restart: false

View file

@@ -1,9 +0,0 @@
config = {
# The presence of this will cause fedmsg-hub to start its own websocket
# server along with it.
'moksha.livesocket.websocket.port': 9919,
}
# And... this is a hack to get python-txws to work with python-six on epel7
import six
six.PY2 = not six.PY3

Some files were not shown because too many files have changed in this diff.