From 7c7f57271a380906c3a482b0dff07af6ac43b302 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Wed, 18 Mar 2015 21:05:41 +0000 Subject: [PATCH 01/20] Make ack yellow. :) --- roles/nagios_server/files/irc-colorize.py | 1 + 1 file changed, 1 insertion(+) diff --git a/roles/nagios_server/files/irc-colorize.py b/roles/nagios_server/files/irc-colorize.py index 122f8d0f93..3de3d8ec02 100755 --- a/roles/nagios_server/files/irc-colorize.py +++ b/roles/nagios_server/files/irc-colorize.py @@ -29,6 +29,7 @@ mirc_colors = { mapping = { 'RECOVERY': 'green', 'OK': 'green', + 'ACKNOWLEDGEMENT', 'yellow', 'UNKNOWN': 'purple', 'WARNING': 'teal', # 'red' probably makes the most sense here, but it behaved oddly From 58212b54f3441d5c92789d32ab51c4f0540344b7 Mon Sep 17 00:00:00 2001 From: Mikolaj Izdebski Date: Thu, 19 Mar 2015 08:56:59 +0100 Subject: [PATCH 02/20] Fix paths to main.cf in postfix_basic.yml See https://fedorahosted.org/fedora-infrastructure/ticket/4689 --- tasks/postfix_basic.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tasks/postfix_basic.yml b/tasks/postfix_basic.yml index 39b5e8b388..96533dbe37 100644 --- a/tasks/postfix_basic.yml +++ b/tasks/postfix_basic.yml @@ -8,10 +8,10 @@ action: copy src={{ item }} dest=/etc/postfix/main.cf with_first_found: - "{{ postfix_maincf }}" - - "{{ roles }}/base/files/postfix/main.cf.{{ ansible_fqdn }}" - - "{{ roles }}/base/files/postfix/main.cf.{{ inventory_hostname }}" - - "{{ roles }}/base/files/postfix/main.cf.{{ host_group }}" - - "{{ roles }}/base/files/postfix/main.cf.{{ postfix_group }}" + - "{{ roles }}/base/files/postfix/main.cf/main.cf.{{ ansible_fqdn }}" + - "{{ roles }}/base/files/postfix/main.cf/main.cf.{{ inventory_hostname }}" + - "{{ roles }}/base/files/postfix/main.cf/main.cf.{{ host_group }}" + - "{{ roles }}/base/files/postfix/main.cf/main.cf.{{ postfix_group }}" - "{{ roles }}/base/files/postfix/main.cf/main.cf" notify: - restart postfix From d8176192b4347a9f97bf01d12bcd94df55be2a83 Mon Sep 17 00:00:00 2001 From: Mikolaj Izdebski Date: Fri, 20 Feb 2015 19:55:21 +0100 Subject: [PATCH 03/20] Update Koschei playbook See https://fedorahosted.org/fedora-infrastructure/ticket/4690 This commit introduces the followith enhancements: - add koschei tag - yum-install koschei package - add extra fedorapeople repo - enable and start Koschei services - install Koschei config file - restart services on config update - install Koji certificates - avoid explicitly cleaning yum metadata - add alembic DB migration --- files/koschei/config.cfg.j2 | 58 ++++++++++++++ files/koschei/koschei.repo | 13 +++ .../hosts/koschei.cloud.fedoraproject.org.yml | 80 +++++++++++++++++++ 3 files changed, 151 insertions(+) create mode 100644 files/koschei/config.cfg.j2 create mode 100644 files/koschei/koschei.repo diff --git a/files/koschei/config.cfg.j2 b/files/koschei/config.cfg.j2 new file mode 100644 index 0000000000..bd26a2a2c1 --- /dev/null +++ b/files/koschei/config.cfg.j2 @@ -0,0 +1,58 @@ +# This is a config file for Koschei that can override values in default +# configuration in /usr/share/koschei/config.cfg. It is a python file expecting +# assignment to config dictionary which will be recursively merged with the +# default one. 
+config = { + "database_config": { + "username": "koschei", + "password": "{{ koschei_pgsql_password }}", + "database": "koschei" + }, + "koji_config": { + "cert": "/etc/koschei/koschei.pem", + "ca": "/etc/koschei/fedora-ca.cert", + "server_ca": "/etc/koschei/fedora-ca.cert", + }, + "flask": { + "SECRET_KEY": "{{ koschei_flask_secret_key }}", + }, + "logging": { + "loggers": { + "": { + "level": "DEBUG", + "handlers": ["stderr", "email"], + }, + }, + "handlers": { + "email": { + "class": "logging.handlers.SMTPHandler", + "level": "WARN", + "mailhost": "localhost", + "fromaddr": "koschei@fedoraproject.org", + "toaddrs": ['msimacek@redhat.com', 'mizdebsk@redhat.com'], + "subject": "Koschei warning", + }, + }, + }, + "fedmsg-publisher": { + "enabled": True, + "modname": "koschei", + }, +# "services": { +# "polling": { +# "interval": 60, +# }, +# }, + "dependency": { + "repo_chache_items": 5, + "keep_build_deps_for": 2 + }, + "koji_config": { + "max_builds": 30 + }, +} + +# Local Variables: +# mode: Python +# End: +# vi: ft=python diff --git a/files/koschei/koschei.repo b/files/koschei/koschei.repo new file mode 100644 index 0000000000..265806e614 --- /dev/null +++ b/files/koschei/koschei.repo @@ -0,0 +1,13 @@ +[koschei-mizdebsk] +name=Koschei repo +baseurl=https://mizdebsk.fedorapeople.org/koschei/repo/ +enabled=1 +gpgcheck=0 +metadata_expire=60 + +[koschei-msimacek] +name=Koschei repo +baseurl=https://msimacek.fedorapeople.org/koschei/repo/ +enabled=1 +gpgcheck=0 +metadata_expire=60 diff --git a/playbooks/hosts/koschei.cloud.fedoraproject.org.yml b/playbooks/hosts/koschei.cloud.fedoraproject.org.yml index 4f681bda1e..200896779f 100644 --- a/playbooks/hosts/koschei.cloud.fedoraproject.org.yml +++ b/playbooks/hosts/koschei.cloud.fedoraproject.org.yml @@ -15,21 +15,101 @@ gather_facts: True user: fedora sudo: yes + tags: koschei vars_files: - /srv/web/infra/ansible/vars/global.yml - "/srv/private/ansible/vars.yml" - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml + vars: + packages: + - koschei + services: + - koschei-polling + - koschei-resolver + - koschei-scheduler + - koschei-watcher + # httpd is here temporarly only, it will be removed once koschei + # implements "base" role + - httpd + # flag controlling whether koji PEM private key and certificate + # should be deployed by playbook + cert: false + tasks: - include: "{{ tasks }}/growroot_cloud.yml" - include: "{{ tasks }}/cloud_setup_basic.yml" - include: "{{ tasks }}/postfix_basic.yml" + # Temporary yum repo hosted on fedorapeople, it will be replaced by + # Fedora infra repo once Koschei completes RFR. 
Copr can't be used + # because of limitations of Fedora cloud routing -- machines in + # different networks can't access each other, even through public IP + - name: add koschei yum repo + action: copy src="{{ files }}/koschei/koschei.repo" dest="/etc/yum.repos.d/koschei.repo" + + - name: yum update koschei package + yum: name={{item}} state=latest + with_items: "{{packages}}" + register: yumupdate + # TODO: restart httpd + tags: + - packages + + - name: stop koschei + action: service name={{item}} state=stopped + with_items: "{{services}}" + when: yumupdate.changed + + - name: install /etc/koschei/config.cfg file + template: src="{{ files }}/koschei/config.cfg.j2" dest="/etc/koschei/config.cfg" + notify: + - restart koschei + # TODO: restart httpd + tags: + - config + + - name: install koschei.pem koji key and cert + copy: > + src="{{ private }}/files/koschei/koschei.pem" + dest="/etc/koschei/koschei.pem" + owner=koschei + group=koschei + mode=0400 + when: cert + tags: + - config + + - name: install koji ca cert + copy: > + src="{{ puppet_private }}/fedora-ca.cert" + dest="/etc/koschei/fedora-ca.cert" + owner=root + group=root + mode=0644 + tags: + - config + + - name: run koschei migration + command: alembic -c /usr/share/koschei/alembic.ini upgrade head + sudo_user: koschei + when: yumupdate.changed + + - name: enable koschei to start + action: service name={{item}} state=running enabled=true + with_items: "{{services}}" + tags: + - service + handlers: - include: "{{ handlers }}/restart_services.yml" + - name: restart koschei + action: service name={{item}} state=restarted + with_items: "{{services}}" + - name: setup fedmsg hosts: koschei.cloud.fedoraproject.org user: root From e13fd6713a11811fcfe0ed22cc5f99d088c461d6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miroslav=20Such=C3=BD?= Date: Thu, 19 Mar 2015 15:05:55 +0000 Subject: [PATCH 04/20] swift can listen on public IP because haproxy is now proxy --- playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml b/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml index da0e57b751..b3da3f74df 100644 --- a/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml +++ b/playbooks/hosts/fed-cloud09.cloud.fedoraproject.org.yml @@ -285,7 +285,7 @@ register: SERVICE_ID - shell: source /root/keystonerc_admin && keystone endpoint-list | grep {{SERVICE_ID.stdout}} | awk '{print $2}' register: ENDPOINT_ID - - shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{controller_hostname}}:8080/v1/AUTH_%(tenant_id)s' --adminurl 'https://{{controller_private_ip}}:8080' --internalurl 'https://{{controller_private_ip}}:8080/v1/AUTH_%(tenant_id)s' ) || true + - shell: source /root/keystonerc_admin && keystone endpoint-list |grep {{SERVICE_ID.stdout}} |grep -v {{ controller_hostname }} && (keystone endpoint-delete {{ENDPOINT_ID.stdout}} && keystone endpoint-create --region 'RegionOne' --service {{SERVICE_ID.stdout}} --publicurl 'https://{{controller_hostname}}:8080/v1/AUTH_%(tenant_id)s' --adminurl 'https://{{controller_hostname}}:8080' --internalurl 'https://{{controller_hostname}}:8080/v1/AUTH_%(tenant_id)s' ) || true # swift_s3 - shell: source /root/keystonerc_admin && keystone service-list | grep 'swift_s3' | 
awk '{print $2}' register: SERVICE_ID From 857adabe4f3f0d291448eab9e70e35e5fbbdb9b0 Mon Sep 17 00:00:00 2001 From: Ralph Bean Date: Wed, 18 Mar 2015 14:28:55 +0000 Subject: [PATCH 05/20] Make that 15. --- playbooks/manual/restart-fedmsg-services.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/playbooks/manual/restart-fedmsg-services.yml b/playbooks/manual/restart-fedmsg-services.yml index 23e95bbcf9..5af80b64be 100644 --- a/playbooks/manual/restart-fedmsg-services.yml +++ b/playbooks/manual/restart-fedmsg-services.yml @@ -58,8 +58,8 @@ - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml tasks: - - name: schedule a 10 minute downtime. give notifs backend time to start up. - nagios: action=downtime minutes=10 service=host host={{ inventory_hostname }} + - name: schedule a 15 minute downtime. give notifs backend time to start up. + nagios: action=downtime minutes=15 service=host host={{ inventory_hostname }} delegate_to: noc01.phx2.fedoraproject.org ignore_errors: true From f26d80d3b15bd4d3a0fbad39e0db621b71966bc5 Mon Sep 17 00:00:00 2001 From: Ralph Bean Date: Thu, 19 Mar 2015 15:07:32 +0000 Subject: [PATCH 06/20] Try to do nagios downtimes better for fmn upgrades. --- playbooks/manual/upgrade/fmn.yml | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/playbooks/manual/upgrade/fmn.yml b/playbooks/manual/upgrade/fmn.yml index dc2eb85c23..65ad828e08 100644 --- a/playbooks/manual/upgrade/fmn.yml +++ b/playbooks/manual/upgrade/fmn.yml @@ -33,7 +33,7 @@ pre_tasks: - name: tell nagios to shush w.r.t. the frontend - nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} + nagios: action=downtime minutes=15 service=host host={{ inventory_hostname }} delegate_to: noc01.phx2.fedoraproject.org ignore_errors: true @@ -55,7 +55,7 @@ pre_tasks: - name: tell nagios to shush w.r.t. the backend - nagios: action=downtime minutes=60 service=host host={{ inventory_hostname }} + nagios: action=downtime minutes=15 service=host host={{ inventory_hostname }} delegate_to: noc01.phx2.fedoraproject.org ignore_errors: true @@ -76,11 +76,13 @@ - name: And... start the backend again service: name="fedmsg-hub" state=started - post_tasks: - - name: tell nagios to unshush w.r.t. the backend - nagios: action=unsilence service=host host={{ inventory_hostname }} - delegate_to: noc01.phx2.fedoraproject.org - ignore_errors: true + # Don't bother unshushing the backend here. it takes a few minutes to start + # up anyways, so just let the downtime expire. + #post_tasks: + #- name: tell nagios to unshush w.r.t. the backend + # nagios: action=unsilence service=host host={{ inventory_hostname }} + # delegate_to: noc01.phx2.fedoraproject.org + # ignore_errors: true - name: restart the frontend hosts: notifs-web;notifs-web-stg From 4dd084d85b7f1c5b9db04ddebe2f13ddd21efe49 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Thu, 19 Mar 2015 15:22:35 +0000 Subject: [PATCH 07/20] Add f21 sshd config --- roles/base/files/ssh/sshd_config.21 | 151 ++++++++++++++++++++++++++++ 1 file changed, 151 insertions(+) create mode 100644 roles/base/files/ssh/sshd_config.21 diff --git a/roles/base/files/ssh/sshd_config.21 b/roles/base/files/ssh/sshd_config.21 new file mode 100644 index 0000000000..080de0d1ca --- /dev/null +++ b/roles/base/files/ssh/sshd_config.21 @@ -0,0 +1,151 @@ +# $OpenBSD: sshd_config,v 1.89 2013/02/06 00:20:42 dtucker Exp $ + +# This is the sshd server system-wide configuration file. See +# sshd_config(5) for more information. 
+ +# This sshd was compiled with PATH=/usr/local/bin:/usr/bin + +# The strategy used for options in the default sshd_config shipped with +# OpenSSH is to specify options with their default value where +# possible, but leave them commented. Uncommented options override the +# default value. + +# If you want to change the port on a SELinux system, you have to tell +# SELinux about this change. +# semanage port -a -t ssh_port_t -p tcp #PORTNUMBER +# +#Port 22 +#AddressFamily any +#ListenAddress 0.0.0.0 +#ListenAddress :: + +# The default requires explicit activation of protocol 1 +#Protocol 2 + +# HostKey for protocol version 1 +#HostKey /etc/ssh/ssh_host_key +# HostKeys for protocol version 2 +#HostKey /etc/ssh/ssh_host_rsa_key +#HostKey /etc/ssh/ssh_host_dsa_key +#HostKey /etc/ssh/ssh_host_ecdsa_key + +# Lifetime and size of ephemeral version 1 server key +#KeyRegenerationInterval 1h +#ServerKeyBits 1024 + +# Logging +# obsoletes QuietMode and FascistLogging +#SyslogFacility AUTH +SyslogFacility AUTHPRIV +#LogLevel INFO + +# Authentication: + +#LoginGraceTime 2m +PermitRootLogin without-password +StrictModes yes +PasswordAuthentication no + +#MaxAuthTries 6 +#MaxSessions 10 + +#RSAAuthentication yes +#PubkeyAuthentication yes + +# The default is to check both .ssh/authorized_keys and .ssh/authorized_keys2 +# but this is overridden so installations will only check .ssh/authorized_keys +AuthorizedKeysFile .ssh/authorized_keys + +#AuthorizedPrincipalsFile none + +#AuthorizedKeysCommand none +#AuthorizedKeysCommandUser nobody + +# For this to work you will also need host keys in /etc/ssh/ssh_known_hosts +#RhostsRSAAuthentication no +# similar for protocol version 2 +#HostbasedAuthentication no +# Change to yes if you don't trust ~/.ssh/known_hosts for +# RhostsRSAAuthentication and HostbasedAuthentication +#IgnoreUserKnownHosts no +# Don't read the user's ~/.rhosts and ~/.shosts files +#IgnoreRhosts yes + +# Change to no to disable s/key passwords +#ChallengeResponseAuthentication yes +ChallengeResponseAuthentication no + +# Kerberos options +#KerberosAuthentication no +#KerberosOrLocalPasswd yes +#KerberosTicketCleanup yes +#KerberosGetAFSToken no +#KerberosUseKuserok yes + +# GSSAPI options +#GSSAPIAuthentication no +GSSAPIAuthentication yes +#GSSAPICleanupCredentials yes +GSSAPICleanupCredentials yes +#GSSAPIStrictAcceptorCheck yes +#GSSAPIKeyExchange no + +# Set this to 'yes' to enable PAM authentication, account processing, +# and session processing. If this is enabled, PAM authentication will +# be allowed through the ChallengeResponseAuthentication and +# PasswordAuthentication. Depending on your PAM configuration, +# PAM authentication via ChallengeResponseAuthentication may bypass +# the setting of "PermitRootLogin without-password". +# If you just want the PAM account and session checks to run without +# PAM authentication, then enable this but set PasswordAuthentication +# and ChallengeResponseAuthentication to 'no'. +# WARNING: 'UsePAM no' is not supported in Fedora and may cause several +# problems. +#UsePAM no +UsePAM yes + +#AllowAgentForwarding yes +#AllowTcpForwarding yes +#GatewayPorts no +#X11Forwarding no +X11Forwarding yes +#X11DisplayOffset 10 +#X11UseLocalhost yes +#PrintMotd yes +#PrintLastLog yes +#TCPKeepAlive yes +#UseLogin no +UsePrivilegeSeparation sandbox # Default for new installations. 
+#PermitUserEnvironment no +#Compression delayed +#ClientAliveInterval 0 +#ClientAliveCountMax 3 +#ShowPatchLevel no +#UseDNS yes +#PidFile /var/run/sshd.pid +#MaxStartups 10:30:100 +#PermitTunnel no +#ChrootDirectory none +#VersionAddendum none + +# no default banner path +#Banner none + +# Accept locale-related environment variables +AcceptEnv LANG LC_CTYPE LC_NUMERIC LC_TIME LC_COLLATE LC_MONETARY LC_MESSAGES +AcceptEnv LC_PAPER LC_NAME LC_ADDRESS LC_TELEPHONE LC_MEASUREMENT +AcceptEnv LC_IDENTIFICATION LC_ALL LANGUAGE +AcceptEnv XMODIFIERS + +# override default of no subsystems +Subsystem sftp /usr/libexec/openssh/sftp-server + +# Uncomment this if you want to use .local domain +#Host *.local +# CheckHostIP no + +# Example of overriding settings on a per-user basis +#Match User anoncvs +# X11Forwarding no +# AllowTcpForwarding no +# ForceCommand cvs server From 6d8983a34103b7424855ceceb05e7b74a755e41d Mon Sep 17 00:00:00 2001 From: Ralph Bean Date: Thu, 19 Mar 2015 15:32:05 +0000 Subject: [PATCH 08/20] Copy admin util scripts for notifs-backend01. --- .../backend/files/bin/fmn-disable-account | 47 +++++++++++++++++++ roles/notifs/backend/tasks/main.yml | 6 +++ 2 files changed, 53 insertions(+) create mode 100755 roles/notifs/backend/files/bin/fmn-disable-account diff --git a/roles/notifs/backend/files/bin/fmn-disable-account b/roles/notifs/backend/files/bin/fmn-disable-account new file mode 100755 index 0000000000..33f9bda44d --- /dev/null +++ b/roles/notifs/backend/files/bin/fmn-disable-account @@ -0,0 +1,47 @@ +#!/usr/bin/env python +""" fmn-disable-account USER + +Disables the FMN account for a user. +""" + +import argparse + +import fedmsg +import fedmsg.config + +import fmn.lib +import fmn.lib.models + + +def parse_args(): + parser = argparse.ArgumentParser(__doc__) + parser.add_argument('user', help='FAS username to disable.') + parser.add_argument('--context', nargs='+', default=['irc', 'email'], + help="Contexts to disable. 
Defaults to all.") + return parser.parse_args() + +def disable(session, user, contexts): + openid = '%s.id.fedoraproject.org' % user + for context in contexts: + pref = fmn.lib.models.Preference.load(session, openid, context) + if pref: + print "Disabling %r for %r" % (context, openid) + pref.set_enabled(session, False) + else: + print "No context %r found for %r" % (context, openid) + + +if __name__ == '__main__': + args = parse_args() + + config = fedmsg.config.load_config() + config.update({ + 'active': True, + 'name': 'relay_inbound', + 'cert_prefix': 'fmn', + }) + fedmsg.init(**config) + + session = fmn.lib.models.init(config['fmn.sqlalchemy.uri']) + + disable(session, args.user, args.context) diff --git a/roles/notifs/backend/tasks/main.yml b/roles/notifs/backend/tasks/main.yml index fdcef7a0a7..d74f18eb7c 100644 --- a/roles/notifs/backend/tasks/main.yml +++ b/roles/notifs/backend/tasks/main.yml @@ -34,3 +34,9 @@ tags: - notifs - notifs/backend + +- name: copy over admin utility scripts + synchronize: src=bin/ dest=/usr/local/bin/ + tags: + - notifs + - notifs/backend From b84251df18f4f9dc99489f8788fc317a2892aba5 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Thu, 19 Mar 2015 15:56:46 +0000 Subject: [PATCH 09/20] Add apprentice to koji stg --- inventory/group_vars/koji-stg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/group_vars/koji-stg b/inventory/group_vars/koji-stg index 13e761972e..8009bc0adf 100644 --- a/inventory/group_vars/koji-stg +++ b/inventory/group_vars/koji-stg @@ -12,7 +12,7 @@ tcp_ports: [ 80, 443, 111, 2049, udp_ports: [ 111, 2049 ] -fas_client_groups: sysadmin-releng +fas_client_groups: sysadmin-releng,fi-apprentice # These are consumed by a task in roles/fedmsg/base/main.yml fedmsg_certs: From 029e9eafad803a93ffbec47501ca5455f182aaa6 Mon Sep 17 00:00:00 2001 From: Patrick Uiterwijk Date: Thu, 19 Mar 2015 17:48:05 +0000 Subject: [PATCH 10/20] Switch FedOAuth to Ipsilon in stg --- roles/haproxy/files/haproxy.cfg.stg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/roles/haproxy/files/haproxy.cfg.stg b/roles/haproxy/files/haproxy.cfg.stg index 4a1950b64b..7fe3316ce2 100644 --- a/roles/haproxy/files/haproxy.cfg.stg +++ b/roles/haproxy/files/haproxy.cfg.stg @@ -93,9 +93,9 @@ listen totpcgiprovision 0.0.0.0:10019 server fas01 fas01:8444 check inter 5s rise 1 fall 2 option httpchk GET /index.cgi -listen fedoauth 0.0.0.0:10020 +listen ipsilon 0.0.0.0:10020 balance hdr(appserver) - server fedoauth01 fedoauth01:80 check inter 10s rise 1 fall 2 + server ipsilon01 ipsilon01:80 check inter 10s rise 1 fall 2 option httpchk GET /static/fedora/fedora-authn-logo-white.png listen askbot 0.0.0.0:10021 From fd417782c1c8ba8c6518c320de9a500eaade78ae Mon Sep 17 00:00:00 2001 From: Patrick Uiterwijk Date: Thu, 19 Mar 2015 17:55:18 +0000 Subject: [PATCH 11/20] And fix the checkpath for ipsilon --- roles/haproxy/files/haproxy.cfg.stg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/haproxy/files/haproxy.cfg.stg b/roles/haproxy/files/haproxy.cfg.stg index 7fe3316ce2..c8e8807fa1 100644 --- a/roles/haproxy/files/haproxy.cfg.stg +++ b/roles/haproxy/files/haproxy.cfg.stg @@ -96,7 +96,7 @@ listen totpcgiprovision 0.0.0.0:10019 listen ipsilon 0.0.0.0:10020 balance hdr(appserver) server ipsilon01 ipsilon01:80 check inter 10s rise 1 fall 2 - option httpchk GET /static/fedora/fedora-authn-logo-white.png + option httpchk GET /ui/fedora/repeater.png listen askbot 0.0.0.0:10021 balance hdr(appserver) From 
a8fa64de7db79c77a4f13cce605fa2e37953d882 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Thu, 19 Mar 2015 21:23:26 +0000 Subject: [PATCH 12/20] OOpsie --- roles/nagios_server/files/irc-colorize.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/roles/nagios_server/files/irc-colorize.py b/roles/nagios_server/files/irc-colorize.py index 3de3d8ec02..d62d3658a6 100755 --- a/roles/nagios_server/files/irc-colorize.py +++ b/roles/nagios_server/files/irc-colorize.py @@ -29,7 +29,7 @@ mirc_colors = { mapping = { 'RECOVERY': 'green', 'OK': 'green', - 'ACKNOWLEDGEMENT', 'yellow', + 'ACKNOWLEDGEMENT': 'yellow', 'UNKNOWN': 'purple', 'WARNING': 'teal', # 'red' probably makes the most sense here, but it behaved oddly From c1d072a1b56468b7631fa86a23bb4e299a437c92 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 20 Mar 2015 12:50:37 +0000 Subject: [PATCH 13/20] Comment this other selinux task since we aren't installing a custom selinux module here anymore. --- roles/mirrormanager/mirrorlist2/tasks/main.yml | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/roles/mirrormanager/mirrorlist2/tasks/main.yml b/roles/mirrormanager/mirrorlist2/tasks/main.yml index e47409f5c2..786432f936 100644 --- a/roles/mirrormanager/mirrorlist2/tasks/main.yml +++ b/roles/mirrormanager/mirrorlist2/tasks/main.yml @@ -58,14 +58,14 @@ - mirrorlist2 - selinux -- name: check to see if its even installed yet - shell: semodule -l | grep mirrorlist2 | wc -l - register: ficgeneral_grep - always_run: true - changed_when: "'0' in ficgeneral_grep.stdout" - tags: - - mirrorlist2 - - selinux +#- name: check to see if its even installed yet +# shell: semodule -l | grep mirrorlist2 | wc -l +# register: ficgeneral_grep +# always_run: true +# changed_when: "'0' in ficgeneral_grep.stdout" +# tags: +# - mirrorlist2 +# - selinux #- name: install our general mirrorlist2 selinux module # command: semodule -i /usr/share/mirrorlist2/mirrorlist2.pp From 71842302f473eec6910e6443606c169996f13f5d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miroslav=20Such=C3=BD?= Date: Fri, 20 Mar 2015 13:04:24 +0000 Subject: [PATCH 14/20] attach volumes --- tasks/persistent_cloud_new.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tasks/persistent_cloud_new.yml b/tasks/persistent_cloud_new.yml index 6314a32f92..bcdf73f497 100644 --- a/tasks/persistent_cloud_new.yml +++ b/tasks/persistent_cloud_new.yml @@ -61,5 +61,7 @@ # local_action: "shell nova --insecure {{_OS_AUTH_OPTS}} volume-attach {{inventory_hostname}} {{item}} " # with_items: volumes # when: volumes is defined and vm_status.stdout != "ACTIVE" -nova --os-username=admin --os-password=c24da73c18e7880cbb6f --os-tenant-name=copr --os-auth-url="https://fed-cloud09.cloud.fedoraproject.org:5000/v2.0" volume-attach test ff735862-ac95-4b7f-82ba-081583e46898 -nova --os-username=admin --os-password=c24da73c18e7880cbb6f --os-tenant-name=copr --os-auth-url="https://fed-cloud09.cloud.fedoraproject.org:5000/v2.0" volume-list |grep foo + +# instance can be both id and name, volume must be id +- shell: source keystonerc_admin && nova --os-tenant-name={{inventory_tenant}} volume-list | grep ' {{item}} ' | grep 'available' && nova --os-tenant-name={{inventory_tenant}} volume-attach "{{inventory_instance_name}}" "{{item}}" + with_items: inventory_volumes From 0f660ef5c47dd85a7d627da8dd11e5d3f7e110fa Mon Sep 17 00:00:00 2001 From: Ralph Bean Date: Fri, 20 Mar 2015 14:23:58 +0000 Subject: [PATCH 15/20] Update our git hook. 
With the changes from here: https://github.com/fedora-infra/fedmsg/pull/327 --- roles/git/hooks/files/post-receive-fedmsg | 29 ++++++++++++++++++----- 1 file changed, 23 insertions(+), 6 deletions(-) mode change 100644 => 100755 roles/git/hooks/files/post-receive-fedmsg diff --git a/roles/git/hooks/files/post-receive-fedmsg b/roles/git/hooks/files/post-receive-fedmsg old mode 100644 new mode 100755 index deb0ae1f5f..4169b91b53 --- a/roles/git/hooks/files/post-receive-fedmsg +++ b/roles/git/hooks/files/post-receive-fedmsg @@ -2,6 +2,7 @@ import getpass import os +import subprocess as sp import sys from collections import defaultdict @@ -26,15 +27,19 @@ config['active'] = True config['endpoints']['relay_inbound'] = config['relay_inbound'] fedmsg.init(name='relay_inbound', cert_prefix='scm', **config) + def revs_between(head, base): """ Yield revisions between HEAD and BASE. """ - # XXX REALLY, just yield head. - # We used to try to navigate the git history and return all the commits in - # between, but we got into infinite loops more than once because git. - # We could shell out to 'git rev-list head...base', but I'm just not ready - # to do that yet. - yield head.id + # pygit2 can't do a rev-list yet, so we have to shell out.. silly. + cmd = '/usr/bin/git rev-list %s...%s' % (head.id, base.id) + proc = sp.Popen(cmd.split(), stdout=sp.PIPE, stderr=sp.PIPE, cwd=abspath) + stdout, stderr = proc.communicate() + if proc.returncode != 0: + raise IOError('git rev-list failed: %r, err: %r' % (stdout, stderr)) + + for line in stdout.strip().split('\n'): + yield line.strip() def build_stats(commit): @@ -62,6 +67,8 @@ def build_stats(commit): return files, total +seen = [] + # Read in all the rev information git-receive-pack hands us. lines = [line.split() for line in sys.stdin.readlines()] for line in lines: @@ -112,6 +119,16 @@ for line in lines: print "* Publishing information for %i commits" % len(commits) for commit in commits: + # Keep track of whether or not we have already published this commit on + # another branch or not. It is conceivable that someone could make a + # commit to a number of branches, and push them all at the same time. + # Make a note in the fedmsg payload so we can try to reduce spam at a + # later stage. 
+ if commit['rev'] in seen: + commit['seen'] = True + else: + commit['seen'] = False + seen.append(commit['rev']) if commit is None: continue From d7f41916869bc60bbe809c8516151b380a867ca6 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 20 Mar 2015 14:25:31 +0000 Subject: [PATCH 16/20] Convert mirrorlist-osuosl to mm2 --- inventory/host_vars/mirrorlist-osuosl.fedoraproject.org | 4 ++-- inventory/inventory | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org b/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org index 2a390d23d0..c9b86cc9be 100644 --- a/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org +++ b/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org @@ -2,8 +2,8 @@ nm: 255.255.255.192 gw: 140.211.169.193 dns: 8.8.8.8 -ks_url: http://209.132.181.6/repo/rhel/ks/kvm-rhel-6 -ks_repo: http://209.132.181.6/repo/rhel/RHEL6-x86_64/ +ks_url: http://209.132.181.6/repo/rhel/ks/kvm-rhel-7 +ks_repo: http://209.132.181.6/repo/rhel/RHEL7-x86_64/ volgroup: /dev/vg_server eth0_ip: 140.211.169.228 vmhost: osuosl02.fedoraproject.org diff --git a/inventory/inventory b/inventory/inventory index e6eb5f40f9..2a4fda80c0 100644 --- a/inventory/inventory +++ b/inventory/inventory @@ -344,12 +344,12 @@ memcached01.phx2.fedoraproject.org memcached02.phx2.fedoraproject.org [mirrorlist] -mirrorlist-osuosl.fedoraproject.org mirrorlist-ibiblio.fedoraproject.org mirrorlist-dedicatedsolutions.fedoraproject.org [mirrorlist2] mirrorlist-host1plus.fedoraproject.org +mirrorlist-osuosl.fedoraproject.org mirrorlist-phx2.phx2.fedoraproject.org [mirrorlist2-stg] From c54c13df3922d6d0fe52fb6044cddc0c3c024188 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 20 Mar 2015 15:40:43 +0000 Subject: [PATCH 17/20] Duh. This is an external host. 
--- inventory/host_vars/mirrorlist-osuosl.fedoraproject.org | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org b/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org index c9b86cc9be..aac1296c3b 100644 --- a/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org +++ b/inventory/host_vars/mirrorlist-osuosl.fedoraproject.org @@ -2,7 +2,7 @@ nm: 255.255.255.192 gw: 140.211.169.193 dns: 8.8.8.8 -ks_url: http://209.132.181.6/repo/rhel/ks/kvm-rhel-7 +ks_url: http://209.132.181.6/repo/rhel/ks/kvm-rhel-7-ext ks_repo: http://209.132.181.6/repo/rhel/RHEL7-x86_64/ volgroup: /dev/vg_server eth0_ip: 140.211.169.228 From b81790c07f1e0b99920104e3c8218379f9ea9ad4 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 20 Mar 2015 16:19:57 +0000 Subject: [PATCH 18/20] Move mirrorlist-dedicatedsolutions --- .../host_vars/mirrorlist-dedicatedsolutions.fedoraproject.org | 4 ++-- inventory/inventory | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/inventory/host_vars/mirrorlist-dedicatedsolutions.fedoraproject.org b/inventory/host_vars/mirrorlist-dedicatedsolutions.fedoraproject.org index a979ee00af..aa129fb17b 100644 --- a/inventory/host_vars/mirrorlist-dedicatedsolutions.fedoraproject.org +++ b/inventory/host_vars/mirrorlist-dedicatedsolutions.fedoraproject.org @@ -2,8 +2,8 @@ nm: 255.255.255.0 gw: 67.219.144.1 dns: 8.8.8.8 -ks_url: http://209.132.181.6/repo/rhel/ks/kvm-rhel-6 -ks_repo: http://209.132.181.6/repo/rhel/RHEL6-x86_64/ +ks_url: http://209.132.181.6/repo/rhel/ks/kvm-rhel-7-ext +ks_repo: http://209.132.181.6/repo/rhel/RHEL7-x86_64/ volgroup: /dev/vg_virthost eth0_ip: 67.219.144.67 vmhost: dedicatedsolutions01.fedoraproject.org diff --git a/inventory/inventory b/inventory/inventory index 2a4fda80c0..053e34a0ea 100644 --- a/inventory/inventory +++ b/inventory/inventory @@ -345,9 +345,9 @@ memcached02.phx2.fedoraproject.org [mirrorlist] mirrorlist-ibiblio.fedoraproject.org -mirrorlist-dedicatedsolutions.fedoraproject.org [mirrorlist2] +mirrorlist-dedicatedsolutions.fedoraproject.org mirrorlist-host1plus.fedoraproject.org mirrorlist-osuosl.fedoraproject.org mirrorlist-phx2.phx2.fedoraproject.org From 05f1c5c03dfa3987c20d26558cc278711c3d2163 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 20 Mar 2015 16:40:00 +0000 Subject: [PATCH 19/20] Add a proper resolv.conf for dedicatedsolutions --- roles/base/files/resolv.conf/dedicatedsolutions | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 roles/base/files/resolv.conf/dedicatedsolutions diff --git a/roles/base/files/resolv.conf/dedicatedsolutions b/roles/base/files/resolv.conf/dedicatedsolutions new file mode 100644 index 0000000000..2cfe4d6262 --- /dev/null +++ b/roles/base/files/resolv.conf/dedicatedsolutions @@ -0,0 +1,3 @@ +search vpn.fedoraproject.org fedoraproject.org +nameserver 8.8.8.8 +nameserver 8.8.4.4 From f2eb5ef7000bd258006379042ba4360cd43cce29 Mon Sep 17 00:00:00 2001 From: Kevin Fenzi Date: Fri, 20 Mar 2015 16:50:31 +0000 Subject: [PATCH 20/20] Move mirrorlist-ibiblio over to mm2 and then there were 0 mm1 ones. 
--- .../mirrorlist-ibiblio.fedoraproject.org | 4 +- inventory/inventory | 4 +- playbooks/groups/mirrorlist.yml | 56 - .../mirrorlist/files/mirrorlist_server.py | 1048 ----------------- .../mirrorlist/files/mm-authorized_key | 1 - .../mirrorlist/files/mm_sync_data | 4 - .../mirrorlist/files/supervisord.conf | 67 -- roles/mirrormanager/mirrorlist/tasks/main.yml | 60 - .../templates/mirrorlist-server.conf | 56 - roles/mirrormanager/mirrorlist/vars/main.yml | 4 - 10 files changed, 3 insertions(+), 1301 deletions(-) delete mode 100644 playbooks/groups/mirrorlist.yml delete mode 100755 roles/mirrormanager/mirrorlist/files/mirrorlist_server.py delete mode 100644 roles/mirrormanager/mirrorlist/files/mm-authorized_key delete mode 100644 roles/mirrormanager/mirrorlist/files/mm_sync_data delete mode 100644 roles/mirrormanager/mirrorlist/files/supervisord.conf delete mode 100644 roles/mirrormanager/mirrorlist/tasks/main.yml delete mode 100644 roles/mirrormanager/mirrorlist/templates/mirrorlist-server.conf delete mode 100644 roles/mirrormanager/mirrorlist/vars/main.yml diff --git a/inventory/host_vars/mirrorlist-ibiblio.fedoraproject.org b/inventory/host_vars/mirrorlist-ibiblio.fedoraproject.org index 0ff64f12e2..d9e5bf3439 100644 --- a/inventory/host_vars/mirrorlist-ibiblio.fedoraproject.org +++ b/inventory/host_vars/mirrorlist-ibiblio.fedoraproject.org @@ -2,8 +2,8 @@ nm: 255.255.255.128 gw: 152.19.134.129 dns: 152.2.21.1 -ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-6 -ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL6-x86_64/ +ks_url: http://infrastructure.fedoraproject.org/repo/rhel/ks/kvm-rhel-7-ext +ks_repo: http://infrastructure.fedoraproject.org/repo/rhel/RHEL7-x86_64/ volgroup: /dev/vg_ibiblio04 eth0_ip: 152.19.134.194 vmhost: ibiblio04.fedoraproject.org diff --git a/inventory/inventory b/inventory/inventory index 053e34a0ea..bda68950b2 100644 --- a/inventory/inventory +++ b/inventory/inventory @@ -343,12 +343,10 @@ fedoauth01.stg.phx2.fedoraproject.org memcached01.phx2.fedoraproject.org memcached02.phx2.fedoraproject.org -[mirrorlist] -mirrorlist-ibiblio.fedoraproject.org - [mirrorlist2] mirrorlist-dedicatedsolutions.fedoraproject.org mirrorlist-host1plus.fedoraproject.org +mirrorlist-ibiblio.fedoraproject.org mirrorlist-osuosl.fedoraproject.org mirrorlist-phx2.phx2.fedoraproject.org diff --git a/playbooks/groups/mirrorlist.yml b/playbooks/groups/mirrorlist.yml deleted file mode 100644 index dc039bdeae..0000000000 --- a/playbooks/groups/mirrorlist.yml +++ /dev/null @@ -1,56 +0,0 @@ -# create a new mirrorlist server -# NOTE: should be used with --limit most of the time -# NOTE: make sure there is room/space for this server on the vmhost -# NOTE: most of these vars_path come from group_vars/mirrorlist or from hostvars - -- name: make mirrorlist app server - hosts: mirrorlist - user: root - gather_facts: False - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - tasks: - - include: "{{ tasks }}/virt_instance_create.yml" - - handlers: - - include: "{{ handlers }}/restart_services.yml" - -- name: make the box be real - hosts: mirrorlist - user: root - gather_facts: True - - vars_files: - - /srv/web/infra/ansible/vars/global.yml - - "/srv/private/ansible/vars.yml" - - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml - - roles: - - base - - rkhunter - - denyhosts - - nagios_client - - geoip - - hosts - - fas_client - - collectd/base - - 
mirrormanager/mirrorlist - - sudo - - { role: openvpn/client, - when: env != "staging" } - - tasks: - # this is how you include other task lists - - include: "{{ tasks }}/yumrepos.yml" - - include: "{{ tasks }}/2fa_client.yml" - - include: "{{ tasks }}/motd.yml" - - include: "{{ tasks }}/apache.yml" - - include: "{{ tasks }}/mod_wsgi.yml" - - - handlers: - - include: "{{ handlers }}/restart_services.yml" diff --git a/roles/mirrormanager/mirrorlist/files/mirrorlist_server.py b/roles/mirrormanager/mirrorlist/files/mirrorlist_server.py deleted file mode 100755 index 99c7a0c3fe..0000000000 --- a/roles/mirrormanager/mirrorlist/files/mirrorlist_server.py +++ /dev/null @@ -1,1048 +0,0 @@ -#!/usr/bin/python -# -# Copyright (c) 2007-2013 Dell, Inc. -# by Matt Domsch -# Licensed under the MIT/X11 license - -# standard library modules in alphabetical order -import datetime -import getopt -import logging -import logging.handlers -import os -import random -import cPickle as pickle -import select -import signal -import socket -from SocketServer import StreamRequestHandler, ForkingMixIn, UnixStreamServer, BaseServer -import sys -from string import zfill, atoi -import time -import traceback - -try: - import threading -except ImportError: - import dummy_threading as threading - -# not-so-standard library modules that this program needs -from IPy import IP -import GeoIP -import radix -from weighted_shuffle import weighted_shuffle - -# can be overridden on the command line -pidfile = '/var/run/mirrormanager/mirrorlist_server.pid' -socketfile = '/var/run/mirrormanager/mirrorlist_server.sock' -cachefile = '/var/lib/mirrormanager/mirrorlist_cache.pkl' -internet2_netblocks_file = '/var/lib/mirrormanager/i2_netblocks.txt' -global_netblocks_file = '/var/lib/mirrormanager/global_netblocks.txt' -logfile = None -debug = False -must_die = False -# at a point in time when we're no longer serving content for versions -# that don't use yum prioritymethod=fallback -# (e.g. after Fedora 7 is past end-of-life) -# then we can set this value to True -# this only affects results requested using path=... -# for dirs which aren't repositories (such as iso/) -# because we don't know the Version associated with that dir here. -default_ordered_mirrorlist = False - -gipv4 = None -gipv6 = None - -# key is strings in tuple (repo.prefix, arch) -mirrorlist_cache = {} - -# key is directory.name, returns keys for mirrorlist_cache -directory_name_to_mirrorlist = {} - -# key is an IPy.IP structure, value is list of host ids -host_netblock_cache = {} - -# key is hostid, value is list of countries to allow -host_country_allowed_cache = {} - -repo_arch_to_directoryname = {} - -# redirect from a repo with one name to a repo with another -repo_redirect = {} -country_continent_redirect_cache = {} - -# our own private copy of country_continents to be edited -country_continents = GeoIP.country_continents - -disabled_repositories = {} -host_bandwidth_cache = {} -host_country_cache = {} -host_max_connections_cache = {} -file_details_cache = {} -hcurl_cache = {} -asn_host_cache = {} -internet2_tree = radix.Radix() -global_tree = radix.Radix() -host_netblocks_tree = radix.Radix() -netblock_country_tree = radix.Radix() -location_cache = {} -netblock_country_cache = {} - -## Set up our syslog data. 
-syslogger = logging.getLogger('mirrormanager') -syslogger.setLevel(logging.INFO) -handler = logging.handlers.SysLogHandler(address='/dev/log', facility=logging.handlers.SysLogHandler.LOG_LOCAL4) -syslogger.addHandler(handler) - -def lookup_ip_asn(tree, ip): - """ @t is a radix tree - @ip is an IPy.IP object which may be contained in an entry in l - """ - node = tree.search_best(ip.strNormal()) - if node is None: - return None - return node.data['asn'] - - -def uniqueify(seq, idfun=None): - # order preserving - if idfun is None: - def idfun(x): return x - seen = {} - result = [] - for item in seq: - marker = idfun(item) - # in old Python versions: - # if seen.has_key(marker) - # but in new ones: - if marker in seen: continue - seen[marker] = 1 - result.append(item) - return result - -##### Metalink Support ##### - -def metalink_header(): - # fixme add alternate format pubdate when specified - pubdate = datetime.datetime.utcnow().strftime("%a, %d %b %Y %H:%M:%S GMT") - doc = '' - doc += '\n' - doc += '\n' - doc += indent(2) + '\n' % (file) - y = detailslist[0] - - def details(y, indentlevel=2): - doc = '' - if y['timestamp'] is not None: - doc += indent(indentlevel+1) + '%s\n' % y['timestamp'] - if y['size'] is not None: - doc += indent(indentlevel+1) + '%s\n' % y['size'] - doc += indent(indentlevel+1) + '\n' - hashes = ('md5', 'sha1', 'sha256', 'sha512') - for h in hashes: - if y[h] is not None: - doc += indent(indentlevel+2) + '%s\n' % (h, y[h]) - doc += indent(indentlevel+1) + '\n' - return doc - - doc += details(y, 2) - # there can be multiple files - if len(detailslist) > 1: - doc += indent(3) + '\n' - for y in detailslist[1:]: - doc += indent(4) + '\n' - doc += details(y,5) - doc += indent(4) + '\n' - doc += indent(3) + '\n' - - doc += indent(3) + '\n' - for (hostid, hcurls) in hosts_and_urls: - private = '' - if hostid not in cache['global']: - private = 'mm0:private="True"' - for url in hcurls: - protocol = url.split(':')[0] - # FIXME January 2010 - # adding protocol= here is not part of the Metalink 3.0 spec, - # but MirrorManager 1.2.6 used it accidentally, as did yum 3.2.20-3 as released - # in Fedora 8, 9, and 10. After those three are EOL (~January 2010), the - # extra protocol= can be removed. - doc += indent(4) + '' % (protocol, protocol, host_country_cache[hostid].upper(), preference, private) - doc += url - doc += '\n' - preference = max(preference-1, 1) - doc += indent(3) + '\n' - doc += indent(2) + '\n' - doc += indent(1) + '\n' - doc += '\n' - return ('metalink', 200, doc) - -def tree_lookup(tree, ip, field, maxResults=None): - # fast lookup in the tree; if present, find all the matching values by deleting the found one and searching again - # this is safe w/o copying the tree again only because this is the only place the tree is used, and - # we'll get a new copy of the tree from our parent the next time it fork()s. 
- # returns a list of tuples (prefix, data) - result = [] - len_data = 0 - if ip is None: - return result - node = tree.search_best(ip.strNormal()) - while node is not None: - prefix = node.prefix - if type(node.data[field]) == list: - len_data += len(node.data[field]) - else: - len_data += 1 - t = (prefix, node.data[field],) - result.append(t) - if maxResults is None or len_data < maxResults: - tree.delete(prefix) - node = tree.search_best(ip.strNormal()) - else: - break - return result - -def trim_by_client_country(s, clientCountry): - if clientCountry is None: - return s - r = s.copy() - for hostid in s: - if hostid in host_country_allowed_cache and \ - clientCountry not in host_country_allowed_cache[hostid]: - r.remove(hostid) - return r - -def shuffle(s): - l = [] - for hostid in s: - item = (host_bandwidth_cache[hostid], hostid) - l.append(item) - newlist = weighted_shuffle(l) - results = [] - for (bandwidth, hostid) in newlist: - results.append(hostid) - return results - -continents = {} - -def handle_country_continent_redirect(): - new_country_continents = GeoIP.country_continents - for country, continent in country_continent_redirect_cache.iteritems(): - new_country_continents[country] = continent - global country_continents - country_continents = new_country_continents - -def setup_continents(): - new_continents = {} - handle_country_continent_redirect() - for c in country_continents.keys(): - continent = country_continents[c] - if continent not in new_continents: - new_continents[continent] = [c] - else: - new_continents[continent].append(c) - global continents - continents = new_continents - -def do_global(kwargs, cache, clientCountry, header): - c = trim_by_client_country(cache['global'], clientCountry) - header += 'country = global ' - return (header, c) - -def do_countrylist(kwargs, cache, clientCountry, requested_countries, header): - - def collapse(d): - """ collapses a dict {key:set(hostids)} into a set of hostids """ - s = set() - for country, hostids in d.iteritems(): - for hostid in hostids: - s.add(hostid) - return s - - country_cache = {} - for c in requested_countries: - if c in cache['byCountry']: - country_cache[c] = cache['byCountry'][c] - header += 'country = %s ' % c - s = collapse(country_cache) - s = trim_by_client_country(s, clientCountry) - return (header, s) - -def get_same_continent_countries(clientCountry, requested_countries): - result = [] - for r in requested_countries: - if r in country_continents: - requestedCountries = [c.upper() for c in continents[country_continents[r]] \ - if c != clientCountry ] - result.extend(requestedCountries) - uniqueify(result) - return result - -def do_continent(kwargs, cache, clientCountry, requested_countries, header): - if len(requested_countries) > 0: - rc = requested_countries - else: - rc = [clientCountry] - clist = get_same_continent_countries(clientCountry, rc) - return do_countrylist(kwargs, cache, clientCountry, clist, header) - -def do_country(kwargs, cache, clientCountry, requested_countries, header): - if 'GLOBAL' in requested_countries: - return do_global(kwargs, cache, clientCountry, header) - return do_countrylist(kwargs, cache, clientCountry, requested_countries, header) - -def do_netblocks(kwargs, cache, header): - hostresults = set() - if not kwargs.has_key('netblock') or kwargs['netblock'] == "1": - tree_results = tree_lookup(host_netblocks_tree, kwargs['IP'], 'hosts') - for (prefix, hostids) in tree_results: - for hostid in hostids: - if hostid in cache['byHostId']: - hostresults.add((prefix, 
hostid,)) - header += 'Using preferred netblock ' - return (header, hostresults) - -def do_internet2(kwargs, cache, clientCountry, header): - hostresults = set() - ip = kwargs['IP'] - if ip is None: - return (header, hostresults) - asn = lookup_ip_asn(internet2_tree, ip) - if asn is not None: - header += 'Using Internet2 ' - if clientCountry is not None and clientCountry in cache['byCountryInternet2']: - hostresults = cache['byCountryInternet2'][clientCountry] - hostresults = trim_by_client_country(hostresults, clientCountry) - return (header, hostresults) - -def do_asn(kwargs, cache, header): - hostresults = set() - ip = kwargs['IP'] - if ip is None: - return (header, hostresults) - asn = lookup_ip_asn(global_tree, ip) - if asn is not None and asn in asn_host_cache: - for hostid in asn_host_cache[asn]: - if hostid in cache['byHostId']: - hostresults.add(hostid) - header += 'Using ASN %s ' % asn - return (header, hostresults) - -def do_geoip(kwargs, cache, clientCountry, header): - hostresults = set() - if clientCountry is not None and clientCountry in cache['byCountry']: - hostresults = cache['byCountry'][clientCountry] - header += 'country = %s ' % clientCountry - hostresults = trim_by_client_country(hostresults, clientCountry) - return (header, hostresults) - -def do_location(kwargs, header): - hostresults = set() - if 'location' in kwargs and kwargs['location'] in location_cache: - hostresults = set(location_cache[kwargs['location']]) - header += "Using location %s " % kwargs['location'] - return (header, hostresults) - -def add_host_to_cache(cache, hostid, hcurl): - if hostid not in cache: - cache[hostid] = [hcurl] - else: - cache[hostid].append(hcurl) - return cache - -def append_path(hosts, cache, file, pathIsDirectory=False): - """ given a list of hosts, return a list of objects: - [(hostid, [hcurls]), ... ] - in the same order, appending file if it's not None""" - subpath = None - results = [] - if 'subpath' in cache: - subpath = cache['subpath'] - for hostid in hosts: - hcurls = [] - for hcurl_id in cache['byHostId'][hostid]: - s = hcurl_cache[hcurl_id] - if subpath is not None: - s += "/" + subpath - if file is None and pathIsDirectory: - s += "/" - if file is not None: - if not s.endswith('/'): - s += "/" - s += file - hcurls.append(s) - results.append((hostid, hcurls)) - return results - -def trim_to_preferred_protocols(hosts_and_urls): - """ remove all but http and ftp URLs, - and if both http and ftp are offered, - leave only http. Return [(hostid, url), ...] 
""" - results = [] - try_protocols = ('https', 'http', 'ftp') - for (hostid, hcurls) in hosts_and_urls: - protocols = {} - url = None - for hcurl in hcurls: - for p in try_protocols: - if hcurl.startswith(p+':'): - protocols[p] = hcurl - - for p in try_protocols: - if p in protocols: - url = protocols[p] - break - - if url is not None: - results.append((hostid, url)) - return results - -def client_ip_to_country(ip): - clientCountry = None - if ip is None: - return None - - # lookup in the cache first - tree_results = tree_lookup(netblock_country_tree, ip, 'country', maxResults=1) - if len(tree_results) > 0: - (prefix, clientCountry) = tree_results[0] - return clientCountry - - # attempt IPv6, then IPv6 6to4 as IPv4, then Teredo, then IPv4 - try: - if ip.version() == 6: - if gipv6 is not None: - clientCountry = gipv6.country_code_by_addr_v6(ip.strNormal()) - if clientCountry is None: - # Try the IPv6-to-IPv4 translation schemes - for scheme in (convert_6to4_v4, convert_teredo_v4): - result = scheme(ip) - if result is not None: - ip = result - break - if ip.version() == 4 and gipv4 is not None: - clientCountry = gipv4.country_code_by_addr(ip.strNormal()) - except: - pass - return clientCountry - -def do_mirrorlist(kwargs): - global debug - global logfile - - def return_error(kwargs, message='', returncode=200): - d = dict(returncode=returncode, message=message, resulttype='mirrorlist', results=[]) - if 'metalink' in kwargs and kwargs['metalink']: - d['resulttype'] = 'metalink' - d['results'] = metalink_failuredoc(message) - return d - - if not (kwargs.has_key('repo') and kwargs.has_key('arch')) and not kwargs.has_key('path'): - return return_error(kwargs, message='# either path=, or repo= and arch= must be specified') - - file = None - cache = None - pathIsDirectory = False - if kwargs.has_key('path'): - path = kwargs['path'].strip('/') - - # Strip duplicate "//" from the path - path = path.replace('//', '/') - - subheader = "# path = %s " % (path) - header = subheader - - sdir = path.split('/') - try: - # path was to a directory - cache = mirrorlist_cache['/'.join(sdir)] - pathIsDirectory=True - except KeyError: - # path was to a file, try its directory - file = sdir[-1] - sdir = sdir[:-1] - try: - cache = mirrorlist_cache['/'.join(sdir)] - except KeyError: - return return_error(kwargs, message=header + 'error: invalid path') - dir = '/'.join(sdir) - else: - if u'source' in kwargs['repo']: - kwargs['arch'] = u'source' - repo = repo_redirect.get(kwargs['repo'], kwargs['repo']) - arch = kwargs['arch'] - subheader = "# repo = %s arch = %s " % (repo, arch) - header = subheader - - if repo in disabled_repositories: - return return_error(kwargs, message=header + 'repo disabled') - try: - dir = repo_arch_to_directoryname[(repo, arch)] - if 'metalink' in kwargs and kwargs['metalink']: - dir += '/repodata' - file = 'repomd.xml' - else: - pathIsDirectory=True - cache = mirrorlist_cache[dir] - except KeyError: - repos = repo_arch_to_directoryname.keys() - repos.sort() - repo_information = header + "error: invalid repo or arch\n" - repo_information += "# following repositories are available:\n" - for i in repos: - if i[0] is not None and i[1] is not None: - repo_information += "# repo=%s&arch=%s\n" % i - return return_error(kwargs, message=repo_information) - - # set kwargs['IP'] exactly once - try: - kwargs['IP'] = IP(kwargs['client_ip']) - except: - kwargs['IP'] = None - - ordered_mirrorlist = cache.get('ordered_mirrorlist', default_ordered_mirrorlist) - done = 0 - location_results = set() - 
netblock_results = set() - asn_results = set() - internet2_results = set() - country_results = set() - geoip_results = set() - continent_results = set() - global_results = set() - - header, location_results = do_location(kwargs, header) - - requested_countries = [] - if kwargs.has_key('country'): - requested_countries = uniqueify([c.upper() for c in kwargs['country'].split(',') ]) - - # if they specify a country, don't use netblocks or ASN - if not 'country' in kwargs: - header, netblock_results = do_netblocks(kwargs, cache, header) - if len(netblock_results) > 0: - if not ordered_mirrorlist: - done=1 - - if not done: - header, asn_results = do_asn(kwargs, cache, header) - if len(asn_results) + len(netblock_results) >= 3: - if not ordered_mirrorlist: - done = 1 - - clientCountry = client_ip_to_country(kwargs['IP']) - - if clientCountry is None: - print_client_country = "N/A" - else: - print_client_country = clientCountry - - if debug and kwargs.has_key('repo') and kwargs.has_key('arch'): - msg = "IP: %s; DATE: %s; COUNTRY: %s; REPO: %s; ARCH: %s\n" % ( - (kwargs['IP'] or 'None'), time.strftime("%Y-%m-%d"), - print_client_country, kwargs['repo'], kwargs['arch']) - - sys.stdout.write(msg) - sys.stdout.flush() - - if logfile is not None: - logfile.write(msg) - logfile.flush() - - if not done: - header, internet2_results = do_internet2(kwargs, cache, clientCountry, header) - if len(internet2_results) + len(netblock_results) + len(asn_results) >= 3: - if not ordered_mirrorlist: - done = 1 - - if not done and 'country' in kwargs: - header, country_results = do_country(kwargs, cache, clientCountry, requested_countries, header) - if len(country_results) == 0: - header, continent_results = do_continent(kwargs, cache, clientCountry, requested_countries, header) - done = 1 - - if not done: - header, geoip_results = do_geoip(kwargs, cache, clientCountry, header) - if len(geoip_results) >= 3: - if not ordered_mirrorlist: - done = 1 - - if not done: - header, continent_results = do_continent(kwargs, cache, clientCountry, [], header) - if len(geoip_results) + len(continent_results) >= 3: - done = 1 - - if not done: - header, global_results = do_global(kwargs, cache, clientCountry, header) - - def _random_shuffle(s): - l = list(s) - random.shuffle(l) - return l - - def _ordered_netblocks(s): - def ipy_len(t): - (prefix, hostid) = t - return IP(prefix).len() - v4_netblocks = [] - v6_netblocks = [] - for (prefix, hostid) in s: - ip = IP(prefix) - if ip.version() == 4: - v4_netblocks.append((prefix, hostid)) - elif ip.version() == 6: - v6_netblocks.append((prefix, hostid)) - # mix up the order, as sort will preserve same-key ordering - random.shuffle(v4_netblocks) - v4_netblocks.sort(key=ipy_len) - random.shuffle(v6_netblocks) - v6_netblocks.sort(key=ipy_len) - v4_netblocks = [t[1] for t in v4_netblocks] - v6_netblocks = [t[1] for t in v6_netblocks] - return v6_netblocks + v4_netblocks - - def whereismymirror(result_sets): - return_string = 'None' - allhosts = [] - found = False - for (l,s,f) in result_sets: - if len(l) > 0: - allhosts.extend(f(l)) - if not found: - return_string = s - found = True - - allhosts = uniqueify(allhosts) - return allhosts, return_string - - result_sets = [ - (location_results, "location", _random_shuffle), - (netblock_results, "netblocks", _ordered_netblocks), - (asn_results, "asn", _random_shuffle), - (internet2_results, "I2", _random_shuffle), - (country_results, "country", shuffle), - (geoip_results, "geoip", shuffle), - (continent_results, "continent", shuffle), - 
(global_results, "global", shuffle), - ] - - allhosts, where_string = whereismymirror(result_sets) - try: - ip_str = kwargs['IP'].strNormal() - except: - ip_str = 'Unknown IP' - - pid = str(os.getpid()) - log_string = "mirrorlist(%s): %s found its best mirror from %s (%s)" % (pid, ip_str, where_string, subheader) - syslogger.info(log_string) - - hosts_and_urls = append_path(allhosts, cache, file, pathIsDirectory=pathIsDirectory) - - if 'metalink' in kwargs and kwargs['metalink']: - (resulttype, returncode, results)=metalink(cache, dir, file, hosts_and_urls) - d = dict(message=None, resulttype=resulttype, returncode=returncode, results=results) - return d - - else: - host_url_list = trim_to_preferred_protocols(hosts_and_urls) - d = dict(message=header, resulttype='mirrorlist', returncode=200, results=host_url_list) - return d - -def setup_cache_tree(cache, field): - tree = radix.Radix() - for k, v in cache.iteritems(): - node = tree.add(k.strNormal()) - node.data[field] = v - return tree - -def setup_netblocks(netblocks_file, asns_wanted=None): - tree = radix.Radix() - if netblocks_file is not None: - try: - f = open(netblocks_file, 'r') - except: - return tree - for l in f: - try: - s = l.split() - start, mask = s[0].split('/') - mask = int(mask) - if mask == 0: continue - asn = int(s[1]) - if asns_wanted is None or asn in asns_wanted: - node = tree.add(s[0]) - node.data['asn'] = asn - except: - pass - f.close() - return tree - -def read_caches(): - global mirrorlist_cache - global host_netblock_cache - global host_country_allowed_cache - global host_max_connections_cache - global repo_arch_to_directoryname - global repo_redirect - global country_continent_redirect_cache - global disabled_repositories - global host_bandwidth_cache - global host_country_cache - global file_details_cache - global hcurl_cache - global asn_host_cache - global location_cache - global netblock_country_cache - - data = {} - try: - f = open(cachefile, 'r') - data = pickle.load(f) - f.close() - except: - pass - - if 'mirrorlist_cache' in data: - mirrorlist_cache = data['mirrorlist_cache'] - if 'host_netblock_cache' in data: - host_netblock_cache = data['host_netblock_cache'] - if 'host_country_allowed_cache' in data: - host_country_allowed_cache = data['host_country_allowed_cache'] - if 'repo_arch_to_directoryname' in data: - repo_arch_to_directoryname = data['repo_arch_to_directoryname'] - if 'repo_redirect_cache' in data: - repo_redirect = data['repo_redirect_cache'] - if 'country_continent_redirect_cache' in data: - country_continent_redirect_cache = data['country_continent_redirect_cache'] - if 'disabled_repositories' in data: - disabled_repositories = data['disabled_repositories'] - if 'host_bandwidth_cache' in data: - host_bandwidth_cache = data['host_bandwidth_cache'] - if 'host_country_cache' in data: - host_country_cache = data['host_country_cache'] - if 'file_details_cache' in data: - file_details_cache = data['file_details_cache'] - if 'hcurl_cache' in data: - hcurl_cache = data['hcurl_cache'] - if 'asn_host_cache' in data: - asn_host_cache = data['asn_host_cache'] - if 'location_cache' in data: - location_cache = data['location_cache'] - if 'netblock_country_cache' in data: - netblock_country_cache = data['netblock_country_cache'] - if 'host_max_connections_cache' in data: - host_max_connections_cache = data['host_max_connections_cache'] - - setup_continents() - global internet2_tree - global global_tree - global host_netblocks_tree - global netblock_country_tree - - internet2_tree = 
-    global_tree = setup_netblocks(global_netblocks_file, asn_host_cache)
-    # host_netblocks_tree key is a netblock, value is a list of host IDs
-    host_netblocks_tree = setup_cache_tree(host_netblock_cache, 'hosts')
-    # netblock_country_tree key is a netblock, value is a single country string
-    netblock_country_tree = setup_cache_tree(netblock_country_cache, 'country')
-
-def errordoc(metalink, message):
-    if metalink:
-        doc = metalink_failuredoc(message)
-    else:
-        doc = message
-    return doc
-
-class MirrorlistHandler(StreamRequestHandler):
-    def handle(self):
-        signal.signal(signal.SIGHUP, signal.SIG_IGN)
-        random.seed()
-        try:
-            # read size of incoming pickle
-            readlen = 0
-            size = ''
-            while readlen < 10:
-                size += self.rfile.read(10 - readlen)
-                readlen = len(size)
-            size = atoi(size)
-
-            # read the pickle
-            readlen = 0
-            p = ''
-            while readlen < size:
-                p += self.rfile.read(size - readlen)
-                readlen = len(p)
-            d = pickle.loads(p)
-            self.connection.shutdown(socket.SHUT_RD)
-        except:
-            pass
-
-        try:
-            try:
-                r = do_mirrorlist(d)
-            except:
-                raise
-            message = r['message']
-            results = r['results']
-            resulttype = r['resulttype']
-            returncode = r['returncode']
-        except Exception, e:
-            message=u'# Bad Request %s\n# %s' % (e, d)
-            exception_msg = traceback.format_exc(e)
-            sys.stderr.write(message+'\n')
-            sys.stderr.write(exception_msg)
-            sys.stderr.flush()
-            returncode = 400
-            results = []
-            resulttype = 'mirrorlist'
-            if d['metalink']:
-                resulttype = 'metalink'
-            results = errordoc(d['metalink'], message)
-
-        try:
-            p = pickle.dumps({'message':message, 'resulttype':resulttype, 'results':results, 'returncode':returncode})
-            self.connection.sendall(zfill('%s' % len(p), 10))
-
-            self.connection.sendall(p)
-            self.connection.shutdown(socket.SHUT_WR)
-        except:
-            pass
-
-def sighup_handler(signum, frame):
-    global logfile
-    if logfile is not None:
-        name = logfile.name
-        logfile.close()
-        logfile = open(name, 'a')
-
-    # put this in a separate thread so it doesn't block clients
-    if threading.active_count() < 2:
-        thread = threading.Thread(target=load_databases_and_caches)
-        thread.daemon = False
-        try:
-            thread.start()
-        except KeyError:
-            # bug fix for handing an exception when unable to delete from _limbo even though it's not in limbo
-            # https://code.google.com/p/googleappengine/source/browse/trunk/python/google/appengine/dist27/threading.py?r=327
-            pass
-
-def sigterm_handler(signum, frame):
-    global must_die
-    signal.signal(signal.SIGHUP, signal.SIG_IGN)
-    signal.signal(signal.SIGTERM, signal.SIG_IGN)
-    if signum == signal.SIGTERM:
-        must_die = True
-
-class ForkingUnixStreamServer(ForkingMixIn, UnixStreamServer):
-    request_queue_size = 300
-    def finish_request(self, request, client_address):
-        signal.signal(signal.SIGHUP, signal.SIG_IGN)
-        BaseServer.finish_request(self, request, client_address)
-
-def parse_args():
-    global cachefile
-    global socketfile
-    global internet2_netblocks_file
-    global global_netblocks_file
-    global debug
-    global logfile
-    global pidfile
-    opts, args = getopt.getopt(sys.argv[1:], "c:i:g:p:s:dl:",
-                               ["cache", "internet2_netblocks", "global_netblocks", "pidfile", "socket", "debug", "log="])
-    for option, argument in opts:
-        if option in ("-c", "--cache"):
-            cachefile = argument
-        if option in ("-i", "--internet2_netblocks"):
-            internet2_netblocks_file = argument
-        if option in ("-g", "--global_netblocks"):
-            global_netblocks_file = argument
-        if option in ("-s", "--socket"):
-            socketfile = argument
-        if option in ("-p", "--pidfile"):
-            pidfile = argument
-        if option in ("-l", "--log"):
-            try:
-                logfile = open(argument, 'a')
-            except:
-                logfile = None
-        if option in ("-d", "--debug"):
-            debug = True
-
-def open_geoip_databases():
-    global gipv4
-    global gipv6
-    try:
-        gipv4 = GeoIP.open("/usr/share/GeoIP/GeoIP.dat", GeoIP.GEOIP_STANDARD)
-    except:
-        gipv4=None
-    try:
-        gipv6 = GeoIP.open("/usr/share/GeoIP/GeoIPv6.dat", GeoIP.GEOIP_STANDARD)
-    except:
-        gipv6=None
-
-def convert_6to4_v4(ip):
-    all_6to4 = IP('2002::/16')
-    if ip.version() != 6 or ip not in all_6to4:
-        return None
-    parts=ip.strNormal().split(':')
-
-    ab = int(parts[1],16)
-    a = (ab >> 8) & 0xFF
-    b = ab & 0xFF
-    cd = int(parts[2],16)
-    c = (cd >> 8) & 0xFF
-    d = cd & 0xFF
-
-    v4addr = '%d.%d.%d.%d' % (a,b,c,d)
-    return IP(v4addr)
-
-def convert_teredo_v4(ip):
-    teredo_std = IP('2001::/32')
-    teredo_xp = IP('3FFE:831F::/32')
-    if ip.version() != 6 or (ip not in teredo_std and ip not in teredo_xp):
-        return None
-    parts=ip.strNormal().split(':')
-
-    ab = int(parts[6],16)
-    a = ((ab >> 8) & 0xFF) ^ 0xFF
-    b = (ab & 0xFF) ^ 0xFF
-    cd = int(parts[7],16)
-    c = ((cd >> 8) & 0xFF) ^ 0xFF
-    d = (cd & 0xFF) ^ 0xFF
-
-    v4addr = '%d.%d.%d.%d' % (a,b,c,d)
-    return IP(v4addr)
-
-def load_databases_and_caches(*args, **kwargs):
-    sys.stderr.write("load_databases_and_caches...")
-    sys.stderr.flush()
-    open_geoip_databases()
-    read_caches()
-    sys.stderr.write("done.\n")
-    sys.stderr.flush()
-
-def remove_pidfile(pidfile):
-    os.unlink(pidfile)
-
-def create_pidfile_dir(pidfile):
-    piddir = os.path.dirname(pidfile)
-    try:
-        os.makedirs(piddir, mode=0755)
-    except OSError, err:
-        if err.errno == 17: # File exists
-            pass
-        else:
-            raise
-    except:
-        raise
-
-def write_pidfile(pidfile, pid):
-    create_pidfile_dir(pidfile)
-    f = open(pidfile, 'w')
-    f.write(str(pid)+'\n')
-    f.close()
-    return 0
-
-def manage_pidfile(pidfile):
-    """returns 1 if another process is running that is named in pidfile,
-    otherwise creates/writes pidfile and returns 0."""
-    pid = os.getpid()
-    try:
-        f = open(pidfile, 'r')
-    except IOError, err:
-        if err.errno == 2: # No such file or directory
-            return write_pidfile(pidfile, pid)
-        return 1
-
-    oldpid=f.read()
-    f.close()
-
-    # is the oldpid process still running?
-    try:
-        os.kill(int(oldpid), 0)
-    except ValueError: # malformed oldpid
-        return write_pidfile(pidfile, pid)
-    except OSError, err:
-        if err.errno == 3: # No such process
-            return write_pidfile(pidfile, pid)
-    return 1
-
-
-def main():
-    global logfile
-    global pidfile
-    signal.signal(signal.SIGHUP, signal.SIG_IGN)
-    parse_args()
-    manage_pidfile(pidfile)
-
-    oldumask = os.umask(0)
-    try:
-        os.unlink(socketfile)
-    except:
-        pass
-
-    load_databases_and_caches()
-    signal.signal(signal.SIGHUP, sighup_handler)
-    # restart interrupted syscalls like select
-    signal.siginterrupt(signal.SIGHUP, False)
-    ss = ForkingUnixStreamServer(socketfile, MirrorlistHandler)
-
-    while not must_die:
-        try:
-            ss.serve_forever()
-        except select.error:
-            pass
-
-    try:
-        os.unlink(socketfile)
-    except:
-        pass
-
-    if logfile is not None:
-        try:
-            logfile.close()
-        except:
-            pass
-
-    remove_pidfile(pidfile)
-    return 0
-
-
-if __name__ == "__main__":
-    try:
-        sys.exit(main())
-    except KeyboardInterrupt:
-        sys.exit(-1)
diff --git a/roles/mirrormanager/mirrorlist/files/mm-authorized_key b/roles/mirrormanager/mirrorlist/files/mm-authorized_key
deleted file mode 100644
index bef6a5561a..0000000000
--- a/roles/mirrormanager/mirrorlist/files/mm-authorized_key
+++ /dev/null
@@ -1 +0,0 @@
-ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAgEA1+Mq0V8RPuCZrjsSz2y56tVR+hykwoYSodhs6ivqkuf16PHo1Aq2PR+I7xnxnJ/fqF20UEV4bmRFs9k4y/QnMOwi90O32vC3WZmfLfoY9S9dzUYXXOYy0lQZPMjmB+yOYZhhGIu2HTc4/CHKjVpCUZKHXwgkouEwsAEHQI86TrepvPwt5GrmAeZjcRp8T3maaFlRf8UFgcajF9ztgiEnBpWBEKaemtUtQ/g9cr/SwNMT3GK+M4qMVaXkuTCeiKwqxueZmTgn76aQ11sfoWYi1lVCyYt02iMDoBLaLERbwBLt3WpY/l3tQaZiPVoRPDH2EQb6v/XISsXNsqGtnc8APfhkVniURNwW/Qz8eXhpnd7GlU90iWPExvGo/Eaj2cNemgiNZH7/U7OKe3/7Li+IpPDhLCfJ8ue7Nqn+2uqhEWvzZXvamzcvEg89PBbgrdHDvJqVLnfsMDuDEo93KZ2pCfTGmjbYjbV3nnArhimLjkkb2E86489F09p65e4AoZw3HFgeW8yA3ecXtXd9FmYYkL0urT1AZFuJ/9B364h4gOqUt0oO7aW0vqhS1hW+7brIJ2DzM/vn90ONk8JVt1T3DvmgmVLxtuo1wu1PkDtekZNtJPGuGzvt5TYDjGPwZzrkYdYazLow3NMCyrhV6oNm8sxuZCLxeyZdM83Q7xty5nM= toshio@puppet1.fedora.phx.redhat.com
diff --git a/roles/mirrormanager/mirrorlist/files/mm_sync_data b/roles/mirrormanager/mirrorlist/files/mm_sync_data
deleted file mode 100644
index e48bdc7b3b..0000000000
--- a/roles/mirrormanager/mirrorlist/files/mm_sync_data
+++ /dev/null
@@ -1,4 +0,0 @@
-#!/bin/bash
-
-rsync -az --delete-delay --delay-updates --delete mirrormanager@bapp02:/var/lib/mirrormanager/ /var/lib/mirrormanager/
-kill -HUP $(cat /var/run/mirrormanager/mirrorlist_server.pid)
diff --git a/roles/mirrormanager/mirrorlist/files/supervisord.conf b/roles/mirrormanager/mirrorlist/files/supervisord.conf
deleted file mode 100644
index 2183276e39..0000000000
--- a/roles/mirrormanager/mirrorlist/files/supervisord.conf
+++ /dev/null
@@ -1,67 +0,0 @@
-
-[supervisord]
-http_port=/var/tmp/supervisor.sock ; (default is to run a UNIX domain socket server)
-;http_port=127.0.0.1:9001 ; (alternately, ip_address:port specifies AF_INET)
-;sockchmod=0700 ; AF_UNIX socketmode (AF_INET ignore, default 0700)
-;sockchown=nobody.nogroup ; AF_UNIX socket uid.gid owner (AF_INET ignores)
-;umask=022 ; (process file creation umask;default 022)
-logfile=/var/log/supervisor/supervisord.log ; (main log file;default $CWD/supervisord.log)
-logfile_maxbytes=50MB ; (max main logfile bytes b4 rotation;default 50MB)
-logfile_backups=10 ; (num of main logfile rotation backups;default 10)
-loglevel=info ; (logging level;default info; others: debug,warn)
-pidfile=/var/run/supervisord.pid ; (supervisord pidfile;default supervisord.pid)
-nodaemon=false ; (start in foreground if true;default false)
-minfds=1024 ; (min. avail startup file descriptors;default 1024)
-minprocs=200 ; (min. avail process descriptors;default 200)
-
-;nocleanup=true ; (don't clean up tempfiles at start;default false)
-;http_username=user ; (default is no username (open system))
-;http_password=123 ; (default is no password (open system))
-;childlogdir=/tmp ; ('AUTO' child log dir, default $TEMP)
-;user=chrism ; (default is current user, required if root)
-;directory=/tmp ; (default is not to cd during start)
-;environment=KEY=value ; (key value pairs to add to environment)
-
-[supervisorctl]
-serverurl=unix:///var/tmp/supervisor.sock ; use a unix:// URL for a unix socket
-;serverurl=http://127.0.0.1:9001 ; use an http:// url to specify an inet socket
-;username=chris ; should be same as http_username if set
-;password=123 ; should be same as http_password if set
-;prompt=mysupervisor ; cmd line prompt (default "supervisor")
-
-; The below sample program section shows all possible program subsection values,
-; create one or more 'real' program: sections to be able to control them under
-; supervisor.
-
-;[program:theprogramname]
-;command=/bin/cat ; the program (relative uses PATH, can take args)
-;priority=999 ; the relative start priority (default 999)
-;autostart=true ; start at supervisord start (default: true)
-;autorestart=true ; retstart at unexpected quit (default: true)
-;startsecs=10 ; number of secs prog must stay running (def. 10)
-;startretries=3 ; max # of serial start failures (default 3)
-;exitcodes=0,2 ; 'expected' exit codes for process (default 0,2)
-;stopsignal=QUIT ; signal used to kill process (default TERM)
-;stopwaitsecs=10 ; max num secs to wait before SIGKILL (default 10)
-;user=chrism ; setuid to this UNIX account to run the program
-;log_stdout=true ; if true, log program stdout (default true)
-;log_stderr=true ; if true, log program stderr (def false)
-;logfile=/var/log/cat.log ; child log path, use NONE for none; default AUTO
-;logfile_maxbytes=1MB ; max # logfile bytes b4 rotation (default 50MB)
-;logfile_backups=10 ; # of logfile backups (default 10)
-
-[program:mirrorlist_server]
-command=/usr/share/mirrormanager/mirrorlist-server/mirrorlist_server.py
-priority=1
-autostart=true
-autorestart=true
-startsecs=10
-startretries=5
-stopsignal=TERM
-stopwaitsecs=10
-user=mirrormanager
-log_stderr=true
-log_stdout=true
-logfile=/var/log/mirrormanager/mirrorlist-server.log
-logfile_maxbytes=50MB
-logfile_backups=10
diff --git a/roles/mirrormanager/mirrorlist/tasks/main.yml b/roles/mirrormanager/mirrorlist/tasks/main.yml
deleted file mode 100644
index ccca26f883..0000000000
--- a/roles/mirrormanager/mirrorlist/tasks/main.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-# tasklist for setting up the mirrorlist app components
-# create mirrormanager user
-- name: add mirrormanager user - uid {{ mirrormanager_uid }}
-  user: name=mirrormanager uid={{ mirrormanager_uid }} state=present home=/home/mirrormanager createhome=yes
-
-- name: install supervisor and mirrormanager
-  yum: pkg={{ item }} state=present
-  with_items:
-  - supervisor
-  - mirrormanager
-  tags:
-  - packages
-
-# Put in the HOTFIX
-- name: HOTFIX mirrorlist_server.py
-  copy: >
-    src=mirrorlist_server.py dest=/usr/share/mirrormanager/mirrorlist-server/mirrorlist_server.py
-    owner=root group=root mode=0755
-  tags:
-  - files
-  notify:
-  - restart httpd
-  - restart supervisord
-
-# mirrormanager user ssh key(s)
-- name: add authorized_keys for mirrormanager
-  authorized_key: key="{{ item }}" user=mirrormanager state=present
-  with_file:
-  - mm-authorized_key
-
-# install mirrorlist-server.conf apache config
-- name: mirrorlist-server apache conf
-  template: src=mirrorlist-server.conf dest=/etc/httpd/conf.d/mirrorlist-server.conf
-  notify:
-  - restart apache
-  tags:
-  - config
-
-# nuke mirrormanager.conf so that it doesn't start up
-- name: kill /etc/httpd/conf.d/mirrrormanager.conf
-  copy: 'content="#blanked on purpose" dest=/etc/httpd/conf.d/mirrormanager.conf'
-  when: inventory_hostname.startswith('mirrorlist')
-  notify:
-  - restart apache
-  tags:
-  - config
-
-# selinux policy - mirrormanager - put in place - for the sockfile
-#
-
-# setup and configure supervisord
-- name: /etc/supervisord.conf
-  copy: src=supervisord.conf dest=/etc/supervisord.conf mode=0644
-  notify:
-  - restart supervisord
-
-- name: enable supervisord
-  service: name=supervisord state=started enabled=yes
-
diff --git a/roles/mirrormanager/mirrorlist/templates/mirrorlist-server.conf b/roles/mirrormanager/mirrorlist/templates/mirrorlist-server.conf
deleted file mode 100644
index 635c4c9ede..0000000000
--- a/roles/mirrormanager/mirrorlist/templates/mirrorlist-server.conf
+++ /dev/null
@@ -1,56 +0,0 @@
-KeepAlive off
-
-Alias /mirrorlists /var/lib/mirrormanager/mirrorlists/
-Alias /publiclist /var/lib/mirrormanager/mirrorlists/publiclist/
-Alias /static /var/lib/mirrormanager/mirrorlists/static/
-
-
-    Options Indexes FollowSymLinks
-
-
-WSGIDaemonProcess mirrorlist user=apache processes={{ mirrorlist_procs }} threads=1 display-name=mirrorlist maximum-requests=1000
-
-WSGIScriptAlias /metalink /usr/share/mirrormanager/mirrorlist-server/mirrorlist_client.wsgi
-WSGIScriptAlias /mirrorlist /usr/share/mirrormanager/mirrorlist-server/mirrorlist_client.wsgi
-
-# Set this if you do not have a Reverse Proxy (HTTP Accelerator) that
-# is in front of your application server running this code.
-# SetEnv mirrorlist_client.noreverseproxy 1
-
-
-    WSGIProcessGroup mirrorlist
-
-    # Apache 2.4
-    Require all granted
-
-
-    # Apache 2.2
-    Order deny,allow
-    Allow from all
-
-
-
-
-    WSGIProcessGroup mirrorlist
-
-    # Apache 2.4
-    Require all granted
-
-
-    # Apache 2.2
-    Order deny,allow
-    Allow from all
-
-
-
-
-
-    # Apache 2.4
-    Require all granted
-
-
-    # Apache 2.2
-    Order deny,allow
-    Allow from all
-
-
diff --git a/roles/mirrormanager/mirrorlist/vars/main.yml b/roles/mirrormanager/mirrorlist/vars/main.yml
deleted file mode 100644
index 089784c9b0..0000000000
--- a/roles/mirrormanager/mirrorlist/vars/main.yml
+++ /dev/null
@@ -1,4 +0,0 @@
-mirrormanager_uid: 441
-mirrormanager_gid: 441
-mirrors_gid: 263
-mirrors2_gid: 529