gluster: remove old gluster role

Signed-off-by: Kevin Fenzi <kevin@scrye.com>
Kevin Fenzi 2024-09-24 11:40:42 -07:00
parent 5c2d61d9f8
commit 8a18b535f8
11 changed files with 0 additions and 395 deletions


@@ -109,9 +109,6 @@
- name: reload proxyhttpd
  command: /usr/local/bin/proxy-conditional-reload.sh httpd httpd
- name: restart glusterd
  service: name=glusterd state=restarted
- name: run rkhunter
  command: rkhunter --propupd


@@ -1,35 +0,0 @@
---
# Define resources for this group of hosts here.
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']

# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
  - can_send:
      - logger.log
    group: sysadmin
    owner: root
    service: shell

freezes: false
ipa_client_shell_groups:
  - sysadmin-noc
  - sysadmin-packages
  - sysadmin-veteran
  - sysadmin-web
ipa_client_sudo_groups:
  - sysadmin-noc
  - sysadmin-packages
  - sysadmin-veteran
  - sysadmin-web
ipa_host_group: packages
lvm_size: 100000
max_mem_size: 8192
mem_size: 8192
nfs_mount_opts: "rw,hard,bg,intr,noatime,nodev,nosuid,nfsvers=3"
num_cpus: 4
primary_auth_source: ipa
pythonsitelib: /usr/lib/python2.7/site-packages
tcp_ports: [80, 443,
            # This is for glusterd
            6996,
            # These 16 ports are used by fedmsg. One for each wsgi thread.
            3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015]


@@ -1,29 +0,0 @@
---
# Define resources for this group of hosts here.
# Needed for rsync from log01 for logs.
custom_rules: ['-A INPUT -p tcp -m tcp -s 10.3.163.39 --dport 873 -j ACCEPT', '-A INPUT -p tcp -m tcp -s 192.168.1.59 --dport 873 -j ACCEPT']

# These are consumed by a task in roles/fedmsg/base/main.yml
fedmsg_certs:
  - can_send:
      - logger.log
    group: sysadmin
    owner: root
    service: shell

ipa_client_shell_groups:
  - sysadmin-noc
  - sysadmin-packages
  - sysadmin-veteran
  - sysadmin-web
ipa_client_sudo_groups:
  - sysadmin-noc
  - sysadmin-packages
  - sysadmin-veteran
  - sysadmin-web
ipa_host_group: packages
lvm_size: 80000
mem_size: 4096
num_cpus: 4
pythonsitelib: /usr/lib/python2.7/site-packages
tcp_ports: [80, 443,
            # These 16 ports are used by fedmsg. One for each wsgi thread.
            3000, 3001, 3002, 3003, 3004, 3005, 3006, 3007, 3008, 3009, 3010, 3011, 3012, 3013, 3014, 3015]


@@ -1,85 +0,0 @@
# Nuke and rebuild the xapian search index for fedora-packages
#
# "install_packages_indexer" is a host_var that instructs this to only run on
# one of the multiple packages nodes. The cache that this rebuilds is shared
# between nodes with gluster.

- name: enter maintenance mode
  hosts: packages:packages_stg
  user: root
  gather_facts: False

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "{{ private }}/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  tasks:
    - name: tell nagios to shush for these hosts
      nagios: action=downtime minutes=300 service=host host={{ inventory_hostname_short }}{{ env_suffix }}
      delegate_to: noc01.iad2.fedoraproject.org
      ignore_errors: true

    - name: stop the cache warmer
      service: name={{ item }} state=stopped
      with_items:
        - fedmsg-hub

- name: Rebuild that search index on the side and install it (just staging)
  hosts: packages_stg
  user: root
  become: True
  become_user: apache
  gather_facts: False

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "{{ private }}/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  tasks:
    - name: Pull in the list of packages from pkgdb. Go get a snack. (2 hours)
      command: /usr/bin/fcomm-index-packages --index-db-dest=/var/cache/fedoracommunity/packages/xapian --icons-dest /var/cache/fedoracommunity/packages/icons --mdapi-url=https://apps.stg.fedoraproject.org/mdapi --icons-url=https://dl.fedoraproject.org/pub/alt/screenshots
      when: install_packages_indexer

- name: Rebuild that search index on the side and install it. (just prod)
  hosts: packages
  tags: rebuild-prod-index
  user: root
  become: True
  become_user: apache
  gather_facts: False

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "{{ private }}/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  tasks:
    - name: Pull in the list of packages from pkgdb. Go get a snack. (2 hours)
      command: /usr/bin/fcomm-index-packages --index-db-dest=/var/cache/fedoracommunity/packages/xapian --icons-dest /var/cache/fedoracommunity/packages/icons --mdapi-url=https://apps.fedoraproject.org/mdapi --icons-url=https://dl.fedoraproject.org/pub/alt/screenshots
      when: install_packages_indexer

- name: leave maintenance mode
  hosts: packages:packages_stg
  user: root
  gather_facts: False

  vars_files:
    - /srv/web/infra/ansible/vars/global.yml
    - "{{ private }}/vars.yml"
    - /srv/web/infra/ansible/vars/{{ ansible_distribution }}.yml

  tasks:
    - name: Make sure the perms are straight
      file: path=/var/cache/fedoracommunity/packages/ state=directory owner=apache group=fedmsg mode="g+rw" recurse=yes

    - name: Restart the cache worker
      service: name={{ item }} state=started
      with_items:
        - fedmsg-hub

    - name: tell nagios to start bothering us again
      nagios: action=unsilence service=host host={{ inventory_hostname_short }}{{ env_suffix }}
      delegate_to: noc01.iad2.fedoraproject.org
      ignore_errors: true
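
As the header comment says, the expensive indexing step is gated on the
`install_packages_indexer` host_var, set on exactly one packages node; the
gluster-shared cache makes the result visible to the others. A minimal sketch
of that gate (the node name in the comment is a hypothetical example, not
taken from the repo):

# host_vars for the one packages node that should rebuild the index,
# e.g. host_vars/packages01.iad2.fedoraproject.org (hypothetical name);
# every other packages node leaves it False/unset.
install_packages_indexer: True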


@@ -1,33 +0,0 @@
---
- name: install needed packages
  package:
    name: "{{ item }}"
    state: present
  with_items:
    - glusterfs-fuse
  tags:
    - packages

- name: make the mountdir
  file:
    state: directory
    path: "{{ mountdir }}"
    owner: "{{ owner }}"
    group: "{{ group }}"

- name: copy over the client config
  template:
    src: client.config
    dest: /etc/glusterfs/glusterfs.{{ glusterservername }}.vol
    mode: 0640
  #notify:
  #- remount? no idea...

- name: mount it up
  mount:
    src: /etc/glusterfs/glusterfs.{{ glusterservername }}.vol
    state: mounted
    fstype: glusterfs
    name: "{{ mountdir }}"
  ignore_errors: True
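
For reference, the mount task above points the glusterfs fuse helper at the
locally templated client volfile rather than at a server:/volume source; it is
roughly equivalent to an fstab line like this (the values substituted for
`glusterservername` and `mountdir` are hypothetical):

# sketch, with glusterservername=packages and mountdir=/srv/packages (both made up)
/etc/glusterfs/glusterfs.packages.vol  /srv/packages  glusterfs  defaults  0 0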


@@ -1,55 +0,0 @@
# Config for {{ glusterservername }}
# Generated by ansible

{% for server in servers %}
volume vol-{{ servers.index(server) }}
    type protocol/client
    option transport-type tcp
    option remote-host {{ server }}
    option transport.socket.nodelay on
    option remote-port 6996
    option remote-subvolume iothreads
    option username {{ username }}
    option password {{ password }}
end-volume
{% endfor %}

volume mirror-0
    type cluster/replicate
    subvolumes {% for server in servers %}vol-{{ loop.index - 1 }} {% endfor %}
{% if servers.index(inventory_hostname) %}
    option read-subvolume vol-{{ servers.index(inventory_hostname) }}{% endif %}
end-volume

volume writebehind
    type performance/write-behind
    option cache-size 4MB
#    option flush-behind on  # olecam: increasing the performance of handling lots of small files
    subvolumes mirror-0
end-volume

volume iothreads
    type performance/io-threads
    option thread-count 16  # default is 16
    subvolumes writebehind
end-volume

volume iocache
    type performance/io-cache
    option cache-size {{ (ansible_memtotal_mb / 5) | round | int }}MB
    option cache-timeout 30
    subvolumes iothreads
end-volume

volume statprefetch
    type performance/stat-prefetch
    subvolumes iocache
end-volume

#volume nfs-server
#    type nfs/server
#    option nfs.dynamic-volumes on
#    subvolumes mirror-0
#end-volume


@@ -1,13 +0,0 @@
# gluster/consolidated

Three things to know about this role:

- It consolidates the gluster/server and gluster/client roles.
- It gets gluster working on F25 and F26.
- It requires a ton of open ports on the hosts for `gluster peer probe` to
  work. See `inventory/group_vars/odcs-backend` for an example.

Our older gluster/server and gluster/client roles only seemed to work for
el7. The advice from `#gluster` was to use the `gluster_volume` ansible
module instead of configuring the `.vol` file directly ourselves. That is
what this role does.
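
As a reference point, a minimal sketch of what the `gluster_volume` approach
looks like (the volume name, hosts, and brick path below are made-up
placeholders, not values from this repo):

- name: configure a replicated gluster volume   # sketch; all values hypothetical
  gluster_volume:
    state: present
    name: shared
    bricks: /srv/glusterfs/shared
    replicas: 2
    cluster: gluster01.example.com,gluster02.example.com
    force: yes
  run_once: true   # creating the volume only needs to happen from one node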


@@ -1,84 +0,0 @@
- name: install needed packages for server
  package: name={{ item }} state=present
  with_items:
    - glusterfs-server
  tags:
    - packages
    - gluster

- name: start glusterd for the server
  service: name=glusterd state=started enabled=true
  tags:
    - gluster

# Every server probes every other server, in two passes: probing back lets
# peers that first learned of each other by IP pick up hostnames, and
# re-probing an existing member is harmless (hence ignore_errors).
- name: Servers discover each other, pass one.
  command: gluster peer probe {{ item }}
  with_items: '{{ groups[gluster_server_group] }}'
  ignore_errors: true
  changed_when: false
  tags:
    - gluster

- name: Servers discover each other, pass two.
  command: gluster peer probe {{ item }}
  with_items: '{{ groups[gluster_server_group] }}'
  changed_when: false
  ignore_errors: true
  tags:
    - gluster

- name: install needed packages for client
  package: name={{ item }} state=present
  with_items:
    - glusterfs-cli
    - glusterfs-fuse
    - libselinux-python
  tags:
    - packages
    - gluster

- name: set sebooleans so httpd can talk to the gluster mount.
  seboolean: name={{ item }} state=true persistent=true
  with_items:
    - nis_enabled
    - httpd_use_fusefs
  tags:
    - gluster

- name: Ensure Gluster brick directories exist.
  file: "path={{ gluster_brick_dir }} state=directory mode=0775"
  when: inventory_hostname in groups[gluster_server_group]
  tags:
    - gluster

- name: Ensure Gluster mount directories exist.
  file: "path={{ gluster_mount_dir }} state=directory mode=0775"
  tags:
    - gluster

- name: Configure Gluster volume.
  gluster_volume:
    state: present
    name: "{{ gluster_brick_name }}"
    brick: "{{ gluster_brick_dir }}"
    # This cannot be '1'
    #replicas: "{{ groups[gluster_server_group] | count }}"
    cluster: "{{ groups[gluster_server_group] | join(',') }}"
    host: "{{ inventory_hostname }}"
    force: yes
  run_once: true
  ignore_errors: true
  tags:
    - gluster

- name: Ensure Gluster volume is mounted.
  mount:
    name: "{{ gluster_mount_dir }}"
    src: "{{ inventory_hostname }}:/{{ gluster_brick_name }}"
    fstype: glusterfs
    opts: "defaults,_netdev"
    state: mounted
  tags:
    - gluster

@@ -1,3 +0,0 @@
---
- name: restart glusterd
  service: name=glusterd state=restarted


@@ -1,26 +0,0 @@
---
- name: install needed packages
  package: name={{ item }} state=present
  with_items:
    - glusterfs-server
  tags:
    - packages

- name: make the datadir
  file: dest={{ datadir }} owner={{ owner }} group={{ group }} state=directory
  notify:
    - restart glusterd

- name: make the datapath
  file: dest={{ datadir }}/{{ glusterservername }} state=directory
  notify:
    - restart glusterd

- name: copy over the server config
  template: src=server.config dest=/etc/glusterfs/glusterd.vol mode=0640
  notify:
    - restart glusterd

- name: glusterd service
  service: name=glusterd state=started enabled=yes


@@ -1,29 +0,0 @@
# Config for {{ glusterservername }}
# Generated by ansible

volume posix
    type storage/posix
    option directory {{ datadir }}/{{ glusterservername }}
end-volume

volume locks
    type features/locks
    option mandatory-locks on
    subvolumes posix
end-volume

volume iothreads
    type performance/io-threads
    option thread-count 16
    subvolumes locks
end-volume

volume server-tcp
    type protocol/server
    subvolumes iothreads
    option transport-type tcp
    option auth.login.iothreads.allow {{ username }}
    option auth.login.{{ username }}.password {{ password }}
    option transport.socket.listen-port 6996
    option transport.socket.nodelay on
end-volume