---
# ocplabs-cns-nodes-setup.yaml
# Prepares GlusterFS (Container-Native Storage) nodes for an OpenShift
# Container Platform lab: LVM services, container images, kernel modules,
# firewall rules, SELinux booleans, and the CNS/heketi deployment.
- hosts: glusterfs
  tasks:
  # lvm2-lvmetad must be running for the LVM operations heketi performs on
  # the CNS nodes' raw devices.
  - name: start_lvm_socket | Start LVM Socket
    systemd:
      name: lvm2-lvmetad.socket
      enabled: true
      daemon_reload: true
      state: started
  - name: start_lvm_service | Start LVM Service
    systemd:
      name: lvm2-lvmetad.service
      enabled: true
      daemon_reload: true
      state: started
  # /etc/ostree only exists on RHEL Atomic Host; used below to decide whether
  # to pre-pull the Gluster container images.
  - name: check_ostree | Check if system is Atomic Host
    stat:
      path: /etc/ostree
    register: check_ostree
  # Emits the image:tag on stdout only when it is NOT already present locally,
  # so the next task pulls just the missing images.
  - name: docker_check_node_images | Check Docker Images for Gluster
    shell: docker images 'rhgs3/{{item}}' | grep -e '.*/rhgs3/{{item.split(":")[0]}}.*{{item.split(":")[1]}}.*' | awk '{}END{ if (!NR) print "{{item}}"}'
    register: docker_images_missing
    with_items:
      - 'rhgs-server-rhel7:v3.11.1'
      - 'rhgs-volmanager-rhel7:v3.11.1'
      - 'rhgs-gluster-block-prov-rhel7:v3.11.1'
      - 'rhgs-s3-server-rhel7:v3.11.1'
  # with_items flattens one level, so empty stdout_lines entries disappear
  # and only genuinely missing images are pulled.
  - name: docker_pull_nodes | Pull Docker Images for Gluster
    shell: docker pull rhgs3/{{item}}
    with_items: "{{ docker_images_missing.results|map(attribute='stdout_lines')|list }}"
    when: "check_ostree.stat.exists and docker_images_missing.results is defined"
  - name: kernel_modules | Ensure that kernel modules are installed (https://access.redhat.com/documentation/en-us/red_hat_gluster_storage/3.3/html-single/container-native_storage_for_openshift_container_platform/#idm139750772891936)
    shell: modprobe {{item}}
    with_items:
      - dm_snapshot
      - dm_mirror
      - dm_thin_pool
      - target_core_user
      - dm_multipath
  # NOTE(review): state=touch updates mtime every run (reports "changed"
  # each time) — harmless here, but not idempotent.
  - name: kernel_modules | Create conf files for modules
    file:
      path: "/etc/modules-load.d/{{ item }}.conf"
      state: touch
      mode: "u=rw,g=r,o=r"
    with_items:
      - dm_snapshot
      - dm_mirror
      - dm_thin_pool
      - target_core_user
      - dm_multipath
  - name: kernel_modules | Add modules to conf file
    lineinfile:
      dest: "/etc/modules-load.d/{{ item }}.conf"
      line: "{{ item }}"
    with_items:
      - dm_snapshot
      - dm_mirror
      - dm_thin_pool
      - target_core_user
      - dm_multipath
  # WARNING: destructive — wipes the partition table of /dev/sdb so heketi
  # can consume it as a raw device. Assumes /dev/sdb is the dedicated CNS
  # disk on every glusterfs host.
  - name: raw_device | Remove partition table
    shell: dd if=/dev/zero of=/dev/sdb bs=512 count=1 conv=notrunc
  # GlusterFS/CNS ports: 24007-24008 glusterd, 2222 sshd inside gluster pods,
  # 49152:49664 brick ports, 24010 + 3260 gluster-block/iSCSI, 111 portmapper.
  # Ports are quoted strings: digit/colon values like 49152:49664 are a YAML
  # 1.1 sexagesimal trap when left unquoted.
  - name: setup_firewall_glusterfs | Setup firewall for GlusterFS
    iptables:
      chain: OS_FIREWALL_ALLOW
      protocol: tcp
      destination_port: "{{ item }}"
      jump: ACCEPT
    with_items:
      - "24007"
      - "24008"
      - "2222"
      - "49152:49664"
      - "24010"
      - "3260"
      - "111"
  - name: setup_firewall_glusterfs | Save firewall settings for GlusterFS
    shell: iptables-save > /etc/sysconfig/iptables
  # FIX: the target must carry its .target suffix — systemctl mangles a
  # suffix-less "multi-user" into "multi-user.service", which does not exist.
  - name: setup_rpcbind | Setup RPC-Bind
    shell: systemctl add-wants multi-user.target rpcbind.service
  - name: setup_rpcbind | Enable RPC-Bind service
    systemd:
      name: rpcbind.service
      enabled: true
      daemon_reload: true
      state: started
- hosts: masters:nodes
  tasks:
  # Allow containers (the GlusterFS pods) to use FUSE filesystems under
  # SELinux; -P persists the boolean across reboots.
  - name: setup_selinux | Set SELinux policy for GlusterFS
    shell: setsebool -P virt_sandbox_use_fusefs 1
- hosts: localhost
  tasks:
  # SECURITY: plaintext cluster-admin password committed in the playbook —
  # move this credential into Ansible Vault or an environment variable.
  - name: setup_gluster_containers | Login
    shell: oc login -u admin -p 'redhat2018!' https://master.ocplabs.com:8443 --insecure-skip-tls-verify=true
  # Lab-only: schedule CNS pods on the master by labeling it a compute node.
  - name: setup_gluster_containers | Mark master as compute node
    shell: oc label node master.ocplabs.com node-role.kubernetes.io/compute=true --overwrite
  # ignore_errors keeps re-runs working when the project already exists.
  - name: setup_gluster_containers | Create glusterfs projects
    shell: oc new-project glusterfs
    ignore_errors: true
  # Gluster pods and the router need the privileged SCC.
  - name: setup_gluster_containers | Create SCC
    shell: oc adm policy add-scc-to-user privileged -z {{item}}
    with_items:
      - glusterfs
      - router
      - default
  - name: setup_gluster_containers | CNS deploy
    shell: cns-deploy -n glusterfs -g ocplabs-cns-topology.json -y --verbose
  # Registered cluster ID is consumed by the storage-class template below
  # (presumably referenced inside ocplabs-cns-storageclass-template.yaml —
  # TODO confirm against the template source).
  - name: setup_gluster_containers | Get Cluster-ID
    shell: |
      export HEKETI_CLI_SERVER=http://heketi-glusterfs.apps.ocplabs.com
      heketi-cli topology info | grep Cluster | awk -F': ' '{print $2}' | sort -u
    register: heketi_clusterid
  # mode is a quoted string: an unquoted 644 would be read as decimal 644,
  # not the octal permission bits.
  - name: setup_gluster_default_storageclass | Create GlusterFS storage-class Kubernetes object
    template:
      src: ocplabs-cns-storageclass-template.yaml
      dest: /tmp/ocplabs-cns-storageclass.yaml
      owner: root
      group: root
      mode: "0644"
  - name: setup_gluster_default_storageclass | Create GlusterFS storage-class
    shell: oc create -f /tmp/ocplabs-cns-storageclass.yaml
  - name: setup_gluster_default_storageclass | Setup GlusterFS file as default storage-class
    shell: oc annotate storageclass gluster-file storageclass.kubernetes.io/is-default-class="true"