#!/usr/bin/env ansible-playbook
#
# (c) 2017 DataNexus Inc. All Rights Reserved
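#
# Example invocation (illustrative only; the inventory file name and the
# variable values shown are assumptions, not values defined by this playbook):
#
#   ./provision-solr.yml -i combined_inventory -e "{ config_file: 'config.yml' }"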
---
# Build our solr and zookeeper host groups
- name: Create solr and zookeeper host groups
  hosts: localhost
  gather_facts: yes
  vars_files:
    - vars/solr.yml
  tasks:
    # load the 'configuration file', if one was defined, to get any variables
    # we might need from that file when constructing our host groups
    - name: Load configuration file
      include_vars:
        file: "{{config_file | default('config.yml')}}"
    # if we're using dynamic provisioning, build the host groups from the
    # meta-data associated with the matching nodes in the selected cloud
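    # (as a purely illustrative example, a node_map entry for this application
    # in that configuration file might look something like:
    #    node_map:
    #      - { application: solr, count: 3 }
    # the 'count' value shown here is hypothetical)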
    - block:
      # get a list of the node_map entries for this application
      - set_fact:
          node_map_entries: "{{node_map | selectattr('application', 'equalto', application) | list}}"
      # if more than one node_map entry was found or no matching node_map
      # entries were found, then it's an error
      - name: Fail playbook run if multiple solr node_map entries were found
        fail:
          msg: "Multiple {{application}} node_map entries found"
        when: node_map_entries | length > 1
      - name: Fail playbook run if no solr node_map entries were found
        fail:
          msg: "No {{application}} node_map entries found"
        when: node_map_entries | length == 0
      # build the solr and zookeeper host groups from existing inventory
      - include_role:
          name: build-app-host-groups
        vars:
          host_group_list:
            - name: solr
            - name: zookeeper
      - set_fact:
          num_solr_nodes: "{{groups['solr'] | default([]) | length}}"
          num_zk_nodes: "{{groups['zookeeper'] | default([]) | length}}"
      # if an existing set of Solr/Fusion nodes was found, throw an error
      # (unless the reuse_existing_nodes flag was set to 'true' or 'yes')
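      # (as an illustrative example, reuse of existing nodes could be requested
      # by passing something like `-e "{ reuse_existing_nodes: true }"` on the
      # command line or by setting that flag in the configuration file)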
      - block:
        - name: Fail playbook run if existing nodes found and user did not request reuse
          fail:
            msg: "Found an existing set of nodes - {{(groups['solr'] | to_yaml).split('\n').0}}; aborting playbook run"
          when: not((reuse_existing_nodes | default(false)) | bool)
        when: num_solr_nodes | int > 0
      # if an external Zookeeper ensemble (or node) was not found and we're
      # deploying a Solr/Fusion cluster (or multiple matching Solr/Fusion
      # nodes were found), then it's an error
      - name: Fail playbook run if cluster deployment and external zookeeper ensemble not found
        fail:
          msg: "An external Zookeeper ensemble is required for Solr/Fusion cluster deployments"
        when:
          - (num_solr_nodes | int == 0 and node_map_entries.0.count > 1) or num_solr_nodes | int > 1
          - num_zk_nodes | int == 0
      # if no Solr/Fusion nodes were found, then deploy a matching set of
      # instances into the target cloud environment, ensuring that they are
      # appropriately tagged based on the input tags and the node_map
      # entries for this application
      - name: Launch AWS VMs
        include_role:
          name: 'aws'
        when: num_solr_nodes | int == 0 and cloud == 'aws'
      - name: Launch OSP VMs
        include_role:
          name: 'osp'
        when: num_solr_nodes | int == 0 and cloud == 'osp'
      when: cloud is defined and (cloud == 'aws' or cloud == 'osp')
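
# (note: the `cloud` variable tested above is expected to be supplied from
# outside this play, for example as an extra variable such as
# `-e "{ cloud: aws }"` or via the configuration file; the example shown is
# illustrative only)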
# If we're dynamically provisioning, then do some final configuration on the
# VMs that were just launched (assuming that we're not re-using existing VMs)
- name: Complete OS configuration
  hosts: solr
  gather_facts: yes
  vars_files:
    - vars/solr.yml
  tasks:
    # first, initialize the play
    - include_role:
        name: initialize-play
      vars:
        gather_facts: false
        skip_network_restart: true
    # if this is a cloud deployment and we need to (re)configure nodes...
    - block:
      # then run the `preflight` role to finish the configuration of the nodes
      # that were just allocated
      - include_role:
          name: preflight
        vars:
          mountpoint: "/data"
      # and set a fact indicating that we (re)configured nodes in this play
      - set_fact:
          configured_nodes: true
      when:
        - cloud is defined and (cloud == 'aws' or cloud == 'osp')
        - ((force_node_reconfig | default(false)) | bool) or ((hostvars['localhost']['num_solr_nodes'] | int) == 0)
# Collect some Zookeeper-related facts
- name: Gather facts from Zookeeper host group (if defined)
  hosts: zookeeper
  tasks: []
# And deploy LucidWorks Fusion to the nodes in the `solr` host group; note that
# if there is more than one node in the `solr` host group, those nodes will be
# configured as a Solr cluster
- name: Install/configure Solr server(s)
  hosts: solr
  gather_facts: no
  vars_files:
    - vars/solr.yml
  vars:
    - combined_package_list: "{{ (default_packages|default([])) | union(solr_package_list) | union((install_packages_by_tag|default({})).solr|default([])) }}"
    - solr_nodes: "{{groups['solr']}}"
    - zookeeper_nodes: "{{groups['zookeeper'] | default([])}}"
  pre_tasks:
    # first, initialize the play by loading any `config_file` that may have
    # been passed in, restarting the network on the target nodes (if desired),
    # and determining the `data_iface` and `api_iface` values from the input
    # `iface_description_array` (if one was passed in)
    - include_role:
        name: initialize-play
      vars:
        skip_network_restart: "{{configured_nodes | default(false)}}"
    # and now that we know the name of our `data_iface`, we can construct the
    # list of zk_nodes (the data_iface IP addresses of our zookeeper_nodes)
    - set_fact:
        zk_nodes: "{{(zookeeper_nodes | default([])) | map('extract', hostvars, [('ansible_' + data_iface), 'ipv4', 'address']) | list}}"
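    # (for example, if data_iface resolved to something like 'eth0', zk_nodes
    # would end up as a plain list of IPv4 addresses such as
    # ['10.10.0.11', '10.10.0.12', '10.10.0.13']; the interface name and
    # addresses shown here are purely illustrative)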
  # now that we have all of the facts we need, provision the input nodes
  roles:
    - role: solr