Skip to content

Commit 99a0274

Browse files
update for OSP single subnet; ansible 2.4
1 parent 7641b06 commit 99a0274

File tree

13 files changed

+152
-335
lines changed

13 files changed

+152
-335
lines changed

provision-solr.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -63,7 +63,7 @@
6363
when: num_solr_nodes | int == 0 and cloud == 'aws'
6464
- include_role:
6565
name: 'osp'
66-
when: num_solr_nodes == 0 and cloud == 'osp'
66+
when: num_solr_nodes | int == 0 and cloud == 'osp'
6767
when: cloud is defined and (cloud == 'aws' or cloud == 'osp')
6868

6969
# If we're dynamically provisioning, then do some final configuration on the

roles/aws/tasks/main.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -20,7 +20,7 @@
2020
- set_fact:
2121
application_roles: "{{((roles_list | length) == (node_map_entries | length)) | ternary(roles_list, roles_list + ['none'])}}"
2222
# Launch AMIs if they aren't already running
23-
- ec2_remote_facts:
23+
- ec2_instance_facts:
2424
region: "{{region}}"
2525
filters:
2626
instance-state-name: running
@@ -49,5 +49,5 @@
4949
when: internal_subnet is undefined
5050
# if we didn't find any matching instances that are running, then
5151
# launch a set of VMs with those tags
52-
- include: launch-amis.yml static=no
52+
- include_tasks: launch-amis.yml
5353
when: not matching_instances_found

roles/build-app-host-groups/files/build_aws_host_groups.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -9,9 +9,9 @@
99
node_list_name: "{{(hg_item_role == 'none') | ternary((hg_item_name + '_nodes'), (hg_item_name + '_' + hg_item_role + '_nodes'))}}"
1010
app_group_name: "{{(hg_item_role == 'none') | ternary(hg_item_name,(hg_item_name + '_' + hg_item_role))}}"
1111
# then, gather the facts that are used to create our host group from the
12-
# `ec2_remote_facts` module
12+
# `ec2_instance_facts` module
1313
- name: Gather facts for the hosts that match our filter
14-
ec2_remote_facts:
14+
ec2_instance_facts:
1515
region: "{{region}}"
1616
filters:
1717
instance-state-name: running

roles/build-app-host-groups/files/build_osp_host_groups.yml

+14-7
Original file line numberDiff line numberDiff line change
@@ -29,17 +29,24 @@
2929
# containing that list and create the appropriately named host groups
3030
- set_fact:
3131
"{{node_list_name}}": "{{app_nodes}}"
32+
3233
- name: Create {{app_group_name}} host group from OpenStack meta-data
3334
add_host:
3435
name: "{{item}}"
3536
groups: "{{app_group_name}},{{node_list_name}}"
36-
ansible_ssh_host: "{{(os_inventory_json | json_query('_meta.hostvars.\"' + item + '\".openstack.addresses.private[].addr') | list).0}}"
37+
ansible_host: "{{ (os_inventory_json | json_query('_meta.hostvars.\"' + item + '\".openstack.addresses.private[1].addr')) }}"
3738
ansible_ssh_private_key_file: "{{private_key_path}}/{{region}}-{{project}}-{{hg_item_name}}-{{domain}}-private-key.pem"
3839
with_items: "{{app_nodes | default([])}}"
39-
# finally, add the floating IP addresses for nodes in this application group to
40-
# the `osp_floating_ip` list (this fact currently is not used, but it was used
41-
# in the past so we still build it)
42-
- name: Add floating IP addresses to osp_floating_ip for the {{app_group_name}} nodes
43-
set_fact:
44-
osp_floating_ip: "{{(osp_floating_ip | default({})) | combine({item: (os_inventory_json | json_query('_meta.hostvars.\"' + item + '\".openstack.addresses.public') | selectattr('OS-EXT-IPS:type', 'equalto', 'floating') | map(attribute='addr') | list).0})}}"
40+
when:
41+
- internal_uuid == external_uuid
42+
43+
# multiple subnet hostgroups already have the correct ansible_host
44+
- name: Create {{app_group_name}} host group from OpenStack meta-data
45+
add_host:
46+
name: "{{item}}"
47+
groups: "{{app_group_name}},{{node_list_name}}"
48+
ansible_host: "{{(os_inventory_json | json_query('_meta.hostvars.\"' + item + '\".openstack.addresses.private[].addr') | list).0}}"
49+
ansible_ssh_private_key_file: "{{private_key_path}}/{{region}}-{{project}}-{{hg_item_name}}-{{domain}}-private-key.pem"
4550
with_items: "{{app_nodes | default([])}}"
51+
when:
52+
- internal_uuid != external_uuid

roles/build-app-host-groups/tasks/main.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -22,14 +22,14 @@
2222
cluster_nodes: "{{(os_inventory_json | json_query('[\"meta-Cluster_' + (cluster | default('a')) + '\"]')).0}}"
2323
# and, finally, loop through the host_group_list, building each host group
2424
# (in turn) from the lists of nodes we just constructed
25-
- include: ../files/build_osp_host_groups.yml
25+
- include_tasks: ../files/build_osp_host_groups.yml
2626
with_items: "{{host_group_list}}"
2727
loop_control:
2828
loop_var: host_group_item
2929
when: cloud == "osp"
3030
# If we're building a cluster in an AWS environment, then loop through the
3131
# host_group_list, building each host group (in turn)
32-
- include: ../files/build_aws_host_groups.yml
32+
- include_tasks: ../files/build_aws_host_groups.yml
3333
with_items: "{{host_group_list}}"
3434
loop_control:
3535
loop_var: host_group_item

roles/build-app-host-groups/utils/openstack.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -120,7 +120,7 @@ def get_host_groups(inventory, refresh=False):
120120

121121
def append_hostvars(hostvars, groups, key, server, namegroup=False):
122122
hostvars[key] = dict(
123-
ansible_ssh_host=server['interface_ip'],
123+
ansible_host=server['interface_ip'],
124124
openstack=server)
125125
for group in get_groups_from_server(server, namegroup=namegroup):
126126
groups[group].append(key)
+1-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,3 @@
11
# (c) 2017 DataNexus Inc. All Rights Reserved
22
---
3-
- include: setup-solr-server-properties.yml static=no
3+
- include_tasks: setup-solr-server-properties.yml

roles/configure-solr-nodes/tasks/setup-solr-server-properties.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -129,7 +129,7 @@
129129
when: not (solr_data_dir is undefined or solr_data_dir is none or solr_data_dir | trim == '')
130130
# loop through the items in our list and, if they don't exist in the new
131131
# data directory, move them over there
132-
- include: move-files.yml
132+
- include_tasks: move-files.yml
133133
with_items: "{{(dir_list.files + file_list.files) | map(attribute='path') | list}}"
134134
when: not (solr_data_dir is undefined or solr_data_dir is none or solr_data_dir | trim == '')
135135
## and finally, now that the contents are moved, remove the default

roles/initialize-play/tasks/main.yml

+3-6
Original file line numberDiff line numberDiff line change
@@ -50,16 +50,14 @@
5050
# description values, while the value that type is set to will depend on the
5151
# 'type' in each of those values; currently either 'cidr' or 'name' values
5252
# are supported for the 'type' field)
53-
- include: "{{role_path}}/files/get_iface_name.yml"
54-
static: no
53+
- include_tasks: "{{role_path}}/files/get_iface_name.yml"
5554
with_items: "{{iface_description_array}}"
5655
loop_control:
5756
loop_var: iface_description
5857
when: not (iface_description_array is undefined)
5958
# finally, get values for the addresses of the `data_iface` and `api_iface`
6059
# (if defined)
61-
- include: "{{role_path}}/files/get_iface_addr.yml"
62-
static: no
60+
- include_tasks: "{{role_path}}/files/get_iface_addr.yml"
6361
vars:
6462
iface_name: "{{data_iface}}"
6563
as_fact: "data_addr"
@@ -70,8 +68,7 @@
7068
- set_fact:
7169
api_iface: "{{(lcl_iface_names | reject('equalto', 'lo') | reject('equalto', data_iface) | list).0}}"
7270
when: api_iface == ''
73-
- include: "{{role_path}}/files/get_iface_addr.yml"
74-
static: no
71+
- include_tasks: "{{role_path}}/files/get_iface_addr.yml"
7572
vars:
7673
iface_name: "{{api_iface}}"
7774
as_fact: "api_addr"

roles/osp/tasks/launch-vms.yml

+112-59
Original file line numberDiff line numberDiff line change
@@ -133,9 +133,9 @@
133133
# few tasks
134134
- set_fact:
135135
node_role_list: []
136-
nic_list: "{{(external_uuid is defined) | ternary([{'net-id': internal_uuid}, {'net-id': external_uuid}], [{'net-id': internal_uuid}])}}"
136+
nic_list: "{{ (internal_uuid != external_uuid) | ternary([{ 'net-id': internal_uuid }, { 'net-id': external_uuid }], [{ 'net-id': external_uuid }]) }}"
137137
- name: Build up a list of roles (per node being created)
138-
include: build-role-list.yml static=no
138+
include_tasks: build-role-list.yml
139139
with_items: "{{node_map | selectattr('application', 'equalto', application) | list}}"
140140
loop_control:
141141
loop_var: node_map_entry
@@ -149,68 +149,121 @@
149149
display_name: "{{project}}_{{application}}_{{node_role_list[item | int]}}_{{cluster | default('a')}}_{{item}}"
150150
with_sequence: start=0 end="{{(node_role_list | length) - 1}}"
151151
when: data_volume is defined
152-
# openstack instance names must be unique, so we need to add a sequence number to each name
153-
- name: Launch instances
154-
os_server:
155-
state: present
156-
cloud: "{{tenant}}"
157-
name: "{{cloud}}_{{tenant}}_{{project}}_{{dataflow | default('none')}}_{{application}}_{{domain}}_{{cluster | default('a')}}_{{item}}"
158-
meta:
159-
Name: "{{cloud}}_{{tenant}}_{{project}}_{{dataflow | default('none')}}_{{application}}_{{domain}}_{{cluster | default('a')}}"
160-
Tenant: "{{tenant}}"
161-
Project: "{{project}}"
162-
Cloud: "{{cloud}}"
163-
Domain: "{{domain}}"
164-
Application: "{{application}}"
165-
Cluster: "{{cluster | default('a')}}"
166-
Role: "{{node_role_list[item | int]}}"
167-
Dataflow: "{{dataflow | default('none')}}"
168-
region_name: "{{region}}"
169-
availability_zone: "{{zone}}"
170-
image: "{{image}}"
171-
key_name: "{{keypair.key.name}}"
172-
timeout: 200
173-
auto_ip: yes
174-
reuse_ips: true
175-
flavor: "{{type}}"
176-
nics: "{{nic_list}}"
177-
security_groups: "{{sg_names_str}}"
178-
volumes: "{{project}}_{{application}}_{{node_role_list[item | int]}}_{{cluster | default('a')}}_{{item}}"
179-
register: osp_out
180-
with_sequence: start=0 end="{{(node_role_list | length) - 1}}"
181-
# setup a floating IP address for each instance from the float_pool
182-
- name: assigning floating IPs to instances
183-
os_floating_ip:
184-
state: present
185-
reuse: true
186-
cloud: "{{tenant}}"
187-
server: "{{item.server.id}}"
188-
fixed_address: "{{item.server.addresses.public.0['addr']}}"
189-
network: "{{float_pool}}"
190-
nat_destination: "{{external_uuid}}"
191-
with_items: "{{osp_out.results}}"
192-
register:
193-
when: not osp_out | skipped and osp_out.changed and osp_out.results | length > 0
194-
# construct the `app_group_name_list` and `node_list_name_list` lists from the
195-
# `application_roles` list
152+
153+
# construct the `app_group_name_list` and `node_list_name_list` lists from the `application_roles` list
196154
- set_fact:
197155
node_list_name_list: "{{(node_list_name_list | default([])) + [((item == 'none') | ternary((application + '_nodes'), (application + '_' + item + '_nodes')))]}}"
198156
app_group_name_list: "{{(app_group_name_list | default([])) + [((item == 'none') | ternary(application, application + '_' + item))]}}"
199157
with_items: "{{node_role_list}}"
200-
# add the instances created to the corresponding application host group
201-
- name: Add new instances to the appropriate host groups
202-
add_host:
203-
name: "{{item.1.server.addresses.private.0['addr']}}"
204-
groups: "{{app_group_name_list[item.0 | int]}},{{node_list_name_list[item.0 | int]}}"
205-
ansible_ssh_host: "{{item.1.server.addresses.private.0['addr']}}"
206-
ansible_ssh_private_key_file: "{{private_keyfile_path}}"
207-
with_indexed_items: "{{osp_out.results}}"
158+
159+
# this handles single subnet, eg, when internal and external uuids are identical
160+
# openstack instance names must be unique, so we need to add a sequence number to each name
161+
- block:
162+
- name: Launch single subnet instances
163+
os_server:
164+
state: present
165+
cloud: "{{tenant}}"
166+
name: "{{cloud}}_{{tenant}}_{{project}}_{{dataflow | default('none')}}_{{application}}_{{domain}}_{{cluster | default('a')}}_{{item}}"
167+
meta:
168+
Name: "{{cloud}}_{{tenant}}_{{project}}_{{dataflow | default('none')}}_{{application}}_{{domain}}_{{cluster | default('a')}}"
169+
Tenant: "{{tenant}}"
170+
Project: "{{project}}"
171+
Cloud: "{{cloud}}"
172+
Domain: "{{domain}}"
173+
Application: "{{application}}"
174+
Cluster: "{{cluster | default('a')}}"
175+
Role: "{{node_role_list[item | int]}}"
176+
Dataflow: "{{dataflow | default('none')}}"
177+
region_name: "{{region}}"
178+
availability_zone: "{{zone}}"
179+
image: "{{image}}"
180+
key_name: "{{keypair.key.name}}"
181+
timeout: 200
182+
floating_ip_pools: "{{ float_pool }}"
183+
reuse_ips: true
184+
flavor: "{{type}}"
185+
nics: "{{nic_list}}"
186+
security_groups: "{{sg_names_str}}"
187+
volumes: "{{project}}_{{application}}_{{node_role_list[item | int]}}_{{cluster | default('a')}}_{{item}}"
188+
register: osp_single
189+
with_sequence: start=0 end="{{(node_role_list | length) - 1}}"
190+
191+
- name: Add new instances to the appropriate host groups
192+
add_host:
193+
name: "{{item[1].server.addresses.private.0['addr']}}"
194+
groups: "{{app_group_name_list[item.0 | int]}},{{node_list_name_list[item.0 | int]}}"
195+
ansible_host: "{{item[1].server.addresses.private.1['addr']}}"
196+
ansible_ssh_private_key_file: "{{private_keyfile_path}}"
197+
with_indexed_items: "{{osp_single.results}}"
198+
when:
199+
- internal_uuid == external_uuid
200+
201+
# this handles multiple subnets
202+
# openstack instance names must be unique, so we need to add a sequence number to each name
203+
- block:
204+
- name: Launch multiple subnet instances
205+
os_server:
206+
state: present
207+
cloud: "{{tenant}}"
208+
name: "{{cloud}}_{{tenant}}_{{project}}_{{dataflow | default('none')}}_{{application}}_{{domain}}_{{cluster | default('a')}}_{{item}}"
209+
meta:
210+
Name: "{{cloud}}_{{tenant}}_{{project}}_{{dataflow | default('none')}}_{{application}}_{{domain}}_{{cluster | default('a')}}"
211+
Tenant: "{{tenant}}"
212+
Project: "{{project}}"
213+
Cloud: "{{cloud}}"
214+
Domain: "{{domain}}"
215+
Application: "{{application}}"
216+
Cluster: "{{cluster | default('a')}}"
217+
Role: "{{node_role_list[item | int]}}"
218+
Dataflow: "{{dataflow | default('none')}}"
219+
region_name: "{{region}}"
220+
availability_zone: "{{zone}}"
221+
image: "{{image}}"
222+
key_name: "{{keypair.key.name}}"
223+
timeout: 200
224+
auto_ip: yes
225+
reuse_ips: true
226+
flavor: "{{type}}"
227+
nics: "{{nic_list}}"
228+
security_groups: "{{sg_names_str}}"
229+
volumes: "{{project}}_{{application}}_{{node_role_list[item | int]}}_{{cluster | default('a')}}_{{item}}"
230+
register: osp_multiple
231+
with_sequence: start=0 end="{{(node_role_list | length) - 1}}"
232+
# setup a floating IP address for each instance from the float_pool
233+
- name: assigning floating IPs to instances
234+
os_floating_ip:
235+
state: present
236+
reuse: true
237+
cloud: "{{tenant}}"
238+
server: "{{item.server.id}}"
239+
fixed_address: "{{item.server.addresses.public.0['addr']}}"
240+
network: "{{float_pool}}"
241+
nat_destination: "{{external_uuid}}"
242+
with_items: "{{osp_multiple.results}}"
243+
register:
244+
when: not osp_multiple | skipped and osp_multiple.changed and osp_multiple.results | length > 0
245+
246+
# add the instances created to the corresponding application host group
247+
- name: Add new instances to the appropriate host groups
248+
add_host:
249+
name: "{{item.1.server.addresses.private.0['addr']}}"
250+
groups: "{{app_group_name_list[item.0 | int]}},{{node_list_name_list[item.0 | int]}}"
251+
ansible_host: "{{item.1.server.addresses.private.0['addr']}}"
252+
ansible_ssh_private_key_file: "{{private_keyfile_path}}"
253+
with_indexed_items: "{{osp_multiple.results}}"
254+
when:
255+
- internal_uuid != external_uuid
256+
257+
# sigh @ ansible for making me do this
258+
- set_fact:
259+
osp: "{{ (internal_uuid == external_uuid) | ternary(osp_single, osp_multiple) }}"
260+
208261
# wait_for doesn't work with a proxy, so we need to ssh and check output
209262
- name: Wait for instances to be accessible via SSH
210-
shell: /bin/sleep 10 && /usr/bin/ssh -i "{{private_keyfile_path}}" "{{user}}@{{item.server.addresses.private.0['addr']}}" echo DataNexus
263+
shell: /bin/sleep 20 && /usr/bin/ssh -i "{{ private_keyfile_path }}" "{{ user }}@{{ hostvars[item.server.addresses.private.0['addr']].ansible_host }}" echo DataNexus
211264
register: output
212-
retries: 24
213-
delay: 15
265+
retries: 4
266+
delay: 10
214267
until: output.stdout.find('DataNexus') != -1
215-
with_items: "{{osp_out.results}}"
216-
when: not osp_out | skipped and osp_out.changed and osp_out.results | length > 0
268+
with_items: "{{osp.results}}"
269+
when: not osp | skipped and osp.changed and osp.results | length > 0

roles/osp/tasks/main.yml

+7-1
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,11 @@
1919
# should be added to the `roles_list` we just constructed, above)
2020
- set_fact:
2121
application_roles: "{{((roles_list | length) == (node_map_entries | length)) | ternary(roles_list, roles_list + ['none'])}}"
22+
23+
# set the list of nodes for this application
24+
- set_fact:
25+
application_nodes: "{{(os_inventory_json | json_query('[\"meta-Application_' + application + '\"]')).0}}"
26+
2227
# and build a list of the instances that match the input roles from the
2328
# matching `node_map` entries
2429
- set_fact:
@@ -29,6 +34,7 @@
2934
# that are used when launching instances (below) if no matching nodes are found
3035
- set_fact:
3136
matching_instances: "{{cloud_nodes | intersect(tenant_nodes) | intersect(project_nodes) | intersect(dataflow_nodes) | intersect(domain_nodes) | intersect(application_nodes) | intersect(cluster_nodes) | intersect(role_nodes)}}"
37+
3238
- set_fact:
3339
matching_instances_found: "{{not (matching_instances | length) == 0}}"
3440
root_volume_default: "{{(data_volume is defined) | ternary(11, 40)}}"
@@ -38,5 +44,5 @@
3844
when: internal_subnet is undefined
3945
# if we didn't find any matching instances, then launch a set of VMs that are
4046
# tagged with the input tags
41-
- include: launch-vms.yml static=no
47+
- include_tasks: launch-vms.yml
4248
when: not matching_instances_found

0 commit comments

Comments (0)