Skip to content

Commit ec57694

Browse files
author
Tom McSweeney
committed
Add labels to the fail tasks to clarify output of playbook runs; clean up extraneous whitespace
1 parent 515043e commit ec57694

File tree

3 files changed

+13
-8
lines changed

3 files changed

+13
-8
lines changed

provision-solr.yml

+10-5
Original file line numberDiff line numberDiff line change
@@ -22,10 +22,12 @@
2222
node_map_entries: "{{node_map | selectattr('application', 'equalto', application) | list}}"
2323
# if more than one node_map entry was found or no matching node_map
2424
# entries were found, then it's an error
25-
- fail:
25+
- name: Fail playbook run if multiple solr node_map entries were found
26+
fail:
2627
msg: "Multiple {{application}} node_map entries found"
2728
when: node_map_entries | length > 1
28-
- fail:
29+
- name: Fail playbook run if no solr node_map entries were found
30+
fail:
2931
msg: "No {{application}} node_map entries found"
3032
when: node_map_entries | length == 0
3133
# build the solr and zookeeper host groups from existing inventory
@@ -49,7 +51,8 @@
4951
# if an external Zookeeper ensemble (or node) was not found and we're
5052
# deploying a Solr/Fusion cluster (or multiple matching Solr/Fusion
5153
# nodes were found), then it's an error
52-
- fail:
54+
- name: Fail playbook run if cluster deployment and external zookeeper ensemble not found
55+
fail:
5356
msg: "An external Zookeeper ensemble is required for Solr/Fusion cluster deployments"
5457
when:
5558
- (num_solr_nodes | int == 0 and node_map_entries.0.count > 1) or num_solr_nodes | int > 1
@@ -58,10 +61,12 @@
5861
instances into the target cloud environment, ensuring that they
5962
are appropriately tagged, based on the input tags and the node_map
6063
# entries for this application
61-
- include_role:
64+
- name: Launch AWS VMs
65+
include_role:
6266
name: 'aws'
6367
when: num_solr_nodes | int == 0 and cloud == 'aws'
64-
- include_role:
68+
- name: Launch OSP VMs
69+
include_role:
6570
name: 'osp'
6671
when: num_solr_nodes | int == 0 and cloud == 'osp'
6772
when: cloud is defined and (cloud == 'aws' or cloud == 'osp')

roles/osp/tasks/launch-vms.yml

+1-1
Original file line numberDiff line numberDiff line change
@@ -260,7 +260,7 @@
260260

261261
# wait_for doesn't work with a proxy, so we need to ssh and check output
262262
- name: Wait for instances to be accessible via SSH
263-
shell: /bin/sleep 20 && /usr/bin/ssh -i "{{ private_keyfile_path }}" "{{ user }}@{{ hostvars[item.server.addresses.private.0['addr']].ansible_host }}" echo DataNexus
263+
shell: /bin/sleep 20 && /usr/bin/ssh -i "{{ private_keyfile_path }}" "{{ user }}@{{ hostvars[item.server.addresses.private.0['addr']].ansible_host }}" echo DataNexus
264264
register: output
265265
retries: 4
266266
delay: 10

roles/preflight/tasks/main.yml

+2-2
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
dest: "/etc/sysconfig/network-scripts/ifcfg-{{api_iface}}"
1212
remote_src: True
1313
- name: Replace the device in the new ifcfg-{{api_iface}} script
14-
lineinfile:
14+
lineinfile:
1515
name: "/etc/sysconfig/network-scripts/ifcfg-{{api_iface}}"
1616
regexp: '^DEVICE='
1717
line: 'DEVICE="{{api_iface}}"'
@@ -81,7 +81,7 @@
8181
- name: Set hostname actively so rebooting is unnecessary
8282
command: /usr/bin/hostnamectl set-hostname {{application}}-{{uuid.stdout}}
8383
- name: Set pretty hostname actively so rebooting is unnecessary
84-
command: /usr/bin/hostnamectl --pretty set-hostname "{{tenant}} {{application}}"
84+
command: /usr/bin/hostnamectl --pretty set-hostname "{{tenant}} {{application}}"
8585
- name: Set new hostname in /etc/hostname
8686
replace:
8787
path: /etc/hostname

0 commit comments

Comments
 (0)