From a104a19f16c8ab53b81d978ed25e306e24957e3e Mon Sep 17 00:00:00 2001
From: lhoss
Date: Thu, 1 Jul 2021 12:37:27 +0200
Subject: [PATCH 1/2] new 4-node cluster (3M 1W)

---
 inventory/cdpAz3M/application.yml      |  11 +
 inventory/cdpAz3M/cluster_3M.yml       | 534 +++++++++++++++++++++++++
 inventory/cdpAz3M/definition.yml       |  26 ++
 inventory/cdpAz3M/inventory_static.ini |  54 +++
 4 files changed, 625 insertions(+)
 create mode 100644 inventory/cdpAz3M/application.yml
 create mode 100644 inventory/cdpAz3M/cluster_3M.yml
 create mode 100644 inventory/cdpAz3M/definition.yml
 create mode 100644 inventory/cdpAz3M/inventory_static.ini

diff --git a/inventory/cdpAz3M/application.yml b/inventory/cdpAz3M/application.yml
new file mode 100644
index 0000000..dd8a16e
--- /dev/null
+++ b/inventory/cdpAz3M/application.yml
@@ -0,0 +1,11 @@
+---
+# Dummy playbook, called from the main playbook after the cluster install is done
+- name: Coda
+  hosts: localhost
+  connection: local
+  gather_facts: no
+  become: no
+  tasks:
+    - name: Deployment results
+      debug:
+        msg: Success!
\ No newline at end of file
diff --git a/inventory/cdpAz3M/cluster_3M.yml b/inventory/cdpAz3M/cluster_3M.yml
new file mode 100644
index 0000000..d8a4596
--- /dev/null
+++ b/inventory/cdpAz3M/cluster_3M.yml
@@ -0,0 +1,534 @@
+---
+## The "clusters" config below depends on extra secret vars being set in the profile or inventory:
+# gateway_master_secret
+# idbroker_master_secret
+
+## Other custom vars used in the "clusters" config:
+tmp_base: "/tmp"  # TODO on DSS: /app/tmp
+# Ensure the HDFS replication factor is not set higher than the number of worker nodes!
+dfs_replication: 1
+
+
+## Start of the config expected by the cloudera playbooks/roles:
+cloudera_manager_version: 7.1.4
+
+clusters:
+  - name: DSS Preview Cluster
+    repositories:
+      - https://archive.cloudera.com/cdh7/7.1.4.0/parcels/
+    services:
+      - ZOOKEEPER
+      - HDFS
+      - YARN
+      - SPARK_ON_YARN
+      - RANGER
+      - HUE
+      - INFRA_SOLR
+      - ATLAS
+      - HIVE
+      - HIVE_ON_TEZ
+      - TEZ
+      - ZEPPELIN
+      - KAFKA
+      - KNOX
+      - HBASE
+      - STREAMS_MESSAGING_MANAGER
+      - SCHEMAREGISTRY
+      - QUEUEMANAGER
+
+    ### DSS_DEV service configs
+    # Note: The config was derived from a cluster export JSON (converted to YML via a custom J2-based script)
+    configs:
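+      # Note: the various *_free_space_absolute_thresholds values below are raw bytes,
+      #       e.g. 2147483648 = 2 GiB (warning) and 1073741824 = 1 GiB (critical).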
+      # Service
+      RANGER:
+        RANGER_ADMIN:
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        RANGER_TAGSYNC:
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        RANGER_USERSYNC:
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          ranger.usersync.sleeptimeinmillisbetweensynccycle: "14400000"
+
+        #SERVICEWIDE:
+
+
+      # Service
+      HUE:
+        HUE_LOAD_BALANCER:
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        HUE_SERVER:
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        KT_RENEWER:
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        SERVICEWIDE:
+          service_config_suppression_hue_load_balancer_count_validator: "true"
+          time_zone: "Europe/Zurich"
+
+
+      # Service
+      HDFS:
+        #BALANCER:
+
+        DATANODE:
+          datanode_data_directories_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          # DONE custom dirs (all 9 disks)
+          dfs_data_dir_list: "/app/data/disk01/dfs/dn,/app/data/disk02/dfs/dn,/app/data/disk03/dfs/dn,/app/data/disk04/dfs/dn,/app/data/disk05/dfs/dn,/app/data/disk06/dfs/dn,/app/data/disk07/dfs/dn,/app/data/disk08/dfs/dn,/app/data/disk09/dfs/dn"
+          dfs_datanode_data_dir_perm: "700"
+          dfs_datanode_du_reserved: "10732175360"
+          # value of 2 (instead of '3') proposed by xlaho:
+          dfs_datanode_failed_volumes_tolerated: "2"
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          role_config_suppression_dfs_datanode_http_port: "true"
+          role_config_suppression_dfs_datanode_port: "true"
+
+        FAILOVERCONTROLLER:
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        GATEWAY:
+          dfs_client_use_trash: "true"
+
+        HTTPFS:
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        JOURNALNODE:
+          # DONE custom dirs
+          dfs_journalnode_edits_dir: "/app/data/journalnode/dfs/jn"
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          journalnode_edits_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+
+        NAMENODE:
+          # DONE custom dirs
+          dfs_name_dir_list: "/app/data/namenode/dfs/nn"
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          namenode_data_directories_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          role_health_suppression_name_node_safe_mode: "true"
+
+        NFSGATEWAY:
+          dfs_nfs3_dump_dir: "{{tmp_base}}/.hdfs-nfs"
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          nfsgateway_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        SECONDARYNAMENODE:
+          # DONE custom dirs
+          fs_checkpoint_dir_list: "/app/data/namenode/dfs/snn"
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          secondarynamenode_checkpoint_directories_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+
+        SERVICEWIDE:
+          dfs_encrypt_data_transfer_algorithm: "AES/CTR/NoPadding"
+          service_health_suppression_hdfs_verify_ec_with_topology: "true"
+          dfs_replication: "{{ dfs_replication }}"
+
+
+      # Service
+      # Custom: renamed to INFRA_SOLR (the export script put 2 SOLR_SERVER elements here; I removed 1)
+      INFRA_SOLR:
+        GATEWAY:
+          client_config_root_dir: "/etc/solr-infra"
+
+        SOLR_SERVER:
+          heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          log_directory_free_space_absolute_thresholds: '{"warning":524288000,"critical":262144000}'
+          process_auto_restart: "true"
"true" + role_config_suppression_solr_http_port: "true" + role_config_suppression_solr_https_port: "true" + # DONE custom dir + solr_data_dir: "/app/data/solr-infra" + solr_java_direct_memory_size: "11471421440" + solr_java_heapsize: "8823767040" + + SERVICEWIDE: + hdfs_data_dir: "/solr-infra" + rm_dirty: "true" + zookeeper_znode: "/solr-infra" + + + # Service + ATLAS: + ATLAS_SERVER: + atlas_max_heap_size: "4096" + #SERVICEWIDE: + + + # Service + ZOOKEEPER: + SERVER: + # DONE custom dirs + dataDir: "/app/data/zookeeper" + dataLogDir: "/app/data/zookeeper" + heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + maxClientCnxns: "300" + maxSessionTimeout: "60000" + zookeeper_server_data_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + zookeeper_server_data_log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + + + # Service + QUEUEMANAGER: + QUEUEMANAGER_STORE: + log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + log_threshold: "DEBUG" + process_auto_restart: "true" + QUEUEMANAGER_WEBAPP: + log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + process_auto_restart: "true" + + + # Service + HIVE_ON_TEZ: + #GATEWAY: + + HIVESERVER2: + heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + hive_on_tez_hs2_downloaded_resources_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + hive_on_tez_hs2_exec_local_scratch_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + process_auto_restart: "true" + + SERVICEWIDE: + hms_connector: "hive" + + + # Service + ZEPPELIN: + ZEPPELIN_SERVER: + log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + # DONE custom dirs + zeppelin.dep.localrepo: "/app/data/zeppelin/local-repo" + zeppelin.interpreter.localRepo: "/app/data/zeppelin/local-repo" + zeppelin.war.tempdir: "/app/data/zeppelin/webapps" + + #SERVICEWIDE: + + # TODO test install with SPARK3 (requires Spark3 parcel repo setup?!) 
+      # Service
+      #SPARK3_ON_YARN:
+      #  GATEWAY:
+      #  SPARK3_YARN_HISTORY_SERVER:
+
+
+      # Service
+      KAFKA:
+        KAFKA_BROKER:
+          # DONE custom dirs
+          log.dirs: "/app/data/kafka"
+          log.retention.ms: "259200000"
+          log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+          process_auto_restart: "true"
+          role_config_suppression_port: "true"
+        ## The following 2 roles are disabled because they require extra custom configs (like a "Broker List") to work
+        # KAFKA_CONNECT:
+        #   log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+        #   process_auto_restart: "true"
+        # KAFKA_MIRROR_MAKER:
+        #   log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}'
+        #   process_auto_restart: "true"
+
+        SERVICEWIDE:
+          log.cleaner.delete.retention.ms: "259200000"
+          offsets.retention.minutes: "4320"
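+          # Note: 259200000 ms and 4320 min both equal 3 days, so log and offset retention stay aligned.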
"100" + # DONE custom dirs. Note: The nodes have a custom dir for it: /app/data/nodemanager + yarn_nodemanager_local_dirs: "/app/data/disk01/yarn/nm,/app/data/disk02/yarn/nm,/app/data/disk03/yarn/nm,/app/data/disk04/yarn/nm,/app/data/disk05/yarn/nm,/app/data/disk06/yarn/nm,/app/data/disk07/yarn/nm,/app/data/disk08/yarn/nm,/app/data/disk09/yarn/nm" + # set via varlib*j2: + #yarn_nodemanager_recovery_dir: "/app/opt/hadoop-yarn/yarn-nm-recovery" + yarn_nodemanager_resource_memory_mb: "32768" + + RESOURCEMANAGER: + heap_dump_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + yarn_resourcemanager_max_completed_applications: "1000" + yarn_scheduler_maximum_allocation_mb: "10067" + yarn_scheduler_maximum_allocation_vcores: "8" + + SERVICEWIDE: + rm_dirty: "true" + + + # Service °20 is LIVY + # LIVY: + # GATEWAY: + # LIVY_SERVER: + # log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + # process_auto_restart: "true" + # SERVICEWIDE: + + + # Service °21 is SPARK_ON_YARN + SPARK_ON_YARN: + #GATEWAY: + SPARK_YARN_HISTORY_SERVER: + # set via varlib*j2: + #local_storage_dir: "/app/opt/spark/history" + log_directory_free_space_absolute_thresholds: '{"warning":2147483648,"critical":1073741824}' + process_auto_restart: "true" + #SERVICEWIDE: + + + # Service °22 is HBASE + HBASE: + # #GATEWAY: + # HBASERESTSERVER: + # HBASETHRIFTSERVER: + # MASTER: + REGIONSERVER: + hbase_bucketcache_size: "1" + hbase_regionserver_java_heapsize: "4294967296" #4GB + SERVICEWIDE: + rm_dirty: "true" + + + # Service °23 is STREAMS_MESSAGING_MANAGER + STREAMS_MESSAGING_MANAGER: + STREAMS_MESSAGING_MANAGER_SERVER: + SMM_HEAP_SIZE: "5928" + log_directory_free_space_absolute_thresholds: '{"warning":5368709120,"critical":2147483648}' + STREAMS_MESSAGING_MANAGER_UI: + log_directory_free_space_absolute_thresholds: '{"warning":5368709120,"critical":2147483648}' + + #SERVICEWIDE: + + + ## Services newly added (since not present on the cluster we derived above cfg from) + # Service + #SCHEMAREGISTRY: + + ### DSS_DEV host_templates + host_templates: + Master1: + ZOOKEEPER: [SERVER] + HDFS: + - NAMENODE + #- JOURNALNODE + #- FAILOVERCONTROLLER + - NFSGATEWAY + - HTTPFS + - GATEWAY + YARN: + - JOBHISTORY + - GATEWAY + KAFKA: + - KAFKA_BROKER + - GATEWAY + HBASE: + - MASTER + - HBASETHRIFTSERVER + # 1.try leave out the "GATEWAY". 2. add on ALL nodes?! + #- GATEWAY + + ## Moved from EDGE-Nodes + #HDFS: + HIVE: + - HIVEMETASTORE + - GATEWAY + #- WEBHCAT #TODO fix bug (or test on 7.1.6 ?!) + HIVE_ON_TEZ: + - HIVESERVER2 + - GATEWAY + RANGER: + - RANGER_ADMIN + - RANGER_TAGSYNC + - RANGER_USERSYNC + ATLAS: + - ATLAS_SERVER + KNOX: + - IDBROKER + - KNOX_GATEWAY + # Gateway-only Roles: + TEZ: + - GATEWAY + SPARK_ON_YARN: + - GATEWAY + + + Master2: + ZOOKEEPER: [SERVER] + HDFS: + # 1.try using 2 NameNodes did not work -> 2.try with SECONDARYNAMENODE + #- NAMENODE + - SECONDARYNAMENODE + #- JOURNALNODE + #- FAILOVERCONTROLLER + YARN: + - RESOURCEMANAGER + #- JOBHISTORY # HA? + HIVE: + - HIVEMETASTORE + - GATEWAY + # we only need "HIVESERVER2" role in service "HIVE_ON_TEZ" + #- HIVESERVER2 + #- WEBHCAT #TODO fix bug (or test on 7.1.6 ?!) 
+        HIVE_ON_TEZ:
+          - HIVESERVER2
+          - GATEWAY
+        KAFKA:
+          - KAFKA_BROKER
+          ## disabled
+          #- KAFKA_CONNECT
+          #- KAFKA_MIRROR_MAKER
+        SCHEMAREGISTRY:
+          - SCHEMA_REGISTRY_SERVER
+        ## Moved from EDGE-Nodes
+        INFRA_SOLR:
+          - SOLR_SERVER
+          - GATEWAY
+        HUE:
+          - HUE_LOAD_BALANCER
+          - HUE_SERVER
+        QUEUEMANAGER:
+          - QUEUEMANAGER_STORE
+          - QUEUEMANAGER_WEBAPP
+
+      Master3:
+        ZOOKEEPER: [SERVER]
+        HDFS:
+          #- JOURNALNODE
+          #- FAILOVERCONTROLLER
+          - BALANCER
+          - GATEWAY
+        YARN:
+          - RESOURCEMANAGER
+          - GATEWAY
+        SPARK_ON_YARN:
+          - SPARK_YARN_HISTORY_SERVER
+          - GATEWAY
+          # required by Spark history server
+        KAFKA:
+          - KAFKA_BROKER
+          - GATEWAY
+          ## disabled
+          #- KAFKA_CONNECT
+          #- KAFKA_MIRROR_MAKER
+        SCHEMAREGISTRY:
+          - SCHEMA_REGISTRY_SERVER
+        HBASE:
+          - MASTER
+          - HBASERESTSERVER
+          #- GATEWAY
+        STREAMS_MESSAGING_MANAGER:
+          - STREAMS_MESSAGING_MANAGER_SERVER
+          - STREAMS_MESSAGING_MANAGER_UI
+
+        # TODO later: what about "HIVE_LLAP" (roles: HIVESERVER2, LLAPPROXY)
+        INFRA_SOLR:
+          - SOLR_SERVER
+          - GATEWAY
+        ZEPPELIN:
+          - ZEPPELIN_SERVER
+        # Gateway-only Roles:
+        #HIVE: [GATEWAY]
+        HIVE:
+          - GATEWAY
+        HIVE_ON_TEZ:
+          - GATEWAY
+        TEZ:
+          - GATEWAY
+
+
+      # TODO Do we need GATEWAY roles on workers?
+      Workers:
+        HDFS: [DATANODE]
+        YARN: [NODEMANAGER]
+        HBASE: [REGIONSERVER]
+
+
+mgmt:
+  name: Cloudera Management Service
+  services: [ALERTPUBLISHER, EVENTSERVER, HOSTMONITOR, REPORTSMANAGER, SERVICEMONITOR]
+
+hosts:
+  configs:
+    host_default_proc_memswap_thresholds:
+      warning: never
+      critical: never
+    host_memswap_thresholds:
+      warning: never
+      critical: never
+    host_config_suppression_agent_system_user_group_validator: true
\ No newline at end of file
diff --git a/inventory/cdpAz3M/definition.yml b/inventory/cdpAz3M/definition.yml
new file mode 100644
index 0000000..1722bab
--- /dev/null
+++ b/inventory/cdpAz3M/definition.yml
@@ -0,0 +1,26 @@
+---
+use_default_cluster_definition: no
+use_download_mirror: no
+preload_cm_parcel_repo: no
+
+## [LH] moved vars here from ~/.config/cloudera-deploy/profiles:
+# but specifying it here does not seem to work, so I provide it via the command line (--extra-vars); see the sketch at the end of this file
+#admin_password: ""
+
+# Secret vars used in the "clusters" config:
+gateway_master_secret: "{{ admin_password }}"
+idbroker_master_secret: "{{ admin_password }}"
+
+## Specifies the Cloud Infrastructure provider; CDP presently supports GCP, AWS and Azure.
+## These should not be necessary when using a static Ansible inventory:
+# infra_type:
+# infra:
+# datahub:
+# env:
+
+## Vars to configure the "teardown" playbook tag
+teardown_preserve_parcels: true
+# Tear down the "cluster" but neither CM nor CMS (the CM services, listed under "Cloudera Management Service"!)
+teardown_everything: false
+teardown_cms: False
+teardown_cluster: "all"
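+
+## A minimal sketch of how the secret could be supplied at runtime (the vault file and the
+## "main.yml" playbook name are assumptions -- adjust to your setup), keeping it out of the repo:
+#   ansible-vault create inventory/cdpAz3M/vault.yml   # contains e.g.: admin_password: "..."
+#   ansible-playbook main.yml -i inventory/cdpAz3M/inventory_static.ini \
+#     -e @inventory/cdpAz3M/vault.yml --ask-vault-pass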
diff --git a/inventory/cdpAz3M/inventory_static.ini b/inventory/cdpAz3M/inventory_static.ini
new file mode 100644
index 0000000..d8a1892
--- /dev/null
+++ b/inventory/cdpAz3M/inventory_static.ini
@@ -0,0 +1,54 @@
+# Inventory for a 3-master-node cluster, to be used for example with the "cluster_3M" config
+
+# IDEA: Add localhost to the inventory so that it is part of "groups.all" (and it also gets the important "set_fact" results),
+#       but this only works when avoiding ssh, via ansible_connection=local
+# Note: It might need to be commented out for the "teardown" tags, unless no "hosts: all" is used in the cluster playbook
+[local]
+localhost ansible_connection=local
+
+
+[master1]
+eval-cdp-public0.internal.cloudapp.net host_template=Master1 ansible_host=52.168.145.76
+
+[master2]
+eval-cdp-public1.internal.cloudapp.net host_template=Master2 ansible_host=40.71.126.110
+
+[master3]
+eval-cdp-public2.internal.cloudapp.net host_template=Master3 ansible_host=40.71.162.117
+
+[cloudera_manager:children]
+master1
+
+[edge:children]
+master1
+
+[cluster_master_nodes:children]
+master1
+master2
+master3
+
+[cluster_worker_nodes]
+eval-cdp-public3.internal.cloudapp.net host_template=Workers ansible_host=40.71.160.58
+#eval-cdp-public4.internal.cloudapp.net host_template=Workers ansible_host=__IP__
+#eval-cdp-public5.internal.cloudapp.net host_template=Workers ansible_host=__IP__
+
+[cluster:children]
+#cluster_edge_nodes
+cluster_master_nodes
+cluster_worker_nodes
+
+
+[db_server:children]
+cloudera_manager
+
+[deployment:children]
+cluster
+db_server
+
+# Note: TF deploys our OS users incl. their ssh public key, so there is no longer a need to set "ansible_user"
+#[deployment:vars]
+# Ansible will defer to the running SSH Agent for relevant keys
+# Set the following to hardcode the SSH private key for the instances
+# ansible_ssh_private_key_file=~/.ssh/mykey.pem
+#ansible_user=centos
+#ansible_user=adminuser

From 105ba44f1ff5bb06d645f6a8574cb239d2c6984e Mon Sep 17 00:00:00 2001
From: lhoss
Date: Thu, 1 Jul 2021 12:38:47 +0200
Subject: [PATCH 2/2] configure kerberos (MIT kdc) deployment

---
 inventory/cdpAz3M/cluster_3M.yml       | 9 +++++++++
 inventory/cdpAz3M/inventory_static.ini | 2 ++
 2 files changed, 11 insertions(+)

diff --git a/inventory/cdpAz3M/cluster_3M.yml b/inventory/cdpAz3M/cluster_3M.yml
index d8a4596..59ed008 100644
--- a/inventory/cdpAz3M/cluster_3M.yml
+++ b/inventory/cdpAz3M/cluster_3M.yml
@@ -1,4 +1,13 @@
 ---
+## Cloudera roles vars overrides
+
+## Note: for the kerberos deployment to be enabled, we need to ensure 2 things:
+## 1) cluster.yml: "kerberos" tag included.
+## 2) an existing inventory group "krb5_server" with 1 node
+krb5_kdc_type: "MIT KDC"
+krb5_kdc_master_password: changeme
+
+
 ## The "clusters" config below depends on extra secret vars being set in the profile or inventory:
 # gateway_master_secret
 # idbroker_master_secret
diff --git a/inventory/cdpAz3M/inventory_static.ini b/inventory/cdpAz3M/inventory_static.ini
index d8a1892..9d8604c 100644
--- a/inventory/cdpAz3M/inventory_static.ini
+++ b/inventory/cdpAz3M/inventory_static.ini
@@ -37,6 +37,8 @@ eval-cdp-public3.internal.cloudapp.net host_template=Workers ansible_host=40.71.
 cluster_master_nodes
 cluster_worker_nodes
 
+[krb5_server:children]
+cloudera_manager
 
 [db_server:children]
 cloudera_manager