Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

S-332697:3.1.1-19 changes in Install Toolkit : 522 #813

Open
wants to merge 1 commit into
base: dev
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions roles/hdfs_prepare/defaults/main.yml
Original file line number Diff line number Diff line change
@@ -1,2 +1,7 @@
---
# defaults file for precheck
## Compute the RPM version string from the Spectrum Scale version by turning
## the final dot into a dash (e.g. "5.1.9.3" -> "5.1.9-3").
## The last capture group is "[0-9]+" (not a single digit) so multi-digit
## release numbers such as "5.1.2.10" are handled; if the pattern does not
## match at all, regex_replace leaves the value unchanged.
scale_rpmversion: "{{ scale_version | regex_replace('^([0-9.]+)\\.([0-9]+)$', '\\1-\\2') }}"
## Specify package extraction path and gpg key path
scale_extracted_default_path: "/usr/lpp/mmfs"
scale_extracted_path: "{{ scale_extracted_default_path }}/{{ scale_version }}"
87 changes: 73 additions & 14 deletions roles/hdfs_prepare/tasks/java_home.yml
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,37 @@
set_fact:
scale_hdfs_nodes_list: "{{ scale_hdfs_nodes_list | unique }}"

# Guard: the inventory must define at least one HDFS node before we proceed.
# ("atleast" in the task name is a typo for "at least" — cosmetic only.)
# NOTE(review): the very next task re-initializes scale_hdfs_nodes_list to [],
# so this assert appears to validate a value that is immediately discarded —
# confirm the intended task ordering (this may be a relic of a moved block).
- name: check | Check if atleast one hdfs node is configured
  assert:
    that:
      - scale_hdfs_nodes_list|length > 0
    fail_msg: "No hdfs nodes configured"
# Reset the HDFS node-list facts so repeated runs start from a clean slate.
- name: global_var | Initialize
  set_fact:
    scale_hdfs_nodes_list: []
    scale_hdfs_namenodes_list: []
    scale_hdfs_datanodes_list: []

# NOTE(review): 'item' is not defined by this task itself — presumably these
# tasks run inside an enclosing loop/include over the HDFS cluster
# definitions; confirm the caller supplies item.namenodes / item.datanodes.
- name: global_var | Collect all HDFS NameNodes
  set_fact:
    scale_hdfs_namenodes_list: "{{ item.namenodes | unique }}"
  delegate_to: localhost
  run_once: true

- name: global_var | Collect all HDFS DataNodes
  set_fact:
    scale_hdfs_datanodes_list: "{{ item.datanodes | unique }}"
  delegate_to: localhost
  run_once: true

# Combined list of every HDFS node (NameNodes + DataNodes).
- name: global_var | Get HDFS nodes
  set_fact:
    scale_hdfs_nodes_list: "{{ scale_hdfs_namenodes_list + scale_hdfs_datanodes_list }}"

# Deduplicate — a host may act as both NameNode and DataNode.
- name: global_var | make unique HDFS nodes
  set_fact:
    scale_hdfs_nodes_list: "{{ scale_hdfs_nodes_list | unique }}"

- name: check | Check if atleast one hdfs node is configured
assert:
that:
Expand Down Expand Up @@ -58,30 +89,57 @@
- name: check | verify JAVA
command: "ls {{ javahome_path }}/bin/java"
register: jvm_list
when:
when:
- ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
- javahome_path|length > 0

- fail:
msg: "JAVA_HOME not set properly"
when:
when:
- ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
- jvm_list.rc != 0

- name: check | Fetch hdfs extracted tar
- name: check | get hdfs rpm dir
set_fact:
hdfs_dependency_jars_dir: "hadoop-3.1.4"
hdfs_rpm_dir: "hdfs_3.1.1.x"

- name: Check and fetch gpfs.hdfs-protocol version
shell: "rpm -q gpfs.hdfs-protocol --qf %{VERSION}-%{RELEASE}"
register: gpfs_hdfs_protocol_version
- name: check | get hdfs rpm dir
set_fact:
hdfs_rpm_dir: "hdfs_3.2.2.x"
when:
- ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
- transparency_322_enabled|bool
ignore_errors: true

- debug:
msg: "gpfs_hdfs_protocol_version: {{ gpfs_hdfs_protocol_version}}"
msg: "hdfs_rpm_dir: {{ hdfs_rpm_dir}}"

## Read the gpfs.hdfs-protocol version directly from the packaged RPM in the
## extracted toolkit (rpm -qp), so the check works before installation.
## The '*' glob must stay OUTSIDE the double quotes — a quoted glob is not
## expanded by the shell, so rpm would receive a literal '*' filename and
## the query would always fail.
- name: Check and fetch gpfs.hdfs-protocol version
  shell: >-
    rpm -qp
    "{{ scale_extracted_path }}/hdfs_rpms/rhel/{{ hdfs_rpm_dir }}/"gpfs.hdfs-protocol-*.x86_64.rpm
    --qf '%{VERSION}.%{RELEASE}\n' | cut -d '.' -f -4
  register: gpfs_hdfs_protocol_version
  # Read-only query: never report 'changed', and do not abort the play on
  # failure — downstream tasks gate on gpfs_hdfs_protocol_version.rc == 0.
  changed_when: false
  ignore_errors: true
  delegate_to: localhost
  run_once: true

- debug:
    msg: "gpfs_hdfs_protocol_version: {{ gpfs_hdfs_protocol_version.stdout }}"

## Select where the HDFS dependency jars live for transparency 3.1.1.x:
## packages below 3.1.1.19 use the plain Hadoop 3.1.4 jar directory, while
## anything above 3.1.1.18 (i.e. 3.1.1.19 and newer — the two comparisons
## partition all versions) uses the Cloudera parcels directory.
- name: check | Fetch hdfs extracted tar
  set_fact:
    hdfs_dependency_jars_dir: >-
      {{ '/opt/hadoop/jars/hadoop-3.1.4'
         if gpfs_hdfs_protocol_version.stdout is version('3.1.1.19', '<')
         else '/opt/cloudera/parcels' }}
  when:
    - not transparency_322_enabled | bool
    - gpfs_hdfs_protocol_version.rc == 0

- name: Check gpfs.hdfs-protocol version for standalone installation
fail:
Expand All @@ -93,24 +151,25 @@
- ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
- transparency_322_enabled|bool
- gpfs_hdfs_protocol_version.rc == 0
- gpfs_hdfs_protocol_version.stdout_lines[0] < '3.2.2-5'

- gpfs_hdfs_protocol_version.stdout is version('3.2.2.5', '<')
- debug:
msg: "hdfs_dependency_jars_dir: {{ hdfs_dependency_jars_dir }}"
when:
- transparency_322_enabled|bool == False

- name: check | verify dependency jars
command: "ls /opt/hadoop/jars/{{ hdfs_dependency_jars_dir }}"
command: "ls {{ hdfs_dependency_jars_dir }}"
register: dep_jars
when:
- ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
- transparency_322_enabled|bool == False
ignore_errors: true

- fail:
msg: >
"Dependency jars not exist in /opt/hadoop/jars directory, which are essential prerequisites, For further details, "
"Dependency jars not exist in {{ hdfs_dependency_jars_dir }} directory, which are essential prerequisites, For further details, "
"please consult the documentation via the following link: https://www.ibm.com/docs/en/storage-scale-bda?topic=hdfs-setup"
when:
- ansible_fqdn in scale_hdfs_nodes_list or inventory_hostname in scale_hdfs_nodes_list
- transparency_322_enabled|bool == False
- dep_jars.rc != 0

- dep_jars.rc != 0