forked from LLNL/magpie
-
Notifications
You must be signed in to change notification settings - Fork 0
/
magpie-run-hadoop-upgradehdfs
executable file
·77 lines (64 loc) · 2.47 KB
/
magpie-run-hadoop-upgradehdfs
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
#!/bin/bash
#############################################################################
# Copyright (C) 2013 Lawrence Livermore National Security, LLC.
# Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
# Written by Albert Chu <[email protected]>
# LLNL-CODE-644248
#
# This file is part of Magpie, scripts for running Hadoop on
# traditional HPC systems. For details, see <URL>.
#
# Magpie is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# Magpie is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Magpie. If not, see <http://www.gnu.org/licenses/>.
#############################################################################
# This script is the core upgradehdfs script. For the most part, it
# shouldn't be edited.  See job submission files for configuration
# details.
# XXX does this work w/ federation?
# Load the Magpie environment exports and helper functions; abort if
# either is missing, since nothing below can work without them.
source "${MAGPIE_SCRIPTS_HOME}/magpie-common-exports" || exit 1
source "${MAGPIE_SCRIPTS_HOME}/magpie-common-functions" || exit 1

cd "${HADOOP_HOME}" || exit 1

# Sets ${hadoopnoderank} for this node.
Magpie_am_I_a_hadoop_node

# Sets ${hadooptmpdir} (among other paths) based on the node rank.
Magpie_calculate_hadoop_filesystem_paths "${hadoopnoderank}"

# NOTE(review): currentnamenodepath is unused in this script; kept for
# parity with related magpie scripts.
currentnamenodepath="${hadooptmpdir}/dfs/name/current"
previousnamenodepath="${hadooptmpdir}/dfs/name/previous"

# The fact we are in this script means we are out of safe mode.
#
# So we need to finalize the upgrade and wait for the previous name
# node data to be deleted.

# Wait for the previous namenode data path to be created if it hasn't
# been created yet.
while [ ! -d "${previousnamenodepath}" ]
do
    echo "Previous namenode directory ${previousnamenodepath} not seen, sleeping 60 seconds waiting for it."
    sleep 60
done

# ${command} is intentionally unquoted below: it holds the program plus
# its arguments and relies on word-splitting.
command="${hadoopcmdprefix}/hdfs dfsadmin -finalizeUpgrade"
echo "Running $command" >&2
if ! $command
then
    # Without this check the wait loop below would spin forever when
    # finalizeUpgrade fails.
    echo "finalizeUpgrade failed" >&2
    exit 1
fi

echo "Sleeping 60 seconds to wait for finalizeUpgrade to complete"
sleep 60

# The 'previous' directory disappearing signals that the upgrade
# finalization has completed on this node.
while [ -d "${previousnamenodepath}" ]
do
    echo "Previous namenode directory ${previousnamenodepath} still not deleted, sleeping 60 seconds waiting for it."
    sleep 60
done

exit 0