1.NN重复初始化后使用的clean.sh
#!/bin/bash
# Reset cluster state after re-formatting the NameNode:
# wipe the HDFS data/log directories and /tmp on every node.

# Guard: if HADOOP_HOME were unset, the remote command would expand to
# "rm -rf /data /logs" — abort loudly instead.
: "${HADOOP_HOME:?HADOOP_HOME must be set}"

for host in hadoop102 hadoop103 hadoop104
do
  # Delete HDFS runtime data and logs on the remote host
  ssh "$host" rm -rf "$HADOOP_HOME/data" "$HADOOP_HOME/logs"
  # Clear /tmp (PID files etc.).  The command is quoted so the /tmp/* glob
  # is expanded by the REMOTE shell, not the local one.
  ssh "$host" "sudo rm -rf /tmp/*"
done
2.查看所有节点的java进程jpsall.sh
#!/bin/bash
# Print the Java process list (jps) of every cluster node, one banner per host.

nodes=(hadoop102 hadoop103 hadoop104)

for node in "${nodes[@]}"; do
  echo "=========================$node============================"
  ssh "$node" jps
done
3.集群启动,停止mycluster.sh
#!/bin/bash
# Start or stop the Hadoop cluster.
# HDFS daemons are driven from hadoop102, YARN from hadoop103.
# Usage: mycluster.sh start|stop

# Require exactly one argument; exit non-zero so callers can detect misuse
# (the original plain "exit" returned status 0 on error).
if [ $# -ne 1 ]
then
  echo "args number error!!!" >&2
  exit 1
fi

case "$1" in
"start")
  ssh hadoop102 "$HADOOP_HOME/sbin/start-dfs.sh"
  ssh hadoop103 "$HADOOP_HOME/sbin/start-yarn.sh"
  ;;
"stop")
  ssh hadoop102 "$HADOOP_HOME/sbin/stop-dfs.sh"
  ssh hadoop103 "$HADOOP_HOME/sbin/stop-yarn.sh"
  ;;
*)
  # Unknown sub-command is an error, not a silent no-op.
  echo "args info error!!!" >&2
  exit 1
  ;;
esac
可考虑添加历史服务器的启动/停止(分别追加到 "start" 与 "stop" 分支末尾):
echo " ---------------- 启动 historyserver ----------------"
ssh hadoop102 "/opt/module/hadoop-3.1.3/bin/mapred --daemon start historyserver"
echo " ---------------- 关闭 historyserver ----------------"
ssh hadoop102 "/opt/module/hadoop-3.1.3/bin/mapred --daemon stop historyserver"
4.集群分发脚本xsync
#!/bin/bash
# Distribute the given files/directories to every cluster node with rsync,
# preserving each entry's absolute path on the remote side.
# Usage: xsync <path>...

#1. Check argument count
if [ $# -lt 1 ]
then
  echo Not Enough Arguement!
  exit
fi

#2. Loop over every machine in the cluster
for host in hadoop102 hadoop103 hadoop104
do
  echo ==================== $host ====================
  #3. Send each given path in turn ("$@" keeps paths with spaces intact)
  for file in "$@"
  do
    #4. Only sync paths that exist
    if [ -e "$file" ]
    then
      #5. Resolve the physical parent directory (-P follows symlinks),
      #   so the remote copy lands at the real absolute path.
      pdir=$(cd -P "$(dirname "$file")"; pwd)
      #6. Bare file name of the entry being sent
      fname=$(basename "$file")
      # Make sure the parent directory exists remotely, then mirror the entry
      ssh "$host" "mkdir -p $pdir"
      rsync -av "$pdir/$fname" "$host:$pdir"
    else
      echo "$file does not exists!"
    fi
  done
done
for((host=103; host<105; host++)); do
echo ---------------- hadoop$host ----------------
rsync -rvl $pdir/$fname $user@hadoop$host:$pdir
done
另一种脚本编写思路,用循环的方式依次登录每个节点。
5.zookeeper群起脚本
#!/bin/bash
# Run zkServer.sh start|stop|status on every ZooKeeper node.
# Usage: zk.sh start|stop|status

# Require exactly one argument; exit non-zero on misuse
# (the original plain "exit" returned status 0 on error).
if [ $# -ne 1 ]
then
  echo "args number error!!!" >&2
  exit 1
fi

# Validate the sub-command before touching any node
var=""
case "$1" in
"start")
  var="start"
  ;;
"stop")
  var="stop"
  ;;
"status")
  var="status"
  ;;
*)
  echo "args info error!!!" >&2
  exit 1
  ;;
esac

for host in hadoop102 hadoop103 hadoop104
do
  echo "==========================$host======================"
  ssh "$host" /opt/module/zookeeper-3.5.7/bin/zkServer.sh "$var"
done