hadoop

HOME

# show the current hostname
hostname
# change the hostname
sudo hostnamectl set-hostname linux01
# stop the firewall and disable it at boot
systemctl stop firewalld
systemctl disable firewalld.service
# create /etc/profile.d/my_home.sh and put the following lines into it
export HADOOP_HOME=/opt/hadoop-3.1.3
export PATH=$PATH:$HADOOP_HOME/bin
export PATH=$PATH:$HADOOP_HOME/sbin
export JAVA_LIBRARY_PATH=$HADOOP_HOME/lib/native
export JAVA_HOME=/opt/jdk1.8.0_321
export PATH=$PATH:$JAVA_HOME/bin
export SCALA_HOME=/opt/scala-2.12.16
export PATH=$PATH:$SCALA_HOME/bin
export SBT_HOME=/opt/sbt
export PATH=$PATH:$SBT_HOME/bin
export SPARK_HOME=/opt/spark-yarn
export PATH=$PATH:$SPARK_HOME/bin
# reload the profile
source /etc/profile
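
A quick sanity check after sourcing the profile (a minimal sketch; the names come from the variables above):

echo "$HADOOP_HOME $JAVA_HOME $SCALA_HOME $SPARK_HOME"
which hadoop java scala sbt spark-shell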
# show the current hostname
hostname
# change the hostname
sudo hostnamectl set-hostname linux01
# inspect, then rewrite, the interface config (Ctrl+D ends the cat input)
cat /etc/sysconfig/network-scripts/ifcfg-ens33
cat > /etc/sysconfig/network-scripts/ifcfg-ens33
DEVICE=ens33
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=static
NAME="ens33"
IPADDR=192.168.10.100
PREFIX=24
GATEWAY=192.168.10.2
DNS1=192.168.10.2
# reboot
reboot
# map every cluster hostname in /etc/hosts
cat > /etc/hosts
192.168.10.100 linux00
192.168.10.101 linux01
192.168.10.102 linux02
192.168.10.103 linux03
192.168.10.104 linux04
192.168.10.105 linux05
192.168.10.106 linux06
192.168.10.107 linux07
192.168.10.108 linux08
192.168.10.109 linux09
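
To confirm the address mappings work, a quick check over a few of the hosts:

# each name should resolve to its 192.168.10.x address
for h in linux02 linux03 linux04; do
    ping -c 1 "$h"
done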
# bring ens33 up and restart the network service
ifconfig ens33 up
systemctl stop NetworkManager
ifup ens33
systemctl restart network.service
ifconfig
#!/bin/bash
# print the version of each installed tool
echo "=========================================="
java -version
echo "=========================================="
hadoop version
echo "=========================================="
scala -version
echo "=========================================="
sbt --version
echo "=========================================="
# note: spark-shell is interactive; type :quit to leave it
$SPARK_HOME/bin/spark-shell
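
Saved under a hypothetical name such as check-env.sh and made executable, the script above prints each tool's version in turn:

chmod +x check-env.sh
./check-env.sh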

Passwordless SSH login

ssh-keygen -t rsa
ssh-copy-id linux02
ssh-copy-id linux03
ssh-copy-id linux04
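
The same key distribution as a loop, with a quick check; run it once per user that needs passwordless login:

for host in linux02 linux03 linux04; do
    ssh-copy-id "$host"
done
# should print the remote hostname without prompting for a password
ssh linux02 hostname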

Distribution script

# save the script below as /bin/xsync (not /bin/sync, which is a system binary) and make it executable
chmod 777 /bin/xsync

#!/bin/bash
# 1. check the argument count
if [ $# -lt 1 ]
then
    echo "Not Enough Arguments!"
    exit
fi
# 2. loop over every machine in the cluster
for host in linux02 linux03 linux04
do
    echo ==================== $host ====================
    # 3. send every path given on the command line
    for file in $@
    do
        # 4. check that the file exists
        if [ -e $file ]
        then
            # 5. resolve the parent directory
            pdir=$(cd -P $(dirname $file); pwd)
            # 6. get the file name
            fname=$(basename $file)
            ssh $host "mkdir -p $pdir"
            rsync -av $pdir/$fname $host:$pdir
        else
            echo "$file does not exist!"
        fi
    done
done
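
Typical usage, assuming the script was saved as /bin/xsync as noted above:

# push the Hadoop install and the profile snippet to every host in the list
xsync /opt/hadoop-3.1.3
xsync /etc/profile.d/my_home.sh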

Cluster deployment plan

Do not install the NameNode and the SecondaryNameNode on the same server. The ResourceManager also consumes a lot of memory, so do not place it on the same machine as the NameNode or the SecondaryNameNode.

       linux02                                linux03                         linux04
HDFS   NameNode, DataNode, JobHistoryServer   DataNode                        SecondaryNameNode, DataNode
YARN   NodeManager                            ResourceManager, NodeManager    NodeManager

conf

/opt/hadoop-3.1.3/etc/hadoop

core-site.xml:

<configuration>
    <!-- NameNode address -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://linux02:8020</value>
    </property>
    <!-- Hadoop data storage directory -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/hadoop-3.1.3/data</value>
    </property>
    <!-- static user for the HDFS web UI: root -->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>root</value>
    </property>
</configuration>

hdfs-site.xml:

<configuration>
    <!-- NameNode web UI address -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>linux02:9870</value>
    </property>
    <!-- SecondaryNameNode web UI address -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>linux04:9868</value>
    </property>
</configuration>
yarn-site.xml:

<configuration>
    <!-- use the shuffle auxiliary service for MapReduce -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- ResourceManager host -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>linux03</value>
    </property>
    <!-- environment variables inherited by containers -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- enable log aggregation -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!-- log server URL -->
    <property>
        <name>yarn.log.server.url</name>
        <value>http://linux02:19888/jobhistory/logs</value>
    </property>
    <!-- keep aggregated logs for 7 days -->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
</configuration>
mapred-site.xml:

<configuration>
    <!-- run MapReduce jobs on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- JobHistory server address -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>linux02:10020</value>
    </property>
    <!-- JobHistory web UI address -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>linux02:19888</value>
    </property>
    <!-- point the MR framework components at the Hadoop install -->
    <property>
        <name>yarn.app.mapreduce.am.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop-3.1.3</value>
    </property>
    <property>
        <name>mapreduce.map.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop-3.1.3</value>
    </property>
    <property>
        <name>mapreduce.reduce.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop-3.1.3</value>
    </property>
</configuration>
workers (one worker hostname per line):

linux02
linux03
linux04
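
After editing, these five files must be identical on every node; the distribution script from above pushes the whole config directory in one call:

xsync /opt/hadoop-3.1.3/etc/hadoop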

Initialization

# format the NameNode; run this once, on linux02 only
hdfs namenode -format

Start/stop script

#!/bin/bash

if [ $# -lt 1 ]
then
    echo "No Args Input..."
    exit
fi

case $1 in
"start")
        echo " =================== starting the hadoop cluster ==================="

        echo " --------------- starting hdfs ---------------"
        ssh linux02 "/opt/hadoop-3.1.3/sbin/start-dfs.sh"
        echo " --------------- starting yarn ---------------"
        ssh linux03 "/opt/hadoop-3.1.3/sbin/start-yarn.sh"
        echo " --------------- starting historyserver ---------------"
        ssh linux02 "/opt/hadoop-3.1.3/bin/mapred --daemon start historyserver"
;;
"stop")
        echo " =================== stopping the hadoop cluster ==================="

        echo " --------------- stopping historyserver ---------------"
        ssh linux02 "/opt/hadoop-3.1.3/bin/mapred --daemon stop historyserver"
        echo " --------------- stopping yarn ---------------"
        ssh linux03 "/opt/hadoop-3.1.3/sbin/stop-yarn.sh"
        echo " --------------- stopping hdfs ---------------"
        ssh linux02 "/opt/hadoop-3.1.3/sbin/stop-dfs.sh"
;;
*)
    echo "Input Args Error..."
;;
esac
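
Usage, assuming the script is saved under a hypothetical name such as myhadoop.sh and placed on the PATH:

chmod +x myhadoop.sh
myhadoop.sh start    # brings up hdfs, yarn, then the history server
myhadoop.sh stop     # tears them down in reverse order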

Script to list all processes

#!/bin/bash
# run jps on every node to list its Java processes

for host in linux02 linux03 linux04
do
        echo =============== $host ===============
        ssh $host jps
done
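
Saved under a hypothetical name such as jpsall, it gives a one-screen view of which daemon runs where; the output should match the deployment table above:

chmod +x jpsall
./jpsall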

Reformatting Hadoop

1. Stop all Hadoop processes.
2. Delete the data and logs directories on every node.
3. Reformat: hdfs namenode -format
4. Start the cluster again.

test hadoop

# create a directory in HDFS
hadoop fs -mkdir /myinput

hadoop fs -put wcinput/word.txt /myinput

# general form: hadoop fs -put <local path> <HDFS path>

hadoop fs -put jdk-8u301-linux-x64.rpm /myinput

hadoop jar /opt/hadoop-3.1.3/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.1.3.jar wordcount /myinput /output1
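
To check the job result (wordcount writes part-r-* files under the output directory):

hadoop fs -ls /output1
hadoop fs -cat /output1/*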

spark

local env

sc.textFile("data/word.txt").flatMap(_.split(" ")).map((_,1)).reduceByKey(_+_).collect
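
The line above is meant for an interactive shell; to get one in local mode (assuming data/word.txt exists relative to where the shell is started):

$SPARK_HOME/bin/spark-shell --master local[*]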


bin/spark-submit \
--class org.apache.spark.examples.SparkPi \
--master local[*] \
./examples/jars/spark-examples_2.12-3.0.0.jar \
10

yarn env

./bin/spark-submit \
--class org.apache.spark.examples.SparkPi \
--master yarn \
--deploy-mode client \
./examples/jars/spark-examples_2.12-3.0.0.jar 10
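
For comparison, a cluster-mode submit of the same example jar; the driver then runs inside YARN, so the result appears in the application logs rather than on the console:

./bin/spark-submit \
--class org.apache.spark.examples.SparkPi \
--master yarn \
--deploy-mode cluster \
./examples/jars/spark-examples_2.12-3.0.0.jar 10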

# start the Spark standalone cluster
sbin/start-all.sh
# start the Spark history server
sbin/start-history-server.sh
$SPARK_HOME/bin/spark-submit \
--class org.apache.spark.examples.SparkPi \
--master yarn \
--properties-file /home/job1/conf.conf \
$SPARK_HOME/examples/jars/spark-examples_2.12-3.0.0.jar 10
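
The contents of /home/job1/conf.conf are not shown here; a minimal sketch using standard Spark property names (the values are assumptions, not the original file):

cat > /home/job1/conf.conf <<'EOF'
spark.executor.instances  2
spark.executor.memory     1g
spark.executor.cores      1
EOF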