Pre-installation preparation

  • Perform these steps on all three hosts, as the root user

Configure /etc/hosts

cat >> /etc/hosts << EOF
192.168.4.195 hadoop-node01
192.168.4.194 hadoop-node02
192.168.4.192 hadoop-node03
EOF
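
To confirm the names resolve before continuing, a quick optional check on each host (getent reads /etc/hosts directly):

getent hosts hadoop-node01 hadoop-node02 hadoop-node03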

Disable the firewall

[root@hadoop-node01 ~]# systemctl stop firewalld
[root@hadoop-node01 ~]# systemctl disable firewalld
[root@hadoop-node02 ~]# systemctl stop firewalld
[root@hadoop-node02 ~]# systemctl disable firewalld
[root@hadoop-node03 ~]# systemctl stop firewalld
[root@hadoop-node03 ~]# systemctl disable firewalld
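
To double-check that the firewall is stopped now and stays off after a reboot:

systemctl is-active firewalld   # expect "inactive"
systemctl is-enabled firewalld  # expect "disabled"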

Deploy the Java environment

  • Perform on the first host
[root@hadoop-node01 ~]# cd /opt
[root@hadoop-node01 opt]# curl -o jdk-8u20-linux-x64.tar.gz http://file.mrlapulga.com/Jdk/jdk-8u20-linux-x64.tar.gz
[root@hadoop-node01 opt]# tar zxf jdk-8u20-linux-x64.tar.gz
[root@hadoop-node01 opt]# scp -r jdk1.8.0_20/ root@hadoop-node02:/opt/
[root@hadoop-node01 opt]# scp -r jdk1.8.0_20/ root@hadoop-node03:/opt/
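
With more nodes this pairwise copying gets tedious; a loop over the node names (a sketch assuming the same /opt layout on every host) does the same thing:

for node in hadoop-node02 hadoop-node03; do
    scp -r jdk1.8.0_20/ root@${node}:/opt/
done
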
  • Perform on all three hosts
cat >> /etc/profile << EOF
export JAVA_HOME=/opt/jdk1.8.0_20
export JRE_HOME=\${JAVA_HOME}/jre
export CLASSPATH=.:\${JAVA_HOME}/lib:\${JRE_HOME}/lib
export PATH=\$PATH:\${JAVA_HOME}/bin:\${JRE_HOME}/bin
EOF
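
Note the backslash before each $: the EOF delimiter is unquoted, so without the escapes the shell would expand ${JAVA_HOME} and $PATH while writing the file, instead of storing the literal variable references in /etc/profile.
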
[root@hadoop-node01 ~]# source /etc/profile
[root@hadoop-node01 ~]# java -version
java version "1.8.0_20"
Java(TM) SE Runtime Environment (build 1.8.0_20-b26)
Java HotSpot(TM) 64-Bit Server VM (build 25.20-b23, mixed mode)
[root@hadoop-node02 ~]# source /etc/profile
[root@hadoop-node02 ~]# java -version
java version "1.8.0_20"
Java(TM) SE Runtime Environment (build 1.8.0_20-b26)
Java HotSpot(TM) 64-Bit Server VM (build 25.20-b23, mixed mode)
[root@hadoop-node03 ~]# source /etc/profile
[root@hadoop-node03 ~]# java -version
java version "1.8.0_20"
Java(TM) SE Runtime Environment (build 1.8.0_20-b26)
Java HotSpot(TM) 64-Bit Server VM (build 25.20-b23, mixed mode)

Extract, install, and configure Hadoop

Perform the following operations on the first host

  • Extract Hadoop and create the data directories
[root@hadoop-node01 ~]# cd /opt/
[root@hadoop-node01 opt]# curl -o hadoop-3.2.0.tar.gz https://mirrors.aliyun.com/apache/hadoop/common/stable/hadoop-3.2.0.tar.gz
[root@hadoop-node01 opt]# mkdir hadoop
[root@hadoop-node01 opt]# tar zxf hadoop-3.2.0.tar.gz -C hadoop
[root@hadoop-node01 opt]# cd hadoop
[root@hadoop-node01 hadoop]# mkdir -p tmp dfs dfs/data dfs/name
  • Configure core-site.xml
[root@hadoop-node01 hadoop]# cd ..
[root@hadoop-node01 opt]# vim hadoop/hadoop-3.2.0/etc/hadoop/core-site.xml
<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://192.168.4.195:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/opt/hadoop/tmp</value>
    </property>
    <property>
        <name>io.file.buffer.size</name>
        <value>131072</value>
    </property>
</configuration>
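
Because /etc/hosts maps the names, hdfs://hadoop-node01:9000 would work equally well as fs.defaultFS. Once the file is saved, the effective value can be read back (run from /opt; assumes JAVA_HOME is already exported):

[root@hadoop-node01 opt]# hadoop/hadoop-3.2.0/bin/hdfs getconf -confKey fs.defaultFS
hdfs://192.168.4.195:9000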
  • Configure hdfs-site.xml
[root@hadoop-node01 opt]# vi hadoop/hadoop-3.2.0/etc/hadoop/hdfs-site.xml
<configuration>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>file:/opt/hadoop/dfs/name</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>file:/opt/hadoop/dfs/data</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>192.168.4.195:9001</value>
    </property>
    <property>
        <name>dfs.webhdfs.enabled</name>
        <value>true</value>
    </property>
</configuration>
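
dfs.replication is set to 2 to match the two DataNodes (hadoop-node02 and hadoop-node03) listed in the workers file below, and dfs.namenode.secondary.http-address places the secondary NameNode on hadoop-node01; this property, not a separate masters file, is what Hadoop 3.x uses to locate it.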
  • Configure mapred-site.xml
[root@hadoop-node01 opt]# vi hadoop/hadoop-3.2.0/etc/hadoop/mapred-site.xml
<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>192.168.4.195:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>192.168.4.195:19888</value>
    </property>
</configuration>
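
On Hadoop 3.x, MapReduce jobs submitted to YARN commonly fail to find the MRAppMaster class unless HADOOP_MAPRED_HOME is passed to the application master and tasks. If you hit that, the usual additions inside <configuration> are (the path assumes this install location):

    <property>
        <name>yarn.app.mapreduce.am.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop/hadoop-3.2.0</value>
    </property>
    <property>
        <name>mapreduce.map.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop/hadoop-3.2.0</value>
    </property>
    <property>
        <name>mapreduce.reduce.env</name>
        <value>HADOOP_MAPRED_HOME=/opt/hadoop/hadoop-3.2.0</value>
    </property>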
  • Configure yarn-site.xml
[root@hadoop-node01 opt]# vi hadoop/hadoop-3.2.0/etc/hadoop/yarn-site.xml
<configuration>
<!-- Site specific YARN configuration properties -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
    <property>
        <name>yarn.resourcemanager.scheduler.address</name>
        <value>192.168.4.195:8030</value>
    </property>
    <property>
        <name>yarn.resourcemanager.resource-tracker.address</name>
        <value>192.168.4.195:8031</value>
    </property>
    <property>
        <name>yarn.resourcemanager.address</name>
        <value>192.168.4.195:8032</value>
    </property>
    <property>
        <name>yarn.resourcemanager.admin.address</name>
        <value>192.168.4.195:8033</value>
    </property>
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>192.168.4.195:8088</value>
    </property>
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>768</value>
    </property>
</configuration>
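
Note that yarn.nodemanager.resource.memory-mb=768 caps each NodeManager at 768 MB, which is enough only for toy jobs. Since yarn.scheduler.minimum-allocation-mb defaults to 1024, containers may fail to allocate under this cap; lower that minimum or raise the NodeManager memory if jobs hang waiting for resources.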
  • Set JAVA_HOME in hadoop-env.sh and yarn-env.sh
vi hadoop/hadoop-3.2.0/etc/hadoop/hadoop-env.sh
vi hadoop/hadoop-3.2.0/etc/hadoop/yarn-env.sh

Append as the last line of each file:

export JAVA_HOME=/opt/jdk1.8.0_20/
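
Equivalently, the line can be appended to both files non-interactively (same effect as editing by hand):

for f in hadoop-env.sh yarn-env.sh; do
    echo 'export JAVA_HOME=/opt/jdk1.8.0_20/' >> hadoop/hadoop-3.2.0/etc/hadoop/$f
done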
  • Configure the worker nodes
Hadoop 3.x reads the list of worker hosts from etc/hadoop/workers; the masters and slaves files from Hadoop 2.x are no longer used, and the secondary NameNode address was already set in hdfs-site.xml above. Overwrite the default "localhost" entry:
[root@hadoop-node01 opt]# echo -e "hadoop-node02\nhadoop-node03" > hadoop/hadoop-3.2.0/etc/hadoop/workers
  • Copy the hadoop directory to the other two nodes
[root@hadoop-node01 opt]# scp -r hadoop root@hadoop-node02:/opt/
[root@hadoop-node01 opt]# scp -r hadoop root@hadoop-node03:/opt/
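
For a tree this size scp is fine; if rsync 3.1+ is available on the nodes, it additionally shows progress and can resume an interrupted copy:

rsync -a --info=progress2 hadoop/ root@hadoop-node02:/opt/hadoop/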

Perform the following operations on all three hosts

  • Add the hadoop user and grant it ownership
[root@hadoop-node01 ~]# useradd hadoop
[root@hadoop-node01 ~]# chown -R hadoop:hadoop /opt/hadoop
[root@hadoop-node01 ~]# passwd hadoop
[root@hadoop-node01 ~]# su hadoop
[hadoop@hadoop-node01 root]$ cd
[root@hadoop-node02 ~]# useradd hadoop
[root@hadoop-node02 ~]# chown -R hadoop:hadoop /opt/hadoop
[root@hadoop-node02 ~]# passwd hadoop
[root@hadoop-node02 ~]# su hadoop
[hadoop@hadoop-node02 root]$ cd
[root@hadoop-node03 ~]# useradd hadoop
[root@hadoop-node03 ~]# chown -R hadoop:hadoop /opt/hadoop
[root@hadoop-node03 ~]# passwd hadoop
[root@hadoop-node03 ~]# su hadoop
[hadoop@hadoop-node03 root]$ cd
  • Configure passwordless SSH login
[hadoop@hadoop-node01 ~]$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[hadoop@hadoop-node01 ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@hadoop-node01 ~]$ chmod 0600 ~/.ssh/authorized_keys
[hadoop@hadoop-node02 ~]$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[hadoop@hadoop-node02 ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@hadoop-node02 ~]$ chmod 0600 ~/.ssh/authorized_keys
[hadoop@hadoop-node03 ~]$ ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
[hadoop@hadoop-node03 ~]$ cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
[hadoop@hadoop-node03 ~]$ chmod 0600 ~/.ssh/authorized_keys
  • Exchange keys for mutual passwordless access
[hadoop@hadoop-node01 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node02
[hadoop@hadoop-node01 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node03
[hadoop@hadoop-node02 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node01
[hadoop@hadoop-node02 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node03
[hadoop@hadoop-node03 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node01
[hadoop@hadoop-node03 ~]$ ssh-copy-id -i ~/.ssh/id_rsa.pub hadoop@hadoop-node02
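
A quick round trip from each node confirms the keys work; each hostname should print without a password prompt (accept the host-key confirmation on a first connection):

for node in hadoop-node01 hadoop-node02 hadoop-node03; do
    ssh hadoop@${node} hostname
done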

Perform the following operations on the first host

  • Start the services from the first node
[hadoop@hadoop-node01 ~]$ cd /opt/hadoop/hadoop-3.2.0/
[hadoop@hadoop-node01 hadoop-3.2.0]$ bin/hdfs namenode -format
    # format (initialize) the NameNode
[hadoop@hadoop-node01 hadoop-3.2.0]$ sbin/start-dfs.sh
Starting namenodes on [hadoop-node01]
Starting datanodes
Starting secondary namenodes [hadoop-node01]
    # start HDFS
[hadoop@hadoop-node01 hadoop-3.2.0]$ sbin/start-yarn.sh
Starting resourcemanager
Starting nodemanagers
    # start YARN
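
To confirm everything came up, check the Java processes on each node and pull an HDFS report from the first one. Expect NameNode, SecondaryNameNode, and ResourceManager on hadoop-node01, and DataNode plus NodeManager on the two workers; the ResourceManager web UI is at http://192.168.4.195:8088 as configured above, and the NameNode UI listens on port 9870 by default in Hadoop 3.x.

[hadoop@hadoop-node01 hadoop-3.2.0]$ jps
[hadoop@hadoop-node01 hadoop-3.2.0]$ bin/hdfs dfsadmin -report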