单机 Zookeeper + Hadoop + Hbase

1、Java环境

  1. ## 安装 jdk
  2. yum install -y java-1.8.0-openjdk java-1.8.0-openjdk-devel
  3. ## 安装目录 --> /usr/lib/jvm
  4. ## 配置java环境变量 /etc/profile
  5. export JAVA_HOME=/usr/lib/jvm/jre
  6. export JRE_HOME=/usr/lib/jvm/jre
  7. export CLASSPATH=.:${JAVA_HOME}/lib:${JRE_HOME}/lib
  8. export PATH=${JAVA_HOME}/bin:$PATH
  9. ## 使配置生效
  10. source /etc/profile

2、安装 Zookeeper

下载: http://archive.apache.org/dist/zookeeper/zookeeper-3.5.9/apache-zookeeper-3.5.9-bin.tar.gz

http://archive.apache.org/dist/zookeeper/zookeeper-3.5.9/apache-zookeeper-3.5.9.tar.gz

  • 解压
  1. tar -xzvf apache-zookeeper-3.5.9-bin.tar.gz
  • 配置环境变量
  1. vi /etc/profile
  2. export ZOOKEEPER_HOME=/var/local/hbase/apache-zookeeper-3.5.9-bin
  3. export PATH=$ZOOKEEPER_HOME/bin:$PATH
  4. ## 使配置生效
  5. source /etc/profile
  • 复制配置文件
  1. cp /var/local/hbase/apache-zookeeper-3.5.9-bin/conf/zoo_sample.cfg /var/local/hbase/apache-zookeeper-3.5.9-bin/conf/zoo.cfg
  • 创建目录
  1. mkdir /var/local/hbase/apache-zookeeper-3.5.9-bin/run
  2. mkdir /var/local/hbase/apache-zookeeper-3.5.9-bin/run/data
  3. mkdir /var/local/hbase/apache-zookeeper-3.5.9-bin/run/log
  • 修改配置文件
  1. vi /var/local/hbase/apache-zookeeper-3.5.9-bin/conf/zoo.cfg
  2. ## 修改如下两处(没有就增加):
  3. dataDir=/var/local/hbase/apache-zookeeper-3.5.9-bin/run/data
  4. dataLogDir=/var/local/hbase/apache-zookeeper-3.5.9-bin/run/log
  • 启动 zookeeper
  1. /var/local/hbase/apache-zookeeper-3.5.9-bin/bin/zkServer.sh start
  • zookeeper 使用
  1. ## 连接 zookeeper
  2. /var/local/hbase/apache-zookeeper-3.5.9-bin/bin/zkCli.sh
  3. ## 使用 ls 命令来查看当前 ZooKeeper 中所包含的内容
  4. ls

3、安装 Hadoop

下载:https://www.apache.org/dyn/closer.cgi/hadoop/common/hadoop-3.2.2/hadoop-3.2.2.tar.gz

  • 解压
  1. tar -xzvf hadoop-3.2.2.tar.gz
  • 配置环境变量
  1. vi /etc/profile
  2. export HADOOP_HOME=/var/local/hbase/hadoop-3.2.2
  3. export PATH=${HADOOP_HOME}/bin:$PATH
  4. ## 使配置生效
  5. source /etc/profile
  • 修改hadoop配置文件
  1. vim /var/local/hbase/hadoop-3.2.2/etc/hadoop/hadoop-env.sh
  2. ## 设置java_home
  3. JAVA_HOME=/usr/lib/jvm/jre
  • 创建目录:
  1. mkdir /var/local/hbase/hadoop-3.2.2/run
  2. mkdir /var/local/hbase/hadoop-3.2.2/run/hadoop
  • 修改hosts文件
  1. vi /etc/hosts
  2. ## 添加
  3. 192.168.31.131 hadoop1
  • 修改配置文件 core-site.xml
  1. vi /var/local/hbase/hadoop-3.2.2/etc/hadoop/core-site.xml
  1. <configuration>
  2. <property>
  3. <name>fs.defaultFS</name>
  4. <value>hdfs://hadoop1:8020</value>
  5. </property>
  6. <property>
  7. <!--指定 hadoop 存储临时文件的目录-->
  8. <name>hadoop.tmp.dir</name>
  9. <value>/var/local/hbase/hadoop-3.2.2/run/hadoop</value>
  10. </property>
  11. <property>
  12. <name>hadoop.native.lib</name>
  13. <value>false</value>
  14. <description></description>
  15. </property>
  16. </configuration>
  • 修改 hdfs-site.xml 文件
  1. vi /var/local/hbase/hadoop-3.2.2/etc/hadoop/hdfs-site.xml
  1. <configuration>
  2. <property>
  3. <name>dfs.replication</name>
  4. <value>1</value>
  5. </property>
  6. <property>
  7. <name>dfs.namenode.secondary.http-address</name> <!-- Hadoop 3.x 属性名;旧名 dfs.secondary.http.address 已弃用 -->
  8. <value>hadoop1:50070</value>
  9. </property>
  10. </configuration>
  • 修改文件:mapred-site.xml
  1. vi /var/local/hbase/hadoop-3.2.2/etc/hadoop/mapred-site.xml
  1. <configuration>
  2. <property>
  3. <name>mapreduce.framework.name</name>
  4. <value>yarn</value>
  5. </property>
  6. </configuration>
  • 修改文件:yarn-site.xml
  1. vi /var/local/hbase/hadoop-3.2.2/etc/hadoop/yarn-site.xml
  1. <configuration>
  2. <property>
  3. <!--配置 NodeManager 上运行的附属服务。需要配置成 mapreduce_shuffle 后才可以在 Yarn 上运行 MapReduce 程序。-->
  4. <name>yarn.nodemanager.aux-services</name>
  5. <value>mapreduce_shuffle</value>
  6. </property>
  7. </configuration>
  • 主机访问设置
  1. ## 在 /var/local/hbase 目录下
  2. ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
  3. cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
  4. chmod 0600 ~/.ssh/authorized_keys
  • 格式化 hdfs
  1. /var/local/hbase/hadoop-3.2.2/bin/hdfs namenode -format
  • 修改 hdfs 启动脚本:
  1. vi /var/local/hbase/hadoop-3.2.2/sbin/start-dfs.sh
  2. ## 顶部增加
  3. HDFS_DATANODE_USER=root
  4. HADOOP_SECURE_DN_USER=hdfs ## 注:Hadoop 3.x 中该变量已更名为 HDFS_DATANODE_SECURE_USER,旧名仍可用但会有弃用警告
  5. HDFS_NAMENODE_USER=root
  6. HDFS_SECONDARYNAMENODE_USER=root
  • 修改 hdfs 停止脚本:
  1. vi /var/local/hbase/hadoop-3.2.2/sbin/stop-dfs.sh
  2. ## 顶部增加
  3. HDFS_DATANODE_USER=root
  4. HADOOP_SECURE_DN_USER=hdfs
  5. HDFS_NAMENODE_USER=root
  6. HDFS_SECONDARYNAMENODE_USER=root
  • 修改yarn启动脚本:
  1. vi /var/local/hbase/hadoop-3.2.2/sbin/start-yarn.sh
  2. ## 顶部增加
  3. YARN_RESOURCEMANAGER_USER=root
  4. HADOOP_SECURE_DN_USER=yarn
  5. YARN_NODEMANAGER_USER=root
  • 修改yarn停止脚本:
  1. vi /var/local/hbase/hadoop-3.2.2/sbin/stop-yarn.sh
  2. ## 顶部增加
  3. YARN_RESOURCEMANAGER_USER=root
  4. HADOOP_SECURE_DN_USER=yarn
  5. YARN_NODEMANAGER_USER=root
  • 启动 hdfs
  1. export JAVA_HOME=/usr/lib/jvm/jre
  2. ## 启动
  3. /var/local/hbase/hadoop-3.2.2/sbin/start-dfs.sh
  4. ## 停止
  5. /var/local/hbase/hadoop-3.2.2/sbin/stop-dfs.sh
  • 浏览器访问验证
  1. http://192.168.31.131:9870/ ## Hadoop 3.x NameNode Web UI 默认端口为 9870(2.x 为 50070);IP 与 /etc/hosts 中 hadoop1 保持一致

image-20210814170700941.png

  • 启动 yarn
  1. ## 启动
  2. /var/local/hbase/hadoop-3.2.2/sbin/start-yarn.sh
  3. ## 停止
  4. /var/local/hbase/hadoop-3.2.2/sbin/stop-yarn.sh
  • 浏览器访问验证
  1. http://192.168.31.131:8088/ ## IP 与 /etc/hosts 中 hadoop1 保持一致

image-20210814170845809.png

4、安装 Hbase

下载: https://www.apache.org/dyn/closer.lua/hbase/2.3.6/hbase-2.3.6-bin.tar.gz

  • 解压
  1. tar -xzvf hbase-2.3.6-bin.tar.gz
  • 修改环境变量
  1. vi /etc/profile
  2. export HBASE_HOME=/var/local/hbase/hbase-2.3.6
  3. export PATH=$HBASE_HOME/bin:$PATH
  4. ## 使配置生效
  5. source /etc/profile
  • 修改 hbase 配置文件
  1. vi /var/local/hbase/hbase-2.3.6/conf/hbase-env.sh
  2. ### 修改两处
  3. export JAVA_HOME=/usr/lib/jvm/jre ## 28 行
  4. export HBASE_MANAGES_ZK=false ## 126 行,使用外置 zookeeper(原文 flase 为拼写错误,会导致 HBase 仍启动自带 ZK)
  • 修改 hbase-site
  1. vi /var/local/hbase/hbase-2.3.6/conf/hbase-site.xml
  1. <configuration>
  2. <!--指定 HBase 以分布式模式运行-->
  3. <property>
  4. <name>hbase.cluster.distributed</name>
  5. <value>true</value>
  6. </property>
  7. <!--指定 HBase 数据存储路径为 HDFS 上的 hbase 目录,hbase 目录不需要预先创建,程序会自动创建-->
  8. <property>
  9. <name>hbase.rootdir</name>
  10. <value>hdfs://hadoop1:8020/hbase</value>
  11. </property>
  12. <!--指定 zookeeper 数据的存储位置-->
  13. <property>
  14. <name>hbase.zookeeper.property.dataDir</name>
  15. <value>/var/local/hbase/apache-zookeeper-3.5.9-bin/run/data</value>
  16. </property>
  17. <!--指定 Hbase Web UI 默认端口-->
  18. <property>
  19. <name>hbase.master.info.port</name>
  20. <value>16010</value>
  21. </property>
  22. <!-- regionserver 信息 web界面接口 -->
  23. <property>
  24. <name>hbase.regionserver.info.port</name>
  25. <value>16030</value>
  26. </property>
  27. <!--指定外置zookeeper-->
  28. <property>
  29. <name>hbase.zookeeper.quorum</name>
  30. <value>hadoop1:2181</value>
  31. </property>
  32. <!--解决 hostname 为 localhost 问题-->
  33. <property>
  34. <name>hbase.master.ipc.address</name>
  35. <value>0.0.0.0</value>
  36. </property>
  37. <!--解决 hostname 为 localhost 问题-->
  38. <property>
  39. <name>hbase.regionserver.ipc.address</name>
  40. <value>0.0.0.0</value>
  41. </property>
  42. </configuration>
  • 修改 regionservers 文件
  1. vim /var/local/hbase/hbase-2.3.6/conf/regionservers
  2. ## 将 localhost 修改为 hadoop1
  • 启动 hbase
  1. ## 启动
  2. /var/local/hbase/hbase-2.3.6/bin/start-hbase.sh
  3. ## 停止
  4. /var/local/hbase/hbase-2.3.6/bin/stop-hbase.sh
  5. ## 注意:以下两行路径为 /home/hbase,与上文安装目录 /var/local/hbase 不一致,按实际安装目录取其一
  6. /home/hbase/hbase-2.3.6/bin/start-hbase.sh
  7. /home/hbase/hbase-2.3.6/bin/stop-hbase.sh
  • 浏览器访问
  1. http://192.168.31.131:16010 ## IP 与 /etc/hosts 中 hadoop1 保持一致
  1. /var/local/hbase/apache-zookeeper-3.5.9-bin/bin/zkServer.sh start
  2. /var/local/hbase/hadoop-3.2.2/sbin/start-dfs.sh
  3. /var/local/hbase/hadoop-3.2.2/sbin/start-yarn.sh
  4. /var/local/hbase/hbase-2.3.6/bin/start-hbase.sh