[安装模式] : 单机版 : 伪分布式 : 分布式[下载地址] : http://hbase.apache.org/downloads.html : http://192.168.10.11:16010/master-status
安装配置 - 单机版
[配置环境]
# 00 主机名配置
: vim /etc/hosts -> 192.168.10.10 local.hbase.xknower.com
: vim /etc/sysconfig/network
# 01 创建安装目录
: mkdir /opt/hadoop/
# 02 解压安装
: tar -zxvf /opt/hadoop/hbase-2.2.4-bin.tar.gz -C /opt/hadoop/
# 03 配置环境
// vim /etc/profile (source /etc/profile)
export HBASE_HOME=/opt/hadoop/hbase-2.2.4
export PATH=$HBASE_HOME/bin:$PATH
[]
# JAVA_HOME 参数配置
: vim ${HBASE_HOME}/conf/hbase-env.sh
> export JAVA_HOME=/opt/hadoop/jdk1.8.0_211
: vim ${HBASE_HOME}/conf/hbase-site.xml
> mkdir -p /opt/hadoop/data/myhbase
> mkdir -p /opt/hadoop/data/myhbase/tmp
<!-- hbase.rootdir -->
<!-- hbase.rootdir 单机版一定要写绝对文件路径(手动创建), 而不是写IP端口 -->
<property>
<name>hbase.rootdir</name>
<value>file:///opt/hadoop/data/myhbase</value>
<description>The directory shared by region servers.</description>
</property>
<!-- fs.defaultFS -->
<!-- fs.defaultFS 单机版一定要写绝对文件路径(手动创建), 而不是写IP端口 -->
<property>
<name>fs.defaultFS</name>
<value>file:///opt/hadoop/data/myhbase</value>
</property>
<!-- hbase的端口 -->
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2182</value>
<description>Property from ZooKeeper's config zoo.cfg. The port at which the clients will connect.
</description>
</property>
<!-- 超时时间 -->
<property>
<name>zookeeper.session.timeout</name>
<value>120000</value>
</property>
<!-- zookeeper 集群配置, 如果是集群, 则添加其它的主机地址 -->
<property>
<name>hbase.zookeeper.quorum</name>
<value>192.168.10.10</value>
</property>
<!-- hbase.tmp.dir -->
<property>
<name>hbase.tmp.dir</name>
<value>/opt/hadoop/data/myhbase/tmp</value>
</property>
<!-- hbase.cluster.distributed -->
<property>
<name>hbase.cluster.distributed</name>
<value>false</value>
</property>
<!-- hbase.zookeeper.property.dataDir -->
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/opt/hadoop/data/myhbase</value>
</property>
<!-- zookeeper.znode.parent > /hbase/master -->
<property>
<name>zookeeper.znode.parent</name>
<value>/hbase</value>
</property>
<!-- 上面配置出现异常, 只配置下面就可以了 -->
<property>
<name>hbase.rootdir</name>
<value>file:///opt/hadoop/data/myhbase</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/opt/hadoop/data/myhbase</value>
</property>
<property>
<name>hbase.unsafe.stream.capability.enforce</name>
<value>false</value>
</property>
<!-- 开启二级索引 -->
<property>
<name>hbase.regionserver.wal.codec</name>
<value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
</property>
[启动]
: start-hbase.sh
[执行]
: hbase shell
: list
# 创建表 hbase_test , 两个列族 cf1 和 cf2
> create 'hbase_test', {NAME=>'cf1'}, {NAME=>'cf2'}
# 添加数据
> put 'hbase_test', '001','cf1:name','Tom'
> scan 'hbase_test'
> get 'hbase_test','001'
[]
# HBase WEB 管理页面
: http://192.168.10.10:16010/master-status
[root@localhost conf]# start-hbase.sh
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/hadoop/hadoop-2.10.0/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop/hbase-2.2.4/lib/client-facing-thirdparty/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/hadoop/hadoop-2.10.0/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop/hbase-2.2.4/lib/client-facing-thirdparty/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
The authenticity of host '192.168.10.10 (192.168.10.10)' can't be established.
ECDSA key fingerprint is SHA256:ygYQ2NC+QmtkASSzSgI1QlTDIeA+pbQ6O7J4B6uxFfo.
ECDSA key fingerprint is MD5:bb:c5:e0:27:9c:fc:3d:3c:03:5e:6c:a6:48:48:ca:19.
Are you sure you want to continue connecting (yes/no)? yes
192.168.10.10: Warning: Permanently added '192.168.10.10' (ECDSA) to the list of known hosts.
root@192.168.10.10's password:
192.168.10.10: running zookeeper, logging to /opt/hadoop/hbase-2.2.4/bin/../logs/hbase-root-zookeeper-local.hadoop.xknower.com.out
running master, logging to /opt/hadoop/hbase-2.2.4/logs/hbase-root-master-local.hadoop.xknower.com.out
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/hadoop/hadoop-2.10.0/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop/hbase-2.2.4/lib/client-facing-thirdparty/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
: running regionserver, logging to /opt/hadoop/hbase-2.2.4/logs/hbase-root-regionserver-local.hadoop.xknower.com.out
: SLF4J: Class path contains multiple SLF4J bindings.
: SLF4J: Found binding in [jar:file:/opt/hadoop/hadoop-2.10.0/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
: SLF4J: Found binding in [jar:file:/opt/hadoop/hbase-2.2.4/lib/client-facing-thirdparty/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
: SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
: SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
[root@localhost conf]# hbase shell
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/opt/hadoop/hadoop-2.10.0/share/hadoop/common/lib/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/opt/hadoop/hbase-2.2.4/lib/client-facing-thirdparty/slf4j-log4j12-1.7.25.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
SLF4J: Actual binding is of type [org.slf4j.impl.Log4jLoggerFactory]
HBase Shell
Use "help" to get list of supported commands.
Use "exit" to quit this interactive shell.
For Reference, please visit: http://hbase.apache.org/2.0/book.html#shell
Version 2.2.4, r67779d1a325a4f78a468af3339e73bf075888bac, 2020年 03月 11日 星期三 12:57:39 CST
Took 0.0022 seconds
hbase(main):001:0>
[Zookeeper]
: http://mirrors.shu.edu.cn/apache/zookeeper/
: zookeeper-3.4.14.tar.gz
[基本配置]
: mkdir data && mkdir logs
# 配置
: zoo.cfg
tickTime=2000
dataDir=/app/zookeeper-3.4.14/data
dataLogDir=/app/zookeeper-3.4.14/logs
clientPort=2181
[启动命令]
./zkServer.sh start
./zkServer.sh stop
./zkServer.sh restart
./zkServer.sh status
[伪分布式配置]
# 配置 zoo.cfg
tickTime=2000
dataDir=/app/zookeepers/zookeeper1/data
dataLogDir=/app/zookeepers/zookeeper1/logs
clientPort=2181
initLimit=5
syncLimit=2
server.1=127.0.0.1:2888:3888
server.2=127.0.0.1:2889:4888
server.3=127.0.0.1:2890:5888
# 拷贝多个目录, Zookeeper 并配置
tickTime=2000
dataDir=/app/zookeepers/zookeeper1/data
dataLogDir=/app/zookeepers/zookeeper1/logs
clientPort=2181
initLimit=5
syncLimit=2
server.1=127.0.0.1:2888:3888
server.2=127.0.0.1:2889:4888
server.3=127.0.0.1:2890:5888
-> echo 1 > data/myid
tickTime=2000
dataDir=/app/zookeepers/zookeeper2/data
dataLogDir=/app/zookeepers/zookeeper2/logs
clientPort=3181
initLimit=5
syncLimit=2
server.1=127.0.0.1:2888:3888
server.2=127.0.0.1:2889:4888
server.3=127.0.0.1:2890:5888
echo 2 > data/myid
tickTime=2000
dataDir=/app/zookeepers/zookeeper3/data
dataLogDir=/app/zookeepers/zookeeper3/logs
clientPort=4181
initLimit=5
syncLimit=2
server.1=127.0.0.1:2888:3888
server.2=127.0.0.1:2889:4888
server.3=127.0.0.1:2890:5888
-> echo 3 > data/myid
[hadoop]
: http://hadoop.apache.org/
[]
: ssh-keygen -t rsa -P ''
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
chmod 600 ~/.ssh/authorized_keys
vim /etc/ssh/sshd_config
RSAAuthentication yes # 启用 RSA 认证
PubkeyAuthentication yes # 启用公钥私钥配对认证方式
AuthorizedKeysFile %h/.ssh/authorized_keys # 公钥文件路径
: service ssh restart
[]
: mkdir -p /opt/hadoop/tmp && mkdir /opt/hadoop/hdfs
: mkdir -p /opt/hadoop/hdfs/data && mkdir /opt/hadoop/hdfs/name
# vim /etc/profile
HADOOP_HOME=/opt/hadoop-2.10.0
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
[]
hadoop-env.sh
yarn-env.sh
core-site.xml
hdfs-site.xml
mapred-site.xml
yarn-site.xml
[hadoop-env.sh]
# The java implementation to use.
#export JAVA_HOME=${JAVA_HOME}
export JAVA_HOME=/opt/jdk1.8.0_211
[yarn-env.sh]
: export JAVA_HOME=/app/jdk1.8.0_171
[core-site.xml]
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:9000</value>
<description>HDFS的URI,文件系统://namenode标识:端口号</description>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop/tmp</value>
<description>namenode上本地的hadoop临时文件夹</description>
</property>
</configuration>
[hdfs-site.xml]
<configuration>
<property>
<name>dfs.name.dir</name>
<value>/opt/hadoop/hdfs/name</value>
<description>namenode上存储hdfs名字空间元数据 </description>
</property>
<property>
<name>dfs.data.dir</name>
<value>/opt/hadoop/hdfs/data</value>
<description>datanode上数据块的物理存储位置</description>
</property>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
</configuration>
[mapred-site.xml]
<configuration>
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
</configuration>
[yarn-site.xml]
<configuration>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<property>
<name>yarn.resourcemanager.webapp.address</name>
<value>192.168.10.11:8099</value>
<description>这个地址是mr管理界面的</description>
</property>
</configuration>
[]
# 格式化
: hadoop namenode -format
# 启动
: start-dfs.sh
# start-dfs.sh 、 stop-dfs.sh 支持root用户操作
#!/usr/bin/env bash
HDFS_DATANODE_USER=root
HADOOP_SECURE_DN_USER=hdfs
HDFS_NAMENODE_USER=root
HDFS_SECONDARYNAMENODE_USER=root
# start-yarn.sh、stop-yarn.sh 支持root用户操作
#!/usr/bin/env bash
YARN_RESOURCEMANAGER_USER=root
HADOOP_SECURE_DN_USER=yarn
YARN_NODEMANAGER_USER=root
: http://localhost:50070/    # Hadoop 2.x NameNode WEB UI 端口为 50070 (9870 是 Hadoop 3.x 的端口)