1. Pull the image
# check the available stable versions
sudo docker search hue
# pull the latest image and confirm it is present locally
sudo docker pull gethue/hue:latest
sudo docker image ls | grep hue
2. Start the container
# run Hue on the user-defined bigdata network, mapping host port 28888 to Hue's 8888
sudo docker run -d --name=hue --network bigdata -p 28888:8888 gethue/hue:latest
sudo docker ps | grep hue
# container lifecycle commands
sudo docker start hue
sudo docker restart hue
sudo docker stop hue
sudo docker rm hue
# view logs (add -f to follow)
sudo docker logs hue
sudo docker logs -f hue
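The run command above assumes the user-defined bigdata bridge network already exists. If it does not, a minimal sketch to create it first (the subnet is an assumption, chosen to match the 172.16.0.x addresses used below):
# hypothetical subnet; align it with the static IPs of your other containers
sudo docker network create --subnet=172.16.0.0/24 bigdata
sudo docker network ls | grep bigdata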
3. Verify
Web UI: http://LTSR003:28888 (admin/123456, created on first login)
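The listener can also be checked from the host without a browser; a quick sketch using the host name from this setup:
curl -I http://LTSR003:28888   # expect an HTTP response, typically a redirect to the login page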
4. Basic configuration
Configure the secret key and time zone
docker exec -it hue bash
vi /usr/share/hue/desktop/conf/hue.ini
Set the following:
[desktop]
secret_key=jFE93j;2[290-eiw.KEiwN2s3['d;/.q[eIW^y#e=+Iei*@Mn<qW5o
http_host=0.0.0.0
http_port=8888
time_zone=Asia/Shanghai
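hue.ini changes only take effect after a restart, so leave the container and bounce Hue with the lifecycle commands from above:
exit
sudo docker restart hue
sudo docker logs -f hue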
Configure the metadata database
docker exec -it mysql bash
mysql -uroot -proot
Run the following:
create database huedb;
grant all on huedb.* to root@'%' identified by 'root';
grant all on huedb.* to root@localhost identified by 'root';
grant all on huedb.* to root@'172.16.0.6' identified by 'root';
ALTER DATABASE huedb CHARACTER SET latin1;
flush privileges;
quit;
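A quick sanity check that the database and the remote grant exist before pointing Hue at them:
docker exec -it mysql mysql -uroot -proot -e "show databases like 'huedb'; show grants for 'root'@'%';"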
5. Integration
Enter the container
docker exec -it hue bash
vi /usr/share/hue/desktop/conf/hue.ini
1. Hue and HDFS integration
Configure Hue
[hadoop]
[[hdfs_clusters]]
[[[default]]]
fs_defaultfs=hdfs://172.16.0.2:9000
webhdfs_url=http://172.16.0.2:50070/webhdfs/v1
hadoop_hdfs_home=/usr/local/hadoop-2.7.3
hadoop_bin=/usr/local/hadoop-2.7.3/bin
hadoop_conf_dir=/usr/local/hadoop-2.7.3/etc/hadoop
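Hue's HDFS access also depends on the Hadoop side: WebHDFS must be enabled and the proxy-user entries (hadoop.proxyuser.*) must be present. A quick check against the config paths above:
docker exec hadoop-master grep -A1 proxyuser /usr/local/hadoop-2.7.3/etc/hadoop/core-site.xml
docker exec hadoop-master grep -A1 webhdfs /usr/local/hadoop-2.7.3/etc/hadoop/hdfs-site.xml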
Start HttpFS
1. Enter the container
docker exec -it hadoop-master bash
2. Start HttpFS
cd /usr/local/hadoop-2.7.3/
bin/hdfs dfs -chmod -R o+x /tmp
sbin/httpfs.sh start
3. Check the port (run inside the container)
netstat -ant | grep 14000
4. Check the URLs (run on the host)
sudo docker inspect hadoop-master | grep IPAddress
curl -i "http://172.16.0.2:14000/webhdfs/v1/?user.name=root&op=GETHOMEDIRECTORY"
curl -i "http://172.16.0.2:50070/webhdfs/v1/?user.name=root&op=GETHOMEDIRECTORY"
2. Hue and YARN integration
[hadoop]
[[yarn_clusters]]
[[[default]]]
submit_to=True
resourcemanager_host=172.16.0.2
resourcemanager_port=8032
resourcemanager_api_url=http://172.16.0.2:8088
proxy_api_url=http://172.16.0.2:8088
history_server_api_url=http://172.16.0.2:19888
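A reachability check for the ResourceManager API configured above; /ws/v1/cluster/info is part of the standard YARN REST API and should return cluster metadata as JSON:
curl http://172.16.0.2:8088/ws/v1/cluster/info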
3. Hue and MySQL integration
1. Configure MySQL as the Hue metadata database
[desktop]
[[database]]
engine=mysql
host=172.16.0.6
port=3306
user=root
password=root
name=huedb
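After switching the engine to MySQL, Hue's schema still has to be created inside huedb. A sketch, assuming the gethue image's usual layout with the hue binary under /usr/share/hue/build/env/bin:
docker exec -it hue /usr/share/hue/build/env/bin/hue migrate
sudo docker restart hue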
2. Configure Hue to manage MySQL databases
[librdbms]
[[databases]]
[[[mysql]]]  # uncomment this section (remove the leading "##")
nice_name="MySQL DB"
engine=mysql
host=172.16.0.6
port=3306
user=root
password=root
4. Hue and Hive integration
[beeswax]
hive_server_host=172.16.0.5
hive_server_port=10000
hive_conf_dir=/usr/local/apache-hive-2.3.2-bin/conf
auth_username=root
auth_password=root
thrift_version=7
[metastore]
# allow creating databases, tables, etc. through Hive
enable_new_create_table=true
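Before testing from Hue, it is worth confirming HiveServer2 accepts the credentials above. A sketch with beeline; the container name hive is an assumption, adjust it to your deployment:
# hypothetical container name; Hive paths and credentials come from the config above
docker exec -it hive /usr/local/apache-hive-2.3.2-bin/bin/beeline -u "jdbc:hive2://172.16.0.5:10000" -n root -p root -e "show databases;"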
5. Hue and ZooKeeper integration
[zookeeper]
[[clusters]]
[[[default]]]
host_ports=172.16.0.101:2181,172.16.0.102:2181,172.16.0.103:2181
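A quick liveness probe against one of the quorum members, using ZooKeeper's standard ruok four-letter command (assumes nc is available):
echo ruok | nc 172.16.0.101 2181   # a healthy server answers imok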
6. Hue and Spark integration
# 1. Distributed SQL Engine
# 1.2. Thrift Server
[spark]
# note: this is the HiveServer2 host address
sql_server_host=172.16.0.5
sql_server_port=10000
security_enabled=false
# 2. Apache Livy
[spark]
livy_server_url=http://172.16.0.2:8998
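A reachability check for the Livy server configured above; GET /sessions is part of Livy's REST API and returns the current session list as JSON:
curl http://172.16.0.2:8998/sessions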
7. Hue Notebook integration
[notebook]
[[interpreters]]
# 1. Distributed SQL Engine
# 1.1. SqlAlchemy
# error: "Password should be set if and only if in LDAP or CUSTOM mode; Remove password or use one of those modes"
[[[sparksql]]]
name=Spark SQL
interface=sqlalchemy
options='{"url": "hive://root:root@172.16.0.5:10000/default"}'
# 1.2. Thrift Server
# also requires the [spark] section configured above
[[[sparksql]]]
name=Spark SQL
interface=hiveserver2
# 2. Apache Livy
# also requires the [spark] section configured above
[[[sparksql]]]
name=Spark SQL
interface=livy
[[[spark]]]
name=Scala
interface=livy
[[[hive]]]
name=Hive
interface=hiveserver2
[[[mysql]]]
name=MySQL
interface=sqlalchemy
options='{"url": "mysql://root:root@172.16.0.6:3306/huedb"}'
- Reference configuration