1. 获取镜像
# 查看可用的稳定版本
sudo docker search hue
sudo docker pull gethue/hue:latest
sudo docker image ls | grep hue
2. 启动容器
sudo docker run -d --name=hue --network bigdata -p 28888:8888 gethue/hue:latest
sudo docker ps | grep hue
sudo docker start hue
sudo docker restart hue
sudo docker stop hue
sudo docker rm hue
sudo docker logs hue
sudo docker logs -f hue
3. 验证
Web UI:http://LTSR003:28888 (admin/123456,首次登录时创建)
4. 基本配置
配置密钥和时区
docker exec -it hue bash
vi /usr/share/hue/desktop/conf/hue.ini
内容如下:
[desktop]
secret_key=jFE93j;2[290-eiw.KEiwN2s3['d;/.q[eIW^y#e=+Iei*@Mn<qW5o
http_host=0.0.0.0
http_port=8888
time_zone=Asia/Shanghai
配置元数据
docker exec -it mysql bash
mysql -uroot -proot
内容如下:
create database huedb;
grant all on huedb.* to root@'%' identified by 'root';
grant all on huedb.* to root@localhost identified by 'root';
grant all on huedb.* to root@172.16.0.6 identified by 'root';
ALTER DATABASE huedb CHARACTER SET latin1;
flush privileges;
quit;
5. 集成

进入容器
docker exec -it hue bash
vi /usr/share/hue/desktop/conf/hue.ini
1. Hue与HDFS集成
配置Hue
[hadoop]
[[hdfs_clusters]]
[[[default]]]
fs_defaultfs=hdfs://172.16.0.2:9000
webhdfs_url=http://172.16.0.2:50070/webhdfs/v1
hadoop_hdfs_home=/usr/local/hadoop-2.7.3
hadoop_bin=/usr/local/hadoop-2.7.3/bin
hadoop_conf_dir=/usr/local/hadoop-2.7.3/etc/hadoop
启动httpfs
1.进入容器
docker exec -it hadoop-master bash
开启httpfs
cd /usr/local/hadoop-2.7.3/
bin/hdfs dfs -chmod -R o+x /tmp
sbin/httpfs.sh start
检查端口(容器上执行)
netstat -ant |grep 14000
检查URL(宿主机执行)
sudo docker inspect hadoop-master | grep IPAddress
curl -i "http://172.16.0.2:14000/webhdfs/v1/?user.name=root&op=GETHOMEDIRECTORY"
curl -i "http://172.16.0.2:50070/webhdfs/v1/?user.name=root&op=GETHOMEDIRECTORY"
2. Hue与YARN集成
[hadoop]
[[yarn_clusters]]
[[[default]]]
submit_to=True
resourcemanager_host=172.16.0.2
resourcemanager_port=8032
resourcemanager_api_url=http://172.16.0.2:8088
proxy_api_url=http://172.16.0.2:8088
history_server_api_url=http://172.16.0.2:19888
3. Hue与MySQL集成
1. 配置MySQL作为Hue元数据库
[desktop]
[[database]]
engine=mysql
host=172.16.0.6
port=3306
user=root
password=root
name=huedb
2. 配置Hue管理MySQL数据库
[librdbms]
[[databases]]
# 去掉 "##" 注释符以启用该节点
[[[mysql]]]
nice_name="MySQL DB"
engine=mysql
host=172.16.0.6
port=3306
user=root
password=root
4. Hue与Hive集成
[beeswax]
hive_server_host=172.16.0.5
hive_server_port=10000
hive_conf_dir=/usr/local/apache-hive-2.3.2-bin/conf
auth_username=root
auth_password=root
thrift_version=7
[metastore]
# 允许使用hive创建数据库表等操作
enable_new_create_table=true
5. Hue与Zookeeper集成
[zookeeper]
[[clusters]]
[[[default]]]
host_ports=172.16.0.101:2181,172.16.0.102:2181,172.16.0.103:2181
6. Hue与Spark集成
# 1. Distributed SQL Engine
# 1.2. Thrift Server
[spark]
# 注意:此处为hiveserver2的IP地址
sql_server_host=172.16.0.5
sql_server_port=10000
security_enabled=false

# 2. Apache Livy
[spark]
livy_server_url=http://172.16.0.2:8998
7. Hue集成NoteBook
[notebook]
[[interpreters]]
# 1. Distributed SQL Engine
# 1.1. SqlAlchemy
# error:"Password should be set if and only if in LDAP or CUSTOM mode; Remove password or use one of those modes"
[[[sparksql]]]
name=Spark SQL
interface=sqlalchemy
options='{"url": "hive://root:root@172.16.0.5:10000/default"}'
# 1.2. Thrift Server
# 需要同时配置[spark]节点
[[[sparksql]]]
name=Spark SQL
interface=hiveserver2
# 2. Apache Livy
# 需要同时配置[spark]节点
[[[sparksql]]]
name=Spark SQL
interface=livy
[[[spark]]]
name=Scala
interface=livy
[[[hive]]]
name=Hive
interface=hiveserver2
[[[mysql]]]
name=MySQL
interface=sqlalchemy
options='{"url": "mysql://root:root@172.16.0.6:3306/huedb"}'
- 参考配置
