core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://bigdata</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop/data</value>
</property>
<property>
<name>ha.zookeeper.quorum</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>
hdfs-site.xml
<property>
<name>dfs.nameservices</name>
<value>bigdata</value>
</property>
<property>
<name>dfs.ha.namenodes.bigdata</name>
<value>nn1,nn2</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bigdata.nn1</name>
<value>master:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.bigdata.nn1</name>
<value>master:50070</value>
</property>
<property>
<name>dfs.namenode.rpc-address.bigdata.nn2</name>
<value>slave1:8020</value>
</property>
<property>
<name>dfs.namenode.http-address.bigdata.nn2</name>
<value>slave1:50070</value>
</property>
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://master:8485;slave1:8485;slave2:8485/bigdata</value>
</property>
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/opt/module/hadoop/journaldata</value>
</property>
<property>
<name>dfs.client.failover.proxy.provider.bigdata</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence
shell(true)
</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>3000</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/opt/module/hadoop/data/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/opt/module/hadoop/data/datanode</value>
</property>
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
yarn-site.xml
<property>
<name>yarn.resourcemanager.ha.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.resourcemanager.cluster-id</name>
<value>YN</value>
</property>
<property>
<name>yarn.resourcemanager.ha.rm-ids</name>
<value>rm1,rm2</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm1</name>
<value>master</value>
</property>
<property>
<name>yarn.resourcemanager.hostname.rm2</name>
<value>slave1</value>
</property>
<property>
<name>yarn.resourcemanager.store.class</name>
<value>org.apache.hadoop.yarn.server.resourcemanager.recovery.ZKRMStateStore</value>
</property>
<property>
<name>yarn.resourcemanager.zk-address</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
<name>yarn.resourcemanager.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<property>
<name>mapreduce.jobhistory.address</name>
<value>master:10020</value>
</property>
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>master:19888</value>
</property>
hive
export HADOOP_HOME=/opt/module/hadoop
export HIVE_CONF_DIR=/opt/module/hive/conf
touch hive-site.xml
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://master:3306/test?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>123456</value>
</property>
</configuration>
hbase
vi hbase-env.sh
export JAVA_HOME=/opt/module/java
JDK 8+ 时注释掉 PermSize 相关配置
修改 export HBASE_MANAGES_ZK=false
vi hbase-site.xml
<property>
<name>hbase.rootdir</name>
<value>hdfs://bigdata/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>master:2181,slave1:2181,slave2:2181</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/opt/module/hbase/data</value>
</property>
zookeeper
vi zoo.cfg
dataDir=/opt/module/zookeeper/data
server.1=master:2888:3888
server.2=slave1:2888:3888
server.3=slave2:2888:3888
mkdir data
echo 1 > myid
mysql
rm -rf /etc/my.cnf
cd mysql
mysqld --initialize --user=root --basedir=/opt/module/mysql --datadir=/opt/module/mysql/data
记录初始化密码
mysqld_safe --user=root &
回车
cd ../support-files
vi mysql.server
添加
basedir=/opt/module/mysql
datadir=/opt/module/mysql/data
启动mysql服务
mysql.server start
进入mysql中
mysql -uroot -p
初始化密码
修改密码
set password = "123456";
远程连接
grant all privileges on *.* to 'root'@'%' identified by '123456';
flush privileges;
设置开机mysql服务
cp mysql.server /etc/init.d/mysqld
chkconfig --add mysqld
chkconfig mysqld on