- topology.sh配置文件
#!/bin/bash
# topology.sh - Hadoop rack-awareness mapping script (net.topology.script.file.name).
# Hadoop invokes it with one or more node names/IPs as arguments; for each argument
# it prints the matching rack from topology.data, or /default-rack when not found.
# topology.data format: one "<host-or-ip> <rack>" pair per line.
HADOOP_CONF=/home/hadoop/hadoop-2.9.2/etc/hadoop

# resolve_rack <node>
# Looks up <node> (e.g. 192.168.100.106) in ${HADOOP_CONF}/topology.data and
# writes the rack to stdout without a trailing newline (Hadoop concatenates
# the outputs for multiple arguments).
resolve_rack() {
  # NOTE: original script assigned "args=$1" but compared against "$arg",
  # so every lookup silently fell through to /default-rack. Fixed here.
  local arg=$1
  local result=""
  local line
  # IFS= + read -r: preserve whitespace and backslashes in each data line.
  while IFS= read -r line; do
    # Intentional word-splitting: column 0 = node, column 1 = rack.
    ar=( $line )
    if [ "${ar[0]}" = "$arg" ]; then
      result="${ar[1]}"
      break
    fi
  done < "${HADOOP_CONF}/topology.data"
  if [ -z "$result" ]; then
    echo -n "/default-rack"
  else
    echo -n "$result"
  fi
}

# Process every argument Hadoop passes in.
while [ $# -gt 0 ]; do
  resolve_rack "$1"
  shift
done
- hdfs-site.xml 配置文件:
<property>
<name>dfs.namenode.name.dir</name>
<value>file:/home/hadoop/hadoop-2.9.2/hdfs/name</value>
</property>
<br />
<property>
<name>dfs.datanode.data.dir</name>
<value>file:/home/hadoop/hadoop-2.9.2/hdfs/data</value>
</property>
<br />
<property>
<name>dfs.replication</name>
<value>2</value>
</property>
<br />
<property>
<name>dfs.webhdfs.enabled</name>
<value>true</value>
</property>
<br />
<property>
<name>dfs.nameservices</name>
<value>clusterA</value>
</property>
<br />
<property>
<name>dfs.ha.namenodes.clusterA</name>
<value>nn1,nn2</value>
</property>
<br />
<property>
<name>dfs.namenode.rpc-address.clusterA.nn1</name>
<value>master:9000</value>
</property>
<br />
<property>
<name>dfs.namenode.http-address.clusterA.nn1</name>
<value>master:50070</value>
</property>
<br />
<property>
<name>dfs.namenode.rpc-address.clusterA.nn2</name>
<value>master-st:9000</value>
</property>
<br />
<property>
<name>dfs.namenode.http-address.clusterA.nn2</name>
<value>master-st:50070</value>
</property>
<br />
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://master:8485;clz1:8485;clz2:8485/clusterA</value>
</property>
<br />
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/home/hadoop/hadoop-2.9.2/journal</value>
</property>
<br />
<property>
<name>dfs.ha.automatic-failover.enabled.clusterA</name>
<value>true</value>
</property>
<br />
<property>
<name>dfs.client.failover.proxy.provider.clusterA</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<br />
<property>
<name>dfs.ha.fencing.methods</name>
<value>
sshfence
shell(/bin/true)
</value>
</property>
<br />
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_dsa</value>
</property>
<br />
<property>
<name>dfs.ha.fencing.ssh.connect-timeout</name>
<value>10000</value>
</property>
- core-site.xml配置文件
<property>
<name>fs.defaultFS</name>
<value>hdfs://clusterA</value>
</property>
<br />
<property>
<name>io.file.buffer.size</name>
<value>131072</value>
</property>
<br />
<property>
<name>net.topology.script.file.name</name>
<value>/home/hadoop/hadoop-2.9.2/etc/hadoop/topology.sh</value>
</property>
<br />
<property>
<name>ha.zookeeper.quorum</name>
<value>master:2181,clz1:2181,clz2:2181</value>
</property>
<br />