<?xml version="1.0" encoding="UTF-8"?>
<!--
  HDFS client-side configuration (hdfs-site.xml) for a Kerberos-secured
  FusionInsight Hadoop deployment.

  Two nameservices are declared: the local HA cluster "hacluster"
  (NameNodes "21"/"22" on hd08/hd09) and a remote cluster "haclusterX"
  used as a distcp target (see dfs.distcp).

  NOTE(review): this file was recovered from a corrupted paste (leading
  "- " markers and a fused line-number gutter); property names, values,
  and ordering are preserved exactly from the recovered content.
-->
<configuration>
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.namenode.kerberos.principal.pattern</name>
    <value>*</value>
  </property>
  <property>
    <name>dfs.nameservices.mappings</name>
    <value>[{"name":"hacluster","roleInstances":["21","22"]}]</value>
  </property>
  <property>
    <name>dfs.client.https.need-auth</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.nameservices</name>
    <value>hacluster,haclusterX</value>
  </property>
  <property>
    <name>dfs.datanode.kerberos.https.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <property>
    <name>dfs.namenode.kerberos.https.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <property>
    <name>dfs.client.file-block-storage-locations.timeout.millis</name>
    <value>600000</value>
  </property>
  <property>
    <name>dfs.client.failover.connection.retries.on.timeouts</name>
    <value>0</value>
  </property>
  <property>
    <name>dfs.client.close.ack-timeout</name>
    <value>900000</value>
  </property>
  <!-- NOTE(review): the remote haclusterX NameNode RPC addresses are empty.
       They must be filled in before haclusterX can actually be resolved;
       confirm the intended remote endpoints with the cluster operator. -->
  <property>
    <name>dfs.namenode.rpc-address.haclusterX.remotenn2</name>
    <value></value>
  </property>
  <property>
    <name>oi.dfs.colocation.zookeeper.quorum</name>
    <value>hd09:24002,hd08:24002,hd07:24002</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.haclusterX.remotenn1</name>
    <value></value>
  </property>
  <property>
    <name>dfs.web.authentication.kerberos.principal</name>
    <value>HTTP/_HOST@HADOOP.COM</value>
  </property>
  <property>
    <name>dfs.client.socket-timeout</name>
    <value>600000</value>
  </property>
  <property>
    <name>dfs.client.socketcache.expiryMsec</name>
    <value>900</value>
  </property>
  <property>
    <name>dfs.datanode.socket.write.timeout</name>
    <value>600000</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.haclusterX</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.client.failover.connection.retries</name>
    <value>0</value>
  </property>
  <property>
    <name>dfs.http.policy</name>
    <value>HTTPS_ONLY</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.hacluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.datanode.kerberos.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <!-- Short-circuit local reads use this UNIX domain socket path. -->
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/FusionInsight-HDFS/dn_socket</value>
  </property>
  <property>
    <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
    <value>DEFAULT</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.namenode.kerberos.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <!-- Local HA nameservice "hacluster": NameNodes "21" and "22". -->
  <property>
    <name>dfs.namenode.rpc-address.hacluster.21</name>
    <value>hd08:25000</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hacluster.22</name>
    <value>hd09:25000</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.hacluster</name>
    <value>21,22</value>
  </property>
  <property>
    <name>yarn.hdfs-site.customized.configs</name>
    <value></value>
  </property>
  <property>
    <name>ipc.client.connect.max.retries.on.timeouts</name>
    <value>45</value>
  </property>
  <property>
    <name>dfs.client.socketcache.capacity</name>
    <value>0</value>
  </property>
  <!-- 134217728 = 128 MiB default block size. -->
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
  </property>
  <!-- NOTE(review): a single fixed DataNode address is unusual in a
       client-side file; confirm this is intentional for this host. -->
  <property>
    <name>dfs.datanode.address</name>
    <value>hd08:25009</value>
  </property>
  <property>
    <name>dfs.distcp</name>
    <value>haclusterX</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.haclusterX</name>
    <value>remotenn1,remotenn2</value>
  </property>
  <property>
    <name>yarn.distcp.fs-limits.max-directory-items</name>
    <value>10000000</value>
  </property>
  <property>
    <name>dfs.datanode.socket.reuse.keepalive</name>
    <value>-1</value>
  </property>
  <property>
    <name>dfs.client.failover.max.attempts</name>
    <value>10</value>
  </property>
  <property>
    <name>dfs.datanode.http.address</name>
    <value>hd08:25010</value>
  </property>
  <property>
    <name>dfs.client.block.write.replace-datanode-on-failure.replication</name>
    <value>2</value>
  </property>
</configuration>