<?xml version="1.0" encoding="UTF-8"?>
<!--
  hdfs-site.xml - HDFS client-side configuration (FusionInsight deployment).
  Reconstructed from a scraped copy: removed the pasted filename/size header,
  the concatenated line-number artifact line, and the per-line "N." prefixes
  that made the document ill-formed XML. All property names, values, and
  their original order are preserved byte-for-byte.
-->
<configuration>
  <!-- Short-circuit local reads (via domain socket below); checksum skipped on that path. -->
  <property>
    <name>dfs.client.read.shortcircuit.skip.checksum</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.namenode.kerberos.principal.pattern</name>
    <value>*</value>
  </property>
  <property>
    <name>dfs.nameservices.mappings</name>
    <value>[{"name":"hacluster","roleInstances":["21","22"]}]</value>
  </property>
  <property>
    <name>dfs.client.https.need-auth</name>
    <value>false</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>3</value>
  </property>
  <property>
    <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
    <value>true</value>
  </property>
  <!-- Two nameservices: local "hacluster" and remote "haclusterX" (distcp target, see dfs.distcp). -->
  <property>
    <name>dfs.nameservices</name>
    <value>hacluster,haclusterX</value>
  </property>
  <property>
    <name>dfs.datanode.kerberos.https.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <property>
    <name>dfs.namenode.kerberos.https.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <property>
    <name>dfs.client.file-block-storage-locations.timeout.millis</name>
    <value>600000</value>
  </property>
  <property>
    <name>dfs.client.failover.connection.retries.on.timeouts</name>
    <value>0</value>
  </property>
  <property>
    <name>dfs.client.close.ack-timeout</name>
    <value>900000</value>
  </property>
  <!-- NOTE(review): remote NameNode addresses for haclusterX are intentionally
       left empty in this copy - presumably filled in per deployment; confirm. -->
  <property>
    <name>dfs.namenode.rpc-address.haclusterX.remotenn2</name>
    <value></value>
  </property>
  <property>
    <name>oi.dfs.colocation.zookeeper.quorum</name>
    <value>hd09:24002,hd08:24002,hd07:24002</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.haclusterX.remotenn1</name>
    <value></value>
  </property>
  <property>
    <name>dfs.web.authentication.kerberos.principal</name>
    <value>HTTP/_HOST@HADOOP.COM</value>
  </property>
  <property>
    <name>dfs.client.socket-timeout</name>
    <value>600000</value>
  </property>
  <property>
    <name>dfs.client.socketcache.expiryMsec</name>
    <value>900</value>
  </property>
  <property>
    <name>dfs.datanode.socket.write.timeout</name>
    <value>600000</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.haclusterX</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.client.failover.connection.retries</name>
    <value>0</value>
  </property>
  <property>
    <name>dfs.http.policy</name>
    <value>HTTPS_ONLY</value>
  </property>
  <property>
    <name>dfs.client.failover.proxy.provider.hacluster</name>
    <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
  </property>
  <property>
    <name>dfs.datanode.kerberos.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <property>
    <name>dfs.domain.socket.path</name>
    <value>/var/run/FusionInsight-HDFS/dn_socket</value>
  </property>
  <property>
    <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
    <value>DEFAULT</value>
  </property>
  <property>
    <name>dfs.client.read.shortcircuit</name>
    <value>true</value>
  </property>
  <property>
    <name>dfs.namenode.kerberos.principal</name>
    <value>hdfs/hadoop.hadoop.com@HADOOP.COM</value>
  </property>
  <!-- HA NameNodes for the local nameservice "hacluster" (instance ids 21 and 22). -->
  <property>
    <name>dfs.namenode.rpc-address.hacluster.21</name>
    <value>hd08:25000</value>
  </property>
  <property>
    <name>dfs.namenode.rpc-address.hacluster.22</name>
    <value>hd09:25000</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.hacluster</name>
    <value>21,22</value>
  </property>
  <property>
    <name>yarn.hdfs-site.customized.configs</name>
    <value></value>
  </property>
  <property>
    <name>ipc.client.connect.max.retries.on.timeouts</name>
    <value>45</value>
  </property>
  <!-- NOTE(review): capacity 0 disables the client socket cache, which makes
       the 900 ms expiryMsec above moot - confirm this pairing is intended. -->
  <property>
    <name>dfs.client.socketcache.capacity</name>
    <value>0</value>
  </property>
  <!-- 128 MiB block size. -->
  <property>
    <name>dfs.blocksize</name>
    <value>134217728</value>
  </property>
  <property>
    <name>dfs.datanode.address</name>
    <value>hd08:25009</value>
  </property>
  <property>
    <name>dfs.distcp</name>
    <value>haclusterX</value>
  </property>
  <property>
    <name>dfs.ha.namenodes.haclusterX</name>
    <value>remotenn1,remotenn2</value>
  </property>
  <property>
    <name>yarn.distcp.fs-limits.max-directory-items</name>
    <value>10000000</value>
  </property>
  <property>
    <name>dfs.datanode.socket.reuse.keepalive</name>
    <value>-1</value>
  </property>
  <property>
    <name>dfs.client.failover.max.attempts</name>
    <value>10</value>
  </property>
  <property>
    <name>dfs.datanode.http.address</name>
    <value>hd08:25010</value>
  </property>
  <property>
    <name>dfs.client.block.write.replace-datanode-on-failure.replication</name>
    <value>2</value>
  </property>
</configuration>