<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>hbase.rootdir</name>
<value>hdfs://$htd-analytics-master$:9000/hbase</value>
</property>
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<property>
<name>hbase.regionserver.wal.codec</name>
<value>org.apache.hadoop.hbase.regionserver.wal.IndexedWALEditCodec</value>
</property>
<property>
<name>hbase.region.server.rpc.scheduler.factory.class</name>
<value>org.apache.hadoop.hbase.ipc.PhoenixRpcSchedulerFactory</value>
<description>Factory to create the Phoenix RPC Scheduler that knows to put index updates into index queues</description>
</property>
<property>
<name>hbase.master.loadbalancer.class</name>
<value>org.apache.phoenix.hbase.index.balancer.IndexLoadBalancer</value>
</property>
<property>
<name>hbase.coprocessor.master.classes</name>
<value>org.apache.phoenix.hbase.index.master.IndexMasterObserver</value>
</property>
<property>
<name>hbase.coprocessor.regionserver.classes</name>
<value>org.apache.hadoop.hbase.regionserver.LocalIndexMerger</value>
</property>
<property>
<name>hbase.zookeeper.quorum</name>
<value>$htd-analytics-master$</value>
</property>
<property>
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/opt/interset/data/hbase/zookeeper</value>
</property>
<property>
<name>dfs.client.read.shortcircuit</name>
<value>true</value>
<description>This configuration parameter turns on short-circuit local reads.</description>
</property>
<property>
<name>dfs.domain.socket.path</name>
<value>/home/interset/sockets/dn_socket</value>
<description>
Optional. This is a path to a UNIX domain socket that will be used for communication
between the DataNode and local HDFS clients. If the string "_PORT" is present in this path,
it will be replaced by the TCP port of the DataNode.
</description>
</property>
<property>
<name>phoenix.query.maxGlobalMemoryPercentage</name>
<value>75</value>
</property>
<property>
<name>phoenix.coprocessor.maxServerCacheTimeToLiveMs</name>
<value>60000</value>
</property>
<property>
<name>phoenix.query.maxServerCacheBytes</name>
<value>1073741824</value>
</property>
<property>
<name>phoenix.mutate.maxSize</name>
<value>5000000</value>
</property>
<property>
<name>phoenix.mutate.upsertBatchSize</name>
<value>100000</value>
</property>
<property>
<name>phoenix.mutate.batchSize</name>
<value>100000</value>
</property>
<property>
<name>phoenix.query.maxSpoolToDiskBytes</name>
<value>10737418240</value>
<description>Threshold size in bytes up to which results from parallel query execution are spooled to disk; above this threshold the query will fail.</description>
</property>
<!--property>
<name>hbase.hregion.max.filesize</name>
<value>107374182400</value>
</property>
<property>
<name>hbase.hregion.majorcompaction</name>
<value>0</value>
</property-->
<property>
<name>hbase.client.scanner.timeout.period</name>
<value>3600000</value>
<description>Client scanner lease period in milliseconds. The default is
60 seconds. A client scanner must report back to the region server within
this period, otherwise its lease expires and the scan fails.</description>
</property>
<property>
<name>phoenix.query.timeoutMs</name>
<value>36000000</value>
</property>
<property>
<name>phoenix.sequence.saltBuckets</name>
<value>8</value>
</property>
</configuration>