diff --git a/phoenix-assembly/pom.xml b/phoenix-assembly/pom.xml
index 078173fe621..2f5063acba4 100644
--- a/phoenix-assembly/pom.xml
+++ b/phoenix-assembly/pom.xml
@@ -252,6 +252,11 @@
       <artifactId>phoenix-hbase-compat-2.6.0</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-hbase-compat-2.6.4</artifactId>
+      <version>${project.version}</version>
+    </dependency>
diff --git a/phoenix-core-client/pom.xml b/phoenix-core-client/pom.xml
index 07769c9f685..deeb516ea12 100644
--- a/phoenix-core-client/pom.xml
+++ b/phoenix-core-client/pom.xml
@@ -292,6 +292,9 @@
|| ("${hbase.compat.version}".equals("2.6.0")
&& hbaseMinor == 6
&& hbasePatch >=0)
+ || ("${hbase.compat.version}".equals("2.6.4")
+ && hbaseMinor == 6
+ && hbasePatch >=4)
)
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
index dbca9f8b962..420d0d570fe 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/iterate/ScanningResultIterator.java
@@ -53,6 +53,7 @@
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.metrics.ScanMetrics;
import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.phoenix.compat.hbase.CompatScanMetrics;
import org.apache.phoenix.compile.ExplainPlanAttributes.ExplainPlanAttributesBuilder;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.exception.SQLExceptionCode;
@@ -163,6 +164,20 @@ private void updateMetrics() {
changeMetric(scanMetricsHolder.getCountOfBytesScanned(),
scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME));
changeMetric(scanMetricsHolder.getCountOfRowsPaged(), dummyRowCounter);
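+    // The values below are resolved through the CompatScanMetrics shim, which reports 0
+    // on HBase releases that do not publish these server-side scan metrics.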
+ changeMetric(scanMetricsHolder.getFsReadTime(),
+ CompatScanMetrics.getFsReadTime(scanMetricsMap));
+ changeMetric(scanMetricsHolder.getCountOfBytesReadFromFS(),
+ CompatScanMetrics.getBytesReadFromFs(scanMetricsMap));
+ changeMetric(scanMetricsHolder.getCountOfBytesReadFromMemstore(),
+ CompatScanMetrics.getBytesReadFromMemstore(scanMetricsMap));
+ changeMetric(scanMetricsHolder.getCountOfBytesReadFromBlockcache(),
+ CompatScanMetrics.getBytesReadFromBlockCache(scanMetricsMap));
+ changeMetric(scanMetricsHolder.getCountOfBlockReadOps(),
+ CompatScanMetrics.getBlockReadOpsCount(scanMetricsMap));
+ changeMetric(scanMetricsHolder.getRpcScanProcessingTime(),
+ CompatScanMetrics.getRpcScanProcessingTime(scanMetricsMap));
+ changeMetric(scanMetricsHolder.getRpcScanQueueWaitTime(),
+ CompatScanMetrics.getRpcScanQueueWaitTime(scanMetricsMap));
changeMetric(GLOBAL_SCAN_BYTES, scanMetricsMap.get(BYTES_IN_RESULTS_METRIC_NAME));
changeMetric(GLOBAL_HBASE_COUNT_RPC_CALLS, scanMetricsMap.get(RPC_CALLS_METRIC_NAME));
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
index 8ee8de69718..af70f9807b6 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/MetricType.java
@@ -261,6 +261,18 @@ public enum MetricType {
COUNT_REMOTE_RPC_RETRIES("rrr", "Number of remote RPC retries", LogLevel.DEBUG, PLong.INSTANCE),
COUNT_ROWS_SCANNED("ws", "Number of rows scanned", LogLevel.DEBUG, PLong.INSTANCE),
COUNT_ROWS_FILTERED("wf", "Number of rows filtered", LogLevel.DEBUG, PLong.INSTANCE),
+ FS_READ_TIME("frt", "Time spent in filesystem read", LogLevel.DEBUG, PLong.INSTANCE),
+ BYTES_READ_FROM_FS("brff", "Number of bytes read from filesystem", LogLevel.DEBUG,
+ PLong.INSTANCE),
+ BYTES_READ_FROM_MEMSTORE("brfm", "Number of bytes read from memstore", LogLevel.DEBUG,
+ PLong.INSTANCE),
+ BYTES_READ_FROM_BLOCKCACHE("brfc", "Number of bytes read from blockcache", LogLevel.DEBUG,
+ PLong.INSTANCE),
+ BLOCK_READ_OPS_COUNT("broc", "Number of block read operations", LogLevel.DEBUG, PLong.INSTANCE),
+ RPC_SCAN_PROCESSING_TIME("rsp", "Time spent in RPC scan processing", LogLevel.DEBUG,
+ PLong.INSTANCE),
+ RPC_SCAN_QUEUE_WAIT_TIME("rsqw", "Time spent in RPC scan queue wait", LogLevel.DEBUG,
+ PLong.INSTANCE),
COUNTER_METADATA_INCONSISTENCY("mi", "Number of times the metadata inconsistencies ",
LogLevel.DEBUG, PLong.INSTANCE),
NUM_SYSTEM_TABLE_RPC_SUCCESS("nstrs", "Number of successful system table RPC calls",
diff --git a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
index ea82467aa81..0c827602afc 100644
--- a/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
+++ b/phoenix-core-client/src/main/java/org/apache/phoenix/monitoring/ScanMetricsHolder.java
@@ -17,6 +17,10 @@
*/
package org.apache.phoenix.monitoring;
+import static org.apache.phoenix.monitoring.MetricType.BLOCK_READ_OPS_COUNT;
+import static org.apache.phoenix.monitoring.MetricType.BYTES_READ_FROM_BLOCKCACHE;
+import static org.apache.phoenix.monitoring.MetricType.BYTES_READ_FROM_FS;
+import static org.apache.phoenix.monitoring.MetricType.BYTES_READ_FROM_MEMSTORE;
import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_IN_REMOTE_RESULTS;
import static org.apache.phoenix.monitoring.MetricType.COUNT_BYTES_REGION_SERVER_RESULTS;
import static org.apache.phoenix.monitoring.MetricType.COUNT_MILLS_BETWEEN_NEXTS;
@@ -28,7 +32,10 @@
import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_CALLS;
import static org.apache.phoenix.monitoring.MetricType.COUNT_RPC_RETRIES;
import static org.apache.phoenix.monitoring.MetricType.COUNT_SCANNED_REGIONS;
+import static org.apache.phoenix.monitoring.MetricType.FS_READ_TIME;
import static org.apache.phoenix.monitoring.MetricType.PAGED_ROWS_COUNTER;
+import static org.apache.phoenix.monitoring.MetricType.RPC_SCAN_PROCESSING_TIME;
+import static org.apache.phoenix.monitoring.MetricType.RPC_SCAN_QUEUE_WAIT_TIME;
import static org.apache.phoenix.monitoring.MetricType.SCAN_BYTES;
import java.io.IOException;
@@ -52,6 +59,13 @@ public class ScanMetricsHolder {
private final CombinableMetric countOfRowsFiltered;
private final CombinableMetric countOfBytesScanned;
private final CombinableMetric countOfRowsPaged;
+ private final CombinableMetric fsReadTime;
+ private final CombinableMetric countOfBytesReadFromFS;
+ private final CombinableMetric countOfBytesReadFromMemstore;
+ private final CombinableMetric countOfBytesReadFromBlockcache;
+ private final CombinableMetric countOfBlockReadOps;
+ private final CombinableMetric rpcScanProcessingTime;
+ private final CombinableMetric rpcScanQueueWaitTime;
private Map<String, Long> scanMetricMap;
private Object scan;
@@ -83,6 +97,13 @@ private ScanMetricsHolder(ReadMetricQueue readMetrics, String tableName, Scan sc
countOfRowsFiltered = readMetrics.allotMetric(COUNT_ROWS_FILTERED, tableName);
countOfBytesScanned = readMetrics.allotMetric(SCAN_BYTES, tableName);
countOfRowsPaged = readMetrics.allotMetric(PAGED_ROWS_COUNTER, tableName);
+ fsReadTime = readMetrics.allotMetric(FS_READ_TIME, tableName);
+ countOfBytesReadFromFS = readMetrics.allotMetric(BYTES_READ_FROM_FS, tableName);
+ countOfBytesReadFromMemstore = readMetrics.allotMetric(BYTES_READ_FROM_MEMSTORE, tableName);
+ countOfBytesReadFromBlockcache = readMetrics.allotMetric(BYTES_READ_FROM_BLOCKCACHE, tableName);
+ countOfBlockReadOps = readMetrics.allotMetric(BLOCK_READ_OPS_COUNT, tableName);
+ rpcScanProcessingTime = readMetrics.allotMetric(RPC_SCAN_PROCESSING_TIME, tableName);
+ rpcScanQueueWaitTime = readMetrics.allotMetric(RPC_SCAN_QUEUE_WAIT_TIME, tableName);
}
public CombinableMetric getCountOfRemoteRPCcalls() {
@@ -141,6 +162,34 @@ public CombinableMetric getCountOfRowsPaged() {
return countOfRowsPaged;
}
+ public CombinableMetric getFsReadTime() {
+ return fsReadTime;
+ }
+
+ public CombinableMetric getCountOfBytesReadFromFS() {
+ return countOfBytesReadFromFS;
+ }
+
+ public CombinableMetric getCountOfBytesReadFromMemstore() {
+ return countOfBytesReadFromMemstore;
+ }
+
+ public CombinableMetric getCountOfBytesReadFromBlockcache() {
+ return countOfBytesReadFromBlockcache;
+ }
+
+ public CombinableMetric getCountOfBlockReadOps() {
+ return countOfBlockReadOps;
+ }
+
+ public CombinableMetric getRpcScanProcessingTime() {
+ return rpcScanProcessingTime;
+ }
+
+ public CombinableMetric getRpcScanQueueWaitTime() {
+ return rpcScanQueueWaitTime;
+ }
+
public void setScanMetricMap(Map<String, Long> scanMetricMap) {
this.scanMetricMap = scanMetricMap;
}
@@ -154,5 +203,4 @@ public String toString() {
return "{\"Exception while converting scan metrics to Json\":\"" + e.getMessage() + "\"}";
}
}
-
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DataTableScanMetrics.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DataTableScanMetrics.java
new file mode 100644
index 00000000000..51d441280dd
--- /dev/null
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/DataTableScanMetrics.java
@@ -0,0 +1,124 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.coprocessor;
+
+import java.util.Map;
+import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics;
+import org.apache.phoenix.compat.hbase.CompatScanMetrics;
+import org.apache.phoenix.compat.hbase.CompatThreadLocalServerSideScanMetrics;
+
+/**
+ * Stores scan metrics from data table operations performed during:
+ *
+ * - Uncovered global index scans
+ * - Read repair operations
+ *
+ * These metrics help identify latency variations that occur when both data table and index table
+ * are scanned together, and are used to populate {@link ThreadLocalServerSideScanMetrics} for index
+ * table RPC calls.
+ */
+public class DataTableScanMetrics {
+ private final long fsReadTimeInMs;
+ private final long bytesReadFromFS;
+ private final long bytesReadFromMemstore;
+ private final long bytesReadFromBlockcache;
+ private final long blockReadOps;
+
+ protected DataTableScanMetrics(long fsReadTimeInMs, long bytesReadFromFS,
+ long bytesReadFromMemstore, long bytesReadFromBlockcache, long blockReadOps) {
+ this.fsReadTimeInMs = fsReadTimeInMs;
+ this.bytesReadFromFS = bytesReadFromFS;
+ this.bytesReadFromMemstore = bytesReadFromMemstore;
+ this.bytesReadFromBlockcache = bytesReadFromBlockcache;
+ this.blockReadOps = blockReadOps;
+ }
+
+ public long getFsReadTimeInMs() {
+ return fsReadTimeInMs;
+ }
+
+ public long getBytesReadFromFS() {
+ return bytesReadFromFS;
+ }
+
+ public long getBytesReadFromMemstore() {
+ return bytesReadFromMemstore;
+ }
+
+ public long getBytesReadFromBlockcache() {
+ return bytesReadFromBlockcache;
+ }
+
+ public long getBlockReadOps() {
+ return blockReadOps;
+ }
+
+ public static class Builder {
+ protected long fsReadTimeInMs = 0;
+ protected long bytesReadFromFS = 0;
+ protected long bytesReadFromMemstore = 0;
+ protected long bytesReadFromBlockcache = 0;
+ protected long blockReadOps = 0;
+
+ public Builder setFsReadTimeInMs(long fsReadTimeInMs) {
+ this.fsReadTimeInMs = fsReadTimeInMs;
+ return this;
+ }
+
+ public Builder setBytesReadFromFS(long bytesReadFromFS) {
+ this.bytesReadFromFS = bytesReadFromFS;
+ return this;
+ }
+
+ public Builder setBytesReadFromMemstore(long bytesReadFromMemstore) {
+ this.bytesReadFromMemstore = bytesReadFromMemstore;
+ return this;
+ }
+
+ public Builder setBytesReadFromBlockcache(long bytesReadFromBlockcache) {
+ this.bytesReadFromBlockcache = bytesReadFromBlockcache;
+ return this;
+ }
+
+ public Builder setBlockReadOps(long blockReadOps) {
+ this.blockReadOps = blockReadOps;
+ return this;
+ }
+
+ public DataTableScanMetrics build() {
+ return new DataTableScanMetrics(fsReadTimeInMs, bytesReadFromFS, bytesReadFromMemstore,
+ bytesReadFromBlockcache, blockReadOps);
+ }
+ }
+
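+  // Copies the compat-resolved metric values from the raw scan-metrics map into the
+  // supplied builder; callers may pass a Builder subclass to capture extra fields.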
+ public static void buildDataTableScanMetrics(Map<String, Long> scanMetrics, Builder builder) {
+ builder.setFsReadTimeInMs(CompatScanMetrics.getFsReadTime(scanMetrics))
+ .setBytesReadFromFS(CompatScanMetrics.getBytesReadFromFs(scanMetrics))
+ .setBytesReadFromMemstore(CompatScanMetrics.getBytesReadFromMemstore(scanMetrics))
+ .setBytesReadFromBlockcache(CompatScanMetrics.getBytesReadFromBlockCache(scanMetrics))
+ .setBlockReadOps(CompatScanMetrics.getBlockReadOpsCount(scanMetrics));
+ }
+
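+  // Adds these data table metrics to the server-side thread-local counters so that they
+  // are included in the metrics of the index table RPC currently being served.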
+ public void populateThreadLocalServerSideScanMetrics() {
+ CompatThreadLocalServerSideScanMetrics.addFsReadTime(fsReadTimeInMs);
+ CompatThreadLocalServerSideScanMetrics.addBytesReadFromFs(bytesReadFromFS);
+ CompatThreadLocalServerSideScanMetrics.addBytesReadFromMemstore(bytesReadFromMemstore);
+ CompatThreadLocalServerSideScanMetrics.addBytesReadFromBlockCache(bytesReadFromBlockcache);
+ CompatThreadLocalServerSideScanMetrics.addBlockReadOpsCount(blockReadOps);
+ }
+}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java
index e0c9f6298fb..9279eadf4f9 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/coprocessor/UncoveredGlobalIndexRegionScanner.java
@@ -20,8 +20,10 @@
import static org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants.PHYSICAL_DATA_TABLE_NAME;
import java.io.IOException;
+import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
+import java.util.Map;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.ExecutionException;
@@ -71,6 +73,8 @@ public class UncoveredGlobalIndexRegionScanner extends UncoveredIndexRegionScann
protected final int rowCountPerTask;
protected String exceptionMessage;
protected final HTableFactory hTableFactory;
+ private final boolean isScanMetricsEnabled;
+ private List<DataTableScanMetricsWithScanTime> dataTableScanMetrics;
// This relies on Hadoop Configuration to handle warning about deprecated configs and
// to set the correct non-deprecated configs when an old one shows up.
@@ -101,6 +105,10 @@ public UncoveredGlobalIndexRegionScanner(final RegionScanner innerScanner, final
ScanUtil.addEmptyColumnToScan(dataTableScan, indexMaintainer.getDataEmptyKeyValueCF(),
indexMaintainer.getEmptyKeyValueQualifierForDataTable());
}
+ isScanMetricsEnabled = scan.isScanMetricsEnabled();
+ if (isScanMetricsEnabled) {
+ dataTableScanMetrics = new ArrayList<>();
+ }
}
@Override
@@ -117,6 +125,7 @@ protected void scanDataRows(Collection<byte[]> dataRowKeys, long startTime) thro
if (dataScan == null) {
return;
}
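+    // Collect per-scan metrics for the data table scan whenever the client enabled scan
+    // metrics on the index scan.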
+ dataScan.setScanMetricsEnabled(isScanMetricsEnabled);
try (ResultScanner resultScanner = dataHTable.getScanner(dataScan)) {
for (Result result = resultScanner.next(); (result != null); result = resultScanner.next()) {
if (ScanUtil.isDummy(result)) {
@@ -134,6 +143,13 @@ protected void scanDataRows(Collection<byte[]> dataRowKeys, long startTime) thro
+ region.getRegionInfo().getRegionNameAsString() + " could not complete on time (in "
+ pageSizeMs + " ms) and" + " will be resubmitted");
}
+ if (isScanMetricsEnabled) {
+ Map<String, Long> scanMetrics = resultScanner.getScanMetrics().getMetricsMap();
+ long scanTimeInMs = EnvironmentEdgeManager.currentTimeMillis() - startTime;
+ // Capture the scan time so the slowest parallel scan can be identified later; that
+ // scan is the one that slows down the whole merge with the data table.
+ dataTableScanMetrics.add(buildDataTableScanMetrics(scanMetrics, scanTimeInMs));
+ }
} catch (Throwable t) {
exceptionMessage = "scanDataRows fails for at least one task";
ClientUtil.throwIOException(dataHTable.getName().toString(), t);
@@ -208,10 +224,66 @@ protected void scanDataTableRows(long startTime) throws IOException {
addTasksForScanningDataTableRowsInParallel(tasks, setList.get(i), startTime);
}
submitTasks(tasks);
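+    // Report only the slowest data table scan: its metrics correspond to the scan that
+    // bounded the overall merge latency.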
+ if (isScanMetricsEnabled) {
+ DataTableScanMetricsWithScanTime dataTableScanMetricsForSlowestScan = null;
+ for (DataTableScanMetricsWithScanTime metrics : dataTableScanMetrics) {
+ if (
+ dataTableScanMetricsForSlowestScan == null
+ || dataTableScanMetricsForSlowestScan.getScanTimeInMs() < metrics.getScanTimeInMs()
+ ) {
+ dataTableScanMetricsForSlowestScan = metrics;
+ }
+ }
+ if (dataTableScanMetricsForSlowestScan != null) {
+ dataTableScanMetricsForSlowestScan.populateThreadLocalServerSideScanMetrics();
+ }
+ }
if (state == State.SCANNING_DATA_INTERRUPTED) {
state = State.SCANNING_DATA;
} else {
state = State.READY;
}
}
+
+ private static DataTableScanMetricsWithScanTime
+ buildDataTableScanMetrics(Map<String, Long> scanMetrics, long scanTimeInMs) {
+ DataTableScanMetricsWithScanTime.Builder builder =
+ new DataTableScanMetricsWithScanTime.Builder();
+ builder.setScanTimeInMs(scanTimeInMs);
+ DataTableScanMetrics.buildDataTableScanMetrics(scanMetrics, builder);
+ return builder.build();
+ }
+
+ private static class DataTableScanMetricsWithScanTime extends DataTableScanMetrics {
+ private final long scanTimeInMs;
+
+ public DataTableScanMetricsWithScanTime(long scanTimeInMs, long fsReadTimeInMs,
+ long bytesReadFromFS, long bytesReadFromMemstore, long bytesReadFromBlockcache,
+ long blockReadOps) {
+ super(fsReadTimeInMs, bytesReadFromFS, bytesReadFromMemstore, bytesReadFromBlockcache,
+ blockReadOps);
+ this.scanTimeInMs = scanTimeInMs;
+ }
+
+ public long getScanTimeInMs() {
+ return scanTimeInMs;
+ }
+
+ private static class Builder extends DataTableScanMetrics.Builder {
+ private long scanTimeInMs = 0;
+
+ public Builder setScanTimeInMs(long scanTimeInMs) {
+ this.scanTimeInMs = scanTimeInMs;
+ return this;
+ }
+
+ @Override
+ public DataTableScanMetricsWithScanTime build() {
+ return new DataTableScanMetricsWithScanTime(scanTimeInMs, fsReadTimeInMs, bytesReadFromFS,
+ bytesReadFromMemstore, bytesReadFromBlockcache, blockReadOps);
+ }
+ }
+ }
}
diff --git a/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java b/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
index 8e82f170362..2b814f9be17 100644
--- a/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
+++ b/phoenix-core-server/src/main/java/org/apache/phoenix/index/GlobalIndexChecker.java
@@ -29,6 +29,7 @@
import java.sql.SQLException;
import java.util.Iterator;
import java.util.List;
+import java.util.Map;
import java.util.Optional;
import java.util.Random;
import org.apache.hadoop.hbase.Cell;
@@ -57,6 +58,7 @@
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.phoenix.coprocessor.BaseRegionScanner;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
+import org.apache.phoenix.coprocessor.DataTableScanMetrics;
import org.apache.phoenix.coprocessor.DelegateRegionScanner;
import org.apache.phoenix.coprocessorclient.BaseScannerRegionObserverConstants;
import org.apache.phoenix.filter.EmptyColumnOnlyFilter;
@@ -154,6 +156,7 @@ public class GlobalIndexScanner extends BaseRegionScanner {
private String indexName;
private long pageSizeMs;
private boolean initialized = false;
+ private boolean isScanMetricsEnabled = false;
public GlobalIndexScanner(RegionCoprocessorEnvironment env, Scan scan, RegionScanner scanner,
GlobalIndexCheckerSource metricsSource) throws IOException {
@@ -185,6 +188,7 @@ public GlobalIndexScanner(RegionCoprocessorEnvironment env, Scan scan, RegionSca
DEFAULT_REPAIR_LOGGING_PERCENT);
random = new Random(EnvironmentEdgeManager.currentTimeMillis());
pageSizeMs = getPageSizeMsForRegionScanner(scan);
+ isScanMetricsEnabled = scan.isScanMetricsEnabled();
}
@Override
@@ -340,6 +344,12 @@ public long getMvccReadPoint() {
return scanner.getMvccReadPoint();
}
+ private DataTableScanMetrics buildDataTableScanMetrics(Map<String, Long> scanMetrics) {
+ DataTableScanMetrics.Builder builder = new DataTableScanMetrics.Builder();
+ DataTableScanMetrics.buildDataTableScanMetrics(scanMetrics, builder);
+ return builder.build();
+ }
+
private void repairIndexRows(byte[] indexRowKey, long ts, List<Cell> row) throws IOException {
if (buildIndexScanForDataTable == null) {
buildIndexScanForDataTable = new Scan();
@@ -397,8 +407,14 @@ private void repairIndexRows(byte[] indexRowKey, long ts, List<Cell> row) throws
buildIndexScanForDataTable.setAttribute(BaseScannerRegionObserverConstants.INDEX_ROW_KEY,
indexRowKey);
Result result = null;
+ buildIndexScanForDataTable.setScanMetricsEnabled(isScanMetricsEnabled);
try (ResultScanner resultScanner = dataHTable.getScanner(buildIndexScanForDataTable)) {
result = resultScanner.next();
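+      // Account for the cost of this data table lookup in the metrics of the index scan
+      // RPC that triggered the read repair.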
+ if (isScanMetricsEnabled) {
+ Map<String, Long> scanMetrics = resultScanner.getScanMetrics().getMetricsMap();
+ DataTableScanMetrics dataTableScanMetrics = buildDataTableScanMetrics(scanMetrics);
+ dataTableScanMetrics.populateThreadLocalServerSideScanMetrics();
+ }
} catch (Throwable t) {
ClientUtil.throwIOException(dataHTable.getName().toString(), t);
}
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/monitoring/HBaseScanMetricsIT.java b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/HBaseScanMetricsIT.java
new file mode 100644
index 00000000000..a8f86f7b277
--- /dev/null
+++ b/phoenix-core/src/it/java/org/apache/phoenix/monitoring/HBaseScanMetricsIT.java
@@ -0,0 +1,542 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.monitoring;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.ResultSet;
+import java.sql.SQLException;
+import java.sql.Statement;
+import java.util.Map;
+import java.util.concurrent.CountDownLatch;
+import java.util.concurrent.Executors;
+import java.util.concurrent.ThreadPoolExecutor;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import org.apache.hadoop.hbase.HConstants;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.regionserver.CompactSplit;
+import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.util.VersionInfo;
+import org.apache.phoenix.compile.ExplainPlan;
+import org.apache.phoenix.compile.ExplainPlanAttributes;
+import org.apache.phoenix.end2end.NeedsOwnMiniClusterTest;
+import org.apache.phoenix.hbase.index.IndexRegionObserver;
+import org.apache.phoenix.jdbc.PhoenixStatement;
+import org.apache.phoenix.query.BaseTest;
+import org.apache.phoenix.query.QueryServices;
+import org.apache.phoenix.util.PhoenixRuntime;
+import org.apache.phoenix.util.ReadOnlyProps;
+import org.apache.phoenix.util.TestUtil;
+import org.junit.Assert;
+import org.junit.Assume;
+import org.junit.BeforeClass;
+import org.junit.Test;
+import org.junit.experimental.categories.Category;
+
+import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
+
+@Category(NeedsOwnMiniClusterTest.class)
+public class HBaseScanMetricsIT extends BaseTest {
+
+ @BeforeClass
+ public static void setup() throws Exception {
+ Assume.assumeTrue(VersionInfo.compareVersion(VersionInfo.getVersion(), "2.6.3") > 0);
+ Map<String, String> props = Maps.newHashMapWithExpectedSize(4);
+ props.put(QueryServices.COLLECT_REQUEST_LEVEL_METRICS, "true");
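+    // Disable periodic memstore flushes and compactions so block read counts stay deterministic.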
+ props.put(HRegion.MEMSTORE_PERIODIC_FLUSH_INTERVAL, "0");
+ props.put(CompactSplit.HBASE_REGION_SERVER_ENABLE_COMPACTION, "false");
+ setUpTestDriver(new ReadOnlyProps(props));
+ }
+
+ @Test
+ public void testSinglePointLookupQuery() throws Exception {
+ String tableName = generateUniqueName();
+ String sql = "SELECT * FROM " + tableName + " WHERE k1 = 1 AND k2 = 'a'";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTableAndUpsertData(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(tableName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data Block + 1 Bloom Block
+ assertOnReadsFromFs(tableName, getQueryReadMetrics(rs), 2);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(tableName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testMultiPointLookupQueryWithoutBloomFilter() throws Exception {
+ String tableName = generateUniqueName();
+ String sql = "SELECT * FROM " + tableName + " WHERE k1 IN (1, 2, 3) AND k2 IN ('a', 'b', 'c')";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTableAndUpsertData(conn, tableName, "BLOOMFILTER='NONE'");
+ Statement stmt = conn.createStatement();
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(tableName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data Block
+ assertOnReadsFromFs(tableName, getQueryReadMetrics(rs), 1);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(tableName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testMultiPointLookupQueryWithBloomFilter() throws Exception {
+ String tableName = generateUniqueName();
+ String sql = "SELECT * FROM " + tableName + " WHERE k1 IN (1, 2, 3) AND k2 IN ('a', 'b', 'c')";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTableAndUpsertData(conn, tableName,
+ "\"phoenix.bloomfilter.multikey.pointlookup\"=true");
+ Statement stmt = conn.createStatement();
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(tableName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data Block + 1 Bloom Block
+ assertOnReadsFromFs(tableName, getQueryReadMetrics(rs), 2, true);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(tableName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testBytesReadInClientUpsertSelect() throws Exception {
+ String sourceTableName = generateUniqueName();
+ String targetTableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTableAndUpsertData(conn, sourceTableName, "");
+ createTable(conn, targetTableName, "");
+ assertOnReadsFromMemstore(sourceTableName,
+ getMutationReadMetrics(conn, targetTableName, sourceTableName, 1));
+ TestUtil.flush(utility, TableName.valueOf(sourceTableName));
+ // 1 Data Block from source table
+ assertOnReadsFromFs(sourceTableName,
+ getMutationReadMetrics(conn, targetTableName, sourceTableName, 2), 1);
+ assertOnReadsFromBlockcache(sourceTableName,
+ getMutationReadMetrics(conn, targetTableName, sourceTableName, 3));
+ }
+ }
+
+ @Test
+ public void testAggregateQueryWithoutGroupBy() throws Exception {
+ String tableName = generateUniqueName();
+ String sql = "SELECT MAX(v1) FROM " + tableName + " WHERE k1 = 1";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'a2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'c', 'c1', 'c2')");
+ conn.commit();
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(tableName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data Block from table
+ assertOnReadsFromFs(tableName, getQueryReadMetrics(rs), 1);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(tableName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testAggregateQueryWithGroupBy() throws Exception {
+ String tableName = generateUniqueName();
+ String sql = "SELECT MAX(v1) FROM " + tableName + " WHERE v2 = 'v2' GROUP BY k1";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'c', 'c1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'd', 'd1', 'd2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'e', 'e1', 'v2')");
+ conn.commit();
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(tableName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data Block from table
+ assertOnReadsFromFs(tableName, getQueryReadMetrics(rs), 1);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(tableName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testAggregateQueryWithGroupByAndOrderBy() throws Exception {
+ String tableName = generateUniqueName();
+ String sql = "SELECT v2, MAX(v1) FROM " + tableName + " GROUP BY v2 ORDER BY v2";
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'c', 'c1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'd', 'd1', 'd2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'e', 'e1', 'v2')");
+ conn.commit();
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(tableName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data Block from table
+ assertOnReadsFromFs(tableName, getQueryReadMetrics(rs), 1);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(tableName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testUnionAllQuery() throws Exception {
+ String tableName1 = generateUniqueName();
+ String tableName2 = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName1, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("UPSERT INTO " + tableName1 + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName1 + " (k1, k2, v1, v2) VALUES (1, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName1 + " (k1, k2, v1, v2) VALUES (1, 'c', 'c1', 'v2')");
+ conn.commit();
+ createTable(conn, tableName2, "");
+ stmt.execute("UPSERT INTO " + tableName2 + " (k1, k2, v1, v2) VALUES (3, 'd', 'd1', 'd2')");
+ stmt.execute("UPSERT INTO " + tableName2 + " (k1, k2, v1, v2) VALUES (3, 'e', 'e1', 'v2')");
+ conn.commit();
+ String sql = "SELECT MAX(v1) FROM (SELECT k1, v1 FROM " + tableName1
+ + " UNION ALL SELECT k1, v1 FROM " + tableName2 + ") GROUP BY k1 HAVING MAX(v1) > 'c1'";
+ ResultSet rs = stmt.executeQuery(sql);
+ Map<String, Map<String, Long>> readMetrics = getQueryReadMetrics(rs);
+ assertOnReadsFromMemstore(tableName1, readMetrics);
+ assertOnReadsFromMemstore(tableName2, readMetrics);
+ TestUtil.flush(utility, TableName.valueOf(tableName1));
+ TestUtil.flush(utility, TableName.valueOf(tableName2));
+ rs = stmt.executeQuery(sql);
+ // 1 Data block per table in UNION ALL
+ readMetrics = getQueryReadMetrics(rs);
+ assertOnReadsFromFs(tableName1, readMetrics, 1);
+ assertOnReadsFromFs(tableName2, readMetrics, 1);
+ rs = stmt.executeQuery(sql);
+ readMetrics = getQueryReadMetrics(rs);
+ assertOnReadsFromBlockcache(tableName1, readMetrics);
+ assertOnReadsFromBlockcache(tableName2, readMetrics);
+ }
+ }
+
+ @Test
+ public void testJoinQuery() throws Exception {
+ String tableName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'c', 'c1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'd', 'd1', 'd2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'e', 'e1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'f', 'f1', 'v2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (5, 'g', 'g1', 'v2')");
+ conn.commit();
+ String sql =
+ "SELECT a.k1 as k1, b.k2 as k2, b.v1 as v1, a.total_count as total_count FROM (SELECT k1, COUNT(*) as total_count FROM "
+ + tableName + " WHERE k1 IN (1, 3) GROUP BY k1) a JOIN (SELECT k1, k2, v1 FROM "
+ + tableName + " WHERE k1 IN (1, 3) AND k2 = 'a') b ON a.k1 = b.k1";
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(tableName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ rs = stmt.executeQuery(sql);
+ // 1 data block read from FS for the left table; the right table's data block is
+ // served from the block cache
+ assertOnReadsFromFs(tableName, getQueryReadMetrics(rs), 1, true);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(tableName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testQueryOnUncoveredIndex() throws Exception {
+ String tableName = generateUniqueName();
+ String indexName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("CREATE UNCOVERED INDEX " + indexName + " ON " + tableName + " (v1)");
+ conn.commit();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'a2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (2, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'c', 'c1', 'c2')");
+ conn.commit();
+ String sql = "SELECT k1, k2, v1, v2 FROM " + tableName + " WHERE v1 = 'b1'";
+ ExplainPlan explainPlan =
+ stmt.unwrap(PhoenixStatement.class).optimizeQuery(sql).getExplainPlan();
+ ExplainPlanAttributes planAttributes = explainPlan.getPlanStepsAsAttributes();
+ String tableNameFromExplainPlan = planAttributes.getTableName();
+ Assert.assertEquals(indexName, tableNameFromExplainPlan);
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(indexName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ TestUtil.flush(utility, TableName.valueOf(indexName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data block from index table and 1 data block from data table as index is uncovered
+ assertOnReadsFromFs(indexName, getQueryReadMetrics(rs), 2);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(indexName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testQueryOnCoveredIndexWithoutReadRepair() throws Exception {
+ String tableName = generateUniqueName();
+ String indexName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("CREATE INDEX " + indexName + " ON " + tableName + " (v1) INCLUDE (v2)");
+ conn.commit();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'a2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (2, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'c', 'c1', 'c2')");
+ conn.commit();
+ String sql = "SELECT k1, k2, v1, v2 FROM " + tableName + " WHERE v1 = 'b1'";
+ ExplainPlan explainPlan =
+ stmt.unwrap(PhoenixStatement.class).optimizeQuery(sql).getExplainPlan();
+ ExplainPlanAttributes planAttributes = explainPlan.getPlanStepsAsAttributes();
+ String tableNameFromExplainPlan = planAttributes.getTableName();
+ Assert.assertEquals(indexName, tableNameFromExplainPlan);
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(indexName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ TestUtil.flush(utility, TableName.valueOf(indexName));
+ rs = stmt.executeQuery(sql);
+ // 1 Data Block from index table
+ assertOnReadsFromFs(indexName, getQueryReadMetrics(rs), 1);
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(indexName, getQueryReadMetrics(rs));
+ }
+ }
+
+ @Test
+ public void testQueryOnCoveredIndexWithReadRepair() throws Exception {
+ String tableName = generateUniqueName();
+ String indexName = generateUniqueName();
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ stmt.execute("CREATE INDEX " + indexName + " ON " + tableName + " (v1) INCLUDE (v2)");
+ conn.commit();
+ IndexRegionObserver.setFailPostIndexUpdatesForTesting(true);
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'a1', 'a2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (2, 'b', 'b1', 'b2')");
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'c', 'c1', 'c2')");
+ conn.commit();
+ IndexRegionObserver.setFailPostIndexUpdatesForTesting(false);
+ TestUtil.dumpTable(conn, TableName.valueOf(indexName));
+ String sql = "SELECT k1, k2, v1, v2 FROM " + tableName + " WHERE v1 = 'b1'";
+ ResultSet rs = stmt.executeQuery(sql);
+ assertOnReadsFromMemstore(indexName, getQueryReadMetrics(rs));
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ TestUtil.flush(utility, TableName.valueOf(indexName));
+ sql = "SELECT k1, k2, v1, v2 FROM " + tableName + " WHERE v1 = 'c1'";
+ rs = stmt.executeQuery(sql);
+ // Expected FS reads: 1 data block of the index table from GlobalIndexScanner, 1 bloom
+ // block of the data table during read repair, and the same data table data block twice
+ // during read repair, because read repair opens the region scanner three times: the
+ // second opening has caching to the block cache disabled and the third has it enabled.
+ // The newly repaired column qualifier lands in the index table's memstore, and
+ // GlobalIndexScanner verifies that the row was repaired correctly, so reads also come
+ // from the memstore.
+ assertOnReadsFromFs(indexName, getQueryReadMetrics(rs), 4, true, true);
+ sql = "SELECT k1, k2, v1, v2 FROM " + tableName + " WHERE v1 = 'a1'";
+ rs = stmt.executeQuery(sql);
+ assertOnReadsFromBlockcache(indexName, getQueryReadMetrics(rs), true);
+ } finally {
+ IndexRegionObserver.setFailPostIndexUpdatesForTesting(false);
+ }
+ }
+
+ @Test
+ public void testScanRpcQueueWaitTime() throws Exception {
+ int handlerCount =
+ Integer.parseInt(utility.getConfiguration().get(HConstants.REGION_SERVER_HANDLER_COUNT));
+ int threadPoolSize = 6 * handlerCount;
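+    // Oversubscribe the region server handlers so scan RPCs queue up and accumulate a
+    // measurable RPC scan queue wait time.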
+ ThreadPoolExecutor executor = (ThreadPoolExecutor) Executors.newFixedThreadPool(threadPoolSize);
+ String tableName = generateUniqueName();
+ int numRows = 10000;
+ CountDownLatch latch = new CountDownLatch(threadPoolSize);
+ try (Connection conn = DriverManager.getConnection(getUrl())) {
+ createTable(conn, tableName, "");
+ Statement stmt = conn.createStatement();
+ for (int i = 1; i <= numRows; i++) {
+ stmt.execute(
+ "UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (" + i + ", 'a', 'v1', 'v2')");
+ if (i % 100 == 0) {
+ conn.commit();
+ }
+ }
+ conn.commit();
+ TestUtil.flush(utility, TableName.valueOf(tableName));
+ AtomicLong scanRpcQueueWaitTime = new AtomicLong(0);
+ AtomicLong rpcScanProcessingTime = new AtomicLong(0);
+ for (int i = 0; i < threadPoolSize; i++) {
+ executor.execute(new Runnable() {
+ @Override
+ public void run() {
+ try {
+ Statement stmt = conn.createStatement();
+ stmt.setFetchSize(2);
+ ResultSet rs = stmt.executeQuery("SELECT * FROM " + tableName);
+ int rowsRead = 0;
+ while (rs.next()) {
+ rowsRead++;
+ }
+ Assert.assertEquals(numRows, rowsRead);
+ Map<String, Map<String, Long>> readMetrics =
+ PhoenixRuntime.getRequestReadMetricInfo(rs);
+ scanRpcQueueWaitTime
+ .addAndGet(readMetrics.get(tableName).get(MetricType.RPC_SCAN_QUEUE_WAIT_TIME));
+ rpcScanProcessingTime
+ .addAndGet(readMetrics.get(tableName).get(MetricType.RPC_SCAN_PROCESSING_TIME));
+ } catch (SQLException e) {
+ throw new RuntimeException(e);
+ } finally {
+ latch.countDown();
+ }
+ }
+ });
+ }
+ latch.await();
+ Assert.assertTrue(scanRpcQueueWaitTime.get() > 0);
+ Assert.assertTrue(rpcScanProcessingTime.get() > 0);
+ } finally {
+ executor.shutdown();
+ executor.awaitTermination(10, TimeUnit.SECONDS);
+ }
+ }
+
+ private void createTable(Connection conn, String tableName, String ddlOptions) throws Exception {
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("CREATE TABLE " + tableName
+ + " (k1 INTEGER NOT NULL, k2 varchar NOT NULL, v1 VARCHAR, v2 VARCHAR, CONSTRAINT PK PRIMARY KEY (k1, k2)) "
+ + ddlOptions);
+ conn.commit();
+ }
+ }
+
+ private void createTableAndUpsertData(Connection conn, String tableName, String ddlOptions)
+ throws Exception {
+ createTable(conn, tableName, ddlOptions);
+ try (Statement stmt = conn.createStatement()) {
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (1, 'a', 'v1', 'v2')");
+ conn.commit();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (2, 'b', 'v1', 'v2')");
+ conn.commit();
+ stmt.execute("UPSERT INTO " + tableName + " (k1, k2, v1, v2) VALUES (3, 'c', 'v1', 'v2')");
+ conn.commit();
+ }
+ }
+
+ private void assertOnReadsFromMemstore(String tableName,
+ Map<String, Map<String, Long>> readMetrics) throws Exception {
+ Assert.assertTrue(readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_MEMSTORE) > 0);
+ Assert.assertEquals(0, (long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_FS));
+ Assert.assertEquals(0,
+ (long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_BLOCKCACHE));
+ }
+
+ private void assertOnReadsFromFs(String tableName, Map<String, Map<String, Long>> readMetrics,
+ long expectedBlocksReadOps) throws Exception {
+ assertOnReadsFromFs(tableName, readMetrics, expectedBlocksReadOps, false);
+ }
+
+ private void assertOnReadsFromFs(String tableName, Map<String, Map<String, Long>> readMetrics,
+ long expectedBlocksReadOps, boolean isReadFromBlockCacheExpected) throws Exception {
+ assertOnReadsFromFs(tableName, readMetrics, expectedBlocksReadOps, isReadFromBlockCacheExpected,
+ false);
+ }
+
+ private void assertOnReadsFromFs(String tableName, Map<String, Map<String, Long>> readMetrics,
+ long expectedBlocksReadOps, boolean isReadFromBlockCacheExpected,
+ boolean isReadFromMemstoreExpected) throws Exception {
+ Assert.assertTrue(readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_FS) > 0);
+ Assert.assertEquals(expectedBlocksReadOps,
+ (long) readMetrics.get(tableName).get(MetricType.BLOCK_READ_OPS_COUNT));
+ if (isReadFromMemstoreExpected) {
+ Assert
+ .assertTrue((long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_MEMSTORE) > 0);
+ } else {
+ Assert.assertEquals(0,
+ (long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_MEMSTORE));
+ }
+ if (isReadFromBlockCacheExpected) {
+ Assert.assertTrue(
+ (long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_BLOCKCACHE) > 0);
+ } else {
+ Assert.assertEquals(0,
+ (long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_BLOCKCACHE));
+ }
+ Assert.assertTrue(readMetrics.get(tableName).get(MetricType.RPC_SCAN_PROCESSING_TIME) > 0);
+ Assert.assertTrue(readMetrics.get(tableName).get(MetricType.FS_READ_TIME) > 0);
+ }
+
+ private void assertOnReadsFromBlockcache(String tableName,
+ Map<String, Map<String, Long>> readMetrics) throws Exception {
+ assertOnReadsFromBlockcache(tableName, readMetrics, false);
+ }
+
+ private void assertOnReadsFromBlockcache(String tableName,
+ Map<String, Map<String, Long>> readMetrics, boolean isReadFromMemstoreExpected)
+ throws Exception {
+ Assert.assertTrue(readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_BLOCKCACHE) > 0);
+ Assert.assertEquals(0, (long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_FS));
+ Assert.assertEquals(0, (long) readMetrics.get(tableName).get(MetricType.BLOCK_READ_OPS_COUNT));
+ if (isReadFromMemstoreExpected) {
+ Assert
+ .assertTrue((long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_MEMSTORE) > 0);
+ } else {
+ Assert.assertEquals(0,
+ (long) readMetrics.get(tableName).get(MetricType.BYTES_READ_FROM_MEMSTORE));
+ }
+ }
+
+ private Map<String, Map<String, Long>> getQueryReadMetrics(ResultSet rs) throws Exception {
+ int rowCount = 0;
+ while (rs.next()) {
+ rowCount++;
+ }
+ Assert.assertTrue(rowCount > 0);
+ Map<String, Map<String, Long>> readMetrics = PhoenixRuntime.getRequestReadMetricInfo(rs);
+ System.out.println("Query readMetrics: " + readMetrics);
+ return readMetrics;
+ }
+
+ private Map<String, Map<String, Long>> getMutationReadMetrics(Connection conn,
+ String targetTableName, String sourceTableName, int rowId) throws Exception {
+ Statement stmt = conn.createStatement();
+ stmt.execute("UPSERT INTO " + targetTableName + " (k1, k2, v1, v2) SELECT * FROM "
+ + sourceTableName + " WHERE k1 = " + rowId);
+ conn.commit();
+ Map<String, Map<String, Long>> readMetrics =
+ PhoenixRuntime.getReadMetricInfoForMutationsSinceLastReset(conn);
+ System.out.println("Mutation readMetrics: " + readMetrics);
+ PhoenixRuntime.resetMetrics(conn);
+ return readMetrics;
+ }
+}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
new file mode 100644
index 00000000000..5a96f371f4b
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.Map;
+
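+// This HBase release does not expose these scan metrics, so every getter reports 0;
+// real values are expected to be wired up only in the HBase 2.6.4+ compat module.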
+public class CompatScanMetrics {
+ private CompatScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static Long getFsReadTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromFs(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromMemstore(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromBlockCache(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBlockReadOpsCount(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getRpcScanProcessingTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getRpcScanQueueWaitTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+}
diff --git a/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
new file mode 100644
index 00000000000..89151d1fe3c
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.0/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
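+// No-op shim: the thread-local server-side counters for these metrics are not available
+// on this HBase release, so the add methods intentionally do nothing.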
+public class CompatThreadLocalServerSideScanMetrics {
+ private CompatThreadLocalServerSideScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static void addFsReadTime(long fsReadTimeInMs) {
+ }
+
+ public static void addBytesReadFromFs(long bytesReadFromFS) {
+ }
+
+ public static void addBytesReadFromMemstore(long bytesReadFromMemstore) {
+ }
+
+ public static void addBytesReadFromBlockCache(long bytesReadFromBlockCache) {
+ }
+
+ public static void addBlockReadOpsCount(long blockReadOpsCount) {
+ }
+}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
new file mode 100644
index 00000000000..5a96f371f4b
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.Map;
+
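+// This HBase release does not expose these scan metrics, so every getter reports 0;
+// real values are expected to be wired up only in the HBase 2.6.4+ compat module.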
+public class CompatScanMetrics {
+ private CompatScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static Long getFsReadTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromFs(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromMemstore(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromBlockCache(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBlockReadOpsCount(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getRpcScanProcessingTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getRpcScanQueueWaitTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+}
diff --git a/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
new file mode 100644
index 00000000000..89151d1fe3c
--- /dev/null
+++ b/phoenix-hbase-compat-2.5.4/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
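+// No-op shim: the thread-local server-side counters for these metrics are not available
+// on this HBase release, so the add methods intentionally do nothing.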
+public class CompatThreadLocalServerSideScanMetrics {
+ private CompatThreadLocalServerSideScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static void addFsReadTime(long fsReadTimeInMs) {
+ }
+
+ public static void addBytesReadFromFs(long bytesReadFromFS) {
+ }
+
+ public static void addBytesReadFromMemstore(long bytesReadFromMemstore) {
+ }
+
+ public static void addBytesReadFromBlockCache(long bytesReadFromBlockCache) {
+ }
+
+ public static void addBlockReadOpsCount(long blockReadOpsCount) {
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
new file mode 100644
index 00000000000..5a96f371f4b
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
@@ -0,0 +1,54 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.Map;
+
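+// This HBase release does not expose these scan metrics, so every getter reports 0;
+// real values are expected to be wired up only in the HBase 2.6.4+ compat module.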
+public class CompatScanMetrics {
+ private CompatScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static Long getFsReadTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromFs(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromMemstore(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBytesReadFromBlockCache(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getBlockReadOpsCount(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getRpcScanProcessingTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+
+ public static Long getRpcScanQueueWaitTime(Map<String, Long> scanMetrics) {
+ return 0L;
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
new file mode 100644
index 00000000000..89151d1fe3c
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.0/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
@@ -0,0 +1,39 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
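+/**
+ * No-op stub: HBase 2.6.0 does not expose ThreadLocalServerSideScanMetrics,
+ * so the values recorded here are intentionally dropped.
+ */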
+public class CompatThreadLocalServerSideScanMetrics {
+ private CompatThreadLocalServerSideScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static void addFsReadTime(long fsReadTimeInMs) {
+ }
+
+ public static void addBytesReadFromFs(long bytesReadFromFS) {
+ }
+
+ public static void addBytesReadFromMemstore(long bytesReadFromMemstore) {
+ }
+
+ public static void addBytesReadFromBlockCache(long bytesReadFromBlockCache) {
+ }
+
+ public static void addBlockReadOpsCount(long blockReadOpsCount) {
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.4/pom.xml b/phoenix-hbase-compat-2.6.4/pom.xml
new file mode 100644
index 00000000000..bdfa71fa1f4
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/pom.xml
@@ -0,0 +1,110 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+ Licensed to the Apache Software Foundation (ASF) under one
+ or more contributor license agreements. See the NOTICE file
+ distributed with this work for additional information
+ regarding copyright ownership. The ASF licenses this file
+ to you under the Apache License, Version 2.0 (the
+ "License"); you may not use this file except in compliance
+ with the License. You may obtain a copy of the License at
+
+   http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <modelVersion>4.0.0</modelVersion>
+  <parent>
+    <groupId>org.apache.phoenix</groupId>
+    <artifactId>phoenix</artifactId>
+    <version>5.3.0-SNAPSHOT</version>
+  </parent>
+
+  <artifactId>phoenix-hbase-compat-2.6.4</artifactId>
+  <name>Phoenix Hbase 2.6.4 compatibility</name>
+  <description>Compatibility module for HBase 2.6.4+</description>
+
+  <properties>
+    <hbase26.compat.version>2.6.4-SNAPSHOT</hbase26.compat.version>
+  </properties>
+
+  <dependencies>
+    <!-- HBase dependencies -->
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-client</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-common</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-server</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop-compat</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-hadoop2-compat</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-protocol-shaded</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-zookeeper</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-metrics</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hbase</groupId>
+      <artifactId>hbase-metrics-api</artifactId>
+      <version>${hbase26.compat.version}</version>
+      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-api</artifactId>
+      <scope>provided</scope>
+    </dependency>
+    <!-- Test dependencies -->
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>test</scope>
+    </dependency>
+  </dependencies>
+</project>
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/ByteStringer.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/ByteStringer.java
new file mode 100644
index 00000000000..e8c3f2fff89
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/ByteStringer.java
@@ -0,0 +1,32 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import com.google.protobuf.ByteString;
+
+// This has a different signature in the HBase 2 and 3 modules.
+// It only compiles after the maven-replacer plugin has relocated the protobuf classes.
+public class ByteStringer {
+
+ private ByteStringer() {
+ }
+
+ public static ByteString wrap(final byte[] array) {
+ return org.apache.hadoop.hbase.util.ByteStringer.wrap(array);
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateFilter.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateFilter.java
new file mode 100644
index 00000000000..c1fb96f4f00
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateFilter.java
@@ -0,0 +1,37 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterBase;
+
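+/**
+ * Delegating filter base class: filterKeyValue(Cell) is version-specific in the
+ * Filter API, so the delegation is isolated in this compat module.
+ */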
+public class CompatDelegateFilter extends FilterBase {
+ protected Filter delegate = null;
+
+ public CompatDelegateFilter(Filter delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public ReturnCode filterKeyValue(Cell v) throws IOException {
+ return delegate.filterKeyValue(v);
+ }
+
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
new file mode 100644
index 00000000000..ed634aa0cd2
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatDelegateHTable.java
@@ -0,0 +1,141 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.CompareOperator;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+
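+/**
+ * Delegating Table implementation covering the methods whose signatures differ
+ * between the supported HBase releases; the version-independent methods are
+ * left to Phoenix's concrete subclasses.
+ */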
+public abstract class CompatDelegateHTable implements Table {
+
+ protected final Table delegate;
+
+ public CompatDelegateHTable(Table delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public HTableDescriptor getTableDescriptor() throws IOException {
+ return delegate.getTableDescriptor();
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, Put put) throws IOException {
+ return delegate.checkAndPut(row, family, qualifier, compareOp, value, put);
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, Delete delete) throws IOException {
+ return delegate.checkAndDelete(row, family, qualifier, compareOp, value, delete);
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, RowMutations mutation) throws IOException {
+ return delegate.checkAndMutate(row, family, qualifier, compareOp, value, mutation);
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, byte[] value, Put put)
+ throws IOException {
+ return delegate.checkAndPut(row, family, qualifier, value, put);
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, byte[] value,
+ Delete delete) throws IOException {
+ return delegate.checkAndDelete(row, family, qualifier, value, delete);
+ }
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, Put put) throws IOException {
+ return delegate.checkAndPut(row, family, qualifier, op, value, put);
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, Delete delete) throws IOException {
+ return delegate.checkAndDelete(row, family, qualifier, op, value, delete);
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOperator op,
+ byte[] value, RowMutations mutation) throws IOException {
+ return delegate.checkAndMutate(row, family, qualifier, op, value, mutation);
+ }
+
+ @Override
+ public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
+ return delegate.checkAndMutate(row, family);
+ }
+
+ @Override
+ public void setOperationTimeout(int operationTimeout) {
+ delegate.setOperationTimeout(operationTimeout);
+ }
+
+ @Override
+ public int getOperationTimeout() {
+ return delegate.getOperationTimeout();
+ }
+
+ @Override
+ public int getRpcTimeout() {
+ return delegate.getRpcTimeout();
+ }
+
+ @Override
+ public void setRpcTimeout(int rpcTimeout) {
+ delegate.setRpcTimeout(rpcTimeout);
+ }
+
+ @Override
+ public int getReadRpcTimeout() {
+ return delegate.getReadRpcTimeout();
+ }
+
+ @Override
+ public void setReadRpcTimeout(int readRpcTimeout) {
+ delegate.setReadRpcTimeout(readRpcTimeout);
+ }
+
+ @Override
+ public int getWriteRpcTimeout() {
+ return delegate.getWriteRpcTimeout();
+ }
+
+ @Override
+ public void setWriteRpcTimeout(int writeRpcTimeout) {
+ delegate.setWriteRpcTimeout(writeRpcTimeout);
+ }
+
+ @Override
+ public boolean[] existsAll(List<Get> gets) throws IOException {
+ return delegate.existsAll(gets);
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
new file mode 100644
index 00000000000..ad39db49b93
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexHalfStoreFileReader.java
@@ -0,0 +1,38 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.io.hfile.CacheConfig;
+import org.apache.hadoop.hbase.io.hfile.HFileInfo;
+import org.apache.hadoop.hbase.io.hfile.ReaderContext;
+import org.apache.hadoop.hbase.regionserver.StoreFileInfo;
+import org.apache.hadoop.hbase.regionserver.StoreFileReader;
+
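+/**
+ * On HBase 2.6 the StoreFileReader constructor takes a StoreFileInfo, which is
+ * built here from the store file path.
+ */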
+public class CompatIndexHalfStoreFileReader extends StoreFileReader {
+
+ public CompatIndexHalfStoreFileReader(final FileSystem fs, final CacheConfig cacheConf,
+ final Configuration conf, final ReaderContext readerContext, final HFileInfo hFileInfo, Path p)
+ throws IOException {
+ super(readerContext, hFileInfo, cacheConf, new StoreFileInfo(conf, fs, p, true), conf);
+ }
+
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java
new file mode 100644
index 00000000000..c23b0be5d87
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatIndexedHLogReader.java
@@ -0,0 +1,24 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.regionserver.wal.ProtobufWALStreamReader;
+
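+// On HBase 2.6 the WAL stream reader API is ProtobufWALStreamReader; the
+// concrete indexed WAL reader builds on this bridge outside the compat module.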
+public abstract class CompatIndexedHLogReader extends ProtobufWALStreamReader {
+
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java
new file mode 100644
index 00000000000..62e328a54f9
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatLocalIndexStoreFileScanner.java
@@ -0,0 +1,33 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.StoreFileScanner;
+
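+/**
+ * Adapts the version-specific StoreFileScanner constructor; the final argument
+ * reports whether the HFile uses ROW_INDEX_V1 data block encoding.
+ */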
+public class CompatLocalIndexStoreFileScanner extends StoreFileScanner {
+
+ public CompatLocalIndexStoreFileScanner(CompatIndexHalfStoreFileReader reader,
+ boolean cacheBlocks, boolean pread, boolean isCompaction, long readPt, long scannerOrder,
+ boolean canOptimizeForNonNullColumn) {
+ super(reader, reader.getScanner(cacheBlocks, pread, isCompaction), !isCompaction,
+ reader.getHFileReader().hasMVCCInfo(), readPt, scannerOrder, canOptimizeForNonNullColumn,
+ reader.getHFileReader().getDataBlockEncoding() == DataBlockEncoding.ROW_INDEX_V1);
+ }
+
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
new file mode 100644
index 00000000000..03d12c84ed1
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatOmidTransactionTable.java
@@ -0,0 +1,51 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.Put;
+import org.apache.hadoop.hbase.client.RowMutations;
+import org.apache.hadoop.hbase.client.Table;
+import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
+
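+/**
+ * Omid transactional tables do not support the raw checkAnd* operations, so the
+ * HBase-version-specific overloads all throw UnsupportedOperationException.
+ */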
+public abstract class CompatOmidTransactionTable implements Table {
+
+ @Override
+ public boolean checkAndPut(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, Put put) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean checkAndDelete(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, Delete delete) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public boolean checkAndMutate(byte[] row, byte[] family, byte[] qualifier, CompareOp compareOp,
+ byte[] value, RowMutations mutation) throws IOException {
+ throw new UnsupportedOperationException();
+ }
+
+ @Override
+ public CheckAndMutateBuilder checkAndMutate(byte[] row, byte[] family) {
+ throw new UnsupportedOperationException();
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPagingFilter.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPagingFilter.java
new file mode 100644
index 00000000000..84108750fc1
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPagingFilter.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.filter.Filter;
+import org.apache.hadoop.hbase.filter.FilterBase;
+
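+/**
+ * Version-specific base class for Phoenix's paging filter: forwards
+ * filterKeyValue(Cell) to the delegate when one is set, otherwise falls back
+ * to the FilterBase default.
+ */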
+public abstract class CompatPagingFilter extends FilterBase {
+ protected Filter delegate = null;
+
+ public CompatPagingFilter(Filter delegate) {
+ this.delegate = delegate;
+ }
+
+ @Override
+ public ReturnCode filterKeyValue(Cell v) throws IOException {
+ if (delegate != null) {
+ return delegate.filterKeyValue(v);
+ }
+ return super.filterKeyValue(v);
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
new file mode 100644
index 00000000000..a59843351f2
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatPhoenixRpcScheduler.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import org.apache.hadoop.hbase.ipc.CallRunner;
+import org.apache.hadoop.hbase.ipc.RpcScheduler;
+
+/**
+ * {@link RpcScheduler} that first checks to see if this is an index or metadata update before
+ * passing off the call to the delegate {@link RpcScheduler}.
+ */
+public abstract class CompatPhoenixRpcScheduler extends RpcScheduler {
+ protected RpcScheduler delegate;
+
+ @Override
+ public boolean dispatch(CallRunner task) {
+ try {
+ return compatDispatch(task);
+ } catch (Exception e) {
+ // This should never happen with HBase 2.6
+ throw new RuntimeException(e);
+ }
+ }
+
+ public int getActiveRpcHandlerCount() {
+ return delegate.getActiveRpcHandlerCount();
+ }
+
+ @Override
+ public int getActiveBulkLoadRpcHandlerCount() {
+ return delegate.getActiveBulkLoadRpcHandlerCount();
+ }
+
+ @Override
+ public int getBulkLoadQueueLength() {
+ return delegate.getBulkLoadQueueLength();
+ }
+
+ public abstract boolean compatDispatch(CallRunner task) throws IOException, InterruptedException;
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
new file mode 100644
index 00000000000..c35e272077c
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatScanMetrics.java
@@ -0,0 +1,56 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.util.Map;
+import org.apache.hadoop.hbase.client.metrics.ServerSideScanMetrics;
+
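+/**
+ * Reads the server-side scan metrics published by the HBase releases this
+ * module supports straight from the scan metrics map, keyed by the
+ * ServerSideScanMetrics constants.
+ */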
+public class CompatScanMetrics {
+
+ private CompatScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static Long getFsReadTime(Map<String, Long> scanMetrics) {
+ return scanMetrics.get(ServerSideScanMetrics.FS_READ_TIME_METRIC_NAME);
+ }
+
+ public static Long getBytesReadFromFs(Map<String, Long> scanMetrics) {
+ return scanMetrics.get(ServerSideScanMetrics.BYTES_READ_FROM_FS_METRIC_NAME);
+ }
+
+ public static Long getBytesReadFromMemstore(Map<String, Long> scanMetrics) {
+ return scanMetrics.get(ServerSideScanMetrics.BYTES_READ_FROM_MEMSTORE_METRIC_NAME);
+ }
+
+ public static Long getBytesReadFromBlockCache(Map<String, Long> scanMetrics) {
+ return scanMetrics.get(ServerSideScanMetrics.BYTES_READ_FROM_BLOCK_CACHE_METRIC_NAME);
+ }
+
+ public static Long getBlockReadOpsCount(Map<String, Long> scanMetrics) {
+ return scanMetrics.get(ServerSideScanMetrics.BLOCK_READ_OPS_COUNT_METRIC_NAME);
+ }
+
+ public static Long getRpcScanProcessingTime(Map<String, Long> scanMetrics) {
+ return scanMetrics.get(ServerSideScanMetrics.RPC_SCAN_PROCESSING_TIME_METRIC_NAME);
+ }
+
+ public static Long getRpcScanQueueWaitTime(Map<String, Long> scanMetrics) {
+ return scanMetrics.get(ServerSideScanMetrics.RPC_SCAN_QUEUE_WAIT_TIME_METRIC_NAME);
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
new file mode 100644
index 00000000000..ab3b0a11e45
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatThreadLocalServerSideScanMetrics.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import org.apache.hadoop.hbase.monitoring.ThreadLocalServerSideScanMetrics;
+
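+/**
+ * Forwards the recorded values to HBase's ThreadLocalServerSideScanMetrics,
+ * which is available on the HBase releases this module supports.
+ */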
+public class CompatThreadLocalServerSideScanMetrics {
+ private CompatThreadLocalServerSideScanMetrics() {
+ // Not to be instantiated
+ }
+
+ public static void addFsReadTime(long fsReadTimeInMs) {
+ ThreadLocalServerSideScanMetrics.addFsReadTime(fsReadTimeInMs);
+ }
+
+ public static void addBytesReadFromFs(long bytesReadFromFS) {
+ ThreadLocalServerSideScanMetrics.addBytesReadFromFs(bytesReadFromFS);
+ }
+
+ public static void addBytesReadFromMemstore(long bytesReadFromMemstore) {
+ ThreadLocalServerSideScanMetrics.addBytesReadFromMemstore(bytesReadFromMemstore);
+ }
+
+ public static void addBytesReadFromBlockCache(long bytesReadFromBlockCache) {
+ ThreadLocalServerSideScanMetrics.addBytesReadFromBlockCache(bytesReadFromBlockCache);
+ }
+
+ public static void addBlockReadOpsCount(long blockReadOpsCount) {
+ ThreadLocalServerSideScanMetrics.addBlockReadOpsCount(blockReadOpsCount);
+ }
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
new file mode 100644
index 00000000000..38a940aa43e
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/CompatUtil.java
@@ -0,0 +1,41 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+import java.io.IOException;
+import java.util.List;
+import org.apache.hadoop.hbase.MetaTableAccessor;
+import org.apache.hadoop.hbase.client.Connection;
+import org.apache.hadoop.hbase.client.RegionInfo;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+public class CompatUtil {
+
+ private static final Logger LOGGER = LoggerFactory.getLogger(CompatUtil.class);
+
+ private CompatUtil() {
+ // Not to be instantiated
+ }
+
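+ /**
+ * MetaTableAccessor exposes getMergeRegions(Connection, RegionInfo) on the
+ * HBase releases this module supports, so the call is delegated directly.
+ */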
+ public static List<RegionInfo> getMergeRegions(Connection conn, RegionInfo regionInfo)
+ throws IOException {
+ return MetaTableAccessor.getMergeRegions(conn, regionInfo);
+ }
+
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
new file mode 100644
index 00000000000..ccd416a9143
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/HbaseCompatCapabilities.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.compat.hbase;
+
+public class HbaseCompatCapabilities {
+ // Currently every supported HBase version has the same capabilities, so there is
+ // nothing in here.
+}
diff --git a/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/package-info.java b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/package-info.java
new file mode 100644
index 00000000000..7c098f49719
--- /dev/null
+++ b/phoenix-hbase-compat-2.6.4/src/main/java/org/apache/phoenix/compat/hbase/package-info.java
@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * This package contains compatibility classes for bridging differences
+ * between different versions of HBase.
+ */
+package org.apache.phoenix.compat.hbase;
diff --git a/pom.xml b/pom.xml
index ccae93600f6..a1d633ee5e1 100644
--- a/pom.xml
+++ b/pom.xml
@@ -45,6 +45,7 @@
+    <module>phoenix-hbase-compat-2.6.4</module>
     <module>phoenix-hbase-compat-2.6.0</module>
     <module>phoenix-hbase-compat-2.5.4</module>
     <module>phoenix-hbase-compat-2.5.0</module>
@@ -53,7 +54,6 @@
     <module>phoenix-core</module>
     <module>phoenix-pherf</module>
     <module>phoenix-tracing-webapp</module>
-
@@ -78,7 +78,8 @@
     <hbase-2.5.runtime.version>2.5.10-hadoop3</hbase-2.5.runtime.version>
     <hbase-2.5.4.runtime.version>2.5.12-hadoop3</hbase-2.5.4.runtime.version>
     <hbase-2.6.0.runtime.version>2.6.1-hadoop3</hbase-2.6.0.runtime.version>
-    <hbase-2.6.runtime.version>2.6.3-hadoop3</hbase-2.6.runtime.version>
+    <hbase-2.6.3.runtime.version>2.6.3-hadoop3</hbase-2.6.3.runtime.version>
+    <hbase-2.6.runtime.version>2.6.4-SNAPSHOT</hbase-2.6.runtime.version>
     <compileSource>1.8</compileSource>
     <compileTarget>${compileSource}</compileTarget>
@@ -374,6 +375,11 @@
       <artifactId>phoenix-hbase-compat-2.6.0</artifactId>
       <version>${project.version}</version>
     </dependency>
+    <dependency>
+      <groupId>org.apache.phoenix</groupId>
+      <artifactId>phoenix-hbase-compat-2.6.4</artifactId>
+      <version>${project.version}</version>
+    </dependency>
     <dependency>
       <groupId>org.apache.phoenix</groupId>
@@ -2170,7 +2176,7 @@
-      <id>phoenix-hbase-compat-2.6.2</id>
+      <id>phoenix-hbase-compat-2.6.4</id>
       <activation>
         <property>
           <name>hbase.profile</name>
@@ -2180,10 +2186,27 @@
           <value>2.6</value>
         </property>
       </activation>
       <properties>
         <hbase.profile>2.6</hbase.profile>
-        <hbase.compat.version>2.6.0</hbase.compat.version>
+        <hbase.compat.version>2.6.4</hbase.compat.version>
         <hbase.version>${hbase-2.6.runtime.version}</hbase.version>
       </properties>
     </profile>
+    <profile>
+      <id>phoenix-hbase-compat-2.6.3</id>
+      <activation>
+        <property>
+          <name>hbase.profile</name>
+          <value>2.6.3</value>
+        </property>
+      </activation>
+      <properties>
+        <hbase.profile>2.6.3</hbase.profile>
+        <hadoop.version>3.4.1</hadoop.version>
+        <hbase.compat.version>2.6.0</hbase.compat.version>
+        <hbase.version>${hbase-2.6.3.runtime.version}</hbase.version>
+      </properties>
+    </profile>
     <profile>
       <id>phoenix-hbase-compat-2.6.0</id>