/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.phoenix.util;
import static org.apache.phoenix.schema.types.PDataType.ARRAY_TYPE_SUFFIX;
import static org.apache.phoenix.thirdparty.com.google.common.base.Preconditions.checkNotNull;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.Reader;
import java.nio.charset.StandardCharsets;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.SQLFeatureNotSupportedException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Properties;
import java.util.Set;
import java.util.TreeSet;
import javax.annotation.Nullable;
import org.apache.commons.lang3.StringEscapeUtils;
import org.apache.hadoop.hbase.Cell;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Mutation;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.util.Pair;
import org.apache.hbase.thirdparty.com.google.common.annotations.VisibleForTesting;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.LiteralExpression;
import org.apache.phoenix.expression.OrderByExpression;
import org.apache.phoenix.expression.RowKeyColumnExpression;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixMonitoredConnection;
import org.apache.phoenix.jdbc.PhoenixMonitoredResultSet;
import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
import org.apache.phoenix.jdbc.PhoenixStatement;
import org.apache.phoenix.monitoring.ConnectionQueryServicesMetric;
import org.apache.phoenix.monitoring.GlobalClientMetrics;
import org.apache.phoenix.monitoring.GlobalMetric;
import org.apache.phoenix.monitoring.HistogramDistribution;
import org.apache.phoenix.monitoring.MetricType;
import org.apache.phoenix.monitoring.PhoenixTableMetric;
import org.apache.phoenix.monitoring.TableMetricsManager;
import org.apache.phoenix.monitoring.connectionqueryservice.ConnectionQueryServicesMetricsManager;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.query.QueryServices;
import org.apache.phoenix.schema.AmbiguousColumnException;
import org.apache.phoenix.schema.ColumnNotFoundException;
import org.apache.phoenix.schema.KeyValueSchema;
import org.apache.phoenix.schema.KeyValueSchema.KeyValueSchemaBuilder;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PColumnFamily;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.RowKeyValueAccessor;
import org.apache.phoenix.schema.ValueBitSet;
import org.apache.phoenix.schema.types.PDataType;
import org.apache.phoenix.thirdparty.com.google.common.base.Function;
import org.apache.phoenix.thirdparty.com.google.common.base.Joiner;
import org.apache.phoenix.thirdparty.com.google.common.base.Splitter;
import org.apache.phoenix.thirdparty.com.google.common.collect.ImmutableList;
import org.apache.phoenix.thirdparty.com.google.common.collect.Lists;
import org.apache.phoenix.thirdparty.com.google.common.collect.Maps;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLine;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.CommandLineParser;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.DefaultParser;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.HelpFormatter;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.Option;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.Options;
import org.apache.phoenix.thirdparty.org.apache.commons.cli.ParseException;
/**
* Collection of non-JDBC-compliant utility methods.
*
* @since 0.1
*/
public class PhoenixRuntime {
// TODO: use Strings; char requires many error-prone conversions
public static final char JDBC_PROTOCOL_TERMINATOR = ';';
public static final char JDBC_PROTOCOL_SEPARATOR = ':';
/**
* JDBC URL jdbc protocol identifier
*/
public static final String JDBC_PROTOCOL_IDENTIFIER = "jdbc";
/**
* JDBC URL phoenix protocol identifier (protocol determined from Configuration)
*/
public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER = "phoenix";
/**
* JDBC URL phoenix protocol identifier for ZK HBase connection
*/
public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_ZK = "phoenix+zk";
/**
* JDBC URL phoenix protocol identifier for the deprecated Master based HBase connection
*/
public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_MASTER = "phoenix+master";
/**
* JDBC URL phoenix protocol identifier for RPC based HBase connection
*/
public static final String JDBC_PHOENIX_PROTOCOL_IDENTIFIER_RPC = "phoenix+rpc";
/**
* JDBC URL identifier for the Phoenix thin driver
*/
public static final String JDBC_PHOENIX_THIN_IDENTIFIER = "thin";
/**
* Root for the generic JDBC URL that Phoenix accepts.
*/
public static final String JDBC_PROTOCOL =
JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_PROTOCOL_IDENTIFIER;
/**
* Root for the explicit ZK JDBC URL that Phoenix accepts.
*/
public static final String JDBC_PROTOCOL_ZK =
JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR
+ JDBC_PHOENIX_PROTOCOL_IDENTIFIER_ZK;
/**
* Root for the explicit Master (HRPC) JDBC URL that Phoenix accepts.
*/
public static final String JDBC_PROTOCOL_MASTER =
JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR
+ JDBC_PHOENIX_PROTOCOL_IDENTIFIER_MASTER;
/**
* Root for the explicit RPC JDBC URL that Phoenix accepts.
*/
public static final String JDBC_PROTOCOL_RPC =
JDBC_PROTOCOL_IDENTIFIER + JDBC_PROTOCOL_SEPARATOR
+ JDBC_PHOENIX_PROTOCOL_IDENTIFIER_RPC;
/**
* Root for the JDBC URL used by the thin driver. Duplicated here to avoid dependencies between
* modules.
*/
public static final String JDBC_THIN_PROTOCOL =
JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + JDBC_PHOENIX_THIN_IDENTIFIER;
@Deprecated
public static final String EMBEDDED_JDBC_PROTOCOL =
PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR;
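// A minimal sketch of the fully-qualified URL forms built from the roots above
// (host names, ports, and the znode parent below are placeholders):
//
//   jdbc:phoenix:zk1,zk2,zk3:2181:/hbase       generic; transport determined from Configuration
//   jdbc:phoenix+zk:zk1,zk2,zk3:2181:/hbase    explicit ZooKeeper
//   jdbc:phoenix+master:master1:16000          deprecated Master-based transport
//   jdbc:phoenix+rpc:rs1,rs2:16020             RPC-based transport
//   jdbc:phoenix:thin:url=http://queryserver:8765;serialization=PROTOBUF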
/**
* Use this connection property to control HBase timestamps by specifying your own
* long timestamp value at connection time. Specifying this property forces the
* connection to be read-only: no DML or DDL will be allowed.
*/
public static final String CURRENT_SCN_ATTRIB = "CurrentSCN";
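// A minimal sketch (URL and timestamp value are placeholders) of pinning a
// connection to a fixed timestamp with CurrentSCN; per the javadoc above, the
// resulting connection is read-only:
//
//   Properties props = new Properties();
//   props.setProperty(PhoenixRuntime.CURRENT_SCN_ATTRIB, Long.toString(1000L));
//   try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
//       // queries here see data as of timestamp 1000
//   }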
/**
* Internal connection property to force an index to be built at a given timestamp.
*/
public static final String BUILD_INDEX_AT_ATTRIB = "BuildIndexAt";
/**
* Use this connection property to help with fairness of resource allocation
* for the client and server. The value of the attribute determines the
* bucket used to roll up resource usage for a particular tenant/organization. Each tenant
* may only use a percentage of total resources, governed by the {@link org.apache.phoenix.query.QueryServices}
* configuration properties.
*/
public static final String TENANT_ID_ATTRIB = "TenantId";
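// A minimal sketch (URL and tenant name are placeholders) of opening a
// tenant-specific connection:
//
//   Properties props = new Properties();
//   props.setProperty(PhoenixRuntime.TENANT_ID_ATTRIB, "acme");
//   try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props)) {
//       // statements on this connection are scoped to tenant "acme"
//   }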
/**
* Use this connection property to prevent an upgrade from occurring when
* connecting to a new server version.
*/
public static final String NO_UPGRADE_ATTRIB = "NoUpgrade";
/**
* Use this connection property to control the number of rows that are
* batched together on an UPSERT INTO table1... SELECT ... FROM table2.
* It's only used when autoCommit is true and your source table is
* different from your target table or your SELECT statement has a
* GROUP BY clause.
*/
public final static String UPSERT_BATCH_SIZE_ATTRIB = "UpsertBatchSize";
/**
* Use this connection property to control the number of bytes that are
* batched together on an UPSERT INTO table1... SELECT ... FROM table2.
* It's only used when autoCommit is true and your source table is
* different from your target table or your SELECT statement has a
* GROUP BY clause. Overrides the value of UpsertBatchSize.
*/
public final static String UPSERT_BATCH_SIZE_BYTES_ATTRIB = "UpsertBatchSizeBytes";
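// A minimal sketch (values are placeholders) of tuning UPSERT ... SELECT
// batching; when both properties are set, UpsertBatchSizeBytes overrides
// UpsertBatchSize per the javadoc above:
//
//   Properties props = new Properties();
//   props.setProperty(PhoenixRuntime.UPSERT_BATCH_SIZE_ATTRIB, "1000");
//   props.setProperty(PhoenixRuntime.UPSERT_BATCH_SIZE_BYTES_ATTRIB, "1048576");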
/**
* Use this connection property to explicitly enable or disable auto-commit on a new connection.
*/
public static final String AUTO_COMMIT_ATTRIB = "AutoCommit";
/**
* Use this connection property to explicitly set read consistency level on a new connection.
*/
public static final String CONSISTENCY_ATTRIB = "Consistency";
/**
* Use this connection property to explicitly enable or disable request level metric collection.
*/
public static final String REQUEST_METRIC_ATTRIB = "RequestMetric";
/**
* Use this column name on the row returned by the explain plan result set to get the
* estimated number of bytes read.
*/
public static final String EXPLAIN_PLAN_ESTIMATED_BYTES_READ_COLUMN =
PhoenixStatement.EXPLAIN_PLAN_BYTES_ESTIMATE_COLUMN_ALIAS;
/**
* Use this column name on the row returned by the explain plan result set to get the
* estimated number of rows read.
*/
public static final String EXPLAIN_PLAN_ESTIMATED_ROWS_READ_COLUMN =
PhoenixStatement.EXPLAIN_PLAN_ROWS_COLUMN_ALIAS;
/**
* Use this column name on the row returned by the explain plan result set to get the
* timestamp at which the estimate of the number of bytes/rows was collected.
*/
public static final String EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN =
PhoenixStatement.EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN_ALIAS;
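// A minimal sketch (table name is a placeholder) of reading the three
// estimate columns above from an EXPLAIN result set; the values come back
// null when no estimates are available:
//
//   try (Statement stmt = conn.createStatement();
//        ResultSet rs = stmt.executeQuery("EXPLAIN SELECT * FROM MY_TABLE")) {
//       while (rs.next()) {
//           Long bytes = (Long) rs.getObject(EXPLAIN_PLAN_ESTIMATED_BYTES_READ_COLUMN);
//           Long rows = (Long) rs.getObject(EXPLAIN_PLAN_ESTIMATED_ROWS_READ_COLUMN);
//           Long ts = (Long) rs.getObject(EXPLAIN_PLAN_ESTIMATE_INFO_TS_COLUMN);
//       }
//   }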
/**
* All Phoenix-specific connection properties.
* TODO: use enum instead
*/
private final static String[] CONNECTION_PROPERTIES = {
CURRENT_SCN_ATTRIB,
TENANT_ID_ATTRIB,
UPSERT_BATCH_SIZE_ATTRIB,
AUTO_COMMIT_ATTRIB,
CONSISTENCY_ATTRIB,
REQUEST_METRIC_ATTRIB,
};
/**
* Use this as the zookeeper quorum name to have a connection-less connection. This enables
* Phoenix-compatible HFiles to be created in a map/reduce job by creating tables,
* upserting data into them, and getting the uncommitted state through {@link #getUncommittedData(Connection)}
*/
public final static String CONNECTIONLESS = "none";
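// A minimal sketch of a connection-less connection using the quorum name above:
//
//   Connection conn = DriverManager.getConnection(
//       PhoenixRuntime.JDBC_PROTOCOL + PhoenixRuntime.JDBC_PROTOCOL_SEPARATOR
//           + PhoenixRuntime.CONNECTIONLESS);   // i.e. "jdbc:phoenix:none"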
/**
* Use this connection property prefix for annotations that you want to show up in traces and log lines emitted by Phoenix.
* This is useful for annotating connections with information available on the client (e.g. user or session identifier) and
* having these annotations automatically passed into log lines and traces by Phoenix.
*/
public static final String ANNOTATION_ATTRIB_PREFIX = "phoenix.annotation.";
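// A minimal sketch (annotation key and value are placeholders) of attaching a
// client-side annotation for Phoenix to propagate into traces and log lines:
//
//   Properties props = new Properties();
//   props.setProperty(PhoenixRuntime.ANNOTATION_ATTRIB_PREFIX + "user", "alice");
//   Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost", props);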
private static final String HEADER_IN_LINE = "in-line";
private static final String SQL_FILE_EXT = ".sql";
private static final String CSV_FILE_EXT = ".csv";
/**
* Provides a mechanism to run SQL scripts against a Phoenix cluster, where the arguments are:
* 1) connection URL string
* 2) one or more paths to either SQL scripts or CSV files
* If a CurrentSCN property is set on the connection URL, then it is incremented
* between processing, with each file being processed by a new connection at the
* incremented timestamp value.
*/
public static void main(String [] args) {
ExecutionCommand execCmd = ExecutionCommand.parseArgs(args);
String jdbcUrl;
if (execCmd.getConnectionString().startsWith(JDBC_PROTOCOL)) {
jdbcUrl = execCmd.getConnectionString();
} else {
jdbcUrl = JDBC_PROTOCOL + JDBC_PROTOCOL_SEPARATOR + execCmd.getConnectionString();
}
int exitStatus = 0;
PhoenixConnection conn = null;
try {
Properties props = new Properties();
if (execCmd.isLocalIndexUpgrade()) {
props.setProperty(QueryServices.LOCAL_INDEX_CLIENT_UPGRADE_ATTRIB, "false");
}
if (execCmd.binaryEncoding != null) {
props.setProperty(QueryServices.UPLOAD_BINARY_DATA_TYPE_ENCODING, execCmd.binaryEncoding);
}
conn = DriverManager.getConnection(jdbcUrl, props).unwrap(PhoenixConnection.class);
conn.setRunningUpgrade(true);
if (execCmd.isMapNamespace()) {
String srcTable = execCmd.getSrcTable();
System.out.println("Starting upgrading table:" + srcTable + "... please don't kill it in between!!");
UpgradeUtil.upgradeTable(conn, srcTable);
} else if (execCmd.isUpgrade()) {
if (conn.getClientInfo(PhoenixRuntime.CURRENT_SCN_ATTRIB) != null) {
throw new SQLException("May not specify the CURRENT_SCN property when upgrading");
}
if (conn.getClientInfo(PhoenixRuntime.TENANT_ID_ATTRIB) != null) {
throw new SQLException("May not specify the TENANT_ID_ATTRIB property when upgrading");
}
if (execCmd.getInputFiles().isEmpty()) {
List<String> tablesNeedingUpgrade = UpgradeUtil.getPhysicalTablesWithDescRowKey(conn);
if (tablesNeedingUpgrade.isEmpty()) {
String msg = "No tables are required to be upgraded due to incorrect row key order (PHOENIX-2067 and PHOENIX-2120)";
System.out.println(msg);
} else {
String msg = "The following tables require upgrade due to a bug causing the row key to be incorrectly ordered (PHOENIX-2067 and PHOENIX-2120):\n"
+ Joiner.on(' ').join(tablesNeedingUpgrade);
System.out.println("WARNING: " + msg);
}
List<String> unsupportedTables = UpgradeUtil.getPhysicalTablesWithDescVarbinaryRowKey(conn);
if (!unsupportedTables.isEmpty()) {
String msg = "The following tables use an unsupported VARBINARY DESC construct and need to be changed:\n"
+ Joiner.on(' ').join(unsupportedTables);
System.out.println("WARNING: " + msg);
}
} else {
UpgradeUtil.upgradeDescVarLengthRowKeys(conn, execCmd.getInputFiles(), execCmd.isBypassUpgrade());
}
} else if (execCmd.isLocalIndexUpgrade()) {
UpgradeUtil.upgradeLocalIndexes(conn);
} else {
for (String inputFile : execCmd.getInputFiles()) {
if (inputFile.endsWith(SQL_FILE_EXT)) {
PhoenixRuntime.executeStatements(conn, new InputStreamReader(
new FileInputStream(inputFile), StandardCharsets.UTF_8),
Collections.emptyList());
} else if (inputFile.endsWith(CSV_FILE_EXT)) {
String tableName = execCmd.getTableName();
if (tableName == null) {
tableName = SchemaUtil.normalizeIdentifier(
inputFile.substring(inputFile.lastIndexOf(File.separatorChar) + 1,
inputFile.length() - CSV_FILE_EXT.length()));
}
CSVCommonsLoader csvLoader = new CSVCommonsLoader(conn, tableName, execCmd.getColumns(),
execCmd.isStrict(), execCmd.getFieldDelimiter(), execCmd.getQuoteCharacter(),
execCmd.getEscapeCharacter(), execCmd.getArrayElementSeparator());
csvLoader.upsert(inputFile);
}
}
}
} catch (Throwable t) {
t.printStackTrace();
exitStatus = 1;
} finally {
if (conn != null) {
try {
conn.close();
} catch (SQLException e) {
// JVM is shutting down anyway, so swallow the exception.
}
}
System.exit(exitStatus);
}
}
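// An illustrative invocation of the script runner above (jar name, quorum,
// and file paths are placeholders):
//
//   java -cp phoenix-client.jar org.apache.phoenix.util.PhoenixRuntime \
//       localhost:2181 create_tables.sql data.csv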
public static final String PHOENIX_TEST_DRIVER_URL_PARAM = "test=true";
public static final String SCHEMA_ATTRIB = "schema";
private PhoenixRuntime() {
}
public static final String[] getConnectionProperties() {
return Arrays.copyOf(CONNECTION_PROPERTIES, CONNECTION_PROPERTIES.length);
}
/**
* Runs a series of semicolon-terminated SQL statements using the connection provided, returning
* the number of SQL statements executed. Note that if the connection has specified an SCN through
* the {@link org.apache.phoenix.util.PhoenixRuntime#CURRENT_SCN_ATTRIB} connection property, then the timestamp
* is bumped up by one after each statement execution.
* @param conn an open JDBC connection
* @param reader a reader for semicolon-separated SQL statements
* @param binds the binds for all statements
* @return the number of SQL statements that were executed
* @throws IOException
* @throws SQLException
*/
public static int executeStatements(Connection conn, Reader reader, List