/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.phoenix.execute;

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.sql.ParameterMetaData;
import java.sql.SQLException;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
import org.apache.hadoop.hbase.io.TimeRange;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.hadoop.io.WritableUtils;
import org.apache.htrace.TraceScope;
import org.apache.phoenix.cache.ServerCacheClient.ServerCache;
import org.apache.phoenix.compile.ExplainPlan;
import org.apache.phoenix.compile.FromCompiler;
import org.apache.phoenix.compile.GroupByCompiler.GroupBy;
import org.apache.phoenix.compile.OrderByCompiler.OrderBy;
import org.apache.phoenix.compile.QueryPlan;
import org.apache.phoenix.compile.RowProjector;
import org.apache.phoenix.compile.ScanRanges;
import org.apache.phoenix.compile.StatementContext;
import org.apache.phoenix.compile.WhereCompiler;
import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
import org.apache.phoenix.expression.Expression;
import org.apache.phoenix.expression.ProjectedColumnExpression;
import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
import org.apache.phoenix.index.IndexMaintainer;
import org.apache.phoenix.iterate.DefaultParallelScanGrouper;
import org.apache.phoenix.iterate.DelegateResultIterator;
import org.apache.phoenix.iterate.ParallelIteratorFactory;
import org.apache.phoenix.iterate.ParallelScanGrouper;
import org.apache.phoenix.iterate.ResultIterator;
import org.apache.phoenix.jdbc.PhoenixConnection;
import org.apache.phoenix.jdbc.PhoenixStatement.Operation;
import org.apache.phoenix.parse.FilterableStatement;
import org.apache.phoenix.parse.HintNode.Hint;
import org.apache.phoenix.parse.ParseNodeFactory;
import org.apache.phoenix.parse.TableName;
import org.apache.phoenix.query.QueryConstants;
import org.apache.phoenix.schema.KeyValueSchema;
import org.apache.phoenix.schema.PColumn;
import org.apache.phoenix.schema.PName;
import org.apache.phoenix.schema.PTable;
import org.apache.phoenix.schema.PTable.ImmutableStorageScheme;
import org.apache.phoenix.schema.PTable.IndexType;
import org.apache.phoenix.schema.PTableType;
import org.apache.phoenix.schema.TableRef;
import org.apache.phoenix.trace.TracingIterator;
import org.apache.phoenix.trace.util.Tracing;
import org.apache.phoenix.util.ByteUtil;
import org.apache.phoenix.util.IndexUtil;
import org.apache.phoenix.util.LogUtil;
import org.apache.phoenix.util.SQLCloseables;
import org.apache.phoenix.util.ScanUtil;

import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Lists;

import static org.apache.phoenix.util.IndexUtil.serializeDataTableColumnsToJoin;
import static org.apache.phoenix.util.IndexUtil.serializeIndexMaintainerIntoScan;
import static org.apache.phoenix.util.IndexUtil.serializeSchemaIntoScan;
import static org.apache.phoenix.util.IndexUtil.serializeViewConstantsIntoScan;

/**
 * Query plan that has no child plans.
 *
 * @since 0.1
 */
public abstract class BaseQueryPlan implements QueryPlan {
    private static final Log LOG = LogFactory.getLog(BaseQueryPlan.class);
    protected static final long DEFAULT_ESTIMATED_SIZE = 10 * 1024; // 10 K
    
    protected final TableRef tableRef;
    protected final Set<TableRef> tableRefs;
    protected final StatementContext context;
    protected final FilterableStatement statement;
    protected final RowProjector projection;
    protected final ParameterMetaData paramMetaData;
    protected final Integer limit;
    protected final Integer offset;
    protected final OrderBy orderBy;
    protected final GroupBy groupBy;
    protected final ParallelIteratorFactory parallelIteratorFactory;    
    /*
     * The filter expression that contains CorrelateVariableFieldAccessExpression
     * nodes and therefore affects the ScanRanges. It will be recompiled at runtime
     * immediately before creating the ResultIterator.
     */
    protected final Expression dynamicFilter;
    protected Long estimatedRows;
    protected Long estimatedSize;
    protected Long estimateInfoTimestamp;
    private boolean explainPlanCalled;
    

    protected BaseQueryPlan(
            StatementContext context, FilterableStatement statement, TableRef table,
            RowProjector projection, ParameterMetaData paramMetaData, Integer limit, Integer offset, OrderBy orderBy,
            GroupBy groupBy, ParallelIteratorFactory parallelIteratorFactory,
            Expression dynamicFilter) {
        this.context = context;
        this.statement = statement;
        this.tableRef = table;
        this.tableRefs = ImmutableSet.of(table);
        this.projection = projection;
        this.paramMetaData = paramMetaData;
        this.limit = limit;
        this.offset = offset;
        this.orderBy = orderBy;
        this.groupBy = groupBy;
        this.parallelIteratorFactory = parallelIteratorFactory;
        this.dynamicFilter = dynamicFilter;
    }

    @Override
    public Operation getOperation() {
        return Operation.QUERY;
    }

    @Override
    public boolean isDegenerate() {
        return context.getScanRanges() == ScanRanges.NOTHING;
    }
    
    @Override
    public GroupBy getGroupBy() {
        return groupBy;
    }

    
    @Override
    public OrderBy getOrderBy() {
        return orderBy;
    }

    @Override
    public TableRef getTableRef() {
        return tableRef;
    }

    @Override
    public Set<TableRef> getSourceRefs() {
        return tableRefs;
    }

    @Override
    public Integer getLimit() {
        return limit;
    }
    
    @Override
    public Integer getOffset() {
        return offset;
    }

    @Override
    public RowProjector getProjector() {
        return projection;
    }
    
    public Expression getDynamicFilter() {
        return dynamicFilter;
    }

//    /**
//     * Sets up an id used to do round robin queue processing on the server
//     * @param scan
//     */
//    private void setProducer(Scan scan) {
//        byte[] producer = Bytes.toBytes(UUID.randomUUID().toString());
//        scan.setAttribute(HBaseServer.CALL_QUEUE_PRODUCER_ATTRIB_NAME, producer);
//    }

    @Override
    public final ResultIterator iterator() throws SQLException {
        return iterator(DefaultParallelScanGrouper.getInstance());
    }
    
    @Override
    public final ResultIterator iterator(ParallelScanGrouper scanGrouper) throws SQLException {
        return iterator(scanGrouper, null);
    }

    @Override
    public final ResultIterator iterator(ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
        return iterator(Collections.<ImmutableBytesPtr, ServerCache> emptyMap(), scanGrouper, scan);
    }

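    /**
     * Wraps the given iterator so that any server cache dependencies are released
     * when it is closed; returns the iterator unwrapped when there are no
     * dependencies.
     */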
    private ResultIterator getWrappedIterator(final Map<ImmutableBytesPtr, ServerCache> dependencies,
            ResultIterator iterator) {
        ResultIterator wrappedIterator = dependencies.isEmpty() ? iterator : new DelegateResultIterator(iterator) {
            @Override
            public void close() throws SQLException {
                try {
                    super.close();
                } finally {
                    SQLCloseables.closeAll(dependencies.values());
                }
            }
        };
        return wrappedIterator;
    }

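    /**
     * Builds the {@link ResultIterator} for this plan, configuring the scan
     * (consistency, time range, tenant ID, local index attributes, etc.) before
     * it is cloned for each parallelized chunk. The caches map holds server-side
     * caches the returned iterator depends on.
     */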
    public final ResultIterator iterator(final Map<ImmutableBytesPtr, ServerCache> caches,
            ParallelScanGrouper scanGrouper, Scan scan) throws SQLException {
        if (scan == null) {
            scan = context.getScan();
        }

        /*
         * For aggregate queries, we still need to let the AggregationPlan
         * proceed so that we can return proper aggregates even if there are
         * no rows to be scanned.
         */
        if (context.getScanRanges() == ScanRanges.NOTHING && !getStatement().isAggregate()) {
            return getWrappedIterator(caches, ResultIterator.EMPTY_ITERATOR);
        }
        
        if (tableRef == TableRef.EMPTY_TABLE_REF) {
            return newIterator(scanGrouper, scan, caches);
        }
        
        // Set miscellaneous scan attributes. This is the last chance to set them before we
        // clone the scan for each parallelized chunk.
        TableRef tableRef = context.getCurrentTable();
        PTable table = tableRef.getTable();
        
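        // Recompile the WHERE clause so the current values of any correlate
        // variables are reflected in the scan filter and ScanRanges.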
        if (dynamicFilter != null) {
            WhereCompiler.compile(context, statement, null, Collections.singletonList(dynamicFilter), false, null);            
        }
        
        if (OrderBy.REV_ROW_KEY_ORDER_BY.equals(orderBy)) {
            ScanUtil.setReversed(scan);
            // Hack for working around PHOENIX-3121 and HBASE-16296.
            // TODO: remove once PHOENIX-3121 and/or HBASE-16296 are fixed.
            int scannerCacheSize = context.getStatement().getFetchSize();
            if (limit != null && limit % scannerCacheSize == 0) {
                scan.setCaching(scannerCacheSize + 1);
            }
        }
        
        if (statement.getHint().hasHint(Hint.SMALL)) {
            scan.setSmall(true);
        }
        
        PhoenixConnection connection = context.getConnection();

        // set read consistency
        if (table.getType() != PTableType.SYSTEM) {
            scan.setConsistency(connection.getConsistency());
        }
        // TODO fix this in PHOENIX-2415 Support ROW_TIMESTAMP with transactional tables
        if (!table.isTransactional()) {
            // Get the time range of the row_timestamp column
            TimeRange rowTimestampRange = context.getScanRanges().getRowTimestampRange();
            // Get the time range already set on the scan.
            TimeRange scanTimeRange = scan.getTimeRange();
            Long scn = connection.getSCN();
            if (scn == null) {
                // Always use the latest timestamp unless an SCN is set or the table is
                // transactional (see PHOENIX-4089)
                scn = HConstants.LATEST_TIMESTAMP;
            }
            try {
                TimeRange timeRangeToUse = ScanUtil.intersectTimeRange(rowTimestampRange, scanTimeRange, scn);
                if (timeRangeToUse == null) {
                    return ResultIterator.EMPTY_ITERATOR;
                }
                scan.setTimeRange(timeRangeToUse.getMin(), timeRangeToUse.getMax());
            } catch (IOException e) {
                throw new RuntimeException(e);
            }
        }
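        // Compute the tenant ID bytes to set on the scan. For multi-tenant tables,
        // the tenant ID is a leading row key column, so it is serialized according
        // to the row key schema (accounting for salt bytes and view index IDs).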
        byte[] tenantIdBytes;
        if (table.isMultiTenant()) {
            tenantIdBytes = connection.getTenantId() == null ? null :
                    ScanUtil.getTenantIdBytes(
                            table.getRowKeySchema(),
                            table.getBucketNum() != null,
                            connection.getTenantId(), table.getViewIndexId() != null);
        } else {
            tenantIdBytes = connection.getTenantId() == null ? null : connection.getTenantId().getBytes();
        }

        ScanUtil.setTenantId(scan, tenantIdBytes);
        String customAnnotations = LogUtil.customAnnotationsToString(connection);
        ScanUtil.setCustomAnnotations(scan, customAnnotations == null ? null : customAnnotations.getBytes());
        // Set local index related scan attributes. 
        if (table.getIndexType() == IndexType.LOCAL) {
            ScanUtil.setLocalIndex(scan);
            Set<PColumn> dataColumns = context.getDataColumns();
            // If there are any data columns to join back from the data table, we set the
            // following attributes:
            // 1. data columns to be projected and their key value schema.
            // 2. index maintainer and view constants (if present) to build the data row key
            //    from the index row key.
            // TODO: we could have a hint to skip joining back to the data table; in that case,
            // if any column to project is not present in the index, we would need to skip this plan.
            if (!dataColumns.isEmpty()) {
                // Set data columns to be join back from data table.
                PTable parentTable = context.getCurrentTable().getTable();
                String parentSchemaName = parentTable.getParentSchemaName().getString();
                String parentTableName = parentTable.getParentTableName().getString();
                final ParseNodeFactory FACTORY = new ParseNodeFactory();
                // TODO: is it necessary to re-resolve the table?
                TableRef dataTableRef =
                        FromCompiler.getResolver(
                            FACTORY.namedTable(null, TableName.create(parentSchemaName, parentTableName)),
                            context.getConnection()).resolveTable(parentSchemaName, parentTableName);
                PTable dataTable = dataTableRef.getTable();
                // Set data columns to be join back from data table.
                serializeDataTableColumnsToJoin(scan, dataColumns, dataTable);
                KeyValueSchema schema = ProjectedColumnExpression.buildSchema(dataColumns);
                // Set key value schema of the data columns.
                serializeSchemaIntoScan(scan, schema);
                // Set index maintainer of the local index.
                serializeIndexMaintainerIntoScan(scan, dataTable, context);
                // Set view constants if exists.
                serializeViewConstantsIntoScan(scan, dataTable);
            }
        }
        
        if (LOG.isDebugEnabled()) {
            LOG.debug(LogUtil.addCustomAnnotations("Scan ready for iteration: " + scan, connection));
        }

        ResultIterator iterator = newIterator(scanGrouper, scan, caches);
        if (LOG.isDebugEnabled()) {
            LOG.debug(LogUtil.addCustomAnnotations("Iterator ready: " + iterator, connection));
        }

        // wrap the iterator so we start/end tracing as we expect
        TraceScope scope =
                Tracing.startNewSpan(context.getConnection(), "Creating basic query for "
                        + getPlanSteps(iterator));
        return (scope.getSpan() != null) ? new TracingIterator(scope, iterator) : iterator;
    }

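    /**
     * Creates the concrete ResultIterator for the fully configured scan.
     * Implemented by subclasses for their specific execution strategy
     * (e.g. plain scans vs. aggregation).
     */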
    protected abstract ResultIterator newIterator(ParallelScanGrouper scanGrouper, Scan scan,
            Map<ImmutableBytesPtr, ServerCache> caches) throws SQLException;
    
    @Override
    public long getEstimatedSize() {
        return DEFAULT_ESTIMATED_SIZE;
    }

    @Override
    public ParameterMetaData getParameterMetaData() {
        return paramMetaData;
    }

    @Override
    public FilterableStatement getStatement() {
        return statement;
    }

    @Override
    public StatementContext getContext() {
        return context;
    }

    @Override
    public ExplainPlan getExplainPlan() throws SQLException {
        explainPlanCalled = true;
        if (context.getScanRanges() == ScanRanges.NOTHING) {
            return new ExplainPlan(Collections.singletonList("DEGENERATE SCAN OVER " + getTableRef().getTable().getName().getString()));
        }
        
        // Optimize here when getting explain plan, as queries don't get optimized until after compilation
        QueryPlan plan = context.getConnection().getQueryServices().getOptimizer().optimize(context.getStatement(), this);
        ExplainPlan exp = plan instanceof BaseQueryPlan ? new ExplainPlan(getPlanSteps(plan.iterator())) : plan.getExplainPlan();
        this.estimatedRows = plan.getEstimatedRowsToScan();
        this.estimatedSize = plan.getEstimatedBytesToScan();
        this.estimateInfoTimestamp = plan.getEstimateInfoTimestamp();
        return exp;
    }

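    /**
     * Collects the human-readable EXPLAIN steps by asking the iterator to
     * describe itself.
     */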
    private List<String> getPlanSteps(ResultIterator iterator) {
        List<String> planSteps = Lists.newArrayListWithExpectedSize(5);
        iterator.explain(planSteps);
        return planSteps;
    }

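    /**
     * Output is in row key order when there is no GROUP BY and no explicit
     * ORDER BY expressions, or when the GROUP BY preserves row key order.
     */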
    @Override
    public boolean isRowKeyOrdered() {
        return groupBy.isEmpty() ? orderBy.getOrderByExpressions().isEmpty() : groupBy.isOrderPreserving();
    }
    
    @Override
    public Long getEstimatedRowsToScan() throws SQLException {
        if (!explainPlanCalled) {
            getExplainPlan();
        }
        return estimatedRows;
    }

    @Override
    public Long getEstimatedBytesToScan() throws SQLException {
        if (!explainPlanCalled) {
            getExplainPlan();
        }
        return estimatedSize;
    }

    @Override
    public Long getEstimateInfoTimestamp() throws SQLException {
        if (!explainPlanCalled) {
            getExplainPlan();
        }
        return estimateInfoTimestamp;
    }

    public ParallelIteratorFactory getParallelIteratorFactory() {
        return parallelIteratorFactory;
    }
}
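
// Minimal usage sketch (the JDBC URL and table name below are hypothetical):
// plans such as BaseQueryPlan are produced when the Phoenix JDBC driver compiles
// SQL, and the plan steps built by getExplainPlan() above surface through an
// EXPLAIN statement:
//
//   try (Connection conn = DriverManager.getConnection("jdbc:phoenix:localhost");
//        Statement stmt = conn.createStatement();
//        ResultSet rs = stmt.executeQuery("EXPLAIN SELECT * FROM MY_TABLE")) {
//       while (rs.next()) {
//           System.out.println(rs.getString(1)); // one plan step per row
//       }
//   }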