/**
Copyright (C) SYSTAP, LLC DBA Blazegraph 2006-2016. All rights reserved.
Contact:
SYSTAP, LLC DBA Blazegraph
2501 Calvert ST NW #106
Washington, DC 20008
[email protected]
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; version 2 of the License.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
/*
* Created on Apr 22, 2007
*/
package com.bigdata.service.ndx;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.concurrent.TimeUnit;
import com.bigdata.btree.IndexMetadata;
import com.bigdata.journal.ITx;
import com.bigdata.mdi.IMetadataIndex;
import com.bigdata.mdi.PartitionLocator;
import com.bigdata.resources.StaleLocatorException;
import com.bigdata.service.AbstractScaleOutFederation;
import com.bigdata.service.IDataService;
/**
*
* A client-side view of a scale-out index as of some timestamp.
*
*
* This view automatically handles the split, join, or move of index partitions
* within the federation. The {@link IDataService} throws back a (sometimes
* wrapped) {@link StaleLocatorException} when it does not have a registered
* index as of some timestamp. If this exception is observed when the client
* makes a request using a cached {@link PartitionLocator} record then the
* locator record is stale. The client automatically fetches the locator
* record(s) covering the same key range as the stale locator record and then
* re-issues the request against the index partitions identified in those
* locator record(s). This behavior correctly handles index partition split,
* merge, and move scenarios. The implementation of this policy is limited to
* exactly three places in the code: {@link AbstractDataServiceProcedureTask},
* {@link PartitionedTupleIterator}, and {@link DataServiceTupleIterator}.
*
*
* Note that only {@link ITx#UNISOLATED} and {@link ITx#READ_COMMITTED}
* operations are subject to stale locators since they are not based on a
* historical committed state of the database. Historical read and
* fully-isolated operations both read from historical committed states and the
* locators are never updated for historical states (only the current state of
* an index partition is split, joined, or moved - the historical states always
* remain behind).
*
*
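* <p>
* A minimal sketch of the retry pattern described above. The helpers
* <code>submitToPartition()</code>, <code>evict()</code>, and
* <code>locatorsFor()</code> are hypothetical names used only for
* illustration; the actual handling is implemented by the three classes
* named above.
*
* <pre>
* try {
*     // issue the request against the index partition identified by the
*     // cached locator record.
*     submitToPartition(cachedLocator, proc);
* } catch (StaleLocatorException ex) {
*     // the cached locator is stale: drop it, re-fetch the locator(s)
*     // covering the same key range, and re-issue the request against
*     // each index partition which they identify.
*     evict(cachedLocator);
*     for (PartitionLocator loc : locatorsFor(cachedLocator)) {
*         submitToPartition(loc, proc);
*     }
* }
* </pre>
*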
* @author Bryan Thompson
*/
public class ClientIndexViewRefactor extends AbstractScaleOutClientIndexView2 {
/**
* Create a view on a scale-out index.
*
* @param fed
* The federation containing the index.
* @param name
* The index name.
* @param timestamp
* A transaction identifier, {@link ITx#UNISOLATED} for the
* unisolated index view, {@link ITx#READ_COMMITTED}, or a
* timestamp for a historical view no later than the
* specified timestamp.
* @param metadataIndex
* The {@link IMetadataIndex} for the named scale-out index as of
* that timestamp. Note that the {@link IndexMetadata} on this
* object contains the template {@link IndexMetadata} for the
* scale-out index partitions.
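*
* A minimal usage sketch, assuming the federation exposes
* {@link AbstractScaleOutFederation#getMetadataIndex} to resolve the
* metadata index (verify against the federation API; the index name is
* illustrative):
*
* <pre>
* final IMetadataIndex mdi = fed.getMetadataIndex("testIndex",
*         ITx.READ_COMMITTED);
*
* final ClientIndexViewRefactor ndx = new ClientIndexViewRefactor(fed,
*         "testIndex", ITx.READ_COMMITTED, mdi);
* </pre>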
*/
public ClientIndexViewRefactor(final AbstractScaleOutFederation fed,
final String name, final long timestamp,
final IMetadataIndex metadataIndex) {
super(fed, name, timestamp, metadataIndex);
}
/**
* Runs a set of tasks.
*
* Note: If {@link #getRecursionDepth()} evaluates to a value larger than
* zero then the task(s) will be forced to execute in the caller's thread.
*
* {@link StaleLocatorException}s are handled by the recursive application
* of <code>submit()</code>. These recursively submitted tasks are forced
* to run in the caller's thread by incrementing the
* {@link #getRecursionDepth()} counter. This is done to prevent the thread
* pool from becoming deadlocked as threads wait on threads handling stale
* locator retries. The deadlock situation arises as soon as all threads in
* the thread pool are waiting on stale locator retries as there are no
* threads remaining to process those retries.
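*
* A sketch of that guard, as it would appear around a recursive
* <code>submit()</code> in a stale locator handler (hedged: the actual
* retry logic lives in {@link AbstractDataServiceProcedureTask}, and the
* key/procedure variables here are placeholders):
*
* <pre>
* getRecursionDepth().incrementAndGet();
* try {
*     // the re-issued request runs in the caller's thread, so no pool
*     // threads are consumed while awaiting the retry.
*     submit(fromKey, toKey, proc, resultHandler);
* } finally {
*     getRecursionDepth().decrementAndGet();
* }
* </pre>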
*
* @param parallel
* <code>true</code> iff the tasks MAY be run in parallel.
* @param tasks
* The tasks to be executed.
*/
protected void runTasks(final boolean parallel,
final ArrayList<AbstractDataServiceProcedureTask> tasks) {
if (tasks.isEmpty()) {
log.warn("No tasks to run?", new RuntimeException(
"No tasks to run?"));
return;
}
if (getRecursionDepth().get() > 0) {
/*
* Force sequential execution of the tasks in the caller's thread.
*/
runInCallersThread(tasks);
} else if (tasks.size() == 1) {
runOne(tasks.get(0));
} else if (parallel) {
/*
* Map procedure across the index partitions in parallel.
*/
runParallel(tasks);
} else {
/*
* Sequential execution against each split in turn.
*/
runSequence(tasks);
}
}
/**
* Runs a single {@link DataServiceProcedureTask} on the
* {@link #getThreadPool()} and awaits its completion.
*
* @param task
* The task.
*/
private void runOne(final AbstractDataServiceProcedureTask task) {
if (log.isInfoEnabled())
log.info("Running one task (#active="
+ getThreadPool().getActiveCount() + ", queueSize="
+ getThreadPool().getQueue().size() + ") : "
+ task.toString());
try {
final Future<Void> f = getThreadPool().submit(task);
// await completion of the task.
f.get(taskTimeout, TimeUnit.MILLISECONDS);
} catch (Exception e) {
if (log.isInfoEnabled())
log.info("Execution failed: task=" + task, e);
throw new ClientException("Execution failed: " + task, e);
}
}
/**
* Maps a set of {@link DataServiceProcedureTask} tasks across the index
* partitions in parallel.
*
* @param tasks
* The tasks.
*/
private void runParallel(
final ArrayList<AbstractDataServiceProcedureTask> tasks) {
final long begin = System.currentTimeMillis();
if(log.isInfoEnabled())
log.info("Running " + tasks.size() + " tasks in parallel (#active="
+ getThreadPool().getActiveCount() + ", queueSize="
+ getThreadPool().getQueue().size() + ") : "
+ tasks.get(0).toString());
int nfailed = 0;
final LinkedList<Throwable> causes = new LinkedList<Throwable>();
try {
final List<Future<Void>> futures = getThreadPool().invokeAll(tasks,
taskTimeout, TimeUnit.MILLISECONDS);
final Iterator<Future<Void>> itr = futures.iterator();
int i = 0;
while (itr.hasNext()) {
final Future<Void> f = itr.next();
try {
f.get();
} catch (ExecutionException e) {
final AbstractDataServiceProcedureTask task = tasks.get(i);
// log w/ stack trace so that we can see where this came
// from.
log.error("Execution failed: task=" + task, e);
if (task.causes != null) {
causes.addAll(task.causes);
} else {
causes.add(e);
}
nfailed++;
}
// advance so tasks.get(i) stays aligned with the futures iterator.
i++;
}
} catch (InterruptedException e) {
throw new RuntimeException("Interrupted: " + e, e);
}
if (nfailed > 0) {
throw new ClientException("Execution failed: ntasks="
+ tasks.size() + ", nfailed=" + nfailed, causes);
}
if (log.isInfoEnabled())
log.info("Ran " + tasks.size() + " tasks in parallel: elapsed="
+ (System.currentTimeMillis() - begin));
}
/**
* Maps a set of {@link DataServiceProcedureTask} tasks across the index
* partitions in strict sequence. The tasks are run on the
* {@link #getThreadPool()} so that sequential tasks never increase the
* total burden placed by the client above the size of that thread pool.
*
* @param tasks
* The tasks.
*/
private void runSequence(final ArrayList<AbstractDataServiceProcedureTask> tasks) {
if (log.isInfoEnabled())
log.info("Running " + tasks.size() + " tasks in sequence (#active="
+ getThreadPool().getActiveCount() + ", queueSize="
+ getThreadPool().getQueue().size() + ") : "
+ tasks.get(0).toString());
final Iterator<AbstractDataServiceProcedureTask> itr = tasks.iterator();
while (itr.hasNext()) {
final AbstractDataServiceProcedureTask task = itr.next();
try {
final Future<Void> f = getThreadPool().submit(task);
// await completion of the task.
f.get(taskTimeout, TimeUnit.MILLISECONDS);
} catch (Exception e) {
if (log.isInfoEnabled())
log.info("Execution failed: task=" + task, e);
throw new ClientException("Execution failed: " + task, e, task.causes);
}
}
}
/**
* Executes the tasks in the caller's thread.
*
* @param tasks
* The tasks.
*/
private void runInCallersThread(
final ArrayList<AbstractDataServiceProcedureTask> tasks) {
final int ntasks = tasks.size();
if (WARN && ntasks > 1)
log.warn("Running " + ntasks
+ " tasks in caller's thread: recursionDepth="
+ getRecursionDepth().get() + " (#active="
+ getThreadPool().getActiveCount() + ", queueSize="
+ getThreadPool().getQueue().size() + ") : "
+ tasks.get(0).toString());
final Iterator<AbstractDataServiceProcedureTask> itr = tasks.iterator();
while (itr.hasNext()) {
final AbstractDataServiceProcedureTask task = itr.next();
try {
task.call();
} catch (Exception e) {
// if (log.isInfoEnabled())
// log.info("Execution failed: task=" + task, e);
throw new ClientException("Execution failed: recursionDepth="
+ getRecursionDepth() + ", task=" + task, e,
task.causes);
}
}
}
}