import com.google.storage.onestore.v3.OnestoreEntity.CompositeIndex;
import com.google.storage.onestore.v3.OnestoreEntity.EntityProto;
import com.google.storage.onestore.v3.OnestoreEntity.Index;
import com.google.storage.onestore.v3.OnestoreEntity.IndexPostfix;
import com.google.storage.onestore.v3.OnestoreEntity.IndexPostfix_IndexValue;
import com.google.storage.onestore.v3.OnestoreEntity.Path;
import com.google.storage.onestore.v3.OnestoreEntity.Path.Element;
import com.google.storage.onestore.v3.OnestoreEntity.Property;
import com.google.storage.onestore.v3.OnestoreEntity.Property.Meaning;
import com.google.storage.onestore.v3.OnestoreEntity.PropertyValue;
import com.google.storage.onestore.v3.OnestoreEntity.Reference;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.lang.reflect.Constructor;
import java.lang.reflect.InvocationTargetException;
import java.nio.charset.StandardCharsets;
import java.security.AccessController;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.security.PrivilegedAction;
import java.security.PrivilegedActionException;
import java.security.PrivilegedExceptionAction;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashMap;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.WeakHashMap;
import java.util.concurrent.ScheduledFuture;
import java.util.concurrent.ScheduledThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.regex.Pattern;
import org.checkerframework.checker.nullness.qual.Nullable;
/**
* A local implementation of the Datastore.
*
* This is a memory-based implementation which can persist itself to disk through a batch
* operation.
*
* This class is no longer a {@link com.google.appengine.tools.development.LocalRpcService},
* however it is still called a service for backwards compatibility.
*/
public abstract class LocalDatastoreService {
// TODO Improve this implementation such that it's backed by
// something like Derby, which is bundled with the JDK, perhaps in
// "iteration 2".
private static final Logger logger = Logger.getLogger(LocalDatastoreService.class.getName());
static final double DEFAULT_DEADLINE_SECONDS = 30.0;
static final double MAX_DEADLINE_SECONDS = DEFAULT_DEADLINE_SECONDS;
/**
* Default number of {@link Entity} objects to retrieve at a time. This is an optimization that
* avoids making an RPC call for each {@link Entity}.
*/
// NOTE: Keep synchronized with `megastore_batch_size` default value at
//
static final int DEFAULT_BATCH_SIZE = 20;
// This should be synchronized with the production datastore's --max_query_results flag value.
public static final int MAX_QUERY_RESULTS = 300;
/** The package name for this service. */
public static final String PACKAGE = "datastore_v3";
/** How long a query can stay "live" before we expire it. */
public static final String MAX_QUERY_LIFETIME_PROPERTY = "datastore.max_query_lifetime";
private static final int DEFAULT_MAX_QUERY_LIFETIME = 30000;
/** How long a transaction can stay "live" before we expire it. */
public static final String MAX_TRANSACTION_LIFETIME_PROPERTY = "datastore.max_txn_lifetime";
private static final int DEFAULT_MAX_TRANSACTION_LIFETIME = 300000;
/** How long to wait before updating the persistent store in milliseconds. */
public static final String STORE_DELAY_PROPERTY = "datastore.store_delay";
static final int DEFAULT_STORE_DELAY_MS = 30000;
static final int MAX_STRING_LENGTH = DataTypeUtils.MAX_STRING_PROPERTY_LENGTH;
static final int MAX_LINK_LENGTH = DataTypeUtils.MAX_LINK_PROPERTY_LENGTH;
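// 1 MiB (1048576 bytes) less 89 bytes, presumably reserved for entity overhead.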
static final int MAX_BLOB_LENGTH = 1048487;
/** The maximum number of entity groups in a transaction. */
public static final int MAX_EG_PER_TXN = 25;
/** Where to read/store the datastore from/to. */
public static final String BACKING_STORE_PROPERTY = "datastore.backing_store";
/** The backing store file format version number. */
private static final long CURRENT_STORAGE_VERSION = 2;
/**
* True to emulate Datastore vnext features. These features mandate strong consistency everywhere,
* so custom {@link HighRepJobPolicy} policies are disallowed while this flag is true.
*/
// Emulates Datastore-on-spanner features. These features include strong consistency
// everywhere and increased latency on writes due to high replication.
public static final String EMULATE_VNEXT_FEATURES = "datastore.use_vnext";
/**
* True to prevent the datastore from writing {@link
* com.google.apphosting.utils.config.IndexesXmlReader#GENERATED_INDEX_FILENAME}.
*/
public static final String NO_INDEX_AUTO_GEN_PROP = "datastore.no_index_auto_gen";
/** True to put the datastore into "memory-only" mode. */
public static final String NO_STORAGE_PROPERTY = "datastore.no_storage";
/**
* The fully-qualified name of a class that implements {@link HighRepJobPolicy} and has a no-arg
* constructor. If not provided we use a {@link DefaultHighRepJobPolicy}. See the javadoc for this
* class for information on its configurable properties.
*/
public static final String HIGH_REP_JOB_POLICY_CLASS_PROPERTY =
"datastore.high_replication_job_policy_class";
/**
* If this property exists we consider the datastore to be high replication independent of the
* high rep job policy.
*/
public static final String FORCE_IS_HIGH_REP_PROPERTY = "datastore.force_is_high_replication";
public static final String INDEX_CONFIGURATION_FORMAT_PROPERTY =
"datastore.index_configuration_format";
private static final Pattern RESERVED_NAME = Pattern.compile("^__.*__$");
private static final String SUBSCRIPTION_MAP_KIND = "__ProspectiveSearchSubscriptions__";
/**
* Reserved kinds that we allow writes to. This lets us throw an exception when users try to write
* reserved kinds but lets the kinds used by the local blobstore get through. This means user code
* can write to these reserved blobstore kinds in the dev appserver, which can't happen in prod.
* This is an acceptable discrepancy.
*/
static final ImmutableSet<String> RESERVED_KIND_ALLOWLIST =
ImmutableSet.of(
ReservedKinds.BLOB_UPLOAD_SESSION_KIND,
ReservedKinds.GOOGLE_STORAGE_FILE_KIND,
BlobInfoFactory.KIND,
SUBSCRIPTION_MAP_KIND,
ImagesReservedKinds.BLOB_SERVING_URL_KIND);
static final String ENTITY_GROUP_MESSAGE =
"cross-group transaction need to be explicitly specified, "
+ "see TransactionOptions.Builder.withXG";
static final String TOO_MANY_ENTITY_GROUP_MESSAGE =
"operating on too many entity groups in a single transaction.";
static final String MULTI_EG_TXN_NOT_ALLOWED =
"transactions on multiple entity groups only allowed in High Replication applications";
static final String CONTENTION_MESSAGE =
"too much contention on these datastore entities. please try again.";
static final String TRANSACTION_CLOSED = "transaction closed";
static final String TRANSACTION_NOT_FOUND = "transaction has expired or is invalid";
static final String TRANSACTION_RETRY_ON_READ_ONLY =
"specifying a previous transaction handle on a read-only transaction is not allowed";
static final String TRANSACTION_RETRY_ON_PREVIOUSLY_READ_ONLY =
"Cannot retry a read-only transaction.";
static final String TRANSACTION_OPTIONS_CHANGED_ON_RESET =
"Transaction options should be the same as specified previous transaction.";
static final String NAME_TOO_LONG =
"The key path element name is longer than " + MAX_STRING_LENGTH + " bytes.";
static final String QUERY_NOT_FOUND =
"query has expired or is invalid. Please "
+ "restart it with the last cursor to read more results.";
static final String VNEXT_POLICY_DISALLOWED =
"custom job policies are disallowed "
+ "when using vnext features as vnext must be strongly consistent.";
/**
* Index of the bit in a 64-bit integer representation of the non-inclusive upper bound of the
* sequential id space. Integer ids must be representable as IEEE 64-bit floats to support JSON
* numeric encoding. Ids in any id space can be no greater than 1 << (MAX_SEQUENTIAL_BIT + 1).
*/
private static final long MAX_SEQUENTIAL_BIT = 52;
/**
* Maximum permitted value for the counter which generates sequential ids. The sequential id space
* is (0, 2^52) non-inclusive.
*/
private static final long MAX_SEQUENTIAL_COUNTER = (1L << MAX_SEQUENTIAL_BIT) - 1;
/** Maximum valid sequential id. */
private static final long MAX_SEQUENTIAL_ID = MAX_SEQUENTIAL_COUNTER;
/**
* Maximum permitted value for the counter which generates scattered ids. The scattered id space
* is [2^52, 2^52 + 2^51).
*/
private static final long MAX_SCATTERED_COUNTER = (1L << (MAX_SEQUENTIAL_BIT - 1)) - 1;
/** Number of empty high-order bits above the scattered id space. */
private static final long SCATTER_SHIFT = 64 - MAX_SEQUENTIAL_BIT + 1;
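// Illustrative arithmetic: with MAX_SEQUENTIAL_BIT = 52, sequential ids occupy
// (0, 2^52) and scattered ids occupy [2^52, 2^52 + 2^51). SCATTER_SHIFT is
// 64 - 52 + 1 = 13, so a scattered counter (always < 2^51) shifted left by 13 bits
// occupies bit positions 13..63, and Long.reverse() maps those into positions 0..50,
// keeping every generated id below 2^52 + 2^51 (see toScatteredId below).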
public static final String AUTO_ID_ALLOCATION_POLICY_PROPERTY =
"datastore.auto_id_allocation_policy";
/**
* The set of supported values for autoIdAllocationPolicy, which controls how auto IDs are
* assigned by put().
*/
public static enum AutoIdAllocationPolicy {
SEQUENTIAL,
SCATTERED
}
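// For example, passing a properties map containing
// ("datastore.auto_id_allocation_policy", "scattered") to init() selects
// AutoIdAllocationPolicy.SCATTERED; the value is parsed case-insensitively (see
// getEnumProperty below).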
// Important that these begin with 1, because 0 stands for "not assigned".
private final AtomicLong entityIdSequential = new AtomicLong(1);
private final AtomicLong entityIdScattered = new AtomicLong(1);
private AutoIdAllocationPolicy autoIdAllocationPolicy = AutoIdAllocationPolicy.SEQUENTIAL;
private final AtomicLong queryId = new AtomicLong(0);
/** The location this database is persisted to and loaded from. */
private String backingStore;
/** The set of Profiles for this datastore, categorized by name. */
private final Map<String, Profile> profiles =
Collections.synchronizedMap(new HashMap<String, Profile>());
private final Map<String, SpecialProperty> specialPropertyMap = Maps.newHashMap();
// TODO: Replace the use of the Clock with Stopwatch.
private Clock clock;
private static final long MAX_BATCH_GET_KEYS = 1000000000;
/**
* Mimic production behavior by limiting the number of datastore actions per transaction. This
* should be kept synchronized with MegastoreDatastore._DEFAULT_MAX_ACTIONS_PER_TXN.
*/
private static final long MAX_ACTIONS_PER_TXN = 5;
/**
* Calls the add method on the taskqueue service.
*
* Subclasses should override this to use the appropriate method of calling other services.
*/
protected abstract void addActionImpl(TaskQueueAddRequest action);
/**
* Clear out the in-memory datastore. Note that this does not clear out any data that has been
* persisted on disk.
*/
public void clearProfiles() {
profiles.clear();
}
/** Clear out the query history that we use for generating indexes. */
public void clearQueryHistory() {
LocalCompositeIndexManager.getInstance().clearQueryHistory();
}
/**
* The amount of time a query is protected from being "GC'd". Must be no shorter than the max
* request deadline set by the dev appserver.
*/
private int maxQueryLifetimeMs;
/**
* The amount of time a txn is protected from being "GC'd". Must be no shorter than the max
* request deadline set by the dev appserver.
*/
private int maxTransactionLifetimeMs;
/**
* The number of core threads to use for background tasks for all LocalDatastoreService instances.
*/
private static final int BACKGROUND_THREAD_COUNT = 10;
private static final ScheduledThreadPoolExecutor scheduler = createScheduler();
/** Creates a new scheduler. */
private static ScheduledThreadPoolExecutor createScheduler() {
ScheduledThreadPoolExecutor scheduler =
new ScheduledThreadPoolExecutor(
BACKGROUND_THREAD_COUNT,
new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat("LocalDatastoreService-%d")
.build());
scheduler.setRemoveOnCancelPolicy(true);
AccessController.doPrivileged(
new PrivilegedAction<Object>() {
@Override
public Object run() {
Runtime.getRuntime()
.addShutdownHook(
new Thread() {
@Override
public void run() {
cleanupActiveServices();
}
});
return null;
}
});
return scheduler;
}
/** A set of all active LocalDatastoreService objects. */
private static final Set<LocalDatastoreService> activeServices = Sets.newConcurrentHashSet();
/** Hands out transaction handles */
private final AtomicInteger transactionHandleProvider = new AtomicInteger(0);
/** How often we attempt to persist the database. */
private int storeDelayMs;
/** Is the datastore dirty, requiring a write? */
private volatile boolean dirty;
/**
* A lock around the database that is used to prevent background persisting from interfering with
* real time updates.
*/
private final ReadWriteLock globalLock = new ReentrantReadWriteLock();
private boolean noStorage;
/** The pseudoKinds known to this local datastore. */
private PseudoKinds pseudoKinds;
private HighRepJobPolicy highRepJobPolicy;
private boolean spannerBacked;
private LocalDatastoreCostAnalysis costAnalysis;
/** The list of scheduled tasks for this instance of the service. */
private final List<ScheduledFuture<?>> scheduledTasks = new ArrayList<>();
public LocalDatastoreService() {
setMaxQueryLifetime(DEFAULT_MAX_QUERY_LIFETIME);
setMaxTransactionLifetime(DEFAULT_MAX_TRANSACTION_LIFETIME);
setStoreDelay(DEFAULT_STORE_DELAY_MS);
enableScatterProperty(true);
}
public void init(LocalServiceContext context, Map<String, String> properties) {
init(context.getLocalServerEnvironment().getAppDir(), context.getClock(), properties);
}
public void init(File appDirectory, Clock clock, Map<String, String> properties) {
this.clock = clock;
String storeFile = properties.get(BACKING_STORE_PROPERTY);
String noStorageProp = properties.get(NO_STORAGE_PROPERTY);
if (noStorageProp != null) {
noStorage = Boolean.parseBoolean(noStorageProp);
}
if (storeFile == null && !noStorage) {
File dir = GenerationDirectory.getGenerationDirectory(appDirectory);
dir.mkdirs();
storeFile = dir.getAbsolutePath() + File.separator + "local_db.bin";
}
setBackingStore(storeFile);
String storeDelayTime = properties.get(STORE_DELAY_PROPERTY);
storeDelayMs = parseInt(storeDelayTime, storeDelayMs, STORE_DELAY_PROPERTY);
String maxQueryLifetime = properties.get(MAX_QUERY_LIFETIME_PROPERTY);
maxQueryLifetimeMs =
parseInt(maxQueryLifetime, maxQueryLifetimeMs, MAX_QUERY_LIFETIME_PROPERTY);
String maxTxnLifetime = properties.get(MAX_TRANSACTION_LIFETIME_PROPERTY);
maxTransactionLifetimeMs =
parseInt(maxTxnLifetime, maxTransactionLifetimeMs, MAX_TRANSACTION_LIFETIME_PROPERTY);
autoIdAllocationPolicy =
getEnumProperty(
properties,
AutoIdAllocationPolicy.class,
AUTO_ID_ALLOCATION_POLICY_PROPERTY,
autoIdAllocationPolicy);
IndexConfigurationFormat indexConfigurationFormat =
getEnumProperty(
properties,
IndexConfigurationFormat.class,
INDEX_CONFIGURATION_FORMAT_PROPERTY,
IndexConfigurationFormat.DEFAULT);
LocalCompositeIndexManager.init(indexConfigurationFormat);
LocalCompositeIndexManager.getInstance().setAppDir(appDirectory);
LocalCompositeIndexManager.getInstance().setClock(clock);
String noIndexAutoGenProp = properties.get(NO_INDEX_AUTO_GEN_PROP);
if (noIndexAutoGenProp != null) {
LocalCompositeIndexManager.getInstance()
.setStoreIndexConfiguration(!Boolean.parseBoolean(noIndexAutoGenProp));
}
String vnextPropStr = properties.get(EMULATE_VNEXT_FEATURES);
if (vnextPropStr != null) {
spannerBacked = Boolean.parseBoolean(vnextPropStr);
} else {
spannerBacked = false;
}
initHighRepJobPolicy(properties);
/* Create and register all pseudo-kinds */
pseudoKinds = new PseudoKinds();
pseudoKinds.register(new KindPseudoKind(this));
pseudoKinds.register(new PropertyPseudoKind(this));
pseudoKinds.register(new NamespacePseudoKind(this));
if (!spannerBacked()) {
pseudoKinds.register(new EntityGroupPseudoKind());
}
costAnalysis = new LocalDatastoreCostAnalysis(LocalCompositeIndexManager.getInstance());
logger.info(
String.format(
"Local Datastore initialized: \n" + "\tType: %s\n" + "\tStorage: %s",
spannerBacked() ? "VNext" : "High Replication",
noStorage ? "In-memory" : backingStore));
}
private static <T extends Enum<T>> T getEnumProperty(
Map<String, String> properties, Class<T> enumType, String propertyName, T defaultIfNotSet) {
String propertyValue = properties.get(propertyName);
if (propertyValue == null) {
return defaultIfNotSet;
}
try {
return Enum.valueOf(enumType, propertyValue.toUpperCase());
} catch (IllegalArgumentException e) {
throw new IllegalArgumentException(
String.format("Invalid value \"%s\" for property \"%s\"", propertyValue, propertyName),
e);
}
}
boolean spannerBacked() {
return spannerBacked;
}
private void initHighRepJobPolicy(Map<String, String> properties) {
String highRepJobPolicyStr = properties.get(HIGH_REP_JOB_POLICY_CLASS_PROPERTY);
if (highRepJobPolicyStr == null) {
if (spannerBacked()) {
highRepJobPolicy = new SpannerJobPolicy();
} else {
DefaultHighRepJobPolicy defaultPolicy = new DefaultHighRepJobPolicy(properties);
spannerBacked = false;
highRepJobPolicy = defaultPolicy;
}
} else {
if (spannerBacked()) {
// Spanner is always strongly consistent - cannot change consistency model
throw newError(ErrorCode.BAD_REQUEST, VNEXT_POLICY_DISALLOWED);
}
try {
Class<?> highRepJobPolicyCls = Class.forName(highRepJobPolicyStr);
Constructor<?> ctor = highRepJobPolicyCls.getDeclaredConstructor();
// In case the constructor isn't accessible to us.
ctor.setAccessible(true);
// We'll get a ClassCastException if the class doesn't implement the
// HighRepJobPolicy interface.
this.highRepJobPolicy = (HighRepJobPolicy) ctor.newInstance();
} catch (ClassNotFoundException
| InvocationTargetException
| NoSuchMethodException
| InstantiationException
| IllegalAccessException e) {
throw new IllegalArgumentException(e);
}
}
}
/** Spanner job policy: applies all jobs immediately to ensure strong consistency. */
static class SpannerJobPolicy implements HighRepJobPolicy {
@Override
public boolean shouldApplyNewJob(Key entityGroup) {
return true;
}
@Override
public boolean shouldRollForwardExistingJob(Key entityGroup) {
return true;
}
}
private static int parseInt(String valStr, int defaultVal, String propName) {
if (valStr != null) {
try {
return Integer.parseInt(valStr);
} catch (NumberFormatException e) {
logger.log(
Level.WARNING,
"Expected a numeric value for property "
+ propName
+ "but received, "
+ valStr
+ ". Resetting property to the default.");
}
}
return defaultVal;
}
public void start() {
AccessController.doPrivileged(
new PrivilegedAction<Object>() {
@Override
public Object run() {
startInternal();
return null;
}
});
}
private synchronized void startInternal() {
if (activeServices.contains(this)) {
return;
}
load();
activeServices.add(this);
scheduledTasks.add(
scheduler.scheduleWithFixedDelay(
new Runnable() {
@Override
public void run() {
removeStaleQueries(clock.getCurrentTime());
}
},
maxQueryLifetimeMs * 5,
maxQueryLifetimeMs * 5,
TimeUnit.MILLISECONDS));
scheduledTasks.add(
scheduler.scheduleWithFixedDelay(
new Runnable() {
@Override
public void run() {
removeStaleTransactions(clock.getCurrentTime());
}
},
maxTransactionLifetimeMs * 5,
maxTransactionLifetimeMs * 5,
TimeUnit.MILLISECONDS));
if (!noStorage) {
scheduledTasks.add(
scheduler.scheduleWithFixedDelay(
new Runnable() {
@Override
public void run() {
persist();
}
},
storeDelayMs,
storeDelayMs,
TimeUnit.MILLISECONDS));
}
}
public synchronized void stop() {
if (!activeServices.contains(this)) {
return;
}
activeServices.remove(this);
if (!noStorage) {
// All information about unapplied jobs is transient, so roll everything
// forward before we shut down.
rollForwardAllUnappliedJobs();
persist();
}
clearProfiles();
clearQueryHistory();
for (ScheduledFuture<?> scheduledTask : scheduledTasks) {
scheduledTask.cancel(false);
}
scheduledTasks.clear();
}
private void rollForwardAllUnappliedJobs() {
// Since we're not persisting unapplied jobs, force all unapplied jobs to
// roll forward before we shut down. We could actually persist this
// information if we wanted, but it doesn't seem worth the effort (we'd
// need to make all the Runnables implement Serializable).
for (Profile profile : profiles.values()) {
if (profile.getGroups() != null) {
for (Profile.EntityGroup eg : profile.getGroups().values()) {
eg.rollForwardUnappliedJobs();
}
}
}
}
public void setMaxQueryLifetime(int milliseconds) {
this.maxQueryLifetimeMs = milliseconds;
}
public void setMaxTransactionLifetime(int milliseconds) {
this.maxTransactionLifetimeMs = milliseconds;
}
public void setBackingStore(String backingStore) {
this.backingStore = backingStore;
}
public void setStoreDelay(int delayMs) {
this.storeDelayMs = delayMs;
}
public void setNoStorage(boolean noStorage) {
this.noStorage = noStorage;
}
// TODO: Add a unit test for this.
public void enableScatterProperty(boolean enable) {
if (enable) {
specialPropertyMap.put(Entity.SCATTER_RESERVED_PROPERTY, SpecialProperty.SCATTER);
} else {
specialPropertyMap.remove(Entity.SCATTER_RESERVED_PROPERTY);
}
}
public String getPackage() {
return PACKAGE;
}
public GetResponse get(@SuppressWarnings("unused") Status status, GetRequest request) {
GetResponse response = new GetResponse();
LiveTxn liveTxn = null;
for (Reference key : request.keys()) {
validatePathComplete(key);
String app = key.getApp();
Path groupPath = getGroup(key);
GetResponse.Entity responseEntity = response.addEntity();
Profile profile = getOrCreateProfile(app);
synchronized (profile) {
Profile.EntityGroup eg = profile.getGroup(groupPath);
if (request.hasTransaction()) {
if (liveTxn == null) {
liveTxn = profile.getTxn(request.getTransaction().getHandle());
}
// this will throw an exception if we attempt to read from
// the wrong entity group
eg.addTransaction(liveTxn);
}
boolean eventualConsistency = request.hasFailoverMs() && liveTxn == null;
EntityProto entity = pseudoKinds.get(liveTxn, eg, key, eventualConsistency);
if (entity == PseudoKinds.NOT_A_PSEUDO_KIND) {
VersionedEntity versionedEntity = eg.get(liveTxn, key, eventualConsistency);
if (versionedEntity == null) {
entity = null;
if (!eventualConsistency) {
responseEntity.setVersion(profile.getReadTimestamp());
}
} else {
entity = versionedEntity.entityProto();
responseEntity.setVersion(versionedEntity.version());
}
}
if (entity != null) {
responseEntity.getMutableEntity().copyFrom(entity);
postprocessEntity(responseEntity.getMutableEntity());
} else {
responseEntity.getMutableKey().copyFrom(key);
}
// Give all entity groups with unapplied jobs the opportunity to catch
// up. Note that this will not impact the result we're about to return.
profile.groom();
}
}
return response;
}
public PutResponse put(Status status, PutRequest request) {
globalLock.readLock().lock();
try {
return putImpl(status, request);
} finally {
globalLock.readLock().unlock();
}
}
/** Prepares an entity provided by a client for storage. */
private void preprocessEntity(EntityProto entity) {
preprocessIndexesWithoutEmptyListSupport(entity);
processEntityForSpecialProperties(entity, true);
}
/** Prepares a stored entity to be returned to a client. */
private void postprocessEntity(EntityProto entity) {
processEntityForSpecialProperties(entity, false);
postprocessIndexes(entity);
}
// Equivalent to processSpecialProperties in prod.
/**
* Adds the appropriate special properties to the given entity and removes all others.
*
* @param entity the entity to modify.
* @param store true if we're storing the entity, false if we're loading it.
*/
private void processEntityForSpecialProperties(EntityProto entity, boolean store) {
for (Iterator<Property> iter = entity.mutablePropertys().iterator(); iter.hasNext(); ) {
if (specialPropertyMap.containsKey(iter.next().getName())) {
iter.remove();
}
}
for (SpecialProperty specialProp : specialPropertyMap.values()) {
if (store ? specialProp.isStored() : specialProp.isVisible()) {
PropertyValue value = specialProp.getValue(entity);
if (value != null) {
entity.addProperty(specialProp.getProperty(value));
}
}
}
}
@SuppressWarnings("unused") // status
public PutResponse putImpl(Status status, PutRequest request) {
PutResponse response = new PutResponse();
if (request.entitySize() == 0) {
return response;
}
Cost totalCost = response.getMutableCost();
String app = request.entitys().get(0).getKey().getApp();
List<EntityProto> clones = new ArrayList<>();
for (EntityProto entity : request.entitys()) {
validateAndProcessEntityProto(entity);
EntityProto clone = entity.clone();
clones.add(clone);
checkArgument(clone.hasKey());
Reference key = clone.getKey();
checkArgument(key.getPath().elementSize() > 0);
clone.getMutableKey().setApp(app);
Element lastPath = getLastElement(key);
if (lastPath.getId() == 0 && !lastPath.hasName()) {
if (autoIdAllocationPolicy == AutoIdAllocationPolicy.SEQUENTIAL) {
lastPath.setId(entityIdSequential.getAndIncrement());
} else {
lastPath.setId(toScatteredId(entityIdScattered.getAndIncrement()));
}
}
preprocessEntity(clone);
if (clone.getEntityGroup().elementSize() == 0) {
// The entity needs its entity group set.
Path group = clone.getMutableEntityGroup();
Element root = key.getPath().elements().get(0);
Element pathElement = group.addElement();
pathElement.setType(root.getType());
if (root.hasName()) {
pathElement.setName(root.getName());
} else {
pathElement.setId(root.getId());
}
} else {
// update an existing entity
checkState(clone.hasEntityGroup() && clone.getEntityGroup().elementSize() > 0);
}
}
Map<Path, List<EntityProto>> entitiesByEntityGroup = new LinkedHashMap<>();
Map<Reference, Long> writtenVersions = new HashMap<>();
final Profile profile = getOrCreateProfile(app);
synchronized (profile) {
LiveTxn liveTxn = null;
for (EntityProto clone : clones) {
Profile.EntityGroup eg = profile.getGroup(clone.getEntityGroup());
if (request.hasTransaction()) {
// If there's a transaction we delay the put until
// the transaction is committed.
if (liveTxn == null) {
liveTxn = profile.getTxn(request.getTransaction().getHandle());
}
checkRequest(!liveTxn.isReadOnly(), "Cannot modify entities in a read-only transaction.");
// this will throw an exception if we attempt to
// modify the wrong entity group
eg.addTransaction(liveTxn).addWrittenEntity(clone);
} else {
List<EntityProto> entities = entitiesByEntityGroup.get(clone.getEntityGroup());
if (entities == null) {
entities = new ArrayList<>();
entitiesByEntityGroup.put(clone.getEntityGroup(), entities);
}
entities.add(clone);
}
response.mutableKeys().add(clone.getKey());
}
for (final Map.Entry<Path, List<EntityProto>> entry : entitiesByEntityGroup.entrySet()) {
Profile.EntityGroup eg = profile.getGroup(entry.getKey());
eg.incrementVersion();
LocalDatastoreJob job =
new WriteJob(
highRepJobPolicy,
eg,
profile,
entry.getValue(),
Collections.emptyList());
addTo(totalCost, job.calculateJobCost());
eg.addJob(job);
for (EntityProto entity : entry.getValue()) {
writtenVersions.put(entity.getKey(), job.getMutationTimestamp(entity.getKey()));
}
}
}
if (!request.hasTransaction()) {
logger.fine("put: " + request.entitySize() + " entities");
// Fill the version numbers, in the same order
for (Reference key : response.keys()) {
response.addVersion(writtenVersions.get(key));
}
}
response.setCost(totalCost);
return response;
}
private void validateAndProcessEntityProto(EntityProto entity) {
validatePathForPut(entity.getKey());
for (Property prop : entity.propertys()) {
validateAndProcessProperty(prop);
validateLengthLimit(prop);
}
for (Property prop : entity.rawPropertys()) {
validateAndProcessProperty(prop);
validateRawPropLengthLimit(prop);
}
}
private void validatePathComplete(Reference key) {
Path path = key.getPath();
for (Element ele : path.elements()) {
if (ele.getName().isEmpty() && ele.getId() == 0) {
throw newError(
ErrorCode.BAD_REQUEST, String.format("Incomplete key.path.element: %s", ele));
}
}
}
private void validatePathForPut(Reference key) {
Path path = key.getPath();
for (Element ele : path.elements()) {
String type = ele.getType();
if (RESERVED_NAME.matcher(type).matches() && !RESERVED_KIND_ALLOWLIST.contains(type)) {
throw newError(
ErrorCode.BAD_REQUEST,
String.format("The key path element kind \"%s\" is reserved.", ele.getType()));
}
if (ele.hasName() && ele.getNameAsBytes().length > MAX_STRING_LENGTH) {
throw newError(ErrorCode.BAD_REQUEST, NAME_TOO_LONG);
}
}
}
private void validateAndProcessProperty(Property prop) {
if (RESERVED_NAME.matcher(prop.getName()).matches()) {
throw newError(
ErrorCode.BAD_REQUEST, String.format("illegal property.name: %s", prop.getName()));
}
PropertyValue val = prop.getMutableValue();
if (val.hasUserValue() && !val.getUserValue().hasObfuscatedGaiaid()) {
// If not already set, populate obfuscated gaia id with hash of email address.
PropertyValue.UserValue userVal = val.getMutableUserValue();
userVal.setObfuscatedGaiaid(Integer.toString(userVal.getEmail().hashCode()));
}
}
private void validateLengthLimit(Property property) {
String name = property.getName();
PropertyValue value = property.getValue();
if (value.hasStringValue()) {
if (property.hasMeaning() && property.getMeaningEnum() == Property.Meaning.ATOM_LINK) {
if (value.getStringValueAsBytes().length > MAX_LINK_LENGTH) {
throw newError(
ErrorCode.BAD_REQUEST,
"Link property "
+ name
+ " is too long. Use TEXT for links over "
+ MAX_LINK_LENGTH
+ " bytes.");
}
} else if (property.hasMeaning()
&& property.getMeaningEnum() == Property.Meaning.ENTITY_PROTO) {
if (value.getStringValueAsBytes().length > MAX_BLOB_LENGTH) {
throw newError(
ErrorCode.BAD_REQUEST,
"embedded entity property "
+ name
+ " is too big. It cannot exceed "
+ MAX_BLOB_LENGTH
+ " bytes.");
}
} else {
if (value.getStringValueAsBytes().length > MAX_STRING_LENGTH) {
throw newError(
ErrorCode.BAD_REQUEST,
"string property "
+ name
+ " is too long. It cannot exceed "
+ MAX_STRING_LENGTH
+ " bytes.");
}
}
}
}
private void validateRawPropLengthLimit(Property property) {
String name = property.getName();
PropertyValue value = property.getValue();
if (!value.hasStringValue() || !property.hasMeaning()) {
return;
}
if (property.getMeaningEnum() == Property.Meaning.BLOB
|| property.getMeaningEnum() == Property.Meaning.ENTITY_PROTO
|| property.getMeaningEnum() == Property.Meaning.TEXT) {
if (value.getStringValueAsBytes().length > MAX_BLOB_LENGTH) {
throw newError(
ErrorCode.BAD_REQUEST,
"Property " + name + " is too long. It cannot exceed " + MAX_BLOB_LENGTH + " bytes.");
}
}
}
public DeleteResponse delete(Status status, DeleteRequest request) {
globalLock.readLock().lock();
try {
return deleteImpl(status, request);
} finally {
globalLock.readLock().unlock();
}
}
public VoidProto addActions(Status status, TaskQueueBulkAddRequest request) {
globalLock.readLock().lock();
try {
addActionsImpl(status, request);
} finally {
globalLock.readLock().unlock();
}
return VoidProto.getDefaultInstance();
}
/**
* Returns the entity group for the specified key. This is simply a new {@code Path} instance
* containing the first element in the specified key.
*/
private Path getGroup(Reference key) {
Path path = key.getPath();
Path group = new Path();
group.addElement(path.getElement(0));
return group;
}
@SuppressWarnings("unused") // status
public DeleteResponse deleteImpl(Status status, DeleteRequest request) {
DeleteResponse response = new DeleteResponse();
if (request.keySize() == 0) {
return response;
}
Cost totalCost = response.getMutableCost();
// We don't support requests that span apps, so the app for the first key
// is the app for all keys.
String app = request.keys().get(0).getApp();
final Profile profile = getOrCreateProfile(app);
LiveTxn liveTxn = null;
// Maintain a mapping of keys by entity group so that we can apply one job
// per entity group.
Map<Path, List<Reference>> keysByEntityGroup = new LinkedHashMap<>();
Map<Reference, Long> writtenVersions = new HashMap<>();
synchronized (profile) {
for (final Reference key : request.keys()) {
validatePathComplete(key);
Path group = getGroup(key);
if (request.hasTransaction()) {
if (liveTxn == null) {
liveTxn = profile.getTxn(request.getTransaction().getHandle());
}
checkRequest(!liveTxn.isReadOnly(), "Cannot modify entities in a read-only transaction.");
Profile.EntityGroup eg = profile.getGroup(group);
// this will throw an exception if we attempt to modify
// the wrong entity group
eg.addTransaction(liveTxn).addDeletedEntity(key);
} else {
List<Reference> keysToDelete = keysByEntityGroup.get(group);
if (keysToDelete == null) {
keysToDelete = new ArrayList<>();
keysByEntityGroup.put(group, keysToDelete);
}
keysToDelete.add(key);
}
}
// Now loop over the entity groups. We will attempt to apply one job that
// does all the work for each entity group.
for (final Map.Entry<Path, List<Reference>> entry : keysByEntityGroup.entrySet()) {
Profile.EntityGroup eg = profile.getGroup(entry.getKey());
eg.incrementVersion();
LocalDatastoreJob job =
new WriteJob(
highRepJobPolicy,
eg,
profile,
Collections.emptyList(),
entry.getValue());
addTo(totalCost, job.calculateJobCost());
eg.addJob(job);
for (Reference deletedKey : entry.getValue()) {
writtenVersions.put(deletedKey, job.getMutationTimestamp(deletedKey));
}
}
}
if (!request.hasTransaction()) {
for (Reference key : request.keys()) {
response.addVersion(writtenVersions.get(key));
}
}
return response;
}
@SuppressWarnings("unused") // status
private void addActionsImpl(Status status, TaskQueueBulkAddRequest request) {
// Does not verify that every TaskQueueAddRequest is part of the same transaction because this
// checking is done at the API level.
if (request.getAddRequestCount() == 0) {
return;
}
// The transactional tasks need to be associated with the txn.
// When the txn is committed the tasks will be sent back over to
// the taskqueue stub. We need to wipe out their transactions before sending
// so that the tasks actually get added and we don't continue spinning
// around in an infinite loop.
List<TaskQueueAddRequest> addRequests = new ArrayList<>(request.getAddRequestCount());
for (TaskQueueAddRequest addRequest : request.getAddRequestList()) {
addRequests.add(
addRequest.toBuilder().clearTransaction().clearDatastoreTransaction().build());
}
Transaction transaction;
if (request.getAddRequestList().get(0).hasDatastoreTransaction()) {
ByteString datastoreTransaction =
request.getAddRequestList().get(0).getDatastoreTransaction();
try {
transaction = Transaction.parser().parseFrom(datastoreTransaction);
} catch (InvalidProtocolBufferException e) {
throw newError(ErrorCode.BAD_REQUEST, "Invalid transaction");
}
} else {
transaction = toProto1(request.getAddRequest(0).getTransaction());
}
Profile profile = profiles.get(transaction.getApp());
LiveTxn liveTxn = profile.getTxn(transaction.getHandle());
liveTxn.addActions(addRequests);
}
static Transaction toProto1(
com.google.apphosting.api.proto2api.DatastorePb.Transaction txn) {
Transaction txnProto = new Transaction();
boolean unused = txnProto.mergeFrom(txn.toByteArray());
return txnProto;
}
@SuppressWarnings("unused") // status
public QueryResult runQuery(Status status, Query query) {
// Construct a validated query right away so we can fail fast
// if something is wrong.
final LocalCompositeIndexManager.ValidatedQuery validatedQuery =
new LocalCompositeIndexManager.ValidatedQuery(query);
query = validatedQuery.getV3Query();
// Modernize the query's cursors.
// NOTE: Modernization must follow (not precede) construction of
// LocalCompositeIndexManager.ValidatedQuery. I don't know why.
try {
CursorModernizer.modernizeQueryCursors(query);
} catch (InvalidConversionException e) {
throw newError(ErrorCode.BAD_REQUEST, "Invalid cursor");
}
String app = query.getApp();
Profile profile = getOrCreateProfile(app);
// The real datastore supports executing ancestor queries in transactions.
// For now we're just going to make sure the entity group of the ancestor
// is the same entity group with which the transaction is associated and
// skip providing a transactionally consistent result set.
synchronized (profile) {
// Having a transaction implies we have an ancestor, but having an
// ancestor does not imply we have a transaction.
if (query.hasTransaction() || query.hasAncestor()) {
// Query can only have a txn if it is an ancestor query. Either way we
// know we've got an ancestor.
Path groupPath = getGroup(query.getAncestor());
Profile.EntityGroup eg = profile.getGroup(groupPath);
if (query.hasTransaction()) {
if (!app.equals(query.getTransaction().getApp())) {
throw newError(
ErrorCode.INTERNAL_ERROR,
"Can't query app "
+ app
+ "in a transaction on app "
+ query.getTransaction().getApp());
}
LiveTxn liveTxn = profile.getTxn(query.getTransaction().getHandle());
// this will throw an exception if we attempt to read from
// the wrong entity group
eg.addTransaction(liveTxn);
// Use snapshot profile.
profile = eg.getSnapshot(liveTxn);
}
if (query.hasAncestor()) {
if (query.hasTransaction() || !query.hasFailoverMs()) {
// Either we have a transaction or the user has requested strongly
// consistent results. Either way, we need to apply jobs.
eg.rollForwardUnappliedJobs();
}
}
}
if (query.hasSearchQuery()) {
throw newError(ErrorCode.BAD_REQUEST, "full-text search unsupported");
}
// Run as a PseudoKind query if necessary, otherwise check the actual local datastore
List<EntityProto> queryEntities = pseudoKinds.runQuery(query);
Map<Reference, Long> versions = null;
if (queryEntities == null) {
Collection<VersionedEntity> versionedEntities = null;
Map<String, Extent> extents = profile.getExtents();
Extent extent = extents.get(query.getKind());
if (extent != null) {
// Make a copy of the list of all the entities in the extent
versionedEntities = extent.getAllEntities();
} else if (!query.hasKind()) {
// Kind-less query, so we need a list containing all entities of
// all kinds.
versionedEntities = profile.getAllEntities();
if (query.orderSize() == 0) {
// add a sort by key asc to match the behavior of prod
query.addOrder(
new Order()
.setDirection(Query.Order.Direction.ASCENDING)
.setProperty(Entity.KEY_RESERVED_PROPERTY));
}
} else {
// no extent - we're querying for a kind without any entities
}
if (versionedEntities != null) {
queryEntities = new ArrayList<>();
versions = new HashMap<>();
for (VersionedEntity entity : versionedEntities) {
queryEntities.add(entity.entityProto());
versions.put(entity.entityProto().getKey(), entity.version());
}
}
}
// Give all entity groups with unapplied jobs the opportunity to catch
// up. Note that this will not impact the result of the query we're
// currently fulfilling since we already have the (unfiltered) result
// set.
profile.groom();
if (queryEntities == null) {
// so we don't need to check for null anywhere else down below
queryEntities = Collections.emptyList();
}
// Building filter predicate
List<Predicate<EntityProto>> predicates = new ArrayList<>();
// apply ancestor restriction
if (query.hasAncestor()) {
final List<Element> ancestorPath = query.getAncestor().getPath().elements();
predicates.add(
new Predicate<EntityProto>() {
@Override
public boolean apply(EntityProto entity) {
List<Element> path = entity.getKey().getPath().elements();
return path.size() >= ancestorPath.size()
&& path.subList(0, ancestorPath.size()).equals(ancestorPath);
}
});
}
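// A shallow query matches only entities exactly one level below the ancestor, or
// root entities when the query has no ancestor.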
if (query.isShallow()) {
final long keyPathLength =
query.hasAncestor() ? query.getAncestor().getPath().elementSize() + 1 : 1;
predicates.add(
new Predicate<EntityProto>() {
@Override
public boolean apply(EntityProto entity) {
return entity.getKey().getPath().elementSize() == keyPathLength;
}
});
}
// apply namespace restriction
final boolean hasNamespace = query.hasNameSpace();
final String namespace = query.getNameSpace();
predicates.add(
new Predicate<EntityProto>() {
@Override
public boolean apply(EntityProto entity) {
Reference ref = entity.getKey();
// Filter all elements not in the query's namespace.
if (hasNamespace) {
if (!ref.hasNameSpace() || !namespace.equals(ref.getNameSpace())) {
return false;
}
} else {
if (ref.hasNameSpace()) {
return false;
}
}
return true;
}
});
// Get entityComparator with filter matching capability
final EntityProtoComparator entityComparator =
new EntityProtoComparator(
validatedQuery.getQuery().orders(), validatedQuery.getQuery().filters());
// applying filter restrictions
predicates.add(
new Predicate<EntityProto>() {
@Override
public boolean apply(EntityProto entity) {
return entityComparator.matches(entity);
}
});
Predicate<EntityProto> queryPredicate =
Predicates.not(Predicates.and(predicates));
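// Note: removeIf below drops entities for which this negated conjunction is true,
// i.e. it keeps exactly those entities that satisfy every predicate above.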
// The ordering of the following operations is important to maintain correct
// query functionality.
// Filtering entities
Iterables.removeIf(queryEntities, queryPredicate);
// Expanding projections
if (query.propertyNameSize() > 0) {
queryEntities = createIndexOnlyQueryResults(queryEntities, entityComparator);
}
// Sorting entities
Collections.sort(queryEntities, entityComparator);
// Apply group by. This must happen after sorting to select the correct first entity.
queryEntities = applyGroupByProperties(queryEntities, query);
// store the query and return the results
LiveQuery liveQuery = new LiveQuery(queryEntities, versions, query, entityComparator, clock);
// CompositeIndexManager does some filesystem reads/writes, so needs to
// be privileged.
AccessController.doPrivileged(
new PrivilegedAction<Object>() {
@Override
public Object run() {
LocalCompositeIndexManager.getInstance().processQuery(validatedQuery.getV3Query());
return null;
}
});
// Use the next function to prefetch results and return them from runQuery.
QueryResult result =
liveQuery.nextResult(
query.hasOffset() ? query.getOffset() : null,
query.hasCount() ? query.getCount() : null,
query.isCompile());
if (query.isCompile()) {
result.setCompiledQuery(liveQuery.compileQuery());
}
if (result.isMoreResults()) {
long cursor = queryId.getAndIncrement();
profile.addQuery(cursor, liveQuery);
result.getMutableCursor().setApp(query.getApp()).setCursor(cursor);
}
// Copy the index list for the query into the result.
for (Index index : LocalCompositeIndexManager.getInstance().queryIndexList(query)) {
result.addIndex(wrapIndexInCompositeIndex(app, index));
} // for
return result;
}
}
@AutoValue
abstract static class NameValue {
public abstract String name();
public abstract PropertyValue value();
public static NameValue of(String name, PropertyValue value) {
return new AutoValue_LocalDatastoreService_NameValue(name, value);
}
}
/**
* Creates a new List of entities after applying group by properties.
*
* @param queryEntities a sorted list of entities.
* @param query the current query.
* @return a new list of entities with unique properties.
*/
private List<EntityProto> applyGroupByProperties(List<EntityProto> queryEntities, Query query) {
Set<String> groupByProperties = Sets.newHashSet(query.groupByPropertyNames());
// Nothing to do if there are no group by properties.
if (groupByProperties.isEmpty()) {
return queryEntities;
}
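// This dedup pass relies on the preceding sort placing entities with identical
// group-by values next to each other, so comparing each entity against the
// previously accepted entity's values is sufficient.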
Set<NameValue> lastEntity = Sets.newHashSet();
List<EntityProto> results = Lists.newArrayList();
for (EntityProto entity : queryEntities) {
boolean isFirst = false;
for (Property prop : entity.propertys()) {
if (groupByProperties.contains(prop.getName())
&& !lastEntity.contains(NameValue.of(prop.getName(), prop.getValue()))) {
isFirst = true;
break;
}
}
if (isFirst) {
results.add(entity);
// Set lastEntity to be the new set of properties.
lastEntity.clear();
for (Property prop : entity.propertys()) {
if (groupByProperties.contains(prop.getName())) {
lastEntity.add(NameValue.of(prop.getName(), prop.getValue()));
}
}
}
}
return results;
}
/**
* Converts a normal result set into the results seen in an index-only query (a projection).
*
* @param queryEntities the results to convert
* @param entityComparator the comparator derived from the query
* @return the converted results
*/
private List<EntityProto> createIndexOnlyQueryResults(
List<EntityProto> queryEntities, EntityProtoComparator entityComparator) {
Set<String> postfixProps =
Sets.newHashSetWithExpectedSize(entityComparator.getAdjustedOrders().size());
for (Query.Order order : entityComparator.getAdjustedOrders()) {
postfixProps.add(order.getProperty());
}
List<EntityProto> results = Lists.newArrayListWithExpectedSize(queryEntities.size());
for (EntityProto entity : queryEntities) {
List<EntityProto> indexEntities = createIndexEntities(entity, postfixProps, entityComparator);
results.addAll(indexEntities);
}
return results;
}
/**
* Splits a full entity into all index entities seen in a projection.
*
* @param entity the entity to split
* @param postfixProps the properties included in the postfix
* @param entityComparator the comparator derived from the query, used to filter values
* @return a list of the index entities.
*/
private ImmutableList<EntityProto> createIndexEntities(
EntityProto entity, Set<String> postfixProps, EntityProtoComparator entityComparator) {
SetMultimap<String, PropertyValue> toSplit =
MultimapBuilder.hashKeys(postfixProps.size()).hashSetValues(1).build();
Set<String> seen = Sets.newHashSet();
boolean splitRequired = false;
for (Property prop : entity.propertys()) {
if (postfixProps.contains(prop.getName())) {
// If we have multiple values for any postfix property, we need to split.
splitRequired |= !seen.add(prop.getName());
// Only add the value if it matches the query filters
if (entityComparator.matches(prop)) {
toSplit.put(prop.getName(), prop.getValue());
}
}
}
if (!splitRequired) {
// No need for splitting!
return ImmutableList.of(entity);
}
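// Build the cartesian product of the matching values of the multi-valued postfix
// properties. Illustrative example: with postfixProps = {a, b}, an entity whose
// property a has values {1, 2} and whose property b has value {3} splits into two
// index entities, (a=1, b=3) and (a=2, b=3).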
EntityProto clone = new EntityProto();
clone.getMutableKey().copyFrom(entity.getKey());
clone.getMutableEntityGroup();
List<EntityProto> results = Lists.newArrayList(clone);
for (Map.Entry<String, Collection<PropertyValue>> entry : toSplit.asMap().entrySet()) {
if (entry.getValue().size() == 1) {
// No need for cloning!
for (EntityProto result : results) {
result
.addProperty()
.setName(entry.getKey())
.setMeaning(Property.Meaning.INDEX_VALUE)
.getMutableValue()
.copyFrom(Iterables.getOnlyElement(entry.getValue()));
}
continue;
}
List<EntityProto> splitResults =
Lists.newArrayListWithCapacity(results.size() * entry.getValue().size());
for (PropertyValue value : entry.getValue()) {
for (EntityProto result : results) {
EntityProto split = result.clone();
split
.addProperty()
.setName(entry.getKey())
.setMeaning(Property.Meaning.INDEX_VALUE)
.getMutableValue()
.copyFrom(value);
splitResults.add(split);
}
}
results = splitResults;
}
return ImmutableList.copyOf(results);
}
/**
* Retrieves the value in the given map keyed by the provided key, throwing an appropriate {@link
* ApplicationException} if there is no value associated with the key.
*/
private static <T> T safeGetFromExpiringMap(Map<Long, T> map, long key, String errorMsg) {
T value = map.get(key);
if (value == null) {
throw newError(ErrorCode.BAD_REQUEST, errorMsg);
}
return value;
}
@SuppressWarnings("unused") // status
public QueryResult next(Status status, NextRequest request) {
Profile profile = profiles.get(request.getCursor().getApp());
LiveQuery liveQuery = profile.getQuery(request.getCursor().getCursor());
QueryResult result =
liveQuery.nextResult(
request.hasOffset() ? request.getOffset() : null,
request.hasCount() ? request.getCount() : null,
request.isCompile());
if (result.isMoreResults()) {
result.setCursor(request.getCursor());
} else {
profile.removeQuery(request.getCursor().getCursor());
}
return result;
}
@SuppressWarnings("unused") // status
public VoidProto deleteCursor(Status status, Cursor request) {
Profile profile = profiles.get(request.getApp());
profile.removeQuery(request.getCursor());
return VoidProto.getDefaultInstance();
}
@SuppressWarnings("unused") // status
public Transaction beginTransaction(Status status, BeginTransactionRequest req) {
Profile profile = getOrCreateProfile(req.getApp());
if (req.hasPreviousTransaction()) {
if (req.getModeEnum() == TransactionMode.READ_ONLY) {
throw newError(ErrorCode.BAD_REQUEST, TRANSACTION_RETRY_ON_READ_ONLY);
}
// synchronize to prevent check-remove race on previous transaction
synchronized (profile) {
LiveTxn previousTransaction =
profile.getTxnQuietly(req.getPreviousTransaction().getHandle());
if (previousTransaction != null) {
if (previousTransaction.concurrencyMode == ConcurrencyMode.READ_ONLY) {
throw newError(ErrorCode.BAD_REQUEST, TRANSACTION_RETRY_ON_PREVIOUSLY_READ_ONLY);
}
if (previousTransaction.allowMultipleEg != req.isAllowMultipleEg()) {
throw newError(ErrorCode.BAD_REQUEST, TRANSACTION_OPTIONS_CHANGED_ON_RESET);
}
profile.removeTxn(req.getPreviousTransaction().getHandle());
}
}
}
Transaction txn =
new Transaction()
.setApp(req.getApp())
.setHandle(transactionHandleProvider.getAndIncrement());
ConcurrencyMode mode = toConcurrencyMode(req.getModeEnum());
profile.addTxn(txn.getHandle(), new LiveTxn(clock, req.isAllowMultipleEg(), req.getModeEnum()));
return txn;
}
@SuppressWarnings("unused") // status
public CommitResponse commit(Status status, final Transaction req) {
Profile profile = profiles.get(req.getApp());
checkNotNull(profile);
CommitResponse response = new CommitResponse();
globalLock.readLock().lock();
// Synchronized so we can't commit and rollback at the same time.
synchronized (profile) {
LiveTxn liveTxn;
try {
liveTxn = profile.removeTxn(req.getHandle());
try {
if (liveTxn.isDirty()) {
response = commitImpl(liveTxn, profile);
} else {
// cost of a read-only txn is 0
response.setCost(new Cost().setEntityWrites(0).setIndexWrites(0));
}
} catch (ApplicationException e) {
// commit failed, re-add transaction so that it can be rolled back or reset.
profile.addTxn(
req.getHandle(),
new LiveTxn(clock, liveTxn.allowMultipleEg, liveTxn.originalTransactionMode, true));
throw e;
}
} finally {
globalLock.readLock().unlock();
}
// Sends all pending actions.
// Note: this is an approximation of the true Datastore behavior.
// Currently, dev_server holds taskqueue tasks in memory, so they are lost
// on a dev_server restart.
// TODO: persist actions as a part of the transactions when
// taskqueue tasks become durable.
for (TaskQueueAddRequest action : liveTxn.getActions()) {
try {
addActionImpl(action);
} catch (ApplicationException e) {
logger.log(Level.WARNING, "Transactional task: " + action + " has been dropped.", e);
}
}
}
return response;
}
/** Requires a lock on the provided profile. */
private CommitResponse commitImpl(LiveTxn liveTxn, final Profile profile) {
// assumes we already have a lock on the profile
CommitResponse response = new CommitResponse();
for (EntityGroupTracker tracker : liveTxn.getAllTrackers()) {
// This will throw an exception if the entity group
// has been modified since this transaction started.
tracker.checkEntityGroupVersion();
}
int deleted = 0;
int written = 0;
Cost totalCost = new Cost();
long commitTimestamp = profile.incrementAndGetCommitTimestamp();
for (EntityGroupTracker tracker : liveTxn.getAllTrackers()) {
Profile.EntityGroup eg = tracker.getEntityGroup();
eg.incrementVersion();
final Collection<EntityProto> writtenEntities = tracker.getWrittenEntities();
final Collection<Reference> deletedKeys = tracker.getDeletedKeys();
LocalDatastoreJob job =
new WriteJob(
highRepJobPolicy, eg, profile, commitTimestamp, writtenEntities, deletedKeys);
addTo(totalCost, job.calculateJobCost());
eg.addJob(job);
deleted += deletedKeys.size();
written += writtenEntities.size();
for (EntityProto writtenEntity : writtenEntities) {
response
.addVersion()
.setRootEntityKey(writtenEntity.getKey())
.setVersion(job.getMutationTimestamp(writtenEntity.getKey()));
}
for (Reference deletedKey : deletedKeys) {
response
.addVersion()
.setRootEntityKey(deletedKey)
.setVersion(job.getMutationTimestamp(deletedKey));
}
}
logger.fine(
"committed: "
+ written
+ " puts, "
+ deleted
+ " deletes in "
+ liveTxn.getAllTrackers().size()
+ " entity groups");
response.setCost(totalCost);
return response;
}
@SuppressWarnings("unused") // status
public VoidProto rollback(Status status, Transaction req) {
profiles.get(req.getApp()).removeTxn(req.getHandle());
return VoidProto.getDefaultInstance();
}
// index operations
@SuppressWarnings("unused") // status
public Integer64Proto createIndex(Status status, CompositeIndex req) {
throw new UnsupportedOperationException("Not yet implemented.");
}
@SuppressWarnings("unused") // status
public VoidProto updateIndex(Status status, CompositeIndex req) {
throw new UnsupportedOperationException("Not yet implemented.");
}
private CompositeIndex wrapIndexInCompositeIndex(String app, @Nullable Index index) {
CompositeIndex ci =
new CompositeIndex().setAppId(app).setState(CompositeIndex.State.READ_WRITE);
if (index != null) {
ci.setDefinition(index);
}
return ci;
}
@SuppressWarnings("unused") // status
public CompositeIndices getIndices(Status status, StringProto req) {
Set<Index> indexSet = LocalCompositeIndexManager.getInstance().getIndexes();
CompositeIndices answer = new CompositeIndices();
for (Index index : indexSet) {
CompositeIndex ci = wrapIndexInCompositeIndex(req.getValue(), index);
answer.addIndex(ci);
}
return answer;
}
@SuppressWarnings("unused") // status
public VoidProto deleteIndex(Status status, CompositeIndex req) {
throw new UnsupportedOperationException("Not yet implemented.");
}
@SuppressWarnings("unused") // status
public AllocateIdsResponse allocateIds(Status status, AllocateIdsRequest req) {
globalLock.readLock().lock();
try {
return allocateIdsImpl(req);
} finally {
globalLock.readLock().unlock();
}
}
private AllocateIdsResponse allocateIdsImpl(AllocateIdsRequest req) {
if (req.hasSize()) {
if (req.getSize() > MAX_BATCH_GET_KEYS) { // 1 billion
throw newError(
ErrorCode.BAD_REQUEST,
"cannot get more than " + MAX_BATCH_GET_KEYS + " keys in a single call");
}
// The local implementation just ignores the path in the request
// because we have a single, global counter.
// Suppose currentId is 100 and the request is for 5.
// We'll return a range of 100 - 104, leaving entityIdSequential with a value of 105.
// Now the next request asks for 10.
// We'll return a range of 105 - 114, leaving entityIdSequential with a value of 115.
long start = entityIdSequential.getAndAdd(req.getSize());
return new AllocateIdsResponse().setStart(start).setEnd(start + req.getSize() - 1);
} else {
long current = entityIdSequential.get();
while (current <= req.getMax()) {
if (entityIdSequential.compareAndSet(current, req.getMax() + 1)) {
break;
}
current = entityIdSequential.get();
}
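// For example, if the counter is at 100 and req.getMax() is 200, the counter jumps
// to 201 and the reserved range is [100, 200]. If the counter has already passed
// the requested max, nothing is reserved and the returned end is less than start.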
return new AllocateIdsResponse()
.setStart(current)
.setEnd(Math.max(req.getMax(), current - 1));
}
}
static long toScatteredId(long counter) {
if (counter >= MAX_SCATTERED_COUNTER) {
throw newError(ErrorCode.INTERNAL_ERROR, "Maximum scattered ID counter value exceeded");
}
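// Example: counter = 1 shifts to 1 << 13; Long.reverse() moves that single bit to
// position 63 - 13 = 50, so the resulting id is 2^52 + 2^50. Reversing the bits
// spreads consecutive counter values across the scattered id space rather than
// clustering them.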
return MAX_SEQUENTIAL_ID + 1 + Long.reverse(counter << SCATTER_SHIFT);
}
Profile getOrCreateProfile(String app) {
synchronized (profiles) {
checkArgument(app != null && app.length() > 0, "appId not set");
Profile profile = profiles.get(app);
if (profile == null) {
profile = new Profile();
profiles.put(app, profile);
}
return profile;
}
}
Extent getOrCreateExtent(Profile profile, String kind) {
Map<String, Extent> extents = profile.getExtents();
synchronized (extents) {
Extent e = extents.get(kind);
if (e == null) {
e = new Extent();
extents.put(kind, e);
}
return e;
}
}
private void load() {
if (noStorage) {
return;
}
File backingStoreFile = new File(backingStore);
String path = backingStoreFile.getAbsolutePath();
if (!backingStoreFile.exists()) {
logger.log(
Level.INFO, "The backing store, " + path + ", does not exist. " + "It will be created.");
backingStoreFile.getParentFile().mkdirs();
return;
}
long start = clock.getCurrentTime();
try (ObjectInputStream objectIn =
new ObjectInputStream(new BufferedInputStream(new FileInputStream(backingStore)))) {
long version = -objectIn.readLong();
if (version < 0) {
// It's not the version code after all, it's the sequential ID counter of a persisted
// local datastore from a time before version codes and scattered IDs.
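// (The current format presumably persists the version negated; a legacy counter is
// always positive, so it reads back here as a negative "version".)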
entityIdSequential.set(-version);
} else {
entityIdSequential.set(objectIn.readLong());
entityIdScattered.set(objectIn.readLong());
}
@SuppressWarnings("unchecked")
Map<String, Profile> profilesOnDisk = (Map<String, Profile>) objectIn.readObject();
synchronized (profiles) {
profiles.clear();
profiles.putAll(profilesOnDisk);
}
long end = clock.getCurrentTime();
logger.log(Level.INFO, "Time to load datastore: " + (end - start) + " ms");
} catch (FileNotFoundException e) {
// Should never happen, because we just checked for it
logger.log(Level.SEVERE, "Failed to find the backing store, " + path);
} catch (IOException | ClassNotFoundException e) {
logger.log(Level.INFO, "Failed to load from the backing store, " + path, e);
}
}
/** A profile for an application. Contains all the Extents owned by the application. */
static class Profile implements Serializable {
private static final long MINIMUM_VERSION = 1;
/* Default serial version from 195 SDK. */
private static final long serialVersionUID = -4667954926644227154L;
/**
* An EntityGroup maintains a consistent view of a profile during a transaction. All access to
* an entity group should be synchronized on the enclosing profile.
*/
class EntityGroup {
private final Path path;
private final AtomicLong version = new AtomicLong();
private final WeakHashMap<LiveTxn, Profile> snapshots = new WeakHashMap<LiveTxn, Profile>();
// Using a LinkedList because we insert at the end and remove from the front.
private final LinkedList<LocalDatastoreJob> unappliedJobs =
new LinkedList<LocalDatastoreJob>();
private EntityGroup(Path path) {
this.path = path;
}
public long getVersion() {
return version.get();
}
/**
* Mark an entity group as modified. If there are open transactions for the current version of
* the entity group, this will take a snapshot of the profile which the transactions will
* continue to read from. You must call this before actually modifying the entities, or the
* snapshot will be incorrect.
*/
public void incrementVersion() {
long oldVersion = version.getAndIncrement();
Profile snapshot = null;
for (Map.Entry<LiveTxn, Profile> entry : snapshots.entrySet()) {
LiveTxn txn = entry.getKey();
if (txn.trackEntityGroup(this).getEntityGroupVersion() == oldVersion) {
if (snapshot == null) {
snapshot = takeSnapshot();
}
entry.setValue(snapshot);
}
}
}
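// Minimal usage sketch (hypothetical caller): a writer must bump the version
// before touching any entity so that open transactions keep reading the
// pre-write state:
//
//   synchronized (profile) {
//     entityGroup.incrementVersion(); // snapshots the profile for open txns
//     extent.putEntity(newEntity);    // now safe to mutate
//   }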
/**
* Get an entity from the profile by key.
*
* <p>If there's a transaction, it reads from the transaction's snapshot instead of the
* profile.
*/
public VersionedEntity get(
@Nullable LiveTxn liveTxn, Reference key, boolean eventualConsistency) {
if (!eventualConsistency) {
// User wants strongly consistent results so we must roll forward.
rollForwardUnappliedJobs();
}
Profile profile = getSnapshot(liveTxn);
Map<String, Extent> extents = profile.getExtents();
Extent extent = extents.get(getKind(key));
if (extent != null) {
return extent.getEntityByKey(key);
}
return null;
}
public EntityGroupTracker addTransaction(LiveTxn txn) {
EntityGroupTracker tracker = txn.trackEntityGroup(this);
if (!snapshots.containsKey(txn)) {
snapshots.put(txn, null);
}
return tracker;
}
public void removeTransaction(LiveTxn txn) {
snapshots.remove(txn);
}
private Profile getSnapshot(@Nullable LiveTxn txn) {
if (txn == null) {
return Profile.this;
}
Profile snapshot = snapshots.get(txn);
if (snapshot == null) {
return Profile.this;
} else {
return snapshot;
}
}
private Profile takeSnapshot() {
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bos);
oos.writeObject(Profile.this);
oos.close();
ByteArrayInputStream bis = new ByteArrayInputStream(bos.toByteArray());
ObjectInputStream ois = new ObjectInputStream(bis);
return (Profile) ois.readObject();
} catch (IOException | ClassNotFoundException ex) {
throw new RuntimeException("Unable to take transaction snapshot.", ex);
}
}
@Override
public String toString() {
return path.toString();
}
/**
* Returns the latest unapplied {@link LocalDatastoreJob} for this entity group. If there are
* no unapplied jobs, returns {@code null}.
*/
public @Nullable LocalDatastoreJob getLastJob() {
return unappliedJobs.isEmpty() ? null : unappliedJobs.getLast();
}
public void addJob(LocalDatastoreJob job) {
// need to apply existing unapplied jobs before we can try to apply
// this one
unappliedJobs.addLast(job);
getGroupsWithUnappliedJobs().add(path);
maybeRollForwardUnappliedJobs();
}
/** Rolls forward all unapplied jobs for the entity group without consulting the policy. */
public void rollForwardUnappliedJobs() {
if (!unappliedJobs.isEmpty()) {
for (LocalDatastoreJob applyJob : unappliedJobs) {
applyJob.apply();
}
unappliedJobs.clear();
getGroupsWithUnappliedJobs().remove(path);
logger.fine("Rolled forward unapplied jobs for " + path);
}
}
/**
* Attempts to roll forward all unapplied jobs for the entity group, consulting the policy for
* each job to see if it should proceed. Since jobs must be applied in order, we stop applying
* as soon as we are unable to apply a single job.
*/
public void maybeRollForwardUnappliedJobs() {
int jobsAtStart = unappliedJobs.size();
logger.fine(
String.format("Maybe rolling forward %d unapplied jobs for %s.", jobsAtStart, path));
int applied = 0;
for (Iterator<LocalDatastoreJob> iter = unappliedJobs.iterator(); iter.hasNext(); ) {
if (iter.next().tryApply()) {
iter.remove();
applied++;
} else {
// Jobs must apply in order, so if we get one failure we have to
// stop.
break;
}
}
if (unappliedJobs.isEmpty()) {
getGroupsWithUnappliedJobs().remove(path);
}
logger.fine(
String.format("Rolled forward %d of %d jobs for %s", applied, jobsAtStart, path));
}
public Key pathAsKey() {
Reference entityGroupRef = new Reference();
entityGroupRef.setPath(path);
return LocalCompositeIndexManager.KeyTranslator.createFromPb(entityGroupRef);
}
}
// synchronized to ensure a consistent view of all the entities
public synchronized List<VersionedEntity> getAllEntities() {
List<VersionedEntity> entities = new ArrayList<>();
for (Extent extent : extents.values()) {
entities.addAll(extent.getAllEntities());
}
return entities;
}
private long lastCommitTimestamp = MINIMUM_VERSION;
private final Map<String, Extent> extents =
Collections.synchronizedMap(new HashMap<String, Extent>());
// These four fields are transient to preserve the serialized format.
// Since uncommitted transactions are not persisted it is
// fine to reset the entity group version numbers when
// loading the Datastore from disk.
// All access to this map must be synchronized. We initialize it lazily
// because initializers for transient fields don't run when an object is
// deserialized.
private transient Map<Path, EntityGroup> groups;
// All access to this set must be synchronized. We initialize it lazily
// because initializers for transient fields don't run when an object is
// deserialized.
private transient Set<Path> groupsWithUnappliedJobs;
/** The set of outstanding query results, keyed by query id (also referred to as "cursor"). */
private transient Map<Long, LiveQuery> queries;
/** The set of active transactions, keyed by transaction id (also referred to as "handle"). */
private transient Map<Long, LiveTxn> txns;
/**
* Returns the current timestamp of the profile. This is equal to the commit timestamp of the
* last job added to the profile.
*/
public long getReadTimestamp() {
return lastCommitTimestamp;
}
/**
* Returns a commit timestamp for a newly created Job. This increments the read timestamp of the
* profile.
*/
private long incrementAndGetCommitTimestamp() {
return ++lastCommitTimestamp;
}
/**
* Returns the set of all {@code Extents} for this {@code Profile}, organized by kind. The
* returned {@code Map} is synchronized.
*
* @return map of {@code Extents} organized by kind
*/
public Map<String, Extent> getExtents() {
return extents;
}
public synchronized EntityGroup getGroup(Path path) {
Map<Path, EntityGroup> map = getGroups();
EntityGroup group = map.get(path);
if (group == null) {
group = new EntityGroup(path);
map.put(path, group);
}
return group;
}
/**
* The "groomer" for the local datastore. Rather than relying on a background thread, we instead
* implement a method that iterates over all entity groups with unapplied jobs and gives each
* entity group the opportunity to apply these jobs. This makes grooming behavior independent of
* time and instead ties it to operations that users control, which makes tests much easier to
* write.
*/
private synchronized void groom() {
// Need to iterate over a copy because grooming manipulates the list
// we're iterating over. Note that a consistent order is necessary to
// get consistent grooming.
for (Path path : new LinkedHashSet<>(getGroupsWithUnappliedJobs())) {
EntityGroup eg = getGroup(path);
eg.maybeRollForwardUnappliedJobs();
}
}
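// Sketch of how the groomer above is exercised (hedged; the actual call
// sites live elsewhere in this class): any user-driven operation that runs
// groom() gives the job policy another chance to apply outstanding writes,
// so a test can settle eventual-consistency effects by simply issuing
// another operation rather than waiting on a timer.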
public synchronized LiveQuery getQuery(long cursor) {
return safeGetFromExpiringMap(getQueries(), cursor, QUERY_NOT_FOUND);
}
public synchronized void addQuery(long cursor, LiveQuery query) {
getQueries().put(cursor, query);
}
private synchronized LiveQuery removeQuery(long cursor) {
LiveQuery query = getQuery(cursor);
queries.remove(cursor);
return query;
}
private synchronized Map<Long, LiveQuery> getQueries() {
if (queries == null) {
queries = new HashMap<>();
}
return queries;
}
public synchronized LiveTxn getTxn(long handle) {
return safeGetFromExpiringMap(getTxns(), handle, TRANSACTION_NOT_FOUND);
}
@Nullable
public synchronized LiveTxn getTxnQuietly(long handle) {
return getTxns().get(handle);
}
public synchronized void addTxn(long handle, LiveTxn txn) {
getTxns().put(handle, txn);
}
private synchronized LiveTxn removeTxn(long handle) {
LiveTxn txn = getTxn(handle);
txn.close();
txns.remove(handle);
return txn;
}
private synchronized Map<Long, LiveTxn> getTxns() {
if (txns == null) {
txns = new HashMap<>();
}
return txns;
}
private synchronized Map<Path, EntityGroup> getGroups() {
if (groups == null) {
groups = new LinkedHashMap<>();
}
return groups;
}
private synchronized Set<Path> getGroupsWithUnappliedJobs() {
if (groupsWithUnappliedJobs == null) {
groupsWithUnappliedJobs = new LinkedHashSet<>();
}
return groupsWithUnappliedJobs;
}
}
/**
* An EntityProto with its associated version number. Version numbers are monotonically increasing
* with every modification to the entity.
*/
@AutoValue
abstract static class VersionedEntity {
public abstract EntityProto entityProto();
public abstract long version();
public static VersionedEntity create(EntityProto entityProto, long version) {
return new AutoValue_LocalDatastoreService_VersionedEntity(entityProto, version);
}
}
/** The set of all {@link EntityProto EntityProtos} of a single kind, organized by id. */
static class Extent implements Serializable {
/**
* Uses a LinkedHashMap to facilitate testing. Yes, there's a cost to this but this is the
* datastore stub so it should be tolerable.
*
* <p>Entities are identified by the key of the proto, which is guaranteed to be unique.
*
* <p>Non-final to permit manual initialization during deserialization. Serialization uses proto
* format for EntityProtos, instead of default Java format, to keep SDK local db backwards
* compatible as proto internals change.
*/
private Map<Reference, EntityProto> entities = new LinkedHashMap<>();
/**
* A mapping from entity keys to last updated versions. If an entity exists in the extent, then
* it is guaranteed to have a version in this map. We use MINIMUM_VERSION to represent entities
* for which the version number is unknown, for example because they were restored from a legacy
* Profile serialization format.
*/
private Map<Reference, Long> versions = new HashMap<>();
/* Default serial version from 195 SDK. */
private static final long serialVersionUID = 1199103439874512494L;
/**
* The property name used to store the version of an entity when persisting the datastore to
* disk.
*/
private static final String ENTITY_VERSION_RESERVED_PROPERTY = "__entity_version__";
public Collection<VersionedEntity> getAllEntities() {
ImmutableList.Builder<VersionedEntity> builder = ImmutableList.builder();
for (Reference key : entities.keySet()) {
builder.add(getEntityByKey(key));
}
return builder.build();
}
public Collection<EntityProto> getAllEntityProtos() {
return entities.values();
}
public VersionedEntity getEntityByKey(Reference key) {
EntityProto entity = entities.get(key);
Long version = versions.get(key);
return (entity == null) ? null : VersionedEntity.create(entity, version);
}
public EntityProto getEntityProtoByKey(Reference key) {
return entities.get(key);
}
public void removeEntity(Reference key) {
versions.remove(key);
entities.remove(key);
}
public void putEntity(VersionedEntity entity) {
Reference key = entity.entityProto().getKey();
entities.put(key, entity.entityProto());
versions.put(key, entity.version());
}
/**
* Serializes a given {@link VersionedEntity} to a byte array, used by the {@link Serializable}
* implementation of {@link Extent}.
*/
private byte[] serializeEntity(VersionedEntity entity) {
EntityProto stored = new EntityProto();
stored.copyFrom(entity.entityProto());
Property version = stored.addProperty();
version.setName(ENTITY_VERSION_RESERVED_PROPERTY);
version.setValue(new PropertyValue().setInt64Value(entity.version()));
return stored.toByteArray();
}
/**
* Deserializes a {@link VersionedEntity} from a byte array, as returned by {@link
* #serializeEntity(VersionedEntity)}.
*/
private VersionedEntity deserializeEntity(byte[] serialized) throws IOException {
EntityProto entityProto = new EntityProto();
if (!entityProto.parseFrom(serialized)) {
throw new IOException("Corrupt or incomplete EntityProto");
}
long version = Profile.MINIMUM_VERSION;
for (Iterator<Property> iter = entityProto.mutablePropertys().iterator(); iter.hasNext(); ) {
Property property = iter.next();
if (property.getName().equals(ENTITY_VERSION_RESERVED_PROPERTY)) {
version = property.getValue().getInt64Value();
iter.remove();
break;
}
}
return VersionedEntity.create(entityProto, version);
}
private void writeObject(ObjectOutputStream out) throws IOException {
// We must call putFields() and writeFields() to write the Extent Object header.
// This permits us to later call readFields() to try reading the legacy format.
out.putFields();
out.writeFields();
out.writeLong(CURRENT_STORAGE_VERSION);
out.writeInt(entities.size());
for (VersionedEntity entity : getAllEntities()) {
out.writeObject(serializeEntity(entity));
}
}
private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
ObjectInputStream.GetField fields = in.readFields();
if (fields.get("entities", null) != null) {
// Legacy storage format.
@SuppressWarnings("unchecked")
LinkedHashMap<Reference, EntityProto> legacy =
(LinkedHashMap<Reference, EntityProto>) fields.get("entities", null);
entities = legacy;
versions = new HashMap<>();
for (Reference key : entities.keySet()) {
versions.put(key, Profile.MINIMUM_VERSION);
}
} else {
entities = new LinkedHashMap<>();
versions = new HashMap<>();
long version = in.readLong();
if (version == CURRENT_STORAGE_VERSION) {
int entityCount = in.readInt();
for (int i = 0; i < entityCount; ++i) {
VersionedEntity entity = deserializeEntity((byte[]) in.readObject());
Reference key = entity.entityProto().getKey();
entities.put(key, entity.entityProto());
versions.put(key, entity.version());
}
} else {
throw new IOException(String.format("Unsupported storage format [%d]", version));
}
}
}
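// On-disk record layout implied by writeObject/readObject above:
//
//   [empty serialized field block]
//   long   CURRENT_STORAGE_VERSION
//   int    n                        // number of entities
//   n x    byte[]                   // proto-encoded EntityProto with the
//                                   // __entity_version__ property appended
//
// Legacy files instead carry a non-null "entities" field in the field block,
// which readObject detects and migrates using MINIMUM_VERSION for versions.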
}
/** A {@link LocalDatastoreJob} that puts and deletes a set of entities. */
class WriteJob extends LocalDatastoreJob {
private final Profile profile;
// TODO: remove this field and rely instead on the list of unappliedJobs in EntityGroup.
@Nullable private LocalDatastoreJob previousJob;
private final Map<Reference, EntityProto> puts;
private final Set<Reference> deletes;
WriteJob(
HighRepJobPolicy jobPolicy,
Profile.EntityGroup entityGroup,
Profile profile,
Iterable<EntityProto> puts,
Iterable<Reference> deletes) {
this(
jobPolicy,
entityGroup,
profile,
checkNotNull(profile).incrementAndGetCommitTimestamp(),
puts,
deletes);
}
WriteJob(
HighRepJobPolicy jobPolicy,
Profile.EntityGroup entityGroup,
Profile profile,
long commitTimestamp,
Iterable<EntityProto> puts,
Iterable<Reference> deletes) {
super(jobPolicy, entityGroup.pathAsKey(), commitTimestamp);
this.profile = checkNotNull(profile);
this.previousJob = entityGroup.getLastJob();
this.deletes = ImmutableSet.copyOf(deletes);
// The list of EntityProtos to write might contain duplicates. In that case, the last one
// written wins.
Map<Reference, EntityProto> dedupePuts = new HashMap<>();
for (EntityProto put : puts) {
dedupePuts.put(put.getKey(), put);
}
this.puts = ImmutableMap.copyOf(dedupePuts);
}
@Override
EntityProto getEntity(Reference key) {
if (deletes.contains(key)) {
return null;
} else if (puts.containsKey(key)) {
return puts.get(key);
} else {
return getSnapshotEntity(key);
}
}
@Override
EntityProto getSnapshotEntity(Reference key) {
if (previousJob != null) {
return previousJob.getEntity(key);
} else {
Extent extent = profile.getExtents().get(getKind(key));
return extent == null ? null : extent.getEntityProtoByKey(key);
}
}
@Override
Cost calculateJobCost() {
Cost totalCost = new Cost();
// Deletes
for (Reference key : deletes) {
EntityProto oldEntity = getSnapshotEntity(key);
if (oldEntity != null) {
addTo(totalCost, costAnalysis.getWriteCost(oldEntity));
}
}
// Puts
for (EntityProto entity : puts.values()) {
EntityProto oldEntity = getSnapshotEntity(entity.getKey());
addTo(totalCost, costAnalysis.getWriteOps(oldEntity, entity));
}
return totalCost;
}
@Override
void applyInternal() {
// Just before applying this job, the profile is all caught up so we can remove the back link
// to the previous job. Keeping the link would lead to OOM in case we have a policy that
// always leaves a job unapplied per entity group.
previousJob = null;
for (Reference key : deletes) {
Extent extent = profile.getExtents().get(getKind(key));
if (extent != null) {
extent.removeEntity(key);
}
}
for (Map.Entry<Reference, EntityProto> entry : puts.entrySet()) {
if (!isNoOpWrite(entry.getKey())) {
Extent extent = getOrCreateExtent(profile, getKind(entry.getKey()));
extent.putEntity(VersionedEntity.create(entry.getValue(), timestamp));
}
}
dirty = true;
}
@Override
public long getMutationTimestamp(Reference key) {
if (isNoOpWrite(key)) {
if (previousJob != null) {
return previousJob.getMutationTimestamp(key);
} else {
Extent extent = profile.getExtents().get(getKind(key));
if (extent != null) {
VersionedEntity entity = extent.getEntityByKey(key);
if (entity != null) {
return entity.version();
}
}
return profile.getReadTimestamp();
}
} else {
return timestamp;
}
}
/**
* Returns true if this job does not modify the state of the entity with the given {@link
* Reference key}.
*/
public boolean isNoOpWrite(Reference key) {
if (deletes.contains(key)) {
return getSnapshotEntity(key) == null;
} else if (puts.containsKey(key)) {
return equalProperties(getSnapshotEntity(key), puts.get(key));
} else {
return true;
}
}
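// Example (illustrative): deleting a key that is already absent from the
// snapshot, or re-putting an entity whose properties are unchanged, is a
// no-op write. getMutationTimestamp then reports the entity's previous
// version (or the profile's read timestamp) rather than this job's commit
// timestamp.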
}
static class HasCreationTime {
private final long creationTime;
HasCreationTime(long creationTime) {
this.creationTime = creationTime;
}
long getCreationTime() {
return creationTime;
}
}
/** An outstanding query. */
class LiveQuery extends HasCreationTime {
class DecompiledCursor {
final EntityProto cursorEntity;
final boolean inclusive;
final boolean isStart;
public DecompiledCursor(CompiledCursor compiledCursor, boolean isStart) {
// The cursor is unset.
if (compiledCursor == null) {
cursorEntity = null;
inclusive = false;
this.isStart = isStart;
return;
}
IndexPostfix position = compiledCursor.getPostfixPosition();
// The cursor has been set but without any position data. Treating as the default start
// cursor places it before all entities.
if (!(position.hasKey() || position.indexValueSize() > 0)) {
cursorEntity = null;
inclusive = false;
this.isStart = true;
return;
}
cursorEntity = decompilePosition(position);
inclusive = position.isBefore();
this.isStart = isStart;
}
public int getPosition(EntityProtoComparator entityComparator) {
if (cursorEntity == null) {
return isStart ? 0 : Integer.MAX_VALUE;
}
int loc = Collections.binarySearch(entities, cursorEntity, entityComparator);
if (loc < 0) { // savedEntity doesn't exist
return -(loc + 1); // insertion_point
} else { // savedEntity exists
return inclusive ? loc : loc + 1;
}
}
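// Worked example with hypothetical data: with entities sorted as [A, B, D]
// and a cursor entity C that is not in the list, binarySearch returns
// -(insertionPoint + 1) = -3, so the cursor resolves to position 2 (between
// B and D). If the cursor entity is B, at index 1, the position is 1 when
// inclusive and 2 otherwise.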
public EntityProto getCursorEntity() {
return cursorEntity;
}
}
private final Set<String> orderProperties;
private final Set<String> projectedProperties;
private final Set<String> groupByProperties;
private final Query query;
private final List<EntityProto> entities;
private final Map<Reference, Long> versions;
private EntityProto lastResult = null;
private int remainingOffset = 0;
public LiveQuery(
List<EntityProto> entities,
@Nullable Map<Reference, Long> versions,
Query query,
EntityProtoComparator entityComparator,
Clock clock) {
super(clock.getCurrentTime());
if (entities == null) {
throw new NullPointerException("entities cannot be null");
}
this.query = query;
this.remainingOffset = query.getOffset();
orderProperties = new HashSet<>();
for (Query.Order order : entityComparator.getAdjustedOrders()) {
if (!Entity.KEY_RESERVED_PROPERTY.equals(order.getProperty())) {
orderProperties.add(order.getProperty());
}
}
groupByProperties = Sets.newHashSet(query.groupByPropertyNames());
projectedProperties = Sets.newHashSet(query.propertyNames());
this.entities = Lists.newArrayList(entities);
ImmutableMap.Builder<Reference, Long> versionsBuilder = ImmutableMap.builder();
if (this.projectedProperties.isEmpty() && !this.query.isKeysOnly() && versions != null) {
for (EntityProto entity : this.entities) {
Reference key = entity.getKey();
checkArgument(versions.containsKey(key));
versionsBuilder.put(key, versions.get(key));
}
}
this.versions = versionsBuilder.buildOrThrow();
// Apply cursors
DecompiledCursor startCursor =
new DecompiledCursor(query.hasCompiledCursor() ? query.getCompiledCursor() : null, true);
DecompiledCursor endCursor =
new DecompiledCursor(
query.hasEndCompiledCursor() ? query.getEndCompiledCursor() : null, false);
lastResult = startCursor.getCursorEntity();
int endCursorPos = Math.min(endCursor.getPosition(entityComparator), this.entities.size());
int startCursorPos = Math.min(endCursorPos, startCursor.getPosition(entityComparator));
if (endCursorPos < this.entities.size()) {
this.entities.subList(endCursorPos, this.entities.size()).clear();
}
this.entities.subList(0, startCursorPos).clear();
// Apply limit.
if (query.hasLimit()) {
int toIndex =
(int) Math.min((long) query.getLimit() + query.getOffset(), Integer.MAX_VALUE);
if (toIndex < this.entities.size()) {
this.entities.subList(toIndex, this.entities.size()).clear();
}
}
}
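// Windowing sketch for the constructor above (hypothetical numbers): given
// 10 matching entities, a start cursor resolving to position 2, an end
// cursor resolving to position 8, offset = 1 and limit = 4, we first trim to
// indices [2, 8) and then cap the remaining 6 entities at limit + offset = 5;
// the offset itself is consumed later by offsetResults().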
private int offsetResults(int offset) {
int realOffset = Math.min(Math.min(offset, entities.size()), MAX_QUERY_RESULTS);
if (realOffset > 0) {
lastResult = entities.get(realOffset - 1);
entities.subList(0, realOffset).clear();
remainingOffset -= realOffset;
}
return realOffset;
}
public QueryResult nextResult(Integer offset, Integer count, boolean compile) {
QueryResult result = new QueryResult();
if (count == null) {
if (query.hasCount()) {
count = query.getCount();
} else {
count = DEFAULT_BATCH_SIZE;
}
}
if (offset != null && offset != remainingOffset) {
throw newError(ErrorCode.BAD_REQUEST, "offset mismatch");
}
offset = remainingOffset;
if (offset > 0) {
result.setSkippedResults(offsetResults(offset));
if (compile) {
result
.getMutableSkippedResultsCompiledCursor()
.setPostfixPosition(compilePosition(lastResult));
}
}
if (offset == result.getSkippedResults()) {
// Offset has been satisfied so return real results
List<EntityProto> entities = removeEntities(Math.min(MAX_QUERY_RESULTS, count));
for (EntityProto entity : entities) {
result.mutableResults().add(postProcessEntityForQuery(entity));
if (!versions.isEmpty()) {
result.mutableVersions().add(versions.get(entity.getKey()));
}
if (compile) {
result.addResultCompiledCursor().setPostfixPosition(compilePosition(entity));
}
}
}
result.setMoreResults(!entities.isEmpty());
result.setKeysOnly(query.isKeysOnly());
result.setIndexOnly(query.propertyNameSize() > 0);
if (compile) {
result.getMutableCompiledCursor().setPostfixPosition(compilePosition(lastResult));
}
return result;
}
/** Removes and returns the given number of entities from the result set. */
private List<EntityProto> removeEntities(int count) {
List<EntityProto> subList = entities.subList(0, Math.min(count, entities.size()));
if (!subList.isEmpty()) {
lastResult = subList.get(subList.size() - 1);
}
List<EntityProto> results = new ArrayList<>(subList);
subList.clear();
return results;
}
/** Converts an entity to the format requested by the user. */
private EntityProto postProcessEntityForQuery(EntityProto entity) {
EntityProto result;
if (!projectedProperties.isEmpty()) {
result = new EntityProto();
result.getMutableKey().copyFrom(entity.getKey());
result.getMutableEntityGroup();
Set<String> seenProps = Sets.newHashSetWithExpectedSize(query.propertyNameSize());
for (Property prop : entity.propertys()) {
if (projectedProperties.contains(prop.getName())) {
// Dev stubs should have already removed multi-valued properties.
if (!seenProps.add(prop.getName())) {
throw newError(
ErrorCode.INTERNAL_ERROR, "LocalDatastoreServer produced invalid results.");
}
result
.addProperty()
.setName(prop.getName())
.setMeaning(Property.Meaning.INDEX_VALUE)
.setMultiple(false)
.getMutableValue()
.copyFrom(prop.getValue());
}
}
} else if (query.isKeysOnly()) {
result = new EntityProto();
result.getMutableKey().copyFrom(entity.getKey());
result.getMutableEntityGroup();
} else {
result = entity.clone();
}
postprocessEntity(result);
return result;
}
private EntityProto decompilePosition(IndexPostfix position) {
EntityProto result = new EntityProto();
if (position.hasKey()) {
if (query.hasKind()) {
String queryKind = query.getKind();
String cursorKind = getLast(position.getKey().getPath().elements()).getType();
if (!queryKind.equals(cursorKind)) {
// This is not technically a problem, but we try to throw exceptions in as many
// 'unsupported' use cases as possible
throw newError(
ErrorCode.BAD_REQUEST,
String.format(
"The query kind is %s but cursor.postfix_position.key kind is %s.",
queryKind, cursorKind));
}
}
result.setKey(position.getKey());
}
Set<String> cursorProperties =
groupByProperties.isEmpty() ? orderProperties : groupByProperties;
Set<String> remainingProperties = new HashSet<>(cursorProperties);
for (IndexPostfix_IndexValue prop : position.indexValues()) {
if (!cursorProperties.contains(prop.getPropertyName())) {
// This is not technically a problem, but the datastore will likely
// throw an exception in this case.
throw newError(ErrorCode.BAD_REQUEST, "cursor does not match query");
}
remainingProperties.remove(prop.getPropertyName());
result.addProperty().setName(prop.getPropertyName()).setValue(prop.getValue());
}
if (!remainingProperties.isEmpty()) {
throw newError(ErrorCode.BAD_REQUEST, "cursor does not match query");
}
return result;
}
private IndexPostfix compilePosition(EntityProto entity) {
/* TODO: This is not actually how compiled cursors behave in
* the real datastore. We are storing all values of relevant properties while
* the datastore would normally only store the index key (which contains
* the exact property values that caused an entity to appear in the result
* set). We can do this because the result set does not contain duplicates.
* However, if Query.distinct=false were supported, this would not work.
*/
IndexPostfix position = new IndexPostfix();
if (entity != null) {
// The cursor properties will be the group by properties, or the order properties if no
// group by properties exist.
Set<String> cursorProperties;
if (groupByProperties.isEmpty()) {
cursorProperties = Sets.newHashSet(orderProperties);
// We always want to add the key when we are not doing group by queries.
cursorProperties.add(Entity.KEY_RESERVED_PROPERTY);
position.setKey(entity.getKey());
} else {
cursorProperties = groupByProperties;
}
for (Property prop : entity.propertys()) {
if (cursorProperties.contains(prop.getName())) {
position.addIndexValue().setPropertyName(prop.getName()).setValue(prop.getValue());
}
}
// This entity has already been returned so exclude it.
position.setBefore(false);
CursorModernizer.setBeforeAscending(position, CursorModernizer.firstSortDirection(query));
}
return position;
}
public CompiledQuery compileQuery() {
CompiledQuery result = new CompiledQuery();
PrimaryScan scan = result.getMutablePrimaryScan();
// saving the entire original query as the index
scan.setIndexNameAsBytes(query.toByteArray());
return result;
}
}
/** An outstanding txn. All methods that operate on mutable members must be synchronized. */
static class LiveTxn extends HasCreationTime {
/** Defines the concurrency mechanism used by this transaction. */
enum ConcurrencyMode {
/**
* The transaction obtains exclusive locks only at commit time, but still guarantees the
* validity of the snapshot at every read.
*/
OPTIMISTIC,
/** The transaction obtains exclusive locks on the first read or write to an entity. */
PESSIMISTIC,
/**
* The transaction obtains shared locks on reads and upgrades them to exclusive locks at commit
* time.
*/
SHARED_READ,
/**
* The transaction can only perform reads and does not cause contention with any other transaction.
*/
READ_ONLY;
}
private final Map<Profile.EntityGroup, EntityGroupTracker> entityGroups = new HashMap<>();
// Transactional tasks to enqueue on commit (element type assumed to be the
// task queue's TaskQueueAddRequest proto).
private final List<TaskQueueAddRequest> actions = new ArrayList<>();
private final boolean allowMultipleEg;
private boolean failed = false;
private final ConcurrencyMode concurrencyMode;
// The original transaction mode set by the request.
private final TransactionMode originalTransactionMode;
LiveTxn(Clock clock, boolean allowMultipleEg, TransactionMode transactionMode) {
this(clock, allowMultipleEg, transactionMode, false);
}
LiveTxn(Clock clock, boolean allowMultipleEg, TransactionMode transactionMode, boolean failed) {
super(clock.getCurrentTime());
this.originalTransactionMode = transactionMode;
ConcurrencyMode concurrencyMode = toConcurrencyMode(transactionMode);
// TODO: maybe support those extra modes.
checkArgument(
concurrencyMode != ConcurrencyMode.PESSIMISTIC
&& concurrencyMode != ConcurrencyMode.SHARED_READ);
this.allowMultipleEg = allowMultipleEg;
this.concurrencyMode = concurrencyMode;
this.failed = failed;
}
/** Sets the entity group in a threadsafe way. */
synchronized EntityGroupTracker trackEntityGroup(Profile.EntityGroup newEntityGroup) {
if (newEntityGroup == null) {
throw new NullPointerException("entityGroup cannot be null");
}
checkFailed();
EntityGroupTracker tracker = entityGroups.get(newEntityGroup);
if (tracker == null) {
if (allowMultipleEg) {
if (entityGroups.size() >= MAX_EG_PER_TXN) {
throw newError(ErrorCode.BAD_REQUEST, TOO_MANY_ENTITY_GROUP_MESSAGE);
}
} else {
if (entityGroups.size() >= 1) {
Profile.EntityGroup entityGroup = entityGroups.keySet().iterator().next();
throw newError(
ErrorCode.BAD_REQUEST,
ENTITY_GROUP_MESSAGE + "found both " + entityGroup + " and " + newEntityGroup);
}
}
/* NOTE: if we start delaying snapshotting until the first read as
* in the real datastore, this check must move to the snapshotting
* logic. */
/* Check if the other entity groups are still unchanged, i.e. that we
* have a consistent snapshot (safe to do before creating the new
* tracker as we have a lock on the profile). */
for (EntityGroupTracker other : getAllTrackers()) {
try {
other.checkEntityGroupVersion();
} catch (ApplicationException e) {
/* Fail all future requests except rollback */
failed = true;
throw e;
}
}
tracker = new EntityGroupTracker(newEntityGroup, isReadOnly());
entityGroups.put(newEntityGroup, tracker);
}
return tracker;
}
synchronized Collection<EntityGroupTracker> getAllTrackers() {
return entityGroups.values();
}
synchronized void addActions(Collection<TaskQueueAddRequest> newActions) {
checkFailed();
if (actions.size() + newActions.size() > MAX_ACTIONS_PER_TXN) {
throw newError(
ErrorCode.BAD_REQUEST, "Too many messages, maximum allowed: " + MAX_ACTIONS_PER_TXN);
}
actions.addAll(newActions);
}
synchronized Collection<TaskQueueAddRequest> getActions() {
return new ArrayList<>(actions);
}
synchronized boolean isDirty() {
checkFailed();
for (EntityGroupTracker tracker : getAllTrackers()) {
if (tracker.isDirty()) {
return true;
}
}
return false;
}
synchronized void close() {
// Calling close is optional. Eventually the transaction will
// timeout and get GC'd since EntityGroup uses a WeakHashMap.
// Closing the transaction does prevent us from making an extra,
// useless snapshot. In particular, the transaction should
// be closed during commit before modifying any entities,
// to prevent an extra snapshot during each commit.
for (EntityGroupTracker tracker : getAllTrackers()) {
tracker.getEntityGroup().removeTransaction(this);
}
}
synchronized boolean isReadOnly() {
return concurrencyMode == ConcurrencyMode.READ_ONLY;
}
private void checkFailed() {
if (failed) {
throw newError(ErrorCode.BAD_REQUEST, TRANSACTION_CLOSED);
}
}
}
static class EntityGroupTracker {
private final Profile.EntityGroup entityGroup;
private final Long entityGroupVersion;
private final boolean readOnly;
// Keys of entities that we've written or deleted during the txn to this entity group.
// We use these to delay mutations until this txn is applied on this entity group.
private final Map<Reference, EntityProto> written = new HashMap<>();
private final Set<Reference> deleted = new HashSet<>();
EntityGroupTracker(Profile.EntityGroup entityGroup, boolean readOnly) {
this.entityGroup = entityGroup;
this.entityGroupVersion = entityGroup.getVersion();
this.readOnly = readOnly;
}
synchronized Profile.EntityGroup getEntityGroup() {
return entityGroup;
}
synchronized void checkEntityGroupVersion() {
if (!entityGroupVersion.equals(entityGroup.getVersion())) {
throw newError(ErrorCode.CONCURRENT_TRANSACTION, CONTENTION_MESSAGE);
}
}
synchronized Long getEntityGroupVersion() {
return entityGroupVersion;
}
/** Records that this entity was written in this transaction. */
synchronized void addWrittenEntity(EntityProto entity) {
checkState(!readOnly);
Reference key = entity.getKey();
written.put(key, entity);
// If the entity was deleted earlier in the transaction, this overrides
// that delete.
deleted.remove(key);
}
/** Records that this entity was deleted in this transaction. */
synchronized void addDeletedEntity(Reference key) {
checkState(!readOnly);
deleted.add(key);
// If the entity was written earlier in the transaction, this overrides
// that write.
written.remove(key);
}
synchronized Collection<EntityProto> getWrittenEntities() {
return new ArrayList<>(written.values());
}
synchronized Collection<Reference> getDeletedKeys() {
return new ArrayList<>(deleted);
}
synchronized boolean isDirty() {
return written.size() + deleted.size() > 0;
}
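// Last-mutation-wins sketch (illustrative): within a single transaction,
//
//   tracker.addWrittenEntity(e); // written = {k -> e}, deleted = {}
//   tracker.addDeletedEntity(k); // written = {},       deleted = {k}
//
// so only the delete for key k survives to be applied at commit.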
}
/**
* Broken out to support testing.
*
* @return Number of pruned objects.
*/
static int pruneHasCreationTimeMap(
long now, int maxLifetimeMs, Map<Long, ? extends HasCreationTime> hasCreationTimeMap) {
// Entries with values that were created before the deadline are removed.
long deadline = now - maxLifetimeMs;
int numPrunedObjects = 0;
for (Iterator<? extends Map.Entry<Long, ? extends HasCreationTime>> queryIt =
hasCreationTimeMap.entrySet().iterator();
queryIt.hasNext(); ) {
Map.Entry<Long, ? extends HasCreationTime> entry = queryIt.next();
HasCreationTime query = entry.getValue();
if (query.getCreationTime() < deadline) {
queryIt.remove();
numPrunedObjects++;
}
}
return numPrunedObjects;
}
// NB: This is mostly copied from //j/c/g/ah/datastore/SpecialProperty.
// However, that has Megastore dependencies, and thanks to the wonder of enums in Java,
// if there's a way to separate it out nicely, it's beyond me.
/**
* SpecialProperty encodes the information needed to know when and how to generate special
* properties. Special properties are entity properties that are created dynamically at store or
* load time.
*/
static enum SpecialProperty {
SCATTER(false, true, Meaning.BYTESTRING) {
/** Number of bytes of the hash to save */
private static final int SMALL_LENGTH = 2;
@SuppressWarnings("UnsafeFinalization")
@Override
PropertyValue getValue(EntityProto entity) {
int hashCode = 0;
for (Element elem : entity.getKey().getPath().elements()) {
if (elem.hasId()) {
// Convert to string and take the hash of that in order to get a
// nice distribution in the upper bits.
hashCode = (int) (hashCode ^ elem.getId());
} else if (elem.hasName()) {
hashCode ^= elem.getName().hashCode();
} else {
throw new IllegalStateException(
"Couldn't find name or id for entity " + entity.getKey());
}
}
// We're just using MD5 to get a good distribution of bits.
try {
byte[] digest =
MessageDigest.getInstance("MD5")
.digest(("" + hashCode).getBytes(StandardCharsets.UTF_8));
// The Charset doesn't much matter here since decimal digits and minus are
// the same in all common ones.
byte[] miniDigest = new byte[SMALL_LENGTH];
System.arraycopy(digest, 0, miniDigest, 0, SMALL_LENGTH);
if ((miniDigest[0] & 0x01) != 0) {
PropertyValue value = new PropertyValue();
value.setStringValueAsBytes(miniDigest);
return value;
}
} catch (NoSuchAlgorithmException ex) {
Logger logger = Logger.getLogger(SpecialProperty.class.getName());
logger.log(
Level.WARNING,
"Your JDK doesn't have an MD5 implementation, which is required for scatter "
+ " property support.");
}
return null;
}
};
/** The reserved name of the special property. */
private final String name;
/** Whether or not the property is populated in the EntityProto that is sent to the user. */
private final boolean isVisible;
/**
* Whether or not the special property's value is persisted in the datastore and indexed in
* native indices like normal values.
*/
private final boolean isStored;
private final Meaning meaning;
/**
* These two properties apply to the transition of the entity between the storage layer and the
* user layer. We always overwrite the property if we should add and it already exists.
* Stripping a property silently succeeds if the property doesn't already exist.
*
* <p>If isVisible == true, then we add the property when it transitions from storage to user.
* If isVisible == false, then we strip the property when it transitions from storage to user.
* If isStored == true, then we add the property when it transitions from user to storage. If
* isStored == false, then we strip the property when it transitions from user to storage.
*/
private SpecialProperty(boolean isVisible, boolean isStored, Meaning meaning) {
this.name = "__" + name().toLowerCase() + "__";
this.isVisible = isVisible;
this.isStored = isStored;
this.meaning = meaning;
}
/** Returns the reserved name of the special property. */
public final String getName() {
return name;
}
/** Returns true iff this property is populated in the EntityProto that is sent to the user. */
public final boolean isVisible() {
return isVisible;
}
/**
* Returns true iff this property is populated in the entity that is persisted to the datastore.
* This means that there is a built-in index for the property.
*/
final boolean isStored() {
return isStored;
}
/**
* Returns this property's value. Must be overridden for any property whose {@link #isStored}
* method returns true.
*
* @param entity the entity for which the value is being obtained
*/
PropertyValue getValue(EntityProto entity) {
throw new UnsupportedOperationException();
}
/** Returns a property with the given value for this SpecialProperty. */
Property getProperty(PropertyValue value) {
Property processedProp = new Property();
processedProp.setName(getName());
processedProp.setValue(value);
processedProp.setMultiple(false);
processedProp.setMeaning(meaning);
return processedProp;
}
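// Behavior sketch for SCATTER, as implemented above: an entity receives a
// __scatter__ property only when the low bit of the first byte of the MD5
// digest of its key hash is set (roughly half of all entities); the value is
// the first SMALL_LENGTH digest bytes. Since the property is stored but not
// visible, it is indexed like a normal value yet never returned to the user.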
}
Map<String, SpecialProperty> getSpecialPropertyMap() {
return Collections.unmodifiableMap(specialPropertyMap);
}
private void persist() {
globalLock.writeLock().lock();
try {
AccessController.doPrivileged(
new PrivilegedExceptionAction<Object>() {
@Override
public Object run() throws IOException {
if (noStorage || !dirty) {
return null;
}
long start = clock.getCurrentTime();
try (ObjectOutputStream objectOut =
new ObjectOutputStream(
new BufferedOutputStream(new FileOutputStream(backingStore)))) {
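// The version code is written negated so that load() can distinguish this
// format from legacy files, whose first long was the (positive) raw
// sequential ID counter rather than a version marker.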
objectOut.writeLong(-CURRENT_STORAGE_VERSION);
objectOut.writeLong(entityIdSequential.get());
objectOut.writeLong(entityIdScattered.get());
objectOut.writeObject(profiles);
}
dirty = false;
long end = clock.getCurrentTime();
logger.log(Level.INFO, "Time to persist datastore: " + (end - start) + " ms");
return null;
}
});
} catch (PrivilegedActionException e) {
Throwable t = e.getCause();
if (t instanceof IOException) {
logger.log(Level.SEVERE, "Unable to save the datastore", e);
} else {
throw new RuntimeException(e);
}
throw new RuntimeException(t);
} finally {
globalLock.writeLock().unlock();
}
}
/**
* Triggers the stale query sweeper with a simulated delay sufficient to expire all active
* queries.
*
* @return The number of queries removed.
*/
int expireOutstandingQueries() {
return removeStaleQueries(maxQueryLifetimeMs * 2 + clock.getCurrentTime());
}
/**
* Removes any stale queries at the given time.
*
* @param currentTime the current time to use when calculating if a query is stale
* @return the number of queries removed
*/
private int removeStaleQueries(long currentTime) {
int totalPruned = 0;
synchronized (profiles) {
for (Profile profile : profiles.values()) {
synchronized (profile.getQueries()) {
totalPruned +=
pruneHasCreationTimeMap(currentTime, maxQueryLifetimeMs, profile.getQueries());
}
}
}
return totalPruned;
}
/**
* Triggers the stale transaction sweeper with a simulated delay sufficient to expire all active
* transactions.
*
* @return The number of transactions removed.
*/
int expireOutstandingTransactions() {
return removeStaleTransactions(maxTransactionLifetimeMs * 2 + clock.getCurrentTime());
}
/**
* Removes any stale transactions at the given time.
*
* @param currentTime the current time to use when calculating if a transaction is stale
* @return the number of transactions removed
*/
private int removeStaleTransactions(long currentTime) {
int totalPruned = 0;
for (Profile profile : profiles.values()) {
synchronized (profile.getTxns()) {
totalPruned +=
pruneHasCreationTimeMap(currentTime, maxTransactionLifetimeMs, profile.getTxns());
}
}
return totalPruned;
}
/**
* Cleans up any actively running services.
*
* <p>This should only be called when the JVM is exiting.
*/
// @VisibleForTesting
static int cleanupActiveServices() {
int cleanedUpServices = 0;
logger.info("scheduler shutting down.");
for (LocalDatastoreService service : activeServices) {
cleanedUpServices++;
service.stop();
}
scheduler.shutdownNow();
logger.info("scheduler finished shutting down.");
return cleanedUpServices;
}
/** Returns the count of all actively running {@link LocalDatastoreService} instances. */
static int getActiveServiceCount() {
return activeServices.size();
}
public Double getDefaultDeadline(boolean isOfflineRequest) {
return DEFAULT_DEADLINE_SECONDS;
}
public Double getMaximumDeadline(boolean isOfflineRequest) {
return MAX_DEADLINE_SECONDS;
}
/** Returns true if the two given {@link EntityProto entities} have the same property values. */
static boolean equalProperties(@Nullable EntityProto entity1, EntityProto entity2) {
return entity1 != null
&& entity1.propertys().equals(entity2.propertys())
&& entity1.rawPropertys().equals(entity2.rawPropertys());
}
/** Adds {@code addMe} to {@code target}. */
private static void addTo(Cost target, Cost addMe) {
target.setEntityWrites(target.getEntityWrites() + addMe.getEntityWrites());
target.setIndexWrites(target.getIndexWrites() + addMe.getIndexWrites());
}
/** Returns the transaction {@link ConcurrencyMode} for the given V3 transaction mode. */
private static ConcurrencyMode toConcurrencyMode(TransactionMode transactionMode) {
switch (transactionMode) {
case UNKNOWN:
// TODO: map to SHARED_READ in spanner mode.
case READ_WRITE:
// TODO: map to SHARED_READ in spanner mode.
// TODO: map to PESSIMISTIC in megastore mode.
return ConcurrencyMode.OPTIMISTIC;
case READ_ONLY:
return ConcurrencyMode.READ_ONLY;
default:
throw new IllegalArgumentException("Unknown transaction mode: " + transactionMode);
}
}
}