// com.pivotal.gemfirexd.internal.engine.Misc
// Part of the snappydata-store-core artifact (TIBCO ComputeDB store,
// based off Pivotal GemFireXD).
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.pivotal.gemfirexd.internal.engine;
import java.io.CharArrayWriter;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.io.StringWriter;
import java.sql.ResultSet;
import java.sql.ResultSetMetaData;
import java.sql.SQLException;
import java.util.*;
import java.util.Map.Entry;
import java.util.concurrent.locks.Condition;
import javax.xml.parsers.DocumentBuilder;
import javax.xml.parsers.DocumentBuilderFactory;
import javax.xml.parsers.ParserConfigurationException;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerException;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.dom.DOMSource;
import javax.xml.transform.stream.StreamResult;
import com.gemstone.gemfire.CancelException;
import com.gemstone.gemfire.ForcedDisconnectException;
import com.gemstone.gemfire.GemFireException;
import com.gemstone.gemfire.LogWriter;
import com.gemstone.gemfire.cache.*;
import com.gemstone.gemfire.cache.execute.EmptyRegionFunctionException;
import com.gemstone.gemfire.cache.execute.FunctionException;
import com.gemstone.gemfire.cache.execute.FunctionInvocationTargetException;
import com.gemstone.gemfire.cache.execute.NoMemberFoundException;
import com.gemstone.gemfire.cache.hdfs.HDFSIOException;
import com.gemstone.gemfire.cache.persistence.PartitionOfflineException;
import com.gemstone.gemfire.distributed.DistributedMember;
import com.gemstone.gemfire.distributed.internal.InternalDistributedSystem;
import com.gemstone.gemfire.distributed.internal.membership.InternalDistributedMember;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.GemFireStatSampler;
import com.gemstone.gemfire.internal.InsufficientDiskSpaceException;
import com.gemstone.gemfire.internal.LocalLogWriter;
import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
import com.gemstone.gemfire.internal.cache.LocalRegion;
import com.gemstone.gemfire.internal.cache.NoDataStoreAvailableException;
import com.gemstone.gemfire.internal.cache.PRHARedundancyProvider;
import com.gemstone.gemfire.internal.cache.PartitionedRegion;
import com.gemstone.gemfire.internal.cache.PutAllPartialResultException;
import com.gemstone.gemfire.internal.cache.TXManagerImpl;
import com.gemstone.gemfire.internal.cache.execute.BucketMovedException;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.shared.ClientSharedUtils;
import com.gemstone.gemfire.internal.shared.SystemProperties;
import com.gemstone.gemfire.internal.snappy.CallbackFactoryProvider;
import com.gemstone.gemfire.internal.snappy.StoreCallbacks;
import com.gemstone.gemfire.internal.util.DebuggerSupport;
import com.pivotal.gemfirexd.Attribute;
import com.pivotal.gemfirexd.Constants;
import com.pivotal.gemfirexd.auth.callback.UserAuthenticator;
import com.pivotal.gemfirexd.internal.engine.distributed.FunctionExecutionException;
import com.pivotal.gemfirexd.internal.engine.distributed.GfxdDistributionAdvisor;
import com.pivotal.gemfirexd.internal.engine.distributed.utils.GemFireXDUtils;
import com.pivotal.gemfirexd.internal.engine.jdbc.GemFireXDRuntimeException;
import com.pivotal.gemfirexd.internal.engine.sql.conn.GfxdHeapThresholdListener;
import com.pivotal.gemfirexd.internal.engine.store.GemFireStore;
import com.pivotal.gemfirexd.internal.iapi.error.DerbySQLException;
import com.pivotal.gemfirexd.internal.iapi.error.StandardException;
import com.pivotal.gemfirexd.internal.iapi.jdbc.AuthenticationService;
import com.pivotal.gemfirexd.internal.iapi.reference.SQLState;
import com.pivotal.gemfirexd.internal.iapi.services.context.ContextService;
import com.pivotal.gemfirexd.internal.iapi.sql.conn.LanguageConnectionContext;
import com.pivotal.gemfirexd.internal.iapi.sql.dictionary.TableDescriptor;
import com.pivotal.gemfirexd.internal.iapi.sql.execute.ExecutionContext;
import com.pivotal.gemfirexd.internal.iapi.types.DataValueDescriptor;
import com.pivotal.gemfirexd.internal.impl.jdbc.Util;
import com.pivotal.gemfirexd.internal.impl.jdbc.authentication.AuthenticationServiceBase;
import com.pivotal.gemfirexd.internal.impl.jdbc.authentication.LDAPAuthenticationSchemeImpl;
import com.pivotal.gemfirexd.internal.impl.sql.execute.PlanUtils;
import com.pivotal.gemfirexd.internal.shared.common.sanity.SanityManager;
import com.pivotal.gemfirexd.tools.planexporter.CreateXML;
import org.w3c.dom.Document;
import org.w3c.dom.Element;
/**
* Some global miscellaneous stuff with an initialize method which logs some
* basic information about the GemFireXD configuration. Contains some utility
* methods that, if proven to be useful, can be moved to a more specific utility
* class in the future.
*
* @author Eric Zoerner
*/
public abstract class Misc {
/** Utility class: no instance allowed. */
private Misc() {
}
/**
 * Blocks the current thread until a Java debugger attaches, logging through a
 * fresh {@link LocalLogWriter} at ALL_LEVEL. Debugging aid only.
 */
public static void waitForDebugger() {
DebuggerSupport.waitForJavaDebugger(new LocalLogWriter(
LocalLogWriter.ALL_LEVEL));
}
/**
 * Get the {@link LogWriter} for the cache/DS. Fails with a disconnect
 * exception via {@link #getDistributedSystem()} when no connected system exists.
 */
public static LogWriter getCacheLogWriter() {
return getDistributedSystem().getLogWriter();
}
/**
 * Get the {@link LogWriter} for the cache, returning null instead of throwing
 * {@link CacheClosedException} if the cache has been closed.
 */
public static LogWriter getCacheLogWriterNoThrow() {
  final GemFireCacheImpl cache = getGemFireCacheNoThrow();
  return cache == null ? null : cache.getLogger();
}
/** Get the I18N log writer of the connected distributed system. */
public static LogWriterI18n getI18NLogWriter() {
return getDistributedSystem().getLogWriterI18n();
}
/**
 * Return the booted {@link GemFireStore} instance.
 *
 * @throws CacheClosedException if GemFireXD is not booted or has shut down
 */
public final static GemFireStore getMemStore() {
  final GemFireStore store = GemFireStore.getBootedInstance();
  if (store == null) {
    throw new CacheClosedException("Misc#getMemStore: no store found."
        + " GemFireXD not booted or closed down.");
  }
  return store;
}
/**
* Return the {@link GemFireStore} instance that may still be in the process
* of being booted. Unlike {@link #getMemStore()} this succeeds during boot.
*/
public final static GemFireStore getMemStoreBooting() {
final GemFireStore memStore = GemFireStore.getBootingInstance();
if (memStore != null) {
return memStore;
}
throw new CacheClosedException("Misc#getMemStoreBooting: no store found."
+ " GemFireXD not booted or closed down.");
}
/** Like {@link #getMemStoreBooting()} but returns null instead of throwing. */
public final static GemFireStore getMemStoreBootingNoThrow() {
return GemFireStore.getBootingInstance();
}
/**
 * Wait for the distributed system's stat sampler to finish initialization,
 * bounded by the configured ack-wait-threshold (seconds, converted to millis).
 * On interrupt, first checks whether the cache is closing (which may throw
 * CacheClosedException) and otherwise restores the thread's interrupt flag.
 */
public static void waitForSamplerInitialization() {
InternalDistributedSystem system = getDistributedSystem();
final GemFireStatSampler sampler = system.getStatSampler();
if (sampler != null) {
try {
sampler.waitForInitialization(system.getConfig().getAckWaitThreshold() * 1000L);
} catch (InterruptedException ie) {
// may throw CacheClosedException if shutting down; otherwise re-interrupt
checkIfCacheClosing(ie);
Thread.currentThread().interrupt();
}
}
}
/**
 * Return true if initial DDL replay is in progress. It may happen that this
 * is false even when {@link #initialDDLReplayDone()} returns false since
 * there may be configuration SQL scripts running.
 */
public static boolean initialDDLReplayInProgress() {
  final GemFireStore store = GemFireStore.getBootingInstance();
  if (store == null) {
    // no booting instance yet: treat replay as still in progress
    return true;
  }
  return store.initialDDLReplayInProgress();
}
/**
 * Return true if initial DDL replay is complete.
 */
public static boolean initialDDLReplayDone() {
  final GemFireStore store = GemFireStore.getBootingInstance();
  if (store == null) {
    return false;
  }
  return store.initialDDLReplayDone();
}
/**
* Return the GemFire Cache. Delegates to {@link GemFireCacheImpl#getExisting()},
* which (unlike {@link #getGemFireCacheNoThrow()}) does not return null for a
* missing/closed cache.
*/
public static GemFireCacheImpl getGemFireCache() {
return GemFireCacheImpl.getExisting();
}
/**
 * Return the GemFire Cache. This returns null instead of throwing
 * {@link CacheClosedException} if the cache is closed.
 */
public static GemFireCacheImpl getGemFireCacheNoThrow() {
  final GemFireCacheImpl instance = GemFireCacheImpl.getInstance();
  if (instance == null || instance.isClosed()) {
    return null;
  }
  return instance;
}
/**
 * Return the connected GemFire DistributedSystem else throws a
 * DistributedSystemDisconnectedException if no DS found or if it has been
 * disconnected.
 */
public static InternalDistributedSystem getDistributedSystem() {
  // the singleton cache is the cheapest source of the DS, so try that first
  final GemFireCacheImpl cache = getGemFireCacheNoThrow();
  if (cache != null) {
    return cache.getDistributedSystem();
  }
  // no (open) cache: fall back to the IDS singletons, preferring
  // getAnyInstance to avoid the locking inside getConnectedInstance
  InternalDistributedSystem system = InternalDistributedSystem.getAnyInstance();
  if (system == null || !system.isConnected()) {
    system = InternalDistributedSystem.getConnectedInstance();
  }
  if (system != null) {
    return system;
  }
  throw InternalDistributedSystem.newDisconnectedException(null);
}
/** This VM's member ID: from the store when booted, else from the DS. */
public static InternalDistributedMember getMyId() {
  final InternalDistributedMember myId = GemFireStore.getMyId();
  return myId != null ? myId : getDistributedSystem().getDistributedMember();
}
/**
 * Return a singleton set containing the SnappyData lead node member. In loner
 * mode this VM itself is the lead; otherwise the first other member whose
 * GFXD profile advertises a Spark URL is returned.
 *
 * @return an unmodifiable single-element set with the lead member
 * @throws NoMemberFoundException if no lead node is available
 */
public static Set getLeadNode() {
  GfxdDistributionAdvisor advisor = GemFireXDUtils.getGfxdAdvisor();
  InternalDistributedSystem ids = Misc.getDistributedSystem();
  if (ids.isLoner()) {
    return Collections.singleton(ids.getDistributedMember());
  }
  Set allMembers = ids.getAllOtherMembers();
  for (DistributedMember m : allMembers) {
    GfxdDistributionAdvisor.GfxdProfile profile = advisor
        .getProfile((InternalDistributedMember)m);
    if (profile != null && profile.hasSparkURL()) {
      // Collections.singleton is already unmodifiable; no need for the old
      // HashSet + unmodifiableSet dance
      return Collections.singleton(m);
    }
  }
  throw new NoMemberFoundException("SnappyData Lead node is not available");
}
/**
 * Check if {@link GemFireCache} is closed or is in the process of closing and
 * throw {@link CacheClosedException} if so.
 *
 * @param t the originating throwable, used as the cause for the cancellation
 *          check; if it is an {@link InterruptedException} the thread's
 *          interrupt flag is cleared so callers can safely wait elsewhere
 * @throws CacheClosedException if the cache/DS is closing, or if neither a
 *          booting store nor a distributed system exists at all
 */
public static void checkIfCacheClosing(Throwable t)
    throws CacheClosedException {
  final GemFireStore memStore = GemFireStore.getBootingInstance();
  if (memStore != null) {
    memStore.getAdvisee().getCancelCriterion().checkCancelInProgress(t);
    // clear interrupted flag before waiting somewhere else
    if (t instanceof InterruptedException) {
      Thread.interrupted();
    }
  }
  else {
    // in case of forced disconnect we need to see if the system is still
    // around and shutting down
    InternalDistributedSystem sys = InternalDistributedSystem.getAnyInstance();
    if (sys != null) {
      sys.getCancelCriterion().checkCancelInProgress(t);
      // clear interrupted flag before waiting somewhere else
      if (t instanceof InterruptedException) {
        Thread.interrupted();
      }
    } else {
      // message previously said "Misc#getMemStoreBooting" (copy-paste);
      // report the actual source of the exception
      throw new CacheClosedException("Misc#checkIfCacheClosing: no store found."
          + " GemFireXD not booted or closed down.");
    }
  }
}
/**
 * Await on the given {@link Condition}, retrying if interrupted. On interrupt
 * the thread's interrupt flag is restored and the cache-closing check runs
 * (which may throw CacheClosedException and, when the store/DS is present,
 * clears the flag again before the wait is retried).
 */
public static void awaitForCondition(Condition cond) {
while (true) {
try {
cond.await();
break;
} catch (InterruptedException ie) {
Thread.currentThread().interrupt();
Misc.checkIfCacheClosing(ie);
}
}
}
/**
 * Check if {@link GemFireCache} is closed or in process of closing; if not,
 * throw the proposed RuntimeException, else ignore the exception.
 *
 * @param e the actual exception being thrown which will be cause
 */
public static void throwIfCacheNotClosed(RuntimeException e) {
  final GemFireCacheImpl cache = GemFireCacheImpl.getInstance();
  // only propagate when the cache is alive and not cancelling
  if (cache != null && !cache.isClosed()
      && cache.getCancelCriterion().cancelInProgress() == null) {
    throw e;
  }
}
/**
 * Build the partition resolver used by reservoir (sample) regions: routes by
 * the row's last index when the store callback can resolve one, else by the
 * entry key itself.
 */
public static PartitionResolver createPartitionResolverForSampleTable(final String reservoirRegionName) {
  // TODO: Should this be serializable?
  // TODO: Should this have call back from bucket movement module?
  return new PartitionResolver() {
    public String getName() {
      return "PartitionResolverForSampleTable";
    }
    public Serializable getRoutingObject(EntryOperation opDetails) {
      final Object key = opDetails.getKey();
      final StoreCallbacks callbacks = CallbackFactoryProvider.getStoreCallbacks();
      final int lastIndex = callbacks.getLastIndexOfRow(key);
      return lastIndex != -1 ? (Serializable)Integer.valueOf(lastIndex)
          : (Serializable)key;
    }
    public void close() {
    }
  };
}
/**
 * Build the name of the internal reservoir (sample) region for the given base
 * table: "&lt;schema&gt;_SAMPLE_INTERNAL_&lt;baseRegionName&gt;". Throws if the
 * base table's region does not exist.
 */
public static String getReservoirRegionNameForSampleTable(
String schema, String resolvedBaseName) {
Region regionBase = Misc.getRegionForTable(resolvedBaseName, true);
return schema + "_SAMPLE_INTERNAL_" + regionBase.getName();
}
// set to true once any reservoir region has been created in this VM
public volatile static boolean reservoirRegionCreated = false;
/**
 * Create (if absent) the partitioned reservoir region for a sample table,
 * colocated with the base table's region and mirroring its data policy,
 * bucket count, redundancy and local-max-memory.
 *
 * @return the reservoir region, or null when neither it nor the base exists
 */
public static PartitionedRegion createReservoirRegionForSampleTable(
String reservoirRegionName, String resolvedBaseName) {
Region regionBase = Misc.getRegionForTable(resolvedBaseName, false);
GemFireCacheImpl cache = GemFireCacheImpl.getExisting();
Region childRegion = cache.getRegion(reservoirRegionName);
if (childRegion == null && regionBase != null) {
RegionAttributes attributesBase = regionBase.getAttributes();
PartitionAttributes partitionAttributesBase = attributesBase.getPartitionAttributes();
AttributesFactory afact = new AttributesFactory();
afact.setDataPolicy(attributesBase.getDataPolicy());
PartitionAttributesFactory paf = new PartitionAttributesFactory();
// mirror the base region's partitioning configuration
paf.setTotalNumBuckets(partitionAttributesBase.getTotalNumBuckets());
paf.setRedundantCopies(partitionAttributesBase.getRedundantCopies());
paf.setLocalMaxMemory(partitionAttributesBase.getLocalMaxMemory());
PartitionResolver partResolver = createPartitionResolverForSampleTable(reservoirRegionName);
paf.setPartitionResolver(partResolver);
// colocate so sample buckets live alongside the base table's buckets
paf.setColocatedWith(regionBase.getFullPath());
afact.setPartitionAttributes(paf.create());
childRegion = cache.createRegion(reservoirRegionName, afact.create());
}
if (childRegion != null) reservoirRegionCreated = true;
return (PartitionedRegion)childRegion;
}
/** Look up an existing reservoir region by name; null if absent or name is null. */
public static PartitionedRegion getReservoirRegionForSampleTable(String reservoirRegionName) {
  if (reservoirRegionName == null) {
    return null;
  }
  final Region reservoir =
      GemFireCacheImpl.getExisting().getRegion(reservoirRegionName);
  return (PartitionedRegion)reservoir;
}
/**
 * Local-scan iterator over the given buckets of the reservoir region; null
 * when the region is missing or the bucket set is null/empty.
 */
public static PartitionedRegion.PRLocalScanIterator
getLocalBucketsIteratorForSampleTable(PartitionedRegion reservoirRegion,
Set bucketSet, boolean fetchFromRemote) {
  if (reservoirRegion == null || bucketSet == null || bucketSet.isEmpty()) {
    return null;
  }
  return reservoirRegion.getAppropriateLocalEntriesIterator(bucketSet,
      true, false, true, null, fetchFromRemote);
}
/**
* Given a table name of the form "SCHEMA_NAME.TABLE_NAME", return the GemFire
* Region that hosts its data. If the name of the table does not have dot in
* it, then the region for the schema by that name is returned. Throw a
* RegionDestroyedException if "throwIfNotFound" flag is true and the region
* was not found.
*
* @param tableName
* the fully qualified name of the table or schema
* @param throwIfNotFound
* whether to throw rather than return null when the region is missing
*/
public static Region getRegionForTable(String tableName,
boolean throwIfNotFound) {
return getRegion(getRegionPath(tableName), throwIfNotFound, false);
}
/**
 * Convert a qualified name like "SCHEMA.TABLE" (or just "SCHEMA") into a
 * region path "/SCHEMA/TABLE": every '.' becomes '/' and a leading '/' is
 * ensured. Input that already uses '/' separators only gains the missing
 * leading slash.
 *
 * @param qualifiedTableName non-empty table or schema name
 * @return the corresponding region path
 */
public static String getRegionPath(String qualifiedTableName) {
  // String.replace(char, char) does exactly what the old manual
  // StringBuilder character loop did
  final String path = qualifiedTableName.replace('.', '/');
  return path.charAt(0) == '/' ? path : '/' + path;
}
/**
 * Get the region corresponding to given path. Throw a
 * RegionDestroyedException if "throwIfNotFound" flag is true and the region
 * was not found.
 */
@SuppressWarnings("unchecked")
public static Region getRegion(String regionPath,
    boolean throwIfNotFound, final boolean returnUnInitialized) throws RegionDestroyedException {
  // single lookup; only the null-handling depends on throwIfNotFound
  final Region region = getGemFireCache().getRegion(
      regionPath, false, returnUnInitialized);
  if (region == null && throwIfNotFound) {
    throw new RegionDestroyedException(
        LocalizedStrings.Region_CLOSED_OR_DESTROYED
            .toLocalizedString(regionPath), regionPath);
  }
  return region;
}
/**
 * Get the region corresponding to given path; will return an uninitialized
 * region also instead of waiting for it to initialize. Throw a
 * RegionDestroyedException if region was not found.
 */
public static LocalRegion getRegionByPath(String regionPath) {
  final LocalRegion region = getGemFireCache().getRegionByPath(regionPath, false);
  if (region == null) {
    throw new RegionDestroyedException(
        LocalizedStrings.Region_CLOSED_OR_DESTROYED
            .toLocalizedString(regionPath), regionPath);
  }
  return region;
}
/**
 * Get the region corresponding to given path; will return an uninitialized
 * region also instead of waiting for it to initialize. If the
 * "throwIfNotFound" flag is true then throw a RegionDestroyedException if
 * region was not found.
 */
@SuppressWarnings("unchecked")
public static Region getRegionByPath(String regionPath,
    boolean throwIfNotFound) {
  return throwIfNotFound ? getRegionByPath(regionPath)
      : getGemFireCache().getRegionByPath(regionPath, false);
}
/** Get the region corresponding to the TableDescriptor (see overloads above). */
public static Region getRegion(TableDescriptor td,
LanguageConnectionContext lcc, boolean throwIfNotFound,
final boolean returnUnInitialized) {
return getRegion(getRegionPath(td, lcc), throwIfNotFound,
returnUnInitialized);
}
/**
* Get the region corresponding to the TableDescriptor. Can return
* uninitialized region.
*/
public static Region getRegionByPath(TableDescriptor td,
LanguageConnectionContext lcc, boolean throwIfNotFound) {
return getRegionByPath(getRegionPath(td, lcc), throwIfNotFound);
}
/**
* Given a table name of the form "SCHEMA_NAME.TABLE_NAME", return the GemFire
* Region that hosts its data. Can return uninitialized region. If the name of
* the table does not have dot in it, then the region for the schema by that
* name is returned. Throw a RegionDestroyedException if "throwIfNotFound"
* flag is true and the region was not found.
*
* @param tableName
* the fully qualified name of the table or schema
*/
public static Region getRegionForTableByPath(String tableName,
boolean throwIfNotFound) {
return getRegionByPath(getRegionPath(tableName), throwIfNotFound);
}
/** Get the current thread's LanguageConnectionContext, or null if none. */
public static LanguageConnectionContext getLanguageConnectionContext() {
return (LanguageConnectionContext)ContextService
.getContextOrNull(LanguageConnectionContext.CONTEXT_ID);
}
/**
 * Return the given schema name if non-null and non-empty, else the
 * connection's current default schema via {@link #getDefaultSchemaName}.
 */
public static String getSchemaName(String schemaName,
    LanguageConnectionContext lcc) {
  // !isEmpty() is the idiomatic form of length() > 0
  return (schemaName != null && !schemaName.isEmpty()) ? schemaName
      : getDefaultSchemaName(lcc);
}
/**
 * Return the default schema of the given (or current) connection.
 * Fails with a runtime exception when no current connection exists.
 */
public static String getDefaultSchemaName(LanguageConnectionContext lcc) {
if (lcc == null) {
lcc = getLanguageConnectionContext();
}
if (lcc == null) {
throw GemFireXDRuntimeException.newRuntimeException(
"No current connection (LCC null).", Util.noCurrentConnection());
}
return lcc.getDefaultSchema().getSchemaName();
}
/** Build the region path "/&lt;schema&gt;/&lt;table&gt;" for the given names. */
public static String getRegionPath(String schemaName, String tableName,
LanguageConnectionContext lcc) {
return new StringBuilder().append('/')
.append(getSchemaName(schemaName, lcc)).append('/').append(tableName)
.toString();
}
/** Build the region path for the table described by the descriptor. */
public static String getRegionPath(TableDescriptor td,
LanguageConnectionContext lcc) {
return getRegionPath(td.getSchemaName(), td.getName(), lcc);
}
/**
 * Convert a region path like "/SCHEMA/TABLE" back to "SCHEMA.TABLE". A path
 * with a single component yields just that component; null or empty input
 * yields "".
 */
public static String getFullTableNameFromRegionPath(final String regionPath) {
  if (regionPath == null || regionPath.length() == 0) {
    return "";
  }
  final int start = regionPath.startsWith("/") ? 1 : 0;
  final int slash = regionPath.indexOf('/', start + 1);
  if (slash == -1) {
    return regionPath.substring(start);
  }
  return regionPath.substring(start, slash) + '.'
      + regionPath.substring(slash + 1);
}
/** Return "SCHEMA.TABLE" resolving an empty schema to the connection default. */
public static String getFullTableName(String schemaName, String tableName,
LanguageConnectionContext lcc) {
return generateFullTableName(new StringBuilder(), schemaName, tableName,
lcc).toString();
}
/**
* Generates full table name by appending to given StringBuilder and returning
* it.
*/
public static StringBuilder generateFullTableName(StringBuilder sb,
String schemaName, String tableName, LanguageConnectionContext lcc) {
return sb.append(getSchemaName(schemaName, lcc)).append('.')
.append(tableName);
}
/** Return "SCHEMA.TABLE" for the table described by the descriptor. */
public static String getFullTableName(TableDescriptor td,
LanguageConnectionContext lcc) {
return getFullTableName(td.getSchemaName(), td.getName(), lcc);
}
/**
 * Convert a SQL ResultSet object to an XML Element rooted at "resultSet",
 * with one "row" child per row and one "field" child per column. The
 * "addTypeInfo" argument specifies whether or not to add a "type" attribute
 * to each field that has the result of {@link Class#getName()} as its value.
 *
 * @param rs the result set to convert; may be null (yields an empty element);
 *           when non-null it is fully consumed and closed by this method
 * @param addTypeInfo whether to emit a "type" attribute per non-null field
 * @param ignoreTypeInfo if type of the result set has to be ignored and just
 *           its toString() value is to be set in the XML
 */
public static Element resultSetToXMLElement(ResultSet rs,
    boolean addTypeInfo, boolean ignoreTypeInfo) throws SQLException,
    IOException, ParserConfigurationException {
  DocumentBuilderFactory factory = DocumentBuilderFactory.newInstance();
  DocumentBuilder builder = factory.newDocumentBuilder();
  Document doc = builder.newDocument();
  Element resultSetElement = doc.createElement("resultSet");
  doc.appendChild(resultSetElement);
  if (rs != null) {
    // close the result set even if conversion fails part-way
    // (previously it leaked on exception)
    try {
      ResultSetMetaData rsMD = rs.getMetaData();
      int numCols = rsMD.getColumnCount();
      while (rs.next()) {
        Element resultElement = doc.createElement("row");
        resultSetElement.appendChild(resultElement);
        for (int index = 1; index <= numCols; ++index) {
          String colName = rsMD.getColumnName(index);
          Element fieldElement = doc.createElement("field");
          fieldElement.setAttribute("name", colName);
          if (addTypeInfo && !ignoreTypeInfo) {
            Object result = rs.getObject(index);
            if (result != null) {
              fieldElement.setAttribute("type", result.getClass().getName());
            }
          }
          // SQL NULL is rendered as the literal text "NULL"
          String valStr = rs.getString(index);
          if (valStr == null) {
            valStr = "NULL";
          }
          if (valStr.length() > 0) {
            fieldElement.appendChild(doc.createTextNode(valStr));
          }
          resultElement.appendChild(fieldElement);
        }
      }
    } finally {
      rs.close();
    }
  }
  return resultSetElement;
}
/**
 * Serialize a given XML Element to an indented XML string representation,
 * omitting the XML declaration.
 */
public static String serializeXML(Element el) throws TransformerException {
  final Transformer trans = TransformerFactory.newInstance().newTransformer();
  trans.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
  trans.setOutputProperty(OutputKeys.INDENT, "yes");
  // render the DOM tree into an in-memory writer and return its contents
  final StringWriter out = new StringWriter();
  trans.transform(new DOMSource(el), new StreamResult(out));
  return out.toString();
}
/**
 * Transform the given plan-XML elements through the named XSLT stylesheet
 * (loaded from this package's "resources" directory on the classpath) and
 * return the concatenated output as a char array.
 *
 * @param el list of Elements to transform, in order
 * @param xsltFileName name of the stylesheet file under resources/
 * @throws GemFireXDRuntimeException wrapping any underlying failure
 */
public static char[] serializeXMLAsCharArr(List el,
    String xsltFileName) {
  try {
    TransformerFactory tFactory = TransformerFactory.newInstance();
    CharArrayWriter cw = new CharArrayWriter();
    ClassLoader cl = InternalDistributedSystem.class.getClassLoader();
    // fix for bug 33274 - null classloader in Sybase app server
    if (cl == null) {
      cl = ClassLoader.getSystemClassLoader();
    }
    final String style = CreateXML.class.getPackage().getName()
        .replaceAll("\\.", "/")
        + "/resources/" + xsltFileName;
    if (GemFireXDUtils.TracePlanGeneration) {
      SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_PLAN_GENERATION,
          "using stylesheet : " + style);
    }
    InputStream is = cl.getResourceAsStream(style);
    // close the stream even when a transform fails (previously it leaked
    // on error); guard against a missing resource returning null
    try {
      Transformer transformer = tFactory
          .newTransformer(new javax.xml.transform.stream.StreamSource(is));
      for (Element e : el) {
        DOMSource source = new DOMSource(e);
        StreamResult result = new StreamResult(cw);
        if (GemFireXDUtils.TracePlanGeneration) {
          SanityManager.DEBUG_PRINT(GfxdConstants.TRACE_PLAN_GENERATION,
              "about to transform " + e);
        }
        transformer.transform(source, result);
      }
    } finally {
      if (is != null) {
        is.close();
      }
    }
    return cw.toCharArray();
  } catch (Exception te) {
    throw GemFireXDRuntimeException.newRuntimeException(
        "serializeXMLAsCharArr: unexpected exception", te);
  }
}
/**
 * Hash code of a DataValueDescriptor; 0 for null. Deliberately uses the DVD's
 * own hashCode() rather than the wrapped object's: e.g. SQLVarchar and String
 * hash differently for the same value (fix for #40407, #40379).
 */
public static int getHashCodeFromDVD(DataValueDescriptor dvd) {
  return dvd == null ? 0 : dvd.hashCode();
}
/** Snappy-unified hash of a single DVD for the given partition count; 0 for null. */
public static int getUnifiedHashCodeFromDVD(DataValueDescriptor dvd,
    int numPartitions) {
  final StoreCallbacks callbacks = CallbackFactoryProvider.getStoreCallbacks();
  return dvd == null ? 0 : callbacks.getHashCodeSnappy(dvd, numPartitions);
}
/** Snappy-unified hash of a DVD array for the given partition count; 0 for null. */
public static int getUnifiedHashCodeFromDVD(DataValueDescriptor[] dvds,
    int numPartitions) {
  final StoreCallbacks callbacks = CallbackFactoryProvider.getStoreCallbacks();
  return dvds == null ? 0 : callbacks.getHashCodeSnappy(dvds, numPartitions);
}
/**
* Returns true if security is enabled for SnappyData.
* Only LDAP scheme is supported currently, so this reports whether the
* booting store's authentication scheme is the LDAP implementation.
*/
public static boolean isSecurityEnabled() {
AuthenticationService authService = Misc.getMemStoreBooting()
.getDatabase().getAuthenticationService();
if (authService != null) {
UserAuthenticator auth = ((AuthenticationServiceBase)authService)
.getAuthenticationScheme();
return auth instanceof LDAPAuthenticationSchemeImpl;
}
return false;
}
/* Returns true if LDAP Security is Enabled */
public static boolean checkLDAPAuthProvider(Map