// Retrieved from a public Maven repository mirror (downloads are free; search
// and download functionality is backed by the official Maven repository).
// Artifact: com.gemstone.gemfire.internal.cache.xmlcache.CacheXmlParser
// (newest version available at retrieval time).
/*
 * Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you
 * may not use this file except in compliance with the License. You
 * may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied. See the License for the specific language governing
 * permissions and limitations under the License. See accompanying
 * LICENSE file.
 */
package com.gemstone.gemfire.internal.cache.xmlcache;

import java.io.File;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.Collections;
import java.util.EmptyStackException;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.Stack;
import java.util.StringTokenizer;

import javax.xml.parsers.SAXParser;
import javax.xml.parsers.SAXParserFactory;

import org.xml.sax.Attributes;
import org.xml.sax.ContentHandler;
import org.xml.sax.InputSource;
import org.xml.sax.Locator;
import org.xml.sax.SAXException;
import org.xml.sax.SAXParseException;
import org.xml.sax.helpers.DefaultHandler;

import com.gemstone.gemfire.DataSerializable;
import com.gemstone.gemfire.DataSerializer;
import com.gemstone.gemfire.InternalGemFireException;
import com.gemstone.gemfire.cache.Cache;
import com.gemstone.gemfire.cache.CacheException;
import com.gemstone.gemfire.cache.CacheListener;
import com.gemstone.gemfire.cache.CacheLoader;
import com.gemstone.gemfire.cache.CacheWriter;
import com.gemstone.gemfire.cache.CacheWriterException;
import com.gemstone.gemfire.cache.CacheXmlException;
import com.gemstone.gemfire.cache.CustomExpiry;
import com.gemstone.gemfire.cache.DataPolicy;
import com.gemstone.gemfire.cache.Declarable;
import com.gemstone.gemfire.cache.DiskStoreFactory;
import com.gemstone.gemfire.cache.DiskWriteAttributes;
import com.gemstone.gemfire.cache.DynamicRegionFactory;
import com.gemstone.gemfire.cache.EvictionAction;
import com.gemstone.gemfire.cache.EvictionAttributes;
import com.gemstone.gemfire.cache.ExpirationAction;
import com.gemstone.gemfire.cache.ExpirationAttributes;
import com.gemstone.gemfire.cache.GatewayException;
import com.gemstone.gemfire.cache.InterestPolicy;
import com.gemstone.gemfire.cache.LossAction;
import com.gemstone.gemfire.cache.MembershipAttributes;
import com.gemstone.gemfire.cache.MirrorType;
import com.gemstone.gemfire.cache.PartitionAttributes;
import com.gemstone.gemfire.cache.PartitionResolver;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.RegionExistsException;
import com.gemstone.gemfire.cache.ResumptionAction;
import com.gemstone.gemfire.cache.Scope;
import com.gemstone.gemfire.cache.SubscriptionAttributes;
import com.gemstone.gemfire.cache.TimeoutException;
import com.gemstone.gemfire.cache.TransactionListener;
import com.gemstone.gemfire.cache.TransactionWriter;
import com.gemstone.gemfire.cache.asyncqueue.AsyncEventListener;
import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueue;
import com.gemstone.gemfire.cache.asyncqueue.AsyncEventQueueFactory;
import com.gemstone.gemfire.cache.client.ClientCache;
import com.gemstone.gemfire.cache.client.PoolFactory;
import com.gemstone.gemfire.cache.execute.Function;
import com.gemstone.gemfire.cache.hdfs.HDFSEventQueueAttributes;
import com.gemstone.gemfire.cache.hdfs.HDFSEventQueueAttributesFactory;
import com.gemstone.gemfire.cache.hdfs.HDFSStore.HDFSCompactionConfig;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreFactory.HDFSCompactionConfigFactory;
import com.gemstone.gemfire.cache.hdfs.internal.HDFSStoreCreation;
import com.gemstone.gemfire.cache.partition.PartitionListener;
import com.gemstone.gemfire.cache.query.IndexType;
import com.gemstone.gemfire.cache.query.internal.index.IndexCreationData;
import com.gemstone.gemfire.cache.server.CacheServer;
import com.gemstone.gemfire.cache.server.ClientSubscriptionConfig;
import com.gemstone.gemfire.cache.server.ServerLoadProbe;
import com.gemstone.gemfire.cache.util.BridgeWriter;
import com.gemstone.gemfire.cache.util.Gateway;
import com.gemstone.gemfire.cache.util.GatewayConflictResolver;
import com.gemstone.gemfire.cache.util.GatewayEventListener;
import com.gemstone.gemfire.cache.util.GatewayHub;
import com.gemstone.gemfire.cache.util.GatewayQueueAttributes;
import com.gemstone.gemfire.cache.util.ObjectSizer;
import com.gemstone.gemfire.cache.wan.GatewayEventFilter;
import com.gemstone.gemfire.cache.wan.GatewayReceiver;
import com.gemstone.gemfire.cache.wan.GatewayReceiverFactory;
import com.gemstone.gemfire.cache.wan.GatewaySender;
import com.gemstone.gemfire.cache.wan.GatewaySenderFactory;
import com.gemstone.gemfire.cache.wan.GatewayTransportFilter;
import com.gemstone.gemfire.compression.Compressor;
import com.gemstone.gemfire.i18n.LogWriterI18n;
import com.gemstone.gemfire.internal.Assert;
import com.gemstone.gemfire.internal.InternalDataSerializer;
import com.gemstone.gemfire.internal.cache.DiskStoreAttributes;
import com.gemstone.gemfire.internal.cache.DiskWriteAttributesImpl;
import com.gemstone.gemfire.internal.cache.EvictionAttributesImpl;
import com.gemstone.gemfire.internal.cache.FixedPartitionAttributesImpl;
import com.gemstone.gemfire.internal.cache.GemFireCacheImpl;
import com.gemstone.gemfire.internal.cache.PartitionAttributesImpl;
import com.gemstone.gemfire.internal.cache.PartitionedRegionHelper;
import com.gemstone.gemfire.internal.cache.lru.LRUCapacityController;
import com.gemstone.gemfire.internal.cache.lru.MemLRUCapacityController;
import com.gemstone.gemfire.internal.datasource.ConfigProperty;
import com.gemstone.gemfire.internal.i18n.LocalizedStrings;
import com.gemstone.gemfire.internal.jndi.JNDIInvoker;
import com.gemstone.gemfire.pdx.PdxSerializer;

/**
 * Parses an XML file and creates a {@link Cache}/{@link ClientCache} and {@link Region}s from it.
 * It works in two phases. The first phase parses the XML and instantiates
 * {@link Declarable}s. If any problems occur, a {@link CacheXmlException} is
 * thrown. The second phase actually {@linkplain CacheCreation#create creates}
 * the {@link Cache}/{@link ClientCache},{@link Region}s, etc.
 *
 * @author David Whitlock
 *
 * @since 3.0
 */
@SuppressWarnings("deprecation")
public class CacheXmlParser extends CacheXml implements ContentHandler {

  // Log writer used while parsing; assigned via setLogWriter from parse()
  // and may be null — TODO confirm intended visibility (package-private).
  LogWriterI18n logWriter;
  /** The cache to be created */
  private CacheCreation cache;
  /** The stack of intermediate values used while parsing */
  private final Stack stack = new Stack();

  ////////////////////// Static Methods //////////////////////
  /**
   * Parses XML data and from it creates an instance of
   * CacheXmlParser that can be used to {@link #create}the
   * {@link Cache}, etc.
   *
   * @param logwriter a LogWriterI18n currently meant for testing
   *          purposes
   *
   * @param is the InputStream of XML to be parsed
   *
   * @return a CacheXmlParser, typically used to create a cache
   *         from the parsed XML
   *
   * @throws CacheXmlException Something went wrong while parsing the XML
   *
   * @since 4.0
   *
   */
  public static CacheXmlParser parse(InputStream is, LogWriterI18n logwriter) {
    CacheXmlParser handler = new CacheXmlParser();
    handler.setLogWriter(logwriter);
    try {
      SAXParserFactory factory = SAXParserFactory.newInstance();
      // Validate the document against its DTD/schema while parsing.
      factory.setValidating(true);
      // NOTE(review): no XXE hardening (external-entity/doctype restrictions)
      // is applied to the factory; confirm inputs are trusted or harden it.
      SAXParser parser = factory.newSAXParser();
// The following taken from http://xerces.apache.org/xerces2-j/faq-pcfp.html
//      parser.setProperty(
//          "http://java.sun.com/xml/jaxp/properties/schemaLanguage",
//          "http://www.w3.org/2001/XMLSchema");
      parser.parse(is, new DefaultHandlerDelegate(handler));
      return handler;
    }
    catch (Exception ex) {
      if (ex instanceof CacheXmlException) {
        // Walk down the cause chain while each cause is itself a
        // CacheXmlException, so the innermost (most specific) one is thrown.
        while (true /*ex instanceof CacheXmlException*/) {
          Throwable cause = ex.getCause();
          if (!(cause instanceof CacheXmlException)) {
            break;
          }
          else {
            ex = (CacheXmlException) cause;
          }
        }
        throw (CacheXmlException) ex;
      }
      else if (ex instanceof SAXException) {
        // Silly JDK 1.4.2 XML parser wraps RunTime exceptions in a
        // SAXException. Pshaw!
        SAXException sax = (SAXException) ex;
        Exception cause = sax.getException();
        if (cause instanceof CacheXmlException) {
          // Same unwrapping as above, applied to the exception that the
          // SAXException is wrapping.
          while (true /*cause instanceof CacheXmlException*/) {
            Throwable cause2 = cause.getCause();
            if (!(cause2 instanceof CacheXmlException)) {
              break;
            }
            else {
              cause = (CacheXmlException) cause2;
            }
          }
          throw (CacheXmlException) cause;
        }
      }
      // Anything else is wrapped in a CacheXmlException, preserving the
      // original exception as the cause.
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_WHILE_PARSING_XML.toLocalizedString(), ex);
    }
  }

//  /**
//   * Parse XML data in the InputStream
//   *
//   * using {@link #parse(InputStream, LogWriterI18n)}.
//   */
//  public static CacheXmlParser parse(InputStream is, LogWriterI18n logwriter) {
//    return parse(new InputSource(is), logwriter);
//  }

  /**
   * Parse XML data in the InputStream
   *
   * using {@link #parse(InputStream, LogWriterI18n)}.
   *
   * @param is the InputStream of XML to be parsed
   * @return a CacheXmlParser holding the parsed cache description
   * @throws CacheXmlException Something went wrong while parsing the XML
   */
  public static CacheXmlParser parse(InputStream is) {
    // Convenience overload: parse without a log writer.
    return parse(is, null);
  }

  /**
   * Converts the given attribute text to an {@code int}.
   *
   * @param s the text to convert
   * @return the parsed value
   * @throws CacheXmlException if {@code s} is not a well-formed integer
   */
  private static int parseInt(String s) {
    try {
      return Integer.parseInt(s);
    } catch (NumberFormatException nfe) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_MALFORMED_INTEGER_0.toLocalizedString(s), nfe);
    }
  }

  /**
   * Converts the given attribute text to a {@code long}.
   *
   * @param s the text to convert
   * @return the parsed value
   * @throws CacheXmlException if {@code s} is not a well-formed long
   *         (the message reuses the generic "malformed integer" string)
   */
  private static long parseLong(String s) {
    try {
      return Long.parseLong(s);
    } catch (NumberFormatException nfe) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_MALFORMED_INTEGER_0.toLocalizedString(s), nfe);
    }
  }

  /**
   * Converts the given attribute text to a {@code boolean}.
   * Never throws: any text other than (case-insensitive) "true" yields false.
   *
   * @param s the text to convert
   * @return {@code true} iff {@code s} equals "true", ignoring case
   */
  private static boolean parseBoolean(String s) {
    // Boolean.parseBoolean avoids the needless boxing of
    // Boolean.valueOf(s).booleanValue(); same result for all inputs.
    return Boolean.parseBoolean(s);
  }

  /**
   * Converts the given attribute text to a {@code float}.
   *
   * @param s the text to convert
   * @return the parsed value
   * @throws CacheXmlException if {@code s} is not a well-formed float
   */
  private static float parseFloat(String s) {
    try {
      return Float.parseFloat(s);
    } catch (NumberFormatException nfe) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_MALFORMED_FLOAT_0.toLocalizedString(s), nfe);
    }
  }

  ////////////////////// Instance Methods //////////////////////
  /**
   * Returns the {@link CacheCreation} generated by this parser.
   */
  public CacheCreation getCacheCreation() {
    return this.cache;
  }

  /**
   * Creates cache artifacts ({@link Cache}s, etc.) based upon the XML parsed
   * by this parser.
   *
   * @param cache the existing cache into which the parsed artifacts are created
   *
   * @throws CacheXmlException if no cache (or client-cache) element was parsed
   * @throws TimeoutException
   * @throws GatewayException
   * @throws CacheWriterException
   * @throws RegionExistsException
   */
  public void create(GemFireCacheImpl cache) throws TimeoutException,
                                         GatewayException,
                                         CacheWriterException,
                                         RegionExistsException
  {
    if (this.cache == null) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_NO_CACHE_ELEMENT_SPECIFIED.toLocalizedString());
    }
    // Phase two: delegate to the CacheCreation built while parsing.
    this.cache.create(cache);
  }

  /**
   * When a cache element is first encountered, we create a
   * {@link CacheCreation}, fill it in from the element's attributes, and
   * push it onto the parse stack.
   *
   * @param atts the cache element's attributes
   * @throws CacheXmlException if a cache or client-cache element was already
   *         seen, or if a numeric attribute is malformed
   */
  private void startCache(Attributes atts) {
    if (this.cache != null) {
      throw new CacheXmlException("Only a single cache or client-cache element is allowed");
    }
    this.cache = new CacheCreation(true);
    String lockLease = atts.getValue(LOCK_LEASE);
    if (lockLease != null) {
      this.cache.setLockLease(parseInt(lockLease));
    }
    String lockTimeout = atts.getValue(LOCK_TIMEOUT);
    if (lockTimeout != null) {
      this.cache.setLockTimeout(parseInt(lockTimeout));
    }
    String searchTimeout = atts.getValue(SEARCH_TIMEOUT);
    if (searchTimeout != null) {
      this.cache.setSearchTimeout(parseInt(searchTimeout));
    }
    String messageSyncInterval = atts.getValue(MESSAGE_SYNC_INTERVAL);
    if (messageSyncInterval != null) {
      this.cache.setMessageSyncInterval(parseInt(messageSyncInterval));
    }
    String isServer = atts.getValue(IS_SERVER);
    if (isServer != null) {
      // Use the shared helper rather than Boolean.valueOf(..).booleanValue(),
      // consistent with the other attribute parsing in this class.
      this.cache.setIsServer(parseBoolean(isServer));
    }
    String copyOnRead = atts.getValue(COPY_ON_READ);
    if (copyOnRead != null) {
      this.cache.setCopyOnRead(parseBoolean(copyOnRead));
    }
    stack.push(this.cache);
  }

  /**
   * When a client-cache element is first encountered, we create a
   * {@link ClientCacheCreation}, fill it in from the element's attributes,
   * and push it onto the parse stack.
   *
   * @param atts the client-cache element's attributes
   * @throws CacheXmlException if a cache or client-cache element was already seen
   */
  private void startClientCache(Attributes atts) {
    if (this.cache != null) {
      throw new CacheXmlException("Only a single cache or client-cache element is allowed");
    }
    this.cache = new ClientCacheCreation(true);
    String copyOnRead = atts.getValue(COPY_ON_READ);
    if (copyOnRead != null) {
      // Use the shared helper rather than Boolean.valueOf(..).booleanValue().
      this.cache.setCopyOnRead(parseBoolean(copyOnRead));
    }
    stack.push(this.cache);
  }
  /**
   * When a pool element is first encountered, creates a {@link PoolFactory},
   * configures it from the element's attributes, and pushes the pool name and
   * the factory onto the stack (popped again by {@link #endPool}).
   *
   * @param atts the pool element's attributes
   * @since 5.7
   */
  private void startPool(Attributes atts) {
    PoolFactory factory = this.cache.createPoolFactory();
    String poolName = atts.getValue(NAME).trim();
    stack.push(poolName);
    stack.push(factory);
    String value;
    // free-connection-timeout
    value = atts.getValue(FREE_CONNECTION_TIMEOUT);
    if (value != null) {
      factory.setFreeConnectionTimeout(parseInt(value));
    }
    // load-conditioning-interval
    value = atts.getValue(LOAD_CONDITIONING_INTERVAL);
    if (value != null) {
      factory.setLoadConditioningInterval(parseInt(value));
    }
    // min-connections
    value = atts.getValue(MIN_CONNECTIONS);
    if (value != null) {
      factory.setMinConnections(parseInt(value));
    }
    // max-connections
    value = atts.getValue(MAX_CONNECTIONS);
    if (value != null) {
      factory.setMaxConnections(parseInt(value));
    }
    // retry-attempts
    value = atts.getValue(RETRY_ATTEMPTS);
    if (value != null) {
      factory.setRetryAttempts(parseInt(value));
    }
    // idle-timeout
    value = atts.getValue(IDLE_TIMEOUT);
    if (value != null) {
      factory.setIdleTimeout(parseLong(value));
    }
    // ping-interval
    value = atts.getValue(PING_INTERVAL);
    if (value != null) {
      factory.setPingInterval(parseLong(value));
    }
    // subscription-enabled
    value = atts.getValue(SUBSCRIPTION_ENABLED);
    if (value != null) {
      factory.setSubscriptionEnabled(parseBoolean(value));
    }
    // pr-single-hop-enabled
    value = atts.getValue(PR_SINGLE_HOP_ENABLED);
    if (value != null) {
      factory.setPRSingleHopEnabled(parseBoolean(value));
    }
    // subscription-message-tracking-timeout
    value = atts.getValue(SUBSCRIPTION_MESSAGE_TRACKING_TIMEOUT);
    if (value != null) {
      factory.setSubscriptionMessageTrackingTimeout(parseInt(value));
    }
    // subscription-ack-interval
    value = atts.getValue(SUBSCRIPTION_ACK_INTERVAL);
    if (value != null) {
      factory.setSubscriptionAckInterval(parseInt(value));
    }
    // subscription-redundancy
    value = atts.getValue(SUBSCRIPTION_REDUNDANCY);
    if (value != null) {
      factory.setSubscriptionRedundancy(parseInt(value));
    }
    // read-timeout
    value = atts.getValue(READ_TIMEOUT);
    if (value != null) {
      factory.setReadTimeout(parseInt(value));
    }
    // server-group
    value = atts.getValue(SERVER_GROUP);
    if (value != null) {
      factory.setServerGroup(value.trim());
    }
    // socket-buffer-size
    value = atts.getValue(SOCKET_BUFFER_SIZE);
    if (value != null) {
      factory.setSocketBufferSize(parseInt(value));
    }
    // statistic-interval
    value = atts.getValue(STATISTIC_INTERVAL);
    if (value != null) {
      factory.setStatisticInterval(parseInt(value));
    }
    // thread-local-connections
    value = atts.getValue(THREAD_LOCAL_CONNECTIONS);
    if (value != null) {
      factory.setThreadLocalConnections(parseBoolean(value));
    }
    // multiuser-authentication
    value = atts.getValue(MULTIUSER_SECURE_MODE_ENABLED);
    if (value != null) {
      factory.setMultiuserAuthentication(parseBoolean(value));
    }
  }
  /**
   * Completes a pool element: pops the factory and the pool name pushed by
   * {@link #startPool} and registers the pool under that name.
   *
   * @since 5.7
   */
  private void endPool() {
    PoolFactory poolFactory = (PoolFactory) stack.pop();
    String poolName = (String) stack.pop();
    poolFactory.create(poolName);
  }
  /**
   * Adds a locator endpoint to the pool factory currently on top of the
   * parse stack.
   *
   * @since 5.7
   */
  private void doLocator(Attributes atts) {
    PoolFactory poolFactory = (PoolFactory) stack.peek();
    String locatorHost = atts.getValue(HOST).trim();
    int locatorPort = parseInt(atts.getValue(PORT));
    poolFactory.addLocator(locatorHost, locatorPort);
  }
  /**
   * Adds a server endpoint to the pool factory currently on top of the
   * parse stack.
   *
   * @since 5.7
   */
  private void doServer(Attributes atts) {
    PoolFactory poolFactory = (PoolFactory) stack.peek();
    String serverHost = atts.getValue(HOST).trim();
    int serverPort = parseInt(atts.getValue(PORT));
    poolFactory.addServer(serverHost, serverPort);
  }

  /**
   * When a cache-server element is first encountered, we create
   * a new {@link CacheCreation#addCacheServer() CacheServer} in the cache,
   * configure it from the element's attributes, and push it onto the stack.
   *
   * All numeric/boolean attributes go through the class helpers so that a
   * malformed value surfaces as a {@link CacheXmlException} (the original
   * mixed raw Integer.parseInt/Long.parseLong, leaking NumberFormatException).
   *
   * @since 4.0
   */
  private void startCacheServer(Attributes atts) {
    CacheServer bridge = this.cache.addCacheServer();
    String port = atts.getValue(PORT);
    if (port != null) {
      bridge.setPort(parseInt(port));
    }
    String bindAddress = atts.getValue(BIND_ADDRESS);
    if (bindAddress != null) {
      bridge.setBindAddress(bindAddress.trim());
    }
    String hostnameForClients = atts.getValue(HOSTNAME_FOR_CLIENTS);
    if (hostnameForClients != null) {
      bridge.setHostnameForClients(hostnameForClients.trim());
    }
    String maxConnections = atts.getValue(MAX_CONNECTIONS);
    if (maxConnections != null) {
      bridge.setMaxConnections(parseInt(maxConnections));
    }
    String maxThreads = atts.getValue(MAX_THREADS);
    if (maxThreads != null) {
      bridge.setMaxThreads(parseInt(maxThreads));
    }
    String notifyBySubscription = atts.getValue(NOTIFY_BY_SUBSCRIPTION);
    if (notifyBySubscription != null) {
      bridge.setNotifyBySubscription(parseBoolean(notifyBySubscription));
    }
    String socketBufferSize = atts.getValue(SOCKET_BUFFER_SIZE);
    if (socketBufferSize != null) {
      bridge.setSocketBufferSize(parseInt(socketBufferSize));
    }
    String tcpDelay = atts.getValue(TCP_NO_DELAY);
    if (tcpDelay != null) {
      bridge.setTcpNoDelay(parseBoolean(tcpDelay));
    }
    String maximumTimeBetweenPings = atts.getValue(MAXIMUM_TIME_BETWEEN_PINGS);
    if (maximumTimeBetweenPings != null) {
      bridge.setMaximumTimeBetweenPings(parseInt(maximumTimeBetweenPings));
    }
    String maximumMessageCount = atts.getValue(MAXIMUM_MESSAGE_COUNT);
    if (maximumMessageCount != null) {
      bridge.setMaximumMessageCount(parseInt(maximumMessageCount));
    }
    String messageTimeToLive = atts.getValue(MESSAGE_TIME_TO_LIVE);
    if (messageTimeToLive != null) {
      bridge.setMessageTimeToLive(parseInt(messageTimeToLive));
    }
    String loadPollInterval = atts.getValue(LOAD_POLL_INTERVAL);
    if (loadPollInterval != null) {
      bridge.setLoadPollInterval(parseLong(loadPollInterval));
    }
    this.stack.push(bridge);
  }

  /**
   * When a gateway-hub element is first encountered,
   * create a new GatewayHub in the cache, configure it from the element's
   * attributes, and push it onto the stack.
   *
   * @throws InternalGemFireException if the startup-policy value is unknown
   * @throws CacheXmlException if a numeric attribute is malformed
   *
   * @since 4.2
   */
  private void startGatewayHub(Attributes atts) {
    String id = atts.getValue(ID);
    String portStr = atts.getValue(PORT);
    // parseInt helper (not Integer.parseInt) so malformed values are reported
    // as CacheXmlException, consistent with the rest of the parser.
    int port = portStr == null ? GatewayHub.DEFAULT_PORT : parseInt(portStr);
    GatewayHub hub = this.cache.addGatewayHub(id, port);
    String bindAddress = atts.getValue(BIND_ADDRESS);
    if (bindAddress != null) {
      hub.setBindAddress(bindAddress.trim());
    }
    String startupPolicy = atts.getValue(STARTUP_POLICY);
    if (startupPolicy != null) {
      // Only the three documented policy constants are accepted.
      if (startupPolicy.equals(GatewayHub.STARTUP_POLICY_NONE)) {
        hub.setStartupPolicy(GatewayHub.STARTUP_POLICY_NONE);
      } else if (startupPolicy.equals(GatewayHub.STARTUP_POLICY_PRIMARY)) {
        hub.setStartupPolicy(GatewayHub.STARTUP_POLICY_PRIMARY);
      } else if (startupPolicy.equals(GatewayHub.STARTUP_POLICY_SECONDARY)) {
        hub.setStartupPolicy(GatewayHub.STARTUP_POLICY_SECONDARY);
      } else {
        throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_GATEWAY_HUB_POLICY_0.toLocalizedString(startupPolicy));
      }
    }
    String socketBufferSize = atts.getValue(SOCKET_BUFFER_SIZE);
    if (socketBufferSize != null) {
      hub.setSocketBufferSize(parseInt(socketBufferSize));
    }
    String maximumTimeBetweenPings = atts.getValue(MAXIMUM_TIME_BETWEEN_PINGS);
    if (maximumTimeBetweenPings != null) {
      hub.setMaximumTimeBetweenPings(parseInt(maximumTimeBetweenPings));
    }
    String manualStart = atts.getValue(MANUAL_START);
    if (manualStart != null) {
      hub.setManualStart(parseBoolean(manualStart));
    }

    stack.push(hub);
  }

  /**
   * When a gateway-sender element is first encountered, configures a new
   * {@link GatewaySenderFactory} from the element's attributes and pushes
   * the sender id, the remote distributed-system id, and the factory onto
   * the stack; the sender itself is created when the element ends.
   *
   * Numeric/boolean attributes go through the class helpers so malformed
   * values surface as {@link CacheXmlException}.
   *
   * @throws InternalGemFireException if the order-policy value is unknown
   */
  private void startGatewaySender(Attributes atts) {
    GatewaySenderFactory gatewaySenderFactory = this.cache.createGatewaySenderFactory();

    // parallel
    String parallel = atts.getValue(PARALLEL);
    gatewaySenderFactory.setParallel(parallel == null
        ? GatewaySender.DEFAULT_IS_PARALLEL : parseBoolean(parallel));

    // manual-start
    String manualStart = atts.getValue(MANUAL_START);
    gatewaySenderFactory.setManualStart(manualStart == null
        ? GatewaySender.DEFAULT_MANUAL_START : parseBoolean(manualStart));

    // socket-buffer-size
    String socketBufferSize = atts.getValue(SOCKET_BUFFER_SIZE);
    gatewaySenderFactory.setSocketBufferSize(socketBufferSize == null
        ? GatewaySender.DEFAULT_SOCKET_BUFFER_SIZE : parseInt(socketBufferSize));

    // socket-read-timeout
    String socketReadTimeout = atts.getValue(SOCKET_READ_TIMEOUT);
    gatewaySenderFactory.setSocketReadTimeout(socketReadTimeout == null
        ? GatewaySender.DEFAULT_SOCKET_READ_TIMEOUT : parseInt(socketReadTimeout));

    // enable-batch-conflation
    String batchConflation = atts.getValue(ENABLE_BATCH_CONFLATION);
    gatewaySenderFactory.setBatchConflationEnabled(batchConflation == null
        ? GatewaySender.DEFAULT_BATCH_CONFLATION : parseBoolean(batchConflation));

    // batch-size
    String batchSize = atts.getValue(BATCH_SIZE);
    gatewaySenderFactory.setBatchSize(batchSize == null
        ? GatewaySender.DEFAULT_BATCH_SIZE : parseInt(batchSize));

    // batch-time-interval
    String batchTimeInterval = atts.getValue(BATCH_TIME_INTERVAL);
    gatewaySenderFactory.setBatchTimeInterval(batchTimeInterval == null
        ? GatewaySender.DEFAULT_BATCH_TIME_INTERVAL : parseInt(batchTimeInterval));

    // enable-persistence
    String enablePersistence = atts.getValue(ENABLE_PERSISTENCE);
    gatewaySenderFactory.setPersistenceEnabled(enablePersistence == null
        ? GatewaySender.DEFAULT_PERSISTENCE_ENABLED : parseBoolean(enablePersistence));

    // disk-store-name (null simply means "no disk store configured")
    gatewaySenderFactory.setDiskStoreName(atts.getValue(DISK_STORE_NAME));

    // disk-synchronous
    String diskSynchronous = atts.getValue(DISK_SYNCHRONOUS);
    gatewaySenderFactory.setDiskSynchronous(diskSynchronous == null
        ? GatewaySender.DEFAULT_DISK_SYNCHRONOUS : parseBoolean(diskSynchronous));

    // maximum-queue-memory
    String maxQueueMemory = atts.getValue(MAXIMUM_QUEUE_MEMORY);
    gatewaySenderFactory.setMaximumQueueMemory(maxQueueMemory == null
        ? GatewaySender.DEFAULT_MAXIMUM_QUEUE_MEMORY : parseInt(maxQueueMemory));

    // alert-threshold
    String alertThreshold = atts.getValue(ALERT_THRESHOLD);
    gatewaySenderFactory.setAlertThreshold(alertThreshold == null
        ? GatewaySender.DEFAULT_ALERT_THRESHOLD : parseInt(alertThreshold));

    // dispatcher-threads
    String dispatcherThreads = atts.getValue(DISPATCHER_THREADS);
    gatewaySenderFactory.setDispatcherThreads(dispatcherThreads == null
        ? GatewaySender.DEFAULT_DISPATCHER_THREADS : parseInt(dispatcherThreads));

    String id = atts.getValue(ID);
    String orderPolicy = atts.getValue(ORDER_POLICY);
    if (orderPolicy != null) {
      try {
        gatewaySenderFactory.setOrderPolicy(Gateway.OrderPolicy.valueOf(orderPolicy.toUpperCase()));
      } catch (IllegalArgumentException e) {
        // Preserve the cause so the offending value can be diagnosed.
        throw new InternalGemFireException(
            LocalizedStrings.SerialGatewaySender_UNKNOWN_GATEWAY_ORDER_POLICY_0_1
                .toLocalizedString(new Object[] { id, orderPolicy }), e);
      }
    }

    String remoteDS = atts.getValue(REMOTE_DISTRIBUTED_SYSTEM_ID);
    // The sender is created at element end; until then the pieces ride the stack.
    stack.push(id);
    stack.push(remoteDS);
    stack.push(gatewaySenderFactory);
  }

  /**
   * When a gateway-receiver element is first encountered, configures a new
   * {@link GatewayReceiverFactory} from the element's attributes and pushes
   * it onto the stack; the receiver is created when the element ends.
   *
   * Numeric attributes go through {@link #parseInt} so malformed values
   * surface as {@link CacheXmlException}.
   */
  private void startGatewayReceiver(Attributes atts) {
    GatewayReceiverFactory receiverFactory = this.cache.createGatewayReceiverFactory();

    // start-port
    String startPort = atts.getValue(START_PORT);
    receiverFactory.setStartPort(startPort == null
        ? GatewayReceiver.DEFAULT_START_PORT : parseInt(startPort));

    // end-port
    String endPort = atts.getValue(END_PORT);
    receiverFactory.setEndPort(endPort == null
        ? GatewayReceiver.DEFAULT_END_PORT : parseInt(endPort));

    // bind-address
    String bindAddress = atts.getValue(BIND_ADDRESS);
    receiverFactory.setBindAddress(bindAddress == null
        ? GatewayReceiver.DEFAULT_BIND_ADDRESS : bindAddress);

    // maximum-time-between-pings
    String maxTimeBetweenPings = atts.getValue(MAXIMUM_TIME_BETWEEN_PINGS);
    receiverFactory.setMaximumTimeBetweenPings(maxTimeBetweenPings == null
        ? GatewayReceiver.DEFAULT_MAXIMUM_TIME_BETWEEN_PINGS : parseInt(maxTimeBetweenPings));

    // socket-buffer-size
    String socketBufferSize = atts.getValue(SOCKET_BUFFER_SIZE);
    receiverFactory.setSocketBufferSize(socketBufferSize == null
        ? GatewayReceiver.DEFAULT_SOCKET_BUFFER_SIZE : parseInt(socketBufferSize));

    // hostname-for-senders
    String hostnameForSenders = atts.getValue(HOSTNAME_FOR_SENDERS);
    receiverFactory.setHostnameForSenders(hostnameForSenders == null
        ? GatewayReceiver.DEFAULT_HOSTNAME_FOR_SENDERS : hostnameForSenders);

    stack.push(receiverFactory);
  }

  /**
   * When a gateway element is first encountered, create
   * a new Gateway on the GatewayHub currently on top of the stack,
   * configure it from the element's attributes, and push it onto the stack.
   *
   * @throws CacheXmlException if the hub rejects the new gateway or a
   *         numeric attribute is malformed
   * @throws InternalGemFireException if the order-policy value is unknown
   *
   * @since 4.2
   */
  private void startGateway(Attributes atts) {
    GatewayHub hub = (GatewayHub) stack.peek();
    String id = atts.getValue(ID);
    String concurrencyLevel = atts.getValue(CONCURRENCY_LEVEL);
    int concurrencyLevelInt = Gateway.DEFAULT_CONCURRENCY_LEVEL;
    if (concurrencyLevel != null) {
      // parseInt helper so a malformed value is a CacheXmlException.
      concurrencyLevelInt = parseInt(concurrencyLevel);
    }
    Gateway gateway = null;
    try {
      gateway = hub.addGateway(id, concurrencyLevelInt);
    } catch (GatewayException e) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_COULD_NOT_CREATE_GATEWAY_WITH_ID_0.toLocalizedString(id), e);
    }
    String earlyAck = atts.getValue(EARLY_ACK);
    if (earlyAck != null) {
      gateway.setEarlyAck(parseBoolean(earlyAck));
    }
    String socketBufferSize = atts.getValue(SOCKET_BUFFER_SIZE);
    if (socketBufferSize != null) {
      gateway.setSocketBufferSize(parseInt(socketBufferSize));
    }
    String socketReadTimeout = atts.getValue(SOCKET_READ_TIMEOUT);
    if (socketReadTimeout != null) {
      gateway.setSocketReadTimeout(parseInt(socketReadTimeout));
    }
    String orderPolicy = atts.getValue(ORDER_POLICY);
    if (orderPolicy != null) {
      try {
        gateway.setOrderPolicy(Gateway.OrderPolicy.valueOf(orderPolicy.toUpperCase()));
      } catch (IllegalArgumentException e) {
        // Preserve the cause so the offending value can be diagnosed.
        throw new InternalGemFireException(
            LocalizedStrings.CacheXmlParser_UNKNOWN_GATEWAY_ORDER_POLICY_0_1
                .toLocalizedString(new Object[] { id, orderPolicy }), e);
      }
    }
    stack.push(gateway);
  }

  /**
   * When a gateway-endpoint element is first encountered,
   * create a new endpoint for the Gateway currently on top of the stack.
   *
   * @throws CacheXmlException if the endpoint cannot be added or the port
   *         is malformed
   *
   * @since 4.2
   */
  private void startGatewayEndpoint(Attributes atts) {
    Gateway gateway = (Gateway) stack.peek();
    String id = atts.getValue(ID);
    String host = atts.getValue(HOST);
    // parseInt helper (not Integer.parseInt) so a malformed port surfaces as
    // a CacheXmlException, consistent with the rest of the parser.
    int port = parseInt(atts.getValue(PORT));
    try {
      gateway.addEndpoint(id, host, port);
    } catch (GatewayException e) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_COULD_NOT_ADD_ENDPOINT_WITH_ID_0.toLocalizedString(id), e);
    }
  }

  /**
   * When a gateway-queue element is first encountered,
   * configure the {@link GatewayQueueAttributes} of the Gateway currently
   * on top of the stack from the element's attributes.
   *
   * Boolean attributes use the {@link #parseBoolean} helper instead of
   * Boolean.valueOf(..).booleanValue(); same semantics, no boxing, and
   * consistent with the rest of the parser.
   *
   * @since 4.2
   */
  private void startGatewayQueue(Attributes atts) {

    Gateway gateway = (Gateway) stack.peek();
    GatewayQueueAttributes queueAttributes = gateway.getQueueAttributes();

    // Initialize disk store for overflow
    String diskStoreName = atts.getValue(DISK_STORE_NAME);
    if (diskStoreName != null) {
      queueAttributes.setDiskStoreName(diskStoreName);
    } else {
      // Initialize overflow directory
      String overflowDirectory = atts.getValue(OVERFLOW_DIRECTORY);
      if (overflowDirectory != null) {
        queueAttributes.setOverflowDirectory(overflowDirectory);
      }
      // Determine whether oplog rolling/compaction is enabled; roll-oplog
      // takes precedence over the auto-compact spelling when both appear.
      String enableOplogRolling = atts.getValue(ROLL_OPLOG);
      String enableOplogCompaction = atts.getValue(AUTO_COMPACT);
      if (enableOplogRolling != null) {
        queueAttributes.setRollOplogs(parseBoolean(enableOplogRolling));
      } else if (enableOplogCompaction != null) {
        queueAttributes.setRollOplogs(parseBoolean(enableOplogCompaction));
      }
    }

    // Initialize maximum amount of memory in queue
    String maximumQueueMemoryStr = atts.getValue(MAXIMUM_QUEUE_MEMORY);
    if (maximumQueueMemoryStr != null) {
      queueAttributes.setMaximumQueueMemory(parseInt(maximumQueueMemoryStr));
    }

    // Initialize batch size
    String batchSizeStr = atts.getValue(BATCH_SIZE);
    if (batchSizeStr != null) {
      queueAttributes.setBatchSize(parseInt(batchSizeStr));
    }

    // Determine batch time interval
    String batchTimeIntervalStr = atts.getValue(BATCH_TIME_INTERVAL);
    if (batchTimeIntervalStr != null) {
      queueAttributes.setBatchTimeInterval(parseInt(batchTimeIntervalStr));
    }

    // Determine whether batch conflation is enabled
    String batchConflationStr = atts.getValue(BATCH_CONFLATION);
    if (batchConflationStr != null) {
      queueAttributes.setBatchConflation(parseBoolean(batchConflationStr));
    }

    // Determine whether persistence is enabled
    String enablePersistenceStr = atts.getValue(ENABLE_PERSISTENCE);
    if (enablePersistenceStr != null) {
      queueAttributes.setEnablePersistence(parseBoolean(enablePersistenceStr));
    }

    // Initialize alert threshold
    String alertThresholdStr = atts.getValue(ALERT_THRESHOLD);
    if (alertThresholdStr != null) {
      queueAttributes.setAlertThreshold(parseInt(alertThresholdStr));
    }

  }

  /**
   * Finishes a cache-server element: pops its optional children off the
   * stack — a ServerLoadProbe, a ClientHaQueueCreation (client subscription
   * settings), and any number of server-group name Strings — followed by
   * the CacheServer itself, and applies them to the server.
   */
  private void endCacheServer() {
    List groups = new ArrayList();
    ServerLoadProbe probe = null;
    ClientHaQueueCreation haCreation = null;

    // Children were pushed after the CacheServer, so they are popped first,
    // in reverse of document order: probe, then HA queue, then group names.
    if(stack.peek() instanceof ServerLoadProbe) {
      probe = (ServerLoadProbe) stack.pop();
    }

    if(stack.peek() instanceof ClientHaQueueCreation) {
      haCreation = (ClientHaQueueCreation)stack.pop();
    }

    while (stack.peek() instanceof String) {
      groups.add(stack.pop());
    }
    CacheServer bs = (CacheServer)stack.pop();
    if (groups.size() > 0) {
      // Popping reversed the groups relative to document order; restore it.
      Collections.reverse(groups);
      String[] groupArray = new String[groups.size()];
      groups.toArray(groupArray);
      bs.setGroups(groupArray);
    }
    if(probe != null) {
      bs.setLoadProbe(probe);
    }
    if (haCreation != null) {
      // Copy the parsed client-HA settings onto the server's subscription
      // config; a named disk store wins over an overflow directory.
      ClientSubscriptionConfig csc = bs.getClientSubscriptionConfig();
      String diskStoreName = haCreation.getDiskStoreName();
      if (diskStoreName!=null) {
        csc.setDiskStoreName(diskStoreName);
      } else {
        csc.setOverflowDirectory(haCreation.getOverflowDirectory()==null?ClientSubscriptionConfig.DEFAULT_OVERFLOW_DIRECTORY:haCreation.getOverflowDirectory());
      }
      csc.setCapacity(haCreation.getCapacity());
      csc.setEvictionPolicy(haCreation.getEvictionPolicy());
    }
  }
  
  /**
   * When a load-probe element is encountered, instantiate the declared
   * probe and push it on the stack so endCacheServer can attach it to the
   * current CacheServer.
   *
   * @since 5.7
   */
  private void endLoadProbe() {
    Declarable declarable = createDeclarable();
    if (declarable instanceof ServerLoadProbe) {
      stack.push(declarable);
      return;
    }
    throw new CacheXmlException(
      LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_1
        .toLocalizedString(new Object[] { declarable.getClass().getName(),
                                          "BridgeLoadProbe"}));
  }

  /**
   * Pop the GatewayHub off the stack and set
   * it in the Cache.
   *
   * @since 4.2
   */
  private void endGatewayHub() {
    // Only discards the hub from the parse stack. NOTE(review): the javadoc
    // says the hub is "set in the Cache", which presumably happened in the
    // element's start handler — confirm there.
    stack.pop();
  }

  /**
   * Finishes a serial gateway-sender element: pops the factory, the remote
   * distributed-system id, and the sender id (in that order) and creates
   * the sender.
   */
  private void endSerialGatewaySender() {
    GatewaySenderFactory factory = (GatewaySenderFactory) stack.pop();
    String remoteDsId = (String) stack.pop();
    String senderId = (String) stack.pop();
    factory.create(senderId, Integer.parseInt(remoteDsId));
  }

  /**
   * Finishes a gateway-receiver element: pops its factory off the stack
   * and creates the receiver.
   */
  private void endGatewayReceiver() {
    ((GatewayReceiverFactory) stack.pop()).create();
  }

  /**
   * Starts a dynamic-region-factory element: pushes its XML attributes
   * (disable-persist-backup, disable-register-interest, pool-name) and a
   * fresh RegionAttributesCreation onto the stack for processing in
   * endDynamicRegionFactory.
   */
  private void startDynamicRegionFactory(Attributes atts) {
    // The two disable-* flags default to "false" when absent.
    String disablePersist = atts.getValue(DISABLE_PERSIST_BACKUP);
    stack.push(disablePersist == null ? "false" : disablePersist);

    String disableRegisterInterest = atts.getValue(DISABLE_REGISTER_INTEREST);
    stack.push(disableRegisterInterest == null ? "false" : disableRegisterInterest);

    // pool-name has no default; a null stack entry means "not specified".
    stack.push(atts.getValue(POOL_NAME));

    // hi-jack RegionAttributesCreation for the disk-dirs, loader, writer and compressor
    stack.push(new RegionAttributesCreation(this.cache));
  }


  /**
   * Finishes a dynamic-region-factory element: pops the state pushed by
   * startDynamicRegionFactory (plus an optional disk directory) and builds
   * the DynamicRegionFactory.Config on the enclosing CacheCreation.
   */
  private void endDynamicRegionFactory() {
    File dir = null;
    RegionAttributesCreation attrs;
    {
      // If the top of the stack is a File, a disk-dir child pushed it (and
      // its size) on top of the attributes — TODO confirm in that handler.
      Object o = stack.pop();
      if (o instanceof File) {
        dir = (File)o;
        stack.pop(); //dir size to be popped out. being used by persistent directories
        attrs = (RegionAttributesCreation) stack.pop();
      } else {
        attrs = (RegionAttributesCreation) o;
      }
    }
    // Remaining entries come off in reverse of the order they were pushed
    // by startDynamicRegionFactory.
    String poolName = (String)stack.pop();
    String disableRegisterInterest = (String)stack.pop();
    String disablePersistBackup = (String)stack.pop();
    CacheWriter cw = attrs.getCacheWriter();
    if(poolName !=null && cw != null) {
      throw new CacheXmlException("You cannot specify both a poolName and a cacheWriter for a dynamic-region-factory.");
    }
    if (cw != null && !(cw instanceof BridgeWriter)) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_THE_DYNAMICREGIONFACTORY_CACHEWRITER_MUST_BE_AN_INSTANCE_OF_BRIDGEWRITER.toLocalizedString());
    }
    DynamicRegionFactory.Config cfg;
    // The XML flags are "disable-*", so each is negated to produce the
    // Config's positive booleans.
    if(poolName != null) {
      cfg =
        new DynamicRegionFactory.Config(dir, poolName,
            !Boolean.valueOf(disablePersistBackup).booleanValue(),
            !Boolean.valueOf(disableRegisterInterest).booleanValue());
    } else {
      cfg =
        new DynamicRegionFactory.Config(dir, (BridgeWriter)cw,
          !Boolean.valueOf(disablePersistBackup).booleanValue(),
          !Boolean.valueOf(disableRegisterInterest).booleanValue());
    }
    CacheCreation cache = (CacheCreation)stack.peek();
    cache.setDynamicRegionFactoryConfig(cfg);
  }

  /**
   * Pop the Gateway off the stack and add
   * it to the GatewayHub's known Gateways.
   *
   * @since 4.2
   */
  private void endGateway() {
    // Only discards the Gateway from the parse stack. NOTE(review): the
    // registration with the hub described in the javadoc presumably happens
    // in the element's start handler — confirm there.
    stack.pop();
  }

  /**
   * When a gateway-listener element is encountered, instantiate the
   * declared listener and register it on the Gateway currently on top of
   * the stack.
   *
   * @since 5.1
   */
  private void endGatewayListener() {
    Declarable declarable = createDeclarable();
    if (declarable instanceof GatewayEventListener) {
      ((Gateway) stack.peek()).addListener((GatewayEventListener) declarable);
    } else {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_GATEWAYEVENTLISTENER
              .toLocalizedString(declarable.getClass().getName()));
    }
  }

  /**
   * When a gateway-conflict-resolver element is encountered, instantiate
   * the declared resolver and install it on the CacheCreation on top of
   * the stack.
   *
   * @since 7.0
   */
  private void endGatewayConflictResolver() {
    Declarable declarable = createDeclarable();
    if (!(declarable instanceof GatewayConflictResolver)) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_GATEWAYCONFLICTRESOLVER
              .toLocalizedString(declarable.getClass().getName()));
    }
    ((CacheCreation) stack.peek())
        .setGatewayConflictResolver((GatewayConflictResolver) declarable);
  }

  /**
   * When a region element is first encountered, we create a
   * {@link RegionCreation} (the name attribute is required; refid is
   * optional) and push it on the stack.
   */
  private void startRegion(Attributes atts) {
    String regionName = atts.getValue(NAME);
    Assert.assertTrue(regionName != null);
    stack.push(new RegionCreation(this.cache, regionName, atts.getValue(REFID)));
  }

  /**
   * When a cache-transaction-manager element is found, push a fresh
   * CacheTransactionManagerCreation on the stack; it collects any declared
   * transaction listener/writer until the element ends.
   */
  private void startCacheTransactionManager() {
    CacheTransactionManagerCreation txMgr = new CacheTransactionManagerCreation();
    stack.push(txMgr);
  }

  /**
   * After popping the current RegionCreation off the stack: if the new top
   * of the stack is another RegionCreation, it is the parent and this
   * region becomes its subregion; otherwise this region is a root region.
   */
  private void endRegion()
      throws RegionExistsException {
    RegionCreation region = (RegionCreation) stack.pop();
    boolean hasParentRegion = !stack.isEmpty()
        && stack.peek() instanceof RegionCreation;
    if (hasParentRegion) {
      ((RegionCreation) stack.peek()).addSubregion(region.getName(), region);
    } else {
      this.cache.addRootRegion(region);
    }
  }

  /**
   * Pops the completed CacheTransactionManagerCreation off the stack and
   * hands it to the cache creation code.
   */
  private void endCacheTransactionManager() {
    CacheTransactionManagerCreation txMgrCreation =
        (CacheTransactionManagerCreation) stack.pop();
    this.cache.addCacheTransactionManagerCreation(txMgrCreation);
  }

  /**
   * Create a transaction-listener using the declarable interface
   * and set the transaction manager with the newly instantiated listener.
   */
  private void endTransactionListener() {
    Declarable d = createDeclarable();
    if (!(d instanceof TransactionListener)) {
      // NOTE(review): this reuses the "...CACHELISTENER" message key even
      // though a TransactionListener is required here — confirm whether a
      // dedicated localized string exists before changing it.
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_CACHELISTENER.toLocalizedString(d.getClass().getName()));
    }
    // The enclosing cache-transaction-manager element is still open, so its
    // creation object is peeked, not popped.
    CacheTransactionManagerCreation txMgrCreation = (CacheTransactionManagerCreation) stack
        .peek();
    txMgrCreation.addListener((TransactionListener) d);
  }

  /**
   * When a disk-store element is first encountered, we create a
   * {@link DiskStoreAttributes}, populate it from whichever XML attributes
   * are present (the name is required), and push it on the stack.
   */
  private void startDiskStore(Attributes atts) {
    // this is the only place to create DSAC objects
    DiskStoreAttributesCreation attrs = new DiskStoreAttributesCreation();

    String name = atts.getValue(NAME);
    if (name == null) {
      throw new InternalGemFireException(
          LocalizedStrings.CacheXmlParser_NULL_DiskStoreName.toLocalizedString());
    }
    attrs.setName(name);

    String value = atts.getValue(AUTO_COMPACT);
    if (value != null) {
      attrs.setAutoCompact(Boolean.parseBoolean(value));
    }

    value = atts.getValue(COMPACTION_THRESHOLD);
    if (value != null) {
      attrs.setCompactionThreshold(parseInt(value));
    }

    value = atts.getValue(ALLOW_FORCE_COMPACTION);
    if (value != null) {
      attrs.setAllowForceCompaction(Boolean.parseBoolean(value));
    }

    value = atts.getValue(MAX_OPLOG_SIZE);
    if (value != null) {
      attrs.setMaxOplogSize(parseInt(value));
    }

    value = atts.getValue(TIME_INTERVAL);
    if (value != null) {
      attrs.setTimeInterval(parseInt(value));
    }

    value = atts.getValue(WRITE_BUFFER_SIZE);
    if (value != null) {
      attrs.setWriteBufferSize(parseInt(value));
    }

    value = atts.getValue(QUEUE_SIZE);
    if (value != null) {
      attrs.setQueueSize(parseInt(value));
    }

    stack.push(attrs);
  }
  
  /**
   * When a hdfs-store element is first encountered, we create a
   * {@link HDFSStoreCreation}, populate it from whichever XML attributes
   * are present (name and namenode URL are required), and push both the
   * store name and the creation object on the stack for endHDFSStore.
   */
  private void startHDFSStore(Attributes atts) {
    // this is the only place to create DSAC objects
    HDFSStoreCreation attrs = new HDFSStoreCreation();

    String name = atts.getValue(NAME);
    if (name == null) {
      throw new InternalGemFireException(
          LocalizedStrings.CacheXmlParser_NULL_DiskStoreName.toLocalizedString());
    }
    attrs.setName(name);

    String namenode = atts.getValue(HDFS_NAMENODE_URL);
    if (namenode == null) {
      // NOTE(review): reuses the null-disk-store-name message key for a
      // missing namenode URL — confirm whether a dedicated string exists.
      throw new InternalGemFireException(
          LocalizedStrings.CacheXmlParser_NULL_DiskStoreName.toLocalizedString());
    }
    attrs.setNameNodeURL(namenode);

    String value = atts.getValue(HDFS_CLIENT_CONFIG_FILE);
    if (value != null) {
      attrs.setHDFSClientConfigFile(value);
    }

    value = atts.getValue(HDFS_HOME_DIR);
    if (value != null) {
      attrs.setHomeDir(value);
    }

    value = atts.getValue(HDFS_BLOCK_CACHE_SIZE);
    if (value != null) {
      try {
        attrs.setBlockCacheSize(Float.valueOf(value));
      } catch (NumberFormatException e) {
        throw new CacheXmlException(
            LocalizedStrings.DistributedSystemConfigImpl_0_IS_NOT_A_VALID_INTEGER_1
                .toLocalizedString(new Object[] { value, HDFS_BLOCK_CACHE_SIZE }),
            e);
      }
    }

    value = atts.getValue(HDFS_MAX_FILE_SIZE);
    if (value != null) {
      attrs.setMaxFileSize(parseInt(value));
    }

    value = atts.getValue(HDFS_TIME_FOR_FILE_ROLLOVER);
    if (value != null) {
      attrs.setFileRolloverInterval(parseInt(value));
    }

    // The name sits beneath the attributes; endHDFSStore pops both.
    stack.push(name);
    stack.push(attrs);
  }
  
  /**
   * Pops the HDFSStoreCreation and its name off the stack and registers
   * the store on the CacheCreation, which must now be on top of the stack.
   */
  private void endHDFSStore() {
    HDFSStoreCreation storeCreation = (HDFSStoreCreation) stack.pop();
    String storeName = (String) stack.pop();
    Object top = stack.peek();
    if (!(top instanceof CacheCreation)) {
      // Assert.assertTrue(false, ...) aborts parsing; nothing below runs.
      Assert.assertTrue(false, "Did not expect a " + top.getClass().getName()
          + " on top of the stack.");
      return; // Dead code
    }
    if (storeName != null) {
      ((CacheCreation) top).addHDFSStore(storeName, storeCreation);
    }
  }
	
  /**
   * When a hdfs-compaction element is first encountered, we
   * create a {@link HDFSCompactionConfig}, populate it accordingly
   * 
   * {@code
   * 
   * 
   * 
   * }
   * 
*/ private void startHDFSCompaction(Attributes atts) { /* * This following code assumes that the xml is well guarded by schema * validation and the compaction element was a child of hdfs-store element. * Hence the elements on the stack top will be hdfs-store-config instance */ HDFSStoreCreation storeConfig = (HDFSStoreCreation) stack.peek(); String strategy = atts.getValue(HDFS_COMPACTION_STRATEGY); HDFSCompactionConfigFactory config = storeConfig.createCompactionConfigFactory(strategy); // configure compaction disable switches if (atts.getValue(HDFS_COMPACTION_ENABLE_COMPACTION) != null) { config.setAutoCompaction(Boolean.valueOf(atts.getValue(HDFS_COMPACTION_ENABLE_COMPACTION))); } if (atts.getValue(HDFS_COMPACTION_ENABLE_MAJOR_COMPACTION) != null) { config.setAutoMajorCompaction(Boolean.valueOf(atts .getValue(HDFS_COMPACTION_ENABLE_MAJOR_COMPACTION))); } // configure max input file size Integer value = getIntValue(atts, HDFS_COMPACTION_MAX_INPUT_FILE_SIZE_MB); if (value != null ) { config.setMaxInputFileSizeMB(value); } // configure min input file count value = getIntValue(atts, HDFS_COMPACTION_MIN_INPUT_FILE_COUNT); if (value != null) { config.setMinInputFileCount(value); } // configure max iteration size advice value = getIntValue(atts, HDFS_COMPACTION_MAX_INPUT_FILE_COUNT); if (value != null) { config.setMaxInputFileCount(value); } // configure compaction concurrency value = getIntValue(atts, HDFS_COMPACTION_MAX_CONCURRENCY); if (value != null) config.setMaxThreads(value); value = getIntValue(atts, HDFS_COMPACTION_MAJOR_COMPACTION_CONCURRENCY); if (value != null) { config.setMajorCompactionMaxThreads(value); } // configure major compaction interval value = getIntValue(atts, HDFS_COMPACTION_MAJOR_COMPACTION_INTERVAL_MINS); if (value != null) { config.setMajorCompactionIntervalMins(value); } // configure major compaction interval value = getIntValue(atts, HDFS_COMPACTION_OLD_FILES_CLEANUP_INTERVAL_MINS); if (value != null) { 
config.setOldFilesCleanupIntervalMins(value); } storeConfig.setHDFSCompactionConfig(config.getConfigView()); } private Integer getIntValue(Attributes atts, String param) { String maxInputFileSizeMB = atts.getValue(param); if (maxInputFileSizeMB != null) { try { return Integer.valueOf(maxInputFileSizeMB); } catch (NumberFormatException e) { throw new CacheXmlException( LocalizedStrings.DistributedSystemConfigImpl_0_IS_NOT_A_VALID_INTEGER_1 .toLocalizedString(new Object[] { maxInputFileSizeMB, param }), e); } } return null; } private void startHDFSEventQueue(Attributes atts) { HDFSEventQueueAttributesFactory eventFactory = new HDFSEventQueueAttributesFactory(); //batch-size String batchSize = atts.getValue(HDFS_QUEUE_BATCH_SIZE); if(batchSize != null){ eventFactory.setBatchSizeMB(Integer.parseInt(batchSize)); } //batch-size String batchInterval = atts.getValue(BATCH_TIME_INTERVAL); if(batchInterval != null){ eventFactory.setBatchTimeInterval(Integer.parseInt(batchInterval)); } //maximum-queue-memory String maxQueueMemory = atts.getValue(MAXIMUM_QUEUE_MEMORY); if(maxQueueMemory != null){ eventFactory.setMaximumQueueMemory(Integer.parseInt(maxQueueMemory)); } //persistent String persistent = atts.getValue(PERSISTENT); if(persistent != null){ eventFactory.setPersistent(Boolean.parseBoolean(persistent)); } String diskStoreName = atts.getValue(DISK_STORE_NAME); if(diskStoreName != null){ eventFactory.setDiskStoreName(diskStoreName); } String diskSynchronous = atts.getValue(DISK_SYNCHRONOUS); if(diskSynchronous != null){ eventFactory.setDiskSynchronous(Boolean.parseBoolean(diskSynchronous)); } HDFSEventQueueAttributes eventAttribs = eventFactory.create(); stack.push(eventAttribs); } private void endHDFSEventQueue() { HDFSEventQueueAttributes eventAttribs = (HDFSEventQueueAttributes) stack.pop(); Object storeCreation = stack.peek(); if (!(storeCreation instanceof HDFSStoreCreation)) //TODO:HDFS throw a proper error string throw new CacheXmlException("Store attributes should 
be a child of store"); HDFSStoreCreation store = (HDFSStoreCreation)storeCreation; // put back the popped element store.setHDFSEventQueueAttributes(eventAttribs); } /** * Create a transaction-writer using the declarable interface * and set the transaction manager with the newly instantiated writer. */ private void endTransactionWriter() { Declarable d = createDeclarable(); if (!(d instanceof TransactionWriter)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_TRANSACTION_WRITER.toLocalizedString(d.getClass().getName())); } CacheTransactionManagerCreation txMgrCreation = (CacheTransactionManagerCreation) stack .peek(); txMgrCreation.setWriter((TransactionWriter) d); } /** * When a region-attributes element is first encountered, we * create a {@link RegionAttributesCreation}, populate it accordingly, and * push it on the stack. */ private void startRegionAttributes(Attributes atts) { RegionAttributesCreation attrs = new RegionAttributesCreation(this.cache); String scope = atts.getValue(SCOPE); if (scope == null) { } else if (scope.equals(LOCAL)) { attrs.setScope(Scope.LOCAL); } else if (scope.equals(DISTRIBUTED_NO_ACK)) { attrs.setScope(Scope.DISTRIBUTED_NO_ACK); } else if (scope.equals(DISTRIBUTED_ACK)) { attrs.setScope(Scope.DISTRIBUTED_ACK); } else if (scope.equals(GLOBAL)) { attrs.setScope(Scope.GLOBAL); } else { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_SCOPE_0.toLocalizedString(scope)); } String mirror = atts.getValue(MIRROR_TYPE); if (mirror == null) { } else if (mirror.equals(NONE)) { attrs.setMirrorType(MirrorType.NONE); } else if (mirror.equals(KEYS)) { attrs.setMirrorType(MirrorType.KEYS); } else if (mirror.equals(KEYS_VALUES)) { attrs.setMirrorType(MirrorType.KEYS_VALUES); } else { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_MIRROR_TYPE_0.toLocalizedString(mirror)); } { String dp = atts.getValue(DATA_POLICY); if (dp == null) { } else if 
(dp.equals(NORMAL_DP)) { attrs.setDataPolicy(DataPolicy.NORMAL); } else if (dp.equals(PRELOADED_DP)) { attrs.setDataPolicy(DataPolicy.PRELOADED); } else if (dp.equals(EMPTY_DP)) { attrs.setDataPolicy(DataPolicy.EMPTY); } else if (dp.equals(REPLICATE_DP)) { attrs.setDataPolicy(DataPolicy.REPLICATE); } else if (dp.equals(PERSISTENT_REPLICATE_DP)) { attrs.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE); } else if (dp.equals(PARTITION_DP)) { attrs.setDataPolicy(DataPolicy.PARTITION); } else if (dp.equals(PERSISTENT_PARTITION_DP)) { attrs.setDataPolicy(DataPolicy.PERSISTENT_PARTITION); } else if (dp.equals(HDFS_PARTITION_DP)) { attrs.setDataPolicy(DataPolicy.HDFS_PARTITION); } else if (dp.equals(HDFS_PERSISTENT_PARTITION_DP)) { attrs.setDataPolicy(DataPolicy.HDFS_PERSISTENT_PARTITION); } else { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_DATA_POLICY_0.toLocalizedString(dp)); } } String initialCapacity = atts.getValue(INITIAL_CAPACITY); if (initialCapacity != null) { attrs.setInitialCapacity(parseInt(initialCapacity)); } String concurrencyLevel = atts.getValue(CONCURRENCY_LEVEL); if (concurrencyLevel != null) { attrs.setConcurrencyLevel(parseInt(concurrencyLevel)); } String concurrencyChecksEnabled = atts.getValue(CONCURRENCY_CHECKS_ENABLED); if (concurrencyChecksEnabled != null) { attrs.setConcurrencyChecksEnabled(Boolean.valueOf(concurrencyChecksEnabled).booleanValue()); } String loadFactor = atts.getValue(LOAD_FACTOR); if (loadFactor != null) { attrs.setLoadFactor(parseFloat(loadFactor)); } String statisticsEnabled = atts.getValue(STATISTICS_ENABLED); if (statisticsEnabled != null) { attrs.setStatisticsEnabled(Boolean.valueOf(statisticsEnabled) .booleanValue()); } String ignoreJTA = atts.getValue(IGNORE_JTA); if (ignoreJTA != null) { attrs.setIgnoreJTA(Boolean.valueOf(ignoreJTA).booleanValue()); } String isLockGrantor = atts.getValue(IS_LOCK_GRANTOR); if (isLockGrantor != null) { 
attrs.setLockGrantor(Boolean.valueOf(isLockGrantor).booleanValue()); } String persistBackup = atts.getValue(PERSIST_BACKUP); if (persistBackup != null) { attrs.setPersistBackup(Boolean.valueOf(persistBackup).booleanValue()); } String earlyAck = atts.getValue(EARLY_ACK); if (earlyAck != null) { attrs.setEarlyAck(Boolean.valueOf(earlyAck).booleanValue()); } String mcastEnabled = atts.getValue(MULTICAST_ENABLED); if (mcastEnabled != null) { attrs.setMulticastEnabled(Boolean.valueOf(mcastEnabled).booleanValue()); } String indexUpdateType = atts.getValue(INDEX_UPDATE_TYPE); attrs.setIndexMaintenanceSynchronous(indexUpdateType == null || indexUpdateType.equals(INDEX_UPDATE_TYPE_SYNCH)); String poolName = atts.getValue(POOL_NAME); if (poolName != null) { attrs.setPoolName(poolName); } String diskStoreName = atts.getValue(DISK_STORE_NAME); if (diskStoreName != null) { attrs.setDiskStoreName(diskStoreName); } String isDiskSynchronous = atts.getValue(DISK_SYNCHRONOUS); if (isDiskSynchronous != null) { attrs.setDiskSynchronous(Boolean.valueOf(isDiskSynchronous).booleanValue()); } String id = atts.getValue(ID); if (id != null) { attrs.setId(id); } String refid = atts.getValue(REFID); if (refid != null) { attrs.setRefid(refid); } String enableGateway = atts.getValue(ENABLE_GATEWAY); if (enableGateway != null) { attrs.setEnableGateway(Boolean.valueOf(enableGateway).booleanValue()); } else { // 4.2 compatibility enableGateway = atts.getValue(ENABLE_WAN); if (enableGateway != null) { attrs.setEnableGateway(Boolean.valueOf(enableGateway).booleanValue()); } } String enableSubscriptionConflation = atts.getValue(ENABLE_SUBSCRIPTION_CONFLATION); if (enableSubscriptionConflation != null) { attrs.setEnableSubscriptionConflation(Boolean.valueOf(enableSubscriptionConflation).booleanValue()); } String enableBridgeConflation = atts.getValue(ENABLE_BRIDGE_CONFLATION); // as of 5.7 enable-bridge-conflation is deprecated. 
// so ignore it if enable-subscription-conflation is set if (enableBridgeConflation != null && enableSubscriptionConflation == null) { attrs.setEnableSubscriptionConflation(Boolean.valueOf(enableBridgeConflation).booleanValue()); } if (enableBridgeConflation == null && enableSubscriptionConflation == null) { // 4.1 compatibility enableBridgeConflation = atts.getValue("enable-conflation"); if (enableBridgeConflation != null) { attrs.setEnableSubscriptionConflation(Boolean.valueOf(enableBridgeConflation).booleanValue()); } } /* deprecated in prPersistSprint1 String publisherStr = atts.getValue(PUBLISHER); if (publisherStr != null) { attrs.setPublisher(Boolean.valueOf(publisherStr).booleanValue()); } */ String enableAsyncConflation = atts.getValue(ENABLE_ASYNC_CONFLATION); if (enableAsyncConflation != null) { attrs.setEnableAsyncConflation(Boolean.valueOf(enableAsyncConflation).booleanValue()); } String gatewayHubId = atts.getValue(GATEWAY_HUB_ID); if (gatewayHubId != null) { attrs.setGatewayHubId(gatewayHubId); } String cloningEnabledStr = atts.getValue(CLONING_ENABLED); if (cloningEnabledStr != null) { attrs.setCloningEnable(Boolean.valueOf(cloningEnabledStr).booleanValue()); } String gatewaySenderIds = atts.getValue(GATEWAY_SENDER_IDS); if(gatewaySenderIds != null && (gatewaySenderIds.length() != 0)){ StringTokenizer st = new StringTokenizer(gatewaySenderIds, ","); while(st.hasMoreElements()){ attrs.addGatewaySenderId(st.nextToken()); } } String asyncEventQueueIds = atts.getValue(ASYNC_EVENT_QUEUE_IDS); if(asyncEventQueueIds != null && (asyncEventQueueIds.length() != 0)){ StringTokenizer st = new StringTokenizer(asyncEventQueueIds, ","); while(st.hasMoreElements()){ attrs.addAsyncEventQueueId(st.nextToken()); } } String hdfsStoreName = atts.getValue(HDFS_STORE_NAME); if (hdfsStoreName != null) { attrs.setHDFSStoreName(hdfsStoreName); } String hdfsWriteOnly= atts.getValue(HDFS_WRITE_ONLY); if (hdfsWriteOnly != null) { 
attrs.setHDFSWriteOnly(Boolean.valueOf(hdfsWriteOnly).booleanValue()); } String enableOffHeapMemoryStr = atts.getValue(ENABLE_OFF_HEAP_MEMORY); if(enableOffHeapMemoryStr != null) { attrs.setEnableOffHeapMemory(Boolean.valueOf(enableOffHeapMemoryStr).booleanValue()); } stack.push(attrs); } /** * After popping the current DiskStoreAttributesCreation off the * stack, we add it to the DiskStoreAttionCreation that should be on the * top of the stack. */ private void endDiskStore() { DiskStoreAttributesCreation dsac = (DiskStoreAttributesCreation) stack.pop(); CacheCreation cache; Object top = stack.peek(); if (top instanceof CacheCreation) { cache = (CacheCreation) top; } else { String s = "Did not expected a " + top.getClass().getName() + " on top of the stack."; Assert.assertTrue(false, s); cache = null; // Dead code } String name = dsac.getName(); if (name != null) { cache.setDiskStore(name, dsac); } } /** * After popping the current RegionAttributesCreation off the * stack, we add it to the RegionCreation that should be on the * top of the stack. */ private void endRegionAttributes() { RegionAttributesCreation attrs = (RegionAttributesCreation) stack.pop(); CacheCreation cache; Object top = stack.peek(); if (top instanceof RegionCreation) { RegionCreation region = (RegionCreation) top; region.setAttributes(attrs); cache = (CacheCreation) region.getCache(); } else if (top instanceof CacheCreation) { cache = (CacheCreation) top; } else { String s = "Did not expected a " + top.getClass().getName() + " on top of the stack."; Assert.assertTrue(false, s); cache = null; // Dead code } String id = attrs.getId(); if (id != null) { cache.setRegionAttributes(id, attrs); } } /** * When a cache element is finished */ private void endCache() { } /** * When a client-cache element is finished */ private void endClientCache() { } /** *

* When the end of a string element is encountered, convert the * data to a String */ // This converts the StringBuffer to a // String because a StringBuffer is // solely used (as a marker) by the characters method // and by doing this conversion we allow for multiple consecutive string // elements, otherwise characters would continue to // append and our stack order would be out of whack. See bug 32122. private void endString() { StringBuffer str = (StringBuffer) stack.pop(); stack.push(str.toString()/* .trim() */); } /** * finish parsing a "group" element which is just a string * @since 5.7 */ private void endGroup() { StringBuffer str = (StringBuffer) stack.pop(); stack.push(str.toString().trim()); } private void endClassName() { StringBuffer str = (StringBuffer) stack.pop(); stack.push(str.toString().trim()); // trim fixes bug 32928 } /** * When an entry element is finished, the value * should be on the stop of the stack followed by the key. The * RegionCreation for the region being created should be below * that. */ private void endEntry() { Object value = stack.pop(); Object key = stack.pop(); RegionCreation region = (RegionCreation) stack.peek(); //changed by mitul after modifying code for Region implements Map region.put(key, value); } /** * When a key-constraint element is finished, the name of the * class should be on top of the stack. 
* * @throws CacheXmlException If the key constraint class cannot be loaded */ private void endKeyConstraint() { String className = ((StringBuffer) stack.pop()).toString().trim(); Class c; try { c = InternalDataSerializer.getCachedClass(className); } catch (Exception ex) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_COULD_NOT_LOAD_KEYCONSTRAINT_CLASS_0.toLocalizedString(className), ex); } // The region attributes should be on top of the stack RegionAttributesCreation attrs = peekRegionAttributesContext("key-constraint"); attrs.setKeyConstraint(c); } /** * When a value-constraint element is finished, the name of the * class should be on top of the stack. * * @throws CacheXmlException If the value constraint class cannot be loaded */ private void endValueConstraint() { String className = ((StringBuffer) stack.pop()).toString().trim(); Class c; try { c = InternalDataSerializer.getCachedClass(className); } catch (Exception ex) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_COULD_NOT_LOAD_VALUECONSTRAINT_CLASS_0.toLocalizedString(className), ex); } // The region attributes should be on top of the stack RegionAttributesCreation attrs = peekRegionAttributesContext("value-constraint"); attrs.setValueConstraint(c); } /** * When a region-time-to-live element is finished, the * {@link ExpirationAttributes} are on top of the stack followed by the * {@link RegionAttributesCreation} to which the expiration attributes are * assigned. */ private void endRegionTimeToLive() { ExpirationAttributes expire = (ExpirationAttributes) stack.pop(); RegionAttributesCreation attrs = peekRegionAttributesContext("region-time-to-live"); attrs.setRegionTimeToLive(expire); } /** * When a region-idle-time element is finished, the * {@link ExpirationAttributes} are on top of the stack followed by the * {@link RegionAttributesCreation} to which the expiration attributes are * assigned. 
*/ private void endRegionIdleTime() { ExpirationAttributes expire = (ExpirationAttributes) stack.pop(); RegionAttributesCreation attrs = peekRegionAttributesContext("region-idle-time"); attrs.setRegionIdleTimeout(expire); } private RegionAttributesCreation peekRegionAttributesContext(String dependentElement) { Object a = stack.peek(); if (!(a instanceof RegionAttributesCreation)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_REGIONATTRIBUTES.toLocalizedString(dependentElement)); } return (RegionAttributesCreation) a; } private PartitionAttributesImpl peekPartitionAttributesImpl(String dependentElement) { Object a = stack.peek(); if (!(a instanceof PartitionAttributesImpl)) { throw new CacheXmlException( LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_PARTITIONATTRIBUTES .toLocalizedString(dependentElement)); } return (PartitionAttributesImpl) a; } /** * When a entry-time-to-live element is finished, an optional * Declarable (the custom-expiry) is followed by the * {@link ExpirationAttributes} are on top of the stack followed by either the * {@link RegionAttributesCreation} to which the expiration attributes are * assigned, or the attributes for a {@link PartitionAttributes} to which the attributes are * assigned. 
*/ private void endEntryTimeToLive() { Declarable custom = null; if (stack.peek() instanceof Declarable) { custom = (Declarable)stack.pop(); } ExpirationAttributes expire = (ExpirationAttributes) stack.pop(); Object a = stack.peek(); // if (a instanceof PartitionAttributesFactory) { // ((PartitionAttributesFactory) a).setEntryTimeToLive(expire); // } else if (a instanceof RegionAttributesCreation) { ((RegionAttributesCreation)a).setEntryTimeToLive(expire); if (custom != null) { ((RegionAttributesCreation)a).setCustomEntryTimeToLive((CustomExpiry)custom); } } else { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_REGIONATTRIBUTES_OR_PARTITIONATTRIBUTES.toLocalizedString(ENTRY_TIME_TO_LIVE)); } } /** * When a entry-idle-time element is finished, an optional * Declarable (the custom-expiry) is followed by the * {@link ExpirationAttributes} are on top of the stack followed by the * {@link RegionAttributesCreation} to which the expiration attributes are * assigned. */ private void endEntryIdleTime() { Declarable custom = null; if (stack.peek() instanceof Declarable) { custom = (Declarable)stack.pop(); } ExpirationAttributes expire = (ExpirationAttributes) stack.pop(); Object a = stack.peek(); // if (a instanceof PartitionAttributesFactory) { // ((PartitionAttributesFactory) a).setEntryIdleTimeout(expire); // } else if (a instanceof RegionAttributesCreation) { ((RegionAttributesCreation)a).setEntryIdleTimeout(expire); if (custom != null) { ((RegionAttributesCreation)a).setCustomEntryIdleTimeout((CustomExpiry)custom); } } else { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_REGIONATTRIBUTES_OR_PARTITIONATTRIBUTES.toLocalizedString(ENTRY_IDLE_TIME)); } } /** * When a partition-attributes element is finished, the * {@link PartitionAttributes} are on top of the stack followed by the * {@link RegionAttributesCreation} to which the partition attributes are * assigned. 
*/ private void endPartitionAttributes() { PartitionAttributesImpl paf = (PartitionAttributesImpl) stack.pop(); paf.validateAttributes(); RegionAttributesCreation rattrs = peekRegionAttributesContext(PARTITION_ATTRIBUTES); // change the 5.0 default data policy (EMPTY) to the current default if (rattrs.hasDataPolicy() && rattrs.getDataPolicy() == DataPolicy.EMPTY && (this.version.compareTo(VERSION_5_0) == 0)) { rattrs.setDataPolicy(PartitionedRegionHelper.DEFAULT_DATA_POLICY); } rattrs.setPartitionAttributes(paf); } /** * When a fixed-partition-attributes element is finished */ private void endFixedPartitionAttributes() { } /** * When a membership-attributes element is finished, the * arguments for constructing the MembershipAttributes are on the stack. */ private void endMembershipAttributes() { Set roles = new HashSet(); Object obj = null; while (!(obj instanceof Object[])) { obj = stack.pop(); if (obj instanceof String) { // found a required-role name roles.add(obj); } } Object[] attrs = (Object[]) obj; String laName = ((String) attrs[0]).toUpperCase().replace('-', '_'); String raName = ((String) attrs[1]).toUpperCase().replace('-', '_'); LossAction laction = LossAction.fromName(laName); ResumptionAction raction = ResumptionAction.fromName(raName); MembershipAttributes ra = new MembershipAttributes( (String[]) roles.toArray(new String[roles.size()]), laction, raction); RegionAttributesCreation rattrs = (RegionAttributesCreation) stack.peek(); rattrs.setMembershipAttributes(ra); } /** * When a required-role element is finished, */ private void endRequiredRole() { // do nothing... wait for endMembershipAttributes() } /** * When a disk-write-attributes element is finished, the * {@link DiskWriteAttributes} is on top of the stack followed by the * {@link RegionAttributesCreation} to which the expiration attributes are * assigned. 
*/ private void endDiskWriteAttributes() { DiskWriteAttributes dwa = (DiskWriteAttributes) stack.pop(); RegionAttributesCreation attrs = peekRegionAttributesContext(DISK_WRITE_ATTRIBUTES); attrs.setDiskWriteAttributes(dwa); } /** * When a disk-dir element is finished, the name of the * directory is on top of the stack. Create a new {@link File}and push it on * the stack. */ private void endDiskDir() { StringBuffer dirName = (StringBuffer) stack.pop(); File dir = new File(dirName.toString().trim()); if(!dir.exists()){ } stack.push(dir); } /** * When a disk-dirs element is finished, the directory * {@link File}s are on the stack followed by the {@link * RegionAttributesCreation} to which the expiration attributes are assigned. */ private void endDiskDirs() { List dirs = new ArrayList(); List sizes = new ArrayList(); while (stack.peek() instanceof File) { dirs.add(stack.pop()); sizes.add(stack.pop()); } Assert.assertTrue(!dirs.isEmpty()); Assert.assertTrue(!sizes.isEmpty()); //should set the disk-dirs and sizes in reverse order since parsers would have reversed //the order because of pushing into stack File[] disks = new File[dirs.size()]; int dirsLength = dirs.size(); for(int i=0; isynchronous-writes element is encounter, we push a * {@link DiskWriteAttributes} for doing synchronous writes on the stack. 
*/ private void startSynchronousWrites() { int maxOplogSize = ((Integer)stack.pop()).intValue(); String rollOplog = (String)stack.pop(); //convery megabytes to bytes for DiskWriteAttributes creation long maxOplogSizeInBytes = maxOplogSize; maxOplogSizeInBytes = maxOplogSizeInBytes * 1024 * 1024; Properties props = new Properties(); props.setProperty(MAX_OPLOG_SIZE,String.valueOf(maxOplogSizeInBytes)); props.setProperty(ROLL_OPLOG, rollOplog); props.setProperty(DiskWriteAttributesImpl.SYNCHRONOUS_PROPERTY, "true"); stack.push(new DiskWriteAttributesImpl(props)); } /** * When a asynchronous-writes element is encounter, we push a * {@link DiskWriteAttributes} for doing asynchronous writes on the stack. */ private void startAsynchronousWrites(Attributes atts) { int maxOplogSize = ((Integer)stack.pop()).intValue(); String rollOplog = (String)stack.pop(); // convery megabytes to bytes for DiskWriteAttributes creation long maxOplogSizeInBytes = maxOplogSize; maxOplogSizeInBytes = maxOplogSizeInBytes * 1024 * 1024; long timeInterval = parseLong(atts.getValue(TIME_INTERVAL)); long bytesThreshold = parseLong(atts.getValue(BYTES_THRESHOLD)); Properties props = new Properties(); props.setProperty(MAX_OPLOG_SIZE,String.valueOf(maxOplogSizeInBytes)); props.setProperty(ROLL_OPLOG,rollOplog); props.setProperty(TIME_INTERVAL,String.valueOf(timeInterval)); props.setProperty(DiskWriteAttributesImpl.SYNCHRONOUS_PROPERTY,"false"); props.setProperty(BYTES_THRESHOLD,String.valueOf(bytesThreshold)); stack.push(new DiskWriteAttributesImpl(props)); } /** * When a parition-attributes element is encountered, we push a * ParitionAttributes?? for configuring paritioned storage on the * stack. 
*/ private void startPartitionAttributes(Attributes atts) { PartitionAttributesImpl paf = new PartitionAttributesImpl(); String redundancy = atts.getValue(PARTITION_REDUNDANT_COPIES); if (redundancy != null) { paf.setRedundantCopies(parseInt(redundancy)); } String localMaxMem = atts.getValue(LOCAL_MAX_MEMORY); if (localMaxMem != null) { paf.setLocalMaxMemory(parseInt(localMaxMem)); } String totalMaxMem = atts.getValue(TOTAL_MAX_MEMORY); if (totalMaxMem != null) { paf.setTotalMaxMemory(parseLong(totalMaxMem)); } String totalNumBuckets = atts.getValue(TOTAL_NUM_BUCKETS); if (totalNumBuckets != null) { paf.setTotalNumBuckets(parseInt(totalNumBuckets)); } String colocatedWith = atts.getValue(PARTITION_COLOCATED_WITH); if (colocatedWith != null) { paf.setColocatedWith(colocatedWith); } String recoveryDelay = atts.getValue(RECOVERY_DELAY); if (recoveryDelay != null) { paf.setRecoveryDelay(parseInt(recoveryDelay)); } String startupRecoveryDelay = atts.getValue(STARTUP_RECOVERY_DELAY); if (startupRecoveryDelay != null) { paf.setStartupRecoveryDelay(parseInt(startupRecoveryDelay)); } stack.push(paf); } /** * When a fixed-partition-attributes element is encountered, we * create an instance of FixedPartitionAttributesImpl and add it to the * PartitionAttributesImpl stack. 
*/ private void startFixedPartitionAttributes(Attributes atts) { FixedPartitionAttributesImpl fpai = new FixedPartitionAttributesImpl(); String partitionName = atts.getValue(PARTITION_NAME); if (partitionName != null) { fpai.setPartitionName(partitionName); } String isPrimary = atts.getValue(IS_PRIMARY); if (isPrimary != null) { fpai.isPrimary(parseBoolean(isPrimary)); } String numBuckets = atts.getValue(NUM_BUCKETS); if (numBuckets != null) { fpai.setNumBuckets(parseInt(numBuckets)); } Object a = stack.peek(); if (a instanceof PartitionAttributesImpl) { ((PartitionAttributesImpl)a).addFixedPartitionAttributes(fpai); } } /** * When a membership-attributes element is encountered, we push * an array of attributes for creation of a MembershipAttributes. */ private void startMembershipAttributes(Attributes atts) { Object[] attrs = new Object[2]; // loss-action, resumption-action attrs[0] = atts.getValue(LOSS_ACTION) == null ? LossAction.NO_ACCESS.toString() : atts.getValue(LOSS_ACTION); attrs[1] = atts.getValue(RESUMPTION_ACTION) == null ? ResumptionAction.REINITIALIZE.toString() : atts.getValue(RESUMPTION_ACTION); stack.push(attrs); } /** * When a subscription-attributes element is first encountered, * we create an SubscriptionAttibutes?? object from the element's * attributes and stick it in the current region attributes. 
*/ private void startSubscriptionAttributes(Attributes atts) { String ip = atts.getValue(INTEREST_POLICY); SubscriptionAttributes sa; if (ip == null) { sa = new SubscriptionAttributes(); } else if (ip.equals(ALL)) { sa = new SubscriptionAttributes(InterestPolicy.ALL); } else if (ip.equals(CACHE_CONTENT)) { sa = new SubscriptionAttributes(InterestPolicy.CACHE_CONTENT); } else { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_INTERESTPOLICY_0.toLocalizedString(ip)); } RegionAttributesCreation rattrs = (RegionAttributesCreation) stack.peek(); rattrs.setSubscriptionAttributes(sa); } /** * When a required-role element is encountered, we push a string * for creation of MembershipAttributes. */ private void startRequiredRole(Attributes atts) { stack.push(atts.getValue(NAME)); } /** * When a index element is encounter, we create the * IndexCreationData object from the Stack. Set the required parameters in the * IndexCreationData object & push it on stack. * */ private void startIndex(Attributes atts) { boolean isPrimary = false; String type = ""; IndexCreationData icd = new IndexCreationData(atts.getValue(NAME)); int len = atts.getLength(); if (len > 1) { if (Boolean.valueOf(atts.getValue(KEY_INDEX))) { icd.setIndexType(IndexType.PRIMARY_KEY); isPrimary = true; } type = atts.getValue(INDEX_TYPE); } if (len > 2) { String fromClause = atts.getValue(FROM_CLAUSE); String expression = atts.getValue(EXPRESSION); String importStr = atts.getValue(IMPORTS); if (isPrimary) { icd.setIndexData(IndexType.PRIMARY_KEY, null, expression, null); } else { if (type == null) { type = RANGE_INDEX_TYPE; } if (type.equals(HASH_INDEX_TYPE)) { icd.setIndexData(IndexType.HASH, fromClause, expression, importStr); } else if (type.equals(RANGE_INDEX_TYPE)){ icd.setIndexData(IndexType.FUNCTIONAL, fromClause, expression, importStr); } else { if (getLogWriter() != null) { getLogWriter().info(LocalizedStrings.CacheXmlParser_UNKNOWN_INDEX_TYPE, type); } 
icd.setIndexData(IndexType.FUNCTIONAL, fromClause, expression, importStr); } } } this.stack.push(icd); } /** * When index element is ending we need to verify all attributes because of * new index tag definition since 6.6.1 and support previous definition also. * * if functional element was not there then we need to validate * expression and fromClause as not null. */ private void endIndex() { boolean throwExcep = false; IndexCreationData icd = (IndexCreationData) this.stack.pop(); if (icd.getIndexType() == null) { throwExcep = true; } else { if (icd.getIndexType().equals(IndexType.PRIMARY_KEY)) { if (icd.getIndexExpression() == null) { throwExcep = true; } } else { if (icd.getIndexExpression() == null && icd.getIndexFromClause() == null) { throwExcep = true; } } } if (!throwExcep) { RegionCreation rc = (RegionCreation) this.stack.peek(); rc.addIndexData(icd); } else { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_CACHEXMLPARSERENDINDEXINDEX_CREATION_ATTRIBUTE_NOT_CORRECTLY_SPECIFIED.toLocalizedString()); } } /** * When a functional element is encounter, we pop the * IndexCreationData object from the Stack. Set the required parameters in the * IndexCreationData object & set it in RegionCreation object. 
* */ private void startFunctionalIndex(Attributes atts) { boolean throwExcep = false; IndexCreationData icd = (IndexCreationData) this.stack.peek(); //icd.setIndexType(FUNCTIONAL); int len = -1; if ((len = atts.getLength()) > 1) { String fromClause = atts.getValue(FROM_CLAUSE); String expression = atts.getValue(EXPRESSION); String importStr = null; if (len == 3) importStr = atts.getValue(IMPORTS); if (fromClause == null || expression == null) { throwExcep = true; } else { icd.setIndexData(IndexType.FUNCTIONAL, fromClause, expression, importStr); } } else { throwExcep = true; } if (throwExcep) { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_CACHEXMLPARSERSTARTFUNCTIONALINDEXINDEX_CREATION_ATTRIBUTE_NOT_CORRECTLY_SPECIFIED.toLocalizedString()); } } /** * When a primary-key element is encounter, we pop the * IndexCreationData object from the Stack. Set the required parameters in the * IndexCreationData object & set it in RegionCreation object. * */ private void startPrimaryKeyIndex(Attributes atts) { IndexCreationData icd = (IndexCreationData) this.stack.peek(); //icd.setIndexType(PRIMARY_KEY); boolean throwExcep = false; if (atts.getLength() == 1) { String field = atts.getValue(FIELD); if (field == null) { throwExcep = true; } else { icd.setIndexData(IndexType.PRIMARY_KEY, null, field, null); } } else { throwExcep = true; } if (throwExcep) { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_CACHEXMLPARSERSTARTPRIMARYKEYINDEXPRIMARYKEY_INDEX_CREATION_FIELD_IS_NULL.toLocalizedString()); } } /** * When a expiration-attributes element is first encountered, * we create an ExpirationAttibutes?? object from the element's * attributes and push it on the stack. 
*/ private void startExpirationAttributes(Attributes atts) { int timeout = parseInt(atts.getValue(TIMEOUT)); String action = atts.getValue(ACTION); ExpirationAttributes expire; if (action == null) { expire = new ExpirationAttributes(timeout); } else if (action.equals(INVALIDATE)) { expire = new ExpirationAttributes(timeout, ExpirationAction.INVALIDATE); } else if (action.equals(DESTROY)) { expire = new ExpirationAttributes(timeout, ExpirationAction.DESTROY); } else if (action.equals(LOCAL_INVALIDATE)) { expire = new ExpirationAttributes(timeout, ExpirationAction.LOCAL_INVALIDATE); } else if (action.equals(LOCAL_DESTROY)) { expire = new ExpirationAttributes(timeout, ExpirationAction.LOCAL_DESTROY); } else { throw new InternalGemFireException(LocalizedStrings.CacheXmlParser_UNKNOWN_EXPIRATION_ACTION_0.toLocalizedString(action)); } stack.push(expire); } /** * When a serializer-registration element is first encountered, we need * to create the wrapper object to hold the data, and put it on the stack. */ private void startSerializerRegistration(){ //The logWriter appears to be null during the unit tests... so just to //be sure we don't cause a problem, check for null... SerializerCreation sc = new SerializerCreation(); this.stack.push(sc); } /** * When an instantiator element is first encountered, * we need to hang on to the id attribute for use in registration in the end * tag function. */ private void startInstantiator(Attributes atts) { int id = parseInt(atts.getValue(ID)); this.stack.push(id); } /** * Creates and initializes an instance of {@link Declarable} from the contents * of the stack. 
* * @throws CacheXmlException Something goes wrong while instantiating or * initializing the declarable */ private Declarable createDeclarable() { Properties props = new Properties(); Object top = stack.pop(); while (top instanceof Parameter) { Parameter param = (Parameter) top; props.put(param.getName(), param.getValue()); top = stack.pop(); } if (getLogWriter() != null) { getLogWriter().info(LocalizedStrings.CacheXmlParser_XML_PARSER_CREATEDECLARABLE_PROPERTIES__0, props); } Assert.assertTrue(top instanceof String); String className = (String) top; if (getLogWriter() != null) { getLogWriter().info( LocalizedStrings.CacheXmlParser_XML_PARSER_CREATEDECLARABLE_CLASS_NAME_0, className); } Object o; try { Class c = InternalDataSerializer.getCachedClass(className); o = c.newInstance(); } catch (Exception ex) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_WHILE_INSTANTIATING_A_0.toLocalizedString(className), ex); } if (!(o instanceof Declarable)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_CLASS_0_IS_NOT_AN_INSTANCE_OF_DECLARABLE.toLocalizedString(className)); } Declarable d = (Declarable) o; d.init(props); this.cache.addDeclarableProperties(d, props); return d; } /** * Ending the compressor registration should leave us with a * class name on the stack. Pull it off and setup the {@link Compressor} * on the region attributes. 
*/ private void endCompressor() { Class klass = getClassFromStack(); if(!Compressor.class.isAssignableFrom(klass)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_COMPRESSOR .toLocalizedString(klass.getName())); } Compressor compressor; try { compressor = (Compressor) klass.newInstance(); } catch (Exception ex) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_WHILE_INSTANTIATING_A_0.toLocalizedString(klass.getName()), ex); } Object a = stack.peek(); if (a instanceof RegionAttributesCreation) { RegionAttributesCreation attrs = (RegionAttributesCreation) a; attrs.setCompressor(compressor); } else { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_REGIONATTRIBUTES_OR_1.toLocalizedString(new Object[] {COMPRESSOR, DYNAMIC_REGION_FACTORY})); } } /** * When a cache-loader element is finished, the {@link * Parameter}s and class names are popped off the stack. The cache loader is * instantiated and initialized with the parameters, if appropriate. * When the loader is being created in a dynamic-region-factory, there may * be a disk-dir element on the stack, represented by a File object. * Otherwise, dynamic-region-factory uses a RegionAttributesCreation, just * like a region, and is treated the same.cache-writer element is finished, the {@link * Parameter}s and class names are popped off the stack. The cache writer is * instantiated and initialized with the parameters, if appropriate. *

A cache-writer may be created in the context of region-attributes or
   * dynamic-region-factory. In the latter case, there may be a disk-dir on
   * top of the stack, represented by a File object.
   */
  private void endCacheWriter() {
    Declarable d = createDeclarable();
    if (!(d instanceof CacheWriter)) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_CACHEWRITER.toLocalizedString(d.getClass().getName()));
    }
    Object a = stack.peek();
    // check for partition-attributes
    // if (a instanceof PartitionAttributesFactory) {
    //   PartitionAttributesFactory fac = (PartitionAttributesFactory) a;
    //   fac.setCacheWriter((CacheWriter) d);
    // }
    // else
    // check for disk-dir
    if ((a instanceof File)) {
      // dynamic-region-factory case: temporarily pop the disk-dir (and its
      // size) to reach the region attributes underneath, then restore them
      // in the original order.
      Object sav = stack.pop();
      Object size = stack.pop(); // pop out disk size
      a = stack.peek();
      if (!(a instanceof RegionAttributesCreation)) {
        throw new CacheXmlException(LocalizedStrings.CacheXmlParser_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_1.toLocalizedString(new Object[] {CACHE_WRITER, DYNAMIC_REGION_FACTORY}));
      }
      stack.push(size);
      stack.push(sav);
    }
    // check for normal region-attributes
    else if (!(a instanceof RegionAttributesCreation)) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_REGIONATTRIBUTES.toLocalizedString(CACHE_WRITER));
    }
    RegionAttributesCreation attrs = (RegionAttributesCreation) a;
    attrs.setCacheWriter((CacheWriter) d);
  }

  /**
   * When a custom-expiry element is finished, the CustomExpiry Declarable
   * is created from the stack contents and pushed back for the enclosing
   * entry-time-to-live / entry-idle-time handler to consume.
   */
  private void endCustomExpiry() {
    Declarable d = createDeclarable();
    if (!(d instanceof CustomExpiry)) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_CUSTOMEXPIRY.toLocalizedString(d.getClass().getName()));
    }
    stack.push(d);
  }

  /**
   * Create an lru-entry-count eviction controller, assigning
   * it to the enclosed region-attributes. Allow any combination
   * of attributes to be provided. Use the default values for any attribute
   * that is not provided.
* @param atts */ /** * @param atts */ private void startLRUEntryCount(Attributes atts) { final String maximum = atts.getValue(MAXIMUM); int max = LRUCapacityController.DEFAULT_MAXIMUM_ENTRIES; if (maximum != null) { max = parseInt(maximum); } final String lruAction = atts.getValue(ACTION); EvictionAction action = EvictionAction.DEFAULT_EVICTION_ACTION; if (lruAction != null) { action = EvictionAction.parseAction(lruAction); } RegionAttributesCreation regAttrs = peekRegionAttributesContext(LRU_ENTRY_COUNT); regAttrs.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(max, action)); } /** * Start the configuration of a lru-memory-size eviction controller. Allow * for any of the attributes to be missing. Store the attributes on the stack * anticipating the declaration of an {@link ObjectSizer}. * @param atts */ private void startLRUMemorySize(Attributes atts) { String lruAction = atts.getValue(ACTION); EvictionAction action = EvictionAction.DEFAULT_EVICTION_ACTION; if(lruAction != null){ action = EvictionAction.parseAction(lruAction); } String maximum = atts.getValue(MAXIMUM); int max = MemLRUCapacityController.DEFAULT_MAXIMUM_MEGABYTES; if (maximum != null) { max = parseInt(maximum); } // Store for later addition of ObjectSizer, if any (the cast is for clarity sake) stack.push(EvictionAttributes.createLRUMemoryAttributes(max, null, action)); } /** * Complete the configuration of a lru-memory-size eviction controller. * Check for the declaration of an {@link ObjectSizer}. 
Assign the attributes to the * enclose region-attributes */ private void endLRUMemorySize() { Object declCheck = stack.peek(); Declarable d = null; if (declCheck instanceof String || declCheck instanceof Parameter) { d = createDeclarable(); if (!(d instanceof ObjectSizer)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_OBJECTSIZER.toLocalizedString(d.getClass().getName())); } } EvictionAttributesImpl eai = (EvictionAttributesImpl) stack.pop(); if (d != null) { eai.setObjectSizer((ObjectSizer) d); } RegionAttributesCreation regAttrs = peekRegionAttributesContext(LRU_MEMORY_SIZE); regAttrs.setEvictionAttributes(eai); } /** * Create an lru-heap-percentage eviction controller, assigning * it to the enclosed region-attributes * @param atts */ private void startLRUHeapPercentage(Attributes atts) { final String lruAction = atts.getValue(ACTION); EvictionAction action = EvictionAction.DEFAULT_EVICTION_ACTION; if (lruAction != null) { action = EvictionAction.parseAction(lruAction); } // Store for later addition of ObjectSizer, if any stack.push(EvictionAttributes.createLRUHeapAttributes(null, action)); } /** * Complete the configuration of a lru-heap-percentage eviction controller. * Check for the declaration of an {@link ObjectSizer}. 
Assign the attributes to the * enclosed region-attributes */ private void endLRUHeapPercentage() { Object declCheck = stack.peek(); Declarable d = null; if (declCheck instanceof String || declCheck instanceof Parameter) { d = createDeclarable(); if (!(d instanceof ObjectSizer)) { String s = "A " + d.getClass().getName() + " is not an instance of a ObjectSizer"; throw new CacheXmlException(s); } } EvictionAttributesImpl eai = (EvictionAttributesImpl) stack.pop(); if (d != null) { eai.setObjectSizer((ObjectSizer) d); } RegionAttributesCreation regAttrs = peekRegionAttributesContext(LRU_HEAP_PERCENTAGE); regAttrs.setEvictionAttributes(eai); } /** * When a cache-listener element is finished, the {@link * Parameter}s and class names are popped off the stack. The cache listener is * instantiated and initialized with the parameters, if appropriate. */ private void endCacheListener() { Declarable d = createDeclarable(); if (!(d instanceof CacheListener)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_CACHELISTENER.toLocalizedString(d.getClass().getName())); } RegionAttributesCreation attrs = peekRegionAttributesContext(CACHE_LISTENER); attrs.addCacheListener((CacheListener) d); } private void startAsyncEventQueue(Attributes atts) { AsyncEventQueueCreation asyncEventQueueCreation = new AsyncEventQueueCreation(); //id String id = atts.getValue(ID); asyncEventQueueCreation.setId(id); String parallel = atts.getValue(PARALLEL); if(parallel == null){ asyncEventQueueCreation.setParallel(GatewaySender.DEFAULT_IS_PARALLEL); }else{ asyncEventQueueCreation.setParallel(Boolean.parseBoolean(parallel)); } //batch-size String batchSize = atts.getValue(BATCH_SIZE); if(batchSize == null){ asyncEventQueueCreation.setBatchSize(GatewaySender.DEFAULT_BATCH_SIZE); }else{ asyncEventQueueCreation.setBatchSize(Integer.parseInt(batchSize)); } //batch-time-interval String batchTimeInterval = atts.getValue(BATCH_TIME_INTERVAL); if(batchTimeInterval == null){ 
asyncEventQueueCreation.setBatchTimeInterval(GatewaySender.DEFAULT_BATCH_TIME_INTERVAL); }else{ asyncEventQueueCreation.setBatchTimeInterval(Integer.parseInt(batchTimeInterval)); } //batch-conflation String batchConflation = atts.getValue(ENABLE_BATCH_CONFLATION); if(batchConflation == null){ asyncEventQueueCreation.setBatchConflationEnabled(GatewaySender.DEFAULT_BATCH_CONFLATION); }else{ asyncEventQueueCreation.setBatchConflationEnabled(Boolean.parseBoolean(batchConflation)); } //maximum-queue-memory String maxQueueMemory = atts.getValue(MAXIMUM_QUEUE_MEMORY); if(maxQueueMemory == null){ asyncEventQueueCreation.setMaximumQueueMemory(GatewaySender.DEFAULT_MAXIMUM_QUEUE_MEMORY); }else{ asyncEventQueueCreation.setMaximumQueueMemory(Integer.parseInt(maxQueueMemory)); } //persistent String persistent = atts.getValue(PERSISTENT); if(persistent == null){ asyncEventQueueCreation.setPersistent(GatewaySender.DEFAULT_PERSISTENCE_ENABLED); }else{ asyncEventQueueCreation.setPersistent(Boolean.parseBoolean(persistent)); } //diskStoreName String diskStoreName = atts.getValue(DISK_STORE_NAME); if(diskStoreName == null){ asyncEventQueueCreation.setDiskStoreName(null); }else{ asyncEventQueueCreation.setDiskStoreName(diskStoreName); } //diskSynchronous String diskSynchronous = atts.getValue(DISK_SYNCHRONOUS); if (diskSynchronous == null) { asyncEventQueueCreation.setDiskSynchronous(GatewaySender.DEFAULT_DISK_SYNCHRONOUS); } else { asyncEventQueueCreation.setDiskSynchronous(Boolean.parseBoolean(diskSynchronous)); } String dispatcherThreads = atts.getValue(DISPATCHER_THREADS); if (dispatcherThreads == null) { asyncEventQueueCreation .setDispatcherThreads(GatewaySender.DEFAULT_DISPATCHER_THREADS); } else { asyncEventQueueCreation.setDispatcherThreads(Integer .parseInt(dispatcherThreads)); } String orderPolicy = atts.getValue(ORDER_POLICY); if (orderPolicy != null) { try { asyncEventQueueCreation.setOrderPolicy(Gateway.OrderPolicy.valueOf(orderPolicy.toUpperCase())); } catch 
(IllegalArgumentException e) { throw new InternalGemFireException( LocalizedStrings.AsyncEventQueue_UNKNOWN_ORDER_POLICY_0_1 .toLocalizedString(new Object[] { id, orderPolicy })); } } stack.push(asyncEventQueueCreation); } private void endAsyncEventListener() { Declarable d = createDeclarable(); if (!(d instanceof AsyncEventListener)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_ASYNCEVENTLISTENER.toLocalizedString(d.getClass().getName())); } AsyncEventQueueCreation eventChannel = peekAsyncEventQueueContext(ASYNC_EVENT_LISTENER); eventChannel.setAsyncEventListener((AsyncEventListener) d); } private AsyncEventQueueCreation peekAsyncEventQueueContext(String dependentElement) { Object a = stack.peek(); if (!(a instanceof AsyncEventQueueCreation)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_ASYNCEVENTQUEUE.toLocalizedString(dependentElement)); } return (AsyncEventQueueCreation) a; } private void endAsyncEventQueue() { AsyncEventQueueCreation asyncEventChannelCreation = (AsyncEventQueueCreation) stack.peek(); AsyncEventQueueFactory factory = cache.createAsyncEventQueueFactory(); factory.setParallel(asyncEventChannelCreation.isParallel()); factory.setBatchSize(asyncEventChannelCreation.getBatchSize()); factory.setBatchTimeInterval(asyncEventChannelCreation.getBatchTimeInterval()); factory.setBatchConflationEnabled(asyncEventChannelCreation.isBatchConflationEnabled()); factory.setPersistent(asyncEventChannelCreation.isPersistent()); factory.setDiskStoreName(asyncEventChannelCreation.getDiskStoreName()); factory.setDiskSynchronous(asyncEventChannelCreation.isDiskSynchronous()); factory.setMaximumQueueMemory(asyncEventChannelCreation.getMaximumQueueMemory()); factory.setDispatcherThreads(asyncEventChannelCreation.getDispatcherThreads()); factory.setOrderPolicy(asyncEventChannelCreation.getOrderPolicy()); AsyncEventQueue asyncEventChannel = 
factory.create(asyncEventChannelCreation.getId(), asyncEventChannelCreation.getAsyncEventListener()); stack.pop(); } /** * When a partition-resolver element is finished, the {@link * Parameter}s and class names are popped off the stack. The * PartitionResolver is instantiated and initialized with the * parameters, if appropriate. */ private void endPartitionResolver() { Declarable d = createDeclarable(); if (!(d instanceof PartitionResolver)) { throw new CacheXmlException( LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_1 .toLocalizedString(new Object[] { d.getClass().getName(), "PartitionResolver"})); } PartitionAttributesImpl pai = peekPartitionAttributesImpl(PARTITION_ATTRIBUTES); pai.setPartitionResolver((PartitionResolver) d); } /** * When a partition-listener element is finished, the {@link * Parameter}s and class names are popped off the stack. The * PartitionListener is instantiated and initialized with the * parameters, if appropriate. */ private void endPartitionListener() { Declarable d = createDeclarable(); if (!(d instanceof PartitionListener)) { throw new CacheXmlException( LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_1 .toLocalizedString(new Object[] { d.getClass().getName(), "PartitionListener"})); } PartitionAttributesImpl pai = peekPartitionAttributesImpl(PARTITION_ATTRIBUTES); pai.addPartitionListener((PartitionListener) d); } /** * When we have encountered a FunctionService element, we create the object * and push it onto stack */ private void startFunctionService() { this.stack.push(new FunctionServiceCreation()); } /** * When we have finished a FunctionService element, we create the object * and push it onto stack */ private void endFunctionService() { Object top = stack.pop(); if (! (top instanceof FunctionServiceCreation)) { throw new CacheXmlException(LocalizedStrings. 
CacheXmlParser_EXPECTED_A_FUNCTIONSERVICECREATION_INSTANCE .toLocalizedString()); } FunctionServiceCreation fsc = (FunctionServiceCreation)top; fsc.create(); } /** * Start the Resource Manager element configuration * @param atts XML attributes for the resource-manager */ private void startResourceManager(final Attributes atts) { ResourceManagerCreation rmc = new ResourceManagerCreation(); { String chp = atts.getValue(CRITICAL_HEAP_PERCENTAGE); if (chp != null) { rmc.setCriticalHeapPercentage(parseFloat(chp)); } else { rmc.setCriticalHeapPercentageToDefault(); } } { String ehp = atts.getValue(EVICTION_HEAP_PERCENTAGE); if (ehp != null) { rmc.setEvictionHeapPercentage(parseFloat(ehp)); } else { rmc.setEvictionHeapPercentageToDefault(); } } { String chp = atts.getValue(CRITICAL_OFF_HEAP_PERCENTAGE); if (chp != null) { rmc.setCriticalOffHeapPercentage(parseFloat(chp)); } else { rmc.setCriticalOffHeapPercentageToDefault(); } } { String ehp = atts.getValue(EVICTION_OFF_HEAP_PERCENTAGE); if (ehp != null) { rmc.setEvictionOffHeapPercentage(parseFloat(ehp)); } else { rmc.setEvictionOffHeapPercentageToDefault(); } } this.stack.push(rmc); } private void endResourceManager() { Object top = stack.pop(); if (! (top instanceof ResourceManagerCreation)) { throw new CacheXmlException("Expected a ResourceManagerCreation instance"); } ResourceManagerCreation rmc = (ResourceManagerCreation)top; // TODO set any listeners here // rmc.addResourceListener(null); this.cache.setResourceManagerCreation(rmc); } private void endBackup() { StringBuffer str = (StringBuffer) stack.pop(); File backup = new File(str.toString().trim()); this.cache.addBackup(backup); } /** * When we have finished a function element, we create the Declarable * and push it onto stack */ private void endFunctionName() { Declarable d = createDeclarable(); if (!(d instanceof Function)) { String s = LocalizedStrings. 
CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_FUNCTION .toLocalizedString(d.getClass().getName()); throw new CacheXmlException(s); } Object fs = stack.peek(); if (! (fs instanceof FunctionServiceCreation)) { throw new CacheXmlException( LocalizedStrings. CacheXmlParser_A_0_IS_ONLY_ALLOWED_IN_THE_CONTEXT_OF_1_MJTDEBUG_E_2 .toLocalizedString(new Object[] {FUNCTION, FUNCTION_SERVICE, fs})); } FunctionServiceCreation funcService = (FunctionServiceCreation) fs; funcService.registerFunction((Function) d); } private Class getClassFromStack() { Object o = this.stack.peek(); if(! (o instanceof String)) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_NO_CLASSNAME_FOUND .toLocalizedString()); } String className = (String)this.stack.pop(); try { Class c = InternalDataSerializer.getCachedClass(className); return c; } catch(Exception e) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_CLASS_NOT_FOUND .toLocalizedString(className), e); } } /** * Ending the top level serialization-registration element and * actually doing the work of registering all the components. */ private void endSerializerRegistration() { SerializerCreation sc = (SerializerCreation) this.stack.pop(); sc.create(getLogWriter()==null?null:getLogWriter().convertToLogWriter()); this.cache.setSerializerCreation(sc); } /** * Ending the serialization registration should leave us with a class name * on the stack. We will call the DataSerializer.register() with the class * once we find it. */ private void endSerializer() { Class c = getClassFromStack(); if(! (DataSerializer.class.isAssignableFrom(c))) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_NOT_A_SERIALIZER .toLocalizedString(c.getName())); } SerializerCreation sr = (SerializerCreation) this.stack.peek(); sr.registerSerializer(c); } /** * Ending the instantiator registration should leave us with a class name * and an Integer ID on the stack. 
Pull them off, and setup the instantiator * with an anonymous inner class to do the work. */ private void endInstantiator() { final Class c = getClassFromStack(); Class[] ifaces = c.getInterfaces(); boolean found = false; for(Class clazz : ifaces){ if(clazz == DataSerializable.class) { found = true; break; } } if(!found) { throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_DATA_SERIALIZABLE .toLocalizedString(c.getName())); } //the next thing on the stack should be the Integer registration ID Object o = this.stack.peek(); if(!(o instanceof Integer)) { String s = LocalizedStrings.CacheXmlParser_NO_SERIALIZATION_ID .toLocalizedString(); throw new CacheXmlException(s); } Integer id = (Integer) this.stack.pop(); SerializerCreation sc = (SerializerCreation) this.stack.peek(); sc.registerInstantiator(c, id); } /** * When we first encounter a parameter element, we push its * name element on to the stack. */ private void startParameter(Attributes atts) { String name = atts.getValue(NAME); Assert.assertTrue(name != null); stack.push(name); } /** * When we have finished a parameter element, create a * {@link Parameter}from the top two elements of the stack. */ private void endParameter() { Object value = stack.pop(); String name = (String) stack.pop(); stack.push(new Parameter(name, value)); } /** * When we have finished a declarable, instantiate an instance * of the {@link Declarable}and push it on the stack. 
 */
private void endDeclarable() {
  Declarable d = createDeclarable();
  stack.push(d);
}
/**
 * SAX callback: dispatch on the element name to the appropriate start
 * handler. Empty branches are intentional -- those elements either need no
 * work at the start tag or are handled entirely in endElement(). The final
 * else rejects unknown elements. NOTE(review): branch order is significant
 * only for readability; every comparison is an exact equals match.
 */
public void startElement(String namespaceURI, String localName, String qName,
    Attributes atts) throws SAXException {
  if (qName.equals(CACHE)) {
    startCache(atts);
  } else if (qName.equals(CLIENT_CACHE)) {
    startClientCache(atts);
  } else if (qName.equals(BRIDGE_SERVER)) {
    // bridge-server is the legacy spelling of cache-server
    startCacheServer(atts);
  } else if (qName.equals(CACHE_SERVER)) {
    startCacheServer(atts);
  } else if (qName.equals(LOAD_PROBE)) {
  } else if (qName.equals(CONNECTION_POOL)) {
    startPool(atts);
  } else if(qName.equals(CLIENT_SUBSCRIPTION)){
    /*cache.getLogger().config( "starts CLIENT_HA_QUEUE : " + atts.getValue(HA_EVICTION_POLICY));*/
    startClientHaQueue(atts);
  } else if (qName.equals(DYNAMIC_REGION_FACTORY)) {
    startDynamicRegionFactory(atts);
  } else if (qName.equals(GATEWAY_HUB)) {
    startGatewayHub(atts);
  } else if (qName.equals(GATEWAY_SENDER)) {
    startGatewaySender(atts);
  } else if (qName.equals(GATEWAY_RECEIVER)) {
    startGatewayReceiver(atts);
  } else if (qName.equals(GATEWAY_EVENT_FILTER)) {
  } else if (qName.equals(GATEWAY_TRANSPORT_FILTER)) {
  } else if (qName.equals(GATEWAY_EVENT_LISTENER)) {
  } else if (qName.equals(GATEWAY)) {
    startGateway(atts);
  } else if (qName.equals(GATEWAY_ENDPOINT)) {
    startGatewayEndpoint(atts);
  } else if (qName.equals(GATEWAY_LISTENER)) {
  } else if (qName.equals(GATEWAY_QUEUE)) {
    startGatewayQueue(atts);
  } else if (qName.equals(ASYNC_EVENT_QUEUE)) {
    startAsyncEventQueue(atts);
  } else if (qName.equals(GATEWAY_CONFLICT_RESOLVER)) {
  } else if (qName.equals(LOCATOR)) {
    doLocator(atts);
  } else if (qName.equals(REGION)) {
    startRegion(atts);
  } else if (qName.equals(VM_ROOT_REGION)) {
    startRegion(atts);
  } else if (qName.equals(REGION_ATTRIBUTES)) {
    startRegionAttributes(atts);
  } else if (qName.equals(DISK_STORE)) {
    startDiskStore(atts);
  } else if (qName.equals(KEY_CONSTRAINT)) {
  } else if (qName.equals(VALUE_CONSTRAINT)) {
  } else if (qName.equals(INDEX_UPDATE_TYPE)) {
  } else if (qName.equals(REGION_TIME_TO_LIVE)) {
  } else if (qName.equals(REGION_IDLE_TIME)) {
  } else if (qName.equals(ENTRY_TIME_TO_LIVE)) {
  } else if (qName.equals(ENTRY_IDLE_TIME)) {
  } else if (qName.equals(EXPIRATION_ATTRIBUTES)) {
    startExpirationAttributes(atts);
  } else if (qName.equals(SERVER)) {
    doServer(atts);
  } else if (qName.equals(CUSTOM_EXPIRY)) {
  } else if (qName.equals(SUBSCRIPTION_ATTRIBUTES)) {
    startSubscriptionAttributes(atts);
  } else if (qName.equals(ENTRY)) {
  } else if (qName.equals(CLASS_NAME)) {
  } else if (qName.equals(PARAMETER)) {
    startParameter(atts);
  } else if (qName.equals(DISK_WRITE_ATTRIBUTES)) {
    startDiskWriteAttributes(atts);
  } else if (qName.equals(SYNCHRONOUS_WRITES)) {
    startSynchronousWrites();
  } else if (qName.equals(ASYNCHRONOUS_WRITES)) {
    startAsynchronousWrites(atts);
  } else if (qName.equals(DISK_DIRS)) {
  } else if (qName.equals(DISK_DIR)) {
    startDiskDir(atts);
  } else if (qName.equals(GROUP)) {
  } else if (qName.equals(PARTITION_ATTRIBUTES)) {
    startPartitionAttributes(atts);
  } else if (qName.equals(FIXED_PARTITION_ATTRIBUTES)) {
    startFixedPartitionAttributes(atts);
  } else if (qName.equals(REQUIRED_ROLE)) {
    startRequiredRole(atts);
  } else if (qName.equals(MEMBERSHIP_ATTRIBUTES)) {
    startMembershipAttributes(atts);
  } else if (qName.equals(LOCAL_PROPERTIES)) {
    startPartitionProperties(atts, LOCAL_PROPERTIES);
  } else if (qName.equals(GLOBAL_PROPERTIES)) {
    startPartitionProperties(atts, GLOBAL_PROPERTIES);
  } else if (qName.equals(CACHE_LOADER)) {
  } else if (qName.equals(CACHE_WRITER)) {
  } else if (qName.equals(EVICTION_ATTRIBUTES)) {
  } else if (qName.equals(LRU_ENTRY_COUNT)) {
    startLRUEntryCount(atts); // internal to eviction-attributes
  } else if (qName.equals(LRU_MEMORY_SIZE)) {
    // internal to eviction-attributes
    // Visit endLRUMemorySize() to know the completion
    // of lru-memory-size eviction configuration
    startLRUMemorySize(atts);
  } else if (qName.equals(LRU_HEAP_PERCENTAGE)) {
    startLRUHeapPercentage(atts); // internal to eviction-attributes
  } else if (qName.equals(CACHE_LISTENER)) {
  } else if (qName.equals(ASYNC_EVENT_LISTENER)) {
  } else if (qName.equals(KEY)) {
  } else if (qName.equals(VALUE)) {
  } else if (qName.equals(STRING)) {
  } else if (qName.equals(DECLARABLE)) {
  } else if (qName.equals(INDEX)) {
    // Create an object of type IndexCreationData and push it on the stack
    startIndex(atts);
    //this.stack.push(new IndexCreationData(atts.getValue(NAME)));
  } else if (qName.equals(FUNCTIONAL)) {
    startFunctionalIndex(atts);
  } else if (qName.equals(PRIMARY_KEY)) {
    startPrimaryKeyIndex(atts);
  } else if (qName.equals(TRANSACTION_MANAGER)) {
    startCacheTransactionManager();
  } else if (qName.equals(TRANSACTION_LISTENER)) {
  } else if (qName.equals(TRANSACTION_WRITER)) {
  } else if (qName.equals(JNDI_BINDINGS)) { // added by Nand Kishor
  } else if (qName.equals(JNDI_BINDING)) { // added by Nand Kishor
    // Push the BindingCreation object on the stack
    Map gfSpecific = new HashMap();
    mapJNDI(atts, gfSpecific);
    List vendorSpecific = new ArrayList();
    this.stack.push(new BindingCreation(gfSpecific, vendorSpecific));
  } else if (qName.equals(CONFIG_PROPERTY_BINDING)) {
    // Peek at the BindingCreation object from the stack
    // and get the vendor specific data map
    BindingCreation bc = (BindingCreation) this.stack.peek();
    List vendorSpecific = bc.getVendorSpecificList();
    // Add a ConfigProperty Data Object to the list.
    vendorSpecific.add(new ConfigProperty());
  } else if (qName.equals(CONFIG_PROPERTY_NAME)) {
  } else if (qName.equals(CONFIG_PROPERTY_VALUE)) {
  } else if (qName.equals(CONFIG_PROPERTY_TYPE)) {
  } else if (qName.equals(PARTITION_RESOLVER)) {
  } else if (qName.equals(PARTITION_LISTENER)) {
  } else if (qName.equals(FUNCTION_SERVICE)) {
    startFunctionService();
  } else if (qName.equals(FUNCTION)) {
  } else if (qName.equals(TOP_SERIALIZER_REGISTRATION)) {
    startSerializerRegistration();
  } else if (qName.equals(INITIALIZER)) {
    startInitializer();
  } else if (qName.equals(INSTANTIATOR_REGISTRATION)) {
    startInstantiator(atts);
  } else if (qName.equals(SERIALIZER_REGISTRATION)) {
    //do nothing
  } else if (qName.equals(RESOURCE_MANAGER)) {
    startResourceManager(atts);
  } else if (qName.equals(BACKUP)) {
    //do nothing
  } else if (qName.equals(PDX)) {
    startPdx(atts);
  } else if(qName.equals(PDX_SERIALIZER)) {
    //do nothing
  } else if (qName.equals(HDFS_STORE)) {
    startHDFSStore(atts);
  } else if (qName.equals(HDFS_EVENT_QUEUE)) {
    startHDFSEventQueue(atts);
  } else if (qName.equals(HDFS_COMPACTION)) {
    startHDFSCompaction(atts);
  } else if (qName.equals(COMPRESSOR)) {
  } else {
    throw new CacheXmlException(
        LocalizedStrings.CacheXmlParser_UNKNOWN_XML_ELEMENT_0.toLocalizedString(qName));
  }
}
/**
 * Start a pdx element: each attribute is optional and, when present,
 * forwarded to the cache creation object.
 */
private void startPdx(Attributes atts) {
  String readSerialized = atts.getValue(READ_SERIALIZED);
  if(readSerialized != null) {
    cache.setPdxReadSerialized(Boolean.parseBoolean(readSerialized));
  }
  String ignoreUnreadFields = atts.getValue(IGNORE_UNREAD_FIELDS);
  if(ignoreUnreadFields != null) {
    cache.setPdxIgnoreUnreadFields(Boolean.parseBoolean(ignoreUnreadFields));
  }
  String persistent = atts.getValue(PERSISTENT);
  if(persistent != null) {
    cache.setPdxPersistent(Boolean.parseBoolean(persistent));
  }
  String diskStoreName = atts.getValue(DISK_STORE_NAME);
  if(diskStoreName != null) {
    cache.setPdxDiskStore(diskStoreName);
  }
}
/**
 * When a client-subscription element is first encountered, create a new
 * {@link
ClientSubscriptionConfig } to store the * eviction-policy,

* capacity and * overflow-directory, then pass these values to Bridge Server * * @since 5.7 */ private void startClientHaQueue(Attributes atts) { ClientHaQueueCreation clientHaQueue = new ClientHaQueueCreation(); String haEvictionPolicy = atts.getValue(CLIENT_SUBSCRIPTION_EVICTION_POLICY); if (haEvictionPolicy != null) { clientHaQueue.setEvictionPolicy(haEvictionPolicy); } String haCapacity = atts.getValue(CLIENT_SUBSCRIPTION_CAPACITY); if (haCapacity != null) { clientHaQueue.setCapacity(Integer.parseInt(haCapacity)); } String diskStoreName = atts.getValue(DISK_STORE_NAME); if (diskStoreName != null) { clientHaQueue.setDiskStoreName(diskStoreName); } else { String haOverflowDirectory = atts.getValue(OVERFLOW_DIRECTORY); if (haOverflowDirectory != null) { clientHaQueue.setOverflowDirectory(haOverflowDirectory); } } this.stack.push(clientHaQueue); } /** * Add a marker string to look for when in endPartitionProperties * @param atts * @param localOrGlobal either the string LOCAL_PROPERTIES or GLOBAL_PROPERTIES */ private void startPartitionProperties(Attributes atts, String localOrGlobal) { stack.push(localOrGlobal); } private void startDiskDir(Attributes atts) { String size = atts.getValue(DIR_SIZE); Integer diskSize = null; if(size==null) { diskSize = Integer.valueOf(DiskStoreFactory.DEFAULT_DISK_DIR_SIZE); } else { diskSize = Integer.valueOf(size); } stack.push(diskSize); } private void startDiskWriteAttributes(Attributes atts) { String roll = atts.getValue(ROLL_OPLOG); if (roll == null) { roll = "true"; // because it defaults to true } String maxOp = atts.getValue(MAX_OPLOG_SIZE); int maxOplogSize = 0; if (maxOp != null) { maxOplogSize = parseInt(maxOp); } else { maxOplogSize = DiskWriteAttributesImpl.getDefaultMaxOplogSize(); } stack.push(roll); stack.push(Integer.valueOf(maxOplogSize)); } public void endElement(String namespaceURI, String localName, String qName) throws SAXException { try { // logWriter.fine("endElement namespaceURI=" + namespaceURI // + "; 
localName = " + localName + "; qName = " + qName); if (qName.equals(CACHE)) { endCache(); } else if (qName.equals(CLIENT_CACHE)) { endClientCache(); } else if (qName.equals(BRIDGE_SERVER)) { endCacheServer(); } else if (qName.equals(CACHE_SERVER)) { endCacheServer(); } else if (qName.equals(LOAD_PROBE)) { endLoadProbe(); }else if(qName.equals(CLIENT_SUBSCRIPTION)){ endClientHaQueue(); } else if (qName.equals(CONNECTION_POOL)) { endPool(); } else if (qName.equals(DYNAMIC_REGION_FACTORY)) { endDynamicRegionFactory(); } else if (qName.equals(GATEWAY_HUB)) { endGatewayHub(); } else if (qName.equals(GATEWAY_SENDER)) { endSerialGatewaySender(); } else if (qName.equals(GATEWAY_RECEIVER)) { endGatewayReceiver(); } else if (qName.equals(GATEWAY_EVENT_FILTER)) { endGatewayEventFilter(); } else if (qName.equals(GATEWAY_TRANSPORT_FILTER)) { endGatewayTransportFilter(); } else if (qName.equals(GATEWAY)) { endGateway(); } else if (qName.equals(GATEWAY_ENDPOINT)) { } else if (qName.equals(GATEWAY_LISTENER)) { endGatewayListener(); } else if (qName.equals(GATEWAY_QUEUE)) { } else if (qName.equals(ASYNC_EVENT_QUEUE)) { endAsyncEventQueue(); } else if (qName.equals(GATEWAY_CONFLICT_RESOLVER)) { endGatewayConflictResolver(); } else if (qName.equals(REGION)) { endRegion(); } else if (qName.equals(VM_ROOT_REGION)) { endRegion(); } else if (qName.equals(REGION_ATTRIBUTES)) { endRegionAttributes(); } else if (qName.equals(DISK_STORE)) { endDiskStore(); } else if (qName.equals(KEY_CONSTRAINT)) { endKeyConstraint(); } else if (qName.equals(VALUE_CONSTRAINT)) { endValueConstraint(); } else if (qName.equals(REGION_TIME_TO_LIVE)) { endRegionTimeToLive(); } else if (qName.equals(REGION_IDLE_TIME)) { endRegionIdleTime(); } else if (qName.equals(ENTRY_TIME_TO_LIVE)) { endEntryTimeToLive(); } else if (qName.equals(ENTRY_IDLE_TIME)) { endEntryIdleTime(); } else if (qName.equals(CUSTOM_EXPIRY)) { endCustomExpiry(); } else if (qName.equals(DISK_WRITE_ATTRIBUTES)) { endDiskWriteAttributes(); } else 
if (qName.equals(SYNCHRONOUS_WRITES)) { } else if (qName.equals(ASYNCHRONOUS_WRITES)) { } else if (qName.equals(DISK_DIRS)) { endDiskDirs(); } else if (qName.equals(DISK_DIR)) { endDiskDir(); } else if (qName.equals(GROUP)) { endGroup(); } else if (qName.equals(PARTITION_ATTRIBUTES)) { endPartitionAttributes(); } else if (qName.equals(FIXED_PARTITION_ATTRIBUTES)) { endFixedPartitionAttributes(); } else if (qName.equals(LOCAL_PROPERTIES)) { endPartitionProperites(LOCAL_PROPERTIES); } else if (qName.equals(GLOBAL_PROPERTIES)) { endPartitionProperites(GLOBAL_PROPERTIES); } else if (qName.equals(MEMBERSHIP_ATTRIBUTES)) { endMembershipAttributes(); } else if (qName.equals(REQUIRED_ROLE)) { endRequiredRole(); } else if (qName.equals(EXPIRATION_ATTRIBUTES)) { } else if (qName.equals(CUSTOM_EXPIRY)) { endCustomExpiry(); } else if (qName.equals(SUBSCRIPTION_ATTRIBUTES)) { } else if (qName.equals(ENTRY)) { endEntry(); } else if (qName.equals(CLASS_NAME)) { endClassName(); } else if (qName.equals(PARAMETER)) { endParameter(); } else if (qName.equals(CACHE_LOADER)) { endCacheLoader(); } else if (qName.equals(CACHE_WRITER)) { endCacheWriter(); } else if (qName.equals(EVICTION_ATTRIBUTES)) { } else if (qName.equals(LRU_ENTRY_COUNT)) { // internal to eviction-attributes } else if (qName.equals(LRU_MEMORY_SIZE)) { endLRUMemorySize(); // internal to eviction-attributes } else if (qName.equals(LRU_HEAP_PERCENTAGE)) { endLRUHeapPercentage(); // internal to eviction-attributes } else if (qName.equals(CACHE_LISTENER)) { endCacheListener(); } else if (qName.equals(ASYNC_EVENT_LISTENER)) { endAsyncEventListener(); } else if (qName.equals(KEY)) { } else if (qName.equals(VALUE)) { } else if (qName.equals(STRING)) { endString(); } else if (qName.equals(DECLARABLE)) { endDeclarable(); } else if (qName.equals(FUNCTIONAL)) { } else if (qName.equals(INDEX)) { endIndex(); } else if (qName.equals(PRIMARY_KEY)) { } else if (qName.equals(TRANSACTION_MANAGER)) { endCacheTransactionManager(); } else 
if (qName.equals(TRANSACTION_LISTENER)) { endTransactionListener(); } else if (qName.equals(TRANSACTION_WRITER)) { endTransactionWriter(); } else if (qName.equals(JNDI_BINDINGS)) { } else if (qName.equals(JNDI_BINDING)) { //Asif Pop the BindingCreation object BindingCreation bc = (BindingCreation) this.stack.pop(); JNDIInvoker.mapDatasource(bc.getGFSpecificMap(), bc .getVendorSpecificList()); } else if (qName.equals(CONFIG_PROPERTY_BINDING)) { } else if (qName.equals(CONFIG_PROPERTY_NAME)) { String name = null; if (this.stack.peek() instanceof StringBuffer) // Pop the config-property-name element value from the stack. name = ((StringBuffer) this.stack.pop()).toString(); BindingCreation bc = (BindingCreation) this.stack.peek(); List vsList = bc.getVendorSpecificList(); ConfigProperty cp = (ConfigProperty) vsList.get(vsList.size() - 1); if (name == null) { String excep = LocalizedStrings.CacheXmlParser_EXCEPTION_IN_PARSING_ELEMENT_0_THIS_IS_A_REQUIRED_FIELD.toLocalizedString( qName ); throw new CacheXmlException(excep); } else { // set the name. cp.setName(name); } } else if (qName.equals(CONFIG_PROPERTY_VALUE)) { String value = null; // Pop the config-property-value element value from the stack. if (this.stack.peek() instanceof StringBuffer) value = ((StringBuffer) this.stack.pop()).toString(); BindingCreation bc = (BindingCreation) this.stack.peek(); List vsList = bc.getVendorSpecificList(); ConfigProperty cp = (ConfigProperty) vsList.get(vsList.size() - 1); // Set the value to the ConfigProperty Data Object. 
cp.setValue(value); } else if (qName.equals(CONFIG_PROPERTY_TYPE)) { String type = null; if (this.stack.peek() instanceof StringBuffer) type = ((StringBuffer) this.stack.pop()).toString(); BindingCreation bc = (BindingCreation) this.stack.peek(); List vsList = bc.getVendorSpecificList(); ConfigProperty cp = (ConfigProperty) vsList.get(vsList.size() - 1); if (type == null) { String excep = LocalizedStrings.CacheXmlParser_EXCEPTION_IN_PARSING_ELEMENT_0_THIS_IS_A_REQUIRED_FIELD.toLocalizedString( qName ); throw new CacheXmlException(excep); } else { cp.setType(type); } } else if (qName.equals(LRU_MEMORY_SIZE)) { // internal to eviction-attributes // Visit startLRUMemorySize() to know the begining // of lru-memory-size eviction configuration endLRUMemorySize(); } else if (qName.equals(LOCATOR)) { // nothing needed } else if (qName.equals(SERVER)) { // nothing needed } else if (qName.equals(PARTITION_RESOLVER)) { endPartitionResolver(); } else if (qName.equals(PARTITION_LISTENER)) { endPartitionListener(); } else if (qName.equals(FUNCTION)) { endFunctionName(); } else if (qName.equals(FUNCTION_SERVICE)) { endFunctionService(); } else if (qName.equals(TOP_SERIALIZER_REGISTRATION)) { endSerializerRegistration(); } else if (qName.equals(INITIALIZER)) { endInitializer(); } else if (qName.equals(SERIALIZER_REGISTRATION)) { endSerializer(); } else if (qName.equals(INSTANTIATOR_REGISTRATION)) { endInstantiator(); } else if (qName.equals(RESOURCE_MANAGER)) { endResourceManager(); } else if (qName.equals(BACKUP)) { endBackup(); } else if (qName.equals(PDX)) { //nothing needed } else if (qName.equals(PDX_SERIALIZER)) { endPdxSerializer(); } else if (qName.equals(HDFS_STORE)) { endHDFSStore(); } else if (qName.equals(HDFS_EVENT_QUEUE)) { endHDFSEventQueue(); } else if (qName.equals(HDFS_COMPACTION)) { // nothing needed } else if (qName.equals(COMPRESSOR)) { endCompressor(); } else { throw new 
// NOTE(review): this chunk begins mid-statement — the lines below are the tail of
// endElement(String, String, String), whose opening (the big qName dispatch) is above
// this view. Unknown elements are rejected; CacheExceptions are rethrown as SAXException.
      CacheXmlException(LocalizedStrings.CacheXmlParser_UNKNOWN_XML_ELEMENT_0.toLocalizedString(qName));
      }
    }
    catch (CacheException ex) {
      // Preserve the original cause when converting to the SAX error model.
      throw new SAXException(LocalizedStrings.CacheXmlParser_A_CACHEEXCEPTION_WAS_THROWN_WHILE_PARSING_XML.toLocalizedString(), ex);
    }
  }

  /**
   * Finishes a {@code gateway-transport-filter} element: materializes the declared
   * class via {@link #createDeclarable}, verifies it is a {@link GatewayTransportFilter},
   * and registers it with the enclosing sender or receiver factory on the stack.
   *
   * @throws CacheXmlException if the declarable is not a GatewayTransportFilter, or if
   *         the element is not nested in a gateway-sender or gateway-receiver
   */
  private void endGatewayTransportFilter() {
    Declarable d = createDeclarable();
    if (!(d instanceof GatewayTransportFilter)) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_1
              .toLocalizedString(new Object[] { d.getClass().getName(), "GatewayTransportFilter"}));
    }
    // Peek (not pop): the enclosing factory stays on the stack for its own end-element handler.
    Object a = stack.peek();
    if (a instanceof GatewaySenderFactory) {
      GatewaySenderFactory senderFactory = (GatewaySenderFactory)a;
      senderFactory.addGatewayTransportFilter((GatewayTransportFilter)d);
    } else if (a instanceof GatewayReceiverFactory) {
      GatewayReceiverFactory receiverFactory = (GatewayReceiverFactory)a;
      receiverFactory.addGatewayTransportFilter((GatewayTransportFilter)d);
    } else {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_GATEWAYSENDER_OR_GATEWAYRECEIVER
              .toLocalizedString(GATEWAY_TRANSPORT_FILTER));
    }
  }

  /**
   * Finishes a {@code gateway-event-filter} element: the declared class must be a
   * {@link GatewayEventFilter}, and the element must be nested inside a gateway-sender.
   *
   * @throws CacheXmlException if the declarable is not a GatewayEventFilter, or if no
   *         GatewaySenderFactory is on top of the stack
   */
  private void endGatewayEventFilter() {
    Declarable d = createDeclarable();
    if (!(d instanceof GatewayEventFilter)) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_1
              .toLocalizedString(new Object[] { d.getClass().getName(), "GatewayEventFilter"}));
    }
    GatewaySenderFactory senderFactory = peekGatewaySender(GATEWAY_EVENT_FILTER);
    senderFactory.addGatewayEventFilter((GatewayEventFilter)d);
  }

  /**
   * Returns the {@link GatewaySenderFactory} on top of the parse stack without removing it.
   *
   * @param dependentElement name of the XML element requiring a gateway-sender context,
   *        used only in the error message
   * @throws CacheXmlException if the top of the stack is not a GatewaySenderFactory
   */
  private GatewaySenderFactory peekGatewaySender(String dependentElement) {
    Object a = stack.peek();
    if (!(a instanceof GatewaySenderFactory)) {
      throw new CacheXmlException(
          LocalizedStrings.CacheXmlParser_A_0_MUST_BE_DEFINED_IN_THE_CONTEXT_OF_GATEWAY_SENDER
              .toLocalizedString(dependentElement));
    }
    return (GatewaySenderFactory)a;
  }

  /**
   * Finishes a {@code pdx-serializer} element: the declared class must implement
   * {@link PdxSerializer}; it is installed on the cache being created.
   *
   * @throws CacheXmlException if the declarable is not a PdxSerializer
   */
  private void endPdxSerializer() {
    Declarable d = createDeclarable();
    if (!(d instanceof PdxSerializer)) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_A_0_IS_NOT_AN_INSTANCE_OF_A_PDX_SERIALIZER.toLocalizedString(d.getClass().getName()));
    }
    PdxSerializer serializer = (PdxSerializer) d;
    this.cache.setPdxSerializer(serializer);
  }

  /** Start of an {@code initializer} element — nothing to do until the end tag. */
  private void startInitializer() {
  }

  /**
   * Finishes an {@code initializer} element: pops any {@link Parameter}s into a
   * {@link Properties} bag, then pops the class name (pushed as character data),
   * instantiates it, and installs it on the cache as a {@link Declarable} initializer.
   *
   * @throws CacheXmlException if instantiation fails or the class is not a Declarable
   */
  private void endInitializer() {
    Properties props = new Properties();
    Object top = stack.pop();
    // Parameters were pushed in document order; collect them until the class name is reached.
    while (top instanceof Parameter) {
      Parameter param = (Parameter) top;
      props.put(param.getName(), param.getValue());
      top = stack.pop();
    }
    Assert.assertTrue(top instanceof String);
    String className = (String) top;
    Object o;
    try {
      Class c = InternalDataSerializer.getCachedClass(className);
      // NOTE(review): Class.newInstance() is deprecated in modern JDKs (it rethrows
      // checked exceptions from the constructor undeclared); kept here unchanged.
      o = c.newInstance();
    } catch (Exception ex) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_WHILE_INSTANTIATING_A_0.toLocalizedString(className), ex);
    }
    if (!(o instanceof Declarable)) {
      throw new CacheXmlException(LocalizedStrings.CacheXmlParser_CLASS_0_IS_NOT_AN_INSTANCE_OF_DECLARABLE.toLocalizedString(className));
    }
    Declarable d = (Declarable) o;
    this.cache.setInitializer(d, props);
  }

  /**
   * Do nothing
   * @since 5.7
   */
  private void endClientHaQueue() {
  }

  /**
   * Process either the local-properties or global-properties for a
   * {@link com.gemstone.gemfire.internal.cache.PartitionedRegion}.
   * Pops {@link Parameter}s off the stack (until the sentinel string pushed at element
   * start is found) and applies them to the {@link PartitionAttributesImpl} beneath.
   *
   * @param globalOrLocal either the string {@link CacheXml#LOCAL_PROPERTIES} or
   *        {@link CacheXml#GLOBAL_PROPERTIES}
   * @throws CacheXmlException if a non-Parameter object is found before the sentinel
   */
  private void endPartitionProperites(String globalOrLocal) {
    Properties props = new Properties();
    Object top = stack.pop();
    while (!top.equals(globalOrLocal)) {
      if (!(top instanceof Parameter)) {
        throw new CacheXmlException(LocalizedStrings.CacheXmlParser_ONLY_A_PARAMETER_IS_ALLOWED_IN_THE_CONTEXT_OF_0.toLocalizedString(globalOrLocal));
      }
      Parameter param = (Parameter) top;
      props.put(param.getName(), param.getValue());
      top = stack.pop();
    }
    if (globalOrLocal.equals(GLOBAL_PROPERTIES)) {
      PartitionAttributesImpl pai = peekPartitionAttributesImpl(GLOBAL_PROPERTIES);
      pai.setGlobalProperties(props);
    }
    else if (globalOrLocal.equals(LOCAL_PROPERTIES)) {
      PartitionAttributesImpl pai = peekPartitionAttributesImpl(LOCAL_PROPERTIES);
      pai.setLocalProperties(props);
    }
    else {
      Assert.assertTrue(false, "Argument globalOrLocal has unexpected value " + globalOrLocal);
    }
  }

  /**
   * Accumulates element character data on the parse stack. SAX may deliver one text
   * node in several {@code characters} callbacks ("chunking"), so consecutive calls
   * append to the same StringBuffer left on top of the stack.
   */
  public void characters(char[] ch, int start, int length) throws SAXException {
    // This method needs to handle XML chunking, so its uses a
    // StringBuffer to uniquely identify previous calls and will
    // append to the existing StringBuffer for each subsequent call
    Object o = null;
    try {
      o = stack.peek();
    } catch (EmptyStackException firstTime) {
      // No entries on the stack, this is the first element that
      // performs any stack operations, initialize a StringBuffer (see
      // finally block)
    } finally {
      StringBuffer chars = null;
      if (o instanceof StringBuffer) {
        // Continuation of a chunked text node: append to the buffer already on the stack.
        chars = (StringBuffer) o;
        chars.append(ch, start, length);
        if (getLogWriter() != null) {
          getLogWriter().info(
              LocalizedStrings.CacheXmlParser_XML_PARSER_CHARACTERS_APPENDED_CHARACTER_DATA_0,
              chars);
        }
      } else {
        // First chunk for this text node: push a fresh buffer.
        chars = new StringBuffer(length);
        chars.append(ch, start, length);
        stack.push(chars);
        if (getLogWriter() != null) {
          getLogWriter().info(
              LocalizedStrings.CacheXmlParser_XML_PARSER_CHARACTERS_NEW_CHARACTER_DATA_0,
              chars);
        }
      }
    }
  }

  ////////// Inherited methods that don't do anything //////////

  public void setDocumentLocator(Locator locator) {
  }

  public void startDocument() throws SAXException {
  }

  public void endDocument() throws SAXException {
  }

  public void startPrefixMapping(String prefix, String uri) throws SAXException {
  }

  public void endPrefixMapping(String prefix) throws SAXException {
  }

  public void ignorableWhitespace(char[] ch, int start, int length) throws SAXException {
  }

  public void processingInstruction(String target, String data) throws SAXException {
  }

  public void skippedEntity(String name) throws SAXException {
  }

  /** Sets the log writer used by {@link #characters} for parse tracing. */
  public void setLogWriter(LogWriterI18n lw) {
    this.logWriter = lw;
  }

  /** Returns the log writer, or {@code null} if none was set. */
  public LogWriterI18n getLogWriter() {
    return this.logWriter;
  }

  /*
   * Binds a jndi name of datasource to a context. @param atts Attributes of
   * jndi name and Datasource related information.
   */
  private void mapJNDI(Attributes atts, Map gfSpecific) {
    int attsLen = atts.getLength();
    String key = "";
    String value = "";
    // put attributes into a Map
    for (int i = 0; i < attsLen; i++) {
      key = atts.getQName(i);
      value = atts.getValue(key);
      gfSpecific.put(key, value);
    }
  }

  /////////////////////// Inner Classes ///////////////////////

  /**
   * Class that delegates all of the methods of a
   * {@link org.xml.sax.helpers.DefaultHandler} to a {@link CacheXmlParser} that
   * implements all of the methods of <code>DefaultHandler</code>, but is not a
   * <code>DefaultHandler</code>. Needed because SAX requires a DefaultHandler
   * instance while CacheXmlParser has a different superclass.
   */
  static class DefaultHandlerDelegate extends DefaultHandler {
    /** The <code>CacheXmlParser</code> that does the real work */
    private final CacheXmlParser handler;

    /**
     * Creates a new <code>DefaultHandlerDelegate</code> that delegates to the given
     * <code>CacheXmlParser</code>.
     */
    public DefaultHandlerDelegate(CacheXmlParser handler) {
      this.handler = handler;
    }

    @Override
    public InputSource resolveEntity(String publicId, String systemId)
        throws SAXException {
      return handler.resolveEntity(publicId, systemId);
    }

    @Override
    public void setDocumentLocator(Locator locator) {
      handler.setDocumentLocator(locator);
    }

    @Override
    public void startDocument() throws SAXException {
      handler.startDocument();
    }

    @Override
    public void endDocument() throws SAXException {
      handler.endDocument();
    }

    @Override
    public void startPrefixMapping(String prefix, String uri) throws SAXException {
      handler.startPrefixMapping(prefix, uri);
    }

    @Override
    public void endPrefixMapping(String prefix) throws SAXException {
      handler.endPrefixMapping(prefix);
    }

    @Override
    public void startElement(String uri, String localName, String qName,
        Attributes attributes) throws SAXException {
      handler.startElement(uri, localName, qName, attributes);
    }

    @Override
    public void endElement(String uri, String localName, String qName)
        throws SAXException {
      handler.endElement(uri, localName, qName);
    }

    @Override
    public void characters(char[] ch, int start, int length) throws SAXException {
      handler.characters(ch, start, length);
    }

    @Override
    public void ignorableWhitespace(char[] ch, int start, int length)
        throws SAXException {
      handler.ignorableWhitespace(ch, start, length);
    }

    @Override
    public void processingInstruction(String target, String data)
        throws SAXException {
      handler.processingInstruction(target, data);
    }

    @Override
    public void skippedEntity(String name) throws SAXException {
      handler.skippedEntity(name);
    }

    @Override
    public void warning(SAXParseException e) throws SAXException {
      handler.warning(e);
    }

    @Override
    public void error(SAXParseException e) throws SAXException {
      handler.error(e);
    }

    @Override
    public void fatalError(SAXParseException e) throws SAXException {
      handler.fatalError(e);
    }
  }

  /**
   * Represents a parameter used to initialize a {@link Declarable}. Immutable
   * name/value pair pushed onto the parse stack by parameter elements.
   */
  static class Parameter {
    /** The name of the parameter */
    private final String name;
    /** The value of the parameter */
    private final Object value;

    /**
     * Creates a new <code>Parameter</code> with the given name and value.
     */
    public Parameter(String name, Object value) {
      this.name = name;
      this.value = value;
    }

    /** Returns the parameter's name. */
    public String getName() {
      return this.name;
    }

    /** Returns the parameter's value. */
    public Object getValue() {
      return this.value;
    }
  }
}





© 2015 - 2024 Weber Informatics LLC | Privacy Policy