org.cloudgraph.bigtable.connect.BigTableConnection

CloudGraph(tm) is a suite of Service Data Object (SDO) 2.1 services designed for relational and big-table style "cloud" databases, such as HBase and others.
/**
 * Copyright 2017 TerraMeta Software, Inc.
 * 
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 * 
 *     http://www.apache.org/licenses/LICENSE-2.0
 * 
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.cloudgraph.bigtable.connect;

import java.io.IOException;
import java.util.Iterator;
import java.util.Map;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.commons.pool2.ObjectPool;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.client.BufferedMutator;
import org.apache.hadoop.hbase.client.BufferedMutatorParams;
import org.apache.hadoop.hbase.client.RegionLocator;
import org.apache.hadoop.hbase.client.Table;
import org.cloudgraph.core.Connection;
import org.cloudgraph.core.client.TableName;
import org.cloudgraph.hbase.client.HBaseAdmin;
import org.cloudgraph.hbase.client.HBaseBufferedMutator;
import org.cloudgraph.hbase.client.HBaseRegionLocator;
import org.cloudgraph.hbase.client.HBaseTable;
import org.cloudgraph.store.mapping.StoreMappingProp;
import org.cloudgraph.store.service.GraphServiceException;

import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.common.cache.RemovalListener;
import com.google.common.cache.RemovalNotification;

/**
 * Pooled BigTable connection wrapper which complies with Apache pool semantics
 * and maintains a cache of table handles for each connection.
 * <p>
 * For some HBase implementations, e.g. MAPR, the management of connections and
 * table handles is super critical, as the API is extremely performance and
 * resource costly at scale.
 * </p>
 * 
 * @author Scott Cinnamond
 * @since 2.0.1
 */
public class BigTableConnection implements Connection {
  private static Log log = LogFactory.getLog(BigTableConnection.class);
  private org.apache.hadoop.hbase.client.Connection con;
  private ObjectPool<Connection> pool;
  private Configuration config;
  // cache of HBase table handles, loaded on demand and closed on eviction
  private LoadingCache<TableName, Table> tableCache;

  public BigTableConnection(org.apache.hadoop.hbase.client.Connection connection,
      ObjectPool<Connection> pool, Configuration config) {
    super();
    this.con = connection;
    this.pool = pool;
    this.config = config;
    final int cacheMax = StoreMappingProp.getConnectionTablecacheSizeMax();
    final int cacheTimeout = StoreMappingProp.getConnectionTablecacheTimeoutSeconds();
    Map<String, String> propsMap = StoreMappingProp.getConnectionTableConfigProperties();
    Iterator<String> keys = propsMap.keySet().iterator();
    while (keys.hasNext()) {
      String key = keys.next();
      String value = propsMap.get(key);
      this.config.set(key, value);
    }
    this.tableCache = CacheBuilder.newBuilder().maximumSize(cacheMax)
        .expireAfterAccess(cacheTimeout, TimeUnit.SECONDS)
        .removalListener(new RemovalListener<TableName, Table>() {
          @Override
          public void onRemoval(RemovalNotification<TableName, Table> event) {
            try {
              event.getValue().close();
              if (log.isDebugEnabled())
                log.debug("closed evicted table " + this + " " + event.getKey());
            } catch (IOException e) {
              log.warn(e.getMessage(), e);
            }
          }
        }).build(new CacheLoader<TableName, Table>() {
          @Override
          public Table load(TableName tableName) throws Exception {
            if (log.isDebugEnabled())
              log.debug("loading table " + this + " " + tableName);
            return con.getTable(org.apache.hadoop.hbase.TableName.valueOf(
                tableName.getNamespace(), tableName.getTableName()));
          }
        });
    if (log.isDebugEnabled())
      log.debug("created " + this + " pool active/idle " + pool.getNumActive() + "/"
          + pool.getNumIdle() + " tableCacheMax/tableCacheTimeout: " + cacheMax + "/"
          + cacheTimeout);
  }

  @Override
  public void close() throws IOException {
    if (log.isDebugEnabled())
      log.debug("closing wrapped connection, " + this.con);
    try {
      // return the wrapper to the pool rather than closing the underlying connection
      this.pool.returnObject(this);
    } catch (Exception e) {
      throw new GraphServiceException(e);
    }
  }

  @Override
  public void destroy() throws IOException {
    this.tableCache.invalidateAll();
    this.tableCache.cleanUp();
    // above should evict and close any tables but just to be sure
    for (Table table : this.tableCache.asMap().values())
      table.close();
    if (log.isDebugEnabled())
      log.debug("destroyed " + this + " pool active/idle " + pool.getNumActive() + "/"
          + pool.getNumIdle());
  }

  @Override
  public boolean isClosed() {
    return con.isClosed();
  }

  @Override
  public void abort(String why, Throwable e) {
    con.abort(why, e);
  }

  @Override
  public boolean isAborted() {
    return con.isAborted();
  }

  public Configuration getConfiguration() {
    return con.getConfiguration();
  }

  @Override
  public boolean tableExists(TableName tableName) throws IOException {
    boolean exists = false;
    Table table = this.tableCache.getIfPresent(tableName);
    if (table != null) {
      exists = true;
    } else {
      exists = con.getAdmin().tableExists(
          org.apache.hadoop.hbase.TableName.valueOf(tableName.getNamespace(),
              tableName.getTableName()));
      if (exists) {
        try {
          this.tableCache.get(tableName);
        } catch (ExecutionException e) {
          log.error(e.getMessage(), e);
        }
      }
    }
    return exists;
  }

  @Override
  public org.cloudgraph.core.client.Table getTable(TableName tableName) throws IOException {
    Table result = null;
    try {
      result = this.tableCache.get(tableName);
    } catch (ExecutionException e) {
      log.error(e.getMessage(), e);
    }
    if (result != null)
      return new HBaseTable(result);
    else
      return null;
  }

  @Override
  public org.cloudgraph.core.client.Table getTable(TableName tableName, ExecutorService pool)
      throws IOException {
    Table result = null;
    try {
      result = this.tableCache.get(tableName);
    } catch (ExecutionException e) {
      log.error(e.getMessage(), e);
    }
    if (result != null)
      return new HBaseTable(result);
    else
      return null;
  }

  @Override
  public org.cloudgraph.core.client.BufferedMutator getBufferedMutator(TableName tableName)
      throws IOException {
    BufferedMutator result = con.getBufferedMutator(org.apache.hadoop.hbase.TableName.valueOf(
        tableName.getNamespace(), tableName.getTableName()));
    if (result != null)
      return new HBaseBufferedMutator(result);
    return null;
  }

  public BufferedMutator getBufferedMutator(BufferedMutatorParams params) throws IOException {
    return con.getBufferedMutator(params);
  }

  @Override
  public org.cloudgraph.core.client.RegionLocator getRegionLocator(TableName tableName)
      throws IOException {
    RegionLocator result = con.getRegionLocator(org.apache.hadoop.hbase.TableName.valueOf(
        tableName.getNamespace(), tableName.getTableName()));
    if (result != null)
      return new HBaseRegionLocator(result);
    return null;
  }

  @Override
  public org.cloudgraph.core.client.Admin getAdmin() throws IOException {
    return new HBaseAdmin(con.getAdmin());
  }
}
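The class above follows Apache commons-pool2 semantics: close() returns the wrapper to its ObjectPool instead of closing the underlying HBase connection, and table handles are served from the per-connection Guava cache. The following is a minimal, hypothetical usage sketch only; the GenericObjectPool instance and the PooledConnectionExample class are assumptions for illustration and are not part of the CloudGraph sources.

// Hypothetical usage sketch (not part of CloudGraph): illustrates the
// borrow/use/return lifecycle the Javadoc above describes.
import org.apache.commons.pool2.impl.GenericObjectPool;

import org.cloudgraph.core.Connection;
import org.cloudgraph.core.client.TableName;

public class PooledConnectionExample {

  /**
   * Borrows a pooled connection, reads a table handle through the
   * per-connection cache, and returns the connection to the pool.
   */
  public static void readThroughPool(GenericObjectPool<Connection> pool, TableName tableName)
      throws Exception {
    Connection con = pool.borrowObject(); // e.g. a pooled BigTableConnection
    try {
      if (con.tableExists(tableName)) {
        // served from the connection's table cache, loaded on first access
        org.cloudgraph.core.client.Table table = con.getTable(tableName);
        // ... issue reads/writes through the table wrapper here ...
      }
    } finally {
      // per pool semantics, close() returns the wrapper to the pool; the
      // underlying HBase connection and cached table handles remain open
      con.close();
    }
  }
}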



