
/*
* Copyright 2010 Trustees of the University of Pennsylvania Licensed under the
* Educational Community License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.osedu.org/licenses/ECL-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an "AS IS"
* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing
* permissions and limitations under the License.
*/
package metridoc.plugins.sql

import groovy.sql.Sql
import groovy.util.logging.Slf4j
import metridoc.sql.BulkSqlCalls
import metridoc.utils.Assert
import metridoc.utils.PropertyUtils
import org.slf4j.LoggerFactory

import javax.sql.DataSource
import java.sql.BatchUpdateException
import java.sql.Connection
import java.sql.PreparedStatement
import java.sql.Statement
import java.util.regex.Matcher
import java.util.regex.Pattern
/**
 * A {@link groovy.sql.Sql} extension that adds table-to-table bulk insert helpers
 * and phase-ordered execution of SQL statements defined in a batch config file.
 *
 * Created by IntelliJ IDEA.
 * User: tbarker
 * Date: 7/18/11
 * Time: 3:38 PM
 */
@Slf4j
class SqlPlus extends Sql {
static final PHASE_NAMES = "phaseName"
static final Set IGNORED_KEYS = [PHASE_NAMES, "order"]
BulkSqlCalls bulkSqlCalls
boolean validate = false
boolean truncate = false
SqlPlus(DataSource dataSource) {
super(dataSource)
}
    BulkSqlCalls getBulkSqlCalls() {
        if (bulkSqlCalls) {
            return bulkSqlCalls
        }
        // lazily create the dialect-specific bulk SQL builder; in the original the
        // assignment doubled as the implicit return value, made explicit here
        bulkSqlCalls = PluginSqlDb.getBulkCalls(this.createConnection())
        return bulkSqlCalls
    }
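    /**
     * Moves rows from the {@code from} table into the {@code to} table using a
     * single statement built by {@link BulkSqlCalls#getBulkInsert} (typically an
     * INSERT ... SELECT, though the exact SQL depends on the dialect-specific
     * builder). Returns the resulting update count.
     */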
int bulkInsert(String from, String to, List columns) {
def sql = getBulkSqlCalls().getBulkInsert(from, to, columns)
log.debug("executing bulk sql: {}", sql)
super.executeUpdate(sql)
}
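    /**
     * Variant of the bulk insert above where {@code columnMap} maps column names
     * between the source and destination tables; how the mapping is applied is
     * determined by the {@link BulkSqlCalls} implementation.
     */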
int bulkInsert(String from, String to, Map columnMap) {
def sql = getBulkSqlCalls().getBulkInsert(from, to, columnMap)
log.debug("executing bulk sql: {}", sql)
super.executeUpdate(sql)
}
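    /**
     * Like the plain bulk insert, but the generated statement is built by
     * {@link BulkSqlCalls#getNoDuplicateBulkInsert} so that rows whose
     * {@code noDupColumn} value already exists in the destination are skipped.
     */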
int bulkInsertNoDup(String from, String to, String noDupColumn, List columns) {
def sql = getBulkSqlCalls().getNoDuplicateBulkInsert(from, to, noDupColumn, columns)
log.debug("executing bulk sql: {}", sql)
super.executeUpdate(sql)
}
int bulkInsertNoDup(String from, String to, String noDupColumn, Map columnMap) {
def sql = getBulkSqlCalls().getNoDuplicateBulkInsert(from, to, noDupColumn, columnMap)
log.debug("executing bulk sql: {}", sql)
super.executeUpdate(sql)
}
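    /**
     * Runs the SQL statements defined in a config file, grouped into phases that
     * execute in ascending {@code order}. {@code args.fileName} is required, and
     * {@code args.exclude} may list phase names or {@code phase.sqlName} entries
     * to skip. Based on how the file is consumed below, a batch file is expected
     * to look roughly like this (an illustrative sketch, not taken from the source):
     *
     * <pre>
     * sql {
     *     loadStaging {
     *         order = 1
     *         truncateTarget { sql = "truncate table target" }
     *         copyRows { sql = "insert into target select * from staging" }
     *     }
     * }
     * </pre>
     */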
void runBatchFile(LinkedHashMap args) {
Assert.notNull(args.fileName, "SqlPlus requires a fileName to runBatchFile")
ConfigObject file = new PropertyUtils().getConfig(args.fileName)
def phases = getPhases(file)
phases.each {order, phase ->
long beginPhaseTime = new Date().getTime()
def phaseName = phase.phaseName
log.info "starting phase ${phaseName}"
if (!exclude(args.exclude, phaseName)) {
phase.each {sqlName, value ->
def fullSqlName = "${phaseName}.${sqlName}"
if (!exclude(args.exclude, fullSqlName)) {
if (!IGNORED_KEYS.contains(sqlName)) {
log.info("running ${fullSqlName}")
long startTime = new Date().getTime()
int updateCount = executeUpdate(value.sql)
long endTime = new Date().getTime()
long totalTime = endTime - startTime
log.info("finished running ${fullSqlName} with ${updateCount} updates, took ${totalTime} milliseconds")
}
} else {
log.info("skipping sql ${fullSqlName}")
}
}
} else {
log.info("skipping phase ${phaseName}")
}
long endPhaseTime = new Date().getTime()
long totalPhaseTime = endPhaseTime - beginPhaseTime
log.info "finished ${phaseName}, took ${totalPhaseTime} milliseconds"
}
}
private static boolean exclude(exclude, String value) {
if (exclude) {
return exclude.contains(value)
}
return false
}
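    /**
     * Sorts the phases found under the config's {@code sql} block by their numeric
     * {@code order} value (defaulting to 1000 when absent) and tags each phase with
     * its name. Note that two phases sharing the same order value would collide on
     * the TreeMap key, with the later one replacing the earlier.
     */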
private static SortedMap getPhases(ConfigObject configObject) {
def result = new TreeMap()
int defaultOrder = 1000
ConfigObject sqlPhases = configObject.sql
sqlPhases.each {key, value ->
try {
value[PHASE_NAMES] = key
value.order = value.containsKey("order") ? value.order : defaultOrder
result.put(Double.valueOf(value.order.toString()), value)
} catch (Exception ex) {
throw new RuntimeException("there was an error loading phase ${key}", ex)
}
}
return result
}
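    /**
     * Batch-inserts a single record: the map is wrapped in a list and handed to
     * {@code runListBatch}, whose implementation falls in the truncated portion of
     * this listing. {@code insertOrTable} is either a table name or an insert
     * statement, as the parameter name suggests, and the {@code logEachBatch} flag
     * is accepted but not consulted in the code shown here.
     */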
int[] runBatch(String insertOrTable, Map batch, boolean logEachBatch) {
if (batch == null) {
            throw new IllegalArgumentException("a record must be a non-null Map to use batch inserting")
}
if (!(batch instanceof Map)) {
throw new IllegalArgumentException("record ${batch} must be of type Map to use batch inserting")
}
runListBatch([batch], insertOrTable)
}
int[] runBatch(String insertOrTable, Map batch) {
runBatch(insertOrTable, batch, false)
}
    int[] runBatch(String insertOrTable, List batch) {
        // The listing is truncated at this point: the body of this overload, the
        // private runListBatch implementation, and any remaining members are not
        // shown. Delegating to runListBatch, mirroring the Map overload above, is
        // an assumed reconstruction that makes the snippet parse.
        runListBatch(batch, insertOrTable)
    }
}
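// A minimal usage sketch (assumed, not part of the original source); the table
// names and batch file are illustrative only:
//
//     javax.sql.DataSource dataSource = ... // any configured DataSource
//     def sqlPlus = new SqlPlus(dataSource)
//     sqlPlus.bulkInsert("staging", "target", ["id", "name"])
//     sqlPlus.runBatchFile(fileName: "etl-batch.groovy", exclude: ["cleanup"])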