Many resources are needed to host a project for download. Please understand that we have to cover our server costs. Thank you in advance. The project price is only $1.
Once you buy this project, you can download and modify it as often as you want.
This project contains the business service code. This is a classic service tier where business logic is defined along with its associated
transaction management configuration.
/*
* Copyright 2015 herd contributors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.finra.herd.service.impl;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import javax.sql.DataSource;
import org.apache.commons.lang3.StringUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.dao.DataAccessException;
import org.springframework.jdbc.CannotGetJdbcConnectionException;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.jdbc.datasource.DriverManagerDataSource;
import org.springframework.stereotype.Service;
import org.springframework.transaction.annotation.Propagation;
import org.springframework.transaction.annotation.Transactional;
import org.springframework.transaction.support.DefaultTransactionDefinition;
import org.springframework.util.Assert;
import org.finra.herd.core.helper.ConfigurationHelper;
import org.finra.herd.dao.JdbcDao;
import org.finra.herd.dao.S3Dao;
import org.finra.herd.model.api.xml.JdbcConnection;
import org.finra.herd.model.api.xml.JdbcDatabaseType;
import org.finra.herd.model.api.xml.JdbcExecutionRequest;
import org.finra.herd.model.api.xml.JdbcExecutionResponse;
import org.finra.herd.model.api.xml.JdbcStatement;
import org.finra.herd.model.api.xml.JdbcStatementResultSet;
import org.finra.herd.model.api.xml.JdbcStatementStatus;
import org.finra.herd.model.api.xml.JdbcStatementType;
import org.finra.herd.model.api.xml.S3PropertiesLocation;
import org.finra.herd.model.dto.ConfigurationValue;
import org.finra.herd.model.dto.S3FileTransferRequestParamsDto;
import org.finra.herd.service.JdbcService;
import org.finra.herd.service.helper.StorageHelper;
import org.finra.herd.service.helper.VelocityHelper;
/**
* Default implementation of {@link org.finra.herd.service.JdbcService} which uses Spring's JDBC wrapper framework to handle connections and transactions.
*/
@Service
public class JdbcServiceImpl implements JdbcService
{
// Fully-qualified JDBC driver class names for the supported database types (see JdbcDatabaseType).
// NOTE(review): the Redshift constant pins the JDBC 4.1 driver class ("jdbc41") — confirm it matches
// the driver jar actually deployed on the classpath.
public static final String DRIVER_REDSHIFT = "com.amazon.redshift.jdbc41.Driver";
public static final String DRIVER_POSTGRES = "org.postgresql.Driver";
public static final String DRIVER_ORACLE = "oracle.jdbc.OracleDriver";
public static final String DRIVER_MYSQL = "com.mysql.jdbc.Driver";
// Resolves configuration values (e.g. ConfigurationValue entries) for this service.
@Autowired
private ConfigurationHelper configurationHelper;
// DAO that performs the actual JDBC statement execution against the target data source.
@Autowired
private JdbcDao jdbcDao;
// Used to retrieve the optional Java properties file from S3 (see S3PropertiesLocation handling).
@Autowired
private S3Dao s3Dao;
// Builds S3 file transfer parameter DTOs for the S3 properties download.
@Autowired
private StorageHelper storageHelper;
// Evaluates Velocity templates; presumably used to substitute the S3 variables into statements — confirm in executeStatements.
@Autowired
private VelocityHelper velocityHelper;
/**
 * Entry point for JDBC execution. Deliberately suspends any caller transaction via
 * {@link Propagation#NOT_SUPPORTED} and runs the JDBC work non-transactionally: the herd connection
 * would otherwise sit idle for the whole duration of the external JDBC tasks, and a connection pool
 * with an abandoned-connection timeout could reclaim and close it, causing a "commit failed" error
 * when this method's transaction finally tried to commit. Since no herd DB work happens here, running
 * without a transaction is safe; the caller's suspended transaction simply resumes afterwards (e.g.
 * to write workflow variables).
 *
 * @param jdbcExecutionRequest the JDBC execution request
 *
 * @return the JDBC execution response
 */
@Override
@Transactional(propagation = Propagation.NOT_SUPPORTED)
public JdbcExecutionResponse executeJdbc(JdbcExecutionRequest jdbcExecutionRequest)
{
    // Delegate to the protected implementation so subclasses and tests can override the core logic.
    JdbcExecutionResponse jdbcExecutionResponse = executeJdbcImpl(jdbcExecutionRequest);
    return jdbcExecutionResponse;
}
/**
 * Core JDBC execution logic. Validates the request, optionally loads substitution variables from a
 * Java properties file in S3, builds a {@link DriverManagerDataSource} from the request's connection
 * information, and executes the requested statements. Runs in whatever transaction context the
 * caller established (the public entry point suspends transactions; see {@link #executeJdbc}).
 *
 * @param jdbcExecutionRequest JDBC execution request
 *
 * @return {@link JdbcExecutionResponse} containing one result per requested statement
 */
protected JdbcExecutionResponse executeJdbcImpl(JdbcExecutionRequest jdbcExecutionRequest)
{
    validateJdbcExecutionRequest(jdbcExecutionRequest);

    // Optionally, get properties from S3 (null when no S3 properties location was specified).
    S3PropertiesLocation s3PropertiesLocation = jdbcExecutionRequest.getS3PropertiesLocation();
    Map<String, Object> variables = getVariablesFromS3(s3PropertiesLocation);

    // Create a data source from the requested connection information.
    DataSource dataSource = createDataSource(jdbcExecutionRequest.getConnection(), variables);

    // Execute the requested statements. Parameterized types replace the original raw Map/List
    // declarations (no behavior change; callers and helpers are unaffected).
    List<JdbcStatement> requestJdbcStatements = jdbcExecutionRequest.getStatements();
    List<JdbcStatement> responseJdbcStatements = executeStatements(requestJdbcStatements, dataSource, variables);

    // Create and return the execution result. The connection element is deliberately null —
    // presumably to avoid echoing connection credentials back to the caller; TODO confirm
    // against JdbcExecutionResponse consumers.
    return new JdbcExecutionResponse(null, responseJdbcStatements);
}
/**
* Returns a map of key-value from the specified S3 properties location. Returns null if the specified location is null.
*
* @param s3PropertiesLocation the location of a Java properties file in S3
*
* @return {@link Map} of key-values
*/
private Map getVariablesFromS3(S3PropertiesLocation s3PropertiesLocation)
{
Map variables = null;
if (s3PropertiesLocation != null)
{
Properties properties = getProperties(s3PropertiesLocation);
variables = new HashMap<>();
for (Map.Entry