com.yanyun.log.configuration.LogAutoConfiguration
package com.yanyun.log.configuration;
import com.yanyun.log.aop.DealAopSendMsg;
import com.yanyun.log.configuration.properties.DealServicePoolProperties;
import com.yanyun.log.configuration.properties.ElasticSearchProperties;
import com.yanyun.log.configuration.properties.HbaseProperties;
import com.yanyun.log.configuration.properties.KubeMqProperties;
import com.yanyun.log.service.MQDealService;
import com.yanyun.log.service.MQSendEventService;
import com.yanyun.log.websocket.LogSocketConfig;
import io.swagger.annotations.ApiOperation;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.http.HttpHost;
import org.elasticsearch.client.RestClient;
import org.elasticsearch.client.RestClientBuilder;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnClass;
import org.springframework.boot.autoconfigure.condition.ConditionalOnMissingBean;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.security.core.context.SecurityContextHolder;
import org.springframework.web.client.RestTemplate;
import org.springframework.web.socket.config.annotation.EnableWebSocketMessageBroker;
import java.io.IOException;
import java.net.MalformedURLException;
import java.net.URL;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
/**
 * Auto-configuration class for the logging component.
 */
@Configuration
@EnableConfigurationProperties({KubeMqProperties.class, ElasticSearchProperties.class, DealServicePoolProperties.class, HbaseProperties.class})
@EnableScheduling
@EnableWebSocketMessageBroker
public class LogAutoConfiguration {
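    // Note (assumption): as a Spring Boot 2.x auto-configuration this class is presumably registered
    // under org.springframework.boot.autoconfigure.EnableAutoConfiguration in META-INF/spring.factories;
    // the registration file itself is not part of this listing.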
    /**
     * RestTemplate for remote calls; only registered if the application has not already provided one.
     * @return the RestTemplate bean
     */
@Bean
@ConditionalOnMissingBean
    public RestTemplate restTemplate() {
return new RestTemplate();
}
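    /**
     * Address of the IP lookup service, read from the gather.ip.url property; it is handed to
     * DealAopSendMsg together with the RestTemplate above, presumably to resolve the caller's
     * region from its IP address (assumption based on the field name).
     */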
@Value("${gather.ip.url}")
private String ip2RegionAddr;
    /**
     * Registers the bean that publishes event messages to MQ.
     *
     * @return the MQSendEventService bean
     */
@Bean
@ConditionalOnMissingBean
public MQSendEventService mqSendEventService(KubeMqProperties kubeMqProperties, MQDealService mqDealService) {
return new MQSendEventService(kubeMqProperties, mqDealService);
}
    /**
     * Registers the AOP interception bean; it defines the interception strategy and the data to be collected.
     *
     * @param mqSendEventService the MQ sending service
     * @return the DealAopSendMsg bean
     */
@Bean
@ConditionalOnMissingBean
@ConditionalOnClass(value = {ApiOperation.class, SecurityContextHolder.class})
public DealAopSendMsg dealAopSendMsg(MQSendEventService mqSendEventService,
@Qualifier("senderThreadPool") ExecutorService senderThreadPool,
RestTemplate restTemplate) {
        return new DealAopSendMsg(mqSendEventService, senderThreadPool, restTemplate, ip2RegionAddr);
}
    /**
     * Registers the Elasticsearch client (RestHighLevelClient).
     *
     * @return the RestHighLevelClient bean
     */
@Bean(destroyMethod = "close")
@ConditionalOnMissingBean
public RestHighLevelClient restHighLevelClient(ElasticSearchProperties elasticSearchProperties) {
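        // Expected URL format (illustration, not taken from project documentation): either a single
        // node such as "http://127.0.0.1:9200" or a comma-separated cluster list such as
        // "http://es1:9200,http://es2:9200"; protocol, host and port are parsed from each entry below.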
        // Read the configured address(es)
        String url = elasticSearchProperties.getUrl();
        // A comma-separated value indicates a cluster configuration
        if (url.contains(",")) {
            // Split the cluster configuration into individual node addresses
            String[] split = url.split(",");
            // Build one HttpHost per node
            HttpHost[] hosts = new HttpHost[split.length];
            for (int i = 0; i < split.length; i++) {
                try {
                    URL source = new URL(split[i]);
                    hosts[i] = new HttpHost(source.getHost(), source.getPort(), source.getProtocol());
                } catch (MalformedURLException e) {
                    // Fail fast rather than leaving a null entry that would break the client later
                    throw new IllegalArgumentException("Invalid Elasticsearch URL: " + split[i], e);
                }
            }
            // Assemble the builder for the Elasticsearch client
            RestClientBuilder builder = RestClient
                    .builder(hosts) // cluster addresses
                    // request configuration
                    .setRequestConfigCallback(requestConfigBuilder -> {
                        return requestConfigBuilder.setConnectionRequestTimeout(elasticSearchProperties.getConnectionRequestTimeout()) // connection-request timeout
                                .setConnectTimeout(elasticSearchProperties.getConnectTimeout()) // connect timeout
                                .setSocketTimeout(elasticSearchProperties.getSocketTimeout()); // socket timeout
                    });
return new RestHighLevelClient(builder);
} else {
            URL source;
            try {
                source = new URL(url);
            } catch (MalformedURLException e) {
                // Fail fast: a null source would otherwise cause a NullPointerException below
                throw new IllegalArgumentException("Invalid Elasticsearch URL: " + url, e);
            }
RestClientBuilder builder = RestClient.builder(
new HttpHost(source.getHost(), source.getPort(), source.getProtocol())
).setRequestConfigCallback(requestConfigBuilder -> {
return requestConfigBuilder.setConnectionRequestTimeout(elasticSearchProperties.getConnectionRequestTimeout())
.setConnectTimeout(elasticSearchProperties.getConnectTimeout())
.setSocketTimeout(elasticSearchProperties.getSocketTimeout());
});
return new RestHighLevelClient(builder);
}
}
    /**
     * Returns the HBase connection, backed by a dedicated thread pool.
     * HBase configuration covers:
     * the ZooKeeper address and port,
     * the HBase master address and port,
     * and the pool's core size, maximum size, keep-alive time (ms) and queue size.
     * Ports that must be reachable: ZooKeeper (2181), master (16000), region server (16020).
     * The web UI port (16010) can be opened if needed.
     *
     * @return the HBase Connection bean
     */
@Bean
@ConditionalOnMissingBean
public Connection hbaseConnection(HbaseProperties hbaseProperties) throws IOException {
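        // Property format note (illustration): the ZooKeeper quorum is the usual comma-separated
        // host list, e.g. "zk1,zk2,zk3"; the client port defaults to 2181 unless
        // hbase.zookeeper.property.clientPort is set elsewhere in the HBase configuration.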
        // Create the HBase configuration object
        org.apache.hadoop.conf.Configuration configuration = HBaseConfiguration.create();
        // Set the ZooKeeper quorum
        configuration.set("hbase.zookeeper.quorum", hbaseProperties.getZookeeperCluster());
        // Set the HBase master address
        //configuration.set("hbase.master", hbaseProperties.getHbaseMaster());
        // Configure the thread pool used by the connection
ExecutorService executorService = new ThreadPoolExecutor(hbaseProperties.getPoolCoreSize(),
hbaseProperties.getPoolMaxSize(),
hbaseProperties.getKeepAliveTime(), TimeUnit.MILLISECONDS,
new LinkedBlockingDeque<>(hbaseProperties.getQueueSize()));
        // Create the connection object
Connection connection = ConnectionFactory.createConnection(configuration, executorService);
return connection;
}
    /**
     * Registers the MQ message handler.
     *
     * @return the MQDealService bean
     */
@Bean(initMethod = "initBean")
@ConditionalOnMissingBean
public MQDealService mqDealService(KubeMqProperties kubeMqProperties,
ElasticSearchProperties elasticSearchProperties,
RestHighLevelClient restHighLevelClient,
Connection hbaseConnection,
HbaseProperties hbaseProperties,
@Qualifier("writeESThreadPool") ExecutorService writeESThreadPool,
@Qualifier("writeHbaseThreadPool") ExecutorService writeHbaseThreadPool) {
return new MQDealService(kubeMqProperties,
elasticSearchProperties,
restHighLevelClient,
hbaseConnection,
hbaseProperties,
writeESThreadPool,
writeHbaseThreadPool);
}
    /**
     * Custom thread pool used to send behavior data to MQ.
     *
     * @return the sender thread pool
     */
@Bean
public ExecutorService senderThreadPool(DealServicePoolProperties dealServicePoolProperties) {
ExecutorService poolExecutor = new ThreadPoolExecutor(
dealServicePoolProperties.getSenderCorePoolSize(),
dealServicePoolProperties.getSenderMaxPoolSize(),
dealServicePoolProperties.getSenderKeepAliveTime(), TimeUnit.MILLISECONDS,
new LinkedBlockingDeque<>(dealServicePoolProperties.getSenderQueueSize()),
new NamedThreadFactory("SENDER"),
                // Rejection policy: discard the oldest task in the queue
new ThreadPoolExecutor.DiscardOldestPolicy()
);
return poolExecutor;
}
    /**
     * Custom thread pool used to write log data to Elasticsearch.
     *
     * @return the Elasticsearch writer thread pool
     */
@Bean
public ExecutorService writeESThreadPool(DealServicePoolProperties dealServicePoolProperties) {
ExecutorService poolExecutor = new ThreadPoolExecutor(
dealServicePoolProperties.getEsCorePoolSize(),
dealServicePoolProperties.getEsMaxPoolSize(),
dealServicePoolProperties.getEsKeepAliveTime(), TimeUnit.MILLISECONDS,
new LinkedBlockingDeque<>(dealServicePoolProperties.getEsQueueSize()),
new NamedThreadFactory("ES"),
                // Rejection policy: discard the oldest task in the queue
new ThreadPoolExecutor.DiscardOldestPolicy()
);
return poolExecutor;
}
    /**
     * Custom thread pool used to write log data to HBase.
     *
     * @return the HBase writer thread pool
     */
@Bean
public ExecutorService writeHbaseThreadPool(DealServicePoolProperties dealServicePoolProperties) {
ExecutorService poolExecutor = new ThreadPoolExecutor(
dealServicePoolProperties.getHbaseCorePoolSize(),
dealServicePoolProperties.getHbaseMaxPoolSize(),
dealServicePoolProperties.getHbaseKeepAliveTime(), TimeUnit.MILLISECONDS,
new LinkedBlockingDeque<>(dealServicePoolProperties.getHbaseQueueSize()),
new NamedThreadFactory("HBASE"),
                // Rejection policy: discard the oldest task in the queue
new ThreadPoolExecutor.DiscardOldestPolicy()
);
return poolExecutor;
}
    /**
     * Thread pool monitoring.
     */
@Bean
public LogSocketConfig logSocketConfig(@Qualifier("senderThreadPool") ExecutorService senderThreadPool,
@Qualifier("writeESThreadPool") ExecutorService writeESThreadPool,
@Qualifier("writeHbaseThreadPool") ExecutorService writeHbaseThreadPool){
        return new LogSocketConfig(senderThreadPool, writeESThreadPool, writeHbaseThreadPool);
}
    /**
     * Custom ThreadFactory that produces named threads.
     */
private static class NamedThreadFactory implements ThreadFactory {
private static final AtomicInteger poolNumber = new AtomicInteger(1);
private final ThreadGroup group;
private final AtomicInteger threadNumber = new AtomicInteger(1);
private final String namePrefix;
NamedThreadFactory(String customPrefix) {
SecurityManager s = System.getSecurityManager();
group = (s != null) ? s.getThreadGroup() :
Thread.currentThread().getThreadGroup();
namePrefix = customPrefix + "-thread-";
}
public Thread newThread(Runnable r) {
Thread t = new Thread(group, r,
namePrefix + threadNumber.getAndIncrement(),
0);
if (t.isDaemon())
t.setDaemon(false);
if (t.getPriority() != Thread.NORM_PRIORITY)
t.setPriority(Thread.NORM_PRIORITY);
return t;
}
}
}
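
A minimal consumption sketch, assuming the component is packaged as a Spring Boot starter and the KubeMQ, Elasticsearch, HBase and gather.ip.url properties are present in the application's configuration. The package, class and field names below are hypothetical; only the injected types (MQSendEventService, RestHighLevelClient) come from the listing above.

package com.example.audit; // hypothetical consumer package

import com.yanyun.log.service.MQSendEventService;
import org.elasticsearch.client.RestHighLevelClient;
import org.springframework.stereotype.Component;

@Component
public class AuditClient {

    // Both beans are created by LogAutoConfiguration unless the application overrides them
    // (each is guarded by @ConditionalOnMissingBean in the listing above).
    private final MQSendEventService mqSendEventService;
    private final RestHighLevelClient esClient;

    public AuditClient(MQSendEventService mqSendEventService, RestHighLevelClient esClient) {
        this.mqSendEventService = mqSendEventService;
        this.esClient = esClient;
    }
}

Note that dealAopSendMsg is only registered when Swagger's ApiOperation and Spring Security's SecurityContextHolder are on the classpath, per the @ConditionalOnClass guard in the configuration above.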