// [repository-page artifact, not source] JAR search and dependency download from the Maven repository
// org.teasoft.beex.mongodb.MongodbSqlLib — Maven / Gradle / Ivy
/*
* Copyright 2020-2023 the original author.All rights reserved.
* Kingstar([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.teasoft.beex.mongodb;
import static com.mongodb.client.model.Accumulators.avg;
import static com.mongodb.client.model.Accumulators.max;
import static com.mongodb.client.model.Accumulators.min;
import static com.mongodb.client.model.Accumulators.sum;
import static com.mongodb.client.model.Aggregates.group;
import static com.mongodb.client.model.Projections.excludeId;
import static com.mongodb.client.model.Projections.fields;
import static com.mongodb.client.model.Projections.include;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.Serializable;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import org.bson.BsonDocument;
import org.bson.BsonString;
import org.bson.BsonValue;
import org.bson.Document;
import org.bson.conversions.Bson;
import org.bson.types.ObjectId;
import org.teasoft.bee.mongodb.GridFsFile;
import org.teasoft.bee.mongodb.MongoSqlStruct;
import org.teasoft.bee.mongodb.MongodbBeeSql;
import org.teasoft.bee.mongodb.SuidFile;
import org.teasoft.bee.osql.Cache;
import org.teasoft.bee.osql.FunctionType;
import org.teasoft.bee.osql.IncludeType;
import org.teasoft.bee.osql.ObjSQLException;
import org.teasoft.bee.osql.OrderType;
import org.teasoft.bee.osql.SuidType;
import org.teasoft.bee.osql.annotation.GridFsMetadata;
import org.teasoft.bee.osql.api.Condition;
import org.teasoft.bee.osql.exception.BeeErrorGrammarException;
import org.teasoft.bee.osql.exception.BeeIllegalBusinessException;
import org.teasoft.beex.json.JsonUtil;
import org.teasoft.beex.mongodb.ds.MongoContext;
import org.teasoft.beex.mongodb.ds.SingleMongodbFactory;
import org.teasoft.beex.osql.mongodb.CreateIndex;
import org.teasoft.beex.osql.mongodb.GeoFind;
import org.teasoft.beex.osql.mongodb.IndexPair;
import org.teasoft.beex.osql.mongodb.IndexType;
import org.teasoft.honey.database.DatabaseClientConnection;
import org.teasoft.honey.osql.core.AbstractBase;
import org.teasoft.honey.osql.core.BeeFactory;
import org.teasoft.honey.osql.core.ConditionImpl;
import org.teasoft.honey.osql.core.ConditionImpl.FunExpress;
import org.teasoft.honey.osql.core.ExceptionHelper;
import org.teasoft.honey.osql.core.HoneyConfig;
import org.teasoft.honey.osql.core.HoneyContext;
import org.teasoft.honey.osql.core.HoneyUtil;
import org.teasoft.honey.osql.core.JsonResultWrap;
import org.teasoft.honey.osql.core.Logger;
import org.teasoft.honey.osql.core.NameTranslateHandle;
import org.teasoft.honey.osql.core.StringConst;
import org.teasoft.honey.osql.mongodb.MongoConditionHelper;
import org.teasoft.honey.osql.name.NameUtil;
import org.teasoft.honey.sharding.ShardingReg;
import org.teasoft.honey.sharding.ShardingUtil;
import org.teasoft.honey.sharding.engine.mongodb.MongodbShardingDdlEngine;
import org.teasoft.honey.sharding.engine.mongodb.MongodbShardingSelectEngine;
import org.teasoft.honey.sharding.engine.mongodb.MongodbShardingSelectFunEngine;
import org.teasoft.honey.sharding.engine.mongodb.MongodbShardingSelectJsonEngine;
import org.teasoft.honey.sharding.engine.mongodb.MongodbShardingSelectListStringArrayEngine;
import org.teasoft.honey.util.ObjectUtils;
import org.teasoft.honey.util.StreamUtil;
import org.teasoft.honey.util.StringParser;
import org.teasoft.honey.util.StringUtils;
import com.mongodb.BasicDBObject;
import com.mongodb.DBObject;
import com.mongodb.MongoGridFSException;
import com.mongodb.MongoTimeoutException;
import com.mongodb.client.AggregateIterable;
import com.mongodb.client.ClientSession;
import com.mongodb.client.FindIterable;
import com.mongodb.client.MongoCollection;
import com.mongodb.client.MongoCursor;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;
import com.mongodb.client.gridfs.GridFSDownloadStream;
import com.mongodb.client.gridfs.GridFSFindIterable;
import com.mongodb.client.gridfs.model.GridFSFile;
import com.mongodb.client.gridfs.model.GridFSUploadOptions;
import com.mongodb.client.model.Aggregates;
import com.mongodb.client.model.Filters;
import com.mongodb.client.model.IndexModel;
import com.mongodb.client.model.IndexOptions;
import com.mongodb.client.model.Indexes;
import com.mongodb.client.result.DeleteResult;
import com.mongodb.client.result.InsertManyResult;
import com.mongodb.client.result.UpdateResult;
/**
* Mongodb SqlLib.
* @author Jade
* @author Kingstar
* @since 2.0
* support output mongo shell command
* @since 2.1
*/
public class MongodbSqlLib extends AbstractBase
implements MongodbBeeSql, SuidFile, CreateIndex, GeoFind, Serializable {
private static final long serialVersionUID = 1596710362261L;
private static final String IDKEY = "_id";
/**
 * Obtains a client connection from the context when multiple datasources are
 * enabled; single-datasource mode needs no connection and returns null.
 */
private DatabaseClientConnection getConn() {
	if (HoneyConfig.getHoneyConfig().multiDS_enable) {
		return HoneyContext.getDatabaseConnection();
	}
	return null;
}
/**
 * Resolves the MongoDatabase behind the given connection. A null connection
 * means single-datasource mode, which falls back to the shared Mongo database.
 */
private MongoDatabase getMongoDatabase(DatabaseClientConnection conn) {
	return (conn != null) ? (MongoDatabase) conn.getDbConnection()
			: SingleMongodbFactory.getMongoDb(); // single datasource
}
// True only on the sharding *main* thread: sharding is configured and the
// current thread has no per-shard sql index bound yet (shard sub-threads do).
private boolean isShardingMain() {// sharding present (multiple shards)
return HoneyContext.getSqlIndexLocal() == null && ShardingUtil.hadSharding(); // precondition: HoneyContext.hadSharding()
}
/** Convenience overload: select by entity template with no extra Condition. */
@Override
public List select(T entity) {
return select(entity, null);
}
// Narrows the entity's runtime class; the cast is safe because
// entity.getClass() is by definition the entity's own type.
@SuppressWarnings("unchecked")
private Class toClassT(T entity) {
return (Class)entity.getClass();
}
/**
 * Updates all documents matching the entity's primary-key field(s), applying
 * every other non-PK field of the entity via a $set document.
 * Returns the number of modified documents.
 */
@Override
public int update(T entity) {
checkShardingSupport();
String tableName = _toTableName(entity);
BasicDBObject doc = null;
DatabaseClientConnection conn = null;
int num = 0;
String sql = "";
try {
String pkName = HoneyUtil.getPkFieldName(entity);
if ("".equals(pkName)) pkName = "id"; // fall back to the conventional "id" field
String pks[] = pkName.split(",");
StringUtils.trim(pks);
if (pks.length < 1) throw new ObjSQLException(
"ObjSQLException: in the update(T entity) or update(T entity,IncludeType includeType), the id field is missing !");
Map map = ParaConvertUtil.toMap(entity);
BasicDBObject filter = new BasicDBObject();
String column = "";
for (int i = 0; i < pks.length; i++) {
column = pks[i];
if ("id".equalsIgnoreCase(column)) {// map "id" to Mongo's _id
column = IDKEY;
}
filter.append(column, map.get(column));
map.remove(column); // PK fields belong to the filter, not the $set payload
}
doc = newDBObject(map);
BasicDBObject updateDocument = new BasicDBObject("$set", doc);
MongoSqlStruct struct = new MongoSqlStruct("int", tableName, filter, null, null,
null, null, false, entity.getClass(), updateDocument);
sql = struct.getSql();
initRoute(SuidType.MODIFY, struct.getEntityClass(), sql);
HoneyContext.addInContextForCache(sql, struct.getTableName());
logSQLForMain("Mongodb::update: "+sql);
logUpdate(struct);
conn = getConn();
ClientSession session = getClientSession();
UpdateResult rs;
if (session == null)
rs = getMongoDatabase(conn).getCollection(tableName).updateMany(filter, updateDocument);
else // inside a transaction: reuse the bound client session
rs = getMongoDatabase(conn).getCollection(tableName).updateMany(session, filter, updateDocument);
num = (int) rs.getModifiedCount();
return num;
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
} finally {
logAffectRow(num);
clearInCache(sql, "int", SuidType.MODIFY, num); // evict cache entries touching this table
close(conn);
}
}
/**
 * Inserts one entity as a document. GridFS-attached file content (if any) is
 * stored first via _storeFile. Returns 1 on success, or 0 when a
 * duplicate-key error is swallowed per configuration.
 */
@Override
public int insert(T entity) {
String tableName = _toTableName(entity);
Document doc = null;
DatabaseClientConnection conn =null;
String sql="";
int num = 0;
try {
Map map = ParaConvertUtil.toMap(entity, -1, SuidType.INSERT);
conn = getConn();
MongoDatabase db=getMongoDatabase(conn);
_storeFile(map,db); // store any attached GridFS file first
doc = newDoc(map);
MongoSqlStruct struct = new MongoSqlStruct("int", tableName, null, null, null,
null, null, false,entity.getClass(),newDBObject(map)); // for insert the document travels in the updateSet slot
sql=struct.getSql();
initRoute(SuidType.MODIFY, struct.getEntityClass(), sql);
logSQLForMain("Mongodb::insert: "+sql);
logInsertOne(struct);
HoneyContext.addInContextForCache(sql, struct.getTableName());
ClientSession session = getClientSession();
if (session == null)
db.getCollection(tableName).insertOne(doc);
else
db.getCollection(tableName).insertOne(session, doc);
num=1; // not reached when insertOne throws
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
boolean notCatch=HoneyConfig.getHoneyConfig().notCatchModifyDuplicateException;
if (!notCatch && isConstraint(e)) { // duplicate-key caught internally and handled by the Bee framework
boolean notShow=HoneyConfig.getHoneyConfig().notShowModifyDuplicateException;
if(! notShow) Logger.warn(e.getMessage());
return num;
}
throw ExceptionHelper.convert(e);
} finally {
logAffectRow(num);
clearInCache(sql, "int", SuidType.MODIFY, num);
close(conn);
}
return num;
}
/**
 * If the entity map carries GridFS bookkeeping keys, uploads the file content
 * (byte[] or any InputStream) to GridFS, writes the returned file id back into
 * the entity's id field, then strips all GridFS helper keys from the map.
 */
@SuppressWarnings("unchecked")
private void _storeFile(Map map, MongoDatabase database) {
if (map.containsKey(StringConst.GridFs_FileId)) {
String fileColumnName = (String) map.get(StringConst.GridFs_FileColumnName);
String filename_key = (String) map.get(StringConst.GridFs_FileName);
String filename_value = (String) map.get(filename_key);
// InputStream source = (InputStream) map.get(fileColumnName);
Object source0 = map.get(fileColumnName);
InputStream source=null;
if (byte[].class.equals(source0.getClass())) {
source =StreamUtil.byteArray2Stream((byte[])source0);
// } else if (InputStream.class.equals(source0.getClass())) {
} else if (InputStream.class.isAssignableFrom(source0.getClass())) { // accept any InputStream subclass
source = (InputStream) source0;
}
// Metadata fields are those marked with the GridFsMetadata annotation.
Map metadataMap = (Map) map.get(GridFsMetadata.class.getName());
String fileid = _uploadFile(filename_value, source, metadataMap, database);
map.put((String) map.get(StringConst.GridFs_FileId), fileid); // store the returned file id in its target field
// The file content now lives in GridFS; drop the helper entries.
map.remove(StringConst.GridFs_FileId);
map.remove(StringConst.GridFs_FileName);
map.remove(StringConst.GridFs_FileColumnName);
map.remove(fileColumnName);
map.remove(GridFsMetadata.class.getName());
}
}
/** Convenience overload: delete by entity template with no extra Condition. */
@Override
public int delete(T entity) {
return delete(entity, null);
}
/**
 * Selects entities matching the template entity plus the optional Condition.
 * Group-by conditions take a dedicated aggregation path.
 */
@Override
public List select(T entity, Condition condition) {
if (condition != null && Boolean.TRUE.equals(condition.hasGroupBy())) {
return selectWithGroupBy(entity, condition);
} else {
if (entity == null) return Collections.emptyList();
MongoSqlStruct struct = parseMongoSqlStruct(entity, condition, "List");
Class entityClass = toClassT(entity);
List list= select(struct, entityClass);
fillGridFs(entityClass, condition, list); // file bytes are never cached; fetched fresh each time // context handling: TODO?
return list;
}
}
/**
 * Populates the GridFS file-content field of each selected entity. Content is
 * looked up by stored file id first, falling back to the file-name field.
 * Target fields may be declared byte[] or InputStream; other types abort the
 * fill. Per-row failures are logged at debug level and skipped.
 */
private void fillGridFs(Class entityClass, Condition condition, List list) {
Map map = ParaConvertUtil.toMapForGridFsSelect(entityClass, getIncludeType(condition));
if (map == null || map.size() == 0) return;
if (map.containsKey(StringConst.GridFs_FileId)) {
String fileColumnName = (String) map.get(StringConst.GridFs_FileColumnName);
String filename_name = (String) map.get(StringConst.GridFs_FileName); // name of the field holding the file name
String fileid_name = (String) map.get(StringConst.GridFs_FileId); // name of the field holding the file id
boolean isByteArray = false;
boolean isInputStream = false;
boolean isFirst = true;
byte[] data = null;
for (T t : list) { // enrich every row of the result list
try {
Field field3 = HoneyUtil.getField(t.getClass(),fileColumnName);
if (isFirst) {
// Determine the content field's declared type once; all rows share the class.
isFirst = false;
if (byte[].class.equals(field3.getType())) {
isByteArray = true;
} else if (InputStream.class.equals(field3.getType())) {
isInputStream = true;
} else {
break; // unsupported content type: nothing to fill
}
}
Field field = HoneyUtil.getField(t.getClass(),fileid_name);
HoneyUtil.setAccessibleTrue(field);
String fileid_value = (String) field.get(t);
if (StringUtils.isNotBlank(fileid_value)) {
// fileid_value is the GridFS id written back at insert time; use it directly.
data = getFileById(fileid_value);
} else {
Field field2 = HoneyUtil.getField(t.getClass(),filename_name); // field holding the GridFS file name
HoneyUtil.setAccessibleTrue(field2);
String filename_value = (String) field2.get(t); // the file-name value
if (StringUtils.isNotBlank(filename_value)) {
data = getFileByName(filename_value);
}
}
HoneyUtil.setAccessibleTrue(field3);
if (isByteArray) {
HoneyUtil.setFieldValue(field3, t, data);
} else if (isInputStream) {
HoneyUtil.setFieldValue(field3, t, StreamUtil.byteArray2Stream(data));
}
} catch (Exception e) {
Logger.debug(e.getMessage(), e); // best-effort per row; keep processing the rest
}
}
}
}
/**
 * Selects documents by id; a comma-separated String id selects several rows.
 * GridFS content fields of the result rows are populated afterwards.
 */
@Override
public List selectById(Class entityClass, Object id) {
	String tableName = _toTableNameByClass(entityClass);
	Object[] parsed = processId(entityClass, id);
	// processId yields a single-id filter at [0] and an $or multi-id filter at [1];
	// the multi-id filter takes precedence when present.
	Bson filter = (parsed[1] != null) ? (Bson) parsed[1] : (BasicDBObject) parsed[0];
	MongoSqlStruct struct = new MongoSqlStruct("List", tableName, filter, null, null,
			null, null, true, entityClass);
	List list = select(struct, entityClass);
	fillGridFs(entityClass, null, list);
	return list;
}
/**
 * Selects entities matching the template entity, sorted by the given
 * comma-separated field list with the corresponding order directions.
 */
@Override
public List selectOrderBy(T entity, String orderFields, OrderType[] orderTypes) {
String tableName = _toTableName(entity);
Bson filter = toDocument(entity);
String ofs[]=orderFields.split(",");
StringUtils.trim(ofs);
Bson sortBson = ParaConvertUtil.toSortBson(ofs, orderTypes);
Class entityClass = toClassT(entity);
MongoSqlStruct struct = new MongoSqlStruct("List", tableName, filter, sortBson, null,
null, null, true,entityClass);
List list= select(struct, entityClass);
fillGridFs(entityClass, null, list);
return list;
}
// Dispatches between single-source and sharded execution; selectById also funnels through here.
/**
 * Core select entry. Without sharding, delegates straight to _select. With
 * sharding, the main thread first probes the cache via _select, then fans out
 * to the sharding engine on a miss; shard sub-threads run _select directly.
 */
@Override
public List select(MongoSqlStruct struct, Class entityClass) {
if (!ShardingUtil.hadSharding()) {
return _select(struct, entityClass); // non-sharding branch
} else {
if (HoneyContext.getSqlIndexLocal() == null) { // sharding: main thread
// List tabNameList = HoneyContext.getListLocal(StringConst.TabNameListLocal);
// struct.setTableName(struct.getTableName().replace(StringConst.ShardingTableIndexStr, tabNameList==null?"":tabNameList.toString()));
List list =_select(struct, entityClass); // cache probe only
if (list != null) {// a cached null is indistinguishable from "no entry", so empty results should be cached as an empty List, never null
logDsTab();
return list;
}
ShardingReg.regShadingPage("", "", struct.getStart(), struct.getSize());
List rsList = new MongodbShardingSelectEngine().asynProcess(entityClass, this, struct);// resolves the real per-shard table names
addInCache(struct.getSql(), rsList, rsList.size());
logSelectRows(rsList.size());
return rsList;
} else { // shard sub-thread
return _select(struct, entityClass);
}
}
}
// Single-collection query; each call touches exactly one collection.
/**
 * Executes the query described by struct, serving from cache when possible.
 * On the sharding main thread a cache miss returns null so the caller can
 * fan out to the shards.
 */
@SuppressWarnings("unchecked")
private List _select(MongoSqlStruct struct, Class entityClass) {
String sql = struct.getSql();
logSQLForMain("Mongodb::select: " + sql);
log(struct);
HoneyContext.addInContextForCache(sql, struct.getTableName());
// no returnType check needed here: MongoSqlStruct already carries returnType
// initRoute(SuidType.SELECT, entityClass, sql);
Object cacheObj = getCache().get(sql);
if (cacheObj != null) {
clearContext(sql);
List list = (List) cacheObj;
logSelectRows(list.size());
return list;
}
if (isShardingMain()) return null; // sharding main thread: cache miss, let the sharding engine take over
List rsList = null;
try {
FindIterable docIterable = findIterableDocument(struct);
rsList = TransformResult.toListEntity(docIterable, entityClass);
addInCache(sql, rsList, rsList.size());
logSelectRows(rsList.size());
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
}
return rsList;
}
/** Selects matching documents and returns the result serialized as a JSON string. */
@Override
public String selectJson(T entity, Condition condition) {
if (entity == null) return null;
MongoSqlStruct struct = parseMongoSqlStruct(entity, condition, "StringJson");
Class entityClass = toClassT(entity);
return selectJson(struct, entityClass);
}
/**
 * JSON variant of the core select dispatch: single-source goes straight to
 * _selectJson; with sharding the main thread probes the cache then delegates
 * to the JSON sharding engine, while shard sub-threads run _selectJson directly.
 */
@Override
public String selectJson(MongoSqlStruct struct, Class entityClass) {
String sql = struct.getSql();
if (!ShardingUtil.hadSharding()) { // no sharding
return _selectJson(struct, entityClass);
} else { // sharding
if (HoneyContext.getSqlIndexLocal() == null) { // sharding: main thread
String cacheValue = _selectJson(struct, entityClass); // cache probe only
if (cacheValue != null) {
logDsTab();
return cacheValue;
}
ShardingReg.regShadingPage("", "", struct.getStart(), struct.getSize());
JsonResultWrap wrap = new MongodbShardingSelectJsonEngine().asynProcess(entityClass, this, struct); // TODO: should also pass the suid type
logSelectRows(wrap.getRowCount());
String json = wrap.getResultJson();
addInCache(sql, json, -1); // -1: skip the max-result-set size check
return json;
} else { // shard sub-thread
return _selectJson(struct, entityClass);
}
}
}
/**
 * Executes the query and converts the cursor to a JSON string, serving from
 * cache when possible; returns null on the sharding main thread's cache miss.
 */
private String _selectJson(MongoSqlStruct struct, Class entityClass) {
String sql = struct.getSql();
logSQLForMain("Mongodb::selectJson: "+sql);
log(struct);
HoneyContext.addInContextForCache(sql, struct.getTableName());
initRoute(SuidType.SELECT, entityClass, sql);
Object cacheObj = getCache().get(sql); // the sql key does not carry parameter values yet
if (cacheObj != null) {
clearContext(sql);
return (String) cacheObj;
}
if (isShardingMain()) return null; // sharding main thread: cache miss, caller fans out
String json = "";
try {
FindIterable docIterable = findIterableDocument(struct);
JsonResultWrap wrap = TransformResult.toJson(docIterable.iterator(), entityClass);
json = wrap.getResultJson();
logSelectRows(wrap.getRowCount());
addInCache(sql, json, -1); // -1: skip the max-result-set size check
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
}
return json;
}
/**
 * Selects matching documents as string rows (one entry per document,
 * converted by TransformResult.toListString downstream).
 */
@Override
public List selectString(T entity, Condition condition) {
if (entity == null) return Collections.emptyList();
MongoSqlStruct struct = parseMongoSqlStruct(entity, condition, "List");
Class entityClass = toClassT(entity);
return selectString(struct, entityClass);
}
/**
 * Dispatch for the string-rows select: single-source goes to _selectString;
 * with sharding the main thread probes the cache then uses the sharding
 * engine, while shard sub-threads run _selectString directly.
 */
@Override
public List selectString(MongoSqlStruct struct, Class entityClass) {
String sql = struct.getSql();
if (!ShardingUtil.hadSharding()) {
return _selectString(struct, entityClass); // non-sharding branch
} else {
if (HoneyContext.getSqlIndexLocal() == null) { // sharding: main thread
List list = _selectString(struct, entityClass); // cache probe only
if (list != null) {
logDsTab();
return list;
}
ShardingReg.regShadingPage("", "", struct.getStart(), struct.getSize());
List rsList = new MongodbShardingSelectListStringArrayEngine().asynProcess(entityClass, this, struct);
addInCache(sql, rsList, rsList.size());
return rsList;
} else { // shard sub-thread
return _selectString(struct, entityClass);
}
}
}
/**
 * Executes the query and converts each document to a string row via
 * TransformResult.toListString, serving from cache when possible; returns
 * null on the sharding main thread's cache miss.
 */
@SuppressWarnings("unchecked")
private List _selectString(MongoSqlStruct struct, Class entityClass) {
String sql = struct.getSql();
logSQLForMain("Mongodb::selectString: "+sql);
log(struct);
HoneyContext.addInContextForCache(sql, struct.getTableName());
initRoute(SuidType.SELECT, entityClass, sql);
Object cacheObj = getCache().get(sql); // the sql key does not carry parameter values yet
if (cacheObj != null) {
clearContext(sql);
List list = (List) cacheObj;
logSelectRows(list.size());
return list;
}
if (isShardingMain()) return null; // sharding main thread: cache miss, caller fans out
List list = null;
try {
FindIterable docIterable = findIterableDocument(struct);
list = TransformResult.toListString(docIterable.iterator(), struct.getSelectFields());
logSelectRows(list.size());
addInCache(sql, list, list.size());
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
}
return list;
}
/**
 * Translates an entity template plus optional Condition into a MongoSqlStruct:
 * filter, sort, paging (start/size), projection fields, and whether the
 * projection includes _id. A select field named "id" is mapped to _id.
 */
private MongoSqlStruct parseMongoSqlStruct(T entity, Condition condition, String returnType) {
if (condition != null) condition.setSuidType(SuidType.SELECT);
String tableName = _toTableName(entity);
BasicDBObject filter = toDBObjectForFilter(entity, condition);
Bson sortBson = ParaConvertUtil.toSortBson(condition);
Integer size = null;
Integer start = null;
String[] selectFields = null;
boolean hasId = false;
ConditionImpl conditionImpl = (ConditionImpl) condition; // casting null is harmless; guarded below
if (condition != null) {
size = conditionImpl.getSize();
start = conditionImpl.getStart();
selectFields = conditionImpl.getSelectField();
if (selectFields != null) {
if (selectFields.length == 1) selectFields = selectFields[0].split(","); // a lone argument may be comma-separated
StringUtils.trim(selectFields);
for (int i = 0; i < selectFields.length; i++) {
if ("id".equalsIgnoreCase(selectFields[i])) {
selectFields[i] = IDKEY;
hasId = true;
break; // at most one id field expected
}
}
}
}
return new MongoSqlStruct(returnType, tableName, filter, sortBson, start, size, selectFields, hasId, entity.getClass());
}
/**
 * Builds the driver FindIterable for the given struct: applies the filter,
 * client session (when in a transaction), sort, skip/limit paging, and
 * projection (excluding _id unless it was explicitly requested).
 */
private FindIterable findIterableDocument(MongoSqlStruct struct) {
String tableName = struct.getTableName();
Bson filter = (Bson)struct.getFilter();
Bson sortBson = (Bson)struct.getSortBson();
Integer size = struct.getSize();
Integer start = struct.getStart();
String[] selectFields = struct.getSelectFields();
StringUtils.trim(selectFields);
boolean hasId = struct.isHasId();
DatabaseClientConnection conn = getConn();
FindIterable docIterable = null;
try {
MongoCollection collection=getMongoDatabase(conn).getCollection(tableName);
ClientSession session = getClientSession();
if (session == null) {
if (filter != null)
docIterable = collection.find(filter);
else
docIterable = collection.find();
} else {
if (filter != null)
docIterable = collection.find(session,filter);
else
docIterable = collection.find(session);
}
if (sortBson != null) docIterable = docIterable.sort(sortBson); // e.g. And Filter{filters=[Document{{_id=1}}, Document{{userid=1}}]}
if (size != null && size > 0) {
if (start == null || start < 0) start = 0; // absent/negative start means "from the beginning"
docIterable = docIterable.skip(start).limit(size);
}
if (selectFields != null) {
if (hasId)
docIterable = docIterable.projection(fields(include(selectFields)));
else // _id not requested: exclude it explicitly (Mongo returns it by default)
docIterable = docIterable
.projection(fields(include(selectFields), excludeId()));
}
} finally {
close(conn);
}
return docIterable;
}
/**
 * Deletes all documents matching the entity template plus Condition. A
 * whole-collection delete (no filter) is rejected when the
 * notDeleteWholeRecords config flag is set. Returns the deleted count.
 */
@Override
@SuppressWarnings({ "unchecked", "rawtypes" })
public int delete(T entity, Condition condition) {
checkShardingSupport();
String tableName = _toTableName(entity);
BasicDBObject filter = toDBObjectForFilter(entity, condition);
int num = 0;
MongoSqlStruct struct = new MongoSqlStruct("int", tableName, filter, null, null,
null, null, false,entity.getClass());
String sql=struct.getSql();
initRoute(SuidType.MODIFY, struct.getEntityClass(), sql);
logSQLForMain("Mongodb::delete: "+sql);
logDeleteMany(struct);
HoneyContext.addInContextForCache(sql, struct.getTableName());
DatabaseClientConnection conn = getConn();
try {
DeleteResult rs = null;
ClientSession session = getClientSession();
if (filter != null) {
if (session == null)
rs = getMongoDatabase(conn).getCollection(tableName).deleteMany(filter);
else
rs = getMongoDatabase(conn).getCollection(tableName).deleteMany(session, filter);
}else {
boolean notDeleteWholeRecords = HoneyConfig.getHoneyConfig().notDeleteWholeRecords;
if (notDeleteWholeRecords) {
throw new BeeIllegalBusinessException("BeeIllegalBusinessException: It is not allowed delete whole documents(records) in one collection(table).If need, you can change the config in bee.osql.notDeleteWholeRecords !");
}
// empty filter document: matches, and so deletes, every document in the collection
if (session == null)
rs = getMongoDatabase(conn).getCollection(tableName).deleteMany(new Document(new HashMap()));
else
rs = getMongoDatabase(conn).getCollection(tableName).deleteMany(session, new Document(new HashMap()));
}
if (rs != null)
num=(int) rs.getDeletedCount();
logAffectRow(num);
return num;
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
} finally {
clearInCache(sql, "int", SuidType.MODIFY, num); // has clearContext(sql)
close(conn);
}
}
/**
 * Updates documents matching oldEntity's populated fields, setting them to
 * newEntity's populated fields.
 */
@Override
public int update(T oldEntity, T newEntity) {
String tableName = _toTableName(oldEntity);
try {
Map oldMap = ParaConvertUtil.toMap(oldEntity);
Map newMap = ParaConvertUtil.toMap(newEntity);
return update(oldMap, newMap, tableName, null);
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
}
}
/**
 * Updates with the named fields as the $set part; the remaining entity
 * fields (plus the Condition) form the filter.
 */
@Override
public int update(T entity, Condition condition, String... setFields) {
String tableName = _toTableName(entity);
Map reMap[] = toMapForUpdateSet(entity, condition, setFields);
return update(reMap[0], reMap[1], tableName, condition);
}
/**
 * Updates with the named fields as the filter; the remaining entity fields
 * form the $set part.
 */
@Override
public int updateBy(T entity, Condition condition, String... whereFields) {
String tableName = _toTableName(entity);
Map reMap[] = toMapForUpdateBy(entity, condition, whereFields);
return update(reMap[0], reMap[1], tableName, condition);
}
/**
 * Low-level update: filterMap selects the documents; newMap plus any
 * Condition-supplied set/inc/mul expressions form the update document.
 * Rejects whole-collection updates when notUpdateWholeRecords is set, and
 * rejects an empty update (no $set/$inc/$mul at all).
 */
private int update(Map filterMap, Map newMap, String tableName,Condition condition) {
checkShardingSupport();
BasicDBObject oldDoc = null;
BasicDBObject newDoc = null;
DatabaseClientConnection conn = null;
String sql="";
int num=0;
BasicDBObject updateSet = new BasicDBObject();
boolean hasNewValue=false;
try {
boolean notUpdateWholeRecords = HoneyConfig.getHoneyConfig().notUpdateWholeRecords;
if (notUpdateWholeRecords && ObjectUtils.isEmpty(filterMap)) {
throw new BeeIllegalBusinessException(
"BeeIllegalBusinessException: It is not allowed update whole documents(records) in one collection(table). If need, you can change the config in bee.osql.notUpdateWholeRecords !");
}
if (filterMap == null) filterMap = new HashMap<>();
oldDoc = new BasicDBObject(filterMap); // filter
// List updateBsonList=new ArrayList<>();
if(newMap!=null && newMap.size()>0) {
newDoc = new BasicDBObject(newMap);
hasNewValue=true;
// updateBsonList.add(new BasicDBObject("$set", newDoc));
}
// The Condition may contribute three buckets, applied below as: [0] -> $set, [1] -> $inc, [2] -> $mul.
List updateSetBsonList=MongoConditionUpdateSetHelper.processConditionForUpdateSet(condition);
if(updateSetBsonList!=null) {
// updateBsonList.addAll(updateSetBsonList);
DBObject setObject = updateSetBsonList.get(0);
DBObject incObject = updateSetBsonList.get(1);
DBObject mulObject = updateSetBsonList.get(2);
if(setObject!=null) {
// merge the Condition's $set entries with the entity-derived ones
if(newDoc!=null) newDoc.putAll(setObject);
else newDoc=(BasicDBObject)setObject;
}
// DBObject newObj = new BasicDBObject();
if (newDoc!=null) {
updateSet.put("$set", newDoc);
hasNewValue=true;
}
if (incObject!=null) {
updateSet.put("$inc", incObject);
hasNewValue=true;
}
if (mulObject!=null) {
updateSet.put("$mul", mulObject);
hasNewValue=true;
}
}else if(newDoc!=null){
updateSet.put("$set", newDoc);
}
if(!hasNewValue) {
throw new BeeErrorGrammarException("The update set part is empty!");
}
// Bson updateSet=Updates.combine(updateBsonList);
MongoSqlStruct struct = new MongoSqlStruct("int", tableName, oldDoc, null, null,
null, null, false, null, updateSet); // this method no entityClass
sql = struct.getSql();
initRoute(SuidType.MODIFY, struct.getEntityClass(), sql);
HoneyContext.addInContextForCache(sql, struct.getTableName());
logSQLForMain("Mongodb::update: "+sql);
logUpdate(struct);
conn = getConn();
// UpdateResult rs = getMongoDatabase(conn).getCollection(tableName).updateMany(oldDoc, updateBsonList.get(0)); //ok
ClientSession session = getClientSession();
UpdateResult rs =null;
if (session == null)
rs = getMongoDatabase(conn).getCollection(tableName).updateMany(oldDoc, updateSet);
else // inside a transaction: reuse the bound client session
rs = getMongoDatabase(conn).getCollection(tableName).updateMany(session, oldDoc, updateSet);
Logger.debug("Update result raw json: "+rs.toString());
num=(int) rs.getModifiedCount();
logAffectRow(num);
return num;
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
} finally {
clearInCache(sql, "int", SuidType.MODIFY, num);
close(conn);
}
}
// whereFields variant: the named fields become the filter; the rest become the $set part.
private Map[] toMapForUpdateBy(T entity, Condition condition, String... specialFields) {
return toMapForUpdate(entity, condition, true, specialFields);
}
// setFields variant: the named fields become the $set part; the rest become the filter.
private Map[] toMapForUpdateSet(T entity, Condition condition, String... specialFields) {
return toMapForUpdate(entity, condition, false, specialFields);
}
/**
 * Normalizes a varargs field list: null becomes a single empty name, a lone
 * argument may carry a comma-separated list, and every entry is trimmed.
 */
private String[] adjustVariableString(String... fieldList) {
	if (fieldList == null) return new String[] { "" };
	// Only a single argument is allowed to be comma-separated.
	String[] parts = (fieldList.length == 1) ? fieldList[0].split(",") : fieldList;
	StringUtils.trim(parts);
	return parts;
}
// Fields not named in specialFields fall into the other bucket (by default only non-empty, non-null fields are included).
// Fields set via the condition's op/between/notBetween methods are not affected by the includeType value.
/**
 * Splits the entity's fields into a filter map and a $set map. When
 * isFilterField is true the named fields form the filter; otherwise they form
 * the $set part. Condition-derived filters are merged into the filter map.
 * Returns [0]=filterMap, [1]=setMap.
 */
@SuppressWarnings("unchecked")
private Map[] toMapForUpdate(T entity, Condition condition,
boolean isFilterField, String... specialFields) {
checkShardingSupport();
Map reMap[] = new Map[2];
try {
if (condition != null) condition.setSuidType(SuidType.UPDATE);
Map entityMap = ParaConvertUtil.toMap(entity, getIncludeType(condition));
Map filterMapFromC = MongoConditionHelper.processCondition(condition);
// Map setMapFromC = MongoConditionHelper.processCondition(condition); // the Condition "set" part is extracted in the next method
// String fields[] = specialFields.split(",");
String fields[] = adjustVariableString(specialFields);
Map specialMap = new LinkedHashMap();
for (int i = 0; i < fields.length; i++) {
fields[i] = _toColumnName(fields[i], entity.getClass());
if ("id".equalsIgnoreCase(fields[i])) {// map id to _id; fixed bug v2.0.2.14
fields[i] = "_id";
}
// entityMap splits in two: first pull out the specially-named fields
if (entityMap.containsKey(fields[i])) { // move the entity field into the special bucket
specialMap.put(fields[i], entityMap.get(fields[i]));
entityMap.remove(fields[i]);
}
// Fields set via Condition.set(arg1,arg2) are independent of updateFields; updateFields
// only needs to name fields present in the entity itself.
// (Mongodb does not need the more involved SQL-side rule: a field named both in
// updateFields and in Condition.set(...) would have its entity value moved to the
// where part, per the V1.9.8 rule.)
}
Map filterMap;
Map setMap;
if (isFilterField) {
filterMap = specialMap;
setMap = entityMap;
} else {
filterMap = entityMap;
setMap = specialMap;
}
// in this method, filterMapFromC only extracts filter conditions from the Condition
if (ObjectUtils.isNotEmpty(filterMapFromC)) filterMap.putAll(filterMapFromC);
reMap[0] = filterMap;
reMap[1] = setMap;
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
}
return reMap;
}
/**
 * Batch insert: writes the entities in chunks of batchSize documents via
 * insertMany. GridFS file content attached to an entity is stored before its
 * document is buffered. Returns the total number of inserted documents.
 *
 * Fix: the chunk-start test was {@code i % batchSize == 1}, which never fires
 * when batchSize == 1 (i % 1 is always 0), leaving {@code list} null and
 * throwing a NullPointerException at {@code list.size()}. The equivalent test
 * {@code (i - 1) % batchSize == 0} behaves identically for batchSize > 1 and
 * correctly starts a fresh chunk per element when batchSize == 1.
 *
 * @param entity        entities to insert (at least one; same collection for all)
 * @param batchSize     number of documents per insertMany call (>= 1)
 * @param excludeFields comma-separated field names left out of the documents
 */
@Override
public int insert(T entity[], int batchSize, String excludeFields) {
	String tableName = _toTableName(entity[0]);
	int len = entity.length;
	List list = null;
	StringBuffer logValueSql = null;
	int count = 0;
	MongoSqlStruct struct = new MongoSqlStruct("int", tableName, null, null, null,
			null, null, false, entity.getClass(), " (Artificial sql) Just define for cache: insert batch "); // for insert the payload travels in the updateSet slot
	String sql = struct.getSql();
	initRoute(SuidType.MODIFY, struct.getEntityClass(), sql);
	HoneyContext.addInContextForCache(sql, struct.getTableName());
	_log("Mongodb::batch insert: " + sql);
	DatabaseClientConnection conn = getConn();
	MongoDatabase db = getMongoDatabase(conn);
	try {
		for (int i = 1; i <= len; i++) { // i runs 1..len
			Map map = toDocumentExcludeSomeAndStoreFile(entity[i - 1], excludeFields, db);
			Document doc = newDoc(map);
			if ((i - 1) % batchSize == 0) { // start of a new chunk (also correct for batchSize == 1)
				list = new ArrayList<>();
				logValueSql = new StringBuffer("[");
			}
			if (list.size() != 0) logValueSql.append(",");
			logValueSql.append(JsonUtil.toJson(map));
			list.add(doc);
			if (i % batchSize == 0 || i == len) { // chunk full, or last entity: flush it
				logValueSql.append("]");
				struct = new MongoSqlStruct("int", tableName, null, null, null,
						null, null, false, entity.getClass(), logValueSql.toString());
				logInsertMany(struct);
				InsertManyResult irs;
				ClientSession session = MongoContext.getCurrentClientSession();
				if (session == null) {
					irs = db.getCollection(tableName).insertMany(list);
				} else { // inside a transaction: reuse the bound client session
					irs = db.getCollection(tableName).insertMany(session, list);
				}
				count += irs.getInsertedIds().size();
			}
		}
		logAffectRow(count);
		return count;
	} catch (Exception e) {
		if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
		logAffectRow(count);
		if (isConstraint(e)) { // duplicate-key: warn and report how many made it in
			Logger.warn(e.getMessage());
			return count;
		}
		throw ExceptionHelper.convert(e);
	} finally {
		clearInCache(sql, "int", SuidType.MODIFY, count);
		close(conn);
	}
}
/**
 * Converts the entity to a BasicDBObject via its field map.
 * Returns null when the entity produces an empty map.
 */
private BasicDBObject toDocument(T entity) {
	try {
		Map fieldMap = ParaConvertUtil.toMap(entity);
		return ObjectUtils.isNotEmpty(fieldMap) ? newDBObject(fieldMap) : null;
	} catch (Exception e) {
		throw ExceptionHelper.convert(e);
	}
}
/**
 * Builds the field map for the entity (excluding the given fields) and lets
 * _storeFile handle any embedded file content found in the map, if present.
 */
private Map toDocumentExcludeSomeAndStoreFile(T entity, String excludeFields, MongoDatabase db) {
	try {
		Map fieldMap = ParaConvertUtil.toMapExcludeSome(entity, excludeFields);
		_storeFile(fieldMap, db); // store the file first, if any
		return fieldMap;
	} catch (Exception e) {
		throw ExceptionHelper.convert(e);
	}
}
// Builds Mongo _id filter(s) from the raw id argument.
// Returns Object[2]: [0] = BasicDBObject filtering a single id (keyed by _id);
// [1] = a "$or" filter when a String id held several comma-separated values,
// null otherwise. Callers use [1] when non-null, else [0].
@SuppressWarnings("rawtypes")
private Object[] processId(Class clazz, Object id) {
Object obj[] = new Object[2];
BasicDBObject one = new BasicDBObject();
BasicDBObject moreFilter = null;
if (id instanceof String) {
// a String id may carry several ids separated by commas
String ids[] = ((String) id).split(",");
StringUtils.trim(ids);
String idType = getIdType(clazz, getPkName(clazz));
if (ids.length > 1) {
BasicDBObject idFilters[] = new BasicDBObject[ids.length];
int k = 0;
for (String idValue : ids) {
// values MongodbUtil recognizes as Mongo ids are wrapped as ObjectId
if ("String".equals(idType) && MongodbUtil.isMongodbId(idValue))
idFilters[k++] = new BasicDBObject(IDKEY, new ObjectId(idValue)); // switching to "in" would also work
else
idFilters[k++] = new BasicDBObject(IDKEY, tranIdObject(idType, idValue)); // switching to "in" would also work
}
// moreFilter = (BasicDBObject)Filters.or(idFilters);
moreFilter = new BasicDBObject();
moreFilter.put("$or", idFilters);
} else {
if ("String".equals(idType) && MongodbUtil.isMongodbId(ids[0]))
one.put(IDKEY, new ObjectId(ids[0]));
else
// convert the text id to the pk field's declared Java type
one.put(IDKEY, tranIdObject(idType, ids[0]));
}
} else {
// non-String ids (numbers etc.) are used as-is
one.put(IDKEY, id);
}
obj[0] = one;
obj[1] = moreFilter;
return obj;
}
/**
 * Returns the simple type name of the pk field (e.g. "Long"),
 * or null when the field cannot be resolved.
 */
@SuppressWarnings("rawtypes")
private String getIdType(Class clazz, String pkName) {
	try {
		Field pkField = HoneyUtil.getField(clazz, pkName);
		return pkField.getType().getSimpleName();
	} catch (Exception ignored) {
		// the field may legitimately be absent; callers cope with a null type
		return null;
	}
}
/**
 * Resolves the single primary-key field name of the entity class.
 * Prefers the default "id" field; otherwise falls back to the declared pk.
 *
 * @throws ObjSQLException when no pk is declared, or when more than one is.
 */
@SuppressWarnings("rawtypes")
private String getPkName(Class c) {
	try {
		// V1.11: the pk is not necessarily named "id", so probe for it first
		HoneyUtil.getField(c, "id");
		return "id";
	} catch (NoSuchFieldException e) {
		String declaredPk = HoneyUtil.getPkFieldNameByClass(c);
		if ("".equals(declaredPk)) {
			throw new ObjSQLException("No primary key in " + c.getName());
		}
		if (declaredPk.contains(",")) {
			throw new ObjSQLException(
					"method of selectById just need one primary key, but more than one primary key in "
							+ c.getName());
		}
		return declaredPk;
	}
}
/**
 * Converts the textual id value to the Java type named by idType
 * (Long/long, Integer/int, Short/short); any other or null type
 * returns the String value unchanged.
 */
private Object tranIdObject(String idType, String idValue) {
	if (idType == null) return idValue;
	switch (idType) {
		case "Long":
		case "long":
			return Long.parseLong(idValue);
		case "Integer":
		case "int":
			return Integer.parseInt(idValue);
		case "Short":
		case "short":
			return Short.parseShort(idValue);
		default:
			return idValue;
	}
}
/**
 * Deletes record(s) by id. A String id may hold several comma-separated
 * values; then all matching documents are removed via an "$or" filter
 * (deleteMany), otherwise a single document is removed (deleteOne).
 *
 * @return number of deleted documents.
 */
@Override
@SuppressWarnings("rawtypes")
public int deleteById(Class c, Object id) {
checkShardingSupport();
String tableName = _toTableNameByClass(c);
Object[] obj = processId(c, id);
BasicDBObject one = (BasicDBObject) obj[0];
Bson moreFilter = (Bson) obj[1];
Object filter;
// prefer the multi-id "$or" filter when processId produced one
if (moreFilter != null)
filter=moreFilter;
else
filter=one;
MongoSqlStruct struct = new MongoSqlStruct("int", tableName, (Bson)filter, null, null,
null, null, false,c);
String sql=struct.getSql();
initRoute(SuidType.MODIFY, struct.getEntityClass(), sql);
logSQLForMain("Mongodb::deleteById: "+sql);
logDelete(struct,moreFilter==null);
HoneyContext.addInContextForCache(sql, struct.getTableName());
int num=0;
DatabaseClientConnection conn = getConn();
try {
DeleteResult rs = null;
// run inside the current client session (transaction) when one exists
ClientSession session = getClientSession();
if (session == null) {
if (moreFilter != null)
rs = getMongoDatabase(conn).getCollection(tableName).deleteMany(moreFilter);
else
rs = getMongoDatabase(conn).getCollection(tableName).deleteOne(one);
} else {
if (moreFilter != null)
rs = getMongoDatabase(conn).getCollection(tableName).deleteMany(session, moreFilter);
else
rs = getMongoDatabase(conn).getCollection(tableName).deleteOne(session, one);
}
num=(int) rs.getDeletedCount();
logAffectRow(num);
return num;
} catch (Exception e) {
if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
throw ExceptionHelper.convert(e);
} finally {
// always clear cache bookkeeping and release the connection
clearInCache(sql, "int", SuidType.MODIFY, num);
close(conn);
}
}
/** Counts documents matching the entity + condition; a blank result counts as 0. */
@Override
public int count(T entity, Condition condition) {
	String table = _toTableName(entity);
	BasicDBObject countFilter = toDBObjectForFilter(entity, condition);
	Class entityClass = toClassT(entity);
	MongoSqlStruct struct = new MongoSqlStruct("int", table, countFilter, null, null, null,
			null, false, entityClass);
	String result = count(struct, entityClass);
	if (StringUtils.isBlank(result)) return 0;
	return Integer.parseInt(result);
}
// Count with sharding support: without sharding, delegates straight to _count.
// With sharding, the main thread first probes the cache; on a miss it fans the
// count out to the shards asynchronously and caches the merged result.
// Shard sub-threads (sql index set) call _count directly.
@Override
public String count(MongoSqlStruct struct, Class entityClass) {
if (!ShardingUtil.hadSharding()) {
return _count(struct, entityClass);
} else {
if (HoneyContext.getSqlIndexLocal() == null) {
String cacheValue = _count(struct, entityClass); // probe the cached value first
if (cacheValue != null) {
logDsTab();
return cacheValue;
}
String fun = "";
fun = new MongodbShardingSelectFunEngine().asynProcess(entityClass, this,
struct);
String sql = struct.getSql();
addInCache(sql, fun, 1);
return fun;
} else { // executed in a shard sub-thread
return _count(struct, entityClass);
}
}
}
// Executes the actual countDocuments call against Mongo.
// Returns the count as a String, a cached value when present, or null on the
// sharding main thread when no cached value exists yet.
public String _count(MongoSqlStruct struct, Class entityClass) {
String sql=struct.getSql();
logSQLForMain("Mongodb::count: "+sql);
logCount(struct);
HoneyContext.addInContextForCache(sql, struct.getTableName());
Object cacheObj = getCache().get(sql);
if (cacheObj != null) {
clearContext(sql);
return (String) cacheObj;
}
if (isShardingMain()) return null; // on the sharding main thread, return when there is no cache
String tableName=struct.getTableName();
BasicDBObject filter=(BasicDBObject)struct.getFilter();
DatabaseClientConnection conn = getConn();
try {
int c;
// run inside the current client session (transaction) when one exists
ClientSession session = getClientSession();
if (session == null) {
if (filter != null)
c = (int) getMongoDatabase(conn).getCollection(tableName).countDocuments(filter);
else
c = (int) getMongoDatabase(conn).getCollection(tableName).countDocuments();
} else {
if (filter != null)
c = (int) getMongoDatabase(conn).getCollection(tableName).countDocuments(session, filter);
else
c = (int) getMongoDatabase(conn).getCollection(tableName).countDocuments(session);
}
addInCache(sql, c + "", 1);
logAffectRow(c);
return c+"";
} finally {
close(conn);
}
}
/**
 * Inserts one entity and returns the inserted id as a long.
 * Supports numeric ids and numeric ids stored as strings; returns 0 when
 * no id comes back (num stays 0 unless a positive id was returned).
 */
@Override
public long insertAndReturnId(T entity, IncludeType includeType) {
	checkShardingSupport();
	String tableName = _toTableName(entity);
	String sql = "";
	int num = 0;
	Document doc = null;
	DatabaseClientConnection conn = null;
	try {
		Map map = ParaConvertUtil.toMap(entity, includeType == null ? -1 : includeType.getValue(), SuidType.INSERT);
		conn = getConn();
		MongoDatabase db = getMongoDatabase(conn);
		_storeFile(map, db); // store the embedded file content first, if any
		doc = newDoc(map);
		MongoSqlStruct struct = new MongoSqlStruct("int", tableName, null, null, null, null,
				null, false, entity.getClass(), newDBObject(map));
		sql = struct.getSql();
		initRoute(SuidType.MODIFY, struct.getEntityClass(), sql);
		logSQLForMain("Mongodb::insertAndReturnId: " + sql);
		logInsertOne(struct);
		HoneyContext.addInContextForCache(sql, struct.getTableName());
		// run inside the current client session (transaction) when one exists
		ClientSession session = getClientSession();
		BsonValue bv = null;
		if (session == null)
			bv = db.getCollection(tableName).insertOne(doc).getInsertedId();
		else
			bv = db.getCollection(tableName).insertOne(session, doc).getInsertedId();
		long r = 0;
		if (bv != null) {
			if (bv instanceof BsonString) { // a numeric id stored as a string is supported
				r = Long.parseLong(((BsonString) bv).getValue());
			} else {
				r = bv.asInt64().longValue();
			}
			if (r > 0) num = 1;
		}
		return r;
	} catch (Exception e) {
		// FIX: the MongoTimeoutException warning was logged twice in this
		// catch block (before and after the constraint handling); log it once.
		if (e instanceof MongoTimeoutException) Logger.warn(Timeout_MSG);
		boolean notCatch = HoneyConfig.getHoneyConfig().notCatchModifyDuplicateException;
		if (!notCatch && isConstraint(e)) { // duplicate exception handled by the Bee framework
			boolean notShow = HoneyConfig.getHoneyConfig().notShowModifyDuplicateException;
			if (!notShow) Logger.warn(e.getMessage());
			return num;
		}
		Logger.warn("Confirm that the returned value is numeric type!");
		throw ExceptionHelper.convert(e);
	} finally {
		logAffectRow(num);
		clearInCache(sql, "int", SuidType.MODIFY, num);
		close(conn);
	}
}
/**
 * SQL function: max,min,avg,sum,count. When the statistical result set is
 * empty, count returns 0 and the other functions return an empty string.
 */
@Override
public String selectWithFun(T entity, FunctionType functionType, String fieldForFun,
Condition condition) {
if(entity==null) return null;
// COUNT goes through the dedicated count path
if (FunctionType.COUNT == functionType) {
return count(entity, condition)+"";
}
// the aggregation pipeline's last stage cannot be null, so do not split here
String tableName = _toTableName(entity);
BasicDBObject filter = toDBObjectForFilter(entity, condition);
Bson funBson = null;
// map the logical "id" field onto Mongo's _id key
if ("id".equalsIgnoreCase(fieldForFun)) fieldForFun = IDKEY;
// if (filter != null) listBson.add(Aggregates.match(filter)); // the filter belongs in the match stage
// build the $group accumulator for the requested function;
// NOTE(review): an unrecognized functionType leaves funBson null — confirm
// downstream handling before adding new FunctionType values.
if (FunctionType.MAX == functionType) {
// fun=Arrays.asList(Aggregates.match(filter), group(null, max("_fun", "$"+fieldForFun)) );
funBson = group(null, max("_fun", "$" + fieldForFun));
} else if (FunctionType.MIN == functionType) {
funBson = group(null, min("_fun", "$" + fieldForFun));
} else if (FunctionType.AVG == functionType) {
funBson = group(null, avg("_fun", "$" + fieldForFun));
} else if (FunctionType.SUM == functionType) {
funBson = group(null, sum("_fun", "$" + fieldForFun)); // when the value is null, sum yields 0
}
Class entityClass = toClassT(entity);
MongoSqlStruct struct = new MongoSqlStruct("int", tableName, filter, null, null,
null, null, false,entityClass,funBson);
return selectWithFun(struct, entityClass);
}
// Function-select with sharding support: without sharding delegates straight
// to _selectWithFun. With sharding, the main thread probes the cache first;
// on a miss it fans the work out to the shards asynchronously (AVG is
// explicitly not merged here) and caches the result. Shard sub-threads
// (sql index set) call _selectWithFun directly.
@Override
public String selectWithFun(MongoSqlStruct struct, Class entityClass) {
if (!ShardingUtil.hadSharding()) {
return _selectWithFun(struct, entityClass);
} else {
if (HoneyContext.getSqlIndexLocal() == null) {
String cacheValue=_selectWithFun(struct, entityClass); // probe the cached value first
if(cacheValue!=null) {
logDsTab();
return cacheValue;
}
String fun = "";
String funType = HoneyContext.getSysCommStrInheritableLocal(StringConst.FunType);
if (FunctionType.AVG.getName().equalsIgnoreCase(funType)) {
Logger.warn("AVG do not process here!");
} else {
fun = new MongodbShardingSelectFunEngine().asynProcess(entityClass, this, struct);
}
String sql = struct.getSql();
addInCache(sql, fun, 1);
return fun;
} else { // executed in a shard sub-thread
return _selectWithFun(struct, entityClass);
}
}
}
public String _selectWithFun(MongoSqlStruct struct, Class entityClass) {
BasicDBObject filter = (BasicDBObject) struct.getFilter();
String tableName = struct.getTableName();
String sql = struct.getSql();
logSQLForMain("Mongodb::selectWithFun: "+sql);
HoneyContext.addInContextForCache(sql, tableName);
Object cacheObj = getCache().get(sql);
if (cacheObj != null) {
clearContext(sql);
return (String) cacheObj;
}
if (isShardingMain()) return null; // sharding时,主线程没有缓存就返回.
DatabaseClientConnection conn = getConn();
try {
MongoCollection collection = getMongoDatabase(conn).getCollection(tableName);
List listBson = new ArrayList<>();
Bson funBson = (Bson) struct.getUpdateSetOrInsertOrFunOrOther();
BasicDBObject match=null;
if (filter != null) {
listBson.add(Aggregates.match(filter)); // 过滤条件,要放在match里
match=new BasicDBObject();
match.put("$match", filter);
}
// if (FunctionType.MAX == functionType) {
//// fun=Arrays.asList(Aggregates.match(filter), group(null, max("_fun", "$"+fieldForFun)) );
// funBson = group(null, max("_fun", "$" + fieldForFun));
// } else if (FunctionType.MIN == functionType) {
// funBson = group(null, min("_fun", "$" + fieldForFun));
// } else if (FunctionType.AVG == functionType) {
// funBson = group(null, avg("_fun", "$" + fieldForFun));
// } else if (FunctionType.SUM == functionType) {
// funBson = group(null, sum("_fun", "$" + fieldForFun)); // 统计的值为null时, sum: 0
// }
struct = new MongoSqlStruct("int", tableName, match, null, null,
null, null, false, null, funBson); // this method no entityClass
logGroup(struct); //不准确 todo
ClientSession session = getClientSession();
listBson.add(funBson);
Document rs = null;
if (session == null)
rs = collection.aggregate(listBson).first();
else
rs = collection.aggregate(session, listBson).first();
String fun = "";
if (rs != null) {
Logger.debug("selectWithFun result raw json: "+rs.toJson());
// Logger.debug(rs.get("_fun")+"");
//
// Map jsonMap = null;
// try {
// ObjectMapper objectMapper = new ObjectMapper();
// jsonMap = objectMapper.readValue(rs.toJson(),
// new TypeReference
© 2015 - 2025 Weber Informatics LLC | Privacy Policy