/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iotdb.db.metadata;
import static java.util.stream.Collectors.toList;
import static org.apache.iotdb.tsfile.common.constant.TsFileConstant.PATH_SEPARATOR;
import java.io.BufferedReader;
import java.io.File;
import java.io.FileReader;
import java.io.IOException;
import java.nio.file.Files;
import java.util.ArrayDeque;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.Deque;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.CopyOnWriteArraySet;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import org.apache.iotdb.db.conf.IoTDBConfig;
import org.apache.iotdb.db.conf.IoTDBDescriptor;
import org.apache.iotdb.db.engine.StorageEngine;
import org.apache.iotdb.db.engine.fileSystem.SystemFileFactory;
import org.apache.iotdb.db.engine.storagegroup.StorageGroupProcessor;
import org.apache.iotdb.db.exception.StorageEngineException;
import org.apache.iotdb.db.exception.metadata.AliasAlreadyExistException;
import org.apache.iotdb.db.exception.metadata.DataTypeMismatchException;
import org.apache.iotdb.db.exception.metadata.DeleteFailedException;
import org.apache.iotdb.db.exception.metadata.IllegalPathException;
import org.apache.iotdb.db.exception.metadata.MetadataException;
import org.apache.iotdb.db.exception.metadata.PathAlreadyExistException;
import org.apache.iotdb.db.exception.metadata.PathNotExistException;
import org.apache.iotdb.db.exception.metadata.StorageGroupAlreadySetException;
import org.apache.iotdb.db.exception.metadata.StorageGroupNotSetException;
import org.apache.iotdb.db.metadata.mnode.MNode;
import org.apache.iotdb.db.metadata.mnode.MeasurementMNode;
import org.apache.iotdb.db.metadata.mnode.StorageGroupMNode;
import org.apache.iotdb.db.monitor.MonitorConstants;
import org.apache.iotdb.db.qp.constant.SQLConstant;
import org.apache.iotdb.db.qp.physical.crud.InsertPlan;
import org.apache.iotdb.db.qp.physical.crud.InsertRowPlan;
import org.apache.iotdb.db.qp.physical.crud.InsertTabletPlan;
import org.apache.iotdb.db.qp.physical.sys.CreateTimeSeriesPlan;
import org.apache.iotdb.db.qp.physical.sys.ShowTimeSeriesPlan;
import org.apache.iotdb.db.query.context.QueryContext;
import org.apache.iotdb.db.query.dataset.ShowTimeSeriesResult;
import org.apache.iotdb.db.rescon.MemTableManager;
import org.apache.iotdb.db.rescon.PrimitiveArrayManager;
import org.apache.iotdb.db.utils.RandomDeleteCache;
import org.apache.iotdb.db.utils.SchemaUtils;
import org.apache.iotdb.db.utils.TypeInferenceUtils;
import org.apache.iotdb.tsfile.common.conf.TSFileDescriptor;
import org.apache.iotdb.tsfile.exception.cache.CacheException;
import org.apache.iotdb.tsfile.exception.write.UnSupportedDataTypeException;
import org.apache.iotdb.tsfile.file.metadata.enums.CompressionType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSDataType;
import org.apache.iotdb.tsfile.file.metadata.enums.TSEncoding;
import org.apache.iotdb.tsfile.read.TimeValuePair;
import org.apache.iotdb.tsfile.utils.Pair;
import org.apache.iotdb.tsfile.write.schema.MeasurementSchema;
import org.apache.iotdb.tsfile.write.schema.TimeseriesSchema;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * This class is responsible for serializing all metadata and persisting it into files. It
 * contains all the interfaces for modifying the metadata of the system. Every operation is
 * first appended to the metadata log so that it can be replayed if the system goes down.
 */
public class MManager {
public static final String TIME_SERIES_TREE_HEADER = "=== Timeseries Tree ===\n\n";
private static final String TAG_FORMAT = "tag key is %s, tag value is %s, tlog offset is %d";
private static final String DEBUG_MSG = "%s : TimeSeries %s is removed from tag inverted index, ";
private static final String DEBUG_MSG_1 = "%s: TimeSeries %s's tag info has been removed from tag inverted index ";
private static final String PREVIOUS_CONDITION = "before deleting it, tag key is %s, tag value is %s, tlog offset is %d, contains key %b";
private static final int UPDATE_SCHEMA_MAP_IN_ARRAYPOOL_THRESHOLD = 5000;
private static final Logger logger = LoggerFactory.getLogger(MManager.class);
/**
 * A background thread checks at this interval whether the MTree has been modified recently.
 * Unit: second
 */
private static final long MTREE_SNAPSHOT_THREAD_CHECK_TIME = 600L;
private final int mtreeSnapshotInterval;
private final long mtreeSnapshotThresholdTime;
// the log file path
private String logFilePath;
private String mtreeSnapshotPath;
private String mtreeSnapshotTmpPath;
private MTree mtree;
private MLogWriter logWriter;
private TagLogFile tagLogFile;
private boolean isRecovering;
// device -> DeviceMNode
private RandomDeleteCache<PartialPath, MNode> mNodeCache;
// tag key -> tag value -> LeafMNode
private Map<String, Map<String, Set<MeasurementMNode>>> tagIndex = new ConcurrentHashMap<>();
// data type -> number
private Map<TSDataType, Integer> schemaDataTypeNumMap = new ConcurrentHashMap<>();
// reported total series number
private long reportedDataTypeTotalNum;
private AtomicLong totalSeriesNumber = new AtomicLong();
private boolean initialized;
protected static IoTDBConfig config = IoTDBDescriptor.getInstance().getConfig();
private File logFile;
private ScheduledExecutorService timedCreateMTreeSnapshotThread;
/**
* threshold total size of MTree
*/
private static final long MTREE_SIZE_THRESHOLD = config.getAllocateMemoryForSchema();
private boolean allowToCreateNewSeries = true;
private static final int ESTIMATED_SERIES_SIZE = config.getEstimatedSeriesSize();
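// Back-pressure sketch (numbers illustrative, not configuration defaults): series
// creation is rejected once totalSeriesNumber * ESTIMATED_SERIES_SIZE reaches
// MTREE_SIZE_THRESHOLD, e.g. 4 GB of schema memory at an estimated 300 bytes per
// series allows roughly 4_294_967_296 / 300 ~= 14.3 million series.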
private static class MManagerHolder {
private MManagerHolder() {
// allowed to do nothing
}
private static final MManager INSTANCE = new MManager();
}
protected MManager() {
mtreeSnapshotInterval = config.getMtreeSnapshotInterval();
mtreeSnapshotThresholdTime = config.getMtreeSnapshotThresholdTime() * 1000L;
String schemaDir = config.getSchemaDir();
File schemaFolder = SystemFileFactory.INSTANCE.getFile(schemaDir);
if (!schemaFolder.exists()) {
if (schemaFolder.mkdirs()) {
logger.info("create system folder {}", schemaFolder.getAbsolutePath());
} else {
logger.info("create system folder {} failed.", schemaFolder.getAbsolutePath());
}
}
logFilePath = schemaDir + File.separator + MetadataConstant.METADATA_LOG;
mtreeSnapshotPath = schemaDir + File.separator + MetadataConstant.MTREE_SNAPSHOT;
mtreeSnapshotTmpPath = schemaDir + File.separator + MetadataConstant.MTREE_SNAPSHOT_TMP;
// do not write the log while recovering
isRecovering = true;
int cacheSize = config.getmManagerCacheSize();
mNodeCache = new RandomDeleteCache<PartialPath, MNode>(cacheSize) {
@Override
public MNode loadObjectByKey(PartialPath key) throws CacheException {
try {
return mtree.getNodeByPathWithStorageGroupCheck(key);
} catch (MetadataException e) {
throw new CacheException(e);
}
}
};
if (config.isEnableMTreeSnapshot()) {
timedCreateMTreeSnapshotThread = Executors.newSingleThreadScheduledExecutor(r -> new Thread(r,
"timedCreateMTreeSnapshotThread"));
timedCreateMTreeSnapshotThread
.scheduleAtFixedRate(this::checkMTreeModified, MTREE_SNAPSHOT_THREAD_CHECK_TIME,
MTREE_SNAPSHOT_THREAD_CHECK_TIME, TimeUnit.SECONDS);
}
}
/**
 * This method should only be called by the IoTDB class, not anywhere else.
 */
public static MManager getInstance() {
return MManagerHolder.INSTANCE;
}
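// Usage sketch (hypothetical caller, not part of this class):
//
//   MManager mManager = MManager.getInstance();
//   mManager.init();
//   mManager.setStorageGroup(new PartialPath("root.sg1"));
//   mManager.createTimeseries(new PartialPath("root.sg1.d1.s1"), TSDataType.DOUBLE,
//       TSEncoding.RLE, CompressionType.SNAPPY, null);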
// Because the writer will be used later and should not be closed here.
@SuppressWarnings("squid:S2093")
public synchronized void init() {
if (initialized) {
return;
}
logFile = SystemFileFactory.INSTANCE.getFile(logFilePath);
try {
tagLogFile = new TagLogFile(config.getSchemaDir(), MetadataConstant.TAG_LOG);
isRecovering = true;
int lineNumber = initFromLog(logFile);
List<PartialPath> storageGroups = mtree.getAllStorageGroupPaths();
for (PartialPath sg : storageGroups) {
MNode node = mtree.getNodeByPath(sg);
totalSeriesNumber.addAndGet(node.getLeafCount());
}
logWriter = new MLogWriter(config.getSchemaDir(), MetadataConstant.METADATA_LOG);
logWriter.setLineNumber(lineNumber);
isRecovering = false;
} catch (IOException | MetadataException e) {
mtree = new MTree();
logger.error("Cannot read MTree from file, using an empty new one", e);
}
reportedDataTypeTotalNum = 0L;
initialized = true;
}
/**
 * @return the number of log lines replayed from the logFile
 */
@SuppressWarnings("squid:S3776")
private int initFromLog(File logFile) throws IOException {
File tmpFile = SystemFileFactory.INSTANCE.getFile(mtreeSnapshotTmpPath);
if (tmpFile.exists()) {
logger.warn("Creating MTree snapshot not successful before crashing...");
Files.delete(tmpFile.toPath());
}
File mtreeSnapshot = SystemFileFactory.INSTANCE.getFile(mtreeSnapshotPath);
long time = System.currentTimeMillis();
if (!mtreeSnapshot.exists()) {
mtree = new MTree();
} else {
mtree = MTree.deserializeFrom(mtreeSnapshot);
logger.debug("spend {} ms to deserialize mtree from snapshot",
System.currentTimeMillis() - time);
}
time = System.currentTimeMillis();
// init the metadata from the operation log
if (logFile.exists()) {
int idx = 0;
try (FileReader fr = new FileReader(logFile);
BufferedReader br = new BufferedReader(fr)) {
String cmd;
while ((cmd = br.readLine()) != null) {
try {
operation(cmd);
idx++;
} catch (Exception e) {
logger.error("Can not operate cmd {}", cmd, e);
}
}
}
logger.debug("spend {} ms to deserialize mtree from mlog.txt",
System.currentTimeMillis() - time);
return idx;
} else if (mtreeSnapshot.exists()) {
throw new IOException("mtree snapshot file exists but mlog.txt does not exist.");
} else {
return 0;
}
}
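// Recovery order used above (summary): 1) delete a leftover snapshot .tmp file from an
// interrupted snapshot, 2) deserialize mtree.snapshot if it exists, 3) replay mlog.txt
// line by line through operation(); the returned line count lets the log writer keep
// appending at the correct position.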
/**
* function for clearing MTree
*/
public void clear() {
try {
this.mtree = new MTree();
this.mNodeCache.clear();
this.tagIndex.clear();
this.totalSeriesNumber.set(0);
if (logWriter != null) {
logWriter.close();
logWriter = null;
}
if (tagLogFile != null) {
tagLogFile.close();
tagLogFile = null;
}
this.schemaDataTypeNumMap.clear();
this.reportedDataTypeTotalNum = 0L;
initialized = false;
if (config.isEnableMTreeSnapshot() && timedCreateMTreeSnapshotThread != null) {
timedCreateMTreeSnapshotThread.shutdownNow();
timedCreateMTreeSnapshotThread = null;
}
} catch (IOException e) {
logger.error("Cannot close metadata log writer, because:", e);
}
}
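// A CREATE_TIMESERIES log line carries 8 comma-separated fields:
//   opType,path,dataType,encoding,compressor,props,alias,tagOffset
// e.g. (field values are illustrative only):
//   <CREATE_TIMESERIES>,root.sg1.d1.s1,3,2,1,max_point_number=2,temp,123
// If the path itself contains commas, the surplus fields are merged back into args[1]
// by the branch below.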
public void operation(String cmd) throws IOException, MetadataException {
// see createTimeseries() to get the detailed format of the cmd
String[] args = cmd.trim().split(",", -1);
switch (args[0]) {
case MetadataOperationType.CREATE_TIMESERIES:
if (args.length > 8) {
String[] tmpArgs = new String[8];
tmpArgs[0] = args[0];
int i = 1;
tmpArgs[1] = "";
for (; i < args.length - 7; i++) {
tmpArgs[1] += args[i] + ",";
}
tmpArgs[1] += args[i++];
for (int j = 2; j < 8; j++) {
tmpArgs[j] = args[i++];
}
args = tmpArgs;
}
Map<String, String> props = null;
if (!args[5].isEmpty()) {
String[] keyValues = args[5].split("&");
String[] kv;
props = new HashMap<>();
for (String keyValue : keyValues) {
kv = keyValue.split("=");
props.put(kv[0], kv[1]);
}
}
String alias = null;
if (!args[6].isEmpty()) {
alias = args[6];
}
long offset = -1L;
Map<String, String> tagMap = null;
if (!args[7].isEmpty()) {
offset = Long.parseLong(args[7]);
tagMap = tagLogFile.readTag(config.getTagAttributeTotalSize(), offset);
}
CreateTimeSeriesPlan plan = new CreateTimeSeriesPlan(new PartialPath(args[1]),
TSDataType.deserialize(Short.parseShort(args[2])),
TSEncoding.deserialize(Short.parseShort(args[3])),
CompressionType.deserialize(Short.parseShort(args[4])), props, tagMap, null, alias);
createTimeseries(plan, offset);
break;
case MetadataOperationType.DELETE_TIMESERIES:
if (args.length > 2) {
StringBuilder tmp = new StringBuilder();
for (int i = 1; i < args.length - 1; i++) {
tmp.append(args[i]).append(",");
}
tmp.append(args[args.length - 1]);
args[1] = tmp.toString();
}
String failedTimeseries = deleteTimeseries(new PartialPath(args[1]));
if (!failedTimeseries.isEmpty()) {
throw new DeleteFailedException(failedTimeseries);
}
break;
case MetadataOperationType.SET_STORAGE_GROUP:
try {
setStorageGroup(new PartialPath(args[1]));
}
// two time series may set one storage group concurrently,
// that's normal in our concurrency control protocol
catch (MetadataException e) {
logger.info("concurrently operate set storage group cmd {} twice", cmd);
}
break;
case MetadataOperationType.DELETE_STORAGE_GROUP:
deleteStorageGroups(Collections.singletonList(new PartialPath(args[1])));
break;
case MetadataOperationType.SET_TTL:
setTTL(new PartialPath(args[1]), Long.parseLong(args[2]));
break;
case MetadataOperationType.CHANGE_OFFSET:
changeOffset(new PartialPath(args[1]), Long.parseLong(args[2]));
break;
case MetadataOperationType.CHANGE_ALIAS:
changeAlias(new PartialPath(args[1]), args[2]);
break;
default:
logger.error("Unrecognizable command {}", cmd);
}
}
public void createTimeseries(CreateTimeSeriesPlan plan) throws MetadataException {
createTimeseries(plan, -1);
}
@SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
public void createTimeseries(CreateTimeSeriesPlan plan, long offset) throws MetadataException {
if (!allowToCreateNewSeries) {
throw new MetadataException("IoTDB system load is too large to create timeseries, "
+ "please increase MAX_HEAP_SIZE in iotdb-env.sh/bat and restart");
}
try {
PartialPath path = plan.getPath();
SchemaUtils.checkDataTypeWithEncoding(plan.getDataType(), plan.getEncoding());
try {
mtree.getStorageGroupPath(path);
} catch (StorageGroupNotSetException e) {
if (!config.isAutoCreateSchemaEnabled()) {
throw e;
}
PartialPath storageGroupPath =
MetaUtils.getStorageGroupPathByLevel(path, config.getDefaultStorageGroupLevel());
setStorageGroup(storageGroupPath);
}
TSDataType type = plan.getDataType();
// create time series in MTree
MeasurementMNode leafMNode = mtree
.createTimeseries(path, type, plan.getEncoding(), plan.getCompressor(),
plan.getProps(), plan.getAlias());
// update tag index
if (plan.getTags() != null) {
// tag key, tag value
for (Entry<String, String> entry : plan.getTags().entrySet()) {
if (entry.getKey() == null || entry.getValue() == null) {
continue;
}
tagIndex.computeIfAbsent(entry.getKey(), k -> new ConcurrentHashMap<>())
.computeIfAbsent(entry.getValue(), v -> new CopyOnWriteArraySet<>()).add(leafMNode);
}
}
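// e.g. (illustrative): after creating root.sg1.d1.s1 with the tag {"unit": "ms"}, the
// inverted index holds {"unit" -> {"ms" -> {MeasurementMNode of s1}}}, so a SHOW
// TIMESERIES query filtered by unit=ms can locate the node without scanning the MTree.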
// update statistics and schemaDataTypeNumMap
totalSeriesNumber.addAndGet(1);
if (totalSeriesNumber.get() * ESTIMATED_SERIES_SIZE >= MTREE_SIZE_THRESHOLD) {
logger.warn("Current series number {} is too large...", totalSeriesNumber);
allowToCreateNewSeries = false;
}
updateSchemaDataTypeNumMap(type, 1);
// write log
if (!isRecovering) {
// either tags or attributes is not empty
if ((plan.getTags() != null && !plan.getTags().isEmpty())
|| (plan.getAttributes() != null && !plan.getAttributes().isEmpty())) {
offset = tagLogFile.write(plan.getTags(), plan.getAttributes());
}
logWriter.createTimeseries(plan, offset);
}
leafMNode.setOffset(offset);
} catch (IOException e) {
throw new MetadataException(e.getMessage());
}
}
/**
 * Add one timeseries to the metadata tree. If the timeseries already exists, the resulting
 * exception is logged and swallowed, since the series may have been created concurrently by
 * another thread.
 *
 * @param path the timeseries path
 * @param dataType the data type {@code DataType} of the timeseries
 * @param encoding the encoding function {@code Encoding} of the timeseries
 * @param compressor the compressor function {@code Compressor} of the time series
 */
public void createTimeseries(PartialPath path, TSDataType dataType, TSEncoding encoding,
CompressionType compressor, Map<String, String> props) throws MetadataException {
try {
createTimeseries(
new CreateTimeSeriesPlan(path, dataType, encoding, compressor, props, null, null, null));
} catch (PathAlreadyExistException | AliasAlreadyExistException e) {
// just log it; the series may have been created concurrently by another thread
logger.info("Concurrent create timeseries failed, use other thread's result");
}
}
/**
 * Delete all timeseries under the given path; the path may cross different storage groups.
 *
 * @param prefixPath path to be deleted, could be root or a prefix path or a full path
 * @return a comma-separated list of the timeseries that failed to be deleted
 */
public String deleteTimeseries(PartialPath prefixPath) throws MetadataException {
if (isStorageGroup(prefixPath)) {
mNodeCache.clear();
}
try {
List<PartialPath> allTimeseries = mtree.getAllTimeseriesPath(prefixPath);
// the monitor storage group path is not allowed to be deleted
allTimeseries.removeIf(p -> p.startsWith(MonitorConstants.getStatStorageGroupPrefixArray()));
Set<String> failedNames = new HashSet<>();
for (PartialPath p : allTimeseries) {
try {
PartialPath emptyStorageGroup = deleteOneTimeseriesAndUpdateStatistics(p);
if (!isRecovering) {
if (emptyStorageGroup != null) {
StorageEngine.getInstance().deleteAllDataFilesInOneStorageGroup(emptyStorageGroup);
}
logWriter.deleteTimeseries(p.getFullPath());
}
} catch (DeleteFailedException e) {
failedNames.add(e.getName());
}
}
return String.join(",", failedNames);
} catch (IOException e) {
throw new MetadataException(e.getMessage());
}
}
/**
* remove the node from the tag inverted index
*/
@SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
private void removeFromTagInvertedIndex(MeasurementMNode node) throws IOException {
if (node.getOffset() < 0) {
return;
}
Map<String, String> tagMap =
tagLogFile.readTag(config.getTagAttributeTotalSize(), node.getOffset());
if (tagMap != null) {
for (Entry<String, String> entry : tagMap.entrySet()) {
if (tagIndex.containsKey(entry.getKey()) && tagIndex.get(entry.getKey())
.containsKey(entry.getValue())) {
if (logger.isDebugEnabled()) {
logger.debug(String.format(String.format(DEBUG_MSG, "Delete" + TAG_FORMAT,
node.getFullPath()), entry.getKey(), entry.getValue(), node.getOffset()));
}
tagIndex.get(entry.getKey()).get(entry.getValue()).remove(node);
if (tagIndex.get(entry.getKey()).get(entry.getValue()).isEmpty()) {
tagIndex.get(entry.getKey()).remove(entry.getValue());
if (tagIndex.get(entry.getKey()).isEmpty()) {
tagIndex.remove(entry.getKey());
}
}
} else {
if (logger.isDebugEnabled()) {
logger.debug(String.format(String.format(DEBUG_MSG_1, "Delete" + PREVIOUS_CONDITION,
node.getFullPath()), entry.getKey(), entry.getValue(), node.getOffset(),
tagIndex.containsKey(entry.getKey())));
}
}
}
}
}
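// e.g. (illustrative): when the last series tagged unit=ms is removed above, the "ms"
// entry is dropped from the inner map, and if "unit" no longer maps to any value the
// "unit" key is dropped from the index as well.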
/**
 * @param path full path from root to leaf node
 * @return if the storage group becomes empty after the deletion, return its path; otherwise return null
 */
private PartialPath deleteOneTimeseriesAndUpdateStatistics(PartialPath path)
throws MetadataException, IOException {
Pair<PartialPath, MeasurementMNode> pair = mtree
.deleteTimeseriesAndReturnEmptyStorageGroup(path);
removeFromTagInvertedIndex(pair.right);
PartialPath storageGroupPath = pair.left;
// update statistics in schemaDataTypeNumMap
updateSchemaDataTypeNumMap(pair.right.getSchema().getType(), -1);
// TODO: delete the path node and all its ancestors
mNodeCache.clear();
totalSeriesNumber.addAndGet(-1);
if (!allowToCreateNewSeries &&
totalSeriesNumber.get() * ESTIMATED_SERIES_SIZE < MTREE_SIZE_THRESHOLD) {
logger.info("Current series number {} come back to normal level", totalSeriesNumber);
allowToCreateNewSeries = true;
}
return storageGroupPath;
}
/**
* Set storage group of the given path to MTree.
*
* @param storageGroup root.node.(node)*
*/
public void setStorageGroup(PartialPath storageGroup) throws MetadataException {
try {
mtree.setStorageGroup(storageGroup);
if (!config.isEnableMemControl()) {
MemTableManager.getInstance().addOrDeleteStorageGroup(1);
}
if (!isRecovering) {
logWriter.setStorageGroup(storageGroup.getFullPath());
}
} catch (IOException e) {
throw new MetadataException(e.getMessage());
}
}
/**
* Delete storage groups of given paths from MTree. Log format: "delete_storage_group,sg1,sg2,sg3"
*
* @param storageGroups list of paths to be deleted. Format: root.node
*/
public void deleteStorageGroups(List<PartialPath> storageGroups) throws MetadataException {
try {
for (PartialPath storageGroup : storageGroups) {
totalSeriesNumber.addAndGet(-mtree.getAllTimeseriesCount(storageGroup));
// clear cached MNode
if (!allowToCreateNewSeries &&
totalSeriesNumber.get() * ESTIMATED_SERIES_SIZE < MTREE_SIZE_THRESHOLD) {
logger.info("Current series number {} come back to normal level", totalSeriesNumber);
allowToCreateNewSeries = true;
}
mNodeCache.clear();
// try to delete storage group
List<MeasurementMNode> leafMNodes = mtree.deleteStorageGroup(storageGroup);
for (MeasurementMNode leafMNode : leafMNodes) {
removeFromTagInvertedIndex(leafMNode);
// update statistics in schemaDataTypeNumMap
updateSchemaDataTypeNumMap(leafMNode.getSchema().getType(), -1);
}
if (!config.isEnableMemControl()) {
MemTableManager.getInstance().addOrDeleteStorageGroup(-1);
}
// if success
if (!isRecovering) {
logWriter.deleteStorageGroup(storageGroup.getFullPath());
}
}
} catch (IOException e) {
throw new MetadataException(e.getMessage());
}
}
/**
* update statistics in schemaDataTypeNumMap
*
* @param type data type
* @param num 1 for creating timeseries and -1 for deleting timeseries
*/
private synchronized void updateSchemaDataTypeNumMap(TSDataType type, int num) {
// count one series of the given data type
schemaDataTypeNumMap.put(type, schemaDataTypeNumMap.getOrDefault(type, 0) + num);
// every series also implies an INT64 time column
schemaDataTypeNumMap.put(TSDataType.INT64,
schemaDataTypeNumMap.getOrDefault(TSDataType.INT64, 0) + num);
// the current total number of data type occurrences (twice the number of time series),
// used by the primitive array manager
long currentDataTypeTotalNum = totalSeriesNumber.get() * 2;
if (num > 0 && currentDataTypeTotalNum - reportedDataTypeTotalNum
>= UPDATE_SCHEMA_MAP_IN_ARRAYPOOL_THRESHOLD) {
PrimitiveArrayManager.updateSchemaDataTypeNum(schemaDataTypeNumMap, currentDataTypeTotalNum);
reportedDataTypeTotalNum = currentDataTypeTotalNum;
}
}
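// Worked example (illustrative): creating one FLOAT series yields {FLOAT: 1, INT64: 1};
// a second FLOAT series yields {FLOAT: 2, INT64: 2}. With 2 series,
// currentDataTypeTotalNum = 2 * 2 = 4, and the array pool is re-notified only after the
// total has grown by UPDATE_SCHEMA_MAP_IN_ARRAYPOOL_THRESHOLD (5000) since the last report.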
/**
 * Check whether the given path is a storage group or not.
 *
 * @param path Format: root.node.(node)*
 * @apiNote for cluster
 */
public boolean isStorageGroup(PartialPath path) {
return mtree.isStorageGroup(path);
}
/**
* Get series type for given seriesPath.
*
* @param path full path
*/
public TSDataType getSeriesType(PartialPath path) throws MetadataException {
if (path.equals(SQLConstant.TIME_PATH)) {
return TSDataType.INT64;
}
return mtree.getSchema(path).getType();
}
public MeasurementMNode[] getMNodes(PartialPath deviceId, String[] measurements)
throws MetadataException {
MNode deviceNode = getNodeByPath(deviceId);
MeasurementMNode[] mNodes = new MeasurementMNode[measurements.length];
for (int i = 0; i < mNodes.length; i++) {
mNodes[i] = ((MeasurementMNode) deviceNode.getChild(measurements[i]));
if (mNodes[i] == null && !IoTDBDescriptor.getInstance().getConfig().isEnablePartialInsert()) {
throw new MetadataException(measurements[i] + " does not exist in " + deviceId);
}
}
return mNodes;
}
/**
 * Get all devices under the given prefixPath.
 *
 * @param prefixPath a prefix of a full path. If the wildcard is not at the tail, each
 * wildcard can only match one level; otherwise it can match to the tail.
 * @return A HashSet instance which stores the device paths with the given prefixPath.
 */
public Set<PartialPath> getDevices(PartialPath prefixPath) throws MetadataException {
return mtree.getDevices(prefixPath);
}
/**
 * Get all nodes at the given level.
 *
 * @param prefixPath a prefix of a full path. It cannot be a full path and cannot contain a
 * wildcard. However, the level of the prefixPath can be smaller than the given
 * level, e.g., prefixPath = root.a while the given level is 5
 * @param nodeLevel the level, which cannot be smaller than the level of the prefixPath
 * @return A List instance which stores all nodes at the given level
 */
public List<PartialPath> getNodesList(PartialPath prefixPath, int nodeLevel)
throws MetadataException {
return getNodesList(prefixPath, nodeLevel, null);
}
public List<PartialPath> getNodesList(PartialPath prefixPath, int nodeLevel,
StorageGroupFilter filter)
throws MetadataException {
return mtree.getNodesList(prefixPath, nodeLevel, filter);
}
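// e.g. (illustrative): for a tree containing root.sg1.d1.s1 and root.sg1.d2.s1, calling
// getNodesList(new PartialPath("root.sg1"), 2, null) returns [root.sg1.d1, root.sg1.d2],
// while level 3 returns the two sensor paths (root is at level 0).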
/**
* Get storage group name by path
*
* e.g., root.sg1 is a storage group and path = root.sg1.d1, return root.sg1
*
* @return storage group in the given path
*/
public PartialPath getStorageGroupPath(PartialPath path) throws StorageGroupNotSetException {
return mtree.getStorageGroupPath(path);
}
/**
* Get all storage group paths
*/
public List<PartialPath> getAllStorageGroupPaths() {
return mtree.getAllStorageGroupPaths();
}
public List<PartialPath> searchAllRelatedStorageGroups(PartialPath path)
throws MetadataException {
return mtree.searchAllRelatedStorageGroups(path);
}
/**
 * Get all storage groups under the given prefixPath.
 *
 * @param prefixPath a prefix of a full path. If the wildcard is not at the tail, each
 * wildcard can only match one level; otherwise it can match to the tail.
 * @return An ArrayList instance which stores the storage group paths with the given prefixPath.
 */
public List<PartialPath> getStorageGroupPaths(PartialPath prefixPath) throws MetadataException {
return mtree.getStorageGroupPaths(prefixPath);
}
/**
* Get all storage group MNodes
*/
public List<StorageGroupMNode> getAllStorageGroupNodes() {
return mtree.getAllStorageGroupNodes();
}
/**
 * Return all paths for the given path if the path is abstract, or return the path itself.
 * Regular expressions in this method are formed by the amalgamation of seriesPath and the
 * character '*'.
 *
 * @param prefixPath can be a prefix or a full path. If the wildcard is not at the tail, each
 * wildcard can only match one level; otherwise it can match to the tail.
 */
public List<PartialPath> getAllTimeseriesPath(PartialPath prefixPath) throws MetadataException {
return mtree.getAllTimeseriesPath(prefixPath);
}
/**
 * Similar to method getAllTimeseriesPath(), but returns the paths together with their aliases.
 */
public Pair<List<PartialPath>, Integer> getAllTimeseriesPathWithAlias(PartialPath prefixPath,
int limit, int offset) throws MetadataException {
return mtree.getAllTimeseriesPathWithAlias(prefixPath, limit, offset);
}
/**
 * To calculate the count of timeseries for the given prefix path.
 */
public int getAllTimeseriesCount(PartialPath prefixPath) throws MetadataException {
return mtree.getAllTimeseriesCount(prefixPath);
}
/**
 * To calculate the count of devices for the given prefix path.
 */
public int getDevicesNum(PartialPath prefixPath) throws MetadataException {
return mtree.getDevicesNum(prefixPath);
}
/**
 * To calculate the count of storage groups for the given prefix path.
 */
public int getStorageGroupNum(PartialPath prefixPath) throws MetadataException {
return mtree.getStorageGroupNum(prefixPath);
}
/**
 * To calculate the count of nodes at the given level for the given prefix path.
 *
 * @param prefixPath a prefix path or a full path, cannot contain '*'
 * @param level the level, which cannot be smaller than the level of the prefixPath
 */
public int getNodesCountInGivenLevel(PartialPath prefixPath, int level) throws MetadataException {
return mtree.getNodesCountInGivenLevel(prefixPath, level);
}
@SuppressWarnings("squid:S3776") // Suppress high Cognitive Complexity warning
private List<ShowTimeSeriesResult> showTimeseriesWithIndex(ShowTimeSeriesPlan plan,
QueryContext context) throws MetadataException {
if (!tagIndex.containsKey(plan.getKey())) {
throw new MetadataException("The key " + plan.getKey() + " is not a tag.");
}
Map<String, Set<MeasurementMNode>> value2Node = tagIndex.get(plan.getKey());
if (value2Node.isEmpty()) {
throw new MetadataException("The key " + plan.getKey() + " is not a tag.");
}
List<MeasurementMNode> allMatchedNodes = new ArrayList<>();
if (plan.isContains()) {
for (Entry<String, Set<MeasurementMNode>> entry : value2Node.entrySet()) {
if (entry.getKey() == null || entry.getValue() == null) {
continue;
}
String tagValue = entry.getKey();
if (tagValue.contains(plan.getValue())) {
allMatchedNodes.addAll(entry.getValue());
}
}
} else {
for (Entry<String, Set<MeasurementMNode>> entry : value2Node.entrySet()) {
if (entry.getKey() == null || entry.getValue() == null) {
continue;
}
String tagValue = entry.getKey();
if (plan.getValue().equals(tagValue)) {
allMatchedNodes.addAll(entry.getValue());
}
}
}
// if ordered by heat, we sort all the timeseries by the descending order of the last insert timestamp
if (plan.isOrderByHeat()) {
List<StorageGroupProcessor> list;
try {
list = StorageEngine.getInstance()
.mergeLock(allMatchedNodes.stream().map(MNode::getPartialPath).collect(toList()));
try {
allMatchedNodes = allMatchedNodes.stream().sorted(Comparator
.comparingLong((MeasurementMNode mNode) -> MTree.getLastTimeStamp(mNode, context))
.reversed().thenComparing(MNode::getFullPath)).collect(toList());
} finally {
StorageEngine.getInstance().mergeUnLock(list);
}
} catch (StorageEngineException e) {
throw new MetadataException(e);
}
} else {
// otherwise, we just sort them by the alphabetical order
allMatchedNodes = allMatchedNodes.stream().sorted(Comparator.comparing(MNode::getFullPath))
.collect(toList());
}
List<ShowTimeSeriesResult> res = new LinkedList<>();
String[] prefixNodes = plan.getPath().getNodes();
int curOffset = -1;
int count = 0;
int limit = plan.getLimit();
int offset = plan.getOffset();
for (MeasurementMNode leaf : allMatchedNodes) {
if (match(leaf.getPartialPath(), prefixNodes)) {
if (limit != 0 || offset != 0) {
curOffset++;
if (curOffset < offset || count == limit) {
continue;
}
}
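// e.g. (illustrative): with offset=10 and limit=5, the first 10 matching leaves are
// skipped and no further results are collected once 5 have been gathered.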
try {
Pair