com.gemstone.gemfire.cache.hdfs.internal.HdfsStoreImplJUnitTest Maven / Gradle / Ivy
Go to download
Show more of this group Show more artifacts with this name
Show all versions of gemfire-junit Show documentation
SnappyData store based off Pivotal GemFireXD
/*
* Copyright (c) 2010-2015 Pivotal Software, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package com.gemstone.gemfire.cache.hdfs.internal;
import java.io.File;
import java.io.IOException;
import java.util.ArrayList;
import io.snappydata.test.dunit.AvailablePortHelper;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;
import com.gemstone.gemfire.cache.Region;
import com.gemstone.gemfire.cache.hdfs.HDFSEventQueueAttributes;
import com.gemstone.gemfire.cache.hdfs.HDFSEventQueueAttributesFactory;
import com.gemstone.gemfire.cache.hdfs.HDFSStore;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator;
import com.gemstone.gemfire.cache.hdfs.HDFSStore.HDFSCompactionConfig;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator.HDFSCompactionConfigMutator;
import com.gemstone.gemfire.cache.hdfs.HDFSStoreMutator.HDFSEventQueueAttributesMutator;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.BaseHoplogTestCase;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HDFSRegionDirector.HdfsRegionManager;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HFileSortedOplog;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.HdfsSortedOplogOrganizer;
import com.gemstone.gemfire.cache.hdfs.internal.hoplog.Hoplog;
import com.gemstone.gemfire.internal.cache.LocalRegion;
import com.gemstone.gemfire.internal.util.BlobHelper;
public class HdfsStoreImplJUnitTest extends BaseHoplogTestCase {
public void testAlterAttribute() throws Exception {
  // Before any mutation the store must report the documented defaults.
  assertEquals(HDFSStore.DEFAULT_MAX_WRITE_ONLY_FILE_SIZE, hdfsStore.getMaxFileSize());
  assertEquals(HDFSStore.DEFAULT_WRITE_ONLY_FILE_ROLLOVER_INTERVAL, hdfsStore.getFileRolloverInterval());

  HDFSCompactionConfig compaction = hdfsStore.getHDFSCompactionConfig();
  assertEquals(HDFSCompactionConfig.DEFAULT_MAX_INPUT_FILE_COUNT, compaction.getMaxInputFileCount());
  assertEquals(HDFSCompactionConfig.DEFAULT_MAX_INPUT_FILE_SIZE_MB, compaction.getMaxInputFileSizeMB());
  assertEquals(HDFSCompactionConfig.DEFAULT_MAX_THREADS, compaction.getMaxThreads());
  assertEquals(HDFSCompactionConfig.DEFAULT_MIN_INPUT_FILE_COUNT, compaction.getMinInputFileCount());
  assertFalse(compaction.getAutoCompaction());
  assertEquals(HDFSCompactionConfig.DEFAULT_MAJOR_COMPACTION_INTERVAL_MINS, compaction.getMajorCompactionIntervalMins());
  assertEquals(HDFSCompactionConfig.DEFAULT_MAJOR_COMPACTION_MAX_THREADS, compaction.getMajorCompactionMaxThreads());
  assertFalse(compaction.getAutoMajorCompaction());
  assertEquals(HDFSCompactionConfig.DEFAULT_OLD_FILE_CLEANUP_INTERVAL_MINS, compaction.getOldFilesCleanupIntervalMins());

  HDFSEventQueueAttributes queueAttrs = hdfsStore.getHDFSEventQueueAttributes();
  assertEquals(HDFSEventQueueAttributesFactory.DEFAULT_BATCH_SIZE_MB, queueAttrs.getBatchSizeMB());
  assertEquals(HDFSEventQueueAttributesFactory.DEFAULT_BATCH_TIME_INTERVAL_MILLIS, queueAttrs.getBatchTimeInterval());

  // Mutate every tunable attribute to a distinct non-default value via the
  // store mutator and its nested compaction/event-queue mutators.
  HDFSStoreMutator storeMutator = hdfsStore.createHdfsStoreMutator();
  storeMutator.setMaxFileSize(234);
  storeMutator.setFileRolloverInterval(121);

  HDFSCompactionConfigMutator compactionMutator = storeMutator.getCompactionConfigMutator();
  compactionMutator.setMaxInputFileCount(87);
  compactionMutator.setMaxInputFileSizeMB(45);
  compactionMutator.setMinInputFileCount(34);
  compactionMutator.setMaxThreads(843);
  compactionMutator.setAutoCompaction(true);
  compactionMutator.setMajorCompactionIntervalMins(26);
  compactionMutator.setMajorCompactionMaxThreads(92);
  compactionMutator.setAutoMajorCompaction(true);
  compactionMutator.setOldFilesCleanupIntervalMins(328);

  HDFSEventQueueAttributesMutator queueMutator = storeMutator.getHDFSEventQueueAttributesMutator();
  queueMutator.setBatchSizeMB(985);
  queueMutator.setBatchTimeInterval(695);

  hdfsStore.alter(storeMutator);

  // After alter() the store must reflect exactly the mutated values.
  assertEquals(234, hdfsStore.getMaxFileSize());
  assertEquals(121, hdfsStore.getFileRolloverInterval());

  compaction = hdfsStore.getHDFSCompactionConfig();
  assertEquals(87, compaction.getMaxInputFileCount());
  assertEquals(45, compaction.getMaxInputFileSizeMB());
  assertEquals(843, compaction.getMaxThreads());
  assertEquals(34, compaction.getMinInputFileCount());
  assertTrue(compaction.getAutoCompaction());
  assertEquals(26, compaction.getMajorCompactionIntervalMins());
  assertEquals(92, compaction.getMajorCompactionMaxThreads());
  assertTrue(compaction.getAutoMajorCompaction());
  assertEquals(328, compaction.getOldFilesCleanupIntervalMins());

  queueAttrs = hdfsStore.getHDFSEventQueueAttributes();
  assertEquals(985, queueAttrs.getBatchSizeMB());
  assertEquals(695, queueAttrs.getBatchTimeInterval());
}
public void testSameHdfsMultiStore() throws Exception {
int port = AvailablePortHelper.getRandomAvailableTCPPort();
MiniDFSCluster cluster = initMiniCluster(port ,1);
// create store with config file
hsf.setHomeDir("Store-1");
File confFile = new File("HdfsStoreImplJUnitTest-Store-1");
String conf = "\n "
+ " \n "
+ " dfs.block.size \n "
+ " 1024 \n "
+ " \n "
+ " \n "
+ " fs.default.name \n "
+ " hdfs://127.0.0.1:" + port + " \n"
+ " \n "
+ " ";
setConfigFile(hsf, confFile, conf);
HDFSStoreImpl store1 = (HDFSStoreImpl) hsf.create("Store-1");
confFile.delete();
// create region with store
regionfactory.setHDFSStoreName(store1.getName());
Region