
org.apache.cassandra.db.Directories Maven / Gradle / Ivy

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.db;

import java.io.File;
import java.io.FileFilter;
import java.io.IOError;
import java.io.IOException;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.function.Function;
import java.util.stream.Collectors;

import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Predicate;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.ImmutableSet.Builder;
import com.google.common.collect.Iterables;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import org.apache.cassandra.concurrent.ScheduledExecutors;
import org.apache.cassandra.config.CFMetaData;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.io.ExceededDiskThresholdException;
import org.apache.cassandra.io.FSError;
import org.apache.cassandra.io.FSWriteError;
import org.apache.cassandra.io.sstable.Component;
import org.apache.cassandra.io.sstable.Descriptor;
import org.apache.cassandra.io.sstable.SSTable;
import org.apache.cassandra.io.sstable.SnapshotDeletingTask;
import org.apache.cassandra.io.util.FileUtils;
import org.apache.cassandra.service.StorageService;
import org.apache.cassandra.service.StorageServiceMBean;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.Pair;

import static com.google.common.collect.Sets.newHashSet;

/**
 * Encapsulate handling of paths to the data files.
 *
 * <pre> {@code
 *   /<path_to_data_dir>/ks/<directory name>/ks-cf1-jb-1-Data.db
 *                         /<directory name>/la-2-Data.db
 *                         /<directory name>/.<index name>/ks-cf1.idx-jb-1-Data.db
 *                         /<directory name>/.<index name>/la-1-Data.db
 *                         ...
 * } </pre>
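 *
 * (Illustrative resolved example, not taken from a real cluster: with a data directory of
 * /var/lib/cassandra/data, keyspace "ks" and column family "cf1", an sstable might live at
 * {@code /var/lib/cassandra/data/ks/cf1-<cfId>/ks-cf1-jb-1-Data.db}.)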
 *
 * Until v2.0, {@code <directory name>} is just the column family name.
 * Since v2.1, {@code <directory name>} has the column family ID (cfId) appended to its end.
 *
 * SSTables from secondary indexes were put in the same directory as their parent.
 * Since v2.2, they have their own directory under the parent directory, named after the index.
 * Upon startup, those secondary index files are moved to the new directory when upgrading.
 *
 * For backward compatibility, Directories can use the directory without cfId if it exists.
 *
 * In addition, more than one 'root' data directory can be specified, so that
 * {@code <path_to_data_dir>} potentially represents multiple locations.
 * Note that in the case of multiple locations, the manifest for the leveled
 * compaction is only in one of the locations.
 *
 * Snapshots (resp. backups) are always created alongside the sstables that are
 * snapshotted (resp. backed up), but inside a subdirectory named 'snapshots'
 * (resp. 'backups'); snapshots are further inside a subdirectory named after
 * the snapshot. For secondary indexes, snapshots (backups) are not created in
 * their own directory, but in their parent's snapshot (backup) directory.
 *
 * This class abstracts all those details from the rest of the code.
 */
public class Directories
{
    private static final Logger logger = LoggerFactory.getLogger(Directories.class);

    private static final double MAX_COMPACTION_DISK_USAGE =
        System.getProperty("palantir_cassandra.max_compaction_disk_usage") == null
        ? 0.90
        : Double.parseDouble(System.getProperty("palantir_cassandra.max_compaction_disk_usage"));

    private static final ReadWriteLock SNAPSHOT_LOCK = new ReentrantReadWriteLock();
    private static final int LOCK_TIMEOUT_SECONDS = 60;

    public static final String BACKUPS_SUBDIR = "backups";
    public static final String SNAPSHOT_SUBDIR = "snapshots";
    public static final String SECONDARY_INDEX_NAME_SEPARATOR = ".";

    public static final DataDirectory[] dataDirectories;

    // needed for dealing with the race condition when compactions run in parallel, to reflect the actual available space
    // see https://github.com/palantir/cassandra/issues/198
    private static final Object COMPACTION_LOCK = new Object();
    private static long expectedSpaceUsedByCompactions = 0;

    static
    {
        String[] locations = DatabaseDescriptor.getAllDataFileLocations();
        dataDirectories = new DataDirectory[locations.length];
        for (int i = 0; i < locations.length; ++i)
            dataDirectories[i] = new DataDirectory(new File(locations[i]));
    }

    /**
     * Checks whether Cassandra has RWX permissions to the specified directory. Logs an error with
     * the details if it does not.
     *
     * @param dir File object of the directory.
     * @param dataDir String representation of the directory's location
     * @return status representing Cassandra's RWX permissions to the supplied folder location.
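     *
     * <p>A minimal usage sketch (the path is illustrative, not a required location):
     * <pre> {@code
     *   File dir = new File("/var/lib/cassandra/data");
     *   if (!Directories.verifyFullPermissions(dir, dir.getPath()))
     *       // the specific missing permission has already been logged
     *       throw new IllegalStateException("Insufficient permissions on " + dir);
     * } </pre>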
     */
    public static boolean verifyFullPermissions(File dir, String dataDir)
    {
        if (!dir.isDirectory())
        {
            logger.error("Not a directory {}", dataDir);
            return false;
        }
        else if (!FileAction.hasPrivilege(dir, FileAction.X))
        {
            logger.error("Doesn't have execute permissions for {} directory", dataDir);
            return false;
        }
        else if (!FileAction.hasPrivilege(dir, FileAction.R))
        {
            logger.error("Doesn't have read permissions for {} directory", dataDir);
            return false;
        }
        else if (dir.exists() && !FileAction.hasPrivilege(dir, FileAction.W))
        {
            logger.error("Doesn't have write permissions for {} directory", dataDir);
            return false;
        }
        return true;
    }

    public enum FileAction
    {
        X, W, XW, R, XR, RW, XRW;

        private FileAction()
        {
        }

        public static boolean hasPrivilege(File file, FileAction action)
        {
            boolean privilege = false;

            switch (action)
            {
                case X:
                    privilege = file.canExecute();
                    break;
                case W:
                    privilege = file.canWrite();
                    break;
                case XW:
                    privilege = file.canExecute() && file.canWrite();
                    break;
                case R:
                    privilege = file.canRead();
                    break;
                case XR:
                    privilege = file.canExecute() && file.canRead();
                    break;
                case RW:
                    privilege = file.canRead() && file.canWrite();
                    break;
                case XRW:
                    privilege = file.canExecute() && file.canRead() && file.canWrite();
                    break;
            }
            return privilege;
        }
    }

    private final CFMetaData metadata;
    private final File[] dataPaths;

    /**
     * Create Directories of the given ColumnFamily.
     * SSTable directories are created under the data directories defined in cassandra.yaml if they do not exist at this time.
     *
     * @param metadata metadata of ColumnFamily
     */
    public Directories(final CFMetaData metadata)
    {
        this.metadata = metadata;

        String cfId = ByteBufferUtil.bytesToHex(ByteBufferUtil.bytes(metadata.cfId));
        int idx = metadata.cfName.indexOf(SECONDARY_INDEX_NAME_SEPARATOR);
        String cfName = idx >= 0 ? metadata.cfName.substring(0, idx) : metadata.cfName;
        String indexNameWithDot = idx >= 0 ? metadata.cfName.substring(idx) : null;
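        // For example, a secondary index CF named "Standard1.idx" (a hypothetical name) yields
        // cfName "Standard1" and indexNameWithDot ".idx"; for a regular CF, idx is -1 and
        // indexNameWithDot stays null.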
        this.dataPaths = new File[dataDirectories.length];

        // If upgraded from a version less than 2.1, use the existing directories
        String oldSSTableRelativePath = join(metadata.ksName, cfName);
        for (int i = 0; i < dataDirectories.length; ++i)
        {
            // check if the old SSTable directory exists
            dataPaths[i] = new File(dataDirectories[i].location, oldSSTableRelativePath);
        }
        boolean olderDirectoryExists = Iterables.any(Arrays.asList(dataPaths), new Predicate<File>()
        {
            public boolean apply(File file)
            {
                return file.exists();
            }
        });
        if (!olderDirectoryExists)
        {
            // use 2.1+ style
            String newSSTableRelativePath = join(metadata.ksName, cfName + '-' + cfId);
            for (int i = 0; i < dataDirectories.length; ++i)
                dataPaths[i] = new File(dataDirectories[i].location, newSSTableRelativePath);
        }

        // if index, then move to its own directory
        if (indexNameWithDot != null)
        {
            for (int i = 0; i < dataDirectories.length; ++i)
                dataPaths[i] = new File(dataPaths[i], indexNameWithDot);
        }

        for (File dir : dataPaths)
        {
            try
            {
                FileUtils.createDirectory(dir);
            }
            catch (FSError e)
            {
                // don't just let the default exception handler do this, we need the create loop to continue
                logger.error("Failed to create {} directory", dir);
                FileUtils.handleFSError(e);
            }
        }

        // if index, move existing older versioned SSTable files to the new directory
        if (indexNameWithDot != null)
        {
            for (File dataPath : dataPaths)
            {
                File[] indexFiles = dataPath.getParentFile().listFiles(new FileFilter()
                {
                    @Override
                    public boolean accept(File file)
                    {
                        if (file.isDirectory())
                            return false;

                        Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(), file.getName());
                        return pair != null && pair.left.ksname.equals(metadata.ksName) && pair.left.cfname.equals(metadata.cfName);
                    }
                });
                for (File indexFile : indexFiles)
                {
                    File destFile = new File(dataPath, indexFile.getName());
                    logger.trace("Moving index file {} to {}", indexFile, destFile);
                    FileUtils.renameWithConfirm(indexFile, destFile);
                }
            }
        }
    }

    public static void scheduleVerifyingDiskDoesNotExceedThresholdChecks()
    {
        ScheduledExecutors.scheduledTasks.scheduleAtFixedRate(
            getVerifyDiskHasEnoughUsableSpaceRunnable(), 1, 1, TimeUnit.MINUTES);
    }

    @VisibleForTesting
    static Runnable getVerifyDiskHasEnoughUsableSpaceRunnable()
    {
        return () -> {
            try
            {
                Directories.verifyDiskHasEnoughUsableSpace();
            }
            catch (ExceededDiskThresholdException e)
            {
                // If the node is already disabled, propagating the exception from the scheduled executor is redundant and clogs logs.
                if (!StorageService.instance.isNodeDisabled())
                {
                    throw e;
                }
            }
        };
    }

    public static void verifyDiskHasEnoughUsableSpace()
    {
        Map.Entry<DataDirectory, Double> maxPathToUtilization = getMaxPathToUtilization();
        double threshold = DatabaseDescriptor.getMaxDiskUtilizationThreshold();
        double currUtilization = maxPathToUtilization.getValue();

        if (currUtilization >= threshold)
        {
            throw new ExceededDiskThresholdException(maxPathToUtilization.getKey().location, currUtilization, threshold);
        }
        else if (shouldEnableNode(threshold, currUtilization))
        {
            logger.info(String.format(
                "Only transient errors were of type ExceededDiskThreshold. Current disk use of %f is under threshold "
                + "of %f. Clearing transient errors and enabling node", currUtilization, threshold));
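            // Recovery path: utilization dropped back below the threshold (with the hysteresis
            // enforced by shouldEnableNode), so the transient error is cleared and the node re-enabled.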
            StorageService.instance.clearTransientErrors();
            StorageService.instance.unsafeEnableNode();
        }
    }

    private static boolean shouldEnableNode(double threshold, double currUtilization)
    {
        boolean underThreshold = threshold - currUtilization >= 0.02;
        boolean nodeCanBeReEnabled = StorageService.instance.isNodeDisabled() && StorageService.instance.isSetupCompleted();
        boolean zeroNonTransientErrors = StorageService.instance.getNonTransientErrors().isEmpty();
        boolean onlyExceededDiskThresholdTransientError =
            StorageService.instance.getPresentTransientErrorTypes()
                                   .equals(ImmutableSet.of(StorageServiceMBean.TransientError.EXCEEDED_DISK_THRESHOLD));
        return underThreshold && nodeCanBeReEnabled && zeroNonTransientErrors && onlyExceededDiskThresholdTransientError;
    }

    public static Map.Entry<DataDirectory, Double> getMaxPathToUtilization()
    {
        Map<DataDirectory, Double> dirsToUtilization = Arrays.stream(dataDirectories)
            .collect(Collectors.toMap(
                Function.identity(),
                dir -> {
                    long total = dir.getTotalSpace();
                    long used = total - dir.getAvailableSpace();
                    return (double) used / total;
                })
            );

        Map.Entry<DataDirectory, Double> maxPathToUtilization = dirsToUtilization
            .entrySet()
            .stream()
            .max(Map.Entry.comparingByValue())
            .orElseThrow(() -> new RuntimeException("Failed to filter most full data directory"));

        logger.debug(String.format("Highest data directory disk utilization on path %s (%f)",
                                   maxPathToUtilization.getKey().location,
                                   maxPathToUtilization.getValue()));
        return maxPathToUtilization;
    }

    /**
     * Returns the SSTable location which is inside the given data directory.
     *
     * @param dataDirectory data directory to look in
     * @return SSTable location
     */
    public File getLocationForDisk(DataDirectory dataDirectory)
    {
        if (dataDirectory != null)
            for (File dir : dataPaths)
                if (dir.getAbsolutePath().startsWith(dataDirectory.location.getAbsolutePath()))
                    return dir;
        return null;
    }

    public Descriptor find(String filename)
    {
        for (File dir : dataPaths)
        {
            if (new File(dir, filename).exists())
                return Descriptor.fromFilename(dir, filename).left;
        }
        return null;
    }

    /**
     * Basically the same as calling {@link #getWriteableLocationAsFile(long)} with an unknown size ({@code -1L}),
     * which may return any allowed directory - even a data directory that has no usable space.
     * Do not use this method in production code.
     *
     * @throws IOError if all directories are blocked.
     */
    public File getDirectoryForNewSSTables()
    {
        return getWriteableLocationAsFile(-1L);
    }

    /**
     * Returns an allowed directory that _currently_ has {@code writeSize} bytes as usable space.
     *
     * @throws IOError if all directories are disallowed.
     */
    public File getWriteableLocationAsFile(long writeSize)
    {
        return getLocationForDisk(getWriteableLocation(writeSize));
    }

    /**
     * Returns an allowed data directory that _currently_ has {@code writeSize} bytes as usable space.
     *
     * @throws IOError if all directories are disallowed.
     */
    public DataDirectory getWriteableLocation(long writeSize)
    {
        verifyDiskHasEnoughUsableSpace();
        List<DataDirectoryCandidate> candidates = new ArrayList<>();

        long totalAvailable = 0L;

        // pick directories with enough space and so that resulting sstable dirs aren't disallowed for writes.
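        // (an unknown size of writeSize == -1L effectively disables the per-candidate size
        // check below, since availableSpace is never negative; see getDirectoryForNewSSTables)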
        boolean tooBig = false;
        for (DataDirectory dataDir : dataDirectories)
        {
            if (DisallowedDirectories.isUnwritable(getLocationForDisk(dataDir)))
            {
                logger.trace("removing disallowed candidate {}", dataDir.location);
                continue;
            }
            DataDirectoryCandidate candidate = new DataDirectoryCandidate(dataDir);
            // exclude directory if the total writeSize does not fit into the data directory
            if (candidate.availableSpace < writeSize)
            {
                logger.trace("removing candidate {}, usable={}, requested={}", candidate.dataDirectory.location, candidate.availableSpace, writeSize);
                tooBig = true;
                continue;
            }
            candidates.add(candidate);
            totalAvailable += candidate.availableSpace;
        }

        if (candidates.isEmpty())
        {
            if (tooBig)
                throw new RuntimeException("Insufficient disk space to write " + writeSize + " bytes");
            else
                throw new FSWriteError(new IOException("All configured data directories have been disallowed as unwritable for erroring out"), "");
        }

        // shortcut for single data directory systems
        if (candidates.size() == 1)
            return candidates.get(0).dataDirectory;

        sortWriteableCandidates(candidates, totalAvailable);

        return pickWriteableDirectory(candidates);
    }

    // separated for unit testing
    static DataDirectory pickWriteableDirectory(List<DataDirectoryCandidate> candidates)
    {
        // weighted random
        double rnd = ThreadLocalRandom.current().nextDouble();
        for (DataDirectoryCandidate candidate : candidates)
        {
            rnd -= candidate.perc;
            if (rnd <= 0)
                return candidate.dataDirectory;
        }
        // last resort
        return candidates.get(0).dataDirectory;
    }

    // separated for unit testing
    static void sortWriteableCandidates(List<DataDirectoryCandidate> candidates, long totalAvailable)
    {
        // calculate free-space-percentage
        for (DataDirectoryCandidate candidate : candidates)
            candidate.calcFreePerc(totalAvailable);

        // sort directories by perc
        Collections.sort(candidates);
    }

    public Boolean checkAvailableDiskSpaceWithoutConsideringConcurrentCompactions(long estimatedSSTables, long expectedTotalWriteSize)
    {
        return checkAvailableDiskSpace(estimatedSSTables, expectedTotalWriteSize, 0);
    }

    public Boolean checkAvailableDiskSpaceConsideringConcurrentCompactions(long estimatedSSTables, long expectedTotalWriteSize)
    {
        synchronized (COMPACTION_LOCK)
        {
            if (!checkAvailableDiskSpace(estimatedSSTables, expectedTotalWriteSize, expectedSpaceUsedByCompactions))
                return false;
            expectedSpaceUsedByCompactions += expectedTotalWriteSize;
            return true;
        }
    }

    private boolean checkAvailableDiskSpace(long estimatedSSTables, long expectedTotalWriteSize, long expectedSpaceUsedByCompactions)
    {
        long writeSize = expectedTotalWriteSize / estimatedSSTables;
        long totalAvailable = 0L;
        long totalSpace = 0L;

        for (DataDirectory dataDir : dataDirectories)
        {
            if (DisallowedDirectories.isUnwritable(getLocationForDisk(dataDir)))
                continue;
            DataDirectoryCandidate candidate = new DataDirectoryCandidate(dataDir);
            // exclude directory if the total writeSize does not fit into the data directory
            if (insufficientDiskSpaceForWriteSize(candidate.availableSpace - expectedSpaceUsedByCompactions, candidate.totalSpace, writeSize))
                continue;
            totalAvailable += candidate.availableSpace;
            totalSpace += candidate.totalSpace;
        }
        if (insufficientDiskSpaceForWriteSize(totalAvailable - expectedSpaceUsedByCompactions, totalSpace, expectedTotalWriteSize))
        {
            logger.warn("Insufficient space for compaction - total available space found: {}MB for compaction with"
                        + " expected size {}MB, with total disk space {}MB and max disk usage by compaction at {}%",
                        totalAvailable / 1024 / 1024,
                        expectedTotalWriteSize / 1024 / 1024,
                        totalSpace / 1024 / 1024,
                        MAX_COMPACTION_DISK_USAGE * 100);
            return false;
        }
        return true;
    }
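    /*
     * Worked example of the check below (illustrative numbers): on a 1000GB volume with 150GB
     * available, usedSpace = 850GB. A 100GB write is then rejected with the default
     * MAX_COMPACTION_DISK_USAGE of 0.90, because 850 + 100 = 950GB exceeds the allowed
     * 1000 * 0.90 = 900GB.
     */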
    private boolean insufficientDiskSpaceForWriteSize(long totalAvailable, long totalSpace, long writeSize)
    {
        long usedSpace = totalSpace - totalAvailable;
        return (usedSpace + writeSize) > (totalSpace * MAX_COMPACTION_DISK_USAGE);
    }

    public static void removeExpectedSpaceUsedByCompaction(long expectedTotalWriteSize, boolean shouldConsiderConcurrentCompactions)
    {
        if (!shouldConsiderConcurrentCompactions)
            return;
        synchronized (COMPACTION_LOCK)
        {
            expectedSpaceUsedByCompactions -= expectedTotalWriteSize;
        }
    }

    public static File getSnapshotDirectory(Descriptor desc, String snapshotName)
    {
        return getSnapshotDirectory(desc.directory, snapshotName);
    }

    /**
     * Returns the directory to write a snapshot into. If the directory does not exist, it is created.
     *
     * If the given {@code location} indicates a secondary index, this will return
     * {@code <location of parent ColumnFamily>/snapshots/<snapshot name>/.<index name>}.
     * Otherwise, this will return {@code <location>/snapshots/<snapshot name>}.
     *
     * @param location base directory
     * @param snapshotName snapshot name
     * @return directory to write the snapshot into
     */
    public static File getSnapshotDirectory(File location, String snapshotName)
    {
        if (isSecondaryIndexFolder(location))
        {
            return getOrCreate(location.getParentFile(), SNAPSHOT_SUBDIR, snapshotName, location.getName());
        }
        else
        {
            return getOrCreate(location, SNAPSHOT_SUBDIR, snapshotName);
        }
    }

    public File getSnapshotManifestFile(String snapshotName)
    {
        File snapshotDir = getSnapshotDirectory(getDirectoryForNewSSTables(), snapshotName);
        return new File(snapshotDir, "manifest.json");
    }

    public File getNewEphemeralSnapshotMarkerFile(String snapshotName)
    {
        File snapshotDir = new File(getWriteableLocationAsFile(1L), join(SNAPSHOT_SUBDIR, snapshotName));
        return getEphemeralSnapshotMarkerFile(snapshotDir);
    }

    private static File getEphemeralSnapshotMarkerFile(File snapshotDirectory)
    {
        return new File(snapshotDirectory, "ephemeral.snapshot");
    }

    public static File getBackupsDirectory(Descriptor desc)
    {
        return getBackupsDirectory(desc.directory);
    }

    public static File getBackupsDirectory(File location)
    {
        if (isSecondaryIndexFolder(location))
        {
            return getOrCreate(location.getParentFile(), BACKUPS_SUBDIR, location.getName());
        }
        else
        {
            return getOrCreate(location, BACKUPS_SUBDIR);
        }
    }

    public SSTableLister sstableLister()
    {
        return new SSTableLister();
    }

    public static class DataDirectory
    {
        public final File location;

        public DataDirectory(File location)
        {
            this.location = location;
        }

        public long getAvailableSpace()
        {
            return FileUtils.getUsableSpace(location);
        }

        public long getTotalSpace()
        {
            return FileUtils.getTotalSpace(location);
        }
    }

    static final class DataDirectoryCandidate implements Comparable<DataDirectoryCandidate>
    {
        final DataDirectory dataDirectory;
        final long availableSpace;
        final long totalSpace;
        double perc;

        public DataDirectoryCandidate(DataDirectory dataDirectory)
        {
            this.dataDirectory = dataDirectory;
            this.availableSpace = dataDirectory.getAvailableSpace();
            this.totalSpace = dataDirectory.getTotalSpace();
        }

        void calcFreePerc(long totalAvailableSpace)
        {
            double w = availableSpace;
            w /= totalAvailableSpace;
            perc = w;
        }

        public int compareTo(DataDirectoryCandidate o)
        {
            if (this == o)
                return 0;

            int r = Double.compare(perc, o.perc);
            if (r != 0)
                return -r;
            // last resort
            return System.identityHashCode(this) - System.identityHashCode(o);
        }
    }

    public class SSTableLister
    {
        private boolean skipTemporary;
        private boolean includeBackups;
        private boolean onlyBackups;
        private int nbFiles;
        private final Map<Descriptor, Set<Component>> components = new HashMap<>();
        private boolean filtered;
        private String snapshotName;

        public SSTableLister skipTemporary(boolean b)
        {
            if (filtered)
                throw new IllegalStateException("list() has already been called");
            skipTemporary = b;
            return this;
        }
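        // Typical use (sketch): directories.sstableLister().skipTemporary(true).includeBackups(true).list()
        // Each setter must be called before list()/listFiles() triggers filter(); afterwards the
        // lister is frozen and the setters throw IllegalStateException.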
        public SSTableLister includeBackups(boolean b)
        {
            if (filtered)
                throw new IllegalStateException("list() has already been called");
            includeBackups = b;
            return this;
        }

        public SSTableLister onlyBackups(boolean b)
        {
            if (filtered)
                throw new IllegalStateException("list() has already been called");
            onlyBackups = b;
            includeBackups = b;
            return this;
        }

        public SSTableLister snapshots(String sn)
        {
            if (filtered)
                throw new IllegalStateException("list() has already been called");
            snapshotName = sn;
            return this;
        }

        public Map<Descriptor, Set<Component>> list()
        {
            filter();
            return ImmutableMap.copyOf(components);
        }

        public List<File> listFiles()
        {
            filter();
            List<File> l = new ArrayList<>(nbFiles);
            for (Map.Entry<Descriptor, Set<Component>> entry : components.entrySet())
            {
                for (Component c : entry.getValue())
                {
                    l.add(new File(entry.getKey().filenameFor(c)));
                }
            }
            return l;
        }

        private void filter()
        {
            if (filtered)
                return;

            for (File location : dataPaths)
            {
                if (DisallowedDirectories.isUnreadable(location))
                    continue;

                if (snapshotName != null)
                {
                    getSnapshotDirectory(location, snapshotName).listFiles(getFilter());
                    continue;
                }

                if (!onlyBackups)
                    location.listFiles(getFilter());

                if (includeBackups)
                    getBackupsDirectory(location).listFiles(getFilter());
            }

            filtered = true;
        }

        private FileFilter getFilter()
        {
            return new FileFilter()
            {
                // This function always returns false since accept adds to the components map
                public boolean accept(File file)
                {
                    if (file.isDirectory())
                        return false;

                    Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(), file.getName());
                    if (pair == null)
                        return false;

                    // we are only interested in the SSTable files that belong to the specific ColumnFamily
                    if (!pair.left.ksname.equals(metadata.ksName) || !pair.left.cfname.equals(metadata.cfName))
                        return false;

                    if (skipTemporary && pair.left.type.isTemporary)
                        return false;

                    Set<Component> previous = components.get(pair.left);
                    if (previous == null)
                    {
                        previous = new HashSet<>();
                        components.put(pair.left, previous);
                    }
                    previous.add(pair.right);
                    nbFiles++;
                    return false;
                }
            };
        }
    }

    /**
     * @return a map of all snapshots to the space being used;
     *         the pair for a snapshot holds the size on disk and the true size.
     */
    public Map<String, Pair<Long, Long>> getSnapshotDetails()
    {
        final Map<String, Pair<Long, Long>> snapshotSpaceMap = new HashMap<>();
        for (File snapshot : listSnapshots())
        {
            final long sizeOnDisk = FileUtils.folderSize(snapshot);
            final long trueSize = getTrueAllocatedSizeIn(snapshot);
            Pair<Long, Long> spaceUsed = snapshotSpaceMap.get(snapshot.getName());
            if (spaceUsed == null)
                spaceUsed = Pair.create(sizeOnDisk, trueSize);
            else
                spaceUsed = Pair.create(spaceUsed.left + sizeOnDisk, spaceUsed.right + trueSize);
            snapshotSpaceMap.put(snapshot.getName(), spaceUsed);
        }

        return snapshotSpaceMap;
    }

    public List<String> listEphemeralSnapshots()
    {
        final List<String> ephemeralSnapshots = new LinkedList<>();
        for (File snapshot : listSnapshots())
        {
            if (getEphemeralSnapshotMarkerFile(snapshot).exists())
                ephemeralSnapshots.add(snapshot.getName());
        }
        return ephemeralSnapshots;
    }

    private List<File> listSnapshots()
    {
        maybeAcquireLock(SNAPSHOT_LOCK.readLock());
        try
        {
            final List<File> snapshots = new LinkedList<>();
            for (final File dir : dataPaths)
            {
                File snapshotDir = isSecondaryIndexFolder(dir)
                                   ? new File(dir.getParent(), SNAPSHOT_SUBDIR)
                                   : new File(dir, SNAPSHOT_SUBDIR);
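                // For a secondary index path <cf dir>/.<index name>, snapshots live in the
                // parent's snapshot directory (see the class javadoc), hence getParent() here.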
                if (snapshotDir.exists() && snapshotDir.isDirectory())
                {
                    final File[] snapshotDirs = snapshotDir.listFiles();
                    if (snapshotDirs != null)
                    {
                        for (final File snapshot : snapshotDirs)
                        {
                            if (snapshot.isDirectory())
                                snapshots.add(snapshot);
                        }
                    }
                }
            }
            return snapshots;
        }
        finally
        {
            SNAPSHOT_LOCK.readLock().unlock();
        }
    }

    public boolean snapshotExists(String snapshotName)
    {
        for (File dir : dataPaths)
        {
            File snapshotDir;
            if (isSecondaryIndexFolder(dir))
            {
                snapshotDir = new File(dir.getParentFile(), join(SNAPSHOT_SUBDIR, snapshotName, dir.getName()));
            }
            else
            {
                snapshotDir = new File(dir, join(SNAPSHOT_SUBDIR, snapshotName));
            }
            if (snapshotDir.exists())
                return true;
        }
        return false;
    }

    public static void clearSnapshot(String snapshotName, List<File> snapshotDirectories)
    {
        maybeAcquireLock(SNAPSHOT_LOCK.writeLock());
        try
        {
            // If snapshotName is empty or null, we will delete the entire snapshot directory
            String tag = snapshotName == null ? "" : snapshotName;
            for (File dir : snapshotDirectories)
            {
                File snapshotDir = new File(dir, join(SNAPSHOT_SUBDIR, tag));
                if (snapshotDir.exists())
                {
                    logger.trace("Removing snapshot directory {}", snapshotDir);
                    try
                    {
                        FileUtils.deleteRecursive(snapshotDir);
                    }
                    catch (FSWriteError e)
                    {
                        if (FBUtilities.isWindows())
                            SnapshotDeletingTask.addFailedSnapshot(snapshotDir);
                        else
                            throw e;
                    }
                }
            }
        }
        finally
        {
            SNAPSHOT_LOCK.writeLock().unlock();
        }
    }

    private static void maybeAcquireLock(Lock lock)
    {
        try
        {
            lock.tryLock(LOCK_TIMEOUT_SECONDS, TimeUnit.SECONDS);
        }
        catch (InterruptedException e)
        {
            throw new RuntimeException("Could not acquire snapshot lock after " + LOCK_TIMEOUT_SECONDS + " seconds", e);
        }
    }

    // The snapshot must exist
    public long snapshotCreationTime(String snapshotName)
    {
        for (File dir : dataPaths)
        {
            File snapshotDir = getSnapshotDirectory(dir, snapshotName);
            if (snapshotDir.exists())
                return snapshotDir.lastModified();
        }
        throw new RuntimeException("Snapshot " + snapshotName + " doesn't exist");
    }

    /**
     * @return total snapshot size in bytes for all snapshots.
     */
    public long trueSnapshotsSize()
    {
        long result = 0L;
        for (File dir : dataPaths)
        {
            File snapshotDir = isSecondaryIndexFolder(dir)
                               ? new File(dir.getParent(), SNAPSHOT_SUBDIR)
                               : new File(dir, SNAPSHOT_SUBDIR);
            result += getTrueAllocatedSizeIn(snapshotDir);
        }
        return result;
    }

    public long getTrueAllocatedSizeIn(File input)
    {
        if (!input.isDirectory())
            return 0;

        TrueFilesSizeVisitor visitor = new TrueFilesSizeVisitor();
        try
        {
            Files.walkFileTree(input.toPath(), visitor);
        }
        catch (IOException e)
        {
            logger.error("Could not calculate the size of {}. {}", input, e);
        }

        return visitor.getAllocatedSize();
    }

    // Recursively finds all the sub directories in the KS directory.
    public static List<File> getKSChildDirectories(String ksName)
    {
        List<File> result = new ArrayList<>();
        for (DataDirectory dataDirectory : dataDirectories)
        {
            File ksDir = new File(dataDirectory.location, ksName);
            File[] cfDirs = ksDir.listFiles();
            if (cfDirs == null)
                continue;
            for (File cfDir : cfDirs)
            {
                if (cfDir.isDirectory())
                    result.add(cfDir);
            }
        }
        return result;
    }

    public static boolean isSecondaryIndexFolder(File dir)
    {
        return dir.getName().startsWith(SECONDARY_INDEX_NAME_SEPARATOR);
    }

    public List<File> getCFDirectories()
    {
        List<File> result = new ArrayList<>();
        for (File dataDirectory : dataPaths)
        {
            if (dataDirectory.isDirectory())
                result.add(dataDirectory);
        }
        return result;
    }

    private static File getOrCreate(File base, String... subdirs)
    {
        File dir = subdirs == null || subdirs.length == 0 ? base : new File(base, join(subdirs));
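        // e.g. getOrCreate(location, "snapshots", "s1") resolves to <location>/snapshots/s1
        // ("s1" being an illustrative snapshot tag)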
        if (dir.exists())
        {
            if (!dir.isDirectory())
                throw new AssertionError(String.format("Invalid directory path %s: path exists but is not a directory", dir));
        }
        else if (!dir.mkdirs() && !(dir.exists() && dir.isDirectory()))
        {
            throw new FSWriteError(new IOException("Unable to create directory " + dir), dir);
        }
        return dir;
    }

    private static String join(String... s)
    {
        return StringUtils.join(s, File.separator);
    }

    @VisibleForTesting
    static void resetDataDirectoriesAfterTest()
    {
        String[] locations = DatabaseDescriptor.getAllDataFileLocations();
        for (int i = 0; i < locations.length; ++i)
            dataDirectories[i] = new DataDirectory(new File(locations[i]));
    }

    private class TrueFilesSizeVisitor extends SimpleFileVisitor<Path>
    {
        private final AtomicLong size = new AtomicLong(0);
        private final Set<String> visited = newHashSet(); // count each file only once
        private final Set<String> alive;

        TrueFilesSizeVisitor()
        {
            super();
            Builder<String> builder = ImmutableSet.builder();
            for (File file : sstableLister().listFiles())
                builder.add(file.getName());
            alive = builder.build();
        }

        private boolean isAcceptable(Path file)
        {
            String fileName = file.toFile().getName();
            Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParent().toFile(), fileName);
            return pair != null
                   && pair.left.ksname.equals(metadata.ksName)
                   && pair.left.cfname.equals(metadata.cfName)
                   && !visited.contains(fileName)
                   && !alive.contains(fileName);
        }

        @Override
        public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException
        {
            if (isAcceptable(file))
            {
                size.addAndGet(attrs.size());
                visited.add(file.toFile().getName());
            }
            return FileVisitResult.CONTINUE;
        }

        @Override
        public FileVisitResult visitFileFailed(Path file, IOException exc) throws IOException
        {
            return FileVisitResult.CONTINUE;
        }

        public long getAllocatedSize()
        {
            return size.get();
        }
    }
}
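/*
 * Typical lifecycle sketch (assumes a CFMetaData instance obtained from the schema; the size
 * and option values are illustrative):
 *
 *   Directories directories = new Directories(metadata);           // creates/locates sstable dirs
 *   File target = directories.getWriteableLocationAsFile(1 << 20); // a dir with >= 1MB usable
 *   Map<Descriptor, Set<Component>> live =
 *       directories.sstableLister().skipTemporary(true).list();    // live sstable components
 */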



