
// org.elasticsearch.repositories.fs.FsRepository (Maven / Gradle / Ivy artifact-page residue)
/*
* Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
* or more contributor license agreements. Licensed under the Elastic License
* 2.0 and the Server Side Public License, v 1; you may not use this file except
* in compliance with, at your election, the Elastic License 2.0 or the Server
* Side Public License, v 1.
*/
package org.elasticsearch.repositories.fs;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;
import org.elasticsearch.cluster.metadata.RepositoryMetadata;
import org.elasticsearch.cluster.service.ClusterService;
import org.elasticsearch.common.blobstore.BlobPath;
import org.elasticsearch.common.blobstore.BlobStore;
import org.elasticsearch.common.blobstore.fs.FsBlobStore;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Setting.Property;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.env.Environment;
import org.elasticsearch.indices.recovery.RecoverySettings;
import org.elasticsearch.repositories.RepositoryException;
import org.elasticsearch.repositories.blobstore.BlobStoreRepository;
import org.elasticsearch.xcontent.NamedXContentRegistry;
import java.nio.file.Path;
import java.util.function.Function;
/**
 * Shared file system implementation of the BlobStoreRepository.
 *
 * <p>The shared file system repository supports the following settings:
 * <ul>
 * <li>{@code location} - Path to the root of the repository. This is a mandatory parameter.</li>
 * <li>{@code concurrent_streams} - Number of concurrent read/write streams (per repository on each node). Defaults to 5.</li>
 * <li>{@code chunk_size} - Large files can be divided into chunks. This parameter specifies the chunk size.
 * Defaults to not chunked.</li>
 * <li>{@code compress} - If set to true, metadata files will be stored compressed. Defaults to false.</li>
 * </ul>
 */
public class FsRepository extends BlobStoreRepository {

    private static final Logger logger = LogManager.getLogger(FsRepository.class);

    /** Repository type name under which this implementation is registered. */
    public static final String TYPE = "fs";

    /** Per-repository setting: path to the root of the repository. Mandatory. */
    public static final Setting<String> LOCATION_SETTING = new Setting<>("location", "", Function.identity(), Property.NodeScope);

    /** Node-level setting that serves as the fallback for {@link #LOCATION_SETTING}. */
    public static final Setting<String> REPOSITORIES_LOCATION_SETTING = new Setting<>(
        "repositories.fs.location",
        LOCATION_SETTING,
        Function.identity(),
        Property.NodeScope
    );

    /** Per-repository chunk size: large files are split into chunks of this size. Defaults to unlimited (not chunked). */
    public static final Setting<ByteSizeValue> CHUNK_SIZE_SETTING = Setting.byteSizeSetting(
        "chunk_size",
        new ByteSizeValue(Long.MAX_VALUE),
        new ByteSizeValue(5),
        new ByteSizeValue(Long.MAX_VALUE),
        Property.NodeScope
    );

    /** Node-level fallback for {@link #CHUNK_SIZE_SETTING}, read from the node's settings. */
    public static final Setting<ByteSizeValue> REPOSITORIES_CHUNK_SIZE_SETTING = Setting.byteSizeSetting(
        "repositories.fs.chunk_size",
        new ByteSizeValue(Long.MAX_VALUE),
        new ByteSizeValue(5),
        new ByteSizeValue(Long.MAX_VALUE),
        Property.NodeScope
    );

    /** Per-repository setting: whether metadata files are stored compressed. Defaults to false. */
    public static final Setting<Boolean> COMPRESS_SETTING = Setting.boolSetting("compress", false, Property.NodeScope);

    /** Deprecated node-level fallback for {@link #COMPRESS_SETTING}. */
    public static final Setting<Boolean> REPOSITORIES_COMPRESS_SETTING = Setting.boolSetting(
        "repositories.fs.compress",
        false,
        Property.NodeScope,
        Property.Deprecated
    );

    private final Environment environment;

    // Resolved once in the constructor (repository setting, falling back to the node setting).
    private final ByteSizeValue chunkSize;

    private final BlobPath basePath;

    /**
     * Constructs a shared file system repository.
     *
     * @throws RepositoryException if the {@code location} setting is missing, or does not resolve
     *         against any of the paths configured via the {@code path.repo} node setting
     */
    public FsRepository(
        RepositoryMetadata metadata,
        Environment environment,
        NamedXContentRegistry namedXContentRegistry,
        ClusterService clusterService,
        BigArrays bigArrays,
        RecoverySettings recoverySettings
    ) {
        super(metadata, calculateCompress(metadata, environment), namedXContentRegistry, clusterService, bigArrays, recoverySettings);
        this.environment = environment;
        String location = REPOSITORIES_LOCATION_SETTING.get(metadata.settings());
        if (location.isEmpty()) {
            logger.warn(
                "the repository location is missing, it should point to a shared file system location"
                    + " that is available on all master and data nodes"
            );
            throw new RepositoryException(metadata.name(), "missing location");
        }
        // The location must resolve inside one of the whitelisted path.repo roots; resolveRepoFile
        // returns null when it does not. Distinguish the two failure modes for a clearer message.
        Path locationFile = environment.resolveRepoFile(location);
        if (locationFile == null) {
            if (environment.repoFiles().length > 0) {
                logger.warn(
                    "The specified location [{}] doesn't start with any " + "repository paths specified by the path.repo setting: [{}] ",
                    location,
                    environment.repoFiles()
                );
                throw new RepositoryException(
                    metadata.name(),
                    "location [" + location + "] doesn't match any of the locations specified by path.repo"
                );
            } else {
                logger.warn(
                    "The specified location [{}] should start with a repository path specified by"
                        + " the path.repo setting, but the path.repo setting was not set on this node",
                    location
                );
                throw new RepositoryException(
                    metadata.name(),
                    "location [" + location + "] doesn't match any of the locations specified by path.repo because this setting is empty"
                );
            }
        }
        // Repository-level chunk_size wins; otherwise fall back to the node-level setting.
        if (CHUNK_SIZE_SETTING.exists(metadata.settings())) {
            this.chunkSize = CHUNK_SIZE_SETTING.get(metadata.settings());
        } else {
            this.chunkSize = REPOSITORIES_CHUNK_SIZE_SETTING.get(environment.settings());
        }
        this.basePath = BlobPath.EMPTY;
    }

    /** Resolves the compress flag: repository-level setting wins over the (deprecated) node-level fallback. */
    private static boolean calculateCompress(RepositoryMetadata metadata, Environment environment) {
        return COMPRESS_SETTING.exists(metadata.settings())
            ? COMPRESS_SETTING.get(metadata.settings())
            : REPOSITORIES_COMPRESS_SETTING.get(environment.settings());
    }

    /** Creates the {@link FsBlobStore} rooted at the resolved repository location. */
    @Override
    protected BlobStore createBlobStore() throws Exception {
        final String location = REPOSITORIES_LOCATION_SETTING.get(getMetadata().settings());
        final Path locationFile = environment.resolveRepoFile(location);
        return new FsBlobStore(bufferSize, locationFile, isReadOnly());
    }

    @Override
    protected ByteSizeValue chunkSize() {
        return chunkSize;
    }

    @Override
    public BlobPath basePath() {
        return basePath;
    }

    @Override
    public boolean hasAtomicOverwrites() {
        // We overwrite a file by deleting the old file and then renaming the new file into place, which is not atomic.
        // Also on Windows the overwrite may fail if the file is opened for reading at the time.
        return false;
    }
}
// © 2015 - 2025 Weber Informatics LLC | Privacy Policy (artifact-page residue)