/*
* Copyright (c) 2017 Brocade Communications Systems, Inc. and others. All rights reserved.
*
* This program and the accompanying materials are made available under the
* terms of the Eclipse Public License v1.0 which accompanies this distribution,
* and is available at http://www.eclipse.org/legal/epl-v10.html
*/
package org.opendaylight.controller.cluster.persistence;
import static com.google.common.base.Preconditions.checkArgument;
import akka.actor.ExtendedActorSystem;
import akka.dispatch.Futures;
import akka.persistence.SelectedSnapshot;
import akka.persistence.SnapshotMetadata;
import akka.persistence.SnapshotSelectionCriteria;
import akka.persistence.serialization.Snapshot;
import akka.persistence.serialization.SnapshotSerializer;
import akka.persistence.snapshot.japi.SnapshotStore;
import akka.serialization.JavaSerializer;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.io.ByteStreams;
import com.typesafe.config.Config;
import java.io.BufferedInputStream;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;
import java.util.ArrayDeque;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.Deque;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.Callable;
import java.util.stream.Collector;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import org.eclipse.jdt.annotation.Nullable;
import org.opendaylight.controller.cluster.io.InputOutputStreamFactory;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import scala.concurrent.ExecutionContext;
import scala.concurrent.Future;
/**
* Akka SnapshotStore implementation backed by the local file system. This class was patterned after akka's
 * LocalSnapshotStore class and exists because akka's version serializes to a byte[] before persisting
 * to the file, which will fail if the data reaches or exceeds Integer.MAX_VALUE in size. This class avoids that issue
* by serializing the data directly to the file.
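 * <p>
 * A minimal configuration sketch. The individual setting names ("dir", "max-load-attempts",
 * "stream-dispatcher", "use-lz4-compression", "lz4-blocksize") are the ones read in the constructor
 * below; the enclosing plugin id and the standard Akka "plugin"/"class" wiring shown here are
 * illustrative assumptions only:
 * <pre>{@code
 * akka.persistence.snapshot-store.plugin = "odl-local-snapshot-store"   // hypothetical plugin id
 *
 * odl-local-snapshot-store {
 *   class = "org.opendaylight.controller.cluster.persistence.LocalSnapshotStore"
 *   dir = "./snapshots"
 *   max-load-attempts = 3
 *   stream-dispatcher = "akka.persistence.dispatchers.default-stream-dispatcher"
 *   use-lz4-compression = false
 *   lz4-blocksize = "256KB"
 * }
 * }</pre>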
*
* @author Thomas Pantelis
*/
public final class LocalSnapshotStore extends SnapshotStore {
private static final Logger LOG = LoggerFactory.getLogger(LocalSnapshotStore.class);
private static final int PERSISTENCE_ID_START_INDEX = "snapshot-".length();
private final InputOutputStreamFactory streamFactory;
private final ExecutionContext executionContext;
private final int maxLoadAttempts;
private final File snapshotDir;
public LocalSnapshotStore(final Config config) {
executionContext = context().system().dispatchers().lookup(config.getString("stream-dispatcher"));
snapshotDir = new File(config.getString("dir"));
final int localMaxLoadAttempts = config.getInt("max-load-attempts");
maxLoadAttempts = localMaxLoadAttempts > 0 ? localMaxLoadAttempts : 1;
if (config.getBoolean("use-lz4-compression")) {
final String size = config.getString("lz4-blocksize");
streamFactory = InputOutputStreamFactory.lz4(size);
LOG.debug("Using LZ4 Input/Output Stream, blocksize: {}", size);
} else {
streamFactory = InputOutputStreamFactory.simple();
LOG.debug("Using plain Input/Output Stream");
}
LOG.debug("LocalSnapshotStore ctor: snapshotDir: {}, maxLoadAttempts: {}", snapshotDir, maxLoadAttempts);
}
@Override
public void preStart() throws Exception {
if (!snapshotDir.isDirectory()) {
// Try to create the directory, on failure double check if someone else beat us to it.
if (!snapshotDir.mkdirs() && !snapshotDir.isDirectory()) {
throw new IOException("Failed to create snapshot directory " + snapshotDir.getCanonicalPath());
}
}
super.preStart();
}
@Override
    public Future<Optional<SelectedSnapshot>> doLoadAsync(final String persistenceId,
final SnapshotSelectionCriteria criteria) {
LOG.debug("In doLoadAsync - persistenceId: {}, criteria: {}", persistenceId, criteria);
// Select the youngest 'maxLoadAttempts' snapshots that match the criteria. This may help in situations where
// saving of a snapshot could not be completed because of a JVM crash. Hence, an attempt to load that snapshot
// will fail but loading an older snapshot may succeed.
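        // For example, with maxLoadAttempts = 3 and five matching snapshots s1..s5 (oldest to newest),
        // the reverse()/limit() pipeline below keeps [s5, s4, s3], newest first, so the newest snapshot
        // that can actually be read wins.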
        Deque<SnapshotMetadata> metadatas = getSnapshotMetadatas(persistenceId, criteria).stream()
.sorted(LocalSnapshotStore::compare).collect(reverse()).stream().limit(maxLoadAttempts)
.collect(Collectors.toCollection(ArrayDeque::new));
if (metadatas.isEmpty()) {
return Futures.successful(Optional.empty());
}
LOG.debug("doLoadAsync - found: {}", metadatas);
return Futures.future(() -> doLoad(metadatas), executionContext);
}
    private Optional<SelectedSnapshot> doLoad(final Deque<SnapshotMetadata> metadatas) throws IOException {
SnapshotMetadata metadata = metadatas.removeFirst();
File file = toSnapshotFile(metadata);
LOG.debug("doLoad {}", file);
try {
Object data = deserialize(file);
LOG.debug("deserialized data: {}", data);
return Optional.of(new SelectedSnapshot(metadata, data));
} catch (IOException e) {
LOG.error("Error loading snapshot file {}, remaining attempts: {}", file, metadatas.size(), e);
if (metadatas.isEmpty()) {
throw e;
}
return doLoad(metadatas);
}
}
private Object deserialize(final File file) throws IOException {
return JavaSerializer.currentSystem().withValue((ExtendedActorSystem) context().system(),
(Callable