/*
* Copyright (c) 2008-2016, Hazelcast, Inc. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hazelcast.cache.impl.operation;

import com.hazelcast.cache.impl.CachePartitionSegment;
import com.hazelcast.cache.impl.ICacheRecordStore;
import com.hazelcast.cache.impl.ICacheService;
import com.hazelcast.cache.impl.record.CacheRecord;
import com.hazelcast.config.CacheConfig;
import com.hazelcast.nio.ObjectDataInput;
import com.hazelcast.nio.ObjectDataOutput;
import com.hazelcast.nio.serialization.Data;
import com.hazelcast.spi.AbstractOperation;
import com.hazelcast.util.Clock;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Map;

/**
 * Replication operation is the data migration operation of {@link com.hazelcast.cache.impl.CacheRecordStore}.
 *
 * A cache record store's records and configurations are migrated to their new nodes.
 *
 * Steps:
 *
 * - Serialize all non-expired data.
 * - Deserialize the data and config.
 * - Create the configuration in the new node's cache service.
 * - Insert each record into the {@link ICacheRecordStore}.
 *
 * Note: This is a per-partition operation.
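 *
 * A minimal usage sketch, assuming the owning service implements
 * {@code com.hazelcast.spi.MigrationAwareService} (the {@code segments} array
 * below is a hypothetical per-partition lookup, not part of this class):
 * <pre>{@code
 * public Operation prepareReplicationOperation(PartitionReplicationEvent event) {
 *     CachePartitionSegment segment = segments[event.getPartitionId()];
 *     CacheReplicationOperation op =
 *             new CacheReplicationOperation(segment, event.getReplicaIndex());
 *     // Skip sending the operation entirely when there is nothing to replicate.
 *     return op.isEmpty() ? null : op;
 * }
 * }</pre>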
*/
public class CacheReplicationOperation extends AbstractOperation {

    protected Map<String, Map<Data, CacheRecord>> data;
    protected List<CacheConfig> configs;

    public CacheReplicationOperation() {
        data = new HashMap<String, Map<Data, CacheRecord>>();
        configs = new ArrayList<CacheConfig>();
    }

    public CacheReplicationOperation(CachePartitionSegment segment, int replicaIndex) {
        data = new HashMap<String, Map<Data, CacheRecord>>();
        Iterator<ICacheRecordStore> iter = segment.recordStoreIterator();
        while (iter.hasNext()) {
            ICacheRecordStore cacheRecordStore = iter.next();
            CacheConfig cacheConfig = cacheRecordStore.getConfig();
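            // Replicate this record store only if its total backup count
            // (sync + async) covers the requested replica index; stores with
            // fewer configured backups keep no copy at higher replica indexes.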
            if (cacheConfig.getAsyncBackupCount() + cacheConfig.getBackupCount() >= replicaIndex) {
                data.put(cacheRecordStore.getName(), cacheRecordStore.getReadOnlyRecords());
            }
        }
        configs = new ArrayList<CacheConfig>(segment.getCacheConfigs());
    }

    @Override
    public void beforeRun() throws Exception {
        // Migrate CacheConfigs first
        ICacheService service = getService();
        for (CacheConfig config : configs) {
            service.putCacheConfigIfAbsent(config);
        }
    }

    @Override
    public void run() throws Exception {
        ICacheService service = getService();
        for (Map.Entry<String, Map<Data, CacheRecord>> entry : data.entrySet()) {
            ICacheRecordStore cache = service.getOrCreateRecordStore(entry.getKey(), getPartitionId());
            Map<Data, CacheRecord> map = entry.getValue();
            Iterator<Map.Entry<Data, CacheRecord>> iter = map.entrySet().iterator();
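            // Drain the migrated map as records are inserted: each entry is
            // removed from the temporary map before being put into the store.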
            while (iter.hasNext()) {
                Map.Entry<Data, CacheRecord> next = iter.next();
                Data key = next.getKey();
                CacheRecord record = next.getValue();
                iter.remove();
                cache.putRecord(key, record);
            }
        }
        data.clear();
    }

    @Override
    public String getServiceName() {
        return ICacheService.SERVICE_NAME;
    }

    @Override
    protected void writeInternal(ObjectDataOutput out) throws IOException {
        super.writeInternal(out);
        int confSize = configs.size();
        out.writeInt(confSize);
        for (CacheConfig config : configs) {
            out.writeObject(config);
        }
        int count = data.size();
        out.writeInt(count);
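        // Snapshot the wall clock once so every record in this batch is
        // checked for expiry against the same instant.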
        long now = Clock.currentTimeMillis();
        for (Map.Entry<String, Map<Data, CacheRecord>> entry : data.entrySet()) {
            Map<Data, CacheRecord> cacheMap = entry.getValue();
            int subCount = cacheMap.size();
            out.writeInt(subCount);
            out.writeUTF(entry.getKey());
            for (Map.Entry<Data, CacheRecord> e : cacheMap.entrySet()) {
                final Data key = e.getKey();
                final CacheRecord record = e.getValue();
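                // Expired records are not worth migrating; skip them even
                // though they were counted in the subCount written above.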
                if (record.isExpiredAt(now)) {
                    continue;
                }
                out.writeData(key);
                out.writeObject(record);
            }
            // A null Data terminates this cache's record stream on read:
            // if expired entries were skipped above, the number of records
            // actually written is smaller than the subCount written before.
            out.writeData(null);
        }
    }

    @Override
    protected void readInternal(ObjectDataInput in) throws IOException {
        super.readInternal(in);
        int confSize = in.readInt();
        for (int i = 0; i < confSize; i++) {
            final CacheConfig config = in.readObject();
            configs.add(config);
        }
        int count = in.readInt();
        for (int i = 0; i < count; i++) {
            int subCount = in.readInt();
            String name = in.readUTF();
            Map<Data, CacheRecord> m = new HashMap<Data, CacheRecord>(subCount);
            data.put(name, m);
            // Read subCount + 1 entries because of the null Data terminator
            // written as the last entry, which adds one more Data to the stream.
            for (int j = 0; j < subCount + 1; j++) {
                Data key = in.readData();
                // A null or empty Data marks the end of this cache's records:
                // the actual record count may be smaller than the subCount on
                // the stream because expired entries were skipped on write.
                if (key == null || key.dataSize() == 0) {
                    break;
                }
                CacheRecord record = in.readObject();
                m.put(key, record);
            }
        }
    }
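
    // True when there is neither config nor record data to replicate;
    // a caller can use this to avoid sending a no-op replication operation.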
    public boolean isEmpty() {
        return (configs == null || configs.isEmpty()) && (data == null || data.isEmpty());
    }
}