com.twitter.distributedlog.logsegment.LogSegmentCache
/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.twitter.distributedlog.logsegment;

import com.google.common.collect.Sets;
import com.twitter.distributedlog.DistributedLogConstants;
import com.twitter.distributedlog.LogSegmentMetadata;
import com.twitter.distributedlog.exceptions.UnexpectedException;
import org.apache.commons.lang3.tuple.Pair;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

/**
 * Managing log segments in a local cache.
 *
 * <p>
 * Caching of log segment metadata assumes that the data contained in the ZNodes for individual
 * log segments is never updated after creation, i.e. we never call setData. A log segment
 * is finalized by creating a new ZNode and deleting the in-progress node. This code will have
 * to change if that behavior changes.
 * </p>
 */
public class LogSegmentCache {

    static final Logger LOG = LoggerFactory.getLogger(LogSegmentCache.class);

    protected final String streamName;
    protected final Map<String, LogSegmentMetadata> logSegments =
            new HashMap<String, LogSegmentMetadata>();
    protected final ConcurrentMap<Long, LogSegmentMetadata> lid2LogSegments =
            new ConcurrentHashMap<Long, LogSegmentMetadata>();

    public LogSegmentCache(String streamName) {
        this.streamName = streamName;
    }

    /**
     * Retrieve log segments from the cache.
     *
     * - first sort the log segments in ascending order
     * - do validation and assign corresponding sequence ids
     * - apply the comparator after validation
     *
     * @param comparator
     *          comparator to sort the returned log segments.
     * @return list of sorted and validated log segments.
     * @throws UnexpectedException if an unexpected condition is detected (e.g. a ledger sequence number gap)
     */
    public List<LogSegmentMetadata> getLogSegments(Comparator<LogSegmentMetadata> comparator)
            throws UnexpectedException {
        List<LogSegmentMetadata> segmentsToReturn;
        synchronized (logSegments) {
            segmentsToReturn = new ArrayList<LogSegmentMetadata>(logSegments.size());
            segmentsToReturn.addAll(logSegments.values());
        }
        Collections.sort(segmentsToReturn, LogSegmentMetadata.COMPARATOR);
        long startSequenceId = DistributedLogConstants.UNASSIGNED_SEQUENCE_ID;
        LogSegmentMetadata prevSegment = null;
        for (int i = 0; i < segmentsToReturn.size(); i++) {
            LogSegmentMetadata segment = segmentsToReturn.get(i);
            // Validation on ledger sequence number: duplicate entries are fine -- they are the
            // same log segment in different states (inprogress vs completed), which can happen
            // when a log segment is completed without a transaction. A gap between consecutive
            // sequence numbers, however, is an error.
            if (null != prevSegment
                    && prevSegment.getVersion() >= LogSegmentMetadata.LogSegmentMetadataVersion.VERSION_V2_LEDGER_SEQNO.value
                    && segment.getVersion() >= LogSegmentMetadata.LogSegmentMetadataVersion.VERSION_V2_LEDGER_SEQNO.value
                    && prevSegment.getLogSegmentSequenceNumber() != segment.getLogSegmentSequenceNumber()
                    && prevSegment.getLogSegmentSequenceNumber() + 1 != segment.getLogSegmentSequenceNumber()) {
                LOG.error("{} found ledger sequence number gap between log segment {} and {}",
                        new Object[] { streamName, prevSegment, segment });
                throw new UnexpectedException(streamName + " found ledger sequence number gap between log segment "
                        + prevSegment.getLogSegmentSequenceNumber() + " and " + segment.getLogSegmentSequenceNumber());
            }
            // Assign sequence ids: each completed segment advances the running start sequence id;
            // the first in-progress segment inherits it, then the scan terminates.
            if (!segment.isInProgress()) {
                if (segment.supportsSequenceId()) {
                    startSequenceId = segment.getStartSequenceId() + segment.getRecordCount();
                    if (null != prevSegment && prevSegment.supportsSequenceId()
                            && prevSegment.getStartSequenceId() > segment.getStartSequenceId()) {
                        LOG.warn("{} found decreasing start sequence id in log segment {}, previous is {}",
                                new Object[] { streamName, segment, prevSegment });
                    }
                } else {
                    startSequenceId = DistributedLogConstants.UNASSIGNED_SEQUENCE_ID;
                }
            } else {
                if (segment.supportsSequenceId()) {
                    LogSegmentMetadata newSegment = segment.mutator()
                            .setStartSequenceId(startSequenceId == DistributedLogConstants.UNASSIGNED_SEQUENCE_ID
                                    ? 0L : startSequenceId)
                            .build();
                    segmentsToReturn.set(i, newSegment);
                }
                break;
            }
            prevSegment = segment;
        }
        if (comparator != LogSegmentMetadata.COMPARATOR) {
            Collections.sort(segmentsToReturn, comparator);
        }
        return segmentsToReturn;
    }

    /**
     * Add the segment metadata for name to the cache.
     *
     * @param name
     *          segment name.
     * @param metadata
     *          segment metadata.
     */
    public void add(String name, LogSegmentMetadata metadata) {
        synchronized (logSegments) {
            if (!logSegments.containsKey(name)) {
                logSegments.put(name, metadata);
                LOG.info("{} added log segment ({} : {}) to cache.",
                        new Object[]{ streamName, name, metadata });
            }
            // For a given ledger id, prefer completed metadata over in-progress metadata.
            LogSegmentMetadata oldMetadata = lid2LogSegments.remove(metadata.getLedgerId());
            if (null == oldMetadata) {
                lid2LogSegments.put(metadata.getLedgerId(), metadata);
            } else if (oldMetadata.isInProgress() && !metadata.isInProgress()) {
                lid2LogSegments.put(metadata.getLedgerId(), metadata);
            } else {
                lid2LogSegments.put(oldMetadata.getLedgerId(), oldMetadata);
            }
        }
    }

    /**
     * Retrieve the log segment named name from the cache.
     *
     * @param name
     *          name of the log segment.
     * @return log segment metadata, or null if not cached.
     */
    public LogSegmentMetadata get(String name) {
        synchronized (logSegments) {
            return logSegments.get(name);
        }
    }

    /**
     * Update the log segment cache with removed/added segments.
     *
     * @param segmentsRemoved
     *          segments that were removed
     * @param segmentsAdded
     *          segments that were added
     */
    public void update(Set<String> segmentsRemoved,
                       Map<String, LogSegmentMetadata> segmentsAdded) {
        synchronized (logSegments) {
            for (Map.Entry<String, LogSegmentMetadata> entry : segmentsAdded.entrySet()) {
                add(entry.getKey(), entry.getValue());
            }
            for (String segment : segmentsRemoved) {
                remove(segment);
            }
        }
    }

    /**
     * Diff the cached segments against the newly received segment list segmentsReceived.
     *
     * @param segmentsReceived
     *          newly received segment list
     * @return segments added (left) and removed (right).
     */
    public Pair<Set<String>, Set<String>> diff(Set<String> segmentsReceived) {
        Set<String> segmentsAdded;
        Set<String> segmentsRemoved;
        synchronized (logSegments) {
            Set<String> segmentsCached = logSegments.keySet();
            segmentsAdded = Sets.difference(segmentsReceived, segmentsCached).immutableCopy();
            segmentsRemoved = Sets.difference(segmentsCached, segmentsReceived).immutableCopy();
        }
        return Pair.of(segmentsAdded, segmentsRemoved);
    }

    /**
     * Remove the log segment named name from the cache.
     *
     * @param name
     *          name of the log segment.
     * @return log segment metadata, or null if it was not cached.
     */
    public LogSegmentMetadata remove(String name) {
        synchronized (logSegments) {
            LogSegmentMetadata metadata = logSegments.remove(name);
            if (null != metadata) {
                lid2LogSegments.remove(metadata.getLedgerId(), metadata);
                LOG.debug("Removed log segment ({} : {}) from cache.", name, metadata);
            }
            return metadata;
        }
    }
}
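
For context, here is a minimal sketch of how a caller might drive the diff/update cycle after re-listing a stream's log segment ZNodes (e.g. from a ZooKeeper watcher callback). The class name LogSegmentCacheSyncExample and the helper readLogSegmentMetadata are hypothetical stand-ins, not part of DistributedLog; diff() and update() are the real methods above. The design point it illustrates: diff() compares by segment name only, so the caller fetches metadata just for newly added segments, which is safe because segment ZNodes are never updated in place.

import com.twitter.distributedlog.LogSegmentMetadata;
import com.twitter.distributedlog.logsegment.LogSegmentCache;
import org.apache.commons.lang3.tuple.Pair;

import java.util.HashMap;
import java.util.Map;
import java.util.Set;

public class LogSegmentCacheSyncExample {

    // Hypothetical helper: in DistributedLog the metadata would be read from the
    // segment's ZNode; this stub exists only to make the sketch self-contained.
    static LogSegmentMetadata readLogSegmentMetadata(String segmentName) {
        throw new UnsupportedOperationException("stub for illustration only");
    }

    // Reconcile the cache with a freshly received listing of segment names.
    static void sync(LogSegmentCache cache, Set<String> segmentsReceived) {
        // diff() returns the segments added (left) and removed (right)
        // relative to what is currently cached.
        Pair<Set<String>, Set<String>> diff = cache.diff(segmentsReceived);
        Set<String> added = diff.getLeft();
        Set<String> removed = diff.getRight();

        // Fetch metadata only for segments the cache has not seen yet;
        // cached entries never need refreshing since ZNodes are write-once.
        Map<String, LogSegmentMetadata> segmentsAdded =
                new HashMap<String, LogSegmentMetadata>();
        for (String name : added) {
            segmentsAdded.put(name, readLogSegmentMetadata(name));
        }

        // Apply both sides of the diff under the cache's internal lock.
        cache.update(removed, segmentsAdded);
    }
}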



