/*
* Copyright (c) 2000, 2023, Oracle and/or its affiliates.
*
* Licensed under the Universal Permissive License v 1.0 as shown at
* https://oss.oracle.com/licenses/upl.
*/
package com.tangosol.internal.net.topic.impl.paged;
import com.oracle.coherence.common.base.Exceptions;
import com.oracle.coherence.common.base.Logger;
import com.oracle.coherence.common.collections.ConcurrentHashMap;
import com.tangosol.coherence.config.Config;
import com.tangosol.internal.net.NamedCacheDeactivationListener;
import com.tangosol.internal.net.topic.impl.paged.agent.EnsureSubscriptionProcessor;
import com.tangosol.internal.net.topic.impl.paged.agent.EvictSubscriber;
import com.tangosol.internal.net.topic.impl.paged.filter.UnreadTopicContentFilter;
import com.tangosol.internal.net.topic.impl.paged.model.NotificationKey;
import com.tangosol.internal.net.topic.impl.paged.model.Page;
import com.tangosol.internal.net.topic.impl.paged.model.ContentKey;
import com.tangosol.internal.net.topic.impl.paged.model.PagedPosition;
import com.tangosol.internal.net.topic.impl.paged.model.SubscriberGroupId;
import com.tangosol.internal.net.topic.impl.paged.model.SubscriberId;
import com.tangosol.internal.net.topic.impl.paged.model.SubscriberInfo;
import com.tangosol.internal.net.topic.impl.paged.model.Subscription;
import com.tangosol.internal.net.topic.impl.paged.model.Usage;
import com.tangosol.io.ClassLoaderAware;
import com.tangosol.io.Serializer;
import com.tangosol.net.CacheService;
import com.tangosol.net.DistributedCacheService;
import com.tangosol.net.Member;
import com.tangosol.net.MemberEvent;
import com.tangosol.net.MemberListener;
import com.tangosol.net.NamedCache;
import com.tangosol.net.PagedTopicService;
import com.tangosol.net.cache.TypeAssertion;
import com.tangosol.net.topic.NamedTopic;
import com.tangosol.net.topic.Position;
import com.tangosol.net.topic.Publisher;
import com.tangosol.net.topic.Subscriber;
import com.tangosol.util.AbstractMapListener;
import com.tangosol.util.Aggregators;
import com.tangosol.util.Binary;
import com.tangosol.util.Filter;
import com.tangosol.util.Filters;
import com.tangosol.util.HashHelper;
import com.tangosol.util.InvocableMap;
import com.tangosol.util.InvocableMapHelper;
import com.tangosol.util.MapEvent;
import com.tangosol.util.NullImplementation;
import com.tangosol.util.ValueExtractor;
import com.tangosol.util.aggregator.Count;
import com.tangosol.util.aggregator.GroupAggregator;
import com.tangosol.util.extractor.EntryExtractor;
import com.tangosol.util.extractor.ReflectionExtractor;
import java.io.PrintStream;
import java.util.Collection;
import java.util.Collections;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.function.BiFunction;
import java.util.function.Consumer;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import java.util.stream.Stream;
import static com.tangosol.net.cache.TypeAssertion.withTypes;
/**
* This class encapsulates operations on the set of {@link NamedCache}s
* that are used to hold the underlying data for a topic.
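*
* A {@code PagedTopicCaches} is typically created internally from a
* {@link PagedTopicService}. The sketch below is illustrative only (the topic
* name {@code "orders"} is hypothetical):
* <pre>{@code
* PagedTopicService service = ...;
* try (PagedTopicCaches caches = new PagedTopicCaches("orders", service))
*     {
*     int cChannel   = caches.getChannelCount();
*     int cPartition = caches.getPartitionCount();
*     }
* }</pre>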
*
* @author jk 2015.06.19
* @since Coherence 14.1.1
*/
@SuppressWarnings("rawtypes")
public class PagedTopicCaches
implements ClassLoaderAware, AutoCloseable
{
// ----- constructors ---------------------------------------------------
/**
* Create a {@link PagedTopicCaches}.
*
* @param sName the name of the topic
* @param cacheService the {@link CacheService} owning the underlying caches
*/
public PagedTopicCaches(String sName, PagedTopicService cacheService)
{
this(sName, cacheService, true);
}
/**
* Create a {@link PagedTopicCaches}.
*
* @param sName the name of the topic
* @param cacheService the {@link CacheService} owning the underlying caches
* @param registerListeners {@code true} to register listeners
*/
public PagedTopicCaches(String sName, PagedTopicService cacheService, boolean registerListeners)
{
this(sName, cacheService, null, registerListeners);
}
/**
* Create a {@link PagedTopicCaches}.
*
* @param sName the name of the topic
* @param cacheService the {@link CacheService} owning the underlying caches
* @param functionCache the function to invoke to obtain each underlying cache
* @param registerListeners {@code true} to register listeners
*/
PagedTopicCaches(String sName, PagedTopicService cacheService,
BiFunction<String, ClassLoader, NamedCache> functionCache,
boolean registerListeners)
{
if (sName == null || sName.isEmpty())
{
throw new IllegalArgumentException("The name argument cannot be null or empty String");
}
if (cacheService == null)
{
throw new IllegalArgumentException("The cacheService argument cannot be null");
}
if (functionCache == null)
{
functionCache = cacheService::ensureCache;
}
f_sTopicName = sName;
f_topicService = cacheService;
f_sCacheServiceName = cacheService.getInfo().getServiceName();
f_cPartition = cacheService.getPartitionCount();
f_functionCache = functionCache;
f_dependencies = cacheService.getTopicBackingMapManager().getTopicDependencies(sName);
initializeCaches(registerListeners);
m_state = State.Active;
}
// ----- TopicCaches methods --------------------------------------
/**
* Return the serializer.
*
* @return the serializer
*/
public Serializer getSerializer()
{
return f_topicService.getSerializer();
}
@Override
public void close()
{
release();
}
/**
* Release the PagedTopicCaches.
*/
public void release()
{
releaseOrDestroy(/* destroy */ false);
}
/**
* Destroy the PagedTopicCaches.
*/
public void destroy()
{
releaseOrDestroy(/* destroy */ true);
}
/**
* Returns {@code true} if the caches are active.
*
* @return true if the caches are active; false otherwise
*/
public boolean isActive()
{
State state = m_state;
return state == State.Active || state == State.Disconnected;
}
/**
* Returns {@code true} if the caches are destroyed,
* specifically the page cache for the topic.
*
* @return true if the caches are destroyed; false otherwise
*/
public boolean isDestroyed()
{
return Pages.isDestroyed();
}
/**
* Returns whether the caches are released,
* specifically the page cache for the topic.
*
* @return true if the caches are released; false otherwise
*/
public boolean isReleased()
{
return Pages.isReleased();
}
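/**
 * Add a {@link Listener} to be notified of life-cycle events for this {@link PagedTopicCaches}.
 *
 * @param listener  the {@link Listener} to add
 */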
public void addListener(Listener listener)
{
m_mapListener.put(listener, Boolean.TRUE);
}
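/**
 * Remove a previously added {@link Listener}.
 *
 * @param listener  the {@link Listener} to remove
 */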
public void removeListener(Listener listener)
{
m_mapListener.remove(listener);
}
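/**
 * Ensure that this {@link PagedTopicCaches} is connected.
 *
 * If the caches are currently disconnected, each underlying cache is touched to
 * re-establish it and any registered {@link Listener listeners} are notified via
 * {@link Listener#onConnect()}.
 */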
public void ensureConnected()
{
if (m_state == State.Disconnected)
{
synchronized (this)
{
if (m_state == State.Disconnected)
{
m_state = State.Active;
f_setCaches.forEach(NamedCache::size);
Set<Listener> setListener = m_mapListener.keySet();
for (Listener listener : setListener)
{
try
{
listener.onConnect();
}
catch (Throwable t)
{
Logger.err(t);
}
}
}
}
}
}
// ----- ClassLoaderAware methods ---------------------------------------
@Override
public ClassLoader getContextClassLoader()
{
return f_topicService.getContextClassLoader();
}
@Override
public void setContextClassLoader(ClassLoader classLoader)
{
throw new UnsupportedOperationException();
}
// ----- accessor methods -----------------------------------------------
/**
* Return the topic name.
*
* @return the topic name
*/
public String getTopicName()
{
return f_sTopicName;
}
/**
* Get the start page for this topic upon creation.
*
* @return the start page
*/
public int getBasePage()
{
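// hash the topic name into the partition range so that different topics start on different base pages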
return Math.abs(f_sTopicName.hashCode() % f_cPartition);
}
/**
* Return the partition count for this topic.
*
* @return the partition count for this topic
*/
public int getPartitionCount()
{
return f_cPartition;
}
/**
* Return the channel count for this topic.
*
* @return the channel count for this topic
*/
public int getChannelCount()
{
return f_topicService.getChannelCount(f_sTopicName);
}
/**
* Return the default publisher channel count for this topic.
*
* If the system property {@link Publisher#PROP_CHANNEL_COUNT} with a suffix of a dot
* followed by the topic name is set, that value will be used.
*
* If the system property {@link Publisher#PROP_CHANNEL_COUNT} with no suffix is set,
* that value will be used.
*
* If neither property is set the configured channel count will be used.
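*
* For example, the defaults could be overridden with system properties of the
* form shown below, where {@code <channel.count.property>} stands for the value of
* {@link Publisher#PROP_CHANNEL_COUNT} and {@code orders} is a hypothetical topic name:
* <pre>{@code
* -D<channel.count.property>=17          # all topics
* -D<channel.count.property>.orders=34   # only the topic named "orders"
* }</pre>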
*
* @return the default publisher channel count for this topic
*/
public int getPublisherChannelCount()
{
int cChannel = getDependencies().getConfiguredChannelCount();
cChannel = Config.getInteger(Publisher.PROP_CHANNEL_COUNT, cChannel);
return Config.getInteger(Publisher.PROP_CHANNEL_COUNT + "." + f_sTopicName, cChannel);
}
/**
* Return the set of NotificationKeys covering all partitions for the given notifier
*
* @param nNotifier the notifier id
*
* @return the NotificationKeys
*/
public Set<NotificationKey> getPartitionNotifierSet(int nNotifier)
{
Set<NotificationKey> setKey = new HashSet<>();
for (int i = 0; i < f_cPartition; ++i)
{
setKey.add(new NotificationKey(i, nNotifier));
}
return setKey;
}
/**
* Return the unit of order for a topic partition.
*
* @param nPartition the partition
*
* @return the unit of order
*/
public int getUnitOfOrder(int nPartition)
{
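// offset the partition number by the topic name hash so each topic partition gets its own unit-of-order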
return f_sTopicName.hashCode() + nPartition;
}
/**
* Return the associated {@link PagedTopicService}.
*
* @return the {@link PagedTopicService}
*/
public PagedTopicService getService()
{
return f_topicService;
}
/**
* Return the {@link PagedTopicDependencies}.
*
* @return the {@link PagedTopicDependencies}
*/
public PagedTopicDependencies getDependencies()
{
return f_dependencies;
}
/**
* Returns the {@link Usage.Key} for the {@link Usage} entry that tracks the topic's tail for
* a given channel.
*
* We don't just use partition zero as that would concentrate extra load on a single partition
* when there are many channels.
*
* @param nChannel the channel number
*
* @return the {@link Usage.Key} for the {@link Usage} entry that tracks the topic's head and tail
*/
public Usage.Key getUsageSyncKey(int nChannel)
{
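// hash the topic name with the channel so the Usage sync entries for different channels map to different partitions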
int nPart = Math.abs((HashHelper.hash(f_sTopicName.hashCode(), nChannel) % f_cPartition));
return new Usage.Key(nPart, nChannel);
}
/**
* Returns {@code true} if a subscriber in the specified group has committed the specified {@link Position}
* in the specified channel.
*
* @param groupId the subscriber group identifier
* @param nChannel the channel identifier
* @param position the {@link Position} to check
*
* @return {@code true} if a subscriber in the specified group has committed the specified {@link Position}
* in the specified channel; or {@code false} if the position is not committed or the subscriber
* group does not exist
*/
public boolean isCommitted(SubscriberGroupId groupId, int nChannel, Position position)
{
if (position instanceof PagedPosition && nChannel >= 0 && nChannel < getChannelCount())
{
Map<Integer, Position> map = getLastCommitted(groupId);
Position posCommitted = map.get(nChannel);
return posCommitted != null && posCommitted.compareTo(position) >= 0;
}
return false;
}
/**
* Returns a {@link Map} of channel numbers to the latest {@link Position} committed
* for that channel. If a channel has had zero commits it will be missing
* from the map's keySet.
*
* @param subscriberGroupId the {@link SubscriberGroupId identifier} of the subscriber
* group to obtain the commits for
*
* @return a {@link Map} of channel numbers to the latest {@link Position} committed
* for that channel; channels with no commits are absent from the map
*/
public Map<Integer, Position> getLastCommitted(SubscriberGroupId subscriberGroupId)
{
InvocableMap.EntryAggregator aggregatorPos
= Aggregators.comparableMax(Subscription::getCommittedPosition);
// Aggregate the subscription commits and remove any null values from the returned map
return getPositions(subscriberGroupId, aggregatorPos);
}
/**
* Returns a {@link Map} of the {@link Position} of the head for each channel for a subscriber group.
*
* @param subscriberGroupId the {@link SubscriberGroupId identifier} of the subscriber
* group to obtain the heads for
* @param nSubscriberId the identifier of the subscriber to obtain the heads for
*
* @return a {@link Map} of the {@link Position} of the head for each channel for a subscriber group
*/
@SuppressWarnings("unused")
public Map<Integer, Position> getHeads(SubscriberGroupId subscriberGroupId, long nSubscriberId)
{
InvocableMap.EntryAggregator aggregatorPos
= Aggregators.comparableMin(new Subscription.HeadExtractor(nSubscriberId));
// Aggregate the subscription heads and remove any null values from the returned map
return getPositions(subscriberGroupId, aggregatorPos);
}
private Map getPositions(SubscriberGroupId subscriberGroupId, InvocableMap.EntryAggregator aggregator)
{
ValueExtractor extractorChannel = new ReflectionExtractor<>("getChannelId", new Object[0], EntryExtractor.KEY);
ValueExtractor extractorGroup = new ReflectionExtractor<>("getGroupId", new Object[0], EntryExtractor.KEY);
Filter filter = Filters.equal(extractorGroup, subscriberGroupId);
Filter filterPosition = Filters.not(Filters.equal(PagedPosition::getPage, Page.NULL_PAGE));
// Aggregate the subscription commits and remove any null values from the returned map
return Subscriptions.aggregate(filter, GroupAggregator.createInstance(extractorChannel, aggregator, filterPosition))
.entrySet()
.stream()
.filter(e -> e.getKey() != Page.EMPTY && e.getValue() != null)
.collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
}
/**
* Returns a {@link Map} of the {@link Position} of the head for each channel.
*
* @return a {@link Map} of the {@link Position} of the head for each channel
*/
public Map getHeads()
{
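// group the Page entries by channel (extracted from the key) and take the minimum head position per channel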
ValueExtractor extractorChannel
= new ReflectionExtractor<>("getChannelId", new Object[0], EntryExtractor.KEY);
InvocableMap.EntryAggregator aggregatorTail
= Aggregators.comparableMin(Page.HeadExtractor.INSTANCE);
return Pages.aggregate(GroupAggregator.createInstance(extractorChannel, aggregatorTail));
}
/**
* Returns a {@link Map} of the {@link Position} of the tail for each channel.
*
* @return a {@link Map} of the {@link Position} of the tail for each channel
*/
public Map getTails()
{
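// group the Page entries by channel (extracted from the key) and take the maximum tail position per channel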
ValueExtractor extractorChannel
= new ReflectionExtractor<>("getChannelId", new Object[0], EntryExtractor.KEY);
InvocableMap.EntryAggregator aggregatorTail
= Aggregators.comparableMax(Page.TailExtractor.INSTANCE);
return Pages.aggregate(GroupAggregator.createInstance(extractorChannel, aggregatorTail));
}
/**
* Returns the {@link NamedTopic.ElementCalculator} to use to calculate message sizes.
*
* @return the {@link NamedTopic.ElementCalculator} to use to calculate message sizes
*/
public NamedTopic.ElementCalculator getElementCalculator()
{
return getDependencies().getElementCalculator();
}
/**
* Returns the identifiers for all the subscribers belonging to a subscriber group.
*
* There is no guarantee that all the subscribers are actually still active. If a subscriber
* process exits without closing the subscriber, the identifier remains in the cache until it
* is timed-out.
*
* @param sGroupName the subscriber group name to get subscribers for
*
* @return the identifiers for all the subscribers belonging to a subscriber group
*/
public Set<SubscriberId> getSubscribers(String sGroupName)
{
return f_topicService.getSubscribers(f_sTopicName, SubscriberGroupId.withName(sGroupName));
}
/**
* Return the set of {@link Subscriber.Name named} subscriber groups, including statically configured subscriber groups.
*
* @return the set of named subscriber groups
*/
public Set<String> getSubscriberGroups()
{
return getSubscriberGroupsIds(false)
.stream()
.map(SubscriberGroupId::getGroupName)
.collect(Collectors.toSet());
}
/**
* Return the set of subscriber group(s) for the topic, optionally including pseudo-groups
* created for anonymous subscribers.
*
* @param fAnonymous {@code true} to include anonymous subscriber groups
*
* @return the set of named subscriber groups
*/
public Set<SubscriberGroupId> getSubscriberGroupsIds(boolean fAnonymous)
{
Stream<SubscriberGroupId> stream = fAnonymous
? f_topicService.getSubscriberGroups(f_sTopicName).stream()
: f_topicService.getSubscriberGroups(f_sTopicName).stream().filter(SubscriberGroupId::isDurable);
return stream.collect(Collectors.toSet());
}
/**
* Returns an immutable map of subscriber id to channels allocated to that subscriber.
*
* @param sGroup the subscriber group to obtain allocations for
*
* @return an immutable map of subscriber id to channels allocated to that subscriber
*/
public Map<Long, Set<Integer>> getChannelAllocations(String sGroup)
{
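// the allocations are read from the group's Subscription entry for partition zero, channel zero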
Subscription.Key key = new Subscription.Key(0, 0, SubscriberGroupId.withName(sGroup));
Subscription subscription = Subscriptions.get(key);
if (subscription != null)
{
return Collections.unmodifiableMap(subscription.getAllocationMap());
}
return Collections.emptyMap();
}
/**
* Print the channel allocations for the specified subscriber group.
*
* @param sGroup the name of the subscriber group
* @param out the {@link PrintStream} to print to
*/
public void printChannelAllocations(String sGroup, PrintStream out)
{
Map mapMember = f_topicService.getCluster()
.getMemberSet()
.stream()
.collect(Collectors.toMap(Member::getId, Member::toString));
out.println("Subscriber channel allocations for topic \"" + f_sTopicName + "\" subscriber group \"" + sGroup + "\":");
for (Map.Entry<Long, Set<Integer>> entry : getChannelAllocations(sGroup).entrySet())
{
long nId = entry.getKey();
int nMember = PagedTopicSubscriber.memberIdFromId(nId);
out.println("SubscriberId=" + nId + " channels=" + entry.getValue() + " " + mapMember.get(nMember));
}
}
/**
* Disconnect a specific subscriber from a topic.
*
* Disconnecting a subscriber will cause channels to be reallocated and
* positions to be rolled back to the last commit for the channels
* owned by the disconnected subscriber.
*
* @param groupId the name of the subscriber group
* @param id the subscriber id
*/
public void disconnectSubscriber(SubscriberGroupId groupId, SubscriberId id)
{
Subscribers.invoke(new SubscriberInfo.Key(groupId, id.getId()), EvictSubscriber.INSTANCE);
}
/**
* Disconnect all group subscribers from a topic.
*
* Disconnecting subscribers will cause channels to be reallocated and
* positions to be rolled back to the last commit for the channels
* owned by the disconnected subscriber.
*
* @param sGroup the name of the subscriber group
* @param nMember the cluster member to disconnect subscribers for
*
* @return the identifiers of the disconnected subscribers
*/
public long[] disconnectAllSubscribers(String sGroup, int nMember)
{
return disconnectAllSubscribers(SubscriberGroupId.withName(sGroup), nMember);
}
/**
* Disconnect all group subscribers from a topic.
*
* Disconnecting subscribers will cause channels to be reallocated and
* positions to be rolled back to the last commit for the channels
* owned by the disconnected subscriber.
*
* @param id the {@link SubscriberGroupId id} of the subscriber group
* @param nMember the cluster member to disconnect subscribers for
*
* @return the identifiers of the disconnected subscribers
*/
public long[] disconnectAllSubscribers(SubscriberGroupId id, int nMember)
{
Filter filter = Filters.equal(SubscriberInfo.GroupIdExtractor.INSTANCE, id)
.and(Filters.equal(SubscriberInfo.MemberIdExtractor.INSTANCE, nMember));
Map<SubscriberInfo.Key, ?> map = Subscribers.invokeAll(filter, EvictSubscriber.INSTANCE);
return map.keySet().stream().mapToLong(SubscriberInfo.Key::getSubscriberId).toArray();
}
/**
* Disconnect all subscribers in a group.
*
* Disconnecting subscribers will cause channels to be reallocated and
* positions to be rolled back to the last commit for the channels
* owned by the disconnected subscriber.
*
* @param sGroup the name of the subscriber group
*
* @return the identifiers of the disconnected subscribers
*/
public long[] disconnectAllSubscribers(String sGroup)
{
return disconnectAllSubscribers(SubscriberGroupId.withName(sGroup));
}
/**
* Disconnect all subscribers in a group.
*
* Disconnecting subscribers will cause channels to be reallocated and
* positions to be rolled back to the last commit for the channels
* owned by the disconnected subscriber.
*
* @param id the {@link SubscriberGroupId id} of the subscriber group
*
* @return the identifiers of the disconnected subscribers
*/
public long[] disconnectAllSubscribers(SubscriberGroupId id)
{
Filter filter = Filters.equal(SubscriberInfo.GroupIdExtractor.INSTANCE, id);
Map<SubscriberInfo.Key, ?> map = Subscribers.invokeAll(filter, EvictSubscriber.INSTANCE);
return map.keySet().stream().mapToLong(SubscriberInfo.Key::getSubscriberId).toArray();
}
/**
* Disconnect all group subscribers from all groups.
*
* Disconnecting subscribers will cause channels to be reallocated and
* positions to be rolled back to the last commit for the channels
* owned by the disconnected subscriber.
*/
public void disconnectAllSubscribers()
{
for (SubscriberGroupId id : getSubscriberGroupsIds(false))
{
long lSubscription = f_topicService.getSubscriptionId(f_sTopicName, id);
PagedTopicSubscriber.notifyClosed(Subscriptions, id, lSubscription, SubscriberId.NullSubscriber);
}
Subscribers.clear();
}
/**
* Ensure the specified subscriber group exists.
*
* @param sName the name of the group
* @param filter the filter to use to filter messages received by the group
* @param extractor the {@link ValueExtractor} to convert the messages received by the group
*/
public void ensureSubscriberGroup(String sName, Filter<?> filter, ValueExtractor<?, ?> extractor)
{
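// create the group only (fCreateGroupOnly is true), using the null subscriber id so no subscriber is registered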
SubscriberGroupId subscriberGroupId = SubscriberGroupId.withName(sName);
initializeSubscription(subscriberGroupId, SubscriberId.NullSubscriber, 0L, filter, extractor, false, true, false);
}
/**
* Initialise a subscription.
*
* @param subscriberGroupId the subscriber group identifier
* @param subscriberId the subscriber identifier
* @param lSubscription the unique subscription identifier, or zero to have it looked up or created by the service
* @param filter the filter to use to filter messages received by the subscription
* @param extractor the {@link ValueExtractor} function to convert the messages received by the subscription
* @param fReconnect {@code true} if this is a reconnection
* @param fCreateGroupOnly {@code true} if this is to only create a subscriber group
* @param fDisconnected {@code true} if this is an existing, disconnected subscription
*
* @return the pages that are the heads of the channels
*/
protected long[] initializeSubscription(SubscriberGroupId subscriberGroupId,
SubscriberId subscriberId,
long lSubscription,
Filter<?> filter,
ValueExtractor<?, ?> extractor,
boolean fReconnect,
boolean fCreateGroupOnly,
boolean fDisconnected)
{
try
{
String sName = subscriberGroupId.getGroupName();
Set<Subscription.Key> setSubKeys = new HashSet<>(f_cPartition);
if (lSubscription == 0)
{
lSubscription = getService().ensureSubscription(f_sTopicName, subscriberGroupId, subscriberId, filter, extractor);
}
for (int i = 0; i < f_cPartition; ++i)
{
// Note: we ensure against channel 0 in each partition, and it will in turn initialize all channels
setSubKeys.add(new Subscription.Key(i, /*nChannel*/ 0, subscriberGroupId));
}
// Outside of any lock, discover whether the pages are already pinned. Note that since we don't
// hold a lock, this is only useful if the group was already fully initialized (under lock) earlier.
// Otherwise, there is no guarantee that there are no gaps in our pinned pages.
// Check the results to verify whether initialization has already completed.
EnsureSubscriptionProcessor processor = new EnsureSubscriptionProcessor(EnsureSubscriptionProcessor.PHASE_INQUIRE,
null, filter, extractor, subscriberId, fReconnect, fCreateGroupOnly, lSubscription);
Collection results;
if (sName == null)
{
results = null;
}
else
{
CompletableFuture