for the full
* license.
*/
package com.vaadin.data.provider;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Objects;
import java.util.Optional;
import java.util.Set;
import java.util.logging.Logger;
import java.util.stream.Collectors;
import java.util.stream.Stream;
import com.vaadin.shared.Range;
import com.vaadin.shared.data.HierarchicalDataCommunicatorConstants;
import com.vaadin.ui.ItemCollapseAllowedProvider;
import elemental.json.Json;
import elemental.json.JsonObject;
/**
* Mapper for hierarchical data.
*
* Keeps track of the expanded nodes, and the size of the subtrees for each
* expanded node.
*
* This class is a framework internal implementation detail, and can be changed /
* moved at any point. This means that you should not directly use this for
* anything.
*
* @author Vaadin Ltd
* @since 8.1
*
* @param <T>
* the data type
* @param <F>
* the filter type
*/
public class HierarchyMapper implements DataGenerator {
private static Logger getLogger() {
// Logger is looked up on demand rather than cached in a static field.
return Logger.getLogger(HierarchyMapper.class.getName());
}
// childMap is only used for finding parents of items and clean up on
// removing children of expanded nodes.
private Map> childMap = new HashMap<>();
// Maps item id -> parent item for depth and parent-index lookups.
private Map idToParentMap = new HashMap<>();
// The backing hierarchical data provider, fixed at construction time.
private final HierarchicalDataProvider provider;
// Current filter, passed to every child query.
private F filter;
// Current back-end sort orders, passed to every child query.
private List backEndSorting;
// Current in-memory comparator, passed to every child query.
private Comparator inMemorySorting;
// Decides per item whether the user may collapse it; defaults to "always".
private ItemCollapseAllowedProvider itemCollapseAllowedProvider = t -> true;
// Ids of the currently expanded items.
private Set expandedItemIds = new HashSet<>();
// Ids currently within the client-side cache (see getActiveData()).
private Set activeIds = new HashSet();
// Ids collapsed away but still awaiting the client's removal request
// (see getPendingRemovalData()).
private Set pendingRemovalIds = new HashSet();
// Ids removed and re-added before the removal round-trip completed;
// may contain duplicates (see getReAddedData()).
private List reAddedIds = new ArrayList();
// Cached root items, refreshed whenever root children are fetched.
private List rootNodes = new ArrayList();
// Set when the first data request initializes this mapper.
boolean initialized = false;
// Toggles the active-data optimization, on by default.
boolean useActiveDataOptimization = true;
/**
* Constructs a new HierarchyMapper.
*
* @param provider
* the hierarchical data provider for this mapper
*/
public HierarchyMapper(HierarchicalDataProvider provider) {
// The provider is final; all fetches and id lookups delegate to it.
this.provider = provider;
}
/**
* Returns the size of the currently expanded hierarchy.
*
* @return the amount of available data
*/
public int getTreeSize() {
// ensure speedy closing in case the stream is connected to IO channels
try (Stream stream = getHierarchy(null)) {
// flattened hierarchy of all visible nodes; cast assumes the count
// fits in an int
return (int) stream.count();
}
}
/**
* Finds the index of the parent of the item in given target index.
*
* @param item
* the item to get the parent of
* @return the parent index or a negative value if the parent is not found
*
*/
public Integer getParentIndex(T item) {
// -1 signals that the item is a root item or the parent is not visible
return getIndexOf(getParentOfItem(item)).orElse(-1);
}
/**
* Returns whether the given item is expanded.
*
* @param item
* the item to test
* @return {@code true} if item is expanded; {@code false} if not
*/
public boolean isExpanded(T item) {
    // The null pseudo-parent of root nodes is always considered expanded,
    // since root nodes are always visible.
    return item == null
            || expandedItemIds.contains(getDataProvider().getId(item));
}
/**
* Expands the given item. Always returns an empty range if this
* HierarchyMapper hasn't been initialized e.g. by attaching the TreeGrid
* that uses it (no need to add rows if not attached). The range can also be
* empty if the item doesn't exist in this hierarchy, it has already been
* expanded, it doesn't have any children, all its children have been
* filtered out, or expanding it cannot affect the indexing of any active
* items (items within the client-side cache). For the sake of simplicity,
* any items that share a root node with any active item are considered to
* fall within the last category, even if they are positioned after the last
* active item.
*
* @param item
* the item to expand
* @param position
* the index of the item
* @return range of rows added by expanding the item, can be empty
*/
public Range expand(T item, Integer position) {
// doExpand must run first so the expansion state is always recorded,
// even when no rows need to be sent to the client
if (doExpand(item) && initialized && position != null
&& isFunctionallyActive(getDataProvider().getId(item))) {
// ensure speedy closing in case the stream is connected to IO
// channels
try (Stream stream = getHierarchy(item, false, true)) {
// new rows start right below the expanded item
return Range.withLength(position + 1, (int) stream.count());
}
}
return Range.emptyRange();
}
/**
* Expands the given item.
*
* @param item
* the item to expand
* @param position
* the index of item
* @return range of rows added by expanding the item
* @deprecated Use {@link #expand(Object, Integer)} instead.
*/
@Deprecated
public Range doExpand(T item, Optional position) {
    // Backwards-compatible wrapper: unwrap the optional position and
    // delegate to expand(Object, Integer).
    return expand(item, (Integer) position.orElse(null));
}
/**
* Expands the given item if it is collapsed and has children, and returns
* whether this method expanded the item.
*
* @param item
* the item to expand
* @return {@code true} if this method expanded the item, {@code false}
* otherwise
*/
private boolean doExpand(T item) {
    // Nothing to do for items that are already expanded or childless.
    if (isExpanded(item) || !hasChildren(item)) {
        return false;
    }
    expandedItemIds.add(getDataProvider().getId(item));
    return true;
}
/**
* Collapses the given item.
*
* If this HierarchyMapper uses active data optimization, this method always
* returns an empty range if the collapsed item is sufficiently far beyond
* the active data range (not sharing a root node with any active items).
* The optimization doesn't affect the data before the active data range,
* since those changes affect the indexes of the active range.
*
* @param item
* the item to collapse (cannot be null)
* @param position
* the index of the item
*
* @return range of rows removed by collapsing the item, can be empty
*
* @see #useActiveDataOptimization(boolean)
*/
public Range collapse(T item, Integer position) {
Range removedRows = Range.emptyRange();
if (isExpanded(item)) {
// Note: never clear from re-added data here, that must wait
// until the client requests to drop the items
Object id = getDataProvider().getId(item);
// if not functionally active, there is no need to remove children
if (isFunctionallyActive(id)) {
// use cached to ensure all children get removed from the client
List childIds = removeChildrenRecursively(id);
if (position != null) {
// only register empty children if the item is still within
// the data set (could be filtered out or hidden by a
// collapsed parent)
registerChildren(item, Collections.emptyList());
removedRows = Range.withLength(position + 1,
childIds.size());
}
}
// mark collapsed even when no rows needed to be removed
expandedItemIds.remove(id);
}
return removedRows;
}
/**
* Collapses the given item.
*
* @param item
* the item to collapse
* @param position
* the index of item
*
* @return range of rows removed by collapsing the item
* @deprecated Use {@link #collapse(Object, Integer)} instead.
*/
@Deprecated
public Range doCollapse(T item, Optional position) {
    // Backwards-compatible wrapper: unwrap the optional position and
    // delegate to collapse(Object, Integer).
    Integer index = position.isPresent() ? (Integer) position.get() : null;
    return collapse(item, index);
}
/**
 * Adds hierarchy metadata (depth, leaf/collapsed state, collapse
 * permission) to the row data sent to the client, and updates the
 * active-data and pending-removal bookkeeping for the item.
 */
@Override
public void generateData(T item, JsonObject jsonObject) {
JsonObject hierarchyData = Json.createObject();
int depth = getDepth(item);
if (depth >= 0) {
hierarchyData.put(HierarchicalDataCommunicatorConstants.ROW_DEPTH,
depth);
}
Object itemId = getDataProvider().getId(item);
// the item is being sent to the client, so it becomes active
getActiveData().add(itemId);
// No need to update the caches even if this was new addition, data
// communicator always calls fetchItems(Range) before sending data to
// client and that adds all the potentially missing sections, as does
// any expand that affects functionally active rows.
if (getPendingRemovalData().remove(itemId)) {
// If the item's active parent has been collapsed and expanded again
// before the collapse has been fully processed (potentially
// multiple times), this method gets called immediately after each
// expand, but each collapse has to wait for the client request to
// complete the removal. Mark the item for waiting that processing.
getReAddedData().add(itemId);
}
// Note: never attempt to clear reAddedIds within this method, it gets
// frequently called for items that were already active (happens every
// time an item is refreshed for any reason, not just via expanse or
// collapse). Even if the itemId was no longer in the pendingRemovalIds
// when this method was called, the client round-trip may not have
// returned yet.
boolean isLeaf = !getDataProvider().hasChildren(item);
if (isLeaf) {
hierarchyData.put(HierarchicalDataCommunicatorConstants.ROW_LEAF,
true);
} else {
// non-leaf rows also report collapsed state and collapsibility
hierarchyData.put(
HierarchicalDataCommunicatorConstants.ROW_COLLAPSED,
!isExpanded(item));
hierarchyData.put(HierarchicalDataCommunicatorConstants.ROW_LEAF,
false);
hierarchyData.put(
HierarchicalDataCommunicatorConstants.ROW_COLLAPSE_ALLOWED,
getItemCollapseAllowedProvider().test(item));
}
// add hierarchy information to row as metadata
jsonObject.put(
HierarchicalDataCommunicatorConstants.ROW_HIERARCHY_DESCRIPTION,
hierarchyData);
}
/**
* Gets the current item collapse allowed provider.
*
* @return the item collapse allowed provider
*/
public ItemCollapseAllowedProvider getItemCollapseAllowedProvider() {
// never null; defaults to a provider that allows collapsing everything
return itemCollapseAllowedProvider;
}
/**
* Sets the current item collapse allowed provider.
*
* @param itemCollapseAllowedProvider
* the item collapse allowed provider
*/
public void setItemCollapseAllowedProvider(
ItemCollapseAllowedProvider itemCollapseAllowedProvider) {
// consulted per row in generateData(Object, JsonObject)
this.itemCollapseAllowedProvider = itemCollapseAllowedProvider;
}
/**
* Gets the current in-memory sorting.
*
* @return the in-memory sorting
*/
public Comparator getInMemorySorting() {
// may be null if no in-memory sorting has been set
return inMemorySorting;
}
/**
* Sets the current in-memory sorting. This will cause the hierarchy to be
* constructed again.
*
* @param inMemorySorting
* the in-memory sorting
*/
public void setInMemorySorting(Comparator inMemorySorting) {
// takes effect on the next hierarchy fetch
this.inMemorySorting = inMemorySorting;
}
/**
* Gets the current back-end sorting.
*
* @return the back-end sorting
*/
public List getBackEndSorting() {
// may be null if no back-end sorting has been set
return backEndSorting;
}
/**
* Sets the current back-end sorting. This will cause the hierarchy to be
* constructed again.
*
* @param backEndSorting
* the back-end sorting
*/
public void setBackEndSorting(List backEndSorting) {
// takes effect on the next hierarchy fetch
this.backEndSorting = backEndSorting;
}
/**
* Gets the current filter.
*
* @return the filter
*/
public F getFilter() {
// may be null if no filter has been set
return filter;
}
/**
* Sets the current filter. This will cause the hierarchy to be constructed
* again.
*
* @param filter
* the filter
*/
@SuppressWarnings("unchecked")
public void setFilter(Object filter) {
// unchecked cast: the caller is trusted to pass the correct filter type
this.filter = (F) filter;
}
/**
* Gets the {@code HierarchicalDataProvider} for this
* {@code HierarchyMapper}.
*
* @return the hierarchical data provider
*/
public HierarchicalDataProvider getDataProvider() {
// never null; set at construction time
return provider;
}
/**
* Returns whether given item has children.
*
* @param item
* the node to test
* @return {@code true} if node has children; {@code false} if not
*/
public boolean hasChildren(T item) {
// delegates straight to the data provider, no caching involved
return getDataProvider().hasChildren(item);
}
/* Fetch methods. These are used to calculate what to request. */
/**
* Gets a stream of items in the form of a flattened hierarchy from the
* back-end and filters the wanted results from the list. The default
* implementation of this method updates the internal parent-children
* bookkeeping up to the given range (or throughout the entire data set if
* active data optimization isn't in use), and marks this HierarchyMapper as
* initialized.
*
* NOTE: If your data request is likely to involve I/O
* channels , see {@link DataProvider#fetch(Query)} for instructions
* on how to handle the stream without risking resource leaks.
*
* NOTE: For performance and data consistency reasons this method should
* only be called for the data range that is currently getting sent to the
* client. If you wish to fetch the items for some other purpose, use
* {@link #fetchItems(Object, Range)} for a {@code null} parent instead. By
* default this method is called for the first time when the related
* component is attached.
*
* @param range
* the requested item range
* @return the stream of items
*
* @see #setInitialized(boolean)
* @see #useActiveDataOptimization(boolean)
*/
public Stream fetchItems(Range range) {
// null isn't an actual item, use -1 as startIndex
// getChildrenList also refreshes the parent-children bookkeeping for
// everything up to (and including) the requested range
Stream stream = getChildrenList(null, false, -1, range).stream()
.skip(range.getStart()).limit(range.length());
// the first data request marks this mapper initialized
setInitialized(true);
return stream;
}
/**
* Gets a stream of children for the given item in the form of a flattened
* hierarchy from the back-end and filter the wanted results from the list.
*
* NOTE: If your data request is likely to involve I/O
* channels , see {@link DataProvider#fetch(Query)} for instructions
* on how to handle the stream without risking resource leaks.
*
* @param parent
* the parent item for the fetch
* @param range
* the requested item range
* @return the stream of items
*/
public Stream fetchItems(T parent, Range range) {
// read-only fetch: does not touch the internal bookkeeping
return getHierarchy(parent, false, false).skip(range.getStart())
.limit(range.length());
}
/* Methods for providing information on the hierarchy. */
/**
* Generic method for finding direct children of a given parent, limited by
* given range.
*
* @param parent
* the parent
* @param range
* the range of direct children to return
* @return the requested children of the given parent
*/
@SuppressWarnings({ "rawtypes", "unchecked" })
private Stream doFetchDirectChildren(T parent, Range range) {
// query carries the current sorting and filter state of this mapper
return getDataProvider().fetchChildren(new HierarchicalQuery(
range.getStart(), range.length(), getBackEndSorting(),
getInMemorySorting(), getFilter(), parent));
}
private int getDepth(T item) {
    // Walk up the parent chain: root items end up with depth 0, and a
    // null item yields -1.
    int depth = -1;
    for (T node = item; node != null; node = getParentOfItem(node)) {
        ++depth;
    }
    return depth;
}
/**
* Find parent for the given item among open folders.
*
* @param item
* the item
* @return parent item or {@code null} for root items or if the parent is
* closed
*/
protected T getParentOfItem(T item) {
Objects.requireNonNull(item, "Can not find the parent of null");
// only items registered via registerChildren have a known parent
return idToParentMap.get(getDataProvider().getId(item));
}
/**
* Removes recursively from internal parent-children bookkeeping all
* children of an item identified by a given id. The default implementation
* of this method doesn't update the expansion bookkeeping of any of the
* nodes.
*
* The internal bookkeeping should be used over data provider when handling
* a collapse, because it matches the latest data that has been sent to the
* client. If the data provider gets updated within the same server
* round-trip as the collapse, the client-side update will still be pending,
* and using fresh data can lead to incorrect row removals from the client.
*
* @param id
* id of the item whose children should be removed
* @return list of the removed children, can be empty
*/
protected List removeChildrenRecursively(Object id) {
final List allChildren = new ArrayList<>();
// remove the direct children entry first so recursion cannot revisit it
List directChildren = childMap.remove(id);
if (directChildren != null) {
directChildren.stream().forEach(childId -> {
// move any active children to pending removal
if (getActiveData().remove(childId)) {
getPendingRemovalData().add(childId);
}
idToParentMap.remove(childId);
// collect in parent-before-descendants order
allChildren.add(childId);
allChildren.addAll(removeChildrenRecursively(childId));
});
}
return allChildren;
}
/**
* Removes from internal parent-children bookkeeping all children of an item
* identified by a given id. The default implementation of this method
* doesn't update the expansion bookkeeping of any of the nodes, nor look
* deeper into the hierarchy -- those are handled by the calling method.
*
* NOTE: Older implementation of this method also updated the expansion
* bookkeeping and iterated over the entire hierarchy, as well as suggested
* overriding this method in subclasses for removing obsolete data to avoid
* memory leaks. That should no longer be necessary with the new logic.
*
* @param id
* the item id
* @see #removeChildrenRecursively(Object)
*/
protected void removeChildren(Object id) {
    // Clean up removed nodes from the child map and, for each removed
    // child, from the parent bookkeeping as well.
    List removedChildIds = childMap.remove(id);
    if (removedChildIds == null) {
        return;
    }
    for (Object childId : removedChildIds) {
        idToParentMap.remove(childId);
    }
}
/**
* Finds the current index of given object. This is based on a search in
* flattened version of the hierarchy of each root node in turn until the
* target is found.
*
* NOTE: If this HierarchyMapper hasn't been initialized and the items
* requested for the first time, root node list is still empty and no search
* is performed.
*
* @param target
* the target object to find
* @return optional index of given object
*/
public Optional getIndexOf(T target) {
// index counts flattened rows processed so far, -1 until the first row
int index = -1;
// subTreeIndex stays negative until the target is found
int subTreeIndex = -1;
int parentNodeIndex = 0;
while (subTreeIndex < 0 && parentNodeIndex < rootNodes.size()) {
T rootNode = rootNodes.get(parentNodeIndex);
if (isExpanded(rootNode)) {
List hierarchy;
// ensure speedy closing in case the stream is connected to IO
// channels
try (Stream stream = getHierarchy(rootNode)) {
hierarchy = stream.collect(Collectors.toList());
}
subTreeIndex = hierarchy.indexOf(target);
if (subTreeIndex < 0) {
// target not in this subtree, skip past all of its rows
index += hierarchy.size();
} else {
index += subTreeIndex + 1;
}
} else {
// collapsed root contributes exactly one row
++index;
if (rootNode.equals(target)) {
subTreeIndex = 0;
break;
}
}
++parentNodeIndex;
}
// index grows until the target is found or there are no more nodes
// subTreeIndex is only non-negative when the target is found
return Optional.ofNullable(subTreeIndex < 0 ? null : index);
}
/**
* Gets the full hierarchy tree starting from given node.
*
* @param parent
* the parent node to start from
* @return the flattened hierarchy as a stream
*/
private Stream getHierarchy(T parent) {
// include the parent itself, don't touch the cached child lists
return getHierarchy(parent, true, false);
}
/**
* Gets the full hierarchy tree starting from given node. The starting node
* can be omitted.
*
* @param parent
* the parent node to start from
* @param includeParent
* {@code true} to include the parent; {@code false} if not
* @param checkChildList
* {@code true} if the cached child list should be updated for
* the entire subtree, {@code false} otherwise
* @return the flattened hierarchy as a stream
*/
private Stream getHierarchy(T parent, boolean includeParent,
boolean checkChildList) {
// thin alias for the recursive stream builder
return getChildrenStream(parent, includeParent, checkChildList);
}
/**
* Gets the stream of direct children for given node.
*
* @param parent
* the parent node
* @return the stream of direct children
*/
private Stream getDirectChildren(T parent) {
// fetch the full child range; the count query uses the current filter
return doFetchDirectChildren(parent, Range.between(0, getDataProvider()
.getChildCount(new HierarchicalQuery<>(filter, parent))));
}
/**
* The method to recursively fetch the children of given parent. Used with
* {@link Stream#flatMap} to expand a stream of parent nodes into a
* flattened hierarchy.
*
* @param parent
* the parent node
* @param checkChildList
* {@code true} if the cached child list should be updated for
* the entire subtree, {@code false} otherwise
* @return the stream of all children under the parent, includes the parent
*/
private Stream getChildrenStream(T parent, boolean checkChildList) {
// flatMap helper: always include the parent itself in the result
return getChildrenStream(parent, true, checkChildList);
}
/**
* The method to recursively fetch the children of given parent. Used with
* {@link Stream#flatMap} to expand a stream of parent nodes into a
* flattened hierarchy.
*
* @param parent
* the parent node
* @param includeParent
* {@code true} to include the parent in the stream;
* {@code false} if not
* @param checkChildList
* {@code true} if the cached child list should be updated for
* the entire subtree, {@code false} otherwise
* @return the stream of all children under the parent
*/
private Stream getChildrenStream(T parent, boolean includeParent,
boolean checkChildList) {
List childList;
if (isExpanded(parent)) {
// ensure speedy closing in case the stream is connected to IO
// channels
try (Stream stream = getDirectChildren(parent)) {
childList = stream.collect(Collectors.toList());
}
if (parent == null) {
// fetching root children refreshes the root-node cache
rootNodes.clear();
rootNodes.addAll(childList);
}
} else {
// collapsed nodes contribute no child rows
childList = Collections.emptyList();
}
if (checkChildList) {
// drop stale records before registering the fresh child list
removeChildren(
parent == null ? null : getDataProvider().getId(parent));
registerChildren(parent, childList);
}
// depth-first recursion: each child expands into its own subtree
return combineParentAndChildStreams(parent,
childList.stream()
.flatMap(x -> getChildrenStream(x, checkChildList)),
includeParent);
}
/**
* The method to recursively fetch the children of given parent. Always
* checks and updates the child list if necessary.
*
* NOTE: This method should only be called for the data range that is
* currently getting sent to the client. It might not cover the entire
* active range, but all items within the range should be considered active.
*
* @param parent
* the parent node
* @param includeParent
* {@code true} to include the parent in the list; {@code false}
* if not
* @param startIndex
* the index of the parent
* @param range
* the active range
* @return the list of all children under the parent
*/
private List getChildrenList(T parent, boolean includeParent,
int startIndex, Range range) {
List childList;
Object parentId = parent == null ? null
: getDataProvider().getId(parent);
if (isExpanded(parent)) {
// ensure speedy closing in case the stream is connected to IO
// channels
try (Stream stream = getDirectChildren(parent)) {
childList = stream.collect(Collectors.toList());
}
if (parent == null) {
// fetching root children refreshes the root-node cache
rootNodes.clear();
rootNodes.addAll(childList);
}
} else {
// collapsed nodes contribute no child rows
childList = Collections.emptyList();
}
if (!useActiveDataOptimization || (startIndex < range.getEnd())) {
// update records for all items within or preceding active range
removeChildren(parentId);
registerChildren(parent, childList);
}
// index tracks the flattened row position while recursing
int index = startIndex + 1;
List combinedList = new ArrayList(
!includeParent || parent == null ? 0 : 1);
if (includeParent && parent != null) {
combinedList.add(parent);
}
for (T child : childList) {
List allChildren = getChildrenList(child, true, index, range);
combinedList.addAll(allChildren);
index += allChildren.size();
if (parent == null && index >= range.getEnd()) {
// no need to process root nodes that are after the range
break;
}
}
return combinedList;
}
/**
* Register parent and child items into inner structures.
*
* NOTE: Older implementation of this class suggested overriding this and
* other methods, mainly for the purpose of avoiding memory leaks. That
* should no longer be necessary with the new logic.
*
* This method assumes that any added, moved, or removed data is handled
* separately (only updating collapsed contents, or refreshing the whole
* data provider after the update), because making such changes here
* couldn't update the client side correctly.
*
* @param parent
* the parent item
* @param childList
* list of children to be registered for the given parent, can be
* empty
*/
protected void registerChildren(T parent, List childList) {
    // A null parent represents the root level of the hierarchy.
    Object parentId = parent == null ? null
            : getDataProvider().getId(parent);
    List childIds = new ArrayList<>();
    for (Object child : childList) {
        Object childId = getDataProvider().getId(child);
        childIds.add(childId);
        // record the child -> parent link for depth/index lookups
        idToParentMap.put(childId, parent);
    }
    // replace any previous child list of this parent wholesale
    childMap.put(parentId, childIds);
}
/**
* Returns whether the referenced item node is either active (within the
* current client cache) or semi-active (inactive but still needed because
* some other items within the same or a following subtree are active), or
* neither. The purpose of this check is to improve performance when a lot
* of root nodes are expanded.
*
* NOTE: there is no performance benefit if the entire hierarchy shares the
* same root node, or when the TreeGrid is scrolled to display the lowest
* subtree. If the active data optimization has been turned off, all items
* are considered functionally active regardless.
*
* Only children of functionally active root nodes should be processed to
* internal bookkeeping. That must include the complete sub-hierarchy for
* each such root node, otherwise the collapse logic might miscalculate the
* range of removed row indexes and cause a critical mismatch.
*
* The {@code null} pseudo-parent of root nodes cannot be collapsed, but is
* considered to be functionally active whether there are any root nodes or
* not, as long as this HierarchyMapper has been initialized.
*
* @param item
* the node whose status should be checked
* @return {@code true} if functionally active, {@code false} otherwise
*/
private boolean isFunctionallyActive(Object itemId) {
    // With the optimization disabled every item counts as functionally
    // active; otherwise only items whose child list is currently cached.
    return !useActiveDataOptimization || childMap.containsKey(itemId);
}
/**
* FOR INTERNAL USE ONLY. This might be removed or renamed at any time
* without a warning.
*
* Returns whether the referenced item is within the client-side cache or
* not.
*
* @param itemId
* id of the item whose activity should be checked
* @return {@code true} if the item is within the cache, {@code false}
* otherwise
*/
boolean isActive(Object itemId) {
// membership in the active set mirrors the client-side cache contents
return getActiveData().contains(itemId);
}
/**
* Set of ids for items that are currently within the client cache. This set
* is kept up-to-date via {@link #generateData(Object, JsonObject)} and
* {@link #destroyData(Object)}, which are both called through
* {@link DataCommunicator.ActiveDataHandler}.
*
* @return set of ids for items that are currently considered active
*/
private Set getActiveData() {
// live view of the backing set, not a copy
return activeIds;
}
/**
* Returns a set of ids for formerly active (within the client cache) items
* that have been marked for removal through
* {@link #collapse(Object, Integer)} but are still waiting for the removal
* request from the client. Items within this set are still marked active in
* the data communicator bookkeeping, but have been cleaned out from
* HierarchyMapper's own internal bookkeeping, and the client has already
* been informed of the collapse.
*
* If the same item gets removed and added back before the removal is
* confirmed, the {@link #generateData(Object, JsonObject)} call moves the
* affected id from this set to the re-added data list. Otherwise the id
* gets removed from this set via {@link #destroyData(Object)} when the
* client request for the removal arrives (might get delayed until the next
* user interaction).
*
* This collection doesn't need to contain duplicates, because both collapse
* and expand (via {@link #generateData(Object, JsonObject)}) update it
* immediately rather than at the end of the round-trip.
*
* @return a set of ids that are pending for removal, can be empty
* @see #getActiveData()
* @see #getReAddedData()
*/
private Set getPendingRemovalData() {
// live view of the backing set, not a copy
return pendingRemovalIds;
}
/**
* Returns a list of ids for items that have been removed through collapse
* and then added back through expanse before the removal has been fully
* processed through all the layers of the data handling. Can contain
* duplicates if the same parent item has been collapsed and expanded
* multiple times within the same round-trip, because the collapse and the
* expand are both sent to the client immediately rather than waiting for
* other actions from the round-trip.
*
* Each collapse triggers its own client request for row removal whether or
* not the row has been added back already. When such a request arrives, the
* actual status of the item is checked via {@link #prepareForDrop(Object)}.
* If the item id is present on this list, one instance is removed, and that
* particular removal process for the item is halted. The same is repeated
* for every request for removal, until there are either no more removal
* requests (latest update to the item was through an expanse) or no more
* instances left on the list (latest update was through a collapse).
*
* Items on this list might or might not be currently within the active
* data, depending on whether the latest update to them has been through a
* collapse or an expand. In either case the presence on this list must
* block the removal from proceeding, because the situation might change
* again before all the requests have been processed.
*
* @return a list of ids that have been removed and added back before the
* removal has been fully processed
* @see #getActiveData()
* @see #getPendingRemovalData()
*/
private List getReAddedData() {
// live view of the backing list, not a copy; may contain duplicates
return reAddedIds;
}
/**
* Finds and returns the first active member of the cached subtree with the
* referenced parent, or {@code null} if none are both active and within the
* internal cache.
*
* @param parentId
* the id of the parent node
* @return the first active member or {@code null} if none found
*/
private Object getFirstActiveCached(Object parentId) {
// depth-first search starting from the given node itself
if (isActive(parentId)) {
return parentId;
}
List childIdList = childMap.get(parentId);
if (childIdList != null) {
for (Object childId : childIdList) {
Object active = getFirstActiveCached(childId);
if (active != null) {
return active;
}
}
}
// no active node in this subtree
return null;
}
/**
* Helper method for combining parent and a stream of children into one
* stream. {@code null} item is never included, and parent can be skipped by
* providing the correct value for {@code includeParent}.
*
* @param parent
* the parent node
* @param children
* the stream of children
* @param includeParent
* {@code true} to include the parent in the stream;
* {@code false} if not
* @return the combined stream of parent and its children
*/
private Stream combineParentAndChildStreams(T parent, Stream children,
        boolean includeParent) {
    // A null parent is never emitted, regardless of the includeParent flag.
    if (includeParent && parent != null) {
        return Stream.concat(Stream.of(parent), children);
    }
    return children;
}
/**
 * Resets all internal bookkeeping and marks this mapper uninitialized.
 * Called when the whole data set is invalidated.
 */
@Override
public void destroyAllData() {
childMap.clear();
idToParentMap.clear();
getActiveData().clear();
getReAddedData().clear();
getPendingRemovalData().clear();
// next fetchItems(Range) call re-initializes the mapper
setInitialized(false);
}
/**
 * Clears the internal bookkeeping for an item that the client has dropped
 * from its cache, and logs a warning on cache mismatches. If active data
 * optimization is in use, also releases whole item trees that no longer
 * contain any active items.
 *
 * Fix: both warning messages previously concatenated
 * {@code "... removed " + " (id: ..."}, producing a double space in the
 * logged text; the stray separator has been removed.
 *
 * @param item
 *            the item being dropped, may be {@code null} (no-op)
 */
@Override
public void destroyData(T item) {
    // Note: if you click to collapse a node, this won't get called for the
    // removed children until the next user interaction, unless something
    // else triggers another round-trip -- both expand and collapse bypass
    // the active data handling in data communicator, so the active data
    // update requires a request from the client
    if (item != null) {
        Object itemId = getDataProvider().getId(item);
        if (getReAddedData().remove(itemId)) {
            // prepareForDrop(Object) should have consumed the marker before
            // the drop proceeded this far
            getLogger().warning(
                    "Cache mismatch, item still present in re-added "
                            + "data but was triggered to be removed "
                            + "(id: " + itemId + ")");
        }
        boolean removeFromPendingRemoval = getPendingRemovalData()
                .remove(itemId);
        boolean removedFromActive = getActiveData().remove(itemId);
        if (removedFromActive && removeFromPendingRemoval) {
            // an id must never be in both collections at once
            getLogger().warning(
                    "Cache mismatch, removed item was both in active data "
                            + "and pending removal (id: " + itemId + ")");
        }
        if (useActiveDataOptimization
                && (removedFromActive || removeFromPendingRemoval)) {
            // find the root node of this item tree
            Object parentId = itemId;
            while (idToParentMap.get(parentId) != null) {
                T parent = idToParentMap.get(parentId);
                parentId = getDataProvider().getId(parent);
            }
            // check whether the item tree is/was the last functionally
            // active item tree (if not, the item is considered functionally
            // active by default)
            boolean functionallyActive = false;
            // NOTE(review): assumes childMap contains a root-level entry
            // whenever an active item is dropped -- confirm this holds if
            // destroyAllData() races with a client removal request
            List rootNodeIds = childMap.get(null);
            int rootIndex = rootNodeIds.indexOf(parentId);
            if (rootIndex < rootNodeIds.size() - 1) {
                Object next = rootNodeIds.get(rootIndex + 1);
                if (isFunctionallyActive(next)) {
                    functionallyActive = true;
                }
            }
            if (!functionallyActive) {
                // check whether the item tree itself has any active nodes
                // left (if not, it's no longer functionally active)
                functionallyActive = getFirstActiveCached(parentId) != null;
            }
            if (!functionallyActive) {
                removeChildrenRecursively(parentId);
                // check activity of previous item trees, children should
                // only be kept up to the last functionally active tree
                while (rootIndex > 0) {
                    --rootIndex;
                    Object previous = rootNodeIds.get(rootIndex);
                    if (getFirstActiveCached(previous) != null) {
                        // active nodes found, no need to remove anything
                        // further
                        break;
                    }
                    removeChildrenRecursively(previous);
                }
            }
        }
    }
}
/**
* FOR INTERNAL USE ONLY. This might be removed or renamed at any time
* without a warning.
*
* Prepares this HierarchyMapper for dropping the indicated item from data
* communcator's active data. If the item has been re-added before this
* method is called (delayed trigger because the collapse action is circled
* through the client), the item should not be dropped.
*
* @param id
* the id of the item that is being prepared to be dropped
* @return {@code true} if the row is ready to be dropped, {@code false}
* otherwise
* @see #getActiveData()
*/
boolean prepareForDrop(Object id) {
    // Only remove one instance at a time, in case the row has more than one
    // removal request pending (each collapse will have its own
    // corresponding request, even if the row was re-expanded right away).
    boolean wasReAdded = getReAddedData().remove(id);
    // a re-added item must not be dropped; its removal was superseded
    return !wasReAdded;
}
/**
* Set whether you want to optimize the internal data handling to only
* consider root nodes that have descendants within the client-side cache.
* The default value is {@code true}.
*
* @since 8.17
* @param use
* {@code true} if you want to optimize, {@code false} otherwise
*/
public void useActiveDataOptimization(boolean use) {
// checked by isFunctionallyActive, getChildrenList and destroyData
useActiveDataOptimization = use;
}
/**
* Set the initialization state of this class. By default this is set
* {@code true} when the related component is attached and data communicator
* makes the first data request for updating the client-side cache, and
* {@code false} when all the data is reset. Before initialization no expand
* data is sent to client, as the assumption is that the client hasn't been
* initialized yet either. Collapse doesn't have a corresponding check,
* because the assumption is that the internal caches are empty and the
* attempt to collapse would result in an empty row range anyway.
*
* NOTE: Do not set this class uninitialized for optimization purposes
* without calling {@link #destroyAllData()} afterwards, otherwise the
* internal caches are likely to get out of sync with both data communicator
* and the client side and be unable to recover gracefully.
*
* @since 8.17
* @param initialize
* {@code true} to mark initialized, {@code false} to mark
* requiring initialization
*/
public void setInitialized(boolean initialize) {
initialized = initialize;
// lazily populate the root-node cache on first initialization
if (initialize && rootNodes.isEmpty()) {
// ensure speedy closing in case the stream is connected to IO
// channels
try (Stream stream = getDirectChildren(null)) {
rootNodes.addAll(stream.collect(Collectors.toList()));
}
}
}
}