package net.pincette.jes;

import static com.mongodb.client.model.Filters.and;
import static com.mongodb.client.model.Filters.eq;
import static com.mongodb.client.model.Filters.regex;
import static java.lang.Boolean.FALSE;
import static java.lang.String.valueOf;
import static java.time.Duration.ofSeconds;
import static java.time.Instant.now;
import static java.util.Arrays.fill;
import static java.util.Arrays.stream;
import static java.util.concurrent.CompletableFuture.completedFuture;
import static net.pincette.jes.util.Command.hasError;
import static net.pincette.jes.util.Command.isAllowed;
import static net.pincette.jes.util.Command.isCommand;
import static net.pincette.jes.util.Commands.DELETE;
import static net.pincette.jes.util.Commands.PATCH;
import static net.pincette.jes.util.Commands.PUT;
import static net.pincette.jes.util.Event.isEvent;
import static net.pincette.jes.util.JsonFields.AFTER;
import static net.pincette.jes.util.JsonFields.BEFORE;
import static net.pincette.jes.util.JsonFields.COMMAND;
import static net.pincette.jes.util.JsonFields.CORR;
import static net.pincette.jes.util.JsonFields.DELETED;
import static net.pincette.jes.util.JsonFields.ERROR;
import static net.pincette.jes.util.JsonFields.ID;
import static net.pincette.jes.util.JsonFields.JWT;
import static net.pincette.jes.util.JsonFields.LANGUAGES;
import static net.pincette.jes.util.JsonFields.OPS;
import static net.pincette.jes.util.JsonFields.SEQ;
import static net.pincette.jes.util.JsonFields.STATUS_CODE;
import static net.pincette.jes.util.JsonFields.TEST;
import static net.pincette.jes.util.JsonFields.TIMESTAMP;
import static net.pincette.jes.util.JsonFields.TYPE;
import static net.pincette.jes.util.Mongo.insert;
import static net.pincette.jes.util.Mongo.restore;
import static net.pincette.jes.util.Mongo.update;
import static net.pincette.jes.util.Streams.duplicateFilter;
import static net.pincette.json.JsonUtil.createArrayBuilder;
import static net.pincette.json.JsonUtil.createDiff;
import static net.pincette.json.JsonUtil.createObjectBuilder;
import static net.pincette.json.JsonUtil.createPatch;
import static net.pincette.json.JsonUtil.getBoolean;
import static net.pincette.json.JsonUtil.getString;
import static net.pincette.mongo.Collection.deleteOne;
import static net.pincette.mongo.JsonClient.find;
import static net.pincette.mongo.JsonClient.findOne;
import static net.pincette.util.Builder.create;
import static net.pincette.util.Collections.list;
import static net.pincette.util.Collections.set;
import static net.pincette.util.Util.getStackTrace;
import static net.pincette.util.Util.must;
import static net.pincette.util.Util.rethrow;
import static net.pincette.util.Util.tryToGet;

import com.mongodb.client.result.DeleteResult;
import com.mongodb.reactivestreams.client.MongoDatabase;
import java.time.Duration;
import java.util.HashMap;
import java.util.Map;
import java.util.Optional;
import java.util.Set;
import java.util.concurrent.CompletionStage;
import java.util.function.BinaryOperator;
import java.util.function.Function;
import java.util.function.UnaryOperator;
import javax.json.JsonArray;
import javax.json.JsonNumber;
import javax.json.JsonObject;
import javax.json.JsonObjectBuilder;
import net.pincette.function.SideEffect;
import net.pincette.jes.util.AuditFields;
import net.pincette.jes.util.Reducer;
import net.pincette.json.JsonUtil;
import net.pincette.util.TimedCache;
import org.apache.kafka.streams.KeyValue;
import org.apache.kafka.streams.StreamsBuilder;
import org.apache.kafka.streams.kstream.KStream;

/**
 * With this class JSON aggregates are managed using Kafka Streams. You give it reducers, which
 * calculate the new aggregate state with the current one and an incoming command. For every
 * aggregate instance all commands are processed sequentially. The result of a reducer execution is
 * compared with the current aggregate state and the difference is emitted as an event. A reducer
 * may also find problems in the command. In that case it should return the command, marked with
 * "_error": true. No event will be emitted then.
 *
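 * <p>A minimal sketch of wiring an aggregate with this class (the application name, aggregate
 * type, command name and reducer body below are only illustrative):
 *
 * <pre>{@code
 * final StreamsBuilder builder = new StreamsBuilder();
 * final Aggregate aggregate =
 *     new Aggregate()
 *         .withApp("myapp")
 *         .withType("counter")
 *         .withEnvironment("dev")
 *         .withBuilder(builder)
 *         .withMongoDatabase(database) // An existing reactive MongoDatabase connection.
 *         .withReducer(
 *             "increment",
 *             (command, currentState) ->
 *                 completedFuture(
 *                     createObjectBuilder(currentState)
 *                         .add("value", currentState.getInt("value", 0) + 1)
 *                         .build()));
 *
 * aggregate.build(); // Wires all streams into the given topology builder.
 * }</pre>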
 *
 * <p>The external interface at runtime is a set of Kafka topics. Their names always have the
 * form &lt;app&gt;-&lt;type&gt;-&lt;purpose&gt;-&lt;environment&gt;. The following topics are
 * expected to exist (the names are the purpose):
 *
 * <dl>
 *   <dt>aggregate
 *   <dd>On this topic the current state of the aggregate is emitted.
 *   <dt>command
 *   <dd>Through this topic commands are received. It is the only input of the system.
 *   <dt>event
 *   <dd>On this topic the events are emitted, which contain the changes between two subsequent
 *       aggregate versions.
 *   <dt>event-full
 *   <dd>The events are also emitted on this topic, but here they have two extra fields. The
 *       <code>_before</code> field contains the previous state of the aggregate, while
 *       <code>_after</code> contains the current one. This is for consumers that want to do
 *       other kinds of analysis than the plain difference.
 *   <dt>monitor
 *   <dd>This optional topic receives monitoring events.
 *   <dt>reply
 *   <dd>On this topic either the new aggregate or the failed command is emitted. The topic is
 *       meant to be routed back to the end-user, for example through Server-Sent Events. A
 *       reactive client can pick it up and update its stores. This connects the server and the
 *       client in one reactive loop.
 * </dl>
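 *
 * <p>For example, with the (illustrative) application name "myapp", aggregate type "counter" and
 * environment "dev", commands would be read from the topic
 * <code>myapp-counter-command-dev</code> and events would appear on
 * <code>myapp-counter-event-dev</code>.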
 *
 * <p>An aggregate is a JSON document, which has the following technical fields on top of
 * whatever you put in it:
 *
 * <dl>
 *   <dt>_corr
 *   <dd>The correlation identifier that was used by the last command. It is usually a UUID.
 *   <dt>_deleted
 *   <dd>This boolean marks the aggregate instance as deleted. This is a logical delete.
 *   <dt>_id
 *   <dd>The identifier of the aggregate instance. It is usually a UUID.
 *   <dt>_jwt
 *   <dd>The decoded JSON Web Token that was used by the last command.
 *   <dt>_seq
 *   <dd>A sequence number. This is the sequence number of the last event.
 *   <dt>_type
 *   <dd>The aggregate type, which is composed as &lt;application&gt;-&lt;name&gt;.
 * </dl>
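 *
 * <p>An aggregate instance of the illustrative type "myapp-counter" could thus look like this
 * (all values are made up):
 *
 * <pre>{@code
 * {
 *   "_id": "26f04d0e-...",
 *   "_type": "myapp-counter",
 *   "_corr": "0e1d77a9-...",
 *   "_seq": 3,
 *   "value": 4
 * }
 * }</pre>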
 *
 * <p>A command is a JSON document, which has the following technical fields on top of whatever
 * you put in it:
 *
 * <dl>
 *   <dt>_command
 *   <dd>The name of the command. This field is mandatory.
 *   <dt>_corr
 *   <dd>A correlation identifier. It is propagated throughout the flow. This is usually a UUID.
 *   <dt>_error
 *   <dd>This boolean indicates there is a problem with the command.
 *   <dt>_id
 *   <dd>The identifier of the aggregate instance. It is usually a UUID. This field is mandatory.
 *   <dt>_jwt
 *   <dd>The decoded JSON Web Token.
 *   <dt>_languages
 *   <dd>An array of language tags in the order of preference. When a validator or some other
 *       component wishes to send messages to the user, it can use the proper language for it.
 *   <dt>_type
 *   <dd>The aggregate type, which is composed as &lt;application&gt;-&lt;name&gt;. This field is
 *       mandatory.
 * </dl>
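 *
 * <p>A command that drives the standard <code>patch</code> reducer could, for instance, look
 * like this (again with made-up values):
 *
 * <pre>{@code
 * {
 *   "_id": "26f04d0e-...",
 *   "_type": "myapp-counter",
 *   "_command": "patch",
 *   "_corr": "0e1d77a9-...",
 *   "_ops": [{"op": "replace", "path": "/value", "value": 5}]
 * }
 * }</pre>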
 *
 * <p>An event is a JSON document, which has the following technical fields:
 *
 * <dl>
 *   <dt>_after
 *   <dd>An optional field that carries the new state of the aggregate instance.
 *   <dt>_before
 *   <dd>An optional field that carries the previous state of the aggregate instance.
 *   <dt>_command
 *   <dd>The name of the command that caused the event to be created.
 *   <dt>_corr
 *   <dd>The correlation identifier that was used by the last command. It is usually a UUID.
 *   <dt>_id
 *   <dd>The identifier of the aggregate instance. It is usually a UUID.
 *   <dt>_jwt
 *   <dd>The decoded JSON Web Token that was used by the last command.
 *   <dt>_ops
 *   <dd>An array of operations as described in RFC 6902. It describes how an aggregate instance
 *       has changed after the reduction of a command.
 *   <dt>_seq
 *   <dd>A sequence number. There should not be holes in the sequence. This would indicate
 *       corruption of the event log.
 *   <dt>_timestamp
 *   <dd>The timestamp in epoch millis.
 *   <dt>_type
 *   <dd>The aggregate type, which is composed as &lt;application&gt;-&lt;name&gt;.
 * </dl>
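 *
 * <p>The event produced by reducing the patch command shown above could then look like this
 * (values are made up):
 *
 * <pre>{@code
 * {
 *   "_id": "26f04d0e-...",
 *   "_type": "myapp-counter",
 *   "_command": "patch",
 *   "_corr": "0e1d77a9-...",
 *   "_seq": 4,
 *   "_timestamp": 1580000000000,
 *   "_ops": [{"op": "replace", "path": "/value", "value": 5}]
 * }
 * }</pre>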
 *
 * @author Werner Donné
 * @since 1.0
 */
public class Aggregate {
  private static final String AGGREGATE_TOPIC = "aggregate";
  private static final String COMMAND_TOPIC = "command";
  private static final Duration DUPLICATE_WINDOW = ofSeconds(60);
  private static final String EVENT_TOPIC = "event";
  private static final String EVENT_FULL_TOPIC = "event-full";
  private static final String EXCEPTION = "exception";
  private static final String MONITOR_TOPIC = "monitor";
  private static final String REDUCER_COMMAND = "command";
  private static final String REDUCER_STATE = "state";
  private static final String REPLY_TOPIC = "reply";
  private static final String STEP = "step";
  private static final String STEP_AFTER = "after";
  private static final String STEP_COMMAND = "command";
  private static final String STEP_ERROR = "error";
  private static final String STEP_TIMESTAMP = "timestamp";
  private static final Set<String> TECHNICAL_FIELDS =
      set(COMMAND, CORR, ID, JWT, LANGUAGES, SEQ, TEST, TIMESTAMP, TYPE);

  private final Map<String, Reducer> reducers = new HashMap<>();
  private final TimedCache<String, JsonObject> aggregateCache = new TimedCache<>(DUPLICATE_WINDOW);
  private String auditTopic;
  private KStream<String, JsonObject> aggregates;
  private String app;
  private boolean breakingTheGlass;
  private StreamsBuilder builder;
  private StreamProcessor commandProcessor;
  private KStream<String, JsonObject> commands;
  private MongoDatabase database;
  private String environment = "dev";
  private KStream<String, JsonObject> events;
  private KStream<String, JsonObject> eventsFull;
  private KStream<String, JsonObject> monitor;
  private boolean monitoring;
  private Reducer reducer;
  private KStream<String, JsonObject> replies;
  private String type;

  /**
   * This will install a standard reducer for the commands <code>delete</code>,
   * <code>patch</code> and <code>put</code>.
   *
   * @since 1.0
   */
  public Aggregate() {
    withReducer(DELETE, (command, currentState) -> delete(currentState));
    withReducer(PATCH, Aggregate::patch);
    withReducer(PUT, (command, currentState) -> put(command));
  }

  private static JsonObject accessError(final JsonObject command) {
    return createObjectBuilder(command)
        .add(ERROR, true)
        .add(STATUS_CODE, 403)
        .add("message", "Forbidden")
        .build();
  }

  private static String commandDuplicateKey(final JsonObject command) {
    return command.getString(ID) + command.getString(CORR) + command.getString(COMMAND);
  }

  private static JsonObject completeCommand(final JsonObject command) {
    return !command.containsKey(TIMESTAMP)
        ? createObjectBuilder(command)
            .add(
                TIMESTAMP,
                Optional.ofNullable(command.getJsonNumber(TIMESTAMP))
                    .map(JsonNumber::longValue)
                    .orElseGet(() -> now().toEpochMilli()))
            .build()
        : command;
  }

  private static JsonObjectBuilder createAfter(
      final JsonObject newState,
      final JsonObject command,
      final String corr,
      final int seq,
      final long now) {
    return create(
            () ->
                createObjectBuilder(newState)
                    .add(CORR, corr)
                    .add(SEQ, seq)
                    .add(TIMESTAMP, now)
                    .remove(JWT))
        .updateIf(b -> command.containsKey(JWT), b -> b.add(JWT, command.getJsonObject(JWT)))
        .build();
  }

  private static JsonObject createAggregateMessage(final JsonObject aggregate) {
    return createObjectBuilder()
        .add(TYPE, aggregate.getString(TYPE))
        .add(JWT, aggregate.getJsonObject(JWT))
        .build();
  }

  private static JsonObject createError(final JsonObject command, final long timestamp) {
    return createObjectBuilder()
        .add(STEP, STEP_ERROR)
        .add(STEP_COMMAND, command)
        .add(STEP_TIMESTAMP, timestamp)
        .build();
  }

  private static JsonObject createEvent(
      final JsonObject oldState,
      final JsonObject newState,
      final JsonObject command,
      final JsonArray ops) {
    final String corr = command.getString(CORR);
    final long now = now().toEpochMilli();
    final int seq = oldState.getInt(SEQ, -1) + 1;

    return create(
            () ->
                createObjectBuilder()
                    .add(CORR, corr)
                    .add(ID, newState.getString(ID).toLowerCase())
                    .add(TYPE, newState.getString(TYPE))
                    .add(SEQ, seq)
                    .add(COMMAND, command.getString(COMMAND))
                    .add(TIMESTAMP, now)
                    .add(BEFORE, oldState)
                    .add(AFTER, createAfter(newState, command, corr, seq, now))
                    .add(OPS, ops))
        .updateIf(
            b -> command.containsKey(LANGUAGES),
            b -> b.add(LANGUAGES, command.getJsonArray(LANGUAGES)))
        .updateIf(b -> command.containsKey(JWT), b -> b.add(JWT, command.getJsonObject(JWT)))
        .build()
        .build();
  }

  private static JsonArray createOps(final JsonObject oldState, final JsonObject newState) {
    return createArrayBuilder(
            createDiff(removeTechnical(oldState).build(), removeTechnical(newState).build())
                .toJsonArray())
        .build();
  }

  private static JsonObject createSource(final JsonObject command, final JsonObject state) {
    return createObjectBuilder().add(REDUCER_STATE, state).add(REDUCER_COMMAND, command).build();
  }

  private static JsonObject createStep(
      final String step, final String after, final long timestamp) {
    return createStep(step, after, timestamp, null);
  }

  private static JsonObject createStep(
      final String step, final String after, final long timestamp, final String command) {
    return create(JsonUtil::createObjectBuilder)
        .update(b -> b.add(STEP, step))
        .update(b -> b.add(STEP_TIMESTAMP, timestamp))
        .updateIf(b -> after != null, b -> b.add(STEP_AFTER, after))
        .updateIf(b -> command != null, b -> b.add(STEP_COMMAND, command))
        .build()
        .build();
  }
  /**
   * The standard delete reducer. It sets the field <code>_deleted</code> to <code>true</code>.
   *
   * @param currentState the current state of the aggregate.
   * @return The new state of the aggregate.
   * @since 1.0
   */
  public static CompletionStage<JsonObject> delete(final JsonObject currentState) {
    return completedFuture(createObjectBuilder(currentState).add(DELETED, true).build());
  }

  private static KStream<String, JsonObject> errors(final KStream<String, JsonObject> reducer) {
    return reducer.filter((k, v) -> isCommand(v) && hasError(v));
  }

  private static String generateSeq(final long value) {
    return pad(valueOf(value), '0', 12);
  }

  private static JsonObject idsToLowerCase(final JsonObject json) {
    return createObjectBuilder(json)
        .add(ID, json.getString(ID).toLowerCase())
        .add(CORR, json.getString(CORR).toLowerCase())
        .build();
  }

  private static JsonObject makeManaged(final JsonObject state, final JsonObject command) {
    return createObjectBuilder(state)
        .add(ID, command.getString(ID))
        .add(TYPE, command.getString(TYPE))
        .build();
  }

  private static String mongoEventKey(final JsonObject json) {
    return mongoEventKey(json, json.getJsonNumber(SEQ).longValue());
  }

  private static String mongoEventKey(final JsonObject json, final long seq) {
    return json.getString(ID) + "-" + generateSeq(seq);
  }

  private static String pad(final String s, final char c, final int size) {
    return s.length() >= size ? s : (new String(pad(c, size - s.length())) + s);
  }

  private static char[] pad(final char c, final int size) {
    final char[] result = new char[size];

    fill(result, c);

    return result;
  }

  /**
   * The standard patch reducer. It expects to find the <code>_ops</code> field, which contains a
   * JSON patch, and applies it to the aggregate.
   *
   * @param command the given command.
   * @param currentState the current state of the aggregate.
   * @return The new state of the aggregate.
   * @since 1.0
   */
  public static CompletionStage<JsonObject> patch(
      final JsonObject command, final JsonObject currentState) {
    return completedFuture(
        Optional.ofNullable(command.getJsonArray(OPS))
            .map(ops -> createPatch(ops).apply(currentState))
            .orElse(currentState));
  }

  private static JsonObject plainEvent(final JsonObject fullEvent) {
    return createObjectBuilder(fullEvent).remove(AFTER).remove(BEFORE).build();
  }

  /**
   * The standard put reducer. It just removes the <code>_command</code> field and uses everything
   * else as the new state of the aggregate.
   *
   * @param command the given command.
   * @return The new state of the aggregate.
   * @since 1.0
   */
  public static CompletionStage<JsonObject> put(final JsonObject command) {
    return completedFuture(createObjectBuilder(command).remove(COMMAND).build());
  }

  /**
   * Wraps a generic transformer in a <code>Reducer</code>. The first argument will be the command
   * and the second the current state of the aggregate.
   *
   * @param transformer the given transformer.
   * @return The wrapped transformer.
   * @since 1.1.4
   */
  public static Reducer reducer(final BinaryOperator<JsonObject> transformer) {
    return (command, state) -> completedFuture(transformer.apply(command, state));
  }

  /**
   * Wraps a sequence of generic transformers in a <code>Reducer</code>. The result of one
   * transformer is fed to the next. The JSON object that is given to the sequence has the fields
   * <code>command</code> and <code>state</code>. The transformer sequence should produce the new
   * state.
   *
   * @param transformers the given transformer sequence.
   * @return The wrapped transformer sequence.
   * @since 1.1.4
   */
  @SafeVarargs
  public static Reducer reducer(final UnaryOperator<JsonObject>... transformers) {
    final UnaryOperator<JsonObject> function =
        stream(transformers).reduce(json -> json, (result, t) -> (j -> t.apply(result.apply(j))));

    return (command, state) -> completedFuture(function.apply(createSource(command, state)));
  }
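  // A minimal sketch of composing transformers with the varargs reducer above. The first
  // transformer receives the {"command": ..., "state": ...} source object; the last one must
  // produce the new state. The command name "touch" and the field "touched" are only
  // illustrative:
  //
  //   aggregate.withReducer(
  //       "touch",
  //       reducer(
  //           source -> source.getJsonObject("state"),
  //           state -> createObjectBuilder(state).add("touched", true).build()));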
  private static JsonObjectBuilder removeTechnical(final JsonObject json) {
    return TECHNICAL_FIELDS.stream()
        .reduce(createObjectBuilder(json), JsonObjectBuilder::remove, (b1, b2) -> b1);
  }

  private static JsonObject setException(final JsonObject command, final Exception e) {
    return createObjectBuilder(command).add(ERROR, true).add(EXCEPTION, getStackTrace(e)).build();
  }

  private static JsonObject setId(final JsonObject json, final String id) {
    return createObjectBuilder(json).add(ID, id).build();
  }

  /**
   * Returns the aggregate stream.
   *
   * @return The aggregate stream.
   * @since 1.0
   */
  public KStream<String, JsonObject> aggregates() {
    return aggregates;
  }

  private void aggregates(final KStream<String, JsonObject> reducer) {
    aggregates =
        reducer
            .filter((k, v) -> isEvent(v) && v.containsKey(AFTER))
            .mapValues(json -> json.getJsonObject(AFTER));
    aggregates.to(topic(AGGREGATE_TOPIC));
  }

  /**
   * Returns the app name.
   *
   * @return The app name.
   * @since 1.0
   */
  public String app() {
    return app;
  }

  private void audit() {
    if (auditTopic != null) {
      events
          .mapValues(
              v ->
                  createObjectBuilder()
                      .add(AuditFields.AGGREGATE, v.getString(ID))
                      .add(AuditFields.TYPE, v.getString(TYPE))
                      .add(AuditFields.COMMAND, v.getString(COMMAND))
                      .add(AuditFields.TIMESTAMP, v.getJsonNumber(TIMESTAMP).longValue())
                      .add(AuditFields.USER, getString(v, "/_jwt/sub").orElse("anonymous"))
                      .add(
                          AuditFields.BREAKING_THE_GLASS,
                          getBoolean(v, "/_jwt/breakingTheGlass").orElse(false))
                      .build())
          .to(auditTopic);
    }
  }

  /**
   * This builds the Kafka Streams topology for the aggregate.
   *
   * @return The builder that was given before.
   * @since 1.0
   */
  public StreamsBuilder build() {
    assert app != null
        && builder != null
        && environment != null
        && type != null
        && database != null;

    commands = createCommands();

    final KStream<String, JsonObject> red = reducer();

    aggregates(red);
    replies(red);
    events(red);
    monitorTopic(aggregates, AGGREGATE_TOPIC);
    monitorTopic(events, EVENT_TOPIC);
    monitorTopic(replies, REPLY_TOPIC);
    monitorTopic(eventsFull, EVENT_FULL_TOPIC);
    monitorReducer(red);
    audit();

    return builder;
  }

  /**
   * Returns the command stream, after applying potential command processors.
   *
   * @return The command stream.
   * @since 1.0
   */
  public KStream<String, JsonObject> commands() {
    return commands;
  }

  private KStream<String, JsonObject> createCommands() {
    final KStream<String, JsonObject> com = builder.stream(topic(COMMAND_TOPIC));
    final KStream<String, JsonObject> commandFilter =
        duplicateFilter(
            com.filter((k, v) -> isCommand(v))
                .map((k, v) -> new KeyValue<>(k.toLowerCase(), idsToLowerCase(v)))
                .mapValues(Aggregate::completeCommand),
            (k, v) -> commandDuplicateKey(v),
            ofSeconds(60));

    monitorCommands(commandFilter);

    return (commandProcessor != null
        ? commandProcessor.apply(commandFilter, builder)
        : commandFilter);
  }

  private CompletionStage<Boolean> deleteMongoAggregate(final JsonObject aggregate) {
    return deleteOne(
            database.getCollection(mongoAggregateCollection()), eq(ID, aggregate.getString(ID)))
        .thenApply(result -> must(result, DeleteResult::wasAcknowledged))
        .thenApply(result -> true);
  }

  private CompletionStage<Boolean> deleteMongoEvent(final JsonObject event) {
    return deleteOne(database.getCollection(mongoEventCollection()), eq(ID, mongoEventKey(event)))
        .thenApply(result -> must(result, DeleteResult::wasAcknowledged))
        .thenApply(result -> true);
  }

  /**
   * Returns the environment.
   *
   * @return The environment.
   * @since 1.0
   */
  public String environment() {
    return environment;
  }
  /**
   * Returns the event stream.
   *
   * @return The event stream.
   * @since 1.0
   */
  public KStream<String, JsonObject> events() {
    return events;
  }

  private void events(final KStream<String, JsonObject> reducer) {
    eventsFull = reducer.filter((k, v) -> isEvent(v));
    events = eventsFull.mapValues(Aggregate::plainEvent);
    eventsFull.to(topic(EVENT_FULL_TOPIC));
    events.to(topic(EVENT_TOPIC));
  }

  /**
   * Returns the full event stream.
   *
   * @return The event stream.
   * @since 1.0
   */
  public KStream<String, JsonObject> eventsFull() {
    return eventsFull;
  }

  private CompletionStage<JsonObject> executeReducer(
      final JsonObject command, final JsonObject currentState) {
    return Optional.ofNullable(reducer)
        .map(red -> red.apply(command, currentState))
        .orElseGet(
            () ->
                Optional.ofNullable(reducers.get(command.getString(COMMAND)))
                    .map(red -> red.apply(command, currentState))
                    .orElseGet(() -> completedFuture(currentState)));
  }

  /**
   * Returns the full aggregate type, which is composed as &lt;app&gt;-&lt;type&gt;.
   *
   * @return The full aggregate type.
   * @since 1.1
   */
  public String fullType() {
    return app + "-" + type;
  }

  private CompletionStage<JsonObject> getCurrentState(final String id) {
    return aggregateCache
        .get(id)
        .map(currentState -> (CompletionStage<JsonObject>) completedFuture(currentState))
        .orElseGet(
            () ->
                getMongoCurrentState(id)
                    .thenComposeAsync(
                        currentState ->
                            currentState
                                .map(state -> (CompletionStage<JsonObject>) completedFuture(state))
                                .orElseGet(() -> restore(id, fullType(), environment, database))))
        .thenComposeAsync(
            currentState ->
                !currentState.isEmpty()
                    ? restore(currentState, environment, database)
                    : completedFuture(currentState));
  }

  private CompletionStage<Optional<JsonObject>> getMongoCurrentState(final String id) {
    return getMongoEntity(mongoAggregateCollection(), id);
  }

  private CompletionStage<Optional<JsonObject>> getMongoEntity(
      final String collection, final String id) {
    return findOne(database.getCollection(collection), eq(ID, id));
  }

  private CompletionStage<Boolean> insertMongoEvent(final JsonObject event) {
    return insert(setId(event, mongoEventKey(event)), mongoEventCollection(), database)
        .thenApply(result -> must(result, r -> r));
  }

  private CompletionStage<Boolean> isDuplicate(final JsonObject command) {
    return find(
            database.getCollection(mongoEventCollection()),
            and(
                regex(ID, "^" + command.getString(ID) + ".*"),
                eq(CORR, command.getString(CORR)),
                eq(COMMAND, command.getString(COMMAND))))
        .thenApply(result -> !result.isEmpty());
  }

  /**
   * Returns the monitor stream.
   *
   * @return The monitor stream.
   * @since 1.0
   */
  public KStream<String, JsonObject> monitor() {
    if (monitor == null) {
      monitor = builder.stream(topic(MONITOR_TOPIC));
    }

    return monitor;
  }
  private String mongoAggregateCollection() {
    return fullType() + "-" + environment;
  }

  private String mongoEventCollection() {
    return fullType() + "-event-" + environment;
  }

  private void monitorCommands(final KStream<String, JsonObject> commands) {
    if (monitoring) {
      commands
          .filter((k, v) -> v.containsKey(CORR))
          .flatMap(
              (k, v) ->
                  list(
                      new KeyValue<>(
                          v.getString(CORR),
                          createStep(
                              MonitorSteps.RECEIVED,
                              null,
                              v.getJsonNumber(TIMESTAMP).longValue(),
                              v.getString(COMMAND))),
                      new KeyValue<>(
                          v.getString(CORR),
                          createStep(
                              MonitorSteps.COMMAND_TOPIC,
                              MonitorSteps.RECEIVED,
                              now().toEpochMilli(),
                              v.getString(COMMAND)))))
          .to(topic(MONITOR_TOPIC));
    }
  }

  private void monitorReducer(final KStream<String, JsonObject> reducer) {
    if (monitoring) {
      reducer
          .filter((k, v) -> v.containsKey(CORR) && !hasError(v))
          .map(
              (k, v) ->
                  new KeyValue<>(
                      v.getString(CORR),
                      createStep(
                          MonitorSteps.REDUCE, MonitorSteps.COMMAND_TOPIC, now().toEpochMilli())))
          .to(topic(MONITOR_TOPIC));

      reducer
          .filter((k, v) -> v.containsKey(CORR) && hasError(v))
          .map((k, v) -> new KeyValue<>(v.getString(CORR), createError(v, now().toEpochMilli())))
          .to(topic(MONITOR_TOPIC));
    }
  }

  private void monitorTopic(final KStream<String, JsonObject> stream, final String name) {
    if (monitoring) {
      stream
          .filter((k, v) -> v.containsKey(CORR))
          .map(
              (k, v) ->
                  new KeyValue<>(
                      v.getString(CORR),
                      createStep(name + "-topic", MonitorSteps.REDUCE, now().toEpochMilli())))
          .to(topic(MONITOR_TOPIC));
    }
  }

  private JsonObject processNewState(
      final JsonObject oldState, final JsonObject newState, final JsonObject command) {
    return Optional.ofNullable(newState)
        .filter(state -> !hasError(state))
        .map(state -> createOps(oldState, newState))
        .filter(ops -> !ops.isEmpty())
        .map(ops -> createEvent(oldState, newState, command, ops))
        .map(
            event ->
                SideEffect.<JsonObject>run(
                        () -> aggregateCache.put(event.getString(ID), event.getJsonObject(AFTER)))
                    .andThenGet(() -> event))
        .orElse(newState);
  }

  private JsonObject reduce(final JsonObject command) {
    return tryToGet(
            () ->
                isDuplicate(command)
                    .thenComposeAsync(
                        result ->
                            FALSE.equals(result)
                                ? reduceCommand(command).thenComposeAsync(this::saveReduction)
                                : completedFuture(null))
                    .toCompletableFuture()
                    .get(),
            e -> setException(command, e))
        .orElse(null);
  }

  private CompletionStage<JsonObject> reduceCommand(final JsonObject command) {
    return getCurrentState(command.getString(ID))
        .thenApply(currentState -> makeManaged(currentState, command))
        .thenComposeAsync(
            currentState ->
                isAllowed(currentState, command, breakingTheGlass)
                    ? executeReducer(command, currentState)
                        .thenApply(newState -> processNewState(currentState, newState, command))
                    : completedFuture(accessError(command)));
  }

  private KStream<String, JsonObject> reducer() {
    return commands.mapValues(this::reduce).filter((k, v) -> v != null);
  }

  /**
   * Returns the reply stream.
   *
   * @return The reply stream.
   * @since 1.0
   */
  public KStream<String, JsonObject> replies() {
    return replies;
  }

  private void replies(final KStream<String, JsonObject> reducer) {
    replies =
        aggregates.flatMapValues(v -> list(v, createAggregateMessage(v))).merge(errors(reducer));
    replies.to(topic(REPLY_TOPIC));
  }

  private CompletionStage<JsonObject> saveReduction(final JsonObject reduction) {
    final Function<JsonObject, CompletionStage<Boolean>> handleAggregate =
        a -> a.getBoolean(DELETED, false) ? deleteMongoAggregate(a) : updateMongoAggregate(a);

    return isEvent(reduction)
        ? insertMongoEvent(plainEvent(reduction))
            .thenComposeAsync(result -> handleAggregate.apply(reduction.getJsonObject(AFTER)))
            .thenApply(result -> reduction)
            .exceptionally(
                e -> {
                  deleteMongoEvent(reduction);
                  rethrow(e);
                  return null;
                })
        : completedFuture(reduction);
  }
  /**
   * Returns the topic name in the form
   * &lt;application&gt;-&lt;type&gt;-&lt;purpose&gt;-&lt;environment&gt;.
   *
   * @param purpose one of "aggregate", "command", "event", "event-full" or "reply".
   * @return The topic name.
   */
  public String topic(final String purpose) {
    return fullType() + "-" + purpose + "-" + environment;
  }

  /**
   * Returns the aggregate type.
   *
   * @return The aggregate type.
   * @since 1.0
   */
  public String type() {
    return type;
  }

  private CompletionStage<Boolean> updateMongoAggregate(final JsonObject aggregate) {
    return update(aggregate, environment, database).thenApply(result -> must(result, r -> r));
  }

  /**
   * Sets the name of the application. This will become the prefix of the aggregate type.
   *
   * @param app the application name.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withApp(final String app) {
    this.app = app;
    return this;
  }

  /**
   * Sets the audit Kafka topic, in which case auditing information is published on it. These are
   * JSON messages with the fields defined in <code>AuditFields</code>.
   *
   * @param auditTopic the Kafka topic for auditing.
   * @return The aggregate object itself.
   * @since 1.0
   * @see net.pincette.jes.util.AuditFields
   */
  public Aggregate withAudit(final String auditTopic) {
    this.auditTopic = auditTopic;
    return this;
  }

  /**
   * Honors the JWT field <code>breakingTheGlass</code> when checking ACLs. This should always be
   * used together with auditing.
   *
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withBreakingTheGlass() {
    breakingTheGlass = true;
    return this;
  }

  /**
   * Sets the Kafka Streams builder that will be used to create the topology.
   *
   * @param builder the given builder.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withBuilder(final StreamsBuilder builder) {
    this.builder = builder;
    return this;
  }

  /**
   * Sets a function with which a Kafka Stream can be inserted between the command topic and the
   * reducers. This method can be called several times. The result will be a function that chains
   * everything in the order of the invocations.
   *
   * @param commandProcessor the processor.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withCommandProcessor(final StreamProcessor commandProcessor) {
    final StreamProcessor previous = this.commandProcessor;

    this.commandProcessor =
        previous != null
            ? (s, b) -> commandProcessor.apply(previous.apply(s, b), b)
            : commandProcessor;

    return this;
  }
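  // A hypothetical command processor that stamps every incoming command before it reaches the
  // reducers; the field name "received" is only illustrative:
  //
  //   aggregate.withCommandProcessor(
  //       (stream, builder) ->
  //           stream.mapValues(
  //               command ->
  //                   createObjectBuilder(command)
  //                       .add("received", now().toEpochMilli())
  //                       .build()));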
  /**
   * Sets the environment in which this aggregate will live. The default is <code>dev</code>. It
   * will become the suffix for all the topic names. Typically the value for this comes from an
   * external configuration.
   *
   * @param environment the name of the environment.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withEnvironment(final String environment) {
    this.environment = environment;
    return this;
  }

  /**
   * The MongoDB database in which the events and aggregates are written.
   *
   * @param database the database connection.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withMongoDatabase(final MongoDatabase database) {
    this.database = database;
    return this;
  }

  /**
   * Turns on monitoring. This publishes monitoring messages on the
   * &lt;aggregate-type&gt;-monitor-&lt;environment&gt; topic. A message is in JSON and always
   * contains the fields "step" and "timestamp". The former is defined in
   * <code>MonitorSteps</code>. The latter is an epoch millis value. The optional field "after"
   * contains the step that precedes this one. The optional field "command" contains the name of a
   * command. This is turned off by default.
   *
   * @param monitoring whether monitoring is desired or not.
   * @return The aggregate object itself.
   * @since 1.0
   * @see MonitorSteps
   */
  public Aggregate withMonitoring(final boolean monitoring) {
    this.monitoring = monitoring;
    return this;
  }

  /**
   * Sets the reducer for the given command.
   *
   * @param command the name of the command, which will match the <code>_command</code> field.
   * @param reducer the reducer function.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withReducer(final String command, final Reducer reducer) {
    reducers.put(command, reducer);
    return this;
  }

  /**
   * Sets the reducer for all commands, which means the reducer does the dispatching itself. Note
   * that the individual reducers are not tried when this is set.
   *
   * @param reducer the reducer function.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withReducer(final Reducer reducer) {
    this.reducer = reducer;
    return this;
  }

  /**
   * Sets the aggregate type, which will become the suffix for the full aggregate type.
   *
   * @param type the type.
   * @return The aggregate object itself.
   * @since 1.0
   */
  public Aggregate withType(final String type) {
    this.type = type;
    return this;
  }
}