/*
 * com.kenshoo.pl.data.CommandsExecutor — Maven / Gradle / Ivy artifact.
 *
 * A Java persistence layer based on jOOQ for high performance and business-flow support.
 * Note: a newer version of this artifact is available (0.1.121-jooq-3.16.3).
 */
package com.kenshoo.pl.data;

import com.kenshoo.jooq.DataTable;
import com.kenshoo.jooq.FieldAndValue;
import com.kenshoo.pl.data.CreateRecordCommand.OnDuplicateKey;
import org.jooq.*;
import org.jooq.impl.DSL;
import java.util.*;
import java.util.stream.IntStream;
import java.util.stream.Stream;

import static java.util.stream.Collectors.toList;
import static java.util.stream.Collectors.toSet;
import static org.jooq.lambda.Seq.seq;

/**
 * Executes batches of insert/update/delete commands against a {@link DataTable} using jOOQ
 * batch statements. Commands are first grouped into homogeneous chunks — commands that modify
 * exactly the same set of fields — because a single prepared statement can only bind a fixed
 * field list; each chunk is then executed as one batched statement.
 *
 * NOTE(review): generic type parameters in this file were stripped by an extraction step
 * (e.g. {@code Collection>}, an undeclared type variable {@code C}); they have been
 * reconstructed here from the surrounding usage.
 */
public class CommandsExecutor {

    private final DSLContext dslContext;

    public CommandsExecutor(DSLContext dslContext) {
        this.dslContext = dslContext;
    }

    /** Static factory; equivalent to {@code new CommandsExecutor(dslContext)}. */
    public static CommandsExecutor of(DSLContext dslContext) {
        return new CommandsExecutor(dslContext);
    }

    /**
     * Inserts the given commands, failing on duplicate keys.
     *
     * @return affected rows aggregated over all executed chunks
     */
    public AffectedRows executeInserts(final DataTable table, Collection<CreateRecordCommand> commands) {
        return executeCommands(commands, homogeneousCommands -> executeInsertCommands(table, homogeneousCommands, OnDuplicateKey.FAIL));
    }

    /** Inserts the given commands, silently skipping rows whose key already exists. */
    public AffectedRows executeInsertsOnDuplicateKeyIgnore(final DataTable table, Collection<CreateRecordCommand> commands) {
        return executeCommands(commands, homogeneousCommands -> executeInsertCommands(table, homogeneousCommands, OnDuplicateKey.IGNORE));
    }

    /** Inserts the given commands, updating the existing row when the key already exists. */
    public AffectedRows executeInsertsOnDuplicateKeyUpdate(final DataTable table, Collection<CreateRecordCommand> commands) {
        return executeCommands(commands, homogeneousCommands -> executeInsertCommands(table, homogeneousCommands, OnDuplicateKey.UPDATE));
    }

    /** Updates the rows identified by each command's id with the command's changed fields. */
    public AffectedRows executeUpdates(final DataTable table, Collection<UpdateRecordCommand> commands) {
        return executeCommands(commands, homogeneousCommands -> executeUpdateCommands(table, homogeneousCommands));
    }

    /** Deletes the rows identified by each command's id plus the table's virtual-partition fields. */
    public AffectedRows executeDeletes(final DataTable table, Collection<DeleteRecordCommand> commands) {
        // Guard: executeDeleteCommands requires a non-empty collection (it calls iterator().next()).
        if (commands.isEmpty()) {
            return AffectedRows.empty();
        }
        return executeDeleteCommands(table, commands);
    }

    /**
     * Splits {@code commands} into homogeneous chunks (same set of modified field names) and
     * feeds each chunk to the given executor, accumulating the affected-row counts.
     */
    private <C extends AbstractRecordCommand> AffectedRows executeCommands(Collection<C> commands, HomogeneousChunkExecutor<C> homogeneousChunkExecutor) {
        AffectedRows updated = AffectedRows.empty();
        // LinkedList: cheap head removal and cheap Iterator.remove for matched commands
        List<C> commandsLeft = new LinkedList<>(commands);
        while (!commandsLeft.isEmpty()) {
            C firstCommand = commandsLeft.remove(0);
            List<C> commandsToExecute = new ArrayList<>(Collections.singletonList(firstCommand));
            Set<String> firstCommandFields = getFieldsNames(firstCommand);
            Iterator<C> iterator = commandsLeft.iterator();
            while (iterator.hasNext()) {
                C command = iterator.next();
                if (getFieldsNames(command).equals(firstCommandFields)) {
                    commandsToExecute.add(command);
                    iterator.remove();
                }
            }
            updated = updated.plus(homogeneousChunkExecutor.execute(commandsToExecute));
        }
        return updated;
    }

    /**
     * Executes all delete commands as one jOOQ batch. The WHERE clause is built once from the
     * id fields of the first command (all delete commands share the same id structure) plus the
     * virtual-partition fields; the {@code (Object) null} placeholders are re-bound per command.
     * Precondition: {@code commandsToExecute} is non-empty.
     */
    private AffectedRows executeDeleteCommands(DataTable table, Collection<DeleteRecordCommand> commandsToExecute) {
        DeleteWhereStep delete = dslContext.delete(table);
        Iterator<DeleteRecordCommand> commandIt = commandsToExecute.iterator();
        DeleteRecordCommand command = commandIt.next();
        Condition condition = DSL.trueCondition();
        TableField[] tableFields = command.getId().getTableFields();
        for (TableField id : tableFields) {
            // null is only a bind-value placeholder; real values are bound below
            //noinspection unchecked
            condition = condition.and(id.eq((Object) null));
        }
        for (FieldAndValue partitionFieldAndValue : table.getVirtualPartition()) {
            //noinspection unchecked
            condition = condition.and(((Field) partitionFieldAndValue.getField()).eq((Object) null));
        }
        delete.where(condition);

        BatchBindStep batch = dslContext.batch(delete);
        while (command != null) {
            // Bind order must mirror the condition build order: id fields first, then partition values
            List<Object> values = Stream.concat(Stream.of(command.getId().getValues()),
                    table.getVirtualPartition().stream().map(FieldAndValue::getValue)).collect(toList());
            batch.bind(values.toArray());
            command = commandIt.hasNext() ? commandIt.next() : null;
        }
        return AffectedRows.deleted(IntStream.of(batch.execute()).sum());
    }

    /**
     * Executes all update commands (which share the same field set by construction) as one jOOQ
     * batch. A single UPDATE statement is built from the first command's fields and id structure;
     * per-command values are then bound row by row.
     *
     * @return aggregated updated-row count, or empty when the commands carry no fields at all
     */
    private AffectedRows executeUpdateCommands(DataTable table, List<UpdateRecordCommand> commandsToExecute) {
        UpdateSetFirstStep update1 = dslContext.update(table);
        UpdateSetMoreStep update = null;
        UpdateRecordCommand command1 = commandsToExecute.get(0);
        // A command with no fields would render an invalid UPDATE; nothing to do
        if (!command1.getFields().findFirst().isPresent()) {
            return AffectedRows.empty();
        }
        for (Field field : seq(command1.getFields())) {
            if (update != null) {
                update = update.set(field, (Object) null);
            } else {
                update = update1.set(field, (Object) null);
            }
        }
        assert update != null;
        Condition condition = DSL.trueCondition();
        TableField[] tableFields = command1.getId().getTableFields();
        for (TableField id : tableFields) {
            //noinspection unchecked
            condition = condition.and(id.eq((Object) null));
        }
        for (FieldAndValue partitionFieldAndValue : table.getVirtualPartition()) {
            //noinspection unchecked
            condition = condition.and(((Field) partitionFieldAndValue.getField()).eq((Object) null));
        }
        update.where(condition);

        BatchBindStep batch = dslContext.batch(update);
        for (UpdateRecordCommand command : commandsToExecute) {
            // Bind order mirrors statement build order: SET values, then id values, then partition values
            List<Object> values = Stream.of(command.getValues(command1.getFields()),
                    Stream.of(command.getId().getValues()),
                    table.getVirtualPartition().stream().map(FieldAndValue::getValue)).flatMap(s -> s).collect(toList());
            batch.bind(values.toArray());
        }
        int[] execute = batch.execute();
        return AffectedRows.updated(IntStream.of(execute).sum());
    }

    /**
     * Executes all insert commands as one jOOQ batch, optionally with ON DUPLICATE KEY
     * IGNORE/UPDATE semantics, and writes DB-generated identity values back into the commands.
     */
    private AffectedRows executeInsertCommands(DataTable table, List<CreateRecordCommand> commandsToExecute, OnDuplicateKey onDuplicateKey) {

        // When the table has an auto-generated identity, wrap the DSLContext so the generated
        // keys can be captured and written back into the commands after execution.
        final Optional<GeneratedKeyRecorder> generatedKeyRecorder = Optional.ofNullable(table.getIdentity())
                .map(identity -> new GeneratedKeyRecorder(identity.getField(), commandsToExecute.size()));

        DSLContext dslContext = generatedKeyRecorder.map(g -> g.newRecordingJooq(this.dslContext)).orElse(this.dslContext);

        CreateRecordCommand firstCommand = commandsToExecute.get(0);
        Collection<Field<?>> fields = Stream.concat(firstCommand.getFields(), table.getVirtualPartition().stream().map(FieldAndValue::getField)).collect(toList());
        InsertValuesStepN insertValuesStepN = dslContext.insertInto(table, fields).values(new Object[fields.size()]);
        Insert insert = insertValuesStepN;
        switch (onDuplicateKey) {
            case IGNORE:
                insert = insertValuesStepN.onDuplicateKeyIgnore();
                break;
            case UPDATE:
                InsertOnDuplicateSetStep insertOnDuplicateSetStep = insertValuesStepN.onDuplicateKeyUpdate();
                for (Field field : seq(firstCommand.getFields())) {
                    //noinspection unchecked
                    insertOnDuplicateSetStep = insertOnDuplicateSetStep.set((Field) field, (Object) null);
                }
                insert = (Insert) insertOnDuplicateSetStep;
                break;
        }

        BatchBindStep batch = dslContext.batch(insert);

        for (AbstractRecordCommand command : commandsToExecute) {
            List<Object> values = Stream.concat(command.getValues(firstCommand.getFields()), table.getVirtualPartition().stream().map(FieldAndValue::getValue)).collect(toList());
            if (onDuplicateKey == OnDuplicateKey.UPDATE) {
                // ON DUPLICATE KEY UPDATE binds every value twice: once for INSERT, once for the SET list
                values = Stream.concat(values.stream(), values.stream()).collect(toList());
            }
            batch.bind(values.toArray());
        }
        int[] result = batch.execute();
        // See https://dev.mysql.com/doc/refman/5.7/en/mysql-affected-rows.html for explanation
        // In case of regular INSERT (without IGNORE or ON DUPLICATE KEY UPDATE) the result is -2 for every inserted row
        int inserted = (int) IntStream.of(result).filter(i -> i == 1 || i == -2).count();
        int updated = (int) IntStream.of(result).filter(i -> i == 2).count();

        generatedKeyRecorder
                .map(GeneratedKeyRecorder::getGeneratedKeys)
                .ifPresent(generatedKeys -> setIdsToCommands(table.getIdentity().getField(), commandsToExecute, generatedKeys));

        return AffectedRows.insertedAndUpdated(inserted, updated);
    }

    /** Writes the DB-generated identity values back into the commands, pairing them by position. */
    private void setIdsToCommands(Field idField, List<CreateRecordCommand> commandsToExecute, List<Object> generatedKeys) {
        //noinspection unchecked
        seq(commandsToExecute).zip(generatedKeys).forEach(pair -> pair.v1.set(idField, pair.v2));
    }

    /** The set of field names a command modifies; used to group commands into homogeneous chunks. */
    private Set<String> getFieldsNames(AbstractRecordCommand command) {
        return command.getFields().map(Field::getName).collect(toSet());
    }

    /** Executes one chunk of commands that all modify the same set of fields. */
    @FunctionalInterface
    interface HomogeneousChunkExecutor<C extends AbstractRecordCommand> {
        AffectedRows execute(List<C> commands);
    }

}