com.torodb.backend.BackendServiceImpl

/*
 * ToroDB
 * Copyright © 2014 8Kdata Technology (www.8kdata.com)
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU Affero General Public License as published by
 * the Free Software Foundation, either version 3 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU Affero General Public License for more details.
 *
 * You should have received a copy of the GNU Affero General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

package com.torodb.backend;

import com.torodb.backend.ErrorHandler.Context;
import com.torodb.backend.meta.SchemaUpdater;
import com.torodb.common.util.Empty;
import com.torodb.core.TableRefFactory;
import com.torodb.core.annotations.TorodbIdleService;
import com.torodb.core.backend.BackendConnection;
import com.torodb.core.backend.BackendService;
import com.torodb.core.concurrent.ConcurrentToolsFactory;
import com.torodb.core.concurrent.StreamExecutor;
import com.torodb.core.d2r.IdentifierFactory;
import com.torodb.core.d2r.ReservedIdGenerator;
import com.torodb.core.exceptions.SystemException;
import com.torodb.core.exceptions.ToroRuntimeException;
import com.torodb.core.exceptions.user.UserException;
import com.torodb.core.retrier.Retrier;
import com.torodb.core.retrier.Retrier.Hint;
import com.torodb.core.retrier.RetrierGiveUpException;
import com.torodb.core.services.IdleTorodbService;
import com.torodb.core.transaction.RollbackException;
import com.torodb.core.transaction.metainf.MetaCollection;
import com.torodb.core.transaction.metainf.MetaDatabase;
import com.torodb.core.transaction.metainf.MetaDocPart;
import com.torodb.core.transaction.metainf.MetaDocPartIndexColumn;
import com.torodb.core.transaction.metainf.MetaIdentifiedDocPartIndex;
import org.apache.logging.log4j.Logger;
import org.jooq.DSLContext;
import org.jooq.lambda.tuple.Tuple2;

import java.sql.Connection;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.ThreadFactory;
import java.util.function.Consumer;
import java.util.function.Function;
import java.util.stream.Stream;

import javax.inject.Inject;

public class BackendServiceImpl extends IdleTorodbService implements BackendService {

  private static final Logger LOGGER = BackendLoggerFactory.get(BackendServiceImpl.class);

  private final DbBackendService dbBackendService;
  private final SqlInterface sqlInterface;
  private final ReservedIdGenerator ridGenerator;
  private final Retrier retrier;
  private final StreamExecutor streamExecutor;
  private final KvMetainfoHandler metainfoHandler;
  private final TableRefFactory tableRefFactory;
  private final IdentifierFactory identifierFactory;
  private final SchemaUpdater schemaUpdater;

  /**
   * @param threadFactory the thread factory that will be used to create the startup and shutdown
   *                      threads
   */
  @Inject
  public BackendServiceImpl(@TorodbIdleService ThreadFactory threadFactory,
      ReservedIdGenerator ridGenerator, DbBackendService dbBackendService,
      SqlInterface sqlInterface, TableRefFactory tableRefFactory,
      IdentifierFactory identifierFactory, Retrier retrier,
      ConcurrentToolsFactory concurrentToolsFactory,
      KvMetainfoHandler metainfoHandler, SchemaUpdater schemaUpdater) {
    super(threadFactory);

    this.dbBackendService = dbBackendService;
    this.sqlInterface = sqlInterface;
    this.ridGenerator = ridGenerator;
    this.retrier = retrier;
    this.streamExecutor = concurrentToolsFactory.createStreamExecutor(
        LOGGER, "backend-inner-jobs", true);
    this.metainfoHandler = metainfoHandler;
    this.tableRefFactory = tableRefFactory;
    this.identifierFactory = identifierFactory;
    this.schemaUpdater = schemaUpdater;
  }

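  /**
   * Opens a new backend connection backed by the shared SQL interface, reserved id generator and
   * identifier factory.
   */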
  @Override
  public BackendConnection openConnection() {
    return new BackendConnectionImpl(this, sqlInterface, ridGenerator, 
        tableRefFactory, identifierFactory);
  }

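  /**
   * Enables data insert (bulk import) mode on the given database. The operation is idempotent
   * and synchronous, so the returned future is always already completed.
   */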
  @Override
  public CompletableFuture<Empty> enableDataImportMode(MetaDatabase db)
      throws RollbackException {
    if (!sqlInterface.getDbBackend().isOnDataInsertMode(db)) {
      sqlInterface.getDbBackend().enableDataInsertMode(db);
    }
    return CompletableFuture.completedFuture(Empty.getInstance());
  }

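  /**
   * Disables data insert mode on the given database and runs the work that was deferred while
   * the mode was active: internal doc part table indexes, user-declared doc part indexes and
   * any backend-specific finish tasks. All jobs are submitted to the inner stream executor, and
   * the returned future completes when they have finished.
   */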
  @Override
  public CompletableFuture<Empty> disableDataImportMode(MetaDatabase db)
      throws RollbackException {
    if (!sqlInterface.getDbBackend().isOnDataInsertMode(db)) {
      LOGGER.debug("Ignoring attempt to disable import mode on {} as it is not on that mode",
          db.getIdentifier());
      return CompletableFuture.completedFuture(Empty.getInstance());
    }
    sqlInterface.getDbBackend().disableDataInsertMode(db);

    //create internal indexes
    Stream<Consumer<DSLContext>> createInternalIndexesJobs = db.streamMetaCollections().flatMap(
        col -> col.streamContainedMetaDocParts().flatMap(
            docPart -> enableInternalIndexJobs(db, col, docPart)
        )
    );

    //create indexes
    Stream<Consumer<DSLContext>> createIndexesJobs = db.streamMetaCollections().flatMap(
        col -> enableIndexJobs(db, col)
    );

    //backend specific jobs
    Stream<Consumer<DSLContext>> backendSpecificJobs = sqlInterface.getStructureInterface()
        .streamDataInsertFinishTasks(db).map(job -> {
          return (Consumer<DSLContext>) dsl -> {
            String index = job.apply(dsl);
            LOGGER.info("Task {} completed", index);
          };
        });
    Stream<Consumer<DSLContext>> jobs = Stream
        .concat(createInternalIndexesJobs, createIndexesJobs);
    jobs = Stream.concat(jobs, backendSpecificJobs);
    Stream<Runnable> runnables = jobs.map(this::dslConsumerToRunnable);

    return streamExecutor.executeRunnables(runnables);
  }

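  /**
   * Creates one job per internal index that the given doc part table needs. Non-root doc parts
   * also require the identifier of their parent doc part table, which is resolved here.
   */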
  private Stream<Consumer<DSLContext>> enableInternalIndexJobs(MetaDatabase db, MetaCollection col,
      MetaDocPart docPart) {
    StructureInterface structureInterface = sqlInterface.getStructureInterface();

    Stream<Function<DSLContext, String>> consumerStream;

    if (docPart.getTableRef().isRoot()) {
      consumerStream = structureInterface.streamRootDocPartTableIndexesCreation(
          db.getIdentifier(),
          docPart.getIdentifier(),
          docPart.getTableRef()
      );
    } else {
      MetaDocPart parentDocPart = col.getMetaDocPartByTableRef(
          docPart.getTableRef().getParent().get()
      );
      assert parentDocPart != null;
      consumerStream = structureInterface.streamDocPartTableIndexesCreation(
          db.getIdentifier(),
          docPart.getIdentifier(),
          docPart.getTableRef(),
          parentDocPart.getIdentifier()
      );
    }

    return consumerStream.map(job -> {
      return (Consumer<DSLContext>) dsl -> {
        String index = job.apply(dsl);
        LOGGER.info("Created internal index {} for table {}", index, docPart.getIdentifier());
      };
    });
  }

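  /**
   * Collects one index creation job for each identified doc part index declared in the given
   * collection.
   */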
  private Stream<Consumer<DSLContext>> enableIndexJobs(MetaDatabase db, MetaCollection col) {
    List<Consumer<DSLContext>> consumerList = new ArrayList<>();

    Iterator<? extends MetaDocPart> docPartIterator = col.streamContainedMetaDocParts().iterator();
    while (docPartIterator.hasNext()) {
      MetaDocPart docPart = docPartIterator.next();

      Iterator<? extends MetaIdentifiedDocPartIndex> docPartIndexIterator = docPart.streamIndexes()
          .iterator();
      while (docPartIndexIterator.hasNext()) {
        MetaIdentifiedDocPartIndex docPartIndex = docPartIndexIterator.next();

        consumerList.add(createIndexJob(db, docPart, docPartIndex));
      }
    }

    return consumerList.stream();
  }

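  /**
   * Builds a job that creates a single doc part index, translating each index column into a
   * (column identifier, is-ascending) pair before delegating to the structure interface. User
   * errors at this point are unexpected and are rethrown as {@link SystemException}s.
   */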
  private Consumer<DSLContext> createIndexJob(MetaDatabase db, MetaDocPart docPart,
      MetaIdentifiedDocPartIndex docPartIndex) {
    return dsl -> {
      List<Tuple2<String, Boolean>> columnList = new ArrayList<>(docPartIndex.size());
      for (Iterator<? extends MetaDocPartIndexColumn> indexColumnIterator = docPartIndex
          .iteratorColumns(); indexColumnIterator.hasNext();) {
        MetaDocPartIndexColumn indexColumn = indexColumnIterator.next();
        columnList.add(new Tuple2<>(indexColumn.getIdentifier(), indexColumn.getOrdering()
            .isAscending()));
      }

      try {
        sqlInterface.getStructureInterface().createIndex(
            dsl, docPartIndex.getIdentifier(), db.getIdentifier(), docPart.getIdentifier(),
            columnList,
            docPartIndex.isUnique());
      } catch (UserException userException) {
        throw new SystemException(userException);
      }
      LOGGER.info("Created index {} for table {}", docPartIndex.getIdentifier(), docPart
          .getIdentifier());
    };
  }

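  /**
   * Wraps a DSL-consuming job into a {@link Runnable} that executes it on a fresh write
   * connection and commits on success. {@link SQLException}s are translated by the backend
   * error handler, the whole job is retried with the {@link Hint#CRITICAL} hint, and a retrier
   * give-up is rethrown as a {@link ToroRuntimeException}.
   */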
  private Runnable dslConsumerToRunnable(Consumer<DSLContext> consumer) {
    return () -> {
      try {
        retrier.retry(() -> {
          try (Connection connection = sqlInterface.getDbBackend().createWriteConnection()) {
            DSLContext dsl = sqlInterface.getDslContextFactory()
                .createDslContext(connection);

            consumer.accept(dsl);
            connection.commit();
            return null;
          } catch (SQLException ex) {
            throw sqlInterface.getErrorHandler().handleException(Context.CREATE_INDEX, ex);
          }
        }, Hint.CRITICAL);
      } catch (RetrierGiveUpException ex) {
        throw new ToroRuntimeException(ex);
      }
    };
  }

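  /**
   * Starts the inner stream executor and waits for the underlying database backend service to
   * be running before this service is reported as started.
   */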
  @Override
  protected void startUp() throws Exception {
    LOGGER.debug("Starting backend...");

    streamExecutor.startAsync();
    streamExecutor.awaitRunning();

    LOGGER.trace("Waiting for {} to be running...", dbBackendService);
    dbBackendService.awaitRunning();

    LOGGER.debug("Backend started");
  }

  @Override
  protected void shutDown() throws Exception {
    streamExecutor.stopAsync();
    streamExecutor.awaitTerminated();
  }

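  /**
   * Hook for connections created by {@link #openConnection()} to notify that they have been
   * closed. Currently a no-op.
   */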
  void onConnectionClosed(BackendConnectionImpl connection) {
  }

  KvMetainfoHandler getMetaInfoHandler() {
    return metainfoHandler;
  }

  SchemaUpdater getSchemaUpdater() {
    return schemaUpdater;
  }
}
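
For orientation, a minimal usage sketch follows. The wiring is hypothetical: ToroDB normally assembles these services through dependency injection, the lifecycle calls (startAsync, awaitRunning, stopAsync, awaitTerminated) are assumed from the Guava Service API that IdleTorodbService appears to follow (the class above invokes them on its inner services), and the close() call assumes BackendConnection is closeable, as the onConnectionClosed hook suggests.

import com.google.inject.Guice;
import com.google.inject.Injector;
import com.torodb.core.backend.BackendConnection;
import com.torodb.core.backend.BackendService;

public class BackendServiceUsageSketch {

  public static void main(String[] args) {
    // Hypothetical: a Guice module that binds BackendService to BackendServiceImpl
    // and provides all of its constructor dependencies.
    Injector injector = Guice.createInjector(/* backend modules */);
    BackendService backend = injector.getInstance(BackendService.class);

    backend.startAsync();   // lifecycle methods assumed from the Guava Service API
    backend.awaitRunning(); // blocks until startUp() has finished

    BackendConnection connection = backend.openConnection();
    try {
      // ... run metadata and data operations through the connection ...
    } finally {
      connection.close();   // assuming BackendConnection exposes close()
    }

    backend.stopAsync();
    backend.awaitTerminated();
  }
}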