fr.boreal.backward_chaining.source_target.SourceTargetRewriter

package fr.boreal.backward_chaining.source_target;

import fr.boreal.backward_chaining.api.BackwardChainingAlgorithm;
import fr.boreal.backward_chaining.source_target.dlx.DLX;
import fr.boreal.backward_chaining.source_target.dlx.DLXResult;
import fr.boreal.backward_chaining.source_target.dlx.DLXResultProcessor;
import fr.boreal.backward_chaining.source_target.dlx.data.ColumnObject;
import fr.boreal.model.formula.api.FOFormula;
import fr.boreal.model.kb.api.RuleBase;
import fr.boreal.model.logicalElements.api.Atom;
import fr.boreal.model.query.api.FOQuery;
import fr.boreal.model.query.impl.UnionFOQuery;
import fr.boreal.model.rule.api.FORule;
import fr.boreal.unifier.QueryUnifier;
import fr.boreal.unifier.QueryUnifierAlgorithm;

import java.util.*;

/**
 * This operator rewrites a query with the given rules, assuming the rules are
 * source-to-target rules, meaning that the vocabulary of the initial query and
 * that of the rewritings are totally disjoint. This lets us rewrite with
 * mapping rules in a single step. This implementation uses the DLX algorithm
 * (Dancing Links) to compute an exact cover of the query atoms by single-piece
 * unifiers.
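 * <p>
 * Minimal usage sketch (the query and rule base are assumed to be built
 * elsewhere, e.g. parsed from a mapping file; only the API shown in this
 * class is used):
 * <pre>{@code
 * BackwardChainingAlgorithm rewriter = new SourceTargetRewriter();
 * UnionFOQuery rewritings = rewriter.rewrite(query, rules);
 * }</pre>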
 */
public class SourceTargetRewriter implements BackwardChainingAlgorithm {

	final QueryUnifierAlgorithm unifier_algo;

	/**
	 * Creates a new SourceTargetRewriter using the default query unifier
	 * algorithm ({@link QueryUnifierAlgorithm})
	 */
	public SourceTargetRewriter() {
		this(new QueryUnifierAlgorithm());
	}

	/**
	 * Creates a new SourceTargetRewriter using the given query unifier algorithm
	 * @param queryUnifierAlgorithm the query unifier algorithm to use
	 */
	public SourceTargetRewriter(QueryUnifierAlgorithm queryUnifierAlgorithm) {
		this.unifier_algo = queryUnifierAlgorithm;
	}

	@Override
	public UnionFOQuery rewrite(FOQuery<? extends FOFormula> query, RuleBase rules) {

		// Select the subset of the rules that are possible candidates:
		// these are the rules whose head contains at least one atom whose
		// predicate also appears in the query
		Set<FORule> candidates = new HashSet<>();
		for (Atom a1 : query.getFormula().asAtomSet()) {
			candidates.addAll(rules.getRulesByHeadPredicate(a1.getPredicate()));
		}

		// Compute all the unifiers of the query with all the candidates
		Set<QueryUnifier> unifiers = new HashSet<>();
		for (FORule r : candidates) {
			unifiers.addAll(unifier_algo.getMostGeneralSinglePieceUnifiers(query, r));
		}

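		// Combine the single-piece unifiers into aggregated unifiers that cover
		// every atom of the query: since the rules are source-to-target, a
		// rewriting is produced only when the whole query can be unified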
		unifiers = this.getTotalAggregatedUnifiers(unifiers, query);

		// Rewrite the query with the unifiers
		Set<FOQuery<? extends FOFormula>> rewritings = new HashSet<>();
		for (QueryUnifier unifier : unifiers) {
			rewritings.add(unifier.apply(query));
		}

		return new UnionFOQuery(rewritings);
	}

	private Set<QueryUnifier> getTotalAggregatedUnifiers(Set<QueryUnifier> unifiersToAggregate, FOQuery<? extends FOFormula> query) {

		if (unifiersToAggregate.isEmpty()) {
			return new HashSet<>();
		}

		// ---- building of matrix of covering ----
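		// Each row of the matrix corresponds to a single-piece unifier and each
		// column to a query atom; a cell is 1 when the unifier covers that atom.
		// DLX then searches for exact covers, i.e. sets of rows covering every
		// query atom exactly once.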

		// rows
		List<List<Byte>> rows = new LinkedList<>();
		// map from row ids to set of unifiers covering that row
		Map<String, Set<QueryUnifier>> unifiersCoveringOfRow = new HashMap<>();

		for (QueryUnifier u : unifiersToAggregate) {
			Iterator<Atom> it = query.getFormula().asAtomSet().iterator();

			List<Byte> row = new LinkedList<>();
			String rowId;

			// column index
			int j = 0;
			StringBuilder rowIdBuilder = new StringBuilder();
			while (it.hasNext()) {
				Atom queryAtom = it.next();

				if (u.getUnifiedQueryPart().asAtomSet().contains(queryAtom)) {
					row.add((byte) 1);
					rowIdBuilder.append("-").append(j);

				} else {
					row.add((byte) 0);
				}
				j++;
			}
			rowId = rowIdBuilder.toString();

			// if it is not a redundant row (same coverage pattern already seen)
			if (!unifiersCoveringOfRow.containsKey(rowId)) {
				rows.add(row);

				Set<QueryUnifier> rowCoveringUnifiers = new HashSet<>();
				rowCoveringUnifiers.add(u);
				unifiersCoveringOfRow.put(rowId, rowCoveringUnifiers);
			} else {
				unifiersCoveringOfRow.get(rowId).add(u);
			}
		}

		ColumnObject h = translateToColumnObject(rows);

		AggregatedUnifDLXResultProcessor resultProcessor = new AggregatedUnifDLXResultProcessor(unifiersCoveringOfRow);

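		// DLX enumerates the exact covers of the query atoms; for each cover, the
		// result processor aggregates the corresponding single-piece unifiers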
		DLX.solve(h, false, resultProcessor);

		return resultProcessor.getAggregatedUnifiers();
	}

	private ColumnObject translateToColumnObject(List<List<Byte>> rows) {
		int queryAtomNumber = rows.getFirst().size();

		// --conversion to arrays--
		// covering matrix of query atoms by piece unifiers
		byte[][] coveringMatrix = new byte[rows.size()][queryAtomNumber];
		// label of the columns
		Object[] columnLabels = new Object[queryAtomNumber];

		// filling of coveringMatrix
		for (int i = 0; i < rows.size(); i++) {
			for (int j = 0; j < queryAtomNumber; j++) {
				coveringMatrix[i][j] = rows.get(i).get(j);
			}
		}

		// filling of column labels
		for (int j = 0; j < queryAtomNumber; j++) {
			columnLabels[j] = "" + j;
		}

		return DLX.buildSparseMatrix(coveringMatrix, columnLabels);
	}

	/**
	 * DLX result processor that aggregates, for each exact cover found by DLX,
	 * the single-piece unifiers covering its rows
	 */
	private static class AggregatedUnifDLXResultProcessor implements DLXResultProcessor {

		private final Set<QueryUnifier> totalAggregatedUnifiers = new HashSet<>();
		private final Map<String, Set<QueryUnifier>> unifiersCoveringOfRow;

		/**
		 * Creates a result processor with the given row coverage information
		 * @param unifiersCoveringOfRow map from row identifiers to the unifiers covering that row
		 */
		public AggregatedUnifDLXResultProcessor(Map<String, Set<QueryUnifier>> unifiersCoveringOfRow) {
			super();
			this.unifiersCoveringOfRow = unifiersCoveringOfRow;
		}

		/**
		 * The result is an exact covering set of rows.
		 * Rows are described by the column labels where a 1 appears in the row.
		 * We perform a breadth-first walk to build the aggregated unifiers
		 * defined by the result.
		 *
		 * @see DLXResultProcessor#processResult(DLXResult)
		 */
		public boolean processResult(DLXResult result) {
			final Iterator<List<Object>> rows = result.rows();

			// for each possible branching
			if (rows.hasNext()) {
				// a row is defined by a list of column labels
				List<Object> columnLabelsOfRow = rows.next();

				// build the row identifier
				StringBuilder rowId = new StringBuilder();
				for (Object columnLabel : columnLabelsOfRow) {
					rowId.append("-").append(columnLabel);
				}

				// piece unifiers corresponding to the row
				Set<QueryUnifier> rowUnifiers = unifiersCoveringOfRow.get(rowId.toString());

				fromResultToAggregatedUnifiers(rowUnifiers, rows);
			}
			return true;
		}

		/**
		 * @return the aggregated unifiers
		 */
		public Set<QueryUnifier> getAggregatedUnifiers() {
			return this.totalAggregatedUnifiers;
		}

		private void fromResultToAggregatedUnifiers(Set<QueryUnifier> previousAggregatedUnifiers, Iterator<List<Object>> rows) {
			if (rows.hasNext()) {
				// a row is defined by a list of column labels
				List<Object> columnLabelsOfRow = rows.next();

				// build the row identifier
				StringBuilder rowId = new StringBuilder();
				for (Object columnLabel : columnLabelsOfRow) {
					rowId.append("-").append(columnLabel);
				}

				// piece unifiers corresponding to the row
				Set<QueryUnifier> rowUnifiers = unifiersCoveringOfRow.get(rowId.toString());

				Set<QueryUnifier> partiallyAggregatedUnifiers = new HashSet<>();
				// aggregate each previously aggregated unifier with the unifiers of the new row
				for (QueryUnifier previousUnifier : previousAggregatedUnifiers) {
					for (QueryUnifier rowUnifier : rowUnifiers) {
						Optional<QueryUnifier> optAggregatedUnifier = previousUnifier.safeAggregate(rowUnifier);
						optAggregatedUnifier.ifPresent(partiallyAggregatedUnifiers::add);
					}
				}

				// continue with the next row
				fromResultToAggregatedUnifiers(partiallyAggregatedUnifiers, rows);
			} else {
				// when there are no more rows, the unifiers are total
				this.totalAggregatedUnifiers.addAll(previousAggregatedUnifiers);
			}
		}
	}
}