package com.clickzetta.platform.client.api;

import com.clickzetta.platform.client.Table;
import com.clickzetta.platform.common.ColumnSchema;
import com.clickzetta.platform.common.Schema;
import com.clickzetta.platform.operator.Bytes;
import com.clickzetta.platform.operator.WriteOperation;
import com.clickzetta.platform.util.DateUtil;
import cz.proto.ingestion.Ingestion;
import org.apache.kudu.RowOperations;

import java.sql.Timestamp;
import java.util.*;
import java.util.stream.Collectors;

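/**
 * Message produced when a {@code DataMutateRequest} was not applied in full.
 * <p>
 * In addition to the state carried by {@link SuccessMessage}, it keeps a reference to the
 * target {@link Table} so that the rows flagged as failed in the response can be decoded
 * back from the request's row operations.
 */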
public class FailureMessage extends SuccessMessage {

  private final Table table;

  public FailureMessage(
      String sessionId,
      Table table,
      Ingestion.DataMutateRequest request,
      Ingestion.DataMutateResponse response) {
    super(sessionId, request, response);
    this.table = table;
  }

  @Override
  public int getErrorRowsCounts() {
    if (response.getRowStatusCount() > 0) {
      return (int) response.getRowStatusList().stream()
          .filter(mutateRowStatus -> mutateRowStatus.getCode() != Ingestion.Code.SUCCESS).count();
    }
    return 0;
  }

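  /**
   * Returns the rows that failed to apply, decoded from the original request in row-index
   * order, or {@code null} when the response reports no failed rows.
   */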
  @Override
  public List<Row> getErrorRows() {
    if (response.getRowStatusCount() > 0) {
      List<Ingestion.MutateRowStatus> errorRowStatus = response.getRowStatusList()
          .stream().filter(mutateRowStatus -> mutateRowStatus.getCode() != Ingestion.Code.SUCCESS)
          .sorted(Comparator.comparingInt(Ingestion.MutateRowStatus::getRowIndex))
          .collect(Collectors.toList());
      return decodePartialRow(request, errorRowStatus);
    }
    return null;
  }

  private Map<Byte, WriteOperation.ChangeType> getChangeTypeMap() {
    Map<Byte, WriteOperation.ChangeType> changeTypeMap = new HashMap<>();
    for (WriteOperation.ChangeType changeType : WriteOperation.ChangeType.values()) {
      changeTypeMap.put(changeType.toEncodedByte(), changeType);
    }
    return changeTypeMap;
  }

  private Row buildRowWithChangeType(WriteOperation.ChangeType changeType) {
    Row row = null;
    switch (changeType) {
      case INSERT:
      case INSERT_IGNORE:
        row = new KuduRow(table, WriteOperation.ChangeType.INSERT_IGNORE); break;
      case DELETE:
      case DELETE_IGNORE:
        row = new KuduRow(table, WriteOperation.ChangeType.DELETE_IGNORE); break;
      case UPSERT:
        row = new KuduRow(table, WriteOperation.ChangeType.UPSERT); break;
      case UPDATE:
      case UPDATE_IGNORE:
        row = new KuduRow(table, WriteOperation.ChangeType.UPDATE_IGNORE); break;
      case SPLIT_ROWS:
      default:
        throw new UnsupportedOperationException("Unsupported writeOperation changeType: " + changeType);
    }
    return row;
  }
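  /**
   * Decodes the rows encoded in the request's {@code RowOperationsPB} and returns those whose
   * row index appears in {@code errorRowStatus}.
   * <p>
   * The layout assumed here mirrors the decoding loop below: each row starts with a change-type
   * byte, followed by a bitset of the columns that are set, an optional bitset of null columns
   * (present only when the schema has nullable columns), and then the fixed-size column values.
   * Variable-length values live in the request's indirect data and are referenced by offset
   * and length.
   */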

  private List<Row> decodePartialRow(
      Ingestion.DataMutateRequest request, List<Ingestion.MutateRowStatus> errorRowStatus) {
    if (errorRowStatus == null || errorRowStatus.isEmpty()) {
      return null;
    }
    List<Row> resultRows = new ArrayList<>();
    Set<Integer> errorRowIndex = errorRowStatus.stream()
        .map(Ingestion.MutateRowStatus::getRowIndex).collect(Collectors.toSet());

    Map<Byte, WriteOperation.ChangeType> changeTypeMap = getChangeTypeMap();
    Schema schema = table.getSchema();
    RowOperations.RowOperationsPB operationsPB = request.getRowOperations();

    byte[] rowData = operationsPB.getRows().toByteArray();
    byte[] indirectData = operationsPB.getIndirectData().toByteArray();
    boolean hasNullableBitSet = schema.hasNullableColumns();

    // Decode the encoded rows using the target schema's column layout.
    int index = 0;
    int point = 0;
    while (point < rowData.length) {
      WriteOperation.ChangeType operatorType = changeTypeMap.get(Bytes.getByte(rowData, point++));
      Row row = buildRowWithChangeType(operatorType);

      BitSet columnsBitSet = Bytes.toBitSet(rowData, point, schema.getColumns().size());
      point += Bytes.getBitSetSize(schema.getColumns().size());
      BitSet nullsBitSet = null;
      if (hasNullableBitSet) {
        nullsBitSet = Bytes.toBitSet(rowData, point, schema.getColumns().size());
        point += Bytes.getBitSetSize(schema.getColumns().size());
      }

      for (int i = 0; i < schema.getColumnCount(); i++) {
        ColumnSchema columnSchema = schema.getColumnByIndex(i);
        if (columnsBitSet.get(i)) {
          if (columnSchema.isNullable() && nullsBitSet.get(i)) {
            row.setValue(columnSchema.getName(), null);
          } else {
            Object val = getObject(columnSchema, point, rowData, indirectData);
            row.setValue(columnSchema.getName(), val);
            point += columnSchema.getType().getSize(columnSchema.getTypeAttributes());
          }
        }
      }
      if (errorRowIndex.contains(index)) {
        resultRows.add(row);
      }
      index++;
    }
    return resultRows;
  }
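  /**
   * Reads a single value of {@code columnSchema}'s type from {@code rowData} at offset
   * {@code point}. String, varchar and binary values are stored as an 8-byte offset and an
   * 8-byte length into {@code indirectData}.
   */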

  public Object getObject(ColumnSchema columnSchema, int point, byte[] rowData, byte[] indirectData) {
    Object val = null;
    try {
      switch (columnSchema.getType()) {
        case BOOL:
          val = Bytes.getBoolean(rowData, point);
          break;
        case INT8:
          val = Bytes.getByte(rowData, point);
          break;
        case INT16:
          val = Bytes.getShort(rowData, point);
          break;
        case INT32:
          val = Bytes.getInt(rowData, point);
          break;
        case INT64:
          val = Bytes.getLong(rowData, point);
          break;
        case UNIXTIME_MICROS:
          val = new Timestamp(Bytes.getLong(rowData, point));
          break;
        case FLOAT:
          val = Bytes.getFloat(rowData, point);
          break;
        case DOUBLE:
          val = Bytes.getDouble(rowData, point);
          break;
        case STRING:
        case VARCHAR:
        case BINARY:
          long offset = Bytes.getLong(rowData, point);
          long length = Bytes.getLong(rowData, point + 8);
          val = Bytes.getString(indirectData, (int) offset, (int) length);
          break;
        case DATE:
          int days = Bytes.getInt(rowData, point);
          val = DateUtil.epochDaysToSqlDate(days);
          break;
        case DECIMAL:
          int precision = columnSchema.getTypeAttributes().getPrecision();
          int scale = columnSchema.getTypeAttributes().getScale();
          val = Bytes.getDecimal(rowData, point, precision, scale);
          break;
        default:
          throw new IllegalArgumentException("Unsupported column type: " + columnSchema.getType());
      }
    } catch (ClassCastException e) {
      throw new IllegalArgumentException(
          "Value type does not match column type " + columnSchema.getType() +
              " for column " + columnSchema.getName());
    }
    return val;
  }
}
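A minimal usage sketch: how a caller might inspect a FailureMessage, for example inside an error listener on the ingestion stream. Only getErrorRowsCounts() and getErrorRows() come from the class above; the FailureLogger wrapper, the use of Row.toString(), and the assumption that Row is public in the same package are illustrative.

import com.clickzetta.platform.client.api.FailureMessage;
import com.clickzetta.platform.client.api.Row;

import java.util.List;

public class FailureLogger {

  // Logs every row that the mutate request failed to apply.
  public static void log(FailureMessage message) {
    if (message.getErrorRowsCounts() == 0) {
      return; // every row in the batch was applied
    }
    // Rows are decoded back from the original mutate request by FailureMessage.
    List<Row> errorRows = message.getErrorRows();
    for (Row row : errorRows) {
      // Row accessors are not shown in this file; toString() is used purely for illustration.
      System.err.println("Row failed to apply: " + row);
    }
  }
}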