package com.aliyun.datahub.client.impl;

import com.aliyun.datahub.client.auth.Account;
import com.aliyun.datahub.client.common.DatahubConfig;
import com.aliyun.datahub.client.common.DatahubConstant;
import com.aliyun.datahub.client.exception.DatahubClientException;
import com.aliyun.datahub.client.exception.InvalidParameterException;
import com.aliyun.datahub.client.http.HttpConfig;
import com.aliyun.datahub.client.impl.batch.BatchConverterFactory;
import com.aliyun.datahub.client.impl.batch.BatchSerializer;
import com.aliyun.datahub.client.impl.batch.BatchType;
import com.aliyun.datahub.client.impl.request.protobuf.GetBatchRecordsRequestPB;
import com.aliyun.datahub.client.impl.request.protobuf.PutBatchRecordsRequestPB;
import com.aliyun.datahub.client.impl.schemaregistry.SchemaRegistryFactory;
import com.aliyun.datahub.client.metircs.ClientMetrics;
import com.aliyun.datahub.client.metircs.MetricType;
import com.aliyun.datahub.client.model.*;
import com.aliyun.datahub.client.model.protobuf.GetBatchRecordsResultPB;
import com.aliyun.datahub.client.util.FormatUtils;
import com.codahale.metrics.Meter;
import com.codahale.metrics.Timer;
import org.apache.commons.lang3.StringUtils;

import java.util.HashMap;
import java.util.List;
import java.util.Map;

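/**
 * DataHub client implementation for batch reads and writes. Records are
 * serialized with the Avro batch format via the schema registry, and request
 * metrics (QPS, RPS, throughput, latency) are reported through {@link ClientMetrics}.
 */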
public class DatahubClientBatchImpl extends DatahubClientJsonImpl {

    public DatahubClientBatchImpl(String endpoint, Account account, DatahubConfig datahubConfig, HttpConfig httpConfig,
                                  String userAgent, SchemaRegistryFactory schemaRegistryFactory) {
        super(endpoint, account, datahubConfig, httpConfig, userAgent, schemaRegistryFactory);
    }

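    /**
     * Not supported by the batch client; use {@link #putRecordsByShard} instead.
     */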
    @Override
    public PutRecordsResult putRecords(String projectName, String topicName, List<RecordEntry> records) {
        throw new DatahubClientException("This method is not supported for batch client, please use putRecordsByShard");
    }

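    /**
     * Writes a batch of records to the given shard. Records are validated
     * (not-null tuple fields must be set), serialized with the Avro batch
     * serializer, and sent in a single request; put metrics are updated on success.
     */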
    @Override
    public PutRecordsByShardResult putRecordsByShard(String projectName, String topicName, String shardId, List<RecordEntry> records) {
        FormatUtils.checkProjectName(projectName);
        FormatUtils.checkTopicName(topicName);
        FormatUtils.checkShardId(shardId);
        if (records == null || records.isEmpty()) {
            throw new InvalidParameterException("Records is null or empty");
        }
        checkRecordEntryValid(records);

        BatchSerializer serializer = BatchConverterFactory.getSerializer(BatchType.AVRO)
                                             .setProjectName(projectName)
                                             .setTopicName(topicName)
                                             .setCompressType(httpConfig.getCompressType())
                                             .setSchemaRegistry(getSchemaRegistry());
        PutBatchRecordsRequestPB request = new PutBatchRecordsRequestPB()
                                                   .setSerializer(serializer)
                                                   .setRecords(records);
        String metricKey = ClientMetrics.genMetricKey(projectName, topicName);
        Timer.Context putLatency = METRIC_PROXY.getTimerContext(MetricType.PUT_RECORD_LATENCY.getName(), metricKey);
        try {
            PutRecordsByShardResult result = callWrapper(getService().putBatchRecordsByShard(projectName,
                    topicName, shardId, request));
            if (result != null) {
                Meter putQps = METRIC_PROXY.getMeter(MetricType.PUT_RECORD_QPS.getName(), metricKey);
                Meter putRps = METRIC_PROXY.getMeter(MetricType.PUT_RECORD_RPS.getName(), metricKey);
                Meter putThroughput = METRIC_PROXY.getMeter(MetricType.PUT_RECORD_THROUGHPUT.getName(), metricKey);
                if (putQps != null) {
                    putQps.mark(1);
                }

                if (putRps != null) {
                    putRps.mark(records.size());
                }
                if (putThroughput != null) {
                    putThroughput.mark(request.getDataSize());
                }
            }
            return result;
        } finally {
            if (putLatency != null) {
                putLatency.stop();
            }
        }
    }

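    /**
     * Reads records starting at {@code cursor}; delegates to the schema-aware
     * overload with a null schema.
     */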
    @Override
    public GetRecordsResult getRecords(String projectName, String topicName, String shardId, String cursor, int limit) {
        return getRecords(projectName, topicName, shardId, null, cursor, limit);
    }

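    /**
     * Reads up to {@code limit} records from the given shard starting at
     * {@code cursor}. The limit is clamped to [MIN_FETCH_SIZE, MAX_FETCH_SIZE],
     * and the result is wired to the schema registry for deserialization;
     * get metrics are updated on success.
     */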
    @Override
    public GetRecordsResult getRecords(String projectName, String topicName, String shardId, RecordSchema schema, String cursor, int limit) {
        FormatUtils.checkProjectName(projectName);
        FormatUtils.checkTopicName(topicName);
        if (StringUtils.isEmpty(cursor)) {
            throw new InvalidParameterException("Cursor format is invalid");
        }

        limit = Math.max(MIN_FETCH_SIZE, limit);
        limit = Math.min(MAX_FETCH_SIZE, limit);

        final GetBatchRecordsRequestPB request = new GetBatchRecordsRequestPB();
        request.setCursor(cursor).setLimit(limit);

        String metricKey = ClientMetrics.genMetricKey(projectName, topicName);
        Timer.Context getLatency = METRIC_PROXY.getTimerContext(MetricType.GET_RECORD_LATENCY.getName(), metricKey);
        try {
            GetBatchRecordsResultPB result = callWrapper(getService().getBatchRecords(projectName,
                    topicName, shardId, request));

            if (result != null) {
                result.internalSetProjectName(projectName);
                result.internalSetTopicName(topicName);
                result.internalSetShardId(shardId);
                result.internalSetSchemaRegistryClient(getSchemaRegistry());

                Meter getQps = METRIC_PROXY.getMeter(MetricType.GET_RECORD_QPS.getName(), metricKey);
                Meter getRps = METRIC_PROXY.getMeter(MetricType.GET_RECORD_RPS.getName(), metricKey);
                Meter getThroughput = METRIC_PROXY.getMeter(MetricType.GET_RECORD_THROUGHPUT.getName(), metricKey);
                if (getQps != null) {
                    getQps.mark(1);
                }
                if (getRps != null) {
                    getRps.mark(result.getRecordCount());
                }
                if (getThroughput != null) {
                    getThroughput.mark(result.getDataSize());
                }
            }
            return result;
        } finally {
            if (getLatency != null) {
                getLatency.stop();
            }
        }
    }

    private void checkRecordEntryValid(List<RecordEntry> records) {
        // Batch-client-only check: ensure all not-null fields are set for tuple records
        for (RecordEntry entry : records) {
            RecordData data = entry.getRecordData();
            if (data instanceof TupleRecordData) {
                TupleRecordData tupleData = (TupleRecordData) data;
                tupleData.checkIfNotNullFieldSet();
            }
        }
    }

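    /**
     * Writes a batch of records to the given shard using an access token, which
     * is sent as the {@link DatahubConstant#X_DATAHUB_ACCESS_TOKEN} header.
     * Records are serialized with the Avro batch serializer; put metrics are
     * updated on success.
     */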
    @Override
    public PutRecordsByShardResult putRecordsByShard(String projectName, String topicName, String shardId, final List<RecordEntry> records, String token) {
        FormatUtils.checkProjectName(projectName);
        FormatUtils.checkTopicName(topicName);
        FormatUtils.checkShardId(shardId);
        if (records == null || records.isEmpty()) {
            throw new InvalidParameterException("Records is null or empty");
        }

        if (StringUtils.isEmpty(token)) {
            throw new InvalidParameterException("Token is empty");
        }

        Map<String, String> headers = new HashMap<>();
        if (!StringUtils.isEmpty(token)) {
            headers.put(DatahubConstant.X_DATAHUB_ACCESS_TOKEN, token);
        }

        BatchSerializer serializer = BatchConverterFactory.getSerializer(BatchType.AVRO)
                                             .setProjectName(projectName)
                                             .setTopicName(topicName)
                                             .setCompressType(httpConfig.getCompressType())
                                             .setSchemaRegistry(getSchemaRegistry());
        PutBatchRecordsRequestPB request = new PutBatchRecordsRequestPB()
                                                   .setSerializer(serializer)
                                                   .setRecords(records);

        String metricKey = ClientMetrics.genMetricKey(projectName, topicName);
        Timer.Context putLatency = METRIC_PROXY.getTimerContext(MetricType.PUT_RECORD_LATENCY.getName(), metricKey);
        try {
            PutRecordsByShardResult result = callWrapper(getService().putBatchRecordsByShard(projectName,
                    topicName, shardId, request, headers));
            if (result != null) {
                Meter putQps = METRIC_PROXY.getMeter(MetricType.PUT_RECORD_QPS.getName(), metricKey);
                Meter putRps = METRIC_PROXY.getMeter(MetricType.PUT_RECORD_RPS.getName(), metricKey);
                Meter putThroughput = METRIC_PROXY.getMeter(MetricType.PUT_RECORD_THROUGHPUT.getName(), metricKey);
                if (putQps != null) {
                    putQps.mark(1);
                }

                if (putRps != null) {
                    putRps.mark(records.size());
                }

                if (putThroughput != null) {
                    putThroughput.mark(request.getDataSize());
                }
            }
            return result;
        } finally {
            if (putLatency != null) {
                putLatency.stop();
            }
        }
    }

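    /**
     * Reads records for a subscription (identified by {@code subId}) with an
     * access token and optional filter; delegates to the schema-aware overload
     * with a null schema.
     */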
    @Override
    public GetRecordsResult getRecords(String projectName, String topicName, String shardId, String cursor, int limit, String subId, String filter, String token) {
        return getRecords(projectName, topicName, shardId, null, cursor, limit, subId, filter, token);
    }

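    /**
     * Reads up to {@code limit} records for a subscription. {@code subId} and
     * the access token are required and passed as request headers, the optional
     * {@code filter} is forwarded with the request, and the limit is clamped to
     * [MIN_FETCH_SIZE, MAX_FETCH_SIZE]; get metrics are updated on success.
     */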
    @Override
    public GetRecordsResult getRecords(String projectName, String topicName, final String shardId, final RecordSchema schema, String cursor, int limit, String subId, String filter, String token) {
        FormatUtils.checkProjectName(projectName);
        FormatUtils.checkTopicName(topicName);

        if (StringUtils.isEmpty(cursor)) {
            throw new InvalidParameterException("Cursor format is invalid");
        }

        if (StringUtils.isEmpty(subId)) {
            throw new InvalidParameterException("SubId is empty");
        }

        if (StringUtils.isEmpty(token)) {
            throw new InvalidParameterException("Token is empty");
        }

        limit = Math.max(MIN_FETCH_SIZE, limit);
        limit = Math.min(MAX_FETCH_SIZE, limit);

        Map<String, String> headers = new HashMap<>();
        headers.put(DatahubConstant.X_DATAHUB_SUB_ID, subId);
        headers.put(DatahubConstant.X_DATAHUB_ACCESS_TOKEN, token);

        final GetBatchRecordsRequestPB request = new GetBatchRecordsRequestPB();
        request.setCursor(cursor).setLimit(limit);
        if (!StringUtils.isEmpty(filter)) {
            request.setFilter(filter);
        }

        String metricKey = ClientMetrics.genMetricKey(projectName, topicName, subId);
        Timer.Context getLatency = METRIC_PROXY.getTimerContext(MetricType.GET_RECORD_LATENCY.getName(), metricKey);
        try {
            GetBatchRecordsResultPB result = callWrapper(getService().getBatchRecords(projectName,
                    topicName, shardId, request, headers));

            if (result != null) {
                result.internalSetProjectName(projectName);
                result.internalSetTopicName(topicName);
                result.internalSetShardId(shardId);
                result.internalSetSchemaRegistryClient(getSchemaRegistry());

                Meter getQps = METRIC_PROXY.getMeter(MetricType.GET_RECORD_QPS.getName(), metricKey);
                Meter getRps = METRIC_PROXY.getMeter(MetricType.GET_RECORD_RPS.getName(), metricKey);
                Meter getThroughput = METRIC_PROXY.getMeter(MetricType.GET_RECORD_THROUGHPUT.getName(), metricKey);
                if (getQps != null) {
                    getQps.mark(1);
                }
                if (getRps != null) {
                    getRps.mark(result.getRecordCount());
                }
                if (getThroughput != null) {
                    getThroughput.mark(result.getDataSize());
                }
            }
            return result;
        } finally {
            if (getLatency != null) {
                getLatency.stop();
            }
        }
    }
}