/**
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.hadoop.hbase.master.cleaner;

import java.io.IOException;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.HConstants;
import org.apache.hadoop.hbase.MetaTableAccessor;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.Delete;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.master.MasterServices;
import org.apache.hadoop.hbase.replication.ReplicationPeerDescription;
import org.apache.hadoop.hbase.util.Bytes;
import org.apache.yetus.audience.InterfaceAudience;

/**
 * This chore cleans up serial-replication bookkeeping that is no longer needed in hbase:meta,
 * such as replication barriers, per-peer push positions, and daughter-region metadata.
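 * <p>A sketch of how a master might schedule this chore; the {@code stopper} and
 * {@code periodMs} names here are illustrative only:
 * <pre>
 * ReplicationMetaCleaner cleaner = new ReplicationMetaCleaner(master, stopper, periodMs);
 * master.getChoreService().scheduleChore(cleaner);
 * </pre>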
 */
@InterfaceAudience.Private
public class ReplicationMetaCleaner extends ScheduledChore {

  private static final Log LOG = LogFactory.getLog(ReplicationMetaCleaner.class);

  private final Admin admin;
  private final MasterServices master;

  /** Creates the chore; {@code period} is in milliseconds, the default ScheduledChore time unit. */
  public ReplicationMetaCleaner(MasterServices master, Stoppable stoppable, int period)
      throws IOException {
    super("ReplicationMetaCleaner", stoppable, period);
    this.master = master;
    admin = master.getConnection().getAdmin();
  }

  @Override
  protected void chore() {
    try {
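      // Collect every table that has at least one column family with serial replication scope,
      // mapped to the (initially empty) set of peer ids that replicate it.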
      Map<String, TableDescriptor> tables = master.getTableDescriptors().getAllDescriptors();
      Map<String, Set<String>> serialTables = new HashMap<>();
      for (Map.Entry<String, TableDescriptor> entry : tables.entrySet()) {
        boolean hasSerialScope = false;
        for (ColumnFamilyDescriptor column : entry.getValue().getColumnFamilies()) {
          if (column.getScope() == HConstants.REPLICATION_SCOPE_SERIAL) {
            hasSerialScope = true;
            break;
          }
        }
        if (hasSerialScope) {
          serialTables.put(entry.getValue().getTableName().getNameAsString(), new HashSet<>());
        }
      }
      if (serialTables.isEmpty()) {
        return;
      }

      // Record, for each serial table, the ids of the peers whose table-CFs config covers it.
      List<ReplicationPeerDescription> peers = admin.listReplicationPeers();
      for (ReplicationPeerDescription peerDesc : peers) {
        Map<TableName, List<String>> tableCFsMap = peerDesc.getPeerConfig().getTableCFsMap();
        if (tableCFsMap == null) {
          continue;
        }

        for (Map.Entry<TableName, List<String>> map : tableCFsMap.entrySet()) {
          if (serialTables.containsKey(map.getKey().getNameAsString())) {
            serialTables.get(map.getKey().getNameAsString()).add(peerDesc.getPeerId());
            break;
          }
        }
      }

      // Walk every region that still has replication barriers in hbase:meta and decide whether
      // its serial-replication bookkeeping can be deleted, wholly or in part.
      Map<String, List<Long>> barrierMap = MetaTableAccessor.getAllBarriers(master.getConnection());
      for (Map.Entry<String, List<Long>> entry : barrierMap.entrySet()) {
        String encodedName = entry.getKey();
        byte[] encodedBytes = Bytes.toBytes(encodedName);
        boolean canClearRegion = false;
        Map<String, Long> posMap = MetaTableAccessor.getReplicationPositionForAllPeer(
            master.getConnection(), encodedBytes);
        if (posMap.isEmpty()) {
          // No peer has recorded a push position for this region yet, so there is nothing to
          // judge against; leave its meta entries alone.
          continue;
        }

        String tableName = MetaTableAccessor.getSerialReplicationTableName(
            master.getConnection(), encodedBytes);
        Set<String> confPeers = serialTables.get(tableName);
        if (confPeers == null) {
          // The table no longer exists, or none of its column families is serial-scoped any more,
          // so this region's serial-replication meta entries can be cleared.
          canClearRegion = true;
        } else {
          if (!allPeersHavePosition(confPeers, posMap)) {
            continue;
          }

          String daughterValue = MetaTableAccessor
              .getSerialReplicationDaughterRegion(master.getConnection(), encodedBytes);
          if (daughterValue != null) {
            // This region was split or merged. It can only be cleared once every daughter region
            // either has no barriers or has a recorded position for every configured peer.
            boolean allDaughterStart = true;
            String[] daughterRegions = daughterValue.split(",");
            for (String daughter : daughterRegions) {
              byte[] region = Bytes.toBytes(daughter);
              if (!MetaTableAccessor.getReplicationBarriers(
                  master.getConnection(), region).isEmpty() &&
                  !allPeersHavePosition(confPeers,
                      MetaTableAccessor
                          .getReplicationPositionForAllPeer(master.getConnection(), region))) {
                allDaughterStart = false;
                break;
              }
            }
            if (allDaughterStart) {
              canClearRegion = true;
            }
          }
        }
        if (canClearRegion) {
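          // Drop the region's entire serial-replication footprint: positions, barriers, and the
          // meta family that tracks its table name and daughter regions.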
          Delete delete = new Delete(encodedBytes);
          delete.addFamily(HConstants.REPLICATION_POSITION_FAMILY);
          delete.addFamily(HConstants.REPLICATION_BARRIER_FAMILY);
          delete.addFamily(HConstants.REPLICATION_META_FAMILY);
          try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
            metaTable.delete(delete);
          }
        } else {

          // Keep every barrier whose seq is larger than the minimum position across all peers,
          // plus the last barrier whose seq is smaller than that minimum; delete all the rest.
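          // Illustrative example: with barriers [10, 20, 30, 40] and a minimum peer position of
          // 25, the computed insertion index is 2, so the loop below deletes only barrier 10;
          // 20 is the last barrier below the minimum and is kept along with 30 and 40.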

          long minPos = Long.MAX_VALUE;
          for (Map.Entry<String, Long> pos : posMap.entrySet()) {
            minPos = Math.min(minPos, pos.getValue());
          }
          List<Long> barriers = entry.getValue();
          int index = Collections.binarySearch(barriers, minPos);
          if (index < 0) {
            index = -index - 1;
          }
          Delete delete = new Delete(encodedBytes);
          for (int i = 0; i < index - 1; i++) {
            delete.addColumn(HConstants.REPLICATION_BARRIER_FAMILY, Bytes.toBytes(barriers.get(i)));
          }
          try (Table metaTable = master.getConnection().getTable(TableName.META_TABLE_NAME)) {
            metaTable.delete(delete);
          }
        }

      }

    } catch (IOException e) {
      LOG.error("Exception during cleaning up.", e);
    }

  }

  /**
   * Returns true if every given peer has a recorded position in the position map.
   */
  private boolean allPeersHavePosition(Set<String> peers, Map<String, Long> posMap)
      throws IOException {
    for (String peer : peers) {
      if (!posMap.containsKey(peer)) {
        return false;
      }
    }
    return true;
  }
}