org.apache.cassandra.io.sstable.RangeAwareSSTableWriter Maven / Gradle / Ivy

The Apache Cassandra Project develops a highly scalable second-generation distributed database, bringing together Dynamo's fully distributed design and Bigtable's ColumnFamily-based data model.

/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.cassandra.io.sstable;

import java.io.IOException;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.DecoratedKey;
import org.apache.cassandra.db.Directories;
import org.apache.cassandra.db.DiskBoundaries;
import org.apache.cassandra.db.PartitionPosition;
import org.apache.cassandra.db.SerializationHeader;
import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
import org.apache.cassandra.db.rows.UnfilteredRowIterator;
import org.apache.cassandra.io.sstable.format.SSTableFormat;
import org.apache.cassandra.io.sstable.format.SSTableReader;
import org.apache.cassandra.schema.TableId;
import org.apache.cassandra.utils.FBUtilities;
import org.apache.cassandra.utils.TimeUUID;

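/**
 * An {@link SSTableMultiWriter} that splits its output along the owning table's disk
 * boundaries: each appended partition goes to a writer whose output directory matches the
 * token range its key falls into. If the table has no disk boundaries, a single writer is
 * created on any data directory with enough free space.
 */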
public class RangeAwareSSTableWriter implements SSTableMultiWriter
{
    private final List<PartitionPosition> boundaries;
    private final List<Directories.DataDirectory> directories;
    private final int sstableLevel;
    private final long estimatedKeys;
    private final long repairedAt;
    private final TimeUUID pendingRepair;
    private final boolean isTransient;
    private final SSTableFormat<?, ?> format;
    private final SerializationHeader header;
    private final LifecycleNewTracker lifecycleNewTracker;
    private int currentIndex = -1;
    public final ColumnFamilyStore cfs;
    private final List<SSTableMultiWriter> finishedWriters = new ArrayList<>();
    private final List<SSTableReader> finishedReaders = new ArrayList<>();
    private SSTableMultiWriter currentWriter = null;

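    /**
     * @param estimatedKeys total estimated number of keys, divided evenly across the data directories
     * @param totalSize     expected total size, used to choose a location when the table has no disk boundaries
     * @throws IOException  if no data directory has enough free space for {@code totalSize}
     */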
    public RangeAwareSSTableWriter(ColumnFamilyStore cfs, long estimatedKeys, long repairedAt, TimeUUID pendingRepair, boolean isTransient, SSTableFormat<?, ?> format, int sstableLevel, long totalSize, LifecycleNewTracker lifecycleNewTracker, SerializationHeader header) throws IOException
    {
        DiskBoundaries db = cfs.getDiskBoundaries();
        directories = db.directories;
        this.sstableLevel = sstableLevel;
        this.cfs = cfs;
        this.estimatedKeys = estimatedKeys / directories.size();
        this.repairedAt = repairedAt;
        this.pendingRepair = pendingRepair;
        this.isTransient = isTransient;
        this.format = format;
        this.lifecycleNewTracker = lifecycleNewTracker;
        this.header = header;
        boundaries = db.positions;
        if (boundaries == null)
        {
            Directories.DataDirectory localDir = cfs.getDirectories().getWriteableLocation(totalSize);
            if (localDir == null)
                throw new IOException(String.format("Insufficient disk space to store %s",
                                                    FBUtilities.prettyPrintMemory(totalSize)));
            Descriptor desc = cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(localDir), format);
            currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, pendingRepair, isTransient, null, sstableLevel, header, lifecycleNewTracker);
        }
    }

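    // Advance to the writer whose disk covers this key's token, creating a new writer in the
    // corresponding data directory (and retiring the previous one) whenever a boundary is crossed.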
    private void maybeSwitchWriter(DecoratedKey key)
    {
        if (boundaries == null)
            return;

        boolean switched = false;
        while (currentIndex < 0 || key.compareTo(boundaries.get(currentIndex)) > 0)
        {
            switched = true;
            currentIndex++;
        }

        if (switched)
        {
            if (currentWriter != null)
                finishedWriters.add(currentWriter);

            Descriptor desc = cfs.newSSTableDescriptor(cfs.getDirectories().getLocationForDisk(directories.get(currentIndex)), format);
            currentWriter = cfs.createSSTableMultiWriter(desc, estimatedKeys, repairedAt, pendingRepair, isTransient, null, sstableLevel, header, lifecycleNewTracker);
        }
    }

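    // Append the partition to the writer responsible for its disk range.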
    public void append(UnfilteredRowIterator partition)
    {
        maybeSwitchWriter(partition.partitionKey());
        currentWriter.append(partition);
    }

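    // Retire the current writer, then finish all writers: non-empty ones are finished
    // (optionally opening readers), empty ones are aborted.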
    @Override
    public Collection<SSTableReader> finish(boolean openResult)
    {
        if (currentWriter != null)
            finishedWriters.add(currentWriter);
        currentWriter = null;
        for (SSTableMultiWriter writer : finishedWriters)
        {
            if (writer.getBytesWritten() > 0)
                finishedReaders.addAll(writer.finish(openResult));
            else
                SSTableMultiWriter.abortOrDie(writer);
        }
        return finishedReaders;
    }

    @Override
    public Collection<SSTableReader> finished()
    {
        return finishedReaders;
    }

    @Override
    public SSTableMultiWriter setOpenResult(boolean openResult)
    {
        finishedWriters.forEach((w) -> w.setOpenResult(openResult));
        if (currentWriter != null)
            currentWriter.setOpenResult(openResult);
        return this;
    }

    public String getFilename()
    {
        return String.join("/", cfs.getKeyspaceName(), cfs.getTableName());
    }

    @Override
    public long getBytesWritten()
    {
        return currentWriter != null ? currentWriter.getBytesWritten() : 0L;
    }

    @Override
    public long getOnDiskBytesWritten()
    {
        return currentWriter != null ? currentWriter.getOnDiskBytesWritten() : 0L;
    }

    @Override
    public TableId getTableId()
    {
        return cfs.metadata.id;
    }

    @Override
    public Throwable commit(Throwable accumulate)
    {
        if (currentWriter != null)
            finishedWriters.add(currentWriter);
        currentWriter = null;
        for (SSTableMultiWriter writer : finishedWriters)
            accumulate = writer.commit(accumulate);
        return accumulate;
    }

    @Override
    public Throwable abort(Throwable accumulate)
    {
        if (currentWriter != null)
            finishedWriters.add(currentWriter);
        currentWriter = null;
        for (SSTableMultiWriter finishedWriter : finishedWriters)
            accumulate = finishedWriter.abort(accumulate);

        return accumulate;
    }

    @Override
    public void prepareToCommit()
    {
        if (currentWriter != null)
            finishedWriters.add(currentWriter);
        currentWriter = null;
        finishedWriters.forEach(SSTableMultiWriter::prepareToCommit);
    }

    @Override
    public void close()
    {
        if (currentWriter != null)
            finishedWriters.add(currentWriter);
        currentWriter = null;
        finishedWriters.forEach(SSTableMultiWriter::close);
    }
}
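
For context, the sketch below shows how this writer is typically driven. It is only an illustration: the ColumnFamilyStore, SSTableFormat, SerializationHeader, LifecycleNewTracker and the partition iterator are assumed to be supplied by the caller (for example a stream receiver), and the helper class and its names are hypothetical, not part of Cassandra.

import java.io.IOException;
import java.util.Collection;
import java.util.Iterator;

import org.apache.cassandra.db.ColumnFamilyStore;
import org.apache.cassandra.db.SerializationHeader;
import org.apache.cassandra.db.lifecycle.LifecycleNewTracker;
import org.apache.cassandra.db.rows.UnfilteredRowIterator;
import org.apache.cassandra.io.sstable.RangeAwareSSTableWriter;
import org.apache.cassandra.io.sstable.SSTableMultiWriter;
import org.apache.cassandra.io.sstable.format.SSTableFormat;
import org.apache.cassandra.io.sstable.format.SSTableReader;

// Hypothetical helper, not part of Cassandra: all arguments are assumed to be
// supplied by the surrounding streaming or compaction context.
public final class RangeAwareWriteSketch
{
    public static Collection<SSTableReader> writeAll(ColumnFamilyStore cfs,
                                                     SSTableFormat<?, ?> format,
                                                     SerializationHeader header,
                                                     LifecycleNewTracker tracker,
                                                     Iterator<UnfilteredRowIterator> partitions,
                                                     long estimatedKeys,
                                                     long totalSize) throws IOException
    {
        // repairedAt = 0 (unrepaired), no pending repair, not transient, sstable level 0
        SSTableMultiWriter writer = new RangeAwareSSTableWriter(cfs, estimatedKeys, 0L, null, false,
                                                                format, 0, totalSize, tracker, header);
        while (partitions.hasNext())
            writer.append(partitions.next()); // routed to the per-disk writer for the key's range

        // Non-empty per-disk writers are finished and opened as readers; empty ones are aborted.
        // Committing or aborting the enclosing lifecycle transaction is left to the caller.
        return writer.finish(true);
    }
}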