package Alachisoft.NCache.Common.Streams;

import tangible.RefObject;

import java.io.IOException;
import java.io.OutputStream;
import java.net.Socket;
import java.util.concurrent.atomic.AtomicInteger;

public class BufferedStream extends OutputStream {
    private final int _DefaultBufferSize = 4096;
    private final int _bufferSize;
    // MaxShadowBufferSize is chosen such that shadow buffers are not allocated on the Large Object Heap.
    // Currently, an object is allocated on the LOH if it is larger than 85000 bytes. See LARGE_OBJECT_SIZE in ndp\clr\src\vm\gc.h
    // We will go with exactly 80 KBytes, although this is somewhat arbitrary.
    private final int MaxShadowBufferSize = 81920;  // Make sure not to get to the Large Object Heap.
    OutputStream _stream;
    private int _writePos;
    private byte[] _buffer;                               // Shared read/write buffer.  Alloc on first use.
    private int _readPos;                               // Read pointer within shared buffer.
    private int _readLen;
    private AtomicInteger writeCount = new AtomicInteger(0);


    // Note: the boolean parameter is not used by either constructor.
    public BufferedStream(Socket primaryClient, boolean b, int bufferSize) {
        _bufferSize = bufferSize;
        try {
            _stream = primaryClient.getOutputStream();
        } catch (IOException e) {
            // Swallowed: _stream stays null, so EnsureNotClosed() reports "Stream is closed" on first use.
        }
    }


    public BufferedStream(Socket primaryClient, boolean b) {
        _bufferSize = _DefaultBufferSize;
        try {
            _stream = primaryClient.getOutputStream();
        } catch (IOException e) {
            // Swallowed: _stream stays null, so EnsureNotClosed() reports "Stream is closed" on first use.
        }
    }

    public int getWriteCount() {
        return writeCount.get();
    }

    private void EnsureNotClosed() throws IOException {
        if (_stream == null)
            throw new IOException("Stream is closed.");
    }

    private void WriteToBuffer(byte[] array, RefObject<Integer> tempRefOffset, RefObject<Integer> tempRefCount) {
        int bytesToWrite = Math.min(_bufferSize - _writePos, tempRefCount.argvalue);

        if (bytesToWrite <= 0)
            return;

        EnsureBufferAllocated();
        System.arraycopy(array, tempRefOffset.argvalue, _buffer, _writePos, bytesToWrite);

        _writePos += bytesToWrite;
        tempRefCount.setValue(tempRefCount.getValue() - bytesToWrite);
        tempRefOffset.setValue(tempRefOffset.getValue() + bytesToWrite);
    }


    private void EnsureBufferAllocated() {
        if (_bufferSize > 0) {        // BufferedStream is not intended for multi-threaded use, so no worries about the get/set race on _buffer.
            if (_buffer == null)
                _buffer = new byte[_bufferSize];
        }
    }

    @Override
    public void write(int b) throws IOException {
        // Route single-byte writes through the buffered array path instead of silently dropping them.
        write(new byte[]{(byte) b}, 0, 1);
    }

    @Override
    public void write(byte[] array, int offset, int count) throws IOException {

        if (array == null) {
            throw new NullPointerException("Buffer cannot be null. Parameter name: array");
        }
        if (offset < 0) {
            throw new IndexOutOfBoundsException("Non-negative number required. Parameter name: offset");
        }
        if (count < 0) {
            throw new IndexOutOfBoundsException("Non-negative number required. Parameter name: count");
        }
        if (array.length - offset < count) {
            throw new IllegalArgumentException("Offset and length were out of bounds for the array or count is greater than the number of elements from index to the end of the source collection.");
        }

        EnsureNotClosed();
        // We need to use the buffer, while avoiding unnecessary buffer usage / memory copies.
        // We ASSUME that memory copies are much cheaper than writes to the underlying stream, so if an extra copy is
        // guaranteed to reduce the number of writes, we prefer it.
        // We pick a simple strategy that makes degenerate cases rare if our assumptions are right.
        //
        // For every write, we use a simple heuristic (below) to decide whether to use the buffer.
        // The heuristic has the desirable property (*) that if the specified user data can fit into the currently available
        // buffer space without filling it up completely, the heuristic will always tell us to use the buffer. It will also
        // tell us to use the buffer in cases where the current write would fill the buffer, but the remaining data is small
        // enough such that subsequent operations can use the buffer again.
        //
        // Algorithm:
        // Determine whether or not to buffer according to the heuristic (below).
        // If we decided to use the buffer:
        //     Copy as much user data as we can into the buffer.
        //     If we consumed all data: We are finished.
        //     Otherwise, write the buffer out.
        //     Copy the rest of user data into the now cleared buffer (no need to write out the buffer again as the heuristic
        //     will prevent it from being filled twice).
        // If we decided not to use the buffer:
        //     Can the data already in the buffer and current user data be combined into a single write
        //     by allocating a "shadow" buffer of up to twice the size of _bufferSize (up to a limit to avoid LOH)?
        //     Yes, it can:
        //         Allocate a larger "shadow" buffer and ensure the buffered data is moved there.
        //         Copy user data to the shadow buffer.
        //         Write shadow buffer to the underlying stream in a single operation.
        //     No, it cannot (amount of data is still too large):
        //         Write out any data possibly in the buffer.
        //         Write out user data directly.
        //
        // Heuristic:
        // If the subsequent write operation that follows the current write operation will result in a write to the
        // underlying stream in case that we use the buffer in the current write, while it would not have if we avoided
        // using the buffer in the current write (by writing current user data to the underlying stream directly), then we
        // prefer to avoid using the buffer since the corresponding memory copy is wasted (it will not reduce the number
        // of writes to the underlying stream, which is what we are optimising for).
        // ASSUME that the next write will be for the same amount of bytes as the current write (most common case) and
        // determine if it will cause a write to the underlying stream. If the next write is actually larger, our heuristic
        // still yields the right behaviour; if the next write is actually smaller, we may be making an unnecessary write to
        // the underlying stream. However, this can only occur if the current write is larger than half the buffer size and
        // we will recover after one iteration.
        // We have:
        //     useBuffer = (_writePos + count + count < _bufferSize + _bufferSize)
        //
        // Example with _bufferSize = 20, _writePos = 6, count = 10:
        //
        //     +---------------------------------------+---------------------------------------+
        //     |             current buffer            | next iteration's "future" buffer      |
        //     +---------------------------------------+---------------------------------------+
        //     |0| | | | | | | | | |1| | | | | | | | | |2| | | | | | | | | |3| | | | | | | | | |
        //     |0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|
        //     +-----------+-------------------+-------------------+---------------------------+
        //     | _writePos |  current count    | assumed next count|avail buff after next write|
        //     +-----------+-------------------+-------------------+---------------------------+
        //
        // A nice property (*) of this heuristic is that it will always succeed if the user data completely fits into the
        // available buffer, i.e. if count < (_bufferSize - _writePos).
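        //
        // Working through those numbers: totalUserBytes = 6 + 10 = 16, and 16 + 10 = 26 < 40, so useBuffer is true.
        // The current 10 bytes fit in the buffer (writePos becomes 16); the assumed next 10-byte write fills the
        // buffer to 20, causes exactly one write to the underlying stream, and leaves the remaining 6 bytes buffered.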

        int totalUserBytes;
        boolean useBuffer;
        totalUserBytes = _writePos + count;
        useBuffer = (totalUserBytes + count < (_bufferSize + _bufferSize));
        if (useBuffer) {

            tangible.RefObject<Integer> tempRefOffset = new tangible.RefObject<>(offset);
            tangible.RefObject<Integer> tempRefCount = new tangible.RefObject<>(count);
            WriteToBuffer(array, tempRefOffset, tempRefCount);
            count = tempRefCount.getValue();
            offset = tempRefOffset.getValue();

            if (_writePos < _bufferSize) {
                return;
            }
            _stream.write(_buffer, 0, _writePos);
            writeCount.incrementAndGet();
            _writePos = 0;

            tangible.RefObject<Integer> tempRefOffset2 = new tangible.RefObject<>(offset);
            tangible.RefObject<Integer> tempRefCount2 = new tangible.RefObject<>(count);
            WriteToBuffer(array, tempRefOffset2, tempRefCount2);
            count = tempRefCount2.getValue();
            offset = tempRefOffset2.getValue();


        } else { // if (!useBuffer)

            // Write out the buffer if necessary.
            if (_writePos > 0) {
                // Try avoiding extra write to underlying stream by combining previously buffered data with current user data:
                if (totalUserBytes <= (_bufferSize + _bufferSize) && totalUserBytes <= MaxShadowBufferSize) {

                    EnsureShadowBufferAllocated();
                    System.arraycopy(array, offset, _buffer, _writePos, count);
                    _stream.write(_buffer, 0, totalUserBytes);
                    writeCount.incrementAndGet();
                    _writePos = 0;
                    return;
                }

                _stream.write(_buffer, 0, _writePos);
                writeCount.incrementAndGet();
                _writePos = 0;
            }

            // Write out user data.
            _stream.write(array, offset, count);
            writeCount.incrementAndGet();
        }
    }

    private void EnsureShadowBufferAllocated() {
        // Already have shadow buffer?
        if (_buffer.length != _bufferSize || _bufferSize >= MaxShadowBufferSize)
            return;

        byte[] shadowBuffer = new byte[Math.min(_bufferSize + _bufferSize, MaxShadowBufferSize)];
        System.arraycopy(_buffer, 0, shadowBuffer, 0, _writePos);
        _buffer = shadowBuffer;
    }


    @Override
    public void flush() throws IOException {

        EnsureNotClosed();

        // Has WRITE data in the buffer:
        if (_writePos > 0)
        {
            flushWrite();
            return;
        }

        // Has READ data in the buffer (never the case in this write-only port: _readPos and _readLen always stay 0):
        if (_readPos < _readLen)
        {

            // User streams may have opted to throw from Flush if CanWrite is false (although the abstract Stream does not do so).
            // However, if we do not forward the Flush to the underlying stream, we may have problems when chaining several streams.
            // Let us make a best effort attempt:
            if (_stream instanceof BufferedStream)
            {
                _stream.flush();
                writeCount.incrementAndGet();
            }
            return;
        }

        // We had no data in the buffer, but we still need to tell the underlying stream to flush.
        if ( _stream instanceof BufferedStream)
        {
            _stream.flush();
            writeCount.incrementAndGet();
        }

        _writePos = _readPos = _readLen = 0;
    }


    private void flushWrite() throws IOException
    {

        _stream.write(_buffer, 0, _writePos);
        writeCount.incrementAndGet();
        _writePos = 0;
        _stream.flush();
    }

}
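
// ---------------------------------------------------------------------------
// Illustrative usage sketch (not part of the original source). It shows how
// the wrapper above coalesces small writes and how getWriteCount() reflects
// the number of writes actually issued to the socket. The host name, port and
// payload below are placeholders chosen purely for the example.
// ---------------------------------------------------------------------------
class BufferedStreamUsageExample {
    public static void main(String[] args) throws IOException {
        Socket socket = new Socket("example.com", 80);
        BufferedStream out = new BufferedStream(socket, true, 4096);

        byte[] payload = "GET / HTTP/1.0\r\n\r\n".getBytes();

        // The payload fits comfortably inside the 4096-byte buffer, so this call
        // only copies into the buffer; no socket write has happened yet.
        out.write(payload, 0, payload.length);
        System.out.println("writes before flush: " + out.getWriteCount()); // 0

        // flush() pushes the buffered bytes to the socket in a single write.
        out.flush();
        System.out.println("writes after flush:  " + out.getWriteCount()); // 1

        socket.close();
    }
}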


//import Alachisoft.NCache.Common.ErrorHandling.ErrorMessages;
//
//import java.util.Arrays;
//import java.util.concurrent.FutureTask;
//import java.util.stream.Stream;
//
//public class BufferedStream extends Stream {
//
//        private static final int _DEFAULT_BUFFER_SIZE = 4096;
//
//
//        private Stream _stream; // Underlying stream.  Close sets _stream to null.
//
//        private byte[] _buffer; // Shared read/write buffer.  Alloc on first use.
//
//        private int _bufferSize; // Length of internal buffer (not counting the shadow buffer).
//
//        private int _readPos; // Read pointer within shared buffer.
//        private int _readLen; // Number of bytes read in buffer from _stream.
//        private int _writePos; // Write pointer within shared buffer.
//
//        //#if !FEATURE_PAL && FEATURE_ASYNC_IO
//        private BeginEndAwaitableAdapter _beginEndAwaitable; // Used to be able to await a BeginXxx call and thus to share code
//        // between the APM and Async pattern implementations
//
//        private FutureTask _lastSyncCompletedReadTask; // The last successful Task returned from ReadAsync
//
//        // Because this ctor was here previously we need to keep it around.
//        private BufferedStream()
//        {
//        }
//
//
//        public BufferedStream(Stream stream)
//        {
//            this(stream, _DEFAULT_BUFFER_SIZE);
//        }
//
//
//        public BufferedStream(Stream stream, int bufferSize)
//        {
//            if (stream == null)
//            {
//                throw new NullPointerException("stream");
//            }
//
//            if (bufferSize <= 0)
//            {
//                throw new IndexOutOfBoundsException("bufferSize must be positive integer");
//            }
//            _stream = stream;
//            _bufferSize = bufferSize;
//
//            // Allocate _buffer on its first use - it will not be used if all reads
//            // & writes are greater than or equal to buffer size.
//
//
//        }
//
//        /** MaxShadowBufferSize is chosen such that shadow buffers are not allocated on the Large Object Heap.
//         Currently, an object is allocated on the LOH if it is larger than 85000 bytes. See LARGE_OBJECT_SIZE in ndp\clr\src\vm\gc.h
//         We will go with exactly 80 KBytes, although this is somewhat arbitrary.
//         */
//        private static final int MAX_SHADOW_BUFFER_SIZE = 81920; // Make sure not to get to the Large Object Heap.
//        private void ensureShadowBufferAllocated()
//        {
//            // Already have shadow buffer?
//            if (_buffer.length != _bufferSize || _bufferSize >= MAX_SHADOW_BUFFER_SIZE)
//            {
//                return;
//            }
//
//            byte[] shadowBuffer = new byte[Math.min(_bufferSize + _bufferSize, MAX_SHADOW_BUFFER_SIZE)];
//            System.arraycopy(_buffer, 0, shadowBuffer, 0, _writePos);
//            _buffer = shadowBuffer;
//        }
//
//
//        private void ensureBufferAllocated()
//        {
//
//
//            // BufferedStream is not intended for multi-threaded use, so no worries about the get/set ---- on _buffer.
//            if (_buffer == null)
//            {
//                _buffer = new byte[_bufferSize];
//            }
//        }
//
//
//        private Stream getUnderlyingStream()
//        {
//            return _stream;
//        }
//
//
//        private int getBufferSize()
//        {
//            return _bufferSize;
//        }
//
//
//
//
//
//
//
//
//
//        public long getLength()
//        {
//            if (_writePos > 0)
//            {
//                flushWrite();
//            }
//
//            return _stream.count();
//        }
//
//        public long writeCount;
//
//        public long getPosition()
//        {
//
//
//            return _stream.p() + (_readPos - _readLen + _writePos);
//        }
//        public void setPosition(long value)
//        {
//            if (value < 0)
//            {
//                throw new IndexOutOfBoundsException("value", ResourceHelper.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            Contract.EndContractBlock();
//
//            ensureNotClosed();
//            ensureCanSeek();
//
//            if (_writePos > 0)
//            {
//                flushWrite();
//            }
//
//            _readPos = 0;
//            _readLen = 0;
//            _stream.Seek(value, SeekOrigin.Begin);
//        }
//
//
//        protected void Dispose(boolean disposing)
//        {
//
//            try
//            {
//                if (disposing && _stream != null)
//                {
//                    try
//                    {
//                        flush();
//                    }
//                    finally
//                    {
//                        _stream.Close();
//                    }
//                }
//            }
//            finally
//            {
//                _stream = null;
//                _buffer = null;
////#if !FEATURE_PAL && FEATURE_ASYNC_IO
//                _lastSyncCompletedReadTask = null;
////#endif // !FEATURE_PAL && FEATURE_ASYNC_IO
//
//                // Call base.Dispose(bool) to cleanup async IO resources
//                super.dispose(disposing);
//            }
//        }
//
//
//        public void flush()
//        {
//
//            ensureNotClosed();
//
//            // Has WRITE data in the buffer:
//            if (_writePos > 0)
//            {
//
//                flushWrite();
//                Contract.Assert(_writePos == 0 && _readPos == 0 && _readLen == 0);
//                return;
//            }
//
//            // Has READ data in the buffer:
//            if (_readPos < _readLen)
//            {
//
//                // If the underlying stream is not seekable AND we have something in the read buffer, then FlushRead would throw.
//                // We can either throw away the buffer resulting in data loss (!) or ignore the Flush.
//                // (We cannot throw because it would be a breaking change.) We opt into ignoring the Flush in that situation.
//                if (!_stream.getCanSeek())
//                {
//                    return;
//                }
//
//                flushRead();
//
//                // User streams may have opted to throw from Flush if CanWrite is false (although the abstract Stream does not do so).
//                // However, if we do not forward the Flush to the underlying stream, we may have problems when chaining several streams.
//                // Let us make a best effort attempt:
//                if (_stream.getCanWrite() || _stream instanceof BufferedStream)
//                {
//                    _stream.Flush();
//                    writeCount++;
//                }
//
//                Contract.Assert(_writePos == 0 && _readPos == 0 && _readLen == 0);
//                return;
//            }
//
//            // We had no data in the buffer, but we still need to tell the underlying stream to flush.
//            if (_stream.getCanWrite() || _stream instanceof BufferedStream)
//            {
//                _stream.Flush();
//                writeCount++;
//            }
//
//            _writePos = _readPos = _readLen = 0;
//        }
//
//        //#if !FEATURE_PAL && FEATURE_ASYNC_IO
//        public Task FlushAsync(CancellationToken cancellationToken)
//        {
//
//            if (cancellationToken.getIsCancellationRequested())
//            {
//                return Task.FromCancellation(cancellationToken);
//            }
//
//            ensureNotClosed();
//
//            return flushAsyncInternal(cancellationToken, this, _stream, _writePos, _readPos, _readLen);
//        }
//
//
//        private static Task flushAsyncInternal(CancellationToken cancellationToken, BufferedStream _this, Stream stream, int writePos, int readPos, int readLen)
//        {
//
//            // We bring instance fields down as local parameters to this async method because BufferedStream is derived from MarshalByRefObject.
//            // Field access would be from the async state machine i.e., not via the this pointer and would require runtime checking to see
//            // if we are talking to a remote object, which is currently very slow (Dev11 bug #365921).
//            // Field access from within Asserts is, of course, irrelevant.
//            Contract.Assert(stream != null);
//
//            SemaphoreSlim sem = _this.EnsureAsyncActiveSemaphoreInitialized();
//            await sem.WaitAsync().ConfigureAwait(false);
//            try
//            {
//
//                if (writePos > 0)
//                {
//
//                    await _this.flushWriteAsync(cancellationToken).ConfigureAwait(false);
//                    Contract.Assert(_this._writePos == 0 && _this._readPos == 0 && _this._readLen == 0);
//                    return;
//                }
//
//                if (readPos < readLen)
//                {
//
//                    // If the underlying stream is not seekable AND we have something in the read buffer, then FlushRead would throw.
//                    // We can either throw away the buffer resulting in data loss (!) or ignore the Flush. (We cannot throw because it
//                    // would be a breaking change.) We opt into ignoring the Flush in that situation.
//                    if (!stream.getCanSeek())
//                    {
//                        return;
//                    }
//
//                    _this.flushRead(); // not async; it uses Seek, but there's no SeekAsync
//
//                    // User streams may have opted to throw from Flush if CanWrite is false (although the abstract Stream does not do so).
//                    // However, if we do not forward the Flush to the underlying stream, we may have problems when chaining several streams.
//                    // Let us make a best effort attempt:
//                    if (stream.getCanRead() || stream instanceof BufferedStream)
//                    {
//                        await stream.FlushAsync(cancellationToken).ConfigureAwait(false);
//                    }
//
//                    Contract.Assert(_this._writePos == 0 && _this._readPos == 0 && _this._readLen == 0);
//                    return;
//                }
//
//                // We had no data in the buffer, but we still need to tell the underlying stream to flush.
//                if (stream.getCanWrite() || stream instanceof BufferedStream)
//                {
//                    await stream.FlushAsync(cancellationToken).ConfigureAwait(false);
//                }
//
//                // There was nothing in the buffer:
//                Contract.Assert(_this._writePos == 0 && _this._readPos == _this._readLen);
//
//            }
//            finally
//            {
//                sem.Release();
//            }
//        }
////#endif // !FEATURE_PAL && FEATURE_ASYNC_IO
//
//
//        // Reading is done in blocks, but someone could read 1 byte from the buffer then write.
//        // At that point, the underlying stream's pointer is out of sync with this stream's position.
//        // All write  functions should call this function to ensure that the buffered data is not lost.
//        private void flushRead()
//        {
//
//            Contract.Assert(_writePos == 0, "BufferedStream: Write buffer must be empty in FlushRead!");
//
//            if (_readPos - _readLen != 0)
//            {
//                _stream.Seek(_readPos - _readLen, SeekOrigin.Current);
//            }
//
//            _readPos = 0;
//            _readLen = 0;
//        }
//
//
//        private void clearReadBufferBeforeWrite()
//        {
//
//            // This is called by write methods to clear the read buffer.
//
//            Contract.Assert(_readPos <= _readLen, "_readPos <= _readLen [" + _readPos + " <= " + _readLen + "]");
//
//            // No READ data in the buffer:
//            if (_readPos == _readLen)
//            {
//
//                _readPos = _readLen = 0;
//                return;
//            }
//
//            // Must have READ data.
//            Contract.Assert(_readPos < _readLen);
//
//            // If the underlying stream cannot seek, FlushRead would end up throwing NotSupported.
//            // However, since the user did not call a method that is intuitively expected to seek, a better message is in order.
//            // Ideally, we would throw an InvalidOperation here, but for backward compat we have to stick with NotSupported.
//            if (!_stream.getCanSeek())
//            {
//                throw new UnsupportedOperationException(ResourceHelper.GetResourceString("NotSupported_CannotWriteToBufferedStreamIfReadBufferCannotBeFlushed"));
//            }
//
//            flushRead();
//        }
//
//
//        private void flushWrite()
//        {
//
//            Contract.Assert(_readPos == 0 && _readLen == 0, "BufferedStream: Read buffer must be empty in FlushWrite!");
//            Contract.Assert(_buffer != null && _bufferSize >= _writePos, "BufferedStream: Write buffer must be allocated and write position must be in the bounds of the buffer in FlushWrite!");
//
//            _stream.Write(_buffer, 0, _writePos);
//            writeCount++;
//            _writePos = 0;
//            _stream.Flush();
//        }
//
//
//        //#if !FEATURE_PAL && FEATURE_ASYNC_IO
//        private Task flushWriteAsync(CancellationToken cancellationToken)
//        {
//
//            Contract.Assert(_readPos == 0 && _readLen == 0, "BufferedStream: Read buffer must be empty in FlushWrite!");
//            Contract.Assert(_buffer != null && _bufferSize >= _writePos, "BufferedStream: Write buffer must be allocated and write position must be in the bounds of the buffer in FlushWrite!");
//
//            await _stream.WriteAsync(_buffer, 0, _writePos, cancellationToken).ConfigureAwait(false);
//            _writePos = 0;
//            await _stream.FlushAsync(cancellationToken).ConfigureAwait(false);
//        }
////#endif // !FEATURE_PAL && FEATURE_ASYNC_IO
//
//
//        private int readFromBuffer(byte[] array, int offset, int count)
//        {
//
//            int readBytes = _readLen - _readPos;
//            Contract.Assert(readBytes >= 0);
//
//            if (readBytes == 0)
//            {
//                return 0;
//            }
//
//            Contract.Assert(readBytes > 0);
//
//            if (readBytes > count)
//            {
//                readBytes = count;
//            }
//
//            Buffer.BlockCopy(_buffer, _readPos, array, offset, readBytes);
//            _readPos += readBytes;
//
//            return readBytes;
//        }
//
//
//        private int readFromBuffer(byte[] array, int offset, int count, tangible.OutObject error)
//        {
//
//            try
//            {
//
//                error.argValue = null;
//                return readFromBuffer(array, offset, count);
//
//            }
//            catch (RuntimeException ex)
//            {
//                error.argValue = ex;
//                return 0;
//            }
//        }
//
//
//        public int read(byte[] array, int offset, int count)
//        {
//
//            if (array == null)
//            {
//                throw new NullPointerException("array", ResourceHelper.GetResourceString("ArgumentNull_Buffer"));
//            }
//            if (offset < 0)
//            {
//                throw new IndexOutOfBoundsException("offset", ResourceHelper.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (count < 0)
//            {
//                throw new IndexOutOfBoundsException("count", ResourceHelper.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (array.getLength() - offset < count)
//            {
//                throw new IllegalArgumentException(ResourceHelper.GetResourceString("Argument_InvalidOffLen"));
//            }
//            Contract.EndContractBlock();
//
//            ensureNotClosed();
//            ensureCanRead();
//
//            int bytesFromBuffer = readFromBuffer(array, offset, count);
//
//            // We may have read less than the number of bytes the user asked for, but that is part of the Stream contract.
//
//            // Reading again for more data may cause us to block if we're using a device with no clear end of file,
//            // such as a serial port or pipe. If we blocked here and this code was used with redirected pipes for a
//            // process's standard output, this can lead to deadlocks involving two processes.
//            // BUT - this is a breaking change.
//            // So: If we could not read all bytes the user asked for from the buffer, we will try once from the underlying
//            // stream thus ensuring the same blocking behaviour as if the underlying stream was not wrapped in this BufferedStream.
//            if (bytesFromBuffer == count)
//            {
//                return bytesFromBuffer;
//            }
//
//            int alreadySatisfied = bytesFromBuffer;
//            if (bytesFromBuffer > 0)
//            {
//                count -= bytesFromBuffer;
//                offset += bytesFromBuffer;
//            }
//
//            // So the READ buffer is empty.
//            Contract.Assert(_readLen == _readPos);
//            _readPos = _readLen = 0;
//
//            // If there was anything in the WRITE buffer, clear it.
//            if (_writePos > 0)
//            {
//                flushWrite();
//            }
//
//            // If the requested read is larger than buffer size, avoid the buffer and still use a single read:
//            if (count >= _bufferSize)
//            {
//
//                return _stream.Read(array, offset, count) + alreadySatisfied;
//            }
//
//            // Ok. We can fill the buffer:
//            ensureBufferAllocated();
//            _readLen = _stream.Read(_buffer, 0, _bufferSize);
//
//            bytesFromBuffer = readFromBuffer(array, offset, count);
//
//            // We may have read less than the number of bytes the user asked for, but that is part of the Stream contract.
//            // Reading again for more data may cause us to block if we're using a device with no clear end of stream,
//            // such as a serial port or pipe.  If we blocked here & this code was used with redirected pipes for a process's
//            // standard output, this can lead to deadlocks involving two processes. Additionally, translating one read on the
//            // BufferedStream to more than one read on the underlying Stream may defeat the whole purpose of buffering of the
//            // underlying reads are significantly more expensive.
//
//            return bytesFromBuffer + alreadySatisfied;
//        }
//
//
//        //#if !FEATURE_PAL && FEATURE_ASYNC_IO
//        public IAsyncResult BeginRead(byte[] buffer, int offset, int count, AsyncCallback callback, Object state)
//        {
//
//            if (buffer == null)
//            {
//                throw new NullPointerException("buffer", Environment.GetResourceString("ArgumentNull_Buffer"));
//            }
//            if (offset < 0)
//            {
//                throw new IndexOutOfBoundsException("offset", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (count < 0)
//            {
//                throw new IndexOutOfBoundsException("count", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (buffer.getLength() - offset < count)
//            {
//                throw new IllegalArgumentException(Environment.GetResourceString("Argument_InvalidOffLen"));
//            }
//            Contract.EndContractBlock();
//
//            // Previous version incorrectly threw NotSupported instead of ObjectDisposed. We keep that behaviour for back-compat.
//            // EnsureNotClosed();
//            if (_stream == null)
//            {
//                __Error.ReadNotSupported();
//            }
//            ensureCanRead();
//
//            int bytesFromBuffer = 0;
//            // Try to satisfy the request from the buffer synchronously. But still need a sem-lock in case that another
//            // Async IO Task accesses the buffer concurrently. If we fail to acquire the lock without waiting, make this
//            // an Async operation.
//            SemaphoreSlim sem = super.EnsureAsyncActiveSemaphoreInitialized();
//            Task semaphoreLockTask = sem.WaitAsync();
//            if (semaphoreLockTask.getStatus() == TaskStatus.RanToCompletion)
//            {
//
//                boolean completeSynchronously = true;
//                try
//                {
//
//                    RuntimeException error;
//                    tangible.OutObject tempOutError = new tangible.OutObject();
//                    bytesFromBuffer = readFromBuffer(buffer, offset, count, tempOutError);
//                    error = tempOutError.argValue;
//
//                    // If we satisfied enough data from the buffer, we can complete synchronously.
//                    // Reading again for more data may cause us to block if we're using a device with no clear end of file,
//                    // such as a serial port or pipe. If we blocked here and this code was used with redirected pipes for a
//                    // process's standard output, this can lead to deadlocks involving two processes.
//                    // BUT - this is a breaking change.
//                    // So: If we could not read all bytes the user asked for from the buffer, we will try once from the underlying
//                    // stream thus ensuring the same blocking behaviour as if the underlying stream was not wrapped in this BufferedStream.
//                    completeSynchronously = (bytesFromBuffer == count || error != null);
//
//                    if (completeSynchronously)
//                    {
//
//                        SynchronousAsyncResult asyncResult = (error == null) ? new SynchronousAsyncResult(bytesFromBuffer, state) : new SynchronousAsyncResult(error, state, isWrite: false);
//                        if (callback != null)
//                        {
//                            callback.invoke(asyncResult);
//                        }
//
//                        return asyncResult;
//                    }
//                }
//                finally
//                {
//                    if (completeSynchronously) // if this is FALSE, we will be entering ReadFromUnderlyingStreamAsync and releasing there.
//                    {
//                        sem.Release();
//                    }
//                }
//            }
//
//            // Delegate to the async implementation.
//            return beginReadFromUnderlyingStream(buffer, offset + bytesFromBuffer, count - bytesFromBuffer, callback, state, bytesFromBuffer, semaphoreLockTask);
//        }
//
//
//        private IAsyncResult beginReadFromUnderlyingStream(byte[] buffer, int offset, int count, AsyncCallback callback, Object state, int bytesAlreadySatisfied, Task semaphoreLockTask)
//        {
//
//            Task readOp = readFromUnderlyingStreamAsync(buffer, offset, count, CancellationToken.getNone(), bytesAlreadySatisfied, semaphoreLockTask, true);
//            return TaskToApm.Begin(readOp, callback, state);
//        }
//
//
//        public int EndRead(IAsyncResult asyncResult)
//        {
//
//            if (asyncResult == null)
//            {
//                throw new NullPointerException("asyncResult");
//            }
//            Contract.Ensures(Contract.Result() >= 0);
//            Contract.EndContractBlock();
//
//            var sAR = asyncResult instanceof SynchronousAsyncResult ? (SynchronousAsyncResult)asyncResult : null;
//            if (sAR != null)
//            {
//                return SynchronousAsyncResult.EndRead(asyncResult);
//            }
//            return TaskToApm.End(asyncResult);
//        }
//
//
//        private Task lastSyncCompletedReadTask(int val)
//        {
//
//            Task t = _lastSyncCompletedReadTask;
//            Contract.Assert(t == null || t.getStatus() == TaskStatus.RanToCompletion);
//
//            if (t != null && t.getResult() == val)
//            {
//                return t;
//            }
//
//            t = Task.FromResult(val);
//            _lastSyncCompletedReadTask = t;
//            return t;
//        }
//
//
//        public Task ReadAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
//        {
//
//            if (buffer == null)
//            {
//                throw new NullPointerException("buffer", Environment.GetResourceString("ArgumentNull_Buffer"));
//            }
//            if (offset < 0)
//            {
//                throw new IndexOutOfBoundsException("offset", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (count < 0)
//            {
//                throw new IndexOutOfBoundsException("count", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (buffer.getLength() - offset < count)
//            {
//                throw new IllegalArgumentException(Environment.GetResourceString("Argument_InvalidOffLen"));
//            }
//            Contract.EndContractBlock();
//
//            // Fast path check for cancellation already requested
//            if (cancellationToken.getIsCancellationRequested())
//            {
//                return Task.FromCancellation(cancellationToken);
//            }
//
//            ensureNotClosed();
//            ensureCanRead();
//
//            int bytesFromBuffer = 0;
//            // Try to satisfy the request from the buffer synchronously. But still need a sem-lock in case that another
//            // Async IO Task accesses the buffer concurrently. If we fail to acquire the lock without waiting, make this
//            // an Async operation.
//            SemaphoreSlim sem = super.EnsureAsyncActiveSemaphoreInitialized();
//            Task semaphoreLockTask = sem.WaitAsync();
//            if (semaphoreLockTask.getStatus() == TaskStatus.RanToCompletion)
//            {
//
//                boolean completeSynchronously = true;
//                try
//                {
//                    RuntimeException error;
//                    tangible.OutObject tempOutError = new tangible.OutObject();
//                    bytesFromBuffer = readFromBuffer(buffer, offset, count, tempOutError);
//                    error = tempOutError.argValue;
//
//                    // If we satisfied enough data from the buffer, we can complete synchronously.
//                    // Reading again for more data may cause us to block if we're using a device with no clear end of file,
//                    // such as a serial port or pipe. If we blocked here and this code was used with redirected pipes for a
//                    // process's standard output, this can lead to deadlocks involving two processes.
//                    // BUT - this is a breaking change.
//                    // So: If we could not read all bytes the user asked for from the buffer, we will try once from the underlying
//                    // stream thus ensuring the same blocking behaviour as if the underlying stream was not wrapped in this BufferedStream.
//                    completeSynchronously = (bytesFromBuffer == count || error != null);
//
//                    if (completeSynchronously)
//                    {
//
//                        return (error == null) ? lastSyncCompletedReadTask(bytesFromBuffer) : Task.FromException(error);
//                    }
//                }
//                finally
//                {
//                    if (completeSynchronously) // if this is FALSE, we will be entering ReadFromUnderlyingStreamAsync and releasing there.
//                    {
//                        sem.Release();
//                    }
//                }
//            }
//
//            // Delegate to the async implementation.
//            return readFromUnderlyingStreamAsync(buffer, offset + bytesFromBuffer, count - bytesFromBuffer, cancellationToken, bytesFromBuffer, semaphoreLockTask, false);
//        }
//
//
//        /** BufferedStream should be as thin a wrapper as possible. We want that ReadAsync delegates to
//         ReadAsync of the underlying _stream and that BeginRead delegates to BeginRead of the underlying stream,
//         rather than calling the base Stream which implements the one in terms of the other. This allows BufferedStream
//         to affect the semantics of the stream it wraps as little as possible. At the same time, we want to share as
//         much code between the APM and the Async pattern implementations as possible. This method is called by both with
//         a corresponding useApmPattern value. Recall that Task implements IAsyncResult.
//         @return -2 if _bufferSize was set to 0 while waiting on the semaphore; otherwise num of bytes read.
//         */
//        private Task readFromUnderlyingStreamAsync(byte[] array, int offset, int count, CancellationToken cancellationToken, int bytesAlreadySatisfied, Task semaphoreLockTask, boolean useApmPattern)
//        {
//
//            // Same conditions validated with exceptions in ReadAsync:
//            // (These should be Contract.Requires(..) but that method had some issues in async methods; using Assert(..) for now.)
//            Contract.Assert(array != null);
//            Contract.Assert(offset >= 0);
//            Contract.Assert(count >= 0);
//            Contract.Assert(array.getLength() - offset >= count);
//            Contract.Assert(_stream != null);
//            Contract.Assert(_stream.getCanRead());
//            Contract.Assert(_bufferSize > 0);
//            Contract.Assert(semaphoreLockTask != null);
//
//            // Employ async waiting based on the same synchronization used in BeginRead of the abstract Stream.
//            await semaphoreLockTask.ConfigureAwait(false);
//            try
//            {
//
//                // The buffer might have been changed by another async task while we were waiting on the semaphore.
//                // Check it now again.
//                int bytesFromBuffer = readFromBuffer(array, offset, count);
//                if (bytesFromBuffer == count)
//                {
//                    return bytesAlreadySatisfied + bytesFromBuffer;
//                }
//
//                if (bytesFromBuffer > 0)
//                {
//                    count -= bytesFromBuffer;
//                    offset += bytesFromBuffer;
//                    bytesAlreadySatisfied += bytesFromBuffer;
//                }
//
//                Contract.Assert(_readLen == _readPos);
//                _readPos = _readLen = 0;
//
//                // If there was anything in the WRITE buffer, clear it.
//                if (_writePos > 0)
//                {
//                    await flushWriteAsync(cancellationToken).ConfigureAwait(false); // no Begin-End read version for Flush. Use Async.
//                }
//
//                // If the requested read is larger than buffer size, avoid the buffer and still use a single read:
//                if (count >= _bufferSize)
//                {
//
//                    if (useApmPattern)
//                    {
//                        ensureBeginEndAwaitableAllocated();
//                        _stream.BeginRead(array, offset, count, BeginEndAwaitableAdapter.Callback, _beginEndAwaitable);
//                        return bytesAlreadySatisfied + _stream.EndRead(await _beginEndAwaitable);
//                    }
//                    else
//                    {
//                        return bytesAlreadySatisfied + await _stream.ReadAsync(array, offset, count, cancellationToken).ConfigureAwait(false);
//                    }
//                }
//
//                // Ok. We can fill the buffer:
//                ensureBufferAllocated();
//                if (useApmPattern)
//                {
//                    ensureBeginEndAwaitableAllocated();
//                    _stream.BeginRead(_buffer, 0, _bufferSize, BeginEndAwaitableAdapter.Callback, _beginEndAwaitable);
//                    _readLen = _stream.EndRead(await _beginEndAwaitable);
//                }
//                else
//                {
//                    _readLen = await _stream.ReadAsync(_buffer, 0, _bufferSize, cancellationToken).ConfigureAwait(false);
//                }
//
//                bytesFromBuffer = readFromBuffer(array, offset, count);
//                return bytesAlreadySatisfied + bytesFromBuffer;
//
//            }
//            finally
//            {
//                SemaphoreSlim sem = super.EnsureAsyncActiveSemaphoreInitialized();
//                sem.Release();
//            }
//        }
////#endif // !FEATURE_PAL && FEATURE_ASYNC_IO
//
//
//        public int ReadByte()
//        {
//
//            ensureNotClosed();
//            ensureCanRead();
//
//            if (_readPos == _readLen)
//            {
//
//                if (_writePos > 0)
//                {
//                    flushWrite();
//                }
//
//                ensureBufferAllocated();
//                _readLen = _stream.Read(_buffer, 0, _bufferSize);
//                _readPos = 0;
//            }
//
//            if (_readPos == _readLen)
//            {
//                return -1;
//            }
//
//            int b = _buffer[_readPos++];
//            return b;
//        }
//
//
//        private void writeToBuffer(byte[] array, tangible.RefObject offset, tangible.RefObject count)
//        {
//
//            int bytesToWrite = Math.min(_bufferSize - _writePos, count.argValue);
//
//            if (bytesToWrite <= 0)
//            {
//                return;
//            }
//
//            ensureBufferAllocated();
//            Buffer.BlockCopy(array, offset.argValue, _buffer, _writePos, bytesToWrite);
//
//            _writePos += bytesToWrite;
//            count.argValue -= bytesToWrite;
//            offset.argValue += bytesToWrite;
//        }
//
//
//        private void writeToBuffer(byte[] array, tangible.RefObject offset, tangible.RefObject count, tangible.OutObject error)
//        {
//
//            try
//            {
//
//                error.argValue = null;
//                writeToBuffer(array, offset, count);
//
//            }
//            catch (RuntimeException ex)
//            {
//                error.argValue = ex;
//            }
//        }
//
//
//        public void write(byte[] array, int offset, int count)
//        {
//
//            if (array == null)
//            {
//                throw new NullPointerException("array", ResourceHelper.GetResourceString("ArgumentNull_Buffer"));
//            }
//            if (offset < 0)
//            {
//                throw new IndexOutOfBoundsException("offset", ResourceHelper.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (count < 0)
//            {
//                throw new IndexOutOfBoundsException("count", ResourceHelper.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (array.getLength() - offset < count)
//            {
//                throw new IllegalArgumentException(ResourceHelper.GetResourceString("Argument_InvalidOffLen"));
//            }
//            Contract.EndContractBlock();
//
//            ensureNotClosed();
//            ensureCanWrite();
//
//            if (_writePos == 0)
//            {
//                clearReadBufferBeforeWrite();
//            }
//
//            // We need to use the buffer, while avoiding unnecessary buffer usage / memory copies.
//            // We ASSUME that memory copies are much cheaper than writes to the underlying stream, so if an extra copy is
//            // guaranteed to reduce the number of writes, we prefer it.
//            // We pick a simple strategy that makes degenerate cases rare if our assumptions are right.
//            //
//            // For every write, we use a simple heuristic (below) to decide whether to use the buffer.
//            // The heuristic has the desirable property (*) that if the specified user data can fit into the currently available
//            // buffer space without filling it up completely, the heuristic will always tell us to use the buffer. It will also
//            // tell us to use the buffer in cases where the current write would fill the buffer, but the remaining data is small
//            // enough such that subsequent operations can use the buffer again.
//            //
//            // Algorithm:
//            // Determine whether or not to buffer according to the heuristic (below).
//            // If we decided to use the buffer:
//            //     Copy as much user data as we can into the buffer.
//            //     If we consumed all data: We are finished.
//            //     Otherwise, write the buffer out.
//            //     Copy the rest of user data into the now cleared buffer (no need to write out the buffer again as the heuristic
//            //     will prevent it from being filled twice).
//            // If we decided not to use the buffer:
//            //     Can the data already in the buffer and current user data be combined into a single write
//            //     by allocating a "shadow" buffer of up to twice the size of _bufferSize (up to a limit to avoid LOH)?
//            //     Yes, it can:
//            //         Allocate a larger "shadow" buffer and ensure the buffered data is moved there.
//            //         Copy user data to the shadow buffer.
//            //         Write shadow buffer to the underlying stream in a single operation.
//            //     No, it cannot (amount of data is still too large):
//            //         Write out any data possibly in the buffer.
//            //         Write out user data directly.
//            //
//            // Heuristic:
//            // If the subsequent write operation that follows the current write operation will result in a write to the
//            // underlying stream in case that we use the buffer in the current write, while it would not have if we avoided
//            // using the buffer in the current write (by writing current user data to the underlying stream directly), then we
//            // prefer to avoid using the buffer since the corresponding memory copy is wasted (it will not reduce the number
//            // of writes to the underlying stream, which is what we are optimising for).
//            // ASSUME that the next write will be for the same amount of bytes as the current write (most common case) and
//            // determine if it will cause a write to the underlying stream. If the next write is actually larger, our heuristic
//            // still yields the right behaviour; if the next write is actually smaller, we may be making an unnecessary write to
//            // the underlying stream. However, this can only occur if the current write is larger than half the buffer size and
//            // we will recover after one iteration.
//            // We have:
//            //     useBuffer = (_writePos + count + count < _bufferSize + _bufferSize)
//            //
//            // Example with _bufferSize = 20, _writePos = 6, count = 10:
//            //
//            //     +---------------------------------------+---------------------------------------+
//            //     |             current buffer            | next iteration's "future" buffer      |
//            //     +---------------------------------------+---------------------------------------+
//            //     |0| | | | | | | | | |1| | | | | | | | | |2| | | | | | | | | |3| | | | | | | | | |
//            //     |0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|0|1|2|3|4|5|6|7|8|9|
//            //     +-----------+-------------------+-------------------+---------------------------+
//            //     | _writePos |  current count    | assumed next count|avail buff after next write|
//            //     +-----------+-------------------+-------------------+---------------------------+
//            //
//            // A nice property (*) of this heuristic is that it will always succeed if the user data completely fits into the
//            // available buffer, i.e. if count < (_bufferSize - _writePos).
//
//            Contract.Assert(_writePos < _bufferSize);
//
//            int totalUserBytes;
//            boolean useBuffer;
//            checked
//            { // We do not expect buffer sizes big enough for an overflow, but if it happens, lets fail early:
//                totalUserBytes = _writePos + count;
//                useBuffer = (totalUserBytes + count < (_bufferSize + _bufferSize));
//            }
//
//            if (useBuffer)
//            {
//
//                tangible.RefObject tempRefOffset = new tangible.RefObject(offset);
//                tangible.RefObject tempRefCount = new tangible.RefObject(count);
//                writeToBuffer(array, tempRefOffset, tempRefCount);
//                count = tempRefCount.argValue;
//                offset = tempRefOffset.argValue;
//
//                if (_writePos < _bufferSize)
//                {
//
//                    Contract.Assert(count == 0);
//                    return;
//                }
//
//                Contract.Assert(count >= 0);
//                Contract.Assert(_writePos == _bufferSize);
//                Contract.Assert(_buffer != null);
//
//                _stream.Write(_buffer, 0, _writePos);
//                writeCount++;
//                _writePos = 0;
//
//                tangible.RefObject tempRefOffset2 = new tangible.RefObject(offset);
//                tangible.RefObject tempRefCount2 = new tangible.RefObject(count);
//                writeToBuffer(array, tempRefOffset2, tempRefCount2);
//                count = tempRefCount2.argValue;
//                offset = tempRefOffset2.argValue;
//
//                Contract.Assert(count == 0);
//                Contract.Assert(_writePos < _bufferSize);
//
//            }
//            else
//            { // if (!useBuffer)
//
//                // Write out the buffer if necessary.
//                if (_writePos > 0)
//                {
//
//                    Contract.Assert(_buffer != null);
//                    Contract.Assert(totalUserBytes >= _bufferSize);
//
//                    // Try avoiding extra write to underlying stream by combining previously buffered data with current user data:
//                    if (totalUserBytes <= (_bufferSize + _bufferSize) && totalUserBytes <= MAX_SHADOW_BUFFER_SIZE)
//                    {
//
//                        ensureShadowBufferAllocated();
//                        Buffer.BlockCopy(array, offset, _buffer, _writePos, count);
//                        _stream.Write(_buffer, 0, totalUserBytes);
//                        writeCount++;
//                        _writePos = 0;
//                        return;
//                    }
//
//                    _stream.Write(_buffer, 0, _writePos);
//                    writeCount++;
//                    _writePos = 0;
//                }
//
//                // Write out user data.
//                _stream.Write(array, offset, count);
//                writeCount++;
//            }
//        }
//
//
//
////#if FEATURE_ASYNC_IO
//
//        public IAsyncResult BeginWrite(byte[] buffer, int offset, int count, AsyncCallback callback, Object state)
//        {
//
//            if (buffer == null)
//            {
//                throw new NullPointerException("buffer", Environment.GetResourceString("ArgumentNull_Buffer"));
//            }
//            if (offset < 0)
//            {
//                throw new IndexOutOfBoundsException("offset", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (count < 0)
//            {
//                throw new IndexOutOfBoundsException("count", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (buffer.getLength() - offset < count)
//            {
//                throw new IllegalArgumentException(Environment.GetResourceString("Argument_InvalidOffLen"));
//            }
//            Contract.EndContractBlock();
//
//            // Previous version incorrectly threw NotSupported instead of ObjectDisposed. We keep that behaviour for back-compat.
//            // EnsureNotClosed();
//            if (_stream == null)
//            {
//                __Error.ReadNotSupported();
//            }
//            ensureCanWrite();
//
//            // Try to satisfy the request from the buffer synchronously. We still need the semaphore lock in case another
//            // async I/O task accesses the buffer concurrently. If we fail to acquire the lock without waiting, make this
//            // an async operation. (A small Java sketch of this fast path follows this method.)
//            SemaphoreSlim sem = super.EnsureAsyncActiveSemaphoreInitialized();
//            Task semaphoreLockTask = sem.WaitAsync();
//            if (semaphoreLockTask.getStatus() == TaskStatus.RanToCompletion)
//            {
//
//                boolean completeSynchronously = true;
//                try
//                {
//                    if (_writePos == 0)
//                    {
//                        clearReadBufferBeforeWrite();
//                    }
//
//                    // If the write completely fits into the buffer, we can complete synchronously.
//                    Contract.Assert(_writePos < _bufferSize);
//                    completeSynchronously = (count < _bufferSize - _writePos);
//
//                    if (completeSynchronously)
//                    {
//
//                        RuntimeException error;
//                        tangible.RefObject tempRefOffset = new tangible.RefObject(offset);
//                        tangible.RefObject tempRefCount = new tangible.RefObject(count);
//                        tangible.OutObject tempOutError = new tangible.OutObject();
//                        writeToBuffer(buffer, tempRefOffset, tempRefCount, tempOutError);
//                        error = tempOutError.argValue;
//                        count = tempRefCount.argValue;
//                        offset = tempRefOffset.argValue;
//                        Contract.Assert(count == 0);
//
//                        SynchronousAsyncResult asyncResult = (error == null) ? new SynchronousAsyncResult(state) : new SynchronousAsyncResult(error, state, isWrite: true);
//                        if (callback != null)
//                        {
//                            callback.invoke(asyncResult);
//                        }
//
//                        return asyncResult;
//                    }
//                }
//                finally
//                {
//                    if (completeSynchronously) // if this is FALSE, we will be entering WriteToUnderlyingStreamAsync and releasing there.
//                    {
//                        sem.Release();
//                    }
//                }
//            }
//
//            // Delegate to the async implementation.
//            return beginWriteToUnderlyingStream(buffer, offset, count, callback, state, semaphoreLockTask);
//        }
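//
//        // Illustrative sketch only: the synchronous fast path described above, in plain Java. The name
//        // BufferedWriteFastPath and its members are invented for this example; the real class guards the
//        // buffer with an async semaphore (SemaphoreSlim), which Java's standard library does not provide,
//        // so java.util.concurrent.Semaphore.tryAcquire() stands in for "acquire the lock without waiting".
//
//        final class BufferedWriteFastPath {
//            private final java.util.concurrent.Semaphore bufferLock = new java.util.concurrent.Semaphore(1);
//            private final byte[] buffer = new byte[4096];
//            private int writePos;
//
//            // Returns true if the bytes were buffered without touching the underlying stream;
//            // returns false when the caller has to fall back to the fully asynchronous path.
//            boolean tryBufferSynchronously(byte[] src, int off, int len) {
//                if (!bufferLock.tryAcquire())
//                    return false;                    // another task owns the buffer right now
//                try {
//                    if (len >= buffer.length - writePos)
//                        return false;                // does not fit: the buffer must be flushed first
//                    System.arraycopy(src, off, buffer, writePos, len);
//                    writePos += len;
//                    return true;
//                } finally {
//                    bufferLock.release();
//                }
//            }
//        }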
//
//
//        private IAsyncResult beginWriteToUnderlyingStream(byte[] buffer, int offset, int count, AsyncCallback callback, Object state, Task semaphoreLockTask)
//        {
//
//            Task writeOp = writeToUnderlyingStreamAsync(buffer, offset, count, CancellationToken.getNone(), semaphoreLockTask, true);
//            return TaskToApm.Begin(writeOp, callback, state);
//        }
//
//
//        public void EndWrite(IAsyncResult asyncResult)
//        {
//
//            if (asyncResult == null)
//            {
//                throw new NullPointerException("asyncResult");
//            }
//            Contract.EndContractBlock();
//
//            var sAR = asyncResult instanceof SynchronousAsyncResult ? (SynchronousAsyncResult)asyncResult : null;
//            if (sAR != null)
//            {
//                SynchronousAsyncResult.EndWrite(asyncResult);
//                return;
//            }
//
//            TaskToApm.End(asyncResult);
//        }
//
//
//        public Task WriteAsync(byte[] buffer, int offset, int count, CancellationToken cancellationToken)
//        {
//
//            if (buffer == null)
//            {
//                throw new NullPointerException("buffer", Environment.GetResourceString("ArgumentNull_Buffer"));
//            }
//            if (offset < 0)
//            {
//                throw new IndexOutOfBoundsException("offset", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (count < 0)
//            {
//                throw new IndexOutOfBoundsException("count", Environment.GetResourceString("ArgumentOutOfRange_NeedNonNegNum"));
//            }
//            if (buffer.getLength() - offset < count)
//            {
//                throw new IllegalArgumentException(Environment.GetResourceString("Argument_InvalidOffLen"));
//            }
//            Contract.EndContractBlock();
//
//            // Fast path check for cancellation already requested
//            if (cancellationToken.getIsCancellationRequested())
//            {
//                return Task.FromCancellation(cancellationToken);
//            }
//
//            ensureNotClosed();
//            ensureCanWrite();
//
//            // Try to satisfy the request from the buffer synchronously. We still need the semaphore lock in case another
//            // async I/O task accesses the buffer concurrently. If we fail to acquire the lock without waiting, make this
//            // an async operation.
//            SemaphoreSlim sem = super.EnsureAsyncActiveSemaphoreInitialized();
//            Task semaphoreLockTask = sem.WaitAsync();
//            if (semaphoreLockTask.getStatus() == TaskStatus.RanToCompletion)
//            {
//
//                boolean completeSynchronously = true;
//                try
//                {
//
//                    if (_writePos == 0)
//                    {
//                        clearReadBufferBeforeWrite();
//                    }
//
//                    Contract.Assert(_writePos < _bufferSize);
//
//                    // If the write completely fits into the buffer, we can complete synchronously:
//                    completeSynchronously = (count < _bufferSize - _writePos);
//
//                    if (completeSynchronously)
//                    {
//
//                        RuntimeException error;
//                        tangible.RefObject tempRefOffset = new tangible.RefObject(offset);
//                        tangible.RefObject tempRefCount = new tangible.RefObject(count);
//                        tangible.OutObject tempOutError = new tangible.OutObject();
//                        writeToBuffer(buffer, tempRefOffset, tempRefCount, tempOutError);
//                        error = tempOutError.argValue;
//                        count = tempRefCount.argValue;
//                        offset = tempRefOffset.argValue;
//                        Contract.Assert(count == 0);
//
//                        return (error == null) ? Task.getCompletedTask() : Task.FromException(error);
//                    }
//                }
//                finally
//                {
//                    if (completeSynchronously) // if this is FALSE, we will be entering WriteToUnderlyingStreamAsync and releasing there.
//                    {
//                        sem.Release();
//                    }
//                }
//            }
//
//            // Delegate to the async implementation.
//            return writeToUnderlyingStreamAsync(buffer, offset, count, cancellationToken, semaphoreLockTask, false);
//        }
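//
//        // Illustrative sketch only: the cancellation fast path above, rendered with CompletableFuture.
//        // Java has no CancellationToken, so an AtomicBoolean stands in for "cancellation already
//        // requested"; the method name writeAsyncOrCancel is invented for this example.
//
//        static java.util.concurrent.CompletableFuture<Void> writeAsyncOrCancel(
//                java.util.concurrent.atomic.AtomicBoolean cancelRequested) {
//            if (cancelRequested.get()) {
//                java.util.concurrent.CompletableFuture<Void> cancelled = new java.util.concurrent.CompletableFuture<>();
//                cancelled.cancel(false);             // complete with CancellationException, no I/O performed
//                return cancelled;
//            }
//            return java.util.concurrent.CompletableFuture.completedFuture(null); // placeholder for the real write
//        }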
//
//
//        /** BufferedStream should be as thin a wrapper as possible. We want WriteAsync to delegate to
//         WriteAsync of the underlying _stream and BeginWrite to delegate to BeginWrite of the underlying stream,
//         rather than calling the base Stream, which implements each one in terms of the other. This allows BufferedStream
//         to affect the semantics of the stream it wraps as little as possible. At the same time, we want to share as
//         much code as possible between the APM and the async pattern implementations. This method is called by both with
//         the corresponding useApmPattern value. Recall that Task implements IAsyncResult.
//         (A standalone Java sketch of this buffering-then-delegating idea follows the async section below.)
//         */
//        private Task writeToUnderlyingStreamAsync(byte[] array, int offset, int count, CancellationToken cancellationToken, Task semaphoreLockTask, boolean useApmPattern)
//        {
//
//            // (These should be Contract.Requires(..) but that method had some issues in async methods; using Assert(..) for now.)
//            Contract.Assert(array != null);
//            Contract.Assert(offset >= 0);
//            Contract.Assert(count >= 0);
//            Contract.Assert(array.getLength() - offset >= count);
//            Contract.Assert(_stream != null);
//            Contract.Assert(_stream.getCanWrite());
//            Contract.Assert(_bufferSize > 0);
//            Contract.Assert(semaphoreLockTask != null);
//
//            // See the LARGE COMMENT in Write(..) for the explanation of the write buffer algorithm.
//
//            await semaphoreLockTask.ConfigureAwait(false);
//            try
//            {
//
//                // The buffer might have been changed by another async task while we were waiting on the semaphore.
//                // However, note that if recalculating the synchronous-completion condition here would yield TRUE, then useBuffer will also be TRUE.
//
//                if (_writePos == 0)
//                {
//                    clearReadBufferBeforeWrite();
//                }
//
//                int totalUserBytes;
//                boolean useBuffer;
//                checked
//                { // We do not expect buffer sizes big enough for an overflow, but if it happens, let's fail early:
//                    totalUserBytes = _writePos + count;
//                    useBuffer = (totalUserBytes + count < (_bufferSize + _bufferSize));
//                }
//
//                if (useBuffer)
//                {
//
//                    tangible.RefObject tempRefOffset = new tangible.RefObject(offset);
//                    tangible.RefObject tempRefCount = new tangible.RefObject(count);
//                    writeToBuffer(array, tempRefOffset, tempRefCount);
//                    count = tempRefCount.argValue;
//                    offset = tempRefOffset.argValue;
//
//                    if (_writePos < _bufferSize)
//                    {
//
//                        Contract.Assert(count == 0);
//                        return;
//                    }
//
//                    Contract.Assert(count >= 0);
//                    Contract.Assert(_writePos == _bufferSize);
//                    Contract.Assert(_buffer != null);
//
//                    if (useApmPattern)
//                    {
//                        ensureBeginEndAwaitableAllocated();
//                        _stream.BeginWrite(_buffer, 0, _writePos, BeginEndAwaitableAdapter.Callback, _beginEndAwaitable);
//                        _stream.EndWrite(await _beginEndAwaitable);
//                    }
//                    else
//                    {
//                        await _stream.WriteAsync(_buffer, 0, _writePos, cancellationToken).ConfigureAwait(false);
//                    }
//                    _writePos = 0;
//
//                    tangible.RefObject tempRefOffset2 = new tangible.RefObject(offset);
//                    tangible.RefObject tempRefCount2 = new tangible.RefObject(count);
//                    writeToBuffer(array, tempRefOffset2, tempRefCount2);
//                    count = tempRefCount2.argValue;
//                    offset = tempRefOffset2.argValue;
//
//                    Contract.Assert(count == 0);
//                    Contract.Assert(_writePos < _bufferSize);
//
//                }
//                else
//                { // if (!useBuffer)
//
//                    // Write out the buffer if necessary.
//                    if (_writePos > 0)
//                    {
//
//                        Contract.Assert(_buffer != null);
//                        Contract.Assert(totalUserBytes >= _bufferSize);
//
//                        // Try to avoid an extra write to the underlying stream by combining previously buffered data
//                        // with the current user data:
//                        if (totalUserBytes <= (_bufferSize + _bufferSize) && totalUserBytes <= MAX_SHADOW_BUFFER_SIZE)
//                        {
//
//                            ensureShadowBufferAllocated();
//                            Buffer.InternalBlockCopy(array, offset, _buffer, _writePos, count);
//                            if (useApmPattern)
//                            {
//                                ensureBeginEndAwaitableAllocated();
//                                _stream.BeginWrite(_buffer, 0, totalUserBytes, BeginEndAwaitableAdapter.Callback, _beginEndAwaitable);
//                                _stream.EndWrite(await _beginEndAwaitable);
//                            }
//                            else
//                            {
//                                await _stream.WriteAsync(_buffer, 0, totalUserBytes, cancellationToken).ConfigureAwait(false);
//                            }
//                            _writePos = 0;
//                            return;
//                        }
//
//                        if (useApmPattern)
//                        {
//                            ensureBeginEndAwaitableAllocated();
//                            _stream.BeginWrite(_buffer, 0, _writePos, BeginEndAwaitableAdapter.Callback, _beginEndAwaitable);
//                            _stream.EndWrite(await _beginEndAwaitable);
//                        }
//                        else
//                        {
//                            await _stream.WriteAsync(_buffer, 0, _writePos, cancellationToken).ConfigureAwait(false);
//                        }
//                        _writePos = 0;
//                    }
//
//                    // Write out user data.
//                    if (useApmPattern)
//                    {
//                        ensureBeginEndAwaitableAllocated();
//                        _stream.BeginWrite(array, offset, count, BeginEndAwaitableAdapter.Callback, _beginEndAwaitable);
//                        _stream.EndWrite(await _beginEndAwaitable);
//                    }
//                    else
//                    {
//                        await _stream.WriteAsync(array, offset, count, cancellationToken).ConfigureAwait(false);
//                    }
//                }
//            }
//            finally
//            {
//                SemaphoreSlim sem = super.EnsureAsyncActiveSemaphoreInitialized();
//                sem.Release();
//            }
//        }
////#endif // !FEATURE_PAL && FEATURE_ASYNC_IO
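//
//        // Illustrative sketch only: Java has no await, Task or SemaphoreSlim, so the following is merely
//        // an analogy for the buffering-then-delegating idea described above. The class name
//        // AsyncDelegationSketch and its members are invented for this example; a single-threaded executor
//        // stands in for the semaphore so that flushes stay ordered. It would need java.io.IOException,
//        // java.io.OutputStream, java.io.UncheckedIOException, java.util.concurrent.CompletableFuture,
//        // java.util.concurrent.Executor and java.util.concurrent.Executors.
//
//        final class AsyncDelegationSketch {
//            private final OutputStream out;
//            private final Executor writer = Executors.newSingleThreadExecutor();
//            private final byte[] buffer = new byte[4096];
//            private int writePos;
//
//            AsyncDelegationSketch(OutputStream out) { this.out = out; }
//
//            synchronized CompletableFuture<Void> writeAsync(byte[] src, int off, int len) {
//                if (len < buffer.length - writePos) {       // fits: complete synchronously
//                    System.arraycopy(src, off, buffer, writePos, len);
//                    writePos += len;
//                    return CompletableFuture.completedFuture(null);
//                }
//                byte[] pending = new byte[writePos + len];  // snapshot buffered + new bytes
//                System.arraycopy(buffer, 0, pending, 0, writePos);
//                System.arraycopy(src, off, pending, writePos, len);
//                writePos = 0;
//                return CompletableFuture.runAsync(() -> {   // delegate the actual I/O to the wrapped stream
//                    try {
//                        out.write(pending);
//                    } catch (IOException e) {
//                        throw new UncheckedIOException(e);
//                    }
//                }, writer);
//            }
//        }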
//
//
//        public void WriteByte(byte value)
//        {
//
//            ensureNotClosed();
//
//            if (_writePos == 0)
//            {
//
//                ensureCanWrite();
//                clearReadBufferBeforeWrite();
//                ensureBufferAllocated();
//            }
//
//            // We should not be flushing here, only writing to the underlying stream; but the previous version flushed, so we keep this behaviour.
//            if (_writePos >= _bufferSize - 1)
//            {
//                flushWrite();
//            }
//
//            _buffer[_writePos++] = value;
//
//            Contract.Assert(_writePos < _bufferSize);
//        }
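//
//        // Illustrative sketch only: the single-byte path above, written as a method that could be added
//        // to the hypothetical WritePathSketch shown after write(..) earlier (it uses that sketch's
//        // out, buffer, bufferSize and writePos fields).
//
//            void writeByte(byte value) throws IOException {
//                if (writePos >= bufferSize - 1) {      // buffer (almost) full: write it out first
//                    out.write(buffer, 0, writePos);
//                    writePos = 0;
//                }
//                buffer[writePos++] = value;
//            }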
//
//
//        public long seek(long offset, SeekOrigin origin)
//        {
//
//            ensureNotClosed();
//            ensureCanSeek();
//
//            // If we have bytes in the WRITE buffer, flush them out, seek and be done.
//            if (_writePos > 0)
//            {
//
//                // We should only be writing out the buffer here, not flushing,
//                // but the previous version did flush and we keep that behaviour for back-compat reasons.
//                flushWrite();
//                return _stream.Seek(offset, origin);
//            }
//
//            // The buffer is either empty or we have a buffered READ.
//
//            if (_readLen - _readPos > 0 && origin == SeekOrigin.Current)
//            {
//
//                // If we have bytes in the READ buffer, adjust the seek offset to account for the resulting difference
//                // between this stream's position and the underlying stream's position.
//                offset -= (_readLen - _readPos);
//            }
//
//            long oldPos = getPosition();
//            Contract.Assert(oldPos == _stream.getPosition() + (_readPos - _readLen));
//
//            long newPos = _stream.Seek(offset, origin);
//
//            // If the seek destination is still within the data currently in the buffer, we want to keep the buffer data and continue using it.
//            // Otherwise we will throw away the buffer. This can only happen on READ, as we flushed WRITE data above.
//
//            // The offset of the new/updated seek pointer within _buffer:
//            _readPos = (int)(newPos - (oldPos - _readPos));
//
//            // If the offset of the updated seek pointer in the buffer is still legal, then we can keep using the buffer:
//            if (0 <= _readPos && _readPos < _readLen)
//            {
//
//                // Adjust the seek pointer of the underlying stream to reflect the number of useful bytes in the read buffer:
//                _stream.Seek(_readLen - _readPos, SeekOrigin.Current);
//
//            }
//            else
//            { // The offset of the updated seek pointer is not a legal offset. Lose the buffer.
//
//                _readPos = _readLen = 0;
//            }
//
//            Contract.Assert(newPos == getPosition(), "newPos (=" + newPos + ") == Position (=" + getPosition() + ")");
//            return newPos;
//        }
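//
//        // Worked example of the seek arithmetic above (all numbers are illustrative only).
//        // Suppose the read buffer holds the bytes for underlying positions 100..163 (_readLen = 64)
//        // and _readPos = 16, so this stream's logical position is 116 while the underlying stream
//        // sits at 164. For seek(10, SeekOrigin.Current):
//        //     the offset is reduced by (_readLen - _readPos) = 48, giving -38;
//        //     seeking the underlying stream by -38 from 164 lands at newPos = 126, the requested logical position;
//        //     the new _readPos = newPos - (oldPos - _readPos) = 126 - 100 = 26, still inside [0, _readLen),
//        //     so the buffer is kept and the underlying stream is advanced by _readLen - _readPos = 38, back to 164.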
//
//
//        public void setLength(long value)
//        {
//
//            if (value < 0)
//            {
//                throw new IndexOutOfBoundsException("value", ResourceHelper.GetResourceString("ArgumentOutOfRange_NegFileSize"));
//            }
//            Contract.EndContractBlock();
//
//            ensureNotClosed();
//            ensureCanSeek();
//            ensureCanWrite();
//
//            flush();
//            _stream.SetLength(value);
//        }
//
//    } // class BufferedStream // namespace
//



