/*
* Copyright (c) 2019-2020, [email protected] All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.ttzero.excel.entity.e3;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.ttzero.excel.reader.ExcelReadException;
import org.ttzero.excel.util.HexUtil;
import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.ObjectInputStream;
import java.io.ObjectOutputStream;
import java.io.Serializable;
import java.nio.ByteBuffer;
import java.nio.charset.Charset;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;
import java.util.LinkedList;
import java.util.UUID;
import static org.ttzero.excel.entity.e3.StandardTypeByteSize.BYTE;
import static org.ttzero.excel.entity.e3.StandardTypeByteSize.SHORT;
import static org.ttzero.excel.entity.e3.StandardTypeByteSize.INT;
import static org.ttzero.excel.entity.e3.StandardTypeByteSize.LONG;
/**
* Sector block
*
* @author guanquan.wang at 2019-01-29 11:19
*/
public class Block implements Serializable {
final Logger LOGGER = LoggerFactory.getLogger(getClass());
private static final long serialVersionUID = 1L;
/**
* Sector data buffer
*/
protected transient ByteBuffer buffer;
/**
* Data size from offset
* size = bytes.length - offset + 1
*/
private int size;
/**
* Used for reset
*/
int offset;
/**
* Document context
*/
protected transient Context context;
/**
* Current sector id
*/
protected int sid;
/**
* The header sector id.
* When a block contains multiple sectors, hsid is the id of the block's first (header) sector
*/
private int hsid;
/**
* Remaining buffer
*/
protected byte[] remainingBuffer;
/**
* Size of remaining buffer
*/
protected int remainingSize;
/**
* The block length
*/
private int block_size;
/**
* The last sector index
*/
private int eof;
/**
* The transaction for each record
*/
transient LinkedList<Mark> transaction;
/**
* The total size in bytes of the next packet.
*/
int packetSize;
/**
* Cache the previous record id
*/
private short preRecord;
private boolean usePre;
/**
* Set when the content of a record exceeds the given limits and the record is split into CONTINUE records
*/
protected boolean overflow;
/**
* Load block by sid
*
* @param context the workbook {@link Context}
* @param sid the sector id
*/
public Block(Context context, int sid) {
this.context = context;
this.sid = sid;
this.context.sectorTable.moveTo(sid);
this.block_size = 1 << (context.split + context.ssz);
}
public Block init() {
buffer = ByteBuffer.allocate(block_size);
buffer.order(context.byteOrder.getByteOrder());
remainingBuffer = new byte[10];
transaction = new LinkedList<>();
load();
return this;
}
/**
* Load sector from disk
*/
protected void load() {
// Load sector
context.read(sid, buffer);
int n = sid >> context.split;
hsid = n << context.split;
offset = (sid - hsid) << context.ssz;
buffer.position(offset);
eof = offset + context.sectorSize;
}
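/*
* Worked example of the arithmetic above (illustrative values only; the real
* split/ssz values come from the document header via the Context):
* assume split = 2 (four sectors per block) and ssz = 9 (512-byte sectors),
* so block_size = 1 << (2 + 9) = 2048. For sid = 5:
*   n      = 5 >> 2       = 1
*   hsid   = 1 << 2       = 4    (first sector of the block)
*   offset = (5 - 4) << 9 = 512
*   eof    = 512 + 512    = 1024
*/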
/**
* Check the sector range and load the next sector when the read length is larger than the remaining size
*
* @param length the value length in bytes
* @return true if all bytes are within the current range
* @throws EndOfChainException if an EOC (End Of Chain) sid is read
*/
protected boolean rangeCheck(int length) {
int n = product(length), _n = n;
// Maximum limit exceeded
if (n == 0 && overflow) {
remainingSize = 0;
whenOverflow();
n = product(length);
}
int lastSize = lastSize();
if (lastSize >= n) {
if (overflow) {
remainingSize = _n;
remaining();
int tmp_remainingSize = remainingSize;
remainingSize = 0;
whenOverflow();
remainingSize = tmp_remainingSize;
} else remainingSize &= 0;
return remainingSize == 0;
}
// Reset remaining size
remainingSize = lastSize;
// length larger than one block size
if (n > remainingSize + sectorSize()) {
remaining();
int r_size = remainingSize;
remainingSize = 0;
do {
int per_last_size = Math.min(n - r_size, sectorSize());
testShouldLoad(availableNextSecID());
remainingBuffer = Arrays.copyOf(remainingBuffer, r_size + per_last_size);
buffer.get(remainingBuffer, r_size, per_last_size);
r_size += per_last_size;
} while (n > r_size);
remainingSize = r_size;
} else {
testShouldLoad(availableNextSecID());
}
if (overflow) {
if (remainingSize < n) {
if (remainingBuffer.length < n) {
remainingBuffer = Arrays.copyOf(remainingBuffer, n);
}
buffer.get(remainingBuffer, remainingSize, n - remainingSize);
remainingSize = n;
}
int tmp_remainingSize = remainingSize;
remainingSize = 0;
whenOverflow();
remainingSize = tmp_remainingSize;
}
return remainingSize == 0;
}
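/*
* Illustrative sketch (assumed layout, for explanation only): reading a 4-byte int
* with a single byte left before eof. rangeCheck(INT) stashes that byte into
* remainingBuffer, sets remainingSize = 1, loads the next (non-contiguous) sector
* and returns false; nextInt() then assembles the value from remainingBuffer[0]
* plus three bytes of the freshly loaded sector.
*/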
/**
* Test whether the next sector should be loaded.
*
* @param next_sid the next sector id
*/
protected void testShouldLoad(int next_sid) {
if (sid == next_sid) {
buffer.position(offset);
afterReset();
return;
}
int p = sid;
context.sectorTable.moveTo(sid = next_sid);
// Adjacent sectors
if (next_sid - p == 1 && buffer.remaining() >= context.sectorSize) {
offset = eof;
eof += context.sectorSize;
afterReset(remainingSize);
remainingSize &= 0;
return;
}
remaining();
int c = next_sid - hsid;
// Reset offset when the next sid in current block
if (c >= 0 && c < size >> context.ssz) {
offset = c << context.ssz;
buffer.position(offset);
eof = offset + context.sectorSize;
afterReset();
// Load the next sector
} else if (next_sid >= 0) {
load();
}
}
/**
* Reset header sector id
*/
protected void afterReset() {
afterReset(0);
}
/**
* Reset header sector id
*
* @param recoup the number of bytes to recoup
*/
protected void afterReset(int recoup) { }
/**
* Identifier
*
* @return the next identifier
*/
public short nextIdentifier() {
if (usePre) {
usePre = false;
return preRecord;
}
return preRecord = nextShort();
}
/**
* Reuse the previous identifier on the next call
*/
public void cacheIdentifier() {
usePre = true;
}
/**
* Read byte
*
* @return the byte value
*/
public byte nextByte() {
rangeCheck(BYTE);
return buffer.get();
}
/**
* Read short
*
* @return the short value
*/
public short nextShort() {
return (short) nextUnsignedShort();
}
/**
* Read unsigned short
*
* @return an unsigned short value
*/
public int nextUnsignedShort() {
int n = rangeCheck(SHORT)
? buffer.get() & 0xFF | (buffer.get() & 0xFF) << 8
: remainingBuffer[0] & 0xFF | (buffer.get() & 0xFF) << 8;
remainingSize = 0;
/*
Whenever the content of a record exceeds the given limits (see table),
the record must be split. Several CONTINUE records containing the additional
data are added after the parent record
*/
if (checkIfFull() && n == ParserIdentifier.CONTINUE) {
LOGGER.debug("CONTINUE >> SecID: {}, Position: {}", sid, buffer.position() - offset);
Mark mark = transaction.peek();
// Record data size
packetSize = nextShortIgnoreCursorMove();
// mark a CONTINUE record
if (mark != null) {
mark.CONTINUE(sid, buffer.position(), packetSize);
mark.product = 0;
}
n = rangeCheck(SHORT)
? buffer.get() & 0xFF | (buffer.get() & 0xFF) << 8
: remainingBuffer[0] & 0xFF | (buffer.get() & 0xFF) << 8;
}
return n;
}
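/*
* Illustrative note: the manual assembly above is little-endian, as used by the BIFF
* format, so the byte sequence 0x34 0x12 yields the value 0x1234. When a record
* overflows the per-record limit, the stream looks roughly like
*   [id][size][data ... limit reached][ParserIdentifier.CONTINUE][size][more data]
* and nextUnsignedShort() transparently consumes the CONTINUE header so callers keep
* reading as if the record were contiguous.
*/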
/**
* Read char
*
* @return the char value
*/
public char nextChar() {
return (char) nextShort();
}
/**
* Read a compressed (8-bit) char
*
* @return the compressed char value
*/
public char nextComprChar() {
return (char) nextByte();
}
/**
* Read int
*
* @return the int value
*/
public int nextInt() {
if (!rangeCheck(INT)) {
int value = 0, i = 0;
for (; i < remainingSize; ) {
value |= (remainingBuffer[i] & 0xFF) << (i++ << 3);
}
remainingSize = 0;
while (INT - i > 0) {
value |= (buffer.get() & 0xFF) << (i++ << 3);
}
return value;
}
return buffer.getInt();
}
/**
* Read long
*
* @return the long value
*/
public long nextLong() {
return nextLong(LONG);
}
/**
* Read long
*
* @param length the length in bytes
* @return the long value
*/
public long nextLong(int length) {
long value = 0L;
int i = 0;
if (!rangeCheck(length)) {
for (; i < remainingSize; ) {
value |= ((remainingBuffer[i] & 0xFF) & -1L) << (i++ << 3);
}
remainingSize = 0;
}
while (length - i > 0) {
value |= ((buffer.get() & 0xFF) & -1L) << (i++ << 3);
}
return value;
}
/**
* Read single-precision floating-point
*
* @return the float value
*/
public float nextFloat() {
return Float.intBitsToFloat(nextInt());
}
/**
* Read double-precision floating-point
*
* @return the double value
*/
public double nextDouble() {
return Double.longBitsToDouble(nextLong());
}
/**
* Read double-precision floating-point
*
* @param size the size in bytes
* @return the double value
*/
public double nextDouble(int size) {
if (size > LONG) {
throw new ExcelReadException("Overly large size, max " + LONG + ", current " + size);
}
long n = nextLong(size);
if (size <= 4) {
n <<= 32;
}
return Double.longBitsToDouble(n);
}
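/*
* Worked example for the size <= 4 case above (value chosen for illustration):
* a 4-byte field holding 0x40590000 is shifted into the high 32 bits, giving the
* IEEE-754 bit pattern 0x4059000000000000, which decodes to the double 100.0.
*/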
/**
* Read a GUID that identifies a software component
*
* @return uuid
*/
public UUID nextGUID() {
long mostSigBits = nextLong(INT);
mostSigBits <<= 16;
mostSigBits |= nextLong(SHORT);
mostSigBits <<= 16;
mostSigBits |= nextLong(SHORT);
// big-endian order
long leastSigBits = 0L;
byte[] range = range(LONG);
for (int i = LONG - 1, _i = 0; i >= 0; i--) {
leastSigBits |= ((range[i] & 0xFF) & -1L) << (_i++ << 3);
}
remainingSize = 0;
return new UUID(mostSigBits, leastSigBits);
}
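/*
* Worked example (the well-known CLSID below is used purely for illustration):
* the GUID {00020820-0000-0000-C000-000000000046} is stored on disk as
*   20 08 02 00 | 00 00 | 00 00 | C0 00 00 00 00 00 00 46
* The first three fields are read little-endian into mostSigBits
* (0x0002082000000000); the trailing eight bytes are kept in big-endian order
* for leastSigBits (0xC000000000000046), matching java.util.UUID's layout.
*/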
/**
* Read a 16-bit string that contains neither Asian phonetic (Far-East) data nor Rich-Text
*
* @param size character count
* @param option option flag. If bit 0 is zero the characters are encoded as 8-bit,
* if bit 0 is one the characters are encoded as 16-bit
* @return an ASCII or UTF-16LE string
*/
public String utf(int size, Option option) {
// Character compression (ccompr)
Charset charset;
// Uncompressed (16-bit characters)
if (option.isOn(0)) {
size <<= 1;
charset = StandardCharsets.UTF_16LE;
} else {
charset = StandardCharsets.US_ASCII;
}
String str;
// Block has CONTINUE record
if (!rangeCheck(size) && overflow) {
String s1 = new String(remainingBuffer, 0, remainingSize, charset);
int m_size = size - remainingSize;
if (m_size > 0) {
if (option.isOn(0)) m_size >>= 1;
/*
Unicode strings are split in a special way. At the beginning of each CONTINUE record
the option flags byte is repeated.
Only the character size flag will be set in this flags byte, the Rich-Text flag and
the Far-East flag are set to zero.
In each CONTINUE record it is possible that the character size changes from 8-bit
characters to 16-bit characters and vice versa.
A Unicode string is never split before or within its first character. That means
all header fields (string length, option flags, optional Rich-Text size, and optional
Far-East data size) and the first character of the string have to occur together in the
leading record, or have to be moved completely into the CONTINUE record.
*/
option = Option.of(nextByte());
if (option.isOn(0)) {
m_size <<= 1;
charset = StandardCharsets.UTF_16LE;
} else {
charset = StandardCharsets.US_ASCII;
}
byte[] bytes2 = range(m_size);
String s2 = new String(bytes2, 0, m_size, charset);
str = s1 + s2;
} else str = s1;
remainingSize = 0;
} else {
if (remainingSize >= size) {
str = new String(remainingBuffer, 0, size, charset);
} else if (remainingBuffer.length >= size) {
buffer.get(remainingBuffer, remainingSize, size - remainingSize);
str = new String(remainingBuffer, 0, size, charset);
} else if (size <= (1 << context.sssz)) {
remainingBuffer = Arrays.copyOf(remainingBuffer, 1 << context.sssz);
buffer.get(remainingBuffer, remainingSize, size - remainingSize);
str = new String(remainingBuffer, 0, size, charset);
} else {
byte[] data = Arrays.copyOf(remainingBuffer, size);
buffer.get(data, remainingSize, size - remainingSize);
str = new String(data, 0, size, charset);
}
}
return str;
}
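/*
* Illustrative sketch of the split described above (assumed data, not taken from a
* real file): the 5-character string "ABCDE" stored compressed (flags = 0x00) may
* carry "AB" in the leading record; the CONTINUE portion then repeats an option
* byte, e.g. 0x01, and continues with 16-bit characters 43 00 44 00 45 00, so the
* two pieces are decoded with different charsets before being joined.
*/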
// FIXME Wrong implementation
public String nextString(int size, Charset charset) {
byte[] data = range(size);
// including the null terminator
// int i = size - 1;
// for (; i >= 0; i--) {
// if (data[i] != 0) break;
// }
// if ((charset.equals(StandardCharsets.UTF_16LE)
// || charset.equals(StandardCharsets.UTF_16)
// || charset.equals(StandardCharsets.UTF_16BE)) && (i & 1) == 0)
// {
// i++;
// }
int n = trim(data, charset);
return charset.decode(ByteBuffer.wrap(data, 0, n)).toString();
// return new String(data, 0, i + 1, charset);
}
private int trim(byte[] bytes, Charset charset) {
int n = bytes.length;
if (charset.equals(StandardCharsets.UTF_16LE)) {
for (int i = 0; i < bytes.length - 1; i += 2) {
if (bytes[i] == 0x0 && bytes[i + 1] == 0x0) {
n = i;
break;
}
}
} else {
for (int i = 0; i < bytes.length; i++) {
if (bytes[i] == 0x0) {
n = i;
break;
}
}
}
return n;
}
/**
* Remaining byte size before the end of the current sector
*
* @return the remaining byte size
*/
public int lastSize() {
return eof - buffer.position();
}
/**
* Reset offset
*/
public void reset() {
Mark mark = transaction.peek();
if (mark != null) {
if (sid != mark.sid) {
sid = mark.sid;
load();
}
buffer.position(mark.offset);
mark.product = 0;
} else {
buffer.position(offset);
}
remainingSize = 0;
}
/**
* Skip some bytes
*
* @param size the number of bytes to skip
*/
public void skip(int size) {
int n = size > 0 ? product(size) : size;
// Last size
int lastSize = lastSize();
// Turn back
if (n < 0) {
// Retreat across Sector
if (sectorSize() < -n || sectorSize() - lastSize > -n && context.sectorTable.preSecID() != sid - 1) {
throw new IllegalArgumentException("Skip a negative sector. SecID: "
+ sid + " Position: " + (buffer.position() - offset));
}
buffer.position(buffer.position() + n);
return;
}
if (lastSize < n) {
// Skip Multiple sat
buffer.position(buffer.position() + lastSize);
n -= lastSize;
context.sectorTable.moveTo(sid);
int next_sid = availableNextSecID(), sat = 1 << context.ssz;
for (; n > sat; next_sid = availableNextSecID(), n -= sat) ;
testShouldLoad(next_sid);
}
buffer.position(buffer.position() + n);
if (overflow) {
whenOverflow();
skip(size - n);
}
}
/**
* Byte array range
*
* @param length the range length in bytes
* @return the byte range
*/
public byte[] range(int length) {
if (remainingBuffer.length < length) {
remainingBuffer = new byte[length];
}
rangeCheck(length);
buffer.get(remainingBuffer, remainingSize, length - remainingSize);
remainingSize = 0;
return remainingBuffer;
}
/**
* @return current sector id
*/
public int currentSecID() {
return this.sid;
}
/**
* Marks the block as ready to read.
*
* @return the total size in bytes of the next packet.
*/
public int ready() {
// Size of the following data
this.packetSize = nextShort();
if (this.packetSize == 0) {
return packetSize;
}
transaction.push(Mark.of(sid, buffer.position()));
return this.packetSize;
}
/**
* Finish reading the packet.
*/
public void commit() {
Mark mark = transaction.peek();
if (mark == null) return;
int endOffset = mark.product;
if (packetSize > endOffset) {
skip(packetSize - endOffset);
}
transaction.pop();
Mark _mark;
if ((_mark = transaction.peek()) != null) {
_mark.product(mark.product);
}
this.packetSize = 0;
}
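/*
* A minimal usage sketch (record identifier and field layout are assumptions for
* illustration; real parsing depends on the record type):
*
*   short id   = block.nextIdentifier(); // record identifier
*   int   size = block.ready();          // size of the record body, marks the record
*   if (size > 0) {
*       int value = block.nextInt();     // read whatever fields the record defines
*   }
*   block.commit();                      // skip any unread bytes of the record
*/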
/**
* @return the position relative to the ready mark.
*/
public int position() {
Mark mark = transaction.peek();
if (mark != null) {
if (mark.sid != sid) {
int n = 0;
context.sectorTable.moveTo(mark.sid);
for (int fst = mark.sid; fst != sid; fst = availableNextSecID(), n++) ;
// end of block
if (lastSize() == 0) n++;
return (n - 1 << context.ssz) + sectorSize() - mark.offset + buffer.position();
} else return buffer.position() - mark.offset;
}
return -1;
}
/**
* @return the context
*/
public Context getContext() {
return context;
}
public Block deepClone() {
try {
ByteArrayOutputStream bos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(bos);
oos.writeObject(this);
oos.flush();
ObjectInputStream ois = new ObjectInputStream(new ByteArrayInputStream(bos.toByteArray()));
return mergeProperties((Block) ois.readObject());
} catch (IOException | ClassNotFoundException e) {
return whenCopyFailed();
}
}
protected Block mergeProperties(Block cp) {
cp.context = getContext();
LinkedList<Mark> _trans = new LinkedList<>();
for (Mark mark : transaction) {
Mark _mark = new Mark(mark.sid, mark.ssid, mark.hssid, mark.offset);
_mark.product = mark.product;
_trans.push(_mark);
}
cp.transaction = _trans;
cp.buffer = ByteBuffer.allocate(block_size);
cp.buffer.order(buffer.order());
int pos = buffer.position();
buffer.position(0);
cp.buffer.put(buffer);
cp.buffer.flip();
cp.buffer.position(pos);
buffer.position(pos);
return cp;
}
protected Block whenCopyFailed() {
return copyProperties(new Block(context, sid));
}
protected Block copyProperties(Block cp) {
mergeProperties(cp);
cp.offset = offset;
cp.size = size;
cp.hsid = hsid;
cp.packetSize = packetSize;
cp.eof = eof;
cp.block_size = block_size;
cp.usePre = usePre;
cp.preRecord = preRecord;
cp.remainingBuffer = Arrays.copyOf(remainingBuffer, remainingBuffer.length);
cp.remainingSize = remainingSize;
return cp;
}
// --- Static
public static float IEEE754SinglePrecision(int i) {
return Float.intBitsToFloat(i);
}
public static double IEEE754DoublePrecision(long i) {
return Double.longBitsToDouble(i);
}
/**
* Returns the sector size
*
* @return the specific size given in the header
*/
public int sectorSize() {
return context.sectorSize;
}
/**
* Returns the next sector ID
*
* @return the sector ID
*/
protected int nextSecID() {
return context.sectorTable.nextSecID();
}
/**
* Returns the next available sector ID, failing if the end of the chain has been reached
*
* @return the sector ID
* @throws EndOfChainException if the next sector ID is the End Of Chain marker
*/
protected int availableNextSecID() {
int next_sid = nextSecID();
// End of Chain SID
if (next_sid == SectorAllocationTable.EOC) {
throw new EndOfChainException("End Of Chain SecID. sid: " + sid + ", next_sid: " + next_sid);
}
return next_sid;
}
/**
* Save the remaining data into the remaining buffer
*/
protected void remaining() {
if (remainingSize <= 0) return;
// Resize if the remaining data overflows the buffer
if (remainingBuffer.length < remainingSize) {
remainingBuffer = new byte[remainingSize];
}
buffer.get(remainingBuffer, 0, remainingSize);
}
/**
* Move the cursor forward by {@code size} bytes.
* If the limit is exceeded, the maximum movable size is returned;
* it will be less than the parameter {@code size} when the
* current record overflows {@code packetSize}
*
* @param size the forward size in bytes
* @return the maximum movable size
*/
protected int product(int size) {
Mark mark = transaction.peek();
int _size = size;
if (mark != null) {
if (packetSize > 0 && mark.product + size >= packetSize) {
size = packetSize - mark.product;
overflow |= mark.product + _size > packetSize;
}
else overflow = false;
mark.product(size);
}
return size;
}
protected void whenOverflow() {
Mark mark = transaction.peek();
if (mark != null && mark.product >= packetSize) {
LOGGER.debug("CONTINUE >> SecID: {}, Position: {}", sid, buffer.position() - offset);
short id = nextShortIgnoreCursorMove();
if (id != ParserIdentifier.CONTINUE) {
if (LOGGER.isTraceEnabled()) printBuffer();
throw new ExcelReadException("There has a error block(SecID: " + sid + " Position: "
+ (buffer.position() - offset) + "). Expected to be 0x0060 but " + HexUtil.toHexString(id));
}
packetSize = nextShortIgnoreCursorMove();
// Append a CONTINUE record
mark.CONTINUE(sid, buffer.position(), packetSize);
mark.product = 0;
}
}
/**
* Get the next short value without counting it toward the current record's consumed size
*
* @return the next short value
*/
private short nextShortIgnoreCursorMove() {
if (lastSize() < 2) {
remainingSize = lastSize();
testShouldLoad(availableNextSecID());
}
short v = (short) (remainingSize == 0
? buffer.get() & 0xFF | (buffer.get() & 0xFF) << 8
: remainingBuffer[0] & 0xFF | (buffer.get() & 0xFF) << 8);
remainingSize = 0;
return v;
}
/**
* Check whether the maximum limit has been exceeded
*/
protected boolean checkIfFull() {
Mark mark = transaction.peek();
return mark != null && mark.product >= packetSize;
}
/**
* Return the remaining size of the record
*
* @return -1 if unknown
*/
public int recordRemainingSize() {
Mark mark = transaction.peek();
if (mark == null) return -1;
return packetSize - mark.product;
}
// --- FOR TEST
Block(Context context, byte[] bytes) {
this.context = context;
this.buffer = ByteBuffer.wrap(bytes);
this.buffer.order(context.byteOrder.getByteOrder());
this.remainingBuffer = new byte[10];
this.size = this.eof = bytes.length;
this.sid = this.hsid = this.offset = 0;
this.packetSize = size;
transaction = new LinkedList<>();
}
static class Mark {
int sid;
int ssid;
int hssid;
int offset;
int product;
// LinkedList<Continue> continues;
private Mark(int sid, int offset) {
this(sid, -1, -1, offset);
}
private Mark(int sid, int ssid, int hssid, int offset) {
this.sid = sid;
this.ssid = ssid;
this.hssid = hssid;
this.offset = offset;
}
static Mark of(int sid, int offset) {
return new Mark(sid, offset);
}
static Mark of(int sid, int ssid, int hssid, int offset) {
return new Mark(sid, ssid, hssid, offset);
}
void product(int n) {
product += n;
// if (continues != null) {
// Continue con = continues.peek();
// if (con != null) con._product += n;
// }
}
void CONTINUE(int sid, int position, int packageSize) {
// FIXME Temporarily ignored
// if (continues == null) {
// continues = new LinkedList<>();
// }
// continues.push(new Continue(sid, position, packageSize));
}
}
/**
* Whenever the content of a record exceeds the given limits (see table),
* the record must be split. Several CONTINUE records containing the
* additional data are added after the parent record.
*
* BIFF version | Maximum data size of a record
* -------------|-------------------------------
* BIFF2-BIFF5 | 2080 bytes (2084 bytes including record header)
* BIFF8 | 8224 bytes (8228 bytes including record header)
*
*/
private static class Continue {
int sid;
int position;
int product, _product;
Continue(int sid, int position, int product) {
this.sid = sid;
this.position = position;
this.product = product;
}
}
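/*
* Sketch of the on-disk record layout behind the table above (sizes taken from that
* table):
*   [identifier : 2 bytes][size : 2 bytes][data : size bytes]
* When the data would exceed 2080 bytes (BIFF2-BIFF5) or 8224 bytes (BIFF8), the
* surplus is written as one or more CONTINUE records with the same
* [identifier][size][data] shape immediately after the parent record.
*/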
// ================= FOR DEBUG =================
/**
* Print current sector data for debug
*/
private void printBuffer() {
byte[] bytes = new byte[sectorSize() << context.split];
int pos = buffer.position();
int i = 1, ii = sid >> 5 << 5;
buffer.position(0);
buffer.get(bytes);
buffer.position(pos); // Reset pos
int n = bytes.length;
System.out.println(">>>" + (ii++));
for (int j = 0; j < n; i++, j++) {
System.out.print(Integer.toHexString(bytes[j] & 0xFF));
System.out.print(' ');
if ((i & 63) == 0) {
System.out.println();
}
if ((i & 511) == 0) {
System.out.println();
System.out.println(">>>" + (ii++));
}
}
}
}