/**
 * Create a byte array consisting of an SST record and any
 * required Continue records, ready to be written out.
 *
 * If an SST record and any subsequent Continue records are read
 * in to create this instance, this method should produce a byte
 * array that is identical to the byte array produced by
 * concatenating the input records' data.
 *
 * @param offset offset into data at which to begin writing
 * @param data   the buffer to write the SST (+ Continue) bytes into
 * @return the number of bytes written (record size)
 */
public int Serialize(int offset, byte[] data)
{
    UnicodeRecordStats stats = new UnicodeRecordStats();
    // Writes sid + a length placeholder (stats.lastLengthPos) + string counts.
    sstRecordHeader.WriteSSTHeader(stats, data, offset, 0);
    int pos = offset + SSTRecord.SST_RECORD_OVERHEAD;

    for (int k = 0; k < strings.Size; k++)
    {
        // Record bucket offsets for the ExtSST index every DEFAULT_BUCKET_SIZE strings.
        if (k % ExtSSTRecord.DEFAULT_BUCKET_SIZE == 0)
        {
            int index = k / ExtSSTRecord.DEFAULT_BUCKET_SIZE;
            if (index < ExtSSTRecord.MAX_BUCKETS)
            {
                // Excel only indexes the first 128 buckets.
                bucketAbsoluteOffsets[index] = pos - offset;
                bucketRelativeOffsets[index] = pos - offset;
            }
        }
        UnicodeString s = GetUnicodeString(k);
        pos += s.Serialize(stats, pos, data);
    }

    // Patch the length of the last (possibly Continue) record, which was
    // left as a placeholder while its contents were being written.
    if (stats.lastLengthPos != -1)
    {
        short lastRecordLength = (short)(pos - stats.lastLengthPos - 2);
        // A BIFF record body may not exceed 8224 bytes. Use InvalidDataException
        // for this sanity check, consistent with WriteContinueIfRequired;
        // InvalidProgramException is reserved by .NET for invalid IL.
        if (lastRecordLength > 8224)
            throw new InvalidDataException();
        LittleEndian.PutShort(data, stats.lastLengthPos, lastRecordLength);
    }
    return pos - offset;
}
/**
 * Writes out the fixed part of the SST record: the sid, a placeholder
 * for the record size, the total number of strings and the number of
 * unique strings.
 *
 * The record size is not known yet, so only its position is remembered
 * (in stats.lastLengthPos) so it can be patched in afterwards.
 *
 * @param stats       running size/offset bookkeeping, updated as bytes are written
 * @param data        the data buffer to write the header to
 * @param bufferIndex the index into the data buffer where the header starts
 * @param recSize     the number of records written (unused here)
 * @return the number of header bytes written
 */
public int WriteSSTHeader(UnicodeRecordStats stats, byte[] data, int bufferIndex, int recSize)
{
    int pos = bufferIndex;

    // Record type.
    LittleEndian.PutShort(data, pos, SSTRecord.sid);
    pos += LittleEndianConstants.SHORT_SIZE;
    stats.recordSize += LittleEndianConstants.SHORT_SIZE;
    stats.remainingSize -= LittleEndianConstants.SHORT_SIZE;

    // Length placeholder — remember where it goes, write it later.
    stats.lastLengthPos = pos;
    pos += LittleEndianConstants.SHORT_SIZE;
    stats.recordSize += LittleEndianConstants.SHORT_SIZE;
    stats.remainingSize -= LittleEndianConstants.SHORT_SIZE;

    // Total number of strings in the workbook.
    LittleEndian.PutInt(data, pos, numStrings);
    pos += LittleEndianConstants.INT_SIZE;
    stats.recordSize += LittleEndianConstants.INT_SIZE;
    stats.remainingSize -= LittleEndianConstants.INT_SIZE;

    // Number of unique strings actually stored.
    LittleEndian.PutInt(data, pos, numUniqueStrings);
    pos += LittleEndianConstants.INT_SIZE;
    stats.recordSize += LittleEndianConstants.INT_SIZE;
    stats.remainingSize -= LittleEndianConstants.INT_SIZE;

    return pos - bufferIndex;
}
// Accumulates into stats the number of bytes this string will occupy when
// serialized, including any Continue-record overhead (4 bytes of header per
// continue, plus — mid-string only — 1 extra byte for the compression flag).
// Mirrors the layout produced by Serialize(UnicodeRecordStats, int, byte[]);
// the two must be kept in sync.
public void GetRecordSize(UnicodeRecordStats stats)
{
    // Basic string overhead: 2-byte char count + 1-byte option flags.
    if (stats.remainingSize < 3)
    {
        // Needs a continue: 4 bytes of continue-record overhead,
        // then a fresh record's worth of space.
        stats.recordSize += 4;
        stats.remainingSize = SSTRecord.MAX_RECORD_SIZE - 4;
    }
    stats.recordSize += 3;
    stats.remainingSize -= 3;
    // Account for the rich-text run count (2 bytes) if rich text.
    if (IsRichText)
    {
        // Run count
        if (stats.remainingSize < 2)
        {
            // Needs a continue — reset the available space.
            stats.remainingSize = SSTRecord.MAX_RECORD_SIZE - 4;
            // Continue record overhead.
            stats.recordSize += 4;
        }
        stats.recordSize += 2;
        stats.remainingSize -= 2;
    }
    // Account for the extended-data (ExtRst) length field (4 bytes) if present.
    if (IsExtendedText)
    {
        // Extension length — may itself need a continue first.
        if (stats.remainingSize < 4)
        {
            // Reset the available space.
            stats.remainingSize = SSTRecord.MAX_RECORD_SIZE - 4;
            // Continue record overhead.
            stats.recordSize += 4;
        }
        stats.recordSize += 4;
        stats.remainingSize -= 4;
    }
    // 2 bytes per char for UTF-16, 1 byte per char for compressed (8-bit) text.
    int charsize = IsUncompressedUnicode ? 2 : 1;
    int strSize = (String.Length * charsize);
    // Check to see if the offset occurs mid string; if so we need to add
    // continue-record overhead (and the compression-flag byte) for each split.
    if (strSize > stats.remainingSize)
    {
        // The split may span several continue records if the string is very
        // long, so walk through the string chunk by chunk.
        int ammountThatCantFit = strSize;
        while (ammountThatCantFit > 0)
        {
            int ammountWritten = Math.Min(stats.remainingSize, ammountThatCantFit);
            // For double-byte text a record must not end between the two bytes
            // of a character, so round the chunk down to an even byte count.
            if (IsUncompressedUnicode)
            {
                if (((ammountWritten) % 2) == 1)
                    ammountWritten--;
            }
            stats.recordSize += ammountWritten;
            stats.remainingSize -= ammountWritten;
            // Subtract what fitted into the current record.
            ammountThatCantFit -= ammountWritten;
            // Each iteration of this while loop is another continue record,
            // unless everything now fits.
            if (ammountThatCantFit > 0)
            {
                // Reset the available space.
                stats.remainingSize = SSTRecord.MAX_RECORD_SIZE - 4;
                // Continue record overhead.
                stats.recordSize += 4;
                // The first byte after a continue mid string is the extra byte
                // indicating whether the rest of the run is compressed or not.
                stats.recordSize++;
                stats.remainingSize--;
            }
        }
    }
    else
    {
        // The string fits entirely in the remaining space.
        stats.recordSize += strSize;
        stats.remainingSize -= strSize;
    }
    if (IsRichText && (field_4_format_runs != null))
    {
        int count = field_4_format_runs.Count;
        // This ensures that a 4-byte format run is never split by a continue.
        for (int i = 0; i < count; i++)
        {
            if (stats.remainingSize < 4)
            {
                // Reset the available space.
                stats.remainingSize = SSTRecord.MAX_RECORD_SIZE - 4;
                // Continue record overhead.
                stats.recordSize += 4;
            }
            // Each run is four bytes (char index + font index).
            stats.recordSize += 4;
            stats.remainingSize -= 4;
        }
    }
    if (IsExtendedText && (field_5_ext_rst != null))
    {
        // ExtRst is not documented, so assume it may be continued on any
        // byte boundary.
        // NOTE(review): this computes the OVERFLOW (length - remaining), then
        // chunks by Math.Min(remaining, overflow) — unlike the string branch,
        // which chunks the full length. Looks suspicious; confirm against
        // Serialize, which shares the same pattern.
        int ammountThatCantFit = field_5_ext_rst.Length - stats.remainingSize;
        if (ammountThatCantFit > 0)
        {
            while (ammountThatCantFit > 0)
            {
                int ammountWritten = Math.Min(stats.remainingSize, ammountThatCantFit);
                stats.recordSize += ammountWritten;
                stats.remainingSize -= ammountWritten;
                // Subtract what fitted into the current record.
                ammountThatCantFit -= ammountWritten;
                if (ammountThatCantFit > 0)
                {
                    // Each iteration of this while loop is another continue
                    // record — reset the available space.
                    stats.remainingSize = SSTRecord.MAX_RECORD_SIZE - 4;
                    // Continue record overhead.
                    stats.recordSize += 4;
                }
            }
        }
        else
        {
            // The ExtRst data fits wholly in what remains.
            stats.remainingSize -= field_5_ext_rst.Length;
            stats.recordSize += field_5_ext_rst.Length;
        }
    }
}
// Serializes this string into data at the given offset, inserting Continue
// records whenever the current record fills up. Layout (matching
// GetRecordSize): char count (2) + option flags (1) [+ run count (2)]
// [+ ExtRst length (4)] + character data [+ format runs] [+ ExtRst data].
// Returns the number of bytes written; stats carries the running record
// size / remaining-space bookkeeping across strings.
public int Serialize(UnicodeRecordStats stats, int offset, byte[] data)
{
    int pos = offset;
    // Basic string overhead: 2-byte char count + 1-byte option flags.
    pos = WriteContinueIfRequired(stats, 3, pos, data);
    LittleEndian.PutShort(data, pos, CharCount);
    pos += 2;
    data[pos] = OptionFlags;
    pos += 1;
    stats.recordSize += 3;
    stats.remainingSize -= 3;
    if (IsRichText)
    {
        if (field_4_format_runs != null)
        {
            // Rich-text run count (2 bytes).
            pos = WriteContinueIfRequired(stats, 2, pos, data);
            LittleEndian.PutShort(data, pos, (short)field_4_format_runs.Count);
            pos += 2;
            stats.recordSize += 2;
            stats.remainingSize -= 2;
        }
    }
    if (IsExtendedText)
    {
        if (this.field_5_ext_rst != null)
        {
            // ExtRst length field (4 bytes).
            pos = WriteContinueIfRequired(stats, 4, pos, data);
            LittleEndian.PutInt(data, pos, field_5_ext_rst.Length);
            pos += 4;
            stats.recordSize += 4;
            stats.remainingSize -= 4;
        }
    }
    // 2 bytes per char for UTF-16, 1 byte per char for compressed (8-bit) text.
    int charsize = IsUncompressedUnicode ? 2 : 1;
    int strSize = (String.Length * charsize);
    byte[] strBytes = null;
    try
    {
        // Encode the character data up front so it can be block-copied
        // (possibly in chunks) below.
        String unicodeString = String;
        if (!IsUncompressedUnicode)
        {
            strBytes = Encoding.GetEncoding("Iso-8859-1").GetBytes(unicodeString);
        }
        else
        {
            strBytes = Encoding.GetEncoding("UTF-16LE").GetBytes(unicodeString);
        }
    }
    catch (Exception)
    {
        // NOTE(review): swallows the encoding failure and rethrows a bare
        // InvalidProgramException (reserved for invalid IL), losing the cause.
        // Consider propagating, or at least wrapping, the original exception.
        throw new InvalidProgramException();
    }
    // Sanity check: charsize * length must match the encoded byte count.
    if (strSize != strBytes.Length)
        throw new InvalidProgramException("That shouldnt have happened!");
    // Check to see if the offset occurs mid string; if so the string must be
    // split across one or more Continue records.
    if (strSize > stats.remainingSize)
    {
        // The split may span several continue records if the string is very
        // long, so walk through the encoded bytes chunk by chunk.
        int ammountThatCantFit = strSize;
        int strPos = 0;
        while (ammountThatCantFit > 0)
        {
            int ammountWritten = Math.Min(stats.remainingSize, ammountThatCantFit);
            // For double-byte text a record must not end between the two bytes
            // of a character, so round the chunk down to an even byte count.
            if (IsUncompressedUnicode)
            {
                if (((ammountWritten) % 2) == 1)
                    ammountWritten--;
            }
            Array.Copy(strBytes, strPos, data, pos, ammountWritten);
            pos += ammountWritten;
            strPos += ammountWritten;
            stats.recordSize += ammountWritten;
            stats.remainingSize -= ammountWritten;
            // Subtract what fitted into the current record.
            ammountThatCantFit -= ammountWritten;
            // Each iteration of this while loop is another continue record,
            // unless everything now fits.
            if (ammountThatCantFit > 0)
            {
                // A continue WILL be required here, but use the common method.
                pos = WriteContinueIfRequired(stats, ammountThatCantFit, pos, data);
                // The first byte after a continue mid string is the extra byte
                // indicating whether the rest of the run is compressed or not.
                data[pos] = (byte)(IsUncompressedUnicode ? 0x1 : 0x0);
                pos++;
                stats.recordSize++;
                stats.remainingSize--;
            }
        }
    }
    else
    {
        // NOTE(review): debug leftover — writes straight to the console when
        // the destination buffer is too small, then overflows in Array.Copy
        // anyway. Should probably be an exception or assertion.
        if (strSize > (data.Length - pos))
            Console.WriteLine("Hmm shouldnt happen");
        // The string fits entirely in the remaining space.
        Array.Copy(strBytes, 0, data, pos, strSize);
        pos += strSize;
        stats.recordSize += strSize;
        stats.remainingSize -= strSize;
    }
    if (IsRichText && (field_4_format_runs != null))
    {
        int count = field_4_format_runs.Count;
        // This ensures that a 4-byte format run is never split by a continue.
        for (int i = 0; i < count; i++)
        {
            pos = WriteContinueIfRequired(stats, 4, pos, data);
            FormatRun r = (FormatRun)field_4_format_runs[i];
            LittleEndian.PutShort(data, pos, r.Char);
            pos += 2;
            LittleEndian.PutShort(data, pos, r.FontIndex);
            pos += 2;
            // Each run is four bytes (char index + font index).
            stats.recordSize += 4;
            stats.remainingSize -= 4;
        }
    }
    if (IsExtendedText && (field_5_ext_rst != null))
    {
        // ExtRst is not documented, so assume it may be continued on any
        // byte boundary.
        // NOTE(review): chunks by Math.Min(remaining, length - remaining)
        // rather than by the full length (contrast the string branch above);
        // keep in sync with GetRecordSize, which uses the same arithmetic.
        int ammountThatCantFit = field_5_ext_rst.Length - stats.remainingSize;
        int extPos = 0;
        if (ammountThatCantFit > 0)
        {
            while (ammountThatCantFit > 0)
            {
                int ammountWritten = Math.Min(stats.remainingSize, ammountThatCantFit);
                Array.Copy(field_5_ext_rst, extPos, data, pos, ammountWritten);
                pos += ammountWritten;
                extPos += ammountWritten;
                stats.recordSize += ammountWritten;
                stats.remainingSize -= ammountWritten;
                // Subtract what fitted into the current record.
                ammountThatCantFit -= ammountWritten;
                if (ammountThatCantFit > 0)
                {
                    pos = WriteContinueIfRequired(stats, 1, pos, data);
                }
            }
        }
        else
        {
            // The ExtRst data fits wholly in what remains.
            Array.Copy(field_5_ext_rst, 0, data, pos, field_5_ext_rst.Length);
            pos += field_5_ext_rst.Length;
            stats.remainingSize -= field_5_ext_rst.Length;
            stats.recordSize += field_5_ext_rst.Length;
        }
    }
    return pos - offset;
}
/**
 * Starts a new Continue record at offset if the current record does not
 * have requiredSize bytes of space left, first patching the length of the
 * record being closed off.
 *
 * @param stats        running bookkeeping; lastLengthPos and remainingSize are updated
 * @param requiredSize the number of bytes that must fit contiguously
 * @param offset       the current write position in data
 * @param data         the destination buffer
 * @return the write position after any continue header has been emitted
 */
private int WriteContinueIfRequired(UnicodeRecordStats stats, int requiredSize, int offset, byte[] data)
{
    // Nothing to do while the current record still has enough room.
    if (stats.remainingSize >= requiredSize)
        return offset;

    // We are closing off a record whose length was left as a placeholder;
    // go back and fill it in now that we know where it ends.
    if (stats.lastLengthPos != -1)
    {
        short closedLength = (short)(offset - stats.lastLengthPos - 2);
        // A BIFF record body may not exceed 8224 bytes.
        if (closedLength > 8224)
            throw new InvalidDataException();
        LittleEndian.PutShort(data, stats.lastLengthPos, closedLength);
    }

    // Emit the Continue record header: sid, then a 2-byte length placeholder
    // (we don't know the length yet, only remember where it belongs).
    LittleEndian.PutShort(data, offset, ContinueRecord.sid);
    stats.lastLengthPos = offset + 2;
    stats.recordSize += 4;
    stats.remainingSize = SSTRecord.MAX_RECORD_SIZE - 4;
    return offset + 4;
}
/**
 * Encodes a single cached constant value (empty marker, bool, double,
 * string or error constant) at the given offset.
 *
 * Every non-string value occupies a fixed 9 bytes: a 1-byte type code
 * followed by an 8-byte payload. Strings are variable-length.
 *
 * @param data   destination buffer
 * @param offset index at which to write the type code
 * @param value  the value to encode
 * @return the number of bytes written
 * @exception InvalidOperationException if the value's type is not supported
 */
private static int EncodeSingleValue(byte[] data, int offset, Object value)
{
    // The empty marker is a sentinel compared by reference.
    if (value == EMPTY_REPRESENTATION)
    {
        LittleEndian.PutByte(data, offset, TYPE_EMPTY);
        LittleEndian.PutLong(data, offset + 1, 0L);
        return 9;
    }
    if (value is bool)
    {
        bool bVal = ((bool)value);
        LittleEndian.PutByte(data, offset, TYPE_bool);
        long longVal = bVal ? 1L : 0L;
        LittleEndian.PutLong(data, offset + 1, longVal);
        return 9;
    }
    if (value is double)
    {
        double dVal = (double)value;
        LittleEndian.PutByte(data, offset, TYPE_NUMBER);
        LittleEndian.PutDouble(data, offset + 1, dVal);
        return 9;
    }
    if (value is UnicodeString)
    {
        UnicodeString usVal = (UnicodeString)value;
        LittleEndian.PutByte(data, offset, TYPE_STRING);
        UnicodeRecordStats urs = new UnicodeRecordStats();
        usVal.Serialize(urs, offset + 1, data);
        return 1 + urs.recordSize;
    }
    if (value is ErrorConstant)
    {
        ErrorConstant ecVal = (ErrorConstant)value;
        LittleEndian.PutByte(data, offset, TYPE_ERROR_CODE);
        LittleEndian.PutUShort(data, offset + 1, ecVal.ErrorCode);
        // The remaining 6 payload bytes are zero-padded.
        LittleEndian.PutUShort(data, offset + 3, 0);
        LittleEndian.PutInt(data, offset + 5, 0);
        return 9;
    }
    // Specific exception type instead of bare Exception (backward compatible
    // for callers catching Exception); message delimiters fixed ("('" -> "(')").
    throw new InvalidOperationException("Unexpected value type ('" + value.GetType().Name + "')");
}
/**
 * @return encoded size without the 'type' code byte
 */
private static int GetEncodedSize(Object obj)
{
    // The empty marker and all fixed-width value types take exactly 8 bytes.
    if (obj == EMPTY_REPRESENTATION
            || obj.GetType() == typeof(bool)
            || obj.GetType() == typeof(double)
            || obj.GetType() == typeof(ErrorConstant))
    {
        return 8;
    }
    // Only strings have a variable-width encoding; anything else here is a
    // programming error and fails with an invalid cast, as before.
    UnicodeRecordStats sizer = new UnicodeRecordStats();
    ((UnicodeString)obj).GetRecordSize(sizer);
    return sizer.recordSize;
}
/**
 * Called by the class that is responsible for writing this sucker.
 * Subclasses should implement this so that their data is passed back in a
 * byte array.
 *
 * @param offset to begin writing at
 * @param data byte array containing instance data
 * @return number of bytes written
 */
public override int Serialize(int offset, byte[] data)
{
    // Record header: sid then the body size.
    LittleEndian.PutShort(data, offset, sid);
    int dataSize = DataSize;
    LittleEndian.PutShort(data, offset + 2, (short)dataSize);
    LittleEndian.PutShort(data, offset + 4, field_1_number_of_sheets);

    if (IsExternalReferences)
    {
        // External workbook: encoded URL followed by every sheet name.
        int pos = offset + 6;
        UnicodeRecordStats urs = new UnicodeRecordStats();
        field_2_encoded_url.Serialize(urs, pos, data);
        pos += urs.recordSize;
        foreach (UnicodeString sheetName in field_3_sheet_names)
        {
            urs = new UnicodeRecordStats();
            sheetName.Serialize(urs, pos, data);
            pos += urs.recordSize;
        }
    }
    else
    {
        // Internal references / add-in functions use a fixed 2-byte tag.
        short field2val = _isAddInFunctions ? TAG_Add_IN_FUNCTIONS : TAG_INTERNAL_REFERENCES;
        LittleEndian.PutShort(data, offset + 6, field2val);
    }
    // Total bytes written: 4 header bytes plus the body.
    return dataSize + 4;
}
/**
 * Serializes a UnicodeString into data at the given offset and reports
 * how many bytes were written.
 */
private static int SerializeUnicodeString(UnicodeString us, int offset, byte[] data)
{
    UnicodeRecordStats stats = new UnicodeRecordStats();
    us.Serialize(stats, offset, data);
    return stats.recordSize;
}