/// <summary>
/// Reads the first record(s) of a workbook stream to determine whether the
/// document is encrypted. Encryption is indicated by a FILEPASS record
/// following BOF (possibly with an intervening WRITEPROTECT record).
/// </summary>
/// <param name="rs">stream positioned just before the first record</param>
/// <param name="outputRecs">receives the records read here, except any FILEPASS</param>
public StreamEncryptionInfo(RecordInputStream rs, List<Record> outputRecs)
{
    Record rec;
    rs.NextRecord();
    // 4 bytes of sid+length header plus the payload of the first record
    int recSize = 4 + rs.Remaining;
    rec = RecordFactory.CreateSingleRecord(rs);
    outputRecs.Add(rec);
    FilePassRecord fpr = null;
    if (rec is BOFRecord)
    {
        _hasBOFRecord = true;
        // Fetch the next record, and see if it indicates whether
        // the document is encrypted or not
        if (rs.HasNextRecord)
        {
            rs.NextRecord();
            rec = RecordFactory.CreateSingleRecord(rs);
            recSize += rec.RecordSize;
            outputRecs.Add(rec);
            // Encrypted is normally BOF then FILEPASS
            // May sometimes be BOF, WRITEPROTECT, FILEPASS
            if (rec is WriteProtectRecord && rs.HasNextRecord)
            {
                rs.NextRecord();
                rec = RecordFactory.CreateSingleRecord(rs);
                recSize += rec.RecordSize;
                outputRecs.Add(rec);
            }
            // If it's a FILEPASS, track it specifically but
            // don't include it in the main stream
            if (rec is FilePassRecord)
            {
                fpr = (FilePassRecord)rec;
                outputRecs.RemoveAt(outputRecs.Count - 1);
                // TODO - add fpr not Added to outPutRecs
                // rec is reset to the BOF so _lastRecord never exposes the FILEPASS
                rec = outputRecs[0];
            }
            else
            {
                // workbook not encrypted (typical case)
                if (rec is EOFRecord)
                {
                    // A workbook stream is never empty, so crash instead
                    // of trying to keep track of nesting level
                    throw new InvalidOperationException("Nothing between BOF and EOF");
                }
            }
        }
    }
    else
    {
        // Invalid in a normal workbook stream.
        // However, some test cases work on sub-sections of
        // the workbook stream that do not begin with BOF
        _hasBOFRecord = false;
    }
    _InitialRecordsSize = recSize;
    _filePassRec = fpr;
    _lastRecord = rec;
}
/// <summary>
/// Reads the first record(s) of a workbook stream to determine whether the
/// document is encrypted (indicated by a FILEPASS record directly after BOF).
/// </summary>
/// <param name="rs">stream positioned just before the first record</param>
/// <param name="outputRecs">receives the records read here, except any FILEPASS</param>
public StreamEncryptionInfo(RecordInputStream rs, List<Record> outputRecs)
{
    Record rec;
    rs.NextRecord();
    // 4 bytes of sid+length header plus the payload of the first record
    int recSize = 4 + rs.Remaining;
    rec = RecordFactory.CreateSingleRecord(rs);
    outputRecs.Add(rec);
    FilePassRecord fpr = null;
    if (rec is BOFRecord)
    {
        _hasBOFRecord = true;
        // The record directly after BOF tells us whether the stream is encrypted
        if (rs.HasNextRecord)
        {
            rs.NextRecord();
            rec = RecordFactory.CreateSingleRecord(rs);
            recSize += rec.RecordSize;
            outputRecs.Add(rec);
            // A FILEPASS record means the workbook is encrypted; track it
            // separately and keep it out of the main record stream
            if (rec is FilePassRecord)
            {
                fpr = (FilePassRecord)rec;
                outputRecs.RemoveAt(outputRecs.Count - 1);
                // TODO - add fpr not Added to outPutRecs
                rec = outputRecs[0];
            }
            else
            {
                // workbook not encrypted (typical case)
                if (rec is EOFRecord)
                {
                    // A workbook stream is never empty, so crash instead
                    // of trying to keep track of nesting level
                    throw new InvalidOperationException("Nothing between BOF and EOF");
                }
            }
        }
    }
    else
    {
        // Invalid in a normal workbook stream.
        // However, some test cases work on sub-sections of
        // the workbook stream that do not begin with BOF
        _hasBOFRecord = false;
    }
    _InitialRecordsSize = recSize;
    _filePassRec = fpr;
    _lastRecord = rec;
}
/**
 * Constructs an OBJ record and sets its fields by parsing the record body
 * as a nested stream of sub-records.
 *
 * @param in1 the RecordInputStream to read the record from
 */
public ObjRecord(RecordInputStream in1)
{
    subrecords = new ArrayList();
    //Check if this can be continued, if so then the
    //following wont work properly
    int subSize = 0;
    byte[] subRecordData = in1.ReadRemainder();
    // Re-wrap the raw body so sub-records can be parsed with the usual machinery
    RecordInputStream subRecStream = new RecordInputStream(new MemoryStream(subRecordData));
    while (subRecStream.HasNextRecord)
    {
        subRecStream.NextRecord();
        Record subRecord = SubRecord.CreateSubRecord(subRecStream);
        subSize += subRecord.RecordSize;
        subrecords.Add(subRecord);
    }
    /**
     * Add the EndSubRecord explicitly.
     *
     * TODO - the reason the EndSubRecord is always skipped is because its 'sid' is zero and
     * that causes subRecStream.HasNextRecord to return false.
     * There may be more than the size of EndSubRecord left-over, if there is any padding
     * after that record. The content of the EndSubRecord and the padding is all zeros.
     * So there's not much to look at past the last substantial record.
     *
     * See Bugs 41242/45133 for details.
     */
    if (subRecordData.Length - subSize >= 4)
    {
        subrecords.Add(new EndSubRecord());
    }
}
/**
 * Builds a serialized DVAL record by hand, reads it back, and checks every
 * field - in particular DVRecNo, guarding against bug 44510.
 */
public void TestRead()
{
    byte[] buf = new byte[22];
    LittleEndian.PutShort(buf, 0, DVALRecord.sid);
    LittleEndian.PutShort(buf, 2, (short)18);
    LittleEndian.PutShort(buf, 4, (short)55);
    LittleEndian.PutInt(buf, 6, 56);
    LittleEndian.PutInt(buf, 10, 57);
    LittleEndian.PutInt(buf, 14, 58);
    LittleEndian.PutInt(buf, 18, 59);

    RecordInputStream ris = new RecordInputStream(new MemoryStream(buf));
    ris.NextRecord();
    DVALRecord record = new DVALRecord(ris);

    Assert.AreEqual(55, record.Options);
    Assert.AreEqual(56, record.HorizontalPos);
    Assert.AreEqual(57, record.VerticalPos);
    Assert.AreEqual(58, record.ObjectID);
    if (record.DVRecNo == 0)
    {
        Assert.Fail("Identified bug 44510");
    }
    Assert.AreEqual(59, record.DVRecNo);
}
/// <summary>
/// Reads the first record(s) of a workbook stream to determine whether the
/// document is encrypted (indicated by a FILEPASS record directly after BOF).
/// </summary>
/// <param name="rs">stream positioned just before the first record</param>
/// <param name="outputRecs">receives the records read here, except any FILEPASS</param>
public StreamEncryptionInfo(RecordInputStream rs, List<Record> outputRecs)
{
    Record rec;
    rs.NextRecord();
    // 4 bytes of sid+length header plus the payload of the first record
    int recSize = 4 + rs.Remaining;
    rec = RecordFactory.CreateSingleRecord(rs);
    outputRecs.Add(rec);
    FilePassRecord fpr = null;
    if (rec is BOFRecord)
    {
        _hasBOFRecord = true;
        // The record directly after BOF tells us whether the stream is encrypted
        if (rs.HasNextRecord)
        {
            rs.NextRecord();
            rec = RecordFactory.CreateSingleRecord(rs);
            recSize += rec.RecordSize;
            outputRecs.Add(rec);
            // A FILEPASS record means the workbook is encrypted; track it
            // separately and keep it out of the main record stream
            if (rec is FilePassRecord)
            {
                fpr = (FilePassRecord)rec;
                outputRecs.RemoveAt(outputRecs.Count - 1);
                // TODO - add fpr not Added to outPutRecs
                rec = outputRecs[0];
            }
            else
            {
                // workbook not encrypted (typical case)
                if (rec is EOFRecord)
                {
                    // A workbook stream is never empty, so crash instead
                    // of trying to keep track of nesting level
                    throw new InvalidOperationException("Nothing between BOF and EOF");
                }
            }
        }
    }
    else
    {
        // Invalid in a normal workbook stream.
        // However, some test cases work on sub-sections of
        // the workbook stream that do not begin with BOF
        _hasBOFRecord = false;
    }
    _InitialRecordsSize = recSize;
    _filePassRec = fpr;
    _lastRecord = rec;
}
/// <summary>
/// Serializes the given DrawingRecord and opens a RecordInputStream over the
/// resulting bytes, positioned at the start of the record payload.
/// </summary>
private static RecordInputStream ConvertToInputStream(DrawingRecord r)
{
    byte[] serialized = r.Serialize();
    MemoryStream backing = new MemoryStream(serialized);
    RecordInputStream result = new RecordInputStream(backing);
    result.NextRecord();
    return result;
}
/**
 * Deserializes an SSTRecord from raw bytes and asserts that the whole input
 * was consumed.
 *
 * @param rawData serialization of one {@link SSTRecord} and zero or more {@link ContinueRecord}s
 */
private static SSTRecord CreateSSTFromRawData(byte[] rawData)
{
    RecordInputStream ris = new RecordInputStream(new MemoryStream(rawData));
    ris.NextRecord();
    SSTRecord result = new SSTRecord(ris);
    Assert.AreEqual(0, ris.Remaining);
    Assert.IsTrue(!ris.HasNextRecord);
    return result;
}
/// <summary>
/// Serializes the given DrawingRecord and opens a RecordInputStream over the
/// resulting bytes, positioned at the start of the record payload.
/// </summary>
/// <param name="r">the record to serialize</param>
/// <returns>a stream the caller can read the record data from</returns>
private static RecordInputStream ConvertToInputStream(DrawingRecord r)
{
    byte[] data = r.Serialize();
    // BUG FIX: the MemoryStream must NOT be wrapped in a using block here.
    // Disposing it on return would leave the returned RecordInputStream backed
    // by a closed stream, so subsequent reads by the caller would throw
    // ObjectDisposedException. MemoryStream holds no unmanaged resources, so
    // letting the GC reclaim it is safe (matches the sibling implementation
    // of this helper elsewhere in the codebase).
    MemoryStream ms = new MemoryStream(data);
    RecordInputStream rinp = new RecordInputStream(ms);
    rinp.NextRecord();
    return rinp;
}
/**
 * Round-trip test: serialize a default EmbeddedObjectRefSubRecord, read it
 * back, and check the deserialized copy matches the original.
 */
public void TestCreate()
{
    EmbeddedObjectRefSubRecord record1 = new EmbeddedObjectRefSubRecord();

    byte[] ser = record1.Serialize();
    RecordInputStream in2 = new RecordInputStream(new MemoryStream(ser));
    in2.NextRecord();
    // size argument excludes the 4-byte sid+length header
    EmbeddedObjectRefSubRecord record2 = new EmbeddedObjectRefSubRecord(in2, ser.Length - 4);

    Assert.AreEqual(record1.OLEClassName, record2.OLEClassName);
    Assert.AreEqual(record1.StreamId, record2.StreamId);

    // NOTE(review): this re-serializes record1, so the comparison only proves
    // Serialize() is deterministic; record2.Serialize() may have been intended
    // to complete the round-trip - confirm against the upstream test's intent.
    byte[] ser2 = record1.Serialize();
    Assert.IsTrue(Arrays.Equals(ser, ser2));
}
/// <summary>
/// Clones this record by serializing it and parsing the bytes back into a
/// fresh record instance.
/// </summary>
public Record CloneViaReserialise()
{
    // Cheat: round-trip through the serialized form instead of copying fields.
    byte[] bytes = Serialize();
    using (MemoryStream ms = new MemoryStream(bytes))
    {
        RecordInputStream ris = new RecordInputStream(ms);
        ris.NextRecord();
        Record[] parsed = RecordFactory.CreateRecord(ris);
        if (parsed.Length != 1)
        {
            throw new InvalidOperationException("Re-serialised a record to clone it, but got " + parsed.Length + " records back!");
        }
        return parsed[0];
    }
}
/**
 * Clone the current record, via a call to serialise
 * it, and another to Create a new record from the
 * bytes.
 * May only be used for classes which don't have
 * internal counts / ids in them. For those which
 * do, a full record-aware serialise is needed, which
 * allocates new ids / counts as needed.
 */
public override Record CloneViaReserialise()
{
    // Round-trip through the serialised byte form - a cheat, but it works.
    byte[] bytes = this.Serialize();
    RecordInputStream reader = new RecordInputStream(new System.IO.MemoryStream(bytes));
    reader.NextRecord();
    Record[] parsed = RecordFactory.CreateRecord(reader);
    if (parsed.Length != 1)
    {
        throw new InvalidOperationException("Re-serialised a record to Clone it, but got " + parsed.Length + " records back!");
    }
    return parsed[0];
}
/**
 * Returns the next (complete) record from the
 * stream, or null if there are no more.
 */
public Record NextRecord()
{
    Record r;
    // First drain any record that was already read but not yet returned
    r = GetNextUnreadRecord();
    if (r != null)
    {
        // found an unread record
        return (r);
    }
    while (true)
    {
        if (!_recStream.HasNextRecord)
        {
            // recStream is exhausted;
            return (null);
        }
        // step underlying RecordInputStream to the next record
        _recStream.NextRecord();
        if (_lastRecordWasEOFLevelZero)
        {
            // Potential place for ending the workbook stream
            // Check that the next record is not BOFRecord(0x0809)
            // Normally the input stream contains only zero padding after the last EOFRecord,
            // but bug 46987 and 48068 suggests that the padding may be garbage.
            // This code relies on the padding bytes not starting with BOFRecord.sid
            if (_recStream.Sid != BOFRecord.sid)
            {
                return (null);
            }
            // else - another sheet substream starting here
        }
        r = ReadNextRecord();
        if (r == null)
        {
            // some record types may get skipped (e.g. DBCellRecord and ContinueRecord)
            continue;
        }
        return (r);
    }
}
/**
 * Constructs an ExtSSTRecord and sets its fields from the stream.
 *
 * @param in1 the RecordInputStream to read the record from
 */
public ExtSSTRecord(RecordInputStream in1)
{
    field_1_strings_per_bucket = in1.ReadShort();

    // Pre-size the list from the bytes remaining in the current record
    int expectedCount = in1.Remaining / InfoSubRecord.ENCODED_SIZE;
    List<InfoSubRecord> infos = new List<InfoSubRecord>(expectedCount);
    while (in1.Available() > 0)
    {
        infos.Add(new InfoSubRecord(in1));
        // Step over a ContinueRecord boundary if the data carries on into one
        if (in1.Available() == 0 && in1.HasNextRecord && in1.GetNextSid() == ContinueRecord.sid)
        {
            in1.NextRecord();
        }
    }
    _sstInfos = infos.ToArray();
}
/**
 * Constructs an ExtSSTRecord, reading the bucket size and all InfoSubRecords
 * (which may spill into ContinueRecords) from the stream.
 *
 * @param in1 the RecordInputStream to read the record from
 */
public ExtSSTRecord(RecordInputStream in1)
{
    field_1_strings_per_bucket = in1.ReadShort();

    int capacity = in1.Remaining / InfoSubRecord.ENCODED_SIZE;
    List<InfoSubRecord> collected = new List<InfoSubRecord>(capacity);
    while (in1.Available() > 0)
    {
        InfoSubRecord next = new InfoSubRecord(in1);
        collected.Add(next);
        bool atRecordEnd = in1.Available() == 0;
        // When the current record is exhausted, carry on into a ContinueRecord
        if (atRecordEnd && in1.HasNextRecord && in1.GetNextSid() == ContinueRecord.sid)
        {
            in1.NextRecord();
        }
    }
    _sstInfos = collected.ToArray();
}
/// <summary>
/// Clones this record by round-tripping it through its serialized byte form.
/// </summary>
public Record CloneViaReserialise()
{
    // It's a cheat, but it works: serialize, then parse the bytes back.
    byte[] serialized = Serialize();
    using (MemoryStream source = new MemoryStream(serialized))
    {
        RecordInputStream input = new RecordInputStream(source);
        input.NextRecord();
        Record[] records = RecordFactory.CreateRecord(input);
        if (records.Length == 1)
        {
            return records[0];
        }
        throw new InvalidOperationException("Re-serialised a record to clone it, but got " + records.Length + " records back!");
    }
}
/**
 * Constructs a TextObjectRecord (TXO) from the stream. The fixed fields and
 * optional link formula live in the TXO record itself; the text and font
 * runs follow in Continue records.
 *
 * @param in1 the RecordInputStream to read the record from
 */
public TextObjectRecord(RecordInputStream in1)
{
    field_1_options = in1.ReadUShort();
    field_2_textOrientation = in1.ReadUShort();
    field_3_reserved4 = in1.ReadUShort();
    field_4_reserved5 = in1.ReadUShort();
    field_5_reserved6 = in1.ReadUShort();
    int field_6_textLength = in1.ReadUShort();
    int field_7_formattingDataLength = in1.ReadUShort();
    field_8_reserved7 = in1.ReadInt();
    if (in1.Remaining > 0)
    {
        // Text Objects can have simple reference formulas
        // (This bit not mentioned in the MS document)
        if (in1.Remaining < 11)
        {
            throw new RecordFormatException("Not enough remaining data for a link formula");
        }
        int formulaSize = in1.ReadUShort();
        _unknownPreFormulaInt = in1.ReadInt();
        Ptg[] ptgs = Ptg.ReadTokens(formulaSize, in1);
        if (ptgs.Length != 1)
        {
            throw new RecordFormatException("Read " + ptgs.Length + " tokens but expected exactly 1");
        }
        _linkRefPtg = ptgs[0];
        if (in1.Remaining > 0)
        {
            _unknownPostFormulaByte = (byte)in1.ReadByte();
        }
        else
        {
            _unknownPostFormulaByte = null;
        }
    }
    else
    {
        _linkRefPtg = null;
    }
    if (in1.Remaining > 0)
    {
        throw new RecordFormatException("Unused " + in1.Remaining + " bytes at end of record");
    }
    // The text itself comes after the TXO record proper.
    // NOTE(review): Remaining is 0 at this point, so ReadRawString presumably
    // advances in1 into the following Continue record - confirm.
    String text;
    if (field_6_textLength > 0)
    {
        text = ReadRawString(in1, field_6_textLength);
    }
    else
    {
        text = "";
    }
    _text = new HSSFRichTextString(text);
    if (field_7_formattingDataLength > 0)
    {
        // Font runs are held in a further Continue record
        if (in1.IsContinueNext && in1.Remaining == 0)
        {
            in1.NextRecord();
            ProcessFontRuns(in1, _text, field_7_formattingDataLength);
        }
        else
        {
            throw new RecordFormatException(
                "Expected Continue Record to hold font runs for TextObjectRecord");
        }
    }
}
/**
 * Checks that a TXO record carrying a link formula parses to a single
 * RefPtg ("T2") and that the record re-serializes to the original bytes.
 */
public void TestLinkFormula()
{
    RecordInputStream input = new RecordInputStream(new MemoryStream(linkData));
    input.NextRecord();
    TextObjectRecord record = new TextObjectRecord(input);

    Ptg linkPtg = record.LinkRefPtg;
    Assert.IsNotNull(linkPtg);
    Assert.AreEqual(typeof(RefPtg), linkPtg.GetType());
    RefPtg refPtg = (RefPtg)linkPtg;
    Assert.AreEqual("T2", refPtg.ToFormulaString());

    byte[] reserialized = record.Serialize();
    Assert.AreEqual(linkData.Length, reserialized.Length);
    Assert.IsTrue(Arrays.Equals(linkData, reserialized));
}
/**
 * Serializes and re-reads TextObjectRecords holding strings of several
 * lengths (large enough to span Continue records) and checks the text
 * survives the round trip.
 */
public void TestLongRecords()
{
    // test against strings of different length
    int[] lengths = { 1024, 2048, 4096, 8192, 16384 };
    foreach (int len in lengths)
    {
        string expected = new string('x', len);
        TextObjectRecord outRecord = new TextObjectRecord();
        outRecord.Str = new HSSFRichTextString(expected);

        byte[] data = outRecord.Serialize();
        RecordInputStream input = new RecordInputStream(new MemoryStream(data));
        input.NextRecord();
        TextObjectRecord inRecord = new TextObjectRecord(input);

        IRichTextString actual = inRecord.Str;
        Assert.AreEqual(expected.Length, actual.Length);
        Assert.AreEqual(expected, actual.String);
    }
}
/**
 * Reads all records from an input stream, passing each one to
 * ProcessRecord. Records are held back one step (in last_record) so the
 * final record is only delivered once the stream is fully read.
 *
 * @param in1 the InputStream from which the records will be
 * obtained
 *
 * @exception RecordFormatException on error processing the
 * InputStream
 */
public void ProcessRecords(Stream in1)
{
    Record last_record = null;
    RecordInputStream recStream = new RecordInputStream(in1);
    while (recStream.HasNextRecord)
    {
        recStream.NextRecord();
        Record[] recs = RecordFactory.CreateRecord(recStream);
        // handle MulRK records
        if (recs.Length > 1)
        {
            for (int k = 0; k < recs.Length; k++)
            {
                if (last_record != null)
                {
                    // a false return from ProcessRecord aborts the whole run
                    if (!ProcessRecord(last_record))
                    {
                        return;
                    }
                }
                last_record = recs[k]; // do to keep the algorithm homogeneous...you can't
            } // actually continue a number record anyhow.
        }
        else
        {
            Record record = recs[0];
            if (record != null)
            {
                if (last_record != null)
                {
                    if (!ProcessRecord(last_record))
                    {
                        return;
                    }
                }
                last_record = record;
            }
        }
    }
    // deliver the final buffered record
    if (last_record != null)
    {
        ProcessRecord(last_record);
    }
}
/**
 * Parses a hand-built FORMULA record for =CHOOSE(2,A2,A3,A4) and checks the
 * resulting token sequence (Int, Attr jump table, three Ref/Attr pairs, and
 * the final CHOOSE FuncVarPtg).
 */
public void TestWithConcat()
{
    // =CHOOSE(2,A2,A3,A4)
    byte[] data = {
        6, 0, 68, 0,
        1, 0, 1, 0, 15, 0, 0, 0, 0, 0, 0, 0, 57,
        64, 0, 0, 12, 0, 12, unchecked((byte)-4), 46, 0,
        30, 2, 0, // Int - 2
        25, 4, 3, 0, // Attr
        8, 0, 17, 0, 26, 0, // jumpTable
        35, 0, // chooseOffSet
        36, 1, 0, 0, unchecked((byte)-64), // Ref - A2
        25, 8, 21, 0, // Attr
        36, 2, 0, 0, unchecked((byte)-64), // Ref - A3
        25, 8, 12, 0, // Attr
        36, 3, 0, 0, unchecked((byte)-64), // Ref - A4
        25, 8, 3, 0, // Attr
        66, 4, 100, 0 // CHOOSE
    };
    RecordInputStream inp = new RecordInputStream(new MemoryStream(data));
    inp.NextRecord();

    FormulaRecord fr = new FormulaRecord(inp);

    Ptg[] ptgs = fr.ParsedExpression;
    Assert.AreEqual(9, ptgs.Length);
    Assert.AreEqual(typeof(IntPtg), ptgs[0].GetType());
    Assert.AreEqual(typeof(AttrPtg), ptgs[1].GetType());
    Assert.AreEqual(typeof(RefPtg), ptgs[2].GetType());
    Assert.AreEqual(typeof(AttrPtg), ptgs[3].GetType());
    Assert.AreEqual(typeof(RefPtg), ptgs[4].GetType());
    Assert.AreEqual(typeof(AttrPtg), ptgs[5].GetType());
    Assert.AreEqual(typeof(RefPtg), ptgs[6].GetType());
    Assert.AreEqual(typeof(AttrPtg), ptgs[7].GetType());
    Assert.AreEqual(typeof(FuncVarPtg), ptgs[8].GetType());

    FuncVarPtg choose = (FuncVarPtg)ptgs[8];
    Assert.AreEqual("CHOOSE", choose.Name);
}
/// <summary>
/// Scans the start of a workbook stream to determine whether it is
/// encrypted. Encryption is indicated by a FILEPASS record after BOF,
/// possibly with an intervening WRITEPROTECT record.
/// </summary>
/// <param name="rs">stream positioned just before the first record</param>
/// <param name="outputRecs">receives the records read here, except any FILEPASS</param>
public StreamEncryptionInfo(RecordInputStream rs, List<Record> outputRecs)
{
    Record rec;
    rs.NextRecord();
    // 4 bytes of sid+length header plus the payload of the first record
    int recSize = 4 + rs.Remaining;
    rec = RecordFactory.CreateSingleRecord(rs);
    outputRecs.Add(rec);
    FilePassRecord fpr = null;
    if (rec is BOFRecord)
    {
        _hasBOFRecord = true;
        // Fetch the next record, and see if it indicates whether
        // the document is encrypted or not
        if (rs.HasNextRecord)
        {
            rs.NextRecord();
            rec = RecordFactory.CreateSingleRecord(rs);
            recSize += rec.RecordSize;
            outputRecs.Add(rec);
            // Encrypted is normally BOF then FILEPASS
            // May sometimes be BOF, WRITEPROTECT, FILEPASS
            if (rec is WriteProtectRecord && rs.HasNextRecord)
            {
                rs.NextRecord();
                rec = RecordFactory.CreateSingleRecord(rs);
                recSize += rec.RecordSize;
                outputRecs.Add(rec);
            }
            // If it's a FILEPASS, track it specifically but
            // don't include it in the main stream
            if (rec is FilePassRecord)
            {
                fpr = (FilePassRecord)rec;
                outputRecs.RemoveAt(outputRecs.Count - 1);
                // TODO - add fpr not Added to outPutRecs
                rec = outputRecs[0];
            }
            else
            {
                // workbook not encrypted (typical case)
                if (rec is EOFRecord)
                {
                    // A workbook stream is never empty, so crash instead
                    // of trying to keep track of nesting level
                    throw new InvalidOperationException("Nothing between BOF and EOF");
                }
            }
        }
    }
    else
    {
        // Invalid in a normal workbook stream.
        // However, some test cases work on sub-sections of
        // the workbook stream that do not begin with BOF
        _hasBOFRecord = false;
    }
    _InitialRecordsSize = recSize;
    _filePassRec = fpr;
    _lastRecord = rec;
}
/**
 * Reads a UnicodeString's fields from the stream, handling strings that
 * span Continue records (where the compressed/uncompressed flag can change
 * at each record boundary).
 *
 * @param in1 the RecordInputStream to read the record from
 */
protected void FillFields(RecordInputStream in1)
{
    field_1_charCount = in1.ReadShort();
    field_2_optionflags = (byte)in1.ReadByte();

    int runCount = 0;
    int extensionLength = 0;
    //Read the number of rich runs if rich text.
    if (IsRichText)
    {
        runCount = in1.ReadShort();
    }
    //Read the size of extended data if present.
    if (IsExtendedText)
    {
        extensionLength = in1.ReadInt();
    }

    //Now need to Get the string data.
    //Turn off autocontinuation so that we can catch the continue boundary
    in1.AutoContinue = false;
    StringBuilder tmpString = new StringBuilder(field_1_charCount);
    int stringCharCount = field_1_charCount;
    // Bit 0 of the option flags: 0 = compressed (8-bit chars), 1 = 16-bit chars
    bool isCompressed = ((field_2_optionflags & 1) == 0);
    while (stringCharCount != 0)
    {
        if (in1.Remaining == 0)
        {
            if (in1.IsContinueNext)
            {
                in1.NextRecord();
                //Check if we are now reading compressed or uncompressed unicode:
                //each continue record restates the compression flag
                byte optionflags = (byte)in1.ReadByte();
                isCompressed = ((optionflags & 1) == 0);
            }
            else
                throw new RecordFormatException("Expected continue record.");
        }
        if (isCompressed)
        {
            //Typecast direct to char from byte with high bit set causes all ones
            //in the high byte of the char (which is of course incorrect)
            char ch = (char)((short)0xff & (short)in1.ReadByte());
            tmpString.Append(ch);
        }
        else
        {
            char ch = (char)in1.ReadShort();
            tmpString.Append(ch);
        }
        stringCharCount--;
    }
    field_3_string = tmpString.ToString();
    //Turn back on autocontinuation
    in1.AutoContinue = true;

    if (this.IsRichText && (runCount > 0))
    {
        field_4_format_runs = new ArrayList(runCount);
        for (int i = 0; i < runCount; i++)
        {
            field_4_format_runs.Add(new FormatRun(in1.ReadShort(), in1.ReadShort()));
            //Read reserved
            //in.ReadInt();
        }
    }

    if (IsExtendedText && (extensionLength > 0))
    {
        field_5_ext_rst = new byte[extensionLength];
        for (int i = 0; i < extensionLength; i++)
        {
            field_5_ext_rst[i] = (byte)in1.ReadByte();
        }
    }
}
/**
 * Round-trips an SSTRecord containing strings large enough to force the
 * record to spill into multiple Continue records, first with all 8-bit
 * strings, then with a mix of 8-bit and 16-bit strings.
 */
public void TestHugeStrings()
{
    SSTRecord record = new SSTRecord();
    byte[][] bstrings = {
        new byte[9000], new byte[7433], new byte[9002],
        new byte[16998]
    };
    UnicodeString[] strings = new UnicodeString[bstrings.Length];
    int total_length = 0;

    for (int k = 0; k < bstrings.Length; k++)
    {
        Arrays.Fill(bstrings[k], (byte)('a' + k));
        strings[k] = new UnicodeString(new String(ConvertByteToChar(bstrings[k])));
        record.AddString(strings[k]);
        // 3 bytes of per-string header plus the character data
        total_length += 3 + bstrings[k].Length;
    }

    // add overhead of SST record
    total_length += 8;

    // add overhead of broken strings
    total_length += 4;

    // add overhead of six records
    total_length += (6 * 4);
    byte[] content = new byte[record.RecordSize];
    record.Serialize(0, content);
    Assert.AreEqual(total_length, content.Length);

    //Deserialize the record.
    RecordInputStream recStream = new RecordInputStream(new MemoryStream(content));
    recStream.NextRecord();
    record = new SSTRecord(recStream);

    Assert.AreEqual(strings.Length, record.NumStrings);
    Assert.AreEqual(strings.Length, record.NumUniqueStrings);
    Assert.AreEqual(strings.Length, record.CountStrings);
    for (int k = 0; k < strings.Length; k++)
    {
        Assert.AreEqual(strings[k], record.GetString(k));
    }

    // Second pass: shrink one string by a byte and rebuild with a mix of
    // byte-based and 16-bit-char strings, then round-trip again
    record = new SSTRecord();
    bstrings[1] = new byte[bstrings[1].Length - 1];
    for (int k = 0; k < bstrings.Length; k++)
    {
        if ((bstrings[k].Length % 2) == 1)
        {
            // odd length: string built from byte data
            Arrays.Fill(bstrings[k], (byte)('a' + k));
            strings[k] = new UnicodeString(new String(ConvertByteToChar(bstrings[k])));
        }
        else
        {
            // even length: string built from half as many 16-bit chars
            char[] data = new char[bstrings[k].Length / 2];
            Arrays.Fill(data, (char)('\u2122' + k));
            strings[k] = new UnicodeString(new String(data));
        }
        record.AddString(strings[k]);
    }
    content = new byte[record.RecordSize];
    record.Serialize(0, content);
    // one byte was removed from bstrings[1] above
    total_length--;
    Assert.AreEqual(total_length, content.Length);
    recStream = new RecordInputStream(new MemoryStream(content));
    recStream.NextRecord();
    record = new SSTRecord(recStream);
    Assert.AreEqual(strings.Length, record.NumStrings);
    Assert.AreEqual(strings.Length, record.NumUniqueStrings);
    Assert.AreEqual(strings.Length, record.CountStrings);
    for (int k = 0; k < strings.Length; k++)
    {
        Assert.AreEqual(strings[k], record.GetString(k));
    }
}
/**
 * Clones this record by serialising it and then parsing a fresh record
 * back out of the bytes.
 * May only be used for classes which don't have internal counts / ids in
 * them; those need a full record-aware serialise that allocates new
 * ids / counts as needed.
 */
public override Record CloneViaReserialise()
{
    // A cheat, but it works: round-trip through the serialised form.
    byte[] serialised = this.Serialize();
    RecordInputStream reader = new RecordInputStream(new System.IO.MemoryStream(serialised));
    reader.NextRecord();
    Record[] results = RecordFactory.CreateRecord(reader);
    if (results.Length == 1)
    {
        return results[0];
    }
    throw new InvalidOperationException("Re-serialised a record to Clone it, but got " + results.Length + " records back!");
}
/**
 * Wraps raw formula token bytes in a dummy record (sid -5555) and parses the
 * first token as one of the four supported ref/area Ptg types, or null for
 * any other token type.
 */
private static Ptg ReadRefPtg(byte[] formulaRawBytes)
{
    // Fake record layout: 2-byte sid, 2-byte length, then the token bytes
    byte[] buffer = new byte[formulaRawBytes.Length + 4];
    LittleEndian.PutUShort(buffer, 0, -5555);
    LittleEndian.PutUShort(buffer, 2, formulaRawBytes.Length);
    System.Array.Copy(formulaRawBytes, 0, buffer, 4, formulaRawBytes.Length);

    RecordInputStream input = new RecordInputStream(new MemoryStream(buffer));
    input.NextRecord();

    byte ptgSid = (byte)input.ReadByte();
    if (ptgSid == AreaPtg.sid)
    {
        return new AreaPtg(input);
    }
    if (ptgSid == Area3DPtg.sid)
    {
        return new Area3DPtg(input);
    }
    if (ptgSid == RefPtg.sid)
    {
        return new RefPtg(input);
    }
    if (ptgSid == Ref3DPtg.sid)
    {
        return new Ref3DPtg(input);
    }
    return null;
}
/// <summary>
/// First 4 bytes of data are assumed to be record identifier and length. The supplied
/// data can contain multiple records (sequentially encoded in the same way)
/// </summary>
/// <param name="data">serialized record bytes</param>
/// <returns>a stream positioned at the first record</returns>
public static RecordInputStream Create(byte[] data)
{
    RecordInputStream stream = new RecordInputStream(new MemoryStream(data));
    stream.NextRecord();
    return stream;
}