Example #1
 public bool Equals(ItemType other)
 {
     if (other == null)
     {
         return(false);
     }

     return(TypeValuesEqual(TypeValues, other.TypeValues) &&
            TypeHeader.Equals(other.TypeHeader) &&
            TypeName.Equals(other.TypeName) &&
            SelectedItem.Equals(other.SelectedItem));
 }
Example #2
        public void Write_AdditionalData()
        {
            var typeHeader    = new TypeHeader("Part", 0x13, new[] { 0x2, 0x1, 0x3 }, new byte[] { 0x1, 0x1, 0x1 });
            var expectedBytes = new byte[] { 0x13, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x50, 0x61, 0x72, 0x74, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x01, 0x04, 0x01, 0x01, 0x01 };

            var bytes = typeHeader.Serialize();

            Assert.AreEqual(expectedBytes, bytes);
        }
Example #3
        public void Read_AdditionalData()
        {
            var data = new byte[] { 0x13, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x50, 0x61, 0x72, 0x74, 0x01, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x04, 0x02, 0x06, 0x01, 0x02, 0x03 };

            var parsed = TypeHeader.Deserialize(data);

            Assert.AreEqual("Part", parsed.Name);
            Assert.AreEqual(0x13, parsed.TypeId);
            Assert.AreEqual(3, parsed.InstanceCount);
            Assert.AreEqual(new[] { 2, 3, 6 }, parsed.Referents);
            Assert.AreEqual(new byte[] { 0x1, 0x2, 0x3 }, parsed.AdditionalData);
        }
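Read together, Examples #2 and #3 suggest that a serialized TypeHeader begins with a fixed prefix: a little-endian int32 type id, an int32 name length, the UTF-8 name bytes, a single format byte, and an int32 instance count, followed by the encoded referents and AdditionalData. The snippet below is only an illustration of that assumed prefix against the Example #3 test bytes; it is not the library's actual Deserialize, and the referent/AdditionalData encodings are deliberately left out.

        // Hypothetical illustration of the assumed fixed prefix (little-endian host assumed).
        using System;
        using System.Text;

        static class TypeHeaderPrefixDemo
        {
            static void Main()
            {
                var data = new byte[] { 0x13, 0x00, 0x00, 0x00, 0x04, 0x00, 0x00, 0x00, 0x50, 0x61, 0x72, 0x74, 0x01, 0x03, 0x00, 0x00, 0x00 };

                int    typeId        = BitConverter.ToInt32(data, 0);                   // 0x13
                int    nameLength    = BitConverter.ToInt32(data, 4);                   // 4
                string name          = Encoding.UTF8.GetString(data, 8, nameLength);    // "Part"
                byte   formatByte    = data[8 + nameLength];                            // 0x01
                int    instanceCount = BitConverter.ToInt32(data, 8 + nameLength + 1);  // 3

                Console.WriteLine($"{name} (id 0x{typeId:X}): {instanceCount} instance(s), format byte 0x{formatByte:X2}");
            }
        }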
Example #4
        public unsafe void Serialization()
        {
            var rand = new Random(20121230);

            for (int i = 0; i < 1000; i += 1)
            {
                int kind    = rand.Next(0, 4);          // upper bound is exclusive, so this yields TypeKind values 0-3
                int ordinal = rand.Next(1, short.MaxValue);
                int length  = 0;

                if (kind == 3)
                {
                    length = rand.Next(8, int.MaxValue);
                }

                var head1 = new TypeHeader
                {
                    Kind    = (TypeKind)kind,
                    Size    = length,
                    Ordinal = ordinal,
                };
                var head2 = new TypeHeader();

                Assert.Equal((TypeKind)kind, head1.Kind);
                Assert.Equal(length, head1.Size);
                Assert.Equal(ordinal, head1.Ordinal);

                Assert.NotEqual(head1, head2);

                Assert.Equal((TypeKind)0, head2.Kind);
                Assert.Equal(0, head2.Size);
                Assert.Equal(0, head2.Ordinal);

                Guid buf = Guid.Empty;

                int size = head1.Serialize((byte *)&buf, sizeof(Guid));
                int read = head2.Deserialize((byte *)&buf, sizeof(Guid));

                Assert.Equal(size, read);
                Assert.Equal(head1, head2, new TypeHeaderComparer());
            }
        }
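Example #4 compares the round-tripped headers with a TypeHeaderComparer, whose definition is not part of these examples. A minimal sketch of what such an equality comparer could look like, assuming equality is defined over the three properties the test exercises (Kind, Size, Ordinal); the real comparer may well cover more fields:

        // Hypothetical sketch only; not the TypeHeaderComparer used by the test above.
        using System;
        using System.Collections.Generic;

        public sealed class TypeHeaderComparer : IEqualityComparer<TypeHeader>
        {
            public bool Equals(TypeHeader x, TypeHeader y)
            {
                // Null handling omitted; the test always passes freshly constructed headers.
                return x.Kind == y.Kind &&
                       x.Size == y.Size &&
                       x.Ordinal == y.Ordinal;
            }

            public int GetHashCode(TypeHeader obj)
            {
                return HashCode.Combine(obj.Kind, obj.Size, obj.Ordinal);
            }
        }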
Example #5
 public void Register(params Assembly[] assemblies)
 {
     TypeHeader.Register(assemblies);
 }
Example #6
 static JsonClientPacket()
 {
     TypeHeader.Register(typeof(JsonClientPacket).Assembly);
 }
Example #7
 static ProtobufClientPacket()
 {
     TypeHeader.Register(typeof(ProtobufClientPacket).Assembly);
 }
Example #8
        public void Save(Stream stream)
        {
            var writer     = new EndianAwareBinaryWriter(stream);
            var serializer = new RobloxSerializer(this);

            ReferentProvider.ClearCache(); // Clearing existing referent cache guarantees that referents won't be fragmented
            var instances  = GetChildFirstInstanceEnumerator().ToArray();
            var typeGroups = instances.GroupBy(n => n.ClassName).OrderBy(n => n.Key).ToDictionary(n => n.Key, n => n.ToArray());

            var typeCount   = typeGroups.Count;
            var objectCount = typeGroups.Sum(pair => pair.Value.Length);

            writer.WriteBytes(Signatures.Signature); // File signature
            writer.WriteInt32(typeCount);            // Generic header values
            writer.WriteInt32(objectCount);
            writer.WriteInt32(0);                    // Reserved
            writer.WriteInt32(0);                    // Reserved

            // Write type headers
            var typeHeaders = new TypeHeader[typeCount];
            var nextTypeId  = 0;

            foreach (var typeGroup in typeGroups)
            {
                var typeHeader = new TypeHeader(typeGroup.Key, nextTypeId, typeGroup.Value.Select(n => ReferentProvider.GetReferent(n)).ToArray());
                if (IsSingleton(typeGroup))
                {
                    typeHeader.AdditionalData = new byte[typeHeader.InstanceCount];
                    for (var i = 0; i < typeHeader.InstanceCount; i++)
                    {
                        typeHeader.AdditionalData[i] = 0x1;
                    }
                }
                typeHeaders[nextTypeId] = typeHeader;
                var bytes = typeHeader.Serialize();
                writer.WriteBytes(Signatures.TypeHeaderSignature);
                RobloxLZ4.WriteBlock(stream, bytes);
                nextTypeId++;
            }

            // Write property data
            foreach (var typeGroup in typeGroups)
            {
                var typeHeader     = typeHeaders.First(n => n.Name == typeGroup.Key);
                var instanceTypes  = serializer.GetUniqueProperties(typeGroup.Value);
                var propertyBlocks = instanceTypes.Select(propertyDescriptor => serializer.FillPropertyBlock(propertyDescriptor.Name, propertyDescriptor.Type, typeHeader.TypeId, typeGroup.Value, ReferentProvider)).ToList();
                foreach (var propertyBlock in propertyBlocks)
                {
                    var bytes = propertyBlock.Serialize();
                    writer.WriteBytes(Signatures.PropBlockSignature);
                    RobloxLZ4.WriteBlock(stream, bytes);
                }
            }

            // Build parent child referent arrays
            var parentData = Util.BuildParentData(instances, ReferentProvider);

            var parentDataBytes = Util.SerializeParentData(parentData);

            writer.WriteBytes(Signatures.ParentDataSignature);
            RobloxLZ4.WriteBlock(stream, parentDataBytes);

            // Write ending signature
            writer.WriteBytes(Signatures.EndSignature);
            writer.WriteBytes(Signatures.FileEndSignature);
        }
Example #9
        internal static void ReadRaw(EndianAwareBinaryReader reader, out int typeCount, out int objectCount, out TypeHeader[] typeHeaders, out Dictionary <int, List <PropertyBlock> > propertyData, out Tuple <int, int>[] childParentPairs)
        {
            // Check file signature
            var signatureBytes = reader.ReadBytes(Signatures.Signature.Length);

            if (!signatureBytes.SequenceEqual(Signatures.Signature))
            {
                throw new InvalidRobloxFileException("The file signature does not match.");
            }

            typeCount   = reader.ReadInt32();
            objectCount = reader.ReadInt32();
            reader.ReadInt32(); // Reserved
            reader.ReadInt32(); // Reserved

            // Deserialize type headers
            typeHeaders = new TypeHeader[typeCount];
            for (var i = 0; i < typeCount; i++)
            {
                var typeHeaderSignature = reader.ReadBytes(Signatures.TypeHeaderSignature.Length);
                if (!typeHeaderSignature.SequenceEqual(Signatures.TypeHeaderSignature))
                {
                    throw new InvalidRobloxFileException("Invalid type header signature.");
                }

                var decompressedBytes = RobloxLZ4.ReadBlock(reader.Stream);
                var typeHeader        = TypeHeader.Deserialize(decompressedBytes);
                typeHeaders[i] = typeHeader;
            }

            // Read property data
            propertyData = new Dictionary <int, List <PropertyBlock> >(); // Key is type id
            byte[] lastPropSignature;

            while (true)
            {
                lastPropSignature = reader.ReadBytes(Signatures.PropBlockSignature.Length);
                if (!lastPropSignature.SequenceEqual(Signatures.PropBlockSignature))
                {
                    break;
                }

                var decompressedBytes = RobloxLZ4.ReadBlock(reader.Stream);
                var propertyBlock     = PropertyBlock.Deserialize(decompressedBytes, typeHeaders);

                if (propertyBlock == null)
                {
                    continue;
                }

                if (!propertyData.ContainsKey(propertyBlock.TypeId))
                {
                    propertyData.Add(propertyBlock.TypeId, new List <PropertyBlock>());
                }
                propertyData[propertyBlock.TypeId].Add(propertyBlock);
            }

            if (!lastPropSignature.SequenceEqual(Signatures.ParentDataSignature))
            {
                throw new InvalidRobloxFileException("Missing parent data section.");
            }
            var parentData = RobloxLZ4.ReadBlock(reader.Stream);

            childParentPairs = Util.ReadParentData(parentData);

            var endSignature = reader.ReadBytes(Signatures.EndSignature.Length);

            if (!endSignature.SequenceEqual(Signatures.EndSignature))
            {
                throw new InvalidRobloxFileException("End signature is missing or invalid.");
            }
        }
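The out parameters of ReadRaw line up by type id: the keys of propertyData match TypeHeader.TypeId, and childParentPairs carries the child/parent referent pairs read from the parent data block. A hypothetical consumer is sketched below; it assumes it lives in the same class as ReadRaw, and the construction of the EndianAwareBinaryReader is not shown in these examples.

        // Hypothetical consumer of ReadRaw; only the members used above are taken from the examples.
        internal static void DumpTypes(EndianAwareBinaryReader reader)
        {
            ReadRaw(reader, out var typeCount, out var objectCount,
                    out var typeHeaders, out var propertyData, out var childParentPairs);

            Console.WriteLine($"{typeCount} type(s), {objectCount} object(s), {childParentPairs.Length} parent link(s)");

            foreach (var typeHeader in typeHeaders)
            {
                // Property blocks are optional for a type; fall back to zero when none were read.
                var blockCount = propertyData.TryGetValue(typeHeader.TypeId, out var blocks) ? blocks.Count : 0;
                Console.WriteLine($"{typeHeader.Name}: {typeHeader.InstanceCount} instance(s), {blockCount} property block(s)");
            }
        }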
Example #10
        // read one package
        private async Task <Pair <ResourcePackage, PackageHeader> > readPackage(PackageHeader packageHeader)
        {
            Pair <ResourcePackage, PackageHeader> pair = new Pair <ResourcePackage, PackageHeader>();
            // wrap the package header that readChunkHeader has already consumed
            ResourcePackage resourcePackage = new ResourcePackage(packageHeader);

            pair.setLeft(resourcePackage);

            long beginPos = buffer.position();

            // read type string pool
            if (packageHeader.getTypeStrings() > 0)
            {
                buffer.position((int)(beginPos + packageHeader.getTypeStrings()
                                      - packageHeader.getHeaderSize()));
                resourcePackage.setTypeStringPool(await ParseUtils.readStringPool(buffer,
                                                                                  (StringPoolHeader)await readChunkHeader()));
            }

            //read key string pool
            if (packageHeader.getKeyStrings() > 0)
            {
                buffer.position((int)(beginPos + packageHeader.getKeyStrings()
                                      - packageHeader.getHeaderSize()));
                resourcePackage.setKeyStringPool(await ParseUtils.readStringPool(buffer,
                                                                                 (StringPoolHeader)await readChunkHeader()));
            }


            while (buffer.hasRemaining())
            {
                ChunkHeader chunkHeader = await readChunkHeader();

                long chunkBegin = buffer.position();
                switch (chunkHeader.getChunkType())
                {
                case ChunkType.TABLE_TYPE_SPEC:
                    TypeSpecHeader typeSpecHeader = (TypeSpecHeader)chunkHeader;
                    long[]         entryFlags     = new long[(int)typeSpecHeader.getEntryCount()];
                    for (int i = 0; i < typeSpecHeader.getEntryCount(); i++)
                    {
                        entryFlags[i] = Buffers.readUInt(buffer);
                    }

                    TypeSpec typeSpec = new TypeSpec(typeSpecHeader);


                    typeSpec.setEntryFlags(entryFlags);
                    //id start from 1
                    typeSpec.setName(resourcePackage.getTypeStringPool()
                                     .get(typeSpecHeader.getId() - 1));

                    resourcePackage.addTypeSpec(typeSpec);
                    buffer.position((int)(chunkBegin + typeSpecHeader.getBodySize()));
                    break;

                case ChunkType.TABLE_TYPE:
                    TypeHeader typeHeader = (TypeHeader)chunkHeader;
                    // read offsets table
                    long[] offsets = new long[(int)typeHeader.getEntryCount()];
                    for (int i = 0; i < typeHeader.getEntryCount(); i++)
                    {
                        offsets[i] = Buffers.readUInt(buffer);
                    }

                    RType type = new RType(typeHeader);
                    type.setName(resourcePackage.getTypeStringPool().get(typeHeader.getId() - 1));
                    long entryPos = chunkBegin + typeHeader.getEntriesStart() - typeHeader.getHeaderSize();
                    buffer.position((int)entryPos);
                    ByteBuffer b = await buffer.slice();

                    await b.order(byteOrder);

                    type.setBuffer(b);
                    type.setKeyStringPool(resourcePackage.getKeyStringPool());
                    type.setOffsets(offsets);
                    type.setStringPool(stringPool);
                    resourcePackage.addType(type);
                    locales.Add(type.getLocale());
                    buffer.position((int)(chunkBegin + typeHeader.getBodySize()));
                    break;

                case ChunkType.TABLE_PACKAGE:
                    // another package starts here; hand its header back so the caller can read the next package
                    pair.setRight((PackageHeader)chunkHeader);
                    return(pair);

                case ChunkType.TABLE_LIBRARY:
                    // read entries
                    LibraryHeader libraryHeader = (LibraryHeader)chunkHeader;
                    for (long i = 0; i < libraryHeader.getCount(); i++)
                    {
                        int    packageId = buffer.getInt();
                        string name      = await Buffers.readZeroTerminatedString(buffer, 128);

                        LibraryEntry entry = new LibraryEntry(packageId, name);
                        //TODO: now just skip it..
                    }
                    buffer.position((int)(chunkBegin + chunkHeader.getBodySize()));
                    break;

                default:
                    throw new Exception("unexpected chunk type: 0x" + chunkHeader.getChunkType().ToString("X"));
                }
            }

            return(pair);
        }
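readPackage returns as soon as it encounters the next TABLE_PACKAGE chunk, handing that chunk's header back in the right side of the pair, so the caller is expected to keep looping until no further package header comes back. A hypothetical driver for that hand-off is sketched below; getLeft/getRight and resourceTable.addPackage are assumed names that do not appear in these examples.

        // Hypothetical driver loop for readPackage; names not shown above are assumptions.
        private async Task readPackages(PackageHeader firstPackageHeader)
        {
            PackageHeader packageHeader = firstPackageHeader;

            while (packageHeader != null)
            {
                // parse one package; the right side of the pair is the header of the
                // following package chunk, or null when the buffer is exhausted
                Pair <ResourcePackage, PackageHeader> pair = await readPackage(packageHeader);
                resourceTable.addPackage(pair.getLeft());
                packageHeader = pair.getRight();
            }
        }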
Example #11
        private async Task <ChunkHeader> readChunkHeader()
        {
            long begin = buffer.position();

            int chunkType = await Buffers.readUShort(buffer);

            int headerSize = await Buffers.readUShort(buffer);

            long chunkSize = Buffers.readUInt(buffer);

            switch (chunkType)
            {
            case ChunkType.TABLE:
                ResourceTableHeader resourceTableHeader = new ResourceTableHeader(chunkType,
                                                                                  headerSize, chunkSize);
                resourceTableHeader.setPackageCount(Buffers.readUInt(buffer));
                buffer.position((int)(begin + headerSize));
                return(resourceTableHeader);

            case ChunkType.STRING_POOL:
                StringPoolHeader stringPoolHeader = new StringPoolHeader(chunkType, headerSize,
                                                                         chunkSize);
                stringPoolHeader.setStringCount(Buffers.readUInt(buffer));
                stringPoolHeader.setStyleCount(Buffers.readUInt(buffer));
                stringPoolHeader.setFlags(Buffers.readUInt(buffer));
                stringPoolHeader.setStringsStart(Buffers.readUInt(buffer));
                stringPoolHeader.setStylesStart(Buffers.readUInt(buffer));
                buffer.position((int)(begin + headerSize));
                return(stringPoolHeader);

            case ChunkType.TABLE_PACKAGE:
                PackageHeader packageHeader = new PackageHeader(chunkType, headerSize, chunkSize);
                packageHeader.setId(Buffers.readUInt(buffer));
                packageHeader.setName(ParseUtils.readStringUTF16(buffer, 128));
                packageHeader.setTypeStrings(Buffers.readUInt(buffer));
                packageHeader.setLastPublicType(Buffers.readUInt(buffer));
                packageHeader.setKeyStrings(Buffers.readUInt(buffer));
                packageHeader.setLastPublicKey(Buffers.readUInt(buffer));
                buffer.position((int)(begin + headerSize));
                return(packageHeader);

            case ChunkType.TABLE_TYPE_SPEC:
                TypeSpecHeader typeSpecHeader = new TypeSpecHeader(chunkType, headerSize, chunkSize);
                typeSpecHeader.setId(Buffers.readUByte(buffer));
                typeSpecHeader.setRes0(Buffers.readUByte(buffer));
                typeSpecHeader.setRes1(await Buffers.readUShort(buffer));
                typeSpecHeader.setEntryCount(Buffers.readUInt(buffer));
                buffer.position((int)(begin + headerSize));
                return(typeSpecHeader);

            case ChunkType.TABLE_TYPE:
                TypeHeader typeHeader = new TypeHeader(chunkType, headerSize, chunkSize);
                typeHeader.setId(Buffers.readUByte(buffer));
                typeHeader.setRes0(Buffers.readUByte(buffer));
                typeHeader.setRes1(await Buffers.readUShort(buffer));
                typeHeader.setEntryCount(Buffers.readUInt(buffer));
                typeHeader.setEntriesStart(Buffers.readUInt(buffer));
                typeHeader.setConfig(readResTableConfig());
                buffer.position((int)(begin + headerSize));
                return(typeHeader);

            case ChunkType.TABLE_LIBRARY:
                //DynamicRefTable
                LibraryHeader libraryHeader = new LibraryHeader(chunkType, headerSize, chunkSize);
                libraryHeader.setCount(Buffers.readUInt(buffer));
                buffer.position((int)(begin + headerSize));
                return(libraryHeader);

            case ChunkType.NULL:
            //buffer.skip((int) (chunkSize - headerSize));
            default:
                throw new ParserException("Unexpected chunk Type: 0x" + chunkType.ToString("X"));
            }
        }
Example #12
        public override void update()
        {
            #region Synonyms

            //Add Synonyms here

            #endregion Synonyms

            #region Triggers

            //Add Triggers here

            #endregion Triggers

            #region Functions

            foreach (CswUpdateSchemaPLSQLFunctions.Functions Function in CswUpdateSchemaPLSQLFunctions.Functions._All)
            {
                _acceptBlame(Function._Dev, Function._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(Function.ToString());
                _resetBlame();
            }

            #endregion Functions

            #region Views

            foreach (CswUpdateSchemaPLSQLViews.Views View in CswUpdateSchemaPLSQLViews.Views._All)
            {
                _acceptBlame(View._Dev, View._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(View.ToString());
                _resetBlame();
            }

            #endregion Views

            #region Procedures

            foreach (CswUpdateSchemaPLSQLProcedures.Procedures Procedure in CswUpdateSchemaPLSQLProcedures.Procedures._All)
            {
                _acceptBlame(Procedure._Dev, Procedure._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(Procedure.ToString());
                _resetBlame();
            }

            #endregion Procedures

            #region Types

            //Because Nested Tables are dependent upon types, they must be dropped before the types can be recompiled.
            //Normally we would drop types explicitly based on the order of their dependencies,
            //but that can't be done generically. So instead, we use "force" to avoid ORA-02303.

            #region Drop Types

            foreach (CswUpdateSchemaPLSQLTypes.NestedTables NestedTable in CswUpdateSchemaPLSQLTypes.NestedTables._All)
            {
                _acceptBlame(NestedTable._Dev, NestedTable._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(
                    @"declare
                      object_not_exists EXCEPTION;
                      PRAGMA EXCEPTION_INIT(object_not_exists, -04043);
                    begin
                      execute immediate 'drop type " + NestedTable._Title + @" force';
                    exception
                      when object_not_exists then null;
                    end;"
                    );
                _resetBlame();
            }

            foreach (CswUpdateSchemaPLSQLTypes.TypeHeaders TypeHeader in CswUpdateSchemaPLSQLTypes.TypeHeaders._All)
            {
                _acceptBlame(TypeHeader._Dev, TypeHeader._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(
                    @"declare
                      object_not_exists EXCEPTION;
                      PRAGMA EXCEPTION_INIT(object_not_exists, -04043);
                    begin
                      execute immediate 'drop type " + TypeHeader._Title + @" force';
                    exception
                      when object_not_exists then null;
                    end;"
                    );
                _resetBlame();
            }

            #endregion Drop Types

            #region Type Headers

            foreach (CswUpdateSchemaPLSQLTypes.TypeHeaders TypeHeader in CswUpdateSchemaPLSQLTypes.TypeHeaders._All)
            {
                _acceptBlame(TypeHeader._Dev, TypeHeader._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(TypeHeader.ToString());
                _resetBlame();
            }

            #endregion Type Headers

            #region Type Bodies

            //Add Type Bodies here

            #endregion Type Bodies

            #region Nested Tables

            foreach (CswUpdateSchemaPLSQLTypes.NestedTables NestedTable in CswUpdateSchemaPLSQLTypes.NestedTables._All)
            {
                _acceptBlame(NestedTable._Dev, NestedTable._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(NestedTable.ToString());
                _resetBlame();
            }

            #endregion Nested Tables

            #endregion Types

            #region Package Headers

            foreach (CswUpdateSchemaPLSQLPackages.PackageHeaders PackageHead in CswUpdateSchemaPLSQLPackages.PackageHeaders._All)
            {
                _acceptBlame(PackageHead._Dev, PackageHead._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(PackageHead.ToString());
                _resetBlame();
            }

            #endregion Package Headers

            #region Package Bodies

            foreach (CswUpdateSchemaPLSQLPackages.PackageBodies PackageBody in CswUpdateSchemaPLSQLPackages.PackageBodies._All)
            {
                _acceptBlame(PackageBody._Dev, PackageBody._CaseNo);
                _CswNbtSchemaModTrnsctn.execArbitraryPlatformNeutralSql(PackageBody.ToString());
                _resetBlame();
            }

            #endregion Package Bodies
        } //update()
Example #13
 static MsgpackClientPacket()
 {
     TypeHeader.Register(typeof(MsgpackClientPacket).Assembly);
 }