/// <summary>
/// Builds a TomeReply for a command entry: the reply text itself plus a
/// human-readable info line identifying the command.
/// </summary>
/// <param name="entry">Table row; must contain "reply", "id" and "trigger" columns.</param>
/// <returns>The reply paired with its descriptive info string.</returns>
private static TomeReply CreateReply(TableEntry entry)
{
    var message = entry.Columns["reply"];
    // Reuse the already-fetched reply text instead of a second column lookup;
    // interpolation replaces the original concatenation chain.
    var info = $"Command #{entry.Columns["id"]} ( {entry.Columns["trigger"]} -> {message} )";
    return new TomeReply(message, info);
}
/// <summary>
/// Builds a TomeReply for an entry of the named table, capitalizing the table
/// name for the info line.
/// </summary>
/// <param name="tableName">Source table name, used (capitalized) in the info line.</param>
/// <param name="entry">Table row; must contain "reply", "id" and "trigger" columns.</param>
/// <returns>The reply paired with its descriptive info string.</returns>
private static TomeReply CreateReply(string tableName, TableEntry entry)
{
    var message = entry.Columns["reply"];
    // Capitalize the first letter for display. Guard against a null/empty name:
    // the original tableName[0] indexing threw on an empty string.
    if (!string.IsNullOrEmpty(tableName))
    {
        tableName = char.ToUpper(tableName[0]) + tableName.Substring(1);
    }
    var info = $"{tableName} #{entry.Columns["id"]} ( {entry.Columns["trigger"]} -> {message} )";
    return new TomeReply(message, info);
}
// Builds a parse-table entry for the given production rule and children,
// with a probability equal to the rule's probability times the product of
// all child probabilities.
public TableEntry(int[] key, ProductionRule rule, TableEntry[] children)
{
    Key = key;
    Rule = rule;
    Base = rule.LHS.Text;
    Probability = rule.Probability;
    Children = children;

    // Fold each child's probability into this entry's probability.
    foreach (TableEntry child in Children)
    {
        Probability *= child.Probability;
    }
}
// Recursively prints the grouping tree into the sparse 2-D table, one column
// per tree depth; leaves also emit a formatted numeric cell in the next column.
private static void InnerPopulateTable(Level level, TableEntry[][] table, ref int row, int col)
{
    foreach (KeyValuePair<string, Level> child in level.OrderedNext)
    {
        string label = child.Key;
        Level node = child.Value;

        if (node.Next != null)
        {
            // Inner node: emit the label spanning all descendant rows,
            // then recurse one column to the right.
            table[row][col] = new TableEntry { Data = label, Rowspan = node.Count };
            InnerPopulateTable(node, table, ref row, col + 1);
        }
        else
        {
            // Leaf: the label plus its formatted amount on the same row.
            table[row][col] = new TableEntry { Data = label };
            table[row][col + 1] = new TableEntry { Data = node.Amount.ToNuGetNumberString(), IsNumeric = true };
            row++;
        }
    }
}
// Walks the pivot tree depth-first, writing one table cell per level; leaf
// amounts are formatted with the client's culture ("n0").
private static void InnerPopulateTable(Level level, TableEntry[][] table, ref int row, int col, CultureInfo clientCulture)
{
    foreach (var pair in level.OrderedNext)
    {
        Level node = pair.Value;
        if (node.Next == null)
        {
            // Leaf: name cell followed by the culture-formatted amount.
            table[row][col] = new TableEntry { Data = pair.Key };
            table[row][col + 1] = new TableEntry { Data = node.Amount.ToString("n0", clientCulture) };
            row++;
        }
        else
        {
            // Branch: span the rows of all descendants, then recurse.
            table[row][col] = new TableEntry { Data = pair.Key, Rowspan = node.Count };
            InnerPopulateTable(node, table, ref row, col + 1, clientCulture);
        }
    }
}
// Groups the facts by the pivot vector and renders the result as a sparse
// 2-D cell structure plus the grand total.
public static Tuple<TableEntry[][], int> GroupBy(IList<StatisticsFact> facts, string[] pivot)
{
    // Build the tree grouping from the facts and pivot vector.
    Level level = InnerGroupBy(facts, pivot);

    // Render the tree into a rectangular 2-D structure suitable for building
    // HTML tables. Jagged arrays are used per coding conventions; the result
    // is only slightly sparse for our data.
    int columns = pivot.Length + 1;
    var table = new TableEntry[level.Count][];
    for (int rowIndex = 0; rowIndex < table.Length; rowIndex++)
    {
        table[rowIndex] = new TableEntry[columns];
    }

    PopulateTable(level, table);
    return Tuple.Create(table, level.Total);
}
/// <summary>
/// <para>Returns the value in the table at the specified key.</para>
/// </summary>
/// <param name="key">The key of the value to retrieve.</param>
/// <returns>
/// <para>The value with the specified key, or null when the key is absent.</para>
/// </returns>
public PreferencesAssetReference this[string key]
{
    get
    {
        PreferencesAssetReference value;
        dictionary.TryGetValue(key, out value);
        return(value);
    }
    set
    {
        int modifyIndex = IndexOfKey(key);
        if (modifyIndex == -1)
        {
            // New key: delegate to Add, which inserts into the backing
            // collections. BUG FIX: the original fell through after Add and
            // indexed keyValuePairs[-1], throwing for every newly added key.
            // NOTE(review): assumes Add raises the change notification itself
            // — confirm; otherwise call InvokeChanged() before returning.
            Add(key, value);
            return;
        }
        dictionary[key] = value;
        keyValuePairs[modifyIndex] = new TableEntry(key, value);
        InvokeChanged();
    }
}
// Rebuilds both the master and filtered entry arrays from the server list.
public void SetEntries( List<ServerListEntry> servers ) {
	entries = new TableEntry[servers.Count];
	usedEntries = new TableEntry[servers.Count];
	this.servers = servers;

	for( int i = 0; i < servers.Count; i++ ) {
		ServerListEntry e = servers[i];
		TableEntry tableEntry = default(TableEntry);
		tableEntry.Hash = e.Hash;
		tableEntry.Name = e.Name;
		tableEntry.Players = e.Players + "/" + e.MaximumPlayers;
		tableEntry.Software = e.Software;
		tableEntry.Uptime = MakeUptime( e.Uptime );

		entries[i] = tableEntry;
		usedEntries[i] = tableEntry;
	}
	Count = entries.Length;
}
// Looks up a committed actor-state value by (type, key) under a read lock.
// Returns true and the stored value on success; false and default otherwise.
public bool TryGetValue(TType type, TKey key, out TValue value)
{
    using (this.rwLock.AcquireReadLock())
    {
        value = default(TValue);

        // First level: locate the per-type key table.
        Dictionary<TKey, TableEntry> keyTable;
        if (!this.committedEntriesTable.TryGetValue(type, out keyTable))
        {
            return false;
        }

        // Second level: locate the entry for this key.
        TableEntry tableEntry;
        if (!keyTable.TryGetValue(key, out tableEntry))
        {
            return false;
        }

        value = tableEntry.ActorStateDataWrapper.Value;
        return true;
    }
}
// Head-to-head comparer: orders two league-table entries by their direct
// match(es) against each other.
// Returns 1 if x ranks above y, -1 if y ranks above x, 0 when tied/undecidable.
// NOTE(review): this queries the database on every comparison; sorting n
// entries issues O(n log n) queries — consider preloading if this is hot.
public int Compare(TableEntry x, TableEntry y)
{
    if (x.TeamId == y.TeamId)
    {
        return(0);
    }

    // All league matches played between exactly these two teams (either venue).
    var matches = db.LoadSelect <Match>(sql => sql.LeagueId == x.LeagueId && (sql.HomeTeamId == x.TeamId && sql.GuestTeamId == y.TeamId || sql.HomeTeamId == y.TeamId && sql.GuestTeamId == x.TeamId)).ToList();

    if (matches.Count == 1)
    {
        // Single encounter: its winner ranks higher; a draw ties.
        var match = matches.Single();
        if (match.WinnerTeamId == x.TeamId)
        {
            return(1);
        }
        if (match.WinnerTeamId == y.TeamId)
        {
            return(-1);
        }
        return(0);
    }

    // Multiple (or zero) encounters: rank by who won more of them.
    if (HasMoreWins(x.TeamId, matches))
    {
        return(1);
    }
    if (HasMoreWins(y.TeamId, matches))
    {
        return(-1);
    }
    return(0);
}
// Allocates fresh backing arrays sized to the incoming list and fills both
// the full set and the (initially identical) filtered set of rows.
public void SetEntries(List<ServerListEntry> servers)
{
    int count = servers.Count;
    entries = new TableEntry[count];
    usedEntries = new TableEntry[count];
    this.servers = servers;

    for (int i = 0; i < count; i++)
    {
        ServerListEntry src = servers[i];
        TableEntry row = default(TableEntry);
        row.Hash = src.Hash;
        row.Name = src.Name;
        row.Players = src.Players + "/" + src.MaximumPlayers;
        row.Software = src.Software;
        row.Uptime = MakeUptime(src.Uptime);
        entries[i] = row;
        usedEntries[i] = row;
    }
    Count = count;
}
// Maps a mouse click to the server row under it. A second click on the same
// row within one second is treated as a double-click and connects to that
// server; a first click just selects the row and requests a redraw.
// NOTE(review): mouseX is unused here — hit-testing is vertical only.
void GetSelectedServer(int mouseX, int mouseY)
{
    for (int i = 0; i < Count; i++)
    {
        TableEntry entry = usedEntries[i];
        // Skip rows whose vertical extent (plus 2px padding) misses the click.
        if (mouseY < entry.Y || mouseY >= entry.Y + entry.Height + 2)
        {
            continue;
        }

        // Double-click on the previously clicked row: connect.
        // (lastIndex/lastPress are presumably updated elsewhere — confirm.)
        if (lastIndex == i && (DateTime.UtcNow - lastPress).TotalSeconds < 1)
        {
            Window.ConnectToServer(servers, entry.Hash);
            lastPress = DateTime.MinValue; // reset so a third click isn't another double-click
            return;
        }

        SetSelected(i);
        NeedRedraw();
        break;
    }
}
/// <summary>
/// Converts raw table entities of the given type into generic TableEntry rows,
/// copying each mapped property into the row's Data dictionary and resolving
/// user ids to user names where applicable.
/// </summary>
/// <param name="type">The entity CLR type (supplies the table name and property set).</param>
/// <param name="values">The entities to convert; null yields an empty list.</param>
/// <param name="includeObjects">Whether complex-object properties are included.</param>
/// <param name="userMappings">UserId -> UserName lookup for display names.</param>
/// <returns>The converted rows (never null).</returns>
private List<TableEntry> ToTableEntries(Type type, IList<ITableEntity> values, bool includeObjects, Dictionary<string, string> userMappings)
{
    var rows = new List<TableEntry>();
    if (values == null)
    {
        return rows;
    }

    var tableName = type.Table().TableName;
    var properties = this.GetTableProperties(type, includeObjects);

    foreach (var obj in values)
    {
        if (obj == null)
        {
            continue;
        }

        var value = new TableEntry
        {
            PartitionKey = obj.PartitionKey,
            RowKey = obj.RowKey,
            Table = tableName,
            Data = new Dictionary<string, object>()
        };

        foreach (var property in properties)
        {
            value.Data[property.Name] = property.GetValue(obj);
            if (property.Name == nameof(UserData.UserId))
            {
                // BUG FIX: fallback display name was misspelled "UNKNONW".
                value.Data[nameof(UserEntity.UserName)] =
                    userMappings.GetValueOrDefault((string)value.Data[property.Name]) ?? "UNKNOWN";
            }
        }

        rows.Add(value);
    }

    return rows;
}
// Depth-first print of the grouping tree into the sparse 2-D table; leaf
// amounts are rendered with the invariant culture.
private static void InnerPopulateTable(Level level, TableEntry[][] table, ref int row, int col)
{
    foreach (KeyValuePair<string, Level> item in level.Next)
    {
        Level subLevel = item.Value;
        bool isLeaf = subLevel.Next == null;

        if (isLeaf)
        {
            table[row][col] = new TableEntry { Data = item.Key };
            table[row][col + 1] = new TableEntry
            {
                Data = subLevel.Amount.ToString(CultureInfo.InvariantCulture)
            };
            row++;
        }
        else
        {
            table[row][col] = new TableEntry { Data = item.Key, Rowspan = subLevel.Count };
            InnerPopulateTable(subLevel, table, ref row, col + 1);
        }
    }
}
// Copies the values from a generic TableEntry back onto a typed table entity.
// For each mapped property, looks up the entry's Data by property name
// (case-insensitive — NOTE(review): a TryGetValue overload taking a
// StringComparison is presumably a project extension method; confirm) and
// converts the stored value to the property's type: JObject payloads are
// deserialized, everything else goes through Convert.ChangeType with the
// invariant culture. Returns the same entity instance, mutated in place.
private ITableEntity UpdateTableEntry(Type type, ITableEntity entity, TableEntry entry, bool includeObjects)
{
    var tableName = type.Table().TableName; // NOTE(review): computed but unused here
    var properties = this.GetTableProperties(type, includeObjects);
    foreach (var property in properties)
    {
        if (entry.Data.TryGetValue(property.Name, out var v, StringComparison.InvariantCultureIgnoreCase))
        {
            var value = v;
            if (value is JObject jobj)
            {
                // Complex object stored as JSON: rehydrate to the target type.
                value = jobj.ToObject(property.PropertyType);
            }
            else
            {
                // Simple value: coerce to the declared property type.
                value = Convert.ChangeType(value, property.PropertyType, CultureInfo.InvariantCulture);
            }
            property.SetValue(entity, value);
        }
    }
    return(entity);
}
// Test helper: feeds the bytes just written by the encoder into a reference
// decoder and asserts (Debug builds only) that the encoder's dynamic table
// mirrors the decoder's — same count, max size, entries, and total size.
// Advances the consumed-buffer cursor and returns this for fluent chaining.
private HPackEncoder FinishWrite(int totalWritten)
{
    // Decode exactly the newly written span; decoded headers are discarded
    // (empty callback) — only the dynamic-table side effects matter here.
    _decoder.Decode(_buffer.Span.Slice(_bufferConsumed, totalWritten), false, (o, n, v) => { }, null);

    Debug.Assert(this.DynamicTableCount == _decoderDynamicTable.Count);
    Debug.Assert(this._dynamicMaxSize == _decoderDynamicTable.MaxSize);

    // Entry-by-entry comparison between decoder and encoder dynamic tables.
    for (int i = 0; i < _dynamicCount; ++i)
    {
        System.Net.Http.HPack.HeaderField f = _decoderDynamicTable[i];
        string fName = Encoding.UTF8.GetString(f.Name);
        string fValue = Encoding.UTF8.GetString(f.Value);

        TableEntry e = _dynamicTable[MapDynamicIndexToArrayIndex(i)];
        Debug.Assert(e.Name == fName);
        Debug.Assert(e.Value == fValue);
    }

    Debug.Assert(this.DynamicTableSize == _decoderDynamicTable.Size);

    _bufferConsumed += totalWritten;
    return(this);
}
// Recursively flattens the grouping tree into table cells, one column per
// depth; leaf amounts get an IsNumeric cell formatted for the client culture.
private static void InnerPopulateTable(Level level, TableEntry[][] table, ref int row, int col, CultureInfo clientCulture)
{
    foreach (KeyValuePair<string, Level> item in level.OrderedNext)
    {
        if (item.Value.Next != null)
        {
            // Non-leaf: one spanning cell, then descend into the next column.
            table[row][col] = new TableEntry { Data = item.Key, Rowspan = item.Value.Count };
            InnerPopulateTable(item.Value, table, ref row, col + 1, clientCulture);
            continue;
        }

        // Leaf: write the key and its amount formatted with "n0".
        table[row][col] = new TableEntry { Data = item.Key };
        table[row][col + 1] = new TableEntry
        {
            Data = item.Value.Amount.ToString("n0", clientCulture),
            IsNumeric = true
        };
        row++;
    }
}
// Evicts entries from the tail of the dynamic table (oldest first) until at
// least `size` bytes are free. Preconditions (asserted): size is non-negative
// and there genuinely is not enough free space yet.
// NOTE(review): the Console.WriteLine looks like leftover debug output.
// NOTE(review): this block appears truncated in this chunk — the method's
// closing brace is not visible here.
private void EnsureDynamicSpaceAvailable(int size)
{
    Debug.Assert(size >= 0);
    Debug.Assert((_dynamicMaxSize - _dynamicSize) < size);

    do
    {
        Console.WriteLine($"Evicting from {_dynamicSize}");

        // Index of the oldest entry; wraps around the circular buffer.
        int evictIdx = _dynamicHead - _dynamicCount + 1;
        if (evictIdx < 0)
        {
            evictIdx += _dynamicTable.Length;
        }

        ref TableEntry e = ref _dynamicTable[evictIdx];
        RemoveMapping(e, evictIdx);

        _dynamicCount--;
        _dynamicSize -= e.DynamicSize;
        e = default;
    }while ((_dynamicMaxSize - _dynamicSize) < size);
// MVC action: wraps the posted stock details in a table-entry envelope and
// pushes its JSON payload onto the processing queue. Always returns an empty
// JSON result.
// NOTE(review): "SaveStockQeueue" / "currendate" are misspelled, but renaming
// the action would break routes/callers.
public ActionResult SaveStockQeueue(StockDetails stock)
{
    DateTime currendate = DateTime.UtcNow;
    string stockid = Guid.NewGuid().ToString();

    // Both timestamp properties are set — presumably one is legacy; confirm.
    stock.CreatedDateTime = currendate;
    stock.CreateDateTime = currendate;

    TableEntry entity = new TableEntry();

    // Partition by the generated stock id; row key is the stock's name.
    stock.StockId = stockid;
    stock.PartitionKey = stockid;
    stock.StockStatus = false;
    stock.RowKey = stock.StockName;

    entity.TableName = "stocktable";
    entity.PartitionKey = stockid;
    entity.RowKey = stock.StockName;
    entity.CreateDateTime = currendate;
    entity.TableEntityData = CommonMethods.JsonSerializer <StockDetails>(stock);

    AddMessageToQueue(entity.TableEntityData);
    return(Json(new { }, JsonRequestBehavior.AllowGet));
}
//**********************************************************************************************
// Replies
//**********************************************************************************************

/// <summary>
/// Add a reply to the table.
/// </summary>
/// <param name="dest">The node the reply will be delivered to.</param>
/// <param name="reply">The overlay reply being recorded.</param>
public void AddReply(NodeBind dest, OverlayReply reply)
{
    // Get the table entry for this request nodeId (which should already exist)
    string id = reply.RequestId;
    if (table.ContainsKey(id))
    {
        // Stick this reply into the table entry
        TableEntry entry = (TableEntry)table[id];
        Debug.Assert(entry.reply == null);
        entry.reply = reply;
        entry.replyDest = dest;

        // insertedTable records whether the entry was (re)inserted when the
        // message went out: if the message was sent to the same node it came
        // from, this flag is true and the entry must NOT be removed here,
        // because it is still needed to receive the reply.
        if (!entry.insertedTable)
        {
            // Cancel the entry's GC timer.
            localNode.CancelTimer(entry.gcTimer);

            // Remove the entry now instead of waiting for the GC pass;
            // otherwise, if the same node sends us further requests, their
            // nodeIds could collide with those of earlier requests.
            RemoveEntry(id);
        }
    }
    else
    {
        // No entry in the table.
        // This can occur if AllowDupCCR / AllowDupAD are true
        // Don't keep track of the reply
    }
}
// Methods

// Inserts `lexeme` into the symbol table at the given scope depth. If an
// identifier with the same name already exists at the same depth, reports a
// redeclaration error (using the lexer's current position) and terminates
// the process.
public void Insert(string lexeme, LexicalAnalyzer.Token token, int depth, LexicalAnalyzer lex_analyzer)
{
    // Create the entry/symbol to put inside the symbol table.
    TableEntry symbol = new TableEntry(lexeme, token, depth);

    // Now, insert the symbol inside the table.
    uint index = hash(lexeme);
    TableEntry node = this.Lookup(lexeme);
    if (node != null)
    {
        if (node.symbol_depth == depth)
        {
            // Duplicate declaration in the current scope: fatal error.
            Console.WriteLine("Error at {0},{1}: The identifier '{2}' has already been declared in the current scope!", lex_analyzer.token_start.line + 1, lex_analyzer.token_start.character + 1, lexeme);
            Console.Write("\nPress a key to exit... ");
            Console.ReadKey();
            Environment.Exit(-1);
        }
    }
    hash_table[index].AddFirst(symbol); // Walking to the last element in the list wastes time, just add to the front.
}
// Groups the facts by the pivot vector and renders the result as a sparse
// 2-D cell structure plus the culture-formatted grand total.
public static Tuple<TableEntry[][], string> GroupBy(IList<StatisticsFact> facts, string[] pivot, CultureInfo clientCulture)
{
    // 1) Group the facts into a tree keyed by the pivot vector.
    Level level = InnerGroupBy(facts, pivot);

    // 2) Attach an ordered child list to every level (the pivot algorithm
    //    needed dictionary lookups, so ordering is a separate pass).
    AddOrderedNext(level);

    // 3) Print the tree into a sparse rectangular 2-D structure for HTML
    //    tables. Jagged arrays are used per coding conventions; the result is
    //    only slightly sparse for our data.
    int width = pivot.Length + 1;
    var table = new TableEntry[level.Count][];
    for (int r = 0; r < table.Length; r++)
    {
        table[r] = new TableEntry[width];
    }

    PopulateTable(level, table, clientCulture);

    string totalText = level.Total.ToString("n0", clientCulture);
    return Tuple.Create(table, totalText);
}
// Pivots the facts into a jagged cell table plus a formatted grand total.
public static Tuple<TableEntry[][], string> GroupBy(IList<StatisticsFact> facts, string[] pivot)
{
    // Step 1: collapse the facts into a tree following the pivot vector.
    Level level = InnerGroupBy(facts, pivot);

    // Step 2: materialize an ordered child list per level (the grouping pass
    // relied on dictionary lookups, so ordering happens separately).
    AddOrderedNext(level);

    // Step 3: flatten the tree into a rectangular jagged array (one extra
    // column for the amounts) ready to be rendered as an HTML table.
    TableEntry[][] table = new TableEntry[level.Count][];
    for (int i = 0; i < level.Count; i++)
    {
        table[i] = new TableEntry[pivot.Length + 1];
    }
    PopulateTable(level, table);

    return new Tuple<TableEntry[][], string>(table, level.Total.ToNuGetNumberString());
}
// Orders table entries by games won, then by games won in extension.
// Returns 1 when x ranks higher, -1 when y ranks higher, 0 on a tie.
public int Compare(TableEntry x, TableEntry y)
{
    // Primary key: regular games won.
    if (x.GamesWon != y.GamesWon)
    {
        return x.GamesWon > y.GamesWon ? 1 : -1;
    }

    // Tie-breaker: games won in extension.
    if (x.GamesWonExtension != y.GamesWonExtension)
    {
        return x.GamesWonExtension > y.GamesWonExtension ? 1 : -1;
    }

    return 0;
}
// Creates (or updates) a file entry at `path` in the save's file table,
// creating any missing parent directories along the way.
private void CreateFileRecursive(ReadOnlySpan <byte> path, ref SaveFileInfo fileInfo)
{
    var parser = new PathParser(path);
    var key = new SaveEntryKey(parser.GetCurrent(), 0);

    // Ensure all parent directories exist; parser/key are taken by ref and
    // presumably advanced to the final path component — confirm.
    int parentIndex = CreateParentDirectoryRecursive(ref parser, ref key);

    int index = FileTable.GetIndexFromKey(ref key).Index;
    TableEntry <SaveFileInfo> fileEntry = default;

    // File already exists. Update file info.
    if (index >= 0)
    {
        FileTable.GetValue(index, out fileEntry);
        fileEntry.Value = fileInfo;
        FileTable.SetValue(index, ref fileEntry);
        return;
    }

    // New file: add the entry and link it into its parent directory's chain.
    fileEntry.Value = fileInfo;
    index = FileTable.Add(ref key, ref fileEntry);

    LinkFileToParent(parentIndex, index);
}
// Builds the symbol-table entry for a function definition: a Function-class
// entry (scope-qualified when declared via Name::func) whose Link is a fresh
// scope table holding the parameters and the body's local declarations, and
// whose Type encodes "<returnType>-<paramTypes>" with "[]" repeated per array
// dimension. The entry is attached back onto the FuncDef node.
public override void Visit(FuncDef funcDef)
{
    string funcName = funcDef.FunctionName;
    if (funcDef.ScopeResolution != null)
    {
        // Member function: qualify the name with its class scope.
        funcName = $"{funcDef.ScopeResolution.ID}::{funcName}";
    }

    var entry = new TableEntry(funcName, Classification.Function, -1);
    entry.Link = new SymbolTable();

    // Parameters become entries in the function's local scope.
    if (funcDef.Parameters != null)
    {
        foreach (var param in funcDef.Parameters)
        {
            entry.Link.Add(param.Entry, param.Location);
        }
    }

    // Locals declared in the body join the same scope.
    if (funcDef.Implementation?.Table != null)
    {
        foreach (var varEntry in funcDef.Implementation.Table.GetAll())
        {
            entry.Link.Add(varEntry, funcDef.Implementation.Location);
        }
    }

    // Type string: return type, '-', then comma-separated parameter types
    // (unnamed parameters are skipped; arrays add one "[]" per dimension).
    entry.Type = funcDef.ReturnType + "-";
    if (funcDef.Parameters != null)
    {
        entry.Type += string.Join(",", funcDef.Parameters.Where(val => val.Id != string.Empty).Select(val => val.Type + "[]".Repeat(val.Dimensions.Count)));
    }

    funcDef.Entry = entry;
}
/// <summary>
/// Indexer. This will return the statement collection for the given owner.
/// It will return null only if the owner is not in the table.
/// </summary>
public CodeStatementCollection this[object statementOwner]
{
    get
    {
        if (statementOwner is null)
        {
            throw new ArgumentNullException(nameof(statementOwner));
        }

        if (_table != null)
        {
            // Single linear scan by reference identity, lazily creating the
            // statement collection on first access.
            // BUG FIX: the original had a second foreach performing the same
            // ReferenceEquals scan after this loop; it could never match an
            // owner this loop had not already matched, so it was dead code
            // and has been removed.
            for (int idx = 0; idx < _table.Count; idx++)
            {
                if (object.ReferenceEquals(_table[idx].Owner, statementOwner))
                {
                    if (_table[idx].Statements is null)
                    {
                        _table[idx] = new TableEntry(statementOwner, new CodeStatementCollection());
                    }
                    return _table[idx].Statements;
                }
            }
        }

        return null;
    }
}
// Entering a class declaration: clear the function scope and point the
// instance scope at the class's symbol table looked up in the global scope.
public override void PreVisit(ClassDecl classDecl)
{
    this.FunctionScopeLink = null;

    var classEntry = this.GlobalScope.Get(classDecl.ClassName, Classification.Class);
    this.ClassInstanceScope = classEntry.Link;
}
// Draws one cell of a server-list column, measuring the text first so rows
// can vary in height. Fills in entry.Y / entry.Height for later hit-testing.
// Returns false when the cell would overflow the table's bottom edge (y is
// then parked just past the bottom so subsequent cells also bail out).
bool DrawColumnEntry( IDrawer2D drawer, ref DrawTextArgs args, int maxWidth,
                     int x, ref int y, ref TableEntry entry ) {
	Size size = drawer.MeasureSize( ref args );
	bool empty = args.Text == "";
	// Empty cells still occupy a standard row height.
	if( empty )
		size.Height = defaultInputHeight;
	if( y + size.Height > Y + Height ) {
		y = Y + Height + 2; return false;
	}

	entry.Y = y; entry.Height = size.Height;
	if( !empty ) {
		size.Width = Math.Min( maxWidth, size.Width );
		args.SkipPartsCheck = false;
		// Clip to the column width so long text never bleeds into neighbours.
		drawer.DrawClippedText( ref args, x, y, maxWidth, 30 );
	}
	y += size.Height + 2; // 2px row spacing
	return true;
}
/// <summary>
/// LR (bottom-up) syntactic analysis driven by the grammar automaton's action
/// table. Reads tokens from the lexer, shifting/reducing until each input line
/// is accepted or an error is appended to <paramref name="feedback"/>.
/// </summary>
/// <param name="feedback">Accumulates user-facing error messages.</param>
/// <returns>True when the whole input was parsed without errors.</returns>
private bool AscendentAnalizer(ref string feedback)
{
    GrammarDeterministicAutomata afd = new GrammarDeterministicAutomata();
    AscendentDealer dealer = new AscendentDealer(afd);

    // State stack.
    SimpleStack<GrammarState> stateStack = new SimpleStack<GrammarState>();
    GrammarState ascendentState = GrammarState.ConvertFrom(afd.InitialState);
    stateStack.Push(ascendentState);

    Token token = null;
    bool nextToken = true;
    bool errorFound = false;

    while (!errorFound)
    {
        GrammarState state = null;
        Symbol symbol = null;

        #region Token reading and classification
        if (nextToken)
        {
            token = Lexic.NextToken();
            if (token != null)
            {
                if (token.Type == TokenType.LINE_BREAK)
                {
                    line++;
                    // Null the token so the code below substitutes the $ symbol.
                    token = null;
                }
            }
            nextToken = false;
        }

        if (token != null)
        {
            // Map lexical categories onto their grammar terminals.
            if (token.Type == TokenType.VARIABLE)
            {
                symbol = Symbol.GetSymbol("var");
            }
            else if (token.Type == TokenType.VALUE)
            {
                symbol = Symbol.GetSymbol("num");
            }
            else if (token.Type == TokenType.STRING)
            {
                symbol = Symbol.GetSymbol("string");
            }
            else
            {
                symbol = Symbol.GetSymbol(token.Value);
            }
        }
        else
        {
            // End of the current line: feed the end marker ($).
            symbol = Terminal.Initial;
        }
        #endregion

        state = stateStack.Top();
        TableEntry entry = dealer.Table.GetEntry(symbol, state);

        if (entry != null)
        {
            switch (entry.Type)
            {
                case EntryType.SHIFT:
                {
                    GrammarState nextState = entry.NextState;
                    stateStack.Push(nextState);
                    nextToken = true;
                    break;
                }
                case EntryType.REDUCTION:
                {
                    // Pop one state per symbol on the production's right side,
                    // then follow the GOTO for the reduced non-terminal.
                    GrammarState grammarState = stateStack.Top();
                    int reduction = dealer.GenerateReduction(grammarState).ReductionNumber;
                    Symbol newSymbol = grammarState.GetStateSymbol();
                    for (int i = 0; i < reduction; i++)
                    {
                        stateStack.Pop();
                    }
                    GrammarState newStateTop = stateStack.Top();
                    entry = dealer.Table.GetEntry(newSymbol, newStateTop);
                    if (entry.Type.Equals(EntryType.GOTO))
                    {
                        goto case EntryType.GOTO;
                    }
                    // TODO (from original): the reduced state still needs to be pushed here.
                    break;
                }
                case EntryType.GOTO:
                {
                    stateStack.Push(entry.NextState);
                    break;
                }
                case EntryType.ACCEPTED:
                {
                    // End of input: success. Otherwise reset the stack and
                    // parse the next line.
                    if (Lexic.ListToken.Count == Lexic.Position)
                    {
                        return true;
                    }
                    else
                    {
                        nextToken = true;
                        while (!stateStack.IsEmpty())
                        {
                            stateStack.Pop();
                        }
                        stateStack.Push(ascendentState);
                    }
                    break;
                }
                default:
                {
                    feedback += PARAMETER_INVALID + token.Value + ", linha " + line + "\r\n";
                    errorFound = true;
                    break;
                }
            }
        }
        else
        {
            if (state.IsFinal)
            {
                // No action for this symbol but the state is final: perform
                // the pending reduction and follow its GOTO.
                int reduction = dealer.GenerateReduction(state).ReductionNumber;
                Symbol newSymbol = state.GetStateSymbol();
                for (int i = 0; i < reduction; i++)
                {
                    stateStack.Pop();
                }
                GrammarState newStateTop = stateStack.Top();
                entry = dealer.Table.GetEntry(newSymbol, newStateTop);
                if (entry.Type.Equals(EntryType.GOTO))
                {
                    stateStack.Push(entry.NextState);
                }
            }
            else
            {
                // Syntax error: try to extract the expected symbol from the
                // item notation in the state's value ("lhs: a . b").
                String expectedState = null;
                if (state.StateValue.Contains('.'))
                {
                    expectedState = state.StateValue.Split('.')[1].Split(' ')[1].Trim();
                }
                if (expectedState == null)
                {
                    feedback += PARAMETER_INVALID + token.Value + ", linha " + line + "\r\n";
                    errorFound = true;
                    break;
                }
                else
                {
                    feedback += INCOMPLETE_TENSE_MISS + expectedState + ", linha " + line + "\r\n";
                    errorFound = true;
                    break;
                }
            }
        }

        // (A large region of commented-out legacy parser code was removed here.)
    }

    return !errorFound;
}
/// <summary>
/// Syntactic analysis that accepts any grammar. Tokens are taken verbatim
/// from the input lines (split on spaces), so no lexical analysis is needed.
/// NOTE(review): `success` is never set true and `errorFound` is never
/// assigned inside the loop, and the condition is `!errorFound || !success`,
/// so `indexToken` can run past the end of `tokens` — verify the intended
/// termination before relying on this method.
/// </summary>
/// <param name="feedback">Error feedback accumulator (unused in this body).</param>
/// <returns>True when no error was found.</returns>
private bool AscendentAnalizerNonLexic(ref string feedback)
{
    GrammarDeterministicAutomata afd = new GrammarDeterministicAutomata();
    AscendentDealer dealer = new AscendentDealer(afd);

    // Symbol stack.
    SimpleStack <Symbol> symbolStack = new SimpleStack <Symbol>();
    symbolStack.Push(Terminal.Initial);

    // State stack.
    SimpleStack <GrammarState> stateStack = new SimpleStack <GrammarState>();
    GrammarState ascendentState = GrammarState.ConvertFrom(afd.InitialState);
    stateStack.Push(ascendentState);

    string[] file = Lexic.File;
    bool success = false;
    bool errorFound = false;

    GrammarState state;
    Symbol symbol;

    foreach (string line in file)
    {
        int indexToken = 0;
        List <string> tokens = new List <string>(line.Split(' '));
        tokens.Add("$"); // end-of-line marker
        string token = "";
        state = null;
        symbol = null;

        while (!errorFound || !success)
        {
            token = tokens[indexToken++];
            symbol = Symbol.GetSymbol(token);
            state = stateStack.Top();

            // For debugging only.
            List <TableEntry> list1 = dealer.Table.TableEntriesFor(state);
            List <TableEntry> list2 = dealer.Table.TableEntriesIn(symbol);

            TableEntry entry = dealer.Table.GetEntry(symbol, state);
            if (entry != null)
            {
                switch (entry.Type)
                {
                    case EntryType.SHIFT:
                    {
                        GrammarState nextState = entry.NextState;
                        symbolStack.Push(entry.Horizontal);
                        stateStack.Push(nextState);
                        // Final states trigger an immediate reduction.
                        if (nextState.IsFinal)
                        {
                            goto case EntryType.REDUCTION;
                        }
                        break;
                    }
                    case EntryType.REDUCTION:
                    {
                        GrammarState nextState = entry.NextState;
                        int reduction = dealer.GenerateReduction(nextState).ReductionNumber;
                        // Pop one symbol/state pair per right-hand-side symbol.
                        for (int i = 0; i < reduction; i++)
                        {
                            symbolStack.Pop();
                            stateStack.Pop();
                        }
                        Symbol trans = nextState.GetStateSymbol();
                        entry = dealer.Table.GetEntry(trans, stateStack.Top());
                        nextState = entry.NextState;
                        symbolStack.Push(entry.Horizontal);
                        stateStack.Push(nextState);
                        break;
                    }
                    default:
                        break;
                }
            }
        }
    }

    return(!errorFound);
}
/// <summary>
/// Does a binary diff on the two streams and returns an <see cref="AddCopyCollection"/>
/// of the differences.
/// </summary>
/// <param name="baseFile">The base file.</param>
/// <param name="versionFile">The version file.</param>
/// <returns>An AddCopyCollection that can be used later to construct the version file from the base file.</returns>
public AddCopyCollection Execute(Stream baseFile, Stream versionFile)
{
    if (!baseFile.CanSeek || !versionFile.CanSeek)
    {
        throw new ArgumentException("The Base and Version streams must support seeking.");
    }

    // Hash table of footprint -> (file, offset) candidates, shared by both streams.
    TableEntry?[] table = new TableEntry?[this.tableSize];
    List <IAddCopy> list = new();
    AddCopyCollection result = new(list);

    baseFile.Seek(0, SeekOrigin.Begin);
    // NOTE(review): the version stream seeks to End while the base seeks to
    // Begin — presumably Footprint() repositions as needed; confirm.
    versionFile.Seek(0, SeekOrigin.End);

    int verPos = 0;           // current scan position in the version file
    int basePos = 0;          // current scan position in the base file
    int verStart = 0;         // start of the pending un-emitted run in the version file
    bool isBaseActive = true; // false once the base scan has run off the end
    uint verHash = 0;
    uint baseHash = 0;
    int lastVerHashPos = 0;
    int lastBaseHashPos = 0;

    while (verPos <= (versionFile.Length - this.footprintLength))
    {
        // The GetTableEntry procedure will add the entry if it isn't already there.
        // This gives us a default behavior of favoring the first match.
        verHash = this.Footprint(versionFile, verPos, verHash, ref lastVerHashPos);
        TableEntry verEntry = GetTableEntry(table, verHash, versionFile, verPos);

        TableEntry? baseEntry = null;
        if (isBaseActive)
        {
            baseHash = this.Footprint(baseFile, basePos, baseHash, ref lastBaseHashPos);
            baseEntry = GetTableEntry(table, baseHash, baseFile, basePos);
        }

        // Version-side footprint matched something previously seen in the
        // base file: verify byte-for-byte, emit add+copy codes, and resync.
        if (baseFile == verEntry.File && Verify(baseFile, verEntry.Offset, versionFile, verPos))
        {
            int length = this.EmitCodes(verEntry.Offset, verPos, verStart, baseFile, versionFile, list);
            basePos = verEntry.Offset + length;
            verPos += length;
            verStart = verPos;
            FlushTable(table);
            continue;
        }
        else if (this.FavorLastMatch)
        {
            verEntry.Offset = verPos;
            verEntry.File = versionFile;
        }

        isBaseActive = isBaseActive && (basePos <= (baseFile.Length - this.footprintLength));
        if (isBaseActive && baseEntry != null)
        {
            // Base-side footprint matched a version-file position we already
            // passed: emit codes for that pairing instead.
            if (versionFile == baseEntry.File && Verify(versionFile, baseEntry.Offset, baseFile, basePos) && verStart <= baseEntry.Offset)
            {
                int length = this.EmitCodes(basePos, baseEntry.Offset, verStart, baseFile, versionFile, list);
                verPos = baseEntry.Offset + length;
                basePos += length;
                verStart = verPos;
                FlushTable(table);
                continue;
            }
            else if (this.FavorLastMatch)
            {
                baseEntry.Offset = basePos;
                baseEntry.File = baseFile;
            }
        }

        verPos++;
        basePos++;
    }

    // Flush the remaining tail of the version file as a final add/copy run.
    this.EmitCodes((int)baseFile.Length, (int)versionFile.Length, verStart, baseFile, versionFile, list);

    Debug.Assert(
        result.TotalByteLength == (int)versionFile.Length,
        "The total byte length of the AddCopyCollection MUST equal the length of the version file!");

    return(result);
}
/// <summary>
/// Clears every slot of the footprint hash table so stale matches from a
/// previous sync point cannot pair with future footprints.
/// </summary>
/// <param name="arTable">The hash table to reset in place.</param>
private void FlushTable(TableEntry[] arTable)
{
    // Array.Clear nulls every reference slot in one call — equivalent to the
    // original element-by-element assignment loop.
    Array.Clear(arTable, 0, arTable.Length);
}
//
// MUL -> UOP
//
/// <summary>
/// Converts a legacy MUL + IDX file pair into a UOP ("MYP") archive.
/// All data is stored uncompressed; gump entries get their width/height
/// (taken from the IDX "extra" field) prepended to the pixel data.
/// </summary>
/// <param name="inFile">Path of the source MUL data file.</param>
/// <param name="inFileIdx">Path of the source IDX file (not used for legacy maps, which have no IDX).</param>
/// <param name="outFile">Path of the UOP file to create.</param>
/// <param name="type">Kind of MUL being converted; controls chunking, name hashing and per-entry extras.</param>
/// <param name="typeIndex">Index fed into the hash-format string (e.g. the map number).</param>
public void ToUOP(string inFile, string inFileIdx, string outFile, FileType type, int typeIndex)
{
    // Same for all UOP files
    long firstTable = 0x200; // absolute offset of the first block table
    int tableSize = 0x3E8;   // entries per block table

    // Sanity, in case firstTable is customized by you!
    if (firstTable < 0x28)
    {
        throw new Exception("At least 0x28 bytes are needed for the header.");
    }

    using (BinaryReader reader = OpenInput(inFile))
    using (BinaryReader readerIdx = OpenInput(inFileIdx))
    using (BinaryWriter writer = OpenOutput(outFile))
    {
        List<IdxEntry> idxEntries;

        if (type == FileType.MapLegacyMUL)
        {
            // No IDX file, just group the data into 0xC4000 long chunks
            int length = (int)reader.BaseStream.Length;
            idxEntries = new List<IdxEntry>((int)Math.Ceiling((double)length / 0xC4000));

            int position = 0;
            int id = 0;

            while (position < length)
            {
                IdxEntry e = new IdxEntry();
                e.m_Id = id++;
                e.m_Offset = position;
                e.m_Size = 0xC4000;
                e.m_Extra = 0;

                idxEntries.Add(e);
                position += 0xC4000;
            }
        }
        else
        {
            // Each IDX record is 12 bytes: offset, size, extra.
            int idxEntryCount = (int)(readerIdx.BaseStream.Length / 12);
            idxEntries = new List<IdxEntry>(idxEntryCount);

            for (int i = 0; i < idxEntryCount; ++i)
            {
                int offset = readerIdx.ReadInt32();

                // A negative offset marks an unused IDX slot.
                if (offset < 0)
                {
                    readerIdx.BaseStream.Seek(8, SeekOrigin.Current); // skip
                    continue;
                }

                IdxEntry e = new IdxEntry();
                e.m_Id = i;
                e.m_Offset = offset;
                e.m_Size = readerIdx.ReadInt32();
                e.m_Extra = readerIdx.ReadInt32();

                idxEntries.Add(e);
            }
        }

        // File header
        writer.Write(0x50594D);   // MYP
        writer.Write(5);          // version
        writer.Write(0xFD23EC43); // format timestamp?
        writer.Write(firstTable); // first table
        writer.Write(tableSize);  // table size
        writer.Write(idxEntries.Count); // file count
        writer.Write(1);          // modified count?
        writer.Write(1);          // ?
        writer.Write(0);          // ?

        // Padding
        for (int i = 0x28; i < firstTable; ++i)
        {
            writer.Write((byte)0);
        }

        int tableCount = (int)Math.Ceiling((double)idxEntries.Count / tableSize);

        TableEntry[] tableEntries = new TableEntry[tableSize];

        int maxId;
        string[] hashFormat = GetHashFormat(type, typeIndex, out maxId);

        for (int i = 0; i < tableCount; ++i)
        {
            long thisTable = writer.BaseStream.Position;

            int idxStart = i * tableSize;
            int idxEnd = Math.Min((i + 1) * tableSize, idxEntries.Count);

            // Table header
            writer.Write(tableSize);
            writer.Write((long)0); // next table, filled in later
            writer.Seek(34 * tableSize, SeekOrigin.Current); // table entries, filled in later

            // Data
            int tableIdx = 0;

            for (int j = idxStart; j < idxEnd; ++j, ++tableIdx)
            {
                reader.BaseStream.Seek(idxEntries[j].m_Offset, SeekOrigin.Begin);
                byte[] data = reader.ReadBytes(idxEntries[j].m_Size);

                tableEntries[tableIdx].m_Offset = writer.BaseStream.Position;
                tableEntries[tableIdx].m_Size = data.Length;

                // hash 906142efe9fdb38a, which is file 0009834.tga (and no others, as 7.0.59.5) use a different name format (7 digits instead of 8);
                // if in newer versions more of these files will have adopted that format, someone should update this list of exceptions
                // (even if this seems so much like a typo from someone from the UO development team :P )
                if (idxEntries[j].m_Id == 9834)
                {
                    tableEntries[tableIdx].m_Identifier = HashLittle2(String.Format(hashFormat[1], idxEntries[j].m_Id));
                }
                else
                {
                    tableEntries[tableIdx].m_Identifier = HashLittle2(String.Format(hashFormat[0], idxEntries[j].m_Id));
                }

                tableEntries[tableIdx].m_Hash = HashAdler32(data);

                if (type == FileType.GumpartLegacyMUL)
                {
                    // Prepend width/height from IDX's extra
                    int width = (idxEntries[j].m_Extra >> 16) & 0xFFFF;
                    int height = idxEntries[j].m_Extra & 0xFFFF;

                    writer.Write(width);
                    writer.Write(height);
                    tableEntries[tableIdx].m_Size += 8;
                }

                writer.Write(data);
            }

            long nextTable = writer.BaseStream.Position;

            // Go back and fix table header
            if (i < tableCount - 1)
            {
                writer.BaseStream.Seek(thisTable + 4, SeekOrigin.Begin);
                writer.Write(nextTable);
            }
            else
            {
                writer.BaseStream.Seek(thisTable + 12, SeekOrigin.Begin);
                // No need to fix the next table address, it's the last
            }

            // Table entries
            tableIdx = 0;

            for (int j = idxStart; j < idxEnd; ++j, ++tableIdx)
            {
                writer.Write(tableEntries[tableIdx].m_Offset);
                writer.Write(0); // header length
                writer.Write(tableEntries[tableIdx].m_Size); // compressed size
                writer.Write(tableEntries[tableIdx].m_Size); // decompressed size
                writer.Write(tableEntries[tableIdx].m_Identifier);
                writer.Write(tableEntries[tableIdx].m_Hash);
                writer.Write((short)0); // compression method, none
            }

            // Fill remainder with empty entries
            for ( ; tableIdx < tableSize; ++tableIdx)
            {
                writer.Write(m_EmptyTableEntry);
            }

            writer.BaseStream.Seek(nextTable, SeekOrigin.Begin);
        }
    }
}
/// <summary>
/// Generates the parse-table entries (GOTO / SHIFT / REDUCTION) reachable
/// from the given grammar state, recursing through its transitions.
/// </summary>
/// <param name="state">State whose transitions are turned into table entries.</param>
/// <returns>The entries that were newly added to the table for this state.</returns>
private List<TableEntry> GenerateTableEntries(GrammarState state)
{
    // NOTE(review): stateC is never used afterwards — looks like leftover code;
    // confirm the constructor has no required side effects before removing.
    GrammarState stateC = new GrammarState(state.StateValue);

    // Re-resolve the state from the automaton unless it is the initial state.
    if (!state.Equals(GrammarState.ConvertFrom(Afd.InitialState)))
    {
        state = GrammarState.ConvertFrom(Afd.GetState(state.StateValue));
    }

    List<TableEntry> entries = new List<TableEntry>();

    // NOTE(review): stateSymbol is also unused below — confirm GetStateSymbol()
    // has no needed side effects before removing.
    Symbol stateSymbol = state.GetStateSymbol();

    // Checks whether the state is final, so it can be added to the list
    if (state.VerifyFinal())
    {
        // Apparently this check is not correct (original author's remark)
        if (!FinalStatesList.Contains(state))
        {
            state.IsFinal = true;
            FinalStatesList.Add(state);
        }
    }

    // Walks through every transition of the state
    foreach (StateTransiction<string, Symbol> transiction in state.Transictions)
    {
        Symbol symbol = transiction.Transiction;
        GrammarState grammarNextState = GrammarState.ConvertFrom(transiction.NextState);

        if (symbol is NonTerminal)
        {
            // Non-terminal transition -> GOTO entry; recurse only when newly added.
            TableEntry entry = new TableEntry(symbol, state, grammarNextState, EntryType.GOTO);
            if (_table.AddEntry(entry))
            {
                GenerateTableEntries(grammarNextState);
                entries.Add(entry);
            }
        }
        else
        {
            // Generates the next entries in the table, but does not add the entry
            if (grammarNextState != null)
            {
                // Terminal with a successor state -> SHIFT entry.
                TableEntry entry = new TableEntry(symbol, state, grammarNextState, EntryType.SHIFT);
                if (_table.AddEntry(entry))
                {
                    GenerateTableEntries(grammarNextState);
                    entries.Add(entry);
                }
            }
            else
            {
                // Terminal with no successor state -> REDUCTION entry.
                TableEntry entry = new TableEntry(symbol, state, null, EntryType.REDUCTION);
                if (_table.AddEntry(entry))
                {
                    entries.Add(entry);
                }
            }
        }
    }

    return (entries);
}
/// <summary>
/// Prepares visitation of the main statement block: starts a fresh
/// class-instance scope and links the current function scope to the
/// global "main" function entry.
/// </summary>
/// <param name="mainStatBlock">The main statement block about to be visited (not read here).</param>
public override void PreVisit(MainStatBlock mainStatBlock)
{
    // The two assignments are independent of each other.
    ClassInstanceScope = new SymbolTable();
    FunctionScopeLink = GlobalScope.Get("main", Classification.Function);
}
/// <summary>
/// Loads all data from the internal buffer into the corresponding member
/// variables: header, table infos, tables and finally the raw file contents.
/// </summary>
/// <remarks>The buffer is cleared afterwards to save memory.</remarks>
private void LoadData()
{
    // --- HEADER ---

    // Copyright
    _header.Copyright = _buffer.ReadString(40);

    // Version
    _header.Version = _buffer.ReadString(4);

    // File type
    _header.FileType = _buffer.ReadString(12);

    // Table count
    _header.TableCount = _buffer.ReadUInteger();

    // Offset of the first file
    _header.FirstFileOffset = _buffer.ReadUInteger();

    // --- TABLE INFORMATION ---
    for (int i = 0; i < _header.TableCount; ++i)
    {
        TableInfo currTableInfo = new TableInfo();

        // Unknown1
        currTableInfo.Unknown1 = _buffer.ReadByte();

        // Resource type
        currTableInfo.ResourceType = _buffer.ReadString(3);

        // Table offset
        currTableInfo.TableOffset = _buffer.ReadUInteger();

        // File count
        currTableInfo.FileCount = _buffer.ReadUInteger();

        _tableInfos.Add(currTableInfo);
    }

    // --- TABLES ---
    for (int i = 0; i < _header.TableCount; ++i)
    {
        // Fetch the table information
        TableInfo currTableInfo = _tableInfos[i];

        // Create a new table
        Table aktTabelle = new Table();
        aktTabelle.Entries = new List<TableEntry>();

        // Read the entries
        TableEntry currTableEntry;
        for (uint j = 0; j < currTableInfo.FileCount; ++j)
        {
            // New entry
            currTableEntry = new TableEntry();

            // Read the values
            currTableEntry.FileID = _buffer.ReadUInteger();
            currTableEntry.FileOffset = _buffer.ReadUInteger();
            currTableEntry.FileSize = _buffer.ReadUInteger();

            // Store the entry
            aktTabelle.Entries.Add(currTableEntry);
        }

        // Store the table
        _tables.Add(aktTabelle);
    }

    // --- FILES ---
    for (int i = 0; i < _tables.Count; ++i)
    {
        // Fetch the table
        Table currTable = _tables[i];

        // Walk through all entries
        TableEntry currTableEntry;
        for (int j = 0; j < currTable.Entries.Count; ++j)
        {
            // Fetch the entry
            currTableEntry = currTable.Entries[j];

            // Duplicate files are simply overwritten by the last occurrence
            _files[currTableEntry.FileID] = _buffer.ReadByteArray((int)currTableEntry.FileSize);
        }
    }

    // Clear the buffer to save memory
    _buffer.Clear();
}
/// <summary>
/// Adds a resource or replaces an existing one.
/// </summary>
/// <param name="data">The resource data to add or to replace.</param>
/// <param name="resourceID">The ID of the resource to add or replace.</param>
/// <param name="resourceType">The resource type, written backwards and three characters long.</param>
/// <remarks></remarks>
public void AddReplaceRessource(RAMBuffer data, ushort resourceID, string resourceType)
{
    // New resource?
    if (!_files.ContainsKey(resourceID))
    {
        // The table info / table to fill in below
        TableInfo myTI = new TableInfo();
        Table myT = new Table();

        // Search for the table with the matching resource type
        int i;
        bool found = false;
        for (i = 0; i < _tableInfos.Count; ++i)
        {
            // Matching resource type?
            if (_tableInfos[i].ResourceType == resourceType)
            {
                // Remember the table infos
                myTI = _tableInfos[i];
                myT = _tables[i];
                found = true;
                break;
            }
        }

        // New table required?
        if (!found)
        {
            // Create the table info
            myTI.Unknown1 = (byte)'a';
            myTI.TableOffset = 0;
            myTI.ResourceType = resourceType;
            myTI.FileCount = 0;
            _tableInfos.Add(myTI);

            // The new table is the only entry
            myT.Entries = new List<TableEntry>();
            _tables.Add(myT);
            // NOTE(review): after the Add, i == _tableInfos.Count - 1, so the
            // write-back below targets the newly added elements. It presumably
            // exists because TableInfo/Table are structs — confirm; for classes
            // it would be redundant.
        }

        // One more file
        myTI.FileCount += 1;

        // Create the table entry for the file
        TableEntry eintr = new TableEntry();
        eintr.FileSize = (uint)data.Length;
        eintr.FileID = resourceID;
        myT.Entries.Add(eintr);

        // Store the file
        data.Position = 0;
        _files.Add(resourceID, data.ReadByteArray(data.Length));

        // Store the table infos
        _tableInfos[i] = myTI;
        _tables[i] = myT;
    }
    else
    {
        // Replace the resource
        data.Position = 0;
        _files[resourceID] = data.ReadByteArray(data.Length);

        // Walk the tables and look for the matching file ID
        Table currTable;
        TableEntry currEntry;
        for (int i = 0; i < _tables.Count; ++i)
        {
            // Fetch the current table
            currTable = _tables[i];

            // Look for the entry with the matching file ID
            for (int j = 0; j < currTable.Entries.Count; ++j)
            {
                // Fetch the entry
                currEntry = currTable.Entries[j];

                // Matching ID?
                if (currEntry.FileID == resourceID)
                {
                    // Replace the file size
                    currEntry.FileSize = (uint)data.Length;
                    _tables[i].Entries[j] = currEntry;

                    // Done
                    return;
                }
            }
        }
    }
}
/// <summary>
/// Does a binary diff on the two streams and returns an <see cref="AddCopyList"/>
/// of the differences.
/// </summary>
/// <param name="BaseFile">The base file.</param>
/// <param name="VerFile">The version file.</param>
/// <returns>An AddCopyList that can be used later to construct the version file from the base file.</returns>
public AddCopyList Execute(Stream BaseFile, Stream VerFile)
{
    // Both streams are addressed by absolute offsets throughout, so they must be seekable.
    if (!BaseFile.CanSeek || !VerFile.CanSeek)
    {
        throw new ArgumentException("The Base and Version streams must support seeking.");
    }

    // Hash table mapping footprint hashes to (stream, offset) candidates.
    TableEntry[] arTable = new TableEntry[m_iTableSize];
    AddCopyList List = new AddCopyList();

    BaseFile.Seek(0, SeekOrigin.Begin);
    // NOTE(review): the version stream is seeked to End while the base stream is
    // seeked to Begin — presumably Footprint() seeks to the position it is given,
    // making this initial position irrelevant; confirm, otherwise this looks like
    // it should be SeekOrigin.Begin.
    VerFile.Seek(0, SeekOrigin.End);

    int iVerPos = 0;   // current scan position in the version file
    int iBasePos = 0;  // current scan position in the base file
    int iVerStart = 0; // start of the pending "add" run in the version file
    bool bBaseActive = true; // false once the base scan has passed the last full footprint

    uint uiVerHash = 0;
    uint uiBaseHash = 0;
    int iLastVerHashPos = 0;
    int iLastBaseHashPos = 0;

    while (iVerPos <= (VerFile.Length - m_iFootprintLength))
    {
        //The GetTableEntry procedure will add the entry if it isn't already there.
        //This gives us a default behavior of favoring the first match.
        uiVerHash = Footprint(VerFile, iVerPos, ref iLastVerHashPos, uiVerHash);
        TableEntry VerEntry = GetTableEntry(arTable, uiVerHash, VerFile, iVerPos);

        TableEntry BaseEntry = null;
        if (bBaseActive)
        {
            uiBaseHash = Footprint(BaseFile, iBasePos, ref iLastBaseHashPos, uiBaseHash);
            BaseEntry = GetTableEntry(arTable, uiBaseHash, BaseFile, iBasePos);
        }

        // Did the version footprint hit a candidate that points into the base file?
        if (BaseFile == VerEntry.File && Verify(BaseFile, VerEntry.Offset, VerFile, iVerPos))
        {
            // Emit the pending add plus a copy, then realign both scan positions.
            int iLength = EmitCodes(VerEntry.Offset, iVerPos, iVerStart, BaseFile, VerFile, List);
            iBasePos = VerEntry.Offset + iLength;
            iVerPos += iLength;
            iVerStart = iVerPos;
            FlushTable(arTable);
            continue;
        }
        else if (m_bFavorLastMatch)
        {
            // Overwrite the stored candidate so later matches win instead of the first.
            VerEntry.Offset = iVerPos;
            VerEntry.File = VerFile;
        }

        bBaseActive = bBaseActive && (iBasePos <= (BaseFile.Length - m_iFootprintLength));

        if (bBaseActive)
        {
            // Symmetric check: did the base footprint hit a candidate pointing
            // into the version file (and not before the pending add run)?
            if (VerFile == BaseEntry.File && Verify(VerFile, BaseEntry.Offset, BaseFile, iBasePos) && iVerStart <= BaseEntry.Offset)
            {
                int iLength = EmitCodes(iBasePos, BaseEntry.Offset, iVerStart, BaseFile, VerFile, List);
                iVerPos = BaseEntry.Offset + iLength;
                iBasePos += iLength;
                iVerStart = iVerPos;
                FlushTable(arTable);
                continue;
            }
            else if (m_bFavorLastMatch)
            {
                BaseEntry.Offset = iBasePos;
                BaseEntry.File = BaseFile;
            }
        }

        iVerPos++;
        iBasePos++;
    }

    // Flush the trailing unmatched bytes of the version file as a final add.
    EmitCodes((int)BaseFile.Length, (int)VerFile.Length, iVerStart, BaseFile, VerFile, List);

    Debug.Assert(List.TotalByteLength == (int)VerFile.Length, "The total byte length of the AddCopyList MUST equal the length of the version file!");
    return (List);
}
/// <summary>
/// Does a binary diff on the two streams and returns an <see cref="AddCopyList"/>
/// of the differences.
/// </summary>
/// <param name="BaseFile">The base file.</param>
/// <param name="VerFile">The version file.</param>
/// <returns>An AddCopyList that can be used later to construct the version file from the base file.</returns>
public AddCopyList Execute(Stream BaseFile, Stream VerFile)
{
    // Absolute offsets are used on both streams throughout, so both must be seekable.
    if (!BaseFile.CanSeek || !VerFile.CanSeek)
    {
        throw new ArgumentException("The Base and Version streams must support seeking.");
    }

    // Hash table mapping footprint hashes to (stream, offset) candidates.
    TableEntry[] arTable = new TableEntry[m_iTableSize];
    AddCopyList List = new AddCopyList();

    BaseFile.Seek(0, SeekOrigin.Begin);
    // NOTE(review): the version stream is seeked to End while the base stream is
    // seeked to Begin — presumably Footprint() seeks to the given position itself,
    // making the initial position irrelevant; confirm, otherwise this looks like
    // it should be SeekOrigin.Begin.
    VerFile.Seek(0, SeekOrigin.End);

    int iVerPos = 0;   // current scan position in the version file
    int iBasePos = 0;  // current scan position in the base file
    int iVerStart = 0; // start of the pending "add" run in the version file
    bool bBaseActive = true; // false once the base scan has passed the last full footprint

    uint uiVerHash = 0;
    uint uiBaseHash = 0;
    int iLastVerHashPos = 0;
    int iLastBaseHashPos = 0;

    while (iVerPos <= (VerFile.Length - m_iFootprintLength))
    {
        //The GetTableEntry procedure will add the entry if it isn't already there.
        //This gives us a default behavior of favoring the first match.
        uiVerHash = Footprint(VerFile, iVerPos, ref iLastVerHashPos, uiVerHash);
        TableEntry VerEntry = GetTableEntry(arTable, uiVerHash, VerFile, iVerPos);

        TableEntry BaseEntry = null;
        if (bBaseActive)
        {
            uiBaseHash = Footprint(BaseFile, iBasePos, ref iLastBaseHashPos, uiBaseHash);
            BaseEntry = GetTableEntry(arTable, uiBaseHash, BaseFile, iBasePos);
        }

        // Did the version footprint hit a candidate pointing into the base file?
        if (BaseFile == VerEntry.File && Verify(BaseFile, VerEntry.Offset, VerFile, iVerPos))
        {
            // Emit the pending add plus a copy, then realign both scan positions.
            int iLength = EmitCodes(VerEntry.Offset, iVerPos, iVerStart, BaseFile, VerFile, List);
            iBasePos = VerEntry.Offset + iLength;
            iVerPos += iLength;
            iVerStart = iVerPos;
            FlushTable(arTable);
            continue;
        }
        else if (m_bFavorLastMatch)
        {
            // Overwrite the stored candidate so later matches win instead of the first.
            VerEntry.Offset = iVerPos;
            VerEntry.File = VerFile;
        }

        bBaseActive = bBaseActive && (iBasePos <= (BaseFile.Length - m_iFootprintLength));

        if (bBaseActive)
        {
            // Symmetric check: did the base footprint hit a candidate pointing
            // into the version file (and not before the pending add run)?
            if (VerFile == BaseEntry.File && Verify(VerFile, BaseEntry.Offset, BaseFile, iBasePos) && iVerStart <= BaseEntry.Offset)
            {
                int iLength = EmitCodes(iBasePos, BaseEntry.Offset, iVerStart, BaseFile, VerFile, List);
                iVerPos = BaseEntry.Offset + iLength;
                iBasePos += iLength;
                iVerStart = iVerPos;
                FlushTable(arTable);
                continue;
            }
            else if (m_bFavorLastMatch)
            {
                BaseEntry.Offset = iBasePos;
                BaseEntry.File = BaseFile;
            }
        }

        iVerPos++;
        iBasePos++;
    }

    // Flush the trailing unmatched bytes of the version file as a final add.
    EmitCodes((int)BaseFile.Length, (int)VerFile.Length, iVerStart, BaseFile, VerFile, List);

    Debug.Assert(List.TotalByteLength == (int)VerFile.Length, "The total byte length of the AddCopyList MUST equal the length of the version file!");
    return List;
}
//
// UOP -> MUL
//
/// <summary>
/// Converts a UOP ("MYP") archive back into a legacy MUL + IDX file pair.
/// Chunks are identified by matching their stored file-name hash against
/// a precomputed hash table of every possible chunk name.
/// </summary>
/// <param name="inFile">Path of the source UOP file.</param>
/// <param name="outFile">Path of the MUL data file to create.</param>
/// <param name="outFileIdx">Path of the IDX file to create (legacy maps write no IDX records).</param>
/// <param name="type">Kind of MUL being produced; controls chunk placement and IDX extras.</param>
/// <param name="typeIndex">Index fed into the hash-format string (e.g. the map number).</param>
public void FromUOP(string inFile, string outFile, string outFileIdx, FileType type, int typeIndex)
{
    // Precompute hash -> chunk id for every possible chunk name.
    Dictionary<ulong, int> chunkIds = new Dictionary<ulong, int>();

    int maxId;
    string format = GetHashFormat(type, typeIndex, out maxId);

    for (int i = 0; i < maxId; ++i)
    {
        chunkIds[HashLittle2(String.Format(format, i))] = i;
    }

    bool[] used = new bool[maxId];

    using (BinaryReader reader = OpenInput(inFile))
    using (BinaryWriter writer = OpenOutput(outFile))
    using (BinaryWriter writerIdx = OpenOutput(outFileIdx))
    {
        if (reader.ReadInt32() != 0x50594D) // MYP
        {
            throw new ArgumentException("inFile is not a UOP file.");
        }

        Stream stream = reader.BaseStream;

        int version = reader.ReadInt32();
        reader.ReadInt32(); // format timestamp? 0xFD23EC43
        long nextTable = reader.ReadInt64();

        do
        {
            // Table header
            stream.Seek(nextTable, SeekOrigin.Begin);
            int entries = reader.ReadInt32();
            nextTable = reader.ReadInt64();

            // Table entries
            TableEntry[] offsets = new TableEntry[entries];

            for (int i = 0; i < entries; ++i)
            {
                /*
                 * Empty entries are read too, because they do not always indicate the
                 * end of the table. (Example: 7.0.26.4+ Fel/Tram maps)
                 */
                offsets[i].m_Offset = reader.ReadInt64();
                offsets[i].m_HeaderLength = reader.ReadInt32(); // header length
                offsets[i].m_Size = reader.ReadInt32(); // compressed size
                reader.ReadInt32(); // decompressed size
                offsets[i].m_Identifier = reader.ReadUInt64(); // filename hash (HashLittle2)
                offsets[i].m_Hash = reader.ReadUInt32(); // data hash (Adler32)
                reader.ReadInt16(); // compression method (0 = none, 1 = zlib)
            }

            // Copy chunks
            for (int i = 0; i < offsets.Length; ++i)
            {
                if (offsets[i].m_Offset == 0)
                {
                    continue; // skip empty entry
                }

                int chunkID;

                if (!chunkIds.TryGetValue(offsets[i].m_Identifier, out chunkID))
                {
                    throw new Exception("Unknown identifier encountered");
                }

                stream.Seek(offsets[i].m_Offset + offsets[i].m_HeaderLength, SeekOrigin.Begin);
                byte[] chunkData = reader.ReadBytes(offsets[i].m_Size);

                if (type == FileType.MapLegacyMUL)
                {
                    // Write this chunk on the right position (no IDX file to point to it)
                    writer.Seek(chunkID * 0xC4000, SeekOrigin.Begin);
                    writer.Write(chunkData);
                }
                else
                {
                    int dataOffset = 0;

                    #region Idx
                    writerIdx.Seek(chunkID * 12, SeekOrigin.Begin);
                    writerIdx.Write((int)writer.BaseStream.Position); // Position

                    switch (type)
                    {
                        case FileType.GumpartLegacyMUL:
                        {
                            // Width and height are prepended to the data
                            int width = (chunkData[0] | (chunkData[1] << 8) | (chunkData[2] << 16) | (chunkData[3] << 24));
                            int height = (chunkData[4] | (chunkData[5] << 8) | (chunkData[6] << 16) | (chunkData[7] << 24));

                            writerIdx.Write(offsets[i].m_Size - 8);
                            writerIdx.Write((width << 16) | height);

                            dataOffset = 8;
                            break;
                        }
                        case FileType.SoundLegacyMUL:
                        {
                            // Extra contains the ID of this sound file + 1
                            writerIdx.Write(offsets[i].m_Size);
                            writerIdx.Write(chunkID + 1);
                            break;
                        }
                        default:
                        {
                            writerIdx.Write(offsets[i].m_Size); // Size
                            writerIdx.Write((int)0); // Extra
                            break;
                        }
                    }

                    used[chunkID] = true;
                    #endregion

                    writer.Write(chunkData, dataOffset, chunkData.Length - dataOffset);
                }
            }

            // Move to next table
            if (nextTable != 0)
            {
                stream.Seek(nextTable, SeekOrigin.Begin);
            }
        }
        while (nextTable != 0);

        // Fix idx
        // TODO: Only go until the last used entry? Does the client mind?
        if (writerIdx != null)
        {
            // Mark every never-written IDX slot as unused (-1 offset).
            for (int i = 0; i < used.Length; ++i)
            {
                if (!used[i])
                {
                    writerIdx.Seek(i * 12, SeekOrigin.Begin);
                    writerIdx.Write(-1);
                    writerIdx.Write((long)0);
                }
            }
        }
    }
}
//
// MUL -> UOP
//
/// <summary>
/// Converts a legacy MUL + IDX file pair into a UOP ("MYP") archive.
/// Supports optional zlib compression (disabled by default, since the client
/// expects art/gumpart/map uncompressed) and the MultiCollection special case,
/// which appends "build/multicollection/housing.bin" as the final entry.
/// </summary>
/// <param name="inFile">Path of the source MUL data file.</param>
/// <param name="inFileIdx">Path of the source IDX file (not used for legacy maps, which have no IDX).</param>
/// <param name="outFile">Path of the UOP file to create.</param>
/// <param name="type">Kind of MUL being converted; controls chunking, name hashing and per-entry extras.</param>
/// <param name="typeIndex">Index fed into the hash-format string (e.g. the map number).</param>
public void ToUOP(string inFile, string inFileIdx, string outFile, FileType type, int typeIndex)
{
    // Same for all UOP files
    long firstTable = 0x200;
    int tableSize = 100; //0x3E8; // block size (files per block)
    short compress = 0; // art, gumpart, map, etc are expected to be uncompressed, if we compress them they won't be loaded by the client...

    // Sanity, in case firstTable is customized by you!
    if (firstTable < 0x28)
    {
        throw new Exception("At least 0x28 bytes are needed for the header.");
    }

    using (BinaryReader reader = OpenInput(inFile))
    using (BinaryReader readerIdx = OpenInput(inFileIdx))
    using (BinaryWriter writer = OpenOutput(outFile))
    {
        List<IdxEntry> idxEntries;

        if (type == FileType.MapLegacyMUL)
        {
            // No IDX file, just group the data into 0xC4000 long chunks
            int length = (int)reader.BaseStream.Length;
            idxEntries = new List<IdxEntry>((int)Math.Ceiling((double)length / 0xC4000));

            int position = 0;
            int id = 0;

            while (position < length)
            {
                IdxEntry e = new IdxEntry();
                e.m_Id = id++;
                e.m_Offset = position;
                e.m_Size = 0xC4000;
                e.m_Extra = 0;

                idxEntries.Add(e);
                position += 0xC4000;
            }
        }
        else
        {
            // Each IDX record is 12 bytes: offset, size, extra.
            int idxEntryCount = (int)(readerIdx.BaseStream.Length / 12);
            idxEntries = new List<IdxEntry>(idxEntryCount);

            for (int i = 0; i < idxEntryCount; ++i)
            {
                int offset = readerIdx.ReadInt32();

                // A negative offset marks an unused IDX slot.
                if (offset < 0)
                {
                    readerIdx.BaseStream.Seek(8, SeekOrigin.Current); // skip
                    continue;
                }

                IdxEntry e = new IdxEntry();
                e.m_Id = i;
                e.m_Offset = offset;
                e.m_Size = readerIdx.ReadInt32();
                e.m_Extra = readerIdx.ReadInt32();

                idxEntries.Add(e);
            }
        }

        int fileCount = idxEntries.Count;
        if (type == FileType.MultiMUL)
        {
            ++fileCount; // for "housing.bin"
            idxEntries.Add(new IdxEntry());
        }

        // File header
        writer.Write(0x50594D);   // MYP
        writer.Write(5);          // version
        writer.Write(0xFD23EC43); // format timestamp?
        writer.Write(firstTable); // first table
        writer.Write(tableSize);  // table (block) size (= files per block)
        writer.Write(fileCount);  // file count
        writer.Write(1);          // modified count?
        writer.Write(1);          // ?
        writer.Write(0);          // ?

        // Padding
        for (int i = 0x28; i < firstTable; ++i)
        {
            writer.Write((byte)0);
        }

        int tableCount = (int)Math.Ceiling((double)fileCount / tableSize);

        TableEntry[] tableEntries = new TableEntry[tableSize];

        int maxId;
        string[] hashFormat = GetHashFormat(type, typeIndex, out maxId);

        for (int i = 0; i < tableCount; ++i)
        {
            long thisTable = writer.BaseStream.Position;

            int idxStart = i * tableSize;
            int idxEnd = Math.Min((i + 1) * tableSize, fileCount);

            // Table header
            writer.Write(idxEnd - idxStart); // files in this block
            writer.Write((long)0); // next table, filled in later
            writer.Seek(34 * tableSize, SeekOrigin.Current); // table entries, filled in later

            // Data
            int tableIdx = 0;

            for (int j = idxStart; j < idxEnd; ++j, ++tableIdx)
            {
                // Special case: MultiCollection.uop
                if ((type == FileType.MultiMUL) && (i == tableCount - 1) && (j == idxEnd - 1))
                {
                    if (File.Exists("housing.bin"))
                    {
                        FileInfo binInfo = new FileInfo("housing.bin");

                        // MultiCollection.uop has the file "build/multicollection/housing.bin", which has to be treated separately
                        using (BinaryReader readerBin = OpenInput("housing.bin"))
                        {
                            byte[] binData = new byte[binInfo.Length];
                            int readLen = readerBin.Read(binData, 0, (int)binInfo.Length);

                            tableEntries[tableIdx].m_Offset = writer.BaseStream.Position;
                            tableEntries[tableIdx].m_Size = readLen;
                            tableEntries[tableIdx].m_Identifier = HashLittle2("build/multicollection/housing.bin");
                            tableEntries[tableIdx].m_Hash = HashAdler32(binData);

                            writer.Write(binData, 0, readLen);
                        }
                    }

                    continue;
                }

                reader.BaseStream.Seek(idxEntries[j].m_Offset, SeekOrigin.Begin);
                byte[] data = reader.ReadBytes(idxEntries[j].m_Size);

                int sizeDecompressed = data.Length;

                if (type == FileType.GumpartLegacyMUL)
                {
                    // Prepend width/height from IDX's extra
                    sizeDecompressed += 8;

                    int width = (idxEntries[j].m_Extra >> 16) & 0xFFFF;
                    int height = idxEntries[j].m_Extra & 0xFFFF;

                    // Rebuild the buffer with an 8-byte little-endian width/height prefix.
                    byte[] dataCopy = data;
                    data = new byte[sizeDecompressed];
                    data[0] = (byte)(width & 0xFF);
                    data[1] = (byte)((width >> 8) & 0xFF);
                    data[2] = (byte)((width >> 16) & 0xFF);
                    data[3] = (byte)((width >> 24) & 0xFF);
                    data[4] = (byte)(height & 0xFF);
                    data[5] = (byte)((height >> 8) & 0xFF);
                    data[6] = (byte)((height >> 16) & 0xFF);
                    data[7] = (byte)((height >> 24) & 0xFF);
                    Array.Copy(dataCopy, 0, data, 8, sizeDecompressed - 8);
                }

                int sizeOut;
                byte[] dataOut;

                if (compress != 0)
                {
                    sizeOut = Zlib.CompressBound((ulong)sizeDecompressed); // estimated maximum size
                    dataOut = new byte[sizeOut];
                    Zlib.Compress(dataOut, ref sizeOut, data, sizeDecompressed);
                }
                else
                {
                    sizeOut = sizeDecompressed;
                    dataOut = data;
                }

                tableEntries[tableIdx].m_Offset = writer.BaseStream.Position;
                tableEntries[tableIdx].m_Compression = compress;
                tableEntries[tableIdx].m_Size = sizeOut;
                tableEntries[tableIdx].m_SizeDecompressed = sizeDecompressed;

                // hash 906142efe9fdb38a, which is file 0009834.tga (and no others, as 7.0.59.5) use a different name format (7 digits instead of 8);
                // if in newer versions more of these files will have adopted that format, someone should update this list of exceptions
                // (even if this seems so much like a typo from someone from the UO development team :P )
                if ((type == FileType.GumpartLegacyMUL) && (idxEntries[j].m_Id == 9834))
                {
                    tableEntries[tableIdx].m_Identifier = HashLittle2(String.Format(hashFormat[1], idxEntries[j].m_Id));
                }
                else
                {
                    tableEntries[tableIdx].m_Identifier = HashLittle2(String.Format(hashFormat[0], idxEntries[j].m_Id));
                }

                tableEntries[tableIdx].m_Hash = HashAdler32(dataOut);

                writer.Write(dataOut, 0, sizeOut);
            }

            long nextTable = writer.BaseStream.Position;

            // Go back and fix table header
            if (i < tableCount - 1)
            {
                writer.BaseStream.Seek(thisTable + 4, SeekOrigin.Begin);
                writer.Write(nextTable);
            }
            else
            {
                writer.BaseStream.Seek(thisTable + 12, SeekOrigin.Begin);
                // No need to fix the next table address, it's the last
            }

            // Table entries
            tableIdx = 0;

            for (int j = idxStart; j < idxEnd; ++j, ++tableIdx)
            {
                writer.Write(tableEntries[tableIdx].m_Offset);
                writer.Write(0); // header length
                writer.Write(tableEntries[tableIdx].m_Size); // compressed size
                writer.Write(tableEntries[tableIdx].m_SizeDecompressed); // decompressed size
                writer.Write(tableEntries[tableIdx].m_Identifier);
                writer.Write(tableEntries[tableIdx].m_Hash);
                writer.Write(tableEntries[tableIdx].m_Compression); // compression method
            }

            // Fill remainder with empty entries
            for ( ; tableIdx < tableSize; ++tableIdx)
            {
                writer.Write(m_EmptyTableEntry);
            }

            writer.BaseStream.Seek(nextTable, SeekOrigin.Begin);
        }
    }
}
/// <summary>
/// Initializes the blender with one table entry per possible byte value
/// (0..255), each created with size 8 and equal 0.5/0.5 weights.
/// </summary>
public Blender()
{
    for (int index = 0; index < 256; ++index)
    {
        BlendTable[index] = new TableEntry(index, 8, 0.5, 0.5);
    }
}
//
// UOP -> MUL
//
/// <summary>
/// Converts a UOP ("MYP") archive back into a legacy MUL + IDX file pair.
/// Handles zlib-compressed entries, two alternative file-name hash formats,
/// and the MultiCollection special case, which extracts
/// "build/multicollection/housing.bin" to a separate file.
/// </summary>
/// <param name="inFile">Path of the source UOP file.</param>
/// <param name="outFile">Path of the MUL data file to create.</param>
/// <param name="outFileIdx">Path of the IDX file to create (legacy maps write no IDX records).</param>
/// <param name="type">Kind of MUL being produced; controls chunk placement and IDX extras.</param>
/// <param name="typeIndex">Index fed into the hash-format strings (e.g. the map number).</param>
public void FromUOP(string inFile, string outFile, string outFileIdx, FileType type, int typeIndex)
{
    // Precompute hash -> chunk id lookups for both known file-name formats.
    Dictionary<ulong, int> chunkIds = new Dictionary<ulong, int>();
    Dictionary<ulong, int> chunkIds2 = new Dictionary<ulong, int>();

    int maxId;
    string[] formats = GetHashFormat(type, typeIndex, out maxId);

    for (int i = 0; i < maxId; ++i)
    {
        chunkIds[HashLittle2(String.Format(formats[0], i))] = i;
    }

    if (formats[1] != "")
    {
        for (int i = 0; i < maxId; ++i)
        {
            chunkIds2[HashLittle2(String.Format(formats[1], i))] = i;
        }
    }

    bool[] used = new bool[maxId];

    using (BinaryReader reader = OpenInput(inFile))
    using (BinaryWriter writer = OpenOutput(outFile))
    using (BinaryWriter writerIdx = OpenOutput(outFileIdx))
    {
        if (reader.ReadInt32() != 0x50594D) // MYP
        {
            throw new ArgumentException("inFile is not a UOP file.");
        }

        Stream stream = reader.BaseStream;

        int version = reader.ReadInt32();
        reader.ReadInt32(); // format timestamp? 0xFD23EC43
        long nextTable = reader.ReadInt64();

        do
        {
            // Table header
            stream.Seek(nextTable, SeekOrigin.Begin);
            int entries = reader.ReadInt32();
            nextTable = reader.ReadInt64();

            // Table entries
            TableEntry[] offsets = new TableEntry[entries];

            for (int i = 0; i < entries; ++i)
            {
                /*
                 * Empty entries are read too, because they do not always indicate the
                 * end of the table. (Example: 7.0.26.4+ Fel/Tram maps)
                 */
                offsets[i].m_Offset = reader.ReadInt64();
                offsets[i].m_HeaderLength = reader.ReadInt32(); // header length
                offsets[i].m_Size = reader.ReadInt32(); // compressed size
                offsets[i].m_SizeDecompressed = reader.ReadInt32(); // decompressed size
                offsets[i].m_Identifier = reader.ReadUInt64(); // filename hash (HashLittle2)
                offsets[i].m_Hash = reader.ReadUInt32(); // data hash (Adler32)
                offsets[i].m_Compression = reader.ReadInt16(); // compression method (0 = none, 1 = zlib)
            }

            // Copy chunks
            for (int i = 0; i < offsets.Length; ++i)
            {
                if (offsets[i].m_Offset == 0)
                {
                    continue; // skip empty entry
                }

                if ((type == FileType.MultiMUL) && (offsets[i].m_Identifier == 0x126D1E99DDEDEE0A))
                {
                    // MultiCollection.uop has the file "build/multicollection/housing.bin", which has to be handled separately
                    using (BinaryWriter writerBin = OpenOutput("housing.bin"))
                    {
                        stream.Seek(offsets[i].m_Offset + offsets[i].m_HeaderLength, SeekOrigin.Begin);
                        byte[] binData = reader.ReadBytes(offsets[i].m_Size);

                        byte[] binDataToWrite;
                        if (offsets[i].m_Compression == 1)
                        {
                            byte[] binDataDecompressed = new byte[offsets[i].m_SizeDecompressed];
                            Zlib.Decompress(binDataDecompressed, ref offsets[i].m_SizeDecompressed, binData, offsets[i].m_Size);
                            binDataToWrite = binDataDecompressed;
                        }
                        else
                        {
                            binDataToWrite = binData;
                        }

                        writerBin.Write(binDataToWrite, 0, binDataToWrite.Length);
                    }

                    continue;
                }

                int chunkID = -1;

                if (!chunkIds.TryGetValue(offsets[i].m_Identifier, out chunkID))
                {
                    // Fall back to the secondary name format before giving up.
                    int tmpChunkID = -1;
                    if (!chunkIds2.TryGetValue(offsets[i].m_Identifier, out tmpChunkID))
                    {
                        throw new Exception(String.Format("Unknown identifier encountered ({0:X})", offsets[i].m_Identifier));
                    }
                    else
                    {
                        chunkID = tmpChunkID;
                    }
                }

                stream.Seek(offsets[i].m_Offset + offsets[i].m_HeaderLength, SeekOrigin.Begin);
                byte[] chunkDataRaw = reader.ReadBytes(offsets[i].m_Size);

                // Inflate the chunk if it was stored zlib-compressed.
                byte[] chunkData;
                if (offsets[i].m_Compression == 1)
                {
                    byte[] chunkDataDecompressed = new byte[offsets[i].m_SizeDecompressed];
                    Zlib.Decompress(chunkDataDecompressed, ref offsets[i].m_SizeDecompressed, chunkDataRaw, offsets[i].m_Size);
                    chunkData = chunkDataDecompressed;
                }
                else
                {
                    chunkData = chunkDataRaw;
                }

                if (type == FileType.MapLegacyMUL)
                {
                    // Write this chunk on the right position (no IDX file to point to it)
                    writer.Seek(chunkID * 0xC4000, SeekOrigin.Begin);
                    writer.Write(chunkData);
                }
                else
                {
                    int dataOffset = 0;

                    #region Idx
                    writerIdx.Seek(chunkID * 12, SeekOrigin.Begin);
                    writerIdx.Write((uint)writer.BaseStream.Position); // Position

                    switch (type)
                    {
                        case FileType.GumpartLegacyMUL:
                        {
                            // Width and height are prepended to the data
                            int width = (chunkData[0] | (chunkData[1] << 8) | (chunkData[2] << 16) | (chunkData[3] << 24));
                            int height = (chunkData[4] | (chunkData[5] << 8) | (chunkData[6] << 16) | (chunkData[7] << 24));

                            writerIdx.Write(chunkData.Length - 8);
                            writerIdx.Write((width << 16) | height);

                            dataOffset = 8;
                            break;
                        }
                        case FileType.SoundLegacyMUL:
                        {
                            // Extra contains the ID of this sound file + 1
                            writerIdx.Write(chunkData.Length);
                            writerIdx.Write(chunkID + 1);
                            break;
                        }
                        default:
                        {
                            writerIdx.Write(chunkData.Length); // Size
                            writerIdx.Write((int)0); // Extra
                            break;
                        }
                    }

                    used[chunkID] = true;
                    #endregion

                    writer.Write(chunkData, dataOffset, chunkData.Length - dataOffset);
                }
            }

            // Move to next table
            if (nextTable != 0)
            {
                stream.Seek(nextTable, SeekOrigin.Begin);
            }
        }
        while (nextTable != 0);

        // Fix idx
        // TODO: Only go until the last used entry? Does the client mind?
        if (writerIdx != null)
        {
            // Mark every never-written IDX slot as unused (-1 offset).
            for (int i = 0; i < used.Length; ++i)
            {
                if (!used[i])
                {
                    writerIdx.Seek(i * 12, SeekOrigin.Begin);
                    writerIdx.Write(-1);
                    writerIdx.Write((long)0);
                }
            }
        }
    }
}
/// <summary>
/// Fills the whole table from the given level hierarchy, starting at row 0
/// and column 0, formatting numbers with the client's culture.
/// </summary>
/// <param name="level">Root level whose ordered children populate the table.</param>
/// <param name="table">Destination jagged array of table cells.</param>
/// <param name="clientCulture">Culture used to format numeric cell values.</param>
private static void PopulateTable(Level level, TableEntry[][] table, CultureInfo clientCulture)
{
    // The recursive walker advances this row cursor as it emits rows.
    int currentRow = 0;
    InnerPopulateTable(level, table, ref currentRow, 0, clientCulture);
}
//
// MUL -> UOP
//
// Converts a legacy MUL/IDX file pair into a single UOP ("MYP") package.
// Layout written: fixed header at offset 0, zero padding up to firstTable,
// then one or more tables of up to tableSize entries, each table followed
// immediately by its entries' data chunks. Table headers are back-patched
// once the data positions are known.
public void ToUOP( string inFile, string inFileIdx, string outFile, FileType type, int typeIndex )
{
    // Same for all UOP files
    long firstTable = 0x200;
    int tableSize = 0x3E8;

    // Sanity, in case firstTable is customized by you!
    if ( firstTable < 0x28 )
        throw new Exception( "At least 0x28 bytes are needed for the header." );

    using ( BinaryReader reader = OpenInput( inFile ) )
    using ( BinaryReader readerIdx = OpenInput( inFileIdx ) )
    using ( BinaryWriter writer = OpenOutput( outFile ) )
    {
        List<IdxEntry> idxEntries;

        if ( type == FileType.MapLegacyMUL )
        {
            // No IDX file, just group the data into 0xC4000 long chunks
            int length = (int)reader.BaseStream.Length;
            idxEntries = new List<IdxEntry>( (int)Math.Ceiling( (double)length / 0xC4000 ) );

            int position = 0;
            int id = 0;

            while ( position < length )
            {
                IdxEntry e = new IdxEntry();
                e.m_Id = id++;
                e.m_Offset = position;
                e.m_Size = 0xC4000;
                e.m_Extra = 0;

                idxEntries.Add( e );
                position += 0xC4000;
            }
        }
        else
        {
            // Read the IDX file: 12 bytes per record (offset, size, extra).
            int idxEntryCount = (int)( readerIdx.BaseStream.Length / 12 );
            idxEntries = new List<IdxEntry>( idxEntryCount );

            for ( int i = 0; i < idxEntryCount; ++i )
            {
                int offset = readerIdx.ReadInt32();

                // A negative offset marks an unused IDX slot; skip its
                // remaining two int32 fields and move on.
                if ( offset < 0 )
                {
                    readerIdx.BaseStream.Seek( 8, SeekOrigin.Current ); // skip
                    continue;
                }

                IdxEntry e = new IdxEntry();
                e.m_Id = i;
                e.m_Offset = offset;
                e.m_Size = readerIdx.ReadInt32();
                e.m_Extra = readerIdx.ReadInt32();

                idxEntries.Add( e );
            }
        }

        // File header
        writer.Write( 0x50594D ); // MYP
        writer.Write( 5 ); // version
        writer.Write( 0xFD23EC43 ); // format timestamp?
        writer.Write( firstTable ); // first table
        writer.Write( tableSize ); // table size
        writer.Write( idxEntries.Count ); // file count
        writer.Write( 1 ); // modified count?
        writer.Write( 1 ); // ?
        writer.Write( 0 ); // ?

        // Padding: zero-fill from the end of the 0x28-byte header up to
        // where the first table must begin.
        for ( int i = 0x28; i < firstTable; ++i )
            writer.Write( (byte)0 );

        int tableCount = (int)Math.Ceiling( (double)idxEntries.Count / tableSize );

        // Reused scratch array; only the first (idxEnd - idxStart) slots
        // are meaningful per table iteration.
        TableEntry[] tableEntries = new TableEntry[tableSize];

        int maxId;
        string hashFormat = GetHashFormat( type, typeIndex, out maxId );

        for ( int i = 0; i < tableCount; ++i )
        {
            long thisTable = writer.BaseStream.Position;

            int idxStart = i * tableSize;
            int idxEnd = Math.Min( ( i + 1 ) * tableSize, idxEntries.Count );

            // Table header
            writer.Write( tableSize );
            writer.Write( (long)0 ); // next table, filled in later
            // Reserve space for the entries: each serialized entry is
            // 34 bytes (8+4+4+4+8+4+2).
            writer.Seek( 34 * tableSize, SeekOrigin.Current ); // table entries, filled in later

            // Data: copy each chunk from the MUL, recording where it landed
            // so the entry records can be written afterwards.
            int tableIdx = 0;
            for ( int j = idxStart; j < idxEnd; ++j, ++tableIdx )
            {
                reader.BaseStream.Seek( idxEntries[j].m_Offset, SeekOrigin.Begin );
                byte[] data = reader.ReadBytes( idxEntries[j].m_Size );

                tableEntries[tableIdx].m_Offset = writer.BaseStream.Position;
                tableEntries[tableIdx].m_Size = data.Length;
                // Identifier is the HashLittle2 of the virtual file name built
                // from the format string and the chunk id.
                tableEntries[tableIdx].m_Identifier = HashLittle2( String.Format( hashFormat, idxEntries[j].m_Id ) );
                tableEntries[tableIdx].m_Hash = HashAdler32( data );

                if ( type == FileType.GumpartLegacyMUL )
                {
                    // Prepend width/height from IDX's extra
                    int width = ( idxEntries[j].m_Extra >> 16 ) & 0xFFFF;
                    int height = idxEntries[j].m_Extra & 0xFFFF;

                    writer.Write( width );
                    writer.Write( height );
                    tableEntries[tableIdx].m_Size += 8;
                }

                writer.Write( data );
            }

            long nextTable = writer.BaseStream.Position;

            // Go back and fix table header
            if ( i < tableCount - 1 )
            {
                // Writing the 8-byte next-table pointer at thisTable + 4
                // leaves the stream positioned exactly at the entry records
                // (thisTable + 12).
                writer.BaseStream.Seek( thisTable + 4, SeekOrigin.Begin );
                writer.Write( nextTable );
            }
            else
            {
                writer.BaseStream.Seek( thisTable + 12, SeekOrigin.Begin );
                // No need to fix the next table address, it's the last
            }

            // Table entries
            tableIdx = 0;
            for ( int j = idxStart; j < idxEnd; ++j, ++tableIdx )
            {
                writer.Write( tableEntries[tableIdx].m_Offset );
                writer.Write( 0 ); // header length
                writer.Write( tableEntries[tableIdx].m_Size ); // compressed size
                // Identical to the compressed size because nothing is
                // actually compressed (method 0 below).
                writer.Write( tableEntries[tableIdx].m_Size ); // decompressed size
                writer.Write( tableEntries[tableIdx].m_Identifier );
                writer.Write( tableEntries[tableIdx].m_Hash );
                writer.Write( (short)0 ); // compression method, none
            }

            // Fill remainder with empty entries
            for ( ; tableIdx < tableSize; ++tableIdx )
                writer.Write( m_EmptyTableEntry );

            // Resume writing where the data for this table ended, i.e. where
            // the next table header (if any) begins.
            writer.BaseStream.Seek( nextTable, SeekOrigin.Begin );
        }
    }
}
//
// UOP -> MUL
//
// Converts a UOP ("MYP") package back into a legacy MUL file plus its IDX
// file. Chunks are matched to their original MUL slots by precomputing the
// HashLittle2 of every possible virtual file name and looking each table
// entry's identifier up in that map. Unused IDX slots are back-filled with
// -1 at the end.
public void FromUOP( string inFile, string outFile, string outFileIdx, FileType type, int typeIndex )
{
    // Map: filename hash -> chunk id, covering every id this file type can hold.
    Dictionary<ulong, int> chunkIds = new Dictionary<ulong, int>();

    int maxId;
    string format = GetHashFormat( type, typeIndex, out maxId );

    for ( int i = 0; i < maxId; ++i )
        chunkIds[HashLittle2( String.Format( format, i ) )] = i;

    // Tracks which IDX slots received data, so the rest can be invalidated.
    bool[] used = new bool[maxId];

    using ( BinaryReader reader = OpenInput( inFile ) )
    using ( BinaryWriter writer = OpenOutput( outFile ) )
    using ( BinaryWriter writerIdx = OpenOutput( outFileIdx ) )
    {
        if ( reader.ReadInt32() != 0x50594D ) // MYP
            throw new ArgumentException( "inFile is not a UOP file." );

        Stream stream = reader.BaseStream;

        int version = reader.ReadInt32(); // read but not validated
        reader.ReadInt32(); // format timestamp? 0xFD23EC43
        long nextTable = reader.ReadInt64();

        do
        {
            // Table header
            stream.Seek( nextTable, SeekOrigin.Begin );
            int entries = reader.ReadInt32();
            nextTable = reader.ReadInt64();

            // Table entries
            TableEntry[] offsets = new TableEntry[entries];

            for ( int i = 0; i < entries; ++i )
            {
                /*
                 * Empty entries are read too, because they do not always indicate the
                 * end of the table. (Example: 7.0.26.4+ Fel/Tram maps)
                 */
                offsets[i].m_Offset = reader.ReadInt64();
                offsets[i].m_HeaderLength = reader.ReadInt32(); // header length
                offsets[i].m_Size = reader.ReadInt32(); // compressed size
                reader.ReadInt32(); // decompressed size
                offsets[i].m_Identifier = reader.ReadUInt64(); // filename hash (HashLittle2)
                offsets[i].m_Hash = reader.ReadUInt32(); // data hash (Adler32)
                reader.ReadInt16(); // compression method (0 = none, 1 = zlib)
            }

            // Copy chunks
            for ( int i = 0; i < offsets.Length; ++i )
            {
                if ( offsets[i].m_Offset == 0 )
                    continue; // skip empty entry

                int chunkID;

                if ( !chunkIds.TryGetValue( offsets[i].m_Identifier, out chunkID ) )
                    throw new Exception( "Unknown identifier encountered" );

                stream.Seek( offsets[i].m_Offset + offsets[i].m_HeaderLength, SeekOrigin.Begin );
                byte[] chunkData = reader.ReadBytes( offsets[i].m_Size );

                if ( type == FileType.MapLegacyMUL )
                {
                    // Write this chunk on the right position (no IDX file to point to it)
                    writer.Seek( chunkID * 0xC4000, SeekOrigin.Begin );
                    writer.Write( chunkData );
                }
                else
                {
                    int dataOffset = 0;

                    #region Idx
                    // 12 bytes per IDX record: position, size, extra.
                    writerIdx.Seek( chunkID * 12, SeekOrigin.Begin );
                    writerIdx.Write( (int)writer.BaseStream.Position ); // Position

                    switch ( type )
                    {
                        case FileType.GumpartLegacyMUL:
                        {
                            // Width and height are prepended to the data
                            // (little-endian int32 each); they move into the
                            // IDX 'extra' field, so skip those 8 bytes when
                            // writing the MUL data.
                            int width = ( chunkData[0] | ( chunkData[1] << 8 ) | ( chunkData[2] << 16 ) | ( chunkData[3] << 24 ) );
                            int height = ( chunkData[4] | ( chunkData[5] << 8 ) | ( chunkData[6] << 16 ) | ( chunkData[7] << 24 ) );

                            writerIdx.Write( offsets[i].m_Size - 8 );
                            writerIdx.Write( ( width << 16 ) | height );
                            dataOffset = 8;
                            break;
                        }
                        case FileType.SoundLegacyMUL:
                        {
                            // Extra contains the ID of this sound file + 1
                            writerIdx.Write( offsets[i].m_Size );
                            writerIdx.Write( chunkID + 1 );
                            break;
                        }
                        default:
                        {
                            writerIdx.Write( offsets[i].m_Size ); // Size
                            writerIdx.Write( (int)0 ); // Extra
                            break;
                        }
                    }

                    used[chunkID] = true;
                    #endregion

                    writer.Write( chunkData, dataOffset, chunkData.Length - dataOffset );
                }
            }

            // Move to next table
            if ( nextTable != 0 )
                stream.Seek( nextTable, SeekOrigin.Begin );
        }
        while ( nextTable != 0 );

        // Fix idx: mark every slot that never received data as unused
        // (offset -1, size/extra zero).
        // TODO: Only go until the last used entry? Does the client mind?
        if ( writerIdx != null )
        {
            for ( int i = 0; i < used.Length; ++i )
            {
                if ( !used[i] )
                {
                    writerIdx.Seek( i * 12, SeekOrigin.Begin );
                    writerIdx.Write( -1 );
                    writerIdx.Write( (long)0 );
                }
            }
        }
    }
}
// Entry point for filling the report table: starts the recursive walk of the
// Level tree at the top-left cell (row 0, column 0).
private static void PopulateTable(Level level, TableEntry[][] table)
{
    int currentRow = 0;
    InnerPopulateTable(level, table, ref currentRow, 0);
}
// Returns the entry occupying this hash's slot in the direct-mapped table,
// lazily creating (and storing) a new entry pointing at the given stream
// position when the slot is still empty. Note: an existing occupant is
// returned as-is, even if it was created for a different hash.
private TableEntry GetTableEntry(TableEntry[] arTable, uint uiHash, Stream File, int iPos)
{
    int slot = (int)(uiHash % arTable.Length);

    TableEntry entry = arTable[slot];

    if (entry != null)
        return entry;

    entry = new TableEntry();
    entry.File = File;
    entry.Offset = iPos;
    arTable[slot] = entry;

    return entry;
}