public void Method1_Implement_Queue_Using_Stack_EnQueueIsCostly(int[] arr)
{
    /*
     * Complexity Analysis:
     * Time Complexity:
     *   EnQueue operation: O(N).
     *     In the worst case every enqueue empties the whole of stack 1 into stack 2 (and back again).
     *   DeQueue operation: O(1).
     *     Same as the pop operation on a stack.
     * Auxiliary Space: O(N).
     *   The stacks are used for storing the values.
     */
    var Queue = new StackQueue<int>();
    foreach (var item in arr)
    {
        Queue.EnQueue1(item);
    }
    for (int i = 0; i < arr.Length; i++)
    {
        var data = Queue.DeQueue1();
        Assert.Equal(arr[i], data);
        output.WriteLine(data.ToString());
    }
}
public void Method2_Implement_Queue_Using_Stack_DeQueueIsCostlyAtFirstTime(int[] arr)
{
    /* MUCH FASTER!
     * Complexity Analysis:
     * Time Complexity:
     *   EnQueue operation: O(1).
     *     Same as the push operation on a stack.
     *   DeQueue operation: O(N) in the worst case.
     *     The difference from Method 1 above is that the element is returned and the remaining
     *     elements are shifted in a single call, so the O(N) cost is paid only on the first
     *     dequeue after a run of enqueues (amortized O(1)).
     * Auxiliary Space: O(N).
     *   The stacks are used for storing the values.
     */
    var Queue = new StackQueue<int>();
    foreach (var item in arr)
    {
        Queue.EnQueue2(item);
    }
    for (int i = 0; i < arr.Length; i++)
    {
        var data = Queue.DeQueue2();
        Assert.Equal(arr[i], data);
        output.WriteLine(data.ToString());
    }
}
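The two tests above drive EnQueue1/DeQueue1 and EnQueue2/DeQueue2 on a StackQueue<int>, but the implementation of those methods is not shown in this listing. For reference, here is a minimal sketch of the standard two-stack queue in the "dequeue costly only the first time" form that the Method 2 comment describes; TwoStackQueue and its members are illustrative names, not the API under test, and the real EnQueue2/DeQueue2 may differ.

// Sketch only: an assumption about how a two-stack queue of this kind is usually built.
using System;
using System.Collections.Generic;

public class TwoStackQueue<T>
{
    private readonly Stack<T> _input = new Stack<T>();   // receives enqueued items
    private readonly Stack<T> _output = new Stack<T>();  // serves dequeues in FIFO order

    // Method 2 style: enqueue is always O(1).
    // (A Method 1 style queue would instead do the transfer work inside EnQueue.)
    public void EnQueue(T item) => _input.Push(item);

    // O(N) only when _output is empty (the "costly at first time" case);
    // otherwise O(1), giving amortized O(1) per operation.
    public T DeQueue()
    {
        if (_output.Count == 0)
        {
            while (_input.Count > 0)
            {
                _output.Push(_input.Pop());
            }
        }
        if (_output.Count == 0)
        {
            throw new InvalidOperationException("Queue is empty.");
        }
        return _output.Pop();
    }
}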
public void Add(ModuleDefinition module)
{
    StackQueue<TypeDefinition> type_definitions = new StackQueue<TypeDefinition>();
    StackQueue<TypeDefinition> type_definitions_closure = new StackQueue<TypeDefinition>();
    foreach (TypeDefinition td in module.Types)
    {
        type_definitions.Push(td);
    }
    while (type_definitions.Count > 0)
    {
        TypeDefinition ty = type_definitions.Pop();
        type_definitions_closure.Push(ty);
        foreach (TypeDefinition ntd in ty.NestedTypes)
        {
            type_definitions.Push(ntd);
        }
    }
    foreach (TypeDefinition td in type_definitions_closure)
    {
        foreach (MethodDefinition definition in td.Methods)
        {
            Add(definition);
        }
    }
}
public static System.Collections.Generic.IEnumerable<T> Sort<T, E>(IGraph<T, E> graph, IEnumerable<T> source) where E : IEdge<T>
{
    Dictionary<T, bool> Visited = new Dictionary<T, bool>();
    StackQueue<T> Stack = new StackQueue<T>();
    foreach (T v in graph.Vertices)
    {
        Visited[v] = false;
    }
    foreach (T v in source)
    {
        Stack.Push(v);
    }
    while (Stack.Count != 0)
    {
        T u = Stack.Pop();
        Visited[u] = true;
        yield return u;
        foreach (T v in graph.ReversePredecessors(u))
        {
            if (!Visited[v] && !Stack.Contains(v))
            {
                Stack.Push(v);
            }
        }
    }
}
public IEnumerator<Mono.Cecil.MethodDefinition> GetEnumerator()
{
    StackQueue<Mono.Cecil.TypeDefinition> type_definitions = new StackQueue<Mono.Cecil.TypeDefinition>();
    StackQueue<Mono.Cecil.TypeDefinition> type_definitions_closure = new StackQueue<Mono.Cecil.TypeDefinition>();
    foreach (Mono.Cecil.TypeDefinition td in _module.Types)
    {
        type_definitions.Push(td);
    }
    while (type_definitions.Count > 0)
    {
        Mono.Cecil.TypeDefinition td = type_definitions.Pop();
        type_definitions_closure.Push(td);
        foreach (Mono.Cecil.TypeDefinition ntd in td.NestedTypes)
        {
            type_definitions.Push(ntd);
        }
    }
    foreach (Mono.Cecil.TypeDefinition type in type_definitions_closure)
    {
        foreach (Mono.Cecil.MethodDefinition method in type.Methods)
        {
            yield return method;
        }
    }
}
public void StackQueueDequeueTest()
{
    StackQueue sq = new StackQueue();
    sq.Enqueue(new StackQueueElement() { ElementValue = 1 });
    sq.Enqueue(new StackQueueElement() { ElementValue = 2 });
    sq.Enqueue(new StackQueueElement() { ElementValue = 3 });
    StackQueueElement ele = sq.Dequeue();
    Assert.AreEqual(1, ele.ElementValue);
    StackQueueElement ele2 = sq.Dequeue();
    Assert.AreEqual(2, ele2.ElementValue);
}
static void Main(string[] args)
{
    var operatingUnitName = "陳俊欽-紅不讓手機配件-鳳山店";
    var index = operatingUnitName.IndexOf("-") + 1;
    var storeName = operatingUnitName.Substring(index, operatingUnitName.Length - index);
    var v1 = 0 / 2;
    var v2 = 1 / 2;
    var v3 = 2 / 2;
    var aa = new List<string>() { "a", "b", "c", "d", "e", "f", "g" };
    var a = aa.Select((item, inx) => new { item, inx })
              .GroupBy(x => x.inx / 2);
    var myStackQueue = new StackQueue<int>(); // T is now int
    myStackQueue.Enqueue(1);
    myStackQueue.Push(2);
    myStackQueue.Push(3);
    myStackQueue.Enqueue(4);
    // At this point, the collection is { 3, 2, 1, 4 }
    foreach (var item in myStackQueue)
    {
        Console.WriteLine(item);
    }
}
public void Stack_And_Queue()
{
    var sq = new StackQueue<int>();
    for (var i = 0; i < 5; i++)
    {
        if (i % 2 == 0)
        {
            sq.Push(i);
        }
        else
        {
            sq.Enqueue(i);
        }
    }
    // 4 (push)
    // 3 (enqueue)
    // 2 (push)
    // 1 (enqueue)
    // 0 (push)
    Assert.AreEqual(4, sq.Pop());
    Assert.AreEqual(0, sq.Dequeue());
    Assert.AreEqual(3, sq.Pop());
    Assert.AreEqual(1, sq.Dequeue());
    Assert.AreEqual(2, sq.Pop());
}
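Stack_And_Queue above (and the Queue/Stack tests later in this listing) treats StackQueue<T> as a double-ended structure: Push and Enqueue both append to the same sequence, Pop removes the newest item and Dequeue removes the oldest. That class is not shown here, and the StackQueue<int> in the earlier Main example clearly behaves differently (it enumerates as { 3, 2, 1, 4 }), so the following is only a sketch with the semantics these particular tests assert; DequeBackedStackQueue is an illustrative name.

// Sketch only: one plausible backing for a combined stack/queue, not the tested class.
using System;
using System.Collections;
using System.Collections.Generic;

public class DequeBackedStackQueue<T> : IEnumerable<T>
{
    private readonly LinkedList<T> _items = new LinkedList<T>();

    public int Count => _items.Count;

    public void Push(T item) => _items.AddLast(item);     // stack-style add
    public void Enqueue(T item) => _items.AddLast(item);  // queue-style add

    // LIFO removal: newest item. (No empty-checks, for brevity.)
    public T Pop()
    {
        T value = _items.Last.Value;
        _items.RemoveLast();
        return value;
    }

    // FIFO removal: oldest item.
    public T Dequeue()
    {
        T value = _items.First.Value;
        _items.RemoveFirst();
        return value;
    }

    public IEnumerator<T> GetEnumerator() => _items.GetEnumerator();
    IEnumerator IEnumerable.GetEnumerator() => GetEnumerator();
}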
public STATE(Dictionary<CFG.Vertex, bool> visited,
             Dictionary<CFG.Vertex, STATE<T>> states_in,
             Dictionary<CFG.Vertex, STATE<T>> states_out,
             CFG.Vertex bb,
             Action<STATE<T>, Dictionary<CFG.Vertex, STATE<T>>, Dictionary<CFG.Vertex, STATE<T>>, CFG.Vertex> custom_initializer)
{
    // Set up the state for basic block bb, using predecessor information to size the stack.
    _stack = new StackQueue<T>();
    int in_level = -1;
    int args = bb.StackNumberOfArguments;
    bool scalar_ret = bb.HasScalarReturnValue;
    bool struct_ret = bb.HasStructReturnValue;
    bool has_this = bb.HasThis;
    int locals = bb.StackNumberOfLocals;
    // Use predecessor information to get initial stack size.
    if (bb.IsEntry)
    {
        in_level = bb.StackNumberOfLocals + bb.StackNumberOfArguments;
    }
    else
    {
        foreach (CFG.Vertex pred in bb._graph.PredecessorNodes(bb))
        {
            // Do not consider interprocedural edges when computing stack size.
            if (pred._original_method_reference != bb._original_method_reference)
            {
                throw new Exception("Interprocedural edge should not exist.");
            }
            // If a predecessor has not been visited, do not consider it.
            // Warn if a predecessor does not concur with another predecessor.
            if (in_level != -1 && states_out.ContainsKey(pred) && states_out[pred]._stack.Count != in_level)
            {
                System.Console.Error.WriteLine("Inconsistent stack size on inputs to basic block " + bb);
                foreach (CFG.Vertex error_pred in bb._graph.PredecessorNodes(bb))
                {
                    System.Console.Error.WriteLine("Predecessor " + error_pred + " has stack size on exit of " + states_out[error_pred]._stack.Count);
                }
                throw new Exception("Miscalculation in stack size " + "for basic block " + bb);
            }
            if (states_out.ContainsKey(pred))
            {
                in_level = states_out[pred]._stack.Count;
            }
        }
    }
    if (in_level == -1)
    {
        throw new Exception("Predecessor edge computation screwed up.");
    }
    int level = in_level;
    custom_initializer(this, states_in, states_out, bb);
}
public static Mono.Cecil.MethodDefinition ConvertToMonoCecilMethodDefinition(System.Reflection.MethodBase mi)
{
    // Get the name of the assembly which encloses the code for the kernel.
    String kernel_assembly_file_name = mi.DeclaringType.Assembly.Location;
    // Get directory containing the assembly.
    String full_path = Path.GetFullPath(kernel_assembly_file_name);
    full_path = Path.GetDirectoryName(full_path);
    String kernel_full_name = null;
    // Get the full name of the kernel, normalized because System.Reflection names cannot be compared directly with Mono.Cecil names.
    if (mi as System.Reflection.MethodInfo != null)
    {
        System.Reflection.MethodInfo mik = mi as System.Reflection.MethodInfo;
        kernel_full_name = string.Format("{0} {1}.{2}({3})",
            mik.ReturnType.FullName,
            Campy.Utils.Utility.RemoveGenericParameters(mi.ReflectedType),
            mi.Name,
            string.Join(",", mi.GetParameters().Select(o => string.Format("{0}", o.ParameterType)).ToArray()));
    }
    else
    {
        kernel_full_name = string.Format("{0}.{1}({2})",
            Campy.Utils.Utility.RemoveGenericParameters(mi.ReflectedType),
            mi.Name,
            string.Join(",", mi.GetParameters().Select(o => string.Format("{0}", o.ParameterType)).ToArray()));
    }
    kernel_full_name = Campy.Utils.Utility.NormalizeSystemReflectionName(kernel_full_name);
    // Decompile entire module.
    Mono.Cecil.ModuleDefinition md = Mono.Cecil.ModuleDefinition.ReadModule(kernel_assembly_file_name);
    // Examine all types, and all methods of types, in order to find the lambda in Mono.Cecil.
    List<Type> types = new List<Type>();
    StackQueue<Mono.Cecil.TypeDefinition> type_definitions = new StackQueue<Mono.Cecil.TypeDefinition>();
    StackQueue<Mono.Cecil.TypeDefinition> type_definitions_closure = new StackQueue<Mono.Cecil.TypeDefinition>();
    foreach (Mono.Cecil.TypeDefinition td in md.Types)
    {
        type_definitions.Push(td);
    }
    while (type_definitions.Count > 0)
    {
        Mono.Cecil.TypeDefinition ty = type_definitions.Pop();
        type_definitions_closure.Push(ty);
        foreach (Mono.Cecil.TypeDefinition ntd in ty.NestedTypes)
        {
            type_definitions.Push(ntd);
        }
    }
    foreach (Mono.Cecil.TypeDefinition td in type_definitions_closure)
    {
        foreach (Mono.Cecil.MethodDefinition md2 in td.Methods)
        {
            String md2_name = Campy.Utils.Utility.NormalizeMonoCecilName(md2.FullName);
            if (md2_name.Contains(kernel_full_name))
            {
                return md2;
            }
        }
    }
    return null;
}
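The loop above (and the nearly identical loops in the other Campy snippets in this listing) is a plain worklist traversal that enumerates every TypeDefinition in a module, including nested types, before scanning their methods. A standalone version of just that walk, using the BCL Stack<T> instead of Campy's StackQueue, might look like the following; AllTypes is an illustrative name and not part of Campy or Mono.Cecil.

// Reference sketch of the nested-type closure walk used throughout these examples.
using System.Collections.Generic;
using Mono.Cecil;

static IEnumerable<TypeDefinition> AllTypes(ModuleDefinition module)
{
    var worklist = new Stack<TypeDefinition>();
    foreach (TypeDefinition td in module.Types)
    {
        worklist.Push(td);
    }
    while (worklist.Count > 0)
    {
        TypeDefinition td = worklist.Pop();
        yield return td;
        // Nested types are pushed back onto the worklist so the whole hierarchy is covered.
        foreach (TypeDefinition nested in td.NestedTypes)
        {
            worklist.Push(nested);
        }
    }
}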
public ListSection<T> _locals; // Pointer to _stack, if there are local variables to the method.

public STATE()
{
    _stack = new StackQueue<T>();
    _this = null;
    _arguments = null;
    _locals = null;
    _struct_ret = null;
}
private IMPORTER()
{
    Cfg = new CFG();
    _methods_to_do = new StackQueue<MethodReference>();
    _methods_done = new List<string>();
    _methods_avoid.Add("System.Void System.ThrowHelper::ThrowArgumentOutOfRangeException()");
    _methods_avoid.Add("System.Void System.ArgumentOutOfRangeException::.ctor(System.String, System.String)");
}
public STATE(STATE<T> other)
{
    // Set up a state that is a copy of another state.
    _stack = new StackQueue<T>();
    for (int i = 0; i < other._stack.Count; ++i)
    {
        _stack.Push(other._stack.PeekBottom(i));
    }
    _struct_ret = _stack.Section(other._struct_ret.Base, other._struct_ret.Len);
    _this = _stack.Section(other._this.Base, other._this.Len);
    _arguments = _stack.Section(other._arguments.Base, other._arguments.Len);
    _locals = _stack.Section(other._locals.Base, other._locals.Len);
}
public static void testStackQueue()
{
    StackQueue queue = new StackQueue();
    queue.enqueue(1);
    queue.enqueue(2);
    queue.enqueue(3);
    queue.enqueue(4);
    queue.enqueue(5);
    var peek = queue.peek();
    Console.WriteLine(peek);
    Console.WriteLine(queue.ToString());
}
static void Main(string[] args)
{
    var queryCnt = Convert.ToInt32(Console.ReadLine());
    var stackQueue = new StackQueue();
    var queryList = new List<string>();
    for (int i = 0; i < queryCnt; i++)
    {
        var fullQuery = Console.ReadLine();
        queryList.Add(fullQuery);
    }
    stackQueue.ProcessQuery(queryList);
    Console.ReadKey();
}
public void Queue()
{
    var sq = new StackQueue<int>();
    for (var i = 0; i < 3; i++)
    {
        sq.Enqueue(i);
    }
    var expected = 0;
    while (sq.Count > 0)
    {
        var next = sq.Dequeue();
        Assert.AreEqual(expected, next);
        expected++;
    }
}
public void Stack()
{
    var sq = new StackQueue<int>();
    for (var i = 0; i < 3; i++)
    {
        sq.Push(i);
    }
    var expected = 2;
    while (sq.Count > 0)
    {
        var next = sq.Pop();
        Assert.AreEqual(expected, next);
        expected--;
    }
}
public void Q3_6()
{
    var stack = new Stack<int>();
    stack.Push(3);
    stack.Push(4);
    stack.Push(2);
    stack.Push(5);
    stack.Push(1);
    var sortedStack = StackQueue.Q6_Sort(stack);
    Assert.AreEqual(5, sortedStack.Pop());
    Assert.AreEqual(4, sortedStack.Pop());
    Assert.AreEqual(3, sortedStack.Pop());
    Assert.AreEqual(2, sortedStack.Pop());
    Assert.AreEqual(1, sortedStack.Pop());
}
// SOL
private static void solve(int[] arr, InOut.Ergebnis erg)
{
    StackQueue<int> q = new StackQueue<int>();
    string s = "";
    foreach (int i in arr)
    {
        if (i != -1)
        {
            q.Enqueue(i);
        }
        else
        {
            s += " " + q.Dequeue();
        }
    }
    erg.Setze(s.Trim(' '));
}
public static Mono.Cecil.TypeDefinition ConvertToMonoCecilTypeDefinition(Type ty)
{
    // Get the name of the assembly which encloses the code for the kernel.
    String kernel_assembly_file_name = ty.Assembly.Location;
    // Get directory containing the assembly.
    String full_path = Path.GetFullPath(kernel_assembly_file_name);
    full_path = Path.GetDirectoryName(full_path);
    // Decompile entire module.
    Mono.Cecil.ModuleDefinition md = Mono.Cecil.ModuleDefinition.ReadModule(kernel_assembly_file_name);
    // Examine all types, and all methods of types, in order to find the lambda in Mono.Cecil.
    List<Type> types = new List<Type>();
    StackQueue<Mono.Cecil.TypeDefinition> type_definitions = new StackQueue<Mono.Cecil.TypeDefinition>();
    StackQueue<Mono.Cecil.TypeDefinition> type_definitions_closure = new StackQueue<Mono.Cecil.TypeDefinition>();
    foreach (Mono.Cecil.TypeDefinition td in md.Types)
    {
        type_definitions.Push(td);
    }
    while (type_definitions.Count > 0)
    {
        Mono.Cecil.TypeDefinition td = type_definitions.Pop();
        if (Campy.Utils.Utility.IsSimilarType(ty, td))
        {
            return td;
        }
        type_definitions_closure.Push(td);
        foreach (Mono.Cecil.TypeDefinition ntd in td.NestedTypes)
        {
            type_definitions.Push(ntd);
        }
    }
    foreach (Mono.Cecil.TypeDefinition td in type_definitions_closure)
    {
        if (Campy.Utils.Utility.IsSimilarType(ty, td))
        {
            return td;
        }
    }
    return null;
}
public void StackQueueAddedItemCountTest()
{
    StackQueue sq = new StackQueue();
    sq.Enqueue(new StackQueueElement() { ElementValue = 1 });
    sq.Enqueue(new StackQueueElement() { ElementValue = 2 });
    sq.Enqueue(new StackQueueElement() { ElementValue = 3 });
    Assert.AreEqual(3, sq.Count());
}
private static StackQueue<TItem> OneStep(StackQueue<TItem> q)
{
    if (q._isRecopying && !q.H.IsEmpty() && !q.T.IsEmpty())
    {
        q._nowCopying++;
        q.HH.Push(q.T.Pop());
        q.Hr.Push(q.H.Pop());
    }
    else if (q._isRecopying && q.H.IsEmpty() && !q.T.IsEmpty())
    {
        q._isRecopying = true;
        q.HH.Push(q.T.Pop());
    }
    else if (q._isRecopying && q.H.IsEmpty() && q.T.IsEmpty() && q._nowCopying > 1)
    {
        q._isRecopying = true;
        q._nowCopying--;
        q.HH.Push(q.Hr.Pop());
    }
    else if (q._isRecopying && q.H.IsEmpty() && q.T.IsEmpty() && q._nowCopying == 1)
    {
        q._isRecopying = false;
        q._nowCopying--;
        q.HH.Push(q.Hr.Pop());
        q.H = q.HH;
        q.T = q.TT;
        q.HH = new Stack<TItem>();
        q.TT = new Stack<TItem>();
        q.Hr = new Stack<TItem>();
        q.h = new Stack<TItem>();
    }
    else if (q._isRecopying && q.H.IsEmpty() && q.T.IsEmpty() && q._nowCopying == 0)
    {
        q._isRecopying = false;
        q.H = q.HH;
        q.T = q.TT;
        q.HH = new Stack<TItem>();
        q.TT = new Stack<TItem>();
        q.Hr = new Stack<TItem>();
        q.h = new Stack<TItem>();
    }
    return q;
}
public static Type ConvertToSystemReflectionType(Mono.Cecil.TypeDefinition td)
{
    // Find the System.Reflection type that is equivalent to the Mono.Cecil type definition.
    // Get module.
    String assembly_location = td.Module.FullyQualifiedName;
    System.Reflection.Assembly assembly = System.Reflection.Assembly.LoadFile(assembly_location);
    List<Type> types = new List<Type>();
    StackQueue<Type> type_definitions = new StackQueue<Type>();
    StackQueue<Type> type_definitions_closure = new StackQueue<Type>();
    foreach (Type t in assembly.GetTypes())
    {
        type_definitions.Push(t);
    }
    while (type_definitions.Count > 0)
    {
        Type t = type_definitions.Pop();
        if (Campy.Utils.Utility.IsSimilarType(t, td))
        {
            return t;
        }
        type_definitions_closure.Push(t);
        foreach (Type ntd in t.GetNestedTypes())
        {
            type_definitions.Push(ntd);
        }
    }
    foreach (Type t in type_definitions_closure)
    {
        if (Campy.Utils.Utility.IsSimilarType(t, td))
        {
            return t;
        }
    }
    return null;
}
private static void PerformQueueOps()
{
    // Queue using array
    var arrayQueue = new ArrayQueue<int>(5);
    arrayQueue.Enqueue(10);
    arrayQueue.Enqueue(20);
    arrayQueue.Enqueue(30);
    Console.WriteLine(arrayQueue.Dequeue());
    Console.WriteLine(arrayQueue.Dequeue());
    arrayQueue.Enqueue(40);
    arrayQueue.Enqueue(50);
    arrayQueue.Enqueue(60);
    arrayQueue.Enqueue(70);

    // Queue using stack
    Console.WriteLine(arrayQueue.ToString());
    var stackQueue = new StackQueue<int>();
    stackQueue.Enqueue(10);
    stackQueue.Enqueue(20);
    stackQueue.Enqueue(30);
    Console.WriteLine(stackQueue.Dequeue());
    Console.WriteLine(stackQueue.Dequeue());
    Console.ReadLine();

    // Priority Queue
    var priorityQueue = new PriorityQueue(5);
    priorityQueue.Enqueue(10);
    priorityQueue.Enqueue(8);
    priorityQueue.Enqueue(9);
    priorityQueue.Enqueue(7);
    Console.WriteLine(priorityQueue.ToString());
    Console.ReadLine();
}
public void VisitNodes(Func<T, bool> func)
{
    foreach (T v in graph.Vertices)
    {
        Visited[v] = false;
    }
    // Initialize all workers with
    // empty stack.
    Stack = new StackQueue<Tuple<T, StackQueue<T>>>[NumberOfWorkers];
    for (int i = 0; i < NumberOfWorkers; ++i)
    {
        Stack[i] = new StackQueue<Tuple<T, StackQueue<T>>>(
            new Tuple<T, StackQueue<T>>(default(T), new StackQueue<T>()));
    }
    // Initialize first worker with stack containing all sources.
    foreach (T v in Source)
    {
        Stack[0].PeekTop().Item2.Push(v);
    }
    // Spawn workers.
    Parallel.For(0, NumberOfWorkers, (int index) =>
    {
        bool terminate = false;
        while (!terminate)
        {
            T u = default(T);
            GetWork(index);
            while (Stack[index].Count >= 1 && Stack[index].PeekTop().Item2.Count > 0)
            {
                // There is stuff in the to do list. Pop it and perform dfs
                // expansion of the vertex.
                // Safe: No other threads will grab nodes within the cutoff,
                // and no other threads can change this stack size.
                StackQueue<T> todo = Stack[index].PeekTop().Item2;
                u = todo.Pop();
                Visited[u] = true;
                // visit.
                // yield return u;
                //System.Console.WriteLine("visit " + u);
                bool term = func(u);
                if (term)
                {
                    Terminate = true;
                    break;
                }
                // Push successors.
                StackQueue<T> items = new StackQueue<T>();
                foreach (T v in graph.ReverseSuccessors(u))
                {
                    if (!Visited[v] && !SpecialContains(index, v))
                    {
                        items.Push(v);
                    }
                }
                if (items.Count != 0)
                {
                    // Add new backtrack and to do list.
                    Stack[index].Push(
                        new Tuple<T, StackQueue<T>>(u, items));
                }
                // Synchronize threads on stack.
                GetWork(index);
            }
            // Check for termination.
            terminate = TerminateTest();
        }
    });
}
// Copy work from another thread to thread "index".
void GetWork(int index)
{
    lock (Stack[index])
    {
        // Clean up.
        while (Stack[index].Count > 1 && Stack[index].PeekTop().Item2.Count == 0)
        {
            Stack[index].Pop();
        }
        // Check if there is work.
        if (!(Stack[index].Count == 1 && Stack[index].PeekTop().Item2.Count == 0))
        {
            return;
        }
    }
    bool done = false;
    int from = 0;
    for (int j = 0; j < NUMRETRY - 1; ++j)
    {
        from = (from + 1) % NumberOfWorkers;
        lock (Stack[from])
        {
            if (Stack[from].Count > CutOff)
            {
                // Check if there actually is work in Stack[from].
                // There may be a stack full of empty "todo" work lists, in which case the
                // thread that owns the stack hasn't yet cleaned up.
                int count = 0;
                for (int i = 0; i < Stack[from].Count - CutOff; ++i)
                {
                    count += Stack[from].PeekBottom(i).Item2.Count;
                }
                if (count <= 1)
                {
                    continue;
                }
                count = count / 2;
                if (count < 1)
                {
                    continue;
                }
                //System.Console.WriteLine("Stealing " + count + " work items from " + from + " to " + index_successors);
                // Work is available at the stack of threads.
                // Grab "count" nodes to work on.
                // Copy stack "from" to "index",
                // and then divide the two stacks into disjoint sets of
                // vertices.
                StackQueue<Tuple<T, StackQueue<T>>> new_stack = new StackQueue<Tuple<T, StackQueue<T>>>();
                for (int i = 0; i < Stack[from].Count - CutOff && count > 0; ++i)
                {
                    Tuple<T, StackQueue<T>> tf = Stack[from].PeekBottom(i);
                    T tb = tf.Item1;
                    StackQueue<T> s = tf.Item2;
                    // Make partitions.
                    StackQueue<T> work = new StackQueue<T>();
                    for ( ; count > 0 && s.Count != 0; --count)
                    {
                        T v = s.DequeueBottom(); // Side effect removing work.
                        work.Push(v);
                    }
                    Tuple<T, StackQueue<T>> tt = new Tuple<T, StackQueue<T>>(tb, work);
                    new_stack.Push(tt);
                }
                // Assign new stack.
                Stack[index] = new_stack;
                // Dump stacks.
                //Dump();
                done = true;
            }
        }
        if (done)
        {
            return;
        }
    }
}