/// <exception cref="VariantException"></exception> /// <exception cref="TjsException"></exception> public virtual void Assign(Dispatch2 dsp) { // copy members from "dsp" to "Owner" // determin dsp's object type //Holder<ArrayNI> arrayni = new Holder<ArrayNI>(null); ArrayNI array = (ArrayNI)dsp.GetNativeInstance(ArrayClass.ClassID); if (array != null) { // copy from array mItems.Clear(); int count = array.mItems.Count; for (int i = 0; i < count; i++) { mItems.AddItem(new Variant(array.mItems[i])); } } else { //mItems.addAll( array.mItems ); // convert from dictionary or others mItems.Clear(); ArrayNI.DictionaryEnumCallback callback = new ArrayNI.DictionaryEnumCallback(mItems ); dsp.EnumMembers(Interface.IGNOREPROP, callback, dsp); } }
public virtual void Clear() { lock (pendingReplications) { pendingReplications.Clear(); timedOutItems.Clear(); } }
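The method above clears two related collections inside one lock so no reader can observe one collection cleared while the other still holds items. A minimal sketch of the same pattern, with hypothetical pending/timedOut collections guarded by a single shared monitor:

using System.Collections.Generic;

class ReplicationTracker {
    // Illustrative fields; both are guarded by the same monitor so they
    // always change together.
    private readonly Dictionary<long, int> pending = new Dictionary<long, int>();
    private readonly List<long> timedOut = new List<long>();

    public void Clear() {
        lock (pending) {  // one lock covers both collections
            pending.Clear();
            timedOut.Clear();
        }
    }
}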
/// <exception cref="System.NotSupportedException"></exception> /// <exception cref="NGit.Errors.TransportException"></exception> internal virtual void Execute(ProgressMonitor monitor, FetchResult result) { askFor.Clear(); localUpdates.Clear(); fetchHeadUpdates.Clear(); packLocks.Clear(); try { ExecuteImp(monitor, result); } finally { try { foreach (PackLock Lock in packLocks) { Lock.Unlock(); } } catch (IOException e) { throw new TransportException(e.Message, e); } } }
public virtual void TestDeletingBlocks() {
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).Build();
    try {
        cluster.WaitActive();
        DataNode dn = cluster.GetDataNodes()[0];
        FsDatasetImpl ds = (FsDatasetImpl)DataNodeTestUtils.GetFSDataset(dn);
        FsVolumeImpl vol = ds.GetVolumes()[0];
        ExtendedBlock eb;
        ReplicaInfo info;
        IList<Block> blockList = new AList<Block>();
        for (int i = 1; i <= 63; i++) {
            eb = new ExtendedBlock(Blockpool, i, 1, 1000 + i);
            info = new FinalizedReplica(eb.GetLocalBlock(), vol, vol.GetCurrentDir().GetParentFile());
            ds.volumeMap.Add(Blockpool, info);
            info.GetBlockFile().CreateNewFile();
            info.GetMetaFile().CreateNewFile();
            blockList.AddItem(info);
        }
        ds.Invalidate(Blockpool, Sharpen.Collections.ToArray(blockList, new Block[0]));
        try {
            Sharpen.Thread.Sleep(1000);
        } catch (Exception) {
            // Nothing to do
        }
        NUnit.Framework.Assert.IsTrue(ds.IsDeletingBlock(Blockpool, blockList[0].GetBlockId()));
        blockList.Clear();
        eb = new ExtendedBlock(Blockpool, 64, 1, 1064);
        info = new FinalizedReplica(eb.GetLocalBlock(), vol, vol.GetCurrentDir().GetParentFile());
        ds.volumeMap.Add(Blockpool, info);
        info.GetBlockFile().CreateNewFile();
        info.GetMetaFile().CreateNewFile();
        blockList.AddItem(info);
        ds.Invalidate(Blockpool, Sharpen.Collections.ToArray(blockList, new Block[0]));
        try {
            Sharpen.Thread.Sleep(1000);
        } catch (Exception) {
            // Nothing to do
        }
        NUnit.Framework.Assert.IsFalse(ds.IsDeletingBlock(Blockpool, blockList[0].GetBlockId()));
    } finally {
        cluster.Shutdown();
    }
}
/// <exception cref="System.IO.IOException"/> protected internal override void ParseExecResult(BufferedReader lines) { output.Clear(); string line = lines.ReadLine(); while (line != null) { output.AddItem(line); line = lines.ReadLine(); } }
/// <summary>Scan head, index and merge tree.</summary>
/// <remarks>
/// Scan head, index and merge tree. Used during normal checkout or merge
/// operations.
/// </remarks>
/// <exception cref="NGit.Errors.CorruptObjectException"></exception>
/// <exception cref="System.IO.IOException"></exception>
public virtual void PreScanTwoTrees() {
    removed.Clear();
    updated.Clear();
    conflicts.Clear();
    walk = new NameConflictTreeWalk(repo);
    builder = dc.Builder();
    AddTree(walk, headCommitTree);
    AddTree(walk, mergeCommitTree);
    walk.AddTree(new DirCacheBuildIterator(builder));
    walk.AddTree(workingTree);
    while (walk.Next()) {
        ProcessEntry(walk.GetTree<CanonicalTreeParser>(0), walk.GetTree<CanonicalTreeParser>(1),
            walk.GetTree<DirCacheBuildIterator>(2), walk.GetTree<WorkingTreeIterator>(3));
        if (walk.IsSubtree) {
            walk.EnterSubtree();
        }
    }
}
/// <summary>Update this remote's definition within the configuration.</summary>
/// <param name="rc">the configuration file to store ourselves into.</param>
public virtual void Update(Config rc) {
    IList<string> vlst = new AList<string>();
    vlst.Clear();
    foreach (URIish u in URIs) {
        vlst.AddItem(u.ToPrivateString());
    }
    rc.SetStringList(SECTION, Name, KEY_URL, vlst);
    vlst.Clear();
    foreach (URIish u_1 in PushURIs) {
        vlst.AddItem(u_1.ToPrivateString());
    }
    rc.SetStringList(SECTION, Name, KEY_PUSHURL, vlst);
    vlst.Clear();
    foreach (RefSpec u_2 in FetchRefSpecs) {
        vlst.AddItem(u_2.ToString());
    }
    rc.SetStringList(SECTION, Name, KEY_FETCH, vlst);
    vlst.Clear();
    foreach (RefSpec u_3 in PushRefSpecs) {
        vlst.AddItem(u_3.ToString());
    }
    rc.SetStringList(SECTION, Name, KEY_PUSH, vlst);
    Set(rc, KEY_UPLOADPACK, UploadPack, DEFAULT_UPLOAD_PACK);
    Set(rc, KEY_RECEIVEPACK, ReceivePack, DEFAULT_RECEIVE_PACK);
    Set(rc, KEY_TAGOPT, TagOpt.Option(), NGit.Transport.TagOpt.AUTO_FOLLOW.Option());
    Set(rc, KEY_MIRROR, mirror, DEFAULT_MIRROR);
    Set(rc, KEY_TIMEOUT, timeout, 0);
    if (!oldName.Equals(name)) {
        rc.UnsetSection(SECTION, oldName);
        oldName = name;
    }
}
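One scratch list is reused for all four keys; each SetStringList presumably copies the values, which is what makes the Clear between sections safe. A tiny sketch of that reuse pattern against an in-memory sink (the names here are illustrative, not NGit's API):

using System.Collections.Generic;

class ConfigSink {
    public readonly Dictionary<string, List<string>> Values = new Dictionary<string, List<string>>();

    // Copies the list, which is what makes reusing one scratch buffer safe.
    public void SetStringList(string key, IList<string> vals) {
        Values[key] = new List<string>(vals);
    }
}

static void WriteSections(ConfigSink rc, IEnumerable<string> urls, IEnumerable<string> pushUrls) {
    var vlst = new List<string>();
    foreach (var u in urls) vlst.Add(u);
    rc.SetStringList("url", vlst);
    vlst.Clear();                       // reuse the same buffer for the next key
    foreach (var u in pushUrls) vlst.Add(u);
    rc.SetStringList("pushurl", vlst);
}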
public virtual void SetUp() { float maxF = LightWeightLinkedSet.DefaultMaxLoadFactor; float minF = LightWeightLinkedSet.DefautMinLoadFactor; int initCapacity = LightWeightLinkedSet.MinimumCapacity; rand = new Random(Time.Now()); list.Clear(); for (int i = 0; i < Num; i++) { list.AddItem(rand.Next()); } set = new LightWeightLinkedSet <int>(initCapacity, maxF, minF); }
public void TestPrependAppend() {
    List<int> list = new List<int>();
    AList<int> alist = this.NewList();
    List<int>[] lists = new List<int>[8];
    AList<int>[] alists = new AList<int>[] {
        this.NewList(0, out lists[0]), this.NewList(1, out lists[1]),
        this.NewList(5, out lists[2]), this.NewList(11, out lists[3]),
        this.NewList(20, out lists[4]), this.NewList(32, out lists[5]),
        this.NewList(53, out lists[6]), this.NewList(100, out lists[7]),
    };
    Assert.AreEqual(alists.Length, lists.Length);
    // So, let's just do a random series of Append and Prepend operations,
    // clearing the list occasionally so that both list sizes vary a lot,
    // which will cause the code paths to vary (important because there
    // are several different ways these operations can be done).
    for (int trial = 0; trial < 20; trial++) {
        if (trial % 4 == 0) {
            alist.Clear();
            list.Clear();
        }
        int whirl = this._r.Next(alists.Length);
        AList<int> other = alists[whirl];
        bool append = this._r.Next(2) == 0;
        if (append) {
            alist.Append(other);
            list.AddRange(lists[whirl]);
        } else {
            alist.Prepend(other);
            list.InsertRange(0, lists[whirl]);
        }
        Assert.That(other.ImmutableCount == other.Count || other.Count <= this._maxLeafSize);
        Assert.That(alist.ImmutableCount >= other.ImmutableCount || alist.Count - other.Count <= this._maxLeafSize);
    }
}
public void Clear() { if (mNodes != null) { int count = mNodes.Count; for (int i = 0; i < count; i++) { ExprNode node = mNodes[i]; node?.Clear(); } mNodes.Clear(); mNodes = null; } if (mVal != null) { mVal.Clear(); mVal = null; } }
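Clear-then-null releases both the elements and the container itself, so everything becomes collectible in one pass; a minimal sketch of the idiom (the Node type stands in for any child that also needs recursive cleanup):

using System.Collections.Generic;

class Node {
    private List<Node> children = new List<Node>();

    public void Clear() {
        if (children != null) {
            // Let each child release its own resources first...
            foreach (var child in children) child?.Clear();
            // ...then drop the contents and the reference to the list.
            children.Clear();
            children = null;
        }
    }
}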
/// <summary>
/// Copy a delimited text file into a new (or optionally existing) table in
/// this database.
/// </summary>
/// <param name="name">Name of the new table to create</param>
/// <param name="in">Source reader to import</param>
/// <param name="delim">Regular expression representing the delimiter string.</param>
/// <param name="quote">the quote character</param>
/// <param name="filter">valid import filter</param>
/// <param name="useExistingTable">
/// if <code>true</code> use the current table if it already exists,
/// otherwise create a new table with a unique name
/// </param>
/// <returns>the name of the imported table</returns>
/// <exception cref="System.IO.IOException"></exception>
public static string ImportReader(BufferedReader @in, Database db, string name, string delim,
    char quote, ImportFilter filter, bool useExistingTable) {
    string line = @in.ReadLine();
    if (line == null || line.Trim().Length == 0) {
        return null;
    }
    Sharpen.Pattern delimPat = Sharpen.Pattern.Compile(delim);
    name = Database.EscapeIdentifier(name);
    Table table = null;
    if (!useExistingTable || ((table = db.GetTable(name)) == null)) {
        IList<Column> columns = new List<Column>();
        string[] columnNames = SplitLine(line, delimPat, quote, @in, 0);
        for (int i = 0; i < columnNames.Length; i++) {
            columns.AddItem(new ColumnBuilder(columnNames[i], DataType.TEXT).EscapeName()
                .SetLength(DataTypeProperties.TEXT.maxSize.Value).ToColumn());
        }
        table = CreateUniqueTable(db, name, columns, null, filter);
    }
    IList<object[]> rows = new AList<object[]>(COPY_TABLE_BATCH_SIZE);
    int numColumns = table.GetColumnCount();
    while ((line = @in.ReadLine()) != null) {
        object[] data = SplitLine(line, delimPat, quote, @in, numColumns);
        rows.AddItem(filter.FilterRow(data));
        if (rows.Count == COPY_TABLE_BATCH_SIZE) {
            table.AddRows(rows);
            rows.Clear();
        }
    }
    if (rows.Count > 0) {
        table.AddRows(rows);
    }
    return table.GetName();
}
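The import buffers rows and flushes whenever the buffer reaches the batch size, then flushes the remainder once at the end. A generic sketch of that accumulate-and-flush pattern (the batch size and the flush callback are illustrative):

using System;
using System.Collections.Generic;

static void CopyInBatches<T>(IEnumerable<T> source, Action<List<T>> flush, int batchSize = 200) {
    var batch = new List<T>(batchSize);
    foreach (var item in source) {
        batch.Add(item);
        if (batch.Count == batchSize) {
            flush(batch);     // push a full batch downstream
            batch.Clear();    // reuse the same buffer
        }
    }
    if (batch.Count > 0) {
        flush(batch);         // don't forget the final partial batch
    }
}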
public static void FinalizeApplication() { mGlobalStringMap = null; mNativeClassNames?.Clear(); mNativeClassNames = null; mConsoleOutput = null; mMessageMapper = null; mStorage = null; mArrayClass = null; mDictionaryClass = null; mVAPool = null; NULL_ARG = null; ArrayObject.FinalizeApplication(); TjsByteCodeLoader.FinalizeApplication(); CustomObject.FinalizeApplication(); DictionaryObject.FinalizeApplication(); MathClass.FinalizeApplication(); Variant.FinalizeApplication(); LexicalAnalyzer.FinalizeApplication(); }
// temporary
//static private final int MIN_VARIANT_DATA_COUNT = 400*2;
public virtual void Create(int count) {
    if (count < MIN_COUNT) {
        count = MIN_COUNT;
    }
    if (mWork == null) {
        mWork = new AList<VariantRepalace>();
    }
    mWork.Clear();
    if (mObjs == null || mObjs.Length < count) {
        mObjs = new InterCodeObject[count];
        mParent = new int[count];
        mPropSetter = new int[count];
        mPropGetter = new int[count];
        mSuperClassGetter = new int[count];
        mProperties = new int[count][];
    }
}
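Create reuses its arrays across loads and only reallocates when a larger capacity is requested, avoiding per-load garbage. A minimal sketch of that grow-only buffer policy (field and floor value are illustrative):

class ObjectCache {
    public int[] Parents;  // illustrative buffer: grown when needed, never shrunk

    public void Create(int count) {
        const int MinCount = 16;  // assumed floor, in the spirit of MIN_COUNT above
        if (count < MinCount) count = MinCount;
        if (Parents == null || Parents.Length < count) {
            // Only reallocate when the previous capacity is too small;
            // smaller requests just reuse the old, larger array.
            Parents = new int[count];
        }
    }
}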
/// <summary>
/// Initializes the lists.
/// </summary>
private void InitLists() {
    if (AlphaList == null) {
        AlphaList = new List<int>();
    } else {
        AlphaList.Clear();
    }
    if (BetaList == null) {
        BetaList = new List<int>();
    } else {
        BetaList.Clear();
    }
    if (AList == null) {
        AList = new List<int>();
    } else {
        AList.Clear();
    }
    if (BList == null) {
        BList = new List<int>();
    } else {
        BList.Clear();
    }
}
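All four branches do the same init-or-clear dance; a small generic helper can express it once (a sketch; this assumes the lists are fields, since properties cannot be passed by ref):

using System.Collections.Generic;

static void ResetList<T>(ref List<T> list) {
    if (list == null) {
        list = new List<T>();  // first use: allocate
    } else {
        list.Clear();          // later uses: keep capacity, drop contents
    }
}

// Usage: ResetList(ref AlphaList); ResetList(ref BetaList); ...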
public virtual void TestChooseTarget2() {
    DatanodeStorageInfo[] targets;
    BlockPlacementPolicyDefault repl = (BlockPlacementPolicyDefault)replicator;
    IList<DatanodeStorageInfo> chosenNodes = new AList<DatanodeStorageInfo>();
    ICollection<Node> excludedNodes = new HashSet<Node>();
    excludedNodes.AddItem(dataNodes[1]);
    targets = repl.ChooseTarget(filename, 4, dataNodes[0], chosenNodes, false, excludedNodes,
        BlockSize, TestBlockStoragePolicy.DefaultStoragePolicy);
    NUnit.Framework.Assert.AreEqual(4, targets.Length);
    NUnit.Framework.Assert.AreEqual(storages[0], targets[0]);
    NUnit.Framework.Assert.IsTrue(cluster.IsNodeGroupAware());
    // Make sure no replicas are on the same nodegroup.
    for (int i = 1; i < 4; i++) {
        NUnit.Framework.Assert.IsFalse(IsOnSameNodeGroup(targets[0], targets[i]));
    }
    NUnit.Framework.Assert.IsTrue(IsOnSameRack(targets[1], targets[2]) || IsOnSameRack(targets[2], targets[3]));
    NUnit.Framework.Assert.IsFalse(IsOnSameRack(targets[1], targets[3]));
    excludedNodes.Clear();
    chosenNodes.Clear();
    excludedNodes.AddItem(dataNodes[1]);
    chosenNodes.AddItem(storages[2]);
    targets = repl.ChooseTarget(filename, 1, dataNodes[0], chosenNodes, true, excludedNodes,
        BlockSize, TestBlockStoragePolicy.DefaultStoragePolicy);
    System.Console.Out.WriteLine("targets=" + Arrays.AsList(targets));
    NUnit.Framework.Assert.AreEqual(2, targets.Length);
    // Make sure that the chosen node is among the targets.
    int i_1 = 0;
    for (; i_1 < targets.Length && !storages[2].Equals(targets[i_1]); i_1++) {
    }
    NUnit.Framework.Assert.IsTrue(i_1 < targets.Length);
}
public virtual void TestContainerCleanup() {
    Logger rootLogger = LogManager.GetRootLogger();
    rootLogger.SetLevel(Level.Debug);
    DrainDispatcher dispatcher = new DrainDispatcher();
    MockRM rm = new _MockRM_167(this, dispatcher);
    rm.Start();
    MockNM nm1 = rm.RegisterNode("127.0.0.1:1234", 5000);
    RMApp app = rm.SubmitApp(2000);
    // kick the scheduling
    nm1.NodeHeartbeat(true);
    RMAppAttempt attempt = app.GetCurrentAppAttempt();
    MockAM am = rm.SendAMLaunched(attempt.GetAppAttemptId());
    am.RegisterAppAttempt();
    // request for containers
    int request = 2;
    am.Allocate("127.0.0.1", 1000, request, new AList<ContainerId>());
    dispatcher.Await();
    // kick the scheduler
    nm1.NodeHeartbeat(true);
    IList<Container> conts = am.Allocate(new AList<ResourceRequest>(), new AList<ContainerId>()).GetAllocatedContainers();
    int contReceived = conts.Count;
    int waitCount = 0;
    while (contReceived < request && waitCount++ < 200) {
        Log.Info("Got " + contReceived + " containers. Waiting to get " + request);
        Sharpen.Thread.Sleep(100);
        conts = am.Allocate(new AList<ResourceRequest>(), new AList<ContainerId>()).GetAllocatedContainers();
        dispatcher.Await();
        contReceived += conts.Count;
        nm1.NodeHeartbeat(true);
    }
    NUnit.Framework.Assert.AreEqual(request, contReceived);
    // Release a container.
    AList<ContainerId> release = new AList<ContainerId>();
    release.AddItem(conts[0].GetId());
    am.Allocate(new AList<ResourceRequest>(), release);
    dispatcher.Await();
    // Send one more heartbeat with a fake running container. This is to
    // simulate the situation that can happen if the NM reports that container
    // is running in the same heartbeat when the RM asks it to clean it up.
    IDictionary<ApplicationId, IList<ContainerStatus>> containerStatuses = new Dictionary<ApplicationId, IList<ContainerStatus>>();
    AList<ContainerStatus> containerStatusList = new AList<ContainerStatus>();
    containerStatusList.AddItem(BuilderUtils.NewContainerStatus(conts[0].GetId(), ContainerState.Running, "nothing", 0));
    containerStatuses[app.GetApplicationId()] = containerStatusList;
    NodeHeartbeatResponse resp = nm1.NodeHeartbeat(containerStatuses, true);
    WaitForContainerCleanup(dispatcher, nm1, resp);
    // Now to test the case when RM already gave cleanup, and NM suddenly
    // realizes that the container is running.
    Log.Info("Testing container launch much after release and NM getting cleanup");
    containerStatuses.Clear();
    containerStatusList.Clear();
    containerStatusList.AddItem(BuilderUtils.NewContainerStatus(conts[0].GetId(), ContainerState.Running, "nothing", 0));
    containerStatuses[app.GetApplicationId()] = containerStatusList;
    resp = nm1.NodeHeartbeat(containerStatuses, true);
    // The cleanup list won't be instantaneous as it is given out by scheduler
    // and not RMNodeImpl.
    WaitForContainerCleanup(dispatcher, nm1, resp);
    rm.Stop();
}
private void timer1_Tick(object sender, EventArgs e) {
    x--;
    Draw();
    if (soldier.life <= 0) {
        if (!won) {
            //MessageBox.Show("You lost!");
            won = true;
        }
        Respawn();
    } else if (ant.Count == 0) {
        if (!won) {
            //MessageBox.Show("You won!");
            won = true;
        }
        soldier.DrawWon();
        //new System.Threading.ManualResetEvent(false).WaitOne(60 * 1000);
        Respawn();
    }
}

// Recreates the soldier and repopulates the antagonist list. The win and
// lose branches of timer1_Tick previously duplicated this logic verbatim,
// including five hand-unrolled copies of the random sprite selection per wave.
private void Respawn() {
    soldier = new Soldier();
    soldier.Setup(panel, soldierimg);
    ant.Clear();
    Random rend = new Random();
    int[] xOffsets = { 800, 630, 330, 430, 530 };
    for (int i = 0; i < 100; i++) {
        for (int k = 0; k < 5; k++) {
            // Pick one of the five sprites; the "antagonist" field itself is
            // overwritten on every pick, exactly as in the original code.
            int v = rend.Next(5);
            if (v == 0) { antt = antagonist; }
            else if (v == 1) { antt = antagonist2; }
            else if (v == 2) { antt = antagonist3; }
            else if (v == 3) { antt = antagonist4; }
            else { antt = antagonist5; }
            antagonist = antt;
            Antagonist2 a = new Antagonist2();
            a.Setup(this, g, antagonist);
            ant.Add(a);
        }
        for (int k = 0; k < 5; k++) {
            int y = (rend.Next(10) + 1) * 100 + 200;
            ant[k + i * 5].SetLocation(xOffsets[k] + i * 1280, y);
        }
    }
    serializer.Serialize(soldier, ant);
    Play();
}
/// <summary>Test the case where we do a mark inside a reset.</summary>
/// <remarks>
/// Test the case where we do a mark inside a reset. Test for both file
/// and memory.
/// </remarks>
/// <param name="key"/>
/// <param name="values"/>
/// <returns/>
/// <exception cref="System.IO.IOException"/>
private static int Test2(IntWritable key, MarkableIterator<IntWritable> values) {
    IntWritable i;
    int errors = 0;
    int count = 0;
    AList<IntWritable> expectedValues = new AList<IntWritable>();
    AList<IntWritable> expectedValues1 = new AList<IntWritable>();
    Log.Info("Executing TEST:2 for Key:" + key);
    values.Mark();
    Log.Info("TEST:2 Marking");
    while (values.HasNext()) {
        i = values.Next();
        Log.Info(key + ":" + i);
        expectedValues.AddItem(i);
        if (count == 8) {
            break;
        }
        count++;
    }
    values.Reset();
    count = 0;
    Log.Info("TEST:2 reset");
    while (values.HasNext()) {
        i = values.Next();
        Log.Info(key + ":" + i);
        if (count < expectedValues.Count) {
            if (i != expectedValues[count]) {
                errors++;
                Log.Info("TEST:2. Check:1 Expected: " + expectedValues[count] + ", Got: " + i);
                return errors;
            }
        }
        // We have moved past the first mark, but are still reading from the
        // memory cache.
        if (count == 3) {
            values.Mark();
            Log.Info("TEST:2. Marking -- " + key + ":" + i);
        }
        if (count >= 3) {
            expectedValues1.AddItem(i);
        }
        count++;
    }
    values.Reset();
    Log.Info("TEST:2. Reset");
    expectedValues.Clear();
    count = 0;
    while (values.HasNext()) {
        i = values.Next();
        Log.Info(key + ":" + i);
        if (count < expectedValues1.Count) {
            if (i != expectedValues1[count]) {
                errors++;
                Log.Info("TEST:2. Check:2 Expected: " + expectedValues1[count] + ", Got: " + i);
                return errors;
            }
        }
        // We have moved past the previous mark, but now we are in the file
        // cache.
        if (count == 20) {
            values.Mark();
            Log.Info("TEST:2. Marking -- " + key + ":" + i);
        }
        if (count >= 20) {
            expectedValues.AddItem(i);
        }
        count++;
    }
    values.Reset();
    count = 0;
    Log.Info("TEST:2. Reset");
    while (values.HasNext()) {
        i = values.Next();
        Log.Info(key + ":" + i);
        if (i != expectedValues[count]) {
            errors++;
            Log.Info("TEST:2. Check:3 Expected: " + expectedValues[count] + ", Got: " + i);
            return errors;
        }
        count++;
    }
    Log.Info("TEST:2 Done");
    return errors;
}
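The behavior under test is that a Mark placed while replaying a Reset simply becomes the new replay point. A minimal sketch of those semantics over a plain list-backed cursor (a hypothetical MarkableList, not Hadoop's MarkableIterator):

using System.Collections.Generic;

// List-backed cursor with Mark/Reset semantics similar to the iterator
// exercised above: Reset rewinds to the most recently placed Mark.
class MarkableList<T> {
    private readonly List<T> items;
    private int pos, mark;

    public MarkableList(IEnumerable<T> src) { items = new List<T>(src); }

    public bool HasNext() => pos < items.Count;
    public T Next() => items[pos++];
    public void Mark() => mark = pos;  // marking during a replay moves the replay point
    public void Reset() => pos = mark;
}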
/// <exception cref="Kirikiri.Tjs2.TJSException"></exception> private void ReadObjects(ScriptBlock block, BinaryReader br) { int totalSize = br.ReadInt32(); string[] strarray = mStringArray; ByteBuffer[] bbarray = mByteBufferArray; double[] dblarray = mDoubleArray; byte[] barray = mByteArray; short[] sarray = mShortArray; int[] iarray = mIntArray; long[] larray = mLongArray; int toplevel = br.ReadInt32(); int objcount = br.ReadInt32(); mObjectsCache.Create(objcount); InterCodeObject[] objs = mObjectsCache.mObjs; AList <VariantRepalace> work = mObjectsCache.mWork; int[] parent = mObjectsCache.mParent; int[] propSetter = mObjectsCache.mPropSetter; int[] propGetter = mObjectsCache.mPropGetter; int[] superClassGetter = mObjectsCache.mSuperClassGetter; int[][] properties = mObjectsCache.mProperties; for (int o = 0; o < objcount; o++) { if (br.ReadChars(4).ToRealString() != FILE_TAG_LE) { throw new TjsException("ByteCode Broken"); } int objsize = br.ReadInt32(); parent[o] = br.ReadInt32(); int name = br.ReadInt32(); int contextType = br.ReadInt32(); int maxVariableCount = br.ReadInt32(); int variableReserveCount = br.ReadInt32(); int maxFrameCount = br.ReadInt32(); int funcDeclArgCount = br.ReadInt32(); int funcDeclUnnamedArgArrayBase = br.ReadInt32(); int funcDeclCollapseBase = br.ReadInt32(); propSetter[o] = br.ReadInt32(); propGetter[o] = br.ReadInt32(); superClassGetter[o] = br.ReadInt32(); int count = br.ReadInt32(); LongBuffer srcpos = null; // codePos/srcPos は今のところ使ってない、ソート济みなので、longにする必要はないが…… // codePos/srcPos currently not used. it's for debug. Please refer to newer krkrz code and fix here later. if (count > 0) { for (int i = 0; i < count; i++) { br.ReadInt64(); } } else { //br.BaseStream.Seek(count << 3, SeekOrigin.Current); //br.ReadInt32(); } count = br.ReadInt32(); short[] code = new short[count]; for (int i = 0; i < count; i++) { code[i] = br.ReadInt16(); } //TranslateCodeAddress( block, code, codeSize ); var padding = 4 - (count * 2) % 4; if (padding > 0 && padding < 4) { br.ReadBytes(padding); } count = br.ReadInt32(); int vcount = count * 2; if (mVariantTypeData == null || mVariantTypeData.Length < vcount) { mVariantTypeData = new short[vcount]; } for (int i = 0; i < vcount; i++) { mVariantTypeData[i] = br.ReadInt16(); } Variant[] vdata = new Variant[count]; int datacount = count; Variant tmp; for (int i = 0; i < datacount; i++) { int pos = i << 1; int type = mVariantTypeData[pos]; int index = mVariantTypeData[pos + 1]; switch (type) { case TYPE_VOID: { vdata[i] = new Variant(); // null break; } case TYPE_OBJECT: { vdata[i] = new Variant(null, null); // null Array Dictionary はまだサポートしていない TODO break; } case TYPE_INTER_OBJECT: { tmp = new Variant(); work.AddItem(new VariantRepalace(tmp, index)); vdata[i] = tmp; break; } case TYPE_INTER_GENERATOR: { tmp = new Variant(); work.AddItem(new VariantRepalace(tmp, index)); vdata[i] = tmp; break; } case TYPE_STRING: { vdata[i] = new Variant(strarray[index]); break; } case TYPE_OCTET: { vdata[i] = new Variant(bbarray[index]); break; } case TYPE_REAL: { vdata[i] = new Variant(dblarray[index]); break; } case TYPE_BYTE: { vdata[i] = new Variant(barray[index]); break; } case TYPE_SHORT: { vdata[i] = new Variant(sarray[index]); break; } case TYPE_INTEGER: { vdata[i] = new Variant(iarray[index]); break; } case TYPE_LONG: { vdata[i] = new Variant(larray[index]); break; } case TYPE_UNKNOWN: default: { vdata[i] = new Variant(); // null; break; break; } } } count = br.ReadInt32(); int[] scgetterps = new int[count]; for (int i = 0; i < count; i++) { 
scgetterps[i] = br.ReadInt32(); } // properties count = br.ReadInt32(); if (count > 0) { int pcount = count << 1; int[] props = new int[pcount]; for (int i = 0; i < pcount; i++) { props[i] = br.ReadInt32(); } properties[o] = props; } //IntVector superpointer = IntVector.wrap( scgetterps ); InterCodeObject obj = new InterCodeObject(block, mStringArray[name], contextType, code, vdata, maxVariableCount, variableReserveCount, maxFrameCount, funcDeclArgCount , funcDeclUnnamedArgArrayBase, funcDeclCollapseBase, true, srcpos, scgetterps); //objs.add(obj); objs[o] = obj; } Variant val = new Variant(); for (int o = 0; o < objcount; o++) { InterCodeObject parentObj = null; InterCodeObject propSetterObj = null; InterCodeObject propGetterObj = null; InterCodeObject superClassGetterObj = null; if (parent[o] >= 0) { parentObj = objs[parent[o]]; } if (propSetter[o] >= 0) { propSetterObj = objs[propSetter[o]]; } if (propGetter[o] >= 0) { propGetterObj = objs[propGetter[o]]; } if (superClassGetter[o] >= 0) { superClassGetterObj = objs[superClassGetter[o]]; } objs[o] .SetCodeObject(parentObj, propSetterObj, propGetterObj, superClassGetterObj ); if (properties[o] != null) { InterCodeObject obj = parentObj; // objs.get(o).mParent; int[] prop = properties[o]; int length = (int)(((uint)prop.Length) >> 1); for (int i = 0; i < length; i++) { int pos = i << 1; int pname = prop[pos]; int pobj = prop[pos + 1]; val.Set(objs[pobj]); obj.PropSet(Interface.MEMBERENSURE | Interface.IGNOREPROP, mStringArray[pname], val , obj); } properties[o] = null; } } for (int i = 0; i < work.Count; i++) { VariantRepalace w = work[i]; w.Work.Set(objs[w.Index]); } work.Clear(); InterCodeObject top = null; if (toplevel >= 0) { top = objs[toplevel]; } block.SetObjects(top, objs, objcount); }
public override void Clear() { data.Clear(); Reset(); }
public void Reset() { _blocks.Clear(); }
/// <exception cref="System.IO.IOException"/> private RawKeyValueIterator FinalMerge(JobConf job, FileSystem fs, IList <InMemoryMapOutput <K, V> > inMemoryMapOutputs, IList <MergeManagerImpl.CompressAwarePath> onDiskMapOutputs ) { Log.Info("finalMerge called with " + inMemoryMapOutputs.Count + " in-memory map-outputs and " + onDiskMapOutputs.Count + " on-disk map-outputs"); long maxInMemReduce = GetMaxInMemReduceLimit(); // merge config params Type keyClass = (Type)job.GetMapOutputKeyClass(); Type valueClass = (Type)job.GetMapOutputValueClass(); bool keepInputs = job.GetKeepFailedTaskFiles(); Path tmpDir = new Path(reduceId.ToString()); RawComparator <K> comparator = (RawComparator <K>)job.GetOutputKeyComparator(); // segments required to vacate memory IList <Merger.Segment <K, V> > memDiskSegments = new AList <Merger.Segment <K, V> >(); long inMemToDiskBytes = 0; bool mergePhaseFinished = false; if (inMemoryMapOutputs.Count > 0) { TaskID mapId = inMemoryMapOutputs[0].GetMapId().GetTaskID(); inMemToDiskBytes = CreateInMemorySegments(inMemoryMapOutputs, memDiskSegments, maxInMemReduce ); int numMemDiskSegments = memDiskSegments.Count; if (numMemDiskSegments > 0 && ioSortFactor > onDiskMapOutputs.Count) { // If we reach here, it implies that we have less than io.sort.factor // disk segments and this will be incremented by 1 (result of the // memory segments merge). Since this total would still be // <= io.sort.factor, we will not do any more intermediate merges, // the merge of all these disk segments would be directly fed to the // reduce method mergePhaseFinished = true; // must spill to disk, but can't retain in-mem for intermediate merge Path outputPath = mapOutputFile.GetInputFileForWrite(mapId, inMemToDiskBytes).Suffix (Org.Apache.Hadoop.Mapred.Task.MergedOutputPrefix); RawKeyValueIterator rIter = Merger.Merge(job, fs, keyClass, valueClass, memDiskSegments , numMemDiskSegments, tmpDir, comparator, reporter, spilledRecordsCounter, null, mergePhase); FSDataOutputStream @out = CryptoUtils.WrapIfNecessary(job, fs.Create(outputPath)); IFile.Writer <K, V> writer = new IFile.Writer <K, V>(job, @out, keyClass, valueClass , codec, null, true); try { Merger.WriteFile(rIter, writer, reporter, job); writer.Close(); onDiskMapOutputs.AddItem(new MergeManagerImpl.CompressAwarePath(outputPath, writer .GetRawLength(), writer.GetCompressedLength())); writer = null; } catch (IOException e) { // add to list of final disk outputs. if (null != outputPath) { try { fs.Delete(outputPath, true); } catch (IOException) { } } // NOTHING throw; } finally { if (null != writer) { writer.Close(); } } Log.Info("Merged " + numMemDiskSegments + " segments, " + inMemToDiskBytes + " bytes to disk to satisfy " + "reduce memory limit"); inMemToDiskBytes = 0; memDiskSegments.Clear(); } else { if (inMemToDiskBytes != 0) { Log.Info("Keeping " + numMemDiskSegments + " segments, " + inMemToDiskBytes + " bytes in memory for " + "intermediate, on-disk merge"); } } } // segments on disk IList <Merger.Segment <K, V> > diskSegments = new AList <Merger.Segment <K, V> >(); long onDiskBytes = inMemToDiskBytes; long rawBytes = inMemToDiskBytes; MergeManagerImpl.CompressAwarePath[] onDisk = Sharpen.Collections.ToArray(onDiskMapOutputs , new MergeManagerImpl.CompressAwarePath[onDiskMapOutputs.Count]); foreach (MergeManagerImpl.CompressAwarePath file in onDisk) { long fileLength = fs.GetFileStatus(file).GetLen(); onDiskBytes += fileLength; rawBytes += (file.GetRawDataLength() > 0) ? 
file.GetRawDataLength() : fileLength; Log.Debug("Disk file: " + file + " Length is " + fileLength); diskSegments.AddItem(new Merger.Segment <K, V>(job, fs, file, codec, keepInputs, ( file.ToString().EndsWith(Org.Apache.Hadoop.Mapred.Task.MergedOutputPrefix) ? null : mergedMapOutputsCounter), file.GetRawDataLength())); } Log.Info("Merging " + onDisk.Length + " files, " + onDiskBytes + " bytes from disk" ); diskSegments.Sort(new _IComparer_786()); // build final list of segments from merged backed by disk + in-mem IList <Merger.Segment <K, V> > finalSegments = new AList <Merger.Segment <K, V> >(); long inMemBytes = CreateInMemorySegments(inMemoryMapOutputs, finalSegments, 0); Log.Info("Merging " + finalSegments.Count + " segments, " + inMemBytes + " bytes from memory into reduce" ); if (0 != onDiskBytes) { int numInMemSegments = memDiskSegments.Count; diskSegments.AddRange(0, memDiskSegments); memDiskSegments.Clear(); // Pass mergePhase only if there is a going to be intermediate // merges. See comment where mergePhaseFinished is being set Progress thisPhase = (mergePhaseFinished) ? null : mergePhase; RawKeyValueIterator diskMerge = Merger.Merge(job, fs, keyClass, valueClass, codec , diskSegments, ioSortFactor, numInMemSegments, tmpDir, comparator, reporter, false , spilledRecordsCounter, null, thisPhase); diskSegments.Clear(); if (0 == finalSegments.Count) { return(diskMerge); } finalSegments.AddItem(new Merger.Segment <K, V>(new MergeManagerImpl.RawKVIteratorReader (this, diskMerge, onDiskBytes), true, rawBytes)); } return(Merger.Merge(job, fs, keyClass, valueClass, finalSegments, finalSegments.Count , tmpDir, comparator, reporter, spilledRecordsCounter, null, null)); }
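The io.sort.factor test above decides whether the in-memory spill merge can be the last merge round: the merged memory segments add one more disk segment, and if the total still fits within the merge fan-in, no intermediate rounds are needed. A toy version of that arithmetic:

// One more segment (the merged in-memory spill) joins the on-disk segments;
// if the total still fits within the merge fan-in, the next merge can feed
// the reducer directly with no intermediate merge rounds.
static bool FinalMergeFits(int onDiskSegments, int mergeFactor) {
    return onDiskSegments + 1 <= mergeFactor;
}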
/// <exception cref="Kirikiri.Tjs2.TJSException"></exception> private void ExportByteCode(BinaryStream output) { byte[] filetag = FILE_TAG; byte[] codetag = new byte[] { (byte)('T'), (byte)('J'), (byte)('S'), (byte)('2') }; byte[] objtag = new byte[] { (byte)('O'), (byte)('B'), (byte)('J'), (byte)('S') }; byte[] datatag = new byte[] { (byte)('D'), (byte)('A'), (byte)('T'), (byte)('A') }; int count = mInterCodeGeneratorList.Count; AList<ByteBuffer> objarray = new AList<ByteBuffer>(count * 2); ConstArrayData constarray = new ConstArrayData(); int objsize = 0; for (int i = 0; i < count; i++) { InterCodeGenerator obj = mInterCodeGeneratorList[i]; ByteBuffer buf = obj.ExportByteCode(this, constarray); objarray.AddItem(buf); objsize += buf.Capacity() + TAG_SIZE + CHUNK_SIZE_LEN; } // tag + size objsize += TAG_SIZE + CHUNK_SIZE_LEN + 4 + 4; // OBJS tag + size + toplevel + count ByteBuffer dataarea = constarray.ExportBuffer(); int datasize = dataarea.Capacity() + TAG_SIZE + CHUNK_SIZE_LEN; // DATA tag + size int filesize = objsize + datasize + FILE_TAG_SIZE + CHUNK_SIZE_LEN; // TJS2 tag + file size byte[] filesizearray = new byte[] { unchecked((byte)(filesize & unchecked((int)(0xff )))), unchecked((byte)(((int)(((uint)filesize) >> 8)) & unchecked((int)(0xff)))) , unchecked((byte)(((int)(((uint)filesize) >> 16)) & unchecked((int)(0xff)))), unchecked( (byte)(((int)(((uint)filesize) >> 24)) & unchecked((int)(0xff)))) }; byte[] datasizearray = new byte[] { unchecked((byte)(datasize & unchecked((int)(0xff )))), unchecked((byte)(((int)(((uint)datasize) >> 8)) & unchecked((int)(0xff)))) , unchecked((byte)(((int)(((uint)datasize) >> 16)) & unchecked((int)(0xff)))), unchecked( (byte)(((int)(((uint)datasize) >> 24)) & unchecked((int)(0xff)))) }; byte[] objsizearray = new byte[] { unchecked((byte)(objsize & unchecked((int)(0xff )))), unchecked((byte)(((int)(((uint)objsize) >> 8)) & unchecked((int)(0xff)))), unchecked((byte)(((int)(((uint)objsize) >> 16)) & unchecked((int)(0xff)))), unchecked( (byte)(((int)(((uint)objsize) >> 24)) & unchecked((int)(0xff)))) }; byte[] objcountarray = new byte[] { unchecked((byte)(count & unchecked((int)(0xff )))), unchecked((byte)(((int)(((uint)count) >> 8)) & unchecked((int)(0xff)))), unchecked( (byte)(((int)(((uint)count) >> 16)) & unchecked((int)(0xff)))), unchecked((byte) (((int)(((uint)count) >> 24)) & unchecked((int)(0xff)))) }; int toplevel = -1; if (mTopLevelGenerator != null) { toplevel = GetCodeIndex(mTopLevelGenerator); } byte[] toparray = new byte[] { unchecked((byte)(toplevel & unchecked((int)(0xff)) )), unchecked((byte)(((int)(((uint)toplevel) >> 8)) & unchecked((int)(0xff)))), unchecked((byte)(((int)(((uint)toplevel) >> 16)) & unchecked((int)(0xff)))), unchecked( (byte)(((int)(((uint)toplevel) >> 24)) & unchecked((int)(0xff)))) }; output.Write(filetag); output.Write(filesizearray); output.Write(datatag); output.Write(datasizearray); output.Write(dataarea); output.Write(objtag); output.Write(objsizearray); output.Write(toparray); output.Write(objcountarray); for (int i_1 = 0; i_1 < count; i_1++) { ByteBuffer buf = objarray[i_1]; int size = buf.Capacity(); byte[] bufsizearray = new byte[] { unchecked((byte)(size & unchecked((int)(0xff)) )), unchecked((byte)(((int)(((uint)size) >> 8)) & unchecked((int)(0xff)))), unchecked( (byte)(((int)(((uint)size) >> 16)) & unchecked((int)(0xff)))), unchecked((byte)( ((int)(((uint)size) >> 24)) & unchecked((int)(0xff)))) }; output.Write(codetag); output.Write(bufsizearray); output.Write(buf); } output.Close(); output = null; 
objarray.Clear(); objarray = null; constarray = null; dataarea = null; }
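Where a standard stream is available, the same tag + little-endian-length framing can lean on BinaryWriter, which always emits Int32 in little-endian byte order regardless of platform. A sketch, not the Kirikiri API:

using System.IO;
using System.Text;

static void WriteChunk(BinaryWriter w, string tag, byte[] payload) {
    w.Write(Encoding.ASCII.GetBytes(tag)); // 4-byte tag, e.g. "TJS2"
    w.Write(payload.Length);               // BinaryWriter writes Int32 little-endian
    w.Write(payload);
}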
/// <exception cref="TjsException"></exception> private void ReadObjects(ScriptBlock block, byte[] buff, int offset, int size) { string[] strarray = mStringArray; ByteBuffer[] bbarray = mByteBufferArray; double[] dblarray = mDoubleArray; byte[] barray = mByteArray; short[] sarray = mShortArray; int[] iarray = mIntArray; long[] larray = mLongArray; int toplevel = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int objcount = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; //Log.v("test","count:"+objcount); mObjectsCache.Create(objcount); InterCodeObject[] objs = mObjectsCache.mObjs; AList <ByteCodeLoader.VariantRepalace> work = mObjectsCache.mWork; int[] parent = mObjectsCache.mParent; int[] propSetter = mObjectsCache.mPropSetter; int[] propGetter = mObjectsCache.mPropGetter; int[] superClassGetter = mObjectsCache.mSuperClassGetter; int[][] properties = mObjectsCache.mProperties; for (int o = 0; o < objcount; o++) { int tag = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; if (tag != FILE_TAG_LE) { throw new TjsException(Error.ByteCodeBroken); } //int objsize = (buff[offset]&0xff) | (buff[offset+1]&0xff) << 8 | (buff[offset+2]&0xff) << 16 | (buff[offset+3]&0xff) << 24; offset += 4; parent[o] = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int name = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int contextType = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int maxVariableCount = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1 ] & unchecked ((int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int variableReserveCount = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ((int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff) )) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int maxFrameCount = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ((int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int funcDeclArgCount = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1 ] & unchecked ((int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int funcDeclUnnamedArgArrayBase = (buff[offset] & unchecked ((int)(0xff))) | (buff [offset + 1] & unchecked ((int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int )(0xff))) << 16 | (buff[offset + 3] & unchecked 
((int)(0xff))) << 24; offset += 4; int funcDeclCollapseBase = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ((int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff) )) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; propSetter[o] = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; propGetter[o] = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; superClassGetter[o] = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ((int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int count = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; LongBuffer srcpos; // codePos/srcPos は今のところ使ってない、ソート济みなので、longにする必要はないが…… offset += count << 3; srcpos = null; count = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked (( int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; short[] code = new short[count]; for (int i = 0; i < count; i++) { code[i] = (short)((buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8); offset += 2; } offset += (count & 1) << 1; count = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked (( int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int vcount = count << 1; if (mVariantTypeData == null || mVariantTypeData.Length < vcount) { mVariantTypeData = new short[vcount]; } short[] data = mVariantTypeData; for (int i_1 = 0; i_1 < vcount; i_1++) { data[i_1] = (short)((buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ((int)(0xff))) << 8); offset += 2; } Variant[] vdata = new Variant[count]; int datacount = count; Variant tmp; for (int i_2 = 0; i_2 < datacount; i_2++) { int pos = i_2 << 1; int type = data[pos]; int index = data[pos + 1]; switch (type) { case TYPE_VOID: { vdata[i_2] = new Variant(); // null break; } case TYPE_OBJECT: { vdata[i_2] = new Variant(null, null); // null Array Dictionary はまだサポートしていない TODO break; } case TYPE_INTER_OBJECT: { tmp = new Variant(); work.AddItem(new ByteCodeLoader.VariantRepalace(tmp, index)); vdata[i_2] = tmp; break; } case TYPE_INTER_GENERATOR: { tmp = new Variant(); work.AddItem(new ByteCodeLoader.VariantRepalace(tmp, index)); vdata[i_2] = tmp; break; } case TYPE_STRING: { vdata[i_2] = new Variant(strarray[index]); break; } case TYPE_OCTET: { vdata[i_2] = new Variant(bbarray[index]); break; } case TYPE_REAL: { vdata[i_2] = new Variant(dblarray[index]); break; } case TYPE_BYTE: { vdata[i_2] = new Variant(barray[index]); break; } case TYPE_SHORT: { vdata[i_2] = new Variant(sarray[index]); break; } case TYPE_INTEGER: { vdata[i_2] = new Variant(iarray[index]); break; } case TYPE_LONG: { vdata[i_2] = new Variant(larray[index]); break; } case TYPE_UNKNOWN: default: { vdata[i_2] = new Variant(); // null; break; } } } count = (buff[offset] 
& unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked (( int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; int[] scgetterps = new int[count]; for (int i_3 = 0; i_3 < count; i_3++) { scgetterps[i_3] = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; } // properties count = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked (( int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; if (count > 0) { int pcount = count << 1; int[] props = new int[pcount]; for (int i_4 = 0; i_4 < pcount; i_4++) { props[i_4] = (buff[offset] & unchecked ((int)(0xff))) | (buff[offset + 1] & unchecked ( (int)(0xff))) << 8 | (buff[offset + 2] & unchecked ((int)(0xff))) << 16 | (buff[offset + 3] & unchecked ((int)(0xff))) << 24; offset += 4; } properties[o] = props; } //IntVector superpointer = IntVector.wrap( scgetterps ); InterCodeObject obj = new InterCodeObject(block, mStringArray[name], contextType, code, vdata, maxVariableCount, variableReserveCount, maxFrameCount, funcDeclArgCount , funcDeclUnnamedArgArrayBase, funcDeclCollapseBase, true, srcpos, scgetterps); //objs.add(obj); objs[o] = obj; } Variant val = new Variant(); for (int o_1 = 0; o_1 < objcount; o_1++) { InterCodeObject parentObj = null; InterCodeObject propSetterObj = null; InterCodeObject propGetterObj = null; InterCodeObject superClassGetterObj = null; if (parent[o_1] >= 0) { parentObj = objs[parent[o_1]]; } if (propSetter[o_1] >= 0) { propSetterObj = objs[propSetter[o_1]]; } if (propGetter[o_1] >= 0) { propGetterObj = objs[propGetter[o_1]]; } if (superClassGetter[o_1] >= 0) { superClassGetterObj = objs[superClassGetter[o_1]]; } objs[o_1].SetCodeObject(parentObj, propSetterObj, propGetterObj, superClassGetterObj ); if (properties[o_1] != null) { InterCodeObject obj = parentObj; // objs.get(o).mParent; int[] prop = properties[o_1]; int length = (int)(((uint)prop.Length) >> 1); for (int i = 0; i < length; i++) { int pos = i << 1; int pname = prop[pos]; int pobj = prop[pos + 1]; val.Set(objs[pobj]); obj.PropSet(Interface.MEMBERENSURE | Interface.IGNOREPROP, mStringArray[pname], val , obj); } properties[o_1] = null; } } int count_1 = work.Count; for (int i_5 = 0; i_5 < count_1; i_5++) { ByteCodeLoader.VariantRepalace w = work[i_5]; w.Work.Set(objs[w.Index]); } work.Clear(); InterCodeObject top = null; if (toplevel >= 0) { top = objs[toplevel]; } block.SetObjects(top, objs, objcount); }
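The mask-and-shift expressions above are the classic manual little-endian decode. On runtimes where System.Buffers.Binary is available, the same read could be expressed with BinaryPrimitives; a sketch, not the loader's actual code:

using System;
using System.Buffers.Binary;

static int ReadInt32LE(byte[] buff, ref int offset) {
    // Decodes 4 bytes as a little-endian Int32 and advances the cursor.
    int v = BinaryPrimitives.ReadInt32LittleEndian(buff.AsSpan(offset, 4));
    offset += 4;
    return v;
}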
internal virtual void CreateSplits(IDictionary<string, ICollection<CombineFileInputFormat.OneBlockInfo>> nodeToBlocks,
    IDictionary<CombineFileInputFormat.OneBlockInfo, string[]> blockToNodes,
    IDictionary<string, IList<CombineFileInputFormat.OneBlockInfo>> rackToBlocks,
    long totLength, long maxSize, long minSizeNode, long minSizeRack, IList<InputSplit> splits) {
    AList<CombineFileInputFormat.OneBlockInfo> validBlocks = new AList<CombineFileInputFormat.OneBlockInfo>();
    long curSplitSize = 0;
    int totalNodes = nodeToBlocks.Count;
    long totalLength = totLength;
    Multiset<string> splitsPerNode = HashMultiset.Create();
    ICollection<string> completedNodes = new HashSet<string>();
    // it is allowed for maxSize to be 0. Disable smoothing load for such cases.
    // Process all nodes and create splits that are local to a node. Generate
    // one split per node iteration, and walk over nodes multiple times to
    // distribute the splits across nodes.
    while (true) {
        for (IEnumerator<KeyValuePair<string, ICollection<CombineFileInputFormat.OneBlockInfo>>> iter = nodeToBlocks.GetEnumerator(); iter.HasNext();) {
            KeyValuePair<string, ICollection<CombineFileInputFormat.OneBlockInfo>> one = iter.Next();
            string node = one.Key;
            // Skip the node if it has previously been marked as completed.
            if (completedNodes.Contains(node)) {
                continue;
            }
            ICollection<CombineFileInputFormat.OneBlockInfo> blocksInCurrentNode = one.Value;
            // for each block, copy it into validBlocks. Delete it from
            // blockToNodes so that the same block does not appear in
            // two different splits.
            IEnumerator<CombineFileInputFormat.OneBlockInfo> oneBlockIter = blocksInCurrentNode.GetEnumerator();
            while (oneBlockIter.HasNext()) {
                CombineFileInputFormat.OneBlockInfo oneblock = oneBlockIter.Next();
                // Remove all blocks which may already have been assigned to other
                // splits.
                if (!blockToNodes.Contains(oneblock)) {
                    oneBlockIter.Remove();
                    continue;
                }
                validBlocks.AddItem(oneblock);
                Sharpen.Collections.Remove(blockToNodes, oneblock);
                curSplitSize += oneblock.length;
                // if the accumulated split size exceeds the maximum, then
                // create this split.
                if (maxSize != 0 && curSplitSize >= maxSize) {
                    // create an input split and add it to the splits array
                    AddCreatedSplit(splits, Sharpen.Collections.Singleton(node), validBlocks);
                    totalLength -= curSplitSize;
                    curSplitSize = 0;
                    splitsPerNode.AddItem(node);
                    // Remove entries from blocksInNode so that we don't walk these
                    // again.
                    blocksInCurrentNode.RemoveAll(validBlocks);
                    validBlocks.Clear();
                    // Done creating a single split for this node. Move on to the next
                    // node so that splits are distributed across nodes.
                    break;
                }
            }
            if (validBlocks.Count != 0) {
                // This implies that the last few blocks (or all in case maxSize=0)
                // were not part of a split. The node is complete.
                if (minSizeNode != 0 && curSplitSize >= minSizeNode && splitsPerNode.Count(node) == 0) {
                    // If there were any blocks left over and their combined size is
                    // larger than minSplitNode, then combine them into one split.
                    // We haven't created any split on this machine yet, so it is ok
                    // to add a smaller one for parallelism; otherwise it would be
                    // grouped in the rack for balanced size. This condition also
                    // kicks in when max split size is not set: all blocks on a node
                    // are then grouped together into a single split.
                    AddCreatedSplit(splits, Sharpen.Collections.Singleton(node), validBlocks);
                    totalLength -= curSplitSize;
                    splitsPerNode.AddItem(node);
                    // Remove entries from blocksInNode so that we don't walk this again.
                    blocksInCurrentNode.RemoveAll(validBlocks);
                } else {
                    // The node is done. This was the last set of blocks for this node.
                    // Put the unplaced blocks back into the pool; it is likely that
                    // they will be combined with other blocks from the same rack
                    // later on.
                    foreach (CombineFileInputFormat.OneBlockInfo oneblock in validBlocks) {
                        blockToNodes[oneblock] = oneblock.hosts;
                    }
                }
                validBlocks.Clear();
                curSplitSize = 0;
                completedNodes.AddItem(node);
            } else if (blocksInCurrentNode.Count == 0) {
                // No in-flight blocks and nothing left on the node: the node is
                // done. All blocks were fit into node-local splits.
                completedNodes.AddItem(node);
            }
            // else run through the node again.
        }
        // Check if node-local assignments are complete.
        if (completedNodes.Count == totalNodes || totalLength == 0) {
            // All nodes have been walked over and marked as completed or all blocks
            // have been assigned. The rest should be handled via rack-local assignment.
            Log.Info("DEBUG: Terminated node allocation with : CompletedNodes: " + completedNodes.Count + ", size left: " + totalLength);
            break;
        }
    }
    // if blocks in a rack are below the specified minimum size, then keep them
    // in 'overflow'. After the processing of all racks is complete, these
    // overflow blocks will be combined into splits.
    AList<CombineFileInputFormat.OneBlockInfo> overflowBlocks = new AList<CombineFileInputFormat.OneBlockInfo>();
    ICollection<string> racks = new HashSet<string>();
    // Process all racks over and over again until there is no more work to do.
    while (blockToNodes.Count > 0) {
        // Create one split for this rack before moving over to the next rack.
        // Come back to this rack after creating a single split for each of the
        // remaining racks. Process one rack location at a time: combine all
        // possible blocks that reside on this rack as one split (constrained
        // by minimum and maximum split size).
        // iterate over all racks
        for (IEnumerator<KeyValuePair<string, IList<CombineFileInputFormat.OneBlockInfo>>> iter = rackToBlocks.GetEnumerator(); iter.HasNext();) {
            KeyValuePair<string, IList<CombineFileInputFormat.OneBlockInfo>> one = iter.Next();
            racks.AddItem(one.Key);
            IList<CombineFileInputFormat.OneBlockInfo> blocks = one.Value;
            // for each block, copy it into validBlocks. Delete it from
            // blockToNodes so that the same block does not appear in
            // two different splits.
            bool createdSplit = false;
            foreach (CombineFileInputFormat.OneBlockInfo oneblock in blocks) {
                if (blockToNodes.Contains(oneblock)) {
                    validBlocks.AddItem(oneblock);
                    Sharpen.Collections.Remove(blockToNodes, oneblock);
                    curSplitSize += oneblock.length;
                    // if the accumulated split size exceeds the maximum, then
                    // create this split.
                    if (maxSize != 0 && curSplitSize >= maxSize) {
                        // create an input split and add it to the splits array
                        AddCreatedSplit(splits, GetHosts(racks), validBlocks);
                        createdSplit = true;
                        break;
                    }
                }
            }
            // if we created a split, then just go to the next rack
            if (createdSplit) {
                curSplitSize = 0;
                validBlocks.Clear();
                racks.Clear();
                continue;
            }
            if (!validBlocks.IsEmpty()) {
                if (minSizeRack != 0 && curSplitSize >= minSizeRack) {
                    // if there is a minimum size specified, then create a single split
                    AddCreatedSplit(splits, GetHosts(racks), validBlocks);
                } else {
                    // There were a few blocks in this rack that remained to be
                    // processed. Keep them in 'overflow'; these will be combined
                    // later.
                    Sharpen.Collections.AddAll(overflowBlocks, validBlocks);
                }
            }
            curSplitSize = 0;
            validBlocks.Clear();
            racks.Clear();
        }
    }
    System.Diagnostics.Debug.Assert(blockToNodes.IsEmpty());
    System.Diagnostics.Debug.Assert(curSplitSize == 0);
    System.Diagnostics.Debug.Assert(validBlocks.IsEmpty());
    System.Diagnostics.Debug.Assert(racks.IsEmpty());
    // Process all overflow blocks
    foreach (CombineFileInputFormat.OneBlockInfo oneblock_1 in overflowBlocks) {
        validBlocks.AddItem(oneblock_1);
        curSplitSize += oneblock_1.length;
        // This might cause an existing rack location to be re-added,
        // but it should be ok.
        for (int i = 0; i < oneblock_1.racks.Length; i++) {
            racks.AddItem(oneblock_1.racks[i]);
        }
        // if the accumulated split size exceeds the maximum, then
        // create this split.
        if (maxSize != 0 && curSplitSize >= maxSize) {
            // create an input split and add it to the splits array
            AddCreatedSplit(splits, GetHosts(racks), validBlocks);
            curSplitSize = 0;
            validBlocks.Clear();
            racks.Clear();
        }
    }
    // Process any remaining blocks, if any.
    if (!validBlocks.IsEmpty()) {
        AddCreatedSplit(splits, GetHosts(racks), validBlocks);
    }
}
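Every pass above applies the same greedy rule at a different locality level (node, rack, overflow): accumulate blocks until the running size crosses maxSize, emit a split, reset the accumulator, and sweep up the leftovers at the end. A stripped-down sketch of that rule in isolation (types are illustrative):

using System.Collections.Generic;

static List<List<long>> GreedySplits(IEnumerable<long> blockSizes, long maxSize) {
    var splits = new List<List<long>>();
    var current = new List<long>();
    long currentSize = 0;
    foreach (long size in blockSizes) {
        current.Add(size);
        currentSize += size;
        // maxSize == 0 means "no cap": everything lands in one split.
        if (maxSize != 0 && currentSize >= maxSize) {
            splits.Add(current);            // emit the full split...
            current = new List<long>();     // ...and start accumulating anew
            currentSize = 0;
        }
    }
    if (current.Count > 0) splits.Add(current); // leftovers form the last split
    return splits;
}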
private void KillBasedOnProgress(bool considerMaps) {
    bool fail = false;
    if (considerMaps) {
        ReliabilityTest.Log.Info("Will kill tasks based on Maps' progress");
    } else {
        ReliabilityTest.Log.Info("Will kill tasks based on Reduces' progress");
    }
    ReliabilityTest.Log.Info("Initial progress threshold: " + this.threshold
        + ". Threshold Multiplier: " + this.thresholdMultiplier
        + ". Number of iterations: " + this.numIterations);
    float thresholdVal = this.threshold;
    int numIterationsDone = 0;
    while (!this.killed) {
        try {
            float progress;
            if (this.jc.GetJob(this.rJob.GetID()).IsComplete() || numIterationsDone == this.numIterations) {
                break;
            }
            if (considerMaps) {
                progress = this.jc.GetJob(this.rJob.GetID()).MapProgress();
            } else {
                progress = this.jc.GetJob(this.rJob.GetID()).ReduceProgress();
            }
            if (progress >= thresholdVal) {
                numIterationsDone++;
                if (numIterationsDone > 0 && numIterationsDone % 2 == 0) {
                    fail = true; // fail tasks instead of killing them
                }
                ClusterStatus c = this.jc.GetClusterStatus();
                ReliabilityTest.Log.Info(new DateTime() + " Killing a few tasks");
                ICollection<TaskAttemptID> runningTasks = new AList<TaskAttemptID>();
                TaskReport[] mapReports = this.jc.GetMapTaskReports(this.rJob.GetID());
                foreach (TaskReport mapReport in mapReports) {
                    if (mapReport.GetCurrentStatus() == TIPStatus.Running) {
                        Sharpen.Collections.AddAll(runningTasks, mapReport.GetRunningTaskAttempts());
                    }
                }
                if (runningTasks.Count > c.GetTaskTrackers() / 2) {
                    int count = 0;
                    foreach (TaskAttemptID t in runningTasks) {
                        ReliabilityTest.Log.Info(new DateTime() + " Killed task : " + t);
                        this.rJob.KillTask(t, fail);
                        if (count++ > runningTasks.Count / 2) {
                            break; // kill 50%
                        }
                    }
                }
                runningTasks.Clear();
                TaskReport[] reduceReports = this.jc.GetReduceTaskReports(this.rJob.GetID());
                foreach (TaskReport reduceReport in reduceReports) {
                    if (reduceReport.GetCurrentStatus() == TIPStatus.Running) {
                        Sharpen.Collections.AddAll(runningTasks, reduceReport.GetRunningTaskAttempts());
                    }
                }
                if (runningTasks.Count > c.GetTaskTrackers() / 2) {
                    int count = 0;
                    foreach (TaskAttemptID t in runningTasks) {
                        ReliabilityTest.Log.Info(new DateTime() + " Killed task : " + t);
                        this.rJob.KillTask(t, fail);
                        if (count++ > runningTasks.Count / 2) {
                            break; // kill 50%
                        }
                    }
                }
                thresholdVal = thresholdVal * this.thresholdMultiplier;
            }
            Sharpen.Thread.Sleep(5000);
        } catch (System.Threading.ThreadInterruptedException) {
            // interrupted while sleeping: stop the kill loop (this was
            // InterruptedException in the Java original; the flattened source
            // had two identical catch (Exception) clauses, which cannot compile)
            this.killed = true;
        } catch (Exception e) {
            ReliabilityTest.Log.Fatal(StringUtils.StringifyException(e));
        }
    }
}
/// <exception cref="System.IO.IOException"></exception> private IDictionary <string, OpenSshConfig.Host> Parse(InputStream @in) { IDictionary <string, OpenSshConfig.Host> m = new LinkedHashMap <string, OpenSshConfig.Host >(); BufferedReader br = new BufferedReader(new InputStreamReader(@in)); IList <OpenSshConfig.Host> current = new AList <OpenSshConfig.Host>(4); string line; while ((line = br.ReadLine()) != null) { line = line.Trim(); if (line.Length == 0 || line.StartsWith("#")) { continue; } string[] parts = line.Split("[ \t]*[= \t]", 2); string keyword = parts[0].Trim(); string argValue = parts[1].Trim(); if (StringUtils.EqualsIgnoreCase("Host", keyword)) { current.Clear(); foreach (string pattern in argValue.Split("[ \t]")) { string name = Dequote(pattern); OpenSshConfig.Host c = m.Get(name); if (c == null) { c = new OpenSshConfig.Host(); m.Put(name, c); } current.AddItem(c); } continue; } if (current.IsEmpty()) { // We received an option outside of a Host block. We // don't know who this should match against, so skip. // continue; } if (StringUtils.EqualsIgnoreCase("HostName", keyword)) { foreach (OpenSshConfig.Host c in current) { if (c.hostName == null) { c.hostName = Dequote(argValue); } } } else { if (StringUtils.EqualsIgnoreCase("User", keyword)) { foreach (OpenSshConfig.Host c in current) { if (c.user == null) { c.user = Dequote(argValue); } } } else { if (StringUtils.EqualsIgnoreCase("Port", keyword)) { try { int port = System.Convert.ToInt32(Dequote(argValue)); foreach (OpenSshConfig.Host c in current) { if (c.port == 0) { c.port = port; } } } catch (FormatException) { } } else { // Bad port number. Don't set it. if (StringUtils.EqualsIgnoreCase("IdentityFile", keyword)) { foreach (OpenSshConfig.Host c in current) { if (c.identityFile == null) { c.identityFile = ToFile(Dequote(argValue)); } } } else { if (StringUtils.EqualsIgnoreCase("PreferredAuthentications", keyword)) { foreach (OpenSshConfig.Host c in current) { if (c.preferredAuthentications == null) { c.preferredAuthentications = Nows(Dequote(argValue)); } } } else { if (StringUtils.EqualsIgnoreCase("BatchMode", keyword)) { foreach (OpenSshConfig.Host c in current) { if (c.batchMode == null) { c.batchMode = Yesno(Dequote(argValue)); } } } else { if (StringUtils.EqualsIgnoreCase("StrictHostKeyChecking", keyword)) { string value = Dequote(argValue); foreach (OpenSshConfig.Host c in current) { if (c.strictHostKeyChecking == null) { c.strictHostKeyChecking = value; } } } } } } } } } } return(m); }
/// <summary>Execute this checkout</summary>
/// <returns>
/// <code>false</code> if this method could not delete all the files
/// which should be deleted (e.g. because one of the files was
/// locked). In this case
/// <see cref="GetToBeDeleted()">GetToBeDeleted()</see>
/// lists the files which should be tried to be deleted outside of
/// this method. Although <code>false</code> is returned the checkout
/// was successful and the working tree was updated for all other
/// files. <code>true</code> is returned when no such problem occurred
/// </returns>
/// <exception cref="System.IO.IOException"></exception>
public virtual bool Checkout() {
    toBeDeleted.Clear();
    if (headCommitTree != null) {
        PreScanTwoTrees();
    } else {
        PrescanOneTree();
    }
    if (!conflicts.IsEmpty()) {
        if (failOnConflict) {
            dc.Unlock();
            throw new NGit.Errors.CheckoutConflictException(
                Sharpen.Collections.ToArray(conflicts, new string[conflicts.Count]));
        } else {
            CleanUpConflicts();
        }
    }
    // update our index
    builder.Finish();
    FilePath file = null;
    string last = string.Empty;
    // when deleting files process them in the opposite order as they have
    // been reported. This ensures the files are deleted before we delete
    // their parent folders
    for (int i = removed.Count - 1; i >= 0; i--) {
        string r = removed[i];
        file = new FilePath(repo.WorkTree, r);
        if (!file.Delete() && file.Exists()) {
            toBeDeleted.AddItem(r);
        } else {
            if (!IsSamePrefix(r, last)) {
                RemoveEmptyParents(file);
            }
            last = r;
        }
    }
    if (file != null) {
        RemoveEmptyParents(file);
    }
    foreach (string path in updated.Keys) {
        // ... create/overwrite this file ...
        file = new FilePath(repo.WorkTree, path);
        if (!file.GetParentFile().Mkdirs()) {
            // ignore
        }
        DirCacheEntry entry = dc.GetEntry(path);
        // submodules are handled with separate operations
        if (FileMode.GITLINK.Equals(entry.RawMode)) {
            continue;
        }
        CheckoutEntry(repo, file, entry);
    }
    // commit the index builder - a new index is persisted
    if (!builder.Commit()) {
        dc.Unlock();
        throw new IndexWriteException();
    }
    return toBeDeleted.Count == 0;
}
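Deleting in reverse report order guarantees files go before the directories that contain them, so empty-parent cleanup never races ahead of its children. A minimal sketch of the same ordering trick with plain System.IO (paths and the retry list are illustrative):

using System.Collections.Generic;
using System.IO;

static List<string> DeleteDeepestFirst(IList<string> reportedPaths, string workTree) {
    var failed = new List<string>();
    // Walk the reported paths backwards so files are removed before the
    // directories that contain them become candidates for cleanup.
    for (int i = reportedPaths.Count - 1; i >= 0; i--) {
        string path = Path.Combine(workTree, reportedPaths[i]);
        try {
            File.Delete(path);
        } catch (IOException) {
            failed.Add(reportedPaths[i]); // e.g. locked; caller can retry later
        }
    }
    return failed;
}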