public virtual void TestChooseTargetWithDecomNodes()
{
    // Verifies that the block-placement policy never picks decommissioned
    // datanodes and that the in-service xceiver average only counts nodes
    // that are still in service.
    namenode.GetNamesystem().WriteLock();
    try
    {
        string blockPoolId = namenode.GetNamesystem().GetBlockPoolId();
        // Report xceiver loads of 2, 4 and 4 for datanodes 3..5.
        dnManager.HandleHeartbeat(dnrList[3],
            BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[3]), blockPoolId,
            dataNodes[3].GetCacheCapacity(), dataNodes[3].GetCacheRemaining(), 2, 0, 0, null);
        dnManager.HandleHeartbeat(dnrList[4],
            BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[4]), blockPoolId,
            dataNodes[4].GetCacheCapacity(), dataNodes[4].GetCacheRemaining(), 4, 0, 0, null);
        dnManager.HandleHeartbeat(dnrList[5],
            BlockManagerTestUtil.GetStorageReportsForDatanode(dataNodes[5]), blockPoolId,
            dataNodes[5].GetCacheCapacity(), dataNodes[5].GetCacheRemaining(), 4, 0, 0, null);
        // value in the above heartbeats
        int load = 2 + 4 + 4;
        // All six nodes are still in service, so the average spreads over 6.
        // (The original code also fetched an unused FSNamesystem local here;
        // it has been removed.)
        NUnit.Framework.Assert.AreEqual((double)load / 6,
            dnManager.GetFSClusterStats().GetInServiceXceiverAverage(), Epsilon);
        // Decommission DNs so BlockPlacementPolicyDefault.isGoodTarget()
        // returns false
        for (int i = 0; i < 3; i++)
        {
            DatanodeDescriptor d = dnManager.GetDatanode(dnrList[i]);
            dnManager.GetDecomManager().StartDecommission(d);
            d.SetDecommissioned();
        }
        // Only three nodes remain in service, so the divisor drops to 3.
        NUnit.Framework.Assert.AreEqual((double)load / 3,
            dnManager.GetFSClusterStats().GetInServiceXceiverAverage(), Epsilon);
        // update references of writer DN to update the de-commissioned state
        IList<DatanodeDescriptor> liveNodes = new AList<DatanodeDescriptor>();
        dnManager.FetchDatanodes(liveNodes, null, false);
        DatanodeDescriptor writerDn = null;
        if (liveNodes.Contains(dataNodes[0]))
        {
            writerDn = liveNodes[liveNodes.IndexOf(dataNodes[0])];
        }
        // Call chooseTarget()
        DatanodeStorageInfo[] targets = namenode.GetNamesystem().GetBlockManager()
            .GetBlockPlacementPolicy().ChooseTarget("testFile.txt", 3, writerDn,
                new AList<DatanodeStorageInfo>(), false, null, 1024,
                TestBlockStoragePolicy.DefaultStoragePolicy);
        NUnit.Framework.Assert.AreEqual(3, targets.Length);
        // Every chosen target must come from the non-decommissioned
        // datanodes (storages 3..5).
        ICollection<DatanodeStorageInfo> targetSet =
            new HashSet<DatanodeStorageInfo>(Arrays.AsList(targets));
        for (int i = 3; i < storages.Length; i++)
        {
            NUnit.Framework.Assert.IsTrue(targetSet.Contains(storages[i]));
        }
    }
    finally
    {
        // Undo the decommission so later tests see a clean cluster state.
        dataNodes[0].StopDecommission();
        dataNodes[1].StopDecommission();
        dataNodes[2].StopDecommission();
        namenode.GetNamesystem().WriteUnlock();
    }
}
protected override bool RemoveFromBoth(AList <int> alist, List <int> list, int item) { int i = alist.IndexOf(item); if (i == -1) { return(false); } alist.Remove(item); list.RemoveAt(i); return(true); }
public void IndexOf_CheckUnusedWord_ReturnNegativeOne() { //arrange AList <string> listOfValues = new AList <string>(); listOfValues.Add("fill"); listOfValues.Add("fill"); int test; //act test = listOfValues.IndexOf("spill"); //assert Assert.AreEqual(test, -1); }
public void IndexOf_CheckIndex1_AListIndex1EqualsValue() { //arrange string value = "Expected value"; AList <string> listOfValues = new AList <string>(); listOfValues.Add("fill"); listOfValues.Add(value); listOfValues.Add("fill"); int test; //act test = listOfValues.IndexOf("Expected value"); //assert Assert.AreEqual(test, 1); }
public virtual int GetObjectIndex(InterCodeObject obj) { return(mInterCodeObjectList.IndexOf(new WeakReference <InterCodeObject>(obj))); }
// Get as many revisions as possible in one _all_docs request.
// This is compatible with CouchDB, but it only works for revs of generation 1 without attachments.
internal void PullBulkWithAllDocs(IList<RevisionInternal> bulkRevs)
{
    // http://wiki.apache.org/couchdb/HTTP_Bulk_Document_API
    Log.V(Tag, "PullBulkWithAllDocs() calling AsyncTaskStarted()");
    AsyncTaskStarted();
    ++httpConnectionCount;

    // Track which of the requested revisions still need to be fetched.
    var remainingRevs = new AList<RevisionInternal>(bulkRevs);
    var docIds = bulkRevs.Select(rev => rev.GetDocId()).ToArray();
    var body = new Dictionary<string, object>();
    body.Put("keys", docIds);

    SendAsyncRequest(HttpMethod.Post, "/_all_docs?include_docs=true", body, (result, e) =>
    {
        var response = result.AsDictionary<string, object>();
        if (e != null)
        {
            // The whole batch failed; count every revision as completed so
            // progress accounting stays consistent.
            SetLastError(e);
            RevisionFailed();
            SafeAddToCompletedChangesCount(bulkRevs.Count);
        }
        else
        {
            // Process the resulting rows' documents.
            // We only add a document if it doesn't have attachments, and if its
            // revID matches the one we asked for.
            var rows = response.Get("rows").AsList<IDictionary<string, object>>();
            Log.V(Tag, "Checking {0} bulk-fetched remote revisions", rows.Count);
            foreach (var row in rows)
            {
                var doc = row.Get("doc").AsDictionary<string, object>();
                if (doc != null && doc.Get("_attachments") == null)
                {
                    var rev = new RevisionInternal(doc, LocalDatabase);
                    var pos = remainingRevs.IndexOf(rev);
                    if (pos > -1)
                    {
                        // Carry over the queued sequence before removing the
                        // placeholder from the remaining set.
                        rev.SetSequence(remainingRevs[pos].GetSequence());
                        remainingRevs.Remove(pos);
                        QueueDownloadedRevision(rev);
                    }
                }
            }
        }

        // Any leftover revisions that didn't get matched will be fetched individually:
        if (remainingRevs.Count > 0)
        {
            Log.V(Tag, "Bulk-fetch didn't work for {0} of {1} revs; getting individually", remainingRevs.Count, bulkRevs.Count);
            foreach (var rev in remainingRevs)
            {
                QueueRemoteRevision(rev);
            }
            PullRemoteRevisions();
        }

        // Note that we've finished this task:
        Log.V(Tag, "PullBulkWithAllDocs() calling AsyncTaskFinished()");
        AsyncTaskFinished(1);
        --httpConnectionCount;
        // Start another task if there are still revisions waiting to be pulled:
        PullRemoteRevisions();
    });
}