/// <summary> /// Objects known to exist but not expressed by /// <see cref="NGit.Repository.GetAllRefs()">NGit.Repository.GetAllRefs()</see> /// . /// <p> /// When a repository borrows objects from another repository, it can /// advertise that it safely has that other repository's references, without /// exposing any other details about the other repository. This may help /// a client trying to push changes avoid pushing more than it needs to. /// </summary> /// <returns>unmodifiable collection of other known objects.</returns> public override ICollection <ObjectId> GetAdditionalHaves() { HashSet <ObjectId> r = new HashSet <ObjectId>(); foreach (FileObjectDatabase.AlternateHandle d in objectDatabase.MyAlternates()) { if (d is FileObjectDatabase.AlternateRepository) { Repository repo; repo = ((FileObjectDatabase.AlternateRepository)d).repository; foreach (Ref @ref in repo.GetAllRefs().Values) { if (@ref.GetObjectId() != null) { r.AddItem(@ref.GetObjectId()); } if (@ref.GetPeeledObjectId() != null) { r.AddItem(@ref.GetPeeledObjectId()); } } Sharpen.Collections.AddAll(r, repo.GetAdditionalHaves()); } } return(r); }
/// <param name="ref">the ref which log should be inspected</param> /// <param name="minTime">only reflog entries not older then this time are processed</param> /// <returns> /// the /// <see cref="NGit.ObjectId">NGit.ObjectId</see> /// s contained in the reflog /// </returns> /// <exception cref="System.IO.IOException">System.IO.IOException</exception> private ICollection <ObjectId> ListRefLogObjects(Ref @ref, long minTime) { IList <ReflogEntry> rlEntries = repo.GetReflogReader(@ref.GetName()).GetReverseEntries (); if (rlEntries == null || rlEntries.IsEmpty()) { return(Sharpen.Collections.EmptySet <ObjectId>()); } ICollection <ObjectId> ret = new HashSet <ObjectId>(); foreach (ReflogEntry e in rlEntries) { if (e.GetWho().GetWhen().GetTime() < minTime) { break; } ret.AddItem(e.GetNewId()); ObjectId oldId = e.GetOldId(); if (oldId != null && !ObjectId.ZeroId.Equals(oldId)) { ret.AddItem(oldId); } } return(ret); }
public virtual void TestListDirectory() { FilePath dir = new FilePath("testListDirectory"); Files.CreateDirectory(dir.ToPath()); try { ICollection <string> entries = new HashSet <string>(); entries.AddItem("entry1"); entries.AddItem("entry2"); entries.AddItem("entry3"); foreach (string entry in entries) { Files.CreateDirectory(new FilePath(dir, entry).ToPath()); } IList <string> list = IOUtils.ListDirectory(dir, TestIOUtils.NoEntry3Filter.Instance ); foreach (string entry_1 in list) { Assert.True(entries.Remove(entry_1)); } Assert.True(entries.Contains("entry3")); list = IOUtils.ListDirectory(dir, null); foreach (string entry_2 in list) { entries.Remove(entry_2); } Assert.True(entries.IsEmpty()); } finally { FileUtils.DeleteDirectory(dir); } }
public virtual void TestPutMetrics2() { GraphiteSink sink = new GraphiteSink(); IList <MetricsTag> tags = new AList <MetricsTag>(); tags.AddItem(new MetricsTag(MsInfo.Context, "all")); tags.AddItem(new MetricsTag(MsInfo.Hostname, null)); ICollection <AbstractMetric> metrics = new HashSet <AbstractMetric>(); metrics.AddItem(MakeMetric("foo1", 1)); metrics.AddItem(MakeMetric("foo2", 2)); MetricsRecord record = new MetricsRecordImpl(MsInfo.Context, (long)10000, tags, metrics ); ArgumentCaptor <string> argument = ArgumentCaptor.ForClass <string>(); GraphiteSink.Graphite mockGraphite = MakeGraphite(); Whitebox.SetInternalState(sink, "graphite", mockGraphite); sink.PutMetrics(record); try { Org.Mockito.Mockito.Verify(mockGraphite).Write(argument.Capture()); } catch (IOException e) { Runtime.PrintStackTrace(e); } string result = argument.GetValue(); Assert.Equal(true, result.Equals("null.all.Context.Context=all.foo1 1 10\n" + "null.all.Context.Context=all.foo2 2 10\n") || result.Equals("null.all.Context.Context=all.foo2 2 10\n" + "null.all.Context.Context=all.foo1 1 10\n")); }
public virtual void TestListLocatedStatus() { string testHarPath = this.GetType().GetResource("/test.har").AbsolutePath; URI uri = new URI("har://" + testHarPath); HarFileSystem hfs = new HarFileSystem(localFileSystem); hfs.Initialize(uri, new Configuration()); // test.har has the following contents: // dir1/1.txt // dir1/2.txt ICollection <string> expectedFileNames = new HashSet <string>(); expectedFileNames.AddItem("1.txt"); expectedFileNames.AddItem("2.txt"); // List contents of dir, and ensure we find all expected files Path path = new Path("dir1"); RemoteIterator <LocatedFileStatus> fileList = hfs.ListLocatedStatus(path); while (fileList.HasNext()) { string fileName = fileList.Next().GetPath().GetName(); Assert.True(fileName + " not in expected files list", expectedFileNames .Contains(fileName)); expectedFileNames.Remove(fileName); } Assert.Equal("Didn't find all of the expected file names: " + expectedFileNames, 0, expectedFileNames.Count); }
public override void Run() { try { MBeanServer mbs = ManagementFactory.GetPlatformMBeanServer(); // Metrics that belong to "FSNamesystem", these are metrics that // come from hadoop metrics framework for the class FSNamesystem. ObjectName mxbeanNamefsn = new ObjectName("Hadoop:service=NameNode,name=FSNamesystem" ); // Metrics that belong to "FSNamesystemState". // These are metrics that FSNamesystem registers directly with MBeanServer. ObjectName mxbeanNameFsns = new ObjectName("Hadoop:service=NameNode,name=FSNamesystemState" ); // Metrics that belong to "NameNodeInfo". // These are metrics that FSNamesystem registers directly with MBeanServer. ObjectName mxbeanNameNni = new ObjectName("Hadoop:service=NameNode,name=NameNodeInfo" ); ICollection <ObjectName> mbeans = new HashSet <ObjectName>(); mbeans.AddItem(mxbeanNamefsn); mbeans.AddItem(mxbeanNameFsns); mbeans.AddItem(mxbeanNameNni); foreach (ObjectName mbean in mbeans) { MBeanInfo attributes = mbs.GetMBeanInfo(mbean); foreach (MBeanAttributeInfo attributeInfo in attributes.GetAttributes()) { mbs.GetAttribute(mbean, attributeInfo.GetName()); } } succeeded = true; } catch (Exception) { } }
/// <exception cref="NGit.Errors.TransportException"></exception> private void QueueWants(ICollection <Ref> want) { HashSet <ObjectId> inWorkQueue = new HashSet <ObjectId>(); foreach (Ref r in want) { ObjectId id = r.GetObjectId(); try { RevObject obj = revWalk.ParseAny(id); if (obj.Has(COMPLETE)) { continue; } if (inWorkQueue.AddItem(id)) { obj.Add(IN_WORK_QUEUE); workQueue.AddItem(obj); } } catch (MissingObjectException) { if (inWorkQueue.AddItem(id)) { workQueue.AddItem(id); } } catch (IOException e) { throw new TransportException(MessageFormat.Format(JGitText.Get().cannotRead, id.Name ), e); } } }
/// <summary>Get configured node labels in a given queuePath</summary> public virtual ICollection <string> GetConfiguredNodeLabels(string queuePath) { ICollection <string> configuredNodeLabels = new HashSet <string>(); KeyValuePair <string, string> e = null; IEnumerator <KeyValuePair <string, string> > iter = GetEnumerator(); while (iter.HasNext()) { e = iter.Next(); string key = e.Key; if (key.StartsWith(GetQueuePrefix(queuePath) + AccessibleNodeLabels + Dot)) { // Find <label-name> in // <queue-path>.accessible-node-labels.<label-name>.property int labelStartIdx = key.IndexOf(AccessibleNodeLabels) + AccessibleNodeLabels.Length + 1; int labelEndIndx = key.IndexOf('.', labelStartIdx); string labelName = Sharpen.Runtime.Substring(key, labelStartIdx, labelEndIndx); configuredNodeLabels.AddItem(labelName); } } // always add NO_LABEL configuredNodeLabels.AddItem(RMNodeLabelsManager.NoLabel); return(configuredNodeLabels); }
/// <exception cref="System.IO.IOException"/> private int LoadTokensFromBucket(HistoryServerStateStoreService.HistoryServerState state, Path bucket) { string numStr = Sharpen.Runtime.Substring(bucket.GetName(), TokenBucketDirPrefix. Length); int bucketId = System.Convert.ToInt32(numStr); int numTokens = 0; FileStatus[] tokenStats = fs.ListStatus(bucket); ICollection <string> loadedTokens = new HashSet <string>(tokenStats.Length); foreach (FileStatus stat in tokenStats) { string name = stat.GetPath().GetName(); if (name.StartsWith(TokenFilePrefix)) { LoadTokenFromBucket(bucketId, state, stat.GetPath(), stat.GetLen()); loadedTokens.AddItem(name); ++numTokens; } else { if (name.StartsWith(UpdateTmpFilePrefix)) { string tokenName = Sharpen.Runtime.Substring(name, UpdateTmpFilePrefix.Length); if (loadedTokens.Contains(tokenName)) { // already have the token, update may be partial so ignore it fs.Delete(stat.GetPath(), false); } else { // token is missing, so try to parse the update temp file LoadTokenFromBucket(bucketId, state, stat.GetPath(), stat.GetLen()); fs.Rename(stat.GetPath(), new Path(stat.GetPath().GetParent(), tokenName)); loadedTokens.AddItem(tokenName); ++numTokens; } } else { if (name.StartsWith(TmpFilePrefix)) { // cleanup incomplete temp files fs.Delete(stat.GetPath(), false); } else { Log.Warn("Skipping unexpected file in history server token bucket: " + stat.GetPath ()); } } } } return(numTokens); }
public virtual void TestDirectory() { fs.Mkdirs(Dir1); // test empty directory RemoteIterator <LocatedFileStatus> itor = fs.ListFiles(Dir1, true); NUnit.Framework.Assert.IsFalse(itor.HasNext()); itor = fs.ListFiles(Dir1, false); NUnit.Framework.Assert.IsFalse(itor.HasNext()); // testing directory with 1 file WriteFile(fs, File2, FileLen); itor = fs.ListFiles(Dir1, true); LocatedFileStatus stat = itor.Next(); NUnit.Framework.Assert.IsFalse(itor.HasNext()); Assert.True(stat.IsFile()); Assert.Equal(FileLen, stat.GetLen()); Assert.Equal(fs.MakeQualified(File2), stat.GetPath()); Assert.Equal(1, stat.GetBlockLocations().Length); itor = fs.ListFiles(Dir1, false); stat = itor.Next(); NUnit.Framework.Assert.IsFalse(itor.HasNext()); Assert.True(stat.IsFile()); Assert.Equal(FileLen, stat.GetLen()); Assert.Equal(fs.MakeQualified(File2), stat.GetPath()); Assert.Equal(1, stat.GetBlockLocations().Length); // test more complicated directory WriteFile(fs, File1, FileLen); WriteFile(fs, File3, FileLen); ICollection <Path> filesToFind = new HashSet <Path>(); filesToFind.AddItem(fs.MakeQualified(File1)); filesToFind.AddItem(fs.MakeQualified(File2)); filesToFind.AddItem(fs.MakeQualified(File3)); itor = fs.ListFiles(TestDir, true); stat = itor.Next(); Assert.True(stat.IsFile()); Assert.True("Path " + stat.GetPath() + " unexpected", filesToFind .Remove(stat.GetPath())); stat = itor.Next(); Assert.True(stat.IsFile()); Assert.True("Path " + stat.GetPath() + " unexpected", filesToFind .Remove(stat.GetPath())); stat = itor.Next(); Assert.True(stat.IsFile()); Assert.True("Path " + stat.GetPath() + " unexpected", filesToFind .Remove(stat.GetPath())); NUnit.Framework.Assert.IsFalse(itor.HasNext()); Assert.True(filesToFind.IsEmpty()); itor = fs.ListFiles(TestDir, false); stat = itor.Next(); Assert.True(stat.IsFile()); Assert.Equal(fs.MakeQualified(File1), stat.GetPath()); NUnit.Framework.Assert.IsFalse(itor.HasNext()); fs.Delete(TestDir, true); }
/// <exception cref="System.Exception"/> public virtual void TestHAUtilClonesDelegationTokens() { Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token = GetDelegationToken (fs, "JobTracker"); UserGroupInformation ugi = UserGroupInformation.CreateRemoteUser("test"); URI haUri = new URI("hdfs://my-ha-uri/"); token.SetService(HAUtil.BuildTokenServiceForLogicalUri(haUri, HdfsConstants.HdfsUriScheme )); ugi.AddToken(token); ICollection <IPEndPoint> nnAddrs = new HashSet <IPEndPoint>(); nnAddrs.AddItem(new IPEndPoint("localhost", nn0.GetNameNodeAddress().Port)); nnAddrs.AddItem(new IPEndPoint("localhost", nn1.GetNameNodeAddress().Port)); HAUtil.CloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs); ICollection <Org.Apache.Hadoop.Security.Token.Token <TokenIdentifier> > tokens = ugi .GetTokens(); NUnit.Framework.Assert.AreEqual(3, tokens.Count); Log.Info("Tokens:\n" + Joiner.On("\n").Join(tokens)); DelegationTokenSelector dts = new DelegationTokenSelector(); // check that the token selected for one of the physical IPC addresses // matches the one we received foreach (IPEndPoint addr in nnAddrs) { Text ipcDtService = SecurityUtil.BuildTokenService(addr); Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token2 = dts.SelectToken (ipcDtService, ugi.GetTokens()); NUnit.Framework.Assert.IsNotNull(token2); Assert.AssertArrayEquals(token.GetIdentifier(), token2.GetIdentifier()); Assert.AssertArrayEquals(token.GetPassword(), token2.GetPassword()); } // switch to host-based tokens, shouldn't match existing tokens SecurityUtilTestHelper.SetTokenServiceUseIp(false); foreach (IPEndPoint addr_1 in nnAddrs) { Text ipcDtService = SecurityUtil.BuildTokenService(addr_1); Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token2 = dts.SelectToken (ipcDtService, ugi.GetTokens()); NUnit.Framework.Assert.IsNull(token2); } // reclone the tokens, and see if they match now HAUtil.CloneDelegationTokenForLogicalUri(ugi, haUri, nnAddrs); foreach (IPEndPoint addr_2 in nnAddrs) { Text ipcDtService = SecurityUtil.BuildTokenService(addr_2); Org.Apache.Hadoop.Security.Token.Token <DelegationTokenIdentifier> token2 = dts.SelectToken (ipcDtService, ugi.GetTokens()); NUnit.Framework.Assert.IsNotNull(token2); Assert.AssertArrayEquals(token.GetIdentifier(), token2.GetIdentifier()); Assert.AssertArrayEquals(token.GetPassword(), token2.GetPassword()); } }
public override void InitializeMemberVariables() { xmlFilename = new string("yarn-default.xml"); configurationClasses = (Type[])new Type[] { typeof(YarnConfiguration) }; // Allocate for usage configurationPropsToSkipCompare = new HashSet <string>(); // Set error modes errorIfMissingConfigProps = true; errorIfMissingXmlProps = false; // Specific properties to skip configurationPropsToSkipCompare.AddItem(YarnConfiguration.DefaultRmConfigurationProviderClass ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.DefaultClientFailoverProxyProvider ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.DefaultIpcRecordFactoryClass ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.DefaultIpcClientFactoryClass ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.DefaultIpcServerFactoryClass ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.DefaultIpcRpcImpl); configurationPropsToSkipCompare.AddItem(YarnConfiguration.DefaultRmScheduler); configurationPropsToSkipCompare.AddItem(YarnConfiguration.YarnSecurityServiceAuthorizationApplicationclientProtocol ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.YarnSecurityServiceAuthorizationApplicationmasterProtocol ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.YarnSecurityServiceAuthorizationContainerManagementProtocol ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.YarnSecurityServiceAuthorizationResourceLocalizer ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.YarnSecurityServiceAuthorizationResourcemanagerAdministrationProtocol ); configurationPropsToSkipCompare.AddItem(YarnConfiguration.YarnSecurityServiceAuthorizationResourcetrackerProtocol ); // Allocate for usage xmlPropsToSkipCompare = new HashSet <string>(); xmlPrefixToSkipCompare = new HashSet <string>(); // Should probably be moved from yarn-default.xml to mapred-default.xml xmlPropsToSkipCompare.AddItem("mapreduce.job.hdfs-servers"); xmlPropsToSkipCompare.AddItem("mapreduce.job.jar"); // Possibly obsolete, but unable to verify 100% xmlPropsToSkipCompare.AddItem("yarn.nodemanager.aux-services.mapreduce_shuffle.class" ); xmlPropsToSkipCompare.AddItem("yarn.resourcemanager.container.liveness-monitor.interval-ms" ); // Used in the XML file as a variable reference internal to the XML file xmlPropsToSkipCompare.AddItem("yarn.nodemanager.hostname"); xmlPropsToSkipCompare.AddItem("yarn.timeline-service.hostname"); // Currently defined in TimelineAuthenticationFilterInitializer xmlPrefixToSkipCompare.AddItem("yarn.timeline-service.http-authentication"); // Currently defined in RegistryConstants xmlPrefixToSkipCompare.AddItem("hadoop.registry"); }
internal static bool IsValidRequestor(ServletContext context, string remoteUser, Configuration conf)
{
    if (remoteUser == null)
    {
        // This really shouldn't happen...
        Log.Warn("Received null remoteUser while authorizing access to getImage servlet");
        return false;
    }
    ICollection<string> validRequestors = new HashSet<string>();
    validRequestors.AddItem(SecurityUtil.GetServerPrincipal(conf.Get(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey), NameNode.GetAddress(conf).GetHostName()));
    try
    {
        validRequestors.AddItem(SecurityUtil.GetServerPrincipal(conf.Get(DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey), SecondaryNameNode.GetHttpAddress(conf).GetHostName()));
    }
    catch (Exception e)
    {
        // Don't halt if SecondaryNameNode principal could not be added.
        Log.Debug("SecondaryNameNode principal could not be added", e);
        // Use {0}-style placeholders: string.Format does not substitute Java-style %s markers.
        string msg = string.Format("SecondaryNameNode principal not considered, {0} = {1}, {2} = {3}",
            DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey,
            conf.Get(DFSConfigKeys.DfsSecondaryNamenodeKerberosPrincipalKey),
            DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey,
            conf.GetTrimmed(DFSConfigKeys.DfsNamenodeSecondaryHttpAddressKey, DFSConfigKeys.DfsNamenodeSecondaryHttpAddressDefault));
        Log.Warn(msg);
    }
    if (HAUtil.IsHAEnabled(conf, DFSUtil.GetNamenodeNameServiceId(conf)))
    {
        Configuration otherNnConf = HAUtil.GetConfForOtherNode(conf);
        validRequestors.AddItem(SecurityUtil.GetServerPrincipal(otherNnConf.Get(DFSConfigKeys.DfsNamenodeKerberosPrincipalKey), NameNode.GetAddress(otherNnConf).GetHostName()));
    }
    foreach (string v in validRequestors)
    {
        if (v != null && v.Equals(remoteUser))
        {
            Log.Info("ImageServlet allowing checkpointer: " + remoteUser);
            return true;
        }
    }
    if (HttpServer2.UserHasAdministratorAccess(context, remoteUser))
    {
        Log.Info("ImageServlet allowing administrator: " + remoteUser);
        return true;
    }
    Log.Info("ImageServlet rejecting: " + remoteUser);
    return false;
}
/// <summary> /// This method checks if there is a conflict in the fragment names /// of the uris. /// </summary> /// <remarks> /// This method checks if there is a conflict in the fragment names /// of the uris. Also makes sure that each uri has a fragment. It /// is only to be called if you want to create symlinks for /// the various archives and files. May be used by user code. /// </remarks> /// <param name="uriFiles">The uri array of urifiles</param> /// <param name="uriArchives">the uri array of uri archives</param> public static bool CheckURIs(URI[] uriFiles, URI[] uriArchives) { if ((uriFiles == null) && (uriArchives == null)) { return(true); } // check if fragment is null for any uri // also check if there are any conflicts in fragment names ICollection <string> fragments = new HashSet <string>(); // iterate over file uris if (uriFiles != null) { for (int i = 0; i < uriFiles.Length; i++) { string fragment = uriFiles[i].GetFragment(); if (fragment == null) { return(false); } string lowerCaseFragment = StringUtils.ToLowerCase(fragment); if (fragments.Contains(lowerCaseFragment)) { return(false); } fragments.AddItem(lowerCaseFragment); } } // iterate over archive uris if (uriArchives != null) { for (int i = 0; i < uriArchives.Length; i++) { string fragment = uriArchives[i].GetFragment(); if (fragment == null) { return(false); } string lowerCaseFragment = StringUtils.ToLowerCase(fragment); if (fragments.Contains(lowerCaseFragment)) { return(false); } fragments.AddItem(lowerCaseFragment); } } return(true); }
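// Hedged usage sketch for CheckURIs above: it only illustrates the fragment rules
// (every URI needs a fragment; fragments must be unique case-insensitively across
// files and archives). The enclosing class name DistributedCache, the method name
// DemoCheckURIs, and the sample paths are assumptions made for this example.
private static void DemoCheckURIs()
{
    URI[] files = new URI[] { new URI("hdfs:///cache/lib.jar#lib"), new URI("hdfs:///cache/conf.xml#conf") };
    URI[] archives = new URI[] { new URI("hdfs:///cache/data.zip#LIB") };
    // false: "LIB" collides with "lib" once both fragments are lower-cased
    bool conflictFree = DistributedCache.CheckURIs(files, archives);
    System.Console.WriteLine(conflictFree);
}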
public virtual void TestGetApplicationsRequest() { GetApplicationsRequest request = GetApplicationsRequest.NewInstance(); EnumSet <YarnApplicationState> appStates = EnumSet.Of(YarnApplicationState.Accepted ); request.SetApplicationStates(appStates); ICollection <string> tags = new HashSet <string>(); tags.AddItem("tag1"); request.SetApplicationTags(tags); ICollection <string> types = new HashSet <string>(); types.AddItem("type1"); request.SetApplicationTypes(types); long startBegin = Runtime.CurrentTimeMillis(); long startEnd = Runtime.CurrentTimeMillis() + 1; request.SetStartRange(startBegin, startEnd); long finishBegin = Runtime.CurrentTimeMillis() + 2; long finishEnd = Runtime.CurrentTimeMillis() + 3; request.SetFinishRange(finishBegin, finishEnd); long limit = 100L; request.SetLimit(limit); ICollection <string> queues = new HashSet <string>(); queues.AddItem("queue1"); request.SetQueues(queues); ICollection <string> users = new HashSet <string>(); users.AddItem("user1"); request.SetUsers(users); ApplicationsRequestScope scope = ApplicationsRequestScope.All; request.SetScope(scope); GetApplicationsRequest requestFromProto = new GetApplicationsRequestPBImpl(((GetApplicationsRequestPBImpl )request).GetProto()); // verify the whole record equals with original record NUnit.Framework.Assert.AreEqual(requestFromProto, request); // verify all properties are the same as original request NUnit.Framework.Assert.AreEqual("ApplicationStates from proto is not the same with original request" , requestFromProto.GetApplicationStates(), appStates); NUnit.Framework.Assert.AreEqual("ApplicationTags from proto is not the same with original request" , requestFromProto.GetApplicationTags(), tags); NUnit.Framework.Assert.AreEqual("ApplicationTypes from proto is not the same with original request" , requestFromProto.GetApplicationTypes(), types); NUnit.Framework.Assert.AreEqual("StartRange from proto is not the same with original request" , requestFromProto.GetStartRange(), new LongRange(startBegin, startEnd)); NUnit.Framework.Assert.AreEqual("FinishRange from proto is not the same with original request" , requestFromProto.GetFinishRange(), new LongRange(finishBegin, finishEnd)); NUnit.Framework.Assert.AreEqual("Limit from proto is not the same with original request" , requestFromProto.GetLimit(), limit); NUnit.Framework.Assert.AreEqual("Queues from proto is not the same with original request" , requestFromProto.GetQueues(), queues); NUnit.Framework.Assert.AreEqual("Users from proto is not the same with original request" , requestFromProto.GetUsers(), users); }
/// <exception cref="System.IO.IOException"/> public virtual void TestRemoveNewlyAddedVolume() { int numExistingVolumes = dataset.GetVolumes().Count; IList <NamespaceInfo> nsInfos = new AList <NamespaceInfo>(); foreach (string bpid in BlockPoolIds) { nsInfos.AddItem(new NamespaceInfo(0, ClusterId, bpid, 1)); } string newVolumePath = BaseDir + "/newVolumeToRemoveLater"; StorageLocation loc = StorageLocation.Parse(newVolumePath); Storage.StorageDirectory sd = CreateStorageDirectory(new FilePath(newVolumePath)); DataStorage.VolumeBuilder builder = new DataStorage.VolumeBuilder(storage, sd); Org.Mockito.Mockito.When(storage.PrepareVolume(Matchers.Eq(datanode), Matchers.Eq (loc.GetFile()), Matchers.AnyListOf <NamespaceInfo>())).ThenReturn(builder); dataset.AddVolume(loc, nsInfos); NUnit.Framework.Assert.AreEqual(numExistingVolumes + 1, dataset.GetVolumes().Count ); Org.Mockito.Mockito.When(storage.GetNumStorageDirs()).ThenReturn(numExistingVolumes + 1); Org.Mockito.Mockito.When(storage.GetStorageDir(numExistingVolumes)).ThenReturn(sd ); ICollection <FilePath> volumesToRemove = new HashSet <FilePath>(); volumesToRemove.AddItem(loc.GetFile()); dataset.RemoveVolumes(volumesToRemove, true); NUnit.Framework.Assert.AreEqual(numExistingVolumes, dataset.GetVolumes().Count); }
/// <summary>List directory contents for a resource folder.</summary> /// <remarks>List directory contents for a resource folder. Not recursive.</remarks> /// <author>Andrew Reslan</author> /// <param name="clazz">Any java class that lives in the same place as the resources folder /// </param> /// <param name="path">Should end with "/", but not start with one.</param> /// <returns>An array of the name of each member item, or null if path does not denote a directory /// </returns> /// <exception cref="Sharpen.URISyntaxException">Sharpen.URISyntaxException</exception> /// <exception cref="System.IO.IOException">System.IO.IOException</exception> public static string[] GetResourceListing(Type clazz, string path) { Uri dirURL = clazz.GetClassLoader().GetResource(path); if (dirURL != null && dirURL.Scheme.Equals("file")) { return(new FilePath(dirURL.ToURI()).List()); } if (dirURL != null && dirURL.Scheme.Equals("jar")) { string jarPath = Sharpen.Runtime.Substring(dirURL.AbsolutePath, 5, dirURL.AbsolutePath .IndexOf("!")); JarFile jar = new JarFile(URLDecoder.Decode(jarPath, "UTF-8")); Enumeration <JarEntry> entries = ((Enumeration <JarEntry>)jar.Entries()); ICollection <string> result = new HashSet <string>(); while (entries.MoveNext()) { string name = entries.Current.GetName(); if (name.StartsWith(path)) { string entry = Sharpen.Runtime.Substring(name, path.Length); int checkSubdir = entry.IndexOf("/"); if (checkSubdir >= 0) { // if it is a subdirectory, we just return the directory name entry = Sharpen.Runtime.Substring(entry, 0, checkSubdir); } result.AddItem(entry); } } return(Sharpen.Collections.ToArray(result, new string[result.Count])); } throw new NotSupportedException("Cannot list files for URL " + dirURL); }
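// Minimal usage sketch for GetResourceListing above: lists the immediate children of
// a resource folder, whether the classpath entry is an exploded directory or a jar.
// The caller type SomeTestClass and the "fixtures/" path are illustrative assumptions.
private static void DemoResourceListing()
{
    string[] names = GetResourceListing(typeof(SomeTestClass), "fixtures/");
    foreach (string n in names)
    {
        System.Console.WriteLine(n);   // subdirectories appear once, by name only
    }
}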
/// <summary> /// Generate a string listing the switch mapping implementation, /// the mapping for every known node and the number of nodes and /// unique switches known about -each entry to a separate line. /// </summary> /// <returns> /// a string that can be presented to the ops team or used in /// debug messages. /// </returns> public virtual string DumpTopology() { IDictionary <string, string> rack = GetSwitchMap(); StringBuilder builder = new StringBuilder(); builder.Append("Mapping: ").Append(ToString()).Append("\n"); if (rack != null) { builder.Append("Map:\n"); ICollection <string> switches = new HashSet <string>(); foreach (KeyValuePair <string, string> entry in rack) { builder.Append(" ").Append(entry.Key).Append(" -> ").Append(entry.Value).Append( "\n"); switches.AddItem(entry.Value); } builder.Append("Nodes: ").Append(rack.Count).Append("\n"); builder.Append("Switches: ").Append(switches.Count).Append("\n"); } else { builder.Append("No topology information"); } return(builder.ToString()); }
/// <exception cref="System.IO.IOException"/> private void InitializeNodeLabels() { RMContext rmContext = distShellTest.yarnCluster.GetResourceManager(0).GetRMContext (); // Setup node labels RMNodeLabelsManager labelsMgr = rmContext.GetNodeLabelManager(); ICollection <string> labels = new HashSet <string>(); labels.AddItem("x"); labelsMgr.AddToCluserNodeLabels(labels); // Setup queue access to node labels distShellTest.conf.Set("yarn.scheduler.capacity.root.accessible-node-labels", "x" ); distShellTest.conf.Set("yarn.scheduler.capacity.root.accessible-node-labels.x.capacity" , "100"); distShellTest.conf.Set("yarn.scheduler.capacity.root.default.accessible-node-labels" , "x"); distShellTest.conf.Set("yarn.scheduler.capacity.root.default.accessible-node-labels.x.capacity" , "100"); rmContext.GetScheduler().Reinitialize(distShellTest.conf, rmContext); // Fetch node-ids from yarn cluster NodeId[] nodeIds = new NodeId[NumNms]; for (int i = 0; i < NumNms; i++) { NodeManager mgr = distShellTest.yarnCluster.GetNodeManager(i); nodeIds[i] = mgr.GetNMContext().GetNodeId(); } // Set label x to NM[1] labelsMgr.AddLabelsToNode(ImmutableMap.Of(nodeIds[1], labels)); }
public LocalContainerLauncher(AppContext context, TaskUmbilicalProtocol umbilical ) : base(typeof(Org.Apache.Hadoop.Mapred.LocalContainerLauncher).FullName) { this.context = context; this.umbilical = umbilical; // umbilical: MRAppMaster creates (taskAttemptListener), passes to us // (TODO/FIXME: pointless to use RPC to talk to self; should create // LocalTaskAttemptListener or similar: implement umbilical protocol // but skip RPC stuff) try { curFC = FileContext.GetFileContext(curDir.ToURI()); } catch (UnsupportedFileSystemException) { Log.Error("Local filesystem " + curDir.ToURI().ToString() + " is unsupported?? (should never happen)" ); } // Save list of files/dirs that are supposed to be present so can delete // any extras created by one task before starting subsequent task. Note // that there's no protection against deleted or renamed localization; // users who do that get what they deserve (and will have to disable // uberization in order to run correctly). FilePath[] curLocalFiles = curDir.ListFiles(); localizedFiles = new HashSet <FilePath>(curLocalFiles.Length); for (int j = 0; j < curLocalFiles.Length; ++j) { localizedFiles.AddItem(curLocalFiles[j]); } }
/// <summary>Add multiple node labels to repository</summary> /// <param name="labels">new node labels added</param> /// <exception cref="System.IO.IOException"/> public virtual void AddToCluserNodeLabels(ICollection <string> labels) { if (!nodeLabelsEnabled) { Log.Error(NodeLabelsNotEnabledErr); throw new IOException(NodeLabelsNotEnabledErr); } if (null == labels || labels.IsEmpty()) { return; } ICollection <string> newLabels = new HashSet <string>(); labels = NormalizeLabels(labels); // do a check before actual adding them, will throw exception if any of them // doesn't meet label name requirement foreach (string label in labels) { CheckAndThrowLabelName(label); } foreach (string label_1 in labels) { // shouldn't overwrite it to avoid changing the Label.resource if (this.labelCollections[label_1] == null) { this.labelCollections[label_1] = new NodeLabel(label_1); newLabels.AddItem(label_1); } } if (null != dispatcher && !newLabels.IsEmpty()) { dispatcher.GetEventHandler().Handle(new StoreNewClusterNodeLabels(newLabels)); } Log.Info("Add labels: [" + StringUtils.Join(labels.GetEnumerator(), ",") + "]"); }
public override void Run() { // Create a worklist of task attempts to work over. ICollection <TaskAttemptID> maps = new HashSet <TaskAttemptID>(); foreach (TaskAttemptID map in localMapFiles.Keys) { maps.AddItem(map); } while (maps.Count > 0) { try { // If merge is on, block merger.WaitForResource(); metrics.ThreadBusy(); // Copy as much as is possible. DoCopy(maps); metrics.ThreadFree(); } catch (Exception) { } catch (Exception t) { exceptionReporter.ReportException(t); } } }
/// <exception cref="System.Exception"/> private void _testInputFiles(bool withFilter, bool withGlob) { ICollection <Path> createdFiles = CreateFiles(); JobConf conf = new JobConf(); Path inputDir = (withGlob) ? new Path(workDir, "a*") : workDir; FileInputFormat.SetInputPaths(conf, inputDir); conf.SetInputFormat(typeof(TestFileInputFormatPathFilter.DummyFileInputFormat)); if (withFilter) { FileInputFormat.SetInputPathFilter(conf, typeof(TestFileInputFormatPathFilter.TestPathFilter )); } TestFileInputFormatPathFilter.DummyFileInputFormat inputFormat = (TestFileInputFormatPathFilter.DummyFileInputFormat )conf.GetInputFormat(); ICollection <Path> computedFiles = new HashSet <Path>(); foreach (FileStatus file in inputFormat.ListStatus(conf)) { computedFiles.AddItem(file.GetPath()); } createdFiles.Remove(localFs.MakeQualified(new Path(workDir, "_hello"))); createdFiles.Remove(localFs.MakeQualified(new Path(workDir, ".hello"))); if (withFilter) { createdFiles.Remove(localFs.MakeQualified(new Path(workDir, "aa"))); createdFiles.Remove(localFs.MakeQualified(new Path(workDir, "bb"))); } if (withGlob) { createdFiles.Remove(localFs.MakeQualified(new Path(workDir, "b"))); createdFiles.Remove(localFs.MakeQualified(new Path(workDir, "bb"))); } NUnit.Framework.Assert.AreEqual(createdFiles, computedFiles); }
/// <summary>Remove expired delegation tokens from cache</summary> /// <exception cref="System.IO.IOException"/> private void RemoveExpiredToken() { long now = Time.Now(); ICollection <TokenIdent> expiredTokens = new HashSet <TokenIdent>(); lock (this) { IEnumerator <KeyValuePair <TokenIdent, AbstractDelegationTokenSecretManager.DelegationTokenInformation > > i = currentTokens.GetEnumerator(); while (i.HasNext()) { KeyValuePair <TokenIdent, AbstractDelegationTokenSecretManager.DelegationTokenInformation > entry = i.Next(); long renewDate = entry.Value.GetRenewDate(); if (renewDate < now) { expiredTokens.AddItem(entry.Key); i.Remove(); } } } // don't hold lock on 'this' to avoid edit log updates blocking token ops foreach (TokenIdent ident in expiredTokens) { LogExpireToken(ident); RemoveStoredToken(ident); } }
public virtual void TestMultipleClose() { Uri testFileUrl = GetType().GetClassLoader().GetResource("recordSpanningMultipleSplits.txt.bz2" ); NUnit.Framework.Assert.IsNotNull("Cannot find recordSpanningMultipleSplits.txt.bz2" , testFileUrl); FilePath testFile = new FilePath(testFileUrl.GetFile()); Path testFilePath = new Path(testFile.GetAbsolutePath()); long testFileSize = testFile.Length(); Configuration conf = new Configuration(); conf.SetInt(LineRecordReader.MaxLineLength, int.MaxValue); FileSplit split = new FileSplit(testFilePath, 0, testFileSize, (string[])null); LineRecordReader reader = new LineRecordReader(conf, split); LongWritable key = new LongWritable(); Text value = new Text(); //noinspection StatementWithEmptyBody while (reader.Next(key, value)) { } reader.Close(); reader.Close(); BZip2Codec codec = new BZip2Codec(); codec.SetConf(conf); ICollection <Decompressor> decompressors = new HashSet <Decompressor>(); for (int i = 0; i < 10; ++i) { decompressors.AddItem(CodecPool.GetDecompressor(codec)); } NUnit.Framework.Assert.AreEqual(10, decompressors.Count); }
public virtual void TestAddResourceConcurrency()
{
    StartEmptyStore();
    string key = "key1";
    int count = 5;
    ExecutorService exec = Executors.NewFixedThreadPool(count);
    IList<Future<string>> futures = new AList<Future<string>>(count);
    CountDownLatch start = new CountDownLatch(1);
    for (int i = 0; i < count; i++)
    {
        string fileName = "foo-" + i + ".jar";
        Callable<string> task = new _Callable_129(this, start, key, fileName);
        futures.AddItem(exec.Submit(task));
    }
    // start them all at the same time
    start.CountDown();
    // check the result; they should all agree with the value
    ICollection<string> results = new HashSet<string>();
    foreach (Future<string> future in futures)
    {
        results.AddItem(future.Get());
    }
    // AreEqual rather than AreSame: boxing produces distinct object references,
    // so AreSame(1, results.Count) would fail even when the count is correct.
    NUnit.Framework.Assert.AreEqual(1, results.Count);
    exec.Shutdown();
}
/// <summary>Checks if there is NAME_TAG for queues.</summary>
/// <remarks>
/// Checks if there is NAME_TAG for queues.
/// If a queue has children, it should not also carry acls-* or state tags;
/// otherwise an exception is thrown.
/// </remarks>
/// <param name="node"/>
private void Validate(Node node)
{
    NodeList fields = node.GetChildNodes();
    // Check that <queue> and (<acls-*> || <state>) are not siblings;
    // if they are, throw an exception.
    ICollection<string> siblings = new HashSet<string>();
    for (int i = 0; i < fields.GetLength(); i++)
    {
        if (!(fields.Item(i) is Element))
        {
            continue;
        }
        siblings.AddItem((fields.Item(i)).GetNodeName());
    }
    if (!siblings.Contains(QueueNameTag))
    {
        throw new RuntimeException(" Malformed xml formation queue name not specified ");
    }
    if (siblings.Contains(QueueTag) && (siblings.Contains(AclAdministerJobTag) || siblings.Contains(AclSubmitJobTag) || siblings.Contains(StateTag)))
    {
        throw new RuntimeException(" Malformed xml formation queue tag and acls " + "tags or state tags are siblings ");
    }
}
public virtual void RemoveOrTrackCompletedContainersFromContext(IList <ContainerId > containerIds) { ICollection <ContainerId> removedContainers = new HashSet <ContainerId>(); Sharpen.Collections.AddAll(pendingContainersToRemove, containerIds); IEnumerator <ContainerId> iter = pendingContainersToRemove.GetEnumerator(); while (iter.HasNext()) { ContainerId containerId = iter.Next(); // remove the container only if the container is at DONE state Org.Apache.Hadoop.Yarn.Server.Nodemanager.Containermanager.Container.Container nmContainer = context.GetContainers()[containerId]; if (nmContainer == null) { iter.Remove(); } else { if (nmContainer.GetContainerState().Equals(ContainerState.Done)) { Sharpen.Collections.Remove(context.GetContainers(), containerId); removedContainers.AddItem(containerId); iter.Remove(); } } } if (!removedContainers.IsEmpty()) { Log.Info("Removed completed containers from NM context: " + removedContainers); } pendingCompletedContainers.Clear(); }
/// <exception cref="System.IO.IOException"></exception> private void WritePack(IDictionary <string, RemoteRefUpdate> refUpdates, ProgressMonitor monitor) { ICollection <ObjectId> remoteObjects = new HashSet <ObjectId>(); ICollection <ObjectId> newObjects = new HashSet <ObjectId>(); PackWriter writer = new PackWriter(transport.GetPackConfig(), local.NewObjectReader ()); try { foreach (Ref r in GetRefs()) { remoteObjects.AddItem(r.GetObjectId()); } Sharpen.Collections.AddAll(remoteObjects, additionalHaves); foreach (RemoteRefUpdate r_1 in refUpdates.Values) { if (!ObjectId.ZeroId.Equals(r_1.GetNewObjectId())) { newObjects.AddItem(r_1.GetNewObjectId()); } } writer.SetUseCachedPacks(true); writer.SetThin(thinPack); writer.SetReuseValidatingObjects(false); writer.SetDeltaBaseAsOffset(capableOfsDelta); writer.PreparePack(monitor, newObjects, remoteObjects); writer.WritePack(monitor, monitor, @out); } finally { writer.Release(); } packTransferTime = writer.GetStatistics().GetTimeWriting(); }
public virtual void Validate(ICollection <string> tableColNames) { if (GetColumns().IsEmpty()) { throw new ArgumentException("index " + GetName() + " has no columns"); } if (GetColumns().Count > IndexData.MAX_COLUMNS) { throw new ArgumentException("index " + GetName() + " has too many columns, max " + IndexData.MAX_COLUMNS); } ICollection <string> idxColNames = new HashSet <string>(); foreach (IndexBuilder.Column col in GetColumns()) { string idxColName = col.GetName().ToUpper(); if (!idxColNames.AddItem(idxColName)) { throw new ArgumentException("duplicate column name " + col.GetName() + " in index " + GetName()); } if (!tableColNames.Contains(idxColName)) { throw new ArgumentException("column named " + col.GetName() + " not found in table" ); } } }
/// <exception cref="NGit.Errors.TransportException"></exception> private void QueueWants(ICollection<Ref> want) { HashSet<ObjectId> inWorkQueue = new HashSet<ObjectId>(); foreach (Ref r in want) { ObjectId id = r.GetObjectId(); try { RevObject obj = revWalk.ParseAny(id); if (obj.Has(COMPLETE)) { continue; } if (inWorkQueue.AddItem(id)) { obj.Add(IN_WORK_QUEUE); workQueue.AddItem(obj); } } catch (MissingObjectException) { if (inWorkQueue.AddItem(id)) { workQueue.AddItem(id); } } catch (IOException e) { throw new TransportException(MessageFormat.Format(JGitText.Get().cannotRead, id.Name ), e); } } }
private ICollection<string> ListPackDirectory() { string[] nameList = packDirectory.List(); if (nameList == null) { return Sharpen.Collections.EmptySet<string>(); } ICollection<string> nameSet = new HashSet<string>(); foreach (string name in nameList) { if (name.StartsWith("pack-")) { nameSet.AddItem(name); } } return nameSet; }
/// <summary>Parse the first line of a receive-pack request.</summary> /// <remarks>Parse the first line of a receive-pack request.</remarks> /// <param name="line">line from the client.</param> public FirstLine(string line) { if (line.Length > 45) { HashSet<string> opts = new HashSet<string>(); string opt = Sharpen.Runtime.Substring(line, 45); if (opt.StartsWith(" ")) { opt = Sharpen.Runtime.Substring(opt, 1); } foreach (string c in opt.Split(" ")) { opts.AddItem(c); } this.line = Sharpen.Runtime.Substring(line, 0, 45); this.options = Sharpen.Collections.UnmodifiableSet(opts); } else { this.line = line; this.options = Sharpen.Collections.EmptySet<string>(); } }
// public static Task CreateDocumentsAsync(Database db, int n) // { // return db.RunAsync((database)=> // { // database.BeginTransaction(); // ApiTest.CreateDocuments(database, n); // database.EndTransaction(true); // }); // } // public static void CreateDocuments(Database db, int numberOfDocsToCreate) // { // //TODO should be changed to use db.runInTransaction // for (int i = 0; i < numberOfDocsToCreate; i++) // { // var properties = new Dictionary<String, Object>(); // properties["testName"] = "testDatabase"; // properties["sequence"] = i; // CreateDocumentWithProperties(db, properties); // } // } // // public static Document CreateDocumentWithProperties(Database db, IDictionary<String, Object> properties) // { // var doc = db.CreateDocument(); // // Assert.IsNotNull(doc); // Assert.IsNull(doc.CurrentRevisionId); // Assert.IsNull(doc.CurrentRevision); // Assert.IsNotNull("Document has no ID", doc.Id); // // // 'untitled' docs are no longer untitled (8/10/12) // try // { // doc.PutProperties(properties); // } // catch (Exception e) // { // Log.E(Tag, "Error creating document", e); // Assert.IsTrue( false, "can't create new document in db:" + db.Name + // " with properties:" + properties.Aggregate(new StringBuilder(" >>> "), (str, kvp)=> { str.AppendFormat("'{0}:{1}' ", kvp.Key, kvp.Value); return str; }, str=>str.ToString())); // } // // Assert.IsNotNull(doc.Id); // Assert.IsNotNull(doc.CurrentRevisionId); // Assert.IsNotNull(doc.UserProperties); // Assert.AreEqual(db.GetDocument(doc.Id), doc); // // return doc; // } /// <exception cref="System.Exception"></exception> public void RunLiveQuery(String methodNameToCall) { var db = database; var doneSignal = new CountdownEvent(11); // FIXME.ZJG: Not sure why, but now Changed is only called once. // 11 corresponds to startKey = 23; endKey = 33 // run a live query var view = db.GetView("vu"); view.SetMap((document, emitter) => emitter (document ["sequence"], 1), "1"); var query = view.CreateQuery().ToLiveQuery(); query.StartKey = 23; query.EndKey = 33; Log.I(Tag, "Created " + query); // these are the keys that we expect to see in the livequery change listener callback var expectedKeys = new HashSet<Int64>(); for (var i = 23; i < 34; i++) { expectedKeys.AddItem(i); } // install a change listener which decrements countdown latch when it sees a new // key from the list of expected keys EventHandler<QueryChangeEventArgs> handler = (sender, e) => { var rows = e.Rows; foreach(var row in rows) { if (expectedKeys.Contains((Int64)row.Key)) { Log.I(Tag, " doneSignal decremented " + doneSignal.CurrentCount); doneSignal.Signal(); } } }; query.Changed += handler; // create the docs that will cause the above change listener to decrement countdown latch var createTask = CreateDocumentsAsync(db, n: 50); createTask.Wait(TimeSpan.FromSeconds(5)); if (methodNameToCall.Equals("start")) { // start the livequery running asynchronously query.Start(); } else if (methodNameToCall.Equals("startWaitForRows")) { query.Start(); query.WaitForRows(); } else { Assert.IsNull(query.Rows); query.Run(); // this will block until the query completes Assert.IsNotNull(query.Rows); } // wait for the doneSignal to be finished var success = doneSignal.Wait(TimeSpan.FromSeconds(5)); Assert.IsTrue(success, "Done signal timed out live query never ran"); // stop the livequery since we are done with it query.Changed -= handler; query.Stop(); query.Dispose(); db.Close(); createTask.Dispose(); doneSignal.Dispose(); }
/// <exception cref="System.IO.IOException"></exception> private void SendPack() { bool sideband = options.Contains(OPTION_SIDE_BAND) || options.Contains(OPTION_SIDE_BAND_64K ); if (!biDirectionalPipe) { // Ensure the request was fully consumed. Any remaining input must // be a protocol error. If we aren't at EOF the implementation is broken. int eof = rawIn.Read(); if (0 <= eof) { throw new CorruptObjectException(MessageFormat.Format(JGitText.Get().expectedEOFReceived , "\\x" + Sharpen.Extensions.ToHexString(eof))); } } ProgressMonitor pm = NullProgressMonitor.INSTANCE; OutputStream packOut = rawOut; SideBandOutputStream msgOut = null; if (sideband) { int bufsz = SideBandOutputStream.SMALL_BUF; if (options.Contains(OPTION_SIDE_BAND_64K)) { bufsz = SideBandOutputStream.MAX_BUF; } packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut); if (!options.Contains(OPTION_NO_PROGRESS)) { msgOut = new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, rawOut ); pm = new SideBandProgressMonitor(msgOut); } } try { if (wantAll.IsEmpty()) { preUploadHook.OnSendPack(this, wantIds, commonBase); } else { preUploadHook.OnSendPack(this, wantAll, commonBase); } } catch (UploadPackMayNotContinueException noPack) { if (sideband && noPack.Message != null) { noPack.SetOutput(); SideBandOutputStream err = new SideBandOutputStream(SideBandOutputStream.CH_ERROR , SideBandOutputStream.SMALL_BUF, rawOut); err.Write(Constants.Encode(noPack.Message)); err.Flush(); } throw; } PackConfig cfg = packConfig; if (cfg == null) { cfg = new PackConfig(db); } PackWriter pw = new PackWriter(cfg, walk.GetObjectReader()); try { pw.SetUseCachedPacks(true); pw.SetReuseDeltaCommits(true); pw.SetDeltaBaseAsOffset(options.Contains(OPTION_OFS_DELTA)); pw.SetThin(options.Contains(OPTION_THIN_PACK)); pw.SetReuseValidatingObjects(false); if (commonBase.IsEmpty()) { ICollection<ObjectId> tagTargets = new HashSet<ObjectId>(); foreach (Ref @ref in refs.Values) { if (@ref.GetPeeledObjectId() != null) { tagTargets.AddItem(@ref.GetPeeledObjectId()); } else { if (@ref.GetObjectId() == null) { continue; } else { if (@ref.GetName().StartsWith(Constants.R_HEADS)) { tagTargets.AddItem(@ref.GetObjectId()); } } } } pw.SetTagTargets(tagTargets); } RevWalk rw = walk; if (wantAll.IsEmpty()) { pw.PreparePack(pm, wantIds, commonBase); } else { walk.Reset(); ObjectWalk ow = walk.ToObjectWalkWithSameObjects(); pw.PreparePack(pm, ow, wantAll, commonBase); rw = ow; } if (options.Contains(OPTION_INCLUDE_TAG)) { foreach (Ref vref in refs.Values) { Ref @ref = vref; ObjectId objectId = @ref.GetObjectId(); // If the object was already requested, skip it. if (wantAll.IsEmpty()) { if (wantIds.Contains(objectId)) { continue; } } else { RevObject obj = rw.LookupOrNull(objectId); if (obj != null && obj.Has(WANT)) { continue; } } if ([email protected]()) { @ref = db.Peel(@ref); } ObjectId peeledId = @ref.GetPeeledObjectId(); if (peeledId == null) { continue; } objectId = @ref.GetObjectId(); if (pw.WillInclude(peeledId) && !pw.WillInclude(objectId)) { pw.AddObject(rw.ParseAny(objectId)); } } } pw.WritePack(pm, NullProgressMonitor.INSTANCE, packOut); statistics = pw.GetStatistics(); if (msgOut != null) { string msg = pw.GetStatistics().GetMessage() + '\n'; msgOut.Write(Constants.Encode(msg)); msgOut.Flush(); } } finally { pw.Release(); } if (sideband) { pckOut.End(); } if (logger != null && statistics != null) { logger.OnPackStatistics(statistics); } }
public void OnCompletion(object result, Exception e) { try { if (e == null) { ICollection<string> failedIDs = new HashSet<string>(); // _bulk_docs response is really an array, not a dictionary! IList<IDictionary<string, object>> items = (IList)result; foreach (IDictionary<string, object> item in items) { Status status = this._enclosing.StatusFromBulkDocsResponseItem(item); if (status.IsError()) { // One of the docs failed to save. Log.W(Log.TagSync, "%s: _bulk_docs got an error: %s", item, this); // 403/Forbidden means validation failed; don't treat it as an error // because I did my job in sending the revision. Other statuses are // actual replication errors. if (status.GetCode() != Status.Forbidden) { string docID = (string)item.Get("id"); failedIDs.AddItem(docID); } } } // TODO - port from iOS // NSURL* url = docID ? [_remote URLByAppendingPathComponent: docID] : nil; // error = CBLStatusToNSError(status, url); // Remove from the pending list all the revs that didn't fail: foreach (RevisionInternal revisionInternal in changes) { if (!failedIDs.Contains(revisionInternal.GetDocId())) { this._enclosing.RemovePending(revisionInternal); } } } if (e != null) { this._enclosing.SetError(e); this._enclosing.RevisionFailed(); } else { Log.V(Log.TagSync, "%s: POSTed to _bulk_docs", this._enclosing); } this._enclosing.AddToCompletedChangesCount(numDocsToSend); } finally { Log.V(Log.TagSync, "%s | %s: uploadBulkDocs.sendAsyncRequest() calling asyncTaskFinished()" , this, Sharpen.Thread.CurrentThread()); this._enclosing.AsyncTaskFinished(1); } }
/// <exception cref="Kirikiri.Tjs2.VariantException"></exception> /// <exception cref="Kirikiri.Tjs2.TJSException"></exception> public NativeJavaClass(string name, Type c) : base(name) { mJavaClass = c; string classname = name; mClassID = TJS.RegisterNativeClass(classname); try { HashSet<string> registProp = new HashSet<string>(); // set/getで重复しないようにチェック MethodInfo[] methods = c.GetMethods(); foreach (MethodInfo m in methods) { string methodName = m.Name; int flag = 0; if (m.IsStatic) { flag |= Interface.STATICMEMBER; } if ("constructor".Equals(methodName)) { // コンストラクタ RegisterNCM(classname, new NativeJavaClassConstructor(m, mClassID), classname, Interface .nitMethod, flag); } else { if (methodName.StartsWith("prop_")) { // プロパティ prop_ で始まるものはプロパティとみなす Type[] @params = Sharpen.Runtime.GetParameterTypes(m); MethodInfo setMethod = null; MethodInfo getMethod = null; string propName = null; if (methodName.StartsWith("prop_set_")) { if (@params.Length == 1) { setMethod = m; propName = Sharpen.Runtime.Substring(methodName, "prop_set_".Length); if (registProp.Contains(propName) == false) { string getMethodName = "prop_get_" + propName; foreach (MethodInfo getm in methods) { if (getm.Name.Equals(getMethodName)) { Type[] p = Sharpen.Runtime.GetParameterTypes(getm); if (p.Length == 0 && getm.ReturnType.Equals(typeof(void)) != true) { getMethod = getm; break; } } } } } } else { if (methodName.StartsWith("prop_get_")) { if (@params.Length == 0 && m.ReturnType.Equals(typeof(void)) != true) { getMethod = m; propName = Sharpen.Runtime.Substring(methodName, "prop_get_".Length); if (registProp.Contains(propName) == false) { string setMethodName = "prop_set_" + propName; foreach (MethodInfo setm in methods) { if (setm.Name.Equals(setMethodName)) { Type[] p = Sharpen.Runtime.GetParameterTypes(setm); if (p.Length == 1) { setMethod = setm; break; } } } } } } } if (propName != null && registProp.Contains(propName) == false) { if (setMethod != null || getMethod != null) { RegisterNCM(propName, new NativeJavaClassProperty(getMethod, setMethod, mClassID) , classname, Interface.nitProperty, flag); registProp.AddItem(propName); } } } else { // 通常メソッド RegisterNCM(methodName, new NativeJavaClassMethod(m, mClassID), classname, Interface .nitMethod, flag); } } } registProp = null; } catch (SecurityException e) { throw new TJSException(Error.InternalError + e.ToString()); } }
/// <exception cref="System.IO.IOException"></exception> private void RecvWants() { HashSet<ObjectId> wantIds = new HashSet<ObjectId>(); bool isFirst = true; for (; ; ) { string line; try { line = pckIn.ReadString(); } catch (EOFException eof) { if (isFirst) { break; } throw; } if (line == PacketLineIn.END) { break; } if (!line.StartsWith("want ") || line.Length < 45) { throw new PackProtocolException(MessageFormat.Format(JGitText.Get().expectedGot, "want", line)); } if (isFirst && line.Length > 45) { string opt = Sharpen.Runtime.Substring(line, 45); if (opt.StartsWith(" ")) { opt = Sharpen.Runtime.Substring(opt, 1); } foreach (string c in opt.Split(" ")) { options.AddItem(c); } line = Sharpen.Runtime.Substring(line, 0, 45); } wantIds.AddItem(ObjectId.FromString(Sharpen.Runtime.Substring(line, 5))); isFirst = false; } if (wantIds.IsEmpty()) { return; } AsyncRevObjectQueue q = walk.ParseAny(wantIds.AsIterable (), true); try { for (; ; ) { RevObject o; try { o = q.Next(); } catch (IOException error) { throw new PackProtocolException(MessageFormat.Format(JGitText.Get().notValid, error .Message), error); } if (o == null) { break; } if (o.Has(WANT)) { } else { // Already processed, the client repeated itself. if (o.Has(ADVERTISED)) { o.Add(WANT); wantAll.AddItem(o); if (o is RevTag) { o = walk.Peel(o); if (o is RevCommit) { if (!o.Has(WANT)) { o.Add(WANT); wantAll.AddItem(o); } } } } else { throw new PackProtocolException(MessageFormat.Format(JGitText.Get().notValid, o.Name )); } } } } finally { q.Release(); } }
/// <exception cref="System.IO.IOException"></exception> private void WritePack(IDictionary<string, RemoteRefUpdate> refUpdates, ProgressMonitor monitor) { ICollection<ObjectId> remoteObjects = new HashSet<ObjectId>(); ICollection<ObjectId> newObjects = new HashSet<ObjectId>(); PackWriter writer = new PackWriter(transport.GetPackConfig(), local.NewObjectReader ()); try { foreach (Ref r in GetRefs()) { remoteObjects.AddItem(r.GetObjectId()); } Sharpen.Collections.AddAll(remoteObjects, additionalHaves); foreach (RemoteRefUpdate r_1 in refUpdates.Values) { if (!ObjectId.ZeroId.Equals(r_1.GetNewObjectId())) { newObjects.AddItem(r_1.GetNewObjectId()); } } writer.SetUseCachedPacks(true); writer.SetThin(thinPack); writer.SetReuseValidatingObjects(false); writer.SetDeltaBaseAsOffset(capableOfsDelta); writer.PreparePack(monitor, newObjects, remoteObjects); writer.WritePack(monitor, monitor, @out); } finally { writer.Release(); } packTransferTime = writer.GetStatistics().GetTimeWriting(); }
public ICollection<BlobKey> AllKeys() { ICollection<BlobKey> result = new HashSet<BlobKey>(); FilePath file = new FilePath(path); FilePath[] contents = file.ListFiles(); foreach (FilePath attachment in contents) { if (attachment.IsDirectory()) { continue; } BlobKey attachmentKey = new BlobKey(); GetKeyForFilename(attachmentKey, attachment.GetPath()); result.AddItem(attachmentKey); } return result; }
/// <exception cref="NGit.Errors.TransportException"></exception> private void Sendpack(IList<RemoteRefUpdate> updates, ProgressMonitor monitor) { string pathPack = null; string pathIdx = null; PackWriter writer = new PackWriter(transport.GetPackConfig(), local.NewObjectReader ()); try { ICollection<ObjectId> need = new HashSet<ObjectId>(); ICollection<ObjectId> have = new HashSet<ObjectId>(); foreach (RemoteRefUpdate r in updates) { need.AddItem(r.GetNewObjectId()); } foreach (Ref r_1 in GetRefs()) { have.AddItem(r_1.GetObjectId()); if (r_1.GetPeeledObjectId() != null) { have.AddItem(r_1.GetPeeledObjectId()); } } writer.PreparePack(monitor, need, have); // We don't have to continue further if the pack will // be an empty pack, as the remote has all objects it // needs to complete this change. // if (writer.GetObjectCount() == 0) { return; } packNames = new LinkedHashMap<string, string>(); foreach (string n in dest.GetPackNames()) { packNames.Put(n, n); } string @base = "pack-" + writer.ComputeName().Name; string packName = @base + ".pack"; pathPack = "pack/" + packName; pathIdx = "pack/" + @base + ".idx"; if (Sharpen.Collections.Remove(packNames, packName) != null) { // The remote already contains this pack. We should // remove the index before overwriting to prevent bad // offsets from appearing to clients. // dest.WriteInfoPacks(packNames.Keys); dest.DeleteFile(pathIdx); } // Write the pack file, then the index, as readers look the // other direction (index, then pack file). // string wt = "Put " + Sharpen.Runtime.Substring(@base, 0, 12); OutputStream os = dest.WriteFile(pathPack, monitor, wt + "..pack"); try { os = new SafeBufferedOutputStream(os); writer.WritePack(monitor, monitor, os); } finally { os.Close(); } os = dest.WriteFile(pathIdx, monitor, wt + "..idx"); try { os = new SafeBufferedOutputStream(os); writer.WriteIndex(os); } finally { os.Close(); } // Record the pack at the start of the pack info list. This // way clients are likely to consult the newest pack first, // and discover the most recent objects there. // AList<string> infoPacks = new AList<string>(); infoPacks.AddItem(packName); Sharpen.Collections.AddAll(infoPacks, packNames.Keys); dest.WriteInfoPacks(infoPacks); } catch (IOException err) { SafeDelete(pathIdx); SafeDelete(pathPack); throw new TransportException(uri, JGitText.Get().cannotStoreObjects, err); } finally { writer.Release(); } }
/// <returns>list of files with the flag assume-unchanged</returns>
public virtual ICollection<string> GetAssumeUnchanged()
{
    if (assumeUnchanged == null)
    {
        HashSet<string> unchanged = new HashSet<string>();
        for (int i = 0; i < dirCache.GetEntryCount(); i++)
        {
            if (dirCache.GetEntry(i).IsAssumeValid)
            {
                unchanged.AddItem(dirCache.GetEntry(i).PathString);
            }
        }
        assumeUnchanged = unchanged;
    }
    return assumeUnchanged;
}
/// <summary> /// Objects known to exist but not expressed by /// <see cref="NGit.Repository.GetAllRefs()">NGit.Repository.GetAllRefs()</see> /// . /// <p> /// When a repository borrows objects from another repository, it can /// advertise that it safely has that other repository's references, without /// exposing any other details about the other repository. This may help /// a client trying to push changes avoid pushing more than it needs to. /// </summary> /// <returns>unmodifiable collection of other known objects.</returns> public override ICollection<ObjectId> GetAdditionalHaves() { HashSet<ObjectId> r = new HashSet<ObjectId>(); foreach (FileObjectDatabase.AlternateHandle d in objectDatabase.MyAlternates()) { if (d is FileObjectDatabase.AlternateRepository) { Repository repo; repo = ((FileObjectDatabase.AlternateRepository)d).repository; foreach (Ref @ref in repo.GetAllRefs().Values) { r.AddItem(@ref.GetObjectId()); } Sharpen.Collections.AddAll(r, repo.GetAdditionalHaves()); } } return r; }
private static ICollection<RefSpec> ExpandPushWildcardsFor(Repository db, ICollection<RefSpec> specs)
{
    IDictionary<string, Ref> localRefs = db.GetAllRefs();
    ICollection<RefSpec> procRefs = new HashSet<RefSpec>();
    foreach (RefSpec spec in specs)
    {
        if (spec.IsWildcard())
        {
            foreach (Ref localRef in localRefs.Values)
            {
                if (spec.MatchSource(localRef))
                {
                    procRefs.AddItem(spec.ExpandFromSource(localRef));
                }
            }
        }
        else
        {
            procRefs.AddItem(spec);
        }
    }
    return procRefs;
}
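The expansion turns one wildcard push spec into a concrete spec per matching local ref. A hedged illustration, assuming RefSpec can be constructed from a spec string as in JGit; the spec string and branch names are invented, while MatchSource and ExpandFromSource are the same calls used above:

// Sketch: refs/heads/master would expand to refs/heads/master:refs/remotes/origin/master,
// refs/heads/topic to refs/heads/topic:refs/remotes/origin/topic, and so on.
RefSpec wildcard = new RefSpec("refs/heads/*:refs/remotes/origin/*");
ICollection<RefSpec> concrete = new HashSet<RefSpec>();
foreach (Ref localRef in db.GetAllRefs().Values)
{
    if (wildcard.MatchSource(localRef))
    {
        concrete.AddItem(wildcard.ExpandFromSource(localRef));
    }
}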
internal static Boolean PurgeRevisionsTask(Database enclosingDatabase, IDictionary<String, IList<String>> docsToRevs, IDictionary<String, Object> result) { foreach (string docID in docsToRevs.Keys) { long docNumericID = enclosingDatabase.GetDocNumericID(docID); if (docNumericID == -1) { continue; } var revsPurged = new AList<string>(); var revIDs = docsToRevs [docID]; if (revIDs == null) { return false; } else { if (revIDs.Count == 0) { revsPurged = new AList<string>(); } else { if (revIDs.Contains("*")) { try { var args = new[] { Convert.ToString(docNumericID) }; enclosingDatabase.StorageEngine.ExecSQL("DELETE FROM revs WHERE doc_id=?", args); } catch (SQLException e) { Log.E(Tag, "Error deleting revisions", e); return false; } revsPurged = new AList<string>(); revsPurged.AddItem("*"); } else { Cursor cursor = null; try { var args = new [] { Convert.ToString(docNumericID) }; var queryString = "SELECT revid, sequence, parent FROM revs WHERE doc_id=? ORDER BY sequence DESC"; cursor = enclosingDatabase.StorageEngine.RawQuery(queryString, args); if (!cursor.MoveToNext()) { Log.W(Tag, "No results for query: " + queryString); return false; } var seqsToPurge = new HashSet<long>(); var seqsToKeep = new HashSet<long>(); var revsToPurge = new HashSet<string>(); while (!cursor.IsAfterLast()) { string revID = cursor.GetString(0); long sequence = cursor.GetLong(1); long parent = cursor.GetLong(2); if (seqsToPurge.Contains(sequence) || revIDs.Contains(revID) && !seqsToKeep.Contains (sequence)) { seqsToPurge.AddItem(sequence); revsToPurge.AddItem(revID); if (parent > 0) { seqsToPurge.AddItem(parent); } } else { seqsToPurge.Remove(sequence); revsToPurge.Remove(revID); seqsToKeep.AddItem(parent); } cursor.MoveToNext(); } seqsToPurge.RemoveAll(seqsToKeep); Log.I(Tag, String.Format("Purging doc '{0}' revs ({1}); asked for ({2})", docID, revsToPurge, revIDs)); if (seqsToPurge.Count > 0) { string seqsToPurgeList = String.Join(",", seqsToPurge); string sql = string.Format("DELETE FROM revs WHERE sequence in ({0})", seqsToPurgeList); try { enclosingDatabase.StorageEngine.ExecSQL(sql); } catch (SQLException e) { Log.E(Tag, "Error deleting revisions via: " + sql, e); return false; } } Collections.AddAll(revsPurged, revsToPurge); } catch (SQLException e) { Log.E(Tag, "Error getting revisions", e); return false; } finally { if (cursor != null) { cursor.Close(); } } } } } result[docID] = revsPurged; } return true; }
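The docsToRevs argument drives three distinct branches of PurgeRevisionsTask: a null list aborts, an empty list purges nothing, and the special entry "*" deletes every revision row for that document; otherwise only the named revisions (and any ancestor sequences left unreferenced) are removed. A hypothetical input showing those cases; the document IDs, revision IDs, and the database variable are invented for illustration:

// Sketch of a PurgeRevisionsTask invocation; all IDs below are made up.
IDictionary<string, IList<string>> docsToRevs = new Dictionary<string, IList<string>>();

var allRevs = new AList<string>();
allRevs.AddItem("*");                                 // "*" purges every revision of this doc
docsToRevs["doc-purge-everything"] = allRevs;

var someRevs = new AList<string>();
someRevs.AddItem("2-b1946ac9");                       // only these named revisions are purged
someRevs.AddItem("3-4b5c2a01");
docsToRevs["doc-purge-two-revs"] = someRevs;

docsToRevs["doc-left-alone"] = new AList<string>();   // empty list: nothing is purged

IDictionary<string, object> result = new Dictionary<string, object>();
bool ok = PurgeRevisionsTask(database, docsToRevs, result);
// On success, result maps each doc ID to the revision IDs that were actually purged.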
/// <exception cref="System.IO.IOException"></exception> private void SendPack(bool sideband) { ProgressMonitor pm = NullProgressMonitor.INSTANCE; OutputStream packOut = rawOut; SideBandOutputStream msgOut = null; if (sideband) { int bufsz = SideBandOutputStream.SMALL_BUF; if (options.Contains(OPTION_SIDE_BAND_64K)) { bufsz = SideBandOutputStream.MAX_BUF; } packOut = new SideBandOutputStream(SideBandOutputStream.CH_DATA, bufsz, rawOut); if (!options.Contains(OPTION_NO_PROGRESS)) { msgOut = new SideBandOutputStream(SideBandOutputStream.CH_PROGRESS, bufsz, rawOut ); pm = new SideBandProgressMonitor(msgOut); } } try { if (wantAll.IsEmpty()) { preUploadHook.OnSendPack(this, wantIds, commonBase); } else { preUploadHook.OnSendPack(this, wantAll, commonBase); } } catch (ServiceMayNotContinueException noPack) { if (sideband && noPack.Message != null) { noPack.SetOutput(); SideBandOutputStream err = new SideBandOutputStream(SideBandOutputStream.CH_ERROR , SideBandOutputStream.SMALL_BUF, rawOut); err.Write(Constants.Encode(noPack.Message)); err.Flush(); } throw; } PackConfig cfg = packConfig; if (cfg == null) { cfg = new PackConfig(db); } PackWriter pw = new PackWriter(cfg, walk.GetObjectReader()); try { pw.SetUseCachedPacks(true); pw.SetReuseDeltaCommits(true); pw.SetDeltaBaseAsOffset(options.Contains(OPTION_OFS_DELTA)); pw.SetThin(options.Contains(OPTION_THIN_PACK)); pw.SetReuseValidatingObjects(false); if (commonBase.IsEmpty() && refs != null) { ICollection<ObjectId> tagTargets = new HashSet<ObjectId>(); foreach (Ref @ref in refs.Values) { if (@ref.GetPeeledObjectId() != null) { tagTargets.AddItem(@ref.GetPeeledObjectId()); } else { if (@ref.GetObjectId() == null) { continue; } else { if (@ref.GetName().StartsWith(Constants.R_HEADS)) { tagTargets.AddItem(@ref.GetObjectId()); } } } } pw.SetTagTargets(tagTargets); } if (depth > 0) { pw.SetShallowPack(depth, unshallowCommits); } RevWalk rw = walk; if (wantAll.IsEmpty()) { pw.PreparePack(pm, wantIds, commonBase); } else { walk.Reset(); ObjectWalk ow = walk.ToObjectWalkWithSameObjects(); pw.PreparePack(pm, ow, wantAll, commonBase); rw = ow; } if (options.Contains(OPTION_INCLUDE_TAG) && refs != null) { foreach (Ref vref in refs.Values) { Ref @ref = vref; ObjectId objectId = @ref.GetObjectId(); // If the object was already requested, skip it. if (wantAll.IsEmpty()) { if (wantIds.Contains(objectId)) { continue; } } else { RevObject obj = rw.LookupOrNull(objectId); if (obj != null && obj.Has(WANT)) { continue; } } if ([email protected]()) { @ref = db.Peel(@ref); } ObjectId peeledId = @ref.GetPeeledObjectId(); if (peeledId == null) { continue; } objectId = @ref.GetObjectId(); if (pw.WillInclude(peeledId) && !pw.WillInclude(objectId)) { pw.AddObject(rw.ParseAny(objectId)); } } } pw.WritePack(pm, NullProgressMonitor.INSTANCE, packOut); statistics = pw.GetStatistics(); if (msgOut != null) { string msg = pw.GetStatistics().GetMessage() + '\n'; msgOut.Write(Constants.Encode(msg)); msgOut.Flush(); } } finally { pw.Release(); } if (sideband) { pckOut.End(); } if (statistics != null) { logger.OnPackStatistics(statistics); } }
/// <exception cref="System.Exception"></exception> public virtual void TestAttachments() { string testAttachmentName = "test_attachment"; BlobStore attachments = database.GetAttachments(); NUnit.Framework.Assert.AreEqual(0, attachments.Count()); NUnit.Framework.Assert.AreEqual(new HashSet<object>(), attachments.AllKeys()); Status status = new Status(); IDictionary<string, object> rev1Properties = new Dictionary<string, object>(); rev1Properties.Put("foo", 1); rev1Properties.Put("bar", false); RevisionInternal rev1 = database.PutRevision(new RevisionInternal(rev1Properties, database), null, false, status); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); byte[] attach1 = Sharpen.Runtime.GetBytesForString("This is the body of attach1"); database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach1 ), rev1.GetSequence(), testAttachmentName, "text/plain", rev1.GetGeneration()); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); //We must set the no_attachments column for the rev to false, as we are using an internal //private API call above (database.insertAttachmentForSequenceWithNameAndType) which does //not set the no_attachments column on revs table try { ContentValues args = new ContentValues(); args.Put("no_attachments=", false); database.GetDatabase().Update("revs", args, "sequence=?", new string[] { rev1.GetSequence ().ToString() }); } catch (SQLException e) { Log.E(Database.Tag, "Error setting rev1 no_attachments to false", e); throw new CouchbaseLiteException(Status.InternalServerError); } Attachment attachment = database.GetAttachmentForSequence(rev1.GetSequence(), testAttachmentName ); NUnit.Framework.Assert.AreEqual("text/plain", attachment.GetContentType()); byte[] data = IOUtils.ToByteArray(attachment.GetContent()); NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach1, data)); IDictionary<string, object> innerDict = new Dictionary<string, object>(); innerDict.Put("content_type", "text/plain"); innerDict.Put("digest", "sha1-gOHUOBmIMoDCrMuGyaLWzf1hQTE="); innerDict.Put("length", 27); innerDict.Put("stub", true); innerDict.Put("revpos", 1); IDictionary<string, object> attachmentDict = new Dictionary<string, object>(); attachmentDict.Put(testAttachmentName, innerDict); IDictionary<string, object> attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent (rev1.GetSequence(), EnumSet.NoneOf<Database.TDContentOptions>()); NUnit.Framework.Assert.AreEqual(attachmentDict, attachmentDictForSequence); RevisionInternal gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1 .GetRevId(), EnumSet.NoneOf<Database.TDContentOptions>()); IDictionary<string, object> gotAttachmentDict = (IDictionary<string, object>)gotRev1 .GetProperties().Get("_attachments"); NUnit.Framework.Assert.AreEqual(attachmentDict, gotAttachmentDict); // Check the attachment dict, with attachments included: Sharpen.Collections.Remove(innerDict, "stub"); innerDict.Put("data", Base64.EncodeBytes(attach1)); attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent(rev1 .GetSequence(), EnumSet.Of(Database.TDContentOptions.TDIncludeAttachments)); NUnit.Framework.Assert.AreEqual(attachmentDict, attachmentDictForSequence); gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), EnumSet .Of(Database.TDContentOptions.TDIncludeAttachments)); gotAttachmentDict = (IDictionary<string, object>)gotRev1.GetProperties().Get("_attachments" ); NUnit.Framework.Assert.AreEqual(attachmentDict, gotAttachmentDict); // Add 
a second revision that doesn't update the attachment: IDictionary<string, object> rev2Properties = new Dictionary<string, object>(); rev2Properties.Put("_id", rev1.GetDocId()); rev2Properties.Put("foo", 2); rev2Properties.Put("bazz", false); RevisionInternal rev2 = database.PutRevision(new RevisionInternal(rev2Properties, database), rev1.GetRevId(), false, status); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); database.CopyAttachmentNamedFromSequenceToSequence(testAttachmentName, rev1.GetSequence (), rev2.GetSequence()); // Add a third revision of the same document: IDictionary<string, object> rev3Properties = new Dictionary<string, object>(); rev3Properties.Put("_id", rev2.GetDocId()); rev3Properties.Put("foo", 2); rev3Properties.Put("bazz", false); RevisionInternal rev3 = database.PutRevision(new RevisionInternal(rev3Properties, database), rev2.GetRevId(), false, status); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); byte[] attach2 = Sharpen.Runtime.GetBytesForString("<html>And this is attach2</html>" ); database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach2 ), rev3.GetSequence(), testAttachmentName, "text/html", rev2.GetGeneration()); // Check the 2nd revision's attachment: Attachment attachment2 = database.GetAttachmentForSequence(rev2.GetSequence(), testAttachmentName ); NUnit.Framework.Assert.AreEqual("text/plain", attachment2.GetContentType()); data = IOUtils.ToByteArray(attachment2.GetContent()); NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach1, data)); // Check the 3rd revision's attachment: Attachment attachment3 = database.GetAttachmentForSequence(rev3.GetSequence(), testAttachmentName ); NUnit.Framework.Assert.AreEqual("text/html", attachment3.GetContentType()); data = IOUtils.ToByteArray(attachment3.GetContent()); NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach2, data)); IDictionary<string, object> attachmentDictForRev3 = (IDictionary<string, object>) database.GetAttachmentsDictForSequenceWithContent(rev3.GetSequence(), EnumSet.NoneOf <Database.TDContentOptions>()).Get(testAttachmentName); if (attachmentDictForRev3.ContainsKey("follows")) { if (((bool)attachmentDictForRev3.Get("follows")) == true) { throw new RuntimeException("Did not expected attachment dict 'follows' key to be true" ); } else { throw new RuntimeException("Did not expected attachment dict to have 'follows' key" ); } } // Examine the attachment store: NUnit.Framework.Assert.AreEqual(2, attachments.Count()); ICollection<BlobKey> expected = new HashSet<BlobKey>(); expected.AddItem(BlobStore.KeyForBlob(attach1)); expected.AddItem(BlobStore.KeyForBlob(attach2)); NUnit.Framework.Assert.AreEqual(expected, attachments.AllKeys()); database.Compact(); // This clears the body of the first revision NUnit.Framework.Assert.AreEqual(1, attachments.Count()); ICollection<BlobKey> expected2 = new HashSet<BlobKey>(); expected2.AddItem(BlobStore.KeyForBlob(attach2)); NUnit.Framework.Assert.AreEqual(expected2, attachments.AllKeys()); }
/// <exception cref="System.IO.IOException"></exception> private ObjectDirectory.CachedPackList ScanCachedPacks(ObjectDirectory.CachedPackList old) { FileSnapshot s = FileSnapshot.Save(cachedPacksFile); byte[] buf; try { buf = IOUtil.ReadFully(cachedPacksFile); } catch (FileNotFoundException) { buf = new byte[0]; } if (old != null && old.snapshot.Equals(s) && Arrays.Equals(old.raw, buf)) { old.snapshot.SetClean(s); return old; } AList<LocalCachedPack> list = new AList<LocalCachedPack>(4); ICollection<ObjectId> tips = new HashSet<ObjectId>(); int ptr = 0; while (ptr < buf.Length) { if (buf[ptr] == '#' || buf[ptr] == '\n') { ptr = RawParseUtils.NextLF(buf, ptr); continue; } if (buf[ptr] == '+') { tips.AddItem(ObjectId.FromString(buf, ptr + 2)); ptr = RawParseUtils.NextLF(buf, ptr + 2); continue; } IList<string> names = new AList<string>(4); while (ptr < buf.Length && buf[ptr] == 'P') { int end = RawParseUtils.NextLF(buf, ptr); if (buf[end - 1] == '\n') { end--; } names.AddItem(RawParseUtils.Decode(buf, ptr + 2, end)); ptr = RawParseUtils.NextLF(buf, end); } if (!tips.IsEmpty() && !names.IsEmpty()) { list.AddItem(new LocalCachedPack(this, tips, names)); tips = new HashSet<ObjectId>(); } } list.TrimToSize(); return new ObjectDirectory.CachedPackList(s, Sharpen.Collections.UnmodifiableList (list), buf); }
public void TestAttachments() { var testAttachmentName = "test_attachment"; var attachments = database.Attachments; Assert.AreEqual(0, attachments.Count()); Assert.AreEqual(0, attachments.AllKeys().Count()); var rev1Properties = new Dictionary<string, object>(); rev1Properties["foo"] = 1; rev1Properties["bar"] = false; var status = new Status(); var rev1 = database.PutRevision( new RevisionInternal(rev1Properties, database), null, false, status); Assert.AreEqual(StatusCode.Created, status.GetCode()); var attach1 = Runtime.GetBytesForString( "This is the body of attach1").ToArray(); database.InsertAttachmentForSequenceWithNameAndType( new ByteArrayInputStream(attach1), rev1.GetSequence(), testAttachmentName, "text/plain", rev1.GetGeneration()); //We must set the no_attachments column for the rev to false, as we are using an internal //private API call above (database.insertAttachmentForSequenceWithNameAndType) which does //not set the no_attachments column on revs table try { var args = new ContentValues(); args.Put("no_attachments", false); database.StorageEngine.Update( "revs", args, "sequence=?", new[] { rev1.GetSequence().ToString() } ); } catch (SQLException e) { Log.E(Tag, "Error setting rev1 no_attachments to false", e); throw new CouchbaseLiteException(StatusCode.InternalServerError); } var attachment = database.GetAttachmentForSequence( rev1.GetSequence(), testAttachmentName ); Assert.AreEqual("text/plain", attachment.ContentType); var data = attachment.Content.ToArray(); Assert.IsTrue(Arrays.Equals(attach1, data)); // Workaround : // Not closing the content stream will cause Sharing Violation // Exception when trying to get the same attachment going forward. attachment.ContentStream.Close(); var innerDict = new Dictionary<string, object>(); innerDict["content_type"] = "text/plain"; innerDict["digest"] = "sha1-gOHUOBmIMoDCrMuGyaLWzf1hQTE="; innerDict["length"] = 27; innerDict["stub"] = true; innerDict["revpos"] = 1; var attachmentDict = new Dictionary<string, object>(); attachmentDict[testAttachmentName] = innerDict; var attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent(rev1.GetSequence(), DocumentContentOptions.None); Assert.AreEqual(new SortedDictionary<string,object>(attachmentDict), new SortedDictionary<string,object>(attachmentDictForSequence));//Assert.AreEqual(1, attachmentDictForSequence.Count); var gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), DocumentContentOptions.IncludeAttachments); var gotAttachmentDict = gotRev1.GetProperties() .Get("_attachments") .AsDictionary<string,object>(); Assert.AreEqual(attachmentDict.Select(kvp => kvp.Key).OrderBy(k => k), gotAttachmentDict.Select(kvp => kvp.Key).OrderBy(k => k)); // Check the attachment dict, with attachments included: innerDict.Remove("stub"); innerDict.Put("data", Convert.ToBase64String(attach1)); attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent( rev1.GetSequence(), DocumentContentOptions.IncludeAttachments); Assert.AreEqual(new SortedDictionary<string,object>(attachmentDict[testAttachmentName].AsDictionary<string,object>()), new SortedDictionary<string,object>(attachmentDictForSequence[testAttachmentName].AsDictionary<string,object>())); gotRev1 = database.GetDocumentWithIDAndRev( rev1.GetDocId(), rev1.GetRevId(), DocumentContentOptions.IncludeAttachments); gotAttachmentDict = gotRev1.GetProperties() .Get("_attachments") .AsDictionary<string, object>() .Get(testAttachmentName) .AsDictionary<string,object>(); 
Assert.AreEqual(innerDict.Select(kvp => kvp.Key).OrderBy(k => k), gotAttachmentDict.Select(kvp => kvp.Key).OrderBy(k => k)); // Add a second revision that doesn't update the attachment: var rev2Properties = new Dictionary<string, object>(); rev2Properties.Put("_id", rev1.GetDocId()); rev2Properties["foo"] = 2; rev2Properties["bazz"] = false; var rev2 = database.PutRevision(new RevisionInternal(rev2Properties, database), rev1.GetRevId(), false, status); Assert.AreEqual(StatusCode.Created, status.GetCode()); database.CopyAttachmentNamedFromSequenceToSequence( testAttachmentName, rev1.GetSequence(), rev2.GetSequence()); // Add a third revision of the same document: var rev3Properties = new Dictionary<string, object>(); rev3Properties.Put("_id", rev2.GetDocId()); rev3Properties["foo"] = 2; rev3Properties["bazz"] = false; var rev3 = database.PutRevision(new RevisionInternal( rev3Properties, database), rev2.GetRevId(), false, status); Assert.AreEqual(StatusCode.Created, status.GetCode()); var attach2 = Runtime.GetBytesForString("<html>And this is attach2</html>").ToArray(); database.InsertAttachmentForSequenceWithNameAndType( new ByteArrayInputStream(attach2), rev3.GetSequence(), testAttachmentName, "text/html", rev2.GetGeneration()); // Check the 2nd revision's attachment: var attachment2 = database.GetAttachmentForSequence(rev2.GetSequence(), testAttachmentName); Assert.AreEqual("text/plain", attachment2.ContentType); data = attachment2.Content.ToArray(); Assert.IsTrue(Arrays.Equals(attach1, data)); // Workaround : // Not closing the content stream will cause Sharing Violation // Exception when trying to get the same attachment going forward. attachment2.ContentStream.Close(); // Check the 3rd revision's attachment: var attachment3 = database.GetAttachmentForSequence(rev3.GetSequence(), testAttachmentName); Assert.AreEqual("text/html", attachment3.ContentType); data = attachment3.Content.ToArray(); Assert.IsTrue(Arrays.Equals(attach2, data)); var attachmentDictForRev3 = database.GetAttachmentsDictForSequenceWithContent(rev3.GetSequence(), DocumentContentOptions.None) .Get(testAttachmentName) .AsDictionary<string,object>(); if (attachmentDictForRev3.ContainsKey("follows")) { if (((bool)attachmentDictForRev3.Get("follows")) == true) { throw new RuntimeException("Did not expected attachment dict 'follows' key to be true" ); } else { throw new RuntimeException("Did not expected attachment dict to have 'follows' key" ); } } // Workaround : // Not closing the content stream will cause Sharing Violation // Exception when trying to get the same attachment going forward. attachment3.ContentStream.Close(); // Examine the attachment store: Assert.AreEqual(2, attachments.Count()); var expected = new HashSet<BlobKey>(); expected.AddItem(BlobStore.KeyForBlob(attach1)); expected.AddItem(BlobStore.KeyForBlob(attach2)); Assert.AreEqual(expected.Count, attachments.AllKeys().Count()); foreach(var key in attachments.AllKeys()) { Assert.IsTrue(expected.Contains(key)); } database.Compact(); // This clears the body of the first revision Assert.AreEqual(1, attachments.Count()); var expected2 = new HashSet<BlobKey>(); expected2.AddItem(BlobStore.KeyForBlob(attach2)); Assert.AreEqual(expected2.Count, attachments.AllKeys().Count()); foreach(var key in attachments.AllKeys()) { Assert.IsTrue(expected2.Contains(key)); } }
// kick something off that will s /// <exception cref="System.Exception"></exception> public virtual void RunLiveQuery(string methodNameToCall) { Database db = StartDatabase(); CountDownLatch doneSignal = new CountDownLatch(11); // 11 corresponds to startKey=23; endKey=33 // run a live query View view = db.GetView("vu"); view.SetMap(new _Mapper_817(), "1"); LiveQuery query = view.CreateQuery().ToLiveQuery(); query.SetStartKey(23); query.SetEndKey(33); Log.I(Tag, "Created " + query); // these are the keys that we expect to see in the livequery change listener callback ICollection<int> expectedKeys = new HashSet<int>(); for (int i = 23; i < 34; i++) { expectedKeys.AddItem(i); } // install a change listener which decrements countdown latch when it sees a new // key from the list of expected keys LiveQuery.ChangeListener changeListener = new _ChangeListener_836(expectedKeys, doneSignal ); query.AddChangeListener(changeListener); // create the docs that will cause the above change listener to decrement countdown latch int kNDocs = 50; CreateDocumentsAsync(db, kNDocs); if (methodNameToCall.Equals("start")) { // start the livequery running asynchronously query.Start(); } else { if (methodNameToCall.Equals("startWaitForRows")) { query.Start(); query.WaitForRows(); } else { NUnit.Framework.Assert.IsNull(query.GetRows()); query.Run(); // this will block until the query completes NUnit.Framework.Assert.IsNotNull(query.GetRows()); } } // wait for the doneSignal to be finished bool success = doneSignal.Await(300, TimeUnit.Seconds); NUnit.Framework.Assert.IsTrue("Done signal timed out, live query never ran", success ); // stop the livequery since we are done with it query.RemoveChangeListener(changeListener); query.Stop(); }
/// <exception cref="System.Exception"></exception> public virtual void TestAttachments() { string testAttachmentName = "test_attachment"; BlobStore attachments = database.GetAttachments(); NUnit.Framework.Assert.AreEqual(0, attachments.Count()); NUnit.Framework.Assert.AreEqual(new HashSet<object>(), attachments.AllKeys()); Status status = new Status(); IDictionary<string, object> rev1Properties = new Dictionary<string, object>(); rev1Properties.Put("foo", 1); rev1Properties.Put("bar", false); RevisionInternal rev1 = database.PutRevision(new RevisionInternal(rev1Properties, database), null, false, status); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); byte[] attach1 = Sharpen.Runtime.GetBytesForString("This is the body of attach1"); database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach1 ), rev1.GetSequence(), testAttachmentName, "text/plain", rev1.GetGeneration()); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); Attachment attachment = database.GetAttachmentForSequence(rev1.GetSequence(), testAttachmentName ); NUnit.Framework.Assert.AreEqual("text/plain", attachment.GetContentType()); byte[] data = IOUtils.ToByteArray(attachment.GetContent()); NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach1, data)); IDictionary<string, object> innerDict = new Dictionary<string, object>(); innerDict.Put("content_type", "text/plain"); innerDict.Put("digest", "sha1-gOHUOBmIMoDCrMuGyaLWzf1hQTE="); innerDict.Put("length", 27); innerDict.Put("stub", true); innerDict.Put("revpos", 1); IDictionary<string, object> attachmentDict = new Dictionary<string, object>(); attachmentDict.Put(testAttachmentName, innerDict); IDictionary<string, object> attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent (rev1.GetSequence(), EnumSet.NoneOf<Database.TDContentOptions>()); NUnit.Framework.Assert.AreEqual(attachmentDict, attachmentDictForSequence); RevisionInternal gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1 .GetRevId(), EnumSet.NoneOf<Database.TDContentOptions>()); IDictionary<string, object> gotAttachmentDict = (IDictionary<string, object>)gotRev1 .GetProperties().Get("_attachments"); NUnit.Framework.Assert.AreEqual(attachmentDict, gotAttachmentDict); // Check the attachment dict, with attachments included: Sharpen.Collections.Remove(innerDict, "stub"); innerDict.Put("data", Base64.EncodeBytes(attach1)); attachmentDictForSequence = database.GetAttachmentsDictForSequenceWithContent(rev1 .GetSequence(), EnumSet.Of(Database.TDContentOptions.TDIncludeAttachments)); NUnit.Framework.Assert.AreEqual(attachmentDict, attachmentDictForSequence); gotRev1 = database.GetDocumentWithIDAndRev(rev1.GetDocId(), rev1.GetRevId(), EnumSet .Of(Database.TDContentOptions.TDIncludeAttachments)); gotAttachmentDict = (IDictionary<string, object>)gotRev1.GetProperties().Get("_attachments" ); NUnit.Framework.Assert.AreEqual(attachmentDict, gotAttachmentDict); // Add a second revision that doesn't update the attachment: IDictionary<string, object> rev2Properties = new Dictionary<string, object>(); rev2Properties.Put("_id", rev1.GetDocId()); rev2Properties.Put("foo", 2); rev2Properties.Put("bazz", false); RevisionInternal rev2 = database.PutRevision(new RevisionInternal(rev2Properties, database), rev1.GetRevId(), false, status); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); database.CopyAttachmentNamedFromSequenceToSequence(testAttachmentName, rev1.GetSequence (), rev2.GetSequence()); // Add a third revision of the 
same document: IDictionary<string, object> rev3Properties = new Dictionary<string, object>(); rev3Properties.Put("_id", rev2.GetDocId()); rev3Properties.Put("foo", 2); rev3Properties.Put("bazz", false); RevisionInternal rev3 = database.PutRevision(new RevisionInternal(rev3Properties, database), rev2.GetRevId(), false, status); NUnit.Framework.Assert.AreEqual(Status.Created, status.GetCode()); byte[] attach2 = Sharpen.Runtime.GetBytesForString("<html>And this is attach2</html>" ); database.InsertAttachmentForSequenceWithNameAndType(new ByteArrayInputStream(attach2 ), rev3.GetSequence(), testAttachmentName, "text/html", rev2.GetGeneration()); // Check the 2nd revision's attachment: Attachment attachment2 = database.GetAttachmentForSequence(rev2.GetSequence(), testAttachmentName ); NUnit.Framework.Assert.AreEqual("text/plain", attachment2.GetContentType()); data = IOUtils.ToByteArray(attachment2.GetContent()); NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach1, data)); // Check the 3rd revision's attachment: Attachment attachment3 = database.GetAttachmentForSequence(rev3.GetSequence(), testAttachmentName ); NUnit.Framework.Assert.AreEqual("text/html", attachment3.GetContentType()); data = IOUtils.ToByteArray(attachment3.GetContent()); NUnit.Framework.Assert.IsTrue(Arrays.Equals(attach2, data)); // Examine the attachment store: NUnit.Framework.Assert.AreEqual(2, attachments.Count()); ICollection<BlobKey> expected = new HashSet<BlobKey>(); expected.AddItem(BlobStore.KeyForBlob(attach1)); expected.AddItem(BlobStore.KeyForBlob(attach2)); NUnit.Framework.Assert.AreEqual(expected, attachments.AllKeys()); database.Compact(); // This clears the body of the first revision NUnit.Framework.Assert.AreEqual(1, attachments.Count()); ICollection<BlobKey> expected2 = new HashSet<BlobKey>(); expected2.AddItem(BlobStore.KeyForBlob(attach2)); NUnit.Framework.Assert.AreEqual(expected2, attachments.AllKeys()); }
/// <summary>Generate and write the bundle to the output stream.</summary> /// <remarks> /// Generate and write the bundle to the output stream. /// <p> /// This method can only be called once per BundleWriter instance. /// </remarks> /// <param name="monitor">progress monitor to report bundle writing status to.</param> /// <param name="os"> /// the stream the bundle is written to. The stream should be /// buffered by the caller. The caller is responsible for closing /// the stream. /// </param> /// <exception cref="System.IO.IOException"> /// an error occurred reading a local object's data to include in /// the bundle, or writing compressed object data to the output /// stream. /// </exception> public virtual void WriteBundle(ProgressMonitor monitor, OutputStream os) { PackConfig pc = packConfig; if (pc == null) { pc = new PackConfig(db); } PackWriter packWriter = new PackWriter(pc, db.NewObjectReader()); try { HashSet<ObjectId> inc = new HashSet<ObjectId>(); HashSet<ObjectId> exc = new HashSet<ObjectId>(); Sharpen.Collections.AddAll(inc, include.Values); foreach (RevCommit r in assume) { exc.AddItem(r.Id); } packWriter.SetDeltaBaseAsOffset(true); packWriter.SetThin(exc.Count > 0); packWriter.SetReuseValidatingObjects(false); if (exc.Count == 0) { packWriter.SetTagTargets(tagTargets); } packWriter.PreparePack(monitor, inc, exc); TextWriter w = new OutputStreamWriter(os, Constants.CHARSET); w.Write(NGit.Transport.TransportBundleConstants.V2_BUNDLE_SIGNATURE); w.Write('\n'); char[] tmp = new char[Constants.OBJECT_ID_STRING_LENGTH]; foreach (RevCommit a in assume) { w.Write('-'); a.CopyTo(tmp, w); if (a.RawBuffer != null) { w.Write(' '); w.Write(a.GetShortMessage()); } w.Write('\n'); } foreach (KeyValuePair<string, ObjectId> e in include.EntrySet()) { e.Value.CopyTo(tmp, w); w.Write(' '); w.Write(e.Key); w.Write('\n'); } w.Write('\n'); w.Flush(); packWriter.WritePack(monitor, monitor, os); } finally { packWriter.Release(); } }
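Callers normally construct a BundleWriter, register the refs to carry and the commits the receiver is assumed to already have, then invoke WriteBundle exactly once. A hedged usage sketch: the repository, ref name, commit, and the openBundleOutput helper are placeholders, and Include/Assume are assumed to be the public calls that populate the include and assume sets used above:

// Sketch only; identifiers below are hypothetical.
BundleWriter bw = new BundleWriter(db);
bw.Include("refs/heads/master", masterTip);        // ref advertised by the bundle
bw.Assume(commitReceiverAlreadyHas);               // prerequisite commit; enables a thin pack
OutputStream os = openBundleOutput();              // hypothetical helper returning a buffered stream
try
{
    bw.WriteBundle(NullProgressMonitor.INSTANCE, os);  // may only be called once per writer
}
finally
{
    os.Close();                                    // the caller owns and closes the stream
}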
public bool Run()
{
    foreach (string docID in docsToRevs.Keys)
    {
        long docNumericID = this._enclosing.GetDocNumericID(docID);
        if (docNumericID == -1)
        {
            continue;
        }
        IList<string> revsPurged = new AList<string>();
        IList<string> revIDs = (IList<string>)docsToRevs.Get(docID);
        if (revIDs == null)
        {
            return false;
        }
        else
        {
            if (revIDs.Count == 0)
            {
                revsPurged = new AList<string>();
            }
            else
            {
                if (revIDs.Contains("*"))
                {
                    try
                    {
                        string[] args = new string[] { System.Convert.ToString(docNumericID) };
                        this._enclosing.database.ExecSQL("DELETE FROM revs WHERE doc_id=?", args);
                    }
                    catch (SQLException e)
                    {
                        Log.E(Database.Tag, "Error deleting revisions", e);
                        return false;
                    }
                    revsPurged = new AList<string>();
                    revsPurged.AddItem("*");
                }
                else
                {
                    Cursor cursor = null;
                    try
                    {
                        string[] args = new string[] { System.Convert.ToString(docNumericID) };
                        string queryString = "SELECT revid, sequence, parent FROM revs WHERE doc_id=? ORDER BY sequence DESC";
                        cursor = this._enclosing.database.RawQuery(queryString, args);
                        if (!cursor.MoveToNext())
                        {
                            Log.W(Database.Tag, "No results for query: " + queryString);
                            return false;
                        }
                        ICollection<long> seqsToPurge = new HashSet<long>();
                        ICollection<long> seqsToKeep = new HashSet<long>();
                        ICollection<string> revsToPurge = new HashSet<string>();
                        while (!cursor.IsAfterLast())
                        {
                            string revID = cursor.GetString(0);
                            long sequence = cursor.GetLong(1);
                            long parent = cursor.GetLong(2);
                            if (seqsToPurge.Contains(sequence) || revIDs.Contains(revID) && !seqsToKeep.Contains(sequence))
                            {
                                seqsToPurge.AddItem(sequence);
                                revsToPurge.AddItem(revID);
                                if (parent > 0)
                                {
                                    seqsToPurge.AddItem(parent);
                                }
                            }
                            else
                            {
                                seqsToPurge.Remove(sequence);
                                revsToPurge.Remove(revID);
                                seqsToKeep.AddItem(parent);
                            }
                            cursor.MoveToNext();
                        }
                        seqsToPurge.RemoveAll(seqsToKeep);
                        Log.I(Database.Tag, string.Format("Purging doc '{0}' revs ({1}); asked for ({2})", docID, revsToPurge, revIDs));
                        if (seqsToPurge.Count > 0)
                        {
                            string seqsToPurgeList = TextUtils.Join(",", seqsToPurge);
                            string sql = string.Format("DELETE FROM revs WHERE sequence in ({0})", seqsToPurgeList);
                            try
                            {
                                this._enclosing.database.ExecSQL(sql);
                            }
                            catch (SQLException e)
                            {
                                Log.E(Database.Tag, "Error deleting revisions via: " + sql, e);
                                return false;
                            }
                        }
                        Sharpen.Collections.AddAll(revsPurged, revsToPurge);
                    }
                    catch (SQLException e)
                    {
                        Log.E(Database.Tag, "Error getting revisions", e);
                        return false;
                    }
                    finally
                    {
                        if (cursor != null)
                        {
                            cursor.Close();
                        }
                    }
                }
            }
        }
        result.Put(docID, revsPurged);
    }
    return true;
}