/// <summary>Returns list of groups for a user.</summary>
/// <param name="user">get groups for this user</param>
/// <returns>list of groups for a given user</returns>
/// <exception cref="System.IO.IOException"/>
public virtual IList<string> GetGroups(string user)
{
    lock (this)
    {
        ICollection<string> groupSet = new TreeSet<string>();
        IList<string> groups = null;
        foreach (GroupMappingServiceProvider provider in providersList)
        {
            try
            {
                groups = provider.GetGroups(user);
            }
            catch (Exception)
            {
                //LOG.warn("Exception trying to get groups for user " + user, e);
            }
            if (groups != null && !groups.IsEmpty())
            {
                Collections.AddAll(groupSet, groups);
                if (!combined)
                {
                    break;
                }
            }
        }
        IList<string> results = new AList<string>(groupSet.Count);
        Collections.AddAll(results, groupSet);
        return results;
    }
}
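Every snippet on this page funnels through the same Collections.AddAll helper, which Sharpen-style Java-to-C# ports supply because .NET's ICollection<T> has no addAll. A minimal sketch of such a helper, assuming only the append-each-element semantics the call sites here rely on (an illustration, not the port's actual source):

using System.Collections.Generic;

internal static class Collections
{
    // Appends every element of a source sequence to a destination collection,
    // mirroring java.util.Collections.addAll as these ported call sites use it.
    public static void AddAll<T>(ICollection<T> dest, IEnumerable<T> source)
    {
        foreach (T item in source)
        {
            dest.Add(item);
        }
    }
}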
internal static void MergeCompactedValue(JObject obj, string key, JToken value)
{
    if (obj == null)
    {
        return;
    }
    var prop = obj[key];
    if (prop.IsNull())
    {
        obj[key] = value;
        return;
    }
    if (!(prop is JArray))
    {
        // Promote the existing scalar to a one-element array so both
        // branches below can append to it.
        var tmp = new JArray();
        tmp.Add(prop);
        obj[key] = tmp;
        prop = tmp;
    }
    if (value is JArray)
    {
        Collections.AddAll((JArray)prop, (JArray)value);
    }
    else
    {
        ((JArray)prop).Add(value);
    }
}
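A short usage sketch of the merge semantics above, assuming a caller in the same assembly (the method is internal) and Newtonsoft.Json.Linq types: merging a second value under an existing key promotes the scalar to an array, and merging an array concatenates.

using Newtonsoft.Json.Linq;

var compacted = new JObject { ["label"] = "first" };

// Scalar + scalar: "first" is promoted to ["first", "second"].
MergeCompactedValue(compacted, "label", new JValue("second"));

// Existing array + array: concatenates to ["first", "second", "third", "fourth"].
MergeCompactedValue(compacted, "label", new JArray("third", "fourth"));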
internal virtual void Abort(ISet<string> createdFiles)
{
    //System.out.println(Thread.currentThread().getName() + ": now abort seg=" + segmentInfo.name);
    hasAborted = aborting = true;
    try
    {
        if (infoStream.IsEnabled("DWPT"))
        {
            infoStream.Message("DWPT", "now abort");
        }
        try
        {
            consumer.Abort();
        }
#pragma warning disable 168
        catch (Exception t)
#pragma warning restore 168
        {
        }
        pendingUpdates.Clear();
        Collections.AddAll(createdFiles, directory.CreatedFiles);
    }
    finally
    {
        aborting = false;
        if (infoStream.IsEnabled("DWPT"))
        {
            infoStream.Message("DWPT", "done abort");
        }
    }
}
/// <summary>Return all the secret key entries in the in-memory map</summary>
public virtual IList<Text> GetAllSecretKeys()
{
    IList<Text> list = new AList<Text>();
    Collections.AddAll(list, secretKeysMap.Keys);
    return list;
}
public virtual ICollection<string> GetCollection()
{
    ICollection<string> list = new AList<string>();
    if (all)
    {
        list.AddItem("*");
    }
    else
    {
        if (ipAddresses != null)
        {
            Collections.AddAll(list, ipAddresses);
        }
        if (hostNames != null)
        {
            Collections.AddAll(list, hostNames);
        }
        if (cidrAddresses != null)
        {
            foreach (SubnetUtils.SubnetInfo cidrAddress in cidrAddresses)
            {
                list.AddItem(cidrAddress.GetCidrSignature());
            }
        }
    }
    return list;
}
/// <exception cref="Org.Apache.Hadoop.Record.Compiler.Generated.ParseException"/> public JFile Input() { AList <JFile> ilist = new AList <JFile>(); AList <JRecord> rlist = new AList <JRecord>(); JFile i; AList <JRecord> l; while (true) { switch ((jj_ntk == -1) ? Jj_ntk() : jj_ntk) { case IncludeTkn: { i = Include(); ilist.AddItem(i); break; } case ModuleTkn: { l = Module(); Collections.AddAll(rlist, l); break; } default: { jj_la1[0] = jj_gen; Jj_consume_token(-1); throw new ParseException(); } } switch ((jj_ntk == -1) ? Jj_ntk() : jj_ntk) { case ModuleTkn: case IncludeTkn: { break; } default: { jj_la1[1] = jj_gen; goto label_1_break; } } label_1_continue :; } label_1_break :; Jj_consume_token(0); { if (true) { return(new JFile(curFileName, ilist, rlist)); } } throw new Error("Missing return statement in function"); }
public override AbstractInsnNode Clone(IDictionary<LabelNode, LabelNode> clonedLabels)
{
    var clone = new LookupSwitchInsnNode(Clone(dflt, clonedLabels), null, Clone(labels, clonedLabels));
    Collections.AddAll(clone.keys, keys);
    return clone.CloneAnnotations(this);
}
private static ICollection<string> GetGroups()
{
    ICollection<string> allGroups = new HashSet<string>();
    foreach (ICollection<string> userGroups in userToNetgroupsMap.Values)
    {
        Collections.AddAll(allGroups, userGroups);
    }
    return allGroups;
}
/// <summary>Get netgroups for a given user</summary>
/// <param name="user">get groups for this user</param>
/// <param name="groups">put groups into this List</param>
public static void GetNetgroups(string user, IList<string> groups)
{
    ICollection<string> userGroups = userToNetgroupsMap[user];
    // ConcurrentHashMap does not allow null values,
    // so a null check can be used to test whether the key exists.
    if (userGroups != null)
    {
        Collections.AddAll(groups, userGroups);
    }
}
private void InitializeAllowedHeaders(FilterConfig filterConfig)
{
    string allowedHeadersConfig = filterConfig.GetInitParameter(AllowedHeaders);
    if (allowedHeadersConfig == null)
    {
        allowedHeadersConfig = AllowedHeadersDefault;
    }
    Collections.AddAll(allowedHeaders, Arrays.AsList(allowedHeadersConfig.Trim().Split("\\s*,\\s*")));
    Log.Info("Allowed Headers: " + GetAllowedHeadersHeader());
}
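The Split("\\s*,\\s*") above is a Java regex split surviving through the port's string helpers; plain .NET string.Split takes no regex pattern. A standalone sketch of the same comma-with-surrounding-whitespace split using Regex.Split (the config value here is made up):

using System;
using System.Text.RegularExpressions;

string allowedHeadersConfig = "X-Requested-With, Content-Type ,Accept ,  Origin";

// Regex.Split plays the role of Java's String.split("\\s*,\\s*").
string[] allowedHeaders = Regex.Split(allowedHeadersConfig.Trim(), @"\s*,\s*");

Console.WriteLine(string.Join("|", allowedHeaders));
// X-Requested-With|Content-Type|Accept|Origin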
/// <summary>Shut down all ThreadPools immediately.</summary>
public virtual IList<Runnable> ShutdownNow()
{
    lock (this)
    {
        Log.Info("Shutting down all AsyncDiskService threads immediately...");
        IList<Runnable> list = new AList<Runnable>();
        foreach (KeyValuePair<string, ThreadPoolExecutor> e in executors)
        {
            Collections.AddAll(list, e.Value.ShutdownNow());
        }
        return list;
    }
}
/// <param name="nif">network interface to get addresses for</param> /// <returns> /// set containing addresses for each subinterface of nif, /// see below for the rationale for using an ordered set /// </returns> private static LinkedHashSet <IPAddress> GetSubinterfaceInetAddrs(NetworkInterface nif) { LinkedHashSet <IPAddress> addrs = new LinkedHashSet <IPAddress>(); Enumeration <NetworkInterface> subNifs = nif.GetSubInterfaces(); while (subNifs.MoveNext()) { NetworkInterface subNif = subNifs.Current; Collections.AddAll(addrs, Collections.List(subNif.GetInetAddresses())); } return(addrs); }
public override FileSystem[] GetChildFileSystems()
{
    IList<InodeTree.MountPoint<FileSystem>> mountPoints = fsState.GetMountPoints();
    ICollection<FileSystem> children = new HashSet<FileSystem>();
    foreach (InodeTree.MountPoint<FileSystem> mountPoint in mountPoints)
    {
        FileSystem targetFs = mountPoint.target.targetFileSystem;
        Collections.AddAll(children, Arrays.AsList(targetFs.GetChildFileSystems()));
    }
    return Collections.ToArray(children, new FileSystem[] { });
}
private void InitializeAllowedOrigins(FilterConfig filterConfig)
{
    string allowedOriginsConfig = filterConfig.GetInitParameter(AllowedOrigins);
    if (allowedOriginsConfig == null)
    {
        allowedOriginsConfig = AllowedOriginsDefault;
    }
    Collections.AddAll(allowedOrigins, Arrays.AsList(allowedOriginsConfig.Trim().Split("\\s*,\\s*")));
    allowAllOrigins = allowedOrigins.Contains("*");
    Log.Info("Allowed Origins: " + StringUtils.Join(allowedOrigins, ','));
    Log.Info("Allow All Origins: " + allowAllOrigins);
}
/// <exception cref="System.Exception"/> private void Change(int exit, string owner, string group, params string[] files) { FileStatus[][] oldStats = new FileStatus[files.Length][]; for (int i = 0; i < files.Length; i++) { oldStats[i] = fileSys.GlobStatus(new Path(files[i])); } IList <string> argv = new List <string>(); if (owner != null) { argv.AddItem("-chown"); string chown = owner; if (group != null) { chown += ":" + group; if (group.IsEmpty()) { group = null; } } // avoid testing for it later argv.AddItem(chown); } else { argv.AddItem("-chgrp"); argv.AddItem(group); } Collections.AddAll(argv, files); Assert.Equal(exit, fsShell.Run(Collections.ToArray(argv , new string[0]))); for (int i_1 = 0; i_1 < files.Length; i_1++) { FileStatus[] stats = fileSys.GlobStatus(new Path(files[i_1])); if (stats != null) { for (int j = 0; j < stats.Length; j++) { Assert.Equal("check owner of " + files[i_1], ((owner != null) ? "STUB-" + owner : oldStats[i_1][j].GetOwner()), stats[j].GetOwner()); Assert.Equal("check group of " + files[i_1], ((group != null) ? "STUB-" + group : oldStats[i_1][j].GetGroup()), stats[j].GetGroup()); } } } }
/// <summary>
/// Expert: create a <see cref="ParallelCompositeReader"/> based on the provided
/// <paramref name="readers"/> and <paramref name="storedFieldReaders"/>; when a document is
/// loaded, only <paramref name="storedFieldReaders"/> will be used.
/// </summary>
public ParallelCompositeReader(bool closeSubReaders, CompositeReader[] readers, CompositeReader[] storedFieldReaders)
    : base(PrepareSubReaders(readers, storedFieldReaders))
{
    this.closeSubReaders = closeSubReaders;
    Collections.AddAll(completeReaderSet, readers);
    Collections.AddAll(completeReaderSet, storedFieldReaders);
    // update ref-counts (like MultiReader):
    if (!closeSubReaders)
    {
        foreach (IndexReader reader in completeReaderSet)
        {
            reader.IncRef();
        }
    }
    // finally add our own synthetic readers, so we close or decRef them, too (it does not matter what we do)
    Collections.AddAll(completeReaderSet, GetSequentialSubReaders());
}
/// <summary>
/// Expands a list of arguments into <see cref="PathData"/> objects. The default
/// behavior is to call <see cref="ExpandArgument(string)"/> on each element,
/// which by default globs the argument. The loop catches IOExceptions,
/// increments the error count, and displays the exception.
/// </summary>
/// <param name="args">strings to expand into <see cref="PathData"/> objects</param>
/// <returns>list of all <see cref="PathData"/> objects representing the arguments</returns>
/// <exception cref="System.IO.IOException">if anything goes wrong...</exception>
protected internal virtual List<PathData> ExpandArguments(List<string> args)
{
    List<PathData> expandedArgs = new List<PathData>();
    foreach (string arg in args)
    {
        try
        {
            Collections.AddAll(expandedArgs, ExpandArgument(arg));
        }
        catch (IOException e)
        {
            // other exceptions are probably nasty
            DisplayError(e);
        }
    }
    return expandedArgs;
}
/// <summary>
/// Returns all the IPs associated with the provided interface, if any, in
/// textual form.
/// </summary>
/// <param name="strInterface">
/// The name of the network interface or sub-interface to query
/// (eg eth0 or eth0:0) or the string "default"
/// </param>
/// <param name="returnSubinterfaces">
/// Whether to return IPs associated with subinterfaces of
/// the given interface
/// </param>
/// <returns>
/// A string vector of all the IPs associated with the provided
/// interface. The local host IP is returned if the interface
/// name "default" is specified or there is an I/O error looking
/// for the given interface.
/// </returns>
/// <exception cref="UnknownHostException">If the given interface is invalid</exception>
public static string[] GetIPs(string strInterface, bool returnSubinterfaces)
{
    if ("default".Equals(strInterface))
    {
        return new string[] { cachedHostAddress };
    }
    NetworkInterface netIf;
    try
    {
        netIf = NetworkInterface.GetByName(strInterface);
        if (netIf == null)
        {
            netIf = GetSubinterface(strInterface);
        }
    }
    catch (SocketException e)
    {
        Log.Warn("I/O error finding interface " + strInterface + ": " + e.Message);
        return new string[] { cachedHostAddress };
    }
    if (netIf == null)
    {
        throw new UnknownHostException("No such interface " + strInterface);
    }
    // NB: Using a LinkedHashSet to preserve the order for callers
    // that depend on a particular element being 1st in the array.
    // For example, getDefaultIP always returns the first element.
    LinkedHashSet<IPAddress> allAddrs = new LinkedHashSet<IPAddress>();
    Collections.AddAll(allAddrs, Collections.List(netIf.GetInetAddresses()));
    if (!returnSubinterfaces)
    {
        allAddrs.RemoveAll(GetSubinterfaceInetAddrs(netIf));
    }
    string[] ips = new string[allAddrs.Count];
    int i = 0;
    foreach (IPAddress addr in allAddrs)
    {
        ips[i++] = addr.GetHostAddress();
    }
    return ips;
}
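The NB comment above is the rationale promised in GetSubinterfaceInetAddrs: callers such as getDefaultIP always read element 0, so insertion order must survive deduplication. A small sketch of that order-preserving-set contract using plain .NET types (an emulation for illustration, not the port's LinkedHashSet):

using System;
using System.Collections.Generic;

// Emulate an insertion-ordered set: a HashSet for dedup, a List for order.
var seen = new HashSet<string>();
var ordered = new List<string>();

foreach (string addr in new[] { "192.168.1.10", "10.0.0.5", "192.168.1.10" })
{
    if (seen.Add(addr)) // false for the duplicate, so first sighting fixes the order
    {
        ordered.Add(addr);
    }
}

Console.WriteLine(ordered[0]); // 192.168.1.10 -- a getDefaultIP-style caller gets the first address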
/// <summary>Returns all files in use by this segment.</summary>
public virtual ICollection<string> Files()
{
    // Start from the wrapped info's files:
    ISet<string> files = new HashSet<string>(Info.GetFiles());

    // TODO we could rely on TrackingDir.getCreatedFiles() (like we do for
    // updates) and then maybe even be able to remove LiveDocsFormat.files().

    // Must separately add any live docs files:
    Info.Codec.LiveDocsFormat.Files(this, files);

    // Must separately add any field updates files:
    foreach (ISet<string> updateFiles in genUpdatesFiles.Values)
    {
        Collections.AddAll(files, updateFiles);
    }
    return files;
}
/// <summary>Get all superInterfaces that extend VersionedProtocol</summary>
/// <param name="childInterfaces"/>
/// <returns>the super interfaces that extend VersionedProtocol</returns>
internal static Type[] GetSuperInterfaces(Type[] childInterfaces)
{
    IList<Type> allInterfaces = new AList<Type>();
    foreach (Type childInterface in childInterfaces)
    {
        if (typeof(VersionedProtocol).IsAssignableFrom(childInterface))
        {
            allInterfaces.AddItem(childInterface);
            Collections.AddAll(allInterfaces, Arrays.AsList(GetSuperInterfaces(childInterface.GetInterfaces())));
        }
        else
        {
            Log.Warn("Interface " + childInterface + " ignored because it does not extend VersionedProtocol");
        }
    }
    return Collections.ToArray(allInterfaces, new Type[allInterfaces.Count]);
}
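As an aside, the recursion above is ported straight from Java, where getInterfaces() returns only direct superinterfaces; .NET's Type.GetInterfaces() already returns the transitive closure, so a native sketch can flatten without recursing (IVersionedProtocol here is a hypothetical stand-in for VersionedProtocol):

using System;
using System.Linq;

public interface IVersionedProtocol { }
public interface IClientProtocol : IVersionedProtocol { }

public static class ProtocolReflection
{
    // Type.GetInterfaces() is transitive in .NET, so one pass suffices.
    public static Type[] GetVersionedInterfaces(Type[] childInterfaces)
    {
        return childInterfaces
            .SelectMany(t => new[] { t }.Concat(t.GetInterfaces()))
            .Where(t => typeof(IVersionedProtocol).IsAssignableFrom(t))
            .Distinct()
            .ToArray();
    }
}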
/// <exception cref="System.IO.IOException"/> public virtual void FillQueueForKey(string keyName, Queue <KeyProviderCryptoExtension.EncryptedKeyVersion > keyQueue, int numKeys) { IList <KeyProviderCryptoExtension.EncryptedKeyVersion> retEdeks = new List <KeyProviderCryptoExtension.EncryptedKeyVersion >(); for (int i = 0; i < numKeys; i++) { try { retEdeks.AddItem(this._enclosing.keyProviderCryptoExtension.GenerateEncryptedKey( keyName)); } catch (GeneralSecurityException e) { throw new IOException(e); } } Collections.AddAll(keyQueue, retEdeks); }
/* It's possible to specify a timeout, in seconds, in the config file */
/* Number of times to retry authentication in the event of auth failure
 * (normally happens due to stale authToken) */
/// <exception cref="System.IO.IOException"/>
public virtual void FillQueueForKey(string keyName, Queue<KeyProviderCryptoExtension.EncryptedKeyVersion> keyQueue, int numEKVs)
{
    KMSClientProvider.CheckNotNull(keyName, "keyName");
    IDictionary<string, string> @params = new Dictionary<string, string>();
    @params[KMSRESTConstants.EekOp] = KMSRESTConstants.EekGenerate;
    @params[KMSRESTConstants.EekNumKeys] = string.Empty + numEKVs;
    Uri url = this._enclosing.CreateURL(KMSRESTConstants.KeyResource, keyName,
        KMSRESTConstants.EekSubResource, @params);
    HttpURLConnection conn = this._enclosing.CreateConnection(url, KMSClientProvider.HttpGet);
    conn.SetRequestProperty(KMSClientProvider.ContentType, KMSClientProvider.ApplicationJsonMime);
    IList response = this._enclosing.Call<IList>(conn, null, HttpURLConnection.HttpOk);
    IList<KeyProviderCryptoExtension.EncryptedKeyVersion> ekvs =
        KMSClientProvider.ParseJSONEncKeyVersion(keyName, response);
    Collections.AddAll(keyQueue, ekvs);
}
public override SourceValue Merge(SourceValue value1, SourceValue value2)
{
    if (Runtime.InstanceOf(value1.insns, typeof(SmallSet<>)) && Runtime.InstanceOf(value2.insns, typeof(SmallSet<>)))
    {
        var setUnion = ((SmallSet<AbstractInsnNode>)value1.insns)
            .Union((SmallSet<AbstractInsnNode>)value2.insns)
            .ToHashSet();
        if (setUnion == value1.insns && value1.size == value2.size)
        {
            return value1;
        }
        return new SourceValue(Math.Min(value1.size, value2.size), setUnion);
    }
    if (value1.size != value2.size || !ContainsAll(value1.insns, value2.insns))
    {
        var setUnion = new HashSet<AbstractInsnNode>();
        Collections.AddAll(setUnion, value1.insns);
        Collections.AddAll(setUnion, value2.insns);
        return new SourceValue(Math.Min(value1.size, value2.size), setUnion);
    }
    return value1;
}
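The fallback branch above is a plain set union over the contributing instructions; as a freestanding sketch with .NET's HashSet, where UnionWith plays the role of the two Collections.AddAll calls:

using System;
using System.Collections.Generic;

var setUnion = new HashSet<int>();
setUnion.UnionWith(new[] { 1, 2, 3 }); // value1.insns
setUnion.UnionWith(new[] { 3, 4 });    // value2.insns -- the shared 3 is kept once

Console.WriteLine(string.Join(",", setUnion)); // 1,2,3,4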
/// <summary>Create a StructTypeID based on the RecordTypeInfo of some record</summary>
public StructTypeID(RecordTypeInfo rti)
    : base(TypeID.RIOType.Struct)
{
    Collections.AddAll(typeInfos, rti.GetFieldTypeInfos());
}
/// <summary>
/// Expert: create a <see cref="ParallelAtomicReader"/> based on the provided
/// <paramref name="readers"/> and <paramref name="storedFieldsReaders"/>; when a document is
/// loaded, only <paramref name="storedFieldsReaders"/> will be used.
/// </summary>
public ParallelAtomicReader(bool closeSubReaders, AtomicReader[] readers, AtomicReader[] storedFieldsReaders)
{
    InitializeInstanceFields();
    this.closeSubReaders = closeSubReaders;
    if (readers.Length == 0 && storedFieldsReaders.Length > 0)
    {
        throw new System.ArgumentException("There must be at least one main reader if storedFieldsReaders are used.");
    }
    this.parallelReaders = (AtomicReader[])readers.Clone();
    this.storedFieldsReaders = (AtomicReader[])storedFieldsReaders.Clone();
    if (parallelReaders.Length > 0)
    {
        AtomicReader first = parallelReaders[0];
        this.maxDoc = first.MaxDoc;
        this.numDocs = first.NumDocs;
        this.hasDeletions = first.HasDeletions;
    }
    else
    {
        this.maxDoc = this.numDocs = 0;
        this.hasDeletions = false;
    }
    Collections.AddAll(completeReaderSet, this.parallelReaders);
    Collections.AddAll(completeReaderSet, this.storedFieldsReaders);
    // check compatibility:
    foreach (AtomicReader reader in completeReaderSet)
    {
        if (reader.MaxDoc != maxDoc)
        {
            throw new System.ArgumentException("All readers must have same MaxDoc: " + maxDoc + "!=" + reader.MaxDoc);
        }
    }
    // TODO: make this read-only in a cleaner way?
    FieldInfos.Builder builder = new FieldInfos.Builder();
    // build FieldInfos and fieldToReader map:
    foreach (AtomicReader reader in this.parallelReaders)
    {
        FieldInfos readerFieldInfos = reader.FieldInfos;
        foreach (FieldInfo fieldInfo in readerFieldInfos)
        {
            // NOTE: first reader having a given field "wins":
            if (!fieldToReader.ContainsKey(fieldInfo.Name))
            {
                builder.Add(fieldInfo);
                fieldToReader[fieldInfo.Name] = reader;
                if (fieldInfo.HasVectors)
                {
                    tvFieldToReader[fieldInfo.Name] = reader;
                }
            }
        }
    }
    fieldInfos = builder.Finish();
    // build Fields instance
    foreach (AtomicReader reader in this.parallelReaders)
    {
        Fields readerFields = reader.Fields;
        if (readerFields != null)
        {
            foreach (string field in readerFields)
            {
                // only add if the reader responsible for that field name is the current:
                if (fieldToReader[field].Equals(reader))
                {
                    this.fields.AddField(field, readerFields.GetTerms(field));
                }
            }
        }
    }
    // do this finally so any Exceptions occurred before don't affect refcounts:
    foreach (AtomicReader reader in completeReaderSet)
    {
        if (!closeSubReaders)
        {
            reader.IncRef();
        }
        reader.RegisterParentReader(this);
    }
}
internal virtual void SealFlushedSegment(FlushedSegment flushedSegment)
{
    Debug.Assert(flushedSegment != null);
    SegmentCommitInfo newSegment = flushedSegment.segmentInfo;
    IndexWriter.SetDiagnostics(newSegment.Info, IndexWriter.SOURCE_FLUSH);
    IOContext context = new IOContext(new FlushInfo(newSegment.Info.DocCount, newSegment.GetSizeInBytes()));
    bool success = false;
    try
    {
        if (indexWriterConfig.UseCompoundFile)
        {
            Collections.AddAll(filesToDelete, IndexWriter.CreateCompoundFile(infoStream, directory, CheckAbort.NONE, newSegment.Info, context));
            newSegment.Info.UseCompoundFile = true;
        }
        // Have codec write SegmentInfo.  Must do this after
        // creating CFS so that 1) .si isn't slurped into CFS,
        // and 2) .si reflects useCompoundFile=true change
        // above:
        codec.SegmentInfoFormat.SegmentInfoWriter.Write(directory, newSegment.Info, flushedSegment.fieldInfos, context);

        // TODO: ideally we would freeze newSegment here!!
        // because any changes after writing the .si will be
        // lost...

        // Must write deleted docs after the CFS so we don't
        // slurp the del file into CFS:
        if (flushedSegment.liveDocs != null)
        {
            int delCount = flushedSegment.delCount;
            Debug.Assert(delCount > 0);
            if (infoStream.IsEnabled("DWPT"))
            {
                infoStream.Message("DWPT", "flush: write " + delCount + " deletes gen=" + flushedSegment.segmentInfo.DelGen);
            }
            // TODO: we should prune the segment if it's 100%
            // deleted... but merge will also catch it.

            // TODO: in the NRT case it'd be better to hand
            // this del vector over to the
            // shortly-to-be-opened SegmentReader and let it
            // carry the changes; there's no reason to use
            // filesystem as intermediary here.
            SegmentCommitInfo info = flushedSegment.segmentInfo;
            Codec codec = info.Info.Codec;
            codec.LiveDocsFormat.WriteLiveDocs(flushedSegment.liveDocs, directory, info, delCount, context);
            newSegment.DelCount = delCount;
            newSegment.AdvanceDelGen();
        }
        success = true;
    }
    finally
    {
        if (!success)
        {
            if (infoStream.IsEnabled("DWPT"))
            {
                infoStream.Message("DWPT", "hit exception creating compound file for newly flushed segment " + newSegment.Info.Name);
            }
        }
    }
}
/// <exception cref="System.IO.IOException"/> public override void CacheGroupsAdd(IList <string> groups) { Log.Info("Adding " + groups + " to groups."); Collections.AddAll(allGroups, groups); }
public RecordHistoryLevel(params HistoryEventTypes[] filterHistoryEventType)
{
    Collections.AddAll(this.RecordedHistoryEventTypes, filterHistoryEventType);
}
private void AddAll(ICollection<FieldTypeInfo> tis)
{
    Collections.AddAll(sTid.GetFieldTypeInfos(), tis);
}
// this shouldn't be a valid iri/bnode i hope!
// TODO: fill with default namespaces
public virtual object Call(RdfDataset dataset)
{
    foreach (var e in dataset.GetNamespaces().GetEnumerableSelf())
    {
        availableNamespaces[e.Value] = e.Key;
    }
    usedNamespaces = new HashSet<string>();
    var refs = new JObject();
    var ttl = new JObject();
    foreach (var graphName in dataset.Keys)
    {
        var localGraphName = graphName;
        var triples = dataset.GetQuads(localGraphName);
        if ("@default".Equals(localGraphName))
        {
            localGraphName = null;
        }
        // http://www.w3.org/TR/turtle/#unlabeled-bnodes
        // TODO: implement nesting for unlabeled nodes

        // map of what the output should look like:
        //   subj (or [ if bnode) > pred > obj
        //                        > obj (set ref if IRI)
        //                 > pred > obj (set ref if bnode)
        //   subj > etc etc etc
        //   subjid -> [ ref, ref, ref ]
        var prevSubject = string.Empty;
        var prevPredicate = string.Empty;
        JObject thisSubject = null;
        JArray thisPredicate = null;
        foreach (var triple in triples)
        {
            var subject = triple.GetSubject().GetValue();
            var predicate = triple.GetPredicate().GetValue();
            if (prevSubject.Equals(subject))
            {
                if (prevPredicate.Equals(predicate))
                {
                    // nothing to do
                }
                else
                {
                    // new predicate
                    if (thisSubject.ContainsKey(predicate))
                    {
                        thisPredicate = (JArray)thisSubject[predicate];
                    }
                    else
                    {
                        thisPredicate = new JArray();
                        thisSubject[predicate] = thisPredicate;
                    }
                    prevPredicate = predicate;
                }
            }
            else
            {
                // new subject
                if (ttl.ContainsKey(subject))
                {
                    thisSubject = (JObject)ttl[subject];
                }
                else
                {
                    thisSubject = new JObject();
                    ttl[subject] = thisSubject;
                }
                if (thisSubject.ContainsKey(predicate))
                {
                    thisPredicate = (JArray)thisSubject[predicate];
                }
                else
                {
                    thisPredicate = new JArray();
                    thisSubject[predicate] = thisPredicate;
                }
                prevSubject = subject;
                prevPredicate = predicate;
            }
            if (triple.GetObject().IsLiteral())
            {
                thisPredicate.Add(triple.GetObject());
            }
            else
            {
                var o = triple.GetObject().GetValue();
                if (o.StartsWith("_:"))
                {
                    // add ref to o
                    if (!refs.ContainsKey(o))
                    {
                        refs[o] = new JArray();
                    }
                    ((JArray)refs[o]).Add(thisPredicate);
                }
                thisPredicate.Add(o);
            }
        }
    }
    var collections = new JObject();
    var subjects = new JArray(ttl.GetKeys());
    // find collections
    foreach (string subj in subjects)
    {
        var preds = (JObject)ttl[subj];
        if (preds != null && preds.ContainsKey(JsonLdConsts.RdfFirst))
        {
            var col = new JArray();
            collections[subj] = col;
            while (true)
            {
                var first = (JArray)Collections.Remove(preds, JsonLdConsts.RdfFirst);
                var o = first[0];
                col.Add(o);
                // refs
                if (refs.ContainsKey((string)o))
                {
                    ((JArray)refs[(string)o]).Remove(first);
                    ((JArray)refs[(string)o]).Add(col);
                }
                var next = (string)Collections.Remove(preds, JsonLdConsts.RdfRest)[0];
                if (JsonLdConsts.RdfNil.Equals(next))
                {
                    break;
                }
                // if collections already contains a value for "next", add
                // it to this col and break out
                if (collections.ContainsKey(next))
                {
                    Collections.AddAll(col, (JArray)Collections.Remove(collections, next));
                    break;
                }
                preds = (JObject)Collections.Remove(ttl, next);
                Collections.Remove(refs, next);
            }
        }
    }
    // process refs (nesting referenced bnodes if only one reference to them
    // in the whole graph)
    foreach (var id in refs.GetKeys())
    {
        // skip items if there is more than one reference to them in the
        // graph
        if (((JArray)refs[id]).Count > 1)
        {
            continue;
        }
        // otherwise embed them into the referenced location
        var @object = Collections.Remove(ttl, id);
        if (collections.ContainsKey(id))
        {
            @object = new JObject();
            var tmp = new JArray();
            tmp.Add(Collections.Remove(collections, id));
            ((JObject)@object)[ColsKey] = tmp;
        }
        var predicate = (JArray)refs[id][0];
        // replace the one bnode ref with the object
        predicate[predicate.LastIndexOf(id)] = @object;
    }
    // replace the rest of the collections
    foreach (var id_1 in collections.GetKeys())
    {
        var subj_1 = (JObject)ttl[id_1];
        if (!subj_1.ContainsKey(ColsKey))
        {
            subj_1[ColsKey] = new JArray();
        }
        ((JArray)subj_1[ColsKey]).Add(collections[id_1]);
    }
    // build turtle output
    var output = GenerateTurtle(ttl, 0, 0, false);
    var prefixes = string.Empty;
    foreach (var prefix in usedNamespaces)
    {
        var name = availableNamespaces[prefix];
        prefixes += "@prefix " + name + ": <" + prefix + "> .\n";
    }
    return (string.Empty.Equals(prefixes) ? string.Empty : prefixes + "\n") + output;
}