/** <summary>Begins a new transfer state to the neighbor connected via con.</summary>
<param name="con">The connection to the neighbor we will be transferring data to.</param>
<param name="ts">The table server we're providing the transfer for.  C# does not
allow sub-class objects to have access to their parent objects member variables,
so we pass it in like this.</param>
<remarks>
Step 1: Get all the keys between me and my new neighbor.
Step 2: Get all values for those keys; we copy so that we don't worry about
changes to the dht during this interaction.  This is only a pointer copy and
since we let the OS deal with removing the contents of an entry, we don't need
to make copies of the actual entry.
Step 3: Generate another list of keys of up to max parallel transfers and begin
transferring; that way we do not need to lock access to the entry enumerator
until non-constructor puts.
Step 4: End constructor; results from puts cause the next entry to be sent.
</remarks>
*/
public TransferState(Brunet.Connections.Connection con, TableServer ts) {
  this._ts = ts;
  this._con = con;
  // Step 1: get all keys between me and my new neighbor.  The snapshot is
  // taken under the table server's lock so the key range is consistent.
  LinkedList<MemBlock> keys;
  lock(_ts._sync) {
    keys = _ts._data.GetKeysBetween((AHAddress) _ts._node.Address,
        (AHAddress) _con.Address);
  }
  if(Dht.DhtLog.Enabled) {
    ProtocolLog.Write(Dht.DhtLog, String.Format(
        "Starting transfer from {0} to {1}", _ts._node.Address, _con.Address));
  }
  int total_entries = 0;
  /* Step 2: get all values for those keys.  We copy so that we don't worry
   * about changes to the dht during this interaction.  This is only a pointer
   * copy and since we let the OS deal with removing the contents of an entry,
   * we don't need to make copies of the actual entry.
   */
  foreach(MemBlock key in keys) {
    Entry[] entries;
    lock(_ts._sync) {
      LinkedList<Entry> llentries = _ts._data.GetEntries(key);
      if(llentries == null) {
        // Key may have expired between the key snapshot and now; skip it.
        continue;
      }
      entries = new Entry[llentries.Count];
      total_entries += llentries.Count;
      llentries.CopyTo(entries, 0);
    }
    key_entries.AddLast(entries);
  }
  if(Dht.DhtLog.Enabled) {
    ProtocolLog.Write(Dht.DhtLog, String.Format(
        "Total keys: {0}, total entries: {1}.", key_entries.Count,
        total_entries));
  }
  _entry_enumerator = GetEntryEnumerator();
  /* Step 3: here we generate another list of keys that we would like to
   * transfer in parallel.  This is done here so that we can lock up the
   * _entry_enumerator only during this stage and not during the
   * RpcManager.Invoke.
   */
  LinkedList<Entry> local_entries = new LinkedList<Entry>();
  for(int i = 0; i < MAX_PARALLEL_TRANSFERS && _entry_enumerator.MoveNext(); i++) {
    local_entries.AddLast((Entry) _entry_enumerator.Current);
  }
  // Start the initial batch of transfers; each queue's CloseEvent fires
  // NextTransfer, which keeps the pipeline full from _entry_enumerator.
  foreach(Entry ent in local_entries) {
    Channel queue = new Channel();
    queue.CloseAfterEnqueue();
    queue.CloseEvent += this.NextTransfer;
    // Remaining lease time in seconds for the remote PutHandler.
    int ttl = (int) (ent.EndTime - DateTime.UtcNow).TotalSeconds;
    try {
      _ts._rpc.Invoke(_con.Edge, queue, "dht.PutHandler", ent.Key, ent.Value,
          ttl, false);
    }
    catch {
      // If the edge died mid-transfer, mark the whole transfer interrupted
      // and tear down; otherwise swallow and let the next entry proceed.
      if(_con.Edge.IsClosed) {
        _interrupted = true;
        Done();
        break;
      }
    }
  }
}
/// <summary>This is a RpcDhtProxy rpc call entry, which can be called using "RpcDhtProxy.Register" /// Register the entry to Entry. If the key,value pair does not /// exist in _entries, it creates the pair in the list. Otherwise, it updates the ttl. /// After inserting the entry, this module try to register the key, value pair to neighbor node.</summary> /// <param name="key">dht entry key to insert</param> /// <param name="value">dht entry value to insert</param> /// <param name="ttl">dht entry ttl to insert</param> public bool Register(MemBlock key, MemBlock value, int ttl) { Entry entry = null; lock(_sync) { Dictionary<MemBlock, Entry> key_entries = null; if(!_entries.TryGetValue(key, out key_entries)) { key_entries = new Dictionary<MemBlock, Entry>(); _entries[key] = key_entries; } if(key_entries.ContainsKey(value)) { key_entries[value].Timer.Stop(); } entry = new Entry(key, value, ttl); key_entries[value] = entry; } if(entry != null) { entry.Timer = new SimpleTimer(EntryCallback, entry, 0, RETRY_TIMEOUT); entry.Timer.Start(); } return true; }
/** <summary>Attempts to store the key:value pair into this server.</summary> <remarks>First the dht deletes any expired entries stored at the key, second it retrieves the entries from the data store. If it is empty it creates a new entry and returns. Otherwise, it looks for the value in the list and updates the lease time. If there is no entry for that key:value pair it either adds it in the case of a put or throws an exception if it is a create.</remarks> <param name="key">The index to store the data at.</param> <param name="value">Data to store at the key.</param> <param name="ttl">Dht lease time in seconds</param> <param name="unique">True if this should perform a create, false otherwise. </param> <returns>True on success, thrown exception on failure</returns> <exception cref="Exception">Data is too large, unresolved remote issues, or the create is no successful</exception> */ public bool PutHandler(MemBlock key, MemBlock value, int ttl, bool unique) { DateTime create_time = DateTime.UtcNow; DateTime end_time = create_time.AddSeconds(ttl); lock(_sync) { _data.DeleteExpired(key); LinkedList<Entry> data = _data.GetEntries(key); if(data != null) { foreach(Entry ent in data) { if(ent.Value.Equals(value)) { if(end_time > ent.EndTime) { _data.UpdateEntry(ent.Key, ent.Value, end_time); } return true; } } // If this is a create we didn't find an previous entry, so failure, else add it if(unique) { throw new Exception("ENTRY_ALREADY_EXISTS"); } } // This is either a new key or a new value (put only) Entry e = new Entry(key, value, create_time, end_time); _data.AddEntry(e); } // end of lock return true; }
/** <summary>Retrieves data from the Dht.</summary>
<remarks>First old entries for the key are deleted from the dht, second a look
up is performed, and finally using the token a range of data is selectively
returned.</remarks>
<param name="key">The index used to look up.</param>
<param name="token">Contains the data necessary to do follow up look ups if all
the data stored in a key is too big for MAX_BYTES.</param>
<returns>IList of hashtables containing the results.  Compatible with
DhtGetResult.  Returns null when the key holds no entries.</returns>
*/
public IList Get(MemBlock key, byte[] token) {
  int seen_start_idx = 0;
  int seen_end_idx = 0;
  if( token != null ) {
    // The token is an adr-serialized [start, end] pair from the previous
    // call; resume reading at the entry after the last one returned.
    using(MemoryStream ms = new MemoryStream(token)) {
      int[] bounds = (int[])AdrConverter.Deserialize(ms);
      seen_start_idx = bounds[0];
      seen_end_idx = bounds[1];
      seen_start_idx = seen_end_idx + 1;
    }
  }
  int consumed_bytes = 0;
  Entry[] data = null;
  // Snapshot the key's entries under the lock; the pagination below then
  // works on a stable array even if the table changes concurrently.
  lock(_sync ) {
    _data.DeleteExpired(key);
    LinkedList<Entry> ll_data = _data.GetEntries(key);
    // Keys exist!
    if( ll_data != null ) {
      data = new Entry[ll_data.Count];
      ll_data.CopyTo(data, 0);
    }
  }
  ArrayList result = null;
  if(data != null) {
    result = new ArrayList();
    ArrayList values = new ArrayList();
    int remaining_items = 0;
    byte[] next_token = null;
    // Assume we can return everything; the loop shrinks seen_end_idx if we
    // hit the MAX_BYTES budget first.
    seen_end_idx = data.Length - 1;
    for(int i = seen_start_idx; i < data.Length; i++) {
      Entry e = (Entry) data[i];
      if(e.Value.Length + consumed_bytes <= MAX_BYTES) {
        // age/ttl are computed relative to now so the caller sees how long
        // the entry has lived and how long its lease has left.
        int age = (int) (DateTime.UtcNow - e.CreateTime).TotalSeconds;
        int ttl = (int) (e.EndTime - DateTime.UtcNow).TotalSeconds;
        consumed_bytes += e.Value.Length;
        Hashtable item = new Hashtable();
        item["age"] = age;
        item["value"] = (byte[])e.Value;
        item["ttl"] = ttl;
        values.Add(item);
      }
      else {
        // Budget exhausted: the last returned entry is i - 1.
        seen_end_idx = i - 1;
        break;
      }
    }
    remaining_items = data.Length - (seen_end_idx + 1);
    // Token creation: serialize the [start, end] window we just returned so
    // a follow-up Get can continue from seen_end_idx + 1.
    int[] new_bounds = new int[2];
    new_bounds[0] = seen_start_idx;
    new_bounds[1] = seen_end_idx;
    using(MemoryStream ms = new System.IO.MemoryStream()) {
      AdrConverter.Serialize(new_bounds, ms);
      next_token = ms.ToArray();
    }
    result.Add(values);
    result.Add(remaining_items);
    result.Add(next_token);
  }
  return result;
}
/// <summary>Exercises TableServerData expiration: entries with a short lease
/// disappear after their end time while long-lease entries survive, and
/// UpdateEntry can rescue an entry from expiration.</summary>
public void Test1() {
  RNGCryptoServiceProvider rng = new RNGCryptoServiceProvider();
  TableServerData tsd = new TableServerData("0");
  Entry[] not_expired = new Entry[12];
  Entry[] to_expire = new Entry[12];
  DateTime now = DateTime.UtcNow;
  DateTime live = now.AddSeconds(120);
  DateTime expire = now.AddSeconds(5);
  // 4 random keys, each holding 3 short-lease and 3 long-lease entries.
  for(int i = 0; i < 4; i++) {
    byte[] key = new byte[20];
    rng.GetBytes(key);
    for(int j = 0; j < 3; j++) {
      byte[] value = new byte[20];
      rng.GetBytes(value);
      Entry ent = new Entry(key, value, now, expire);
      to_expire[i * 3 + j] = ent;
      tsd.AddEntry(ent);
      value = new byte[20];
      rng.GetBytes(value);
      ent = new Entry(key, value, now, live);
      not_expired[i * 3 + j] = ent;
      tsd.AddEntry(ent);
      Assert.IsFalse(not_expired[i * 3 + j].Equals(to_expire[i * 3 + j]),
          String.Format("{0}: not_expired == to_expire.", i * 3 + j));
    }
  }
  // Step 0: before anything expires, every entry is retrievable.
  for(int i = 0; i < 4; i++) {
    LinkedList<Entry> entries = tsd.GetEntries(not_expired[i * 3].Key);
    for(int j = 0; j < 3; j++) {
      Assert.IsTrue(entries.Contains(not_expired[i * 3 + j]),
          "step 0: not_expired " + (i * 3 + j));
      Assert.IsTrue(entries.Contains(to_expire[i * 3 + j]),
          "step 0: to_expire " + (i * 3 + j));
    }
  }
  // Refresh the lease on every even-positioned long-lease entry.
  for(int i = 0; i < 4; i++) {
    for(int j = 0; j < 3; j++) {
      int pos = i * 3 + j;
      if(pos % 2 == 0) {
        Entry ent = not_expired[pos];
        tsd.UpdateEntry(ent.Key, ent.Value, now.AddSeconds(160));
      }
    }
  }
  // Rescue one short-lease entry from expiration via UpdateEntry.
  Entry entry = to_expire[11];
  tsd.UpdateEntry(entry.Key, entry.Value, now.AddSeconds(160));
  // Step 1: still before the expiration time; everything remains present.
  for(int i = 0; i < 4; i++) {
    LinkedList<Entry> entries = tsd.GetEntries(not_expired[i * 3].Key);
    for(int j = 0; j < 3; j++) {
      Assert.IsTrue(entries.Contains(not_expired[i * 3 + j]),
          "step 1: not_expired " + (i * 3 + j));
      Assert.IsTrue(entries.Contains(to_expire[i * 3 + j]),
          "step 1: to_expire " + (i * 3 + j));
    }
  }
  // Wait for the short leases to lapse.  The original code burned CPU in a
  // 50,000,000-iteration spin loop; sleeping in short increments passes the
  // same wall-clock time without pegging a core.
  while(DateTime.UtcNow < expire.AddSeconds(1)) {
    System.Threading.Thread.Sleep(50);
  }
  // Step 2: expired entries are gone, long-lease entries survive.  Only the
  // first 3 keys are checked so the last key exercises lazy cleanup below.
  for(int i = 0; i < 3; i++) {
    LinkedList<Entry> entries = tsd.GetEntries(not_expired[i * 3].Key);
    for(int j = 0; j < 3; j++) {
      Assert.IsTrue(entries.Contains(not_expired[i * 3 + j]),
          "step 2: not_expired " + (i * 3 + j));
      Assert.IsFalse(entries.Contains(to_expire[i * 3 + j]),
          "step 2: to_expire " + (i * 3 + j));
    }
  }
  // 12 long-lease entries + the one rescued short-lease entry remain.
  Assert.AreEqual(13, tsd.Count, "Entries we didn't check are removed by CheckEntries.");
}
public void Test0() { RNGCryptoServiceProvider rng = new RNGCryptoServiceProvider(); TableServerData tsd = new TableServerData("0"); byte[] key = new byte[20]; rng.GetBytes(key); DateTime now = DateTime.UtcNow; Entry ent = new Entry(key, key, now, now.AddSeconds(100)); tsd.AddEntry(ent); LinkedList<Entry> entries = tsd.GetEntries(key); Assert.AreEqual(1, entries.Count, "Count after add"); Assert.AreEqual(ent, entries.First.Value, "Entries are equal"); tsd.UpdateEntry(ent.Key, ent.Value, now.AddSeconds(200)); entries = tsd.GetEntries(key); Assert.AreEqual(1, entries.Count, "Count after update"); Assert.AreEqual(ent, entries.First.Value, "Entries are equal"); tsd.RemoveEntry(ent.Key, ent.Value); entries = tsd.GetEntries(key); Assert.AreEqual(tsd.Count, 0, "Count after remove"); Assert.AreEqual(null, entries, "Entry after remove"); }
/** <summary>Disk caching is unsupported at this time.</summary> */ /* When we have a cache miss, we should try to load the data from disk, * if we are successful, we should also delete that file from the disk */ public void CacheMiss(Object o, EventArgs args) { Brunet.Collections.Cache.MissArgs margs = (Brunet.Collections.Cache.MissArgs) args; MemBlock key = (MemBlock) margs.Key; string path = GeneratePath(key); if(File.Exists(path)) { using (FileStream fs = File.Open(path, FileMode.Open)) { ArrayList ht_entries = (ArrayList) AdrConverter.Deserialize(fs); Entry[] entries = new Entry[ht_entries.Count]; int index = 0; foreach(Hashtable entry in ht_entries) { entries[index++] = (Entry) entry; } _data[key] = new LinkedList<Entry>(entries); } File.Delete(path); } }
/** <summary>Disk caching is unsupported at this time.</summary> */ /* When we have a cache eviction, we must write it to disk, we take * each entry, convert it explicitly into a hashtable, and then use adr * to create a stream and write it to disk */ public void CacheEviction(Object o, EventArgs args) { Brunet.Collections.Cache.EvictionArgs eargs = (Brunet.Collections.Cache.EvictionArgs) args; MemBlock key = (MemBlock) eargs.Key; if(Dht.DhtLog.Enabled) { ProtocolLog.Write(Dht.DhtLog, String.Format( "Evicted out of cache {0}, entries in dht {1}, entries in cache {2}", (new BigInteger(key)).ToString(16), Count, _data.Count)); } if(eargs.Value != null && ((LinkedList<Entry>) eargs.Value).Count > 0) { LinkedList<Entry> data = (LinkedList<Entry>) eargs.Value; // AdrConverter doesn't support LinkedLists Entry[] entries = new Entry[data.Count]; data.CopyTo(entries, 0); Hashtable[] ht_entries = new Hashtable[entries.Length]; int index = 0; foreach(Entry entry in entries) { ht_entries[index++] = (Hashtable) entry; } string dir_path, filename; string file_path = GeneratePath(key, out dir_path, out filename); if(!Directory.Exists(dir_path)) { Directory.CreateDirectory(dir_path); } using (FileStream fs = File.Open(file_path, FileMode.Create)) { AdrConverter.Serialize(ht_entries, fs); } } }
/** <summary>This adds an entry and should only be called if no such entry exists, as it does not look to see if a duplicate entry already exists. This creates a new LinkedList if this is the first entry for the specific key and stores it in the _data hashtable. This increments count.</summary> <remarks>Because data is stored by non-decreasing end time, we must place this at the correct position, which by starting at the last entry is right after the first entry that has a shorter end time.</remarks> <param name="entry">The data to store.</param> */ public void AddEntry(Entry entry) { CheckEntries(); LinkedList<Entry> data = (LinkedList<Entry>) _data[entry.Key]; if(data == null) { list_of_keys.AddLast(entry.Key); data = new LinkedList<Entry>(); _data[entry.Key] = data; } LinkedListNode<Entry> ent = data.Last; while(ent != null) { if(entry.EndTime > ent.Value.EndTime) { data.AddAfter(ent, entry); break; } ent = ent.Previous; } if(ent == null) { data.AddFirst(entry); } count++; }