static TransportAddressFactory() {
  _string_to_type = new Hashtable();
  _ta_cache = new Cache(CACHE_SIZE);
}
public TimeOutManager() {
  /*
   * Here we set the timeout mechanisms.  There is a default
   * value, but this is now dynamic based on the observed
   * RTT of the network.
   */
  //resend the request after 5 seconds by default
  _min_timeout = 5000;
  _global_stats = new TimeStats(_min_timeout, 0.98);
  //Start with 50 sec timeout
  _acked_rtt_stats = new TimeStats(_min_timeout * 10, 0.98);
  _last_check = DateTime.UtcNow;
  _send_stats = new Cache(1000);
  _type_stats = new Cache(100);
  _sync = new object();
}
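The constructor above only seeds the statistics: a 5 second floor and a 0.98 decay factor, after which the timeout adapts to the round-trip times actually observed. As a rough illustration of that mechanism (a minimal sketch, not the actual TimeStats code; the RttEstimator name, the 4x safety margin, and the clamping policy are assumptions made for this example):

using System;

//Sketch of an adaptive timeout: an exponentially weighted average of RTT
//samples, with the timeout derived from it but never below a fixed floor.
public class RttEstimator {
  readonly double _decay;   //weight kept from the old average per sample
  readonly double _min_ms;  //floor: never time out faster than this
  double _avg_ms;           //current smoothed RTT estimate

  public RttEstimator(double initial_ms, double decay, double min_ms) {
    _avg_ms = initial_ms;
    _decay = decay;
    _min_ms = min_ms;
  }

  //Fold a new RTT sample into the smoothed average.
  public void AddSample(double rtt_ms) {
    _avg_ms = _decay * _avg_ms + (1.0 - _decay) * rtt_ms;
  }

  //The timeout is a multiple of the smoothed RTT, clamped to the floor.
  public TimeSpan Timeout {
    get { return TimeSpan.FromMilliseconds(Math.Max(_min_ms, 4.0 * _avg_ms)); }
  }
}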
public ReqrepManager(string info, PType prefix) {
  lock( _inst_tab_sync ) {
    _instance_table.Replace(info, this);
  }
  _info = info;
  _prefix = prefix;
#if BRUNET_SIMULATOR
  Random r = Node.SimulatorRandom;
#else
  Random r = new Random();
#endif
  //Don't use negative numbers:
  _req_state_table = new UidGenerator<RequestState>(r, true);
  //Don't use negative numbers:
  _reply_id_table = new UidGenerator<ReplyState>(r, true);
  /*
   * We keep a list of the most recent 1000 replies until they
   * get too old.  If the reply gets older than reptimeout, we
   * remove it.
   */
  _reply_cache = new Cache(1000);
  _reply_cache.EvictionEvent += HandleReplyCacheEviction;
  _to_mgr = new TimeOutManager();
}
public ReflectionRpcHandler(RpcManager rpc, object handler, bool use_sender) {
  _rpc = rpc;
  _handler = handler;
  _type = _handler.GetType();
  _use_sender = use_sender;
  _sync = new object();
  //Cache the 10 most used methods:
  _method_cache = new Cache(10);
}
public RpcManager(ReqrepManager rrm) {
  _sync = new Object();
  _rrman = rrm;
  _method_cache = new Cache(CACHE_SIZE);
  _method_handlers = new Hashtable();
}
public ReqrepManager(string info, PType prefix) {
  lock( _inst_tab_sync ) {
    _instance_table.Replace(info, this);
  }
  _info = info;
  _prefix = prefix;
  _req_handler_table = new Hashtable();
  Random r = new Random();
  //Don't use negative numbers:
  _req_state_table = new UidGenerator<RequestState>(r, true);
  //Don't use negative numbers:
  _reply_id_table = new UidGenerator<ReplyState>(r, true);
  _rep_handler_table = new Hashtable();
  /*
   * We keep a list of the most recent 1000 replies until they
   * get too old.  If the reply gets older than reptimeout, we
   * remove it.
   */
  _reply_cache = new Cache(1000);
  _reply_cache.EvictionEvent += HandleReplyCacheEviction;
  /*
   * Here we set the timeout mechanisms.  There is a default
   * value, but this is now dynamic based on the observed
   * RTT of the network.
   */
  //resend the request after 5 seconds.
  _edge_reqtimeout = new TimeSpan(0,0,0,0,5000);
  _nonedge_reqtimeout = new TimeSpan(0,0,0,0,5000);
  //Start with 50 sec timeout
  _acked_reqtimeout = new TimeSpan(0,0,0,0,50000);
  //Here we track the statistics to improve the timeouts:
  _nonedge_rtt_stats = new TimeStats(_nonedge_reqtimeout.TotalMilliseconds, 0.98);
  _edge_rtt_stats = new TimeStats(_edge_reqtimeout.TotalMilliseconds, 0.98);
  _acked_rtt_stats = new TimeStats(_acked_reqtimeout.TotalMilliseconds, 0.98);
  _last_check = DateTime.UtcNow;
}
/**
 * Protected constructor, we want to control ReqrepManager instances
 * running on a node.
 * @param info some context that we work for
 */
protected ReqrepManager(object info) {
  ReqrepManager existing;
  lock( _inst_tab_sync ) {
    if( _instance_table.TryGetValue(info, out existing) ) {
      throw new Exception("Already an existing ReqrepManager for: " + info.ToString());
    }
    else {
      _instance_table[info] = this;
    }
  }
  _info = info.ToString();
  Random r = new Random();
  //Don't use negative numbers:
  _req_state_table = new UidGenerator<RequestState>(r, true);
  //Don't use negative numbers:
  _reply_id_table = new UidGenerator<ReplyState>(r, true);
  /*
   * We keep a list of the most recent 1000 replies until they
   * get too old.  If the reply gets older than reptimeout, we
   * remove it.
   */
  _reply_cache = new Cache(1000);
  _reply_cache.EvictionEvent += HandleReplyCacheEviction;
  _to_mgr = new TimeOutManager();
}
public void TestEnumeration() {
  const int MAX_SIZE = 100;
  Random r = new Random();
  Cache c = new Cache(MAX_SIZE);
  Hashtable ht = new Hashtable();
  for(int i = 0; i < MAX_SIZE; i++) {
    int k = r.Next();
    int v = r.Next();
    ht[k] = v;
    c[k] = v;
  }
  int enum_count = 0;
  foreach(DictionaryEntry de in c) {
    Assert.IsNotNull( c[de.Key], "Enumeration");
    enum_count++;
  }
  Assert.AreEqual(enum_count, c.Count, "Enumeration count");
  //Remove a bunch at random:
  ArrayList removed = new ArrayList();
  for(int i = 0; i < MAX_SIZE / 2; i++) {
    object k = r.Next(0, MAX_SIZE);
    removed.Add( k );
    c.Remove( k );
  }
  //Make sure they are really gone:
  enum_count = 0;
  foreach(DictionaryEntry de in c) {
    Assert.IsNotNull( c[de.Key], "Enumeration after remove");
    enum_count++;
  }
  Assert.AreEqual(enum_count, c.Count, "Enumeration count after remove");
  foreach(object k in removed) {
    Assert.IsNull(c[k], "removed objects removed");
  }
  //Let's enumerate and remove:
  foreach(DictionaryEntry de in c) {
    c.Remove(de.Key);
    Assert.IsNull( c[de.Key], "Removing with enumeration");
  }
  Assert.AreEqual(0, c.Count, "Removed everything");
}
public void TestEviction() {
  const int MAX_SIZE = 1000;
  Random r = new Random();
  Cache c = new Cache(MAX_SIZE);
  Hashtable ht = new Hashtable();
  Hashtable ht_evicted = new Hashtable();
  EventHandler eh = delegate(object o, EventArgs args) {
    Cache.EvictionArgs a = (Cache.EvictionArgs)args;
    ht_evicted[a.Key] = a.Value;
  };
  c.EvictionEvent += eh;
  int i = 0;
  for(i = 0; i < 50 * MAX_SIZE; i++) {
    int v = r.Next();
    ht[i] = v;
    c[i] = v;
    int exp_size = Math.Min(i+1, MAX_SIZE);
    Assert.AreEqual(c.Count, exp_size, "Size check");
    Assert.AreEqual(ht[i], c[i], "equivalence check");
    //Keep the zero'th element in the cache:
    object v_0 = c[0];
    Assert.IsNotNull(v_0, "0th element still in the cache");
  }
  Assert.AreEqual(c.Count, MAX_SIZE, "Full cache");
  //Now check that everything is either in the Cache or was evicted:
  IDictionaryEnumerator ide = ht.GetEnumerator();
  while(ide.MoveNext()) {
    int key = (int)ide.Key;
    int val = (int)ide.Value;
    object c_val = c[key];
    if( !c.Contains(key) ) {
      //Evicted: the value must have been handed to the eviction event:
      Assert.IsNull(c_val, "Evicted entry is null");
      c_val = ht_evicted[key];
      Assert.AreEqual(c_val, val, "Evicted lookup");
    }
    else {
      //Still in the cache:
      Assert.AreEqual(c_val, val, "Cache lookup");
    }
  }
  //Let's remove from the Cache and see if that worked:
  int s0 = c.Count;
  object rv = c.Remove(0);
  Assert.AreEqual( rv, ht[0], "Removed value matches");
  Assert.IsNull(c[0], "Remove really removed");
  Assert.AreEqual( s0 - 1, c.Count, "Removed decreased size");
}
public void TestRecall() {
  const int MAX_SIZE = 100;
  Random r = new Random();
  Cache c = new Cache(MAX_SIZE);
  Hashtable ht = new Hashtable();
  for(int i = 0; i < MAX_SIZE; i++) {
    int k = r.Next();
    int v = r.Next();
    ht[k] = v;
    c[k] = v;
  }
  IDictionaryEnumerator ide = ht.GetEnumerator();
  while(ide.MoveNext()) {
    int key = (int)ide.Key;
    int val = (int)ide.Value;
    object c_val = c[key];
    Assert.AreEqual(c_val, val, "Test lookup");
  }
}
public void TestUpdate() {
  Cache c = new Cache(16);
  object entry = new object();
  object first = new object();
  object second = new object();
  c[entry] = first;
  c[entry] = second;
  Assert.IsTrue(c[entry].Equals(second), "Entry equals second");
}
public RpcManager(ReqrepManager rrm) {
  _sync = new Object();
  _rrman = rrm;
  _method_cache = new Cache(CACHE_SIZE);
  _method_handlers = new Hashtable();
#if DAVID_ASYNC_INVOKE
  _rpc_command = new BlockingQueue();
  _rpc_thread = new Thread(RpcCommandRun);
  _rpc_thread.IsBackground = true;
  _rpc_thread.Start();
#endif
}
static TransportAddressFactory() {
  _string_to_type = new Dictionary<string, TransportAddress.TAType>();
  _ta_cache = new Cache(CACHE_SIZE);
  _ta_factories = new Dictionary<string, Converter<string, TransportAddress>>();
  AddFactoryMethod("tcp", IPTransportAddress.Create);
  AddFactoryMethod("udp", IPTransportAddress.Create);
  AddFactoryMethod("function", IPTransportAddress.Create);
  AddFactoryMethod("tls", IPTransportAddress.Create);
  AddFactoryMethod("tlstest", IPTransportAddress.Create);
  //Here's the odd ball:
  AddFactoryMethod("s", delegate(string s) {
    return new SimulationTransportAddress(s);
  });
}
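This static constructor registers one factory delegate per address scheme, so address creation becomes a dictionary lookup on the scheme rather than a switch statement. As a minimal, self-contained sketch of that pattern (the SchemeFactory name, the Uri result type, the "brunet." prefix handling, and the example address are assumptions for illustration, not the TransportAddressFactory API):

using System;
using System.Collections.Generic;

public static class SchemeFactory {
  static readonly Dictionary<string, Converter<string, Uri>> _factories =
      new Dictionary<string, Converter<string, Uri>>();

  public static void Add(string scheme, Converter<string, Uri> maker) {
    _factories[scheme] = maker;
  }

  //The scheme suffix (after the last '.' and before "://") selects the delegate,
  //e.g. "udp" in "brunet.udp://10.0.0.1:4000".
  public static Uri Create(string address) {
    string scheme = address.Substring(0, address.IndexOf("://", StringComparison.Ordinal));
    int dot = scheme.LastIndexOf('.');
    if( dot >= 0 ) { scheme = scheme.Substring(dot + 1); }
    return _factories[scheme](address);
  }
}

Registering and using a factory then looks like:

SchemeFactory.Add("udp", delegate(string s) { return new Uri(s); });
Uri u = SchemeFactory.Create("brunet.udp://10.0.0.1:4000");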
public FragmentingHandler(int max_frags_cached) : base() {
  _fragments = new Cache(max_frags_cached);
  _fragments.EvictionEvent += this.HandleEviction;
  _frag_count = new SCG.Dictionary<Pair<uint, int>, Fragments>();
}