public void SetUp()
{
    var port = int.Parse(ConfigurationManager.AppSettings["port"]);
    var address = ConfigurationManager.AppSettings["address"];

    IPAddress ipAddress;
    if (!IPAddress.TryParse(address, out ipAddress))
    {
        throw new ArgumentException("The 'address' setting is not a valid IP address.", "address");
    }

    // Use defaults
    var endpoint = new IPEndPoint(ipAddress, port);
    ISocketPoolConfiguration config = new SocketPoolConfiguration();
    _node = new CouchbaseNode(endpoint, config);
}
public SocketPool(IMemcachedNode node, ISocketPoolConfiguration config, ISaslAuthenticationProvider provider)
{
    if (config.MinPoolSize < 0)
        throw new InvalidOperationException("MinPoolSize must be >= 0", null);
    if (config.MaxPoolSize < config.MinPoolSize)
        throw new InvalidOperationException("MaxPoolSize must be larger than MinPoolSize", null);
    if (config.QueueTimeout < TimeSpan.Zero)
        throw new InvalidOperationException("QueueTimeout must be >= TimeSpan.Zero", null);

    _provider = provider;
    _node = node;
    _config = config;
    _queue = new Queue<IPooledSocket>(config.MaxPoolSize);
    _isAlive = true;
    PreAllocate(config.MinPoolSize);
}
void IMemcachedNodeLocator.Initialize(IList<IMemcachedNode> nodes)
{
    if (this.isInitialized)
        throw new InvalidOperationException("Instance is already initialized.");

    // locking on this is rude but easy
    lock (initLock)
    {
        if (this.isInitialized)
            throw new InvalidOperationException("Instance is already initialized.");

        if (nodes.Count > 0)
            node = nodes[0];

        this.isInitialized = true;
    }
}
IEnumerable<IMemcachedNode> IMemcachedNodeLocator.GetWorkingNodes()
{
    var nodes = this.nodes;
    var retval = new IMemcachedNode[nodes.Length];
    Array.Copy(nodes, retval, retval.Length);

    return retval;
}
/// <summary>
/// Get the next available node for the given one. For the last node
/// the first one is returned. If this list contains only a
/// single node, conceptually there's no next node, so null
/// is returned.
/// </summary>
private IMemcachedNode FindNextNodeForBackup(IMemcachedNode primaryNode)
{
    if (primaryNode == null || _allServers.Count == 1)
    {
        return null;
    }

    var idx = _allServers.FindIndex(v => v.EndPoint.Equals(primaryNode.EndPoint));
    var nextIdx = (idx == _allServers.Count - 1) ? 0 : idx + 1;
    var backupNode = _allServers[nextIdx];

    if (backupNode.EndPoint.Equals(primaryNode.EndPoint))
    {
        backupNode = null;
    }

    return backupNode;
}
public SocketPool(IMemcachedNode node, ISocketPoolConfiguration config) : this(node, config, null) { }
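// Illustrative only (not taken from the projects above): a minimal sketch of how the two
// SocketPool constructors might be used. The property names MinPoolSize, MaxPoolSize and
// QueueTimeout are inferred from the validation code in the three-argument constructor;
// everything else here is an assumption.
private SocketPool CreateExamplePool(IMemcachedNode node, ISaslAuthenticationProvider provider)
{
    var config = new SocketPoolConfiguration
    {
        MinPoolSize = 5,                              // sockets pre-allocated up front
        MaxPoolSize = 20,                             // hard upper bound on pooled sockets
        QueueTimeout = TimeSpan.FromMilliseconds(100) // how long callers wait for a free socket
    };

    // The two-argument overload simply forwards null for the SASL provider.
    return provider == null
        ? new SocketPool(node, config)
        : new SocketPool(node, config, provider);
}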
public INodeFailurePolicy Create(IMemcachedNode node) { return PolicyInstance; }
INodeFailurePolicy INodeFailurePolicyFactory.Create(IMemcachedNode node) { return PolicyInstance; }
INodeFailurePolicy INodeFailurePolicyFactory.Create(IMemcachedNode node) { return(PolicyInstance); }
private void NodeFailedEvent(IMemcachedNode memcachedNode)
{
    _logger.Error($"Node {memcachedNode.EndPoint} is unavailable");
}
private bool ExecuteWithRedirect(IMemcachedNode startNode, ISingleItemOperation op)
{
    if (startNode.Execute(op))
        return true;

    var iows = op as IOperationWithState;

    // different op factory, we do not know how to retry
    if (iows == null)
        return false;

#if HAS_FORWARD_MAP
    // node responded with invalid vbucket
    // this should happen only when a node is in a transitioning state
    if (iows.State == OpState.InvalidVBucket)
    {
        // check if we have a forward-locator
        // (which supposedly reflects the state of the cluster when all vbuckets have been migrated successfully)
        IMemcachedNodeLocator fl = this.nsPool.ForwardLocator;

        if (fl != null)
        {
            var nextNode = fl.Locate(op.Key);

            if (nextNode != null)
            {
                // the node accepted the request
                if (nextNode.Execute(op))
                    return true;
            }
        }
    }
#endif

    // still invalid vbucket, try all nodes in sequence
    if (iows.State == OperationState.InvalidVBucket)
    {
        var nodes = this.Pool.GetWorkingNodes();

        foreach (var node in nodes)
        {
            if (node.Execute(op))
                return true;

            // the node accepted our request so quit
            if (iows.State != OperationState.InvalidVBucket)
                break;
        }
    }

    return false;
}
private void NodeFail(IMemcachedNode node)
{
    var warnEnabled = log.IsWarnEnabled;
    if (warnEnabled)
    {
        log.WarnFormat("Node {0} is dead.", node.EndPoint);
    }

    // block the rest api listener until we're finished here
    lock (_syncObj)
    {
        var currentState = this.state;

        // the pool has already been reinitialized by the time the node
        // reported its failure, thus it has no connection to the current state
        if (currentState == null || currentState == InternalState.Empty)
        {
            return;
        }

        var fail = this.nodeFailed;
        if (fail != null)
        {
            fail(node);
        }

        // we don't know how to reconfigure the pool when vbucket is
        // enabled, so operations targeting the dead servers will fail.
        // when we have a normal config we just reconfigure the locator,
        // so the items will be rehashed to the working servers
        if (!currentState.IsVbucket)
        {
            if (warnEnabled)
            {
                log.Warn("We have a standard config, so we'll recreate the node locator.");
            }

            ReinitializeLocator(currentState);
        }

        // the timer is stopped until we encounter the first dead server
        // when we have one, we trigger it and it will run after DeadTimeout has elapsed
        if (!this.isTimerActive)
        {
            if (warnEnabled)
            {
                log.Warn("Starting the recovery timer.");
            }

            if (this.resurrectTimer == null)
            {
                this.resurrectTimer = new Timer(this.rezCallback, null, this.deadTimeoutMsec, Timeout.Infinite);
            }
            else
            {
                this.resurrectTimer.Change(this.deadTimeoutMsec, Timeout.Infinite);
            }

            this.isTimerActive = true;

            if (warnEnabled)
            {
                log.Warn("Timer started.");
            }
        }
    }

    if (warnEnabled)
    {
        log.Warn("Fail handler is finished.");
    }
}
private void SetNodeDead(IMemcachedNode n1, List<IMemcachedNode> activeNodes)
{
    // Having to use reflection to set a private field
    var prop = n1.GetType().GetField("internalPoolImpl", BindingFlags.NonPublic | BindingFlags.Instance);
    var internalPoolImpl = prop.GetValue(n1);

    var prop2 = internalPoolImpl.GetType().GetField("isAlive", BindingFlags.NonPublic | BindingFlags.Instance);
    prop2.SetValue(internalPoolImpl, false);

    locator = new SessionNodeLocator();
    locator.Initialize(activeNodes);
}
private bool NotEqual(IMemcachedNode first, IMemcachedNode second)
{
    if (first == null && second == null)
    {
        return false;
    }

    if (first == null || second == null)
    {
        return true;
    }

    return !first.EndPoint.Equals(second.EndPoint);
}
private void NodeFail(IMemcachedNode node)
{
    var warnEnabled = log.IsWarnEnabled;
    if (warnEnabled)
        log.WarnFormat("Node {0} is dead.", node.EndPoint);

    // block the rest api listener until we're finished here
    lock (_syncObj)
    {
        var currentState = this.state;

        // the pool has already been reinitialized by the time the node
        // reported its failure, thus it has no connection to the current state
        if (currentState == null || currentState == InternalState.Empty)
            return;

        var fail = this.nodeFailed;
        if (fail != null)
            fail(node);

        // we don't know how to reconfigure the pool when vbucket is
        // enabled, so operations targeting the dead servers will fail.
        // when we have a normal config we just reconfigure the locator,
        // so the items will be rehashed to the working servers
        if (!currentState.IsVbucket)
        {
            if (warnEnabled)
                log.Warn("We have a standard config, so we'll recreate the node locator.");

            ReinitializeLocator(currentState);
        }

        // the timer is stopped until we encounter the first dead server
        // when we have one, we trigger it and it will run after DeadTimeout has elapsed
        if (!this.isTimerActive)
        {
            if (warnEnabled)
                log.Warn("Starting the recovery timer.");

            if (this.resurrectTimer == null)
                this.resurrectTimer = new Timer(this.rezCallback, null, this.deadTimeoutMsec, Timeout.Infinite);
            else
                this.resurrectTimer.Change(this.deadTimeoutMsec, Timeout.Infinite);

            this.isTimerActive = true;

            if (warnEnabled)
                log.Warn("Timer started.");
        }
    }

    if (warnEnabled)
        log.Warn("Fail handler is finished.");
}
IEnumerable<IMemcachedNode> IMemcachedNodeLocator.GetWorkingNodes()
{
    var ld = this.lookupData;

    if (ld.Servers == null || ld.Servers.Length == 0)
        return Enumerable.Empty<IMemcachedNode>();

    var retval = new IMemcachedNode[ld.Servers.Length];
    Array.Copy(ld.Servers, retval, retval.Length);

    return retval;
}
private void NodeFail(IMemcachedNode node)
{
    var isDebug = log.IsDebugEnabled;
    if (isDebug)
    {
        log.DebugFormat("Node {0} is dead.", node.EndPoint);
    }

    // the timer is stopped until we encounter the first dead server
    // when we have one, we trigger it and it will run after DeadTimeout has elapsed
    lock (this.DeadSync)
    {
        if (this.isDisposed)
        {
            if (log.IsWarnEnabled)
            {
                log.Warn("Got a node fail but the pool is already disposed. Ignoring.");
            }

            return;
        }

        // bubble up the fail event to the client
        var fail = this.nodeFailed;
        if (fail != null)
        {
            fail(node);
        }

        // re-initialize the locator
        var newLocator = this.configuration.CreateNodeLocator();
        newLocator.Initialize(allNodes.Where(n => n.IsAlive).ToArray());
        Interlocked.Exchange(ref this.nodeLocator, newLocator);

        // the timer is stopped until we encounter the first dead server
        // when we have one, we trigger it and it will run after DeadTimeout has elapsed
        if (!this.isTimerActive)
        {
            if (isDebug)
            {
                log.Debug("Starting the recovery timer.");
            }

            if (this.resurrectTimer == null)
            {
                this.resurrectTimer = new Timer(this.rezCallback, null, this.deadTimeoutMsec, Timeout.Infinite);
            }
            else
            {
                this.resurrectTimer.Change(this.deadTimeoutMsec, Timeout.Infinite);
            }

            this.isTimerActive = true;

            if (isDebug)
            {
                log.Debug("Timer started.");
            }
        }
    }
}
private void NodeFail(IMemcachedNode node)
{
    var isDebug = log.IsDebugEnabled;
    if (isDebug)
        log.DebugFormat("Node {0} is dead.", node.EndPoint);

    // block the rest api listener until we're finished here
    lock (this.DeadSync)
    {
        var currentState = this.state;

        // we don't know how to reconfigure the sockIOPool when vbucket is
        // enabled, so operations targeting the dead servers will fail.
        // when we have a normal config we just reconfigure the locator,
        // so the items will be rehashed to the working servers
        if (!currentState.IsVbucket)
        {
            if (isDebug)
                log.Debug("We have a standard config, so we'll recreate the node locator.");

            ReinitializeLocator(currentState);
        }

        // the timer is stopped until we encounter the first dead server
        // when we have one, we trigger it and it will run after DeadTimeout has elapsed
        if (!this.isTimerActive)
        {
            if (isDebug)
                log.Debug("Starting the recovery timer.");

            if (this.resurrectTimer == null)
                this.resurrectTimer = new Timer(this.rezCallback, null, this.deadTimeoutMsec, Timeout.Infinite);
            else
                this.resurrectTimer.Change(this.deadTimeoutMsec, Timeout.Infinite);

            this.isTimerActive = true;

            if (isDebug)
                log.Debug("Timer started.");
        }
    }

    if (isDebug)
        log.Debug("Fail handler is finished.");
}
INodeFailurePolicy INodeFailurePolicyFactory.Create(IMemcachedNode node) { return new FakePolicy(); }
private void NodeFail(IMemcachedNode node)
{
    var isDebug = log.IsDebugEnabled;
    if (isDebug)
        log.DebugFormat("Node {0} is dead.", node.EndPoint);

    // the timer is stopped until we encounter the first dead server
    // when we have one, we trigger it and it will run after DeadTimeout has elapsed
    lock (DeadSync)
    {
        // re-initialize the locator
        // TEST
        var newLocator = this.configuration.CreateNodeLocator();
        newLocator.Initialize(allNodes.Where(n => n.IsAlive).ToArray());
        Interlocked.Exchange(ref this.nodeLocator, newLocator);
        // TEST

        // the timer is stopped until we encounter the first dead server
        // when we have one, we trigger it and it will run after DeadTimeout has elapsed
        if (!this.isTimerActive)
        {
            if (isDebug)
                log.Debug("Starting the recovery timer.");

            if (this.resurrectTimer == null)
                this.resurrectTimer = new Timer(this.rezCallback, null, this.deadTimeoutMsec, Timeout.Infinite);
            else
                this.resurrectTimer.Change(this.deadTimeoutMsec, Timeout.Infinite);

            this.isTimerActive = true;

            if (isDebug)
                log.Debug("Timer started.");
        }
    }
}
private void _memcachedClient_NodeFailed(IMemcachedNode obj) { _logger.LogCritical("Memcached Node Failure - {Node} has failed to respond", obj.EndPoint); }
private void NodeFail(IMemcachedNode node)
{
    var isDebug = log.IsDebugEnabled;
    if (isDebug)
        log.DebugFormat("Node {0} is dead.", node.EndPoint);

    // the timer is stopped until we encounter the first dead server
    // when we have one, we trigger it and it will run after DeadTimeout has elapsed
    lock (this.DeadSync)
    {
        if (this.isDisposed)
        {
            if (log.IsWarnEnabled)
                log.Warn("Got a node fail but the pool is already disposed. Ignoring.");

            return;
        }

        // bubble up the fail event to the client
        var fail = this.nodeFailed;
        if (fail != null)
            fail(node);

        // re-initialize the locator
        var newLocator = this.configuration.CreateNodeLocator();
        newLocator.Initialize(allNodes.Where(n => n.IsAlive).ToArray());
        Interlocked.Exchange(ref this.nodeLocator, newLocator);

        // the timer is stopped until we encounter the first dead server
        // when we have one, we trigger it and it will run after DeadTimeout has elapsed
        if (!this.isTimerActive)
        {
            if (isDebug)
                log.Debug("Starting the recovery timer.");

            if (this.resurrectTimer == null)
                this.resurrectTimer = new Timer(this.rezCallback, null, this.deadTimeoutMsec, Timeout.Infinite);
            else
                this.resurrectTimer.Change(this.deadTimeoutMsec, Timeout.Infinite);

            this.isTimerActive = true;

            if (isDebug)
                log.Debug("Timer started.");
        }
    }
}
INodeFailurePolicy INodeFailurePolicyFactory.Create(IMemcachedNode node) { return(new ThrottlingFailurePolicy(this.ResetAfter, this.FailureThreshold)); }
private static uint[] GenerateKeys(IMemcachedNode node, int numberOfKeys)
{
    const int KeyLength = 4;
    const int PartCount = 1; // (ModifiedFNV.HashSize / 8) / KeyLength; // HashSize is in bits, uint is 4 bytes long

    var k = new uint[PartCount * numberOfKeys];

    // every server is registered numberOfKeys times
    // using UInt32s generated from the different parts of the hash
    // i.e. if the hash is 64 bit:
    // 00 00 aa bb 00 00 cc dd
    // the server will be stored with keys 0x0000aabb & 0x0000ccdd
    // (or a bit differently based on the little/big endianness of the host)
    string address = node.EndPoint.ToString();
    var fnv = new FNV1a();

    for (int i = 0; i < numberOfKeys; i++)
    {
        byte[] data = fnv.ComputeHash(Encoding.ASCII.GetBytes(String.Concat(address, "-", i)));

        for (int h = 0; h < PartCount; h++)
        {
            k[i * PartCount + h] = BitConverter.ToUInt32(data, h * KeyLength);
        }
    }

    return k;
}
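// Illustrative sketch (an assumption, not code from the projects above): GenerateKeys is the
// kind of helper a consistent-hashing ("ketama"-style) locator uses to place each server at
// numberOfKeys points on a hash ring. A locator built on it would typically keep the generated
// keys sorted and, for a given item hash, pick the first ring key >= that hash, wrapping to the
// start of the ring when none is. The names ringKeys and keyToServer below are hypothetical.
private IMemcachedNode LocateNode(uint itemHash, uint[] ringKeys, Dictionary<uint, IMemcachedNode> keyToServer)
{
    // ringKeys is assumed to be sorted ascending; Array.BinarySearch returns the bitwise
    // complement of the insertion index when the exact value is not present.
    int index = Array.BinarySearch(ringKeys, itemHash);
    if (index < 0)
        index = ~index;

    // past the last key: wrap around to the first server on the ring
    if (index >= ringKeys.Length)
        index = 0;

    return keyToServer[ringKeys[index]];
}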
private IOperationResult ExecuteWithRedirect(IMemcachedNode startNode, ISingleItemOperation op)
{
    var result = new BinaryOperationResult();
    var opResult = startNode.Execute(op);

    if (opResult.Success)
    {
        return(result.Pass());
    }

    var iows = op as IOperationWithState;

    // different op factory, we do not know how to retry
    if (iows == null)
    {
        result.InnerResult = opResult.InnerResult;
        return(result.Fail("Operation state was invalid"));
    }

#if HAS_FORWARD_MAP
    // node responded with invalid vbucket
    // this should happen only when a node is in a transitioning state
    if (iows.State == OpState.InvalidVBucket)
    {
        // check if we have a forward-locator
        // (which supposedly reflects the state of the cluster when all vbuckets have been migrated successfully)
        IMemcachedNodeLocator fl = this.nsPool.ForwardLocator;

        if (fl != null)
        {
            var nextNode = fl.Locate(op.Key);

            if (nextNode != null)
            {
                // the node accepted the request
                if (nextNode.Execute(op))
                {
                    return(result.Pass());
                }
            }
        }
    }
#endif

    // still invalid vbucket, try all nodes in sequence
    if (iows.State == OperationState.InvalidVBucket)
    {
        var nodes = this.Pool.GetWorkingNodes();

        foreach (var node in nodes)
        {
            opResult = node.Execute(op);

            if (opResult.Success)
            {
                return(result.Pass());
            }

            // the node accepted our request so quit
            if (iows.State != OperationState.InvalidVBucket)
            {
                break;
            }
        }
    }

    //TODO: why would this happen?
    return(result.Fail("Failed to execute operation"));
}
private void MemcachedClient_NodeFailed(IMemcachedNode obj) { throw new Exception($"ElastiCache endpoint: {obj.EndPoint} failed: {obj}"); }
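// A minimal wiring sketch (an assumption, not taken from the projects above): handlers like the
// two preceding ones are typically attached to the client's NodeFailed event so node outages are
// surfaced to the application. Verify that your MemcachedClient build exposes NodeFailed before
// relying on this.
private void WireNodeFailureHandler(MemcachedClient client)
{
    // subscribe so the handler runs whenever the pool marks a node as dead
    client.NodeFailed += MemcachedClient_NodeFailed;
}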
INodeFailurePolicy INodeFailurePolicyFactory.Create(IMemcachedNode node) => PolicyInstance;
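// Illustrative sketch (an assumption): a minimal failure policy of the kind the factories above
// hand out. INodeFailurePolicy is assumed to expose a single ShouldFail() method that the owning
// node consults after each failed operation; this sample only reports failure after a fixed
// number of consecutive errors. The class and member names are hypothetical, not taken from the
// projects above.
public class CountingFailurePolicy : INodeFailurePolicy
{
    private readonly int threshold;
    private int failureCounter;

    public CountingFailurePolicy(int threshold)
    {
        this.threshold = threshold;
    }

    // called by the owning node whenever an operation against it fails
    bool INodeFailurePolicy.ShouldFail()
    {
        failureCounter++;

        return failureCounter >= threshold;
    }
}

// Matching factory in the same shape as the factories above: it creates one policy instance per
// node so failure counters are not shared between nodes.
public class CountingFailurePolicyFactory : INodeFailurePolicyFactory
{
    public int Threshold { get; set; } = 3;

    INodeFailurePolicy INodeFailurePolicyFactory.Create(IMemcachedNode node) => new CountingFailurePolicy(Threshold);
}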