		public async Task PingFailsFallsOverToHealthyNodeWithoutPing()
		{
			/** Here's an example of a cluster with 2 nodes where the second node always fails on ping */
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(2)
				.Ping(p => p.Succeeds(Always))
				.Ping(p => p.OnPort(9201).FailAlways())
				.StaticConnectionPool()
				.AllDefaults()
			);

			/** When making the calls, the first call goes to 9200, which succeeds,
			* and the 2nd call does a ping on 9201 because it's used for the first time.
			* The ping fails, so we fail over to node 9200, which we've already pinged.
			*
			* Finally we assert that the connection pool has one node that is marked as dead
			*/
			await audit.TraceCalls(

				new ClientCall {
					{ PingSuccess, 9200},
					{ HealthyResponse, 9200},
					{ pool =>
					{
						pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0);
					} }
				},
				new ClientCall {
					{ PingFailure, 9201},
					{ HealthyResponse, 9200},
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) }
				}
			);
		}
		/**
		* Sometimes an unexpected exception happens further down in the pipeline. We wrap these
		* inside an UnexpectedElasticsearchClientException so that information about where in the
		* pipeline the unexpected exception occurred is not lost. Here a call to 9200 fails with a
		* WebException. It then falls over to 9201, which throws a hard exception from within
		* IConnection. We assert that we can still see the audit trail for the whole coordinated request.
		*/

		[U] public async Task WillFailOverKnowConnectionExceptionButNotUnexpected()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
#if DOTNETCORE
				.ClientCalls(r => r.OnPort(9200).FailAlways(new System.Net.Http.HttpRequestException("recover")))
#else
				.ClientCalls(r => r.OnPort(9200).FailAlways(new WebException("recover")))
#endif 
				.ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!")))
				.StaticConnectionPool()
				.Settings(s => s.DisablePing())
			);

			audit = await audit.TraceUnexpectedException(
				new ClientCall {
					{ AuditEvent.BadResponse, 9200 },
					{ AuditEvent.BadResponse, 9201 },
				},
				(e) =>
				{
					e.FailureReason.Should().Be(PipelineFailure.Unexpected);
					e.InnerException.Should().NotBeNull();
					e.InnerException.Message.Should().Be("boom!");
				}
			);
		}
		public async Task PingFailsFallsOverMultipleTimesToHealthyNode()
		{
			/** A cluster with 4 nodes where the second and third pings fail */
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(4)
				.Ping(p => p.SucceedAlways())
				.Ping(p => p.OnPort(9201).FailAlways())
				.Ping(p => p.OnPort(9202).FailAlways())
				.StaticConnectionPool()
				.AllDefaults()
			);

			await audit.TraceCalls(
				/** The first call goes to 9200 which succeeds */
				new ClientCall {
					{ PingSuccess, 9200},
					{ HealthyResponse, 9200},
					{ pool =>
					{
						pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0);
					} }
				},
				/** The 2nd call does a ping on 9201 because it's used for the first time.
				* It fails, so we ping 9202, which also fails. We then ping 9203 because
				* we haven't used it before, and it succeeds */
				new ClientCall {
					{ PingFailure, 9201},
					{ PingFailure, 9202},
					{ PingSuccess, 9203},
					{ HealthyResponse, 9203},
					/** Finally we assert that the connection pool has two nodes that are marked as dead */
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) }
				}
			);
		}
		public async Task DefaultMaxIsNumberOfNodes()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.FailAlways())
				.ClientCalls(r => r.OnPort(9209).SucceedAlways())
				.StaticConnectionPool()
				.Settings(s => s.DisablePing())
			);

			audit = await audit.TraceCall(
				new ClientCall {
					{ BadResponse, 9200 },
					{ BadResponse, 9201 },
					{ BadResponse, 9202 },
					{ BadResponse, 9203 },
					{ BadResponse, 9204 },
					{ BadResponse, 9205 },
					{ BadResponse, 9206 },
					{ BadResponse, 9207 },
					{ BadResponse, 9208 },
					{ HealthyResponse, 9209 }
				}
			);
		}
		/** == Unexpected exceptions
		* When a client call throws an exception that the IConnection cannot handle, this exception will bubble
		* out of the client as an UnexpectedElasticsearchClientException, regardless of whether the client is configured to throw or not.
		* An IConnection is in charge of knowing which exceptions it can recover from. The default IConnection, based on WebRequest, can and
		* will recover from WebExceptions, but other exceptions are grounds for immediately exiting the pipeline.
		*/
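
		/** As a consumer-side sketch of handling this (hedged: the client construction and the search
		* request here are illustrative, assuming the usual NEST/Elasticsearch.Net namespaces are in scope):
		*/
		private static void HandleUnexpectedException(IElasticClient client)
		{
			try
			{
				client.Search<object>(s => s.AllIndices());
			}
			catch (UnexpectedElasticsearchClientException e)
			{
				// FailureReason pinpoints where in the pipeline the exception surfaced
				Console.WriteLine(e.FailureReason);
				// InnerException carries the original unexpected exception
				Console.WriteLine(e.InnerException?.Message);
			}
		}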

		[U] public async Task UnexpectedExceptionsBubbleOut()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.SucceedAlways())
				.ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!")))
				.StaticConnectionPool()
				.Settings(s => s.DisablePing())
			);

			audit = await audit.TraceCall(
				new ClientCall {
					{ AuditEvent.HealthyResponse, 9200 },
				}
			);

			audit = await audit.TraceUnexpectedException(
				new ClientCall {
					{ AuditEvent.BadResponse, 9201 },
				},
				(e) =>
				{
					e.FailureReason.Should().Be(PipelineFailure.Unexpected);
					e.InnerException.Should().NotBeNull();
					e.InnerException.Message.Should().Be("boom!");
				}
			);
		}
		public async Task DetectsDataNodes()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.Sniff(s => s.Fails(Always))
				.Sniff(s => s.OnPort(9202)
					.Succeeds(Always, Framework.Cluster.Nodes(8).StoresNoData(9200, 9201, 9202))
				)
				.SniffingConnectionPool()
				.AllDefaults()
			)
			{
				AssertPoolBeforeCall = (pool) =>
				{
					pool.Should().NotBeNull();
					pool.Nodes.Should().HaveCount(10);
					pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(10);
				},

				AssertPoolAfterCall = (pool) =>
				{
					pool.Should().NotBeNull();
					pool.Nodes.Should().HaveCount(8);
					pool.Nodes.Where(n => n.HoldsData).Should().HaveCount(5);
				}
			};
			await audit.TraceStartup();
		}
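
		/** Since a sniff teaches the client which nodes hold data, you can also tell the client to only
		* route requests to data nodes. A minimal sketch, assuming NEST's NodePredicate setting (the
		* seed uri here is illustrative):
		*/
		private static IElasticClient DataNodesOnlyClient()
		{
			var pool = new SniffingConnectionPool(new[] { new Uri("http://localhost:9200") });
			var settings = new ConnectionSettings(pool)
				.NodePredicate(n => n.HoldsData); // only send API calls to nodes that hold data
			return new ElasticClient(settings);
		}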
		public async Task ASniffOnStartupHappensOnce()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.Sniff(s => s.Fails(Always))
				.Sniff(s => s.OnPort(9202).Succeeds(Always))
				.SniffingConnectionPool()
				.AllDefaults()
			);

			 await audit.TraceCalls(
				 new ClientCall
				 {
					{ SniffOnStartup},
					{ SniffFailure, 9200},
					{ SniffFailure, 9201},
					{ SniffSuccess, 9202},
					{ PingSuccess, 9200},
					{ HealthyResponse, 9200}
				},
				new ClientCall
				{
					{ PingSuccess, 9201},
					{ HealthyResponse, 9201}
				}
			);
		}
		[U] public async Task DisableSniffAndPing()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.SucceedAlways())
				.SniffingConnectionPool()
				.Settings(s => s.SniffOnStartup())
			);

			audit = await audit.TraceCall(
				new ClientCall(r=>r.DisableSniffing().DisablePing()) {
					{ HealthyResponse, 9200 }
				}
			);
		}
		public async Task OnlyCallsForcedNode()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.SucceedAlways())
				.ClientCalls(r => r.OnPort(9208).FailAlways())
				.StaticConnectionPool()
				.Settings(s => s.DisablePing())
			);

			audit = await audit.TraceCall(
				new ClientCall(r => r.ForceNode(new Uri("http://localhost:9208"))) {
					{ BadResponse, 9208 }
				}
			);
		}
		public async Task DoesNotRetryOnSingleNodeConnectionPool()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
				.ClientCalls(r => r.OnPort(9209).SucceedAlways())
				.SingleNodeConnection()
				.Settings(s => s.DisablePing().MaximumRetries(10))
			);

			audit = await audit.TraceCall(
				new ClientCall(r => r.MaxRetries(10)) {
					{ BadResponse, 9200 }
				}
			);

		}
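
		/** A single node connection pool is what you get when seeding the client with a single node; a
		* hedged sketch of that setup (the uri is illustrative):
		*/
		private static IElasticClient SingleNodeClient()
		{
			var pool = new SingleNodeConnectionPool(new Uri("http://localhost:9200"));
			var settings = new ConnectionSettings(pool);
			// with only one node there is nothing to fail over to, so the client will not retry
			return new ElasticClient(settings);
		}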
Example #11
0
		public async Task Http503FallsOver()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.FailAlways(503))
				.ClientCalls(r => r.OnPort(9201).SucceedAlways())
				.StaticConnectionPool()
				.Settings(s => s.DisablePing())
			);

			audit = await audit.TraceCall(
				new ClientCall {
					{ BadResponse, 9200 },
					{ HealthyResponse, 9201 },
				}
			);
		}
		public async Task PingAfterRevival()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(3)
				.ClientCalls(r => r.SucceedAlways())
				.ClientCalls(r => r.OnPort(9202).Fails(Once))
				.Ping(p => p.SucceedAlways())
				.StaticConnectionPool()
				.AllDefaults()
			);

			audit = await audit.TraceCalls(
				new ClientCall { { PingSuccess, 9200 }, { HealthyResponse, 9200 } },
				new ClientCall { { PingSuccess, 9201 }, { HealthyResponse, 9201 } },
				new ClientCall {
					{ PingSuccess, 9202},
					{ BadResponse, 9202},
					{ HealthyResponse, 9200},
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) }
				},
				new ClientCall { { HealthyResponse, 9201 } },
				new ClientCall { { HealthyResponse, 9200 } },
				new ClientCall { { HealthyResponse, 9201 } },
				new ClientCall {
					{ HealthyResponse, 9200 },
					{ pool => pool.Nodes.First(n=>!n.IsAlive).DeadUntil.Should().BeAfter(DateTime.UtcNow) }
				}
			);

			audit = await audit.TraceCalls(
				new ClientCall { { HealthyResponse, 9201 } },
				new ClientCall { { HealthyResponse, 9200 } },
				new ClientCall { { HealthyResponse, 9201 } }
			);

			audit.ChangeTime(d => d.AddMinutes(20));

			audit = await audit.TraceCalls(
				new ClientCall { { HealthyResponse, 9201 } },
				new ClientCall {
					{ Resurrection, 9202 },
					{ PingSuccess, 9202 },
					{ HealthyResponse, 9202 }
				}
			);
		}
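
		/** How long a node is kept dead before it is resurrected is governed by the dead timeout
		* settings. A hedged sketch, assuming the DeadTimeout and MaxDeadTimeout settings on
		* ConnectionSettings (the values are illustrative):
		*/
		private static ConnectionSettings DeadTimeoutSettings(IConnectionPool pool) =>
			new ConnectionSettings(pool)
				.DeadTimeout(TimeSpan.FromMinutes(1))      // first failure keeps a node out for a minute
				.MaxDeadTimeout(TimeSpan.FromMinutes(30)); // repeated failures back off up to this ceiling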
		public async Task RespectsOveralRequestTimeout()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(10)))
				.ClientCalls(r => r.OnPort(9209).SucceedAlways())
				.StaticConnectionPool()
				.Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(20)))
			);

			audit = await audit.TraceCall(
				new ClientCall {
					{ BadResponse, 9200 },
					{ BadResponse, 9201 },
					{ MaxTimeoutReached }
				}
			);
		}
		public async Task CanOverrideBadResponse()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.FailAlways(400))
				.StaticConnectionPool()
				.Settings(s => s.DisablePing().MaximumRetries(0))
			);

			audit = await audit.TraceCalls(
				new ClientCall() {
					{ BadResponse, 9200 }
				},
				new ClientCall(r => r.AllowedStatusCodes(400)) {
					{ HealthyResponse, 9201 }
				}
			);
		}
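
		/** Outside of the virtualized cluster, allowing a status code on a real request could look like
		* this hedged sketch (the request is illustrative and assumes AllowedStatusCodes on the request
		* configuration):
		*/
		private static ISearchResponse<object> SearchAllowing400(IElasticClient client) =>
			client.Search<object>(s => s
				.RequestConfiguration(r => r.AllowedStatusCodes(400)) // treat a 400 as a valid response
			);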
		public async Task FixedMaximumNumberOfRetries()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.FailAlways())
				.ClientCalls(r => r.OnPort(9209).SucceedAlways())
				.StaticConnectionPool()
				.Settings(s => s.DisablePing().MaximumRetries(5))
			);

			audit = await audit.TraceCall(
				new ClientCall(r => r.MaxRetries(2)) {
					{ BadResponse, 9200 },
					{ BadResponse, 9201 },
					{ BadResponse, 9202 },
					{ MaxRetriesReached }
				}
			);
		}
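
		/** Retries can be capped globally through ConnectionSettings and tightened further per request.
		* A hedged sketch of both levels (the values and the request are illustrative):
		*/
		private static ISearchResponse<object> SearchWithRetryBudget(IConnectionPool pool)
		{
			var settings = new ConnectionSettings(pool).MaximumRetries(5); // global cap
			var client = new ElasticClient(settings);
			return client.Search<object>(s => s
				.RequestConfiguration(r => r.MaxRetries(2)) // this request only retries twice
			);
		}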
		public async Task SniffOnStartUpTakesNewClusterState()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.Sniff(s => s.Fails(Always))
				.Sniff(s => s.OnPort(9202).Succeeds(Always, Framework.Cluster.Nodes(8, startFrom: 9204)))
				.SniffingConnectionPool()
				.AllDefaults()
			);

			await audit.TraceCall(new ClientCall {
				{ SniffOnStartup},
				{ SniffFailure, 9200},
				{ SniffFailure, 9201},
				{ SniffSuccess, 9202},
				{ PingSuccess, 9204},
				{ HealthyResponse, 9204}
			});
		}
		public async Task RespectsConnectTimeoutOverride()
		{
			/** We set up a 10 node cluster where pings always succeed but take 20 seconds.
			* The client is configured with a request timeout and a ping timeout of 10 seconds,
			* so a single slow ping is enough to exhaust the overall request timeout.
			*/
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.Ping(p => p.SucceedAlways().Takes(TimeSpan.FromSeconds(20)))
				.ClientCalls(r => r.SucceedAlways())
				.StaticConnectionPool()
				.Settings(s => s.RequestTimeout(TimeSpan.FromSeconds(10)).PingTimeout(TimeSpan.FromSeconds(10)))
			);

			audit = await audit.TraceCalls(
				/**
				* The first call uses the configured global settings: the request times out after 10 seconds
				* and pings always take 20, so we should see a single ping failure
				*/
				new ClientCall {
					{ PingFailure, 9200 },
					{ MaxTimeoutReached }
				},
				/**
				* On the second request we set a per request ping timeout override of 2 seconds.
				* We should now see more nodes being tried before the request timeout is hit.
				*/
				new ClientCall(r => r.PingTimeout(TimeSpan.FromSeconds(2)))
				{
					{ PingFailure, 9202 },
					{ PingFailure, 9203 },
					{ PingFailure, 9204 },
					{ PingFailure, 9205 },
					{ PingFailure, 9206 },
					{ MaxTimeoutReached }
				}
			);

		}
		public async Task RespectsRequestTimeoutOverride()
		{

			/** We set up a 10 node cluster with a global request timeout of 20 seconds.
			* Each call on a node takes 10 seconds, so we can only try this call on 2 nodes
			* before the max request timeout kills the client call.
			*/
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(10)))
				.ClientCalls(r => r.OnPort(9209).SucceedAlways())
				.StaticConnectionPool()
				.Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(20)))
			);

			audit = await audit.TraceCalls(
				new ClientCall {
					{ BadResponse, 9200 },
					{ BadResponse, 9201 },
					{ MaxTimeoutReached }
				},
				/**
				* On the second request we specify a request timeout override of 80 seconds.
				* We should now see more nodes being tried.
				*/
				new ClientCall(r => r.RequestTimeout(TimeSpan.FromSeconds(80)))
				{
					{ BadResponse, 9203 },
					{ BadResponse, 9204 },
					{ BadResponse, 9205 },
					{ BadResponse, 9206 },
					{ BadResponse, 9207 },
					{ BadResponse, 9208 },
					{ HealthyResponse, 9209 },
				}
			);

		}
		/**== Disabling sniffing and pinging on a request basis
		*
		* Even if you are using a sniffing connection pool that's set up to sniff on startup/failure
		* with pinging enabled, you can opt out of this behaviour on a _per request_ basis.
		*
		* In our first test we set up a cluster that pings and sniffs on startup,
		* but we disable sniffing on our first request, so we only see the ping and the response.
		*/

		[U] public async Task DisableSniff()
		{
			/** Let's set up the cluster and configure clients to **always** sniff on startup */
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.ClientCalls(r => r.SucceedAlways())
				.SniffingConnectionPool()
				.Settings(s => s.SniffOnStartup()) // <1> sniff on startup
			);


			audit = await audit.TraceCalls(
				/** Now we disable sniffing on the request, so even though it's our first call, we do not sniff on startup */
				new ClientCall(r => r.DisableSniffing()) // <1> disable sniffing
				{
					{ PingSuccess, 9200 }, // <2> first call is a successful ping
					{ HealthyResponse, 9200 }
				},
				/** Instead, the sniff on startup is deferred to the second call into the cluster that
				* does not disable sniffing on a per request basis
				*/
				new ClientCall()
				{
					{ SniffOnStartup }, // <3> sniff on startup call happens here, on the second call
					{ SniffSuccess, 9200 },
					{ PingSuccess, 9200 },
					{ HealthyResponse, 9200 }
				},
				/** And after that no sniff on startup will happen again */
				new ClientCall()
				{
					{ PingSuccess, 9201 },
					{ HealthyResponse, 9201 }
				}
			);
		}
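
		/** Outside of the virtualized cluster, the same per request opt-out could look like this hedged
		* sketch (the request is illustrative):
		*/
		private static ISearchResponse<object> SearchWithoutSniffOrPing(IElasticClient client) =>
			client.Search<object>(s => s
				.RequestConfiguration(r => r
					.DisableSniffing() // skip sniffing for this request only
					.DisablePing()     // skip pinging for this request only
				)
			);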
		/**	As an example, let's set up a 10 node cluster that will always succeed when pinged but
			will fail with a 401 response when making client calls
		*/
		[U] public async Task BadAuthenticationIsUnrecoverable()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.Ping(r => r.SucceedAlways())
				.ClientCalls(r => r.FailAlways(401))
				.StaticConnectionPool()
				.AllDefaults()
			);

			/** Here we make a client call and determine that the first audit event was a successful ping, 
			* followed by a bad response as a result of a bad authentication response
			*/
			audit = await audit.TraceElasticsearchException(
				new ClientCall {
					{ AuditEvent.PingSuccess, 9200 },
					{ AuditEvent.BadResponse, 9200 },
				},
				(e) =>
				{
					e.FailureReason.Should().Be(PipelineFailure.BadAuthentication);
				}
			);
		}
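
		/** Since a 401 is unrecoverable, the remedy is to send credentials in the first place. A hedged
		* sketch using the basic authentication setting on ConnectionSettings (credentials illustrative):
		*/
		private static ConnectionSettings AuthenticatedSettings(IConnectionPool pool) =>
			new ConnectionSettings(pool)
				.BasicAuthentication("user", "pass"); // sent with every request, avoiding the 401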
#pragma warning disable 1998
        public async Task<Auditor> TraceElasticsearchExceptionOnResponse(ClientCall callTrace, Action<ElasticsearchClientException> assert)
#pragma warning restore 1998
        {
            this._cluster = _cluster ?? this.Cluster();
            this._cluster.ClientThrows(false);
            this.AssertPoolBeforeCall?.Invoke(this._cluster.ConnectionPool);

            Action call = () => { this.Response = this._cluster.ClientCall(callTrace?.RequestOverrides); };

            call.ShouldNotThrow();

            this.Response.ShouldNotBeValid();
            var exception = this.Response.ApiCall.OriginalException as ElasticsearchClientException;

            exception.Should().NotBeNull("OriginalException on response is not expected ElasticsearchClientException");
            assert(exception);

            this.AuditTrail = exception.AuditTrail;
            this.AssertPoolAfterCall?.Invoke(this._cluster.ConnectionPool);

            this._clusterAsync = _clusterAsync ?? this.Cluster();
            this._clusterAsync.ClientThrows(false);
            Func<Task> callAsync = async () => { this.ResponseAsync = await this._clusterAsync.ClientCallAsync(callTrace?.RequestOverrides); };

            callAsync.ShouldNotThrow();
            this.ResponseAsync.ShouldNotBeValid();
            exception = this.ResponseAsync.ApiCall.OriginalException as ElasticsearchClientException;
            exception.Should().NotBeNull("OriginalException on response is not expected ElasticsearchClientException");
            assert(exception);

            this.AsyncAuditTrail = exception.AuditTrail;
            this.AssertPoolAfterCall?.Invoke(this._clusterAsync.ConnectionPool);
            var audit = new Auditor(_cluster, _clusterAsync);

            return audit;
        }
		/**
		* An unexpected hard exception on ping or sniff is something we *do* try to recover from and fail over on.
		* Here pinging nodes on first use is enabled and 9200 throws on ping, so we still fail over to 9201, whose ping succeeds.
		* However, the client call on 9201 throws a hard exception we cannot recover from
		*/

		[U] public async Task PingUnexceptedExceptionDoesFailOver()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.Ping(r => r.OnPort(9200).FailAlways(new Exception("ping exception")))
				.Ping(r => r.OnPort(9201).SucceedAlways())
				.ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!")))
				.StaticConnectionPool()
				.AllDefaults()
			);

			audit = await audit.TraceUnexpectedException(
				new ClientCall {
					{ AuditEvent.PingFailure, 9200 },
					{ AuditEvent.PingSuccess, 9201 },
					{ AuditEvent.BadResponse, 9201 },
				},
				(e) =>
				{
					e.FailureReason.Should().Be(PipelineFailure.Unexpected);

					/** InnerException is the exception that brought the request down */
					e.InnerException.Should().NotBeNull();
					e.InnerException.Message.Should().Be("boom!");

					/** The hard exception that happened on ping is still available though */
					e.SeenExceptions.Should().NotBeEmpty();
					var pipelineException = e.SeenExceptions.First();
					pipelineException.FailureReason.Should().Be(PipelineFailure.PingFailure);
					pipelineException.InnerException.Message.Should().Be("ping exception");

					/** A seen exception is hard to relate back to a point in time, so the exception is also
					* available on the audit trail
					*/
					var pingException = e.AuditTrail.First(a => a.Event == AuditEvent.PingFailure).Exception;
					pingException.Should().NotBeNull();
					pingException.Message.Should().Be("ping exception");

				}
			);
		}
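
		/** The audit trail is just as useful outside of assertions. A hedged sketch that walks an
		* exception's audit trail for diagnostics (property names assume Elasticsearch.Net's Audit type):
		*/
		private static void DumpAuditTrail(ElasticsearchClientException e)
		{
			foreach (var audit in e.AuditTrail)
			{
				// each audit records what happened, on which node, and any exception seen there
				Console.WriteLine($"{audit.Event} on {audit.Node?.Uri}: {audit.Exception?.Message}");
			}
		}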
		public async Task AllNodesArePingedOnlyOnFirstUseProvidedTheyAreHealthy()
		{
			/** A healthy cluster of 4 nodes (with minimum master nodes set to 3, of course!) */
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(4)
				.Ping(p => p.SucceedAlways())
				.StaticConnectionPool()
				.AllDefaults()
			);

			await audit.TraceCalls(
				new ClientCall { { PingSuccess, 9200}, { HealthyResponse, 9200} },
				new ClientCall { { PingSuccess, 9201}, { HealthyResponse, 9201} },
				new ClientCall { { PingSuccess, 9202}, { HealthyResponse, 9202} },
				new ClientCall { { PingSuccess, 9203}, { HealthyResponse, 9203} },
				new ClientCall { { HealthyResponse, 9200} },
				new ClientCall { { HealthyResponse, 9201} },
				new ClientCall { { HealthyResponse, 9202} },
				new ClientCall { { HealthyResponse, 9203} },
				new ClientCall { { HealthyResponse, 9200} }
			);
		}
		/** == Sniffing on connection failure
		* Sniffing on connection failure is enabled by default when using a connection pool that allows reseeding.
		* The only IConnectionPool we ship that allows this is the SniffingConnectionPool.
		*
		* This can be very handy to force a refresh of the pool's known healthy nodes by inspecting Elasticsearch itself.
		* A sniff tries to get the nodes by asking each currently known node until one responds.
		*/
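
		/** A hedged sketch of wiring this up for a real client (the seed uris are illustrative, and
		* SniffOnConnectionFault is assumed to be the relevant ConnectionSettings toggle):
		*/
		private static IElasticClient SniffingClient()
		{
			var uris = new[] { new Uri("http://localhost:9200"), new Uri("http://localhost:9201") };
			var pool = new SniffingConnectionPool(uris); // the only shipped pool that supports reseeding
			var settings = new ConnectionSettings(pool).SniffOnConnectionFault(); // re-sniff when a node fails
			return new ElasticClient(settings);
		}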

		[U] public async Task DoesASniffAfterConnectionFailure()
		{
			/**
			* Here we seed our connection pool with 5 known nodes, 9200-9204, of which we think
			* 9202, 9203 and 9204 are master eligible nodes. Our virtualized cluster will throw once when
			* doing a search on 9201. This should cause a sniff to be kicked off.
			*/
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(5)
				.MasterEligable(9202, 9203, 9204)
				.ClientCalls(r => r.SucceedAlways())
				.ClientCalls(r => r.OnPort(9201).Fails(Once))
				/**
				* When the call fails on 9201, the sniff succeeds and returns a new cluster of healthy nodes.
				* This cluster only has 3 nodes and the known masters are 9200 and 9202, but a search on 9201
				* still fails once
				*/
				.Sniff(p => p.SucceedAlways(Framework.Cluster
					.Nodes(3)
					.MasterEligable(9200, 9202)
					.ClientCalls(r => r.OnPort(9201).Fails(Once))
					/**
					* After this second failure on 9201, another sniff will return a cluster that no
					* longer fails but looks completely different (9210-9212); we should be able to handle this
					*/
					.Sniff(s => s.SucceedAlways(Framework.Cluster
						.Nodes(3, 9210)
						.MasterEligable(9210, 9211)
						.ClientCalls(r => r.SucceedAlways())
						.Sniff(r => r.SucceedAlways())
					))
				))
				.SniffingConnectionPool()
				.Settings(s => s.DisablePing().SniffOnStartup(false))
			);

			audit = await audit.TraceCalls(
				new ClientCall {
					{ HealthyResponse, 9200 },
					{ pool =>  pool.Nodes.Count.Should().Be(5) }
				},
				new ClientCall {
					{ BadResponse, 9201},
					/** We assert we do a sniff on our first known master node 9202 */
					{ SniffOnFail },
					{ SniffSuccess, 9202},
					{ HealthyResponse, 9200},
					/** Our pool should now have three nodes */
					{ pool =>  pool.Nodes.Count.Should().Be(3) }
				},
				new ClientCall {
					{ BadResponse, 9201},
					/** We assert we do a sniff on the first master node in our updated cluster */
					{ SniffOnFail },
					{ SniffSuccess, 9200},
					{ HealthyResponse, 9210},
					{ pool =>  pool.Nodes.Count.Should().Be(3) }
				},
				new ClientCall { { HealthyResponse, 9211 } },
				new ClientCall { { HealthyResponse, 9212 } },
				new ClientCall { { HealthyResponse, 9210 } },
				new ClientCall { { HealthyResponse, 9211 } },
				new ClientCall { { HealthyResponse, 9212 } },
				new ClientCall { { HealthyResponse, 9210 } },
				new ClientCall { { HealthyResponse, 9211 } },
				new ClientCall { { HealthyResponse, 9212 } },
				new ClientCall { { HealthyResponse, 9210 } }
			);
		}
		[U] public async Task DoesASniffAfterConnectionFailureOnPing()
		{
			/** Here we set up our cluster exactly the same as in the previous test,
			* only now we enable pinging (the default) and make the ping fail
			*/
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(5)
				.MasterEligable(9202, 9203, 9204)
				.Ping(r => r.OnPort(9201).Fails(Once))
				.Sniff(p => p.SucceedAlways(Framework.Cluster
					.Nodes(3)
					.MasterEligable(9200, 9202)
					.Ping(r => r.OnPort(9201).Fails(Once))
					.Sniff(s => s.SucceedAlways(Framework.Cluster
						.Nodes(3, 9210)
						.MasterEligable(9210, 9211)
						.Ping(r => r.SucceedAlways())
						.Sniff(r => r.SucceedAlways())
					))
				))
				.SniffingConnectionPool()
				.Settings(s => s.SniffOnStartup(false))
			);

			audit = await audit.TraceCalls(
				new ClientCall {
					{ PingSuccess, 9200 },
					{ HealthyResponse, 9200 },
					{ pool =>  pool.Nodes.Count.Should().Be(5) }
				},
				new ClientCall {
					{ PingFailure, 9201},
					/** We assert we do a sniff on our first known master node 9202 */
					{ SniffOnFail },
					{ SniffSuccess, 9202},
					{ PingSuccess, 9200},
					{ HealthyResponse, 9200},
					/** Our pool should now have three nodes */
					{ pool =>  pool.Nodes.Count.Should().Be(3) }
				},
				new ClientCall {
					{ PingFailure, 9201},
					/** We assert we do a sniff on the first master node in our updated cluster */
					{ SniffOnFail },
					{ SniffSuccess, 9200},
					{ PingSuccess, 9210},
					{ HealthyResponse, 9210},
					{ pool =>  pool.Nodes.Count.Should().Be(3) }
				},
				new ClientCall { { PingSuccess, 9211 }, { HealthyResponse, 9211 } },
				new ClientCall { { PingSuccess, 9212 }, { HealthyResponse, 9212 } },
				/** 9210 was already pinged after the sniff returned the new nodes */
				new ClientCall { { HealthyResponse, 9210 } },
				new ClientCall { { HealthyResponse, 9211 } },
				new ClientCall { { HealthyResponse, 9212 } },
				new ClientCall { { HealthyResponse, 9210 } }
			);
		}
		public async Task SniffPrefersMasterNodesButStillFailsOver()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(new[] {
					new Node(new Uri("http://localhost:9200")) { MasterEligable = true },
					new Node(new Uri("http://localhost:9201")) { MasterEligable = true },
					new Node(new Uri("http://localhost:9202")) { MasterEligable = false },
				})
				.Sniff(s => s.Fails(Always))
				.Sniff(s => s.OnPort(9202).Succeeds(Always))
				.SniffingConnectionPool()
				.AllDefaults()
			);

			await audit.TraceCall(new ClientCall {
				{ SniffOnStartup},
				{ SniffFailure, 9200},
				{ SniffFailure, 9201},
				{ SniffSuccess, 9202},
				{ PingSuccess, 9200},
				{ HealthyResponse, 9200}
			});
		}
		public async Task FallsOverDeadNodes()
		{
			/** A cluster with 4 nodes where client calls to the second and fourth nodes always fail */
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(4)
				.ClientCalls(p => p.Succeeds(Always))
				.ClientCalls(p => p.OnPort(9201).FailAlways())
				.ClientCalls(p => p.OnPort(9203).FailAlways())
				.StaticConnectionPool()
				.Settings(p=>p.DisablePing())
			);

			await audit.TraceCalls(
				/** The first call goes to 9200 which succeeds */
				new ClientCall {
					{ HealthyResponse, 9200},
					{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(0) }
				},
				/** The 2nd call goes to 9201 because it's used for the first time.
				* It fails, so we fail over to node 9202 */
				new ClientCall {
					{ BadResponse, 9201},
					{ HealthyResponse, 9202},
					/** Finally we assert that the connection pool has one node that is marked as dead */
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(1) }
				},
				/** The next call goes to 9203, which fails, so we fail over again */
				new ClientCall {
					{ BadResponse, 9203},
					{ HealthyResponse, 9200},
					{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) }
				},
				new ClientCall {
					{ HealthyResponse, 9202},
					{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) }
				},
				new ClientCall {
					{ HealthyResponse, 9200},
					{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) }
				},
				new ClientCall {
					{ HealthyResponse, 9202},
					{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) }
				},
				new ClientCall {
					{ HealthyResponse, 9200},
					{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(2) }
				}
			);
		}
		public async Task PicksADifferentNodeEachTimeAnodeIsDown()
		{
			/** A cluster with 4 nodes where every client call fails */
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(4)
				.ClientCalls(p => p.Fails(Always))
				.StaticConnectionPool()
				.Settings(p=>p.DisablePing())
			);

			await audit.TraceCalls(
				/** All the calls fail */
				new ClientCall {
					{ BadResponse, 9200},
					{ BadResponse, 9201},
					{ BadResponse, 9202},
					{ BadResponse, 9203},
					{ MaxRetriesReached },
					{ pool => pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) }
				},
				/** After all our registered nodes are marked dead we want to sample a single dead node
				* each time to quickly see if the cluster is back up. We do not want to retry all 4
				* nodes
				*/
				new ClientCall {
					{ AllNodesDead },
					{ Resurrection, 9201},
					{ BadResponse, 9201},
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) }
				},
				new ClientCall {
					{ AllNodesDead },
					{ Resurrection, 9202},
					{ BadResponse, 9202},
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) }
				},
				new ClientCall {
					{ AllNodesDead },
					{ Resurrection, 9203},
					{ BadResponse, 9203},
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) }
				},
				new ClientCall {
					{ AllNodesDead },
					{ Resurrection, 9200},
					{ BadResponse, 9200},
					{ pool =>  pool.Nodes.Where(n=>!n.IsAlive).Should().HaveCount(4) }
				}
			);
		}
		public async Task SniffTriesAllNodes()
		{
			var audit = new Auditor(() => Framework.Cluster
				.Nodes(10)
				.Sniff(s => s.Fails(Always))
				.Sniff(s => s.OnPort(9209).Succeeds(Always))
				.SniffingConnectionPool()
				.AllDefaults()
			);

			await audit.TraceCall(new ClientCall {
				{ SniffOnStartup},
				{ SniffFailure, 9200},
				{ SniffFailure, 9201},
				{ SniffFailure, 9202},
				{ SniffFailure, 9203},
				{ SniffFailure, 9204},
				{ SniffFailure, 9205},
				{ SniffFailure, 9206},
				{ SniffFailure, 9207},
				{ SniffFailure, 9208},
				{ SniffSuccess, 9209},
				{ PingSuccess, 9200},
				{ HealthyResponse, 9200}
			});
		}