public void Update() { var id = CreateTimeEntry(new TimeEntry(222, 333, Convert.ToDateTime("01/08/2008"), 24)); var updated = new TimeEntry(999, 888, Convert.ToDateTime("08/12/2012"), 2); var putResponse = TestHttpClient.PutAsync($"/time-entries/{id}", SerializePayload(updated)).Result; var getResponse = TestHttpClient.GetAsync($"/time-entries/{id}").Result; var getAllResponse = TestHttpClient.GetAsync("/time-entries").Result; Assert.Equal(HttpStatusCode.OK, putResponse.StatusCode); Assert.Equal(HttpStatusCode.OK, getResponse.StatusCode); Assert.Equal(HttpStatusCode.OK, getAllResponse.StatusCode); var getAllResponseBody = JArray.Parse(getAllResponse.Content.ReadAsStringAsync().Result); Assert.Equal(1, getAllResponseBody.Count); Assert.Equal(id, getAllResponseBody[0]["id"].ToObject <int>()); Assert.Equal(999, getAllResponseBody[0]["projectId"].ToObject <long>()); Assert.Equal(888, getAllResponseBody[0]["userId"].ToObject <long>()); Assert.Equal("08/12/2012 00:00:00", getAllResponseBody[0]["date"].ToObject <string>()); Assert.Equal(2, getAllResponseBody[0]["hours"].ToObject <int>()); var getResponseBody = JObject.Parse(getResponse.Content.ReadAsStringAsync().Result); Assert.Equal(id, getResponseBody["id"].ToObject <int>()); Assert.Equal(999, getResponseBody["projectId"].ToObject <long>()); Assert.Equal(888, getResponseBody["userId"].ToObject <long>()); Assert.Equal("08/12/2012 00:00:00", getResponseBody["date"].ToObject <string>()); Assert.Equal(2, getResponseBody["hours"].ToObject <int>()); }
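// The time-entry tests here (Update above, and the Delete, Read, and List tests below) rely on
// CreateTimeEntry and SerializePayload helpers that are not shown in these snippets. The sketch
// below is only a plausible reconstruction: it assumes the API exposes POST /time-entries and
// echoes the created entity back with its generated id, and that the JSON contract is camel-cased
// (the helper names and the "id" field come from the assertions; everything else is an assumption).
// Requires: using System.Text; using Newtonsoft.Json; using Newtonsoft.Json.Serialization; using Newtonsoft.Json.Linq;
private StringContent SerializePayload(TimeEntry entry)
{
    // Serialize with camel-case property names to match the "projectId"/"userId" fields asserted above.
    var json = JsonConvert.SerializeObject(entry, new JsonSerializerSettings
    {
        ContractResolver = new CamelCasePropertyNamesContractResolver()
    });

    return new StringContent(json, Encoding.UTF8, "application/json");
}

private long CreateTimeEntry(TimeEntry entry)
{
    // POST the entry and read the server-generated id out of the response body.
    var response = TestHttpClient.PostAsync("/time-entries", SerializePayload(entry)).Result;
    response.EnsureSuccessStatusCode();

    var body = JObject.Parse(response.Content.ReadAsStringAsync().Result);
    return body["id"].ToObject<long>();
}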
public async Task Get_ReturnsGatewayResponse_WhenCreatedInDatabase() { // Arrange var gatewaysData = GatewayUtilities.GenerateGatewaysRequestDataWithUniqueNames(23) .OrderBy(x => x.Name) .ToArray(); var createdGateways = (await GatewayUtilities.CreateGatewaysAsync(TestHttpClient, gatewaysData)) .OrderBy(x => x.Name) .ToArray(); // Act var gatewaysResponses = await Task.WhenAll( createdGateways.Select(x => TestHttpClient.GetAsync($"{GatewayApiRoot}/{x.Id}")) ); var gateways = (await Task.WhenAll( gatewaysResponses.Select(x => x.Content.ReadAsAsync <GatewayResponse>()) )) .OrderBy(x => x.Name) .ToArray(); // Assert gatewaysResponses.Select(x => x.StatusCode) .Should().AllBeEquivalentTo(HttpStatusCode.OK); var valueTuples = gateways .Zip(gatewaysData) .Zip(createdGateways, (x, cg) => (x.First, x.Second, cg)); foreach (var(gateway, data, createdGateway) in valueTuples) { gateway.Id.Should().Be(createdGateway.Id); gateway.Name.Should().Be(data.Name).And.Be(createdGateway.Name); } }
public void HasHealth() { var response = TestHttpClient.GetAsync("/health").Result; var responseBody = JObject.Parse(response.Content.ReadAsStringAsync().Result); Assert.Equal(HttpStatusCode.OK, response.StatusCode); Assert.Equal("UP", responseBody["status"].ToObject <string>()); Assert.Equal("UP", responseBody["diskSpace"]["status"].ToObject <string>()); Assert.Equal("UP", responseBody["timeEntry"]["status"].ToObject <string>()); }
public async Task DependencyInjectionTest() { var response = await TestHttpClient.GetAsync("http://testserver/api/simpleopen"); Assert.True(response.IsSuccessStatusCode); var result = await response.Content.ReadAsStringAsync(); Assert.Contains("test", result); }
public async Task TestNormalController() { var response = await TestHttpClient.GetAsync("http://testserver/normal/ok"); Assert.True(response.IsSuccessStatusCode); var result = await response.Content.ReadAsStringAsync(); Assert.Contains("ok", result); }
public async Task ReturnsMessage() { var response = await TestHttpClient.GetAsync("/"); response.EnsureSuccessStatusCode(); var expectedResponse = "hello from integration test"; var actualResponse = await response.Content.ReadAsStringAsync(); Assert.Equal(expectedResponse, actualResponse); }
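// The simple endpoint tests above use a TestHttpClient field provided by the test fixture. One
// common way to produce such a client for ASP.NET Core integration tests is shown below; this is
// a minimal sketch assuming the Microsoft.AspNetCore.Mvc.Testing package and an application entry
// point named Startup (the base-class and type names here are assumptions, not taken from the
// samples above).
public class IntegrationTestBase : IClassFixture<WebApplicationFactory<Startup>>
{
    protected readonly HttpClient TestHttpClient;

    public IntegrationTestBase(WebApplicationFactory<Startup> factory)
    {
        // CreateClient boots the app on an in-memory TestServer, so requests such as
        // GetAsync("/") or GetAsync("http://testserver/...") never leave the process.
        TestHttpClient = factory.CreateClient();
    }
}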
public void Delete() { var id = CreateTimeEntry(new TimeEntry(222, 333, Convert.ToDateTime("01/08/2008"), 24)); var deleteResponse = TestHttpClient.DeleteAsync($"/time-entries/{id}").Result; var getResponse = TestHttpClient.GetAsync($"/time-entries/{id}").Result; var getAllResponse = TestHttpClient.GetAsync("/time-entries").Result; Assert.Equal(HttpStatusCode.NoContent, deleteResponse.StatusCode); Assert.Equal(HttpStatusCode.NotFound, getResponse.StatusCode); Assert.Equal("[]", getAllResponse.Content.ReadAsStringAsync().Result); }
public async Task ReturnsCfEnv() { var response = await TestHttpClient.GetAsync("/env"); response.EnsureSuccessStatusCode(); var expectedResponse = @"{""port"":""123"",""memoryLimit"":""512M"",""cfInstanceIndex"":""1"",""cfInstanceAddr"":""127.0.0.1""}"; var actualResponse = await response.Content.ReadAsStringAsync(); Assert.Equal(expectedResponse, actualResponse); }
public void Read() { var id = CreateTimeEntry(new TimeEntry(999, 1010, Convert.ToDateTime("10/10/2015"), 9)); var response = TestHttpClient.GetAsync($"/time-entries/{id}").Result; var responseBody = JObject.Parse(response.Content.ReadAsStringAsync().Result); Assert.Equal(HttpStatusCode.OK, response.StatusCode); Assert.Equal(id, responseBody["id"].ToObject <long>()); Assert.Equal(999, responseBody["projectId"].ToObject <long>()); Assert.Equal(1010, responseBody["userId"].ToObject <long>()); Assert.Equal("10/10/2015 00:00:00", responseBody["date"].ToObject <string>()); Assert.Equal(9, responseBody["hours"].ToObject <int>()); }
public async Task GetAll_ReturnsEmptyPagedData_WhenDatabaseIsEmpty() { // Act var response = await TestHttpClient.GetAsync(DeviceApiRoot); // Assert var pagedData = await response.Content.ReadAsAsync <PagedData <DeviceResponse> >(); response.StatusCode.Should().Be(HttpStatusCode.OK); pagedData.Should().NotBeNull(); pagedData.Items.Should().HaveCount(0); pagedData.TotalCount.Should().Be(0); pagedData.PageSize.Should().Be(10); pagedData.CurrentPage.Should().Be(1); }
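// The paging assertions in these device and gateway tests read Items, TotalCount, PageSize and
// CurrentPage off the deserialized response. Those member names come straight from the assertions;
// the rest of this DTO is a guess at the envelope the service returns.
public class PagedData<T>
{
    public List<T> Items { get; set; } = new List<T>();
    public int TotalCount { get; set; }
    public int PageSize { get; set; }
    public int CurrentPage { get; set; }
}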
public async Task GetAll_ReturnsPagedData_WhenCreatedInDatabase() { // Arrange await DeviceUtilities.CreateDevicesAsync(TestHttpClient, 9); // Act var response = await TestHttpClient.GetAsync(DeviceApiRoot); // Assert var pagedData = await response.Content.ReadAsAsync <PagedData <DeviceResponse> >(); response.StatusCode.Should().Be(HttpStatusCode.OK); pagedData.Should().NotBeNull(); pagedData.Items.Should().HaveCount(9); pagedData.TotalCount.Should().Be(9); pagedData.PageSize.Should().Be(10); pagedData.CurrentPage.Should().Be(1); }
/// <summary> /// Waits for a remote proxy and origin to report being ready. /// </summary> /// <param name="baseUri">The base URI.</param> /// <param name="hostname">The target hostname.</param> private async Task WaitUntilReadyAsync(Uri baseUri, string hostname) { // Delay for 10 seconds so that any DNS entries cached by HAProxy will // have a chance to expire. By default, we configure the DNS hold time // to be 5 seconds, so waiting for 10 seconds should be more than enough. await Task.Delay(TimeSpan.FromSeconds(10)); // Allow self-signed certificates for HTTPS tests. var handler = new HttpClientHandler() { ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator }; using (var client = new TestHttpClient(disableConnectionReuse: true, handler: handler, disposeHandler: true)) { client.BaseAddress = baseUri; client.DefaultRequestHeaders.Host = hostname; await NeonHelper.WaitForAsync( async() => { try { var response = await client.GetAsync("/"); return(response.IsSuccessStatusCode || response.StatusCode == HttpStatusCode.Redirect); } catch (HttpRequestException) { // We're going to ignore these because this probably // indicates that HAProxy hasn't started a listener // on the port yet. return(false); } }, timeout : TimeSpan.FromSeconds(60), pollTime : TimeSpan.FromMilliseconds(100)); } }
public async Task Get_ReturnsDeviceResponse_WhenCreatedInDatabase() { // Arrange var afterCreatedDateTime = DateTime.UtcNow; var gatewayIps = await GenerateGatewayIpsAsync(5); var devicesData = DeviceUtilities.GenerateDevicesRequestDataWithUniqueVendors(9, gatewayIps) .OrderBy(x => x.Vendor) .ToArray(); var createdDevices = (await DeviceUtilities.CreateDevicesAsync(TestHttpClient, devicesData)) .OrderBy(x => x.Vendor) .ToArray(); // Act var devicesResponses = await Task.WhenAll( createdDevices.Select(x => TestHttpClient.GetAsync($"{DeviceApiRoot}/{x.UID}")) ); var devices = (await Task.WhenAll( devicesResponses.Select(x => x.Content.ReadAsAsync <DeviceResponse>()) )) .OrderBy(x => x.Vendor) .ToArray(); // Assert devicesResponses.Select(x => x.StatusCode) .Should().AllBeEquivalentTo(HttpStatusCode.OK); var valueTuples = devices .Zip(devicesData) .Zip(createdDevices, (x, cg) => (x.First, x.Second, cg)); foreach (var(device, data, createdDevice) in valueTuples) { device.UID.Should().Be(createdDevice.UID); device.Vendor.Should().Be(data.Vendor).And.Be(createdDevice.Vendor); device.Status.Should().Be(data.Status).And.Be(createdDevice.Status); device.GatewayId.Should().Be(data.GatewayId).And.Be(createdDevice.GatewayId); Math.Abs((device.DateCreated - afterCreatedDateTime).TotalSeconds).Should().BeLessOrEqualTo(20); } }
public async Task GetAll_ReturnsPagedData_WhenCreatedInDatabase() { // Arrange var gatewaysData = GatewayUtilities.GenerateGatewaysRequestDataWithUniqueNames(23) .OrderBy(x => x.Name) .ToArray(); await GatewayUtilities.CreateGatewaysAsync(TestHttpClient, gatewaysData); // Act var response = await TestHttpClient.GetAsync(GatewayApiRoot); // Assert var pagedData = await response.Content.ReadAsAsync <PagedData <GatewayListItemResponse> >(); response.StatusCode.Should().Be(HttpStatusCode.OK); pagedData.Should().NotBeNull(); pagedData.Items.Should().HaveCount(10); pagedData.TotalCount.Should().Be(23); pagedData.PageSize.Should().Be(10); pagedData.CurrentPage.Should().Be(1); }
public void List() { var id1 = CreateTimeEntry(new TimeEntry(222, 333, Convert.ToDateTime("01/08/2008"), 24)); var id2 = CreateTimeEntry(new TimeEntry(444, 555, Convert.ToDateTime("02/10/2008"), 6)); var response = TestHttpClient.GetAsync("/time-entries").Result; var responseBody = JArray.Parse(response.Content.ReadAsStringAsync().Result); Assert.Equal(HttpStatusCode.OK, response.StatusCode); Assert.Equal(id1, responseBody[0]["id"].ToObject <int>()); Assert.Equal(222, responseBody[0]["projectId"].ToObject <long>()); Assert.Equal(333, responseBody[0]["userId"].ToObject <long>()); Assert.Equal("01/08/2008 00:00:00", responseBody[0]["date"].ToObject <string>()); Assert.Equal(24, responseBody[0]["hours"].ToObject <int>()); Assert.Equal(id2, responseBody[1]["id"].ToObject <int>()); Assert.Equal(444, responseBody[1]["projectId"].ToObject <long>()); Assert.Equal(555, responseBody[1]["userId"].ToObject <long>()); Assert.Equal("02/10/2008 00:00:00", responseBody[1]["date"].ToObject <string>()); Assert.Equal(6, responseBody[1]["hours"].ToObject <int>()); }
/// <summary> /// Verify that we can create an HTTPS traffic manager rule that pre-warms items. /// </summary> /// <param name="testName">Simple name (without spaces) used to ensure that URIs cached for different tests won't conflict.</param> /// <param name="proxyPort">The inbound proxy port.</param> /// <param name="network">The proxy network.</param> /// <param name="trafficManager">The traffic manager.</param> /// <param name="serviceName">Optionally specifies the backend service name (defaults to <b>vegomatic</b>).</param> /// <returns>The tracking <see cref="Task"/>.</returns> private async Task TestHttpsCacheWarming(string testName, int proxyPort, string network, TrafficManager trafficManager, string serviceName = "vegomatic") { // Append a GUID to the test name to ensure that we won't // conflict with what any previous test runs may have loaded // into the cache. testName += "-" + Guid.NewGuid().ToString("D"); // Verify that we can create an HTTP traffic manager rule for a // site on the proxy port using a specific hostname and then // verify that warming actually works by spinning up a [vegomatic] // based service to accept the traffic. // // We'll do this by specifying warm and cold URIs that both enable // caching. We'll specify the warm URI as a warm target but not // the cold URI. Then we'll publish the rule and wait for a bit // to allow it to stabilize and for the [neon-proxy-cache] to // load the warm URI. // // Finally, we'll verify that this worked by fetching both URIs. // The warm URI should indicate that it came from the cache and // the cold URI should not be cached. var manager = hive.GetReachableManager(); var guid = Guid.NewGuid().ToString("D"); // Avoid conflicts with previous test runs var expireSeconds = 60; var warmUri = new Uri($"https://{testHostname}:{proxyPort}/{guid}/warm?body=text:warm&Expires={expireSeconds}"); var coldUri = new Uri($"https://{testHostname}:{proxyPort}/{guid}/cold?body=text:cold&Expires={expireSeconds}"); manager.Connect(); // Allow self-signed certificates. var handler = new HttpClientHandler() { ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator }; using (var client = new TestHttpClient(disableConnectionReuse: true, handler: handler, disposeHandler: true)) { // Add the test certificate. hive.Certificate.Set("test-load-balancer", certificate); // Setup the client to query the [vegomatic] service through the // proxy without needing to configure a hive DNS entry. client.BaseAddress = new Uri($"https://{manager.PrivateAddress}:{proxyPort}/"); client.DefaultRequestHeaders.Host = testHostname; // Configure the traffic manager rule. var rule = new TrafficHttpRule() { Name = "vegomatic", CheckExpect = "status 200", CheckSeconds = 1, }; rule.Cache = new TrafficHttpCache() { Enabled = true }; rule.Cache.WarmTargets.Add( new TrafficWarmTarget() { UpdateSeconds = 1.0, Uri = warmUri.ToString() }); rule.Frontends.Add( new TrafficHttpFrontend() { Host = testHostname, ProxyPort = proxyPort, CertName = "test-load-balancer" }); rule.Backends.Add( new TrafficHttpBackend() { Server = serviceName, Port = 80 }); trafficManager.SetRule(rule); // Spin up a [vegomatic] service instance. manager.SudoCommand($"docker service create --name vegomatic --network {network} --replicas 1 {vegomaticImage} test-server").EnsureSuccess(); await WaitUntilReadyAsync(client.BaseAddress, testHostname); // Wait a bit longer to ensure that the cache has had a chance to // warm the URI. 
await Task.Delay(TimeSpan.FromSeconds(5)); // Query for the warm and cold URIs and verify that the warm item was a // cache hit and the cold item was not. var warmResponse = await client.GetAsync(warmUri.PathAndQuery); var warmBody = (await warmResponse.Content.ReadAsStringAsync()).Trim(); var coldResponse = await client.GetAsync(coldUri.PathAndQuery); var coldBody = (await coldResponse.Content.ReadAsStringAsync()).Trim(); Assert.Equal(HttpStatusCode.OK, warmResponse.StatusCode); Assert.Equal("warm", warmBody); Assert.True(CacheHit(warmResponse)); Assert.Equal(HttpStatusCode.OK, coldResponse.StatusCode); Assert.Equal("cold", coldBody); Assert.False(CacheHit(coldResponse)); } }
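// The caching tests in this group call ViaVarnish() and CacheHit() helpers that are defined
// elsewhere in the suite. The sketch below shows one plausible implementation, assuming the
// proxy cache is Varnish and that it stamps responses with its conventional X-Varnish header
// and an X-Cache "HIT" marker; the real helpers may inspect different headers.
// Requires: using System; using System.Linq; using System.Net.Http;
private static bool ViaVarnish(HttpResponseMessage response)
{
    // Varnish normally adds an X-Varnish transaction header to every response it handles.
    return response.Headers.Contains("X-Varnish");
}

private static bool CacheHit(HttpResponseMessage response)
{
    // Many VCL configurations report cache hits via an X-Cache (or similar) header.
    return response.Headers.TryGetValues("X-Cache", out var values)
        && values.Any(v => v.IndexOf("HIT", StringComparison.OrdinalIgnoreCase) >= 0);
}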
/// <summary> /// Verify that we can create HTTPS traffic manager rules for a /// site on the proxy port using a specific hostname and various /// path prefixes and then verify that the traffic manager actually /// works by spinning up a [vegomatic] based service to accept the traffic. /// </summary> /// <param name="testName">Simple name (without spaces) used to ensure that URIs cached for different tests won't conflict.</param> /// <param name="proxyPort">The inbound proxy port.</param> /// <param name="network">The proxy network.</param> /// <param name="trafficManager">The traffic manager.</param> /// <param name="useCache">Optionally enable caching and verify.</param> /// <param name="serviceName">Optionally specifies the backend service name prefix (defaults to <b>vegomatic</b>).</param> /// <returns>The tracking <see cref="Task"/>.</returns> private async Task TestHttpsPrefix(string testName, int proxyPort, string network, TrafficManager trafficManager, bool useCache = false, string serviceName = "vegomatic") { // Append a GUID to the test name to ensure that we won't // conflict with what any previous test runs may have loaded // into the cache. testName += "-" + Guid.NewGuid().ToString("D"); // Verify that we can create an HTTP traffic manager rule for a // site on the proxy port using a specific hostname and then // verify that the traffic manager actually works by spinning // up a [vegomatic] based service to accept the traffic. var manager = hive.GetReachableManager(); var hostname = testHostname; manager.Connect(); // Allow self-signed certificates. var handler = new HttpClientHandler() { ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator }; using (var client = new TestHttpClient(disableConnectionReuse: true, handler: handler, disposeHandler: true)) { // Add the test certificate. hive.Certificate.Set("test-load-balancer", certificate); // Setup the client to query the [vegomatic] service through the // proxy without needing to configure a hive DNS entry. client.BaseAddress = new Uri($"https://{manager.PrivateAddress}:{proxyPort}/"); client.DefaultRequestHeaders.Host = hostname; // Create the traffic manager rules, one without a path prefix and // some others, some with intersecting prefixes so we can verify // that the longest prefixes are matched first. // // Each rule's backend will be routed to a service whose name // will be constructed from [testName] plus the prefix with the // slashes replaced with dashes. Each service will be configured // to return its name. var prefixes = new PrefixInfo[] { new PrefixInfo("/", $"{serviceName}"), new PrefixInfo("/foo/", $"{serviceName}-foo"), new PrefixInfo("/foo/bar/", $"{serviceName}-foo-bar"), new PrefixInfo("/foobar/", $"{serviceName}-foobar"), new PrefixInfo("/bar/", $"{serviceName}-bar") }; // Spin the services up first in parallel (for speed). Each of // these services will respond to requests with its service name. var tasks = new List <Task>(); foreach (var prefix in prefixes) { tasks.Add(Task.Run( () => { manager.SudoCommand($"docker service create --name {prefix.ServiceName} --network {network} --replicas 1 {vegomaticImage} test-server server-id={prefix.ServiceName}").EnsureSuccess(); })); } await NeonHelper.WaitAllAsync(tasks, TimeSpan.FromSeconds(30)); // Create the traffic manager rules. 
foreach (var prefix in prefixes) { var rule = new TrafficHttpRule() { Name = prefix.ServiceName, CheckExpect = "status 200", CheckSeconds = 1, }; if (useCache) { rule.Cache = new TrafficHttpCache() { Enabled = true }; } var frontend = new TrafficHttpFrontend() { Host = hostname, ProxyPort = proxyPort, CertName = "test-load-balancer" }; if (!string.IsNullOrEmpty(prefix.Path)) { frontend.PathPrefix = prefix.Path; } rule.Frontends.Add(frontend); rule.Backends.Add( new TrafficHttpBackend() { Server = prefix.ServiceName, Port = 80 }); trafficManager.SetRule(rule, deferUpdate: true); } trafficManager.Update(); // Wait for all of the services to report being ready. await NeonHelper.WaitForAsync( async() => { foreach (var prefix in prefixes) { try { var response = await client.GetAsync(prefix.Path); response.EnsureSuccessStatusCode(); } catch { return(false); } } return(true); }, timeout : TimeSpan.FromSeconds(60), pollTime : TimeSpan.FromSeconds(1)); // Give everything a chance to stabilize. await Task.Delay(TimeSpan.FromSeconds(5)); // Now verify that prefix rules route to the correct backend service. foreach (var prefix in prefixes) { var response = await client.GetAsync($"{prefix.Path}{testName}?expires=60"); response.EnsureSuccessStatusCode(); var body = await response.Content.ReadAsStringAsync(); Assert.Equal(prefix.ServiceName, body.Trim()); if (useCache) { // Verify that the request routed through Varnish. Assert.True(ViaVarnish(response)); // This is the first request using the globally unique [testName] // so it should not be a cache hit. Assert.False(CacheHit(response)); } } // If caching is enabled, perform the requests again to ensure that // we see cache hits. if (useCache) { foreach (var prefix in prefixes) { // Request the item again and verify that it was a cache hit. var response = await client.GetAsync($"{prefix.Path}{testName}?expires=60"); response.EnsureSuccessStatusCode(); var body = await response.Content.ReadAsStringAsync(); Assert.Equal(prefix.ServiceName, body.Trim()); Assert.True(CacheHit(response)); } } } }
/// <summary> /// Verify that we can create an HTTPS traffic manager rule for a /// site on the proxy port using a specific hostname and then /// verify that the traffic manager actually works by spinning /// up a [vegomatic] based service to accept the traffic. /// </summary> /// <param name="testName">Simple name (without spaces).</param> /// <param name="proxyPort">The inbound proxy port.</param> /// <param name="network">The proxy network.</param> /// <param name="trafficManager">The traffic manager.</param> /// <param name="useCache">Optionally enable caching and verify.</param> /// <param name="serviceName">Optionally specifies the backend service name (defaults to <b>vegomatic</b>).</param> /// <returns>The tracking <see cref="Task"/>.</returns> private async Task TestHttpsRule(string testName, int proxyPort, string network, TrafficManager trafficManager, bool useCache = false, string serviceName = "vegomatic") { // Verify that we can create an HTTPS traffic manager rule for a // site on the proxy port using a specific hostname and then // verify that the traffic manager actually works by spinning // up a [vegomatic] based service to accept the traffic. var queryCount = 100; var manager = hive.GetReachableManager(); manager.Connect(); // We need the test hostname to point to the manager's private address // so we can submit HTTPS requests there. hiveFixture.LocalMachineHosts.AddHostAddress(testHostname, manager.PrivateAddress.ToString()); // Allow self-signed certificates. var handler = new HttpClientHandler() { ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator }; using (var client = new TestHttpClient(disableConnectionReuse: true, handler: handler, disposeHandler: true)) { client.BaseAddress = new Uri($"https://{testHostname}:{proxyPort}/"); client.DefaultRequestHeaders.Host = testHostname; // Add the test certificate. hive.Certificate.Set("test-load-balancer", certificate); // Configure the traffic manager rule. var rule = new TrafficHttpRule() { Name = "vegomatic", CheckExpect = "status 200", CheckSeconds = 1 }; if (useCache) { rule.Cache = new TrafficHttpCache() { Enabled = true }; } rule.Frontends.Add( new TrafficHttpFrontend() { Host = testHostname, ProxyPort = proxyPort, CertName = "test-load-balancer" }); rule.Backends.Add( new TrafficHttpBackend() { Server = serviceName, Port = 80 }); trafficManager.SetRule(rule); // Spin up a single [vegomatic] service instance. manager.SudoCommand($"docker service create --name vegomatic --network {network} --replicas 1 {vegomaticImage} test-server").EnsureSuccess(); await WaitUntilReadyAsync(client.BaseAddress, testHostname); // Query the service several times to verify that we get a response and // also that all of the responses are the same (because we have only // a single [vegomatic] instance returning its UUID). // // We're going to use a different URL for each request so that we // won't see any cache hits. 
var uniqueResponses = new HashSet <string>(); var viaVarnish = false; var cacheHit = false; for (int i = 0; i < queryCount; i++) { var response = await client.GetAsync($"/{testName}/pass-1/{i}?body=server-id&expires=60"); Assert.Equal(HttpStatusCode.OK, response.StatusCode); if (ViaVarnish(response)) { viaVarnish = true; } if (CacheHit(response)) { cacheHit = true; } var body = await response.Content.ReadAsStringAsync(); if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } Assert.Single(uniqueResponses); if (useCache) { // [viaVarnish] should be TRUE because we're routing through the cache. Assert.True(viaVarnish); // [cacheHit] should be FALSE because we used a unique URI for each request. Assert.False(cacheHit); } else { // [viaVarnish] and [cacheHit] should both be FALSE because we're not caching. Assert.False(viaVarnish); Assert.False(cacheHit); } // Repeat the test if caching is enabled with the same URLs as last time and verify that // we see cache hits this time. if (useCache) { viaVarnish = false; cacheHit = false; for (int i = 0; i < queryCount; i++) { var response = await client.GetAsync($"/{testName}/pass-1/{i}?body=server-id&expires=60"); Assert.Equal(HttpStatusCode.OK, response.StatusCode); if (ViaVarnish(response)) { viaVarnish = true; } if (CacheHit(response)) { cacheHit = true; } var body = await response.Content.ReadAsStringAsync(); if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } Assert.True(viaVarnish); Assert.True(cacheHit); } // Spin up a second replica and repeat the query test to verify // that we see two unique responses. // // Note also that we need to perform these requests in parallel // to try to force HAProxy to establish more than one connection // to the [vegomatic] service. If we don't do this, HAProxy may // establish a single connection to one of the service instances // and keep sending traffic there resulting in us seeing only // one response UUID. manager.SudoCommand($"docker service update --replicas 2 {serviceName}").EnsureSuccess(); await WaitUntilReadyAsync(client.BaseAddress, testHostname); // Reset the response info and do the requests. uniqueResponses.Clear(); viaVarnish = false; cacheHit = false; var tasks = new List <Task>(); var uris = new List <string>(); for (int i = 0; i < queryCount; i++) { uris.Add($"/{testName}/pass-2/{i}?body=server-id&expires=60&delay=0.250"); } foreach (var uri in uris) { tasks.Add(Task.Run( async() => { var response = await client.GetAsync(uri); var body = await response.Content.ReadAsStringAsync(); Assert.Equal(HttpStatusCode.OK, response.StatusCode); if (ViaVarnish(response)) { viaVarnish = true; } if (CacheHit(response)) { cacheHit = true; } lock (uniqueResponses) { if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } })); } await NeonHelper.WaitAllAsync(tasks, TimeSpan.FromSeconds(30)); if (useCache) { // [viaVarnish] should be TRUE because we're routing through the cache. Assert.True(viaVarnish); // [cacheHit] should be FALSE because we used a unique URI for each request. Assert.False(cacheHit); } else { // [viaVarnish] and [cacheHit] should both be FALSE because we're not caching. Assert.False(viaVarnish); Assert.False(cacheHit); } Assert.Equal(2, uniqueResponses.Count); } }
public async Task GetCarsReturnsOk() { var statusCode = await client.GetAsync(); Assert.StrictEqual(HttpStatusCode.OK, statusCode); }
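// As written, the GetCars test above only compiles if "client" is a custom wrapper whose
// parameterless GetAsync() returns a status code. If "client" is a plain HttpClient, the test
// would instead need a request URI and would read the status off the response, roughly as below.
// The "/api/cars" route is hypothetical and chosen only for illustration.
public async Task GetCarsReturnsOk()
{
    // Hypothetical endpoint; substitute the real cars route exposed by the application under test.
    var response = await client.GetAsync("/api/cars");
    Assert.StrictEqual(HttpStatusCode.OK, response.StatusCode);
}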
/// <summary> /// Verify that we can create an HTTP traffic manager rule for a /// site on the proxy port using a specific hostname and then /// verify that the traffic manager actually works by spinning /// up a [vegomatic] based service to accept the traffic. /// </summary> /// <param name="testName">Simple name (without spaces) used to ensure that URIs cached for different tests won't conflict.</param> /// <param name="hostnames">The hostnames to be used for the rule frontends.</param> /// <param name="proxyPort">The inbound proxy port.</param> /// <param name="network">The proxy network.</param> /// <param name="trafficManager">The traffic manager.</param> /// <param name="useCache">Optionally enable caching and verify.</param> /// <param name="serviceName">Optionally specifies the backend service name (defaults to <b>vegomatic</b>).</param> /// <returns>The tracking <see cref="Task"/>.</returns> private async Task TestHttpMultipleFrontends(string testName, string[] hostnames, int proxyPort, string network, TrafficManager trafficManager, bool useCache = false, string serviceName = "vegomatic") { Covenant.Requires <ArgumentNullException>(hostnames != null && hostnames.Length > 0); // Append a GUID to the test name to ensure that we won't // conflict with what any previous test runs may have loaded // into the cache. testName += "-" + Guid.NewGuid().ToString("D"); // Verify that we can create an HTTP traffic manager rule for a // site on the proxy port using a specific hostname and then // verify that the traffic manager actually works by spinning // up a [vegomatic] based service to accept the traffic. var queryCount = 100; var manager = hive.GetReachableManager(); var proxyUri = new Uri($"http://{manager.PrivateAddress}:{proxyPort}/"); manager.Connect(); using (var client = new TestHttpClient(disableConnectionReuse: true)) { // Configure the traffic manager rule. var rule = new TrafficHttpRule() { Name = "vegomatic", CheckExpect = "status 200", CheckSeconds = 1, }; if (useCache) { rule.Cache = new TrafficHttpCache() { Enabled = true }; } foreach (var hostname in hostnames) { rule.Frontends.Add( new TrafficHttpFrontend() { Host = hostname, ProxyPort = proxyPort }); } rule.Backends.Add( new TrafficHttpBackend() { Server = serviceName, Port = 80 }); trafficManager.SetRule(rule); // Spin up a single [vegomatic] service instance. manager.SudoCommand($"docker service create --name vegomatic --network {network} --replicas 1 {vegomaticImage} test-server").EnsureSuccess(); await WaitUntilReadyAsync(proxyUri, hostnames.First()); // Query the service several times for each hostname to verify that we // get a response and also that all of the responses are the same // (because we have only a single [vegomatic] instance returning its UUID). // // We're going to use a different URL for each request so that we // won't see any cache hits. 
foreach (var hostname in hostnames) { var uniqueResponses = new HashSet <string>(); var viaVarnish = false; var cacheHit = false; client.BaseAddress = proxyUri; client.DefaultRequestHeaders.Host = hostname; for (int i = 0; i < queryCount; i++) { var response = await client.GetAsync($"/{testName}/{hostname}/pass-1/{i}?body=server-id&expires=60"); Assert.Equal(HttpStatusCode.OK, response.StatusCode); if (ViaVarnish(response)) { viaVarnish = true; } if (CacheHit(response)) { cacheHit = true; } var body = await response.Content.ReadAsStringAsync(); if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } Assert.Single(uniqueResponses); if (useCache) { // [viaVarnish] should be TRUE because we're routing through the cache. Assert.True(viaVarnish); // [cacheHit] should be FALSE because we used a unique URI for each request. Assert.False(cacheHit); } else { // [viaVarnish] and [cacheHit] should both be FALSE because we're not caching. Assert.False(viaVarnish); Assert.False(cacheHit); } // Repeat the test if caching is enabled with the same URLs as last time and verify that // we see cache hits this time. if (useCache) { viaVarnish = false; cacheHit = false; for (int i = 0; i < queryCount; i++) { var response = await client.GetAsync($"/{testName}/{hostname}/pass-1/{i}?body=server-id&expires=60"); Assert.Equal(HttpStatusCode.OK, response.StatusCode); if (ViaVarnish(response)) { viaVarnish = true; } if (CacheHit(response)) { cacheHit = true; } var body = await response.Content.ReadAsStringAsync(); if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } Assert.True(viaVarnish); Assert.True(cacheHit); } } // Spin up a second replica and repeat the query test for each hostname // to verify that we see two unique responses. // // Note that we're going to pass a new set of URLs to avoid having // any responses cached so we'll end up seeing all of the IDs. // // Note also that we need to perform these requests in parallel // to try to force Varnish to establish more than one connection // to the [vegomatic] service. If we don't do this, Varnish will // establish a single connection to one of the service instances // and keep sending traffic there resulting in us seeing only // one response UUID. manager.SudoCommand($"docker service update --replicas 2 vegomatic").EnsureSuccess(); await WaitUntilReadyAsync(proxyUri, hostnames.First()); foreach (var hostname in hostnames) { var uniqueResponses = new HashSet <string>(); var viaVarnish = false; var cacheHit = false; var tasks = new List <Task>(); var uris = new List <string>(); client.BaseAddress = proxyUri; client.DefaultRequestHeaders.Host = hostname; for (int i = 0; i < queryCount; i++) { uris.Add($"/{testName}/pass-2/{i}?body=server-id&expires=60&delay=0.250"); } foreach (var uri in uris) { tasks.Add(Task.Run( async() => { var response = await client.GetAsync(uri); var body = await response.Content.ReadAsStringAsync(); Assert.Equal(HttpStatusCode.OK, response.StatusCode); if (ViaVarnish(response)) { viaVarnish = true; } if (CacheHit(response)) { cacheHit = true; } lock (uniqueResponses) { if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } })); } await NeonHelper.WaitAllAsync(tasks, TimeSpan.FromSeconds(30)); if (useCache) { // [viaVarnish] should be TRUE because we're routing through the cache. Assert.True(viaVarnish); // [cacheHit] should be FALSE because we used a unique URI for each request. Assert.False(cacheHit); } else { // [viaVarnish] and [cacheHit] should both be FALSE because we're not caching. 
Assert.False(viaVarnish); Assert.False(cacheHit); } Assert.Equal(2, uniqueResponses.Count); } } }
public void HasInfo() { var response = TestHttpClient.GetAsync("/info").Result; Assert.Equal(HttpStatusCode.OK, response.StatusCode); }
public async Task CreateNewUser_valid() { var testHttpClient = new TestHttpClient(); // login admin var loginResponseAdmin = await testHttpClient.PostLoginAsync(_adminValidLoginRequest); var loginBodyAdmin = loginResponseAdmin.Item2; // create user var createUserResponse = await testHttpClient.PostAsync <User>("api/v1/users", _user, loginBodyAdmin.Token); var createUserStatusCode = createUserResponse.Item1; var createUserPostResult = createUserResponse.Item2; Assert.Equal(HttpStatusCode.OK, createUserStatusCode); Assert.Equal(0, createUserPostResult.ErrorCode); Assert.Equal(1, createUserPostResult.ManipulatedAmount(typeof(User), DataLibrary.OperationType.Create)); Assert.NotNull(createUserPostResult.ManipulatedEntity.Id); //----------------------------------------------------------- // put new user in the creators group var createGroupMemberResponse = await testHttpClient.PostAsync <GroupMember>("api/v1/groupmembers", new GroupMember(3, createUserPostResult.ManipulatedEntity.Id), loginBodyAdmin.Token); var createGroupMemberStatusCode = createGroupMemberResponse.Item1; var createGroupMemberResult = createGroupMemberResponse.Item2; Assert.Equal(HttpStatusCode.OK, createGroupMemberStatusCode); Assert.Equal(0, createGroupMemberResult.ErrorCode); Assert.Equal(1, createGroupMemberResult.ManipulatedAmount(typeof(GroupMember), DataLibrary.OperationType.Create)); Assert.Equal(3, createGroupMemberResult.ManipulatedEntity.GroupId); //----------------------------------------------------------- // create new document type var createDocumentTypeResponse = await testHttpClient.PostAsync <DocumentType>("api/v1/documentTypes", new DocumentType("E-Book", null), loginBodyAdmin.Token); var createDocumentTypeStatusCode = createDocumentTypeResponse.Item1; var createDocumentTypePostResult = createDocumentTypeResponse.Item2; Assert.Equal(HttpStatusCode.OK, createDocumentTypeStatusCode); Assert.Equal(0, createDocumentTypePostResult.ErrorCode); Assert.Equal(1, createDocumentTypePostResult.ManipulatedAmount(typeof(DocumentType), DataLibrary.OperationType.Create)); Assert.NotNull(createDocumentTypePostResult.ManipulatedEntity.Id); //----------------------------------------------------------- // login new user var loginResponseCreator = await testHttpClient.PostLoginAsync(new Request() { UserName = _user.UserName, Password = _user.Password }); var loginBodyCreator = loginResponseCreator.Item2; //----------------------------------------------------------- // new user creates new document var createDocumentResponse = await testHttpClient.PostAsync <Document>("api/v1/documents", new Document(createDocumentTypePostResult.ManipulatedEntity.Id.Value, "Der Duden"), loginBodyCreator.Token); var createDocumentStatusCode = createDocumentResponse.Item1; var createDocumentPostResult = createDocumentResponse.Item2; Assert.Equal(HttpStatusCode.OK, createDocumentStatusCode); Assert.Equal(0, createDocumentPostResult.ErrorCode); Assert.Equal(1, createDocumentPostResult.ManipulatedAmount(typeof(Document), DataLibrary.OperationType.Create)); Assert.NotNull(createDocumentPostResult.ManipulatedEntity.Id); //----------------------------------------------------------- // retrieve created document as created user var retrieveDocumentResponse = await testHttpClient.GetAsync <Document>($"api/v1/documents?docId={createDocumentPostResult.ManipulatedEntity.Id.Value}", loginBodyCreator.Token); var retrievedDocumentStatusCode = retrieveDocumentResponse.Item1; var retrievedDocumentGetResult = retrieveDocumentResponse.Item2; 
Assert.Equal(HttpStatusCode.OK, retrievedDocumentStatusCode); Assert.Equal(0, retrievedDocumentGetResult.ErrorCode); Assert.Single(retrievedDocumentGetResult.Resultset); }
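// The user/document workflow above uses a custom TestHttpClient whose PostLoginAsync, PostAsync<T>
// and GetAsync<T> methods return a (status code, deserialized result) tuple and accept a bearer
// token. A rough sketch of what one of those methods might look like is below; GetResult<TEntity>
// stands in for whatever result envelope the project defines (the ErrorCode/Resultset members used
// above), and everything apart from the call shape inferred from the test is an assumption.
// Requires: using System.Net; using System.Net.Http; using System.Net.Http.Headers; using Newtonsoft.Json;
public async Task<(HttpStatusCode, GetResult<TEntity>)> GetAsync<TEntity>(string uri, string token)
{
    var request = new HttpRequestMessage(HttpMethod.Get, uri);

    // Attach the JWT returned by the login call as a standard bearer token.
    request.Headers.Authorization = new AuthenticationHeaderValue("Bearer", token);

    // _httpClient is the wrapper's underlying HttpClient instance (hypothetical field name).
    var response = await _httpClient.SendAsync(request);
    var json = await response.Content.ReadAsStringAsync();

    return (response.StatusCode, JsonConvert.DeserializeObject<GetResult<TEntity>>(json));
}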
/// <summary> /// Generates a traffic manager rule for each <see cref="Redirection"/> passed that /// will redirect from one URI to another. /// </summary> /// <param name="trafficManager">The target traffic manager.</param> /// <param name="testName">Used to name the traffic manager rules.</param> /// <param name="singleRule"> /// Pass <c>true</c> to test a single rule with all of the redirections or /// <c>false</c> to test with one redirection per rule. /// </param> /// <param name="redirections">The redirections.</param> private async Task TestRedirect(TrafficManager trafficManager, string testName, bool singleRule, params Redirection[] redirections) { var manager = hive.GetReachableManager(); // We need local DNS mappings for each of the URI hosts to target a hive node. var hosts = new HashSet <string>(StringComparer.InvariantCultureIgnoreCase); foreach (var redirect in redirections) { if (!hosts.Contains(redirect.FromUri.Host)) { hosts.Add(redirect.FromUri.Host); } if (!hosts.Contains(redirect.ToUri.Host)) { hosts.Add(redirect.ToUri.Host); } } foreach (var host in hosts) { hiveFixture.LocalMachineHosts.AddHostAddress(host, manager.PrivateAddress.ToString(), deferCommit: true); } hiveFixture.LocalMachineHosts.Commit(); // Generate and upload a self-signed certificate for each redirect host that // uses HTTPS and upload these to the hive. Each certificate will be named // the same as the hostname. var hostToCertificate = new Dictionary <string, TlsCertificate>(StringComparer.InvariantCultureIgnoreCase); foreach (var redirect in redirections.Where(r => r.FromUri.Scheme == "https")) { var host = redirect.FromUri.Host; if (hostToCertificate.ContainsKey(host)) { continue; } hostToCertificate[host] = TlsCertificate.CreateSelfSigned(host); } foreach (var item in hostToCertificate) { hive.Certificate.Set(item.Key, item.Value); } // Create the traffic manager rule(s). if (singleRule) { var rule = new TrafficHttpRule() { Name = testName, }; foreach (var redirect in redirections) { var frontend = new TrafficHttpFrontend() { Host = redirect.FromUri.Host, ProxyPort = redirect.FromUri.Port, RedirectTo = redirect.ToUri }; if (redirect.FromUri.Scheme == "https") { frontend.CertName = redirect.FromUri.Host; } rule.Frontends.Add(frontend); } trafficManager.SetRule(rule); } else { var redirectIndex = 0; foreach (var redirect in redirections) { var rule = new TrafficHttpRule() { Name = $"{testName}-{redirectIndex}", }; var frontend = new TrafficHttpFrontend() { Host = redirect.FromUri.Host, ProxyPort = redirect.FromUri.Port, RedirectTo = redirect.ToUri }; if (redirect.FromUri.Scheme == "https") { frontend.CertName = redirect.FromUri.Host; } rule.Frontends.Add(frontend); trafficManager.SetRule(rule); redirectIndex++; } } // Give the new rules some time to deploy. await Task.Delay(TimeSpan.FromSeconds(5)); // Now all we need to do is hit all of the redirect [FromUri]s // and verify that we get redirects to the corresponding // [ToUri]s. // Allow self-signed certificates and disable client-side automatic redirect handling // so we'll be able to see the redirect responses. 
var handler = new HttpClientHandler() { ServerCertificateCustomValidationCallback = HttpClientHandler.DangerousAcceptAnyServerCertificateValidator, AllowAutoRedirect = false // We need to see the redirects }; using (var client = new TestHttpClient(disableConnectionReuse: true, handler: handler, disposeHandler: true)) { foreach (var redirect in redirections) { var response = await client.GetAsync(redirect.FromUri); Assert.Equal(HttpStatusCode.Redirect, response.StatusCode); Assert.True(response.Headers.TryGetValues("Location", out var locations)); Assert.Equal(redirect.ToUri.ToString(), locations.Single()); } } }
/// <summary> /// Verify that we can create a TCP traffic manager rule for a /// site on the public port using a specific hostname and then /// verify that the traffic manager actually works by spinning /// up a [vegomatic] based service to accept the traffic. /// </summary> /// <param name="testName">Simple name (without spaces) used to ensure that URIs cached for different tests won't conflict.</param> /// <param name="proxyPort">The inbound proxy port.</param> /// <param name="network">The proxy network.</param> /// <param name="trafficManager">The traffic manager.</param> /// <param name="serviceName">Optionally specifies the backend service name (defaults to <b>vegomatic</b>).</param> /// <returns>The tracking <see cref="Task"/>.</returns> private async Task TestTcpRule(string testName, int proxyPort, string network, TrafficManager trafficManager, string serviceName = "vegomatic") { // Append a GUID to the test name to ensure that we won't // conflict with what any previous test runs may have loaded // into the cache. testName += "-" + Guid.NewGuid().ToString("D"); // Verify that we can create a TCP traffic manager rule for a // site on the public port using a specific hostname and then // verify that the traffic manager actually works by spinning // up a [vegomatic] based service to accept the traffic. var queryCount = 100; var manager = hive.GetReachableManager(); var hostname = manager.PrivateAddress.ToString(); manager.Connect(); using (var client = new TestHttpClient(disableConnectionReuse: true)) { // Setup the client to query the [vegomatic] service through the // proxy without needing to configure a hive DNS entry. client.BaseAddress = new Uri($"http://{manager.PrivateAddress}:{proxyPort}/"); client.DefaultRequestHeaders.Host = testHostname; // Configure the traffic manager rule. var rule = new TrafficTcpRule() { Name = "vegomatic", CheckSeconds = 1, }; rule.Frontends.Add( new TrafficTcpFrontend() { ProxyPort = proxyPort }); rule.Backends.Add( new TrafficTcpBackend() { Server = serviceName, Port = 80 }); trafficManager.SetRule(rule); // Spin up a single [vegomatic] service instance. manager.SudoCommand($"docker service create --name vegomatic --network {network} --replicas 1 {vegomaticImage} test-server").EnsureSuccess(); await WaitUntilReadyAsync(client.BaseAddress, hostname); // Query the service several times to verify that we get a response and // also that all of the responses are the same (because we have only // a single [vegomatic] instance returning its UUID). var uniqueResponses = new HashSet <string>(); for (int i = 0; i < queryCount; i++) { var response = await client.GetAsync($"/{testName}/pass-1/{i}?body=server-id&expires=60"); Assert.Equal(HttpStatusCode.OK, response.StatusCode); var body = await response.Content.ReadAsStringAsync(); if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } Assert.Single(uniqueResponses); // Spin up a second replica and repeat the query test to verify // that we see two unique responses. // // Note that we're going to pass a new set of URLs to avoid having // any responses cached so we'll end up seeing all of the IDs. // // Note also that we need to perform these requests in parallel // to try to force Varnish to establish more than one connection // to the [vegomatic] service. If we don't do this, Varnish will // establish a single connection to one of the service instances // and keep sending traffic there resulting in us seeing only // one response UUID. 
manager.SudoCommand($"docker service update --replicas 2 vegomatic").EnsureSuccess(); await WaitUntilReadyAsync(client.BaseAddress, hostname); // Reset the response info and do the requests. uniqueResponses.Clear(); var tasks = new List <Task>(); var uris = new List <string>(); for (int i = 0; i < queryCount; i++) { uris.Add($"/{testName}/pass-2/{i}?body=server-id&expires=60&delay=0.250"); } foreach (var uri in uris) { tasks.Add(Task.Run( async() => { var response = await client.GetAsync(uri); var body = await response.Content.ReadAsStringAsync(); Assert.Equal(HttpStatusCode.OK, response.StatusCode); lock (uniqueResponses) { if (!uniqueResponses.Contains(body)) { uniqueResponses.Add(body); } } })); } await NeonHelper.WaitAllAsync(tasks, TimeSpan.FromSeconds(30)); Assert.Equal(2, uniqueResponses.Count); } }
public async Task Cache() { var manager = hive.GetReachableManager(); //----------------------------------------------------------------- // Verify that we can add a cached public traffic manager rule and // then that PURGE works. var playbook = $@" - name: test hosts: localhost tasks: - name: rule neon_traffic_manager: name: public state: present rule_name: test rule: mode: http checkexpect: status 200 cache: enabled: true frontends: - host: vegomatic.test backends: - server: vegomatic port: 80 "; var results = AnsiblePlayer.PlayNoGather(playbook); var taskResult = results.GetTaskResult("rule"); Assert.True(taskResult.Success); Assert.True(taskResult.Changed); var rule = (TrafficHttpRule)hive.PublicTraffic.GetRule("test"); Assert.NotNull(rule); Assert.Equal("test", rule.Name); Assert.Equal(TrafficMode.Http, rule.Mode); Assert.Single(rule.Frontends); Assert.Equal("vegomatic.test", rule.Frontends.First().Host); Assert.Equal(HiveHostPorts.ProxyPublicHttp, rule.Frontends.First().ProxyPort); Assert.Single(rule.Backends); Assert.Equal("vegomatic", rule.Backends.First().Server); Assert.Equal(80, rule.Backends.First().Port); Assert.True(rule.Cache.Enabled); //----------------------------------------------------------------- // Add the same rule and verify that no change was detected this time. playbook = $@" - name: test hosts: localhost tasks: - name: rule neon_traffic_manager: name: public state: present rule_name: test rule: mode: http checkexpect: status 200 cache: enabled: true frontends: - host: vegomatic.test backends: - server: vegomatic port: 80 "; results = AnsiblePlayer.PlayNoGather(playbook); taskResult = results.GetTaskResult("rule"); Assert.True(taskResult.Success); Assert.False(taskResult.Changed); rule = (TrafficHttpRule)hive.PublicTraffic.GetRule("test"); Assert.NotNull(rule); Assert.Equal("test", rule.Name); Assert.Equal(TrafficMode.Http, rule.Mode); Assert.Single(rule.Frontends); Assert.Equal("vegomatic.test", rule.Frontends.First().Host); Assert.Equal(HiveHostPorts.ProxyPublicHttp, rule.Frontends.First().ProxyPort); Assert.Single(rule.Backends); Assert.Equal("vegomatic", rule.Backends.First().Server); Assert.Equal(80, rule.Backends.First().Port); Assert.True(rule.Cache.Enabled); //----------------------------------------------------------------- // Crank up Vegomatic as the backing service and perform some tests // to verify that caching is actually working. 
var baseAddress = new Uri($"http://{manager.PrivateAddress}:80/"); var uuid = Guid.NewGuid().ToString("D"); manager.SudoCommand($"docker service create --name vegomatic --network {HiveConst.PublicNetwork} --replicas 1 {vegomaticImage} test-server expires=300").EnsureSuccess(); await WaitUntilReadyAsync(baseAddress, "vegomatic.test"); using (var client = new TestHttpClient(disableConnectionReuse: true)) { client.BaseAddress = baseAddress; client.DefaultRequestHeaders.Host = "vegomatic.test"; // Cache responses for [/{uuid}/test1.txt] and [/{uuid}/test2.txt] var response = await client.GetAsync($"/{uuid}/test1.txt"); Assert.True(ViaVarnish(response)); // Verify that the request was routed thru Varnish Assert.False(CacheHit(response)); // The first request shouldn't be cached response = await client.GetAsync($"/{uuid}/test1.txt"); Assert.True(ViaVarnish(response)); Assert.True(CacheHit(response)); // The second request should be cached response = await client.GetAsync($"/{uuid}/test2.txt"); Assert.True(ViaVarnish(response)); // Verify that the request was routed thru Varnish Assert.False(CacheHit(response)); // The first request shouldn't be cached response = await client.GetAsync($"/{uuid}/test2.txt"); Assert.True(ViaVarnish(response)); Assert.True(CacheHit(response)); // The second request should be cached // Purge [test1.txt] and verify that it's no longer cached and that [test2.txt] // is still cached. playbook = $@" - name: test hosts: localhost tasks: - name: rule neon_traffic_manager: name: public state: purge purge_list: - http://vegomatic.test/*/test1.txt "; results = AnsiblePlayer.PlayNoGather(playbook); taskResult = results.GetTaskResult("rule"); Assert.True(taskResult.Success); Assert.True(taskResult.Changed); // PURGE is always considered to be a change response = await client.GetAsync($"/{uuid}/test1.txt"); Assert.True(ViaVarnish(response)); Assert.False(CacheHit(response)); // This should have been purged. response = await client.GetAsync($"/{uuid}/test2.txt"); Assert.True(ViaVarnish(response)); Assert.True(CacheHit(response)); // This should still be cached. // Both items should both be loaded back into the cache now. We're going // to try purging both of them this time. playbook = $@" - name: test hosts: localhost tasks: - name: rule neon_traffic_manager: name: public state: purge purge_list: - http://vegomatic.test/**/* "; results = AnsiblePlayer.PlayNoGather(playbook); taskResult = results.GetTaskResult("rule"); Assert.True(taskResult.Success); Assert.True(taskResult.Changed); response = await client.GetAsync($"/{uuid}/test1.txt"); Assert.True(ViaVarnish(response)); Assert.False(CacheHit(response)); // This should have been purged. response = await client.GetAsync($"/{uuid}/test2.txt"); Assert.True(ViaVarnish(response)); Assert.False(CacheHit(response)); // This should be purged too. // Both items should both be loaded back into the cache now. We're going // to use ALL to both of them this time. playbook = $@" - name: test hosts: localhost tasks: - name: rule neon_traffic_manager: name: public state: purge purge_list: - ALL "; results = AnsiblePlayer.PlayNoGather(playbook); taskResult = results.GetTaskResult("rule"); Assert.True(taskResult.Success); Assert.True(taskResult.Changed); response = await client.GetAsync($"/{uuid}/test1.txt"); Assert.True(ViaVarnish(response)); Assert.False(CacheHit(response)); // This should have been purged. 
response = await client.GetAsync($"/{uuid}/test2.txt"); Assert.True(ViaVarnish(response)); Assert.False(CacheHit(response)); // This should be purged too. // Verify that we can do case sensitive purging. await client.GetAsync($"/{uuid}/test1.txt"); await client.GetAsync($"/{uuid}/test2.txt"); await client.GetAsync($"/{uuid}/TEST1.TXT"); await client.GetAsync($"/{uuid}/TEST2.TXT"); playbook = $@" - name: test hosts: localhost tasks: - name: rule neon_traffic_manager: name: public state: purge purge_list: - http://vegomatic.test/*/test*.txt purge_case_sensitive: yes "; results = AnsiblePlayer.PlayNoGather(playbook); taskResult = results.GetTaskResult("rule"); Assert.True(taskResult.Success); Assert.True(taskResult.Changed); response = await client.GetAsync($"/{uuid}/test1.txt"); Assert.True(ViaVarnish(response)); Assert.False(CacheHit(response)); // This should have been purged. response = await client.GetAsync($"/{uuid}/test2.txt"); Assert.True(ViaVarnish(response)); Assert.False(CacheHit(response)); // This should be purged too. response = await client.GetAsync($"/{uuid}/TEST1.TXT"); Assert.True(ViaVarnish(response)); Assert.True(CacheHit(response)); // This should still be cached. response = await client.GetAsync($"/{uuid}/TEST2.TXT"); Assert.True(ViaVarnish(response)); Assert.True(CacheHit(response)); // This should still be cached too. } //----------------------------------------------------------------- // Delete the rule and verify. playbook = $@" - name: test hosts: localhost tasks: - name: rule neon_traffic_manager: name: public state: absent rule_name: test "; results = AnsiblePlayer.PlayNoGather(playbook); taskResult = results.GetTaskResult("rule"); Assert.True(taskResult.Success); Assert.True(taskResult.Changed); rule = (TrafficHttpRule)hive.PublicTraffic.GetRule("test"); Assert.Null(rule); }