/** Now, let's disable pinging on the request */
[U] public async Task DisablePing()
{
    var audit = new Auditor(() => Virtual.Elasticsearch
        .Bootstrap(10)
        .ClientCalls(r => r.SucceedAlways())
        .Sniff(c => c.SucceedAlways())
        .SniffingConnectionPool()
        .Settings(s => s.SniffOnStartup())
    );

    audit = await audit.TraceCall(
        new ClientCall(r => r.DisablePing()) // <1> disable ping
        {
            { SniffOnStartup },
            { SniffSuccess, 9200 }, // <2> No ping after sniffing
            { HealthyResponse, 9200 }
        }
    );
}
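/**
 * A usage sketch, not part of the test above: assuming NEST's request configuration API,
 * disabling pinging on a single real request looks like this. The pool URI and index name
 * are hypothetical.
 */
public void DisablePingOnRealRequest()
{
    var pool = new SniffingConnectionPool(new[] { new Uri("http://localhost:9200") });
    var settings = new ConnectionSettings(pool).SniffOnStartup();
    var client = new ElasticClient(settings);

    // Skip the ping for this request only; sniffing behaviour is unaffected.
    var response = client.Search<object>(s => s
        .Index("my-index") // hypothetical index name
        .RequestConfiguration(r => r.DisablePing())
    );
}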
public async Task SniffOnStartUpTakesNewClusterState()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .Sniff(s => s.Fails(Always))
        .Sniff(s => s.OnPort(9202).Succeeds(Always, Framework.Cluster.Nodes(8, startFrom: 9204)))
        .SniffingConnectionPool()
        .AllDefaults()
    );

    await audit.TraceCall(new ClientCall
    {
        { SniffOnStartup },
        { SniffFailure, 9200 },
        { SniffFailure, 9201 },
        { SniffSuccess, 9202 },
        { PingSuccess, 9204 },
        { HealthyResponse, 9204 }
    });
}
public async Task DefaultMaxIsNumberOfNodes()
{
    var audit = new Auditor(() => VirtualClusterWith
        .Nodes(10)
        .ClientCalls(r => r.FailAlways())
        .ClientCalls(r => r.OnPort(9209).SucceedAlways())
        .StaticConnectionPool()
        .Settings(s => s.DisablePing())
    );

    audit = await audit.TraceCall(
        new ClientCall(r => r.MaxRetries(2))
        {
            { BadResponse, 9200 },
            { BadResponse, 9201 },
            { BadResponse, 9202 },
            { MaxRetriesReached }
        }
    );
}
int LogQuoteServerEvent(UserCommandType command, string quote)
{
    // Quote format: Cost,StockSymbol,UserId,Timestamp,CryptoKey
    string[] args = quote.Split(',');
    QuoteServerType stockQuote = new QuoteServerType()
    {
        username = args[2],
        server = Server.QUOTE_SERVER.Abbr,
        price = Convert.ToDecimal(args[0]),
        transactionNum = command.transactionNum,
        stockSymbol = args[1],
        timestamp = Unix.TimeStamp.ToString(),
        quoteServerTime = args[3],
        cryptokey = args[4]
    };
    Auditor.WriteRecord(stockQuote);
    // Price is stored in cents.
    return (int)(stockQuote.price * 100);
}
public async Task RetriesAreLimitedByNodesInPool()
{
    var audit = new Auditor(() => VirtualClusterWith
        .Nodes(2)
        .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
        .ClientCalls(r => r.OnPort(9209).SucceedAlways())
        .StaticConnectionPool()
        .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10)))
    );

    audit = await audit.TraceCall(
        new ClientCall
        {
            { BadResponse, 9200 },
            { BadResponse, 9201 },
            { MaxRetriesReached },
            { FailedOverAllNodes }
        }
    );
}
public async Task FixedMaximumNumberOfRetries()
{
    var audit = new Auditor(() => Virtual.Elasticsearch
        .Bootstrap(10)
        .ClientCalls(r => r.FailAlways())
        .ClientCalls(r => r.OnPort(9209).SucceedAlways())
        .StaticConnectionPool()
        .Settings(s => s.DisablePing().MaximumRetries(5))
    );

    audit = await audit.TraceCall(
        new ClientCall(r => r.MaxRetries(2))
        {
            { BadResponse, 9200 },
            { BadResponse, 9201 },
            { BadResponse, 9202 },
            { MaxRetriesReached }
        }
    );
}
public async Task SniffOnStartUpTakesNewClusterState()
{
    var audit = new Auditor(() => VirtualClusterWith
        .Nodes(10)
        .Sniff(s => s.Fails(Always))
        .Sniff(s => s.OnPort(9202).Succeeds(Always, VirtualClusterWith.Nodes(8, startFrom: 9204))) // <1> Sniffing returns 8 nodes, starting from 9204
        .SniffingConnectionPool()
        .AllDefaults()
    );

    await audit.TraceCall(new ClientCall
    {
        { SniffOnStartup },
        { SniffFailure, 9200 },
        { SniffFailure, 9201 },
        { SniffSuccess, 9202 },
        { PingSuccess, 9204 }, // <2> After successfully sniffing, the ping now happens on 9204
        { HealthyResponse, 9204 }
    });
}
public HttpResponseMessage PutApplication(string id, [FromUri] string apiKey, [FromBody] Application application)
{
    Auditor.Trace(GetType(), "Started...");
    Auditor.Trace(GetType(), "...Attempting to load organisation from apiKey:={0}", apiKey);
    var organisation = GetOrganisationFromApiKey(apiKey);
    if (organisation == null)
    {
        Auditor.Trace(GetType(), "...Failed to authenticate with apiKey:={0}", apiKey);
        return Request.CreateResponse(HttpStatusCode.Unauthorized);
    }

    Auditor.Trace(GetType(), "...Successfully loaded organisation Name:={0}, Id:={1}", organisation.Name, organisation.Id);
    var storedApplication = Session.Raven.Load<Application>(Application.GetId(id));
    if (storedApplication == null || storedApplication.OrganisationId != organisation.Id)
    {
        return Request.CreateResponse(HttpStatusCode.NotFound);
    }

    if (application.Version.IsNotNullOrEmpty())
    {
        storedApplication.Version = application.Version;
    }

    storedApplication.IsActive = application.IsActive;
    storedApplication.Name = application.Name;

    return Request.CreateResponse(HttpStatusCode.OK, new Application
    {
        Id = storedApplication.Id,
        Name = storedApplication.Name,
        Version = storedApplication.Version,
        IsActive = storedApplication.IsActive,
        HipChatRoomId = storedApplication.HipChatRoomId,
        OrganisationId = storedApplication.OrganisationId,
        Token = storedApplication.Token,
        CampfireRoomId = storedApplication.CampfireRoomId
    });
}
// "Insert" method to directly save objects with composite keys
public virtual int Add(T type, bool validateEntity = false, string endPoint = "", string transID = "")
{
    //var compositeKeys = typeof(T).GetProperties().Any(p => p.GetCustomAttributes(typeof(CompositeKeyColumnAttribute), true).Length != 0);
    //var identityPropertyName = typeof(T).GetProperties().SingleOrDefault(t => t.Name.ToLower().StartsWith("id")) == null
    //    ? "ID"
    //    : typeof(T).GetProperties().SingleOrDefault(t => t.Name.ToLower().StartsWith("id")).Name;
    //var id = Convert.ToInt32(typeof(T).GetProperty(identityPropertyName).GetValue(type, null));
    //var v = id > 0 && !compositeKeys ? _repository.Single<T>(id) : null;

    if (validateEntity)
    {
        var validator = new Validation.EntityValidator<T>();
        var vr = validator.Validate(type);
        if (vr.HasError)
        {
            throw new Exception("Validation: " + vr.ErrorList);
        }
    }

    try
    {
        if (!string.IsNullOrEmpty(endPoint) && !string.IsNullOrEmpty(transID) && typeof(T).Name != "Audit")
        {
            Auditor.Add<T>(new Auditor.AuditAction
            {
                Endpoint = endPoint,
                Operation = "insert",
                TransactionID = transID,
            }, type, type, "id", "created");
        }

        var result = _repository.Insert(type);
        return (result == null || result.Equals(0)) ? 0 : Convert.ToInt32(result);
    }
    catch (SqlException sx)
    {
        throw new Exception(new ValidationExceptionParser(TableName, sx).ValidationErrorMessage, sx);
    }
    finally
    {
        _repository.Dispose();
    }
}
/*
 * Logs interserver communication.
 * @param command The user command that is driving the process
 */
protected void LogServerEvent(UserCommandType command)
{
    SystemEventType sysEvent = new SystemEventType()
    {
        timestamp = Unix.TimeStamp.ToString(),
        server = ServiceDetails.Abbr,
        transactionNum = command.transactionNum,
        username = command.username,
        fundsSpecified = command.fundsSpecified,
        command = command.command,
        filename = command.filename,
        stockSymbol = command.stockSymbol
    };
    if (command.fundsSpecified)
    {
        sysEvent.funds = command.funds / 100m;
    }
    Auditor.WriteRecord(sysEvent);
}
/// <summary>
/// Reject business method.
/// </summary>
/// <param name="trainingRequest">A trainingRequest value.</param>
public void Reject(TrainingRequest trainingRequest)
{
    // Data access component declarations.
    var trainingRequestDAC = new TrainingRequestDAC();
    TrainingRequest oldRequest = trainingRequestDAC.SelectById(trainingRequest.TRequestID);

    // Step 1 - Calling UpdateById on TrainingRequestDAC.
    trainingRequestDAC.UpdateById(trainingRequest);

    var auditLog = new AuditLog();
    auditLog.Module = "Reject Training";
    auditLog.Action = "Reject Training";
    auditLog.Description = "Reject Training";
    auditLog.CreateBy = Environment.MachineName;

    var auditor = new Auditor();
    auditor.AuditModified(auditLog, oldRequest, trainingRequest);
}
/// <summary>
/// Cancel business method.
/// </summary>
/// <param name="trainingRequest">A trainingRequest value.</param>
public void Cancel(TrainingRequest trainingRequest)
{
    // Data access component declarations.
    var trainingRequestDAC = new TrainingRequestDAC();

    // Step 1 - Calling UpdateById on TrainingRequestDAC.
    trainingRequestDAC.UpdateById(trainingRequest);

    var auditLog = new AuditLog();
    auditLog.Module = "Cancel Training";
    auditLog.Action = "Cancel Training";
    auditLog.Description = "Cancel Training";
    auditLog.CreateBy = Environment.MachineName;

    var auditor = new Auditor();
    // auditor.AuditModified(auditLog, trainingRequest.RequestStatus, trainingRequest.RequestStatus);
    auditor.AuditAdded(auditLog, trainingRequest);
}
private Auditor EstablecerAuditor(MethodExecutionArgs args, IEnumerable<object> listaParametros)
{
    _configuracion = new Configuracion();
    var direccionIp = DireccionIp();
    var usuario = _configuracion.ObtenerUsuario((ApiController)args.Instance);

    // Build the audit record: entity, action, timestamp, user, serialized parameters and caller IP.
    var auditor = new Auditor
    {
        Entidad = _configuracion.ObtenerInstancia(args.Instance.ToString()),
        Accion = args.Method.Name,
        FechaRegistro = DateTime.Now,
        ModificadoPor = usuario,
        Parametros = _configuracion.CrearJsonParametros(listaParametros.ToList()),
        Nombre = direccionIp
    };
    return auditor;
}
public async Task FixedMaximumNumberOfRetries()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .ClientCalls(r => r.FailAlways())
        .ClientCalls(r => r.OnPort(9209).SucceedAlways())
        .StaticConnectionPool()
        .Settings(s => s.DisablePing().MaximumRetries(3))
    );

    audit = await audit.TraceCall(
        new ClientCall
        {
            { BadResponse, 9200 },
            { BadResponse, 9201 },
            { BadResponse, 9202 },
            { BadResponse, 9203 },
            { MaxRetriesReached }
        }
    );
}
public async Task FixedMaximumNumberOfRetries()
{
    var audit = new Auditor(() => VirtualClusterWith
        .Nodes(10)
        .ClientCalls(r => r.FailAlways())
        .ClientCalls(r => r.OnPort(9209).SucceedAlways())
        .StaticConnectionPool()
        .Settings(s => s.DisablePing().MaximumRetries(3)) // <1> Set the maximum number of retries to 3
    );

    audit = await audit.TraceCall(
        new ClientCall
        {
            { BadResponse, 9200 },
            { BadResponse, 9201 },
            { BadResponse, 9202 },
            { BadResponse, 9203 },
            { MaxRetriesReached } // <2> The client call trace returns a `MaxRetriesReached` audit after the initial attempt and the allowed number of retries
        }
    );
}
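/**
 * A usage sketch, not part of the tests above: on a real client, `MaximumRetries` on
 * `ConnectionSettings` sets the global retry cap, and the request configuration can lower
 * it per call. The pool URIs and index name are hypothetical.
 */
public void ConfigureRetries()
{
    var pool = new StaticConnectionPool(new[]
    {
        new Uri("http://localhost:9200"),
        new Uri("http://localhost:9201")
    });
    var settings = new ConnectionSettings(pool).MaximumRetries(3); // global cap: 3 retries
    var client = new ElasticClient(settings);

    // Allow only a single retry for this particular request.
    var response = client.Search<object>(s => s
        .Index("my-index") // hypothetical index name
        .RequestConfiguration(r => r.MaxRetries(1))
    );
}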
/**
 * If you set smaller request timeouts, you might not want them to also affect the retry timeout;
 * therefore you can configure these separately too. Here we simulate calls taking 3 seconds,
 * a request timeout of 2 seconds and an overall retry timeout of 10 seconds.
 * We should see 5 attempts to perform this query, testing that our request timeout cuts the query off short
 * and that our max retry timeout of 10 seconds wins over the configured request timeout.
 */
[U] public async Task RespectsMaxRetryTimeoutOverRequestTimeout()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
        .ClientCalls(r => r.OnPort(9209).SucceedAlways())
        .StaticConnectionPool()
        .Settings(s => s.DisablePing().SetTimeout(TimeSpan.FromSeconds(2)).SetMaxRetryTimeout(TimeSpan.FromSeconds(10)))
    );

    audit = await audit.TraceCall(
        new ClientCall
        {
            { BadResponse, 9200 },
            { BadResponse, 9201 },
            { BadResponse, 9202 },
            { BadResponse, 9203 },
            { BadResponse, 9204 },
        }
    );
}
private void AddAuditorView(Auditor auditor, bool useAnimation)
{
    if (!animatedViews.Contains(auditor.Id))
    {
        GameObject auditorObj = Instantiate<GameObject>(auditorPrefab);
        auditorObj.GetComponent<RectTransform>().SetParent(animationParent, false);
        var auditorView = auditorObj.GetComponent<SecretaryAnimObject>();
        animatedViews.Add(auditor.Id);
        if (useAnimation)
        {
            AnimateAuditorObject(auditorView, auditor);
        }
        else
        {
            auditorView.Setup(auditor);
            auditorViews.Add(auditorView.Auditor.Id, auditorView);
            animatedViews.Remove(auditorView.Auditor.Id);
        }
    }
}
public async Task<MultiTraderCreditProviderBeginTransferResponse> BeginTransferAsync(MultiTraderCreditProviderBeginTransferRequest request, CancellationToken cancellation)
{
    var response = new MultiTraderCreditProviderBeginTransferResponse();
    var args = new
    {
        Result = CommandParameter.Output(System.Data.SqlDbType.VarChar, 50),
        Key = CommandParameter.Output(System.Data.SqlDbType.VarChar, 50),
        TraderKey = request.TraderKey,
        request.FromUserName,
        request.ToUserName,
        request.Amount,
        request.FromInfo,
        request.ToInfo
    };
    var dbr = await Db.ExecuteNonQueryAsync("usp1_CreditTransaction_begin_transfer", args, cancellation);
    if (dbr.Success)
    {
        response.Status = args.Result.Value?.ToString();
        var ar = await Auditor.AuditAsync("CRD_BGNTRNSFR", $"{{tk:{request.TraderKey},fu:{request.FromUserName},tu:{request.ToUserName},m:{request.Amount}}}");
        if (ar.IsSucceeded())
        {
            if (response.Status == "Success")
            {
                response.Succeeded(args.Key.Value);
            }
        }
        else
        {
            response.Exception = ar.Exception;
            response.Status = "AuditError";
        }
    }
    else
    {
        response.Failed(dbr.Exception);
    }
    return response;
}
/** == Disabling sniffing and pinging on a request basis
 * Even if you are using a sniffing connection pool that's set up to sniff on startup/failure
 * and has pinging enabled, you can opt out of this behaviour on a per request basis.
 *
 * In our first test we set up a cluster that pings and sniffs on startup,
 * but we disable the sniffing on our first request so we only see the ping and the response.
 */
[U] public async Task DisableSniff()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .ClientCalls(r => r.SucceedAlways())
        .SniffingConnectionPool()
        .Settings(s => s.SniffOnStartup())
    );

    audit = await audit.TraceCalls(
        /**
         * We disable sniffing, so even though it's our first call, we do not sniff on startup.
         */
        new ClientCall(r => r.DisableSniffing())
        {
            { PingSuccess, 9200 },
            { HealthyResponse, 9200 }
        },
        /**
         * Instead, the sniff on startup is deferred to the second call into the cluster, which
         * does not disable sniffing on a per request basis.
         */
        new ClientCall()
        {
            { SniffOnStartup },
            { SniffSuccess, 9200 },
            { PingSuccess, 9200 },
            { HealthyResponse, 9200 }
        },
        /**
         * And after that, no sniff on startup will happen again.
         */
        new ClientCall()
        {
            { PingSuccess, 9201 },
            { HealthyResponse, 9201 }
        }
    );
}
public async Task RespectsMaxRetryTimeoutOverRequestTimeout()
{
    var audit = new Auditor(() => VirtualClusterWith
        .Nodes(10)
        .ClientCalls(r => r.FailAlways().Takes(TimeSpan.FromSeconds(3)))
        .ClientCalls(r => r.OnPort(9209).FailAlways())
        .StaticConnectionPool()
        .Settings(s => s.DisablePing().RequestTimeout(TimeSpan.FromSeconds(2)).MaxRetryTimeout(TimeSpan.FromSeconds(10)))
    );

    audit = await audit.TraceCall(
        new ClientCall
        {
            { BadResponse, 9200 },
            { BadResponse, 9201 },
            { BadResponse, 9202 },
            { BadResponse, 9203 },
            { BadResponse, 9204 },
            { MaxTimeoutReached }
        }
    );
}
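/**
 * A configuration sketch, not part of the test above: the two timeouts exercised by the
 * test map to `RequestTimeout` and `MaxRetryTimeout` on `ConnectionSettings`. The pool URI
 * is hypothetical.
 */
public void ConfigureTimeouts()
{
    var pool = new StaticConnectionPool(new[] { new Uri("http://localhost:9200") });
    var settings = new ConnectionSettings(pool)
        .RequestTimeout(TimeSpan.FromSeconds(2))    // each attempt may take at most 2 seconds
        .MaxRetryTimeout(TimeSpan.FromSeconds(10)); // all attempts together may take at most 10 seconds
    var client = new ElasticClient(settings);
}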
[U] public async Task BadAuthenticationIsUnrecoverable()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .Ping(r => r.SucceedAlways())
        .ClientCalls(r => r.FailAlways(401))
        .StaticConnectionPool()
        .AllDefaults()
    );

    audit = await audit.TraceElasticsearchException(
        new ClientCall
        {
            { AuditEvent.PingSuccess, 9200 },
            { AuditEvent.BadResponse, 9200 },
        },
        (e) =>
        {
            e.FailureReason.Should().Be(PipelineFailure.BadAuthentication);
        }
    );
}
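/**
 * A related configuration sketch, not from the test above: since a 401 is treated as
 * unrecoverable and never retried on other nodes, credentials belong on the connection
 * settings up front. This assumes NEST's `BasicAuthentication` helper; the URI and
 * credentials are placeholders.
 */
public void ConfigureCredentials()
{
    var pool = new StaticConnectionPool(new[] { new Uri("http://localhost:9200") });
    var settings = new ConnectionSettings(pool)
        .BasicAuthentication("user", "password"); // placeholder credentials
    var client = new ElasticClient(settings);
}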
public async Task AllNodesArePingedOnlyOnFirstUseProvidedTheyAreHealthy()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(4)
        .Ping(p => p.SucceedAlways()) // <1> Pings on nodes always succeed
        .StaticConnectionPool()
        .AllDefaults()
    );

    await audit.TraceCalls(
        new ClientCall { { PingSuccess, 9200 }, { HealthyResponse, 9200 } }, // <2> A successful ping on each node
        new ClientCall { { PingSuccess, 9201 }, { HealthyResponse, 9201 } },
        new ClientCall { { PingSuccess, 9202 }, { HealthyResponse, 9202 } },
        new ClientCall { { PingSuccess, 9203 }, { HealthyResponse, 9203 } },
        new ClientCall { { HealthyResponse, 9200 } },
        new ClientCall { { HealthyResponse, 9201 } },
        new ClientCall { { HealthyResponse, 9202 } },
        new ClientCall { { HealthyResponse, 9203 } },
        new ClientCall { { HealthyResponse, 9200 } }
    );
}
/**
 * An unexpected hard exception on ping and sniff is something we *do* try to recover from and fail over on.
 * Here pinging nodes on first use is enabled and 9200 throws on ping; we still fail over to 9201, whose ping succeeds.
 * However, the client call on 9201 throws a hard exception we cannot recover from.
 */
[U] public async Task PingUnexceptedExceptionDoesFailOver()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .Ping(r => r.OnPort(9200).FailAlways(new Exception("ping exception")))
        .Ping(r => r.OnPort(9201).SucceedAlways())
        .ClientCalls(r => r.OnPort(9201).FailAlways(new Exception("boom!")))
        .StaticConnectionPool()
        .AllDefaults()
    );

    audit = await audit.TraceUnexpectedException(
        new ClientCall
        {
            { AuditEvent.PingFailure, 9200 },
            { AuditEvent.PingSuccess, 9201 },
            { AuditEvent.BadResponse, 9201 },
        },
        (e) =>
        {
            e.FailureReason.Should().Be(PipelineFailure.Unexpected);

            /** InnerException is the exception that brought the request down */
            e.InnerException.Should().NotBeNull();
            e.InnerException.Message.Should().Be("boom!");

            /** The hard exception that happened on ping is still available though */
            e.SeenExceptions.Should().NotBeEmpty();
            var pipelineException = e.SeenExceptions.First();
            pipelineException.FailureReason.Should().Be(PipelineFailure.PingFailure);
            pipelineException.InnerException.Message.Should().Be("ping exception");

            /** A seen exception is hard to relate back to a point in time; the exception is also
             * available on the audit trail */
            var pingException = e.AuditTrail.First(a => a.Event == AuditEvent.PingFailure).Exception;
            pingException.Should().NotBeNull();
            pingException.Message.Should().Be("ping exception");
        }
    );
}
public async Task RespectsConnectTimeoutOverride()
{
    /** We set up a 10 node cluster where pings always take 20 seconds,
     * with a global request timeout and ping timeout of 10 seconds each.
     * A single ping attempt therefore exhausts the request timeout.
     */
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .Ping(p => p.SucceedAlways().Takes(TimeSpan.FromSeconds(20)))
        .ClientCalls(r => r.SucceedAlways())
        .StaticConnectionPool()
        .Settings(s => s.RequestTimeout(TimeSpan.FromSeconds(10)).PingTimeout(TimeSpan.FromSeconds(10)))
    );

    audit = await audit.TraceCalls(
        /**
         * The first call uses the configured global settings: the request times out after 10 seconds and ping
         * calls always take 20, so we should see a single ping failure.
         */
        new ClientCall
        {
            { PingFailure, 9200 },
            { MaxTimeoutReached }
        },
        /**
         * On the second request we set a per request ping timeout override of 2 seconds.
         * We should now see more nodes being tried before the request timeout is hit.
         */
        new ClientCall(r => r.PingTimeout(TimeSpan.FromSeconds(2)))
        {
            { PingFailure, 9202 },
            { PingFailure, 9203 },
            { PingFailure, 9204 },
            { PingFailure, 9205 },
            { PingFailure, 9206 },
            { MaxTimeoutReached }
        }
    );
}
/**[[disable-sniff-ping-per-request]]
 * === Disable sniffing and pinging per request
 *
 * Even if you are using a sniffing connection pool that's set up to sniff on startup/failure
 * and has pinging enabled, you can opt out of this behaviour on a _per request_ basis.
 *
 * In our first test we set up a cluster that pings and sniffs on startup,
 * but we disable the sniffing on our first request so we only see the ping and the response.
 */
[U] public async Task DisableSniff()
{
    /** Let's set up the cluster and configure clients to **always** sniff on startup */
    var audit = new Auditor(() => VirtualClusterWith
        .Nodes(10)
        .ClientCalls(r => r.SucceedAlways())
        .Sniff(c => c.SucceedAlways())
        .Ping(c => c.SucceedAlways())
        .SniffingConnectionPool()
        .Settings(s => s.SniffOnStartup()) // <1> sniff on startup
    );

    /** Now we disable sniffing on the request so that, even though it's our first call,
     * we do not sniff on startup.
     *
     * Instead, the sniff on startup is deferred to the second call into the cluster that
     * does not disable sniffing on a per request basis.
     *
     * And after that no sniff on startup will happen again.
     */
    audit = await audit.TraceCalls(
        new ClientCall(r => r.DisableSniffing()) // <1> disable sniffing
        {
            { PingSuccess, 9200 }, // <2> first call is a successful ping
            { HealthyResponse, 9200 }
        },
        new ClientCall()
        {
            { SniffOnStartup }, // <3> sniff on startup call happens here, on the second call
            { SniffSuccess, 9200 },
            { PingSuccess, 9200 },
            { HealthyResponse, 9200 }
        },
        new ClientCall()
        {
            { PingSuccess, 9201 }, // <4> No sniff on startup again
            { HealthyResponse, 9201 }
        }
    );
}
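/**
 * A usage sketch, not part of the docs above: assuming NEST's request configuration API,
 * the same per request opt-out looks like this on a real client call. The pool URI and
 * index name are hypothetical.
 */
public void DisableSniffOnRealRequest()
{
    var pool = new SniffingConnectionPool(new[] { new Uri("http://localhost:9200") });
    var settings = new ConnectionSettings(pool).SniffOnStartup();
    var client = new ElasticClient(settings);

    // Opt out of sniffing for this request only; the deferred sniff-on-startup
    // will run on the next request that does not disable it.
    var response = client.Search<object>(s => s
        .Index("my-index") // hypothetical index name
        .RequestConfiguration(r => r.DisableSniffing())
    );
}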
public async Task AllNodesArePingedOnlyOnFirstUseProvidedTheyAreHealthy()
{
    /** A healthy cluster of 4 nodes (with minimum master nodes set to 3, of course!) */
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(4)
        .Ping(p => p.SucceedAlways())
        .StaticConnectionPool()
        .AllDefaults()
    );

    await audit.TraceCalls(
        new ClientCall { { PingSuccess, 9200 }, { HealthyResponse, 9200 } },
        new ClientCall { { PingSuccess, 9201 }, { HealthyResponse, 9201 } },
        new ClientCall { { PingSuccess, 9202 }, { HealthyResponse, 9202 } },
        new ClientCall { { PingSuccess, 9203 }, { HealthyResponse, 9203 } },
        new ClientCall { { HealthyResponse, 9200 } },
        new ClientCall { { HealthyResponse, 9201 } },
        new ClientCall { { HealthyResponse, 9202 } },
        new ClientCall { { HealthyResponse, 9203 } },
        new ClientCall { { HealthyResponse, 9200 } }
    );
}
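/**
 * A configuration sketch, not from the test above: with a real static connection pool,
 * pinging nodes on first use can be turned off globally via `DisablePing` on the settings.
 * The URIs are hypothetical.
 */
public void DisablePingGlobally()
{
    var uris = Enumerable.Range(9200, 4).Select(port => new Uri($"http://localhost:{port}"));
    var pool = new StaticConnectionPool(uris);
    var settings = new ConnectionSettings(pool).DisablePing(); // no pings on first node use
    var client = new ElasticClient(settings);
}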
public async Task SniffPrefersMasterNodes()
{
    var audit = new Auditor(() => Virtual.Elasticsearch
        .Bootstrap(new[]
        {
            new Node(new Uri("http://localhost:9200"), NotMasterEligible),
            new Node(new Uri("http://localhost:9201"), NotMasterEligible),
            new Node(new Uri("http://localhost:9202")),
        })
        .Sniff(s => s.Succeeds(Always))
        .Ping(s => s.Succeeds(Always))
        .ClientCalls(r => r.SucceedAlways())
        .SniffingConnectionPool()
        .AllDefaults()
    );

    await audit.TraceCall(new ClientCall
    {
        { SniffOnStartup },
        { SniffSuccess, 9202 },
        { PingSuccess, 9200 },
        { HealthyResponse, 9200 }
    });
}
[U] public async Task BadAuthenticationHtmlResponseIsIgnored()
{
    var audit = new Auditor(() => Framework.Cluster
        .Nodes(10)
        .Ping(r => r.SucceedAlways())
        .ClientCalls(r => r.FailAlways(401).ReturnResponse(ResponseHtml))
        .StaticConnectionPool()
        .AllDefaults()
    );

    audit = await audit.TraceElasticsearchException(
        new ClientCall
        {
            { AuditEvent.PingSuccess, 9200 },
            { AuditEvent.BadResponse, 9200 },
        },
        (e) =>
        {
            e.FailureReason.Should().Be(PipelineFailure.BadAuthentication);
            e.Response.ResponseBodyInBytes.Should().BeNull();
        }
    );
}
private async Task<MultiTraderCreditProviderBeginApplyResponse> BeginApplyAsync(MultiTraderCreditProviderBeginApplyRequest request, CancellationToken cancellation)
{
    var response = new MultiTraderCreditProviderBeginApplyResponse();
    var args = new
    {
        Result = CommandParameter.Output(System.Data.SqlDbType.VarChar, 50),
        Key = CommandParameter.Output(System.Data.SqlDbType.VarChar, 50),
        TraderKey = request.TraderKey,
        UserName = request.UserName,
        Amount = request.Amount,
        Info = request.Info
    };
    var dbr = await Db.ExecuteNonQueryAsync("usp1_CreditTransaction_begin_apply", args, cancellation);
    if (dbr.Success)
    {
        response.Status = args.Result.Value?.ToString();
        var ar = await Auditor.AuditAsync(request.AuditCode, $"{{tk:{request.TraderKey},u:{request.UserName},m:{request.Amount}}}");
        if (ar.IsSucceeded())
        {
            if (response.Status == "Success")
            {
                response.Succeeded(args.Key.Value);
            }
        }
        else
        {
            response.Exception = ar.Exception;
            response.Status = "AuditError";
        }
    }
    else
    {
        response.Failed(dbr.Exception);
    }
    return response;
}
// -------------------------- Transfer --------------------------

public MultiTraderCreditProviderTransferResponse Transfer(MultiTraderCreditProviderTransferRequest request)
{
    var response = new MultiTraderCreditProviderTransferResponse();
    var args = new
    {
        Result = CommandParameter.Output(System.Data.SqlDbType.VarChar, 80),
        TraderKey = request.TraderKey,
        request.FromUserName,
        request.ToUserName,
        request.Amount,
        request.FromInfo,
        request.ToInfo
    };
    var dbr = Db.ExecuteNonQuery("usp1_CreditTransaction_transfer", args);
    if (dbr.Success)
    {
        response.Status = args.Result.Value?.ToString();
        var ar = Auditor.Audit("CRD_TRNSFR", $"{{tk:{request.TraderKey},fu:{request.FromUserName},tu:{request.ToUserName},m:{request.Amount}}}");
        if (ar.IsSucceeded())
        {
            if (response.Status == "Success")
            {
                response.Succeeded();
            }
        }
        else
        {
            response.Exception = ar.Exception;
            response.Status = "AuditError";
        }
    }
    else
    {
        response.Failed(dbr.Exception);
    }
    return response;
}
public static Auditor CreateAuditor(int ID, bool currentAuditor, byte[] rowVersion)
{
    Auditor auditor = new Auditor();
    auditor.Id = ID;
    auditor.CurrentAuditor = currentAuditor;
    auditor.RowVersion = rowVersion;
    return auditor;
}
public void AddToAuditors(Auditor auditor)
{
    base.AddObject("Auditors", auditor);
}