public async Task AddLoadAsync(ITransaction tx, Uri collectionName, IEnumerable<LoadMetric> metrics)
{
    IReliableDictionary<string, List<LoadMetric>> metricDictionary =
        await this.stateManager.GetOrAddAsync<IReliableDictionary<string, List<LoadMetric>>>(this.metricStoreName);

    await metricDictionary.AddOrUpdateAsync(
        tx,
        collectionName.ToString(),
        metrics.ToList(),
        (key, value) =>
        {
            // Merge the incoming metrics with the stored list: add each incoming metric's value to the
            // existing entry with the same name, or keep it as a new entry if no match exists.
            // Stored metrics that are not in the incoming set are dropped by this merge.
            List<LoadMetric> currentMetrics = new List<LoadMetric>();
            foreach (LoadMetric newMetric in metrics)
            {
                LoadMetric current = value.Find(x => String.Equals(x.Name, newMetric.Name, StringComparison.OrdinalIgnoreCase));
                if (current == null)
                {
                    currentMetrics.Add(newMetric);
                }
                else
                {
                    currentMetrics.Add(new LoadMetric(current.Name, current.Value + newMetric.Value));
                }
            }
            return currentMetrics;
        });
}
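A minimal caller sketch for the method above, assuming the call happens inside the same class (so this.stateManager is the service's IReliableStateManager); the collection URI and the metric name/value are illustrative, and the transaction pattern is the standard Service Fabric reliable-collections usage:

using (ITransaction tx = this.stateManager.CreateTransaction())
{
    await this.AddLoadAsync(
        tx,
        new Uri("fabric:/MyApp/MyCollection"),        // hypothetical collection name
        new[] { new LoadMetric("MemoryInMB", 256) }); // metric name and value are illustrative
    await tx.CommitAsync(); // AddOrUpdateAsync only becomes durable once the transaction commits
}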
public bool Visit(LoadContainer container)
{
    if (container.ContainerType != ContainerType.Database)
    {
        return true;
    }
    LoadMetric instance = PhysicalSize.Instance;
    ByteQuantifiedSize sizeMetric = container.MaximumLoad.GetSizeMetric(instance);
    ByteQuantifiedSize byteQuantifiedSize = sizeMetric * this.threshold / 100;
    ByteQuantifiedSize sizeMetric2 = container.ConsumedLoad.GetSizeMetric(instance);
    this.logger.LogVerbose("Database {0} has maximum physical size {1}, SoftDeletedThreshold of {2}, target size {3}, consumed physical size {4}", new object[]
    {
        container,
        sizeMetric,
        this.threshold,
        byteQuantifiedSize,
        sizeMetric2
    });
    if (sizeMetric2 >= byteQuantifiedSize)
    {
        SoftDeletedDatabaseCleanupRequest request = new SoftDeletedDatabaseCleanupRequest(this.clientFactory, (DirectoryDatabase)container.DirectoryObject, byteQuantifiedSize);
        base.Queue.EnqueueRequest(request);
    }
    return false;
}
public void BeginMailboxMove(BandMailboxRebalanceData rebalanceData, LoadMetric metric) { base.CallService(delegate() { this.Channel.BeginMailboxMove(rebalanceData, metric); }); }
public void BeginMailboxMove(BandMailboxRebalanceData rebalanceData, LoadMetric metric)
{
    base.ForwardExceptions(delegate()
    {
        rebalanceData.ConvertToFromSerializationFormat();
        this.serviceImpl.MoveMailboxes(rebalanceData);
    });
}
internal LoadMetricValue(LoadMetric loadMetric, long value)
{
    this.LoadMetric = loadMetric.FriendlyName;
    this.Value = value;
    if (loadMetric.IsSize)
    {
        this.Size = new ByteQuantifiedSize?(loadMetric.ToByteQuantifiedSize(value));
    }
}
public Task<Job> Execute(Job j)
{
    // 1 second (1000 ms) of requested work translates into 1 unit of load.
    int loadfactor = Convert.ToInt32(j.InitData) / 1000;
    if (loadfactor < 0)
    {
        loadfactor = 1; // guard against negative/invalid job data
    }

    // Take the load for this job and report it so the Cluster Resource Manager sees the increase.
    for (int i = 0; i < loadfactor; i++)
    {
        Interlocked.Increment(ref load);
    }
    var m = new LoadMetric("TCU", load);
    this.Partition.ReportLoad(new LoadMetric[] { m });

    var msg = String.Format("executing job {0} Node: {1}, Partition {2} ", j.Id, this.Context.NodeContext.NodeName, this.Partition.PartitionInfo.Id);
    LogClient.Log(msg);

    var t = new JobExecutor().Execute(j);

    msg = string.Format("DONE executing job {0} Node: {1}, Partition {2} ", j.Id, this.Context.NodeContext.NodeName, this.Partition.PartitionInfo.Id);
    LogClient.Log(msg);

    // Release the load taken for this job and report the new value.
    for (int i = 0; i < loadfactor; i++)
    {
        Interlocked.Decrement(ref load);
    }
    m = new LoadMetric("TCU", load);
    this.Partition.ReportLoad(new LoadMetric[] { m });

    return t;
}
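A hedged side note on the snippet above: the "TCU" load reported through ReportLoad only influences placement if the service declares a metric with that exact name. The sketch below shows one way to declare it in C# when creating the service through FabricClient; the application and service names, replica counts, and default loads are illustrative assumptions, and the same metric could instead be declared in the application manifest.

public static async Task CreateJobServiceAsync()
{
    var description = new System.Fabric.Description.StatefulServiceDescription
    {
        ApplicationName = new Uri("fabric:/JobApp"),        // hypothetical application
        ServiceName = new Uri("fabric:/JobApp/JobService"), // hypothetical service
        ServiceTypeName = "JobServiceType",
        HasPersistedState = true,
        PartitionSchemeDescription = new System.Fabric.Description.SingletonPartitionSchemeDescription(),
        TargetReplicaSetSize = 3,
        MinReplicaSetSize = 2
    };
    description.Metrics.Add(new System.Fabric.Description.StatefulServiceLoadMetricDescription
    {
        Name = "TCU", // must match the name passed to ReportLoad
        Weight = System.Fabric.Description.ServiceLoadMetricWeight.High,
        PrimaryDefaultLoad = 0,
        SecondaryDefaultLoad = 0
    });

    using (var fabricClient = new System.Fabric.FabricClient())
    {
        await fabricClient.ServiceManager.CreateServiceAsync(description);
    }
}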
/// <summary>
/// Reports the health and load of a node.
/// </summary>
/// <param name="ttl">Time to live for the health reports.</param>
public async Task ReportNodeHealthAndLoadAsync(TimeSpan ttl)
{
    const int MB = 1048576;
    HealthInformation hi = null;
    NodeHealthReport nhr = null;

    try
    {
        // Get the global memory load and report it as a node health property.
        NativeMethods.MEMORYSTATUSEX msex = new NativeMethods.MEMORYSTATUSEX();
        if (NativeMethods.GlobalMemoryStatus(ref msex))
        {
            // Check the error threshold before the warning threshold, otherwise Error is unreachable.
            HealthState hs = (msex.dwMemoryLoad > 95) ? HealthState.Error : (msex.dwMemoryLoad > 80) ? HealthState.Warning : HealthState.Ok;

            // Save the current memory load.
            MemoryLoad = msex.dwMemoryLoad;

            // Create the health information to report to Service Fabric.
            hi = new HealthInformation("NodeHealth", "MemoryLoad", hs);
            hi.TimeToLive = (ttl.TotalMilliseconds <= 0.0) ? TimeSpan.FromSeconds(30) : ttl; // fall back to 30s when no valid TTL is passed
            hi.Description = $"Percent of memory in use on this node: {msex.dwMemoryLoad}";
            hi.RemoveWhenExpired = true;
            hi.SequenceNumber = HealthInformation.AutoSequenceNumber;

            // Create a node health report and send it.
            nhr = new NodeHealthReport(Context.NodeContext.NodeName, hi);
            ServiceFabricClient.HealthManager.ReportHealth(nhr);
        }

        // Create the CPU health information and send the report to Service Fabric.
        hi = new HealthInformation("NodeHealth", "CPU", HealthState.Ok);
        hi.TimeToLive = (ttl.TotalMilliseconds <= 0.0) ? TimeSpan.FromSeconds(30) : ttl;
        hi.Description = $"Total CPU usage on this node: {_cpuCounter.NextValue()}";
        hi.RemoveWhenExpired = true;
        hi.SequenceNumber = HealthInformation.AutoSequenceNumber;
        nhr = new NodeHealthReport(Context.NodeContext.NodeName, hi);
        ServiceFabricClient.HealthManager.ReportHealth(nhr);

        // Get the number of deployed replicas of this service on this node.
        int serviceReplicaCount = 0;
        var replicaList = await ServiceFabricClient.QueryManager.GetDeployedReplicaListAsync(Context.NodeContext.NodeName, Application);
        for (int i = 0; i < replicaList.Count; i++)
        {
            if (Context.ServiceName == replicaList[i].ServiceName)
            {
                serviceReplicaCount++;
            }
        }

        // Sample the process CPU time and compute the CPU usage since the previous sample.
        DateTimeOffset oldSampleTime = _timeOfCpuSample;
        TimeSpan oldCpuSample = _cpuProcessTime;
        _cpuProcessTime = Process.GetCurrentProcess().TotalProcessorTime;
        _timeOfCpuSample = DateTimeOffset.UtcNow;

        long processTicks = (_cpuProcessTime - oldCpuSample).Ticks;
        long periodTicks = (_timeOfCpuSample - oldSampleTime).Ticks;
        serviceReplicaCount = Math.Max(1, serviceReplicaCount); // avoid dividing by zero when no replicas are found
        double cpuFraction = (periodTicks > 0) ? ((double)processTicks / periodTicks) : 0.0; // floating point, so the ratio is not truncated to zero
        long cpuPercent = (long)((cpuFraction / serviceReplicaCount) * 100);
        long partitionWorkingSet = ((Process.GetCurrentProcess().WorkingSet64 / MB) / serviceReplicaCount);

        // Report the partition load metrics.
        LoadMetric[] metrics = new LoadMetric[]
        {
            new LoadMetric("PartitionCPU", (int)cpuPercent),
            new LoadMetric("WorkingSetMB", Convert.ToInt32(partitionWorkingSet))
        };
        ReportLoad(metrics);
    }
    catch (Exception ex)
    {
        _eventSource.ServiceRequestFailed(Context.ServiceTypeName, Context.PartitionId, Context.ReplicaId, "ReportNodeHealthAndLoadAsync", ex.Message);
    }
}
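A possible way to drive the method above on a timer, sketched under the assumption that it lives in a Service Fabric StatefulService; the one-minute interval and the doubled TTL are illustrative choices, not taken from the original code:

protected override async Task RunAsync(CancellationToken cancellationToken)
{
    TimeSpan reportInterval = TimeSpan.FromMinutes(1);
    while (!cancellationToken.IsCancellationRequested)
    {
        // Give each report a TTL longer than the interval so it does not expire between iterations.
        await ReportNodeHealthAndLoadAsync(TimeSpan.FromTicks(reportInterval.Ticks * 2));
        await Task.Delay(reportInterval, cancellationToken);
    }
}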
void ILoadBalanceService.BeginMailboxMove(BandMailboxRebalanceData rebalanceData, LoadMetric metric) { this.client.BeginMailboxMove(rebalanceData, metric); }
public override void BeginMailboxMove(BandMailboxRebalanceData rebalanceData, LoadMetric metric) { base.BeginMailboxMove(rebalanceData.ToSerializationFormat(true), new LoadMetric(metric.Name, metric.IsSize)); }
public LoadCapacityConstraintValidationResult(LoadCapacityConstraint constraint, bool accepted, LoadMetric exceededMetric, long availableUnits, long requestedUnits) : base(constraint, accepted)
{
    this.ExceededMetric = exceededMetric;
    this.AvailableUnits = availableUnits;
    this.RequestedUnits = requestedUnits;
}
public override void BeginMailboxMove(BandMailboxRebalanceData rebalanceData, LoadMetric metric) { this.serviceImpl.MoveMailboxes(rebalanceData); }
private void BalanceBand(Band band, double totalDatabaseWeight, BandData[] bandData)
{
    this.logger.Log(MigrationEventType.Information, "Balancing band '{0}' with '{1}' data entries.", new object[] { band, bandData.Count<BandData>() });
    double moveUnitsPerWeight = (double)bandData.Sum((BandData data) => data.TotalWeight) / totalDatabaseWeight;
    double num = (band.Profile == Band.BandProfile.SizeBased) ? band.MailboxSizeWeightFactor : 0.0;
    double deviation = (double)this.settings.WeightDeviationPercent / 100.0;
    ByteQuantifiedSize byteQuantifiedSize = ByteQuantifiedSize.FromGB((ulong)this.settings.MaximumAmountOfDataPerRoundGb);
    this.logger.Log(MigrationEventType.Verbose, "Moving with a deviation of '{0}', a minimum delta of '{1}' and a maximum of '{2}' per database.", new object[] { deviation, num, byteQuantifiedSize });
    List<BandData> list = (from data in bandData
                           orderby this.GetBandDelta(moveUnitsPerWeight, deviation, data) descending
                           select data).ToList<BandData>();
    foreach (BandData bandData2 in list)
    {
        double num2 = this.GetBandDelta(moveUnitsPerWeight, deviation, bandData2);
        this.logger.Log(MigrationEventType.Verbose, "Attempting to balance band {0} for database {1}, current delta is {2}.", new object[] { band, bandData2.Database, num2 });
        LoadMetric instance = PhysicalSize.Instance;
        if (num2 <= num)
        {
            this.logger.Log(MigrationEventType.Information, "Not balancing band {0} for database {1} because delta {2} is either less than the minimum of {3} or database has no more available space ({4}). We're done.", new object[] { band, bandData2.Database, num2, num, bandData2.Database.AvailableCapacity.GetSizeMetric(instance) });
            break;
        }
        foreach (BandData bandData3 in from data in bandData
                                       orderby data.TotalWeight
                                       select data)
        {
            if (!bandData3.Database.CanAcceptBalancingLoad)
            {
                this.logger.Log(MigrationEventType.Verbose, "Database {0} can not be used as target because it can't take LB load.", new object[] { bandData3.Database });
            }
            else
            {
                double num3 = this.GetBandDelta(moveUnitsPerWeight, 0.0 - deviation, bandData3);
                this.logger.Log(MigrationEventType.Verbose, "Trying to place weight into {0} (current delta: {1}).", new object[] { bandData3.Database, num3 });
                ByteQuantifiedSize sizeMetric = bandData3.Database.AvailableCapacity.GetSizeMetric(instance);
                if (0.0 - num3 > sizeMetric.ToMB())
                {
                    this.logger.Log(MigrationEventType.Verbose, "Target delta of {0} is larger than the {1} available space in the database, adjusting.", new object[] { num3, sizeMetric });
                    num3 = 0.0 - sizeMetric.ToMB();
                    this.logger.Log(MigrationEventType.Verbose, "New target delta is {0}.", new object[] { num3 });
                }
                if (num3 >= 0.0)
                {
                    this.logger.Log(MigrationEventType.Verbose, "Target database is above the threshold, skipping as a target.", new object[0]);
                }
                else
                {
                    ByteQuantifiedSize sizeMetric2 = bandData3.Database.CommittedLoad.GetSizeMetric(instance);
                    ByteQuantifiedSize byteQuantifiedSize2;
                    if (sizeMetric2 > byteQuantifiedSize)
                    {
                        byteQuantifiedSize2 = ByteQuantifiedSize.Zero;
                    }
                    else
                    {
                        byteQuantifiedSize2 = byteQuantifiedSize - sizeMetric2;
                    }
                    int num4 = (int)Math.Floor(byteQuantifiedSize2.ToMB() / band.MailboxSizeWeightFactor);
                    if (num4 <= 0)
                    {
                        this.logger.Log(MigrationEventType.Verbose, "Target database committed load is {0} which is over the limit of {1}, skipping as a target.", new object[] { sizeMetric2, byteQuantifiedSize });
                    }
                    else
                    {
                        int num5 = (int)Math.Min(Math.Round(Math.Min(Math.Abs(num2), Math.Abs(num3))), (double)num4);
                        this.logger.Log(MigrationEventType.Verbose, "Projected to move {0} units out of {1} and into {2}", new object[] { num5, bandData2.Database, bandData3.Database });
                        if (num5 > 0)
                        {
                            ByteQuantifiedSize value = ByteQuantifiedSize.FromMB((ulong)((double)num5 * band.MailboxSizeWeightFactor));
                            LoadMetricStorage loadMetricStorage = new LoadMetricStorage();
                            loadMetricStorage[band] = (long)num5;
                            BandMailboxRebalanceData item = new BandMailboxRebalanceData(bandData2.Database, bandData3.Database, loadMetricStorage);
                            bandData3.TotalWeight += num5;
                            LoadMetricStorage committedLoad;
                            LoadMetric metric;
                            (committedLoad = bandData3.Database.CommittedLoad)[metric = instance] = committedLoad[metric] + (long)value.ToBytes();
                            this.totalDataSelectedToMove += value;
                            bandData2.TotalWeight -= num5;
                            this.results.Add(item);
                            num2 -= (double)num5;
                        }
                        if (num2 <= num)
                        {
                            break;
                        }
                    }
                }
            }
        }
    }
}
public virtual void BeginMailboxMove(BandMailboxRebalanceData rebalanceData, LoadMetric metric) { this.service.BeginMailboxMove(rebalanceData, metric); }