/// <summary>
/// Returns a dataset loading proposal for the given expected load.
/// </summary>
/// <param name="expectedLoad">The load the dataset is expected to put on the machine.</param>
/// <returns>
/// A proposal that indicates if the machine can load the dataset and what the
/// expected loading performance will be like.
/// </returns>
public DatasetActivationProposal ProposeForLocalMachine(ExpectedDatasetLoad expectedLoad)
{
    // Query the local machine here, rather than caching the result, so that
    // the proposal reflects the current state of the system.
    var machine = new Machine();

    // Physical (per-process) memory: how much of the per-process limit the
    // dataset would consume once expanded in memory.
    var memoryNeeded = expectedLoad.InMemorySizeInBytes * expectedLoad.RelativeMemoryExpansionWhileRunning;
    var physicalMemoryPercentage = memoryNeeded / (machine.Specification.PerProcessMemoryInKilobytes * 1024) * 100;

    // Virtual memory: the same requirement measured against the total
    // virtual memory available on the machine.
    var virtualMemoryPercentage = memoryNeeded / (machine.Specification.TotalVirtualMemoryInKilobytes * 1024) * 100;

    // Disk: measured against the single disk with the most free space,
    // i.e. the dataset is assumed to live on one disk, not be spread out.
    var diskNeeded = expectedLoad.OnDiskSizeInBytes * expectedLoad.RelativeOnDiskExpansionAfterRunning;
    var largestFreeSpace = machine.Specification.Disks().Select(disk => disk.AvailableSpaceInBytes).Max();
    var diskPercentage = diskNeeded / largestFreeSpace * 100;

    // Loading time.
    // Currently can't get this because we don't know how fast the disk is. And even if we know that
    // then we still don't know how fast we can unpack the information ... so we just fake it.
    var loadTime = new TimeSpan(0, 1, 0);

    // Transfer time.
    // And we can't get this one either because we don't know the speed of the connection we're going to use ...
    var transferTime = new TimeSpan(0, 1, 0);

    return new DatasetActivationProposal
        {
            Endpoint = m_LocalEndpoint,
            IsAvailable = false,
            ActivationTime = loadTime,
            TransferTime = transferTime,
            PercentageOfAvailableDisk = (int)Math.Ceiling(diskPercentage),
            PercentageOfMaximumMemory = (int)Math.Ceiling(virtualMemoryPercentage),
            PercentageOfPhysicalMemory = (int)Math.Ceiling(physicalMemoryPercentage),
        };
}
/// <summary>
/// Returns a dataset loading proposal for the given expected load.
/// </summary>
/// <param name="expectedLoad">The load the dataset is expected to put on the machine.</param>
/// <returns>
/// A proposal that indicates if the machine can load the dataset and what the
/// expected loading performance will be like.
/// </returns>
public DatasetActivationProposal ProposeForLocalMachine(ExpectedDatasetLoad expectedLoad)
{
    // Grab the information about the local machine. Do this here so that we get
    // the most up to date information we can.
    var machine = new Machine();

    // RAM memory: expected in-memory footprint as a percentage of the
    // per-process memory limit.
    // NOTE(review): if InMemorySizeInBytes / the expansion factor /
    // PerProcessMemoryInKilobytes are integral types, these divisions
    // truncate before the * 100 and the percentage collapses to 0 or 100
    // in steps — confirm the operands are floating point.
    var requiredMemory = expectedLoad.InMemorySizeInBytes * expectedLoad.RelativeMemoryExpansionWhileRunning;
    var maximumMemoryPercentage = requiredMemory / (machine.Specification.PerProcessMemoryInKilobytes * 1024) * 100;

    // Virtual memory: the same footprint measured against total virtual memory.
    var maximumVirtualMemoryPercentage = requiredMemory / (machine.Specification.TotalVirtualMemoryInKilobytes * 1024) * 100;

    // Disk: required space measured against the single disk with the most
    // free space (assumes the dataset lands on one disk).
    var requiredDisk = expectedLoad.OnDiskSizeInBytes * expectedLoad.RelativeOnDiskExpansionAfterRunning;
    var maximumFreeDisk = (from disk in machine.Specification.Disks() select disk.AvailableSpaceInBytes).Max();
    var maximumDiskPercentage = requiredDisk / maximumFreeDisk * 100;

    // Loading time
    // Currently can't get this because we don't know how fast the disk is. And even if we know that
    // then we still don't know how fast we can unpack the information ... so we just fake it.
    var loadTime = new TimeSpan(0, 1, 0);

    // Transfer time
    // And we can't get this one either because we don't know the speed of the connection we're going to use ...
    var transferTime = new TimeSpan(0, 1, 0);

    // NOTE(review): IsAvailable is hard-coded to false even though the
    // percentages are computed — presumably callers decide availability
    // from the percentages instead; confirm this is intentional.
    return new DatasetActivationProposal
        {
            Endpoint = m_LocalEndpoint,
            IsAvailable = false,
            ActivationTime = loadTime,
            TransferTime = transferTime,
            PercentageOfAvailableDisk = (int)Math.Ceiling(maximumDiskPercentage),
            PercentageOfMaximumMemory = (int)Math.Ceiling(maximumVirtualMemoryPercentage),
            PercentageOfPhysicalMemory = (int)Math.Ceiling(maximumMemoryPercentage),
        };
}
/// <summary>
/// Asks each usable node for a loading proposal and lazily yields the
/// proposals that report themselves as available, roughly in order of
/// task completion.
/// </summary>
/// <param name="load">The expected load the dataset places on a machine.</param>
/// <param name="preferedLocations">The locations the dataset may be distributed to.</param>
/// <param name="usableNodes">The endpoints (and their activation commands) that may host the dataset.</param>
/// <param name="token">The token used to cancel the gathering of proposals.</param>
/// <returns>A lazy sequence of the proposals whose <c>IsAvailable</c> flag is set.</returns>
private static IEnumerable <DatasetActivationProposal> OrderProposals( ExpectedDatasetLoad load, DistributionLocations preferedLocations, IEnumerable <Tuple <EndpointId, IDatasetActivationCommands> > usableNodes, CancellationToken token)
{
    var loadingProposals = new Queue <Task <DatasetActivationProposal> >();
    bool shouldLoad = ShouldLoadDistributed(preferedLocations);
    // Fan out: start a proposal request on every node. If distribution is
    // not allowed the loop still runs so cancellation is observed per node.
    foreach (var pair in usableNodes)
    {
        if (token.IsCancellationRequested)
        {
            token.ThrowIfCancellationRequested();
        }

        if (shouldLoad)
        {
            try
            {
                var result = pair.Item2.ProposeFor(load);
                loadingProposals.Enqueue(result);
            }
            catch (CommandInvocationFailedException)
            {
                // Chances are the endpoint just disappeared
                // so we just ignore it and move on.
            }
        }
    }

    // Fan in: cycle through the queue, yielding proposals as their tasks
    // finish; unfinished tasks are pushed to the back and revisited.
    while (loadingProposals.Count > 0)
    {
        if (token.IsCancellationRequested)
        {
            // Just abandon any tasks that were running but not finished
            token.ThrowIfCancellationRequested();
        }

        var task = loadingProposals.Dequeue();
        if (!task.IsCompleted)
        {
            // NOTE(review): `Count > 1` means that when exactly one OTHER
            // task remains we block on the current task instead of
            // round-robining — possibly intended to be `> 0`; confirm.
            if (loadingProposals.Count > 1)
            {
                loadingProposals.Enqueue(task);
                continue;
            }
            else
            {
                try
                {
                    task.Wait();
                }
                catch (AggregateException)
                {
                    // Failure details surface via IsFaulted below.
                    continue;
                }
            }
        }

        if (task.IsCanceled || task.IsFaulted)
        {
            // Get the exception so that the task doesn't throw in
            // the finalizer. Don't do anything with this though
            // because we don't really care.
            var exception = task.Exception;
            continue;
        }

        var proposal = task.Result;
        if (proposal.IsAvailable)
        {
            yield return(proposal);
        }
    }
}
/// <summary>
/// Asks each usable node for a loading proposal and lazily yields the
/// proposals that report themselves as available, roughly in completion order.
/// </summary>
/// <param name="load">The expected load the dataset places on a machine.</param>
/// <param name="preferedLocations">The locations the dataset may be distributed to.</param>
/// <param name="usableNodes">The endpoints (and their activation commands) that may host the dataset.</param>
/// <param name="token">The token used to cancel the gathering of proposals.</param>
/// <returns>A lazy sequence of the proposals whose <c>IsAvailable</c> flag is set.</returns>
private static IEnumerable<DatasetActivationProposal> OrderProposals(
    ExpectedDatasetLoad load,
    DistributionLocations preferedLocations,
    IEnumerable<Tuple<EndpointId, IDatasetActivationCommands>> usableNodes,
    CancellationToken token)
{
    var pending = new Queue<Task<DatasetActivationProposal>>();
    bool distribute = ShouldLoadDistributed(preferedLocations);

    // Fan out: kick off a proposal request on every node. Cancellation is
    // checked once per node even when distribution is disallowed.
    foreach (var node in usableNodes)
    {
        if (token.IsCancellationRequested)
        {
            token.ThrowIfCancellationRequested();
        }

        if (!distribute)
        {
            continue;
        }

        try
        {
            pending.Enqueue(node.Item2.ProposeFor(load));
        }
        catch (CommandInvocationFailedException)
        {
            // The endpoint most likely vanished between discovery and this
            // call; skip it and carry on with the remaining nodes.
        }
    }

    // Fan in: cycle through the queue, yielding each proposal as its task
    // finishes; unfinished tasks go to the back of the queue.
    while (pending.Count > 0)
    {
        if (token.IsCancellationRequested)
        {
            // Abandon whatever tasks are still in flight.
            token.ThrowIfCancellationRequested();
        }

        var current = pending.Dequeue();
        if (!current.IsCompleted)
        {
            if (pending.Count > 1)
            {
                // Other tasks may already be done; revisit this one later.
                pending.Enqueue(current);
                continue;
            }

            try
            {
                current.Wait();
            }
            catch (AggregateException)
            {
                // The faulted state is handled via IsFaulted below.
                continue;
            }
        }

        if (current.IsCanceled || current.IsFaulted)
        {
            // Observe the exception so the task doesn't rethrow from its
            // finalizer; the value itself is of no interest here.
            var unused = current.Exception;
            continue;
        }

        var candidate = current.Result;
        if (candidate.IsAvailable)
        {
            yield return candidate;
        }
    }
}