//properties //methods
/// <summary>
/// Use this to convert a number from one unit of byte measurement to another. For example, convert from bytes to kilobytes or vice versa.
/// For example, double result = ConvertByteUnits(2048.0, ByteUnits.Bytes, ByteUnits.Kilobytes) returns 2.0 (i.e. 2KB).
/// </summary>
/// <param name="fromNumber">Number to convert.</param>
/// <param name="fromByteUnits">Current unit of measurement.</param>
/// <param name="toByteUnits">Desired unit of measurement.</param>
/// <returns>Revised number in new unit of measurement.</returns>
public static double ConvertByteUnits(double fromNumber, ByteUnits fromByteUnits, ByteUnits toByteUnits)
{
    // Scale the value into raw bytes using the source unit's multiplier and
    // then divide by the target unit's multiplier.  (The former [-1.0]
    // initializer was a dead store that was always overwritten.)
    return (fromNumber * ByteUnitMultipliers[(int)fromByteUnits]) / ByteUnitMultipliers[(int)toByteUnits];
}
/// <summary>
/// Verifies the unit-suffix formatting helpers: the decimal (power-of-ten)
/// suffixes [m/K/M/G/T/P/E] and the binary (power-of-two) suffixes
/// [Ki/Mi/Gi/Ti/Pi/Ei], none of which carry a trailing "B".
/// </summary>
public void Strings()
{
    // Milli-bytes and raw bytes.

    Assert.Equal("500m", ByteUnits.ToMilliByteString(0.5m));
    Assert.Equal("1000000m", ByteUnits.ToMilliByteString(1000));

    Assert.Equal("500", ByteUnits.ToByteString(500));
    Assert.Equal("1000000", ByteUnits.ToByteString(1000000));

    // Kilo (10^3) and kibi (2^10).

    Assert.Equal("1K", ByteUnits.ToKString(1000));
    Assert.Equal("2K", ByteUnits.ToKString(2000));
    Assert.Equal("0.5K", ByteUnits.ToKString(500));

    Assert.Equal("1Ki", ByteUnits.ToKiString(1024));
    Assert.Equal("2Ki", ByteUnits.ToKiString(2048));
    Assert.Equal("0.5Ki", ByteUnits.ToKiString(512));

    // Mega (10^6) and mebi (2^20).

    Assert.Equal("1M", ByteUnits.ToMString(1000000));
    Assert.Equal("2M", ByteUnits.ToMString(2000000));
    Assert.Equal("0.5M", ByteUnits.ToMString(500000));

    Assert.Equal("1Mi", ByteUnits.ToMiString(1 * ByteUnits.MebiBytes));
    Assert.Equal("2Mi", ByteUnits.ToMiString(2 * ByteUnits.MebiBytes));
    Assert.Equal("0.5Mi", ByteUnits.ToMiString(ByteUnits.MebiBytes / 2));

    // Giga (10^9) and gibi (2^30).

    Assert.Equal("1G", ByteUnits.ToGString(1000000000));
    Assert.Equal("2G", ByteUnits.ToGString(2000000000));
    Assert.Equal("0.5G", ByteUnits.ToGString(500000000));

    Assert.Equal("1Gi", ByteUnits.ToGiString(1 * ByteUnits.GibiBytes));
    Assert.Equal("2Gi", ByteUnits.ToGiString(2 * ByteUnits.GibiBytes));
    Assert.Equal("0.5Gi", ByteUnits.ToGiString(ByteUnits.GibiBytes / 2));

    // Tera (10^12) and tebi (2^40).

    Assert.Equal("1T", ByteUnits.ToTString(1000000000000));
    Assert.Equal("2T", ByteUnits.ToTString(2000000000000));
    Assert.Equal("0.5T", ByteUnits.ToTString(500000000000));

    Assert.Equal("1Ti", ByteUnits.ToTiString(1 * ByteUnits.TebiBytes));
    Assert.Equal("2Ti", ByteUnits.ToTiString(2 * ByteUnits.TebiBytes));
    Assert.Equal("0.5Ti", ByteUnits.ToTiString(ByteUnits.TebiBytes / 2));

    // Peta (10^15) and pebi (2^50).

    Assert.Equal("1P", ByteUnits.ToPString(1000000000000000));
    Assert.Equal("2P", ByteUnits.ToPString(2000000000000000));
    Assert.Equal("0.5P", ByteUnits.ToPString(500000000000000));

    Assert.Equal("1Pi", ByteUnits.ToPiString(1 * ByteUnits.PebiBytes));
    Assert.Equal("2Pi", ByteUnits.ToPiString(2 * ByteUnits.PebiBytes));
    Assert.Equal("0.5Pi", ByteUnits.ToPiString(ByteUnits.PebiBytes / 2));

    // Exa (10^18) and exbi (2^60).

    Assert.Equal("1E", ByteUnits.ToEString(1000000000000000000));
    Assert.Equal("2E", ByteUnits.ToEString(2000000000000000000));
    Assert.Equal("0.5E", ByteUnits.ToEString(500000000000000000));

    Assert.Equal("1Ei", ByteUnits.ToEiString(1 * ByteUnits.ExbiBytes));
    Assert.Equal("2Ei", ByteUnits.ToEiString(2 * ByteUnits.ExbiBytes));
    Assert.Equal("0.5Ei", ByteUnits.ToEiString(ByteUnits.ExbiBytes / 2));
}
/// <summary>
/// Verifies the unit-suffix formatting helpers that DO include the trailing
/// "B": decimal [KB/MB/GB/TB/PB/EB] and binary [KiB/MiB/GiB/TiB/PiB/EiB].
/// </summary>
public void Strings()
{
    // Raw bytes.

    Assert.Equal("500", ByteUnits.ToByteString(500));
    Assert.Equal("1000000", ByteUnits.ToByteString(1000000));

    // Kilo/kibi.

    Assert.Equal("1KB", ByteUnits.ToKB(1000));
    Assert.Equal("2KB", ByteUnits.ToKB(2000));
    Assert.Equal("0.5KB", ByteUnits.ToKB(500));

    Assert.Equal("1KiB", ByteUnits.ToKiB(1024));
    Assert.Equal("2KiB", ByteUnits.ToKiB(2048));
    Assert.Equal("0.5KiB", ByteUnits.ToKiB(512));

    // Mega/mebi.

    Assert.Equal("1MB", ByteUnits.ToMB(1000000));
    Assert.Equal("2MB", ByteUnits.ToMB(2000000));
    Assert.Equal("0.5MB", ByteUnits.ToMB(500000));

    Assert.Equal("1MiB", ByteUnits.ToMiB(1 * ByteUnits.MebiBytes));
    Assert.Equal("2MiB", ByteUnits.ToMiB(2 * ByteUnits.MebiBytes));
    Assert.Equal("0.5MiB", ByteUnits.ToMiB(ByteUnits.MebiBytes / 2));

    // Giga/gibi.

    Assert.Equal("1GB", ByteUnits.ToGB(1000000000));
    Assert.Equal("2GB", ByteUnits.ToGB(2000000000));
    Assert.Equal("0.5GB", ByteUnits.ToGB(500000000));

    Assert.Equal("1GiB", ByteUnits.ToGiB(1 * ByteUnits.GibiBytes));
    Assert.Equal("2GiB", ByteUnits.ToGiB(2 * ByteUnits.GibiBytes));
    Assert.Equal("0.5GiB", ByteUnits.ToGiB(ByteUnits.GibiBytes / 2));

    // Tera/tebi.

    Assert.Equal("1TB", ByteUnits.ToTB(1000000000000));
    Assert.Equal("2TB", ByteUnits.ToTB(2000000000000));
    Assert.Equal("0.5TB", ByteUnits.ToTB(500000000000));

    Assert.Equal("1TiB", ByteUnits.ToTiB(1 * ByteUnits.TebiBytes));
    Assert.Equal("2TiB", ByteUnits.ToTiB(2 * ByteUnits.TebiBytes));
    Assert.Equal("0.5TiB", ByteUnits.ToTiB(ByteUnits.TebiBytes / 2));

    // Peta/pebi.

    Assert.Equal("1PB", ByteUnits.ToPB(1000000000000000));
    Assert.Equal("2PB", ByteUnits.ToPB(2000000000000000));
    Assert.Equal("0.5PB", ByteUnits.ToPB(500000000000000));

    Assert.Equal("1PiB", ByteUnits.ToPiB(1 * ByteUnits.PebiBytes));
    Assert.Equal("2PiB", ByteUnits.ToPiB(2 * ByteUnits.PebiBytes));
    Assert.Equal("0.5PiB", ByteUnits.ToPiB(ByteUnits.PebiBytes / 2));

    // Exa/exbi.

    Assert.Equal("1EB", ByteUnits.ToEB(1000000000000000000));
    Assert.Equal("2EB", ByteUnits.ToEB(2000000000000000000));
    Assert.Equal("0.5EB", ByteUnits.ToEB(500000000000000000));

    Assert.Equal("1EiB", ByteUnits.ToEiB(1 * ByteUnits.ExbiBytes));
    Assert.Equal("2EiB", ByteUnits.ToEiB(2 * ByteUnits.ExbiBytes));
    Assert.Equal("0.5EiB", ByteUnits.ToEiB(ByteUnits.ExbiBytes / 2));
}
/// <summary>
/// Verifies that [ByteUnits.TryParse()] rejects malformed input: null, blank,
/// non-numeric, negative, and inputs with garbage between the number and unit.
/// </summary>
public void ParseErrors()
{
    // Every one of these inputs must fail to parse.

    foreach (var badInput in new string[] { null, "", " ", "ABC", "-10", "-20KB", "10a", "10akb" })
    {
        Assert.False(ByteUnits.TryParse(badInput, out var value));
    }
}
/// <summary>
/// Validates the options.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <exception cref="ClusterDefinitionException">Thrown if the definition is not valid.</exception>
internal void Validate(ClusterDefinition clusterDefinition)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    var minioOptionsPrefix = $"{nameof(ClusterDefinition.Storage)}.{nameof(ClusterDefinition.Storage.Minio)}";

    // Decide which nodes actually host Minio by setting [MinioInternal]:
    //
    //   * When no node is explicitly labeled for Minio, target every node
    //     (when pods are allowed on the control-plane) or just the workers.
    //
    //   * Otherwise honor the explicit [Minio] labels.

    if (!clusterDefinition.Nodes.Any(n => n.Labels.Minio))
    {
        if (clusterDefinition.Kubernetes.AllowPodsOnControlPlane.GetValueOrDefault() == true)
        {
            foreach (var node in clusterDefinition.Nodes)
            {
                node.Labels.MinioInternal = true;
            }
        }
        else
        {
            foreach (var node in clusterDefinition.Workers)
            {
                node.Labels.MinioInternal = true;
            }
        }
    }
    else
    {
        foreach (var node in clusterDefinition.Nodes.Where(n => n.Labels.Minio))
        {
            node.Labels.MinioInternal = true;
        }
    }

    // Minio requires at least 4 volumes cluster-wide (hosting nodes x volumes per node).

    var serverCount = clusterDefinition.Nodes.Where(n => n.Labels.MinioInternal).Count();

    if (serverCount * VolumesPerNode < 4)
    {
        throw new ClusterDefinitionException($"Minio requires at least [4] volumes within the cluster. Increase [{minioOptionsPrefix}.{nameof(MinioOptions.VolumesPerNode)}] so the number of nodes hosting Minio times [{VolumesPerNode}] is at least [4].");
    }

    // Verify that each Minio hosting node still has enough disk left over
    // after carving out its Minio volumes.
    //
    // NOTE(review): the [osDisk] local is loaded from [GetDataDiskSize()] but
    // the error message talks about the OS (boot) disk -- confirm these refer
    // to the same disk in this deployment model.

    var minOsDiskAfterMinio = ByteUnits.Parse(KubeConst.MinimumOsDiskAfterMinio);

    foreach (var node in clusterDefinition.Nodes.Where(node => node.Labels.MinioInternal))
    {
        var osDisk = ByteUnits.Parse(node.GetDataDiskSize(clusterDefinition));
        var minioVolumes = ByteUnits.Parse(VolumeSize) * VolumesPerNode;

        if (osDisk - minioVolumes < minOsDiskAfterMinio)
        {
            throw new ClusterDefinitionException($"Node [{node.Name}] Operating System (boot) disk is too small. Increase this to at least [{ByteUnits.Humanize(minOsDiskAfterMinio + minioVolumes, powerOfTwo: true, spaceBeforeUnit: false)}].");
        }
    }
}
/// <summary>
/// Ensures that a VM memory or disk size specification is valid and also
/// converts the value to the corresponding long count.
/// </summary>
/// <param name="sizeValue">The size value string.</param>
/// <param name="optionsType">Type of the property holding the size property (used for error reporting).</param>
/// <param name="propertyName">The size property name (used for error reporting).</param>
/// <returns>The size converted into a <c>long</c>.</returns>
/// <exception cref="ClusterDefinitionException">Thrown if the size is not valid.</exception>
public static long ValidateSize(string sizeValue, Type optionsType, string propertyName)
{
    // Guard: the specification must be present.

    if (string.IsNullOrEmpty(sizeValue))
    {
        throw new ClusterDefinitionException($"[{optionsType.Name}.{propertyName}] cannot be NULL or empty.");
    }

    // Guard: the specification must be a parsable byte count.

    if (!ByteUnits.TryParse(sizeValue, out var parsedSize))
    {
        throw new ClusterDefinitionException($"[{optionsType.Name}.{propertyName}={sizeValue}] cannot be parsed.");
    }

    return (long)parsedSize;
}
/// <summary>
/// Returns the size in bytes of RAM to allocate to the MDS cache on this node
/// when the integrated Ceph storage cluster is enabled and MDS is deployed to
/// the node.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <returns>The size in bytes or zero if Ceph is not enabled.</returns>
public decimal GetCephMDSCacheSize(ClusterDefinition clusterDefinition)
{
    // No cache is allocated when Ceph isn't enabled for the cluster.

    if (!clusterDefinition.Ceph.Enabled)
    {
        return 0;
    }

    // Default the node label from the cluster-wide setting when it's
    // missing or blank.  (Note: [??=] wouldn't cover the empty string.)

    if (string.IsNullOrEmpty(Labels.CephMDSCacheSize))
    {
        Labels.CephMDSCacheSize = clusterDefinition.Ceph.MDSCacheSize;
    }

    return ByteUnits.Parse(Labels.CephMDSCacheSize);
}
/// <summary>
/// Verifies [ByteUnits.Humanize()] using decimal (power-of-ten) units with no
/// space before the unit and the "B" suffix stripped.
/// </summary>
public void Humanize_PowerOfTen_WithoutSpace_NoB()
{
    // Values below 1K render as plain numbers.

    Assert.Equal("0", ByteUnits.Humanize(0, spaceBeforeUnit: false, removeByteUnit: true));
    Assert.Equal("500", ByteUnits.Humanize(500, spaceBeforeUnit: false));

    // Whole and fractional multiples of each decimal unit.

    Assert.Equal("1K", ByteUnits.Humanize(ByteUnits.KiloBytes, spaceBeforeUnit: false, removeByteUnit: true));
    Assert.Equal("1.5K", ByteUnits.Humanize(ByteUnits.KiloBytes + ByteUnits.KiloBytes / 2, spaceBeforeUnit: false, removeByteUnit: true));

    Assert.Equal("1M", ByteUnits.Humanize(ByteUnits.MegaBytes, spaceBeforeUnit: false, removeByteUnit: true));
    Assert.Equal("1.5M", ByteUnits.Humanize(ByteUnits.MegaBytes + ByteUnits.MegaBytes / 2, spaceBeforeUnit: false, removeByteUnit: true));

    Assert.Equal("1G", ByteUnits.Humanize(ByteUnits.GigaBytes, spaceBeforeUnit: false, removeByteUnit: true));
    Assert.Equal("1.5G", ByteUnits.Humanize(ByteUnits.GigaBytes + ByteUnits.GigaBytes / 2, spaceBeforeUnit: false, removeByteUnit: true));

    Assert.Equal("1T", ByteUnits.Humanize(ByteUnits.TeraBytes, spaceBeforeUnit: false, removeByteUnit: true));
    Assert.Equal("1.5T", ByteUnits.Humanize(ByteUnits.TeraBytes + ByteUnits.TeraBytes / 2, spaceBeforeUnit: false, removeByteUnit: true));

    Assert.Equal("1P", ByteUnits.Humanize(ByteUnits.PetaBytes, spaceBeforeUnit: false, removeByteUnit: true));
    Assert.Equal("1.5P", ByteUnits.Humanize(ByteUnits.PetaBytes + ByteUnits.PetaBytes / 2, spaceBeforeUnit: false, removeByteUnit: true));

    Assert.Equal("1E", ByteUnits.Humanize(ByteUnits.ExaBytes, spaceBeforeUnit: false, removeByteUnit: true));
    Assert.Equal("1.5E", ByteUnits.Humanize(ByteUnits.ExaBytes + ByteUnits.ExaBytes / 2, spaceBeforeUnit: false, removeByteUnit: true));

    // Verify that negative numbers are not supported.

    Assert.Throws<ArgumentException>(() => ByteUnits.Humanize(-1, spaceBeforeUnit: false, removeByteUnit: true));
}
/// <summary>
/// Verifies [ByteUnits.Humanize()] default formatting: decimal (power-of-ten)
/// units with a space before the unit and the "B" suffix retained.
/// </summary>
public void Humanize_PowerOfTen_WithSpace()
{
    // Values below 1K render as plain numbers.

    Assert.Equal("0", ByteUnits.Humanize(0));
    Assert.Equal("500", ByteUnits.Humanize(500));

    // Whole and fractional multiples of each decimal unit.

    Assert.Equal("1 KB", ByteUnits.Humanize(ByteUnits.KiloBytes));
    Assert.Equal("1.5 KB", ByteUnits.Humanize(ByteUnits.KiloBytes + ByteUnits.KiloBytes / 2));

    Assert.Equal("1 MB", ByteUnits.Humanize(ByteUnits.MegaBytes));
    Assert.Equal("1.5 MB", ByteUnits.Humanize(ByteUnits.MegaBytes + ByteUnits.MegaBytes / 2));

    Assert.Equal("1 GB", ByteUnits.Humanize(ByteUnits.GigaBytes));
    Assert.Equal("1.5 GB", ByteUnits.Humanize(ByteUnits.GigaBytes + ByteUnits.GigaBytes / 2));

    Assert.Equal("1 TB", ByteUnits.Humanize(ByteUnits.TeraBytes));
    Assert.Equal("1.5 TB", ByteUnits.Humanize(ByteUnits.TeraBytes + ByteUnits.TeraBytes / 2));

    Assert.Equal("1 PB", ByteUnits.Humanize(ByteUnits.PetaBytes));
    Assert.Equal("1.5 PB", ByteUnits.Humanize(ByteUnits.PetaBytes + ByteUnits.PetaBytes / 2));

    Assert.Equal("1 EB", ByteUnits.Humanize(ByteUnits.ExaBytes));
    Assert.Equal("1.5 EB", ByteUnits.Humanize(ByteUnits.ExaBytes + ByteUnits.ExaBytes / 2));

    // Verify that negative numbers are not supported.

    Assert.Throws<ArgumentException>(() => ByteUnits.Humanize(-1));
}
/// <summary>
/// Verifies [ByteUnits.Humanize()] using binary (power-of-two) units with a
/// space before the unit and the "B" suffix stripped.
/// </summary>
public void Humanize_PowerOfTwo_WithSpace_NoB()
{
    // Values below 1Ki render as plain numbers.

    Assert.Equal("0", ByteUnits.Humanize(0, powerOfTwo: true, removeByteUnit: true));
    Assert.Equal("500", ByteUnits.Humanize(500, powerOfTwo: true));
    Assert.Equal("1000", ByteUnits.Humanize(1000, powerOfTwo: true, removeByteUnit: true));

    // Whole and fractional multiples of each binary unit.

    Assert.Equal("1 Ki", ByteUnits.Humanize(ByteUnits.KibiBytes, powerOfTwo: true, removeByteUnit: true));
    Assert.Equal("1.5 Ki", ByteUnits.Humanize(ByteUnits.KibiBytes + ByteUnits.KibiBytes / 2, powerOfTwo: true, removeByteUnit: true));

    Assert.Equal("1 Mi", ByteUnits.Humanize(ByteUnits.MebiBytes, powerOfTwo: true, removeByteUnit: true));
    Assert.Equal("1.5 Mi", ByteUnits.Humanize(ByteUnits.MebiBytes + ByteUnits.MebiBytes / 2, powerOfTwo: true, removeByteUnit: true));

    Assert.Equal("1 Gi", ByteUnits.Humanize(ByteUnits.GibiBytes, powerOfTwo: true, removeByteUnit: true));
    Assert.Equal("1.5 Gi", ByteUnits.Humanize(ByteUnits.GibiBytes + ByteUnits.GibiBytes / 2, powerOfTwo: true, removeByteUnit: true));

    Assert.Equal("1 Ti", ByteUnits.Humanize(ByteUnits.TebiBytes, powerOfTwo: true, removeByteUnit: true));
    Assert.Equal("1.5 Ti", ByteUnits.Humanize(ByteUnits.TebiBytes + ByteUnits.TebiBytes / 2, powerOfTwo: true, removeByteUnit: true));

    Assert.Equal("1 Pi", ByteUnits.Humanize(ByteUnits.PebiBytes, powerOfTwo: true, removeByteUnit: true));
    Assert.Equal("1.5 Pi", ByteUnits.Humanize(ByteUnits.PebiBytes + ByteUnits.PebiBytes / 2, powerOfTwo: true, removeByteUnit: true));

    Assert.Equal("1 Ei", ByteUnits.Humanize(ByteUnits.ExbiBytes, powerOfTwo: true, removeByteUnit: true));
    Assert.Equal("1.5 Ei", ByteUnits.Humanize(ByteUnits.ExbiBytes + ByteUnits.ExbiBytes / 2, powerOfTwo: true, removeByteUnit: true));

    // Verify that negative numbers are not supported.

    Assert.Throws<ArgumentException>(() => ByteUnits.Humanize(-1, powerOfTwo: true, removeByteUnit: true));
}
/// <summary>
/// Validates the options and also ensures that all <c>null</c> properties are
/// initialized to their default values.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <param name="nodeName">The associated node name.</param>
/// <exception cref="ClusterDefinitionException">Thrown if the definition is not valid.</exception>
public void Validate(ClusterDefinition clusterDefinition, string nodeName)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));
    Covenant.Requires<ArgumentNullException>(!string.IsNullOrEmpty(nodeName));

    var node = clusterDefinition.NodeDefinitions[nodeName];
    var awsNodeOptionsPrefix = $"{nameof(ClusterDefinition.NodeDefinitions)}.{nameof(NodeDefinition.Aws)}";

    // Set the cluster default storage types if necessary.

    if (VolumeType == AwsVolumeType.Default)
    {
        VolumeType = clusterDefinition.Hosting.Aws.DefaultVolumeType;

        if (VolumeType == AwsVolumeType.Default)
        {
            VolumeType = AwsHostingOptions.defaultVolumeType;
        }
    }

    if (OpenEBSVolumeType == AwsVolumeType.Default)
    {
        OpenEBSVolumeType = clusterDefinition.Hosting.Aws.DefaultOpenEBSVolumeType;

        if (OpenEBSVolumeType == AwsVolumeType.Default)
        {
            // BUGFIX: this previously assigned [VolumeType] instead of
            // [OpenEBSVolumeType], leaving the OpenEBS volume type unresolved
            // and clobbering the regular volume type.
            OpenEBSVolumeType = AwsHostingOptions.defaultOpenEBSVolumeType;
        }
    }

    // Validate the instance, setting the cluster default if necessary.

    var instanceType = this.InstanceType;

    if (string.IsNullOrEmpty(instanceType))
    {
        instanceType = clusterDefinition.Hosting.Aws.DefaultInstanceType;
    }

    this.InstanceType = instanceType;

    // Validate the placement partition index.

    if (PlacementPartition > 0)
    {
        if (node.IsControlPane)
        {
            // Partition count defaults to the control-plane node count when
            // it's not configured explicitly and is always capped at the AWS
            // maximum.

            var controlNodeCount = clusterDefinition.ControlNodes.Count();
            var partitionCount = 0;

            if (clusterDefinition.Hosting.Aws.ControlPlanePlacementPartitions == -1)
            {
                partitionCount = controlNodeCount;
            }
            else
            {
                partitionCount = clusterDefinition.Hosting.Aws.ControlPlanePlacementPartitions;
            }

            partitionCount = Math.Min(partitionCount, AwsHostingOptions.MaxPlacementPartitions);

            if (PlacementPartition > partitionCount)
            {
                throw new ClusterDefinitionException($"cluster node [{nodeName}] configures [{awsNodeOptionsPrefix}.{nameof(PlacementPartition)}={PlacementPartition}] which is outside the valid range of [1...{partitionCount}].");
            }
        }
        else if (node.IsWorker)
        {
            var partitionCount = clusterDefinition.Hosting.Aws.WorkerPlacementPartitions;

            partitionCount = Math.Min(partitionCount, AwsHostingOptions.MaxPlacementPartitions);

            if (PlacementPartition > partitionCount)
            {
                throw new ClusterDefinitionException($"cluster node [{nodeName}] configures [{awsNodeOptionsPrefix}.{nameof(PlacementPartition)}={PlacementPartition}] which is outside the valid range of [1...{partitionCount}].");
            }
        }
        else
        {
            throw new NotImplementedException();
        }
    }

    // Validate the volume size, setting the cluster default if necessary.

    if (string.IsNullOrEmpty(this.VolumeSize))
    {
        this.VolumeSize = clusterDefinition.Hosting.Aws.DefaultVolumeSize;
    }

    if (!ByteUnits.TryParse(this.VolumeSize, out var volumeSizeBytes) || volumeSizeBytes <= 1)
    {
        throw new ClusterDefinitionException($"cluster node [{nodeName}] configures [{awsNodeOptionsPrefix}.{nameof(VolumeSize)}={VolumeSize}] which is not valid.");
    }

    var driveSizeGiB = AwsHelper.GetVolumeSizeGiB(VolumeType, volumeSizeBytes);

    this.VolumeSize = $"{driveSizeGiB} GiB";

    // Validate the OpenEBS volume size too.

    if (string.IsNullOrEmpty(this.OpenEBSVolumeSize))
    {
        this.OpenEBSVolumeSize = clusterDefinition.Hosting.Aws.DefaultOpenEBSVolumeSize;
    }

    // BUGFIX: this previously parsed [this.VolumeSize] (already normalized
    // above), so the OpenEBS size was never actually validated.
    if (!ByteUnits.TryParse(this.OpenEBSVolumeSize, out var openEbsVolumeSizeBytes) || openEbsVolumeSizeBytes <= 1)
    {
        throw new ClusterDefinitionException($"cluster node [{nodeName}] configures [{awsNodeOptionsPrefix}.{nameof(OpenEBSVolumeSize)}={OpenEBSVolumeSize}] which is not valid.");
    }

    var openEBSVolumeSizeGiB = AwsHelper.GetVolumeSizeGiB(OpenEBSVolumeType, openEbsVolumeSizeBytes);

    // BUGFIX: this previously overwrote [this.VolumeSize] rather than
    // normalizing [this.OpenEBSVolumeSize].
    this.OpenEBSVolumeSize = $"{openEBSVolumeSizeGiB} GiB";
}
// Converts [size], expressed in [targetUnits], into a raw byte count by scaling
// with powers of 1024.  [sourceUnits] defaults to bytes.
//
// NOTE(review): the exponent is computed with unchecked byte subtraction, so if
// [sourceUnits] is ordinally greater than [targetUnits] the difference wraps
// around to a huge exponent -- confirm callers never pass that combination.
// This also assumes the [ByteUnits] enum members are consecutive ordinals
// starting at Bytes=0 -- TODO confirm against the enum declaration.
public static long ConvertToBytes(int size, ByteUnits targetUnits, ByteUnits sourceUnits = ByteUnits.Bytes)
{
    // Number of 1024 steps between the two units.
    byte pow = (byte)((byte)targetUnits - (byte)sourceUnits);

    return((long)(size * Math.Pow(1024, pow)));
}
/// <summary>
/// Extension shorthand that converts <paramref name="size"/>, expressed in the
/// <paramref name="bu"/> unit, into a raw byte count.
/// </summary>
/// <param name="bu">The unit that <paramref name="size"/> is expressed in.</param>
/// <param name="size">The magnitude to convert.</param>
/// <returns>The equivalent number of bytes.</returns>
public static long Convert(this ByteUnits bu, int size) => StaticConverter.ConvertToBytes(size, bu);
/// <summary>
/// Parses a dictionary of name/value labels by setting the appropriate
/// properties of the parent node.
/// </summary>
/// <param name="labels">The label dictionary.</param>
internal void Parse(Dictionary<string, string> labels)
{
    // WARNING:
    //
    // This method will need to be updated whenever new standard labels are added or changed.

    foreach (var label in labels)
    {
        switch (label.Key)
        {
            // Basic node identity/networking labels.

            case LabelAddress:

                Node.Address = label.Value;
                break;

            case LabelRole:

                Node.Role = label.Value;
                break;

            // Boolean labels parsed via [ParseCheck()] so malformed values are
            // reported consistently.

            case LabelIngress:

                ParseCheck(label, () => { Node.Ingress = NeonHelper.ParseBool(label.Value); });
                break;

            case LabelOpenEbs:

                ParseCheck(label, () => { Node.OpenEbsStorage = NeonHelper.ParseBool(label.Value); });
                break;

            // Azure specific labels: the [Azure] options object is created
            // lazily on the first Azure label seen, then an inner switch
            // dispatches to the specific property.

            case LabelAzureVmSize:
            case LabelAzureStorageType:
            case LabelAzureDriveSize:

                if (Node.Azure == null)
                {
                    Node.Azure = new AzureNodeOptions();
                }

                switch (label.Key)
                {
                    case LabelAzureVmSize:

                        Node.Azure.VmSize = label.Value;
                        break;

                    case LabelAzureDriveSize:

                        Node.Azure.DiskSize = label.Value;
                        break;

                    case LabelAzureStorageType:

                        ParseCheck(label, () => { Node.Azure.StorageType = NeonHelper.ParseEnum<AzureStorageType>(label.Value); });
                        break;
                }
                break;

            // Storage labels.  [StorageSize] is round-tripped through
            // [ByteUnits.Parse()] so the stored value is a normalized byte count.

            case LabelStorageSize:

                ParseCheck(label, () => { Node.Labels.StorageSize = ByteUnits.Parse(label.Value).ToString(); });
                break;

            case LabelStorageLocal:

                Node.Labels.StorageLocal = label.Value.Equals("true", StringComparison.OrdinalIgnoreCase);
                break;

            case LabelStorageHDD:

                Node.Labels.StorageHDD = label.Value.Equals("true", StringComparison.OrdinalIgnoreCase);
                break;

            case LabelStorageRedundant:

                Node.Labels.StorageRedundant = label.Value.Equals("true", StringComparison.OrdinalIgnoreCase);
                break;

            case LabelStorageEphemeral:

                Node.Labels.StorageEphemeral = label.Value.Equals("true", StringComparison.OrdinalIgnoreCase);
                break;

            // Compute labels.

            case LabelComputeCores:

                ParseCheck(label, () => { Node.Labels.ComputeCores = int.Parse(label.Value); });
                break;

            case LabelComputeRamMiB:

                ParseCheck(label, () => { Node.Labels.ComputeRam = int.Parse(label.Value); });
                break;

            // Physical host labels.

            case LabelPhysicalMachine:

                Node.Labels.PhysicalMachine = label.Value;
                break;

            case LabelPhysicalLocation:

                Node.Labels.PhysicalLocation = label.Value;
                break;

            case LabelPhysicalAvailabilitytSet:

                Node.Labels.PhysicalAvailabilitySet = label.Value;
                break;

            case LabelPhysicalPower:

                Node.Labels.PhysicalPower = label.Value;
                break;

            case LabelDatacenter:
            case LabelEnvironment:

                // These labels don't currently map to node properties so we'll ignore them.

                break;

            default:

                // Must be a custom label.

                Node.Labels.Custom.Add(label.Key, label.Value);
                break;
        }
    }
}
/// <summary>
/// Validates the options and also ensures that all <c>null</c> properties are
/// initialized to their default values.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <exception cref="ClusterDefinitionException">Thrown if the definition is not valid.</exception>
public void Validate(ClusterDefinition clusterDefinition)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    Network ??= new AwsNetworkOptions();
    Network.Validate(clusterDefinition);

    var awsHostingOptionsPrefix = $"{nameof(ClusterDefinition.Hosting)}.{nameof(ClusterDefinition.Hosting.Aws)}";

    // AWS resource names restrict the characters allowed in the cluster name.

    foreach (var ch in clusterDefinition.Name)
    {
        if (char.IsLetterOrDigit(ch) || ch == '-' || ch == '_')
        {
            continue;
        }

        throw new ClusterDefinitionException($"cluster name [{clusterDefinition.Name}] is not valid for AWS deployment. Only letters, digits, dashes, or underscores are allowed.");
    }

    // Verify the required AWS credentials and placement.

    if (string.IsNullOrEmpty(AccessKeyId))
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(AccessKeyId)}] is required.");
    }

    if (string.IsNullOrEmpty(SecretAccessKey))
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(SecretAccessKey)}] is required.");
    }

    if (string.IsNullOrEmpty(AvailabilityZone))
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(AvailabilityZone)}] is required.");
    }

    // Verify [ResourceGroup]: defaults to the cluster name and must look like
    // a valid AWS resource group name.

    if (string.IsNullOrEmpty(ResourceGroup))
    {
        ResourceGroup = clusterDefinition.Name;
    }

    if (ResourceGroup.Length > 64)
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] is longer than 64 characters.");
    }

    if (!char.IsLetter(ResourceGroup.First()))
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] does not begin with a letter.");
    }

    if (ResourceGroup.Last() == '_' || ResourceGroup.Last() == '-')
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] ends with a dash or underscore.");
    }

    foreach (var ch in ResourceGroup)
    {
        if (!(char.IsLetterOrDigit(ch) || ch == '_' || ch == '-'))
        {
            throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] includes characters other than letters, digits, dashes and underscores.");
        }
    }

    // Verify [ControlPlanePlacementPartitions]: [-1] (or any negative) means
    // default to one partition per control-plane node (capped at the maximum).

    if (ControlPlanePlacementPartitions < 0)
    {
        ControlPlanePlacementPartitions = Math.Min(MaxPlacementPartitions, clusterDefinition.ControlNodes.Count());
    }
    else
    {
        if (ControlPlanePlacementPartitions < 1 || MaxPlacementPartitions < ControlPlanePlacementPartitions)
        {
            // BUGFIX: message previously read "cannot be in the range".
            throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(ControlPlanePlacementPartitions)}={ControlPlanePlacementPartitions}] must be within the range [1...{MaxPlacementPartitions}]");
        }
    }

    // Verify [WorkerPlacementPartitions]

    if (WorkerPlacementPartitions < 1 || MaxPlacementPartitions < WorkerPlacementPartitions)
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(WorkerPlacementPartitions)}={WorkerPlacementPartitions}] must be within the range [1...{MaxPlacementPartitions}]");
    }

    // Verify [DefaultInstanceType]

    if (string.IsNullOrEmpty(DefaultInstanceType))
    {
        DefaultInstanceType = defaultInstanceType;
    }

    // Verify [DefaultVolumeSize].

    if (string.IsNullOrEmpty(DefaultVolumeSize))
    {
        DefaultVolumeSize = defaultVolumeSize;
    }

    if (!ByteUnits.TryParse(DefaultVolumeSize, out var volumeSize) || volumeSize <= 0)
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(DefaultVolumeSize)}={DefaultVolumeSize}] is not valid.");
    }

    // Verify [DefaultOpenEBSVolumeSize].

    if (string.IsNullOrEmpty(DefaultOpenEBSVolumeSize))
    {
        DefaultOpenEBSVolumeSize = defaultOpenEBSVolumeSize;
    }

    if (!ByteUnits.TryParse(DefaultOpenEBSVolumeSize, out var openEbsVolumeSize) || openEbsVolumeSize <= 0)
    {
        throw new ClusterDefinitionException($"[{awsHostingOptionsPrefix}.{nameof(DefaultOpenEBSVolumeSize)}={DefaultOpenEBSVolumeSize}] is not valid.");
    }

    // Check AWS cluster limits.
    //
    // BUGFIX: these messages previously interpolated the options prefix into
    // the node counts (e.g. "[Hosting.Aws.3]").

    if (clusterDefinition.ControlNodes.Count() > KubeConst.MaxControlNodes)
    {
        throw new ClusterDefinitionException($"cluster control-plane count [{clusterDefinition.ControlNodes.Count()}] exceeds the [{KubeConst.MaxControlNodes}] limit for clusters.");
    }

    if (clusterDefinition.Nodes.Count() > AwsHelper.MaxClusterNodes)
    {
        throw new ClusterDefinitionException($"cluster node count [{clusterDefinition.Nodes.Count()}] exceeds the [{AwsHelper.MaxClusterNodes}] limit for clusters deployed to AWS.");
    }

    //-----------------------------------------------------------------
    // Network subnets

    VpcSubnet = VpcSubnet ?? defaultVpcSubnet;
    NodeSubnet = NodeSubnet ?? defaultPrivateSubnet;
    PublicSubnet = PublicSubnet ?? defaultPublicSubnet;

    // AWS VPC subnets must use prefix lengths between [/16] and [/28].
    // (A smaller prefix means a LARGER network.)

    const int minAwsPrefix = 16;
    const int maxAwsPrefix = 28;

    // VpcSubnet

    if (!NetworkCidr.TryParse(VpcSubnet, out var vpcSubnet))
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(VpcSubnet)}={VpcSubnet}] is not a valid subnet.");
    }

    if (vpcSubnet.PrefixLength < minAwsPrefix)
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(VpcSubnet)}={VpcSubnet}] is too large. The smallest CIDR prefix supported by AWS is [/{minAwsPrefix}].");
    }

    if (vpcSubnet.PrefixLength > maxAwsPrefix)
    {
        // BUGFIX: a prefix longer than the maximum means the subnet is too
        // SMALL; the message previously said "too large".
        throw new ClusterDefinitionException($"AWS hosting [{nameof(VpcSubnet)}={VpcSubnet}] is too small. The largest CIDR prefix supported by AWS is [/{maxAwsPrefix}].");
    }

    // NodeSubnet
    //
    // BUGFIX: these checks previously tested [vpcSubnet.PrefixLength] again
    // instead of the node subnet's prefix length.

    if (!NetworkCidr.TryParse(NodeSubnet, out var privateSubnet))
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(NodeSubnet)}={NodeSubnet}] is not a valid subnet.");
    }

    if (privateSubnet.PrefixLength < minAwsPrefix)
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(NodeSubnet)}={NodeSubnet}] is too large. The smallest CIDR prefix supported by AWS is [/{minAwsPrefix}].");
    }

    if (privateSubnet.PrefixLength > maxAwsPrefix)
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(NodeSubnet)}={NodeSubnet}] is too small. The largest CIDR prefix supported by AWS is [/{maxAwsPrefix}].");
    }

    // PublicSubnet

    if (!NetworkCidr.TryParse(PublicSubnet, out var publicSubnet))
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(PublicSubnet)}={PublicSubnet}] is not a valid subnet.");
    }

    // Ensure that the subnets fit together.
    //
    // BUGFIX: the two containment error messages were swapped, reporting the
    // wrong subnet in each case.

    if (!vpcSubnet.Contains(privateSubnet))
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(NodeSubnet)}={NodeSubnet}] is not contained within [{nameof(VpcSubnet)}={VpcSubnet}].");
    }

    if (!vpcSubnet.Contains(publicSubnet))
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(PublicSubnet)}={PublicSubnet}] is not contained within [{nameof(VpcSubnet)}={VpcSubnet}].");
    }

    if (privateSubnet.Overlaps(publicSubnet))
    {
        throw new ClusterDefinitionException($"AWS hosting [{nameof(NodeSubnet)}={NodeSubnet}] and [{nameof(PublicSubnet)}={PublicSubnet}] cannot overlap.");
    }
}
/// <summary>
/// Validates the options and also ensures that all <c>null</c> properties are
/// initialized to their default values.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <exception cref="ClusterDefinitionException">Thrown if the definition is not valid.</exception>
public void Validate(ClusterDefinition clusterDefinition)
{
    Covenant.Requires<ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    Network ??= new AzureNetworkOptions();
    Network.Validate(clusterDefinition);

    var azureHostingOptionsPrefix = $"{nameof(ClusterDefinition.Hosting)}.{nameof(ClusterDefinition.Hosting.Azure)}";

    // Azure resource names restrict the characters allowed in the cluster name.

    foreach (var ch in clusterDefinition.Name)
    {
        if (char.IsLetterOrDigit(ch) || ch == '-' || ch == '_')
        {
            continue;
        }

        throw new ClusterDefinitionException($"cluster name [{clusterDefinition.Name}] is not valid for Azure deployment. Only letters, digits, dashes, or underscores are allowed.");
    }

    // Verify the required Azure credentials and placement.

    if (string.IsNullOrEmpty(SubscriptionId))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(SubscriptionId)}] cannot be empty.");
    }

    if (string.IsNullOrEmpty(TenantId))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(TenantId)}] cannot be empty.");
    }

    if (string.IsNullOrEmpty(ClientId))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(ClientId)}] cannot be empty.");
    }

    if (string.IsNullOrEmpty(ClientSecret))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(ClientSecret)}] cannot be empty.");
    }

    if (string.IsNullOrEmpty(Region))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(Region)}] cannot be empty.");
    }

    if (string.IsNullOrEmpty(DomainLabel))
    {
        // We're going to generate a GUID and strip out the dashes.

        DomainLabel = "neon-" + Guid.NewGuid().ToString("d").Replace("-", string.Empty);
    }

    // Verify [ResourceGroup]: defaults to the cluster name and must look like
    // a valid Azure resource group name.

    if (string.IsNullOrEmpty(ResourceGroup))
    {
        ResourceGroup = clusterDefinition.Name;
    }

    if (ResourceGroup.Length > 64)
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] is longer than 64 characters.");
    }

    if (!char.IsLetter(ResourceGroup.First()))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] does not begin with a letter.");
    }

    if (ResourceGroup.Last() == '_' || ResourceGroup.Last() == '-')
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] ends with a dash or underscore.");
    }

    foreach (var ch in ResourceGroup)
    {
        if (!(char.IsLetterOrDigit(ch) || ch == '_' || ch == '-'))
        {
            throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(ResourceGroup)}={ResourceGroup}] includes characters other than letters, digits, dashes and underscores.");
        }
    }

    // Verify [Environment].

    if (Environment != null)
    {
        Environment.Validate(clusterDefinition);
    }

    // Verify [DefaultVmSize]

    if (string.IsNullOrEmpty(DefaultVmSize))
    {
        DefaultVmSize = defaultVmSize;
    }

    // Verify [DefaultDiskSize].

    if (string.IsNullOrEmpty(DefaultDiskSize))
    {
        DefaultDiskSize = defaultDiskSize;
    }

    if (!ByteUnits.TryParse(DefaultDiskSize, out var diskSize) || diskSize <= 0)
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(DefaultDiskSize)}={DefaultDiskSize}] is not valid.");
    }

    // Verify [DefaultOpenEBSDiskSize].

    if (string.IsNullOrEmpty(DefaultOpenEBSDiskSize))
    {
        DefaultOpenEBSDiskSize = defaultOpenEBSDiskSize;
    }

    if (!ByteUnits.TryParse(DefaultOpenEBSDiskSize, out var openEbsDiskSize) || openEbsDiskSize <= 0)
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(DefaultOpenEBSDiskSize)}={DefaultOpenEBSDiskSize}] is not valid.");
    }

    // Check Azure cluster limits.

    if (clusterDefinition.ControlNodes.Count() > KubeConst.MaxControlNodes)
    {
        throw new ClusterDefinitionException($"cluster control-plane count [{clusterDefinition.ControlNodes.Count()}] exceeds the [{KubeConst.MaxControlNodes}] limit for clusters.");
    }

    if (clusterDefinition.Nodes.Count() > AzureHelper.MaxClusterNodes)
    {
        throw new ClusterDefinitionException($"cluster node count [{clusterDefinition.Nodes.Count()}] exceeds the [{AzureHelper.MaxClusterNodes}] limit for clusters deployed to Azure.");
    }

    // Verify subnets

    if (!NetworkCidr.TryParse(VnetSubnet, out var vnetSubnet))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(VnetSubnet)}={VnetSubnet}] is not a valid subnet.");
    }

    if (!NetworkCidr.TryParse(NodeSubnet, out var nodeSubnet))
    {
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(NodeSubnet)}={NodeSubnet}] is not a valid subnet.");
    }

    if (!vnetSubnet.Contains(nodeSubnet))
    {
        // BUGFIX: the message previously read "is contained within", dropping
        // the "not" and inverting its meaning.
        throw new ClusterDefinitionException($"[{azureHostingOptionsPrefix}.{nameof(NodeSubnet)}={NodeSubnet}] is not contained within [{nameof(VnetSubnet)}={VnetSubnet}].");
    }
}
/// <summary>
/// Initializes the value with its magnitude and unit of measurement.
/// </summary>
/// <param name="value">The numeric magnitude.</param>
/// <param name="unit">The unit of measurement for <paramref name="value"/>.</param>
public sMemoryValue(long value, ByteUnits unit)
{
    this._value = value;
    this._unit = unit;
}
public void ParseBase2()
{
    // Local helper: parsing must succeed and yield the expected byte count.
    void Check(string input, decimal expected)
    {
        Assert.True(ByteUnits.TryParse(input, out var parsed));
        Assert.Equal(expected, parsed);
    }

    // Verify that the binary (base-2) unit constants are correct.
    Assert.Equal(Pow(2m, 10), ByteUnits.KibiBytes);
    Assert.Equal(Pow(2m, 20), ByteUnits.MebiBytes);
    Assert.Equal(Pow(2m, 30), ByteUnits.GibiBytes);
    Assert.Equal(Pow(2m, 40), ByteUnits.TebiBytes);
    Assert.Equal(Pow(2m, 50), ByteUnits.PebiBytes);
    Assert.Equal(Pow(2m, 60), ByteUnits.ExbiBytes);

    // Parse whole values.
    Check("1m", 0.001m);
    Check("0", 0.0m);
    Check("4Ki", ByteUnits.KibiBytes * 4);
    Check("4Mi", ByteUnits.MebiBytes * 4);
    Check("7Gi", ByteUnits.GibiBytes * 7);
    Check("2Ti", ByteUnits.TebiBytes * 2);
    Check("2Gi", ByteUnits.GibiBytes * 2);
    Check("4Ti", ByteUnits.TebiBytes * 4);
    Check("3Pi", ByteUnits.PebiBytes * 3);
    Check("5Ei", ByteUnits.ExbiBytes * 5);

    // Test fractional values.
    Check("1.5m", 0.001m * 1.5m);
    Check("1.5", 1 * 1.5m);
    Check("1.5Ki", ByteUnits.KibiBytes * 1.5m);
    Check("1.5Mi", ByteUnits.MebiBytes * 1.5m);
    Check("1.5Gi", ByteUnits.GibiBytes * 1.5m);
    Check("1.5Ti", ByteUnits.TebiBytes * 1.5m);
    Check("1.5Pi", ByteUnits.PebiBytes * 1.5m);
    Check("1.5Ei", ByteUnits.ExbiBytes * 1.5m);

    // Parse values with a space before the units.
    Check("1 m", 1.0m * 0.001m);
    Check("1 Ki", 1.0m * ByteUnits.KibiBytes);
    Check("2 Mi", 2.0m * ByteUnits.MebiBytes);
    Check("3 Gi", 3.0m * ByteUnits.GibiBytes);
    Check("4 Ti", 4.0m * ByteUnits.TebiBytes);
    Check("9 Pi", 9.0m * ByteUnits.PebiBytes);
    Check("10 Ei", 10.0m * ByteUnits.ExbiBytes);
}
/// <summary>
/// Verifies that the limit properties make sense.
/// </summary>
/// <returns><c>null</c> for valid properties, otherwise an error message.</returns>
public string Validate()
{
    // [-1] acts as the "not specified" sentinel for each of these.
    decimal memory            = -1;
    decimal memorySwap        = -1;
    decimal memoryReservation = -1;
    decimal kernelMemory      = -1;

    if (Memory != null)
    {
        try
        {
            memory = ByteUnits.Parse(Memory);

            if (memory < 0)
            {
                throw new Exception();
            }
        }
        catch
        {
            return($"[{nameof(Memory)}={Memory}]: Value is not valid.");
        }
    }

    if (MemorySwap != null)
    {
        try
        {
            memorySwap = ByteUnits.Parse(MemorySwap);

            // [-1] is legal here and means unlimited swap.
            if (memorySwap < -1)
            {
                throw new Exception();
            }
        }
        catch
        {
            return($"[{nameof(MemorySwap)}={MemorySwap}]: Value is not valid.");
        }

        if (memorySwap > 0 && memory == -1)
        {
            return($"[{nameof(Memory)}] must also be set when [{nameof(MemorySwap)}] is specified.");
        }

        // BUGFIX: exclude [-1] (unlimited) from the comparison; it was previously
        // always rejected because -1 <= memory whenever [Memory] is set.
        if (memory != -1 && memorySwap != -1 && memorySwap <= memory)
        {
            return($"[{nameof(MemorySwap)}={MemorySwap}] must be greater than [{nameof(Memory)}={Memory}].");
        }
    }

    if (MemorySwappiness != null)
    {
        if (MemorySwappiness.Value < 0 || MemorySwappiness.Value > 100)
        {
            return($"[{nameof(MemorySwappiness)}={MemorySwappiness}]: Value is not valid.");
        }
    }

    if (MemoryReservation != null)
    {
        try
        {
            memoryReservation = ByteUnits.Parse(MemoryReservation);

            if (memoryReservation < 0)
            {
                throw new Exception();
            }
        }
        catch
        {
            return($"[{nameof(MemoryReservation)}={MemoryReservation}]: Value is not valid.");
        }
    }

    if (KernelMemory != null)
    {
        try
        {
            kernelMemory = ByteUnits.Parse(KernelMemory);

            // BUGFIX: only verify [Memory] when it was actually specified.  Previously an
            // unset [Memory] (memory == -1) was reported as "less than 4MiB" whenever
            // [KernelMemory] was present.
            if (memory != -1 && memory < 4 * ByteUnits.MebiBytes)
            {
                return($"[{nameof(Memory)}={Memory}]: Value cannot be less than 4MiB.");
            }
        }
        catch
        {
            return($"[{nameof(KernelMemory)}={KernelMemory}]: Value is not valid.");
        }

        if (kernelMemory < 4 * ByteUnits.MebiBytes)
        {
            return($"[{nameof(KernelMemory)}={KernelMemory}]: Value cannot be less than 4MiB.");
        }
    }

    if (memory != -1 && memoryReservation != -1)
    {
        // A reservation (soft limit) must sit below the hard limit to be meaningful.
        if (memoryReservation >= memory)
        {
            return($"[{nameof(MemoryReservation)}={MemoryReservation}] must be less than [{nameof(Memory)}={Memory}].");
        }
    }

    if (OomKillDisable && memory == -1)
    {
        return($"[{nameof(OomKillDisable)}={OomKillDisable}] is not allowed when [{nameof(Memory)}] is not set.");
    }

    return(null);
}
/// <inheritdoc/>
public override bool Provision(bool force)
{
    // $todo(jeff.lill):
    //
    // I'm not implementing [force] here.  I'm not entirely sure
    // that this makes sense for production clusters.
    //
    // Perhaps it would make more sense to replace this with a
    // [neon cluster remove] command.
    //
    // https://github.com/nforgeio/neonKUBE/issues/156

    if (IsProvisionNOP)
    {
        // There's nothing to do here.
        return(true);
    }

    // Update the node labels with the actual capabilities of the
    // virtual machines being provisioned.  Only labels the operator
    // left unset are filled in; explicit labels win.

    foreach (var node in cluster.Definition.Nodes)
    {
        if (string.IsNullOrEmpty(node.Labels.PhysicalMachine))
        {
            node.Labels.PhysicalMachine = node.VmHost;
        }

        if (node.Labels.ComputeCores == 0)
        {
            node.Labels.ComputeCores = node.GetVmProcessors(cluster.Definition);
        }

        if (node.Labels.ComputeRam == 0)
        {
            // VM memory is reported in bytes; the label is expressed in MiB.
            node.Labels.ComputeRam = (int)(node.GetVmMemory(cluster.Definition) / ByteUnits.MebiBytes);
        }

        if (string.IsNullOrEmpty(node.Labels.StorageSize))
        {
            node.Labels.StorageSize = ByteUnits.ToGiString(node.GetVmDisk(cluster.Definition));
        }
    }

    // Build a list of [SshProxy] instances that map to the specified XenServer
    // hosts.  We'll use the [XenClient] instances as proxy metadata.

    var sshProxies = new List <SshProxy <XenClient> >();

    xenHosts = new List <XenClient>();

    foreach (var host in cluster.Definition.Hosting.VmHosts)
    {
        var hostAddress  = host.Address;
        var hostname     = host.Name;

        // Per-host credentials fall back to the cluster-wide VM host credentials.
        var hostUsername = host.Username ?? cluster.Definition.Hosting.VmHostUsername;
        var hostPassword = host.Password ?? cluster.Definition.Hosting.VmHostPassword;

        if (string.IsNullOrEmpty(hostname))
        {
            hostname = host.Address;
        }

        var xenHost = new XenClient(hostAddress, hostUsername, hostPassword, name: host.Name, logFolder: logFolder);

        xenHosts.Add(xenHost);
        sshProxies.Add(xenHost.SshProxy);
    }

    // We're going to provision the XenServer hosts in parallel to
    // speed up cluster setup.  This works because each XenServer
    // is essentially independent from the others.

    controller = new SetupController <XenClient>($"Provisioning [{cluster.Definition.Name}] cluster", sshProxies)
    {
        ShowStatus  = this.ShowStatus,
        MaxParallel = this.MaxParallel
    };

    controller.AddWaitUntilOnlineStep();

    controller.AddStep("host folders", (node, stepDelay) => node.CreateHostFolders());
    controller.AddStep("verify readiness", (node, stepDelay) => VerifyReady(node));
    controller.AddStep("virtual machine template", (node, stepDelay) => CheckVmTemplate(node));
    controller.AddStep("virtual machines", (node, stepDelay) => ProvisionVirtualMachines(node));
    controller.AddGlobalStep(string.Empty, () => Finish(), quiet: true);

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: One or more configuration steps failed.");
        return(false);
    }

    return(true);
}
/// <summary>
/// Converts the stored value into the requested unit of measurement.
/// </summary>
/// <param name="unit">The desired unit of measurement.</param>
/// <returns>
/// The converted value, or <c>0</c> when either unit is unrecognized
/// (preserving the behavior of the original switch-based implementation).
/// </returns>
public decimal ToUnit(ByteUnits unit)
{
    if (unit == _unit)
    {
        return((decimal)_value);
    }

    var fromBytes = UnitToBytes(_unit);
    var toBytes   = UnitToBytes(unit);

    // The original nested switches silently fell through to 0 for
    // unit combinations they didn't know about; keep that contract.
    if (fromBytes == 0 || toBytes == 0)
    {
        return(0);
    }

    // [fromBytes / toBytes] is an exact power (or reciprocal power) of the unit
    // base, so this single scale matches the original per-pair multiply/divide.
    return((decimal)_value * (fromBytes / toBytes));
}

/// <summary>
/// Returns the number of bytes represented by one of <paramref name="unit"/>,
/// or <c>0</c> when the unit is unrecognized.
/// </summary>
private static decimal UnitToBytes(ByteUnits unit)
{
    switch (unit)
    {
        case ByteUnits.Byte:     return 1m;
        case ByteUnits.KiloByte: return (decimal)Constants.KB_BYTE_COUNT;
        case ByteUnits.MegaByte: return (decimal)Constants.MB_BYTE_COUNT;
        case ByteUnits.GigaByte: return (decimal)Constants.GB_BYTE_COUNT;
        case ByteUnits.TeraByte: return (decimal)Constants.TB_BYTE_COUNT;
        default:                 return 0m;
    }
}
public void CaseInsensitive()
{
    // Unit suffixes must be accepted regardless of letter case.
    void Check(string input, decimal expected)
    {
        Assert.True(ByteUnits.TryParse(input, out var parsed));
        Assert.Equal(expected, parsed);
    }

    Check("1k", ByteUnits.KiloBytes);
    Check("1kb", ByteUnits.KiloBytes);
    Check("2ki", ByteUnits.KibiBytes * 2);
    Check("2kib", ByteUnits.KibiBytes * 2);

    Check("1m", ByteUnits.MegaBytes);
    Check("1mb", ByteUnits.MegaBytes);
    Check("2mi", ByteUnits.MebiBytes * 2);
    Check("2mib", ByteUnits.MebiBytes * 2);

    Check("1g", ByteUnits.GigaBytes);
    Check("1gb", ByteUnits.GigaBytes);
    Check("2gi", ByteUnits.GibiBytes * 2);
    Check("2gib", ByteUnits.GibiBytes * 2);

    Check("1t", ByteUnits.TeraBytes);
    Check("1tb", ByteUnits.TeraBytes);
    Check("2ti", ByteUnits.TebiBytes * 2);
    Check("2tib", ByteUnits.TebiBytes * 2);

    Check("1p", ByteUnits.PetaBytes);
    Check("1pb", ByteUnits.PetaBytes);
    Check("2pi", ByteUnits.PebiBytes * 2);
    Check("2pib", ByteUnits.PebiBytes * 2);

    Check("1e", ByteUnits.ExaBytes);
    Check("1eb", ByteUnits.ExaBytes);
    Check("2ei", ByteUnits.ExbiBytes * 2);
    Check("2eib", ByteUnits.ExbiBytes * 2);
}
/// <summary>
/// Constructs the memory value from a magnitude and its unit.
/// </summary>
/// <param name="value">The numeric magnitude.</param>
/// <param name="unit">The unit of measurement for <paramref name="value"/>.</param>
public sMemoryValue(long value, ByteUnits unit)
{
    this._value = value;
    this._unit = unit;
}
#pragma warning restore CA1416

/// <summary>
/// Creates a virtual machine.
/// </summary>
/// <param name="machineName">The machine name.</param>
/// <param name="memorySize">
/// A string specifying the memory size.  This can be a long byte count or a
/// byte count or a number with units like <b>512MiB</b>, <b>0.5GiB</b>, <b>2GiB</b>,
/// or <b>1TiB</b>.  This defaults to <b>2GiB</b>.
/// </param>
/// <param name="processorCount">
/// The number of virtual processors to assign to the machine.  This defaults to <b>4</b>.
/// </param>
/// <param name="driveSize">
/// A string specifying the primary disk size.  This can be a long byte count or a
/// byte count or a number with units like <b>512MB</b>, <b>0.5GiB</b>, <b>2GiB</b>,
/// or <b>1TiB</b>.  Pass <c>null</c> to leave the disk alone.  This defaults to <c>null</c>.
/// </param>
/// <param name="drivePath">
/// Optionally specifies the path where the virtual hard drive will be located.  Pass
/// <c>null</c> or empty to default to <b>MACHINE-NAME.vhdx</b> located in the default
/// Hyper-V virtual machine drive folder.
/// </param>
/// <param name="checkpointDrives">Optionally enables drive checkpoints.  This defaults to <c>false</c>.</param>
/// <param name="templateDrivePath">
/// If this is specified and <paramref name="drivePath"/> is not <c>null</c> then
/// the hard drive template at <paramref name="templateDrivePath"/> will be copied
/// to <paramref name="drivePath"/> before creating the machine.
/// </param>
/// <param name="switchName">Optional name of the virtual switch.</param>
/// <param name="extraDrives">
/// Optionally specifies any additional virtual drives to be created and
/// then attached to the new virtual machine.
/// </param>
/// <remarks>
/// <note>
/// The <see cref="VirtualDrive.Path"/> property of <paramref name="extraDrives"/> may be
/// passed as <c>null</c> or empty.  In this case, the drive name will default to
/// being located in the standard Hyper-V virtual drivers folder and will be named
/// <b>MACHINE-NAME-#.vhdx</b>, where <b>#</b> is the one-based index of the drive
/// in the enumeration.
/// </note>
/// </remarks>
public void AddVm(
    string machineName,
    string memorySize = "2GiB",
    int processorCount = 4,
    string driveSize = null,
    string drivePath = null,
    bool checkpointDrives = false,
    string templateDrivePath = null,
    string switchName = null,
    IEnumerable <VirtualDrive> extraDrives = null)
{
    Covenant.Requires <ArgumentNullException>(!string.IsNullOrEmpty(machineName), nameof(machineName));
    CheckDisposed();

    // Normalize the human-readable sizes (e.g. "2GiB") into raw byte counts
    // so they can be passed directly to the PowerShell cmdlets below.
    memorySize = ByteUnits.Parse(memorySize).ToString();

    if (driveSize != null)
    {
        driveSize = ByteUnits.Parse(driveSize).ToString();
    }

    var driveFolder = DefaultDriveFolder;

    if (string.IsNullOrEmpty(drivePath))
    {
        drivePath = Path.Combine(driveFolder, $"{machineName}-[0].vhdx");
    }
    else
    {
        // Extra drives (below) are placed alongside an explicitly specified primary drive.
        driveFolder = Path.GetDirectoryName(Path.GetFullPath(drivePath));
    }

    if (VmExists(machineName))
    {
        throw new HyperVException($"Virtual machine [{machineName}] already exists.");
    }

    // Copy the template VHDX file.

    if (templateDrivePath != null)
    {
        File.Copy(templateDrivePath, drivePath);
    }

    // Resize the VHDX if requested.

    if (driveSize != null)
    {
        powershell.Execute($"{HyperVNamespace}Resize-VHD -Path '{drivePath}' -SizeBytes {driveSize}");
    }

    // Create the virtual machine.

    var command = $"{HyperVNamespace}New-VM -Name '{machineName}' -MemoryStartupBytes {memorySize} -Generation 1";

    if (!string.IsNullOrEmpty(drivePath))
    {
        command += $" -VHDPath '{drivePath}'";
    }

    if (!string.IsNullOrEmpty(switchName))
    {
        command += $" -SwitchName '{switchName}'";
    }

    try
    {
        powershell.Execute(command);
    }
    catch (Exception e)
    {
        throw new HyperVException(e.Message, e);
    }

    // We need to configure the VM's processor count and min/max memory settings.

    try
    {
        powershell.Execute($"{HyperVNamespace}Set-VM -Name '{machineName}' -ProcessorCount {processorCount} -StaticMemory -MemoryStartupBytes {memorySize}");
    }
    catch (Exception e)
    {
        throw new HyperVException(e.Message, e);
    }

    // Create and attach any additional drives as required.

    if (extraDrives != null)
    {
        var diskNumber = 1;

        foreach (var drive in extraDrives)
        {
            if (string.IsNullOrEmpty(drive.Path))
            {
                drive.Path = Path.Combine(driveFolder, $"{machineName}-[{diskNumber}].vhdx");
            }

            if (drive.Size <= 0)
            {
                throw new ArgumentException("Virtual drive size must be greater than 0.", nameof(drive));
            }

            // Remove any stale VHDX left over from a previous run before recreating it.
            NeonHelper.DeleteFile(drive.Path);

            var fixedOrDynamic = drive.IsDynamic ? "-Dynamic" : "-Fixed";

            try
            {
                powershell.Execute($"{HyperVNamespace}New-VHD -Path '{drive.Path}' {fixedOrDynamic} -SizeBytes {drive.Size} -BlockSizeBytes 1MB");
                powershell.Execute($"{HyperVNamespace}Add-VMHardDiskDrive -VMName '{machineName}' -Path \"{drive.Path}\"");
            }
            catch (Exception e)
            {
                throw new HyperVException(e.Message, e);
            }

            diskNumber++;
        }
    }

    // Windows 10 releases since the August 2017 Creators Update enable automatic
    // virtual drive checkpointing (which is annoying).  We're going to disable this
    // by default.

    if (!checkpointDrives)
    {
        try
        {
            powershell.Execute($"{HyperVNamespace}Set-VM -CheckpointType Disabled -Name '{machineName}'");
        }
        catch (Exception e)
        {
            throw new HyperVException(e.Message, e);
        }
    }

    // We need to do some extra configuration for nested virtual machines:
    //
    //      https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/user-guide/nested-virtualization

    if (IsNestedVirtualization)
    {
        // Enable nested virtualization for the VM.

        powershell.Execute($"{HyperVNamespace}Set-VMProcessor -VMName '{machineName}' -ExposeVirtualizationExtensions $true");

        // Enable MAC address spoofing for the VMs network adapter.

        powershell.Execute($"{HyperVNamespace}Set-VMNetworkAdapter -VMName '{machineName}' -MacAddressSpoofing On");
    }
}
public void ParseBase10()
{
    // Local helper: parsing must succeed and yield the expected byte count.
    void Check(string input, decimal expected)
    {
        Assert.True(ByteUnits.TryParse(input, out var parsed));
        Assert.Equal(expected, parsed);
    }

    // Verify that the decimal (base-10) unit constants are correct.
    Assert.Equal(1000m, ByteUnits.KiloBytes);
    Assert.Equal(1000000m, ByteUnits.MegaBytes);
    Assert.Equal(1000000000m, ByteUnits.GigaBytes);
    Assert.Equal(1000000000000m, ByteUnits.TeraBytes);
    Assert.Equal(1000000000000000m, ByteUnits.PetaBytes);
    Assert.Equal(1000000000000000000m, ByteUnits.ExaBytes);

    // Parse whole values.
    Check("300m", 0.3m);
    Check("4000m", 4m);
    Check("0", 0);
    Check("10", 10);
    Check("20", 20);
    Check("1K", ByteUnits.KiloBytes);
    Check("2K", ByteUnits.KiloBytes * 2);
    Check("1M", ByteUnits.MegaBytes);
    Check("2M", ByteUnits.MegaBytes * 2);
    Check("1G", ByteUnits.GigaBytes);
    Check("2G", ByteUnits.GigaBytes * 2);
    Check("2T", ByteUnits.TeraBytes * 2);
    Check("1T", ByteUnits.TeraBytes);
    Check("2P", ByteUnits.PetaBytes * 2);
    Check("1P", ByteUnits.PetaBytes);
    Check("2E", ByteUnits.ExaBytes * 2);
    Check("1E", ByteUnits.ExaBytes);

    // Parse fractional values.
    Check("1.5K", ByteUnits.KiloBytes * 1.5m);
    Check("1.5M", ByteUnits.MegaBytes * 1.5m);
    Check("1.5G", ByteUnits.GigaBytes * 1.5m);
    Check("1.5T", ByteUnits.TeraBytes * 1.5m);
    Check("1.5P", ByteUnits.PetaBytes * 1.5m);

    // Parse values with a space before the units.
    Check("1 m", 0.001m);
    Check("1 ", 1);
    Check("2 K", 2 * ByteUnits.KiloBytes);
    Check("3 K", 3 * ByteUnits.KiloBytes);
    Check("4 M", 4 * ByteUnits.MegaBytes);
    Check("5 M", 5 * ByteUnits.MegaBytes);
    Check("6 G", 6 * ByteUnits.GigaBytes);
    Check("7 G", 7 * ByteUnits.GigaBytes);
    Check("8 T", 8 * ByteUnits.TeraBytes);
    Check("9 T", 9 * ByteUnits.TeraBytes);
    Check("9 P", 9 * ByteUnits.PetaBytes);
    Check("10 P", 10 * ByteUnits.PetaBytes);
}
// Refreshes the disk usage chart from the metrics service: fetches the usage
// series over the look-back window plus the disk total, then redraws the chart.
private async Task UpdateDiskAsync()
{
    await SyncContext.Clear;

    // Fetch the usage history and the total concurrently.
    var tasks = new List <Task>()
    {
        AppState.Metrics.GetDiskUsageAsync(DateTime.UtcNow.AddMinutes(chartLookBack * -1), DateTime.UtcNow),
        AppState.Metrics.GetDiskTotalAsync()
    };

    await Task.WhenAll(tasks);

    // Bail out quietly when either metric is unavailable.
    if (AppState.Metrics.DiskUsageBytes == null || AppState.Metrics.DiskTotalBytes < 0)
    {
        return;
    }

    // X axis: sample timestamps formatted as short local times.
    // NOTE(review): the X projection guards [Result]/[Values] with [?.] but the Y
    // projection below dereferences them unconditionally — if [Result] is empty or
    // null this will throw; confirm the metrics API never returns an empty result here.
    var diskUsageX = AppState.Metrics.DiskUsageBytes.Data.Result?.First()?.Values?.Select(x => AppState.Metrics.UnixTimeStampToDateTime(x.Time).ToShortTimeString()).ToList();

    // Y axis: raw byte samples scaled to GB (decimal, base-10).
    // NOTE(review): [decimal.Parse] is culture-sensitive — presumably the metrics
    // values always use '.' as the decimal separator; verify or parse with
    // CultureInfo.InvariantCulture.
    var diskUsageY = AppState.Metrics.DiskUsageBytes.Data.Result.First().Values.Select(x => decimal.Parse(x.Value) / 1000000000).ToList();

    await UpdateChartAsync(diskUsageX, diskUsageY, diskChartConfig, diskChart, $"Disk usage (total disk: {ByteUnits.ToGB(AppState.Metrics.DiskTotalBytes)})");
}
/// <summary>
/// Validates the options and also ensures that all <c>null</c> properties are
/// initialized to their default values.
/// </summary>
/// <param name="clusterDefinition">The cluster definition.</param>
/// <param name="nodeName">The associated node name.</param>
/// <exception cref="ClusterDefinitionException">Thrown if the definition is not valid.</exception>
public void Validate(ClusterDefinition clusterDefinition, string nodeName)
{
    Covenant.Requires <ArgumentNullException>(clusterDefinition != null, nameof(clusterDefinition));

    var azureNodeOptionsPrefix = $"{nameof(ClusterDefinition.Hosting)}.{nameof(ClusterDefinition.Hosting.Azure)}";

    // Set the cluster default storage types if necessary.

    if (StorageType == AzureStorageType.Default)
    {
        StorageType = clusterDefinition.Hosting.Azure.DefaultStorageType;

        if (StorageType == AzureStorageType.Default)
        {
            StorageType = AzureHostingOptions.defaultStorageType;
        }
    }

    if (OpenEBSStorageType == AzureStorageType.Default)
    {
        OpenEBSStorageType = clusterDefinition.Hosting.Azure.DefaultOpenEBSStorageType;

        if (OpenEBSStorageType == AzureStorageType.Default)
        {
            OpenEBSStorageType = AzureHostingOptions.defaultOpenEBSStorageType;
        }
    }

    // Validate the VM size, setting the cluster default if necessary.

    var vmSize = this.VmSize;

    if (string.IsNullOrEmpty(vmSize))
    {
        vmSize = clusterDefinition.Hosting.Azure.DefaultVmSize;
    }

    this.VmSize = vmSize;

    // Validate the drive size, setting the cluster default if necessary.

    if (string.IsNullOrEmpty(this.DiskSize))
    {
        this.DiskSize = clusterDefinition.Hosting.Azure.DefaultDiskSize;
    }

    if (!ByteUnits.TryParse(this.DiskSize, out var driveSizeBytes) || driveSizeBytes <= 1)
    {
        throw new ClusterDefinitionException($"cluster node [{nodeName}] configures [{azureNodeOptionsPrefix}.{nameof(DiskSize)}={DiskSize}] which is not valid.");
    }

    // Round the requested size up to the nearest supported Azure disk size.

    var driveSizeGiB = AzureHelper.GetDiskSizeGiB(StorageType, driveSizeBytes);

    this.DiskSize = $"{driveSizeGiB} GiB";

    // Validate the OpenEBS disk size too.

    if (string.IsNullOrEmpty(this.OpenEBSDiskSize))
    {
        this.OpenEBSDiskSize = clusterDefinition.Hosting.Azure.DefaultOpenEBSDiskSize;
    }

    if (!ByteUnits.TryParse(this.OpenEBSDiskSize, out var openEbsDiskSizeBytes) || openEbsDiskSizeBytes <= 1)
    {
        throw new ClusterDefinitionException($"cluster node [{nodeName}] configures [{azureNodeOptionsPrefix}.{nameof(OpenEBSDiskSize)}={OpenEBSDiskSize}] which is not valid.");
    }

    var openEBSDiskSizeGiB = AzureHelper.GetDiskSizeGiB(OpenEBSStorageType, openEbsDiskSizeBytes);

    // BUGFIX: this previously assigned the OpenEBS size to [DiskSize], clobbering
    // the data disk size computed above and leaving [OpenEBSDiskSize] un-normalized.
    this.OpenEBSDiskSize = $"{openEBSDiskSizeGiB} GiB";
}
/// <inheritdoc/>
public override bool Provision(bool force)
{
    if (IsProvisionNOP)
    {
        // There's nothing to do here.
        return(true);
    }

    // Update the node labels with the actual capabilities of the
    // virtual machines being provisioned.  Only labels the operator
    // left unset are filled in.

    foreach (var node in cluster.Definition.Nodes)
    {
        if (string.IsNullOrEmpty(node.Labels.PhysicalMachine))
        {
            node.Labels.PhysicalMachine = Environment.MachineName;
        }

        if (node.Labels.ComputeCores == 0)
        {
            node.Labels.ComputeCores = cluster.Definition.Hosting.VmProcessors;
        }

        if (node.Labels.ComputeRam == 0)
        {
            // VM memory is validated/returned in bytes; the label is expressed in MiB.
            node.Labels.ComputeRam = (int)(ClusterDefinition.ValidateSize(cluster.Definition.Hosting.VmMemory, typeof(HostingOptions), nameof(HostingOptions.VmMemory)) / ByteUnits.MebiBytes);
        }

        if (string.IsNullOrEmpty(node.Labels.StorageSize))
        {
            // BUGFIX: this previously called [GetVmMemory()], setting the storage
            // label to the VM's RAM size.  Use the VM disk size instead (matching
            // the XenServer hosting manager).
            node.Labels.StorageSize = ByteUnits.ToGiString(node.GetVmDisk(cluster.Definition));
        }
    }

    // If a public address isn't explicitly specified, we'll assume that we're
    // running inside the network and we can access the private address.

    foreach (var node in cluster.Definition.Nodes)
    {
        if (string.IsNullOrEmpty(node.PublicAddress))
        {
            node.PublicAddress = node.PrivateAddress;
        }
    }

    // Perform the provisioning operations.

    controller = new SetupController <NodeDefinition>($"Provisioning [{cluster.Definition.Name}] cluster", cluster.Nodes)
    {
        ShowStatus  = this.ShowStatus,
        MaxParallel = 1     // We're only going to provision one VM at a time on a local Hyper-V instance.
    };

    controller.AddGlobalStep("prepare hyper-v", () => PrepareHyperV());
    controller.AddStep("virtual machines", (node, stepDelay) => ProvisionVM(node));
    controller.AddGlobalStep(string.Empty, () => Finish(), quiet: true);

    if (!controller.Run())
    {
        Console.Error.WriteLine("*** ERROR: One or more configuration steps failed.");
        return(false);
    }

    return(true);
}
/// <summary>
/// Inspects the node to determine physical machine capabilities like
/// processor count, RAM, and primary disk capacity and then sets the
/// corresponding node labels.
/// </summary>
/// <param name="node">The target node.</param>
private void SetLabels(SshProxy <NodeDefinition> node)
{
    CommandResponse result;

    // Download [/proc/meminfo] and extract the [MemTotal] value (in kB).

    result = node.SudoCommand("cat /proc/meminfo");

    if (result.ExitCode == 0)
    {
        var memInfo       = result.OutputText;
        var memTotalRegex = new Regex(@"^MemTotal:\s*(?<size>\d+)\s*kB", RegexOptions.Multiline);
        var memMatch      = memTotalRegex.Match(memInfo);

        if (memMatch.Success && long.TryParse(memMatch.Groups["size"].Value, out var memSizeKiB))
        {
            // Note that the RAM reported by Linux is somewhat less than the
            // physical RAM installed.

            node.Metadata.Labels.ComputeRam = (int)(memSizeKiB / 1024);  // Convert KiB --> MiB
        }
    }

    // Download [/proc/cpuinfo] and count the number of processors
    // (one "processor : N" line per logical processor).

    result = node.SudoCommand("cat /proc/cpuinfo");

    if (result.ExitCode == 0)
    {
        var cpuInfo          = result.OutputText;
        var processorRegex   = new Regex(@"^processor\s*:\s*\d+", RegexOptions.Multiline);
        var processorMatches = processorRegex.Matches(cpuInfo);

        node.Metadata.Labels.ComputeCores = processorMatches.Count;
    }

    // Determine the primary disk size.

    // $hack(jeff.lill):
    //
    // I'm not entirely sure how to determine which block device is hosting
    // the primary file system for all systems.  For now, I'm just going to
    // assume that this can be one of:
    //
    //      /dev/sda1
    //      /dev/sda
    //      /dev/xvda1
    //      /dev/xvda
    //
    // I'll try each of these in order and setting the label for the
    // first reasonable result we get back.

    var blockDevices = new string[] { "/dev/sda1", "/dev/sda", "/dev/xvda1", "/dev/xvda" };

    foreach (var blockDevice in blockDevices)
    {
        // [lsblk -b] reports the device size in bytes; [-n -d] suppresses the
        // header and partition children so the output is a single number.
        result = node.SudoCommand($"lsblk -b --output SIZE -n -d {blockDevice}", RunOptions.LogOutput);

        if (result.ExitCode == 0)
        {
            if (long.TryParse(result.OutputText.Trim(), out var deviceSize) && deviceSize > 0)
            {
                node.Metadata.Labels.StorageSize = ByteUnits.ToGiString(deviceSize);
                break;
            }
        }
    }
}
/// <summary>
/// Converts the stored value into the requested unit of measurement.
/// </summary>
/// <param name="unit">The desired unit of measurement.</param>
/// <returns>
/// The converted value, or <c>0</c> when either unit is unrecognized
/// (preserving the behavior of the original switch-based implementation).
/// </returns>
public decimal ToUnit(ByteUnits unit)
{
    if (unit == _unit)
    {
        return (decimal)_value;
    }

    var fromBytes = UnitToBytes(_unit);
    var toBytes   = UnitToBytes(unit);

    // The original nested switches silently fell through to 0 for
    // unit combinations they didn't know about; keep that contract.
    if (fromBytes == 0 || toBytes == 0)
    {
        return 0;
    }

    // [fromBytes / toBytes] is an exact power (or reciprocal power) of the unit
    // base, so this single scale matches the original per-pair multiply/divide.
    return (decimal)_value * (fromBytes / toBytes);
}

/// <summary>
/// Returns the number of bytes represented by one of <paramref name="unit"/>,
/// or <c>0</c> when the unit is unrecognized.
/// </summary>
private static decimal UnitToBytes(ByteUnits unit)
{
    switch (unit)
    {
        case ByteUnits.Byte:     return 1m;
        case ByteUnits.KiloByte: return (decimal)Constants.KB_BYTE_COUNT;
        case ByteUnits.MegaByte: return (decimal)Constants.MB_BYTE_COUNT;
        case ByteUnits.GigaByte: return (decimal)Constants.GB_BYTE_COUNT;
        case ByteUnits.TeraByte: return (decimal)Constants.TB_BYTE_COUNT;
        default:                 return 0m;
    }
}