/// <summary>
/// Creates a new named resource group backed by its own <see cref="ResourceManager"/>.
/// </summary>
/// <param name="name">Unique name for the group; duplicates are rejected.</param>
/// <param name="maxResourcesPerFrame">Budget of resources the group may process per frame.</param>
/// <returns>The <see cref="ResourceManager"/> owned by the newly added group.</returns>
/// <exception cref="ArgumentException">Thrown when a group named <paramref name="name"/> already exists.</exception>
public ResourceManager Add(string name, int maxResourcesPerFrame = 10)
{
    // Guard clause: group names must be unique within this collection.
    if (Groups.Exists(r => r.Name == name))
        throw new ArgumentException($"resource group '{name}' already exists");

    var group = new ResourceGroup
    {
        Name = name,
        ResourceManager = new ResourceManager(FileSystem),
        MaxResourcesPerFrame = maxResourcesPerFrame
    };
    Groups.Add(group);
    return group.ResourceManager;
}
/// <summary>Re-resolves the test resource group from its stored identifier before each test.</summary>
public async Task TestSetUp()
{
    var armClient = GetArmClient();
    // Fetch a fresh ResourceGroup handle so each test starts from current service state.
    _resourceGroup = await armClient.GetResourceGroup(_resourceGroupIdentifier).GetAsync();
}
/// <summary>
/// Creates an EventHub namespace and a VNet with three service-endpoint subnets,
/// applies a network rule set (3 VNet rules + 5 IP rules), reads it back, and
/// verifies the rule counts before cleaning up the VNet.
/// </summary>
public async Task SetGetNetworkRuleSets()
{
    // Create the namespace under a fresh resource group.
    _resourceGroup = await CreateResourceGroupAsync();
    EventHubNamespaceCollection namespaceCollection = _resourceGroup.GetEventHubNamespaces();
    string namespaceName = await CreateValidNamespaceName("testnamespacemgmt");
    EventHubNamespace eventHubNamespace = (await namespaceCollection.CreateOrUpdateAsync(true, namespaceName, new EventHubNamespaceData(DefaultLocation))).Value;

    // Prepare a VNet with three subnets, each enabling the Microsoft.EventHub service endpoint.
    string vnetName = Recording.GenerateAssetName("sdktestvnet");
    var parameters = new VirtualNetworkData
    {
        AddressSpace = new AddressSpace { AddressPrefixes = { "10.0.0.0/16" } },
        Subnets =
        {
            new SubnetData
            {
                Name = "default1",
                AddressPrefix = "10.0.0.0/24",
                ServiceEndpoints = { new ServiceEndpointPropertiesFormat { Service = "Microsoft.EventHub" } }
            },
            new SubnetData
            {
                Name = "default2",
                AddressPrefix = "10.0.1.0/24",
                ServiceEndpoints = { new ServiceEndpointPropertiesFormat { Service = "Microsoft.EventHub" } }
            },
            new SubnetData
            {
                Name = "default3",
                AddressPrefix = "10.0.2.0/24",
                ServiceEndpoints = { new ServiceEndpointPropertiesFormat { Service = "Microsoft.EventHub" } }
            }
        },
        // NOTE(review): location is hard-coded rather than DefaultLocation — presumably intentional for this test; verify.
        Location = "eastus2"
    };
    VirtualNetwork virtualNetwork = (await _resourceGroup.GetVirtualNetworks().CreateOrUpdateAsync(true, vnetName, parameters)).Value;

    // Build the subnet resource IDs by hand from the subscription ID and group name.
    string subscriptionId = DefaultSubscription.Id.ToString();
    ResourceIdentifier subnetId1 = new ResourceIdentifier(subscriptionId + "/resourcegroups/" + _resourceGroup.Id.Name + "/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/subnets/default1");
    ResourceIdentifier subnetId2 = new ResourceIdentifier(subscriptionId + "/resourcegroups/" + _resourceGroup.Id.Name + "/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/subnets/default2");
    ResourceIdentifier subnetId3 = new ResourceIdentifier(subscriptionId + "/resourcegroups/" + _resourceGroup.Id.Name + "/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/subnets/default3");

    // Set the network rule set: default-deny, allow the three subnets plus five IPs.
    NetworkRuleSetData parameter = new NetworkRuleSetData()
    {
        DefaultAction = DefaultAction.Deny,
        VirtualNetworkRules =
        {
            new NetworkRuleSetVirtualNetworkRules() { Subnet = new WritableSubResource() { Id = subnetId1 } },
            new NetworkRuleSetVirtualNetworkRules() { Subnet = new WritableSubResource() { Id = subnetId2 } },
            new NetworkRuleSetVirtualNetworkRules() { Subnet = new WritableSubResource() { Id = subnetId3 } }
        },
        IpRules =
        {
            new NetworkRuleSetIpRules() { IpMask = "1.1.1.1", Action = "Allow" },
            new NetworkRuleSetIpRules() { IpMask = "1.1.1.2", Action = "Allow" },
            new NetworkRuleSetIpRules() { IpMask = "1.1.1.3", Action = "Allow" },
            new NetworkRuleSetIpRules() { IpMask = "1.1.1.4", Action = "Allow" },
            new NetworkRuleSetIpRules() { IpMask = "1.1.1.5", Action = "Allow" }
        }
    };
    await eventHubNamespace.GetNetworkRuleSet().CreateOrUpdateAsync(true, parameter);

    // Read the rule set back and verify what we wrote.
    NetworkRuleSet networkRuleSet = await eventHubNamespace.GetNetworkRuleSet().GetAsync();
    Assert.NotNull(networkRuleSet);
    Assert.NotNull(networkRuleSet.Data.IpRules);
    Assert.NotNull(networkRuleSet.Data.VirtualNetworkRules);
    Assert.AreEqual(networkRuleSet.Data.VirtualNetworkRules.Count, 3);
    Assert.AreEqual(networkRuleSet.Data.IpRules.Count, 5);

    // Clean up the virtual network (the group itself is cleaned up elsewhere).
    await virtualNetwork.DeleteAsync(true);
}
/// <summary>
/// Pulumi stack that provisions an AKS cluster: resource group, AD service
/// principal + password, Network Contributor role assignment, VNet/subnet,
/// and the managed cluster itself. Exports the raw kubeconfig.
/// </summary>
public AksStack()
{
    var config = new Pulumi.Config();
    // Fall back to a pinned default when the stack config doesn't specify a version.
    var kubernetesVersion = config.Get("kubernetesVersion") ?? "1.16.9";
    var resourceGroup = new ResourceGroup("aks-rg");
    // Random secret reused below as the service principal's password.
    var password = new RandomPassword("password", new RandomPasswordArgs { Length = 20, Special = true, }).Result;
    // Generate an RSA keypair; only the OpenSSH-format public half is needed by AKS.
    var sshPublicKey = new PrivateKey("ssh-key", new PrivateKeyArgs { Algorithm = "RSA", RsaBits = 4096, }).PublicKeyOpenssh;
    // Create the AD service principal for the K8s cluster.
    var adApp = new Application("aks");
    var adSp = new ServicePrincipal("aksSp", new ServicePrincipalArgs { ApplicationId = adApp.ApplicationId });
    var adSpPassword = new ServicePrincipalPassword("aksSpPassword", new ServicePrincipalPasswordArgs
    {
        ServicePrincipalId = adSp.Id,
        Value = password,
        // Far-future expiry so the credential effectively never rotates on its own.
        EndDate = "2099-01-01T00:00:00Z",
    });
    // Grant networking permissions to the SP (needed e.g. to provision Load Balancers).
    var assignment = new Assignment("role-assignment", new AssignmentArgs
    {
        PrincipalId = adSp.Id,
        Scope = resourceGroup.Id,
        RoleDefinitionName = "Network Contributor"
    });
    // Create a Virtual Network for the cluster.
    var vnet = new VirtualNetwork("vnet", new VirtualNetworkArgs
    {
        ResourceGroupName = resourceGroup.Name,
        AddressSpaces = { "10.2.0.0/16" },
    });
    // Create a Subnet for the cluster nodes.
    var subnet = new Subnet("subnet", new SubnetArgs
    {
        ResourceGroupName = resourceGroup.Name,
        VirtualNetworkName = vnet.Name,
        AddressPrefix = "10.2.1.0/24",
    });
    // Now allocate an AKS cluster.
    var cluster = new KubernetesCluster("aksCluster", new KubernetesClusterArgs
    {
        ResourceGroupName = resourceGroup.Name,
        DefaultNodePool = new KubernetesClusterDefaultNodePoolArgs
        {
            Name = "aksagentpool",
            NodeCount = 3,
            VmSize = "Standard_B2s",
            OsDiskSizeGb = 30,
            VnetSubnetId = subnet.Id,
        },
        DnsPrefix = "sampleaks",
        LinuxProfile = new KubernetesClusterLinuxProfileArgs
        {
            // NOTE(review): username appears redacted in source ("******") — confirm the intended admin name.
            AdminUsername = "******",
            SshKey = new KubernetesClusterLinuxProfileSshKeyArgs { KeyData = sshPublicKey, },
        },
        ServicePrincipal = new KubernetesClusterServicePrincipalArgs
        {
            ClientId = adApp.ApplicationId,
            ClientSecret = adSpPassword.Value,
        },
        KubernetesVersion = kubernetesVersion,
        RoleBasedAccessControl = new KubernetesClusterRoleBasedAccessControlArgs { Enabled = true },
        NetworkProfile = new KubernetesClusterNetworkProfileArgs
        {
            NetworkPlugin = "azure",
            // Service CIDR must not overlap the subnet above; DNS IP sits inside the service CIDR.
            DnsServiceIp = "10.2.2.254",
            ServiceCidr = "10.2.2.0/24",
            DockerBridgeCidr = "172.17.0.1/16",
        },
    });
    // Expose the kubeconfig as a stack output.
    this.KubeConfig = cluster.KubeConfigRaw;
}
/// <summary>
/// Synchronously updates a resource group via a PATCH operation. The request
/// format matches creation; any unspecified field keeps its current value.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group to be created or updated. The name is case
/// insensitive.
/// </param>
/// <param name='parameters'>
/// Parameters supplied to the update state resource group service operation.
/// </param>
public static ResourceGroup Patch(this IResourceGroupsOperations operations, string resourceGroupName, ResourceGroup parameters)
{
    // Offload the async call to the thread pool before blocking, so a caller's
    // synchronization context cannot deadlock the wait.
    var patchTask = Task.Factory.StartNew(
        state => ((IResourceGroupsOperations)state).PatchAsync(resourceGroupName, parameters),
        operations,
        CancellationToken.None,
        TaskCreationOptions.None,
        TaskScheduler.Default).Unwrap();
    return patchTask.GetAwaiter().GetResult();
}
/// <summary>
/// Serializes this CLR0 color-animation node into <paramref name="address"/>:
/// writes the version header, the resource group, one CLR0Material per child,
/// per-target material entries, then the trailing ABGR color frame data.
/// </summary>
public override void OnRebuild(VoidPtr address, int length, bool force)
{
    int count = Children.Count;
    // First material record sits after header + group header (0x18) + count entries (0x10 each).
    CLR0Material* pMat = (CLR0Material*)(address + (_version == 4 ? CLR0v4.Size : CLR0v3.Size) + 0x18 + (count * 0x10));
    // Color data starts after all material headers (8 bytes each) and entry records (8 bytes each).
    int offset = Children.Count * 8;
    foreach (CLR0MaterialNode n in Children) offset += n.Children.Count * 8;
    ABGRPixel* pData = (ABGRPixel*)((VoidPtr)pMat + offset);
    ResourceGroup* group;
    // Write the version-specific header and grab its embedded resource group.
    if (_version == 4)
    {
        CLR0v4* header = (CLR0v4*)address;
        *header = new CLR0v4(length, _numFrames, count, _loop);
        group = header->Group;
    }
    else
    {
        CLR0v3* header = (CLR0v3*)address;
        *header = new CLR0v3(length, _numFrames, count, _loop);
        group = header->Group;
    }
    *group = new ResourceGroup(count);
    ResourceEntry* entry = group->First;
    foreach (CLR0MaterialNode n in Children)
    {
        // Resource entry offsets are relative to the group header.
        (entry++)->_dataOffset = (int)pMat - (int)group;
        uint newFlags = 0;
        CLR0MaterialEntry* pMatEntry = (CLR0MaterialEntry*)((VoidPtr)pMat + 8);
        foreach (CLR0MaterialEntryNode e in n.Children)
        {
            // Two flag bits per target: 01 = animated, 11 = constant.
            newFlags |= ((uint)((1 + (e._constant ? 2 : 0)) & 3) << ((int)e._target * 2));
            if (e._numEntries == 0)
                // Constant color: store the solid color inline, no frame data.
                *pMatEntry = new CLR0MaterialEntry((ABGRPixel)e._colorMask, (ABGRPixel)e._solidColor);
            else
            {
                // Animated: store an offset to the frame colors, then append them (ARGB -> ABGR).
                *pMatEntry = new CLR0MaterialEntry((ABGRPixel)e._colorMask, (int)pData - (int)((VoidPtr)pMatEntry + 4));
                foreach (ARGBPixel p in e._colors) *pData++ = (ABGRPixel)p;
            }
            pMatEntry++;
            e._changed = false;
        }
        pMat->_flags = newFlags;
        // Next material record begins right after this material's entries.
        pMat = (CLR0Material*)pMatEntry;
        n._changed = false;
    }
    // Version 4 supports trailing user data; append it after the color data.
    if (_userEntries.Count > 0 && _version == 4)
    {
        CLR0v4* header = (CLR0v4*)address;
        header->UserData = pData;
        _userEntries.Write(pData);
    }
}
/// <summary>Builds the top tab strip for a resource group as a styled block control.</summary>
private Control getTopTabBlock( ResourceGroup resourceGroup )
{
    // Lay the tab controls out on a single, bottom-aligned line, then wrap in a block;
    // both carry the same top-tab CSS class.
    var tabControls = getTabControlsForResources( resourceGroup, false ).ToArray();
    var tabLine = new ControlLine( tabControls )
    {
        CssClass = CssElementCreator.TopTabCssClass,
        VerticalAlignment = TableCellVerticalAlignment.Bottom
    };
    return new Block( tabLine ) { CssClass = CssElementCreator.TopTabCssClass };
}
/// <summary>
/// Serializes this user-data collection to <paramref name="userDataAddr"/>:
/// rewrites the resource group, then appends one UserDataEntry (0x18 bytes)
/// per class followed by its big-endian float/int values.
/// </summary>
public void Write(VoidPtr userDataAddr)
{
    // Nothing to write for an empty collection or a null destination.
    if (Count == 0 || userDataAddr == null) return;
    UserData* data = (UserData*)userDataAddr;
    ResourceGroup* pGroup = data->Group;
    // Skip the group's sentinel first entry.
    ResourceEntry* pEntry = &pGroup->_first + 1;
    *pGroup = new ResourceGroup(Count);
    // Entry payloads start immediately after the group block.
    byte* pData = (byte*)pGroup + pGroup->_totalSize;
    int id = 0;
    foreach (UserDataClass s in this)
    {
        (pEntry++)->_dataOffset = (int)pData - (int)pGroup;
        UserDataEntry* p = (UserDataEntry*)pData;
        // Strings are stored as a single entry (or zero if empty); numeric types store one per value.
        *p = new UserDataEntry(s.DataType != UserValueType.String ? s._entries.Count : (s._entries.Count > 0 ? 1 : 0), s._type, id++);
        pData += 0x18;
        if (s.DataType != UserValueType.String)
            for (int i = 0; i < s._entries.Count; i++)
                if (s.DataType == UserValueType.Float)
                {
                    // Unparseable values are written as 0 rather than failing the rebuild.
                    float x;
                    if (!float.TryParse(s._entries[i], out x)) x = 0;
                    *(bfloat*)pData = x;
                    pData += 4;
                }
                else if (s.DataType == UserValueType.Int)
                {
                    int x;
                    if (!int.TryParse(s._entries[i], out x)) x = 0;
                    *(bint*)pData = x;
                    pData += 4;
                }
        p->_totalLen = (int)pData - (int)p;
    }
    data->_totalLen = (int)pData - (int)userDataAddr;
}
/// <summary>Gets the collection of cloud services scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static CloudServiceCollection GetCloudServices(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetCloudServices();
/// <summary>Gets the collection of galleries scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static GalleryCollection GetGalleries(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetGalleries();
/// <summary>Gets the collection of snapshots scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static SnapshotCollection GetSnapshots(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetSnapshots();
/// <summary>
/// Scenario test: as admin, creates an AD user and grants it Reader on the first
/// resource group; then, as that user, verifies the '*/read' permission; finally
/// deletes the user and role assignment. State flows between the three workflow
/// phases through the captured locals.
/// </summary>
public void RaUserPermissions()
{
    User newUser = null;
    ResourceGroup resourceGroup = null;
    // Fixed assignment GUID so cleanup can delete it deterministically.
    string roleAssignmentId = "A807281A-2F74-44B9-B862-C0D3683ADCC9";
    string userName = null;
    string userPass = null;
    string userPermission = "*/read";
    string roleDefinitionName = "Reader";
    string newUserObjectId = null;
    var controllerAdmin = ResourcesController.NewInstance;
    try
    {
        // Generate new user under admin account.
        controllerAdmin.RunPsTestWorkflow(
            // scriptBuilder
            () =>
            {
                userName = TestUtilities.GenerateName("aduser");
                // Append chars to satisfy password complexity rules.
                userPass = TestUtilities.GenerateName("adpass") + "0#$";
                var upn = userName + "@" + controllerAdmin.UserDomain;
                var parameter = new UserCreateParameters
                {
                    UserPrincipalName = upn,
                    DisplayName = userName,
                    AccountEnabled = true,
                    MailNickname = userName + "test",
                    PasswordProfile = new PasswordProfile
                    {
                        ForceChangePasswordNextLogin = false,
                        Password = userPass
                    }
                };
                newUser = controllerAdmin.GraphClient.Users.Create(parameter);
                newUserObjectId = newUser.ObjectId;
                resourceGroup = controllerAdmin.ResourceManagementClient.ResourceGroups
                    .List()
                    .First();
                // Wait to allow newly created object changes to propagate.
                TestMockSupport.Delay(20000);
                return(new[]
                {
                    string.Format(
                        "CreateRoleAssignment '{0}' '{1}' '{2}' '{3}'",
                        roleAssignmentId,
                        newUserObjectId,
                        roleDefinitionName,
                        resourceGroup.Name)
                });
            },
            // cleanup
            null,
            MethodBase.GetCurrentMethod().ReflectedType?.ToString(),
            MethodBase.GetCurrentMethod().Name + "_Setup");

        // Login as different user and run the test.
        var controllerUser = ResourcesController.NewInstance;
        controllerUser.RunPsTestWorkflow(
            // scriptBuilder
            () =>
            {
                // Wait to allow for the role assignment to propagate.
                TestMockSupport.Delay(20000);
                return(new[]
                {
                    string.Format(
                        "Test-RaUserPermissions '{0}' '{1}'",
                        resourceGroup.Name,
                        userPermission)
                });
            },
            // cleanup
            null,
            MethodBase.GetCurrentMethod().ReflectedType?.ToString(),
            MethodBase.GetCurrentMethod().Name + "_Test");
    }
    finally
    {
        // Remove created user and assignment, even if the test body failed.
        controllerAdmin = ResourcesController.NewInstance;
        controllerAdmin.RunPsTestWorkflow(
            // scriptBuilder
            null,
            // initialize
            null,
            // cleanup
            () =>
            {
                if (newUser != null)
                {
                    controllerAdmin.GraphClient.Users.Delete(newUser.ObjectId);
                }
                if (resourceGroup != null)
                {
                    controllerAdmin.AuthorizationManagementClient.RoleAssignments.Delete(resourceGroup.Id, roleAssignmentId).ToString();
                }
            },
            MethodBase.GetCurrentMethod().ReflectedType?.ToString(),
            MethodBase.GetCurrentMethod().Name + "_Cleanup");
    }
}
/// <summary>Handles removal of a resource: notifies the material parser, then
/// unregisters the resource's location from the Ogre resource manager.</summary>
public void resourceRemoved(ResourceGroup group, Engine.Resources.Resource resource)
{
    // Keep the material parser in sync first, then drop the location mapping.
    materialParser.resourceRemoved(group, resource);
    var locationName = resource.LocName;
    var groupName = group.FullName;
    ogreResourceManager.removeResourceLocation(locationName, groupName);
}
/// <summary>
/// Recursively post-processes a bone and its children: resolves the node's data
/// address from the group's entry table, builds/sorts its resource entry with
/// the string-table name, then lets the node patch its own data.
/// </summary>
/// <param name="index">Running entry index into the group; advanced by one per visited node.</param>
private void PostProcessBone(VoidPtr mdlAddress, MDL0EntryNode node, ResourceGroup* group, ref int index, StringTable stringTable)
{
    // Entry offsets are relative to the group header; skip the sentinel via pointer math.
    VoidPtr dataAddress = (VoidPtr)group + (&group->_first)[index]._dataOffset;
    ResourceEntry.Build(group, index++, dataAddress, (BRESString*)stringTable[node.Name]);
    node.PostProcess(mdlAddress, dataAddress, stringTable);
    // Depth-first: children consume subsequent entry indices.
    foreach (MDL0EntryNode n in node.Children) PostProcessBone(mdlAddress, n, group, ref index, stringTable);
}
// Extracts resources from a group, using the specified type.
private void ExtractGroup(ResourceGroup* pGroup, Type t)
{
    // If using shaders, cache results instead of unique entries.
    // This is because shaders can appear multiple times, but with different names.
    bool useCache = t == typeof(MDL0ShaderNode);
    MDL0CommonHeader* pHeader;
    ResourceNode node;
    // Small fixed-size offset cache on the stack; assumes at most 128 shader entries per group.
    int* offsetCache = stackalloc int[128];
    int offsetCount = 0, offset, x;
    foreach (ResourcePair p in *pGroup)
    {
        // Get data offset.
        offset = (int)p.Data;
        if (useCache)
        {
            // Search for entry within offset cache (linear scan).
            for (x = 0; (x < offsetCount) && (offsetCache[x] != offset); x++);
            // If found, skip to next entry — this data was already extracted under another name.
            if (x < offsetCount) continue;
            // Otherwise, store offset.
            offsetCache[offsetCount++] = offset;
        }
        // Create resource instance of the requested node type.
        pHeader = (MDL0CommonHeader*)p.Data;
        node = Activator.CreateInstance(t) as ResourceNode;
        // Initialize against this parent and the raw data span.
        node.Initialize(this, pHeader, pHeader->_size);
        // Set the name of the node. This is necessary for defs.
        // Make sure we're not naming the shaders,
        // or it will name it the name of the first material it's linked to.
        if (t != typeof(MDL0ShaderNode)) node._name = (string)p.Name;
    }
}
/// <summary>
/// Interactively creates or updates the Key Vault resource group and ensures the
/// server application's service principal has the Reader role on it.
/// </summary>
/// <param name="serverApplication">Service principal whose ObjectId receives the Reader assignment.</param>
static async Task CreateOrUpdateResourceGroup(IServicePrincipal serverApplication)
{
    Console.WriteLine("Add or update Key Vault Resource Group (required once)? [y/N] to continue: ");
    // FIX: Console.ReadLine() returns null at end-of-stream; guard before chaining
    // to avoid a NullReferenceException when input is redirected/exhausted.
    var key = (Console.ReadLine() ?? string.Empty)
        .ToUpperInvariant()
        .Trim();
    if (key != "Y")
    {
        return;
    }

    Console.Write("SubscriptionId: ");
    var subscriptionId = Console.ReadLine();

    Console.Write("Resource Group Name (blank for default 'SignService-KeyVaults'): ");
    var name = Console.ReadLine();
    if (string.IsNullOrWhiteSpace(name))
    {
        name = "SignService-KeyVaults";
    }

    // FIX: use Write (not WriteLine) so the cursor stays on the prompt line,
    // consistent with the sibling prompts above.
    Console.Write("Location (eastus, westus, etc): ");
    var location = Console.ReadLine();

    var accessToken = await authContext.AcquireTokenSilentAsync(azureRmResourceId, clientId);
    var rgc = new ResourceManagementClient(new TokenCredentials(accessToken.AccessToken));
    rgc.SubscriptionId = subscriptionId;

    var rg = new ResourceGroup(location, name: name);
    rg = await rgc.ResourceGroups.CreateOrUpdateAsync(name, rg);

    var ac = new AuthorizationManagementClient(new TokenCredentials(accessToken.AccessToken));
    ac.SubscriptionId = subscriptionId;

    // See if the resource group already grants the Reader role to the service principal.
    var roleFilter = new ODataQuery <RoleDefinitionFilter>(f => f.RoleName == "Reader");
    var roleDefinitions = await ac.RoleDefinitions.ListAsync(rg.Id, roleFilter);
    var roleDefinition = roleDefinitions.First();
    var roleId = roleDefinition.Id;
    var spid = serverApplication.ObjectId;
    var raps = await ac.RoleAssignments.ListForScopeAsync(rg.Id, new ODataQuery <RoleAssignmentFilter>(f => f.PrincipalId == spid));
    if (raps.All(ra => ra.Properties.RoleDefinitionId != roleId))
    {
        // None found; add one with a fresh GUID as the assignment name.
        var rap = new RoleAssignmentProperties
        {
            PrincipalId = spid,
            RoleDefinitionId = roleId
        };
        var ra = await ac.RoleAssignments.CreateAsync(rg.Id, Guid.NewGuid().ToString(), rap);
    }
}
// Writes the MDL0 definition group (NodeTree / NodeMix / DrawOpa / DrawXlu byte
// streams) at pGroup/pData and advances both pointers past what was written.
private static void WriteDefs(ModelLinker linker, ref byte* pGroup, ref byte* pData)
{
    MDL0Node mdl = linker.Model;

    //This should never happen!
    if (!mdl._hasMix && !mdl._hasOpa && !mdl._hasTree && !mdl._hasXlu)
        return;

    // Copy the object list so Opa/Xlu sorting below doesn't disturb the model's order.
    ResourceNode[] polyList = null;
    if (mdl._objList != null)
    {
        polyList = new ResourceNode[mdl._objList.Count];
        Array.Copy(mdl._objList.ToArray(), polyList, mdl._objList.Count);
    }

    MDL0ObjectNode poly;
    int entryCount = 0;
    byte* floor = pData;  // start of data region, used for 4-byte alignment padding
    int dataLen;

    ResourceGroup* group = linker.Defs = (ResourceGroup*)pGroup;
    ResourceEntry* entry = &group->_first + 1;  // skip the group's sentinel entry

    //NodeTree
    if (mdl._hasTree)
    {
        //Write group entry
        entry[entryCount++]._dataOffset = (int)(pData - pGroup);

        // One 5-byte record per cached bone: tag 2, entry index, parent node index (0 if no bone parent).
        int bCount = linker.BoneCache.Length;
        for (int i = 0; i < bCount; i++)
        {
            MDL0BoneNode bone = linker.BoneCache[i] as MDL0BoneNode;
            *pData = 2; //Entry tag
            *(bushort*)(pData + 1) = (ushort)bone._entryIndex;
            *(bushort*)(pData + 3) = (ushort)(bone._parent is MDL0BoneNode ? ((MDL0BoneNode)bone._parent)._nodeIndex : 0);
            pData += 5; //Advance
        }
        *pData++ = 1; //Terminate
    }

    //NodeMix
    //Only weight references go here.
    //First list bones used by weight groups, in bone order
    //Then list weight groups that use bones. Ordered by entry count.
    if (mdl._hasMix)
    {
        //Write group entry
        entry[entryCount++]._dataOffset = (int)(pData - pGroup);

        //Add bones first (using flat bone list)
        foreach (MDL0BoneNode b in linker.BoneCache)
            if (b._weightCount > 0)
            {
                *pData = 5; //Tag
                *(bushort*)(pData + 1) = (ushort)b._nodeIndex;
                *(bushort*)(pData + 3) = (ushort)b._entryIndex;
                pData += 5; //Advance
            }

        //Add weight groups (using sorted influence list)
        foreach (Influence i in mdl._influences._influences)
        {
            *pData = 3; //Tag
            *(bushort*)&pData[1] = (ushort)i._index;

            // Count only valid weights (bone present, non-zero weight, index within node cache).
            int g = 0;
            foreach (BoneWeight w in i._weights)
                if (w.Bone != null && w.Weight != 0 && w.Bone._nodeIndex < linker.NodeCache.Length && w.Bone._nodeIndex >= 0 && linker.NodeCache[w.Bone._nodeIndex] is MDL0BoneNode)
                    g++;
            pData[3] = (byte)g;
            pData += 4; //Advance

            // 6 bytes per weight: bone node index + big-endian float weight.
            foreach (BoneWeight w in i._weights)
            {
                if (w.Bone == null || w.Weight == 0 || w.Bone._nodeIndex >= linker.NodeCache.Length || w.Bone._nodeIndex < 0)
                    continue;
                *(bushort*)pData = (ushort)w.Bone._nodeIndex;
                *(bfloat*)(pData + 2) = w.Weight;
                pData += 6; //Advance
            }
        }
        *pData++ = 1; //Terminate
    }

    //DrawOpa
    if (mdl._hasOpa && polyList != null)
    {
        Array.Sort(polyList, MDL0ObjectNode.DrawCompareOpa);

        //Write group entry
        entry[entryCount++]._dataOffset = (int)(pData - pGroup);

        // 8 bytes per drawn object: tag 4, material index, object index, bone index, draw priority.
        for (int i = 0; i < polyList.Length; i++)
        {
            poly = polyList[i] as MDL0ObjectNode;
            if (poly.OpaMaterialNode != null)
            {
                *pData = 4; //Tag
                *(bushort*)(pData + 1) = (ushort)poly.OpaMaterialNode._entryIndex;
                *(bushort*)(pData + 3) = (ushort)poly._entryIndex;
                *(bushort*)(pData + 5) = (ushort)(poly.BoneNode != null ? poly.BoneNode.BoneIndex : 0);
                pData[7] = poly.DrawPriority;
                pData += 8; //Advance
            }
        }
        *pData++ = 1; //Terminate
    }

    //DrawXlu — same record layout as DrawOpa but sorted/filtered for translucent materials.
    if (mdl._hasXlu && polyList != null)
    {
        Array.Sort(polyList, MDL0ObjectNode.DrawCompareXlu);

        //Write group entry
        entry[entryCount++]._dataOffset = (int)(pData - pGroup);

        for (int i = 0; i < polyList.Length; i++)
        {
            poly = polyList[i] as MDL0ObjectNode;
            if (poly.XluMaterialNode != null)
            {
                *pData = 4; //Tag
                *(bushort*)(pData + 1) = (ushort)poly.XluMaterialNode._entryIndex;
                *(bushort*)(pData + 3) = (ushort)poly._entryIndex;
                *(bushort*)(pData + 5) = (ushort)(poly.BoneNode != null ? poly.BoneNode.BoneIndex : 0);
                pData[7] = poly.DrawPriority;
                pData += 8; //Advance
            }
        }
        *pData++ = 1; //Terminate
    }

    //Align data to a 4-byte boundary with zero padding.
    dataLen = (int)(pData - floor);
    while ((dataLen++ & 3) != 0)
        *pData++ = 0;

    //Set header now that the final entry count is known.
    *group = new ResourceGroup(entryCount);

    //Advance group pointer past the group block for the next writer.
    pGroup += group->_totalSize;
}
/// <summary>Gets the collection of availability sets scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static AvailabilitySetCollection GetAvailabilitySets(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetAvailabilitySets();
/// <summary>
/// Serializes this VIS0 visibility-animation node: writes the version header and
/// resource group, then rebuilds each child sequentially after the group block,
/// recording each child's offset in the group's entry table.
/// </summary>
public override void OnRebuild(VoidPtr address, int length, bool force)
{
    int count = Children.Count;
    ResourceGroup* group;
    // Write the version-specific header; frame count is adjusted by ConversionBias.
    if (_version == 4)
    {
        VIS0v4* header = (VIS0v4*)address;
        *header = new VIS0v4(length, (ushort)(_numFrames - ConversionBias), (ushort)count, _loop);
        group = header->Group;
    }
    else
    {
        VIS0v3* header = (VIS0v3*)address;
        *header = new VIS0v3(length, (ushort)(_numFrames - ConversionBias), (ushort)count, _loop);
        group = header->Group;
    }
    *group = new ResourceGroup(count);
    ResourceEntry* entry = group->First;
    VoidPtr dataAddress = group->EndAddress;
    foreach (ResourceNode n in Children)
    {
        // Entry offsets are relative to the group header.
        (entry++)->_dataOffset = (int)dataAddress - (int)group;
        // Each child writes its own precalculated size worth of data.
        int len = n._calcSize;
        n.Rebuild(dataAddress, len, force);
        dataAddress += len;
    }
    // Version 4 supports trailing user data after the children.
    if (_userEntries.Count > 0 && _version == 4)
    {
        VIS0v4* header = (VIS0v4*)address;
        header->UserData = dataAddress;
        _userEntries.Write(dataAddress);
    }
}
/// <summary>Gets the collection of proximity placement groups scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static ProximityPlacementGroupCollection GetProximityPlacementGroups(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetProximityPlacementGroups();
/// <summary>Initializes a ROOT header with the magic tag, total size, and a
/// master resource group sized for the given entry count.</summary>
public ROOTHeader(int size, int numEntries)
{
    _size = size;
    _tag = Tag;
    _master = new ResourceGroup(numEntries);
}
/// <summary>Gets the collection of dedicated host groups scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static DedicatedHostGroupCollection GetDedicatedHostGroups(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetDedicatedHostGroups();
/// <summary>Parses the source API blueprint and caches its first resource group.</summary>
public void initialize()
{
    // Strip carriage returns before parsing — the parser presumably expects LF-only input; verify.
    var apiText = source.API.Replace("\r", string.Empty);
    SnowCrashCLR.parse(apiText, out _blueprint, out _result);
    _resourceGroup = _blueprint.GetResourceGroupsCs().FirstOrDefault();
}
/// <summary>Gets the collection of SSH public keys scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static SshPublicKeyCollection GetSshPublicKeys(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetSshPublicKeys();
/// <summary>
/// Asynchronously updates a resource group via a PATCH operation. The request
/// format matches creation; any unspecified field keeps its current value.
/// </summary>
/// <param name='operations'>
/// The operations group for this extension method.
/// </param>
/// <param name='resourceGroupName'>
/// The name of the resource group to be created or updated. The name is case
/// insensitive.
/// </param>
/// <param name='parameters'>
/// Parameters supplied to the update state resource group service operation.
/// </param>
/// <param name='cancellationToken'>
/// The cancellation token.
/// </param>
public static async Task <ResourceGroup> PatchAsync(this IResourceGroupsOperations operations, string resourceGroupName, ResourceGroup parameters, CancellationToken cancellationToken = default(CancellationToken))
{
    // Dispose the HTTP operation response after extracting the deserialized body.
    using (var httpResponse = await operations.PatchWithHttpMessagesAsync(resourceGroupName, parameters, null, cancellationToken).ConfigureAwait(false))
    {
        return httpResponse.Body;
    }
}
/// <summary>Gets the collection of virtual machine scale sets scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static VirtualMachineScaleSetCollection GetVirtualMachineScaleSets(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetVirtualMachineScaleSets();
/// <summary>
/// End-to-end test of namespace authorization rules: create a Listen+Send rule,
/// read it back individually and via list (alongside the default rule), add the
/// Manage right, then delete it and verify only the default rule remains.
/// </summary>
public async Task NamespaceCreateGetUpdateDeleteAuthorizationRule()
{
    // Create the namespace under a fresh resource group.
    _resourceGroup = await CreateResourceGroupAsync();
    EventHubNamespaceCollection namespaceCollection = _resourceGroup.GetEventHubNamespaces();
    string namespaceName = await CreateValidNamespaceName("testnamespacemgmt");
    EventHubNamespace eventHubNamespace = (await namespaceCollection.CreateOrUpdateAsync(true, namespaceName, new EventHubNamespaceData(DefaultLocation))).Value;
    NamespaceAuthorizationRuleCollection ruleCollection = eventHubNamespace.GetNamespaceAuthorizationRules();

    // Create an authorization rule with Listen and Send rights.
    string ruleName = Recording.GenerateAssetName("authorizationrule");
    AuthorizationRuleData parameter = new AuthorizationRuleData()
    {
        Rights = { AccessRights.Listen, AccessRights.Send }
    };
    NamespaceAuthorizationRule authorizationRule = (await ruleCollection.CreateOrUpdateAsync(true, ruleName, parameter)).Value;
    Assert.NotNull(authorizationRule);
    Assert.AreEqual(authorizationRule.Data.Rights.Count, parameter.Rights.Count);

    // Get the authorization rule by name.
    authorizationRule = await ruleCollection.GetAsync(ruleName);
    Assert.AreEqual(authorizationRule.Id.Name, ruleName);
    Assert.NotNull(authorizationRule);
    Assert.AreEqual(authorizationRule.Data.Rights.Count, parameter.Rights.Count);

    // List all authorization rules; the namespace's default rule plus ours should be present.
    List <NamespaceAuthorizationRule> rules = await ruleCollection.GetAllAsync().ToEnumerableAsync();
    Assert.True(rules.Count > 1);
    bool isContainAuthorizationRuleName = false;
    bool isContainDefaultRuleName = false;
    foreach (NamespaceAuthorizationRule rule in rules)
    {
        if (rule.Id.Name == ruleName)
        {
            isContainAuthorizationRuleName = true;
        }
        if (rule.Id.Name == DefaultNamespaceAuthorizationRule)
        {
            isContainDefaultRuleName = true;
        }
    }
    Assert.True(isContainDefaultRuleName);
    Assert.True(isContainAuthorizationRuleName);

    // Update the rule by adding the Manage right (CreateOrUpdate acts as upsert).
    parameter.Rights.Add(AccessRights.Manage);
    authorizationRule = (await ruleCollection.CreateOrUpdateAsync(true, ruleName, parameter)).Value;
    Assert.NotNull(authorizationRule);
    Assert.AreEqual(authorizationRule.Data.Rights.Count, parameter.Rights.Count);

    // Delete the rule and validate only the default rule is left.
    await authorizationRule.DeleteAsync(true);
    Assert.IsFalse(await ruleCollection.ExistsAsync(ruleName));
    rules = await ruleCollection.GetAllAsync().ToEnumerableAsync();
    Assert.True(rules.Count == 1);
    Assert.AreEqual(rules[0].Id.Name, DefaultNamespaceAuthorizationRule);
}
/// <summary>Gets the collection of images scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static ImageCollection GetImages(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetImages();
/// <summary>
/// Counts generic resources, scoped to <paramref name="rg"/> when one is given,
/// otherwise across all resources the container can enumerate.
/// </summary>
protected static async Task<int> GetResourceCountAsync(GenericResourceContainer genericResources, ResourceGroup rg = default)
{
    var pageable = rg is null
        ? genericResources.GetAllAsync()
        : genericResources.GetByResourceGroupAsync(rg.Id.Name);

    var count = 0;
    await foreach (var _ in pageable)
    {
        count++;
    }
    return count;
}
/// <summary>Gets the collection of restore point groups scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static RestorePointGroupCollection GetRestorePointGroups(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetRestorePointGroups();
/// <summary>
/// Track-1 style test: creates a resource group, VNet (3 subnets with the
/// EventHub service endpoint), and namespace; applies a deny-by-default network
/// rule set, reads it, overwrites it with allow-all, then deletes the namespace.
/// </summary>
public async Task NetworkRuleSetCreateGetUpdateDelete()
{
    var location = await GetLocation();
    var resourceGroupName = Recording.GenerateAssetName(Helper.ResourceGroupPrefix);
    ResourceGroup resourceGroup = await ArmClient.DefaultSubscription.GetResourceGroups().CreateOrUpdate(resourceGroupName, new ResourceGroupData(location)).WaitForCompletionAsync();

    // Prepare VNet with three subnets, each enabling the Microsoft.EventHub service endpoint.
    var vnetName = Recording.GenerateAssetName("sdktestvnet");
    var parameters = new VirtualNetworkData
    {
        AddressSpace = new AddressSpace { AddressPrefixes = { "10.0.0.0/16" } },
        Subnets =
        {
            new SubnetData
            {
                Name = "default1",
                AddressPrefix = "10.0.0.0/24",
                ServiceEndpoints = { new ServiceEndpointPropertiesFormat { Service = "Microsoft.EventHub" } }
            },
            new SubnetData
            {
                Name = "default2",
                AddressPrefix = "10.0.1.0/24",
                ServiceEndpoints = { new ServiceEndpointPropertiesFormat { Service = "Microsoft.EventHub" } }
            },
            new SubnetData
            {
                Name = "default3",
                AddressPrefix = "10.0.2.0/24",
                ServiceEndpoints = { new ServiceEndpointPropertiesFormat { Service = "Microsoft.EventHub" } }
            }
        },
        // NOTE(review): VNet location is hard-coded independently of `location` — presumably deliberate; verify.
        Location = "eastus2"
    };
    await WaitForCompletionAsync(await resourceGroup.GetVirtualNetworks().CreateOrUpdateAsync(vnetName, parameters));

    // Create a namespace with a couple of tags.
    var namespaceName = Recording.GenerateAssetName(Helper.NamespacePrefix);
    var createNamespaceResponse = await NamespacesOperations.StartCreateOrUpdateAsync(resourceGroupName, namespaceName,
        new EHNamespace()
        {
            Location = location,
            Tags = { { "tag1", "value1" }, { "tag2", "value2" } }
        }
    );
    var np = (await WaitForCompletionAsync(createNamespaceResponse)).Value;
    Assert.NotNull(createNamespaceResponse);
    Assert.AreEqual(np.Name, namespaceName);
    DelayInTest(5);

    // Get the created namespace; retry once if provisioning hasn't settled yet.
    var getNamespaceResponse = await NamespacesOperations.GetAsync(resourceGroupName, namespaceName);
    if (string.Compare(getNamespaceResponse.Value.ProvisioningState, "Succeeded", true) != 0)
    {
        DelayInTest(5);
    }
    getNamespaceResponse = await NamespacesOperations.GetAsync(resourceGroupName, namespaceName);
    Assert.NotNull(getNamespaceResponse);
    Assert.AreEqual("Succeeded", getNamespaceResponse.Value.ProvisioningState, StringComparer.CurrentCultureIgnoreCase.ToString());
    Assert.AreEqual(location, getNamespaceResponse.Value.Location, StringComparer.CurrentCultureIgnoreCase.ToString());

    // Apply a deny-by-default rule set: allow the three subnets and five IPs.
    var netWorkRuleSet = await NamespacesOperations.CreateOrUpdateNetworkRuleSetAsync(resourceGroupName, namespaceName,
        new NetworkRuleSet()
        {
            DefaultAction = DefaultAction.Deny,
            VirtualNetworkRules =
            {
                new NWRuleSetVirtualNetworkRules() { Subnet = new ResourceManager.EventHubs.Models.Subnet("/subscriptions/" + SubscriptionId + "/resourcegroups/" + resourceGroupName + "/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/subnets/default1") },
                new NWRuleSetVirtualNetworkRules() { Subnet = new ResourceManager.EventHubs.Models.Subnet("/subscriptions/" + SubscriptionId + "/resourcegroups/" + resourceGroupName + "/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/subnets/default2") },
                new NWRuleSetVirtualNetworkRules() { Subnet = new ResourceManager.EventHubs.Models.Subnet("/subscriptions/" + SubscriptionId + "/resourcegroups/" + resourceGroupName + "/providers/Microsoft.Network/virtualNetworks/" + vnetName + "/subnets/default3") }
            },
            IpRules =
            {
                new NWRuleSetIpRules() { IpMask = "1.1.1.1", Action = "Allow" },
                new NWRuleSetIpRules() { IpMask = "1.1.1.2", Action = "Allow" },
                new NWRuleSetIpRules() { IpMask = "1.1.1.3", Action = "Allow" },
                new NWRuleSetIpRules() { IpMask = "1.1.1.4", Action = "Allow" },
                new NWRuleSetIpRules() { IpMask = "1.1.1.5", Action = "Allow" }
            }
        });
    var getNetworkRuleSet = await NamespacesOperations.GetNetworkRuleSetAsync(resourceGroupName, namespaceName);

    // Overwrite with an allow-all rule set and read it back.
    var netWorkRuleSet1 = await NamespacesOperations.CreateOrUpdateNetworkRuleSetAsync(resourceGroupName, namespaceName, new NetworkRuleSet() { DefaultAction = "Allow" });
    var getNetworkRuleSet1 = await NamespacesOperations.GetNetworkRuleSetAsync(resourceGroupName, namespaceName);
    DelayInTest(60);

    // Delete the namespace.
    await WaitForCompletionAsync(await NamespacesOperations.StartDeleteAsync(resourceGroupName, namespaceName));
}
/// <summary>Gets the collection of capacity reservation groups scoped to the given resource group.</summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
public static CapacityReservationGroupCollection GetCapacityReservationGroups(this ResourceGroup resourceGroup)
    => GetExtensionClient(resourceGroup).GetCapacityReservationGroups();
/// <inheritdoc/>
public string ToDelimitedString()
{
    // Formats the 15 fields of this segment in positional order using the
    // configured field separator; trailing separators are trimmed at the end.
    // Nullable value types render null (empty field) when unset; repeating
    // ResourceGroup values are joined with the field-repeat separator.
    CultureInfo culture = CultureInfo.CurrentCulture;

    return(string.Format(
               culture,
               StringHelper.StringFormatSequence(0, 15, Configuration.FieldSeparator),
               Id,
               SetIdAig.HasValue ? SetIdAig.Value.ToString(culture) : null,
               SegmentActionCode,
               ResourceId?.ToDelimitedString(),
               ResourceType?.ToDelimitedString(),
               ResourceGroup != null ? string.Join(Configuration.FieldRepeatSeparator, ResourceGroup.Select(x => x.ToDelimitedString())) : null,
               ResourceQuantity.HasValue ? ResourceQuantity.Value.ToString(Consts.NumericFormat, culture) : null,
               ResourceQuantityUnits?.ToDelimitedString(),
               StartDateTime.HasValue ? StartDateTime.Value.ToString(Consts.DateTimeFormatPrecisionSecond, culture) : null,
               StartDateTimeOffset.HasValue ? StartDateTimeOffset.Value.ToString(Consts.NumericFormat, culture) : null,
               StartDateTimeOffsetUnits?.ToDelimitedString(),
               Duration.HasValue ? Duration.Value.ToString(Consts.NumericFormat, culture) : null,
               DurationUnits?.ToDelimitedString(),
               AllowSubstitutionCode?.ToDelimitedString(),
               FillerStatusCode?.ToDelimitedString()
               ).TrimEnd(Configuration.FieldSeparator.ToCharArray()));
}
/// <summary>
/// Gets an object representing the collection of <see cref="DiskCollection"/>
/// scoped to the given resource group.
/// </summary>
/// <param name="resourceGroup">The resource group to scope the collection to.</param>
/// <returns>The disk collection for the resource group.</returns>
public static DiskCollection GetDisks(this ResourceGroup resourceGroup) =>
    GetExtensionClient(resourceGroup).GetDisks();
// Materials must already be written. Do this last!
// Writes the texture and palette ResourceGroups plus their material back-reference
// tables into the model buffer. pGroup is advanced past everything written.
private static void WriteTextures(ModelLinker linker, ref byte* pGroup)
{
    MDL0GroupNode texGrp = linker.Groups[(int)MDLResourceType.Textures];
    MDL0GroupNode palGrp = linker.Groups[(int)MDLResourceType.Palettes];
    if (texGrp == null)
        return;

    // Lay out the texture group header (if any textures are referenced),
    // then the palette ("Dec") group header immediately after it.
    ResourceGroup* pTexGroup = null;
    ResourceEntry* pTexEntry = null;
    if (linker._texCount > 0)
    {
        linker.Textures = pTexGroup = (ResourceGroup*)pGroup;
        *pTexGroup = new ResourceGroup(linker._texCount);
        pTexEntry = &pTexGroup->_first + 1; // skip the group's sentinel first entry
        pGroup += pTexGroup->_totalSize;
    }

    ResourceGroup* pDecGroup = null;
    ResourceEntry* pDecEntry = null;
    if (linker._palCount > 0)
    {
        linker.Palettes = pDecGroup = (ResourceGroup*)pGroup;
        *pDecGroup = new ResourceGroup(linker._palCount);
        pDecEntry = &pDecGroup->_first + 1; // skip the group's sentinel first entry
        pGroup += pDecGroup->_totalSize;
    }

    // Reference data follows both group headers: for each referenced texture/palette,
    // a count then (material address, material-ref address) pairs relative to the entry.
    bint* pData = (bint*)pGroup;
    int offset;

    //Textures
    List<ResourceNode> list = texGrp.Children;
    list.Sort(); //Alphabetical order
    if (pTexGroup != null)
        foreach (MDL0TextureNode t in list)
            if (t._references.Count > 0)
            {
                offset = (int)pData;
                // Group entries store offsets relative to the group header.
                (pTexEntry++)->_dataOffset = offset - (int)pTexGroup;
                *pData++ = t._references.Count;
                foreach (MDL0MaterialRefNode mat in t._references)
                {
                    // Offsets are relative to this reference block — materials were
                    // already written, so their addresses are final (see note above).
                    *pData++ = (int)mat.Material.WorkingUncompressed.Address - offset;
                    *pData++ = (int)mat.WorkingUncompressed.Address - offset;
                }
            }

    //Palettes
    if (palGrp == null)
        return;
    list = palGrp.Children;
    list.Sort(); //Alphabetical order
    if (pDecGroup != null)
        foreach (MDL0TextureNode t in list)
            if (t._references.Count > 0)
            {
                offset = (int)pData;
                (pDecEntry++)->_dataOffset = offset - (int)pDecGroup;
                *pData++ = t._references.Count;
                foreach (MDL0MaterialRefNode mat in t._references)
                {
                    *pData++ = (int)mat.Material.WorkingUncompressed.Address - offset;
                    *pData++ = (int)mat.WorkingUncompressed.Address - offset;
                }
            }
}
// End-to-end scenario verification for one SignalR instance: validates the freshly
// created resource, then exercises list/get/keys, an update that flips the SKU tier,
// key regeneration, and finally delete (twice, to confirm idempotence).
// NOTE(review): assumes `signalr` was just created with the default test tags and the
// tier indicated by `isStandard` — confirm against callers.
private void SignalRScenarioVerification(SignalRManagementClient signalrClient, ResourceGroup resourceGroup, SignalRResource signalr, bool isStandard, int capacity = 1)
{
    // Validate the newly created SignalR instance
    SignalRTestUtilities.ValidateResourceDefaultTags(signalr);
    Assert.NotNull(signalr.Sku);
    if (isStandard)
    {
        Assert.Equal(SignalRSkuTier.Standard, signalr.Sku.Tier);
        Assert.Equal("Standard_S1", signalr.Sku.Name);
        Assert.Equal("S1", signalr.Sku.Size);
        Assert.Equal(capacity, signalr.Sku.Capacity);
    }
    else
    {
        Assert.Equal(SignalRSkuTier.Free, signalr.Sku.Tier);
        Assert.Equal("Free_F1", signalr.Sku.Name);
        Assert.Equal("F1", signalr.Sku.Size);
        Assert.Equal(capacity, signalr.Sku.Capacity);
    }
    Assert.Equal(ProvisioningState.Succeeded, signalr.ProvisioningState);
    Assert.NotEmpty(signalr.HostName);
    Assert.NotEmpty(signalr.ExternalIP);
    Assert.NotNull(signalr.PublicPort);
    Assert.NotNull(signalr.ServerPort);
    Assert.NotEmpty(signalr.Version);
    Assert.Equal(1, signalr.Features.Count);
    // ServiceMode will be set as Default
    Assert.Equal("Default", signalr.Features.First().Value);
    Assert.Equal(1, signalr.Cors.AllowedOrigins.Count);
    // all origins(*) are allowed by default.
    Assert.Equal("*", signalr.Cors.AllowedOrigins.First());

    // List the SignalR instances by resource group
    var signalrByResourceGroup = signalrClient.SignalR.ListByResourceGroup(resourceGroup.Name);
    Assert.Single(signalrByResourceGroup);
    signalr = signalrByResourceGroup.FirstOrDefault(r => StringComparer.OrdinalIgnoreCase.Equals(r.Name, signalr.Name));
    SignalRTestUtilities.ValidateResourceDefaultTags(signalr);

    // Get the SignalR instance by name
    signalr = signalrClient.SignalR.Get(resourceGroup.Name, signalr.Name);
    SignalRTestUtilities.ValidateResourceDefaultTags(signalr);

    // List keys
    var keys = signalrClient.SignalR.ListKeys(resourceGroup.Name, signalr.Name);
    Assert.NotNull(keys);
    Assert.NotEmpty(keys.PrimaryKey);
    Assert.NotEmpty(keys.PrimaryConnectionString);
    Assert.NotEmpty(keys.SecondaryKey);
    Assert.NotEmpty(keys.SecondaryConnectionString);

    // Update the SignalR instance — the update flips the tier
    // (Standard -> Free with capacity 1, Free -> Standard with capacity 5).
    capacity = isStandard ? 1 : 5;
    signalr = signalrClient.SignalR.Update(resourceGroup.Name, signalr.Name, new SignalRUpdateParameters
    {
        Tags = SignalRTestUtilities.DefaultNewTags,
        Sku = new ResourceSku
        {
            Name = isStandard ? "Free_F1" : "Standard_S1",
            Tier = isStandard ? "Free" : "Standard",
            Size = isStandard ? "F1" : "S1",
            Capacity = capacity,
        },
        Properties = new SignalRCreateOrUpdateProperties
        {
            HostNamePrefix = TestUtilities.GenerateName("signalr-service-test"),
            Features = new List <SignalRFeature>
            {
                new SignalRFeature { Value = "Serverless" }
            },
            Cors = new SignalRCorsSettings
            {
                AllowedOrigins = new List <string>
                {
                    "http://example.com:12345",
                    "https://contoso.com",
                }
            },
        },
    });

    // Validate the updated SignalR instance
    SignalRTestUtilities.ValidateResourceDefaultNewTags(signalr);
    Assert.NotNull(signalr.Sku);
    if (isStandard)
    {
        Assert.Equal(SignalRSkuTier.Free, signalr.Sku.Tier);
        Assert.Equal("Free_F1", signalr.Sku.Name);
        Assert.Equal("F1", signalr.Sku.Size);
        Assert.Equal(capacity, signalr.Sku.Capacity);
    }
    else
    {
        Assert.Equal(SignalRSkuTier.Standard, signalr.Sku.Tier);
        Assert.Equal("Standard_S1", signalr.Sku.Name);
        Assert.Equal("S1", signalr.Sku.Size);
        Assert.Equal(capacity, signalr.Sku.Capacity);
    }
    Assert.Equal(ProvisioningState.Succeeded, signalr.ProvisioningState);
    Assert.NotEmpty(signalr.HostName);
    Assert.NotEmpty(signalr.ExternalIP);
    Assert.NotNull(signalr.PublicPort);
    Assert.NotNull(signalr.ServerPort);
    Assert.NotEmpty(signalr.Version);
    Assert.Equal(1, signalr.Features.Count);
    Assert.Equal("Serverless", signalr.Features.First().Value);
    Assert.Equal(2, signalr.Cors.AllowedOrigins.Count);
    Assert.Equal("http://example.com:12345", signalr.Cors.AllowedOrigins.First());
    Assert.Equal("https://contoso.com", signalr.Cors.AllowedOrigins.Last());

    // List keys of the updated SignalR instance
    keys = signalrClient.SignalR.ListKeys(resourceGroup.Name, signalr.Name);
    Assert.NotNull(keys);
    Assert.NotEmpty(keys.PrimaryKey);
    Assert.NotEmpty(keys.PrimaryConnectionString);
    Assert.NotEmpty(keys.SecondaryKey);
    Assert.NotEmpty(keys.SecondaryConnectionString);

    // Regenerate primary key
    var newKeys1 = signalrClient.SignalR.RegenerateKey(resourceGroup.Name, signalr.Name, new RegenerateKeyParameters
    {
        KeyType = "Primary",
    });
    // Due to a bug in SignalR RP, the result of RegenerateKey is null.
    // Uncomment the following lines after the fix on the RP side.
    //Assert.NotNull(newKeys1);
    //Assert.NotEqual(keys.PrimaryKey, newKeys1.PrimaryKey);
    //Assert.NotEqual(keys.PrimaryConnectionString, newKeys1.PrimaryConnectionString);
    //Assert.Null(newKeys1.SecondaryKey);
    //Assert.Null(newKeys1.SecondaryConnectionString);

    // Ensure only the primary key is regenerated
    newKeys1 = signalrClient.SignalR.ListKeys(resourceGroup.Name, signalr.Name);
    Assert.NotNull(newKeys1);
    Assert.NotEqual(keys.PrimaryKey, newKeys1.PrimaryKey);
    Assert.NotEqual(keys.PrimaryConnectionString, newKeys1.PrimaryConnectionString);
    Assert.Equal(keys.SecondaryKey, newKeys1.SecondaryKey);
    Assert.Equal(keys.SecondaryConnectionString, newKeys1.SecondaryConnectionString);

    // Regenerate secondary key
    var newKeys2 = signalrClient.SignalR.RegenerateKey(resourceGroup.Name, signalr.Name, new RegenerateKeyParameters
    {
        KeyType = "Secondary",
    });
    // Due to a bug in SignalR RP, the result of RegenerateKey is null.
    // Uncomment the following lines after the fix on the RP side.
    //Assert.NotNull(newKeys2);
    //Assert.Null(newKeys2.PrimaryKey);
    //Assert.Null(newKeys2.PrimaryConnectionString);
    //Assert.NotEqual(keys.SecondaryKey, newKeys2.SecondaryKey);
    //Assert.NotEqual(keys.SecondaryConnectionString, newKeys2.SecondaryConnectionString);

    // ensure only the secondary key is regenerated
    newKeys2 = signalrClient.SignalR.ListKeys(resourceGroup.Name, signalr.Name);
    Assert.NotNull(newKeys2);
    Assert.Equal(newKeys1.PrimaryKey, newKeys2.PrimaryKey);
    Assert.Equal(newKeys1.PrimaryConnectionString, newKeys2.PrimaryConnectionString);
    Assert.NotEqual(newKeys1.SecondaryKey, newKeys2.SecondaryKey);
    Assert.NotEqual(newKeys1.SecondaryConnectionString, newKeys2.SecondaryConnectionString);

    // Delete the SignalR instance
    signalrClient.SignalR.Delete(resourceGroup.Name, signalr.Name);

    // Delete again, should be no-op
    signalrClient.SignalR.Delete(resourceGroup.Name, signalr.Name);
}
// Writes this SCN0 group's ResourceGroup header and rebuilds each child, while
// threading running cursors (_dataAddr, keyframeAddress, lightArrayAddress,
// visibilityAddress) through the children for their auxiliary data blocks.
// NOTE(review): assumes UsedChildren contains exactly the children whose Name is
// not "<null>" — confirm, otherwise the group entry count won't match.
public override void OnRebuild(VoidPtr address, int length, bool force)
{
    ResourceGroup* group = (ResourceGroup*)address;
    // Only "used" (non-placeholder) children get an entry in the group.
    *group = new ResourceGroup(UsedChildren.Count);
    int nodeIndex = 0;
    ResourceEntry* entry = group->First;
    foreach (SCN0EntryNode n in Children)
    {
        if (n.Name != "<null>")
        {
            // Entry offsets are relative to the group header.
            (entry++)->_dataOffset = (int)_dataAddr - (int)group;
            n._nodeIndex = nodeIndex++;
            n._realIndex = n.Index;
        }
        else
            n._nodeIndex = n._realIndex = -1; // placeholder: excluded from the group

        // Hand the child the current positions of the shared auxiliary buffers.
        n.keyframeAddr = keyframeAddress;
        n.lightAddr = (RGBAPixel*)lightArrayAddress;
        n.visAddr = visibilityAddress;
        n.Rebuild(_dataAddr, n._calcSize, true);
        // Advance every cursor past what this child consumed.
        _dataAddr += n._calcSize;
        keyframeAddress += n._keyLen;
        lightArrayAddress += n._lightLen;
        visibilityAddress += n._visLen;
    }
}
/// <summary>
/// Initializes a new instance of the <see cref="StorageAccountsHelper"/> class,
/// binding it to a management client and the resource group it operates in.
/// </summary>
/// <param name="client">The storage management client to use for all operations.</param>
/// <param name="resourceGroup">The resource group the helper is scoped to.</param>
public StorageAccountsHelper(StorageManagementClient client, ResourceGroup resourceGroup)
{
    this.resourceGroup = resourceGroup;
    this.StorageManagementClient = client;
}
// Builds one tab link per resource the user can access. The current resource gets a
// null destination (non-navigating link) and the "current tab" CSS class; disabled
// resources get the disabled-tab class; everything else gets no class.
private IEnumerable<Control> getTabControlsForResources( ResourceGroup resourceGroup, bool includeIcons ) {
	return resourceGroup.Resources.Where( resource => resource.UserCanAccessResource ).Select(
		resource => {
			var style = new TextActionControlStyle(
				resource.ResourceName,
				icon: includeIcons
					      ? new ActionControlIcon( new FontAwesomeIcon( resource.IsIdenticalToCurrent() ? "fa-circle" : "fa-circle-thin" ) )
					      : null );
			var tab = EwfLink.Create( resource.IsIdenticalToCurrent() ? null : resource, style );
			tab.CssClass = resource.IsIdenticalToCurrent()
				               ? CssElementCreator.CurrentTabCssClass
				               : resource.AlternativeMode is DisabledResourceMode
					                 ? CssElementCreator.DisabledTabCssClass
					                 : "";
			return (Control)tab;
		} ).ToList();
}
// Download-success callback for a resource update. Verifies the downloaded file
// (compressed length, CRC32 — with decryption-aware variants — and, if compressed,
// decompresses it in place and re-verifies the uncompressed length), then persists
// the resource (into a virtual file system when requested), updates the bookkeeping
// collections, and raises progress/completion events. Any verification or write
// failure is converted into a DownloadFailure event and the method returns early.
private void OnDownloadSuccess(object sender, DownloadSuccessEventArgs e)
{
    UpdateInfo updateInfo = e.UserData as UpdateInfo;
    if (updateInfo == null)
    {
        // Not a download we initiated; ignore.
        return;
    }

    try
    {
        using (FileStream fileStream = new FileStream(e.DownloadPath, FileMode.Open, FileAccess.ReadWrite))
        {
            // The resource is compressed iff its on-the-wire length/hash differ from
            // the final (uncompressed) length/hash recorded in the update info.
            bool compressed = updateInfo.Length != updateInfo.CompressedLength || updateInfo.HashCode != updateInfo.CompressedHashCode;
            int length = (int)fileStream.Length;
            if (length != updateInfo.CompressedLength)
            {
                fileStream.Close();
                string errorMessage = Utility.Text.Format("Resource compressed length error, need '{0}', downloaded '{1}'.", updateInfo.CompressedLength, length);
                DownloadFailureEventArgs downloadFailureEventArgs = DownloadFailureEventArgs.Create(e.SerialId, e.DownloadPath, e.DownloadUri, errorMessage, e.UserData);
                OnDownloadFailure(this, downloadFailureEventArgs);
                ReferencePool.Release(downloadFailureEventArgs);
                return;
            }

            if (compressed)
            {
                // Verify the compressed payload's CRC32 before decompressing.
                fileStream.Position = 0L;
                int hashCode = Utility.Verifier.GetCrc32(fileStream);
                if (hashCode != updateInfo.CompressedHashCode)
                {
                    fileStream.Close();
                    string errorMessage = Utility.Text.Format("Resource compressed hash code error, need '{0}', downloaded '{1}'.", updateInfo.CompressedHashCode, hashCode);
                    DownloadFailureEventArgs downloadFailureEventArgs = DownloadFailureEventArgs.Create(e.SerialId, e.DownloadPath, e.DownloadUri, errorMessage, e.UserData);
                    OnDownloadFailure(this, downloadFailureEventArgs);
                    ReferencePool.Release(downloadFailureEventArgs);
                    return;
                }

                // Decompress into the resource manager's shared cached stream.
                fileStream.Position = 0L;
                m_ResourceManager.PrepareCachedStream();
                if (!Utility.Compression.Decompress(fileStream, m_ResourceManager.m_CachedStream))
                {
                    fileStream.Close();
                    string errorMessage = Utility.Text.Format("Unable to decompress resource '{0}'.", e.DownloadPath);
                    DownloadFailureEventArgs downloadFailureEventArgs = DownloadFailureEventArgs.Create(e.SerialId, e.DownloadPath, e.DownloadUri, errorMessage, e.UserData);
                    OnDownloadFailure(this, downloadFailureEventArgs);
                    ReferencePool.Release(downloadFailureEventArgs);
                    return;
                }

                int uncompressedLength = (int)m_ResourceManager.m_CachedStream.Length;
                if (uncompressedLength != updateInfo.Length)
                {
                    fileStream.Close();
                    string errorMessage = Utility.Text.Format("Resource length error, need '{0}', downloaded '{1}'.", updateInfo.Length, uncompressedLength);
                    DownloadFailureEventArgs downloadFailureEventArgs = DownloadFailureEventArgs.Create(e.SerialId, e.DownloadPath, e.DownloadUri, errorMessage, e.UserData);
                    OnDownloadFailure(this, downloadFailureEventArgs);
                    ReferencePool.Release(downloadFailureEventArgs);
                    return;
                }

                // Replace the file's content with the decompressed bytes in place.
                fileStream.Position = 0L;
                fileStream.SetLength(0L);
                fileStream.Write(m_ResourceManager.m_CachedStream.GetBuffer(), 0, uncompressedLength);
            }
            else
            {
                int hashCode = 0;
                fileStream.Position = 0L;
                if (updateInfo.LoadType == LoadType.LoadFromMemoryAndQuickDecrypt || updateInfo.LoadType == LoadType.LoadFromMemoryAndDecrypt
                    || updateInfo.LoadType == LoadType.LoadFromBinaryAndQuickDecrypt || updateInfo.LoadType == LoadType.LoadFromBinaryAndDecrypt)
                {
                    // Encrypted load types mix the expected hash bytes into the CRC
                    // computation; "quick" variants only cover a fixed-length prefix.
                    Utility.Converter.GetBytes(updateInfo.HashCode, m_CachedHashBytes);
                    if (updateInfo.LoadType == LoadType.LoadFromMemoryAndQuickDecrypt || updateInfo.LoadType == LoadType.LoadFromBinaryAndQuickDecrypt)
                    {
                        hashCode = Utility.Verifier.GetCrc32(fileStream, m_CachedHashBytes, Utility.Encryption.QuickEncryptLength);
                    }
                    else if (updateInfo.LoadType == LoadType.LoadFromMemoryAndDecrypt || updateInfo.LoadType == LoadType.LoadFromBinaryAndDecrypt)
                    {
                        hashCode = Utility.Verifier.GetCrc32(fileStream, m_CachedHashBytes, length);
                    }

                    // Scrub the shared hash buffer after use.
                    Array.Clear(m_CachedHashBytes, 0, CachedHashBytesLength);
                }
                else
                {
                    hashCode = Utility.Verifier.GetCrc32(fileStream);
                }

                if (hashCode != updateInfo.HashCode)
                {
                    fileStream.Close();
                    string errorMessage = Utility.Text.Format("Resource hash code error, need '{0}', downloaded '{1}'.", updateInfo.HashCode, hashCode);
                    DownloadFailureEventArgs downloadFailureEventArgs = DownloadFailureEventArgs.Create(e.SerialId, e.DownloadPath, e.DownloadUri, errorMessage, e.UserData);
                    OnDownloadFailure(this, downloadFailureEventArgs);
                    ReferencePool.Release(downloadFailureEventArgs);
                    return;
                }
            }
        }

        if (updateInfo.UseFileSystem)
        {
            // Move the verified file into the virtual file system, then delete the
            // loose copy regardless of whether the write succeeded.
            IFileSystem fileSystem = m_ResourceManager.GetFileSystem(updateInfo.FileSystemName, false);
            bool retVal = fileSystem.WriteFile(updateInfo.ResourceName.FullName, updateInfo.ResourcePath);
            if (File.Exists(updateInfo.ResourcePath))
            {
                File.Delete(updateInfo.ResourcePath);
            }

            if (!retVal)
            {
                string errorMessage = Utility.Text.Format("Write resource to file system '{0}' error.", fileSystem.FullPath);
                DownloadFailureEventArgs downloadFailureEventArgs = DownloadFailureEventArgs.Create(e.SerialId, e.DownloadPath, e.DownloadUri, errorMessage, e.UserData);
                OnDownloadFailure(this, downloadFailureEventArgs);
                ReferencePool.Release(downloadFailureEventArgs);
                return;
            }
        }

        // Bookkeeping: the resource is now up to date locally.
        m_UpdateCandidateInfo.Remove(updateInfo.ResourceName);
        m_UpdateWaitingInfo.Remove(updateInfo);
        m_UpdateWaitingInfoWhilePlaying.Remove(updateInfo);
        m_ResourceManager.m_ResourceInfos[updateInfo.ResourceName].MarkReady();
        m_ResourceManager.m_ReadWriteResourceInfos.Add(updateInfo.ResourceName, new ReadWriteResourceInfo(updateInfo.FileSystemName, updateInfo.LoadType, updateInfo.Length, updateInfo.HashCode));

        if (ResourceUpdateSuccess != null)
        {
            ResourceUpdateSuccess(updateInfo.ResourceName, e.DownloadPath, e.DownloadUri, updateInfo.Length, updateInfo.CompressedLength);
        }

        // Flush the read-write version list once enough bytes have accumulated,
        // or when the update queues are (about to be) drained.
        m_CurrentGenerateReadWriteVersionListLength += updateInfo.CompressedLength;
        if (m_UpdateCandidateInfo.Count <= 0 || m_UpdateWaitingInfo.Count + m_UpdateWaitingInfoWhilePlaying.Count <= 0 || m_CurrentGenerateReadWriteVersionListLength >= m_GenerateReadWriteVersionListLength)
        {
            GenerateReadWriteVersionList();
        }

        if (m_UpdatingResourceGroup != null && m_UpdateWaitingInfo.Count <= 0)
        {
            // The current group finished — clear it before raising the event so a
            // handler can start updating another group.
            ResourceGroup updatingResourceGroup = m_UpdatingResourceGroup;
            m_UpdatingResourceGroup = null;
            if (ResourceUpdateComplete != null)
            {
                ResourceUpdateComplete(updatingResourceGroup, !m_FailureFlag);
            }
        }

        if (m_UpdateCandidateInfo.Count <= 0 && ResourceUpdateAllComplete != null)
        {
            ResourceUpdateAllComplete();
        }
    }
    catch (Exception exception)
    {
        // Any unexpected error is reported through the same failure path.
        string errorMessage = Utility.Text.Format("Update resource '{0}' with error message '{1}'.", e.DownloadPath, exception);
        DownloadFailureEventArgs downloadFailureEventArgs = DownloadFailureEventArgs.Create(e.SerialId, e.DownloadPath, e.DownloadUri, errorMessage, e.UserData);
        OnDownloadFailure(this, downloadFailureEventArgs);
        ReferencePool.Release(downloadFailureEventArgs);
    }
}
// Writes the PAT0 animation header (v3 or v4), the texture/palette name and pointer
// tables at the end of the buffer, the entry ResourceGroup, and then each child entry
// followed by its texture data.
public override void OnRebuild(VoidPtr address, int length, bool force)
{
    //Set header values
    if (_version == 4)
    {
        PAT0v4* header = (PAT0v4*)address;
        header->_header._tag = PAT0v4.Tag;
        header->_header._version = 4;
        header->_dataOffset = PAT0v4.Size;
        header->_userDataOffset = header->_origPathOffset = 0;
        header->_numFrames = (ushort)_frameCount;
        header->_numEntries = (ushort)Children.Count;
        header->_numTexPtr = (ushort)_textureFiles.Count;
        header->_numPltPtr = (ushort)_paletteFiles.Count;
        header->_loop = _loop;
    }
    else
    {
        PAT0v3* header = (PAT0v3*)address;
        header->_header._tag = PAT0v3.Tag;
        header->_header._version = 3;
        header->_dataOffset = PAT0v3.Size;
        header->_origPathOffset = 0;
        header->_numFrames = (ushort)_frameCount;
        header->_numEntries = (ushort)Children.Count;
        header->_numTexPtr = (ushort)_textureFiles.Count;
        header->_numPltPtr = (ushort)_paletteFiles.Count;
        header->_loop = _loop;
    }

    // Fields below share the same layout in v3 and v4, so a v3 view is safe for both.
    PAT0v3* commonHeader = (PAT0v3*)address;
    //Now set header values that are in the same spot between versions
    //Set offsets
    // Name tables sit at the very end of the buffer (8 bytes per name slot).
    commonHeader->_texTableOffset = length - (_textureFiles.Count + _paletteFiles.Count) * 8;
    commonHeader->_pltTableOffset = commonHeader->_texTableOffset + _textureFiles.Count * 4;
    //Set pointer offsets
    int offset = length - _textureFiles.Count * 4 - _paletteFiles.Count * 4;
    commonHeader->_texPtrTableOffset = offset;
    commonHeader->_pltPtrTableOffset = offset + _textureFiles.Count * 4;
    //Set pointers
    // Runtime pointer slots are zero-initialized; they are filled in-game.
    bint* ptr = (bint*)(commonHeader->Address + commonHeader->_texPtrTableOffset);
    for (int i = 0; i < _textureFiles.Count; i++)
        *ptr++ = 0;
    ptr = (bint*)(commonHeader->Address + commonHeader->_pltPtrTableOffset);
    for (int i = 0; i < _paletteFiles.Count; i++)
        *ptr++ = 0;

    // Entry group: entries first, then each entry's texture data.
    ResourceGroup* group = commonHeader->Group;
    *group = new ResourceGroup(Children.Count);
    VoidPtr entryAddress = group->EndAddress;
    VoidPtr dataAddress = entryAddress;
    ResourceEntry* rEntry = group->First;
    // First pass: data starts after all entry blocks.
    foreach (PAT0EntryNode n in Children)
        dataAddress += n._entryLen;
    // Second pass: assign each texture's data address (a length of -1 means the
    // texture shares/has no dedicated data block and the cursor is not advanced).
    foreach (PAT0EntryNode n in Children)
        foreach (PAT0TextureNode t in n.Children)
        {
            n._dataAddrs[t.Index] = dataAddress;
            if (n._dataLens[t.Index] != -1)
                dataAddress += n._dataLens[t.Index];
        }
    // Third pass: write the group entries and rebuild each child in place.
    foreach (PAT0EntryNode n in Children)
    {
        (rEntry++)->_dataOffset = (int)entryAddress - (int)group;
        n.Rebuild(entryAddress, n._entryLen, true);
        entryAddress += n._entryLen;
    }
    // Optional user data exists only in v4.
    if (_userEntries.Count > 0 && _version == 4)
    {
        PAT0v4* header = (PAT0v4*)address;
        header->UserData = dataAddress;
        _userEntries.Write(dataAddress);
    }
}
/// <summary>
/// Verifies that an in-flight elastic pool update operation can be cancelled and that
/// the cancellation leaves the pool in its original state.
/// </summary>
public async Task TestCancelUpdateElasticPoolOperation()
{
    /*
     * In this test we only test the cancel operation on resize pool from Premium to Premium
     * since currently we only support Cancel pool resize operation on Premium <-> Premium
     */
    using (SqlManagementTestContext context = new SqlManagementTestContext(this))
    {
        ResourceGroup resourceGroup = context.CreateResourceGroup("West Europe");
        Server server = context.CreateServer(resourceGroup, "westeurope");
        SqlManagementClient sqlClient = context.GetClient <SqlManagementClient>();

        Dictionary <string, string> tags = new Dictionary <string, string>() { { "tagKey1", "TagValue1" } };

        // Create a premium elastic pool with required parameters
        string epName = SqlManagementTestUtilities.GenerateName();
        var epInput = new ElasticPool()
        {
            Location = server.Location,
            Tags = tags,
        };
        var elasticPool = sqlClient.ElasticPools.CreateOrUpdate(resourceGroup.Name, server.Name, epName, epInput);
        SqlManagementTestUtilities.ValidateElasticPool(epInput, elasticPool, epName);
        Assert.NotNull(elasticPool);

        // Kick off a long-running pool update (not awaited) so it can be cancelled below.
        var epUpdateResponse = sqlClient.ElasticPools.BeginCreateOrUpdateWithHttpMessagesAsync(resourceGroup.Name, server.Name, epName, new ElasticPool()
        {
            Location = server.Location,
            Tags = tags
        });
        if (HttpMockServer.Mode == HttpRecorderMode.Record)
        {
            // Live runs only: give the service time to register the operation before listing it.
            Thread.Sleep(TimeSpan.FromSeconds(15));
        }

        // Get the pool update operation for new added properties on elastic pool operations:
        // ETA, Operation Description and IsCancellable.
        AzureOperationResponse <IPage <ElasticPoolOperation> > response = await sqlClient.ElasticPoolOperations.ListByElasticPoolWithHttpMessagesAsync(resourceGroup.Name, server.Name, epName);
        Assert.Equal(HttpStatusCode.OK, response.Response.StatusCode);
        IList <ElasticPoolOperation> responseObject = response.Body.ToList();
        Assert.Single(responseObject);
        Assert.NotNull(responseObject[0].PercentComplete);
        Assert.NotNull(responseObject[0].EstimatedCompletionTime);
        Assert.NotNull(responseObject[0].Description);
        Assert.NotNull(responseObject[0].IsCancellable);

        // Cancel the elastic pool update operation; waiting on the original update must
        // now fail with an OperationCancelled error.
        string requestId = responseObject[0].Name;
        sqlClient.ElasticPoolOperations.Cancel(resourceGroup.Name, server.Name, epName, Guid.Parse(requestId));
        var epUpdateInitialResponse = await epUpdateResponse;
        CloudException ex = await Assert.ThrowsAsync <CloudException>(() => sqlClient.GetPutOrPatchOperationResultAsync(epUpdateInitialResponse, new Dictionary <string, List <string> >(), CancellationToken.None));
        Assert.Contains("OperationCancelled", ex.Body.Code);

        // Make sure the elastic pool is not updated due to cancel operation
        var epGetResponse = sqlClient.ElasticPools.Get(resourceGroup.Name, server.Name, epName);
        Assert.Equal(125, epGetResponse.Dtu);
        Assert.Equal("Premium", epGetResponse.Edition);
    }
}
// Writes the CHR0 header (v5 or v4/v3 layout), the entry ResourceGroup, and each
// child's entry block followed by its keyframe data.
public override void OnRebuild(VoidPtr address, int length, bool force)
{
    ResourceGroup* group;
    if (_version == 5)
    {
        CHR0v5* header = (CHR0v5*)address;
        // _conversionBias adjusts the stored frame count when the animation was
        // converted between loop conventions.
        *header = new CHR0v5(_version, length, _numFrames - _conversionBias, Children.Count, _loop);
        group = header->Group;
    }
    else
    {
        CHR0v4_3* header = (CHR0v4_3*)address;
        *header = new CHR0v4_3(_version, length, _numFrames - _conversionBias, Children.Count, _loop);
        group = header->Group;
    }

    *group = new ResourceGroup(Children.Count);
    // Entries are written directly after the group; keyframe data follows all entries.
    VoidPtr entryAddress = group->EndAddress;
    VoidPtr dataAddress = entryAddress;
    foreach (CHR0EntryNode n in Children)
        dataAddress += n._entryLen;

    ResourceEntry* rEntry = group->First;
    foreach (CHR0EntryNode n in Children)
    {
        // Entry offsets are relative to the group header.
        (rEntry++)->_dataOffset = (int)entryAddress - (int)group;
        n._dataAddr = dataAddress;
        n.Rebuild(entryAddress, n._entryLen, true);
        entryAddress += n._entryLen;
        dataAddress += n._dataLen;
    }

    // Optional user data exists only in v5; it lands after all keyframe data.
    if (_userEntries.Count > 0 && _version == 5)
    {
        CHR0v5* header = (CHR0v5*)address;
        header->UserData = dataAddress;
        _userEntries.Write(dataAddress);
    }
}
// Shared driver for elastic pool update tests: creates a Standard pool, then applies a
// sequence of updates (SKU capacity, DTU max, DTU min, maintenance configuration) via
// the supplied updateFunc, asserting after each step that exactly one new UPDATE
// activity was recorded.
// NOTE(review): TUpdateModel is used via `dynamic`, so createModelFunc's model must
// expose Sku/DatabaseDtuMax/DatabaseDtuMin/MaintenanceConfigurationId setters —
// confirm against callers (ElasticPool vs ElasticPoolUpdate).
private void TestUpdateElasticPool <TUpdateModel>( SqlManagementClient sqlClient, ResourceGroup resourceGroup, Server server, Func <TUpdateModel> createModelFunc, Func <string, string, string, TUpdateModel, ElasticPool> updateFunc)
{
    Dictionary <string, string> tags = new Dictionary <string, string>() { { "tagKey1", "TagValue1" } };

    // Create a elasticPool with parameters Tags
    //
    string epName = SqlManagementTestUtilities.GenerateName();
    var epInput = new ElasticPool()
    {
        Location = server.Location,
        Sku = new Microsoft.Azure.Management.Sql.Models.Sku("StandardPool"),
        Tags = tags,
        DatabaseDtuMax = 20,
        DatabaseDtuMin = 0
    };
    var returnedEp = sqlClient.ElasticPools.CreateOrUpdate(resourceGroup.Name, server.Name, epName, epInput);
    SqlManagementTestUtilities.ValidateElasticPool(epInput, returnedEp, epName);
    var epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(1, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());

    // Update elasticPool Dtu
    //
    dynamic epInput2 = createModelFunc();
    epInput2.Sku = returnedEp.Sku;
    epInput2.Sku.Capacity = 200;
    returnedEp = updateFunc(resourceGroup.Name, server.Name, epName, epInput2);
    SqlManagementTestUtilities.ValidateElasticPool(epInput2, returnedEp, epName);
    epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(2, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "UPDATE").Count());

    // Update elasticPool Dtu Max
    //
    dynamic epInput3 = createModelFunc();
    epInput3.Sku = returnedEp.Sku;
    epInput3.DatabaseDtuMax = 100;
    returnedEp = updateFunc(resourceGroup.Name, server.Name, epName, epInput3);
    SqlManagementTestUtilities.ValidateElasticPool(epInput3, returnedEp, epName);
    epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(3, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());
    Assert.Equal(2, epa.Where(a => a.Operation == "UPDATE").Count());

    // Update elasticPool Dtu Min
    //
    dynamic epInput4 = createModelFunc();
    epInput4.Sku = returnedEp.Sku;
    epInput4.DatabaseDtuMin = 10;
    returnedEp = updateFunc(resourceGroup.Name, server.Name, epName, epInput4);
    SqlManagementTestUtilities.ValidateElasticPool(epInput4, returnedEp, epName);
    epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(4, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());
    Assert.Equal(3, epa.Where(a => a.Operation == "UPDATE").Count());

    // Update elasticPool Maintenance Configuration Id
    dynamic epInput5 = createModelFunc();
    epInput5.MaintenanceConfigurationId = SqlManagementTestUtilities.GetTestMaintenanceConfigurationId(sqlClient.SubscriptionId);
    returnedEp = updateFunc(resourceGroup.Name, server.Name, epName, epInput5);
    SqlManagementTestUtilities.ValidateElasticPool(epInput5, returnedEp, epName);
    epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(5, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());
    Assert.Equal(4, epa.Where(a => a.Operation == "UPDATE").Count());
}
// Shared driver for Hyperscale elastic pool update tests: creates an HS_Gen5_4 pool,
// updates its high-availability replica count, then updates its SLO to HS_Gen5_8,
// asserting the activity log and the resulting pool state after each step.
// NOTE(review): TUpdateModel is used via `dynamic`, so createModelFunc's model must
// expose HighAvailabilityReplicaCount and Sku setters — confirm against callers.
private void TestUpdateHyperscaleElasticPool <TUpdateModel>( SqlManagementClient sqlClient, ResourceGroup resourceGroup, Server server, Func <TUpdateModel> createModelFunc, Func <string, string, string, TUpdateModel, ElasticPool> updateFunc)
{
    // Create a Hyperscale elasticPool
    //
    string epName = SqlManagementTestUtilities.GenerateName();
    var epInput = new ElasticPool()
    {
        Location = server.Location,
        Sku = new Microsoft.Azure.Management.Sql.Models.Sku("HS_Gen5_4")
    };
    var returnedEp = sqlClient.ElasticPools.CreateOrUpdate(resourceGroup.Name, server.Name, epName, epInput);
    SqlManagementTestUtilities.ValidateElasticPool(epInput, returnedEp, epName);
    var epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(1, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());

    // Verify pool has default HighAvailabilityReplicaCount
    Assert.Equal(1, returnedEp.HighAvailabilityReplicaCount);

    // Update HighAvailabilityReplicaCount
    //
    dynamic epInput2 = createModelFunc();
    epInput2.HighAvailabilityReplicaCount = 2;
    updateFunc(resourceGroup.Name, server.Name, epName, epInput2);
    returnedEp = sqlClient.ElasticPools.Get(resourceGroup.Name, server.Name, epName);
    SqlManagementTestUtilities.ValidateElasticPool(epInput2, returnedEp, epName);
    epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(2, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "UPDATE").Count());

    // Verify pool has updated HighAvailabilityReplicaCount
    //
    Assert.Equal(2, returnedEp.HighAvailabilityReplicaCount);

    // Update Hyperscale pool SLO
    //
    dynamic epInput3 = createModelFunc();
    epInput3.Sku = new Microsoft.Azure.Management.Sql.Models.Sku("HS_Gen5_8");
    returnedEp = updateFunc(resourceGroup.Name, server.Name, epName, epInput3);
    // Fix: validate against epInput3 (the SLO update that was actually sent) —
    // previously this compared against epInput2, so a wrong SLO could slip through.
    SqlManagementTestUtilities.ValidateElasticPool(epInput3, returnedEp, epName);
    epa = sqlClient.ElasticPoolActivities.ListByElasticPool(resourceGroup.Name, server.Name, epName);
    Assert.NotNull(epa);
    Assert.Equal(3, epa.Count());
    Assert.Equal(1, epa.Where(a => a.Operation == "CREATE").Count());
    Assert.Equal(2, epa.Where(a => a.Operation == "UPDATE").Count());

    // Verify pool has same HighAvailabilityReplicaCount
    //
    Assert.Equal(2, returnedEp.HighAvailabilityReplicaCount);

    // Verify pool has updated SLO
    //
    Assert.Equal(8, returnedEp.Sku.Capacity);
}
// Writes the SHP0 header (v4 or v3 layout), the entry ResourceGroup, each child's
// entry block followed by its data, and records where the string list begins.
public override void OnRebuild(VoidPtr address, int length, bool force)
{
    ResourceGroup* group;
    if (_version == 4)
    {
        SHP0v4* header = (SHP0v4*)address;
        // ConversionBias adjusts the stored frame count when the animation was
        // converted between loop conventions.
        *header = new SHP0v4(_loop, (ushort)(_numFrames - ConversionBias), (ushort)_strings.Count);
        group = header->Group;
    }
    else
    {
        SHP0v3* header = (SHP0v3*)address;
        *header = new SHP0v3(_loop, (ushort)(_numFrames - ConversionBias), (ushort)_strings.Count);
        group = header->Group;
    }

    *group = new ResourceGroup(Children.Count);
    // Entries are written directly after the group; entry data follows all entries.
    VoidPtr entryAddress = group->EndAddress;
    VoidPtr dataAddress = entryAddress;
    foreach (SHP0EntryNode n in Children)
        dataAddress += n._entryLen;

    ResourceEntry* rEntry = group->First;
    foreach (SHP0EntryNode n in Children)
    {
        // Entry offsets are relative to the group header.
        (rEntry++)->_dataOffset = (int)entryAddress - (int)group;
        n._dataAddr = dataAddress;
        n.Rebuild(entryAddress, n._entryLen, true);
        entryAddress += n._entryLen;
        dataAddress += n._dataLen;
    }

    // The string list starts right after the last entry's data; this field sits in
    // the same spot in both versions, so the v3 view is safe here.
    ((SHP0v3*)address)->_stringListOffset = (int)dataAddress - (int)address;

    // Optional user data exists only in v4.
    if (_userEntries.Count > 0 && _version == 4)
    {
        SHP0v4* header = (SHP0v4*)address;
        header->UserData = dataAddress;
        _userEntries.Write(dataAddress);
    }
}