public void GetAllEntitiesInPartition_Ok()
{
    // Arrange
    var tableEntity = new DynamicTableEntity();
    var tableQuerySegmentMock = new Mock<TableQuerySegment<DynamicTableEntity>>(new List<DynamicTableEntity> { tableEntity });
    // tableQuerySegmentMock.Setup(s => s.Results).Returns(new List<DynamicTableEntity> { tableEntity });
    var (cloudTableMock, pool) = CreateTablePoolWithMockForExecuteQuery(tableEntity, SetupExecuteQuery);
    cloudTableMock.Setup(t => t.ExecuteQuerySegmentedAsync(
            It.IsAny<TableQuery<DynamicTableEntity>>(),
            It.IsAny<TableContinuationToken>(),
            It.IsAny<TableRequestOptions>(),
            It.IsAny<OperationContext>(),
            It.IsAny<CancellationToken>()))
        .Returns(Task.FromResult(tableQuerySegmentMock.Object));

    // Act & Assert
    TableStorageHelpers.GetAllEntitiesInPartition<DynamicTableEntity>(pool, "partitionKey")
        .Should()
        .Contain(tableEntity);
    cloudTableMock.Verify();
}
public async Task ExecuteBatchAsync_WritesToTableStorage()
{
    IEnvironment testEnvironment = new TestEnvironment();
    testEnvironment.SetEnvironmentVariable(EnvironmentSettingNames.AzureWebsitePlaceholderMode, "0");

    DiagnosticEventTableStorageRepository repository =
        new DiagnosticEventTableStorageRepository(_configuration, _hostIdProvider, testEnvironment, _logger);

    var table = repository.GetDiagnosticEventsTable();
    await TableStorageHelpers.CreateIfNotExistsAsync(table, 2);
    await EmptyTableAsync(table);

    var dateTime = DateTime.UtcNow;
    var diagnosticEvent = new DiagnosticEvent("hostId", dateTime);

    var events = new ConcurrentDictionary<string, DiagnosticEvent>();
    events.TryAdd("EC123", diagnosticEvent);

    await repository.ExecuteBatchAsync(events, table);

    var results = ExecuteQuery(table, new TableQuery());
    Assert.Equal(1, results.Count());
}
private static void RetryNImpl(Action action, int numberOfRetries, TimeSpan minBackoff, TimeSpan maxBackoff, TimeSpan deltaBackoff)
{
    int totalNumberOfRetries = numberOfRetries;
    int backoff;

    if (minBackoff > maxBackoff)
    {
        throw new ArgumentException("The minimum backoff must not be larger than the maximum backoff period.");
    }

    if (minBackoff.TotalMilliseconds < 0)
    {
        throw new ArgumentException("The minimum backoff period must not be negative.");
    }

    do
    {
        try
        {
            action();
            break;
        }
        catch (InvalidOperationException e)
        {
            HttpStatusCode status;
            // PreconditionFailed is the status code returned by the server to indicate that the etag is wrong
            if (TableStorageHelpers.EvaluateException(e, out status))
            {
                if (status == HttpStatusCode.PreconditionFailed)
                {
                    if (numberOfRetries == 0)
                    {
                        throw;
                    }

                    backoff = CalculateCurrentBackoff(minBackoff, maxBackoff, deltaBackoff, totalNumberOfRetries - numberOfRetries);
                    Debug.Assert(backoff >= minBackoff.TotalMilliseconds);
                    Debug.Assert(backoff <= maxBackoff.TotalMilliseconds);
                    if (backoff > 0)
                    {
                        Thread.Sleep(backoff);
                    }
                }
                else
                {
                    throw;
                }
            }
            else
            {
                throw;
            }
        }
    } while (numberOfRetries-- > 0);
}
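CalculateCurrentBackoff is not shown alongside RetryNImpl. A minimal sketch of what it plausibly computes, assuming the usual exponential backoff with jitter clamped between the minimum and maximum periods; the jitter range and rounding are assumptions, not the library's confirmed implementation. It satisfies the two Debug.Assert bounds above.

// Hypothetical sketch of CalculateCurrentBackoff, assuming standard exponential backoff with jitter.
private static int CalculateCurrentBackoff(TimeSpan minBackoff, TimeSpan maxBackoff, TimeSpan deltaBackoff, int currentRetryCount)
{
    Random rand = new Random();

    // exponential increment: (2^retry - 1) * delta, with +/-20% random jitter (assumed range)
    double increment = (Math.Pow(2, currentRetryCount) - 1) *
        rand.Next((int)(deltaBackoff.TotalMilliseconds * 0.8), (int)(deltaBackoff.TotalMilliseconds * 1.2) + 1);

    // clamp between the configured minimum and maximum backoff periods
    double interval = Math.Min(minBackoff.TotalMilliseconds + increment, maxBackoff.TotalMilliseconds);
    return (int)interval;
}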
public void InsertOrReplaceEntity_Ok()
{
    // Arrange
    var tableEntity = new DynamicTableEntity();
    var (cloudTableMock, pool) = CreateTablePoolWithMockForExecute(tableEntity, TableOperationType.InsertOrReplace, SetupExecute);

    // Act & Assert
    TableStorageHelpers.InsertOrReplaceEntity(pool, tableEntity).Should().Be(tableEntity);
    cloudTableMock.Verify();
}
public void DeleteEntity_Ok()
{
    // Arrange
    var tableEntity = new DynamicTableEntity();
    var (cloudTableMock, pool) = CreateTablePoolWithMockForExecute(tableEntity, TableOperationType.Delete, SetupExecute);

    // Act & Assert
    TableStorageHelpers.Delete(pool, "partitionKey", "rowKey"); // no exceptions expected
    cloudTableMock.Verify();
}
public void InsertEntity_Ok()
{
    // Arrange
    var tableEntity = new DynamicTableEntity();
    var cloudTableMock = new Mock<CloudTable>(new Uri("https://nothing.net"), new TableClientConfiguration());
    SetupExecute(cloudTableMock, tableEntity, TableOperationType.Insert);
    var pool = new CloudTablePool("UnitTestTable", cloudTableMock.Object);

    // Act & Assert
    TableStorageHelpers.InsertEntity(pool, tableEntity).Should().Be(tableEntity);
    cloudTableMock.Verify();
}
public void GetEntity_Ok()
{
    // Arrange
    var tableEntity = new DynamicTableEntity();
    var (cloudTableMock, pool) = CreateTablePoolWithMockForExecuteQuery(tableEntity, SetupExecuteQuery);

    // Act & Assert
    TableStorageHelpers.GetEntity<DynamicTableEntity>(pool, "partitionKey", "rowKey")
        .Should()
        .Be(tableEntity);
    cloudTableMock.Verify();
}
// remember that there is no rollback functionality in the table storage service right now,
// so be cautious when using this function.
// if a role does not exist, we stop deleting roles; if a user in a role does not exist, we continue deleting.
// in these error conditions, the behavior of this function differs from the SQL role provider
public override void RemoveUsersFromRoles(string[] usernames, string[] roleNames)
{
    SecUtility.CheckArrayParameter(ref roleNames, true, true, true, MaxTableRoleNameLength, "roleNames");
    SecUtility.CheckArrayParameter(ref usernames, true, true, true, Constants.MaxTableUsernameLength, "usernames");

    RoleRow row;
    try
    {
        TableStorageDataServiceContext svc = CreateDataServiceContext();
        foreach (string role in roleNames)
        {
            if (!RoleExists(role))
            {
                throw new ProviderException(string.Format(CultureInfo.InstalledUICulture, "The role {0} does not exist!", role));
            }

            foreach (string user in usernames)
            {
                row = GetUserInRole(svc, role, user);
                if (row == null)
                {
                    Log.Write(EventKind.Warning, string.Format(CultureInfo.InstalledUICulture, "The user {0} does not exist in the role {1}.", user, role));
                    continue;
                }

                try
                {
                    svc.DeleteObject(row);
                    svc.SaveChangesWithRetries();
                }
                catch (Exception e)
                {
                    HttpStatusCode status;
                    if (TableStorageHelpers.EvaluateException(e, out status) &&
                        (status == HttpStatusCode.NoContent || status == HttpStatusCode.NotFound))
                    {
                        Log.Write(EventKind.Warning, string.Format(CultureInfo.InstalledUICulture, "The user {0} does not exist in the role {1}.", user, role));
                        svc.Detach(row);
                    }
                    else
                    {
                        throw new ProviderException(string.Format(CultureInfo.InstalledUICulture, "Error deleting user {0} from role {1}.", user, role));
                    }
                }
            }
        }
    }
    catch (InvalidOperationException e)
    {
        throw new ProviderException("Error while accessing the data store.", e);
    }
}
public void InsertOrMergeEntityAsync_Ok()
{
    // Arrange
    var tableEntity = new DynamicTableEntity();
    var (cloudTableMock, pool) = CreateTablePoolWithMockForExecute(tableEntity, TableOperationType.InsertOrMerge, SetupExecuteAsync);

    // Act & Assert
    TableStorageHelpers.InsertOrMergeEntityAsync(pool, tableEntity).GetAwaiter().GetResult()
        .Should()
        .Be(tableEntity);
    cloudTableMock.Verify();
}
public void UpdateEntity_Ok()
{
    // Arrange
    var tableEntity = new DynamicTableEntity();
    tableEntity.ETag = "*";
    var (cloudTableMock, pool) = CreateTablePoolWithMockForExecute(tableEntity, TableOperationType.Merge, SetupExecute);

    // Act & Assert
    TableStorageHelpers.UpdateEntity(pool, tableEntity)
        .Should()
        .Be(tableEntity);
    cloudTableMock.Verify();
}
// Because of the limited transactional support in the table storage offering, this function gives only
// limited guarantees for inserting all users into all roles.
// We do not recommend using this function because of the missing transactional support.
public override void AddUsersToRoles(string[] usernames, string[] roleNames)
{
    SecUtility.CheckArrayParameter(ref roleNames, true, true, true, MaxTableRoleNameLength, "roleNames");
    SecUtility.CheckArrayParameter(ref usernames, true, true, true, Constants.MaxTableUsernameLength, "usernames");

    RoleRow row;
    try
    {
        TableStorageDataServiceContext svc = CreateDataServiceContext();
        foreach (string role in roleNames)
        {
            if (!RoleExists(role))
            {
                throw new ProviderException(string.Format(CultureInfo.InstalledUICulture, "The role {0} does not exist!", role));
            }

            foreach (string user in usernames)
            {
                row = new RoleRow(_applicationName, role, user);
                try
                {
                    svc.AddObject(_tableName, row);
                    svc.SaveChangesWithRetries();
                }
                catch (InvalidOperationException e)
                {
                    HttpStatusCode status;
                    if (TableStorageHelpers.EvaluateException(e, out status) && status == HttpStatusCode.Conflict)
                    {
                        // this element already exists or was created in a failed retry;
                        // this is not a fatal error, so continue adding elements
                        Log.Write(EventKind.Warning, string.Format(CultureInfo.InstalledUICulture, "The user {0} already exists in the role {1}.", user, role));
                        svc.Detach(row);
                    }
                    else
                    {
                        throw new ProviderException(string.Format(CultureInfo.InstalledUICulture, "Error adding user {0} to role {1}", user, role));
                    }
                }
            }
        }
    }
    catch (InvalidOperationException e)
    {
        throw new ProviderException("Error while accessing the data store.", e);
    }
}
public GeoLocationQuery(IConfiguration configuration, ILogger<GeoLocationQuery> logger,
    IHttpClientFactory httpClientFactory, GeoLocationService geoLocationService)
{
    logger_ = logger;
    geoLocationService_ = geoLocationService;
    apiKey_ = configuration.GetValue<string>("IPDATA-API-KEY");
    httpClientFactory_ = httpClientFactory;
    newGates_ = configuration.GetValue("NewFeatureGates", false);

    var slotName = configuration.GetDeploymentSlot().ToString().ToLower();
    var connection = configuration.GetValue<string>("SAFeatureGate");
    var storageAccount = CloudStorageAccount.Parse(connection);
    var tableClient = TableStorageHelpers.CreateClient(storageAccount);
    ipCacheTable_ = new TableStorage(tableClient, $"{slotName}IpCache", true);
}
internal static TableOperation CreateMetricsInsertOperation(ScaleMetrics metrics, string hostId, ScaleMonitorDescriptor descriptor, DateTime? now = null)
{
    now = now ?? DateTime.UtcNow;

    // Use an inverted-ticks row key to order the table in descending order, allowing us to easily
    // query for the latest logs. A guid is added as part of the key to ensure uniqueness.
    string rowKey = TableStorageHelpers.GetRowKey(now.Value);

    var entity = TableEntityConverter.ToEntity(metrics, hostId, rowKey, metrics.Timestamp);
    entity.Properties.Add(MonitorIdPropertyName, EntityProperty.GeneratePropertyForString(descriptor.Id));

    // We map the sample timestamp to its own column so it doesn't conflict with the built-in column.
    // We want to ensure that timestamp values for returned metrics are precise and monotonically
    // increasing when ordered results are returned. The built-in timestamp doesn't guarantee this.
    entity.Properties.Add(SampleTimestampPropertyName, EntityProperty.GeneratePropertyForDateTimeOffset(metrics.Timestamp));

    return TableOperation.Insert(entity);
}
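TableStorageHelpers.GetRowKey is used throughout these examples but its body never appears. A minimal sketch consistent with the comment above (inverted ticks so newer rows sort first, plus a guid for uniqueness); the exact padding and separator are assumptions.

// Hypothetical sketch of GetRowKey: inverted ticks (descending order) plus a guid suffix.
internal static string GetRowKey(DateTime timestamp)
{
    // zero-padded so that lexicographic order matches numeric order
    return string.Format("{0:D19}-{1}", DateTime.MaxValue.Ticks - timestamp.Ticks, Guid.NewGuid().ToString("N"));
}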
public async Task QueueBackgroundDiagnosticsEventsTablePurge_PurgesTables()
{
    IEnvironment testEnvironment = new TestEnvironment();
    testEnvironment.SetEnvironmentVariable(EnvironmentSettingNames.AzureWebsitePlaceholderMode, "0");
    DiagnosticEventTableStorageRepository repository =
        new DiagnosticEventTableStorageRepository(_configuration, _hostIdProvider, testEnvironment, _logger);

    // delete any existing non-current diagnostics events tables
    string tablePrefix = DiagnosticEventTableStorageRepository.TableNamePrefix;
    var currentTable = repository.GetDiagnosticEventsTable();
    var tables = await TableStorageHelpers.ListOldTablesAsync(currentTable, repository.TableClient, tablePrefix);
    foreach (var table in tables)
    {
        await table.DeleteIfExistsAsync();
    }

    // create 3 old tables
    for (int i = 0; i < 3; i++)
    {
        var table = repository.TableClient.GetTableReference($"{tablePrefix}Test{i}");
        await TableStorageHelpers.CreateIfNotExistsAsync(table, 2);
    }

    // verify the tables were created
    tables = await TableStorageHelpers.ListOldTablesAsync(currentTable, repository.TableClient, tablePrefix);
    Assert.Equal(3, tables.Count());

    // queue the background purge
    TableStorageHelpers.QueueBackgroundTablePurge(currentTable, repository.TableClient, tablePrefix, NullLogger.Instance, 0);

    // wait for the purge to complete
    await TestHelpers.Await(async () =>
    {
        tables = await TableStorageHelpers.ListOldTablesAsync(currentTable, repository.TableClient, tablePrefix);
        return tables.Count() == 0;
    }, timeout: 5000);
}
public FeatureGateStore(
    IConfiguration configuration,
    ILogger<FeatureGateStore> logger,
    ServiceOption serviceOption,
    EventualCloudTableClient storage)
{
    var slotName = configuration.GetDeploymentSlot().ToString().ToLower();
    var connection = configuration.GetValue<string>("SAFeatureGate");
    newGates_ = configuration.GetValue("NewFeatureGates", false);
    var storageAccount = CloudStorageAccount.Parse(connection);
    var tableClient = TableStorageHelpers.CreateClient(storageAccount);

    Storage = storage.GetTableReference($"{slotName}Gates");
    Storage.CreateIfNotExists();

    logger_ = logger;
    rnd_ = new Random();
    GatesTable = new TableStorage(tableClient, $"{slotName}Gates", true);
    Service = serviceOption.Service;
}
public override bool RoleExists(string roleName)
{
    SecUtility.CheckParameter(ref roleName, true, true, true, MaxTableRoleNameLength, "rolename");

    try
    {
        TableStorageDataServiceContext svc = CreateDataServiceContext();
        DataServiceQuery<RoleRow> queryObj = svc.CreateQuery<RoleRow>(_tableName);
        IEnumerable<RoleRow> query = from role in queryObj
                                     where role.PartitionKey == SecUtility.CombineToKey(_applicationName, string.Empty) &&
                                           role.RowKey == SecUtility.Escape(roleName)
                                     select role;
        TableStorageDataServiceQuery<RoleRow> q =
            new TableStorageDataServiceQuery<RoleRow>(query as DataServiceQuery<RoleRow>, _tableRetry);
        try
        {
            // this query addresses exactly one result,
            // so we should get an exception if there is no matching element
            q.ExecuteWithRetries();
            return true;
        }
        catch (DataServiceQueryException e)
        {
            HttpStatusCode s;
            if (TableStorageHelpers.EvaluateException(e, out s) && s == HttpStatusCode.NotFound)
            {
                return false;
            }
            else
            {
                throw;
            }
        }
    }
    catch (InvalidOperationException e)
    {
        throw new ProviderException("Error while accessing the data store.", e);
    }
}
private RoleRow GetUserInRole(DataServiceContext svc, string rolename, string username)
{
    SecUtility.CheckParameter(ref username, true, true, true, Constants.MaxTableUsernameLength, "username");
    SecUtility.CheckParameter(ref rolename, true, true, true, MaxTableRoleNameLength, "rolename");

    try
    {
        DataServiceQuery<RoleRow> queryObj = svc.CreateQuery<RoleRow>(_tableName);
        IEnumerable<RoleRow> query = from user in queryObj
                                     where user.PartitionKey == SecUtility.CombineToKey(_applicationName, username) &&
                                           user.RowKey == SecUtility.Escape(rolename)
                                     select user;
        TableStorageDataServiceQuery<RoleRow> q =
            new TableStorageDataServiceQuery<RoleRow>(query as DataServiceQuery<RoleRow>, _tableRetry);
        try
        {
            IEnumerable<RoleRow> userRows = q.ExecuteAllWithRetries();
            return userRows.First();
        }
        catch (DataServiceQueryException e)
        {
            HttpStatusCode s;
            if (TableStorageHelpers.EvaluateException(e, out s) && s == HttpStatusCode.NotFound)
            {
                return null;
            }
            else
            {
                throw;
            }
        }
    }
    catch (InvalidOperationException e)
    {
        throw new ProviderException("Error while accessing the data store.", e);
    }
}
internal virtual async Task FlushLogs(CloudTable table = null)
{
    if (_environment.IsPlaceholderModeEnabled())
    {
        return;
    }

    table = table ?? GetDiagnosticEventsTable();

    try
    {
        bool tableCreated = await TableStorageHelpers.CreateIfNotExistsAsync(table, _tableCreationRetries);
        if (tableCreated)
        {
            TableStorageHelpers.QueueBackgroundTablePurge(table, TableClient, TableNamePrefix, _logger);
        }
    }
    catch (Exception ex)
    {
        _logger.LogError(ex, $"Unable to create table '{table.Name}' after {_tableCreationRetries} retries. Aborting write operation {ex}");

        // Clear the memory cache to avoid memory build-up.
        _events.Clear();
        return;
    }

    // Assign a new empty dictionary to reset the event count in the new duration window.
    // All existing events have already been logged to the other logging pipelines.
    ConcurrentDictionary<string, DiagnosticEvent> tempDictionary = _events;
    _events = new ConcurrentDictionary<string, DiagnosticEvent>();
    if (tempDictionary.Count > 0)
    {
        await ExecuteBatchAsync(tempDictionary, table);
    }
}
private static string GetExceptionMessage(Exception exception)
{
    HttpStatusCode statusCode;
    StorageExtendedErrorInformation extendedErrorInfo;
    if (TableStorageHelpers.EvaluateException(exception, out statusCode, out extendedErrorInfo))
    {
        if (extendedErrorInfo != null)
        {
            return string.Format(CultureInfo.InvariantCulture, "{0} {1}",
                extendedErrorInfo.ErrorCode ?? "", extendedErrorInfo.ErrorMessage ?? "");
        }
    }

    DataServiceClientException dse = exception.InnerException as DataServiceClientException;
    if (dse != null)
    {
        return dse.Message;
    }
    else
    {
        return exception.Message;
    }
}
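TableStorageHelpers.EvaluateException appears in nearly every catch block in this section, but its body is never shown. A rough sketch of the idea, under the assumption that it simply walks the inner-exception chain looking for a DataServiceClientException and reports its HTTP status code; the overload used above would additionally parse the response payload into StorageExtendedErrorInformation.

// Hypothetical sketch of EvaluateException; the real helper also extracts extended error info.
private static bool EvaluateExceptionSketch(Exception e, out HttpStatusCode status)
{
    status = HttpStatusCode.Unused;
    while (e != null)
    {
        DataServiceClientException dsce = e as DataServiceClientException;
        if (dsce != null)
        {
            status = (HttpStatusCode)dsce.StatusCode; // StatusCode is the raw HTTP status as an int
            return true;
        }
        e = e.InnerException;
    }
    return false;
}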
public override void CreateRole(string roleName)
{
    SecUtility.CheckParameter(ref roleName, true, true, true, MaxTableRoleNameLength, "rolename");

    try
    {
        TableStorageDataServiceContext svc = CreateDataServiceContext();
        RoleRow newRole = new RoleRow(_applicationName, roleName, string.Empty);
        svc.AddObject(_tableName, newRole);
        svc.SaveChangesWithRetries();
    }
    catch (InvalidOperationException e)
    {
        HttpStatusCode status;
        // when retry policies are used we cannot distinguish between a conflict and success,
        // so in the case of a conflict we just return success here
        if (TableStorageHelpers.EvaluateException(e, out status) && status == HttpStatusCode.Conflict)
        {
            return; // the role already exists
        }

        throw new ProviderException("Error accessing role table.", e);
    }
}
public async Task MigrateFeatureGatesToCosmos(
    [FromServices] EventualCloudTableClient storage,
    [FromServices] IConfiguration configuration,
    [FromServices] ServiceOption serviceOption)
{
    // the data set is small and this is not an online path, so a simple one-by-one copy is fine;
    // it also lives outside the azcontext code, so we just do it inline here
    var slotName = configuration.GetDeploymentSlot().ToString().ToLower();
    var connection = configuration.GetValue<string>("SAFeatureGate");
    var storageAccount = CloudStorageAccount.Parse(connection);
    var tableClient = TableStorageHelpers.CreateClient(storageAccount);

    var newTable = storage.GetTableReference($"{slotName}Gates");
    await newTable.CreateIfNotExistsAsync();

    var gatesTable = new TableStorage(tableClient, $"{slotName}Gates", true);
    var data = await gatesTable.ExecuteQueryAsync(new TableQuery<LegacyFeatureEntry>());
    foreach (var item in data)
    {
        var newEntry = new FeatureEntryTableEntity
        {
            AllowedIPs = item.AZAllowedIPs,
            Continents = item.AZContinents,
            Countries = item.AZCountries,
            Description = item.Description,
            Disabled = item.Disabled.ToString(),
            PartitionKey = item.PartitionKey,
            Issuer = item.Issuer,
            ReleaseDate = item.ReleaseDate,
            RequiredRoles = item.AZRequiredRoles,
            RowKey = item.RowKey,
            Users = item.AZUsers,
        };
        await newTable.InsertOrMergeAsync(newEntry);
    }
}
public DiagnosticEvent(string hostId, DateTime timestamp)
{
    RowKey = TableStorageHelpers.GetRowKey(timestamp);
    PartitionKey = $"{hostId}-{timestamp:yyyyMMdd}";
    Timestamp = timestamp;
}
// this method shows an alternative way of accessing/creating DataServiceContext objects;
// this approach is closer to what tools generate for normal ADO.NET Data Services projects
internal static void RunSamples1()
{
    StorageAccountInfo account = null;

    try
    {
        Console.WriteLine("Show how to create tables and queries using the SampleDataServiceContext class...");
        account = StorageAccountInfo.GetDefaultTableStorageAccountFromConfiguration();
        SampleDataServiceContext svc = new SampleDataServiceContext(account);
        svc.RetryPolicy = RetryPolicies.RetryN(3, TimeSpan.FromSeconds(1));

        // Create 'SampleTable' using the SampleDataServiceContext class
        TableStorage.CreateTablesFromModel(typeof(SampleDataServiceContext), account);

        string sampleTableName = SampleDataServiceContext.SampleTableName;
        DeleteAllEntriesFromSampleTable(svc, sampleTableName);

        svc.AddObject(SampleDataServiceContext.SampleTableName, new SampleEntity("sample", "entity"));
        svc.SaveChangesWithRetries();

        var qResult = from c in svc.SampleTable
                      where c.PartitionKey == "samplepartitionkey" && c.RowKey == "samplerowkey1"
                      select c;
        TableStorageDataServiceQuery<SampleEntity> q =
            new TableStorageDataServiceQuery<SampleEntity>(qResult as DataServiceQuery<SampleEntity>, svc.RetryPolicy);

        try
        {
            // the query references the whole key and explicitly addresses one entity;
            // thus, this query can generate an exception if there are 0 results during enumeration
            IEnumerable<SampleEntity> res = q.ExecuteAllWithRetries();
            foreach (SampleEntity s in res)
            {
                Console.WriteLine("This code is not reached. " + s.PartitionKey);
            }
        }
        catch (DataServiceQueryException e)
        {
            HttpStatusCode s;
            if (TableStorageHelpers.EvaluateException(e, out s) && s == HttpStatusCode.NotFound)
            {
                // this would mean the entity was not found
                Console.WriteLine("The entity was not found. This is expected here.");
            }
        }

        Console.WriteLine("Delete all entries in the sample table.");
        DeleteAllEntriesFromSampleTable(svc, sampleTableName);
        Console.WriteLine("Table sample 1 finished!");
    }
    catch (DataServiceRequestException dsre)
    {
        Console.WriteLine("DataServiceRequestException: " + GetExceptionMessage(dsre));
        ShowTableStorageErrorMessage(account.BaseUri.ToString());
    }
    catch (InvalidOperationException ioe)
    {
        Console.WriteLine("Storage service error: " + GetExceptionMessage(ioe));
        ShowTableStorageErrorMessage(account.BaseUri.ToString());
    }
}
public override bool DeleteRole(string roleName, bool throwOnPopulatedRole)
{
    SecUtility.CheckParameter(ref roleName, true, true, true, MaxTableRoleNameLength, "rolename");

    try
    {
        TableStorageDataServiceContext svc = CreateDataServiceContext();
        DataServiceQuery<RoleRow> queryObj = svc.CreateQuery<RoleRow>(_tableName);
        IEnumerable<RoleRow> query = from userRole in queryObj
                                     where userRole.PartitionKey.CompareTo(SecUtility.EscapedFirst(_applicationName)) >= 0 &&
                                           userRole.PartitionKey.CompareTo(SecUtility.NextComparisonString(SecUtility.EscapedFirst(_applicationName))) < 0 &&
                                           userRole.RowKey == SecUtility.Escape(roleName)
                                     select userRole;
        TableStorageDataServiceQuery<RoleRow> q =
            new TableStorageDataServiceQuery<RoleRow>(query as DataServiceQuery<RoleRow>, _tableRetry);
        IEnumerable<RoleRow> userRows = q.ExecuteAllWithRetries();

        if (userRows == null)
        {
            return false;
        }

        List<RoleRow> l = new List<RoleRow>(userRows);
        if (l.Count == 0)
        {
            // the role does not exist
            return false;
        }

        RoleRow role;
        if (IsStaleRole(l, out role))
        {
            return false;
        }

        if (l.Count > 1 && throwOnPopulatedRole)
        {
            throw new ProviderException("Cannot delete populated role.");
        }

        svc.DeleteObject(role);
        svc.SaveChangesWithRetries();

        // now try to remove all remaining elements in the role
        foreach (RoleRow row in l)
        {
            if (row != role)
            {
                try
                {
                    svc.DeleteObject(row);
                    svc.SaveChangesWithRetries();
                }
                catch (InvalidOperationException ex)
                {
                    HttpStatusCode status;
                    if (TableStorageHelpers.EvaluateException(ex, out status) &&
                        (status == HttpStatusCode.NoContent || status == HttpStatusCode.NotFound))
                    {
                        // this element was already deleted by another process or during a failed retry;
                        // this is not a fatal error, so continue deleting elements
                        Log.Write(EventKind.Warning, string.Format(CultureInfo.InstalledUICulture, "The user {0} does not exist in the role {1}.", row.UserName, row.RoleName));
                    }
                    else
                    {
                        throw new ProviderException(string.Format(CultureInfo.InstalledUICulture, "Error deleting user {0} from role {1}.", row.UserName, row.RoleName));
                    }
                }
            }
        }

        return true;
    }
    catch (InvalidOperationException e)
    {
        throw new ProviderException("Error while accessing the data store.", e);
    }
}
// shows alternative ways of generating DataServiceContext objects
internal static void RunSamples2()
{
    StorageAccountInfo account = null;

    try
    {
        account = StorageAccountInfo.GetDefaultTableStorageAccountFromConfiguration();
        TableStorage tableStorage = TableStorage.Create(account);
        tableStorage.RetryPolicy = RetryPolicies.RetryN(3, TimeSpan.FromSeconds(1));

        // the DataServiceContext object inherits its retry policy from tableStorage in this case
        TableStorageDataServiceContext svc = tableStorage.GetDataServiceContext();

        Console.WriteLine("Table creation, delete and list samples...");
        string sampleTableName = SampleDataServiceContext.SampleTableName;
        tableStorage.TryCreateTable(sampleTableName);
        DeleteAllEntriesFromSampleTable(svc, sampleTableName);

        Console.WriteLine("List all tables in the account.");
        IEnumerable<string> tables2 = tableStorage.ListTables();
        foreach (string n1 in tables2)
        {
            Console.WriteLine(n1);
        }

        Console.WriteLine("Inserting entities into the table...");
        SampleEntity t = new SampleEntity("samplepartitionkey", "samplerowkey");
        svc.AddObject(sampleTableName, t);
        svc.SaveChangesWithRetries();

        // Detach the existing entity so that we can demonstrate the server-side
        // error raised when we try to insert an object with the same keys
        svc.Detach(t);

        // Insert an entity with the same keys
        Console.WriteLine("Try to insert the same entity into the table and show how to deal with error conditions.");
        t = new SampleEntity("samplepartitionkey", "samplerowkey");
        svc.AddObject(sampleTableName, t);
        try
        {
            svc.SaveChangesWithRetries();
            // getting here is an error because inserting the same row twice raises an exception
            Console.WriteLine("Should not get here. Succeeded inserting two entities with the same keys.");
        }
        catch (Exception e)
        {
            HttpStatusCode status;
            StorageExtendedErrorInformation errorInfo;
            if (TableStorageHelpers.EvaluateException(e, out status, out errorInfo) && status == HttpStatusCode.Conflict)
            {
                // the row has already been inserted before; this is expected here
                if (errorInfo != null)
                {
                    Console.WriteLine("Attempting to insert row with same keys resulted in error {0} : {1}",
                        errorInfo.ErrorCode, errorInfo.ErrorMessage);
                }
            }
            else
            {
                throw;
            }
        }
        svc.Detach(t);

        Console.WriteLine("Insert a large item into the table.");
        t = new SampleEntity("samplepartitionkey", "samplerowkey1");
        t.B = new String('a', 1000);
        svc.AddObject(sampleTableName, t);
        svc.SaveChangesWithRetries();

        Console.WriteLine("Create a normal DataServiceContext object (not TableStorageDataServiceContext) and attach it to a TableStorage object.");
        DataServiceContext svc2 = new DataServiceContext(
            TableStorage.GetServiceBaseUri(account.BaseUri, account.UsePathStyleUris, account.AccountName));
        tableStorage.Attach(svc2);
        var qResult = from c in svc2.CreateQuery<SampleEntity>(sampleTableName)
                      where c.RowKey == "samplerowkey1"
                      select c;
        foreach (SampleEntity cust in qResult)
        {
            if (cust.B != t.B)
            {
                Console.WriteLine("Sample failed. Did not read the entity property just written.");
            }
        }

        Console.WriteLine("Insert many rows in a table and show the API for dealing with query result pagination.");
        int num = 2100;
        Console.WriteLine("Inserting {0} rows.", num.ToString(CultureInfo.CurrentUICulture));
        for (int i = 0; i < num; i++)
        {
            t = new SampleEntity("samplestring", i.ToString(CultureInfo.InvariantCulture));
            svc.AddObject(sampleTableName, t);
            svc.SaveChangesWithRetries();
            if ((i + 1) % 50 == 0)
            {
                Console.WriteLine("Inserted row {0}.", (i + 1).ToString(CultureInfo.CurrentUICulture));
            }
        }

        Console.WriteLine("Executing query that will return many results. This can take a while...");
        var qResult2 = from c in svc.CreateQuery<SampleEntity>(sampleTableName)
                       where c.PartitionKey == "samplestring"
                       select c;
        TableStorageDataServiceQuery<SampleEntity> tableStorageQuery =
            new TableStorageDataServiceQuery<SampleEntity>(qResult2 as DataServiceQuery<SampleEntity>);
        IEnumerable<SampleEntity> res = tableStorageQuery.ExecuteAllWithRetries();

        Console.WriteLine("Retrieved query results:");
        foreach (SampleEntity entity in res)
        {
            Console.WriteLine("Partition key: {0}, row key: {1}.", entity.PartitionKey, entity.RowKey);
        }

        Console.WriteLine("Delete all entries in the sample table.");
        DeleteAllEntriesFromSampleTable(tableStorage.GetDataServiceContext(), sampleTableName);
        tableStorage.DeleteTable(sampleTableName);
        Console.WriteLine("Table samples finished!");
    }
    catch (DataServiceRequestException dsre)
    {
        Console.WriteLine("DataServiceRequestException: " + GetExceptionMessage(dsre));
        ShowTableStorageErrorMessage(account.BaseUri.ToString());
    }
    catch (InvalidOperationException ioe)
    {
        Console.WriteLine("Storage service error: " + GetExceptionMessage(ioe));
        ShowTableStorageErrorMessage(account.BaseUri.ToString());
    }
}
// we don't use the retry policy itself in this function because out parameters are not well handled by
// retry policies
private SessionStateStoreData GetSession(HttpContext context, string id, out bool locked, out TimeSpan lockAge,
                                         out object lockId, out SessionStateActions actions, bool exclusive)
{
    Debug.Assert(context != null);
    SecUtility.CheckParameter(ref id, true, true, false, TableStorageConstants.MaxStringPropertySizeInChars, "id");

    SessionRow session = null;
    int curRetry = 0;
    bool retry = false;

    // Assign default values to out parameters
    locked = false;
    lockId = null;
    lockAge = TimeSpan.Zero;
    actions = SessionStateActions.None;

    do
    {
        retry = false;
        try
        {
            TableStorageDataServiceContext svc = CreateDataServiceContext();
            session = GetSession(id, svc);

            // Assign default values to out parameters
            locked = false;
            lockId = null;
            lockAge = TimeSpan.Zero;
            actions = SessionStateActions.None;

            // if the blob does not exist, we return null;
            // ASP.NET will call the corresponding method for creating the session
            if (session == null)
            {
                return null;
            }

            if (session.Initialized == false)
            {
                Debug.Assert(session.Locked == false);
                actions = SessionStateActions.InitializeItem;
                session.Initialized = true;
            }

            session.ExpiresUtc = DateTime.UtcNow.AddMinutes(session.Timeout);
            if (exclusive)
            {
                if (!session.Locked)
                {
                    if (session.Lock == Int32.MaxValue)
                    {
                        session.Lock = 0;
                    }
                    else
                    {
                        session.Lock++;
                    }
                    session.LockDateUtc = DateTime.UtcNow;
                }
                lockId = session.Lock;
                locked = session.Locked;
                session.Locked = true;
            }
            lockAge = DateTime.UtcNow.Subtract(session.LockDateUtc);
            lockId = session.Lock;

            if (locked == true)
            {
                return null;
            }

            // let's try to write this back to the data store;
            // in between, someone else could have written something to the store for the same session,
            // so we retry a number of times; if all retries fail, we throw an exception
            svc.UpdateObject(session);
            svc.SaveChangesWithRetries();
        }
        catch (InvalidOperationException e)
        {
            HttpStatusCode status;
            // PreconditionFailed means the etag no longer matches, i.e. someone else updated the session row
            if (TableStorageHelpers.EvaluateException(e, out status) && status == HttpStatusCode.PreconditionFailed)
            {
                retry = true;
            }
            else
            {
                throw new ProviderException("Error accessing the data store.", e);
            }
        }
    } while (retry && curRetry++ < NumRetries);

    // ok, now we have successfully written back our state, so we can read the blob.
    // note that we do not need to care about read/write locking when accessing the
    // blob because each time we write a new session we create a new blob with a different name
    SessionStateStoreData result = null;
    MemoryStream stream = null;
    StreamReader reader = null;
    BlobProperties properties;

    try
    {
        try
        {
            stream = _blobProvider.GetBlobContent(session.BlobName, out properties);
        }
        catch (StorageException e)
        {
            throw new ProviderException("Couldn't read session blob!", e);
        }

        reader = new StreamReader(stream);
        if (actions == SessionStateActions.InitializeItem)
        {
            // Return an empty SessionStateStoreData
            result = new SessionStateStoreData(new SessionStateItemCollection(),
                                               SessionStateUtility.GetSessionStaticObjects(context),
                                               session.Timeout);
        }
        else
        {
            // Read Items, StaticObjects, and Timeout from the blob
            byte[] items = Convert.FromBase64String(reader.ReadLine());
            byte[] statics = Convert.FromBase64String(reader.ReadLine());
            int timeout = session.Timeout;

            // Deserialize the session
            result = DeserializeSession(items, statics, timeout);
        }
    }
    finally
    {
        if (stream != null)
        {
            stream.Close();
        }
        if (reader != null)
        {
            reader.Close();
        }
    }

    return result;
}
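The write side of the session blob does not appear in this section. Based purely on the read path above (two lines, each a base64 payload: serialized items, then static objects), the corresponding serialization presumably looks something like this sketch; the method name is hypothetical.

// Hypothetical sketch of the session blob format implied by the read path in GetSession.
private static MemoryStream SerializeSessionSketch(byte[] items, byte[] statics)
{
    MemoryStream stream = new MemoryStream();
    StreamWriter writer = new StreamWriter(stream);
    writer.WriteLine(Convert.ToBase64String(items));   // line 1: serialized session items
    writer.WriteLine(Convert.ToBase64String(statics)); // line 2: serialized static objects
    writer.Flush();
    stream.Position = 0;
    return stream;
}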
public async Task ReadWriteMetrics_IntegerConversion_HandlesLongs()
{
    var monitor1 = new TestScaleMonitor1();
    var monitors = new IScaleMonitor[] { monitor1 };

    // first write a couple of entities manually to the table to simulate
    // the change in entity property type (int -> long);
    // this shows that the table can have entities of both formats with
    // no versioning issues

    // add an entity with a Count property of type int
    var entity = new DynamicTableEntity
    {
        RowKey = TableStorageHelpers.GetRowKey(DateTime.UtcNow),
        PartitionKey = TestHostId,
        Properties = new Dictionary<string, EntityProperty>()
    };
    var expectedIntCountValue = int.MaxValue;
    entity.Properties.Add("Timestamp", new EntityProperty(DateTime.UtcNow));
    entity.Properties.Add("Count", new EntityProperty(expectedIntCountValue));
    entity.Properties.Add(TableStorageScaleMetricsRepository.MonitorIdPropertyName, EntityProperty.GeneratePropertyForString(monitor1.Descriptor.Id));
    var batch = new TableBatchOperation();
    batch.Add(TableOperation.Insert(entity));

    // add an entity with a Count property of type long
    entity = new DynamicTableEntity
    {
        RowKey = TableStorageHelpers.GetRowKey(DateTime.UtcNow),
        PartitionKey = TestHostId,
        Properties = new Dictionary<string, EntityProperty>()
    };
    var expectedLongCountValue = long.MaxValue;
    entity.Properties.Add("Timestamp", new EntityProperty(DateTime.UtcNow));
    entity.Properties.Add("Count", new EntityProperty(expectedLongCountValue));
    entity.Properties.Add(TableStorageScaleMetricsRepository.MonitorIdPropertyName, EntityProperty.GeneratePropertyForString(monitor1.Descriptor.Id));
    batch.Add(TableOperation.Insert(entity));

    await _repository.ExecuteBatchSafeAsync(batch);

    // push a long max value through serialization
    var metricsMap = new Dictionary<IScaleMonitor, ScaleMetrics>();
    metricsMap.Add(monitor1, new TestScaleMetrics1 { Count = long.MaxValue });
    await _repository.WriteMetricsAsync(metricsMap);

    // add one more
    metricsMap = new Dictionary<IScaleMonitor, ScaleMetrics>();
    metricsMap.Add(monitor1, new TestScaleMetrics1 { Count = 12345 });
    await _repository.WriteMetricsAsync(metricsMap);

    // read the metrics back
    var result = await _repository.ReadMetricsAsync(monitors);
    Assert.Equal(1, result.Count);
    var monitorMetricsList = result[monitor1];
    Assert.Equal(4, monitorMetricsList.Count);

    // verify the explicitly written int record was read correctly
    var currSample = (TestScaleMetrics1)monitorMetricsList[0];
    Assert.Equal(expectedIntCountValue, currSample.Count);

    // verify the explicitly written long record was read correctly
    currSample = (TestScaleMetrics1)monitorMetricsList[1];
    Assert.Equal(expectedLongCountValue, currSample.Count);

    // verify the final round-tripped values
    currSample = (TestScaleMetrics1)monitorMetricsList[2];
    Assert.Equal(long.MaxValue, currSample.Count);
    currSample = (TestScaleMetrics1)monitorMetricsList[3];
    Assert.Equal(12345, currSample.Count);
}