Example #1
 public TDST_EntitySet(TableServiceContext context)
     : base(context)
 {
 }
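
A construction sketch; the storage-account setup is illustrative, and GetDataServiceContext is the storage client 1.x call (2.x exposes GetTableServiceContext instead):

 CloudStorageAccount account = CloudStorageAccount.DevelopmentStorageAccount;
 CloudTableClient    client  = account.CreateCloudTableClient();
 TableServiceContext context = client.GetDataServiceContext();
 var entitySet = new TDST_EntitySet(context); // forwards the context to the base class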
Example #2
        // Query entities. Use LINQ clauses to filter data.
        // Return true on success, false if not found, throw exception on error.

        public DataServiceQuery <T> QueryEntities <T>(string tableName) where T : TableServiceEntity
        {
            TableServiceContext tableServiceContext = TableClient.GetDataServiceContext();

            return(tableServiceContext.CreateQuery <T>(tableName));
        }
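
A usage sketch for QueryEntities, called from within the same class; CustomerEntity is a hypothetical TableServiceEntity subclass, and AsTableServiceQuery (a StorageClient extension) handles continuation tokens during enumeration:

        var customers = QueryEntities <CustomerEntity>("Customers")
                        .Where(c => c.PartitionKey == "contoso")
                        .AsTableServiceQuery();

        foreach (CustomerEntity c in customers.Execute())
        {
            Console.WriteLine(c.RowKey);
        }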
Example #3
        public override bool DeleteRole(string roleName, bool throwOnPopulatedRole)
        {
            SecUtility.CheckParameter(ref roleName, true, true, true, MaxTableRoleNameLength, "roleName");

            try
            {
                TableServiceContext        svc      = CreateDataServiceContext();
                DataServiceQuery <RoleRow> queryObj = svc.CreateQuery <RoleRow>(_tableName);

                var query = (from userRole in queryObj
                             where userRole.PartitionKey.CompareTo(SecUtility.EscapedFirst(_applicationName)) >= 0 &&
                             userRole.PartitionKey.CompareTo(SecUtility.NextComparisonString(SecUtility.EscapedFirst(_applicationName))) < 0 &&
                             userRole.RowKey == SecUtility.Escape(roleName)
                             select userRole).AsTableServiceQuery();
                IEnumerable <RoleRow> userRows = query.Execute();

                if (userRows == null)
                {
                    return(false);
                }
                List <RoleRow> l = new List <RoleRow>(userRows);
                if (l.Count == 0)
                {
                    // the role does not exist
                    return(false);
                }
                RoleRow role;
                if (IsStaleRole(l, out role))
                {
                    return(false);
                }
                if (l.Count > 1 && throwOnPopulatedRole)
                {
                    throw new ProviderException("Cannot delete populated role.");
                }
                svc.DeleteObject(role);
                svc.SaveChangesWithRetries();
                // Let's try to remove all remaining elements in the role
                foreach (RoleRow row in l)
                {
                    if (row != role)
                    {
                        try
                        {
                            svc.DeleteObject(row);
                            svc.SaveChangesWithRetries();
                        }
                        catch (InvalidOperationException ex)
                        {
                            var dsce = ex.InnerException as DataServiceClientException;

                            if (dsce != null && (dsce.StatusCode == (int)HttpStatusCode.NoContent || dsce.StatusCode == (int)HttpStatusCode.NotFound))
                            {
                                // this element was already deleted by another process or during a failed retry
                                // this is not a fatal error; continue deleting elements
                                Log.Write(EventKind.Warning, string.Format(CultureInfo.InstalledUICulture, "The user {0} does not exist in the role {1}.", row.UserName, row.RoleName));
                            }
                            else
                            {
                                throw new ProviderException(string.Format(CultureInfo.InstalledUICulture, "Error deleting user {0} from role {1}.", row.UserName, row.RoleName));
                            }
                        }
                    }
                }
                return(true);
            }
            catch (InvalidOperationException e)
            {
                throw new ProviderException("Error while accessing the data store.", e);
            }
        }
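
The two CompareTo clauses in the query above implement a prefix range scan: assuming SecUtility.EscapedFirst returns the smallest key carrying the escaped application-name prefix and NextComparisonString returns the smallest string greater than every key with that prefix, the pair of inequalities matches exactly the rows whose PartitionKey starts with the prefix. The same pattern with literal bounds (svc and tableName as in the method above):

            // Sketch: select every entity whose PartitionKey starts with "app1".
            // "app2" (last character incremented) is greater than every "app1..." key.
            var rows = from r in svc.CreateQuery <RoleRow>(tableName)
                       where r.PartitionKey.CompareTo("app1") >= 0 &&
                             r.PartitionKey.CompareTo("app2") < 0
                       select r;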
Example #4
        // Write a DataTable to an Azure table.
        // The DataTable's Rows are an unstructured property bag.
        // columnTypes - the type of each column, or null if the column should be skipped. columnTypes must have one element per column.
        public static void SaveToAzureTable(DataTable table, CloudStorageAccount account, string tableName, Type[] columnTypes, Func <int, Row, PartitionRowKey> funcComputeKeys)
        {
            if (table == null)
            {
                throw new ArgumentNullException("table");
            }
            if (account == null)
            {
                throw new ArgumentNullException("account");
            }
            if (columnTypes == null)
            {
                throw new ArgumentNullException("columnTypes");
            }
            if (tableName == null)
            {
                throw new ArgumentNullException("tableName");
            }
            ValidateAzureTableName(tableName);

            // Azure tables have "special" columns.
            // We can skip these by setting columnTypes[i] to null, which means: don't write that column.
            string[] columnNames = table.ColumnNames.ToArray();
            if (columnNames.Length != columnTypes.Length)
            {
                throw new ArgumentException(string.Format("columnTypes should have {0} elements", columnNames.Length), "columnTypes");
            }

            columnTypes = columnTypes.ToArray(); // create a copy for mutation.
            for (int i = 0; i < columnNames.Length; i++)
            {
                if (IsSpecialColumnName(columnNames[i]))
                {
                    columnTypes[i] = null;
                }
            }

            if (funcComputeKeys == null)
            {
                funcComputeKeys = GetPartitionRowKeyFunc(columnNames);
            }

            // Validate columnTypes
            string [] edmTypeNames = Array.ConvertAll(columnTypes,
                                                      columnType => {
                if (columnType == null)
                {
                    return(null);
                }
                string edmTypeName;
                _edmNameMapping.TryGetValue(columnType, out edmTypeName);
                if (edmTypeName == null)
                {
                    // Unsupported type!
                    throw new InvalidOperationException(string.Format("Type '{0}' is not a supported type on azure tables", columnType.FullName));
                }
                return(edmTypeName);
            });


            var tableClient    = account.CreateCloudTableClient();
            var tableReference = tableClient.GetTableReference(tableName);

            if (tableReference.Exists())
            {
                tableReference.Delete();
            }

            tableReference.Create();

            GenericTableWriter w = new GenericTableWriter
            {
                _edmTypeNames = edmTypeNames,
                _columnNames  = table.ColumnNames.ToArray()
            };

            // Batch rows for performance,
            // but all rows in the batch must have the same partition key
            TableServiceContext ctx = null;
            string lastPartitionKey = null;

            HashSet <PartitionRowKey> dups = new HashSet <PartitionRowKey>();

            int rowCounter = 0;
            int batchSize  = 0;

            foreach (Row row in table.Rows)
            {
                GenericWriterEntity entity = new GenericWriterEntity {
                    _source = row
                };
                // Compute row and partition keys too.
                var partRow = funcComputeKeys(rowCounter, row);
                entity.PartitionKey = partRow.PartitionKey;
                entity.RowKey       = partRow.RowKey;
                rowCounter++;

                // but all rows in the batch must have the same partition key
                if ((ctx != null) && (lastPartitionKey != null) && (lastPartitionKey != entity.PartitionKey))
                {
                    ctx.SaveChangesWithRetries(SaveChangesOptions.Batch | SaveChangesOptions.ReplaceOnUpdate);
                    ctx = null;
                }

                if (ctx == null)
                {
                    dups.Clear();
                    lastPartitionKey = null;
                    ctx = tableClient.GetTableServiceContext();
                    ctx.Format.UseAtom();
                    ctx.WritingEntity += new EventHandler <ReadingWritingEntityEventArgs>(w.ctx_WritingEntity);
                    batchSize          = 0;
                }

                // Add entity to the current batch.
                // Upsert means insert-or-replace, but keys must still be unique within a batch.
                bool allowUpsert = true;

                // Check for dups within a batch.
                var key = new PartitionRowKey {
                    PartitionKey = entity.PartitionKey, RowKey = entity.RowKey
                };
                bool dupWithinBatch = dups.Contains(key);
                dups.Add(key);


                if (allowUpsert)
                {
                    // Upsert allows overwriting existing keys. But still must be unique within a batch.
                    if (!dupWithinBatch)
                    {
                        ctx.AttachTo(tableName, entity);
                        ctx.UpdateObject(entity);
                    }
                }
                else
                {
                    // AddObject requires uniqueness.
                    if (dupWithinBatch)
                    {
                        // Azure REST APIs will give us a horrible cryptic error (400 with no message).
                        // Provide users a useful error instead.
                        throw new InvalidOperationException(string.Format("Table has duplicate keys: {0}", key));
                    }

                    ctx.AddObject(tableName, entity);
                }


                lastPartitionKey = entity.PartitionKey;
                batchSize++;

                if (batchSize % UploadBatchSize == 0)
                {
                    // Beware, if keys collide within a batch, we get a very cryptic error and 400.
                    // If they collide across batches, we get a more useful 409 (conflict).
                    try
                    {
                        ctx.SaveChangesWithRetries(SaveChangesOptions.Batch | SaveChangesOptions.ReplaceOnUpdate);
                    }
                    catch (DataServiceRequestException de)
                    {
                        var e = de.InnerException as DataServiceClientException;
                        if (e != null)
                        {
                            if (e.StatusCode == 409)
                            {
                                // Conflict. Duplicate keys. We don't get the specific duplicate key.
                                // Server shouldn't do this if we support upsert.
                                // (although an old emulator that doesn't yet support upsert may throw it).
                                throw new InvalidOperationException(string.Format("Table has duplicate keys. {0}", e.Message));
                            }
                        }
                    }
                    ctx = null;
                }
            }

            if (ctx != null)
            {
                ctx.SaveChangesWithRetries(SaveChangesOptions.Batch | SaveChangesOptions.ReplaceOnUpdate);
            }
        }
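
A call sketch for SaveToAzureTable; the CSV loader and column types are assumptions, and passing null for funcComputeKeys falls back to GetPartitionRowKeyFunc as the code above shows:

            DataTable table = DataTable.New.ReadCsv("input.csv"); // hypothetical loader
            Type[] columnTypes = new Type[] { typeof(string), typeof(int) };
            SaveToAzureTable(table, account, "UploadTest", columnTypes, null);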
Example #5
        public override void SetAndReleaseItemExclusive(HttpContext context, string id,
                                                        SessionStateStoreData item, object lockId, bool newItem)
        {
            Debug.Assert(context != null);
            SecUtility.CheckParameter(ref id, true, true, false, Configuration.MaxStringPropertySizeInChars, "id");

            _providerRetry(() =>
            {
                TableServiceContext svc = CreateDataServiceContext();
                SessionRow session;

                if (!newItem)
                {
                    session = GetSession(id, svc);
                    if (session == null || session.Lock != (int)lockId)
                    {
                        Debug.Assert(false);
                        return;
                    }
                }
                else
                {
                    session             = new SessionRow(id, _applicationName);
                    session.Lock        = 1;
                    session.LockDateUtc = DateTime.UtcNow;
                }
                session.Initialized = true;
                Debug.Assert(session.Timeout >= 0);
                session.Timeout    = item.Timeout;
                session.ExpiresUtc = DateTime.UtcNow.AddMinutes(session.Timeout);
                session.Locked     = false;

                // yes, we always create a new blob here
                session.BlobName = GetBlobNamePrefix(id) + Guid.NewGuid().ToString("N");


                // Serialize the session and write the blob
                byte[] items, statics;
                SerializeSession(item, out items, out statics);
                string serializedItems   = Convert.ToBase64String(items);
                string serializedStatics = Convert.ToBase64String(statics);
                MemoryStream output      = new MemoryStream();
                StreamWriter writer      = new StreamWriter(output);

                try
                {
                    writer.WriteLine(serializedItems);
                    writer.WriteLine(serializedStatics);
                    writer.Flush();
                    // for us, it shouldn't matter whether newItem is set to true or false
                    // because we always create the entire blob and cannot append to an
                    // existing one
                    _blobProvider.UploadStream(session.BlobName, output);
                    writer.Close();
                    output.Close();
                }
                catch (Exception e)
                {
                    if (!newItem)
                    {
                        ReleaseItemExclusive(svc, session, lockId);
                    }
                    throw new ProviderException("Error accessing the data store.", e);
                }
                finally
                {
                    if (writer != null)
                    {
                        writer.Close();
                    }
                    if (output != null)
                    {
                        output.Close();
                    }
                }

                if (newItem)
                {
                    svc.AddObject(_tableName, session);
                    svc.SaveChangesWithRetries();
                }
                else
                {
                    // Unlock the session and save changes
                    ReleaseItemExclusive(svc, session, lockId);
                }
            });
        }
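
The blob written above holds two Base64 lines, items then statics. A read-back sketch that mirrors that format (GetBlobStream is an assumed helper returning the blob's content stream):

                using (StreamReader reader = new StreamReader(GetBlobStream(session.BlobName)))
                {
                    byte[] items   = Convert.FromBase64String(reader.ReadLine());
                    byte[] statics = Convert.FromBase64String(reader.ReadLine());
                    // DeserializeSession(items, statics) would rebuild the SessionStateStoreData.
                }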
Example #6
        static void DoTables(CloudStorageAccount account)
        {
            Console.WriteLine("=======================TABLE==========================");
            CloudTableClient    tableClient = new CloudTableClient(account.TableEndpoint.AbsoluteUri, account.Credentials);
            TableServiceContext context     = tableClient.GetDataServiceContext();

            Console.WriteLine("Clearning container...");
            tableClient.DeleteTableIfExist("AzureStorageTestTableFrom");
            tableClient.DeleteTableIfExist("AzureStorageTestTableTo");

            Console.WriteLine("Creating container...");
            while (true)
            {
                try
                {
                    tableClient.CreateTable("AzureStorageTestTableFrom");
                    break;
                }
                catch (Exception)
                {
                    Thread.Sleep(1000);
                }
            }
            while (true)
            {
                try
                {
                    tableClient.CreateTable("AzureStorageTestTableTo");
                    break;
                }
                catch (Exception)
                {
                    Thread.Sleep(1000);
                }
            }

            Console.WriteLine("Uploading data...");
            for (int i = 0; i < 30; i++)
            {
                SquareRow row = new SquareRow()
                {
                    Number       = i,
                    Squared      = i * i,
                    PartitionKey = "0",
                    RowKey       = Guid.NewGuid().ToString(),
                };
                context.AddObject("AzureStorageTestTableFrom", row);
            }
            context.SaveChanges();

            Console.WriteLine("Checking data...");
            var query =
                from row in context.CreateQuery <SquareRow>("AzureStorageTestTableFrom")
                where row.Number < 10
                select row;

            foreach (var square in query)
            {
                DoubleRow row = new DoubleRow()
                {
                    Number       = square.Squared,
                    Doubled      = square.Squared * 2,
                    PartitionKey = "0",
                    RowKey       = Guid.NewGuid().ToString(),
                };
                context.AddObject("AzureStorageTestTableTo", row);
            }
            context.SaveChanges();
        }
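
Note that SaveChanges() above sends one request per pending entity. Because every row in this sample shares PartitionKey "0" and each save covers well under 100 rows, the writes could instead go out as a single entity-group transaction:

            context.SaveChanges(SaveChangesOptions.Batch);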
Example #7
 public LogContext(TableServiceContext context)
 {
     this.tableContext = context;
 }
Example #8
 public OfflineReadyController(TableServiceContext dbContext)
 {
     TableRepository = new EntityTableRepository <OfflineReady>(dbContext);
 }
Example #9
 public ProAzureReaderDataSource(string storageAccountConnectionString)
 {
     Init(storageAccountConnectionString);
     dContext             = TableClient.GetDataServiceContext();
     dContext.RetryPolicy = RetryPolicies.Retry(3, TimeSpan.FromSeconds(5));
 }
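
RetryPolicies.Retry gives a fixed retry interval. An exponential-backoff alternative from the same StorageClient API, sketched with its default constants:

     dContext.RetryPolicy = RetryPolicies.RetryExponential(
         RetryPolicies.DefaultClientRetryCount,
         RetryPolicies.DefaultClientBackoff);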
Example #10
        // Based on: http://blogs.msdn.com/b/cesardelatorre/archive/2011/03/12/typical-issue-one-of-the-request-inputs-is-not-valid-when-working-with-the-wa-development-storage.aspx
        private async Task InitializeTableSchemaFromEntity(CloudTableClient tableClient)
        {
            const string operation = "InitializeTableSchemaFromEntity";
            var          startTime = DateTime.UtcNow;

            TableServiceEntity entity = new T();

            entity.PartitionKey = Guid.NewGuid().ToString();
            entity.RowKey       = Guid.NewGuid().ToString();
            Array.ForEach(
                entity.GetType().GetProperties(BindingFlags.Public | BindingFlags.Instance),
                p =>
            {
                if ((p.Name == "PartitionKey") || (p.Name == "RowKey") || (p.Name == "Timestamp"))
                {
                    return;
                }

                if (p.PropertyType == typeof(string))
                {
                    p.SetValue(entity, Guid.NewGuid().ToString(),
                               null);
                }
                else if (p.PropertyType == typeof(DateTime))
                {
                    p.SetValue(entity, startTime, null);
                }
            });

            try
            {
                TableServiceContext svc = tableClient.GetDataServiceContext();
                svc.AddObject(TableName, entity);

                try
                {
                    await Task <DataServiceResponse> .Factory.FromAsync(
                        svc.BeginSaveChangesWithRetries,
                        svc.EndSaveChangesWithRetries,
                        SaveChangesOptions.None,
                        null);
                }
                catch (Exception exc)
                {
                    CheckAlertWriteError(operation + "-Create", entity, null, exc);
                    throw;
                }

                try
                {
                    svc.DeleteObject(entity);
                    await Task <DataServiceResponse> .Factory.FromAsync(
                        svc.BeginSaveChangesWithRetries,
                        svc.EndSaveChangesWithRetries,
                        SaveChangesOptions.None,
                        null);
                }
                catch (Exception exc)
                {
                    CheckAlertWriteError(operation + "-Delete", entity, null, exc);
                    throw;
                }
            }
            finally
            {
                CheckAlertSlowAccess(startTime, operation);
            }
        }
Example #11
 private void LogJob(string jobId)
 {
     try
     {
         JobInfo jobInfo = null;
         using (var conn = new SqlConnection(_connectionString))
         {
             conn.Open();
             try
             {
                 using (var cmd = conn.CreateCommand())
                 {
                     cmd.CommandType = CommandType.StoredProcedure;
                     cmd.CommandText = "GetJobDetail";
                     cmd.Parameters.AddWithValue("jobId", jobId);
                     using (var reader = cmd.ExecuteReader())
                     {
                         if (reader.Read())
                         {
                             jobInfo                  = new JobInfo(reader.GetString(0), reader.GetString(1));
                             jobInfo.JobType          = (JobType)reader.GetInt32(2);
                             jobInfo.Status           = (JobStatus)reader.GetInt32(3);
                             jobInfo.StatusMessage    = reader.IsDBNull(4) ? String.Empty : reader.GetString(4);
                             jobInfo.ScheduledRunTime =
                                 reader.IsDBNull(5) ? (DateTime?)null : reader.GetDateTime(5);
                             jobInfo.StartTime           = reader.IsDBNull(6) ? (DateTime?)null : reader.GetDateTime(6);
                             jobInfo.ProcessorId         = reader.IsDBNull(7) ? String.Empty : reader.GetString(7);
                             jobInfo.ProcessingCompleted =
                                 reader.IsDBNull(8) ? (DateTime?)null : reader.GetDateTime(8);
                             jobInfo.ProcessingException =
                                 reader.IsDBNull(9) ? String.Empty : reader.GetString(9);
                             jobInfo.RetryCount = reader.GetInt32(10);
                         }
                     }
                 }
             }
             catch (Exception ex)
             {
                 Trace.TraceError("Error retrieving job detail for job {0}. Cause: {1}", jobId, ex);
             }
             finally
             {
                 conn.Close();
             }
         }
         if (jobInfo != null)
         {
             var connectionString =
                 RoleEnvironment.GetConfigurationSettingValue(AzureConstants.DiagnosticsConnectionStringName);
             CloudStorageAccount storageAccount = CloudStorageAccount.Parse(connectionString);
             CloudTableClient    tableClient    = storageAccount.CreateCloudTableClient();
             tableClient.CreateTableIfNotExist("jobs");
             TableServiceContext serviceContext = tableClient.GetDataServiceContext();
             JobLogEntity        jobLogEntity   = new JobLogEntity(jobInfo);
             serviceContext.AddObject("jobs", jobLogEntity);
             serviceContext.SaveChangesWithRetries();
         }
     }
     catch (Exception ex)
     {
         Trace.TraceError("Error logging detail for job {0}. Cause: {1}", jobId, ex);
     }
 }
Example #12
        internal async Task BulkInsertTableEntries(IReadOnlyCollection <T> data)
        {
            const string operation = "BulkInsertTableEntries";

            if (data == null)
            {
                throw new ArgumentNullException("data");
            }
            if (data.Count > AzureTableDefaultPolicies.MAX_BULK_UPDATE_ROWS)
            {
                throw new ArgumentOutOfRangeException("data", data.Count,
                                                      "Too many rows for bulk update - max " + AzureTableDefaultPolicies.MAX_BULK_UPDATE_ROWS);
            }

            var startTime = DateTime.UtcNow;

            if (Logger.IsVerbose2)
            {
                Logger.Verbose2("Bulk inserting {0} entries to {1} table", data.Count, TableName);
            }

            try
            {
                TableServiceContext svc = tableOperationsClient.GetDataServiceContext();
                foreach (T entry in data)
                {
                    svc.AttachTo(TableName, entry);
                    svc.UpdateObject(entry);
                }

                bool fallbackToInsertOneByOne = false;
                try
                {
                    // SaveChangesOptions.None == Insert-or-merge operation, SaveChangesOptions.Batch == Batch transaction
                    // http://msdn.microsoft.com/en-us/library/hh452241.aspx
                    await Task <DataServiceResponse> .Factory.FromAsync(
                        svc.BeginSaveChangesWithRetries,
                        svc.EndSaveChangesWithRetries,
                        SaveChangesOptions.None | SaveChangesOptions.Batch,
                        null);

                    return;
                }
                catch (Exception exc)
                {
                    Logger.Warn(ErrorCode.AzureTable_37, String.Format("Intermediate error bulk inserting {0} entries in the table {1}",
                                                                       data.Count, TableName), exc);

                    var dsre = exc.GetBaseException() as DataServiceRequestException;
                    if (dsre != null)
                    {
                        var dsce = dsre.GetBaseException() as DataServiceClientException;
                        if (dsce != null)
                        {
                            // Fallback to insert rows one by one
                            fallbackToInsertOneByOne = true;
                        }
                    }

                    if (!fallbackToInsertOneByOne)
                    {
                        throw;
                    }
                }

                // Bulk insert failed, so try to insert rows one by one instead
                var promises = new List <Task>();
                foreach (T entry in data)
                {
                    promises.Add(UpsertTableEntryAsync(entry));
                }
                await Task.WhenAll(promises);
            }
            finally
            {
                CheckAlertSlowAccess(startTime, operation);
            }
        }
Example #13
        /// <summary>
        /// Read data entries and their corresponding eTags from the Azure table.
        /// </summary>
        /// <param name="predicate">Predicate function to use for querying the table and filtering the results.</param>
        /// <returns>Enumeration of entries in the table which match the query condition.</returns>
        internal async Task <IEnumerable <Tuple <T, string> > > ReadTableEntriesAndEtagsAsync(Expression <Func <T, bool> > predicate)
        {
            const string operation = "ReadTableEntriesAndEtags";
            var          startTime = DateTime.UtcNow;

            try
            {
                TableServiceContext svc = tableOperationsClient.GetDataServiceContext();
                // Improve performance when table name differs from class name
                // http://www.gtrifonov.com/2011/06/15/improving-performance-for-windows-azure-tables/
                svc.ResolveType = ResolveEntityType;

                //IQueryable<T> query = svc.CreateQuery<T>(TableName).Where(predicate);
                CloudTableQuery <T> cloudTableQuery = svc.CreateQuery <T>(TableName).Where(predicate).AsTableServiceQuery(); // turn IQueryable into CloudTableQuery

                try
                {
                    Func <Task <List <T> > > executeQueryHandleContinuations = async() =>
                    {
                        // Read table with continuation token
                        // http://convective.wordpress.com/2013/11/03/queries-in-the-windows-azure-storage-client-library-v2-1/

                        // 1) First wrong sync way to read:
                        // List<T> queryResults = query.ToList(); // ToList will actually execute the query and add entities to svc. However, this will not handle continuation tokens.
                        // 2) Second correct sync way to read:
                        // http://convective.wordpress.com/2010/02/06/queries-in-azure-tables/
                        // CloudTableQuery.Execute will properly retrieve all the records from a table through the automatic handling of continuation tokens:
                        Task <ResultSegment <T> > firstSegmentPromise = Task <ResultSegment <T> > .Factory.FromAsync(
                            cloudTableQuery.BeginExecuteSegmented,
                            cloudTableQuery.EndExecuteSegmented,
                            null);

                        // 3) Third wrong async way to read:
                        // return firstSegmentPromise;
                        // 4) Fourth correct async way to read - handles continuation tokens:

                        var list = new List <T>();

                        Task <ResultSegment <T> > nextSegmentAsync = firstSegmentPromise;
                        while (true)
                        {
                            ResultSegment <T> resultSegment = await nextSegmentAsync;
                            var capture = resultSegment.Results;
                            if (capture != null) // don't call Count or Any or anything else that can potentially cause multiple evaluations of the IEnumerable
                            {
                                list.AddRange(capture);
                            }

                            if (!resultSegment.HasMoreResults)
                            {
                                // All data was read successfully if we got to here
                                break;
                            }

                            // ask to read the next segment
                            nextSegmentAsync = Task <ResultSegment <T> > .Factory.FromAsync(
                                resultSegment.BeginGetNext,
                                resultSegment.EndGetNext,
                                null);
                        }

                        return(list);
                    };

                    IBackoffProvider backoff = new FixedBackoff(AzureTableDefaultPolicies.PauseBetweenTableOperationRetries);

                    List <T> results = await AsyncExecutorWithRetries.ExecuteWithRetries(
                        counter => executeQueryHandleContinuations(),
                        AzureTableDefaultPolicies.MaxTableOperationRetries,
                        (exc, counter) => AzureStorageUtils.AnalyzeReadException(exc.GetBaseException(), counter, TableName, Logger),
                        AzureTableDefaultPolicies.TableOperationTimeout,
                        backoff);

                    // Data was read successfully if we got to here
                    return(PairEntitiesWithEtags(svc, results));
                }
                catch (Exception exc)
                {
                    // Out of retries...
                    var errorMsg = string.Format("Failed to read Azure storage table {0}: {1}", TableName, exc.Message);
                    if (!AzureStorageUtils.TableStorageDataNotFound(exc))
                    {
                        Logger.Warn(ErrorCode.AzureTable_09, errorMsg, exc);
                    }
                    throw new OrleansException(errorMsg, exc);
                }
            }
            finally
            {
                CheckAlertSlowAccess(startTime, operation);
            }
        }
Example #14
        /// <summary>
        /// Inserts a data entry in the Azure table: creates a new one if it does not exist, or overwrites an existing version without checking the eTag (the "update in place" semantics).
        /// </summary>
        /// <param name="data">Data to be inserted or replaced in the table.</param>
        /// <returns>Value promise with new Etag for this data entry after completing this storage operation.</returns>
        public async Task <string> UpsertTableEntryAsync(T data)
        {
            const string operation = "UpsertTableEntry";
            var          startTime = DateTime.UtcNow;

            if (Logger.IsVerbose2)
            {
                Logger.Verbose2("{0} entry {1} into table {2}", operation, data, TableName);
            }

            try
            {
                TableServiceContext svc = tableOperationsClient.GetDataServiceContext();
                try
                {
                    Task <DataServiceResponse> savePromise;

                    Func <int, Task <DataServiceResponse> > doSaveChanges = retryNum =>
                    {
                        if (retryNum > 0)
                        {
                            svc.Detach(data);
                        }

                        // Try to do update first
                        svc.AttachTo(TableName, data, ANY_ETAG);
                        svc.UpdateObject(data);

                        return(Task <DataServiceResponse> .Factory.FromAsync(
                                   svc.BeginSaveChangesWithRetries,
                                   svc.EndSaveChangesWithRetries,
                                   SaveChangesOptions.ReplaceOnUpdate,
                                   null));
                    };


                    if (AzureTableDefaultPolicies.MaxBusyRetries > 0)
                    {
                        IBackoffProvider backoff = new FixedBackoff(AzureTableDefaultPolicies.PauseBetweenBusyRetries);

                        savePromise = AsyncExecutorWithRetries.ExecuteWithRetries(
                            doSaveChanges,
                            AzureTableDefaultPolicies.MaxBusyRetries,
                            // Retry automatically iff we get ServerBusy reply from Azure storage
                            (exc, retryNum) => IsServerBusy(exc),
                            AzureTableDefaultPolicies.BusyRetriesTimeout,
                            backoff);
                    }
                    else
                    {
                        // Try single Write only once
                        savePromise = doSaveChanges(0);
                    }
                    await savePromise;
                    EntityDescriptor result = svc.GetEntityDescriptor(data);
                    return(result.ETag);
                }
                catch (Exception exc)
                {
                    Logger.Warn(ErrorCode.AzureTable_06, String.Format("Intermediate error upserting entry {0} to the table {1}",
                                                                       (data == null ? "null" : data.ToString()), TableName), exc);
                    throw;
                }
            }
            finally
            {
                CheckAlertSlowAccess(startTime, operation);
            }
        }
Example #15
 public static void Insert <TEntity>(TableServiceContext serviceContext, EntitySetName <TEntity> entitySetName, TEntity entity) where TEntity : TableServiceEntity
 {
     serviceContext.AddObject(entitySetName.Name, entity);
     serviceContext.SaveChanges();
 }
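
A call sketch; CustomerEntity and the "Customers" entity-set name are hypothetical, with EntitySetName <TEntity> assumed to expose the table name through .Name as the helper above uses it:

     var entity = new CustomerEntity("contoso", Guid.NewGuid().ToString());
     Insert(serviceContext, new EntitySetName <CustomerEntity>("Customers"), entity);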
Example #16
        private void btnUpdate_Click(object sender, EventArgs e)
        {
            try
            {
                TableStorageClient = new TableStorageHelper(txtStorageAcc.Text);

                TableStorageClient.CreateTable(DeploymentTracking.TABLE_NAME);

                Stopwatch s = new Stopwatch();
                s.Start();
                TableServiceContext context = TableStorageClient.GetTableServiceContext();

                long count = int.Parse(txtNumberEntities.Text);

                for (long i = 0; i < count; i++)
                {
                    if (i > 0 && (i % 100) == 0)
                    {
                        // Flush the completed batch of 100 before starting the next one
                        context.SaveChangesWithRetries(SaveChangesOptions.Batch);
                    }

                    context.AddObject(DeploymentTracking.TABLE_NAME,
                                      new DeploymentTracking()
                    {
                        Created       = DateTime.UtcNow,
                        CreatedBy     = Environment.MachineName,
                        IsSuccessful  = true,
                        Operation     = "Test Operation" + i,
                        ProvId        = 1,
                        RetryNumber   = 1,
                        StatusCode    = "Status Code " + i,
                        StatusMessage = "Test Message" + i,
                        // RowKey = System.Guid.NewGuid().ToString("N")
                        RowKey = (i + 1).ToString()
                    });
                }

                DataServiceResponse resp1 = context.SaveChangesWithRetries(SaveChangesOptions.Batch);
                s.Stop();
                MessageBox.Show("Entities Inserted. Time Required i milliseconds. " + s.ElapsedMilliseconds);
            }
            catch (Exception ex)
            {
                MessageBox.Show(ex.Message);
            }
        }
Example #17
 public static void SetPayloadFormatOnDataServiceContext(TableServiceContext ctx, TablePayloadFormat format, CloudTableClient tableClient)
 {
     ctx.Format.UseJson(new TableStorageModel(tableClient.Credentials.AccountName));
 }
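
The symmetric call for the legacy payload is ctx.Format.UseAtom(), as Example #4's writer uses; JSON is the lighter wire format but needs the account name to build its metadata model:

     ctx.Format.UseAtom(); // revert to the Atom-over-XML payload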
Example #18
        public FatPipeDAL(string storageConnectionString, FatpipeManager fpm)
        {
            try
            {
                LoggerFactory.Logger.Debug("FatPipeDAL C'tor", "Initializing Azure accounts");
                this.fpm = fpm;

                // The constructor below is hardcoded for now.. it will use the method params
                if (!string.IsNullOrEmpty(storageConnectionString))
                {
                    this.cloudAcct = CloudStorageAccount.Parse(storageConnectionString);
                }

                else //fallback path to Cloud storage
                {
                    cloudAcct = new CloudStorageAccount(new StorageCredentialsAccountAndKey("maargsoft", "njPcqdHZuYUNbp32GS1tpSAeoUSp1hZ1EJsqRdtnTJe5BZtEmVd61UHonvva6o3WZ1COeAPtTm4ofbMqFaqj7Q=="), false);
                }

                queueClient      = cloudAcct.CreateCloudQueueClient();
                blobClient       = cloudAcct.CreateCloudBlobClient();
                this.tableClient = cloudAcct.CreateCloudTableClient();

                bool runtimeEnv = !string.IsNullOrEmpty(MaargConfiguration.Instance[MaargConfiguration.FatpipeManagerIncomingQueueName]);
                if (runtimeEnv)
                {
                    incomingQ = queueClient.GetQueueReference(MaargConfiguration.Instance[MaargConfiguration.FatpipeManagerIncomingQueueName]);
                    outgoingQ = queueClient.GetQueueReference(MaargConfiguration.Instance[MaargConfiguration.FatpipeManagerOutgoingQueueName]);

                    // Create the queue if it doesn't already exist
                    incomingQ.CreateIfNotExist();
                    outgoingQ.CreateIfNotExist();

                    DateTime today  = DateTime.Today;
                    string   bucket = string.Format(BlobContainerPartitionNameFormat, today.Month, today.Year,
                                                    MaargConfiguration.Instance[MaargConfiguration.FatpipeManagerIncomingBlobContainerNameSuffix]);

                    string outbucket = string.Format(BlobContainerPartitionNameFormat, today.Month, today.Year,
                                                     MaargConfiguration.Instance[MaargConfiguration.FatpipeManagerOutgoingBlobContainerNameSuffix]);

                    // Retrieve a reference to a container
                    inContainer  = blobClient.GetContainerReference(bucket);
                    outContainer = blobClient.GetContainerReference(outbucket);

                    // Create the container if it doesn't already exist
                    inContainer.CreateIfNotExist();
                    outContainer.CreateIfNotExist();
                }

                schemaContainer = blobClient.GetContainerReference(SchemaContainerName);
                schemaContainer.CreateIfNotExist();

                notificationTemplateContainer = blobClient.GetContainerReference(NotificationTemplateContainerName);
                notificationTemplateContainer.CreateIfNotExist();

                //suspended store
                if (runtimeEnv)
                {
                    this.suspendedContainer = this.blobClient.GetContainerReference(MaargConfiguration.Instance[MaargConfiguration.SuspendedMessageBlobContainerName]);
                    this.suspendedContainer.CreateIfNotExist();
                    this.tableClient.CreateTableIfNotExist(MaargConfiguration.Instance[MaargConfiguration.SuspendedMessageTableName]);
                    this.suspendedTableServiceContext = this.tableClient.GetDataServiceContext();

                    int visibilityTimeoutInSeconds;
                    if (!int.TryParse(MaargConfiguration.Instance[MaargConfiguration.FatpipeManagerQueueMessageVisibilityTimeoutInSeconds], out visibilityTimeoutInSeconds))
                    {
                        visibilityTimeoutInSeconds = 30;
                        LoggerFactory.Logger.Warning("FatPipeDAL C'tor", EventId.BizpipeMissingConfigValue
                                                     , "Configuration for {0} is not defined. Using default value of {1} seconds."
                                                     , MaargConfiguration.FatpipeManagerQueueMessageVisibilityTimeoutInSeconds
                                                     , visibilityTimeoutInSeconds);
                    }
                    this.visibilityTimeout = new TimeSpan(0, 0, visibilityTimeoutInSeconds);

                    // does it need public access?
                    this.inContainer.SetPermissions(new BlobContainerPermissions {
                        PublicAccess = BlobContainerPublicAccessType.Blob
                    });
                    this.outContainer.SetPermissions(new BlobContainerPermissions {
                        PublicAccess = BlobContainerPublicAccessType.Blob
                    });
                    this.suspendedContainer.SetPermissions(new BlobContainerPermissions {
                        PublicAccess = BlobContainerPublicAccessType.Blob
                    });
                }
                LoggerFactory.Logger.Debug("FatPipeDAL C'tor", "Connections established to Azure accounts");
            }
            catch (Exception excp)
            {
                LoggerFactory.Logger.Error("FatPipeDAL C'tor", EventId.BizpipeCtor
                                           , "FatPipeDAL constructor encountered exception: {0} ", excp.ToString());
            }
        }
Example #19
        public IQueryable <T> Query <T>(string tableName)
        {
            TableServiceContext context = this.CreateContext <T>();

            return(context.CreateQuery <T>(tableName).AsTableServiceQuery());
        }
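
A usage sketch; OrderEntity is hypothetical. The CloudTableQuery returned by AsTableServiceQuery follows continuation tokens automatically when enumerated:

            foreach (OrderEntity order in Query <OrderEntity>("Orders"))
            {
                Console.WriteLine(order.RowKey);
            }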
Example #20
        // Write a DataTable to an Azure table.
        // The DataTable's Rows are an unstructured property bag.
        // columnTypes - the type of each column, or null if the column should be skipped. columnTypes must have one element per column.
        public static void SaveToAzureTable(DataTable table, CloudStorageAccount account, string tableName, Type[] columnTypes, Func <int, Row, ParitionRowKey> funcComputeKeys)
        {
            if (table == null)
            {
                throw new ArgumentNullException("table");
            }
            if (account == null)
            {
                throw new ArgumentNullException("account");
            }
            if (columnTypes == null)
            {
                throw new ArgumentNullException("columnTypes");
            }
            if (tableName == null)
            {
                throw new ArgumentNullException("tableName");
            }
            ValidateAzureTableName(tableName);

            // Azure tables have "special" columns.
            // We can skip these by setting columnTypes[i] to null, which means: don't write that column.
            string[] columnNames = table.ColumnNames.ToArray();
            if (columnNames.Length != columnTypes.Length)
            {
                throw new ArgumentException(string.Format("columnTypes should have {0} elements", columnNames.Length), "columnTypes");
            }

            columnTypes = columnTypes.ToArray(); // create a copy for mutation.
            for (int i = 0; i < columnNames.Length; i++)
            {
                if (IsSpecialColumnName(columnNames[i]))
                {
                    columnTypes[i] = null;
                }
            }

            if (funcComputeKeys == null)
            {
                funcComputeKeys = GetPartitionRowKeyFunc(columnNames);
            }

            // Validate columnTypes
            string [] edmTypeNames = Array.ConvertAll(columnTypes,
                                                      columnType => {
                if (columnType == null)
                {
                    return(null);
                }
                string edmTypeName;
                _edmNameMapping.TryGetValue(columnType, out edmTypeName);
                if (edmTypeName == null)
                {
                    // Unsupported type!
                    throw new InvalidOperationException(string.Format("Type '{0}' is not a supported type on Azure tables", columnType.FullName));
                }
                return(edmTypeName);
            });


            CloudTableClient tableClient = account.CreateCloudTableClient();

            tableClient.DeleteTableIfExist(tableName);
            tableClient.CreateTableIfNotExist(tableName);


            GenericTableWriter w = new GenericTableWriter
            {
                _edmTypeNames = edmTypeNames,
                _columnNames  = table.ColumnNames.ToArray()
            };

            // Batch rows for performance,
            // but all rows in the batch must have the same partition key
            TableServiceContext ctx = null;
            string lastPartitionKey = null;

            int rowCounter = 0;
            int batchSize  = 0;

            foreach (Row row in table.Rows)
            {
                GenericWriterEntity entity = new GenericWriterEntity {
                    _source = row
                };
                // Compute row and partition keys too.
                var partRow = funcComputeKeys(rowCounter, row);
                entity.PartitionKey = partRow.PartitionKey;
                entity.RowKey       = partRow.RowKey;
                rowCounter++;

                // but all rows in the batch must have the same partition key
                if ((ctx != null) && (lastPartitionKey != null) && (lastPartitionKey != entity.PartitionKey))
                {
                    ctx.SaveChangesWithRetries(SaveChangesOptions.Batch | SaveChangesOptions.ReplaceOnUpdate);
                    ctx = null;
                }

                if (ctx == null)
                {
                    lastPartitionKey = null;
                    ctx = tableClient.GetDataServiceContext();
                    ctx.WritingEntity += new EventHandler <ReadingWritingEntityEventArgs>(w.ctx_WritingEntity);
                    batchSize          = 0;
                }

                // Add entity to the current batch
                ctx.AddObject(tableName, entity);
                lastPartitionKey = entity.PartitionKey;
                batchSize++;

                if (batchSize % 50 == 0)
                {
                    ctx.SaveChangesWithRetries(SaveChangesOptions.Batch | SaveChangesOptions.ReplaceOnUpdate);
                    ctx = null;
                }
            }

            if (ctx != null)
            {
                ctx.SaveChangesWithRetries(SaveChangesOptions.Batch | SaveChangesOptions.ReplaceOnUpdate);
            }
        }
Example #21
        public override void Run()
        {
            var storageAccount = CloudStorageAccount.FromConfigurationSetting("DataConnectionString");

            TableServiceContext tableServiceContext = new TableServiceContext(storageAccount.TableEndpoint.ToString(), storageAccount.Credentials);

            storageAccount.CreateCloudTableClient().CreateTableIfNotExist("ResultsTable");

            //var tableClient = storageAccount.CreateCloudTableClient();
            //tableClient.CreateTableIfNotExist("ResultsTable");

            CloudBlobClient    blobStorage = storageAccount.CreateCloudBlobClient();
            CloudBlobContainer container   = blobStorage.GetContainerReference("datasets");

            CloudQueueClient queueStorage = storageAccount.CreateCloudQueueClient();
            CloudQueue       queue        = queueStorage.GetQueueReference("inputreceiver");

            Trace.TraceInformation("Creating container and queue...");

            // If the Start() method throws an exception, the role recycles.
            // If this sample is run locally and the development storage tool has not been started, this
            // can cause a number of exceptions to be thrown because roles are restarted repeatedly.
            // Let's try to create the queue and the container and check whether the storage services are running
            // at all.
            bool containerAndQueueCreated = false;

            while (!containerAndQueueCreated)
            {
                try
                {
                    container.CreateIfNotExist();

                    var permissions = container.GetPermissions();

                    permissions.PublicAccess = BlobContainerPublicAccessType.Container;

                    container.SetPermissions(permissions);

                    permissions = container.GetPermissions();

                    queue.CreateIfNotExist();

                    containerAndQueueCreated = true;
                }
                catch (StorageClientException e)
                {
                    if (e.ErrorCode == StorageErrorCode.TransportError)
                    {
                        Trace.TraceError(string.Format("Connect failure! The most likely reason is that the local " +
                                                       "Development Storage tool is not running or your storage account configuration is incorrect. " +
                                                       "Message: '{0}'", e.Message));
                        System.Threading.Thread.Sleep(5000);
                    }
                    else
                    {
                        throw;
                    }
                }
            }

            Trace.TraceInformation("Listening for queue messages...");

            // Now that the queue and the container have been created in the above initialization process, get messages
            // from the queue and process them individually.
            while (true)
            {
                try
                {
                    CloudQueueMessage msg = queue.GetMessage();
                    if (msg != null)
                    {
                        // Protocol: [path, (y|n - isHeightMap), filename, (y|n - runParallel)]
                        string[] parts = msg.AsString.Split('\n');

                        // Get the blob directories
                        string         path       = parts[0];
                        CloudBlockBlob content    = container.GetBlockBlobReference(path);
                        CloudBlockBlob resultBlob = container.GetBlockBlobReference("results/" + path);

                        // Read the input file from its blob to a MemoryStream
                        MemoryStream input = new MemoryStream();
                        content.DownloadToStream(input);
                        input.Seek(0, SeekOrigin.Begin);

                        Trace.TraceInformation("Creating Rasterizer...");

                        // Initialize the rasterizer
                        bool       isHeightMap = (parts[1] == "y");
                        bool       runParallel = (parts[3] == "y");
                        Rasterizer r           = new Rasterizer();
                        r.readInput(input, isHeightMap);
                        r.initializeConstraints();

                        Trace.TraceInformation("Will run...");

                        // Run
                        DateTime startTime = DateTime.Now;
                        if (runParallel)
                        {
                            r.stSimplexParallel();
                        }
                        else
                        {
                            r.stSimplex();
                        }
                        TimeSpan duration = DateTime.Now - startTime;

                        Trace.TraceInformation("Finished! Took " + duration.TotalMilliseconds + "ms");

                        // Save results in "/datasets/results/" blob
                        string       result    = r.getConstraintsStr();
                        byte[]       byteArray = Encoding.Default.GetBytes(result);
                        MemoryStream stream    = new MemoryStream(byteArray);
                        resultBlob.Properties.ContentType = "text/plain";
                        resultBlob.UploadFromStream(stream);

                        // Save results metadata in ResultsTable table
                        Uri dsetUri;
                        Uri.TryCreate(content.Uri, path, out dsetUri);
                        tableServiceContext.AddObject("ResultsTable", new ResultsTable()
                        {
                            ResultURL       = resultBlob.Uri,
                            DatasetURL      = dsetUri,
                            DatasetFilename = parts[2],
                            Time            = duration.TotalMilliseconds,
                            IsHeightMap     = isHeightMap,
                            IsParallel      = runParallel
                        });
                        tableServiceContext.SaveChanges();

                        Trace.TraceInformation(string.Format("Done with '{0}'", path));

                        queue.DeleteMessage(msg);
                    }
                    else
                    {
                        System.Threading.Thread.Sleep(1000);
                    }
                }
                catch (Exception e)
                {
                    // Catch all exceptions here because we should be able to recover from
                    // them the next time the queue message that caused the exception
                    // becomes visible again.
                    System.Threading.Thread.Sleep(5000);
                    Trace.TraceError(string.Format("Exception when processing queue item. Message: '{0}'", e.Message));
                }
            }
        }
Example #22
 public static Task <DataServiceResponse> SaveChangesAsync(this TableServiceContext context)
 {
     return(context.SaveChangesAsync(SaveChangesOptions.None));
 }
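
A sketch of the options-taking overload this presumably forwards to, built on the APM pair that DataServiceContext exposes:

 public static Task <DataServiceResponse> SaveChangesAsync(this TableServiceContext context, SaveChangesOptions options)
 {
     return(Task <DataServiceResponse> .Factory.FromAsync(
                context.BeginSaveChanges,
                context.EndSaveChanges,
                options,
                null));
 }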
Example #23
 protected abstract IQueryable <Entity> CreateBenchmarkQuery(TableServiceContext context);
Example #24
0
        private void MovePageRecursively(Models.Site site, string pageFullName, string newParent, TableServiceContext serviceContext)
        {
            var oldPage = Get(new Page(site, pageFullName));
            var entity  = PageEntityHelper.ToPageEntity(oldPage);

            if (!string.IsNullOrEmpty(newParent))
            {
                var newPage = new Page(new Page(site, newParent), oldPage.Name);
                entity.FullName   = newPage.FullName;
                entity.ParentPage = newPage.Parent.FullName;
            }
            else
            {
                entity.FullName   = oldPage.Name;
                entity.ParentPage = "";
            }

            foreach (var item in ChildPages(oldPage))
            {
                MovePageRecursively(site, item.FullName, entity.FullName, serviceContext);
            }

            serviceContext.AddObject(PageTable, entity);
            var oldEntity = PageEntityHelper.ToPageEntity(oldPage);

            serviceContext.AttachTo(PageTable, oldEntity, "*");
            serviceContext.DeleteObject(oldEntity);
        }
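Note that MovePageRecursively only stages adds and deletes on the context; nothing reaches the table service until the caller commits. A hypothetical caller (CreateDataServiceContext is an assumed factory, mirroring the other providers in this listing):

        public void MovePage(Models.Site site, string pageFullName, string newParent)
        {
            var serviceContext = CreateDataServiceContext();   // assumed factory method
            MovePageRecursively(site, pageFullName, newParent, serviceContext);
            serviceContext.SaveChangesWithRetries();           // commit the staged move
        }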
        // we don't use the retry policy itself in this function because retry
        // policies do not handle out parameters well
        private SessionStateStoreData GetSession(HttpContext context, string id, out bool locked, out TimeSpan lockAge,
                                                 out object lockId, out SessionStateActions actions,
                                                 bool exclusive)
        {
            Debug.Assert(context != null);
            SecUtility.CheckParameter(ref id, true, true, false, Configuration.MaxStringPropertySizeInChars, "id");

            SessionRow session = null;

            int  curRetry = 0;
            bool retry    = false;

            // Assign default values to out parameters
            locked  = false;
            lockId  = null;
            lockAge = TimeSpan.Zero;
            actions = SessionStateActions.None;

            do
            {
                retry = false;
                try
                {
                    TableServiceContext svc = CreateDataServiceContext();
                    session = GetSession(id, svc);

                    // (Re)assign default values to the out parameters on every retry
                    locked  = false;
                    lockId  = null;
                    lockAge = TimeSpan.Zero;
                    actions = SessionStateActions.None;

                    // if the session row does not exist, we return null
                    // ASP.NET will then call the corresponding method for creating the session
                    if (session == null)
                    {
                        return(null);
                    }
                    if (!session.Initialized)
                    {
                        Debug.Assert(!session.Locked);
                        actions             = SessionStateActions.InitializeItem;
                        session.Initialized = true;
                    }
                    session.ExpiresUtc = DateTime.UtcNow.AddMinutes(session.Timeout);
                    if (exclusive)
                    {
                        if (!session.Locked)
                        {
                            if (session.Lock == Int32.MaxValue)
                            {
                                session.Lock = 0;
                            }
                            else
                            {
                                session.Lock++;
                            }
                            session.LockDateUtc = DateTime.UtcNow;
                        }
                        lockId         = session.Lock;
                        locked         = session.Locked;
                        session.Locked = true;
                    }
                    lockAge = DateTime.UtcNow.Subtract(session.LockDateUtc);
                    lockId  = session.Lock;

                    if (locked)
                    {
                        return(null);
                    }

                    // let's try to write this back to the data store
                    // in between, someone else could have written something to the store for the same session
                    // we retry a number of times; if all fails, we throw an exception
                    svc.UpdateObject(session);
                    svc.SaveChangesWithRetries();
                }
                catch (InvalidOperationException e)
                {
                    // a PreconditionFailed status code means another request updated the session
                    // row concurrently (its ETag no longer matches), so retry the operation
                    if (e.InnerException is DataServiceClientException && (e.InnerException as DataServiceClientException).StatusCode == (int)HttpStatusCode.PreconditionFailed)
                    {
                        retry = true;
                    }
                    else
                    {
                        throw new ProviderException("Error accessing the data store.", e);
                    }
                }
            } while (retry && curRetry++ < NumRetries);

            // ok, now we have successfully written back our state
            // we can now read the blob
            // note that we do not need to care about read/write locking when accessing the
            // blob because each time we write a new session we create a new blob with a different name

            SessionStateStoreData result = null;
            MemoryStream          stream = null;
            StreamReader          reader = null;
            BlobProperties        properties;

            try
            {
                try
                {
                    stream = _blobProvider.GetBlobContent(session.BlobName, out properties);
                }
                catch (Exception e)
                {
                    throw new ProviderException("Couldn't read session blob!", e);
                }

                reader = new StreamReader(stream);
                if (actions == SessionStateActions.InitializeItem)
                {
                    // Return an empty SessionStateStoreData
                    result = new SessionStateStoreData(new SessionStateItemCollection(),
                                                       SessionStateUtility.GetSessionStaticObjects(context), session.Timeout);
                }
                else
                {
                    // Read Items and StaticObjects from the blob; the timeout comes from the session row
                    byte[] items   = Convert.FromBase64String(reader.ReadLine());
                    byte[] statics = Convert.FromBase64String(reader.ReadLine());
                    int    timeout = session.Timeout;
                    // Deserialize the session
                    result = DeserializeSession(items, statics, timeout);
                }
            }
            finally
            {
                if (stream != null)
                {
                    stream.Close();
                }
                if (reader != null)
                {
                    reader.Close();
                }
            }
            return(result);
        }
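The single-row lookup GetSession(id, svc) is not included in the excerpt; a plausible sketch, assuming each session is stored in one row keyed by application name and session id (the key scheme is an assumption):

        private SessionRow GetSession(string id, TableServiceContext svc)
        {
            var query = (from s in svc.CreateQuery <SessionRow>(_tableName)
                         where s.PartitionKey == SecUtility.CombineToKey(_applicationName, id)
                         select s).AsTableServiceQuery();

            // return null when no row exists so the caller can create the session
            return(query.Execute().FirstOrDefault());
        }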
Example No. 26
 internal static void ReleaseContext(TableServiceContext ctx)
 {
     ctx.SendingSignedRequestAction = null;
     ctx.ResetCancellation();
     ctx.ContextSemaphore.Release();
 }
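ReleaseContext implies an acquire counterpart somewhere in the same type; a hypothetical sketch, assuming ContextSemaphore is a SemaphoreSlim guarding shared use of the context:

 internal static void AcquireContext(TableServiceContext ctx)
 {
     // serialize access to the shared context; released again in ReleaseContext
     ctx.ContextSemaphore.Wait();
 }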
        public void TableGetSetPermissionTest()
        {
            CloudTableClient tableClient = GenerateCloudTableClient();
            CloudTable       table       = tableClient.GetTableReference("T" + Guid.NewGuid().ToString("N"));

            try
            {
                table.Create();

                TableServiceContext context = tableClient.GetTableServiceContext();
                context.AddObject(table.Name, new BaseEntity("PK", "RK"));
                context.SaveChangesWithRetries();

                TablePermissions expectedPermissions;
                TablePermissions testPermissions;

                // Test new table permissions.
                expectedPermissions = new TablePermissions();
                testPermissions     = table.GetPermissions();
                AssertPermissionsEqual(expectedPermissions, testPermissions);

                // Test setting empty permissions.
                table.SetPermissions(expectedPermissions);
                Thread.Sleep(30 * 1000);   // allow time for the new permissions to propagate
                testPermissions = table.GetPermissions();
                AssertPermissionsEqual(expectedPermissions, testPermissions);

                // Add a policy, check setting and getting.
                expectedPermissions.SharedAccessPolicies.Add(Guid.NewGuid().ToString(), new SharedAccessTablePolicy
                {
                    Permissions            = SharedAccessTablePermissions.Query,
                    SharedAccessStartTime  = DateTimeOffset.Now - TimeSpan.FromHours(1),
                    SharedAccessExpiryTime = DateTimeOffset.Now + TimeSpan.FromHours(1)
                });

                table.SetPermissions(expectedPermissions);
                Thread.Sleep(30 * 1000);
                testPermissions = table.GetPermissions();
                AssertPermissionsEqual(expectedPermissions, testPermissions);

                // Add a policy, check setting and getting.
                expectedPermissions.SharedAccessPolicies.Add(Guid.NewGuid().ToString(), new SharedAccessTablePolicy
                {
                    Permissions            = SharedAccessTablePermissions.Delete | SharedAccessTablePermissions.Add,
                    SharedAccessStartTime  = DateTimeOffset.Now + TimeSpan.FromHours(1),
                    SharedAccessExpiryTime = DateTimeOffset.Now + TimeSpan.FromDays(1)
                });

                table.SetPermissions(expectedPermissions);
                Thread.Sleep(30 * 1000);
                testPermissions = table.GetPermissions();
                AssertPermissionsEqual(expectedPermissions, testPermissions);

                // Add a policy with no permissions and no start/expiry times, check setting and getting.
                expectedPermissions.SharedAccessPolicies.Add(Guid.NewGuid().ToString(), new SharedAccessTablePolicy
                {
                    Permissions = SharedAccessTablePermissions.None,
                });

                table.SetPermissions(expectedPermissions);
                Thread.Sleep(30 * 1000);
                testPermissions = table.GetPermissions();
                AssertPermissionsEqual(expectedPermissions, testPermissions);

                // Add a policy, check setting and getting.
                expectedPermissions.SharedAccessPolicies.Add(Guid.NewGuid().ToString(), new SharedAccessTablePolicy
                {
                    Permissions            = SharedAccessTablePermissions.Add | SharedAccessTablePermissions.Query | SharedAccessTablePermissions.Update | SharedAccessTablePermissions.Delete,
                    SharedAccessStartTime  = DateTimeOffset.Now + TimeSpan.FromDays(0.5),
                    SharedAccessExpiryTime = DateTimeOffset.Now + TimeSpan.FromDays(1)
                });

                table.SetPermissions(expectedPermissions);
                Thread.Sleep(30 * 1000);
                testPermissions = table.GetPermissions();
                AssertPermissionsEqual(expectedPermissions, testPermissions);

                // Add a policy, check setting and getting.
                expectedPermissions.SharedAccessPolicies.Add(Guid.NewGuid().ToString(), new SharedAccessTablePolicy
                {
                    Permissions            = SharedAccessTablePermissions.Update,
                    SharedAccessStartTime  = DateTimeOffset.Now + TimeSpan.FromHours(6),
                    SharedAccessExpiryTime = DateTimeOffset.Now + TimeSpan.FromHours(6.5)
                });

                table.SetPermissions(expectedPermissions);
                Thread.Sleep(30 * 1000);
                testPermissions = table.GetPermissions();
                AssertPermissionsEqual(expectedPermissions, testPermissions);
            }
            finally
            {
                table.DeleteIfExists();
            }
        }
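The AssertPermissionsEqual helper is not part of the excerpt; a minimal sketch of what it presumably verifies (a real helper may also compare the start/expiry times, ideally with a tolerance, since SAS times are truncated to whole seconds on the round trip):

        private static void AssertPermissionsEqual(TablePermissions expected, TablePermissions actual)
        {
            Assert.AreEqual(expected.SharedAccessPolicies.Count, actual.SharedAccessPolicies.Count);

            foreach (var pair in expected.SharedAccessPolicies)
            {
                SharedAccessTablePolicy policy;
                Assert.IsTrue(actual.SharedAccessPolicies.TryGetValue(pair.Key, out policy));
                Assert.AreEqual(pair.Value.Permissions, policy.Permissions);
            }
        }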
Example No. 28
 protected internal AzureTableServer(CloudStorageAccount account)
 {
     // note: despite its name, RawQueueClient holds a *table* client
     this.RawQueueClient = new CloudTableClient(account.TableEndpoint.AbsoluteUri, account.Credentials);
     this.RawContext     = this.RawQueueClient.GetDataServiceContext();
 }
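Because the constructor is protected internal, consumers in the same assembly would typically derive from the type; a hypothetical concrete server using the development storage account:

 internal class DevTableServer : AzureTableServer
 {
     public DevTableServer()
         : base(CloudStorageAccount.DevelopmentStorageAccount)   // illustration only
     {
     }
 }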
Example No. 29
        // the username to match can be in a format that varies between providers
        // this implementation uses a syntax similar to the one in the SQL provider:
        // "user%" returns all users in the role whose names start with "user"
        // the % sign may only appear at the end of the usernameToMatch parameter
        // because the current version of the table storage service does not support
        // StartsWith in LINQ queries, calling this function with '%' in usernameToMatch
        // can cause significant network traffic
        public override string[] FindUsersInRole(string roleName, string usernameToMatch)
        {
            SecUtility.CheckParameter(ref roleName, true, true, true, MaxTableRoleNameLength, "rolename");
            SecUtility.CheckParameter(ref usernameToMatch, true, true, false, Constants.MaxTableUsernameLength, "usernameToMatch");

            bool startswith = false;

            if (usernameToMatch.Contains('%'))
            {
                if (usernameToMatch.IndexOf('%') != usernameToMatch.Length - 1)
                {
                    throw new ArgumentException("The TableStorageRoleProvider only supports search strings that contain '%' as the last character!");
                }
                usernameToMatch = usernameToMatch.Substring(0, usernameToMatch.Length - 1);
                startswith      = true;
            }

            try
            {
                TableServiceContext        svc      = CreateDataServiceContext();
                DataServiceQuery <RoleRow> queryObj = svc.CreateQuery <RoleRow>(_tableName);

                CloudTableQuery <RoleRow> query;

                if (startswith && string.IsNullOrEmpty(usernameToMatch))
                {
                    // get all users in the role
                    query = (from userRole in queryObj
                             where userRole.PartitionKey.CompareTo(SecUtility.EscapedFirst(_applicationName)) >= 0 &&
                             userRole.PartitionKey.CompareTo(SecUtility.NextComparisonString(SecUtility.EscapedFirst(_applicationName))) < 0 &&
                             userRole.RowKey == SecUtility.Escape(roleName)
                             select userRole).AsTableServiceQuery();
                }
                else if (startswith)
                {
                    // get all users in the role whose names start with the specified string (we cannot
                    // restrict the query further because StartsWith is not supported, and we cannot
                    // include the username to search for in the key because the key might be escaped)
                    query = (from userRole in queryObj
                             where userRole.PartitionKey.CompareTo(SecUtility.EscapedFirst(_applicationName)) >= 0 &&
                             userRole.PartitionKey.CompareTo(SecUtility.NextComparisonString(SecUtility.EscapedFirst(_applicationName))) < 0 &&
                             userRole.RowKey == SecUtility.Escape(roleName) &&
                             (userRole.UserName.CompareTo(usernameToMatch) >= 0 || userRole.UserName == string.Empty)
                             select userRole).AsTableServiceQuery();
                }
                else
                {
                    // get a specific user
                    query = (from userRole in queryObj
                             where (userRole.PartitionKey == SecUtility.CombineToKey(_applicationName, usernameToMatch) ||
                                    userRole.PartitionKey == SecUtility.CombineToKey(_applicationName, string.Empty)) &&
                             userRole.RowKey == SecUtility.Escape(roleName)
                             select userRole).AsTableServiceQuery();
                }

                IEnumerable <RoleRow> userRows = query.Execute();

                if (userRows == null)
                {
                    throw new ProviderException("The role does not exist!");
                }
                List <RoleRow> l = new List <RoleRow>(userRows);
                if (l.Count == 0)
                {
                    // the role does not exist
                    throw new ProviderException("The role does not exist!");
                }
                RoleRow role;
                if (IsStaleRole(l, out role))
                {
                    throw new ProviderException("The role does not exist!");
                }
                List <string> ret = new List <string>();
                foreach (RoleRow row in l)
                {
                    if (row != role)
                    {
                        if (startswith && !string.IsNullOrEmpty(usernameToMatch) && !row.UserName.StartsWith(usernameToMatch, StringComparison.Ordinal))
                        {
                            continue;
                        }
                        ret.Add(row.UserName);
                    }
                }
                return(ret.ToArray());
            }
            catch (InvalidOperationException e)
            {
                throw new ProviderException("Error while accessing the data store.", e);
            }
        }
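A brief usage sketch (the provider instance and role name are hypothetical):

            // all members of "editors" whose user names start with "adm"
            string[] admins = roleProvider.FindUsersInRole("editors", "adm%");

            // exact-match lookup for a single user
            string[] single = roleProvider.FindUsersInRole("editors", "admin01");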
Example No. 30
        public IEnumerable <CloudServiceStatus> GetHostedServiceStatus()
        {
            TableServiceContext context = GetContext();

            return(context.CreateQuery <CloudServiceStatus>(ServiceStatusTableName).AsTableServiceQuery());
        }
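A usage sketch, assuming CloudServiceStatus derives from TableServiceEntity (its definition is not shown in the excerpt):

        foreach (CloudServiceStatus status in GetHostedServiceStatus())
        {
            Trace.TraceInformation("Service {0}/{1}", status.PartitionKey, status.RowKey);
        }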