private void InsertInternal<T>(string tableName, IEnumerable<CloudEntity<T>> entities)
{
    var table = GetTable(tableName);

    // Pair each fat entity with the cloud entity it was converted from.
    var fatEntities = entities.Select(e => Tuple.Create(FatEntity.Convert(e, _serializer), e));

    foreach (var slice in SliceEntities(fatEntities, e => e.Item1.GetPayload()))
    {
        var batchOperation = new TableBatchOperation();
        var cloudEntityOfFatEntity = new Dictionary<object, CloudEntity<T>>();
        foreach (var fatEntity in slice)
        {
            batchOperation.Insert(fatEntity.Item1);
            cloudEntityOfFatEntity.Add(fatEntity.Item1, fatEntity.Item2);
        }

        Retry.Do(_policies.TransientTableErrorBackOff(), CancellationToken.None, () =>
        {
            try
            {
                table.ExecuteBatch(batchOperation);
            }
            catch (StorageException)
            {
                // TODO: handle non-transient error codes here (e.g. an entity that already exists).
                // Rethrow for now so that the retry policy sees transient failures, instead of the
                // exception being silently swallowed and a failed batch reported as a success.
                throw;
            }
        });
    }
}
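// SliceEntities is not shown in this file; the sketch below is only an assumption of what it
// could look like, sized against the Azure Table batch constraints (at most 100 operations per
// batch and roughly 4 MB of payload per batch). The signature and default limits are
// illustrative, not taken from the original source. Note that Azure entity group transactions
// also require all entities in a batch to share the same partition key; enforcing that is
// outside this sketch.
private static IEnumerable<List<T>> SliceEntities<T>(
    IEnumerable<T> entities,
    Func<T, long> payloadOf,
    int maxCount = 100,
    long maxPayload = 4 * 1024 * 1024)
{
    var slice = new List<T>();
    long payload = 0;

    foreach (var entity in entities)
    {
        var size = payloadOf(entity);

        // Close the current slice when adding one more entity would break either limit.
        if (slice.Count > 0 && (slice.Count >= maxCount || payload + size > maxPayload))
        {
            yield return slice;
            slice = new List<T>();
            payload = 0;
        }

        slice.Add(entity);
        payload += size;
    }

    if (slice.Count > 0)
    {
        yield return slice;
    }
}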
private T ConvertFatEntity(FatEntity fat)
{
    T result;

    var data = fat.GetData();
    if (data.Length == 0)
    {
        result = default(T);
    }
    else
    {
        if (_decryptor != null)
        {
            // Decrypt via a write-through wrapper: writing the encrypted payload into the
            // decryptor stream leaves the plaintext bytes in 'ms'.
            using (var ms = new MemoryStream())
            {
                using (var dc = _decryptor.Decrypt(ms, false))
                {
                    dc.Write(data, 0, data.Length);
                }

                data = ms.ToArray();
            }
        }

        result = JsonConvert.DeserializeObject<T>(Encoding.UTF8.GetString(data));
    }

    return result;
}
/// <see cref="ITableStorageProvider.Insert{T}"/> public void Insert <T>(string tableName, IEnumerable <CloudEntity <T> > entities) { lock (_syncRoot) { List <MockTableEntry> entries; if (!_tables.TryGetValue(tableName, out entries)) { _tables.Add(tableName, entries = new List <MockTableEntry>()); } // verify valid data BEFORE inserting them if (entities.Join(entries, u => ToId(u), ToId, (u, v) => true).Any()) { throw new DataServiceRequestException("INSERT: key conflict."); } if (entities.GroupBy(e => ToId(e)).Any(id => id.Count() != 1)) { throw new DataServiceRequestException("INSERT: duplicate keys."); } // ok, we can insert safely now foreach (var entity in entities) { var etag = (_nextETag++).ToString(); entity.ETag = etag; entries.Add(new MockTableEntry { PartitionKey = entity.PartitionKey, RowKey = entity.RowKey, ETag = etag, Value = FatEntity.Convert(entity, DataSerializer) }); } } }
private FatEntity ConvertToFatEntity(E entity)
{
    byte[] data;
    if (entity.DataObject == null)
    {
        data = new byte[0];
    }
    else
    {
        data = Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(entity.DataObject));

        if (_encryptor != null)
        {
            // Encrypt via a write-through wrapper: writing the serialized payload into the
            // encryptor stream leaves the encrypted bytes in 'ms'.
            using (var ms = new MemoryStream())
            {
                using (var enc = _encryptor.Encrypt(ms, false))
                {
                    enc.Write(data, 0, data.Length);
                }

                data = ms.ToArray();
            }
        }
    }

    var fat = new FatEntity
    {
        PartitionKey = entity.PartitionKey,
        RowKey = entity.RowKey
    };
    fat.SetData(data, data.Length);
    return fat;
}
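// _encryptor and _decryptor above are assumed to follow a write-through stream pattern:
// Encrypt/Decrypt return a writable stream wrapping the destination, and disposing the wrapper
// flushes the final block into it. The interface and AES-based implementation below are purely
// hypothetical illustrations of that pattern; the names, the reading of the boolean argument as
// 'leaveOpen', and the choice of AES are not from the original source. Requires System.IO and
// System.Security.Cryptography; the four-argument CryptoStream constructor needs
// .NET Framework 4.7.2+ or .NET Core 2.0+.
public interface IStreamCipher
{
    // Plaintext written to the returned stream ends up encrypted in 'destination'.
    Stream Encrypt(Stream destination, bool leaveOpen);

    // Ciphertext written to the returned stream ends up decrypted in 'destination'.
    Stream Decrypt(Stream destination, bool leaveOpen);
}

public sealed class AesStreamCipher : IStreamCipher
{
    private readonly byte[] _key;
    private readonly byte[] _iv;

    public AesStreamCipher(byte[] key, byte[] iv)
    {
        _key = key;
        _iv = iv;
    }

    public Stream Encrypt(Stream destination, bool leaveOpen)
    {
        var aes = Aes.Create();
        aes.Key = _key;
        aes.IV = _iv;
        // CryptoStream in write mode transforms whatever is written to it and forwards
        // the result to 'destination'.
        return new CryptoStream(destination, aes.CreateEncryptor(), CryptoStreamMode.Write, leaveOpen);
    }

    public Stream Decrypt(Stream destination, bool leaveOpen)
    {
        var aes = Aes.Create();
        aes.Key = _key;
        aes.IV = _iv;
        return new CryptoStream(destination, aes.CreateDecryptor(), CryptoStreamMode.Write, leaveOpen);
    }
}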
public void Convert()
{
    var timevalues = new TimeValue[20000];
    for (int i = 0; i < timevalues.Length; i++)
    {
        timevalues[i] = new TimeValue { Time = new DateTime(2001, 1, 1).AddMinutes(i), Value = i };
    }

    var serie = new TimeSerie { TimeValues = timevalues };
    var cloudEntity = new CloudEntity<TimeSerie> { PartitionKey = "part", RowKey = "key", Value = serie };

    var fatEntity = FatEntity.Convert(cloudEntity, _serializer);
    var cloudEntity2 = FatEntity.Convert<TimeSerie>(fatEntity, _serializer, null);
    var fatEntity2 = FatEntity.Convert(cloudEntity2, _serializer);

    Assert.IsNotNull(cloudEntity2);
    Assert.IsNotNull(fatEntity2);

    Assert.AreEqual(cloudEntity.PartitionKey, fatEntity.PartitionKey);
    Assert.AreEqual(cloudEntity.RowKey, fatEntity.RowKey);

    Assert.AreEqual(cloudEntity.PartitionKey, fatEntity2.PartitionKey);
    Assert.AreEqual(cloudEntity.RowKey, fatEntity2.RowKey);

    Assert.IsNotNull(cloudEntity2.Value);
    Assert.AreEqual(cloudEntity.Value.TimeValues.Length, cloudEntity2.Value.TimeValues.Length);

    for (int i = 0; i < timevalues.Length; i++)
    {
        Assert.AreEqual(cloudEntity.Value.TimeValues[i].Time, cloudEntity2.Value.TimeValues[i].Time);
        Assert.AreEqual(cloudEntity.Value.TimeValues[i].Value, cloudEntity2.Value.TimeValues[i].Value);
    }

    var data1 = fatEntity.GetData();
    var data2 = fatEntity2.GetData();
    Assert.AreEqual(data1.Length, data2.Length);
    for (int i = 0; i < data2.Length; i++)
    {
        Assert.AreEqual(data1[i], data2[i]);
    }
}
private FatEntity BuildEntity(string partitionKey, string rowKey)
{
    var e = new FatEntity { PartitionKey = partitionKey, RowKey = rowKey, ETag = "*" };
    var data = new { testdata = "blah" };
    var bytes = Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(data));
    e.SetData(bytes, bytes.Length);
    return e;
}
private IEnumerable<CloudEntity<T>> GetInternal<T>(string tableName, string filter)
{
    // CloudTable.ExecuteQuery handles continuation tokens internally,
    // CloudTable.ExecuteQuerySegmented does not:
    // http://stackoverflow.com/questions/16017001/does-azure-tablequery-handle-continuation-tokens-internally
    // We use ExecuteQuerySegmented nevertheless, so that results can be yielded as soon as each
    // segment arrives instead of waiting for the full result set.
    var table = _tableStorage.GetTableReference(tableName);

    var query = string.IsNullOrWhiteSpace(filter)
        ? new TableQuery<FatEntity>()
        : new TableQuery<FatEntity>().Where(filter);

    TableContinuationToken continuationToken = null;
    do
    {
        TableQuerySegment<FatEntity> querySegment = null;
        FatEntity[] fatEntities = null;

        var token = continuationToken;
        Retry.Do(_policies.TransientTableErrorBackOff(), CancellationToken.None, () =>
        {
            // TODO: catch the table-not-found case (TableErrorCodeStrings.TableNotFound or
            // StorageErrorCodeStrings.ResourceNotFound) and return an empty result set
            // instead of letting the exception propagate.
            querySegment = table.ExecuteQuerySegmented(query, token);
            fatEntities = querySegment.Results.ToArray();
        });

        foreach (var fatEntity in fatEntities)
        {
            yield return FatEntity.Convert<T>(fatEntity, _serializer);
        }

        // A null continuation token on the segment means the last page has been reached.
        continuationToken = querySegment != null ? querySegment.ContinuationToken : null;
    }
    while (continuationToken != null);
}
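// Example of how a caller might build the 'filter' argument for GetInternal using the standard
// TableQuery helpers of the Azure Storage client. The table name, partition key and row-key
// range below are illustrative only; they are not taken from the original source.
var partitionFilter = TableQuery.GenerateFilterCondition(
    "PartitionKey", QueryComparisons.Equal, "part");

var rowRangeFilter = TableQuery.CombineFilters(
    TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.GreaterThanOrEqual, "key-0000"),
    TableOperators.And,
    TableQuery.GenerateFilterCondition("RowKey", QueryComparisons.LessThan, "key-0100"));

var filter = TableQuery.CombineFilters(partitionFilter, TableOperators.And, rowRangeFilter);

// GetInternal pages through the segments lazily; enumeration can stop early without
// fetching the remaining pages.
foreach (var entity in GetInternal<TimeSerie>("timeseries", filter))
{
    Console.WriteLine("{0}/{1}", entity.PartitionKey, entity.RowKey);
}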
public void Delete(E entity)
{
    CheckNotSaved();

    // Delete is special: only the keys are needed, not the entity data.
    // The "*" ETag makes the delete unconditional.
    var fat = new FatEntity { PartitionKey = entity.PartitionKey, RowKey = entity.RowKey, ETag = "*" };
    _context.Delete(fat);
    _deleteBackupContext.InsertOrReplace(fat);
    _isDirty = true;
}
/// <see cref="ITableStorageProvider.Update{T}"/> public void Update <T>(string tableName, IEnumerable <CloudEntity <T> > entities, bool force) { lock (_syncRoot) { List <MockTableEntry> entries; if (!_tables.TryGetValue(tableName, out entries)) { throw new DataServiceRequestException("UPDATE: table not found."); } // verify valid data BEFORE updating them if (entities.GroupJoin(entries, u => ToId(u), ToId, (u, vs) => vs.Count(entry => force || u.ETag == null || entry.ETag == u.ETag)).Any(c => c != 1)) { throw new DataServiceRequestException("UPDATE: key not found or etag conflict."); } if (entities.GroupBy(e => ToId(e)).Any(id => id.Count() != 1)) { throw new DataServiceRequestException("UPDATE: duplicate keys."); } // ok, we can update safely now foreach (var entity in entities) { var etag = (_nextETag++).ToString(); entity.ETag = etag; var index = entries.FindIndex(entry => entry.PartitionKey == entity.PartitionKey && entry.RowKey == entity.RowKey); entries[index] = new MockTableEntry { PartitionKey = entity.PartitionKey, RowKey = entity.RowKey, ETag = etag, Value = FatEntity.Convert(entity, DataSerializer) }; } } }
public CloudEntity<T> ToCloudEntity<T>(IDataSerializer serializer)
{
    return FatEntity.Convert<T>(Value, serializer, ETag);
}
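// A hypothetical read path for the mock provider, showing where ToCloudEntity fits: entries are
// filtered by partition key and rehydrated with the ETag recorded at insert/update time. This
// method is not part of the original source; it only assumes the _syncRoot, _tables and
// DataSerializer members already used by Insert and Update above.
public IEnumerable<CloudEntity<T>> GetByPartition<T>(string tableName, string partitionKey)
{
    lock (_syncRoot)
    {
        List<MockTableEntry> entries;
        if (!_tables.TryGetValue(tableName, out entries))
        {
            // An unknown table simply yields no entities.
            return Enumerable.Empty<CloudEntity<T>>();
        }

        return entries
            .Where(entry => entry.PartitionKey == partitionKey)
            .Select(entry => entry.ToCloudEntity<T>(DataSerializer))
            .ToList();
    }
}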