public void FirstCallFetchesNewItems()
{
    // Arrange: backing store holds no data yet, and any optimistic write succeeds.
    _store.LoadData(_loc).Returns(Task.FromResult<DataWithMetadata>(null));
    _store.TryOptimisticWrite(_loc, null, null, null, CancellationToken.None)
        .ReturnsForAnyArgs(Task.FromResult(new OptimisticStoreWriteResult { Result = true }));
    var idGenerator = GetGenerator(10);

    // Act: request the very first id.
    var firstId = idGenerator.NextId().Result;

    // Assert: exactly one store read happened and ids start from 1.
    _store.Received(1).LoadData(_loc);
    Assert.AreEqual(1, firstId);
}
/// <summary>
/// Loads the AES key blob stored at <paramref name="keyLocation"/> — creating it on first
/// use — and wraps it in a <see cref="CertProtectedEncryptor"/>.
/// </summary>
/// <param name="store">Optimistic store that holds (or will hold) the key blob.</param>
/// <param name="keyLocation">Location of the key blob within the store.</param>
/// <param name="rsaCert">RSA certificate used to protect/unprotect the AES key.</param>
/// <returns>An encryptor backed by the stored key.</returns>
public static async Task<IEncryptor> CreateEncryptor(IOptimisticStore store, StoreLocation keyLocation, RSAServiceProvider rsaCert)
{
    byte[] blob;
    bool isFound;
    do
    {
        var existing = await store.LoadData(keyLocation).ConfigureAwait(false);
        if (existing != null)
        {
            // Key already exists - just read it back out.
            blob = await existing.Stream.ReadBytes().ConfigureAwait(false);
            isFound = true;
        }
        else
        {
            // No key yet - generate one and try to persist it.
            blob = AESBlob.CreateBlob(DefaultKeySize, rsaCert);
            var ct = CancellationToken.None;
            // The optimistic write only creates the file IF THE FILE DOES NOT EXIST,
            // which catches the rare race where two server calls both try to create a key.
            var writeResult = await store.TryOptimisticWrite(keyLocation, null, null, async (s) =>
            {
                await s.WriteAsync(blob, 0, blob.Length, ct).ConfigureAwait(false);
                return blob.Length;
            }, ct).ConfigureAwait(false);
            isFound = writeResult.Result;
        }
    } while (!isFound);

    var encryptor = AESBlob.CreateEncryptor(blob, rsaCert);
    return new CertProtectedEncryptor(keyLocation.Container, encryptor);
}
/// <summary>
/// Loads data from the underlying store and reverses any encryption/compression
/// recorded in the item's metadata.
/// </summary>
/// <param name="location">Where to load the data from.</param>
/// <param name="snapshot">Optional snapshot identifier to load.</param>
/// <param name="encryptor">Encryptor used to decrypt; required when the stored data is encrypted.</param>
/// <returns>The (possibly transformed) stream with its metadata, or null when nothing is stored.</returns>
/// <exception cref="InvalidOperationException">
/// Thrown when the stored encryption/compression algorithm does not match, or when the data
/// is encrypted but no encryptor was supplied.
/// </exception>
public async Task <DataWithMetadata> LoadData(StoreLocation location, string snapshot = null, IEncryptor encryptor = null)
{
    var data = await _store.LoadData(location, snapshot);
    if (data == null) { return null; }

    var metadata = data.Metadata;
    var stream = data.Stream;

    // Check encryption algorithm
    var hasEncryption = metadata.ContainsKey(MetadataConstants.EncryptionMetadataKey);
    if (hasEncryption)
    {
        // BUGFIX: previously `encryptor.Algorithm` was dereferenced without a null check,
        // so loading encrypted data without an encryptor threw NullReferenceException.
        if (encryptor == null)
        {
            throw new InvalidOperationException("Data is encrypted but no encryptor was provided, cannot load data");
        }
        if (metadata[MetadataConstants.EncryptionMetadataKey] != encryptor.Algorithm)
        {
            throw new InvalidOperationException("Encryption Algorithms do not match, cannot load data");
        }
    }

    // Check decompression algorithm
    var hasCompression = metadata.ContainsKey(MetadataConstants.CompressionMetadataKey);
    if (hasCompression && metadata[MetadataConstants.CompressionMetadataKey] != _compressor.Algorithm)
    {
        throw new InvalidOperationException("Compression Algorithms do not match, cannot load data");
    }

    // Modify read stream if required
    if (hasEncryption || hasCompression)
    {
        stream = stream.AddTransformer(s =>
        {
            // This is a read flow, so decryption comes first
            if (hasEncryption) { s = encryptor.Decrypt(s, true); }
            // Then comes the decompression
            if (hasCompression) { s = _compressor.DecompressReadStream(s); }
            return s;
        });
    }

    return new DataWithMetadata(stream, metadata);
}
/// <summary>
/// Loads the AES key blob stored at <paramref name="keyLocation"/> — creating it on first
/// use — and wraps it in a <see cref="CertProtectedEncryptor"/>.
/// </summary>
/// <param name="store">Optimistic store that holds (or will hold) the key blob.</param>
/// <param name="keyLocation">Location of the key blob within the store.</param>
/// <param name="rsaCert">RSA certificate used to protect/unprotect the AES key.</param>
/// <returns>An encryptor backed by the stored key.</returns>
public static async Task <IEncryptor> CreateEncryptor(IOptimisticStore store, StoreLocation keyLocation, RSA rsaCert)
{
    bool isFound;
    byte[] blob;
    do
    {
        // CONSISTENCY: added ConfigureAwait(false) throughout to match the
        // RSAServiceProvider overload — this is library code with no need for the
        // caller's synchronization context.
        var data = await store.LoadData(keyLocation).ConfigureAwait(false);
        if (data == null)
        {
            // Have to create a new key
            blob = AESBlob.CreateBlob(DefaultKeySize, rsaCert);
            var ct = CancellationToken.None;
            // We use an optimistic write so that it will only create the file IF THE FILE DOES NOT EXIST
            // This will catch rare cases where two server calls may try to create two keys
            var result = await store.TryOptimisticWrite(keyLocation, null, null, async (s) =>
            {
                await s.WriteAsync(blob, 0, blob.Length, ct).ConfigureAwait(false);
                return blob.Length;
            }, ct).ConfigureAwait(false);
            isFound = result.Result;
        }
        else
        {
            blob = await data.Stream.ReadBytes().ConfigureAwait(false);
            isFound = true;
        }
    } while (!isFound);

    var encryptor = AESBlob.CreateEncryptor(blob, rsaCert);
    return new CertProtectedEncryptor(keyLocation.Container, encryptor);
}
/// <summary>
/// Reserves the next range of ids from the optimistic sync store. Reads the current upper
/// limit, writes back (current + _rangeSize) with an ETag-guarded optimistic write, and on
/// success publishes the new range via _internalId/_upperIdLimit. Retries on write conflicts.
/// </summary>
/// <exception cref="Exception">
/// Thrown when stored data is corrupt/negative, or when all retries are exhausted.
/// </exception>
private async Task UpdateFromSyncStore()
{
    int retryCount = 0;
    // maxRetries + 1 because the first run isn't a 're'try.
    while (retryCount < _maxRetries + 1)
    {
        string data = null;
        var dataStream = await _store.LoadData(_location);
        if (dataStream != null)
        {
            var all = await dataStream.Stream.ReadBytes();
            data = Encoding.UTF8.GetString(all, 0, all.Length);
        }

        long currentId;
        if (data == null)
        {
            // Nothing stored yet - ids start from zero.
            currentId = 0;
        }
        else
        {
            if (!long.TryParse(data, out currentId))
            {
                throw new Exception(string.Format("Data '{0}' in storage was corrupt and could not be parsed as a long", data));
            }
            if (currentId < 0)
            {
                // BUGFIX: was wrapped in a no-argument string.Format call, which was
                // redundant and would throw FormatException if the literal ever gained a brace.
                throw new Exception("Saved Id cannot be less than 0");
            }
        }

        var upperLimit = currentId + _rangeSize;
        var limitBytes = Encoding.UTF8.GetBytes(upperLimit.ToString(CultureInfo.InvariantCulture));
        // Pass the ETag we read so the write only succeeds if nobody else updated the record since.
        var m = dataStream == null ? null : new Metadata() { ETag = dataStream.Metadata.ETag };
        var ct = CancellationToken.None;
        var result = await _store.TryOptimisticWrite(_location, m, null, async (s) =>
        {
            await s.WriteAsync(limitBytes, 0, limitBytes.Length, ct);
            return limitBytes.Length;
        }, ct);

        if (result.Result)
        {
            // First update currentId
            // Then upper limit, this will avoid any need for locks etc
            _internalId = currentId;
            _upperIdLimit = upperLimit;
            return;
        }

        retryCount++; // update failed, go back around the loop
    }

    throw new Exception(string.Format("Failed to update the OptimisticSyncStore after {0} attempts", retryCount));
}