public async Task ReadPrimitiveQueue()
        {
            Queue result = await Serializer.DeserializeWrapper <Queue>(@"[1,2]");

            int expected = 1;

            foreach (JsonElement i in result)
            {
                Assert.Equal(expected++, i.GetInt32());
            }

            result = await Serializer.DeserializeWrapper <Queue>(@"[]");

            int         count = 0;
            IEnumerator e     = result.GetEnumerator();

            while (e.MoveNext())
            {
                count++;
            }
            Assert.Equal(0, count);

            QueueWrapper wrapper = await Serializer.DeserializeWrapper <QueueWrapper>(@"[1,2]");

            expected = 1;

            foreach (JsonElement i in wrapper)
            {
                Assert.Equal(expected++, i.GetInt32());
            }
        }
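These tests assume a QueueWrapper that behaves exactly like the non-generic System.Collections.Queue. A minimal sketch of such a type (an assumption for illustration; the wrapper used by the actual test suite may carry more members) is:

using System.Collections;

// Hypothetical wrapper: inherits Queue unchanged, so the serializer can
// enqueue deserialized JSON elements into it just like a plain Queue.
public class QueueWrapper : Queue
{
}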
Example #2
        public static void ReadPrimitiveQueue()
        {
            Queue result   = JsonSerializer.Deserialize <Queue>(Encoding.UTF8.GetBytes(@"[1,2]"));
            int   expected = 1;

            foreach (JsonElement i in result)
            {
                Assert.Equal(expected++, i.GetInt32());
            }

            result = JsonSerializer.Deserialize <Queue>(Encoding.UTF8.GetBytes(@"[]"));

            int         count = 0;
            IEnumerator e     = result.GetEnumerator();

            while (e.MoveNext())
            {
                count++;
            }
            Assert.Equal(0, count);

            QueueWrapper wrapper = JsonSerializer.Deserialize <QueueWrapper>(@"[1,2]");

            expected = 1;

            foreach (JsonElement i in wrapper)
            {
                Assert.Equal(expected++, i.GetInt32());
            }
        }
Example #3
 public void SetUp()
 {
     _mq = GetMessageQueue(_inputQueueName);
     _textQueueWrapper = new QueueWrapper <string>(_mq);
     _intQueueWrapper  = new QueueWrapper <int>(_mq);
     DeleteAllMessagesFromQueues();
 }
Example #4
        public void TestQueueBig()
        {
            using (var dl = new DisposableList())
            {
                var entities = TestHelpers.CreateStarEntities(10000);
                var original = TableGatewayMessage.Create("Star", entities);

                var queue = new QueueWrapper(TestHelpers.GenUnique("gateq"), TestConfig.GatewayStorageAccount);
                var bcont = new BlobContainerWrapper(TestHelpers.GenUnique("gatecont"), TestConfig.GatewayStorageAccount);
                dl.Add(queue.Delete);
                dl.Add(bcont.Delete);

                var blobStorage           = new GatewayBlobStore(bcont);
                GatewayQueueWriter writer = new GatewayQueueWriter(queue, blobStorage);
                writer.Write(original);

                GatewayQueueReader reader = new GatewayQueueReader(queue, blobStorage);
                reader.ReadNextMessage <TableGatewayMessage>(
                    gm =>
                {
                    var rentities = gm.EntitiesAs <StarEntity>().ToList();
                    Assert.AreEqual(entities.Count, rentities.Count);

                    for (int i = 0; i < rentities.Count; ++i)
                    {
                        TestHelpers.AssertEqualStars(entities[i], rentities[i]);
                    }
                },
                    (e, gm, cqm) => Assert.Fail());
            }
        }
Example #5
        private void Load()
        {
            string json = IOUtil.LoadFromPersistentStorage("trackinq_queue.json");

            if (json != null)
            {
                try
                {
                    Log("Try to read queue");
                    QueueWrapper wrapper = JsonUtils.Deserialize <QueueWrapper>(json, true);
                    if (wrapper != null)
                    {
                        this.m_offlineMinutes = wrapper.offlineMinutes;
                        this.m_dropCounter    = wrapper.dropCounter;
                        this.m_batch          = new Queue <TrackingEventData>(wrapper.Queue);
                    }
                    Log("Tracking Queue loaded #" + this.m_batch.Count);
                }
                catch (Exception exception)
                {
                    UnityEngine.Debug.LogError(exception);
                }
            }
            this.m_lastSaved    = DateTime.Now;
            this.m_queueChanged = false;
        }
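Load() above (and Save() in Example #20 below) round-trip the tracker state through a serializable QueueWrapper DTO. Based only on the members referenced in those two methods, a plausible shape of that DTO is the sketch below; the real field types in the project may differ.

using System;
using System.Collections.Generic;

[Serializable]
public class QueueWrapper
{
    public int offlineMinutes;              // assumed numeric type
    public int dropCounter;
    public List <TrackingEventData> Queue;  // snapshot of the in-memory batch (project type)
}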
Example #6
        public void TestQueue()
        {
            using (var dl = new DisposableList())
            {
                var docs     = TestHelpers.CreateStarDocuments(3);
                var original = DocdbGatewayMessage.Create("Star", docs);

                var queue = new QueueWrapper(TestHelpers.GenUnique("gateq"), TestConfig.GatewayStorageAccount);
                dl.Add(queue.Delete);

                var blobStorage           = new InMemoryGatewayBlobStore();
                GatewayQueueWriter writer = new GatewayQueueWriter(queue, blobStorage);
                writer.Write(original);

                GatewayQueueReader reader = new GatewayQueueReader(queue, blobStorage);
                reader.ReadNextMessage <DocdbGatewayMessage>(
                    gm =>
                {
                    var rdocs = gm.DocumentsAs <StarDocument>().ToList();
                    Assert.AreEqual(docs.Count, rdocs.Count);

                    for (int i = 0; i < rdocs.Count; ++i)
                    {
                        TestHelpers.AssertEqualStars(docs[i], rdocs[i]);
                    }
                },
                    (e, gm, cqm) => Assert.Fail());
            }
        }
Example #7
        public void Initialize()
        {
            MyIListWrapper = new WrapperForIList()
            {
                "Hello"
            };
            MyIDictionaryWrapper = new WrapperForIDictionary()
            {
                { "key", "value" }
            };
            MyHashtableWrapper = new HashtableWrapper(new List <KeyValuePair <string, object> > {
                new KeyValuePair <string, object>("key", "value")
            });
            MyArrayListWrapper = new ArrayListWrapper()
            {
                "Hello"
            };
            MySortedListWrapper = new SortedListWrapper()
            {
                { "key", "value" }
            };
            MyStackWrapper = new StackWrapper();
            MyQueueWrapper = new QueueWrapper();

            MyStackWrapper.Push("Hello");
            MyQueueWrapper.Enqueue("Hello");
        }
Example #8
 public void Initialize()
 {
     m_LocalEndPoint      = new NativeArray <NetworkEndPoint>(1, Allocator.Persistent);
     m_LocalEndPoint[0]   = IPCManager.Instance.CreateEndPoint();
     m_queue              = new QueueWrapper();
     m_ConcurrentIPCQueue = m_queue.m_IPCQueue.ToConcurrent();
 }
Example #9
        /// <summary>
        /// Fills the queue with blocks read from the source file
        /// </summary>
        /// <param name="sourceFile">The file being archived</param>
        private void FillingQueue(FileStream sourceFile)
        {
            var remainingFileSize = sourceFile.Length;
            // Round up so a trailing partial block still gets its own entry.
            var blockCount        = (int)(sourceFile.Length % _sizeOfBlock > 0 ? sourceFile.Length / _sizeOfBlock + 1 : sourceFile.Length / _sizeOfBlock);

            _queueOfBlocks   = new QueueWrapper(SystemUsageHelper.GetAvailableRam(), (int)(_sizeOfBlock / (1024 * 1024)));
            _blocksOfArchive = new BlockOfArchive[blockCount];

            for (int i = 0; i < blockCount; i++)
            {
                var lengthOfCurrentBlock = remainingFileSize - _sizeOfBlock > 0 ? _sizeOfBlock : remainingFileSize;
                var buffer    = new byte[lengthOfCurrentBlock];
                var bytesRead = 0;
                while (bytesRead < buffer.Length)
                {
                    // Accumulate partial reads until the whole block is in the buffer.
                    bytesRead += sourceFile.Read(buffer, bytesRead, buffer.Length - bytesRead);
                }

                // Spin until the bounded queue accepts the block.
                while (!_queueOfBlocks.TryEnqueue(new KeyValuePair <int, byte[]>(i, buffer)))
                {
                }

                remainingFileSize -= lengthOfCurrentBlock;
            }
        }
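FillingQueue relies on a bounded, non-blocking TryEnqueue: the producer spins until the queue accepts the next block. A sketch of a queue wrapper that would satisfy this usage is shown below, assuming (this is not confirmed by the snippet) that both constructor arguments are expressed in megabytes and that the cap is simply available RAM divided by block size.

using System;
using System.Collections.Concurrent;
using System.Collections.Generic;

// Hypothetical bounded queue: rejects new blocks once the estimated
// memory budget is reached; the producer retries until space frees up.
public class QueueWrapper
{
    private readonly ConcurrentQueue <KeyValuePair <int, byte[]> > _queue =
        new ConcurrentQueue <KeyValuePair <int, byte[]> >();
    private readonly int _maxItems;

    public QueueWrapper(long availableRamMb, int blockSizeMb)
    {
        _maxItems = Math.Max(1, (int)(availableRamMb / Math.Max(1, blockSizeMb)));
    }

    public bool TryEnqueue(KeyValuePair <int, byte[]> item)
    {
        // Count check and Enqueue are not atomic, which is acceptable here
        // because FillingQueue is the only producer.
        if (_queue.Count >= _maxItems)
        {
            return false;
        }
        _queue.Enqueue(item);
        return true;
    }

    public bool TryDequeue(out KeyValuePair <int, byte[]> item)
    {
        return _queue.TryDequeue(out item);
    }
}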
Example #10
        public GatewayQueueReader(QueueWrapper queue, IGatewayBlobStore gatewayBlobStore)
        {
            Guard.NotNull(queue, "queue");
            Guard.NotNull(gatewayBlobStore, "GatewayBlobStore");

            _queue            = queue;
            _gatewayBlobStore = gatewayBlobStore;
        }
Example #11
File: Form1.cs Project: Oldhenry/Kasper
 private void RunQueue()
 {
     _wrapper = QueueWrapper <int> .Instanse;
     if (!backgroundWorker1.IsBusy)
     {
         backgroundWorker1.RunWorkerAsync(_wrapper);
     }
 }
Example #12
 internal MultiThreadedRunner(int numThreads, Run r)
 {
     _queue     = new QueueWrapper <T>(numThreads);
     _threads   = new Thread[numThreads];
     _runMethod = r;
     for (int i = 0; i < numThreads; i++)
     {
         _threads[i] = new Thread(RunProcess);
     }
 }
Example #13
 private FileUploader(string srcPath, string destPath, AdlsClient client, int numThreads,
                      IfExists doOverwrite, IProgress <TransferStatus> progressTracker, bool notRecurse, bool resume, bool isBinary, CancellationToken cancelToken, bool ingressTest, long chunkSize) : base(srcPath, destPath, client, numThreads, doOverwrite, progressTracker, notRecurse, resume, ingressTest, chunkSize, Path.Combine(Path.GetTempPath(), ".adl", "Upload", GetTransferLogFileName(srcPath, destPath, Path.DirectorySeparatorChar, '/')), cancelToken, $"binary:{isBinary}")
 {
     // If not recurse then we will have one thread and ProducerFirstPass logic loop will run only once
     NumProducerThreads    = NotRecurse ? 1 : NumProducerThreadsFirstPass;
     UploaderProducerQueue = new QueueWrapper <DirectoryInfo>(NumProducerThreads);
     if (FileTransferLog.IsDebugEnabled)
     {
         FileTransferLog.Debug($"FileTransfer.Uploader, Src: {SourcePath}, Dest: {DestPath}, Threads: {NumConsumerThreads}, TrackingProgress: {ProgressTracker != null}, OverwriteIfExist: {DoOverwrite == IfExists.Overwrite}");
     }
     _isBinary   = isBinary;
     _encodeType = Encoding.UTF8;
 }
Example #14
 // MetaDataInfo is appended to the first line of the transfer log.
 internal TransferLog(bool resume, string transferLogFile, string validateMetaDataInfo, bool disableLogging)
 {
     if (string.IsNullOrEmpty(transferLogFile))
     {
         throw new ArgumentNullException(nameof(transferLogFile));
     }
     _transferLogFile = transferLogFile;
     if (disableLogging && resume)
     {
         throw new ArgumentException("resume and disablelogging both cannot be true");
     }
     _disableLogging = disableLogging;
     if (!_disableLogging)
     {
         Utils.CreateParentDirectory(transferLogFile);
         try
         {
             _stream = new FileStream(transferLogFile, resume ? FileMode.Open : FileMode.Create,
                                      FileAccess.ReadWrite);
         }
         catch (FileNotFoundException)
         {
             throw new ArgumentException("You have selected to resume but the resume file does not exist. There can be number of reasons for this: No transfer has been run before for the given source and destination or the last transfer was successful or temp folder was cleaned up. Please run without resume.");
         }
         if (resume)
         {
             LoadedMetaData = new Dictionary <string, MetaData>();
             LoadFrom(validateMetaDataInfo);
         }
         _recordQueue = new QueueWrapper <string>(-1); // Purposeful-We will close it manually
         _writeStream = new StreamWriter(_stream)
         {
             AutoFlush = true
         };
         if (!resume)
         {
             _writeStream.WriteLine($"{FirstLineConst},{validateMetaDataInfo}");
         }
         else
         {
             // This is a precaution that if the transfer broke before with an incomplete line
             // We will ignore an empty line anyways
             _writeStream.WriteLine();
         }
         _writeThread = new Thread(RunMetaDataWrite)
         {
             Name = "MetaDataWriteThread"
         };
         _writeThread.Start();
     }
 }
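The constructor above hands the unbounded _recordQueue to a dedicated MetaDataWriteThread. The body of RunMetaDataWrite is not shown here; a minimal sketch of what such a consumer loop could look like follows, assuming a blocking Poll() that returns null once the queue is closed (that API is an assumption, not taken from the snippet).

 // Hypothetical consumer loop: drains metadata records from _recordQueue
 // and writes each one to the transfer log until the queue is closed.
 private void RunMetaDataWrite()
 {
     string record;
     while ((record = _recordQueue.Poll()) != null)   // assumed blocking Poll(); null signals shutdown
     {
         _writeStream.WriteLine(record);
     }
 }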
Example #15
        public void BulkOfEntitiesConsistent()
        {
            using (var dl = new DisposableList())
            {
                // create the source table
                var sourceTableWrapper = new TableWrapper(TestHelpers.GenUnique(TestConfig.TableName), TestConfig.PrimaryStorageAccount, true);
                dl.Add(sourceTableWrapper.Delete);

                // initialize the target table and attach it to the disposable container
                var targetTable = new TableWrapper(TestHelpers.GenUnique(TestConfig.TableName), TestConfig.SecondaryStorageAccount, true);
                dl.Add(targetTable.Delete);
                var tableParamsResolver = new InMemoryTargetTableResolver();
                tableParamsResolver.Add(TestHelpers.TableKey, targetTable);

                // create gateway blob storage
                var gateBlob = new InMemoryGatewayBlobStore();

                // create a gateway queue
                var gateQueue       = new QueueWrapper(TestHelpers.GenUnique("gateq"), TestConfig.GatewayStorageAccount);
                var gateQueueWriter = new GatewayQueueWriter(gateQueue, gateBlob);

                var sourceTable = new SourceTable <StarEntity>(sourceTableWrapper, gateQueueWriter, TestHelpers.TableKey);

                // 100 entities to satisfy TableStorage's batch requirements
                var entities = TestHelpers.CreateStarEntities(100);

                sourceTable.Write(entities);

                // Now verify that the entities were synced to the secondary table storage
                TableGatewayQueueProcessor gateQueueProcessor = new TableGatewayQueueProcessor(
                    new GatewayQueueReader(gateQueue, gateBlob),
                    tableParamsResolver
                    );

                bool result = gateQueueProcessor.ProcessNext();

                Assert.IsTrue(result);

                var targetEntities = targetTable.ReadEntities <StarEntity>()
                                     .ToList();
                Assert.AreEqual(entities.Count, targetEntities.Count);
                foreach (var entity in entities)
                {
                    TestHelpers.AssertEqualStars(
                        entity,
                        targetEntities.First(x => x.RowKey == entity.RowKey)
                        );
                }
            }
        }
Example #16
        private FileDownloader(string srcPath, string destPath, AdlsClient client, int numThreads,
                               IfExists doOverwrite, IProgress <TransferStatus> progressTracker, bool notRecurse, bool disableTransferLogging, bool resume, CancellationToken cancelToken, bool egressTest, int egressBufferCapacity, long chunkSize) : base(srcPath, destPath, client, numThreads, doOverwrite, progressTracker, notRecurse, disableTransferLogging, resume, egressTest, chunkSize, Path.Combine(Path.GetTempPath(), ".adl", "Download", GetTransferLogFileName(client.AccountFQDN, srcPath, destPath, '/', Path.DirectorySeparatorChar)), cancelToken)
        {
            EgressBufferCapacity = egressBufferCapacity;

            // If not recurse then we will have one thread and ProducerFirstPass logic loop will run only once
            NumProducerThreads      = notRecurse ? 1 : NumProducerThreadsFirstPass;
            DownloaderProducerQueue = new QueueWrapper <DirectoryEntry>(NumProducerThreads);
            DownloaderList          = new List <DirectoryEntry>(DownloaderListCapacity);
            if (FileTransferLog.IsDebugEnabled)
            {
                FileTransferLog.Debug($"FileTransfer.Downloader, Src: {SourcePath}, Dest: {DestPath}, Threads: {NumConsumerThreads}, TrackingProgress: {ProgressTracker != null}, OverwriteIfExist: {DoOverwrite == IfExists.Overwrite}");
            }
        }
Example #17
 private ContentProcessor(AdlsClient client, string path, int numThreads, CancellationToken cancelToken = default(CancellationToken))
 {
     Client        = client;
     CancelToken   = cancelToken;
     NumThreads    = numThreads <= 0 ? AdlsClient.DefaultNumThreads : numThreads;
     _threadWorker = new Thread[NumThreads];
     for (int i = 0; i < NumThreads; i++)
     {
         _threadWorker[i] = new Thread(Run)
         {
             Name = "Thread-" + i
         };
     }
     _queue   = new QueueWrapper <DirectoryEntry>(NumThreads);
     RootPath = path;
 }
Example #18
        /// <summary>
        /// Creates this instance.
        /// </summary>
        /// <param name="queueConfiguration">The queue configuration.</param>
        /// <param name="loggerFactory">The logger factory.</param>
        /// <returns>
        /// The <see cref="IQueue" />
        /// </returns>
        public static IQueue Create(
            QueueConfiguration queueConfiguration,
            ILoggerFactory loggerFactory = null)
        {
            var logger = loggerFactory ?? new LoggerFactory();

            var queueEndpoint = new QueueEndpoint
            {
                ConnectionString = queueConfiguration.QueueConnectionString,
                QueueName        = queueConfiguration.QueueName
            };

            var serializerSettings = new SerializerSettings
            {
                SerializerType = queueConfiguration.SerializerType
            };

            var batchSettings = new BatchSettings
            {
                MaxQueueMessagesPerSchedule = queueConfiguration.MaxQueueMessagesPerSchedule,
                MaxMessagesPerQueueMessage  = queueConfiguration.MaxMessagesPerQueueMessage
            };

            var scheduleSettings = new ScheduleSettings
            {
                ThrottleTime = queueConfiguration.ThrottleTime
            };

            var queueWrapper           = new QueueWrapper(queueEndpoint);
            var messageSerializer      = new MessageSerializer(serializerSettings);
            var queueMessageSerializer = new QueueMessageSerializer(batchSettings, messageSerializer);

            var buffer = new Buffer(logger, queueWrapper, queueMessageSerializer);

            var stopwatchFactory = new StopwatchFactory();
            var delayCalculator  = new DelayCalculator();
            var pumpProcessor    = new PumpProcessor(
                logger,
                buffer,
                stopwatchFactory,
                delayCalculator,
                scheduleSettings);

            var queuePump = new QueuePump(buffer, pumpProcessor);

            return(new Queue(queuePump));
        }
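A sketch of how a caller might use this factory is shown below. It assumes QueueConfiguration is a plain settable object and that ThrottleTime is a TimeSpan; the property values are illustrative only.

        var configuration = new QueueConfiguration
        {
            QueueConnectionString       = "UseDevelopmentStorage=true",   // illustrative connection string
            QueueName                   = "my-queue",
            SerializerType              = SerializerType.ProtocolBuffers,
            MaxQueueMessagesPerSchedule = 100,
            MaxMessagesPerQueueMessage  = 10,
            ThrottleTime                = TimeSpan.FromSeconds(1)
        };

        IQueue queue = Queue.Create(configuration);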
Example #19
        private AclProcessor(string path, AdlsClient client, List <AclEntry> aclEntries, RequestedAclType type, int threadCount, IProgress <AclProcessorStats> aclStatusTracker, CancellationToken cancelToken, bool verify = false, string verifyFile = null, bool ignoreVerifyTimeErrors = false)
        {
            _inputPath    = path;
            Client        = client;
            NumThreads    = threadCount <= 0 ? AdlsClient.DefaultNumThreads : threadCount;
            Queue         = new PriorityQueueWrapper <BaseJob>(NumThreads);
            _threadWorker = new Thread[NumThreads];
            if (aclEntries == null || aclEntries.Count == 0)
            {
                throw new ArgumentException("Input acl is null or empty");
            }
            AclEntries     = aclEntries;
            FileAclEntries = new List <AclEntry>(AclEntries.Count);
            foreach (var entry in AclEntries)
            {
                if (entry.Scope == AclScope.Access)
                {
                    FileAclEntries.Add(entry);
                }
            }

            if (FileAclEntries.Count == 0 && AclLog.IsDebugEnabled)
            {
                AclLog.Debug("AclEntries for file are empty so input acl must be containing default acls");
            }
            Type              = type;
            _isVerify         = verify;
            _aclStatusTracker = aclStatusTracker;
            _cancelToken      = cancelToken;
            // If verify file is passed we have to setup a thread and a filestream to write to the file
            if (verify && !string.IsNullOrEmpty(verifyFile))
            {
                _ignoreVerifyTimeErrors = ignoreVerifyTimeErrors;
                _incorrectVerifyFile    = verifyFile;
                _incorrectFileList      = new QueueWrapper <string>(-1);
                Utils.CreateParentDirectory(_incorrectVerifyFile);
                _incorrectVerifyFileStream = new StreamWriter(new FileStream(_incorrectVerifyFile, FileMode.OpenOrCreate, FileAccess.ReadWrite))
                {
                    AutoFlush = true
                };
            }
            _linkPaths = new ConcurrentBag <string>();
            if (AclLog.IsDebugEnabled)
            {
                AclLog.Debug($"AclProcessor, Name: {_inputPath}, Threads: {NumThreads}, AclChangeType: {Type}, InputAcl: {string.Join(":", AclEntries)}{(_isVerify ? ", RunInVerifyMode" : string.Empty)}");
            }
        }
Example #20
        private void Save()
        {
            QueueWrapper dataObject = new QueueWrapper();

            dataObject.Queue          = new List <TrackingEventData>(this.m_batch);
            dataObject.offlineMinutes = this.m_offlineMinutes;
            dataObject.dropCounter    = this.m_dropCounter;
            if (IOUtil.SaveToPersistentStorage(JsonUtils.Serialize(dataObject), "trackinq_queue.json", ConfigApp.PersistentStorageEncryptionEnabled, true))
            {
                this.m_queueChanged = false;
                Log("Tracking Queue saved #" + this.m_batch.Count);
            }
            else
            {
                Log("Failed to save Tracking Queue");
            }
            this.m_lastSaved = DateTime.Now;
        }
Example #21
        public void TestMultiQueueBig()
        {
            using (var dl = new DisposableList())
            {
                var entities = TestHelpers.CreateStarEntities(10000);
                var original = TableGatewayMessage.Create("Star", entities);

                List <QueueWrapper> queues = new List <QueueWrapper>();
                for (int i = 0; i < 3; ++i)
                {
                    var queue = new QueueWrapper(TestHelpers.GenUnique("gateq" + i), TestConfig.GatewayStorageAccount);
                    dl.Add(queue.Delete);
                    queues.Add(queue);
                }

                var blobStorage = new InMemoryGatewayBlobStore();
                GatewayMultiQueueWriter writer = new GatewayMultiQueueWriter(queues, blobStorage);
                writer.Write(original);
                Assert.AreEqual(1, blobStorage.Count);

                foreach (var queue in queues)
                {
                    GatewayQueueReader reader = new GatewayQueueReader(queue, blobStorage);
                    bool processed            = false;
                    reader.ReadNextMessage <TableGatewayMessage>(
                        gm =>
                    {
                        var rentities = gm.EntitiesAs <StarEntity>().ToList();
                        Assert.AreEqual(entities.Count, rentities.Count);

                        for (int i = 0; i < rentities.Count; ++i)
                        {
                            TestHelpers.AssertEqualStars(entities[i], rentities[i]);
                        }

                        processed = true;
                    },
                        (e, gm, cqm) => Assert.Fail());

                    Assert.IsTrue(processed);
                }
            }
        }
Example #22
        public void EntityConsistent()
        {
            using (var dl = new DisposableList())
            {
                // create the source table
                var sourceTableWrapper = new TableWrapper(TestHelpers.GenUnique(TestConfig.TableName), TestConfig.PrimaryStorageAccount, true);
                dl.Add(sourceTableWrapper.Delete);

                // initialize the target table and attach it to the disposable container
                var targetTable = new TableWrapper(TestHelpers.GenUnique(TestConfig.TableName), TestConfig.SecondaryStorageAccount, true);
                dl.Add(targetTable.Delete);
                var tableParamsResolver = new InMemoryTargetTableResolver();
                tableParamsResolver.Add(TestHelpers.TableKey, targetTable);

                // create gateway blob storage
                var gateBlob = new InMemoryGatewayBlobStore();

                // create a gateway queue
                var gateQueue       = new QueueWrapper(TestHelpers.GenUnique("gateq"), TestConfig.GatewayStorageAccount);
                var gateQueueWriter = new GatewayQueueWriter(gateQueue, gateBlob);

                var sourceTable = new SourceTable <StarEntity>(sourceTableWrapper, gateQueueWriter, TestHelpers.TableKey);

                var entity = TestHelpers.CreateStarEntity();

                // write the entity
                sourceTable.Write(entity);

                // now verify that the entity was synced to the secondary table storage
                TableGatewayQueueProcessor gateQueueProcessor = new TableGatewayQueueProcessor(
                    new GatewayQueueReader(gateQueue, gateBlob),
                    tableParamsResolver
                    );

                bool result = gateQueueProcessor.ProcessNext();

                Assert.IsTrue(result);

                var targetEntity = targetTable.ReadEntity <StarEntity>(entity.PartitionKey, entity.RowKey);
                TestHelpers.AssertEqualStars(entity, targetEntity);
            }
        }
Example #23
        public async Task SubmitMessagesAsync_WhenMessagesSubmitted_ExpectOnTheQueue()
        {
            // Arrange
            var queueEndpoint = new QueueEndpoint
            {
                ConnectionString = StorageHelpers.ConnectionString,
                QueueName        = this.testQueueName
            };

            var queueWrapper = new QueueWrapper(queueEndpoint);

            var data = new List <CloudQueueMessage>();

            for (var i = 0; i < 10; i++)
            {
                var testEntity = new TestEntity {
                    Data = $"Test{i}"
                }.SerializeToString();
                var queueMessage = new CloudQueueMessage(testEntity);
                data.Add(queueMessage);
            }

            // Act
            var stopwatch = Stopwatch.StartNew();
            await queueWrapper.SubmitMessagesAsync(data, CancellationToken.None).ConfigureAwait(false);

            stopwatch.Stop();

            // Assert
            this.WriteTimeElapsed(stopwatch);

            var cloudQueueMessage = await StorageHelpers.GetQueueReference(this.testQueueName).GetMessagesAsync(10).ConfigureAwait(false);

            var list = cloudQueueMessage.ToList();

            Assert.That(list, Is.Not.Null);
            Assert.That(list.Count, Is.EqualTo(10));
        }
Example #24
        public async Task SubmitMessageAsync_WhenMessageSubmitted_ExpectOnTheQueue()
        {
            // Arrange
            var queueEndpoint = new QueueEndpoint
            {
                ConnectionString = StorageHelpers.ConnectionString,
                QueueName        = this.testQueueName
            };

            var queueWrapper = new QueueWrapper(queueEndpoint);

            var data = new TestEntity {
                Data = "Test"
            }.SerializeToVersionedMessage(SerializerType.ProtocolBuffers).SerializeToString();
            var cloudMessage = new CloudQueueMessage(data);

            // Act
            var stopwatch = Stopwatch.StartNew();
            await queueWrapper.SubmitMessageAsync(cloudMessage, CancellationToken.None).ConfigureAwait(false);

            stopwatch.Stop();

            // Assert
            this.WriteTimeElapsed(stopwatch);

            var cloudQueueMessage = await StorageHelpers.GetQueueReference(this.testQueueName).GetMessageAsync().ConfigureAwait(false);

            Assert.That(cloudQueueMessage, Is.Not.Null);

            var testEntity = cloudQueueMessage
                             .AsString
                             .DeserializeToVersionedMessage(SerializerType.ProtocolBuffers)
                             .Data
                             .Deserialize <TestEntity>();

            Assert.That(testEntity, Is.Not.Null);
            Assert.That(testEntity.Data, Is.EqualTo("Test"));
        }
Example #25
        public void DocumentConsistent()
        {
            const string gatewayKey = "Stars";

            // create gateway blob storage
            var gateBlob = new InMemoryGatewayBlobStore();

            // create a gateway queue
            var gateQueue       = new QueueWrapper(TestHelpers.GenUnique("gateq"), TestConfig.GatewayStorageAccount);
            var gateQueueWriter = new GatewayQueueWriter(gateQueue, gateBlob);

            var doc = TestHelpers.CreateStarDocument();

            SourceCollection           scol = new SourceCollection(_primaryClient, _primaryCollection, gateQueueWriter, gatewayKey);
            DocdbGatewayQueueProcessor gateQueueProcessor = new DocdbGatewayQueueProcessor(
                new GatewayQueueReader(gateQueue, gateBlob),
                new FixedTargetCollectionResolver(_secondaryClient, _secondaryCollection)
                );
            var tcol = new TargetCollection(_secondaryClient, _secondaryCollection);

            scol.Write(doc);
            Assert.IsTrue(gateQueueProcessor.ProcessNext());

            var tdoc = tcol.ReadDocument <StarDocument>(doc.Id);

            TestHelpers.AssertEqualStars(doc, tdoc);
            Assert.AreEqual(1, tdoc.Version);

            // same Id but different data in the object
            doc = TestHelpers.CreateStarDocument();
            scol.Write(doc);
            Assert.AreEqual(2, doc.Version); // verify the side effect of setting Version
            Assert.IsTrue(gateQueueProcessor.ProcessNext());
            tdoc = tcol.ReadDocument <StarDocument>(doc.Id);
            TestHelpers.AssertEqualStars(doc, tdoc);
            Assert.AreEqual(2, tdoc.Version);
        }
Example #26
        public void QueueWrapperDequeueW2Threads()
        {
            QueueBase <string> queue = new QueueWrapper <string>();

            base.TestEnqueueDequeue(queue, 2);
        }
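The QueueBase<T>/QueueWrapper<T> pair exercised by this test (and by Example #30 below) is only required to be safe under concurrent enqueues and dequeues. One plausible shape, assuming QueueBase<T> declares Enqueue and TryDequeue (the actual contract is not shown in these snippets), is:

using System.Collections.Concurrent;

// Hypothetical contract and thread-safe implementation backing the tests above.
public abstract class QueueBase <T>
{
    public abstract void Enqueue(T item);
    public abstract bool TryDequeue(out T item);
}

public class QueueWrapper <T> : QueueBase <T>
{
    private readonly ConcurrentQueue <T> _queue = new ConcurrentQueue <T>();

    public override void Enqueue(T item) => _queue.Enqueue(item);

    public override bool TryDequeue(out T item) => _queue.TryDequeue(out item);
}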
Example #27
 // Call only when testing serialization.
 public void Initialize()
 {
     MyQueueWrapper = new QueueWrapper(new List <object> {
         "Hello"
     });
 }
Example #28
        public CrossPartitionRangePageAsyncEnumerator(
            IFeedRangeProvider feedRangeProvider,
            CreatePartitionRangePageAsyncEnumerator <TPage, TState> createPartitionRangeEnumerator,
            IComparer <PartitionRangePageAsyncEnumerator <TPage, TState> > comparer,
            int?maxConcurrency,
            CancellationToken cancellationToken,
            CrossFeedRangeState <TState> state = default)
        {
            this.feedRangeProvider = feedRangeProvider ?? throw new ArgumentNullException(nameof(feedRangeProvider));
            this.createPartitionRangeEnumerator = createPartitionRangeEnumerator ?? throw new ArgumentNullException(nameof(createPartitionRangeEnumerator));
            this.cancellationToken = cancellationToken;

            this.lazyEnumerators = new AsyncLazy <IQueue <PartitionRangePageAsyncEnumerator <TPage, TState> > >(async(ITrace trace, CancellationToken token) =>
            {
                ReadOnlyMemory <FeedRangeState <TState> > rangeAndStates;
                if (state != default)
                {
                    rangeAndStates = state.Value;
                }
                else
                {
                    // Fan out to all partitions with default state
                    List <FeedRangeEpk> ranges = await feedRangeProvider.GetFeedRangesAsync(trace, token);

                    List <FeedRangeState <TState> > rangesAndStatesBuilder = new List <FeedRangeState <TState> >(ranges.Count);
                    foreach (FeedRangeInternal range in ranges)
                    {
                        rangesAndStatesBuilder.Add(new FeedRangeState <TState>(range, default));
                    }

                    rangeAndStates = rangesAndStatesBuilder.ToArray();
                }

                List <BufferedPartitionRangePageAsyncEnumerator <TPage, TState> > bufferedEnumerators = new List <BufferedPartitionRangePageAsyncEnumerator <TPage, TState> >(rangeAndStates.Length);
                for (int i = 0; i < rangeAndStates.Length; i++)
                {
                    FeedRangeState <TState> feedRangeState = rangeAndStates.Span[i];
                    PartitionRangePageAsyncEnumerator <TPage, TState> enumerator = createPartitionRangeEnumerator(feedRangeState);
                    BufferedPartitionRangePageAsyncEnumerator <TPage, TState> bufferedEnumerator = new BufferedPartitionRangePageAsyncEnumerator <TPage, TState>(enumerator, cancellationToken);
                    bufferedEnumerators.Add(bufferedEnumerator);
                }

                if (maxConcurrency.HasValue)
                {
                    await ParallelPrefetch.PrefetchInParallelAsync(bufferedEnumerators, maxConcurrency.Value, trace, token);
                }

                IQueue <PartitionRangePageAsyncEnumerator <TPage, TState> > queue;
                if (comparer == null)
                {
                    queue = new QueueWrapper <PartitionRangePageAsyncEnumerator <TPage, TState> >(
                        new Queue <PartitionRangePageAsyncEnumerator <TPage, TState> >(bufferedEnumerators));
                }
                else
                {
                    queue = new PriorityQueueWrapper <PartitionRangePageAsyncEnumerator <TPage, TState> >(
                        new PriorityQueue <PartitionRangePageAsyncEnumerator <TPage, TState> >(
                            bufferedEnumerators,
                            comparer));
                }

                return(queue);
            });
        }
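Both overloads of this constructor wrap either a plain Queue<T> or a PriorityQueue<T> behind a common IQueue<T>, so the drain logic does not care which ordering it was given. A plausible sketch of that abstraction and of the FIFO adapter is shown below; the real interface in the SDK may expose a different surface.

using System.Collections.Generic;

// Plausible sketch: a minimal queue abstraction plus the FIFO adapter.
internal interface IQueue <T>
{
    int Count { get; }
    void Enqueue(T item);
    T Dequeue();
    bool TryPeek(out T item);
}

internal sealed class QueueWrapper <T> : IQueue <T>
{
    private readonly Queue <T> _queue;

    public QueueWrapper(Queue <T> queue) => _queue = queue;

    public int Count => _queue.Count;

    public void Enqueue(T item) => _queue.Enqueue(item);

    public T Dequeue() => _queue.Dequeue();

    public bool TryPeek(out T item)
    {
        if (_queue.Count == 0)
        {
            item = default;
            return false;
        }

        item = _queue.Peek();
        return true;
    }
}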
Example #29
        public CrossPartitionRangePageAsyncEnumerator(
            IFeedRangeProvider feedRangeProvider,
            CreatePartitionRangePageAsyncEnumerator <TPage, TState> createPartitionRangeEnumerator,
            IComparer <PartitionRangePageAsyncEnumerator <TPage, TState> > comparer,
            int?maxConcurrency,
            CancellationToken cancellationToken,
            CrossPartitionState <TState> state = default)
        {
            this.feedRangeProvider = feedRangeProvider ?? throw new ArgumentNullException(nameof(feedRangeProvider));
            this.createPartitionRangeEnumerator = createPartitionRangeEnumerator ?? throw new ArgumentNullException(nameof(createPartitionRangeEnumerator));
            this.cancellationToken = cancellationToken;

            this.lazyEnumerators = new AsyncLazy <IQueue <PartitionRangePageAsyncEnumerator <TPage, TState> > >(async(CancellationToken token) =>
            {
                IReadOnlyList <(FeedRangeInternal, TState)> rangeAndStates;
                if (state != default)
                {
                    rangeAndStates = state.Value;
                }
                else
                {
                    // Fan out to all partitions with default state
                    IEnumerable <FeedRangeInternal> ranges = await feedRangeProvider.GetFeedRangesAsync(token);

                    List <(FeedRangeInternal, TState)> rangesAndStatesBuilder = new List <(FeedRangeInternal, TState)>();
                    foreach (FeedRangeInternal range in ranges)
                    {
                        rangesAndStatesBuilder.Add((range, default));
                    }

                    rangeAndStates = rangesAndStatesBuilder;
                }

                List <BufferedPartitionRangePageAsyncEnumerator <TPage, TState> > bufferedEnumerators = rangeAndStates
                                                                                                        .Select(rangeAndState =>
                {
                    PartitionRangePageAsyncEnumerator <TPage, TState> enumerator = createPartitionRangeEnumerator(rangeAndState.Item1, rangeAndState.Item2);
                    BufferedPartitionRangePageAsyncEnumerator <TPage, TState> bufferedEnumerator = new BufferedPartitionRangePageAsyncEnumerator <TPage, TState>(enumerator, cancellationToken);
                    return(bufferedEnumerator);
                })
                                                                                                        .ToList();

                if (maxConcurrency.HasValue)
                {
                    await ParallelPrefetch.PrefetchInParallelAsync(bufferedEnumerators, maxConcurrency.Value, token);
                }

                IQueue <PartitionRangePageAsyncEnumerator <TPage, TState> > queue;
                if (comparer == null)
                {
                    queue = new QueueWrapper <PartitionRangePageAsyncEnumerator <TPage, TState> >(
                        new Queue <PartitionRangePageAsyncEnumerator <TPage, TState> >(bufferedEnumerators));
                }
                else
                {
                    queue = new PriorityQueueWrapper <PartitionRangePageAsyncEnumerator <TPage, TState> >(
                        new PriorityQueue <PartitionRangePageAsyncEnumerator <TPage, TState> >(
                            bufferedEnumerators,
                            comparer));
                }

                return(queue);
            });
        }
Example #30
        public void QueueWrapperEnqueueW100Threads()
        {
            QueueBase <string> queue = new QueueWrapper <string>();

            base.TestEnqueue(queue, 4);
        }