public void InsertSampleData()
{
    // Extra code to check whether sample data is already present. Not needed in real applications.
    var countDocs = _productCatalogMongoRepository.GetCount<Book>();
    if (countDocs < 1)
    {
        var books = new List<Book>();

        // Let's insert some meaningful data first.
        books.AddRange(getSampleBooks());

        var authors = _productCatalogMongoRepository.GetOptionSet<Author, DenormalizedReference>();
        var categories = _productCatalogMongoRepository.GetOptionSet<BookCategory, DenormalizedReference>();

        // Now let's add 5,000 more randomly generated documents.
        var randomValue = new Random();
        for (int count = 0; count < 5000; count++)
        {
            var book = new Book
            {
                Name = string.Format("RandomBook {0} {1}", randomValue.Next(10, 21), randomValue.Next(99, 100000)),
                Description = "Test Description",
                AvaliableCopies = randomValue.Next(30, 100),
                Author = authors[randomValue.Next(authors.Count)],
                Category = categories[randomValue.Next(categories.Count)]
            };
            books.Add(book);
        }

        // QueueInsert accepts a single Book, so queue each document individually.
        foreach (var book in books)
        {
            this.QueueInsert(book);
        }
    }
}
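For reference, the code above assumes entity shapes roughly like the following. This is a minimal sketch inferred from the properties used here and in the queuing code below; the real classes likely carry MongoDB mapping attributes and additional members.

// Sketch of the entity shapes the sample-data code assumes (inferred from usage).
// The Id property on Book is an assumption based on IMXEntity being used for RPC below.
public class DenormalizedReference
{
    public string DenormalizedId { get; set; }
    public string DenormalizedName { get; set; }
}

public class Book : IMXEntity
{
    public string Id { get; set; }
    public string Name { get; set; }
    public string Description { get; set; }
    public int AvaliableCopies { get; set; } // spelling kept as in the original code
    public DenormalizedReference Author { get; set; }
    public DenormalizedReference Category { get; set; }
}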
// Storing book information and then flowing it to the search engine is absolutely
// critical to me, hence this is queued through RabbitMQ.
string QueueInsert(Book entity)
{
    _productCatalogMongoRepository.SetDocumentDefaults(entity);

    // Get the Mongo entity Id first, then queue to the search engine. RPC-based queuing.
    var task = _queueClient.Bus.RequestAsync<IMXEntity, BookQueueResponse>(entity);
    task.ContinueWith(response =>
    {
        var searchDoc = new BookSearchDocument
        {
            Id = response.Result.Id,
            Title = entity.Name,
            Author = new MXSearchDenormalizedRefrence
            {
                DenormalizedId = entity.Author.DenormalizedId,
                DenormalizedName = entity.Author.DenormalizedName
            },
            Category = new MXSearchDenormalizedRefrence
            {
                DenormalizedId = entity.Category.DenormalizedId,
                DenormalizedName = entity.Category.DenormalizedName
            },
            AvaliableCopies = entity.AvaliableCopies
        };
        _queueClient.Bus.Publish<ISearchDocument>(searchDoc);
    });

    return "queued";
}
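For completeness, the other side of this RPC call might look roughly like the sketch below. It assumes the same EasyNetQ IBus is available on the consuming end; the repository and search-client calls (_repository.Insert, _searchClient.IndexDocument) are hypothetical placeholders, since the actual consumer is not shown here.

// Minimal sketch of the consuming side, under the assumptions stated above.
public void StartConsumers()
{
    // RPC responder: insert the entity into MongoDB and reply with its generated Id,
    // which QueueInsert then uses as the Id of the search document.
    _queueClient.Bus.Respond<IMXEntity, BookQueueResponse>(entity =>
    {
        var id = _repository.Insert(entity); // hypothetical repository call
        return new BookQueueResponse { Id = id };
    });

    // Pub/sub consumer: push published search documents into the search index.
    _queueClient.Bus.Subscribe<ISearchDocument>("search_indexer", doc =>
    {
        _searchClient.IndexDocument(doc); // hypothetical search-engine call
    });
}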