public void AddOrGetExistingStressTest()
{
    const int OuterIterations = 3;
    const int EntriesPerIteration = 500;

    for (var outer = 0; outer < OuterIterations; outer++)
    {
        var cache = new CollectionCache<string, object>();
        var startedAt = DateTimeOffset.UtcNow;

        // Insert 500 distinct keys and verify the batch stays under the time budget.
        for (var entry = 1; entry <= EntriesPerIteration; entry++)
        {
            cache.AddOrGetExisting("Test" + entry, new object());
        }

        var elapsedMs = (DateTimeOffset.UtcNow - startedAt).TotalMilliseconds;
        Assert.LessOrEqual(elapsedMs, 30);
    }
}
// Wires up a GlobalAddressResolver from its collaborators: the global endpoint
// manager, transport protocol, auth token provider, collection/routing-map caches,
// user agent container, service configuration reader, HTTP message handler, and
// API type. All are stored on fields for later use.
//
// maxEndpoints is sized as MaxBackupReadRegions (only when read-request fallback
// is enabled or unset on the connection policy; 0 otherwise) plus 2 — per the
// inline comment, one slot for the write endpoint and one for the alternate
// write endpoint used during failover.
//
// Finishes by eagerly creating an EndpointCache entry (via GetOrAddEndpoint) for
// every write and read endpoint currently known to the endpoint manager.
public GlobalAddressResolver( GlobalEndpointManager endpointManager, Protocol protocol, IAuthorizationTokenProvider tokenProvider, CollectionCache collectionCache, PartitionKeyRangeCache routingMapProvider, UserAgentContainer userAgentContainer, IServiceConfigurationReader serviceConfigReader, HttpMessageHandler messageHandler, ConnectionPolicy connectionPolicy, ApiType apiType) { this.endpointManager = endpointManager; this.protocol = protocol; this.tokenProvider = tokenProvider; this.userAgentContainer = userAgentContainer; this.collectionCache = collectionCache; this.routingMapProvider = routingMapProvider; this.serviceConfigReader = serviceConfigReader; this.messageHandler = messageHandler; this.apiType = apiType; int maxBackupReadEndpoints = !connectionPolicy.EnableReadRequestsFallback.HasValue || connectionPolicy.EnableReadRequestsFallback.Value ? GlobalAddressResolver.MaxBackupReadRegions : 0; this.maxEndpoints = maxBackupReadEndpoints + 2; // for write and alternate write endpoint (during failover) this.addressCacheByEndpoint = new ConcurrentDictionary <Uri, EndpointCache>(); foreach (Uri endpoint in endpointManager.WriteEndpoints) { this.GetOrAddEndpoint(endpoint); } foreach (Uri endpoint in endpointManager.ReadEndpoints) { this.GetOrAddEndpoint(endpoint); } }
// Resolves the partition key ranges that overlap the caller-provided EPK ranges
// for the given collection.
//
// Arguments are validated up front: a null/empty collectionResourceId, or a
// null/empty/null-containing providedRanges list, throws ArgumentNullException.
// (NOTE(review): an empty string/list would conventionally be ArgumentException —
// callers may depend on the current type, so left as-is.)
//
// If the routing map lookup returns null and the resource link is name-based,
// the collection cache entry for the link is refreshed — per the inline comment,
// this handles a collection deleted and re-created under the same name: this call
// still throws NotFoundException, but the next query succeeds against the
// refreshed cache. If ranges is still null, throws NotFoundException tagged with
// a UTC timestamp identifying the stale-cache failure.
internal override async Task <List <PartitionKeyRange> > GetTargetPartitionKeyRangesAsync( string resourceLink, string collectionResourceId, List <Range <string> > providedRanges) { if (string.IsNullOrEmpty(collectionResourceId)) { throw new ArgumentNullException(nameof(collectionResourceId)); } if (providedRanges == null || !providedRanges.Any() || providedRanges.Any(x => x == null)) { throw new ArgumentNullException(nameof(providedRanges)); } IRoutingMapProvider routingMapProvider = await this.GetRoutingMapProviderAsync(); List <PartitionKeyRange> ranges = await routingMapProvider.TryGetOverlappingRangesAsync(collectionResourceId, providedRanges); if (ranges == null && PathsHelper.IsNameBased(resourceLink)) { // Refresh the cache and don't try to re-resolve collection as it is not clear what already // happened based on previously resolved collection rid. // Return NotFoundException this time. Next query will succeed. // This can only happen if collection is deleted/created with same name and client was not restarted // in between. CollectionCache collectionCache = await this.documentClient.GetCollectionCacheAsync(); collectionCache.Refresh(resourceLink); } if (ranges == null) { throw new NotFoundException($"{DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture)}: GetTargetPartitionKeyRanges(collectionResourceId:{collectionResourceId}, providedRanges: {string.Join(",", providedRanges)} failed due to stale cache"); } return(ranges); }
// Wires up a GlobalAddressResolver (this variant additionally takes a
// GlobalPartitionEndpointManager, a CosmosHttpClient, and an
// ICosmosAuthorizationTokenProvider). All collaborators are stored on fields,
// and the connection policy's EnableTcpConnectionEndpointRediscovery flag is
// captured as well.
//
// maxEndpoints is sized as MaxBackupReadRegions (only when read-request fallback
// is enabled or unset on the connection policy; 0 otherwise) plus 2 — per the
// inline comment, one slot for the write endpoint and one for the alternate
// write endpoint used during failover.
//
// Finishes by eagerly creating an EndpointCache entry (via GetOrAddEndpoint) for
// every write and read endpoint currently known to the endpoint manager.
public GlobalAddressResolver( GlobalEndpointManager endpointManager, GlobalPartitionEndpointManager partitionKeyRangeLocationCache, Protocol protocol, ICosmosAuthorizationTokenProvider tokenProvider, CollectionCache collectionCache, PartitionKeyRangeCache routingMapProvider, IServiceConfigurationReader serviceConfigReader, ConnectionPolicy connectionPolicy, CosmosHttpClient httpClient) { this.endpointManager = endpointManager; this.partitionKeyRangeLocationCache = partitionKeyRangeLocationCache; this.protocol = protocol; this.tokenProvider = tokenProvider; this.collectionCache = collectionCache; this.routingMapProvider = routingMapProvider; this.serviceConfigReader = serviceConfigReader; this.httpClient = httpClient; int maxBackupReadEndpoints = !connectionPolicy.EnableReadRequestsFallback.HasValue || connectionPolicy.EnableReadRequestsFallback.Value ? GlobalAddressResolver.MaxBackupReadRegions : 0; this.enableTcpConnectionEndpointRediscovery = connectionPolicy.EnableTcpConnectionEndpointRediscovery; this.maxEndpoints = maxBackupReadEndpoints + 2; // for write and alternate write endpoint (during failover) this.addressCacheByEndpoint = new ConcurrentDictionary <Uri, EndpointCache>(); foreach (Uri endpoint in endpointManager.WriteEndpoints) { this.GetOrAddEndpoint(endpoint); } foreach (Uri endpoint in endpointManager.ReadEndpoints) { this.GetOrAddEndpoint(endpoint); } }
// Registers the VPack serializers/deserializers used by the driver on the given
// builder and JSON parser: a field-naming strategy, a CollectionCache-backed
// deserializer for the ID field (both binary and JSON forms), serializers for
// request/auth/collection/document/traversal/log-level types, and matching
// deserializers, including a date-string deserializer for QueryEntity's
// "started" property.
//
// NOTE(review): this appears to be Java transpiled to C# (lowercase method
// names, numbered anonymous-class types like _VPackDeserializer_74); keep the
// registration order as-is unless the builder is confirmed order-insensitive.
public static void configure(VPack.Builder builder, VPackParser vpackParser, CollectionCache cache) { builder.fieldNamingStrategy(new _VPackFieldNamingStrategy_64()); builder.registerDeserializer(ID, typeof(string), new _VPackDeserializer_74(cache)); vpackParser.registerDeserializer(ID, ValueType.CUSTOM, new _VPackJsonDeserializer_97(cache)); builder.registerSerializer(typeof(Request ), VPackSerializers.REQUEST); builder.registerSerializer(typeof(AuthenticationRequest ), VPackSerializers.AUTH_REQUEST); builder.registerSerializer(typeof(CollectionType ), VPackSerializers.COLLECTION_TYPE); builder.registerSerializer(typeof(BaseDocument ), VPackSerializers.BASE_DOCUMENT); builder.registerSerializer(typeof(BaseEdgeDocument ), VPackSerializers.BASE_EDGE_DOCUMENT); builder.registerSerializer(typeof(TraversalOptions.Order ), VPackSerializers.TRAVERSAL_ORDER); builder.registerSerializer(typeof(LogLevel ), VPackSerializers.LOG_LEVEL); builder.registerDeserializer(typeof(Response ), VPackDeserializers.RESPONSE); builder.registerDeserializer(typeof(CollectionType ), VPackDeserializers.COLLECTION_TYPE); builder.registerDeserializer(typeof(CollectionStatus ), VPackDeserializers.COLLECTION_STATUS); builder.registerDeserializer(typeof(BaseDocument ), VPackDeserializers.BASE_DOCUMENT); builder.registerDeserializer(typeof(BaseEdgeDocument ), VPackDeserializers.BASE_EDGE_DOCUMENT); builder.registerDeserializer(QueryEntity.PROPERTY_STARTED, typeof(System.DateTime), VPackDeserializers. DATE_STRING); builder.registerDeserializer(typeof(LogLevel ), VPackDeserializers.LOG_LEVEL); }
public void AddOrGetExistingAsynctressWhenFastTimeoutTest()
{
    const int OuterIterations = 3;
    const int EntryCount = 500;

    for (var outer = 0; outer < OuterIterations; outer++)
    {
        var seedEntries = new Dictionary<object, object>();
        var cache = new CollectionCache<object, object>();

        // Build the key/value pairs first so only the cache inserts are timed.
        for (var entry = 1; entry <= EntryCount; entry++)
        {
            seedEntries.Add(new object(), new object());
        }

        var startedAt = DateTimeOffset.UtcNow;
        Parallel.ForEach(
            seedEntries,
            pair => cache.AddOrGetExisting(pair.Key, pair.Value, DateTimeOffset.UtcNow.AddMilliseconds(50)));
        var elapsedMs = (DateTimeOffset.UtcNow - startedAt).TotalMilliseconds;

        Assert.LessOrEqual(elapsedMs, 50);
    }
}
public void AddOrGetExistingTest()
{
    const string KEY = "Test";
    var cache = new CollectionCache<string, object>();
    var expected = new object();

    // First call inserts the value; a repeated call must return the same value.
    var firstResult = cache.AddOrGetExisting(KEY, expected);
    Assert.AreEqual(expected, firstResult);

    var secondResult = cache.AddOrGetExisting(KEY, expected);
    Assert.AreEqual(expected, secondResult);
}
public void UpdateWhenCacheItemPolicyTest()
{
    const string KEY = "Test";
    var cache = new CollectionCache<string, object>();

    var original = new object();
    var stored = cache.AddOrGetExisting(KEY, original);
    Assert.AreEqual(original, stored);

    // Replace the entry with a new value under a 20 ms absolute-expiration policy.
    var replacement = new object();
    var policy = new CacheItemPolicy { AbsoluteExpiration = DateTimeOffset.UtcNow.AddMilliseconds(20) };
    var updated = cache.Update(KEY, replacement, policy);
    Assert.AreNotEqual(stored, updated);
    Assert.AreEqual(replacement, updated);

    // Wait past the expiration and confirm the updated value is gone.
    Thread.Sleep(50);
    Assert.IsNull(cache.Get(KEY).FirstOrDefault(item => item == updated));
}
// Builds the query execution context for an item query.
//
// For collection-child resources, resolves the container settings through the
// collection cache using a locally created Invalid-token request (which, per the
// inline comment, never goes to the server), and — when the request options name
// PartitionKey.None — rewrites the options' partition key to the container's
// "none" value. Throws ArgumentException if no container was resolved, then
// records the container's resource id on the query context.
//
// If the query client reports ByPassQueryParsing() (per the comments: non-Windows
// .NET Core platforms and 32-bit hosts, where ServiceInterop is unavailable),
// returns a CosmosProxyItemQueryExecutionContext that routes the query through
// Gateway. Otherwise computes the partitioned query execution info, resolves the
// target partition key ranges, and creates a specialized execution context.
// The existing TODO notes this may rely on stale collection-cache data when a
// collection is deleted and re-created under the same name.
private async Task <IDocumentQueryExecutionContext> CreateItemQueryExecutionContextAsync(CancellationToken cancellationToken) { cancellationToken.ThrowIfCancellationRequested(); CosmosContainerSettings collection = null; if (this.cosmosQueryContext.ResourceTypeEnum.IsCollectionChild()) { CollectionCache collectionCache = await this.cosmosQueryContext.QueryClient.GetCollectionCacheAsync(); using ( DocumentServiceRequest request = DocumentServiceRequest.Create( OperationType.Query, this.cosmosQueryContext.ResourceTypeEnum, this.cosmosQueryContext.ResourceLink.OriginalString, AuthorizationTokenType.Invalid)) //this request doesn't actually go to server { collection = await collectionCache.ResolveCollectionAsync(request, cancellationToken); } if (this.cosmosQueryContext.QueryRequestOptions != null && this.cosmosQueryContext.QueryRequestOptions.PartitionKey != null && this.cosmosQueryContext.QueryRequestOptions.PartitionKey.Equals(PartitionKey.None)) { this.cosmosQueryContext.QueryRequestOptions.PartitionKey = PartitionKey.FromInternalKey(collection.GetNoneValue()); } } if (collection == null) { throw new ArgumentException($"The container was not found for resource: {this.cosmosQueryContext.ResourceLink.OriginalString} "); } this.cosmosQueryContext.ContainerResourceId = collection.ResourceId; // For non-Windows platforms(like Linux and OSX) in .NET Core SDK, we cannot use ServiceInterop, so need to bypass in that case. 
// We are also now bypassing this for 32 bit host process running even on Windows as there are many 32 bit apps that will not work without this if (this.cosmosQueryContext.QueryClient.ByPassQueryParsing()) { // We create a ProxyDocumentQueryExecutionContext that will be initialized with DefaultDocumentQueryExecutionContext // which will be used to send the query to Gateway and on getting 400(bad request) with 1004(cross partition query not servable), we initialize it with // PipelinedDocumentQueryExecutionContext by providing the partition query execution info that's needed(which we get from the exception returned from Gateway). CosmosProxyItemQueryExecutionContext proxyQueryExecutionContext = new CosmosProxyItemQueryExecutionContext( queryContext: this.cosmosQueryContext, containerSettings: collection); return(proxyQueryExecutionContext); } //todo:elasticcollections this may rely on information from collection cache which is outdated //if collection is deleted/created with same name. //need to make it not rely on information from collection cache. PartitionedQueryExecutionInfo partitionedQueryExecutionInfo = await GetPartitionedQueryExecutionInfoAsync( queryClient : this.cosmosQueryContext.QueryClient, sqlQuerySpec : this.cosmosQueryContext.SqlQuerySpec, partitionKeyDefinition : collection.PartitionKey, requireFormattableOrderByQuery : true, isContinuationExpected : true, allowNonValueAggregateQuery : this.cosmosQueryContext.AllowNonValueAggregateQuery, cancellationToken : cancellationToken); List <PartitionKeyRange> targetRanges = await GetTargetPartitionKeyRanges( this.cosmosQueryContext.QueryClient, this.cosmosQueryContext.ResourceLink.OriginalString, partitionedQueryExecutionInfo, collection, this.cosmosQueryContext.QueryRequestOptions); return(await CreateSpecializedDocumentQueryExecutionContext( this.cosmosQueryContext, partitionedQueryExecutionInfo, targetRanges, collection.ResourceId, cancellationToken)); }
public void UpdateStressAsyncTest()
{
    const int OuterIterations = 3;
    const int EntryCount = 500;

    // Seed the cache (and a mirror dictionary of its keys) once up front.
    var seedEntries = new Dictionary<string, object>();
    var cache = new CollectionCache<string, object>();
    for (var entry = 1; entry <= EntryCount; entry++)
    {
        var key = "Test" + entry;
        var value = new object();
        cache.AddOrGetExisting(key, value);
        seedEntries.Add(key, value);
    }

    // Each iteration updates every key in parallel and must finish within budget.
    for (var outer = 0; outer < OuterIterations; outer++)
    {
        var startedAt = DateTimeOffset.UtcNow;
        Parallel.ForEach(seedEntries, pair =>
        {
            var replacement = new object();
            cache.Update(pair.Key, replacement);
        });
        var elapsedMs = (DateTimeOffset.UtcNow - startedAt).TotalMilliseconds;
        Assert.LessOrEqual(elapsedMs, 150);
    }
}
// Refreshes every character-related UI panel for the active player, but only when
// that player has a leader. Sections, in order:
//  - Current leader: character data, active flag, and a cognomen link built into
//    a pooled StringBuilder (CollectionCache.GetStringBuilderScoped).
//  - Spouses: one tab/slot per living spouse with opinion data; the list count is
//    clamped to at least 1, and a single empty spouse slot is emitted when none
//    are alive.
//  - Heir: the player's findHeir() result, if any, plus the heir slot on the
//    leader tag (deactivated when there is no heir).
//  - Council: one slot per council type, flagged by isCouncilUnlock.
//  - Other characters (pooled List<int> scopes from CollectionCache):
//      * Heirs from the succession list (living only),
//      * Courtiers: active characters that are courtiers and not
//        leader/spouse/heir,
//      * Others: active characters that are neither courtiers, nor
//        leader/spouse/heir, nor successors.
//    Each sub-list sets its Count/IsActive attributes; mCharacters is shown only
//    if at least one sub-list is non-empty.
// Finally clears the "CurrentNation-ShowNation" flag.
//
// NOTE(review): statement order matters here (tags are indexed by running
// counters such as iLivingSpouses/numHeirs); do not reorder sections.
protected override void updateCharacters() { Player pActivePlayer = mManager.activePlayer(); if (pActivePlayer.hasLeader()) { UIAttributeTag leaderTag = mPlayerFamily.GetSubTag("-CurrentLeader"); UIAttributeTag characterTabTag = ui.GetUIAttributeTag("TabPanel-Character"); Character playerLeader = pActivePlayer.leader(); updateCharacterData(leaderTag, playerLeader); leaderTag.IsActive = true; using (var cognomenSB = CollectionCache.GetStringBuilderScoped()) { HelpText.buildCognomenLink(cognomenSB.Value, pActivePlayer.leader().getCognomen(), pActivePlayer.leader()); mPlayerFamily.SetKey("CurrentLeader-Cognomen", cognomenSB.Value); } int iLivingSpouses = 0; for (int iLoopSpouse = 0; iLoopSpouse < playerLeader.getSpouses().Count; iLoopSpouse++) { Character pLoopCharacter = Game.character(playerLeader.getSpouses()[iLoopSpouse]); if (pLoopCharacter.isAlive()) { UIAttributeTag leaderSpouseTag = characterTabTag.GetSubTag("-Spouse", iLivingSpouses); int iOpinion = pActivePlayer.calculateCharacterOpinionRate(pLoopCharacter); updateCharacterData(leaderSpouseTag, pLoopCharacter, iOpinion, pActivePlayer.hasCharacterOpinionValues(pLoopCharacter), Infos.Helpers.getOpinionCharacterFromRate(iOpinion)); UIAttributeTag spouseSlotTag = leaderTag.GetSubTag("-Spouse", iLivingSpouses); updateCharacterSlotData(spouseSlotTag, pLoopCharacter, RoleType.SPOUSE, (pLoopCharacter != null)); iLivingSpouses++; } } ui.SetUIAttribute("SpouseList-Count", Math.Max(iLivingSpouses, 1).ToStringCached()); characterTabTag.SetBool("Spouse-IsActive", iLivingSpouses > 0); if (iLivingSpouses == 0) { UIAttributeTag spouseSlotTag = leaderTag.GetSubTag("-Spouse", 0); updateCharacterSlotData(spouseSlotTag, null, RoleType.SPOUSE, false); } //Heir { Character pNextLeader = pActivePlayer.findHeir(); if (pNextLeader != null) { UIAttributeTag nextLeaderTag = characterTabTag.GetSubTag("-Heir"); int iOpinion = pActivePlayer.calculateCharacterOpinionRate(pNextLeader); updateCharacterData(nextLeaderTag, pNextLeader, 
iOpinion, pActivePlayer.hasCharacterOpinionValues(pNextLeader), Infos.Helpers.getOpinionCharacterFromRate(iOpinion)); nextLeaderTag.IsActive = true; } UIAttributeTag heirSlotTag = leaderTag.GetSubTag("-Heir"); updateCharacterSlotData(heirSlotTag, pNextLeader, RoleType.HEIR, (pNextLeader != null)); } //Council { for (CouncilType eLoopCouncil = 0; eLoopCouncil < Infos.councilsNum(); eLoopCouncil++) { UIAttributeTag councilListTag = leaderTag.GetSubTag("-CouncilList", (int)eLoopCouncil); Character pCharacter = pActivePlayer.councilCharacter(eLoopCouncil); updateCharacterSlotData(councilListTag, pCharacter, RoleType.COUNCIL, pActivePlayer.isCouncilUnlock(eLoopCouncil), eLoopCouncil); } mPlayerFamily.SetInt("CurrentLeader-CouncilList-Count", (int)Infos.councilsNum()); } //Other Characters using (var characterList = CollectionCache.GetListScoped <int>()) { bool showCharacters = false; List <int> successionList = characterList.Value; pActivePlayer.findSuccessionList(successionList); //Heirs { int numHeirs = 0; foreach (int iLoopHeir in successionList) { Character pLoopHeir = Game.character(iLoopHeir); if (pLoopHeir.isAlive()) { UIAttributeTag currentLeaderHeirListTag = characterTabTag.GetSubTag("-HeirList", numHeirs); int iOpinion = pActivePlayer.calculateCharacterOpinionRate(pLoopHeir); updateCharacterData(currentLeaderHeirListTag, pLoopHeir, iOpinion, pActivePlayer.hasCharacterOpinionValues(pLoopHeir), Infos.Helpers.getOpinionCharacterFromRate(iOpinion)); numHeirs++; } } characterTabTag.SetInt("HeirList-Count", numHeirs); characterTabTag.SetBool("HeirList-IsActive", numHeirs > 0); showCharacters |= numHeirs > 0; } //Court { int numCourtiers = 0; using (var charListScoped = CollectionCache.GetListScoped <int>()) { pActivePlayer.getActiveCharacters(charListScoped.Value); foreach (int iLoopCourtier in charListScoped.Value) { Character pLoopCourtier = Game.character(iLoopCourtier); if (pLoopCourtier.isCourtier() && !pLoopCourtier.isLeaderOrSpouseOrHeir()) { UIAttributeTag 
currentLeaderCourtierListTag = characterTabTag.GetSubTag("-CourtierList", numCourtiers); int iOpinion = pActivePlayer.calculateCharacterOpinionRate(pLoopCourtier); updateCharacterData(currentLeaderCourtierListTag, pLoopCourtier, iOpinion, pActivePlayer.hasCharacterOpinionValues(pLoopCourtier), Infos.Helpers.getOpinionCharacterFromRate(iOpinion)); numCourtiers++; } } } characterTabTag.SetInt("CourtierList-Count", numCourtiers); characterTabTag.SetBool("CourtierList-IsActive", numCourtiers > 0); showCharacters |= numCourtiers > 0; } //Others { int numOthers = 0; using (var charListScoped = CollectionCache.GetListScoped <int>()) { pActivePlayer.getActiveCharacters(charListScoped.Value); foreach (int iLoopOthers in charListScoped.Value) { Character pLoopOthers = Game.character(iLoopOthers); if (!pLoopOthers.isCourtier() && !pLoopOthers.isLeaderOrSpouseOrHeir() && !pLoopOthers.isSuccessor()) { UIAttributeTag currentLeaderOtherListTag = characterTabTag.GetSubTag("-OtherList", numOthers); int iOpinion = pActivePlayer.calculateCharacterOpinionRate(pLoopOthers); updateCharacterData(currentLeaderOtherListTag, pLoopOthers, iOpinion, pActivePlayer.hasCharacterOpinionValues(pLoopOthers), Infos.Helpers.getOpinionCharacterFromRate(iOpinion)); numOthers++; } } } characterTabTag.SetInt("OtherList-Count", numOthers); characterTabTag.SetBool("OtherList-IsActive", numOthers > 0); showCharacters |= numOthers > 0; } mCharacters.IsActive = showCharacters; } mPlayerFamily.SetBool("CurrentNation-ShowNation", false); } }
// Captures the CollectionCache this JSON deserializer will use (the same cache
// passed to configure(); see the registration for ValueType.CUSTOM on the ID
// field).
public _VPackJsonDeserializer_97(CollectionCache cache) { this.cache = cache; }
// Executes one attempt of a feed/query request with partition routing.
//
// A fresh DocumentServiceRequest is created per attempt (per the inline comment,
// requests are not reused across retries because retry leaves garbage in
// RequestContext). Fast paths, in order:
//  1. If a partition key header is already set, or the resource type is not
//     partitioned, the request is executed directly.
//  2. If a PartitionKeyRangeId was supplied, the request is routed to that
//     range of the resolved collection and executed.
//  3. If CustomTypeExtensions.ByPassQueryParsing() (per the comments: non-Windows
//     .NET Core, or 32-bit NETFX hosts, where the 64-bit ServiceInterop dll is
//     unavailable), the request is forced through Gateway.
//
// Otherwise the query partition provider and routing map are used to resolve the
// target partition range from any continuation token. If that fails on a
// name-based request, the collection is re-resolved with ForceNameCacheRefresh
// and the resolution retried once; a second failure throws NotFoundException.
// After execution, the resolved range is folded into the response continuation
// token via TryAddPartitionKeyRangeToContinuationTokenAsync; failure there means
// the resolved collection no longer exists and also throws NotFoundException.
//
// NOTE(review): ResolveCollectionAsync is called with CancellationToken.None
// rather than the method's token — presumably deliberate so cache resolution
// isn't cancelled mid-flight, but confirm before changing.
private async Task <FeedResponse <dynamic> > ExecuteOnceAsync(IDocumentClientRetryPolicy retryPolicyInstance, CancellationToken cancellationToken) { // Don't reuse request, as the rest of client SDK doesn't reuse requests between retries. // The code leaves some temporary garbage in request (in RequestContext etc.), // which shold be erased during retries. using (DocumentServiceRequest request = await this.CreateRequestAsync()) { if (retryPolicyInstance != null) { retryPolicyInstance.OnBeforeSendRequest(request); } if (!string.IsNullOrEmpty(request.Headers[HttpConstants.HttpHeaders.PartitionKey]) || !request.ResourceType.IsPartitioned()) { return(await this.ExecuteRequestAsync(request, cancellationToken)); } CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync(); CosmosContainerSettings collection = await collectionCache.ResolveCollectionAsync(request, CancellationToken.None); if (!string.IsNullOrEmpty(base.PartitionKeyRangeId)) { request.RouteTo(new PartitionKeyRangeIdentity(collection.ResourceId, base.PartitionKeyRangeId)); return(await this.ExecuteRequestAsync(request, cancellationToken)); } // For non-Windows platforms(like Linux and OSX) in .NET Core SDK, we cannot use ServiceInterop for parsing the query, // so forcing the request through Gateway. We are also now by-passing this for 32-bit host process in NETFX on Windows // as the ServiceInterop dll is only available in 64-bit. 
if (CustomTypeExtensions.ByPassQueryParsing()) { request.UseGatewayMode = true; return(await this.ExecuteRequestAsync(request, cancellationToken)); } QueryPartitionProvider queryPartitionProvider = await this.Client.GetQueryPartitionProviderAsync(cancellationToken); IRoutingMapProvider routingMapProvider = await this.Client.GetRoutingMapProviderAsync(); List <CompositeContinuationToken> suppliedTokens; Range <string> rangeFromContinuationToken = this.partitionRoutingHelper.ExtractPartitionKeyRangeFromContinuationToken(request.Headers, out suppliedTokens); Tuple <PartitionRoutingHelper.ResolvedRangeInfo, IReadOnlyList <Range <string> > > queryRoutingInfo = await this.TryGetTargetPartitionKeyRangeAsync( request, collection, queryPartitionProvider, routingMapProvider, rangeFromContinuationToken, suppliedTokens); if (request.IsNameBased && queryRoutingInfo == null) { request.ForceNameCacheRefresh = true; collection = await collectionCache.ResolveCollectionAsync(request, CancellationToken.None); queryRoutingInfo = await this.TryGetTargetPartitionKeyRangeAsync( request, collection, queryPartitionProvider, routingMapProvider, rangeFromContinuationToken, suppliedTokens); } if (queryRoutingInfo == null) { throw new NotFoundException($"{DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture)}: Was not able to get queryRoutingInfo even after resolve collection async with force name cache refresh to the following collectionRid: {collection.ResourceId} with the supplied tokens: {JsonConvert.SerializeObject(suppliedTokens)}"); } request.RouteTo(new PartitionKeyRangeIdentity(collection.ResourceId, queryRoutingInfo.Item1.ResolvedRange.Id)); FeedResponse <dynamic> response = await this.ExecuteRequestAsync(request, cancellationToken); if (!await this.partitionRoutingHelper.TryAddPartitionKeyRangeToContinuationTokenAsync( response.Headers, providedPartitionKeyRanges: queryRoutingInfo.Item2, routingMapProvider: routingMapProvider, collectionRid: collection.ResourceId, 
resolvedRangeInfo: queryRoutingInfo.Item1)) { // Collection to which this request was resolved doesn't exist. // Retry policy will refresh the cache and return NotFound. throw new NotFoundException($"{DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture)}: Call to TryAddPartitionKeyRangeToContinuationTokenAsync failed to the following collectionRid: {collection.ResourceId} with the supplied tokens: {JsonConvert.SerializeObject(suppliedTokens)}"); } return(response); } }
public void CreateCacheEntryChangeMonitorTest()
{
    var cache = new CollectionCache<object, object>();

    // Change monitors are not supported by this cache implementation.
    Assert.Throws<NotSupportedException>(
        () => cache.CreateCacheEntryChangeMonitor(new List<object>()));
}
public void ContainsWhenRegionNameTest()
{
    const string REGION = "Test";
    var cache = new CollectionCache<object, object>();
    var key = new object();
    var value = new object();

    var stored = cache.AddOrGetExisting(key, value, REGION);
    Assert.AreEqual(value, stored);

    // The entry must be reported as present within the same region.
    Assert.IsTrue(cache.Contains(key, value, REGION));
}
public void ContainsWhenFalseTest()
{
    const string KEY = "Test";
    const string MISSING_KEY = "Test2";
    var cache = new CollectionCache<string, object>();
    var value = new object();

    var stored = cache.AddOrGetExisting(KEY, value);
    Assert.AreEqual(value, stored);

    // A key that was never inserted must not be reported as present.
    Assert.IsFalse(cache.Contains(MISSING_KEY, value));
}
public void AddOrUpdateExistingWithObjectTest()
{
    var cache = new CollectionCache<object, object>();
    var key = typeof(object);
    var initial = typeof(object);

    var stored = cache.AddOrGetExisting(key, initial, DateTimeOffset.UtcNow.AddMilliseconds(500));
    Assert.AreEqual(initial, stored);

    // Updating an existing entry must return the replacement value.
    var replacement = new KeyValuePair<object, object>(key, new object());
    var result = cache.AddOrUpdateExisting(key, replacement);
    Assert.AreEqual(result, replacement);
}
public void AddOrUpdateExistingWhenNotExistsTest()
{
    const string KEY = "Test";
    var cache = new CollectionCache<string, object>();

    // With no existing entry, AddOrUpdateExisting acts as an add and echoes the value.
    var value = new KeyValuePair<string, object>(KEY, new object());
    var result = cache.AddOrUpdateExisting(KEY, value);
    Assert.AreEqual(value, result);
}
public void AddOrGetExistingWhenCacheItemPolicyTest()
{
    var cache = new CollectionCache<object, object>();
    var key = new object();
    var value = new object();
    var policy = new CacheItemPolicy { AbsoluteExpiration = DateTimeOffset.UtcNow.AddMilliseconds(20) };

    var stored = cache.AddOrGetExisting(key, value, policy);
    Assert.AreEqual(value, stored);

    // Wait past the 20 ms absolute expiration and confirm the entry was evicted.
    Thread.Sleep(80);
    var remaining = cache.Get(key);
    Assert.IsEmpty(remaining);
}
// One-time bootstrap of the Redis-backed cache layer: constructs every cache
// (online users, evaluating mottos, verify codes, device signatures, user info,
// collections, sync roots) over the shared _redisHelper, registers each under
// its interface type in Caches, and subscribes the cache HandleEvent methods to
// the relevant domain events on _eventPublisher. Most content events fan out to
// both evaluatingMottoCache/collectionCache and userInfoCache.
//
// NOTE(review): the _inited guard is a plain read-then-write with no lock, so
// concurrent first calls could both run the body — assumes Initialize is invoked
// from a single thread during startup; confirm before relying on it elsewhere.
// Also note CreateCollectionEvent is only wired to userInfoCache; the
// collectionCache subscription is deliberately commented out.
public void Initialize() { if (_inited) { return; } _inited = true; IOnlineUserCache onlineUserCache = new OnlineUserCache(_redisHelper); IEvaluatingMottoCache evaluatingMottoCache = new EvaluatingMottoCache(_redisHelper); IVerifyCodeCache verifyCodeCache = new VerifyCodeCache(_redisHelper); IDeviceSignatureCache deviceSignCache = new DeviceSignatureCache(_redisHelper); IUserInfoCache userInfoCache = new UserInfoCache(_redisHelper); ICollectionCache collectionCache = new CollectionCache(_redisHelper); ISyncRootCache syncRootCache = new SyncRootCache(_redisHelper); Caches.Add(typeof(IEvaluatingMottoCache), evaluatingMottoCache); Caches.Add(typeof(IVerifyCodeCache), verifyCodeCache); Caches.Add(typeof(IDeviceSignatureCache), deviceSignCache); Caches.Add(typeof(IUserInfoCache), userInfoCache); Caches.Add(typeof(IOnlineUserCache), onlineUserCache); Caches.Add(typeof(ICollectionCache), collectionCache); Caches.Add(typeof(ISyncRootCache), syncRootCache); _eventPublisher.RegisterEventHandler <DeviceRegEvent>(deviceSignCache.HandleEvent); _eventPublisher.RegisterEventHandler <DisplayNoticeEvent>(deviceSignCache.HandleEvent); _eventPublisher.RegisterEventHandler <SendVerifyCodeEvent>(verifyCodeCache.HandleEvent); _eventPublisher.RegisterEventHandler <UserLoginEvent>(onlineUserCache.HandleEvent); _eventPublisher.RegisterEventHandler <LoadUserInfoEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CreateMottoEvent>(evaluatingMottoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CreateMottoEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CreateVoteEvent>(evaluatingMottoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CreateVoteEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <LoveMottoEvent>(evaluatingMottoCache.HandleEvent); _eventPublisher.RegisterEventHandler <LoveMottoEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler 
<UnloveMottoEvent>(evaluatingMottoCache.HandleEvent); _eventPublisher.RegisterEventHandler <UnloveMottoEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CreateReviewEvent>(evaluatingMottoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CreateReviewEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <RemoveReviewEvent>(evaluatingMottoCache.HandleEvent); _eventPublisher.RegisterEventHandler <RemoveReviewEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CreateCollectionEvent>(userInfoCache.HandleEvent); //eventPublisher.RegisterEventHandler<CreateCollectionEvent>(collectionCache.HandleEvent); _eventPublisher.RegisterEventHandler <CollectMottoEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <CollectMottoEvent>(collectionCache.HandleEvent); _eventPublisher.RegisterEventHandler <UnCollectMottoEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <UnCollectMottoEvent>(collectionCache.HandleEvent); _eventPublisher.RegisterEventHandler <LoveCollectionEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <LoveCollectionEvent>(collectionCache.HandleEvent); _eventPublisher.RegisterEventHandler <UnLoveCollectionEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <UnLoveCollectionEvent>(collectionCache.HandleEvent); _eventPublisher.RegisterEventHandler <UpdateUserNameEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <UpdateUserThumbEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <UpdateSexEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <LoveUserEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <UnLoveUserEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <BanUserEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler <UnBanUserEvent>(userInfoCache.HandleEvent); _eventPublisher.RegisterEventHandler 
<SendPrivateMsgEvent>(userInfoCache.HandleEvent); }
// Factory for a document query execution context (v2-style SDK).
//
// Builds the shared InitParams, then — for collection-child resources — resolves
// the container settings through the collection cache using a locally created
// Invalid-token request (which, per the inline comment, never goes to the
// server). If CustomTypeExtensions.ByPassQueryParsing() (per the comments:
// non-Windows .NET Core platforms and 32-bit hosts, where ServiceInterop is
// unavailable), returns a ProxyDocumentQueryExecutionContext that routes the
// query through Gateway.
// NOTE(review): ProxyDocumentQueryExecutionContext.CreateAsync is assigned and
// returned without an await despite the Async suffix — verify its return type.
//
// Otherwise creates a DefaultDocumentQueryExecutionContext; when the resource is
// a partitioned collection child and either cross-partition query is enabled or
// no continuation is expected, computes the partitioned query execution info and,
// if ShouldCreateSpecializedDocumentQueryExecutionContext says so, resolves the
// target partition key ranges (from an explicit PartitionKeyRangeId, a supplied
// partition key's effective-EPK point range, or the query's own ranges) and
// returns the specialized context. Falls back to the default context otherwise.
// The existing TODO notes the collection-cache data may be stale when a
// collection is deleted and re-created under the same name.
public static async Task <IDocumentQueryExecutionContext> CreateDocumentQueryExecutionContextAsync( IDocumentQueryClient client, ResourceType resourceTypeEnum, Type resourceType, Expression expression, FeedOptions feedOptions, string resourceLink, bool isContinuationExpected, CancellationToken token, Guid correlatedActivityId) { DocumentQueryExecutionContextBase.InitParams constructorParams = new DocumentQueryExecutionContextBase.InitParams( client, resourceTypeEnum, resourceType, expression, feedOptions, resourceLink, false, correlatedActivityId); CosmosContainerSettings collection = null; if (resourceTypeEnum.IsCollectionChild()) { CollectionCache collectionCache = await client.GetCollectionCacheAsync(); using ( DocumentServiceRequest request = DocumentServiceRequest.Create( OperationType.Query, resourceTypeEnum, resourceLink, AuthorizationTokenType.Invalid)) //this request doesnt actually go to server { collection = await collectionCache.ResolveCollectionAsync(request, token); } } // For non-Windows platforms(like Linux and OSX) in .NET Core SDK, we cannot use ServiceInterop, so need to bypass in that case. // We are also now bypassing this for 32 bit host process running even on Windows as there are many 32 bit apps that will not work without this if (CustomTypeExtensions.ByPassQueryParsing()) { // We create a ProxyDocumentQueryExecutionContext that will be initialized with DefaultDocumentQueryExecutionContext // which will be used to send the query to Gateway and on getting 400(bad request) with 1004(cross partition query not servable), we initialize it with // PipelinedDocumentQueryExecutionContext by providing the partition query execution info that's needed(which we get from the exception returned from Gateway). 
ProxyDocumentQueryExecutionContext proxyQueryExecutionContext = ProxyDocumentQueryExecutionContext.CreateAsync( client, resourceTypeEnum, resourceType, expression, feedOptions, resourceLink, token, collection, isContinuationExpected, correlatedActivityId); return(proxyQueryExecutionContext); } DefaultDocumentQueryExecutionContext queryExecutionContext = await DefaultDocumentQueryExecutionContext.CreateAsync( constructorParams, isContinuationExpected, token); // If isContinuationExpected is false, we want to check if there are aggregates. if ( resourceTypeEnum.IsCollectionChild() && resourceTypeEnum.IsPartitioned() && (feedOptions.EnableCrossPartitionQuery || !isContinuationExpected)) { //todo:elasticcollections this may rely on information from collection cache which is outdated //if collection is deleted/created with same name. //need to make it not rely on information from collection cache. PartitionedQueryExecutionInfo partitionedQueryExecutionInfo = await queryExecutionContext.GetPartitionedQueryExecutionInfoAsync( collection.PartitionKey, true, isContinuationExpected, token); if (DocumentQueryExecutionContextFactory.ShouldCreateSpecializedDocumentQueryExecutionContext( resourceTypeEnum, feedOptions, partitionedQueryExecutionInfo, collection.PartitionKey, isContinuationExpected)) { List <PartitionKeyRange> targetRanges; if (!string.IsNullOrEmpty(feedOptions.PartitionKeyRangeId)) { targetRanges = new List <PartitionKeyRange> { await queryExecutionContext.GetTargetPartitionKeyRangeById( collection.ResourceId, feedOptions.PartitionKeyRangeId) }; } else { List <Range <string> > queryRanges = partitionedQueryExecutionInfo.QueryRanges; if (feedOptions.PartitionKey != null) { queryRanges = new List <Range <string> > { Range <string> .GetPointRange( feedOptions.PartitionKey.InternalKey.GetEffectivePartitionKeyString( collection.PartitionKey)) }; } targetRanges = await queryExecutionContext.GetTargetPartitionKeyRanges(collection.ResourceId, queryRanges); } return(await 
CreateSpecializedDocumentQueryExecutionContext( constructorParams, partitionedQueryExecutionInfo, targetRanges, collection.ResourceId, isContinuationExpected, token)); } } return(queryExecutionContext); }
public void CreateCacheEntryChangeMonitorWhenRegionNameTest()
{
    const string REGION = "Test";
    var cache = new CollectionCache<object, object>();

    // Change monitors are not supported, even when a region name is supplied.
    Assert.Throws<NotSupportedException>(
        () => cache.CreateCacheEntryChangeMonitor(new List<object>(), REGION));
}
/// <summary>
/// Routes a feed/query request to a single partition key range: builds an EPK window from the
/// request properties (defaulting to the full key space), resolves the target range from that
/// window plus any continuation token, sends downstream, and stamps a composite continuation
/// token (range + backend token) onto successful responses.
/// </summary>
/// <param name="request">The request to route; its Trace is re-parented to a child trace.</param>
/// <param name="cancellationToken">Cancellation for the downstream send.</param>
/// <returns>The downstream response, or a NotFound response when routing info cannot be resolved.</returns>
public override async Task<ResponseMessage> SendAsync(
    RequestMessage request,
    CancellationToken cancellationToken)
{
    using (ITrace childTrace = request.Trace.StartChild(this.FullHandlerName, TraceComponent.RequestHandler, Tracing.TraceLevel.Info))
    {
        request.Trace = childTrace;
        ResponseMessage response = null;
        // Preserved so the caller-visible continuation can be restored on failure paths.
        string originalContinuation = request.Headers.ContinuationToken;
        try
        {
            RntdbEnumerationDirection rntdbEnumerationDirection = RntdbEnumerationDirection.Forward;
            if (request.Properties.TryGetValue(HttpConstants.HttpHeaders.EnumerationDirection, out object direction))
            {
                rntdbEnumerationDirection = (byte)direction == (byte)RntdbEnumerationDirection.Reverse ? RntdbEnumerationDirection.Reverse : RntdbEnumerationDirection.Forward;
            }

            // Force the backend to produce a continuation token for this request.
            request.Headers.Remove(HttpConstants.HttpHeaders.IsContinuationExpected);
            request.Headers.Add(HttpConstants.HttpHeaders.IsContinuationExpected, bool.TrueString);

            // Default the EPK bounds to the full partition key space when the properties
            // are absent — or present but null (hence the ??= after the TryGetValue pair).
            if (!request.Properties.TryGetValue(HandlerConstants.StartEpkString, out object startEpk))
            {
                startEpk = PartitionKeyInternal.MinimumInclusiveEffectivePartitionKey;
            }

            if (!request.Properties.TryGetValue(HandlerConstants.EndEpkString, out object endEpk))
            {
                endEpk = PartitionKeyInternal.MaximumExclusiveEffectivePartitionKey;
            }

            startEpk ??= PartitionKeyInternal.MinimumInclusiveEffectivePartitionKey;
            endEpk ??= PartitionKeyInternal.MaximumExclusiveEffectivePartitionKey;

            // Half-open EPK window [startEpk, endEpk).
            List<Range<string>> providedRanges = new List<Range<string>>
            {
                new Range<string>(
                    (string)startEpk,
                    (string)endEpk,
                    isMinInclusive: true,
                    isMaxInclusive: false)
            };

            DocumentServiceRequest serviceRequest = request.ToDocumentServiceRequest();

            PartitionKeyRangeCache routingMapProvider = await this.client.DocumentClient.GetPartitionKeyRangeCacheAsync();
            CollectionCache collectionCache = await this.client.DocumentClient.GetCollectionCacheAsync(NoOpTrace.Singleton);
            ContainerProperties collectionFromCache = await collectionCache.ResolveCollectionAsync(serviceRequest, CancellationToken.None);

            // Direction is not expected to change between continuations.
            Range<string> rangeFromContinuationToken = this.partitionRoutingHelper.ExtractPartitionKeyRangeFromContinuationToken(serviceRequest.Headers, out List<CompositeContinuationToken> suppliedTokens);

            ResolvedRangeInfo resolvedRangeInfo = await this.partitionRoutingHelper.TryGetTargetRangeFromContinuationTokenRangeAsync(
                providedPartitionKeyRanges: providedRanges,
                routingMapProvider: routingMapProvider,
                collectionRid: collectionFromCache.ResourceId,
                rangeFromContinuationToken: rangeFromContinuationToken,
                suppliedTokens: suppliedTokens,
                direction: rntdbEnumerationDirection);

            // Collection may have been deleted/recreated under the same name:
            // retry once with a forced name-cache refresh before giving up.
            if (serviceRequest.IsNameBased && resolvedRangeInfo.ResolvedRange == null && resolvedRangeInfo.ContinuationTokens == null)
            {
                serviceRequest.ForceNameCacheRefresh = true;
                collectionFromCache = await collectionCache.ResolveCollectionAsync(serviceRequest, CancellationToken.None);
                resolvedRangeInfo = await this.partitionRoutingHelper.TryGetTargetRangeFromContinuationTokenRangeAsync(
                    providedPartitionKeyRanges: providedRanges,
                    routingMapProvider: routingMapProvider,
                    collectionRid: collectionFromCache.ResourceId,
                    rangeFromContinuationToken: rangeFromContinuationToken,
                    suppliedTokens: suppliedTokens,
                    direction: rntdbEnumerationDirection);
            }

            if (resolvedRangeInfo.ResolvedRange == null && resolvedRangeInfo.ContinuationTokens == null)
            {
                return (((DocumentClientException)new NotFoundException(
                            $"{DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture)}: Was not able to get queryRoutingInfo even after resolve collection async with force name cache refresh to the following collectionRid: {collectionFromCache.ResourceId} with the supplied tokens: {JsonConvert.SerializeObject(suppliedTokens)}")
                        ).ToCosmosResponseMessage(request));
            }

            serviceRequest.RouteTo(new PartitionKeyRangeIdentity(collectionFromCache.ResourceId, resolvedRangeInfo.ResolvedRange.Id));

            response = await base.SendAsync(request, cancellationToken);
            if (!response.IsSuccessStatusCode)
            {
                this.SetOriginalContinuationToken(request, response, originalContinuation);
            }
            else
            {
                // Stamp the composite continuation (resolved range + backend token)
                // so the next page resumes from the right place.
                if (!await this.partitionRoutingHelper.TryAddPartitionKeyRangeToContinuationTokenAsync(
                        response.Headers.CosmosMessageHeaders,
                        providedPartitionKeyRanges: providedRanges,
                        routingMapProvider: routingMapProvider,
                        collectionRid: collectionFromCache.ResourceId,
                        resolvedRangeInfo: resolvedRangeInfo,
                        direction: rntdbEnumerationDirection))
                {
                    return (((DocumentClientException)new NotFoundException(
                                $"{DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture)}: Call to TryAddPartitionKeyRangeToContinuationTokenAsync failed to the following collectionRid: {collectionFromCache.ResourceId} with the supplied tokens: {JsonConvert.SerializeObject(suppliedTokens)}")
                            ).ToCosmosResponseMessage(request));
                }
            }

            return (response);
        }
        catch (DocumentClientException ex)
        {
            ResponseMessage errorResponse = ex.ToCosmosResponseMessage(request);
            this.SetOriginalContinuationToken(request, errorResponse, originalContinuation);
            return (errorResponse);
        }
        catch (CosmosException ex)
        {
            ResponseMessage errorResponse = ex.ToCosmosResponseMessage(request);
            this.SetOriginalContinuationToken(request, errorResponse, originalContinuation);
            return (errorResponse);
        }
        catch (AggregateException ex)
        {
            this.SetOriginalContinuationToken(request, response, originalContinuation);

            // TODO: because the SDK underneath this path uses ContinueWith or task.Result we need to catch AggregateExceptions here
            // in order to ensure that underlying DocumentClientExceptions get propagated up correctly. Once all ContinueWith and .Result
            // is removed this catch can be safely removed.
            AggregateException innerExceptions = ex.Flatten();
            Exception docClientException = innerExceptions.InnerExceptions.FirstOrDefault(innerEx => innerEx is DocumentClientException);
            if (docClientException != null)
            {
                return (((DocumentClientException)docClientException).ToCosmosResponseMessage(request));
            }

            throw;
        }
    }
}
/// <summary>Adding a single entry makes GetCount for that key report exactly one element.</summary>
public void GetCountTest()
{
    const string KEY = "Test";
    var _cache = new CollectionCache<string, object>();
    var _value = new object();

    var _stored = _cache.AddOrGetExisting(KEY, _value);

    Assert.AreEqual(_value, _stored);
    Assert.AreEqual(1, _cache.GetCount(KEY));
}
/// <summary>An entry added with an absolute expiration is gone once that expiration has passed.</summary>
public void AddOrGetExistingWhenAbsoluteExpirationTest()
{
    const string KEY = "Test";
    var _cache = new CollectionCache<string, object>();
    var _value = new object();
    var _expiry = DateTimeOffset.UtcNow.AddMilliseconds(20);

    var _stored = _cache.AddOrGetExisting(KEY, _value, _expiry);
    Assert.AreEqual(_value, _stored);

    // Wait well past the 20 ms expiration before reading back.
    Thread.Sleep(80);

    Assert.IsEmpty(_cache.Get(KEY));
}
/// <summary>GetCount on a key that was never added returns zero.</summary>
public void GetCountWhenNoElementsTest()
{
    var _missingKey = new object();
    var _cache = new CollectionCache<object, object>();

    Assert.AreEqual(0, _cache.GetCount(_missingKey));
}
/// <summary>Removing a region-scoped value leaves the key with no values.</summary>
public void RemoveWhenRegionNameTest()
{
    const string REGION = "Test";
    var _cache = new CollectionCache<object, object>();
    var _key = new object();
    var _value = new object();

    Assert.AreEqual(_value, _cache.AddOrGetExisting(_key, _value, REGION));

    _cache.Remove(_key, _value, REGION);

    Assert.IsEmpty(_cache.Get(_key));
}
/// <summary>GetCount without a region name does not see entries stored under a region.</summary>
public void GetCountWhenRegionAndNoElementsTest()
{
    const string REGION = "TestC";
    var _cache = new CollectionCache<object, object>();
    var _key = new object();
    var _pair = new KeyValuePair<object, object>(_key, new object());

    Assert.AreEqual(_pair, _cache.AddOrGetExisting(_key, _pair, REGION));

    Assert.AreEqual(0, _cache.GetCount(_key));
}
/// <summary>
/// Stress test: LOOPS2 updates against a pre-populated cache must complete within 150 ms,
/// repeated LOOPS1 times.
/// </summary>
public void UpdateStressTest()
{
    const int LOOPS1 = 3;
    const int LOOPS2 = 500;

    var _cache = new CollectionCache<string, object>();
    for (var _j = 1; _j <= LOOPS2; _j++)
    {
        _cache.AddOrGetExisting("Test" + _j, new object());
    }

    for (var _i = 0; _i < LOOPS1; _i++)
    {
        var _start = DateTimeOffset.UtcNow;
        for (var _j = 1; _j <= LOOPS2; _j++)
        {
            // Fix: the key was previously re-derived through an O(n) LINQ scan of a
            // shadow dictionary inside the timed loop (O(n^2) overall), so the timing
            // assertion measured the scan rather than cache.Update. The scan always
            // resolved to "Test" + _j anyway, so use that key directly.
            _cache.Update("Test" + _j, new object());
        }

        var _elapsedMs = (DateTimeOffset.UtcNow - _start).TotalMilliseconds;
        Assert.LessOrEqual(_elapsedMs, 150);
    }
}
/// <summary>GetCount with the matching region name sees the entry added under that region.</summary>
public void GetCountWhenRegionNameTest()
{
    const string REGION = "Test";
    var _cache = new CollectionCache<object, object>();
    var _key = new object();
    var _value = new object();

    Assert.AreEqual(_value, _cache.AddOrGetExisting(_key, _value, REGION));

    Assert.AreEqual(1, _cache.GetCount(_key, REGION));
}
/// <summary>Update replaces the stored value and returns the replacement.</summary>
public void UpdateTest()
{
    const string KEY = "Test";
    var _cache = new CollectionCache<string, object>();
    var _initial = new object();

    var _stored = _cache.AddOrGetExisting(KEY, _initial);
    Assert.AreEqual(_initial, _stored);

    var _replacement = new KeyValuePair<string, object>(KEY, new object());
    var _updated = _cache.Update(KEY, _replacement);

    Assert.AreNotEqual(_stored, _updated);
    Assert.AreEqual(_replacement, _updated);
}
/// <summary>
/// A freshly created cache yields an enumerator equal to that of an empty backing dictionary.
/// </summary>
public void GetEnumeratorTest()
{
    var _enumerator = new CollectionCache<object, object>().GetEnumerator();

    // Fix: NUnit convention is Assert.AreEqual(expected, actual); the original call had
    // the arguments reversed, which produces misleading failure messages.
    Assert.AreEqual(new Dictionary<object, IList<object>>.Enumerator(), _enumerator);
}
/// <summary>Update within a region replaces the stored value and returns the replacement.</summary>
public void UpdateWhenRegionNameTest()
{
    const string REGION = "Test";
    var _cache = new CollectionCache<object, object>();
    var _key = new object();
    var _initial = new object();

    var _stored = _cache.AddOrGetExisting(_key, _initial, REGION);
    Assert.AreEqual(_initial, _stored);

    var _replacement = new KeyValuePair<object, object>(_key, new object());
    var _updated = _cache.Update(_key, _replacement, REGION);

    Assert.AreNotEqual(_stored, _updated);
    Assert.AreEqual(_replacement, _updated);
}
/// <summary>Get returns the value previously stored under the key.</summary>
public void GetTest()
{
    const string KEY = "Test";
    var _cache = new CollectionCache<string, object>();
    var _value = new object();

    Assert.AreEqual(_value, _cache.AddOrGetExisting(KEY, _value));

    Assert.AreEqual(_value, _cache.Get(KEY).FirstOrDefault());
}
// NOTE(review): the return type is truncated at the start of this fragment (it continues
// from a preceding line) — presumably Communication<..., ConnectionSync>; confirm in the
// full source. Builds a synchronous communication channel from the builder's accumulated
// connection settings plus the supplied serializer and collection cache.
, ConnectionSync> build(VPack vpack, CollectionCache collectionCache)
{
    return (new CommunicationSync(host, port, timeout
                                  , user, password, useSsl, sslContext, vpack, collectionCache, chunksize));
}
/// <summary>GetValues returns entries only for the requested keys.</summary>
public void GetValuesWhenValueNotParsedTest()
{
    const string KEY1 = "Test1";
    const string KEY2 = "Test2";
    var _cache = new CollectionCache<string, object>();
    _cache.AddOrGetExisting(KEY1, new object());
    _cache.AddOrGetExisting(KEY2, new object());

    var _results = _cache.GetValues(new[] { KEY1 }).ToList();

    var _returnedKeys = _results.Select(_x => _x.Key).ToList();
    Assert.Contains(KEY1, _returnedKeys);
    Assert.IsFalse(_returnedKeys.Contains(KEY2));
}
/// <summary>
/// Resolves the partition key range a request targets, trying in order: the partition key
/// header, an explicit PartitionKeyRangeIdentity on the request, then a previously resolved
/// range. On a miss with a fresh cache it recurses once with refreshCache = true, since the
/// miss may be caused by a partition split.
/// </summary>
/// <returns>
/// Tuple of (resolution succeeded, resolved range); (false, null) when even a forced
/// refresh could not resolve a range.
/// </returns>
private static async Task<Tuple<bool, PartitionKeyRange>> TryResolvePartitionKeyRangeAsync(
    DocumentServiceRequest request,
    ISessionContainer sessionContainer,
    PartitionKeyRangeCache partitionKeyRangeCache,
    CollectionCache clientCollectionCache,
    bool refreshCache)
{
    if (refreshCache)
    {
        request.ForceMasterRefresh = true;
        request.ForceNameCacheRefresh = true;
    }

    PartitionKeyRange partitonKeyRange = null;
    ContainerProperties collection = await clientCollectionCache.ResolveCollectionAsync(
        request,
        CancellationToken.None,
        NoOpTrace.Singleton);

    string partitionKeyString = request.Headers[HttpConstants.HttpHeaders.PartitionKey];
    if (partitionKeyString != null)
    {
        CollectionRoutingMap collectionRoutingMap = await partitionKeyRangeCache.TryLookupAsync(
            collectionRid: collection.ResourceId,
            previousValue: null,
            request: request,
            cancellationToken: CancellationToken.None,
            NoOpTrace.Singleton);

        // Passing the stale map as previousValue forces the cache to fetch a fresh routing map.
        if (refreshCache && collectionRoutingMap != null)
        {
            collectionRoutingMap = await partitionKeyRangeCache.TryLookupAsync(
                collectionRid: collection.ResourceId,
                previousValue: collectionRoutingMap,
                request: request,
                cancellationToken: CancellationToken.None,
                NoOpTrace.Singleton);
        }

        partitonKeyRange = AddressResolver.TryResolveServerPartitionByPartitionKey(
            request: request,
            partitionKeyString: partitionKeyString,
            collectionCacheUptoDate: false,
            collection: collection,
            routingMap: collectionRoutingMap);
    }
    else if (request.PartitionKeyRangeIdentity != null)
    {
        PartitionKeyRangeIdentity partitionKeyRangeId = request.PartitionKeyRangeIdentity;
        partitonKeyRange = await partitionKeyRangeCache.TryGetPartitionKeyRangeByIdAsync(
            collection.ResourceId,
            partitionKeyRangeId.PartitionKeyRangeId,
            NoOpTrace.Singleton,
            refreshCache);
    }
    else if (request.RequestContext.ResolvedPartitionKeyRange != null)
    {
        partitonKeyRange = request.RequestContext.ResolvedPartitionKeyRange;
    }

    if (partitonKeyRange == null)
    {
        if (refreshCache)
        {
            return (new Tuple<bool, PartitionKeyRange>(false, null));
        }

        // Need to refresh cache. Maybe split happened — recurse exactly once with a forced refresh.
        return (await GatewayStoreModel.TryResolvePartitionKeyRangeAsync(
                    request: request,
                    sessionContainer: sessionContainer,
                    partitionKeyRangeCache: partitionKeyRangeCache,
                    clientCollectionCache: clientCollectionCache,
                    refreshCache: true));
    }

    return (new Tuple<bool, PartitionKeyRange>(true, partitonKeyRange));
}
/// <summary>A key type overriding Equals allows lookup through a distinct-but-equal key instance.</summary>
public void GetWhenEqualsOverrideTest()
{
    var _cache = new CollectionCache<TestCacheObject, object>();
    var _value = new object();
    var _storedKey = new TestCacheObject { Id = "A" };

    Assert.AreEqual(_value, _cache.AddOrGetExisting(_storedKey, _value));

    var _equalKey = new TestCacheObject { Id = "A" };
    Assert.AreEqual(_value, _cache.Get(_equalKey).FirstOrDefault());
}
/// <summary>
/// Ensures the Mongo collection for <typeparamref name="T"/> exists with the expected
/// indexes — a TTL "ExpiresAt" index plus any indexes declared via MongoIndexAttribute —
/// then registers the collection in CollectionCache.
/// </summary>
private void GetOrCreateCollection<T>()
{
    Connect();
    string collectionName = typeof(T).Name;
    Logger.Trace($"{nameof(MongoStore)}.{nameof(GetOrCreateCollection)}<{typeof(T).Name}>", new LogItem("Event", "Mongo GetCollection"));
    IMongoCollection<T> collection = Database.GetCollection<T>(collectionName);

    // Seed with the TTL index; attribute-declared indexes are merged in below,
    // grouped by their declared index name (compound indexes share a name).
    Dictionary<string, List<string>> indexes = new Dictionary<string, List<string>>
    {
        { "ExpiresAt", new List<string> { "ExpiresAt" } }
    };

    PropertyInfo[] members = typeof(T).GetProperties();
    foreach (PropertyInfo memberInfo in members)
    {
        MongoIndexAttribute indexAttribute = memberInfo.GetCustomAttribute<MongoIndexAttribute>();
        if (indexAttribute == null)
        {
            continue;
        }

        if (!indexes.ContainsKey(indexAttribute.IndexName))
        {
            indexes.Add(indexAttribute.IndexName, new List<string>());
        }

        indexes[indexAttribute.IndexName].Add(memberInfo.Name);
    }

    IMongoIndexManager<T> indexManager = collection.Indexes;
    foreach (KeyValuePair<string, List<string>> index in indexes)
    {
        // Scan the server's existing indexes; only create what is missing.
        bool indexExists = false;
        using (IAsyncCursor<BsonDocument> asyncCursor = indexManager.List())
        {
            while (asyncCursor.MoveNext() && !indexExists)
            {
                indexExists = CheckIndexExists(asyncCursor, index);
            }
        }

        if (!indexExists)
        {
            // Ascending index over every field registered under this index name, e.g. {"A":1,"B":1}.
            string indexJson = $"{{{string.Join(",", index.Value.Select(field => $"\"{field}\":1"))}}}";
            // Fix: the trace previously always said "Create ExpiresAt index" even when
            // creating attribute-declared indexes; log the actual index name instead.
            Logger.Trace($"{nameof(MongoStore)}.{nameof(GetOrCreateCollection)}<{typeof(T).Name}>", new LogItem("Action", $"Create {index.Key} index"));
            CreateIndexOptions cio = new CreateIndexOptions { Name = index.Key };
            if (index.Key == "ExpiresAt")
            {
                // TTL of zero: documents expire exactly at their ExpiresAt timestamp.
                cio.ExpireAfter = TimeSpan.Zero;
            }

            indexManager.CreateOne(new JsonIndexKeysDefinition<T>(indexJson), cio);
        }
    }

    CollectionCache.Add(typeof(T), collection);
}
// Captures the collection cache this deserializer consults when materializing values.
// NOTE(review): the numeric-suffix name suggests this type is auto-generated
// (e.g. transpiled from Java) — confirm before renaming.
public _VPackDeserializer_74(CollectionCache cache)
{
    this.cache = cache;
}
/// <summary>
/// Executes one page fetch under backoff retry, layering an invalid-partition retry policy
/// and, for partitioned collections, a partition-key-range-gone retry policy; when the
/// backend returns query metrics, folds client-side metrics (retries, charge, fetch ranges,
/// scheduling time) into the response.
/// </summary>
/// <param name="cancellationToken">Cancellation for the fetch.</param>
/// <returns>The (possibly metrics-enriched) feed response for this page.</returns>
protected override async Task<FeedResponse<dynamic>> ExecuteInternalAsync(CancellationToken cancellationToken)
{
    CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync();
    PartitionKeyRangeCache partitionKeyRangeCache = await this.Client.GetPartitionKeyRangeCache();
    IDocumentClientRetryPolicy retryPolicyInstance = this.Client.RetryPolicy.GetRequestPolicy();
    retryPolicyInstance = new InvalidPartitionExceptionRetryPolicy(collectionCache, retryPolicyInstance);
    if (base.ResourceTypeEnum.IsPartitioned())
    {
        retryPolicyInstance = new PartitionKeyRangeGoneRetryPolicy(
            collectionCache,
            partitionKeyRangeCache,
            PathsHelper.GetCollectionPath(base.ResourceLink),
            retryPolicyInstance);
    }

    return await BackoffRetryUtility<FeedResponse<dynamic>>.ExecuteAsync(
        async () =>
        {
            // Fix: BeginFetchRange and EndFetchRange were each invoked twice per attempt
            // (Begin duplicated before the fetch, End duplicated inside the metrics branch),
            // double-counting fetch ranges in the accumulated execution metrics.
            this.fetchExecutionRangeAccumulator.BeginFetchRange();
            ++this.retries;
            this.fetchSchedulingMetrics.Start();
            FeedResponse<dynamic> response = await this.ExecuteOnceAsync(retryPolicyInstance, cancellationToken);
            this.fetchSchedulingMetrics.Stop();
            this.fetchExecutionRangeAccumulator.EndFetchRange(response.Count, this.retries);

            if (!string.IsNullOrEmpty(response.Headers[HttpConstants.HttpHeaders.QueryMetrics]))
            {
                // Re-wrap the response so the backend metrics are merged with client-side metrics.
                response = new FeedResponse<dynamic>(
                    response,
                    response.Count,
                    response.Headers,
                    response.UseETagAsContinuation,
                    new Dictionary<string, QueryMetrics>
                    {
                        {
                            singlePartitionKeyId,
                            QueryMetrics.CreateFromDelimitedStringAndClientSideMetrics(
                                response.Headers[HttpConstants.HttpHeaders.QueryMetrics],
                                new ClientSideMetrics(
                                    this.retries,
                                    response.RequestCharge,
                                    this.fetchExecutionRangeAccumulator.GetExecutionRanges(),
                                    // Scheduling metrics are only reported on the final page (no continuation left).
                                    string.IsNullOrEmpty(response.ResponseContinuation)
                                        ? new List<Tuple<string, SchedulingTimeSpan>>()
                                          {
                                              new Tuple<string, SchedulingTimeSpan>(singlePartitionKeyId, this.fetchSchedulingMetrics.Elapsed)
                                          }
                                        : new List<Tuple<string, SchedulingTimeSpan>>()),
                                Guid.Parse(response.ActivityId))
                        }
                    },
                    response.RequestStatistics,
                    response.DisallowContinuationTokenMessage,
                    response.ResponseLengthBytes);
            }

            // Reset the retry counter once an attempt succeeds.
            this.retries = -1;
            return response;
        },
        retryPolicyInstance,
        cancellationToken);
}
/// <summary>
/// Creates a cache of collection routing maps backed by the given store model; the token
/// provider authorizes lookups and the collection cache resolves collection identities.
/// </summary>
public PartitionKeyRangeCache(
    IAuthorizationTokenProvider authorizationTokenProvider,
    IStoreModel storeModel,
    CollectionCache collectionCache)
{
    this.authorizationTokenProvider = authorizationTokenProvider;
    this.storeModel = storeModel;
    this.collectionCache = collectionCache;
    // Keyed by collection rid (ordinal string compare); values compared by default equality.
    this.routingMapCache = new AsyncCache<string, CollectionRoutingMap>(
        EqualityComparer<CollectionRoutingMap>.Default,
        StringComparer.Ordinal);
}
/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to its corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="filterCallback">The callback used to filter each partition.</param>
/// <param name="token">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task InitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    Dictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<ItemProducerTree, Task> filterCallback,
    CancellationToken token)
{
    // NOTE(review): collectionCache is awaited but never read again in this method;
    // presumably kept for the side effect of initializing the cache — confirm before removing.
    CollectionCache collectionCache = await this.queryContext.QueryClient.GetCollectionCacheAsync();
    this.TraceInformation(string.Format(
        CultureInfo.InvariantCulture,
        "parallel~contextbase.initializeasync, queryspec {0}, maxbuffereditemcount: {1}, target partitionkeyrange count: {2}, maximumconcurrencylevel: {3}, documentproducer initial page size {4}",
        JsonConvert.SerializeObject(querySpecForInit, DefaultJsonSerializationSettings.Value),
        this.actualMaxBufferedItemCount,
        partitionKeyRanges.Count,
        this.comparableTaskScheduler.MaximumConcurrencyLevel,
        initialPageSize));

    List<ItemProducerTree> itemProducerTrees = new List<ItemProducerTree>();
    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        // Resume each partition from its saved continuation token, if one was provided.
        string initialContinuationToken = (targetRangeToContinuationMap != null && targetRangeToContinuationMap.ContainsKey(partitionKeyRange.Id)) ? targetRangeToContinuationMap[partitionKeyRange.Id] : null;
        ItemProducerTree itemProducerTree = new ItemProducerTree(
            this.queryContext,
            querySpecForInit,
            partitionKeyRange,
            this.OnItemProducerTreeCompleteFetching,
            this.itemProducerForest.Comparer as IComparer<ItemProducerTree>,
            this.equalityComparer,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            initialContinuationToken)
        {
            Filter = filter
        };

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(itemProducerTree);
        }

        itemProducerTrees.Add(itemProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel
    foreach (ItemProducerTree itemProducerTree in itemProducerTrees)
    {
        if (!deferFirstPage)
        {
            (bool successfullyMovedNext, QueryResponse failureResponse) response = await itemProducerTree.MoveNextIfNotSplitAsync(token);
            if (response.failureResponse != null)
            {
                // Set the failure so on drain it can be returned.
                this.FailureResponse = response.failureResponse;

                // No reason to enqueue the rest of the itemProducerTrees since there is a failure.
                break;
            }
        }

        if (filterCallback != null)
        {
            await filterCallback(itemProducerTree);
        }

        if (itemProducerTree.HasMoreResults)
        {
            this.itemProducerForest.Enqueue(itemProducerTree);
        }
    }
}
/// <summary>
/// Executes the request exactly once against a single partition, choosing the routing
/// strategy in priority order: explicit logical partition key, explicit physical partition
/// key range id, or — for the full fan-out case — one range at a time driven by the
/// composite continuation token (falling back to Gateway where ServiceInterop is unavailable).
/// </summary>
/// <returns>The page of results plus an identifier of the partition that served it.</returns>
private async Task<Tuple<DocumentFeedResponse<CosmosElement>, string>> ExecuteOnceAsync(
    IDocumentClientRetryPolicy retryPolicyInstance,
    CancellationToken cancellationToken)
{
    // Don't reuse request, as the rest of client SDK doesn't reuse requests between retries.
    // The code leaves some temporary garbage in request (in RequestContext etc.),
    // which should be erased during retries.
    using (DocumentServiceRequest request = await this.CreateRequestAsync())
    {
        DocumentFeedResponse<CosmosElement> feedRespose;
        string partitionIdentifier;
        // We need to determine how to execute the request:
        if (LogicalPartitionKeyProvided(request))
        {
            feedRespose = await this.ExecuteRequestAsync(request, retryPolicyInstance, cancellationToken);
            partitionIdentifier = $"PKId({request.Headers[HttpConstants.HttpHeaders.PartitionKey]})";
        }
        else if (PhysicalPartitionKeyRangeIdProvided(this))
        {
            CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync();
            ContainerProperties collection = await collectionCache.ResolveCollectionAsync(request, CancellationToken.None);

            request.RouteTo(new PartitionKeyRangeIdentity(collection.ResourceId, base.PartitionKeyRangeId));
            feedRespose = await this.ExecuteRequestAsync(request, retryPolicyInstance, cancellationToken);
            partitionIdentifier = base.PartitionKeyRangeId;
        }
        else
        {
            // The query is going to become a full fan out, but we go one partition at a time.
            if (ServiceInteropAvailable())
            {
                // Get the routing map provider
                CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync();
                ContainerProperties collection = await collectionCache.ResolveCollectionAsync(request, CancellationToken.None);
                QueryPartitionProvider queryPartitionProvider = await this.Client.GetQueryPartitionProviderAsync(cancellationToken);
                IRoutingMapProvider routingMapProvider = await this.Client.GetRoutingMapProviderAsync();

                // Figure out what partition you are going to based on the range from the continuation token
                // If token is null then just start at partitionKeyRangeId "0"
                List<CompositeContinuationToken> suppliedTokens;
                Range<string> rangeFromContinuationToken = this.partitionRoutingHelper.ExtractPartitionKeyRangeFromContinuationToken(
                    request.Headers,
                    out suppliedTokens);
                Tuple<PartitionRoutingHelper.ResolvedRangeInfo, IReadOnlyList<Range<string>>> queryRoutingInfo = await this.TryGetTargetPartitionKeyRangeAsync(
                    request,
                    collection,
                    queryPartitionProvider,
                    routingMapProvider,
                    rangeFromContinuationToken,
                    suppliedTokens);

                // Collection may have been deleted/recreated under the same name:
                // retry once with a forced name-cache refresh before giving up.
                if (request.IsNameBased && queryRoutingInfo == null)
                {
                    request.ForceNameCacheRefresh = true;
                    collection = await collectionCache.ResolveCollectionAsync(request, CancellationToken.None);
                    queryRoutingInfo = await this.TryGetTargetPartitionKeyRangeAsync(
                        request,
                        collection,
                        queryPartitionProvider,
                        routingMapProvider,
                        rangeFromContinuationToken,
                        suppliedTokens);
                }

                if (queryRoutingInfo == null)
                {
                    throw new NotFoundException($"{DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture)}: Was not able to get queryRoutingInfo even after resolve collection async with force name cache refresh to the following collectionRid: {collection.ResourceId} with the supplied tokens: {JsonConvert.SerializeObject(suppliedTokens)}");
                }

                request.RouteTo(new PartitionKeyRangeIdentity(collection.ResourceId, queryRoutingInfo.Item1.ResolvedRange.Id));
                DocumentFeedResponse<CosmosElement> response = await this.ExecuteRequestAsync(request, retryPolicyInstance, cancellationToken);

                // Form a composite continuation token (range + backend continuation token).
                // If the backend continuation token was null for the range,
                // then use the next adjacent range.
                // This is how the default execution context serially visits every partition.
                if (!await this.partitionRoutingHelper.TryAddPartitionKeyRangeToContinuationTokenAsync(
                        response.Headers,
                        providedPartitionKeyRanges: queryRoutingInfo.Item2,
                        routingMapProvider: routingMapProvider,
                        collectionRid: collection.ResourceId,
                        resolvedRangeInfo: queryRoutingInfo.Item1))
                {
                    // Collection to which this request was resolved doesn't exist.
                    // Retry policy will refresh the cache and return NotFound.
                    throw new NotFoundException($"{DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture)}: Call to TryAddPartitionKeyRangeToContinuationTokenAsync failed to the following collectionRid: {collection.ResourceId} with the supplied tokens: {JsonConvert.SerializeObject(suppliedTokens)}");
                }

                feedRespose = response;
                partitionIdentifier = queryRoutingInfo.Item1.ResolvedRange.Id;
            }
            else
            {
                // For non-Windows platforms(like Linux and OSX) in .NET Core SDK, we cannot use ServiceInterop for parsing the query,
                // so forcing the request through Gateway. We are also now by-passing this for 32-bit host process in NETFX on Windows
                // as the ServiceInterop dll is only available in 64-bit.
                request.UseGatewayMode = true;
                feedRespose = await this.ExecuteRequestAsync(request, retryPolicyInstance, cancellationToken);
                partitionIdentifier = "Gateway";
            }
        }

        return (new Tuple<DocumentFeedResponse<CosmosElement>, string>(feedRespose, partitionIdentifier));
    }
}
/// <summary>Get on a key that was never added returns an empty collection.</summary>
public void GetWhenNotExistsTest()
{
    var _missingKey = new object();
    var _cache = new CollectionCache<object, object>();

    Assert.IsEmpty(_cache.Get(_missingKey));
}
/// <summary>
/// Repairs the execution context after a partition split by replacing the document producer
/// at <paramref name="currentDocumentProducerIndex"/> with one producer per replacement range,
/// each seeded with the replaced producer's page size and backend continuation token.
/// </summary>
/// <param name="collectionRid">Rid of the collection being drained.</param>
/// <param name="currentDocumentProducerIndex">Index of the producer whose range went away.</param>
/// <param name="taskPriorityFunc">Priority function used to schedule the new producers.</param>
/// <param name="replacementRanges">The child ranges produced by the split.</param>
/// <param name="querySpecForRepair">Query to run against the replacement ranges.</param>
/// <param name="callback">Optional hook invoked after producers are swapped.</param>
protected async Task RepairContextAsync(
    string collectionRid,
    int currentDocumentProducerIndex,
    Func<DocumentProducer<T>, int> taskPriorityFunc,
    IReadOnlyList<PartitionKeyRange> replacementRanges,
    SqlQuerySpec querySpecForRepair,
    Action callback = null)
{
    CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync();
    INameValueCollection requestHeaders = await this.CreateCommonHeadersAsync(this.GetFeedOptions(null));
    // One producer is removed and replacementRanges.Count are inserted.
    this.DocumentProducers.Capacity = this.DocumentProducers.Count + replacementRanges.Count - 1;
    DocumentProducer<T> replacedDocumentProducer = this.DocumentProducers[currentDocumentProducerIndex];
    DefaultTrace.TraceInformation(string.Format(
        CultureInfo.InvariantCulture,
        "{0}, CorrelatedActivityId: {5} | Parallel~ContextBase.RepairContextAsync, MaxBufferedItemCount: {1}, Replacement PartitionKeyRange Count: {2}, MaximumConcurrencyLevel: {3}, DocumentProducer Initial Page Size {4}",
        DateTime.UtcNow.ToString("o", CultureInfo.InvariantCulture),
        this.actualMaxBufferedItemCount,
        replacementRanges.Count,
        this.TaskScheduler.MaximumConcurrencyLevel,
        replacedDocumentProducer.PageSize,
        this.CorrelatedActivityId));

    // Insert the replacement producers immediately after the replaced one, preserving range order;
    // the replaced producer itself is removed after the loop.
    int index = currentDocumentProducerIndex + 1;
    foreach (PartitionKeyRange range in replacementRanges)
    {
        this.DocumentProducers.Insert(
            index++,
            new DocumentProducer<T>(
                this.TaskScheduler,
                (continuationToken, pageSize) =>
                {
                    INameValueCollection headers = requestHeaders.Clone();
                    headers[HttpConstants.HttpHeaders.Continuation] = continuationToken;
                    headers[HttpConstants.HttpHeaders.PageSize] = pageSize.ToString(CultureInfo.InvariantCulture);
                    return (this.CreateDocumentServiceRequest(
                        headers,
                        querySpecForRepair,
                        range,
                        collectionRid));
                },
                range,
                taskPriorityFunc,
                this.ExecuteRequestAsync<T>,
                () => new NonRetriableInvalidPartitionExceptionRetryPolicy(collectionCache, this.Client.RetryPolicy.GetRequestPolicy()),
                this.OnDocumentProducerCompleteFetching,
                this.CorrelatedActivityId,
                replacedDocumentProducer.PageSize,
                replacedDocumentProducer.CurrentBackendContinuationToken));
    }

    this.DocumentProducers.RemoveAt(currentDocumentProducerIndex);

    if (callback != null)
    {
        callback();
    }

    if (this.ShouldPrefetch)
    {
        for (int i = 0; i < replacementRanges.Count; i++)
        {
            this.DocumentProducers[i + currentDocumentProducerIndex].TryScheduleFetch();
        }
    }

    // Re-seed continuation bookkeeping: every replacement producer starts from the
    // replaced producer's backend continuation token.
    if (this.CurrentContinuationTokens.Remove(replacedDocumentProducer))
    {
        for (int i = 0; i < replacementRanges.Count; ++i)
        {
            this.CurrentContinuationTokens[this.DocumentProducers[currentDocumentProducerIndex + i]] = replacedDocumentProducer.CurrentBackendContinuationToken;
        }
    }
}
/// <summary>
/// Initializes cross partition query execution context by initializing the necessary document producers.
/// </summary>
/// <param name="collectionRid">The collection to drain from.</param>
/// <param name="partitionKeyRanges">The partitions to target.</param>
/// <param name="initialPageSize">The page size to start the document producers off with.</param>
/// <param name="querySpecForInit">The query specification for the rewritten query.</param>
/// <param name="targetRangeToContinuationMap">Map from partition to its corresponding continuation token.</param>
/// <param name="deferFirstPage">Whether or not we should defer the fetch of the first page from each partition.</param>
/// <param name="filter">The filter to inject in the predicate.</param>
/// <param name="filterCallback">The callback used to filter each partition.</param>
/// <param name="token">The cancellation token.</param>
/// <returns>A task to await on.</returns>
protected async Task InitializeAsync(
    string collectionRid,
    IReadOnlyList<PartitionKeyRange> partitionKeyRanges,
    int initialPageSize,
    SqlQuerySpec querySpecForInit,
    Dictionary<string, string> targetRangeToContinuationMap,
    bool deferFirstPage,
    string filter,
    Func<DocumentProducerTree, Task> filterCallback,
    CancellationToken token)
{
    CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync();
    INameValueCollection requestHeaders = await this.CreateCommonHeadersAsync(this.GetFeedOptions(null));
    this.TraceInformation(string.Format(
        CultureInfo.InvariantCulture,
        "parallel~contextbase.initializeasync, queryspec {0}, maxbuffereditemcount: {1}, target partitionkeyrange count: {2}, maximumconcurrencylevel: {3}, documentproducer initial page size {4}",
        JsonConvert.SerializeObject(this.querySpec, DefaultJsonSerializationSettings.Value),
        this.actualMaxBufferedItemCount,
        partitionKeyRanges.Count,
        this.comparableTaskScheduler.MaximumConcurrencyLevel,
        initialPageSize));

    List<DocumentProducerTree> documentProducerTrees = new List<DocumentProducerTree>();
    foreach (PartitionKeyRange partitionKeyRange in partitionKeyRanges)
    {
        // Resume each partition from its saved continuation token, if one was provided.
        string initialContinuationToken = (targetRangeToContinuationMap != null && targetRangeToContinuationMap.ContainsKey(partitionKeyRange.Id)) ? targetRangeToContinuationMap[partitionKeyRange.Id] : null;
        DocumentProducerTree documentProducerTree = new DocumentProducerTree(
            partitionKeyRange,
            //// Create Document Service Request callback
            (pkRange, continuationToken, pageSize) =>
            {
                INameValueCollection headers = requestHeaders.Clone();
                headers[HttpConstants.HttpHeaders.Continuation] = continuationToken;
                headers[HttpConstants.HttpHeaders.PageSize] = pageSize.ToString(CultureInfo.InvariantCulture);
                return (this.CreateDocumentServiceRequest(
                    headers,
                    querySpecForInit,
                    pkRange,
                    collectionRid));
            },
            this.ExecuteRequestLazyAsync,
            //// Retry policy callback
            () => new NonRetriableInvalidPartitionExceptionRetryPolicy(collectionCache, this.Client.ResetSessionTokenRetryPolicy.GetRequestPolicy()),
            this.OnDocumentProducerTreeCompleteFetching,
            this.documentProducerForest.Comparer as IComparer<DocumentProducerTree>,
            this.equalityComparer,
            this.Client,
            deferFirstPage,
            collectionRid,
            initialPageSize,
            initialContinuationToken);
        documentProducerTree.Filter = filter;

        // Prefetch if necessary, and populate consume queue.
        if (this.CanPrefetch)
        {
            this.TryScheduleFetch(documentProducerTree);
        }

        documentProducerTrees.Add(documentProducerTree);
    }

    // Using loop fission so that we can load the document producers in parallel
    foreach (DocumentProducerTree documentProducerTree in documentProducerTrees)
    {
        if (!deferFirstPage)
        {
            await documentProducerTree.MoveNextIfNotSplitAsync(token);
        }

        if (filterCallback != null)
        {
            await filterCallback(documentProducerTree);
        }

        if (documentProducerTree.HasMoreResults)
        {
            this.documentProducerForest.Enqueue(documentProducerTree);
        }
    }
}
public void GetWhenRegionNameDoesNotExistsTest()
{
    // Arrange: create a cache and store a single entry under one region.
    const string REGION = "Test";
    const string REGION2 = "Test2";
    var cacheUnderTest = new CollectionCache<object, object>();
    var entryKey = new object();
    var entryValue = new object();

    // Act: AddOrGetExisting returns the value that ends up in the cache.
    var stored = cacheUnderTest.AddOrGetExisting(entryKey, entryValue, REGION);

    // Assert: the entry is reachable via its own region, while querying a
    // region that was never created throws KeyNotFoundException.
    Assert.AreEqual(entryValue, stored);
    Assert.Throws<KeyNotFoundException>(() => cacheUnderTest.Get(entryKey, REGION2));
}
/// <summary>
/// Factory entry point that builds the query execution context appropriate for
/// the target resource and platform: a proxy context when native query parsing
/// must be bypassed, otherwise a default (gateway) context, upgraded when the
/// query plan indicates a specialized cross-partition context is required.
/// </summary>
/// <param name="client">The document query client used to resolve caches and execute requests.</param>
/// <param name="resourceTypeEnum">The resource type being queried.</param>
/// <param name="resourceType">The CLR type of the resource.</param>
/// <param name="expression">The LINQ expression for the query (may be null for SQL queries).</param>
/// <param name="feedOptions">The feed options for the query.</param>
/// <param name="resourceLink">The link of the resource to query.</param>
/// <param name="isContinuationExpected">Whether the caller expects continuation tokens.</param>
/// <param name="token">The cancellation token.</param>
/// <param name="correlatedActivityId">Activity id used to correlate all requests for this query.</param>
/// <returns>The constructed query execution context.</returns>
public static async Task <IDocumentQueryExecutionContext> CreateDocumentQueryExecutionContextAsync(
    IDocumentQueryClient client,
    ResourceType resourceTypeEnum,
    Type resourceType,
    Expression expression,
    FeedOptions feedOptions,
    string resourceLink,
    bool isContinuationExpected,
    CancellationToken token,
    Guid correlatedActivityId)
{
    ContainerProperties collection = null;
    if (resourceTypeEnum.IsCollectionChild())
    {
        // Resolve the container metadata (rid, partition key definition) from the
        // collection cache; the request below never leaves the client.
        CollectionCache collectionCache = await client.GetCollectionCacheAsync();
        using (
            DocumentServiceRequest request = DocumentServiceRequest.Create(
                OperationType.Query,
                resourceTypeEnum,
                resourceLink,
                AuthorizationTokenType.Invalid)) // this request doesn't actually go to the server
        {
            collection = await collectionCache.ResolveCollectionAsync(request, token);
        }

        // Translate the sentinel PartitionKey.None into the container's concrete
        // "none" value now that the partition key definition is known.
        if (feedOptions != null && feedOptions.PartitionKey != null && feedOptions.PartitionKey.Equals(Documents.PartitionKey.None))
        {
            feedOptions.PartitionKey = Documents.PartitionKey.FromInternalKey(collection.GetNoneValue());
        }
    }

    DocumentQueryExecutionContextBase.InitParams constructorParams = new DocumentQueryExecutionContextBase.InitParams(
        client,
        resourceTypeEnum,
        resourceType,
        expression,
        feedOptions,
        resourceLink,
        false,
        correlatedActivityId);

    // For non-Windows platforms(like Linux and OSX) in .NET Core SDK, we cannot use ServiceInterop, so need to bypass in that case.
    // We are also now bypassing this for 32 bit host process running even on Windows as there are many 32 bit apps that will not work without this
    if (CustomTypeExtensions.ByPassQueryParsing())
    {
        // We create a ProxyDocumentQueryExecutionContext that will be initialized with DefaultDocumentQueryExecutionContext
        // which will be used to send the query to Gateway and on getting 400(bad request) with 1004(cross partition query not servable), we initialize it with
        // PipelinedDocumentQueryExecutionContext by providing the partition query execution info that's needed(which we get from the exception returned from Gateway).
        ProxyDocumentQueryExecutionContext proxyQueryExecutionContext = ProxyDocumentQueryExecutionContext.Create(
            client,
            resourceTypeEnum,
            resourceType,
            expression,
            feedOptions,
            resourceLink,
            token,
            collection,
            isContinuationExpected,
            correlatedActivityId);
        return(proxyQueryExecutionContext);
    }

    DefaultDocumentQueryExecutionContext queryExecutionContext = await DefaultDocumentQueryExecutionContext.CreateAsync(
        constructorParams,
        isContinuationExpected,
        token);

    // If isContinuationExpected is false, we want to check if there are aggregates.
    // NOTE(review): feedOptions is dereferenced here without a null check, unlike the
    // guarded access above — presumably callers on this path always pass feedOptions;
    // TODO confirm before relying on a null feedOptions.
    if (
        resourceTypeEnum.IsCollectionChild() &&
        resourceTypeEnum.IsPartitioned() &&
        (feedOptions.EnableCrossPartitionQuery || !isContinuationExpected))
    {
        //todo:elasticcollections this may rely on information from collection cache which is outdated
        //if collection is deleted/created with same name.
        //need to make it not rely on information from collection cache.
        PartitionedQueryExecutionInfo partitionedQueryExecutionInfo = await queryExecutionContext.GetPartitionedQueryExecutionInfoAsync(
            partitionKeyDefinition: collection.PartitionKey,
            requireFormattableOrderByQuery: true,
            isContinuationExpected: isContinuationExpected,
            allowNonValueAggregateQuery: true,
            hasLogicalPartitionKey: feedOptions.PartitionKey != null,
            cancellationToken: token);

        if (DocumentQueryExecutionContextFactory.ShouldCreateSpecializedDocumentQueryExecutionContext(
            resourceTypeEnum,
            feedOptions,
            partitionedQueryExecutionInfo,
            collection.PartitionKey,
            isContinuationExpected))
        {
            List <PartitionKeyRange> targetRanges = await GetTargetPartitionKeyRangesAsync(
                queryExecutionContext,
                partitionedQueryExecutionInfo,
                collection,
                feedOptions);

            // Devnote this will get replaced by the new v3 to v2 logic
            throw new NotSupportedException("v2 query excution context is currently not supported.");
        }
    }
    return(queryExecutionContext);
}
public void GetWhenRegionNameTest()
{
    // Arrange
    const string REGION = "Test";
    var regionCache = new CollectionCache<object, object>();
    var lookupKey = new object();
    var cachedObject = new object();

    // Act: store the entry under the region, then read it back from the same region.
    var returned = regionCache.AddOrGetExisting(lookupKey, cachedObject, REGION);

    // Assert: AddOrGetExisting echoes the stored value, and Get for the same
    // key/region yields a collection whose first element is that value.
    Assert.AreEqual(cachedObject, returned);
    var fetched = regionCache.Get(lookupKey, REGION);
    Assert.AreEqual(cachedObject, fetched.FirstOrDefault());
}
/// <summary>
/// Fetches one page of the query feed with backoff/retry: builds the retry
/// policy chain (session reset -> invalid partition -> partition-key-range gone
/// for partitioned resources), executes the request once per retry attempt, and
/// when the backend returned query metrics, rebuilds the response so it also
/// carries the accumulated client-side metrics for this fetch.
/// </summary>
/// <param name="token">The cancellation token for the fetch.</param>
/// <returns>The feed response for one page of results.</returns>
protected override async Task <DocumentFeedResponse <CosmosElement> > ExecuteInternalAsync(CancellationToken token)
{
    CollectionCache collectionCache = await this.Client.GetCollectionCacheAsync();
    PartitionKeyRangeCache partitionKeyRangeCache = await this.Client.GetPartitionKeyRangeCacheAsync();

    // Base policy, wrapped so invalid-partition errors (collection recreated with
    // the same name) are also handled.
    IDocumentClientRetryPolicy retryPolicyInstance = this.Client.ResetSessionTokenRetryPolicy.GetRequestPolicy();
    retryPolicyInstance = new InvalidPartitionExceptionRetryPolicy(retryPolicyInstance);
    if (base.ResourceTypeEnum.IsPartitioned())
    {
        // Partitioned resources additionally need to recover from
        // partition-key-range-gone (e.g. after a split).
        retryPolicyInstance = new PartitionKeyRangeGoneRetryPolicy(
            collectionCache,
            partitionKeyRangeCache,
            PathsHelper.GetCollectionPath(base.ResourceLink),
            retryPolicyInstance);
    }

    return(await BackoffRetryUtility <DocumentFeedResponse <CosmosElement> > .ExecuteAsync(
        async() =>
        {
            this.fetchExecutionRangeAccumulator.BeginFetchRange();
            // Pre-increment: retries is reset to -1 on success below, so the
            // first attempt of each fetch observes retries == 0.
            ++this.retries;
            Tuple <DocumentFeedResponse <CosmosElement>, string> responseAndPartitionIdentifier = await this.ExecuteOnceAsync(retryPolicyInstance, token);
            DocumentFeedResponse <CosmosElement> response = responseAndPartitionIdentifier.Item1;
            string partitionIdentifier = responseAndPartitionIdentifier.Item2;

            // Only rebuild the response when the backend attached query metrics.
            if (!string.IsNullOrEmpty(response.ResponseHeaders[HttpConstants.HttpHeaders.QueryMetrics]))
            {
                this.fetchExecutionRangeAccumulator.EndFetchRange(
                    partitionIdentifier,
                    response.ActivityId,
                    response.Count,
                    this.retries);
                // Re-wrap the response so it carries the backend metrics merged with
                // the client-side metrics (retries, charge, execution ranges).
                response = new DocumentFeedResponse <CosmosElement>(
                    response,
                    response.Count,
                    response.Headers,
                    response.UseETagAsContinuation,
                    new Dictionary <string, QueryMetrics>
                    {
                        {
                            partitionIdentifier,
                            QueryMetrics.CreateFromDelimitedStringAndClientSideMetrics(
                                response.ResponseHeaders[HttpConstants.HttpHeaders.QueryMetrics],
                                response.ResponseHeaders[HttpConstants.HttpHeaders.IndexUtilization],
                                new ClientSideMetrics(
                                    this.retries,
                                    response.RequestCharge,
                                    this.fetchExecutionRangeAccumulator.GetExecutionRanges()))
                        }
                    },
                    response.RequestStatistics,
                    response.DisallowContinuationTokenMessage,
                    response.ResponseLengthBytes);
            }

            // Reset so the next fetch's pre-increment starts again at zero.
            this.retries = -1;
            return response;
        },
        retryPolicyInstance,
        token));
}
public void RemoveTest()
{
    // Arrange: seed the cache with a single key/value pair.
    const string KEY = "Test";
    var cacheUnderTest = new CollectionCache<string, object>();
    var storedValue = new object();
    var existing = cacheUnderTest.AddOrGetExisting(KEY, storedValue);
    Assert.AreEqual(storedValue, existing);

    // Act: remove exactly that key/value pair.
    cacheUnderTest.Remove(KEY, storedValue);

    // Assert: the key now resolves to an empty collection.
    Assert.IsEmpty(cacheUnderTest.Get(KEY));
}