/// <summary>
/// Flushes the BLOB cache for the specified Web Application.
/// WARNING: This method needs to be run as Farm Admin and have the security_admin SQL server role
/// and the db_owner role on the web app's content DB in order to successfully flush the web app's BLOB cache.
/// </summary>
/// <param name="webApplication">The SharePoint web application.</param>
public void FlushBlobCache(SPWebApplication webApplication)
{
    try
    {
        PublishingCache.FlushBlobCache(webApplication);
    }
    catch (SPException exception)
    {
        // Fixed message: previously read "accross" and repeated "You need You need".
        this.logger.Error("Failed to flush the BLOB cache across the web app. You need the security_admin SQL server role and the db_owner role on the web app's content DB. Caught and swallowed exception: {0}", exception);
    }
    catch (AccessViolationException exception)
    {
        // SharePoint raises AccessViolationException here when remote administrator access is denied;
        // retry after temporarily allowing remote admin access on the content web service.
        // Fixed message: the code sets RemoteAdministratorAccessDenied to FALSE, not true.
        this.logger.Warn("Received an AccessViolationException when flushing BLOB Cache. Trying again with RemoteAdministratorAccessDenied set to false. Caught and swallowed exception: {0}", exception);

        bool initialRemoteAdministratorAccessDenied = true;
        SPWebService myService = SPWebService.ContentService;

        try
        {
            // Remember the original value so it can be restored whatever happens below.
            initialRemoteAdministratorAccessDenied = myService.RemoteAdministratorAccessDenied;
            myService.RemoteAdministratorAccessDenied = false;
            myService.Update();

            PublishingCache.FlushBlobCache(webApplication);
        }
        finally
        {
            // Always restore the original setting, even if the retry fails.
            myService.RemoteAdministratorAccessDenied = initialRemoteAdministratorAccessDenied;
            myService.Update();
        }
    }
}
/// <summary>
/// Builds the local (non-distributed) CAS server. When metadata storage is enabled in the
/// distributed settings, a <c>LocalCacheServer</c> hosting both content and metadata is
/// created; otherwise a content-only <c>LocalContentServer</c> is returned.
/// </summary>
/// <param name="localServerConfiguration">Configuration for the local server instance.</param>
/// <param name="distributedSettings">Optional distributed settings; null means content-only.</param>
private StartupShutdownBase CreateLocalServer(LocalServerConfiguration localServerConfiguration, DistributedContentSettings distributedSettings = null)
{
    var cacheSettings = DistributedContentStoreFactory.ResolveCacheSettingsInPrecedenceOrder(_arguments);

    // Select the single resolved cache setting whose root equals, or is nested under, the requested path.
    Func<AbsolutePath, IContentStore> createContentStore = rootPath =>
    {
        var matchingSetting = cacheSettings.Single(
            s => s.ResolvedCacheRootPath == rootPath
              || s.ResolvedCacheRootPath.Path.StartsWith(rootPath.Path, StringComparison.OrdinalIgnoreCase));
        return DistributedContentStoreFactory.CreateLocalContentStore(distributedSettings, _arguments, matchingSetting);
    };

    if (distributedSettings?.EnableMetadataStore != true)
    {
        _logger.Always("Creating local server with content store only");
        return new LocalContentServer(
            _fileSystem,
            _logger,
            _arguments.Configuration.LocalCasSettings.ServiceSettings.ScenarioName,
            createContentStore,
            localServerConfiguration);
    }

    _logger.Always("Creating local server with content and metadata store");
    var storeFactory = CreateDistributedContentStoreFactory();

    Func<AbsolutePath, ICache> createCache = rootPath =>
    {
        var localCache = new OneLevelCache(
            contentStoreFunc: () => createContentStore(rootPath),
            memoizationStoreFunc: () => CreateServerSideLocalMemoizationStore(rootPath, storeFactory),
            Guid.NewGuid(),
            passContentToMemoization: true);

        ICache result = localCache;
#if MICROSOFT_INTERNAL
        if (distributedSettings.EnablePublishingCache)
        {
            // Wrap the local cache so content hash lists can also be published to the remote build cache.
            result = new PublishingCache<OneLevelCache>(
                local: localCache,
                remote: new BuildCachePublishingStore(contentSource: localCache, _fileSystem, distributedSettings.PublishingConcurrencyLimit),
                Guid.NewGuid());
        }
#endif
        return result;
    };

    return new LocalCacheServer(
        _fileSystem,
        _logger,
        _arguments.Configuration.LocalCasSettings.ServiceSettings.ScenarioName,
        createCache,
        localServerConfiguration,
        capabilities: distributedSettings.EnablePublishingCache ? Capabilities.All : Capabilities.AllNonPublishing);
}
/// <summary>
/// Flushes the BLOB cache for the supplied Web Application instance.
/// </summary>
/// <param name="webApplication">Script-engine wrapper around the target SPWebApplication; must not be null.</param>
public void FlushBlobCache(SPWebApplicationInstance webApplication)
{
    if (webApplication == null)
    {
        // Surface a script-level error rather than a CLR ArgumentNullException.
        throw new JavaScriptException(this.Engine, "Error", "An instance of a Web Application object must be specified as the first argument.");
    }

    var targetWebApplication = webApplication.SPWebApplication;
    PublishingCache.FlushBlobCache(targetWebApplication);
}
/// <summary>
/// Creates a new PublishingCacheInstance wrapping the supplied PublishingCache.
/// </summary>
/// <param name="prototype">The prototype object for this script-engine instance.</param>
/// <param name="publishingCache">The underlying publishing cache; must not be null.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="publishingCache"/> is null.</exception>
public PublishingCacheInstance(ObjectInstance prototype, PublishingCache publishingCache)
    : this(prototype)
{
    if (publishingCache == null)
    {
        // nameof keeps the parameter name refactor-safe (was a magic string).
        throw new ArgumentNullException(nameof(publishingCache));
    }

    m_publishingCache = publishingCache;
}
/// <summary>
/// Serializes the publishing cache contents of the specified site and returns them
/// as a Base64-encoded byte array instance for the script engine.
/// </summary>
/// <param name="site">Script-engine wrapper around the target site; must not be null.</param>
/// <returns>A Base64-encoded byte array containing the listed cache contents.</returns>
public Base64EncodedByteArrayInstance ListCacheContents(SPSiteInstance site)
{
    if (site == null)
    {
        // Surface a script-level error rather than a CLR ArgumentNullException.
        throw new JavaScriptException(this.Engine, "Error", "An instance of a site object must be specified as the first argument.");
    }

    using (var buffer = new MemoryStream())
    {
        PublishingCache.ListCacheContents(buffer, false, site.Site);
        return new Base64EncodedByteArrayInstance(this.Engine.Object.InstancePrototype, buffer.ToArray());
    }
}
/// <summary>
/// Demo entry point: fetches a single dummy user, publishes it into a Redis-backed
/// publishing cache, then blocks until a line is read so the result can be observed.
/// </summary>
static async Task MainAsync()
{
    // Fetch one random dummy user and report who we got.
    var repository = new RandomUser.RandomUserRepository();
    var dummyUser = await repository.GetSingleDummyUser();
    Console.WriteLine(string.Format("Fetched user {0} {1}", dummyUser.Name.First, dummyUser.Name.Last));

    // Connect to a local Redis instance with an identifiable client name.
    var redisOptions = ConfigurationOptions.Parse("localhost");
    redisOptions.ClientName = "publishClient";
    var multiplexer = ConnectionMultiplexer.Connect(redisOptions);

    // Wire the publishing cache: MsgPack serialization, Redis storage, Redis pub/sub publisher.
    var itemSerializer = new MsgPackItemSerializer();
    var backingCache = new RedisCache(multiplexer.GetDatabase(), itemSerializer);
    var publishingCache = new PublishingCache(backingCache, new RedisPublisher(multiplexer, itemSerializer));

    publishingCache.Add("/pubsubcache/single", dummyUser);
    Console.WriteLine("Published");

    // Keep the process alive until the operator presses Enter.
    Console.ReadLine();
}
/// <summary>
/// Demo entry point: fetches a single dummy user, publishes it into a Redis-backed
/// publishing cache under the key "/pubsubcache/single", then blocks until a line
/// is read so the result can be observed.
/// </summary>
static async Task MainAsync()
{
    // Fetch one random dummy user and report who we got.
    var repo = new RandomUser.RandomUserRepository();
    var user = await repo.GetSingleDummyUser();
    Console.WriteLine(string.Format("Fetched user {0} {1}", user.Name.First, user.Name.Last));

    // Connect to a local Redis instance; the client name shows up in Redis client listings.
    var options = ConfigurationOptions.Parse("localhost");
    options.ClientName = "publishClient";
    var connection = ConnectionMultiplexer.Connect(options);

    // The same MsgPack serializer is shared by the cache and the publisher.
    var serializer = new MsgPackItemSerializer();
    var remoteCache = new RedisCache(connection.GetDatabase(), serializer);
    var cache = new PublishingCache(remoteCache, new RedisPublisher(connection, serializer));

    // Adding the entry publishes it via the RedisPublisher — presumably notifying
    // subscribers over Redis pub/sub; confirm against the RedisPublisher implementation.
    cache.Add("/pubsubcache/single", user);
    Console.WriteLine("Published");

    // Keep the process alive until the operator presses Enter.
    Console.ReadLine();
}
/// <summary>
/// Verifies that when a publishing session is created with publishAsynchronously: true,
/// AddOrGetContentHashListAsync completes even though the publishing store is still blocked —
/// i.e. publishing happens in the background and does not block the caller.
/// </summary>
public async Task AsynchronousPublishingDoesNotBlock()
{
    var context = new Context(Logger);
    using var testDirectory = new DisposableDirectory(FileSystem);

    // Store whose publish operation blocks until its TaskCompletionSource is completed.
    var blockingStore = new BlockingPublishingStore();
    var publishingCache = new PublishingCache<LocalCache>(CreateInnerCache(testDirectory), blockingStore, Guid.NewGuid());
    await publishingCache.StartupAsync(context).ShouldBeSuccess();

    var sessionResult = publishingCache.CreatePublishingSession(
        context,
        name: "Default",
        ImplicitPin.None,
        CreateConfiguration(publishAsynchronously: true),
        pat: Guid.NewGuid().ToString()).ShouldBeSuccess();
    var session = sessionResult.Session;
    await session.StartupAsync(context).ShouldBeSuccess();

    var amountOfFiles = 10;
    // Put amountOfFiles + 2 pieces of random content: the first amountOfFiles hashes form the
    // content hash list; the two extras supply the fingerprint and the selector below.
    var putResults = await Task.WhenAll(
        Enumerable.Range(0, amountOfFiles + 2)
            .Select(n => session.PutRandomAsync(context, HashType.Vso0, provideHash: false, size: 1024, Token).ShouldBeSuccess()));
    var hashes = putResults.Select(r => r.ContentHash);
    var contentHashList = new ContentHashListWithDeterminism(
        new ContentHashList(hashes.Take(amountOfFiles).ToArray()),
        CacheDeterminism.None);
    var strongFingerprint = new StrongFingerprint(
        new Fingerprint(hashes.Skip(amountOfFiles).First().ToByteArray()),
        new Selector(hashes.Skip(amountOfFiles + 1).First()));

    // With asynchronous publishing this must succeed while the store is still blocked.
    await session.AddOrGetContentHashListAsync(context, strongFingerprint, contentHashList, Token).ShouldBeSuccess();

    // The store's task has not completed yet — proves the add above did not wait on publishing.
    Assert.False(blockingStore.TaskCompletionSource.Task.IsCompleted);

    // Unblock the store with a failing result and confirm the publish task surfaces the error.
    blockingStore.TaskCompletionSource.SetResult(new BoolResult(new Exception()));
    await blockingStore.TaskCompletionSource.Task.ShouldBeError();
}
/// <summary>
/// Builds the distributed CAS server. When metadata storage or the distributed cache is
/// enabled, a LocalCacheServer hosting both content and metadata is created; otherwise a
/// content-only LocalContentServer is returned.
/// </summary>
/// <param name="localServerConfiguration">Configuration for the local server instance.</param>
/// <param name="distributedSettings">Distributed settings controlling which stores are created.</param>
private StartupShutdownBase CreateDistributedServer(LocalServerConfiguration localServerConfiguration, DistributedContentSettings distributedSettings)
{
    var cacheConfig = _arguments.Configuration;
    var factory = CreateDistributedContentStoreFactory();

    // NOTE: This relies on the assumption that when creating a distributed server,
    // there is only one call to create a cache so we simply create the cache here and ignore path
    // below in factory delegates since the logic for creating path based caches is included in the
    // call to CreateTopLevelStore
    var topLevelAndPrimaryStore = factory.CreateTopLevelStore();

    if (distributedSettings.EnableMetadataStore || distributedSettings.EnableDistributedCache)
    {
        _logger.Always("Creating distributed server with content and metadata store");
        Func<AbsolutePath, ICache> cacheFactory = path =>
        {
            if (distributedSettings.EnableDistributedCache)
            {
                var distributedCache = new DistributedOneLevelCache(
                    topLevelAndPrimaryStore.topLevelStore,
                    topLevelAndPrimaryStore.primaryDistributedStore,
                    Guid.NewGuid(),
                    passContentToMemoization: true);

                ICache cacheToReturn = distributedCache;
#if MICROSOFT_INTERNAL
                if (distributedSettings.EnablePublishingCache)
                {
                    // Wrap the cache so content hash lists can also be published to the remote build cache.
                    cacheToReturn = new PublishingCache<DistributedOneLevelCache>(
                        local: distributedCache,
                        remote: new BuildCachePublishingStore(contentSource: distributedCache, _fileSystem, distributedSettings.PublishingConcurrencyLimit),
                        Guid.NewGuid());
                }
#endif
                return (cacheToReturn);
            }
            else
            {
                return (new OneLevelCache(
                    contentStoreFunc: () => topLevelAndPrimaryStore.topLevelStore,
                    memoizationStoreFunc: () => CreateServerSideLocalMemoizationStore(path, factory),
                    Guid.NewGuid(),
                    passContentToMemoization: true));
            }
        };

        // NOTE(jubayard): When generating the service configuration, we create a single named cache root in
        // the distributed case. This means that the factories will be called exactly once, so we will have
        // a single MultiplexedContentStore and MemoizationStore. The latter will be located in the last cache
        // root listed as per production configuration, which currently (8/27/2019) points to the SSD drives.
        return (new LocalCacheServer(
            _fileSystem,
            _logger,
            _arguments.Configuration.LocalCasSettings.ServiceSettings.ScenarioName,
            cacheFactory,
            localServerConfiguration,
            capabilities: distributedSettings.EnablePublishingCache ? Capabilities.All : Capabilities.AllNonPublishing,
            factory.GetAdditionalEndpoints()));
    }
    else
    {
        _logger.Always("Creating distributed server with content store only");
        return (new LocalContentServer(
            _fileSystem,
            _logger,
            cacheConfig.LocalCasSettings.ServiceSettings.ScenarioName,
            path => topLevelAndPrimaryStore.topLevelStore,
            localServerConfiguration,
            factory.GetAdditionalEndpoints()));
    }
}