/// <summary>
/// Closes the cache and disconnects from the distributed system.
/// </summary>
public void Dispose()
{
    bool cacheIsOpen = cache != null && !cache.IsClosed;
    if (cacheIsOpen)
    {
        if (!IsDurableClient)
        {
            cache.Close();
        }
        else
        {
            // Durable clients may keep their subscription queue alive on the
            // server across the close, depending on keepAliveOnClose.
            log.Info("Closing durable client with keepalive = " + keepAliveOnClose);
            cache.Close(keepAliveOnClose);
        }
    }
    cache = null;

    // Only tear down the distributed-system connection when configured to.
    if (!disconnectOnClose)
    {
        return;
    }
    if (system != null && DistributedSystem.IsConnected)
    {
        DistributedSystem.Disconnect();
    }
    system = null;
}
/// <summary>
/// Connects to the distributed system and creates a locally scoped, caching
/// region named "exampleregion".
/// </summary>
private Region CreateRegion()
{
    DistributedSystem system = DistributedSystem.Connect("exampleregion");
    Cache regionCache = CacheFactory.Create("exampleregion", system);

    // Local scope with caching enabled: entries live only in this process.
    AttributesFactory factory = new AttributesFactory();
    factory.SetScope(ScopeType.Local);
    factory.SetCachingEnabled(true);
    RegionAttributes attributes = factory.CreateRegionAttributes();

    return regionCache.CreateRegion("exampleregion", attributes);
}
/// <summary>
/// Closes the cache with keep-alive semantics when a distributed-system
/// connection exists, optionally disconnecting, then clears the cached
/// distributed-system reference.
/// </summary>
public static void CloseKeepAlive()
{
    bool connected = DistributedSystem.IsConnected;
    if (connected)
    {
        CloseCacheKeepAlive();
    }
    if (connected && m_doDisconnect)
    {
        DistributedSystem.Disconnect();
    }
    m_dsys = null;
}
/// <summary>
/// Minimal end-to-end example: connect, create a cache, and create a region
/// with default attributes.
/// </summary>
public void SampleUsage()
{
    // Step 1: connect to the distributed system.
    Console.WriteLine("{0}Connecting to GemFire", Environment.NewLine);
    DistributedSystem system = DistributedSystem.Connect("exampleregion");

    // Step 2: create the cache on top of that connection.
    Cache exampleCache = CacheFactory.Create("exampleregion", system);

    // Steps 3-4: default attributes, then the region itself.
    AttributesFactory factory = new AttributesFactory();
    RegionAttributes attributes = factory.CreateRegionAttributes();
    Region region = exampleCache.CreateRegion("exampleregion", attributes);
}
/// <summary>
/// Initializes the cache for the test framework. Merges an optional
/// gfcpp.properties file (explicit <paramref name="config"/> entries win),
/// then creates the cache — applying an optional cache XML file and the
/// PdxReadSerialized flag — unless an open cache already exists.
/// </summary>
/// <param name="config">Explicit configuration; may be null.</param>
/// <param name="cacheXml">Optional cache-xml-file path; ignored when null/empty.</param>
/// <param name="PdxReadSerialized">Whether PDX values are read as serialized.</param>
public static void InitConfig(Properties<string, string> config, string cacheXml, bool PdxReadSerialized)
{
    // Merge gfcpp.properties found next to the assembly; AddAll overlays the
    // caller's config so explicit settings take precedence over the file.
    string gfcppPropsFile = Util.AssemblyDir + "/gfcpp.properties";
    if (File.Exists(gfcppPropsFile))
    {
        Properties<string, string> newConfig = new Properties<string, string>();
        newConfig.Load(gfcppPropsFile);
        if (config != null)
        {
            newConfig.AddAll(config);
        }
        config = newConfig;
    }
    //ConnectConfig(dsName, config);
    if (m_cache == null || m_cache.IsClosed)
    {
        try
        {
            CacheHelper<TKey, TVal>.m_doDisconnect = false;
            CacheFactory cf = CacheFactory.CreateCacheFactory(config);
            if (cacheXml != null && cacheXml.Length > 0)
            {
                FwkInfo("setting cache-xml-file {0}", cacheXml);
                cf = cf.Set("cache-xml-file", cacheXml);
            }
            if (PdxReadSerialized)
            {
                FwkInfo("setting PdxReadSerialized {0}", PdxReadSerialized);
                // BUG FIX: the original created a brand-new factory from
                // `config` here, which silently discarded the cache-xml-file
                // setting applied above. Chain on the existing factory instead.
                cf = cf.SetPdxReadSerialized(PdxReadSerialized);
            }
            m_cache = cf.Create();
        }
        catch (CacheExistsException)
        {
            // Another test already created the cache; reuse it.
            m_cache = CacheFactory.GetAnyInstance();
        }
    }
    m_dsys = m_cache.DistributedSystem;
}
/// <summary>
/// Connects to GemFire, creates the "empty" region with client notification
/// enabled against a server at localhost:40404, and registers interest in
/// keys matching the "Keys-*" pattern.
/// </summary>
public void RegisterRegexInterest()
{
    // Connect and create the cache.
    Console.WriteLine("{0}Connecting to GemFire", Environment.NewLine);
    DistributedSystem system = DistributedSystem.Connect("empty");
    Cache clientCache = CacheFactory.Create("Cache", system);

    // Region attributes: server endpoint plus change notifications so the
    // registered interest actually delivers updates.
    AttributesFactory factory = new AttributesFactory();
    factory.SetClientNotificationEnabled(true);
    factory.SetEndpoints("localhost:40404");
    RegionAttributes attributes = factory.CreateRegionAttributes();

    Region region = clientCache.CreateRegion("empty", attributes);

    // Brief pause before registering interest (presumably to let the
    // subscription connection establish — TODO confirm it is required).
    Thread.Sleep(1000);
    region.RegisterRegex("Keys-*", false, null, false);
}
/// <summary>
/// Verifies that disconnecting without a prior connection throws
/// NotConnectedException, then checks that a normal connect succeeds,
/// always cleaning up afterwards.
/// </summary>
public void Connect()
{
    // Disconnecting before any connection exists must fail.
    try
    {
        DistributedSystem.Disconnect();
        Assert.Fail("NotConnectedException should have occurred when " +
          "disconnecting without having connected.");
    }
    catch (NotConnectedException ex)
    {
        Util.Log("Got an expected exception in DistributedSystem.disconnect: " + ex);
    }

    // A plain connect must work; close regardless of outcome.
    try
    {
        CacheHelper.ConnectName("ConnTest");
    }
    finally
    {
        CacheHelper.Close();
    }
}
/// <summary>
/// Initialization callback called by Spring. Responsible for connecting to the distributed system
/// and creating the cache.
/// </summary>
public void AfterPropertiesSet()
{
    AssertUtils.ArgumentNotNull("name", name, "Cache name can not be null");
    Properties mergedProps = MergePropertes();

    // Drop any stale connection before establishing a fresh one.
    if (DistributedSystem.IsConnected)
    {
        DistributedSystem.Disconnect();
    }
    system = DistributedSystem.Connect(distributedSystemName, mergedProps);
    log.Info("Connected to Distributed System [" + system.Name + "]");

    // Prefer an already-open cache; otherwise create one, optionally
    // initialized from a cache XML file.
    String msg = null;
    try
    {
        cache = CacheFactory.GetInstance(system);
        msg = "Retrieved existing";
    }
    catch (Exception)
    {
        if (cacheXml != null)
        {
            //TODO call Create method that takes CacheAttributes
            cache = CacheFactory.Create(name, system, cacheXml);
            log.Debug("Initialized cache from " + cacheXml);
        }
        else
        {
            cache = CacheFactory.Create(name, system);
        }
        //TODO call Create method that takes CacheAttributes
        msg = "Created";
    }
    log.Info(msg + " GemFire v." + CacheFactory.Version + " Cache ['" + cache.Name + "']");
}
/// <summary>
/// Runs the base-class test initialization, then caches the helper's
/// distributed-system handle for direct use by this test class.
/// </summary>
public override void InitTests()
{
    base.InitTests();
    m_dsys = CacheHelper.DSYS;
}
/// <summary>
/// Benchmark client: connects to GemFire, performs repeated 1KB puts into
/// /root/exampleRegion, and reports mean and best-sample throughput figures.
/// </summary>
public static void Main()
{
    /* Total number of benchmark samples to benchmark and the number of puts
     * to make for each sample. */
    const int cnBenchmarkedSamples = 60, cnOperationsPerSample = 5000;
    DistributedSystem MyDistributedSystem = null;
    Cache MyCache = null;
    try
    {
        DateTime[] BenchmarkedItemTimes = new DateTime[cnBenchmarkedSamples];

        /* Determine what the serialized overhead is so the payload totals
         * exactly 1KB on the wire.
         * NOTE(review): BinaryFormatter is obsolete and unsafe for untrusted
         * data; here it only serializes an empty byte[] locally to measure
         * overhead, which is benign. */
        MemoryStream SerializedStream = new MemoryStream();
        new BinaryFormatter().Serialize(SerializedStream, new byte[0]);
        /* The payload size is done in this manner because we want a 1KB size,
         * and, therefore, the overhead must be backed out of the overall length. */
        byte[] Payload = new byte[1024 - SerializedStream.Length];
        SerializedStream.Close();

        DateTime StartingTime;
        Console.WriteLine("* Connecting to the distributed system and creating the cache.");

        /* Properties can be passed to GemFire through two different mechanisms: the
         * Properties object as is done below or the gfcpp.properties file. The
         * settings passed in a Properties object take precedence over any settings
         * in a file. This way the end-user cannot bypass any required settings. */
        Properties DistributedSystemProperties = new Properties();
        DistributedSystemProperties.Insert("log-file", "C:/temp/benchmarkClient.log");
        DistributedSystemProperties.Insert("log-level", "debug");
        // Set the name used to identify the member of the distributed system.
        DistributedSystemProperties.Insert("name", "BenchmarkHierarchicalClient");
        /* The cache-xml-file initializes the cache when it is created; it keeps
         * common settings updatable without a build/test/deploy cycle. */
        DistributedSystemProperties.Insert("cache-xml-file", "BenchmarkHierarchicalClient.xml");
        /* Point at the license used by the GemFire installation (as located via
         * the GEMFIRE environment variable). */
        DistributedSystemProperties.Insert("license-file", "../../gfCppLicense.zip");

        // Connect to the GemFire distributed system.
        MyDistributedSystem = DistributedSystem.Connect("BenchmarkClient", DistributedSystemProperties);

        // Create the cache. This causes the cache-xml-file to be parsed.
        MyCache = CacheFactory.Create("BenchmarkClient", MyDistributedSystem);

        // Get the example region which is a subregion of /root.
        Region MyExampleRegion = MyCache.GetRegion("/root/exampleRegion");
        Console.WriteLine("{0}* Region, {1}, was opened in the cache.{2}",
            Environment.NewLine, MyExampleRegion.FullPath, Environment.NewLine);
        Console.WriteLine("Please wait while the benchmark tests are performed.");

        StartingTime = System.DateTime.Now;
        // Perform benchmark until cnBenchmarkedSamples are executed.
        for (int nCurrentBenchmarkedItem = 0;
            nCurrentBenchmarkedItem < cnBenchmarkedSamples;
            nCurrentBenchmarkedItem++)
        {
            for (int nOperations = 0; nOperations < cnOperationsPerSample; nOperations++)
            {
                /* Perform the serialization every time to more accurately
                 * represent the normal behavior of an application. */
                MyExampleRegion.Put("Key3", CacheableBytes.Create(Payload));
            }
            // Record the completion time of each sample for later analysis.
            BenchmarkedItemTimes[nCurrentBenchmarkedItem] = System.DateTime.Now;
        }
        Console.WriteLine("{0}Finished benchmarking. Analyzing the results.", Environment.NewLine);

        long nTotalOperations = cnBenchmarkedSamples * cnOperationsPerSample;
        // Calculate the total time for the benchmark.
        TimeSpan BenchmarkTimeSpan = BenchmarkedItemTimes[cnBenchmarkedSamples - 1] - StartingTime;

        // Find the best (fastest) sample by comparing adjacent timestamps.
        TimeSpan BestSampleTime = BenchmarkedItemTimes[0] - StartingTime;
        for (int nCurrentSample = 1; nCurrentSample < BenchmarkedItemTimes.Length; nCurrentSample++)
        {
            TimeSpan CurrentSampleTime =
                BenchmarkedItemTimes[nCurrentSample] - BenchmarkedItemTimes[nCurrentSample - 1];
            if (CurrentSampleTime < BestSampleTime)
            {
                BestSampleTime = CurrentSampleTime;
            }
        }

        Console.WriteLine("{0}Benchmark Statistics:", Environment.NewLine);
        Console.WriteLine("\tNumber of Samples: {0}", cnBenchmarkedSamples);
        Console.WriteLine("\t1KB Operations/Sample: {0}", cnOperationsPerSample);
        Console.WriteLine("\tTotal 1KB Operations: {0}", nTotalOperations);
        Console.WriteLine("\tTotal Time: {0:N2} seconds", BenchmarkTimeSpan.TotalSeconds);
        Console.WriteLine("{0}Benchmark Averages (Mean):", Environment.NewLine);
        Console.WriteLine("\tKB/Second: {0:N2}",
            nTotalOperations / BenchmarkTimeSpan.TotalSeconds);
        Console.WriteLine("\tBytes/Second: {0:N2}",
            (1024 * nTotalOperations) / BenchmarkTimeSpan.TotalSeconds);
        Console.WriteLine("\tMilliseconds/KB: {0:N2}",
            BenchmarkTimeSpan.TotalMilliseconds / nTotalOperations);
        Console.WriteLine("\tNanoseconds/KB: {0}",
            BenchmarkTimeSpan.Ticks / nTotalOperations);
        Console.WriteLine("\tNanoseconds/Byte: {0:N2}",
            BenchmarkTimeSpan.Ticks / (1024D * nTotalOperations));
        Console.WriteLine("{0}Best Benchmark Results:", Environment.NewLine);
        Console.WriteLine("\tKB/Second = {0:N2}",
            cnOperationsPerSample / BestSampleTime.TotalSeconds);
        Console.WriteLine("\tBytes/Second = {0:N2}",
            (1024 * cnOperationsPerSample) / BestSampleTime.TotalSeconds);
        Console.WriteLine("\tMilliseconds/KB = {0:N2}",
            BestSampleTime.TotalMilliseconds / cnOperationsPerSample);
        Console.WriteLine("\tNanoseconds/KB: {0}",
            BestSampleTime.Ticks / cnOperationsPerSample);
        Console.WriteLine("\tNanoseconds/Byte: {0:N2}",
            BestSampleTime.Ticks / (1024D * cnOperationsPerSample));

        // Keep the console active until <Enter> is pressed.
        Console.WriteLine("{0}---[ Press <Enter> to End the Application ]---", Environment.NewLine);
        Console.ReadLine();
    }
    catch (Exception ThrownException)
    {
        Console.Error.WriteLine(ThrownException.Message);
        Console.Error.WriteLine("---[ Press <Enter> to End the Application ]---");
        Console.Error.WriteLine(ThrownException.StackTrace);
        Console.ReadLine();
    }
    finally
    {
        /* While there are not any ramifications of terminating without closing the cache
         * and disconnecting from the distributed system, it is considered a best practice
         * to do so. */
        try
        {
            Console.WriteLine("Closing the cache and disconnecting.{0}", Environment.NewLine);
        }
        catch { /* Ignore any exceptions */ }
        try
        {
            /* Close the cache. This terminates the cache and releases all the resources.
             * Generally speaking, after a cache is closed, any further method calls on
             * it or region object will throw an exception. */
            MyCache.Close();
        }
        catch { /* Ignore any exceptions */ }
        try
        {
            /* BUG FIX: the message above promised a disconnect, but the
             * original never disconnected from the distributed system. */
            DistributedSystem.Disconnect();
        }
        catch { /* Ignore any exceptions */ }
    }
}
/// <summary>
/// Closes the cache and disconnects from the distributed system.
/// </summary>
public void Dispose()
{
    bool cacheIsOpen = cache != null && !cache.IsClosed;
    if (cacheIsOpen)
    {
        cache.Close();
    }
    cache = null;

    // The distributed-system connection is only torn down when configured.
    if (!disconnectOnClose)
    {
        return;
    }
    bool connected = system != null && DistributedSystem.IsConnected;
    if (connected)
    {
        DistributedSystem.Disconnect();
    }
    system = null;
}
/// <summary>
/// Initialization callback called by Spring. Responsible for connecting to the distributed system
/// and creating the cache.
/// </summary>
public void AfterPropertiesSet()
{
    AssertUtils.ArgumentNotNull("name", name, "Cache name can not be null");
    Properties gemfirePropertes = MergePropertes();

    /* CONSISTENCY FIX: the sibling variant of this method disconnects any
     * existing distributed-system connection before reconnecting; without
     * this guard, Connect on an already-connected system fails. */
    if (DistributedSystem.IsConnected)
    {
        DistributedSystem.Disconnect();
    }
    system = DistributedSystem.Connect(distributedSystemName, gemfirePropertes);
    log.Info("Connected to Distributed System [" + system.Name + "]");

    // First look for an open cache; fall back to creating one (optionally
    // from a cache XML file) when none exists.
    String msg = null;
    try
    {
        cache = CacheFactory.GetInstance(system);
        msg = "Retrieved existing";
    }
    catch (Exception)
    {
        if (cacheXml == null)
        {
            cache = CacheFactory.Create(name, system);
        }
        else
        {
            //TODO call Create method that takes CacheAttributes
            cache = CacheFactory.Create(name, system, cacheXml);
            log.Debug("Initialized cache from " + cacheXml);
        }
        //TODO call Create method that takes CacheAttributes
        msg = "Created";
    }
    log.Info(msg + " GemFire v." + CacheFactory.Version + " Cache ['" + cache.Name + "']");
}
/// <summary>
/// Hierarchical cache client example: connects to GemFire, creates a
/// sub-region under "root", then demonstrates get/put, invalidation, and
/// destruction of cached entries against a server at localhost:40404.
/// </summary>
public static void Main()
{
    DistributedSystem MyDistributedSystem = null;
    Cache MyCache = null;
    string sKey = null;
    CacheableString sValue = null;
    try
    {
        Console.WriteLine("* Connecting to the distributed system and creating the cache.");

        /* Properties can be passed to GemFire through two different mechanisms: the
         * Properties object as is done below or the gemfire.properties file. The
         * settings passed in a Properties object take precedence over any settings
         * in a file. This way the end-user cannot bypass any required settings.
         *
         * See gemfire.properties for details on some of the other settings used in this
         * project, and chapter 5, "System Configuration", in the "System
         * Administrator's Guide" for all possible settings. */
        Properties DistributedSystemProperties = new Properties();
        DistributedSystemProperties.Insert("name", "CacheClient");
        /* The cache-xml-file initializes the cache when it is created. An XML
         * file isn't required — everything can be specified in code — but it
         * isolates common settings that can change without a build cycle. */
        DistributedSystemProperties.Insert("cache-xml-file", "HierarchicalClient.xml");
        /* Point at the license used by the GemFire installation (as located via
         * the GEMFIRE environment variable). */
        DistributedSystemProperties.Insert("license-file", "../../gfCppLicense.zip");
        DistributedSystemProperties.Insert("log-file", "./csharpclient.log");
        DistributedSystemProperties.Insert("log-level", "finest");
        /* Override the mcast-port setting so the client runs "standalone".
         * The client and server must run in separate distributed systems. */
        //DistributedSystemProperties.Insert("mcast-port", "0");

        // Connect to the GemFire distributed system.
        MyDistributedSystem = DistributedSystem.Connect("LocalDS", DistributedSystemProperties);

        // Create the cache. This causes the cache-xml-file to be parsed.
        MyCache = CacheFactory.Create("localCache", MyDistributedSystem);

        // Prepare the attributes needed to create a sub-region.
        AttributesFactory MyAttributesFactory = new AttributesFactory();
        /* For native clients DistributedAck and DistributedNoAck work identically. */
        MyAttributesFactory.SetScope(ScopeType.DistributedAck);
        /* Endpoints is a comma delimited list of host:port pairs of "server"
         * caches with which to connect. */
        MyAttributesFactory.SetEndpoints("localhost:40404");
        MyAttributesFactory.SetClientNotificationEnabled(true);

        /* Because of implementation details, it is best not to cache data in a
         * root-level region, so create a sub-region under "root" for the data. */
        Region MyExampleRegion = MyCache.GetRegion("root").CreateSubRegion(
            "exampleRegion", MyAttributesFactory.CreateRegionAttributes());
        Console.WriteLine(String.Format("{0}* Region, {1}, was created in the cache.",
            Environment.NewLine, MyExampleRegion.FullPath));

        /* BUG FIX: the loop below fetches four entries (Key0..Key3); the
         * original message said "three". */
        Console.WriteLine("* Getting four values from the Hierarchical Server.");
        // Retrieve several values from the cache.
        for (int nCount = 0; nCount < 4; nCount++)
        {
            sKey = string.Format("Key{0}", nCount);
            Console.WriteLine(String.Format("* Requesting object: {0}{1}", sKey, Environment.NewLine));
            /* Because the entry doesn't exist locally, the request is passed
             * to the server and ultimately to SimpleCacheLoader.Load().
             * NOTE(review): the `as` cast yields null if the entry isn't a
             * CacheableString, making ToString() throw — assumed impossible
             * with the example's loader; confirm against the server config. */
            sValue = MyExampleRegion.Get(sKey) as CacheableString;
            Console.WriteLine(String.Format("* Retrieved object: ({0})", sValue.ToString()));
        }
        Console.WriteLine("* If you look at the Cache Server's console, you can see the CacheListener notifications that happened in response to the gets.");
        Console.WriteLine("{0}---[ Press <Enter> to continue. ]---", Environment.NewLine);
        Console.ReadLine();

        // Demonstrate the process needed to manually deserialize the object from the cache.
        Console.WriteLine(string.Format("* Manually deserializing the object for Key0: ({0})",
            MyExampleRegion.Get("Key0")));

        // Demonstrate the static FromCache method in the CachedItem class and modify the object.
        Console.WriteLine("* Using the FromCache() method and modifying the object for Key0");
        sValue = MyExampleRegion.Get("Key0") as CacheableString;
        Console.WriteLine(string.Format("* Original value: ({0})", sValue.ToString()));

        /* This modifies the object associated with Key0 and uses CacheSerializer
         * to perform manual serialization. */
        String cachedItem = "PDA";
        MyExampleRegion.Put("Key0", cachedItem);

        // Reread the object from the cache.
        sValue = (CacheableString)MyExampleRegion.Get("Key0");
        Console.WriteLine(string.Format("* Retrieved updated object: {0}", sValue));

        Console.WriteLine("* Invalidating the data for Key2");
        /* Invalidating a cached item removes the object but keeps the key; a
         * subsequent request is served via a CacheLoader if possible. */
        MyExampleRegion.Invalidate("Key2");
        Console.WriteLine("* Requesting Key2 after the invalidation.");
        sValue = (CacheableString)MyExampleRegion.Get("Key2");
        Console.WriteLine(string.Format("* Retrieved object: {0}", sValue));

        Console.WriteLine("* Destroying Key3");
        // Destroying a cached item removes both the object and the key.
        MyExampleRegion.Destroy("Key3");

        Console.WriteLine("{0}---[ Press <Enter> to End the Application ]---", Environment.NewLine);
        Console.ReadLine();
    }
    catch (Exception ThrownException)
    {
        Console.Error.WriteLine(ThrownException.Message);
        Console.Error.WriteLine(ThrownException.StackTrace);
        Console.Error.WriteLine("---[ Press <Enter> to End the Application ]---");
        Console.ReadLine();
    }
    finally
    {
        /* While there are not any ramifications of terminating without closing the cache
         * and disconnecting from the distributed system, it is considered a best practice
         * to do so. */
        try
        {
            Console.WriteLine("Closing the cache and disconnecting.{0}", Environment.NewLine);
        }
        catch { /* Ignore any exceptions */ }
        try
        {
            /* Close the cache. This terminates the cache and releases all the resources.
             * Generally speaking, after a cache is closed, any further method calls on
             * it or region object will throw an exception. */
            MyCache.Close();
        }
        catch { /* Ignore any exceptions */ }
        try
        {
            /* BUG FIX: the original only nulled the reference here and never
             * actually disconnected from the distributed system. */
            DistributedSystem.Disconnect();
            MyDistributedSystem = null;
        }
        catch { /* Ignore any exceptions */ }
    }
}
/// <summary>
/// Closes the cache via CloseCacheKeepAlive (presumably with keep-alive
/// semantics for durable subscriptions — see that helper) and clears the
/// cached distributed-system reference. Unlike the guarded variant, this
/// one neither checks the connection state nor disconnects.
/// </summary>
public static void CloseKeepAlive()
{
    CloseCacheKeepAlive();
    m_dsys = null;
}
/// <summary>
/// Closes the cache via CloseCache and clears the cached distributed-system
/// reference. Note this does not call DistributedSystem.Disconnect.
/// </summary>
public static void Close()
{
    CloseCache();
    m_dsys = null;
}