private void AddPicDataStream(string key, byte[] imageBytes)
{
    StrKey strKey = new StrKey(key);
    ByteValue byteVal = new ByteValue(imageBytes);
    try
    {
        picStream = base.CreateDirStream<StrKey, ByteValue>("H2OAlertsPics", true);
        picStream.Append(strKey, byteVal);
        // logger.Log("WaterAlert picture has been written to {0}.", picStream.Get(strKey).ToString());
    }
    catch (Exception e)
    {
        logger.Log("Error while writing images to dir stream: {0}", e.ToString());
    }
    finally
    {
        if (picStream != null)
        {
            picStream.Close(); // release the stream even if Append throws
        }
    }
}
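A minimal sketch of a call site for this method, assuming it is invoked from within the same class after an alert fires; the file path and the key scheme are illustrative only:

    // Hypothetical call site (same class, since the method is private).
    // The path and the timestamp-based key are assumptions for illustration.
    byte[] snapshot = File.ReadAllBytes(@"C:\alerts\latest.jpg");
    AddPicDataStream("wateralert-" + DateTime.UtcNow.Ticks, snapshot);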
public override void Close()
{
    stream.Close();
    base.Close();
}
public List<RetVal> PredictOccupancy(long startSlotIndex, long endSlotIndex)
{
    List<RetVal> retVal = new List<RetVal>();
    System.IO.StreamReader datafile = null;

    // Assuming the data file has one occupancy value per line, skip ahead to startSlotIndex.
    if (dataFilePath != null)
    {
        string line;
        int counter = 0;
        datafile = new System.IO.StreamReader(this.dataFilePath);
        // Consume exactly startSlotIndex lines so the next ReadLine() returns the value for startSlotIndex.
        while (counter < startSlotIndex && (line = datafile.ReadLine()) != null)
        {
            counter++;
        }
    }

    StreamFactory streamFactory = StreamFactory.Instance;
    FqStreamID fq_sid = new FqStreamID("preheatnaive", "A", "TestBS");
    CallerInfo ci = new CallerInfo(null, "A", "A", 1);
    streamFactory.deleteStream(fq_sid, ci);
    IStream occupancyGroundTruthStream = streamFactory.openValueDataStream<StrKey, ByteValue>(
        fq_sid, ci, null,
        StreamFactory.StreamSecurityType.Plain, CompressionType.None, StreamFactory.StreamOp.Write,
        null, 4 * 1024 * 1024, 1, new Logger());

    int slotIndex = 0;
    long startTime, retrievalTime, computeTime, appendTime;
    while (true)
    {
        startTime = DateTime.Now.Ticks;
        List<int> currentPOV = ConstructCurrentPOV(occupancyGroundTruthStream, slotIndex);
        List<List<int>> previousDaysPOV = ConstructPreviousPOV(occupancyGroundTruthStream, slotIndex);
        retrievalTime = DateTime.Now.Ticks - startTime;

        startTime = DateTime.Now.Ticks;
        int predictedOccupancy = Predict(currentPOV, previousDaysPOV);
        computeTime = DateTime.Now.Ticks - startTime;

        startTime = DateTime.Now.Ticks;
        if (datafile == null)
        {
            // No data file to read the ground truth from: append a random 0/1 occupancy value.
            occupancyGroundTruthStream.Append(occupancyKey, new ByteValue(BitConverter.GetBytes(random.Next(2))), slotIndexBase + slotIndex);
        }
        else
        {
            string line = datafile.ReadLine();
            if (line == null)
            {
                Console.WriteLine("reached the end of datafile");
                break;
            }
            occupancyGroundTruthStream.Append(occupancyKey, new ByteValue(StreamFactory.GetBytes(line)), slotIndexBase + slotIndex);
        }
        slotIndex++;
        appendTime = DateTime.Now.Ticks - startTime;

        Console.WriteLine("Slot number {0} {1} {2} {3}", slotIndex, retrievalTime, computeTime, appendTime);
        using (results = File.AppendText(outputFilePath))
        {
            results.WriteLine("Slot number {0} {1} {2} {3}", slotIndex, retrievalTime, computeTime, appendTime);
        }
        // retVal.Add(new RetVal(endTime - startTime, predictedOccupancy));
        if (slotIndex == endSlotIndex)
        {
            break;
        }
    }

    if (datafile != null)
    {
        datafile.Close();
    }
    occupancyGroundTruthStream.Close();
    return retVal;
}
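The helpers ConstructCurrentPOV, ConstructPreviousPOV, and Predict are not part of this snippet. For orientation, here is a minimal sketch of what a naive PreHeat-style Predict could look like, assuming each previous-day vector extends one slot past the current partial occupancy vector; the Hamming-distance ranking, K = 5, and the tie-breaking rule are all assumptions, and System.Linq is required:

    // Sketch: rank previous days by Hamming distance between their prefix and
    // today's partial occupancy vector, then majority-vote the K closest days'
    // value at the slot being predicted.
    private int Predict(List<int> currentPOV, List<List<int>> previousDaysPOV)
    {
        const int K = 5; // assumed neighborhood size
        var ranked = previousDaysPOV
            .Where(day => day.Count > currentPOV.Count)
            .OrderBy(day => currentPOV.Zip(day, (a, b) => a == b ? 0 : 1).Sum())
            .Take(K)
            .ToList();
        if (ranked.Count == 0)
        {
            return 0; // no history yet: default to unoccupied
        }
        int votes = ranked.Sum(day => day[currentPOV.Count]);
        return votes * 2 >= ranked.Count ? 1 : 0; // ties break toward occupied
    }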
/// <summary>
/// Close the stream.
/// </summary>
public void Close()
{
    buffer = null;
    bufferedRequest = null;
    stream.Close();
}
public void Cleanup()
{
    vds.Close();
}
public void Close()
{
    stream.Close();
}
public void Cleanup()
{
    dds_byte_val.Close();
}
public void Run(string CallerName, string HomeName, string AppName, string StreamName,
                string RandName, long stime, long etime,
                StreamType stream_type, StreamOperation stream_op,
                StreamFactory.StreamDataType ptype, CompressionType ctype,
                int ChunkSize, int ThreadPoolSize, Byte[] value, int num_operations,
                SynchronizerType synctype,
                int max_key = 0, string address = null, bool doCosts = false, bool doRaw = false)
{
    // Set experiment directory
    CallerInfo ci = new CallerInfo(null, CallerName, CallerName, 1);
    exp_directory = Path.GetFullPath((null != ci.workingDir) ? ci.workingDir : Directory.GetCurrentDirectory());
    exp_directory = exp_directory + "/" + HomeName + "/" + AppName + "/" + StreamName;

    if (max_key == 0)
    {
        max_key = num_operations;
    }

    // Set a description/tag for the experiment
    this.exp_id = "Directory: " + HomeName + "/" + AppName + "/" + StreamName +
                  " Caller:" + CallerName +
                  " Stream Type:" + stream_type +
                  " Stream Op: " + stream_op +
                  " Stream Ptype: " + ptype +
                  " Compression Type: " + ctype +
                  " Value size: " + value.Length +
                  " num_operations: " + max_key +
                  " actual_num_ops: " + num_operations +
                  " Sync type: " + synctype +
                  " Do costs? " + doCosts +
                  " Chunk Size: " + ChunkSize +
                  " ThreadPool Size:" + ThreadPoolSize;

    this.compressed_exp_id =
        " ST:" + stream_type +
        " OP: " + stream_op +
        " PT: " + ptype +
        " CT: " + ctype +
        " VS: " + value.Length +
        " I:" + num_operations +
        " MK:" + max_key +
        " SYNC: " + synctype +
        " chsize: " + ChunkSize +
        " nThreads: " + ThreadPoolSize;

    // Set remote storage server account info
    string AzureaccountName = ConfigurationManager.AppSettings.Get("AccountName");
    string AzureaccountKey = ConfigurationManager.AppSettings.Get("AccountSharedKey");
    string S3accountName = ConfigurationManager.AppSettings.Get("S3AccountName");
    string S3accountKey = ConfigurationManager.AppSettings.Get("S3AccountSharedKey");

    LocationInfo Li;
    if (synctype == SynchronizerType.Azure)
    {
        Li = new LocationInfo(AzureaccountName, AzureaccountKey, SynchronizerType.Azure);
    }
    else if (synctype == SynchronizerType.AmazonS3)
    {
        Li = new LocationInfo(S3accountName, S3accountKey, SynchronizerType.AmazonS3);
    }
    else
    {
        Li = null;
    }

    StreamFactory sf = StreamFactory.Instance;
    IStream stream = null;
    FqStreamID streamid = new FqStreamID(HomeName, AppName, StreamName);

    // Set op : R/W
    StreamFactory.StreamOp rw;
    if (stream_op == StreamOperation.RandomKeyRandomValueAppend ||
        stream_op == StreamOperation.RandomKeySameValueAppend ||
        stream_op == StreamOperation.SameKeyRandomValueAppend ||
        stream_op == StreamOperation.SameKeySameValueAppend)
    {
        rw = StreamFactory.StreamOp.Write;
    }
    else
    {
        rw = StreamFactory.StreamOp.Read;
    }

    // Initialize costs
    CostsHelper costhelper = null;
    double baselineStorageKV = 0;
    if (doCosts)
    {
        costhelper = new CostsHelper();
        costhelper.getCurrentCpuUsage();
        costhelper.getNetworkUsage();
    }

    if (stream_type == StreamType.CloudRaw)
    {
        if (!Directory.Exists(exp_directory))
        {
            Directory.CreateDirectory(exp_directory);
        }
        Logger logger = new Logger();
        Byte[] val = new Byte[value.Length * num_operations];

        // DateTime Date = new DateTime(DateTime.UtcNow.Ticks);
        // string cname = String.Format("CloudRaw-{0}", Date.ToString("yyyy-MM-dd"));
        // string bname = String.Format("{0}", Date.ToString("HH-mm-ss"));
        // string cname = String.Format("cloudraw-{0}", RandomString(4));
        // string bname = String.Format("{0}", RandomString(4));
        string cname = String.Format("cloudraw-{0}", RandName);
        string bname = String.Format("{0}", RandName);

        if (stream_op == StreamOperation.RandomKeyGet ||
            stream_op == StreamOperation.RandomKeyGetMultipleSegments ||
            stream_op == StreamOperation.RandomKeyGetAll)
        {
            doRawCloudPerf(val, SynchronizerType.Azure, SynchronizeDirection.Download,
                           exp_directory, logger, containerName: cname, blobName: bname);
            logger.Dump(exp_directory + "/log");
        }
        else
        {
            doRawCloudPerf(val, SynchronizerType.Azure, SynchronizeDirection.Upload,
                           exp_directory, logger, containerName: cname, blobName: bname);
            logger.Dump(exp_directory + "/log");
        }
        return;
    }

    if (stream_type == StreamType.DiskRaw)
    {
        if (!Directory.Exists(exp_directory))
        {
            Directory.CreateDirectory(exp_directory);
        }
        Logger logger = doDiskRaw(stream_op, num_operations, value.Length, ptype, exp_directory);
        logger.Dump(exp_directory + "/log");
        return;
    }

    // Are we getting raw disk throughput?
    if (stream_type == StreamType.Raw)
    {
        string ret = doDiskSpeed((value.Length * num_operations) / 1000 + "K", value.Length / 1000 + "K", rw);
        if (!Directory.Exists(exp_directory))
        {
            Directory.CreateDirectory(exp_directory);
        }
        File.WriteAllText(exp_directory + "/log", ret);
        return;
    }

    // Populate the keys and the values
    Random random = new Random(DateTime.Now.Millisecond);
    StrKey[] keys = new StrKey[max_key];
    for (int i = 0; i < max_key; ++i)
    {
        keys[i] = new StrKey("" + i);
    }

    /*
     * List<ByteValue> vals = new List<ByteValue>(num_operations);
     * Byte[][] tmp = new Byte[num_operations][];
     * for (int i = 0; i < num_operations; ++i)
     * {
     *     tmp[i] = new Byte[value.Length];
     *     random.NextBytes(tmp[i]);
     * }
     *
     * for (int i = 0; i < num_operations; ++i)
     * {
     *     keys[i] = new StrKey("" + i);
     *     vals.Add(new ByteValue(tmp[i]));
     *     // vals[i] = new ByteValue(tmp);
     * }
     */

    Logger log = new Logger();

    // Open stream for different types of experiments
    if (stream_type == StreamType.Local && ptype == StreamFactory.StreamDataType.Values)
    {
        stream = sf.openValueDataStream<StrKey, ByteValue>(streamid, ci, null,
            StreamFactory.StreamSecurityType.Plain, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else if (stream_type == StreamType.LocalEnc && ptype == StreamFactory.StreamDataType.Values)
    {
        stream = sf.openValueDataStream<StrKey, ByteValue>(streamid, ci, null,
            StreamFactory.StreamSecurityType.Secure, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else if (stream_type == StreamType.Remote && ptype == StreamFactory.StreamDataType.Values)
    {
        stream = sf.openValueDataStream<StrKey, ByteValue>(streamid, ci, Li,
            StreamFactory.StreamSecurityType.Plain, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else if (stream_type == StreamType.RemoteEnc && ptype == StreamFactory.StreamDataType.Values)
    {
        stream = sf.openValueDataStream<StrKey, ByteValue>(streamid, ci, Li,
            StreamFactory.StreamSecurityType.Secure, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else if (stream_type == StreamType.Local && ptype == StreamFactory.StreamDataType.Files)
    {
        stream = sf.openFileDataStream<StrKey>(streamid, ci, null,
            StreamFactory.StreamSecurityType.Plain, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else if (stream_type == StreamType.LocalEnc && ptype == StreamFactory.StreamDataType.Files)
    {
        stream = sf.openFileDataStream<StrKey>(streamid, ci, null,
            StreamFactory.StreamSecurityType.Secure, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else if (stream_type == StreamType.Remote && ptype == StreamFactory.StreamDataType.Files)
    {
        stream = sf.openFileDataStream<StrKey>(streamid, ci, Li,
            StreamFactory.StreamSecurityType.Plain, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else if (stream_type == StreamType.RemoteEnc && ptype == StreamFactory.StreamDataType.Files)
    {
        stream = sf.openFileDataStream<StrKey>(streamid, ci, Li,
            StreamFactory.StreamSecurityType.Secure, ctype, rw, address, ChunkSize, ThreadPoolSize, log);
    }
    else
    {
        return;
    }

    if (stream_op == StreamOperation.RandomKeyRandomValueAppend)
    {
        List<ByteValue> vals = new List<ByteValue>(num_operations);
        Byte[][] tmp = new Byte[num_operations][];
        for (int i = 0; i < num_operations; ++i)
        {
            tmp[i] = new Byte[value.Length];
            random.NextBytes(tmp[i]);
        }
        for (int i = 0; i < num_operations; ++i)
        {
            vals.Add(new ByteValue(tmp[i]));
        }
        for (int i = 0; i < num_operations; ++i)
        {
            baselineStorageKV += keys[i].Size();
            baselineStorageKV += vals[i].Size();
            stream.Append(keys[i], vals[i]);
        }
        stream.Close();
    }
    else if (stream_op == StreamOperation.RandomKeySameValueAppend)
    {
        Byte[] singleval = new Byte[value.Length];
        random.NextBytes(singleval);
        ByteValue singlebv = new ByteValue(singleval);
        for (int i = 0; i < num_operations; ++i)
        {
            baselineStorageKV += keys[i].Size();
            baselineStorageKV += value.Length;
            stream.Append(keys[i], singlebv);
        }
        stream.Close();
    }
    else if (stream_op == StreamOperation.SameKeySameValueAppend)
    {
        StrKey key = new StrKey("ExpKey");
        Byte[] singleval = new Byte[value.Length];
        random.NextBytes(singleval);
        ByteValue singlebv = new ByteValue(singleval);
        for (int i = 0; i < num_operations; ++i)
        {
            stream.Append(key, singlebv);
            // System.Threading.Thread.Sleep(10);
        }
        stream.Close();
    }
    else if (stream_op == StreamOperation.RandomKeyGet ||
             stream_op == StreamOperation.RandomKeyGetMultipleSegments)
    {
        for (int i = 0; i < num_operations; ++i)
        {
            stream.Get(keys[random.Next(0, max_key)]);
        }
        stream.Close();
    }
    else if (stream_op == StreamOperation.RandomKeyGetAll)
    {
        StrKey key = new StrKey("ExpKey");
        for (int i = 0; i < num_operations;)
        {
            long st = 0;
            long et = -1;
            Console.WriteLine(stime + ":" + etime);
            while (et < st)
            {
                st = RandomLong(stime, etime, random);
                // et = RandomLong(stime, etime, random);
                et = st + (10 * 10 * TimeSpan.TicksPerMillisecond);
            }
            Console.WriteLine(st + ":" + et);
            IEnumerable<IDataItem> iterator = stream.GetAll(key, st, et);
            foreach (IDataItem data in iterator)
            {
                data.GetVal();
                ++i;
                if (i == num_operations)
                {
                    break;
                }
            }
        }
        stream.Close();
    }
    else if (stream_op == StreamOperation.SameKeyRandomValueAppend)
    {
        StrKey key = new StrKey("ExpKey");
        for (int i = 0; i < num_operations; ++i)
        {
            baselineStorageKV += key.Size();
            // baselineStorageKV += vals[i].Size();
            // stream.Append(key, vals[i]);
        }
        stream.Close();
    }
    else
    {
        for (int i = 0; i < num_operations; ++i)
        {
            stream.Get(new StrKey("" + random.Next(0, num_operations - 1)));
        }
        stream.Close();
    }

    // Dump the instrumentation logs
    stream.DumpLogs(exp_directory + "/log");

    // Collect costs usage
    List<string> costs = new List<string>();
    if (doCosts)
    {
        costs.Add(DateTime.UtcNow.Ticks + ": CPU: " + costhelper.getCurrentCpuUsage());
        costs.Add(DateTime.UtcNow.Ticks + ": Network: " + costhelper.getNetworkUsage());
        costs.Add(DateTime.UtcNow.Ticks + ": DataRelated Storage: " + costhelper.getStorageUsage(this.exp_directory, dataRelated: true) / 1000.0f);
        costs.Add(DateTime.UtcNow.Ticks + ": Constant Storage: " + costhelper.getStorageUsage(this.exp_directory, dataRelated: false) / 1000.0f);
        costs.Add(DateTime.UtcNow.Ticks + ": Baseline Storage: " + baselineStorageKV / 1000.0f);
    }
    File.AppendAllLines(exp_directory + "/log", costs);

    // sf.deleteStream(streamid, ci);
}
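For reference, a hypothetical invocation of Run from a benchmark driver might look like the following; the enclosing class name, every argument value, and the availability of a SynchronizerType.None member for local runs are assumptions, not part of the code above:

    // Hypothetical driver; all values below are illustrative, not defaults.
    var exp = new LocalExperiment();            // assumed enclosing class
    byte[] payload = new byte[4 * 1024];        // 4 KB values
    exp.Run("ExpCaller", "Home1", "App1", "S1", "ab12",
            stime: 0, etime: DateTime.UtcNow.Ticks,
            stream_type: StreamType.Local,
            stream_op: StreamOperation.RandomKeySameValueAppend,
            ptype: StreamFactory.StreamDataType.Values,
            ctype: CompressionType.None,
            ChunkSize: 4 * 1024 * 1024, ThreadPoolSize: 1,
            value: payload, num_operations: 1000,
            synctype: SynchronizerType.None);   // assumed enum member for local streams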