internal async Task<Entity> RequestMultipart(string endpoint,
                                             JToken data,
                                             BlobList input,
                                             HttpMethod httpMethod = null,
                                             Dictionary<string, string> additionalHeaders = null,
                                             string contentType = ContentType.NXREQUEST)
{
    if (data == null)
    {
        throw new ArgumentNullException(nameof(data));
    }
    if (input == null)
    {
        throw new ArgumentNullException(nameof(input));
    }
    HttpRequestMessage request = new HttpRequestMessage(httpMethod ?? HttpMethod.Post,
        endpoint.StartsWith("/") ? endpoint.Substring(1) : endpoint);
    // Build the JSON part first, then append one part per blob.
    MultipartContent requestContent = BuildMultipartContent(data);
    foreach (Blob blob in input)
    {
        AddBlobToMultipartContent(requestContent, blob);
    }
    request.Content = requestContent;
    return await ProcessRequest(request, additionalHeaders);
}
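// A hypothetical call site for RequestMultipart, sketched from the Blob/BlobList
// helpers used elsewhere in this section. The endpoint string, the JSON payload
// and the AttachNotesExample method itself are illustrative assumptions, not
// part of the original client; it is assumed to live in the same class as
// RequestMultipart.
internal async Task<Entity> AttachNotesExample()
{
    BlobList input = new BlobList();
    input.Add(new Blob(IOHelper.CreateTempFile("This is just a note.")).SetFilename("note1.txt"));
    JToken data = JToken.Parse("{ \"params\": { \"document\": \"/default-domain/workspaces/notes\" } }");
    // Sends the JSON part plus one multipart section per blob.
    return await RequestMultipart("automation/Blob.AttachOnDocument", data, input);
}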
private void ExtractBlobsFromCurrentScreen()
{
    if (blobs != null)
    {
        blobs.Dispose();
        blobs = null;
    }
    if (raster == null)
    {
        return;
    }
    BlobDetector bd = new BlobDetector(raster,
        (int)((float)blobMinSize / Math.Ceiling(sipperFile.Ratio() * sipperFile.Ratio())));
    blobs = bd.ExtractBlobs();
    bd.Dispose();
    // Paint a bounding box for each extracted blob.
    foreach (Blob b in blobs)
    {
        PaintRectangle(panelDC, Color.Red, b);
        PaintRectangle(bufferDC, Color.Red, b);
    }
} /* ExtractBlobsFromCurrentScreen */
public void AttachBlobs()
{
    Blob blob = new Blob(IOHelper.CreateTempFile("This is just a note.")).SetFilename("note1.txt");
    Entity result = client.Operation("Blob.Attach")
                          .SetInput(blob)
                          .SetParameter("document", blobContainer.Path)
                          .Execute()
                          .Result;
    Assert.True(result is Blob);
    Assert.Equal("This is just a note.", IOHelper.ReadText(((Blob)result).File));

    BlobList blobs = new BlobList();
    blobs.Add(new Blob(IOHelper.CreateTempFile("This is another note.")).SetFilename("note2.txt"));
    blobs.Add(Blob.FromFile("Puppy.docx"));
    result = client.Operation("Blob.Attach")
                   .SetInput(blobs)
                   .SetParameter("document", blobContainer.Path)
                   .SetParameter("xpath", "files:files")
                   .Execute()
                   .Result;
    Assert.True(result is BlobList);
    blobs = (BlobList)result;
    Assert.Equal(2, blobs.Count);
    Assert.Equal("This is another note.", IOHelper.ReadText(blobs[0].File));
    Assert.True(IOHelper.AreFilesEqual("Puppy.docx", blobs[1].File.FullName));
}
public void FreeTest(int blobSize, int itemCount)
{
    var addressList = new ConcurrentStack<int>();
    var blobs = new BlobList<int>(blobSize);

    // Allocate itemCount addresses.
    for (var i = 0; i < itemCount; i++)
    {
        var address = blobs.Allocate();
        addressList.Push(address);
    }
    blobs.LivingCount.Should().Be(itemCount);

    // Free everything; the living count must drop back to zero.
    foreach (var address in addressList)
    {
        blobs.Free(address);
    }
    blobs.LivingCount.Should().Be(0);

    // Allocations after a full free keep the count consistent, even well past
    // the original item count.
    for (int i = 0; i < itemCount * 2; i++)
    {
        blobs.Allocate();
        blobs.LivingCount.Should().Be(i + 1);
    }
}
public void ClearBobs()
{
    if (blobs != null)
    {
        blobs.Dispose();
        blobs = null;
    }
} /* ClearBobs */
internal void Close()
{
    _buffer = null;
    _blobs = null;
    _strings = null;
    _signatures = null;
    _module = null;
}
private async Task<Entity> ProcessResponse(HttpResponseMessage response)
{
    if ((int)response.StatusCode == 204 || response.Content.Headers.ContentLength == 0)
    {
        return null;
    }
    Entity entity = null;
    MediaTypeHeaderValue contentType = response.Content.Headers.ContentType;
    bool isText = contentType.MediaType.Contains("text/");
    bool isJson = contentType.MediaType == ContentType.JSON || contentType.MediaType == ContentType.NXENTITY;
    bool isMultipart = response.Content.IsMimeMultipartContent();
    if ((int)response.StatusCode >= 400 && (int)response.StatusCode <= 499)
    {
        throw new ClientErrorException(response.StatusCode,
            isText || isJson ? await response.Content.ReadAsStringAsync() : string.Empty);
    }
    if ((int)response.StatusCode >= 500 && (int)response.StatusCode <= 599)
    {
        throw new ServerErrorException(response.StatusCode,
            isText || isJson ? await response.Content.ReadAsStringAsync() : string.Empty);
    }
    if (isText || isJson)
    {
        string result = await response.Content.ReadAsStringAsync();
        if (isJson)
        {
            entity = Marshaller.UnMarshal(JToken.Parse(result));
        }
        else
        {
            // Plain text: persist it to a temp file and wrap it as a Blob named
            // after the Content-Disposition header.
            FileInfo tmpFile = IOHelper.CreateTempFile(result);
            entity = new Blob(response.Content.Headers.ContentDisposition.FileNameStar
                              ?? response.Content.Headers.ContentDisposition.FileName).SetFile(tmpFile);
        }
    }
    else if (isMultipart)
    {
        // Multipart: read every part into its own temp file and return a BlobList.
        MultipartMemoryStreamProvider mp = await response.Content.ReadAsMultipartAsync();
        BlobList blobs = new BlobList();
        foreach (HttpContent part in mp.Contents)
        {
            blobs.Add(new Blob(IOHelper.CreateTempFile(await part.ReadAsStreamAsync())));
        }
        entity = blobs;
    }
    else
    {
        // Anything else is treated as a single binary blob.
        entity = new Blob(IOHelper.CreateTempFile(await response.Content.ReadAsStreamAsync()));
    }
    return entity;
}
internal void Load(BuildModule module)
{
    if (_module != null)
    {
        throw new InvalidOperationException();
    }
    _module = module;
    if (_isNew)
    {
        // First load: lay the metadata tables out back to back and size the buffer.
        if (_bufferLength == 0)
        {
            var image = _module.Image;
            _moduleTableOffset = BuildAssembly.RowSize * image.GetAssemblyCount();
            _typeTableOffset = _moduleTableOffset + BuildModule.RowSize;
            _methodTableOffset = _typeTableOffset + (BuildType.RowSize * image.GetTypeDefCount());
            _fieldTableOffset = _methodTableOffset + (BuildMethod.RowSize * image.GetMethodCount());
            _propertyTableOffset = _fieldTableOffset + (BuildField.RowSize * image.GetFieldCount());
            _eventTableOffset = _propertyTableOffset + (BuildProperty.RowSize * image.GetPropertyCount());
            _resourceTableOffset = _eventTableOffset + (BuildEvent.RowSize * image.GetEventCount());
            _bufferLength = _resourceTableOffset + (BuildResource.RowSize * image.GetManifestResourceCount());
        }
        _buffer = new byte[_bufferLength];
        _blobs = new BlobList();
        _strings = new StringSerializer();
        _signatures = new SignatureList(_strings);
        _objects = new ModuleObjectState(this);
    }
    else
    {
        // Reload: deserialize the previously saved state file.
        using (var accessor = new StreamAccessor(new FileStream(_stateFilePath, FileMode.Open, FileAccess.Read, FileShare.None)))
        {
            _bufferLength = accessor.Read7BitEncodedInt();
            _buffer = accessor.ReadBytes(_bufferLength);
            _objects = new ModuleObjectState(this, accessor);
            _blobs = new BlobList(accessor);
            // The string table is stored encrypted; decrypt it before wrapping.
            var stringBlob = new Blob(accessor.ReadBytes(accessor.Read7BitEncodedInt()));
            StrongCryptoUtils.Decrypt(stringBlob.GetBuffer(), 0, stringBlob.Length);
            _strings = new StringSerializer(new BlobAccessor(stringBlob));
            _signatures = new SignatureList(accessor, _strings);
        }
    }
    _strings.DelayWrite = true;
    _signatures.DelayWrite = true;
}
public void AddUserSpecifiedBlob(int tlCol, int tlRow, int brCol, int brRow)
{
    if (blobs == null)
    {
        blobs = new BlobList(true);
    }
    Blob b = new Blob(-1, tlCol, tlRow, brCol, brRow);
    blobs.Add(b);
}
public void AllocateTest(int blobSize, int itemCount)
{
    var addressList = new ConcurrentStack<int>();
    var blobs = new BlobList<Guid>(blobSize);
    var items = Enumerable.Range(0, itemCount).Select(_ => Guid.NewGuid()).ToArray();

    // Allocate and write concurrently from multiple threads.
    Parallel.ForEach(items, x =>
    {
        var address = blobs.Allocate();
        blobs.Write(address, x);
        addressList.Push(address);
    });

    // Every written value must read back intact, regardless of ordering.
    var values = addressList.Select(x => blobs.Read(x)).ToArray();
    items.Should().BeEquivalentTo(values);
}
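// A minimal, illustrative BlobList<T> sketch satisfying the surface the two
// tests above exercise (Allocate/Free/Read/Write/LivingCount). This is an
// assumption about the real type, not its actual implementation: storage is
// modeled as fixed-size segments of blobSize elements, with a single lock and
// a free list standing in for whatever concurrency scheme the real class uses.
using System.Collections.Generic;

public class BlobListSketch<T>
{
    private readonly int _blobSize;
    private readonly List<T[]> _blobs = new List<T[]>();
    private readonly Stack<int> _free = new Stack<int>();
    private readonly object _gate = new object();
    private int _next;

    public BlobListSketch(int blobSize) { _blobSize = blobSize; }

    public int LivingCount { get; private set; }

    public int Allocate()
    {
        lock (_gate)
        {
            // Reuse a freed address if one exists, otherwise take the next one.
            int address = _free.Count > 0 ? _free.Pop() : _next++;
            while (address / _blobSize >= _blobs.Count)
            {
                _blobs.Add(new T[_blobSize]); // grow one segment at a time
            }
            LivingCount++;
            return address;
        }
    }

    public void Free(int address)
    {
        lock (_gate)
        {
            _free.Push(address);
            LivingCount--;
        }
    }

    public void Write(int address, T value)
    {
        lock (_gate) { _blobs[address / _blobSize][address % _blobSize] = value; }
    }

    public T Read(int address)
    {
        lock (_gate) { return _blobs[address / _blobSize][address % _blobSize]; }
    }
}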
public IActionResult Index()
{
    // For Azure App Configuration:
    //BlobServiceClient blobServiceClient = new BlobServiceClient(_configuration["TestApp:Settings:StgConnString"]);
    //BlobContainerClient containerClient = blobServiceClient.GetBlobContainerClient(_configuration["TestApp:Settings:StgContainerName"]);

    // For Azure Key Vault:
    BlobServiceClient blobServiceClient = new BlobServiceClient(_configuration["DemoStoreConnectionString"]);
    BlobContainerClient containerClient = blobServiceClient.GetBlobContainerClient(_configuration["DemoStoreContainerName"]);

    BlobList blobList = new BlobList();
    blobList.AccountName = containerClient.Uri.ToString();
    blobList.ContainerName = containerClient.Name;
    blobList.Blobs = new List<string>();
    foreach (var blob in containerClient.GetBlobs())
    {
        blobList.Blobs.Add(blob.Name);
    }
    ViewData["blobs"] = blobList;
    return View();
}
public void GetBlobs()
{
    Entity entity = client.Operation("Blob.Get")
                          .SetInput("doc:" + blobContainer.Path)
                          .Execute()
                          .Result;
    Assert.True(entity is Blob);
    Assert.Equal("This is just a note.", IOHelper.ReadText(((Blob)entity).File));

    entity = client.Operation("Blob.GetList")
                   .SetInput("doc:" + blobContainer.Path)
                   .Execute()
                   .Result;
    Assert.True(entity is BlobList);
    BlobList blobs = (BlobList)entity;
    Assert.Equal(2, blobs.Count);
    Assert.Equal("This is another note.", IOHelper.ReadText(blobs[0].File));
    Assert.True(IOHelper.AreFilesEqual("Puppy.docx", blobs[1].File.FullName));
}
/// <summary>
/// Sets the operation input.
/// </summary>
/// <param name="input">The operation input.</param>
/// <returns>The current <see cref="Operation"/> instance.</returns>
public Operation SetInput(BlobList input)
{
    Input = input;
    return this;
}
// Takes an acceleration array along with other config data and packs the data into a byte-array blob.
public virtual void BuildAccelerationBlob(Acceleration[] accArray, byte version = 1, byte FIFO_Size = 32,
                                          short SampleFrequency = 800, byte GRange = 16,
                                          bool FullResolution = true, bool testFlag = false)
{
    // The switch versions the blob format; only version 1 exists today, but a
    // new version is easy to add as a new case.
    switch (version)
    {
    case 1:
        if (NumberDataPoints == 0)
        {
            // Starting a fresh blob: remember the first capture time and write
            // the header (version, FIFO size, 2-byte sample frequency, g-range,
            // full-resolution flag).
            FirstTimeCaptured = accArray[0].TimeCaptured;
            BlobList = new List<byte>();
            BlobList.Add(version);
            BlobList.Add(FIFO_Size);
            byte[] frequencyBytes = BitConverter.GetBytes(SampleFrequency);
            for (int i = 0; i < frequencyBytes.Length; i++)
            {
                BlobList.Add(frequencyBytes[i]);
            }
            BlobList.Add(GRange);
            BlobList.Add(BitConverter.GetBytes(FullResolution)[0]);
        }
        foreach (Acceleration acc in accArray)
        {
            if (NumberDataPoints % FIFO_Size == 0)
            {
                // First data point of a FIFO dump: label it 't' for Time and
                // include an 8-byte timestamp before the axis samples.
                BlobList.Add((byte)'t');
                byte[] time = BitConverter.GetBytes(acc.TimeCaptured);
                for (int i = 0; i < time.Length; i++)
                {
                    BlobList.Add(time[i]);
                }
            }
            else
            {
                // Subsequent data points: label 'a', acceleration data only.
                BlobList.Add((byte)'a');
            }
            // 2 bytes per axis for x, y and z.
            byte[] accX = BitConverter.GetBytes(acc.x);
            for (int i = 0; i < 2; i++)
            {
                BlobList.Add(accX[i]);
            }
            byte[] accY = BitConverter.GetBytes(acc.y);
            for (int i = 0; i < 2; i++)
            {
                BlobList.Add(accY[i]);
            }
            byte[] accZ = BitConverter.GetBytes(acc.z);
            for (int i = 0; i < 2; i++)
            {
                BlobList.Add(accZ[i]);
            }
            NumberDataPoints++;
        }
        // Once the number of data points meets the prespecified blob size
        // (currently 4000), send the blob to the database.
        if (NumberDataPoints >= MiscellaneousConstants.BLOB_SIZE)
        {
            BlobArray = BlobList.ToArray();
            Database.DatabaseOperations.AddAccelerationBlobData(this, Location, true);
            NumberDataPoints = 0;
        }
        else if (testFlag)
        {
            // Under test, only materialize the array so as to not touch the DB.
            BlobArray = BlobList.ToArray();
            NumberDataPoints = 0;
        }
        break;
    case 2:
        // No v2 format yet; nothing to do.
        break;
    }
}
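// A hedged decoder sketch for the v1 layout produced above, to make the byte
// format explicit: a 6-byte header (version, FIFO size, 2-byte sample
// frequency, g-range, full-resolution flag) followed by 't' records (8-byte
// timestamp + x/y/z) at each FIFO boundary and 'a' records (x/y/z only) in
// between. It assumes TimeCaptured serializes to 8 bytes (a long) and each
// axis to a 2-byte short, matching the byte counts written above;
// DecodeAccelerationBlob itself is illustrative, not part of the original code.
public static void DecodeAccelerationBlob(byte[] blob)
{
    int pos = 0;
    byte version = blob[pos++];
    byte fifoSize = blob[pos++];
    short sampleFrequency = BitConverter.ToInt16(blob, pos); pos += 2;
    byte gRange = blob[pos++];
    bool fullResolution = BitConverter.ToBoolean(blob, pos); pos += 1;
    Console.WriteLine($"v{version}, fifo={fifoSize}, {sampleFrequency}Hz, range={gRange}g, fullRes={fullResolution}");

    while (pos < blob.Length)
    {
        char label = (char)blob[pos++];
        long timeCaptured = 0;
        if (label == 't')
        {
            timeCaptured = BitConverter.ToInt64(blob, pos); pos += 8;
        }
        short x = BitConverter.ToInt16(blob, pos); pos += 2;
        short y = BitConverter.ToInt16(blob, pos); pos += 2;
        short z = BitConverter.ToInt16(blob, pos); pos += 2;
        Console.WriteLine($"{label}: t={timeCaptured} x={x} y={y} z={z}");
    }
}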
/// <summary>
/// Create a new job in the Windows Azure Import/Export service.
/// </summary>
/// <param name="jobName">Name of the job.</param>
/// <param name="configFilePath">Path to the XML configuration file describing the job.</param>
public void CreateJob(string jobName, string configFilePath)
{
    var XConf = XDocument.Load(configFilePath);

    // ReturnAddress: the return address information for the job.
    var XReturnAddress = XConf.Descendants("ReturnAddress").First();
    var returnAddress = new ReturnAddress(
        XReturnAddress.Element("Name").Value,
        XReturnAddress.Element("Address").Value,
        XReturnAddress.Element("Phone").Value,
        XReturnAddress.Element("Email").Value
        );

    // ReturnShipping: the return carrier and the customer's account with the carrier.
    var XReturnShipping = XConf.Descendants("ReturnShipping").First();
    var returnShipping = new ReturnShipping(
        XReturnShipping.Element("CarrierName").Value,
        XReturnShipping.Element("CarrierAccountNumber").Value
        );

    // Properties: the list of properties for the job.
    // Refer to https://msdn.microsoft.com/en-us/library/azure/dn529110.aspx for more details.
    var XJobProperty = XConf.Descendants("JobProperty").First();
    var putJobProperties = new PutJobProperties(
        backupDriveManifest: bool.Parse(XJobProperty.Element("BackupDriveManifest").Value),
        description: XJobProperty.Element("Description").Value,
        enableVerboseLog: bool.Parse(XJobProperty.Element("EnableVerboseLog").Value),
        friendlyName: XJobProperty.Element("FriendlyName").Value,
        type: XJobProperty.Element("JobType").Value.Equals("Import", StringComparison.InvariantCultureIgnoreCase)
            ? JobType.Import
            : JobType.Export,
        location: XJobProperty.Element("Location").Value,
        storageAccountKey: XJobProperty.Element("StorageAccountKey").Value,
        storageAccountName: XJobProperty.Element("StorageAccountName").Value,
        importExportStatesPath: XJobProperty.Element("ImportExportStatesPath").Value,
        returnAddress: returnAddress,
        returnShipping: returnShipping
        );

    // The request must include either StorageAccountKey or ContainerSas.
    if (string.IsNullOrEmpty(XJobProperty.Element("StorageAccountKey").Value))
    {
        putJobProperties.StorageAccountKey = null;
        putJobProperties.ContainerSas = XJobProperty.Element("ContainerSas").Value;
    }

    var putJobParameters = new PutJobParameters(jobName, putJobProperties);
    if (putJobProperties.Type == JobType.Export)
    {
        // BlobList: information about the blobs to be exported for an export job.
        var XBlobList = XConf.Descendants("BlobList").First().Elements();
        var blobList = new BlobList(new List<string>(), new List<string>());
        foreach (var XBlob in XBlobList)
        {
            if (!String.IsNullOrWhiteSpace(XBlob.Attribute("BlobPaths").Value))
            {
                blobList.BlobPath.Add(XBlob.Attribute("BlobPaths").Value);
            }
            if (!String.IsNullOrWhiteSpace(XBlob.Attribute("BlobPathPrefixes").Value))
            {
                blobList.BlobPathPrefix.Add(XBlob.Attribute("BlobPathPrefixes").Value);
            }
        }
        putJobParameters.Export = new Export(blobList: blobList);
    }
    else
    {
        // DriveList: list of up to ten drives that comprise the job.
        var XDriveList = XConf.Descendants("DriveList").First().Elements();
        var driveList = new List<Drive>();
        foreach (var XDrive in XDriveList)
        {
            driveList.Add(new Drive(
                driveId: XDrive.Element("DriveId").Value,
                bitLockerKey: XDrive.Element("BitLockerKey").Value,
                manifestFile: XDrive.Element("ManifestFile").Value,
                manifestHash: XDrive.Element("ManifestHash").Value
                ));
        }
        putJobParameters.DriveList = driveList;
    }
    client.PutJob(XJobProperty.Element("StorageAccountName").Value, jobName, putJobParameters);
}
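<!-- A minimal export-job configuration sketch reconstructed from the elements
     and attributes CreateJob reads above. The root element name and all values
     are placeholders (the code never inspects the root); an import job would
     carry a DriveList of Drive elements (DriveId, BitLockerKey, ManifestFile,
     ManifestHash) instead of a BlobList. -->
<JobConfig>
  <ReturnAddress>
    <Name>Contoso Ops</Name>
    <Address>1 Example Way, Redmond, WA</Address>
    <Phone>555-0100</Phone>
    <Email>ops@contoso.example</Email>
  </ReturnAddress>
  <ReturnShipping>
    <CarrierName>FedEx</CarrierName>
    <CarrierAccountNumber>0000-0000-0</CarrierAccountNumber>
  </ReturnShipping>
  <JobProperty>
    <BackupDriveManifest>true</BackupDriveManifest>
    <Description>Export of demo container</Description>
    <EnableVerboseLog>false</EnableVerboseLog>
    <FriendlyName>demo-export</FriendlyName>
    <JobType>Export</JobType>
    <Location>West US</Location>
    <StorageAccountKey></StorageAccountKey>
    <StorageAccountName>demostore</StorageAccountName>
    <ImportExportStatesPath>C:\ImportExportStates</ImportExportStatesPath>
    <ContainerSas>?sv=...</ContainerSas>
  </JobProperty>
  <BlobList>
    <Blob BlobPaths="/container/blob1.vhd" BlobPathPrefixes="/container/logs/" />
  </BlobList>
</JobConfig>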
static bool DownloadDependencies(string RootPath, IEnumerable<DependencyFile> RequiredFiles,
                                 IEnumerable<DependencyBlob> Blobs, IEnumerable<DependencyPack> Packs,
                                 int NumThreads, int MaxRetries, string ProxyUrl)
{
    // Make sure we can actually open the right number of connections
    ServicePointManager.DefaultConnectionLimit = NumThreads;

    // Build a lookup for the files that need updating from each blob
    Dictionary<string, List<DependencyFile>> BlobToFiles = new Dictionary<string, List<DependencyFile>>();
    foreach (DependencyFile RequiredFile in RequiredFiles)
    {
        List<DependencyFile> FileList;
        if (!BlobToFiles.TryGetValue(RequiredFile.Hash, out FileList))
        {
            FileList = new List<DependencyFile>();
            BlobToFiles.Add(RequiredFile.Hash, FileList);
        }
        FileList.Add(RequiredFile);
    }

    // Find all the required blobs
    DependencyBlob[] RequiredBlobs = Blobs.Where(x => BlobToFiles.ContainsKey(x.Hash)).ToArray();

    // Build a lookup for the blobs that need updating from each pack
    Dictionary<string, List<DependencyBlob>> PackToBlobs = new Dictionary<string, List<DependencyBlob>>();
    foreach (DependencyBlob RequiredBlob in RequiredBlobs)
    {
        List<DependencyBlob> BlobList;
        if (!PackToBlobs.TryGetValue(RequiredBlob.PackHash, out BlobList))
        {
            BlobList = new List<DependencyBlob>();
            PackToBlobs.Add(RequiredBlob.PackHash, BlobList);
        }
        BlobList.Add(RequiredBlob);
    }

    // Find all the required packs
    DependencyPack[] RequiredPacks = Packs.Where(x => PackToBlobs.ContainsKey(x.Hash)).ToArray();

    // Get temporary filenames for all the files we're going to download
    Dictionary<DependencyPack, string> DownloadFileNames = new Dictionary<DependencyPack, string>();
    foreach (DependencyPack Pack in RequiredPacks)
    {
        DownloadFileNames.Add(Pack, Path.GetTempFileName());
    }

    // Set up the async state
    AsyncDownloadState State = new AsyncDownloadState();
    State.NumFiles = RequiredFiles.Count();
    long NumBytesTotal = RequiredPacks.Sum(x => x.CompressedSize);
    ConcurrentQueue<DependencyPack> DownloadQueue = new ConcurrentQueue<DependencyPack>(RequiredPacks);
    ConcurrentQueue<DependencyPack> DecompressQueue = new ConcurrentQueue<DependencyPack>();

    // Create all the worker threads
    Thread[] WorkerThreads = new Thread[NumThreads];
    for (int Idx = 0; Idx < NumThreads; Idx++)
    {
        WorkerThreads[Idx] = new Thread(x => DownloadWorker(RootPath, DownloadQueue, DecompressQueue, DownloadFileNames, PackToBlobs, BlobToFiles, State, MaxRetries, ProxyUrl));
        WorkerThreads[Idx].Start();
    }

    // Create the decompression thread
    Thread DecompressionThread = new Thread(x => DecompressWorker(RootPath, DecompressQueue, DownloadFileNames, PackToBlobs, BlobToFiles, State));
    DecompressionThread.Start();

    // Tick the status message until we've finished or ended with an error.
    // Use a circular buffer to average out the speed over time.
    long[] NumBytesReadBuffer = new long[60];
    for (int BufferIdx = 0, NumFilesReportedRead = 0;
         NumFilesReportedRead < State.NumFiles && State.NumFailingDownloads < NumThreads && State.LastDecompressError == null;
         BufferIdx = (BufferIdx + 1) % NumBytesReadBuffer.Length)
    {
        const int TickInterval = 100;
        long NumBytesRead = Interlocked.Read(ref State.NumBytesRead);
        float NumBytesPerSecond = (float)Math.Max(NumBytesRead - NumBytesReadBuffer[BufferIdx], 0) * 1000.0f / (NumBytesReadBuffer.Length * TickInterval);
        NumFilesReportedRead = State.NumFilesRead;
        Log.WriteStatus("Received {0}/{1} files ({2:0.0}/{3:0.0}mb; {4:0.00}mb/s; {5}%)...",
            NumFilesReportedRead, State.NumFiles,
            (NumBytesRead / (1024.0 * 1024.0)) + 0.0999999,
            (NumBytesTotal / (1024.0 * 1024.0)) + 0.0999999,
            (NumBytesPerSecond / (1024.0 * 1024.0)) + 0.0099,
            (NumBytesRead * 100) / NumBytesTotal);
        NumBytesReadBuffer[BufferIdx] = NumBytesRead;
        Thread.Sleep(TickInterval);
    }

    // If we finished with an error, try to clean up and return
    if (State.NumFilesRead < State.NumFiles)
    {
        DecompressionThread.Abort();
        foreach (Thread WorkerThread in WorkerThreads)
        {
            WorkerThread.Abort();
        }
        Log.WriteError("{0}", (State.LastDecompressError != null) ? State.LastDecompressError : State.LastDownloadError);
        foreach (string FileName in DownloadFileNames.Values)
        {
            try { File.Delete(FileName); } catch (Exception) { }
        }
        return false;
    }

    // Join all the threads
    DecompressionThread.Join();
    foreach (Thread WorkerThread in WorkerThreads)
    {
        WorkerThread.Join();
    }
    Log.FlushStatus();
    return true;
}