public static Mutex GrabMutex(string name)

Parameters:
  name (string) — the cached file / blob name used to derive the mutex identity.

Returns: the named Mutex that serializes access to the cached copy of that file.
/// <summary>
/// Opens an Azure blob as a Lucene <c>IndexInput</c>: downloads the blob into the
/// local cache directory under the file mutex, then opens the cached copy.
/// </summary>
/// <param name="azuredirectory">Owning directory; supplies the blob container and local cache.</param>
/// <param name="blob">Source blob to download.</param>
public AzureIndexInput(AzureDirectory azuredirectory, ICloudBlob blob) : base(blob.Name)
{
    _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
    // Serialize access to the cached file across threads/processes.
    _fileMutex = BlobMutexManager.GrabMutex(_name);
    _fileMutex.WaitOne();
    try
    {
        _azureDirectory = azuredirectory;
        _blobContainer = azuredirectory.BlobContainer;
        _blob = blob;
        string fileName = _name;

        // FIX: wrap the cache stream in a using block so it is disposed even when
        // the download throws (previously Close() ran only on the success path,
        // leaking the cached-file handle on failure).
        using (StreamOutput fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
        {
            // get the blob
            _blob.DownloadToStream(fileStream);
            fileStream.Flush();
            Debug.WriteLine("GET {0} RETREIVED {1} bytes", _name, fileStream.Length);
        }

        // FIX: OpenInput (PascalCase) — matches the Lucene.Net 4.x Directory API
        // used by the other constructors in this file; lowercase openInput does not exist.
        _indexInput = CacheDirectory.OpenInput(fileName, IOContext.DEFAULT);
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
/// <summary>
/// Clone constructor: copies the directory/container/blob references from
/// <paramref name="cloneInput"/> and clones its underlying IndexInput, all while
/// holding the per-file mutex for the source's name.
/// </summary>
public AzureIndexInput(AzureIndexInput cloneInput)
{
    _fileMutex = BlobMutexManager.GrabMutex(cloneInput._name);
    _fileMutex.WaitOne();
    try
    {
#if FULLDEBUG
        Debug.WriteLine(String.Format("Creating clone for {0}", cloneInput._name));
#endif
        _azureDirectory = cloneInput._azureDirectory;
        _blobContainer = cloneInput._blobContainer;
        _blob = cloneInput._blob;
        // Clone() may return a type other than IndexInput; 'as' yields null in that case.
        _indexInput = cloneInput._indexInput.Clone() as IndexInput;
    }
    catch (Exception)
    {
        // sometimes we get access denied on the 2nd stream...but not always. I haven't tracked it down yet
        // but this covers our tail until I do
        // NOTE(review): despite the message, no memory-clone fallback actually happens
        // here — on this path _indexInput is left null/unassigned. Confirm callers tolerate that.
        Debug.WriteLine(String.Format("Dagnabbit, falling back to memory clone for {0}", cloneInput._name));
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
/// <summary>
/// Opens <paramref name="name"/> as a Lucene IndexInput backed by a locally cached
/// copy of <paramref name="blob"/>. The blob is downloaded only when no cached copy
/// exists or the cached length differs from the blob's content length.
/// </summary>
public AzureIndexInput(AzureDirectory azureDirectory, string name, BlobClient blob) : base(name)
{
    this._name = name;
    this._azureDirectory = azureDirectory;
#if FULLDEBUG
    Debug.WriteLine($"{_azureDirectory.Name} opening {name} ");
#endif
    _fileMutex = BlobMutexManager.GrabMutex(name);
    _fileMutex.WaitOne();
    try
    {
        _blobContainer = azureDirectory.BlobContainer;
        _blob = blob;

        // Decide whether a fresh download is required: missing cache entry, or a
        // length mismatch between the cached file and the blob.
        bool fileNeeded;
        if (CacheDirectory.FileExists(name))
        {
            long cachedLength = CacheDirectory.FileLength(name);
            var properties = blob.GetProperties();
            long blobLength = properties.Value?.ContentLength ?? 0;
            fileNeeded = cachedLength != blobLength;
        }
        else
        {
            fileNeeded = true;
        }

        // if the file does not exist
        // or if it exists and it is older then the lastmodified time in the blobproperties (which always comes from the blob storage)
        if (fileNeeded)
        {
            using (StreamOutput fileStream = _azureDirectory.CreateCachedOutputAsStream(name))
            {
                // get the blob
                _blob.DownloadTo(fileStream);
                fileStream.Flush();
                Debug.WriteLine($"{_azureDirectory.Name} GET {_name} RETREIVED {fileStream.Length} bytes");
            }
        }
#if FULLDEBUG
        Debug.WriteLine($"{_azureDirectory.Name} Using cached file for {name}");
#endif
        // and open it as our input, this is now available forevers until new file comes along
        _indexInput = CacheDirectory.OpenInput(name, IOContext.DEFAULT);
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
/// <summary>
/// Creates an output that writes to a locally cached copy of the blob's file name,
/// holding the per-file mutex while the cache output is created.
/// </summary>
/// <param name="azureDirectory">Owning directory; supplies the blob container and local cache.</param>
/// <param name="blob">Destination blob; its last URI segment is the file name.</param>
public FastAzureIndexOutput(AzureDirectory azureDirectory, ICloudBlob blob)
{
    // FIX: derive _name from the blob BEFORE grabbing the mutex — previously
    // GrabMutex(this._name) was called while _name was still unset (the assignment
    // only happened later inside the try block), so the mutex was keyed on null
    // instead of the file name.
    this._name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
    this._fileMutex = BlobMutexManager.GrabMutex(this._name);
    this._fileMutex.WaitOne();
    try
    {
        this._azureDirectory = azureDirectory;
        this._blobContainer = this._azureDirectory.BlobContainer;
        this._blob = blob;
        this._indexOutput = this.CacheDirectory.CreateOutput(this._name);
    }
    finally
    {
        this._fileMutex.ReleaseMutex();
    }
}
/// <summary>
/// Creates an output that writes to a locally cached copy of the blob's file name,
/// holding the per-file mutex while the cache output is created.
/// </summary>
/// <param name="azureDirectory">Owning directory; supplies the local cache.</param>
/// <param name="blob">Destination blob; its last URI segment is the file name.</param>
public AzureIndexOutput(AzureDirectory azureDirectory, ICloudBlob blob)
{
    // FIX: assign _name before GrabMutex — previously the mutex was grabbed with
    // _name still unset (it was only assigned later inside the try block).
    _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
    _fileMutex = BlobMutexManager.GrabMutex(_name);
    _fileMutex.WaitOne();
    try
    {
        _azureDirectory = azureDirectory;
        _blob = blob;
        // create the local cache one we will operate against...
        // FIX: CreateOutput (PascalCase) — matches the Lucene.Net Directory API used
        // by the sibling constructor; lowercase createOutput does not exist.
        _indexOutput = CacheDirectory.CreateOutput(_name, IOContext.DEFAULT);
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
/// <summary>
/// Creates an output that writes to a locally cached copy of <paramref name="name"/>;
/// the per-file mutex serializes creation of that cached output.
/// </summary>
public AzureIndexOutput(AzureDirectory azureDirectory, string name, CloudBlockBlob blob)
{
    _name = name;
    _fileMutex = BlobMutexManager.GrabMutex(name);
    _fileMutex.WaitOne();
    try
    {
        _azureDirectory = azureDirectory;
        _blobContainer = azureDirectory.BlobContainer;
        _blob = blob;

        // create the local cache one we will operate against...
        _indexOutput = CacheDirectory.CreateOutput(name, IOContext.DEFAULT);
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
/// <summary>
/// Opens <paramref name="blob"/> as a Lucene IndexInput, re-downloading into the local
/// cache only when the cached copy's length or last-modified time no longer matches the
/// blob. "CachedLength"/"CachedLastModified" metadata entries, when present and valid,
/// override the raw blob properties (used for compressed blobs, whose stored length
/// differs from the logical length).
/// </summary>
public AzureIndexInput(AzureDirectory azuredirectory, CloudBlob blob)
{
    _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
#if FULLDEBUG
    Debug.WriteLine(String.Format("opening {0} ", _name));
#endif
    _fileMutex = BlobMutexManager.GrabMutex(_name);
    _fileMutex.WaitOne();
    try
    {
        _azureDirectory = azuredirectory;
        _blobContainer = azuredirectory.BlobContainer;
        _blob = blob;
        string fileName = _name;

        bool fFileNeeded = false;
        if (!CacheDirectory.FileExists(fileName))
        {
            fFileNeeded = true;
        }
        else
        {
            long cachedLength = CacheDirectory.FileLength(fileName);

            // FIX: long.TryParse zeroes its out argument on failure, so a missing or
            // malformed "CachedLength" metadata entry used to clobber the real blob
            // length with 0 and force a re-download on every open. Only override the
            // property length when the metadata actually parses.
            long blobLength = blob.Properties.Length;
            long metadataLength;
            if (long.TryParse(blob.Metadata["CachedLength"], out metadataLength))
            {
                blobLength = metadataLength;
            }

            long longLastModified = 0;
            DateTime blobLastModifiedUTC = blob.Properties.LastModifiedUtc;
            if (long.TryParse(blob.Metadata["CachedLastModified"], out longLastModified))
            {
                blobLastModifiedUTC = new DateTime(longLastModified).ToUniversalTime();
            }

            if (cachedLength != blobLength)
            {
                fFileNeeded = true;
            }
            else
            {
                // there seems to be an error of 1 tick which happens every once in a while
                // for now we will say that if they are within 1 tick of each other and same length
                DateTime cachedLastModifiedUTC = new DateTime(CacheDirectory.FileModified(fileName), DateTimeKind.Local).ToUniversalTime();
                if (cachedLastModifiedUTC != blobLastModifiedUTC)
                {
                    TimeSpan timeSpan = blobLastModifiedUTC.Subtract(cachedLastModifiedUTC);
                    if (timeSpan.TotalSeconds > 1)
                    {
                        fFileNeeded = true;
                    }
                    else
                    {
#if FULLDEBUG
                        Debug.WriteLine(timeSpan.TotalSeconds);
#endif
                        // file not needed
                    }
                }
            }
        }

        // if the file does not exist
        // or if it exists and it is older then the lastmodified time in the blobproperties (which always comes from the blob storage)
        if (fFileNeeded)
        {
#if COMPRESSBLOBS
            if (_azureDirectory.ShouldCompressFile(_name))
            {
                // FIX: streams are in using blocks so they are disposed even when the
                // download or decompression throws (previously Close() ran only on the
                // success path). DeflateStream's Dispose also closes the inner stream.
                using (MemoryStream deflatedStream = new MemoryStream())
                {
                    // get the deflated blob
                    _blob.DownloadToStream(deflatedStream);
                    Debug.WriteLine(string.Format("GET {0} RETREIVED {1} bytes", _name, deflatedStream.Length));

                    // seek back to begininng
                    deflatedStream.Seek(0, SeekOrigin.Begin);

                    // open output file for uncompressed contents and inflate into it
                    using (StreamOutput fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
                    using (DeflateStream decompressor = new DeflateStream(deflatedStream, CompressionMode.Decompress))
                    {
                        byte[] bytes = new byte[65535];
                        int nRead = 0;
                        do
                        {
                            nRead = decompressor.Read(bytes, 0, 65535);
                            if (nRead > 0)
                            {
                                fileStream.Write(bytes, 0, nRead);
                            }
                        } while (nRead == 65535);
                    }
                }
            }
            else
#endif
            {
                using (StreamOutput fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
                {
                    // get the blob
                    _blob.DownloadToStream(fileStream);
                    fileStream.Flush();
                    Debug.WriteLine(string.Format("GET {0} RETREIVED {1} bytes", _name, fileStream.Length));
                }
            }

            // and open it as an input
            _indexInput = CacheDirectory.OpenInput(fileName);
        }
        else
        {
#if FULLDEBUG
            Debug.WriteLine(String.Format("Using cached file for {0}", _name));
#endif
            // open the file in read only mode
            _indexInput = CacheDirectory.OpenInput(fileName);
        }
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}
/// <summary>
/// Opens <paramref name="blob"/> as a Lucene IndexInput, downloading into the local
/// cache only when the cached copy is missing or stale. Staleness is decided by
/// comparing length (blob property, overridable via "CachedLength" metadata) and
/// last-modified time (blob property, overridable via "CachedLastModified" metadata,
/// with a 1-second tolerance for clock jitter).
/// </summary>
public AzureIndexInput(AzureDirectory azuredirectory, ICloudBlob blob)
{
    // File name is the last segment of the blob URI.
    _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];
#if FULLDEBUG
    Debug.WriteLine(String.Format("opening {0} ", _name));
#endif
    // Serialize access to the cached file across threads/processes.
    _fileMutex = BlobMutexManager.GrabMutex(_name);
    _fileMutex.WaitOne();
    try
    {
        _azureDirectory = azuredirectory;
        _blobContainer = azuredirectory.BlobContainer;
        _blob = blob;
        var fileName = _name;
        var fFileNeeded = false;
        if (!CacheDirectory.FileExists(fileName))
        {
            // No cached copy at all — must download.
            fFileNeeded = true;
        }
        else
        {
            long cachedLength = CacheDirectory.FileLength(fileName);
            string blobLengthMetadata;
            bool hasMetadataValue = blob.Metadata.TryGetValue("CachedLength", out blobLengthMetadata);
            long blobLength = blob.Properties.Length;
            if (hasMetadataValue)
            {
                // NOTE(review): if the metadata exists but fails to parse, TryParse
                // zeroes blobLength here, discarding Properties.Length and forcing a
                // re-download — confirm whether that is intended.
                long.TryParse(blobLengthMetadata, out blobLength);
            }
            string blobLastModifiedMetadata;
            long longLastModified = 0;
            DateTime blobLastModifiedUTC = blob.Properties.LastModified.Value.UtcDateTime;
            if (blob.Metadata.TryGetValue("CachedLastModified", out blobLastModifiedMetadata))
            {
                // Metadata stores DateTime ticks; fall back to the blob property if unparsable.
                if (long.TryParse(blobLastModifiedMetadata, out longLastModified))
                {
                    blobLastModifiedUTC = new DateTime(longLastModified).ToUniversalTime();
                }
            }
            if (cachedLength != blobLength)
            {
                fFileNeeded = true;
            }
            else
            {
                // cachedLastModifiedUTC was not ouputting with a date (just time) and the time was always off
                // FileModified is treated as milliseconds since the Unix epoch.
                long unixDate = CacheDirectory.FileModified(fileName);
                DateTime start = new DateTime(1970, 1, 1, 0, 0, 0, DateTimeKind.Utc);
                var cachedLastModifiedUTC = start.AddMilliseconds(unixDate).ToUniversalTime();
                if (cachedLastModifiedUTC != blobLastModifiedUTC)
                {
                    var timeSpan = blobLastModifiedUTC.Subtract(cachedLastModifiedUTC);
                    // Allow up to 1 second of skew before considering the cache stale.
                    if (timeSpan.TotalSeconds > 1)
                    {
                        fFileNeeded = true;
                    }
                    else
                    {
#if FULLDEBUG
                        Debug.WriteLine(timeSpan.TotalSeconds);
#endif
                        // file not needed
                    }
                }
            }
        }
        // if the file does not exist
        // or if it exists and it is older then the lastmodified time in the blobproperties (which always comes from the blob storage)
        if (fFileNeeded)
        {
            if (_azureDirectory.ShouldCompressFile(_name))
            {
                // Compressed blobs are downloaded and inflated by the helper.
                InflateStream(fileName);
            }
            else
            {
                using (var fileStream = _azureDirectory.CreateCachedOutputAsStream(fileName))
                {
                    // get the blob
                    _blob.DownloadToStream(fileStream);
                    fileStream.Flush();
                    Debug.WriteLine(string.Format("GET {0} RETREIVED {1} bytes", _name, fileStream.Length));
                }
            }
            // and open it as an input
            _indexInput = CacheDirectory.OpenInput(fileName);
        }
        else
        {
#if FULLDEBUG
            Debug.WriteLine(String.Format("Using cached file for {0}", _name));
#endif
            // open the file in read only mode
            _indexInput = CacheDirectory.OpenInput(fileName);
        }
    }
    finally
    {
        _fileMutex.ReleaseMutex();
    }
}