protected internal FixedIntBlockIndexOutput(IndexOutput output, int fixedBlockSize)
 {
     blockSize = fixedBlockSize;
     this.output = output;
     output.WriteVInt(blockSize);
     buffer = new int[blockSize];
 }
 public MockSingleIntIndexOutput(Directory dir, string fileName, IOContext context)
 {
     @out = dir.CreateOutput(fileName, context);
     bool success = false;
     try
     {
         CodecUtil.WriteHeader(@out, CODEC, VERSION_CURRENT);
         success = true;
     }
     finally
     {
         if (!success)
         {
             IOUtils.CloseWhileHandlingException(@out);
         }
     }
 }
        public SecureStoreIndexOutput(Directory cache, string cachePath, Func<DataWithMetadata, Task> saveTask)
        {
            _cache = cache;
            _name = cachePath;
            _saveTask = saveTask;

            _fileMutex = BlobMutexManager.GrabMutex(_name);
            _fileMutex.WaitOne();
            try
            {
                // create the local cache file we will operate against...
                _indexOutput = _cache.CreateOutput(_name);
            }
            finally
            {
                _fileMutex.ReleaseMutex();
            }
        }
        public AzureIndexOutput(AzureDirectory azureDirectory, ICloudBlob blob)
        {
            _azureDirectory = azureDirectory;
            _blobContainer = _azureDirectory.BlobContainer;
            _blob = blob;
            // derive the file name from the blob URI before grabbing its mutex
            _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];

            _fileMutex = BlobMutexManager.GrabMutex(_name);
            _fileMutex.WaitOne();
            try
            {
                // create the local cache file we will operate against...
                _indexOutput = CacheDirectory.CreateOutput(_name);
            }
            finally
            {
                _fileMutex.ReleaseMutex();
            }
        }
Example 5
		/// <summary>Copy the current contents of this buffer to the named output. </summary>
		public virtual void  WriteTo(IndexOutput out_Renamed)
		{
			Flush();
			long end = file.length;
			long pos = 0;
			int buffer = 0;
			while (pos < end)
			{
				int length = BUFFER_SIZE;
				long nextPos = pos + length;
				if (nextPos > end)
				{
					// at the last buffer
					length = (int) (end - pos);
				}
				out_Renamed.WriteBytes((byte[]) file.GetBuffer(buffer++), length);
				pos = nextPos;
			}
		}
Example 6
        public SyncIndexOutput(SyncDirectory syncDirectory, string name)
        {
            if (syncDirectory == null) throw new ArgumentNullException(nameof(syncDirectory));

            // NOTE: _name was previously left null here (see https://github.com/azure-contrib/AzureDirectory/issues/19);
            // it is now assigned from the name parameter.
            _name = name;
            _syncDirectory = syncDirectory;
            _fileMutex = SyncMutexManager.GrabMutex(_syncDirectory, _name);
            _fileMutex.WaitOne();
            try
            {
                // create the local cache file we will operate against...
                _indexOutput = CacheDirectory.CreateOutput(_name);
            }
            finally
            {
                _fileMutex.ReleaseMutex();
            }
        }
Example 7
        public AzureIndexOutput(AzureDirectory azureDirectory, ICloudBlob blob)
        {
            if (azureDirectory == null) throw new ArgumentNullException(nameof(azureDirectory));

            _azureDirectory = azureDirectory;
            _blobContainer = _azureDirectory.BlobContainer;
            _blob = blob;
            // derive the file name from the blob URI before grabbing its mutex
            _name = blob.Uri.Segments[blob.Uri.Segments.Length - 1];

            _fileMutex = SyncMutexManager.GrabMutex(_azureDirectory, _name);
            _fileMutex.WaitOne();
            try
            {
                // create the local cache file we will operate against...
                _indexOutput = CacheDirectory.CreateOutput(_name);
            }
            finally
            {
                _fileMutex.ReleaseMutex();
            }
        }
Example 8
        /// <summary>
        /// Write a block of data (<code>For</code> format).
        /// </summary>
        /// <param name="data">     the data to write </param>
        /// <param name="encoded">  a buffer to use to encode data </param>
        /// <param name="out">      the destination output </param>
        /// <exception cref="IOException"> If there is a low-level I/O error </exception>
        public void WriteBlock(int[] data, sbyte[] encoded, IndexOutput @out)
        {
            if (IsAllEqual(data))
            {
                @out.WriteByte((sbyte)ALL_VALUES_EQUAL);
                @out.WriteVInt(data[0]);
                return;
            }

            int numBits = BitsRequired(data);
            Debug.Assert(numBits > 0 && numBits <= 32, numBits.ToString());
            PackedInts.Encoder encoder = Encoders[numBits];
            int iters = Iterations[numBits];
            Debug.Assert(iters * encoder.ByteValueCount() >= Lucene41PostingsFormat.BLOCK_SIZE);
            int encodedSize = EncodedSizes[numBits];
            Debug.Assert(iters * encoder.ByteBlockCount() >= encodedSize);

            @out.WriteByte((sbyte)numBits);

            encoder.Encode(data, 0, encoded, 0, iters);
            @out.WriteBytes(encoded, encodedSize);
        }
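
A hedged usage sketch for WriteBlock above (the names forUtil and docOut are illustrative assumptions; the real Lucene41 writer sizes the encode buffer from ForUtil's max-encoded-size constant rather than the rough bound used here):

        // Hypothetical caller: buffer exactly BLOCK_SIZE deltas, then delegate to WriteBlock.
        int[] docDeltas = new int[Lucene41PostingsFormat.BLOCK_SIZE];
        // 4 bytes per value is an upper bound for 32-bit packed values.
        sbyte[] encoded = new sbyte[Lucene41PostingsFormat.BLOCK_SIZE * 4];
        for (int i = 0; i < docDeltas.Length; i++)
        {
            docDeltas[i] = 1; // dense postings: all deltas equal, so the ALL_VALUES_EQUAL fast path fires
        }
        forUtil.WriteBlock(docDeltas, encoded, docOut); // docOut: an open IndexOutput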
Example 9
 private IndexOutput GetOutput()
 {
     lock (this)
     {
         if (DataOut == null)
         {
             bool success = false;
             try
             {
                 DataOut = Directory.CreateOutput(DataFileName, IOContext.DEFAULT);
                 CodecUtil.WriteHeader(DataOut, DATA_CODEC, VERSION_CURRENT);
                 success = true;
             }
             finally
             {
                 if (!success)
                 {
                     IOUtils.CloseWhileHandlingException((IDisposable)DataOut);
                 }
             }
         }
         return DataOut;
     }
 }
 protected override void Dispose(bool disposing)
 {
     if (disposing)
     {
         try
         {
             IOUtils.Close(FieldsStream, IndexWriter);
         }
         finally
         {
             FieldsStream = null;
             IndexWriter = null;
         }
     }
 }
Example 11
 internal CopyThread(IndexInput src, IndexOutput dst)
 {
     this.Src = src;
     this.Dst = dst;
 }
Example 12
        internal void  FinishCommit(Directory dir, IState state)
        {
            if (pendingSegnOutput == null)
            {
                throw new System.SystemException("prepareCommit was not called");
            }
            bool success = false;

            try
            {
                pendingSegnOutput.FinishCommit();
                pendingSegnOutput.Close();
                pendingSegnOutput = null;
                success           = true;
            }
            finally
            {
                if (!success)
                {
                    RollbackCommit(dir, state);
                }
            }

            // NOTE: if we crash here, we have left a segments_N
            // file in the directory in a possibly corrupt state (if
            // some bytes made it to stable storage and others
            // didn't).  But, the segments_N file includes checksum
            // at the end, which should catch this case.  So when a
            // reader tries to read it, it will throw a
            // CorruptIndexException, which should cause the retry
            // logic in SegmentInfos to kick in and load the last
            // good (previous) segments_N-1 file.

            System.String fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", generation);
            success = false;
            try
            {
                dir.Sync(fileName);
                success = true;
            }
            finally
            {
                if (!success)
                {
                    try
                    {
                        dir.DeleteFile(fileName, state);
                    }
                    catch (System.Exception)
                    {
                        // Suppress so we keep throwing the original exception
                    }
                }
            }

            lastGeneration = generation;

            try
            {
                IndexOutput genOutput = dir.CreateOutput(IndexFileNames.SEGMENTS_GEN, state);
                try
                {
                    genOutput.WriteInt(FORMAT_LOCKLESS);
                    genOutput.WriteLong(generation);
                    genOutput.WriteLong(generation);
                }
                finally
                {
                    genOutput.Close();
                }
            }
            catch (System.Exception)
            {
                // It's OK if we fail to write this file since it's
                // used only as one of the retry fallbacks.
            }
        }
Example 13
        protected override void Dispose(bool disposing)
        {
            _fileMutex.WaitOne();
            try
            {
                string fileName = _name;

                // make sure it's all written out
                _indexOutput.Flush();

                long originalLength = _indexOutput.Length;
                _indexOutput.Dispose();

                Stream blobStream;
            #if COMPRESSBLOBS

                // optionally put a compressor around the blob stream
                if (_azureDirectory.ShouldCompressFile(_name))
                {
                    // unfortunately, deflate stream doesn't allow seek, and we need a seekable stream
                    // to pass to the blob storage stuff, so we compress into a memory stream
                    MemoryStream compressedStream = new MemoryStream();

                    try
                    {
                        IndexInput indexInput = CacheDirectory.OpenInput(fileName);
                        using (DeflateStream compressor = new DeflateStream(compressedStream, CompressionMode.Compress, true))
                        {
                            // compress to compressedOutputStream
                            byte[] bytes = new byte[indexInput.Length()];
                            indexInput.ReadBytes(bytes, 0, (int)bytes.Length);
                            compressor.Write(bytes, 0, (int)bytes.Length);
                        }
                        indexInput.Close();

                        // seek back to the beginning of the compressed stream
                        compressedStream.Seek(0, SeekOrigin.Begin);

                        Debug.WriteLine(string.Format("COMPRESSED {0} -> {1} {2}% to {3}",
                           originalLength,
                           compressedStream.Length,
                           ((float)compressedStream.Length / (float)originalLength) * 100,
                           _name));
                    }
                    catch
                    {
                        // release the compressed stream resources if an error occurs
                        compressedStream.Dispose();
                        throw;
                    }

                    blobStream = compressedStream;
                }
                else
            #endif
                {
                    blobStream = new StreamInput(CacheDirectory.OpenInput(fileName));
                }

                try
                {
                    // push the blobStream up to the cloud
                    _blob.UploadFromStream(blobStream);

                    // set the metadata with the original index file properties
                    _blob.Metadata["CachedLength"] = originalLength.ToString();
                    _blob.Metadata["CachedLastModified"] = CacheDirectory.FileModified(fileName).ToString();
                    _blob.SetMetadata();

                    Debug.WriteLine(string.Format("PUT {1} bytes to {0} in cloud", _name, blobStream.Length));
                }
                finally
                {
                    blobStream.Dispose();
                }

            #if FULLDEBUG
                Debug.WriteLine(string.Format("CLOSED WRITESTREAM {0}", _name));
            #endif
                // clean up
                _indexOutput = null;
                _blobContainer = null;
                _blob = null;
                GC.SuppressFinalize(this);
            }
            finally
            {
                _fileMutex.ReleaseMutex();
            }
        }
Example 14
 public override void Init(IndexOutput termsOut)
 {
     this.termsOut = termsOut;
     CodecUtil.WriteHeader(termsOut, CODEC, VERSION_CURRENT);
     termsOut.WriteVInt(pending.Length); // encode maxPositions in header
     _wrappedPostingsWriter.Init(termsOut);
 }
        // TODO: what Var-Var codecs exist in practice... and what are their block sizes like?
        // If it's less than 128, we should set that as max and use byte?

        /// <summary>
        /// NOTE: maxBlockSize must be the maximum block size
        /// plus the max non-causal lookahead of your codec. E.g., Simple9
        /// requires lookahead=1 because only on seeing the Nth value
        /// does it know it must now encode the N-1 values before it.
        /// </summary>
        protected internal VariableIntBlockIndexOutput(IndexOutput output, int maxBlockSize)
        {
            this.output = output;
            this.output.WriteInt(maxBlockSize);
        }
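
To make the lookahead contract concrete, here is an illustrative subclass modeled on the mock variable-block codec whose constructor appears in Example 23 below. It assumes VariableIntBlockIndexOutput exposes a protected Add(int) override point returning the number of values flushed; only the constructor shape is confirmed by the snippets in this listing.

        internal class MockVariableIntBlockIndexOutput : VariableIntBlockIndexOutput
        {
            private readonly int baseBlockSize;
            private readonly int[] buffer;
            private int pendingCount;

            internal MockVariableIntBlockIndexOutput(IndexOutput output, int baseBlockSize)
                : base(output, 2 * baseBlockSize + 1) // largest block (2*baseBlockSize) plus lookahead of 1
            {
                this.baseBlockSize = baseBlockSize;
                this.buffer = new int[2 + 2 * baseBlockSize];
            }

            protected override int Add(int value) // assumed override point; sketch only
            {
                buffer[pendingCount++] = value;
                // The block length depends on the first value, so a block is only known
                // to be complete after seeing one value past it: the non-causal lookahead.
                int flushAt = buffer[0] <= 3 ? baseBlockSize : 2 * baseBlockSize;
                if (pendingCount == flushAt + 1)
                {
                    for (int i = 0; i < flushAt; i++)
                    {
                        output.WriteVInt(buffer[i]);
                    }
                    buffer[0] = buffer[flushAt]; // carry the lookahead value into the next block
                    pendingCount = 1;
                    return flushAt; // number of values flushed
                }
                return 0; // still buffering
            }
        }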
Example 16
 public virtual void RemoveIndexOutput(IndexOutput @out, string name)
 {
     lock (this)
     {
         OpenFilesForWrite.Remove(name);
         RemoveOpenFile(@out, name);
     }
 }
Example 17
 public BufferedIndexOutputWrapper(MockDirectoryWrapper outerInstance, int bufferSize, IndexOutput io)
     : base(bufferSize)
 {
     this.OuterInstance = outerInstance;
     this.Io = io;
 }
Example 18
        /// <summary>Merge files with the extensions added up to now.
        /// All files with these extensions are combined sequentially into the
        /// compound stream. After successful merge, the source files
        /// are deleted.
        /// </summary>
        /// <exception cref="SystemException">if Dispose() had been called before, or
        /// if no file has been added to this object</exception>
        public void Dispose()
        {
            // Extract into protected method if class ever becomes unsealed

            // TODO: Dispose shouldn't throw exceptions!
            if (merged)
            {
                throw new SystemException("Merge already performed");
            }

            if ((entries.Count == 0))
            {
                throw new SystemException("No entries to merge have been defined");
            }

            merged = true;

            // open the compound stream
            IndexOutput os = null;

            try
            {
                var state = StateHolder.Current.Value;
                os = directory.CreateOutput(fileName, state);

                // Write the number of entries
                os.WriteVInt(entries.Count);

                // Write the directory with all offsets at 0.
                // Remember the positions of directory entries so that we can
                // adjust the offsets later
                long totalSize = 0;
                foreach (FileEntry fe in entries)
                {
                    fe.directoryOffset = os.FilePointer;
                    os.WriteLong(0); // for now
                    os.WriteString(fe.file);
                    totalSize += directory.FileLength(fe.file, state);
                }

                // Pre-allocate size of file as optimization --
                // this can potentially help IO performance as
                // we write the file and also later during
                // searching.  It also uncovers a disk-full
                // situation earlier and hopefully without
                // actually filling disk to 100%:
                long finalLength = totalSize + os.FilePointer;
                os.SetLength(finalLength);

                // Open the files and copy their data into the stream.
                // Remember the locations of each file's data section.
                var buffer = new byte[16384];
                foreach (FileEntry fe in entries)
                {
                    fe.dataOffset = os.FilePointer;
                    CopyFile(fe, os, buffer, state);
                }

                // Write the data offsets into the directory of the compound stream
                foreach (FileEntry fe in entries)
                {
                    os.Seek(fe.directoryOffset);
                    os.WriteLong(fe.dataOffset);
                }

                System.Diagnostics.Debug.Assert(finalLength == os.Length);

                // Close the output stream. Set the os to null before trying to
                // close so that if an exception occurs during the close, the
                // finally clause below will not attempt to close the stream
                // the second time.
                IndexOutput tmp = os;
                os = null;
                tmp.Close();
            }
            finally
            {
                if (os != null)
                {
                    try
                    {
                        os.Close();
                    }
                    catch (System.IO.IOException)
                    {
                    }
                }
            }
        }
Example 19
 internal DirectCFSIndexOutput(CompoundFileWriter outerInstance, IndexOutput @delegate, FileEntry entry, bool isSeparate)
     : base()
 {
     this.OuterInstance = outerInstance;
     this.@delegate = @delegate;
     this.Entry = entry;
     entry.Offset = Offset = @delegate.FilePointer;
     this.IsSeparate = isSeparate;
 }
Example 20
 private void WriteEntryTable(ICollection<FileEntry> entries, IndexOutput entryOut)
 {
     CodecUtil.WriteHeader(entryOut, ENTRY_CODEC, VERSION_CURRENT);
     entryOut.WriteVInt(entries.Count);
     foreach (FileEntry fe in entries)
     {
         entryOut.WriteString(IndexFileNames.StripSegmentName(fe.File));
         entryOut.WriteLong(fe.Offset);
         entryOut.WriteLong(fe.Length);
     }
     CodecUtil.WriteFooter(entryOut);
 }
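
For symmetry, a hedged sketch of the matching reader for this entry table (the method name and tuple shape are illustrative; CodecUtil.CheckHeader/CheckFooter and Directory.OpenChecksumInput follow the Lucene.NET 4.8 API used elsewhere in this listing):

 private static Dictionary<string, (long Offset, long Length)> ReadEntryTable(Directory dir, string entriesFileName)
 {
     var entries = new Dictionary<string, (long, long)>();
     using (ChecksumIndexInput @in = dir.OpenChecksumInput(entriesFileName, IOContext.READONCE))
     {
         CodecUtil.CheckHeader(@in, ENTRY_CODEC, VERSION_CURRENT, VERSION_CURRENT);
         int count = @in.ReadVInt();
         for (int i = 0; i < count; i++)
         {
             string file = @in.ReadString();   // segment-stripped name, as written above
             long offset = @in.ReadLong();
             long length = @in.ReadLong();
             entries[file] = (offset, length);
         }
         CodecUtil.CheckFooter(@in); // validates the footer written by WriteEntryTable
     }
     return entries;
 }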
Example 21
        private void Write(Directory directory)
        {
            string segmentsFileName = NextSegmentFileName;

            // Always advance the generation on write:
            if (_generation == -1)
            {
                _generation = 1;
            }
            else
            {
                _generation++;
            }

            IndexOutput segnOutput = null;
            bool success = false;

            var upgradedSIFiles = new HashSet<string>();

            try
            {
                segnOutput = directory.CreateOutput(segmentsFileName, IOContext.DEFAULT);
                CodecUtil.WriteHeader(segnOutput, "segments", VERSION_48);
                segnOutput.WriteLong(Version);
                segnOutput.WriteInt(Counter); // write counter
                segnOutput.WriteInt(Size()); // write infos
                foreach (SegmentCommitInfo siPerCommit in segments)
                {
                    SegmentInfo si = siPerCommit.Info;
                    segnOutput.WriteString(si.Name);
                    segnOutput.WriteString(si.Codec.Name);
                    segnOutput.WriteLong(siPerCommit.DelGen);
                    int delCount = siPerCommit.DelCount;
                    if (delCount < 0 || delCount > si.DocCount)
                    {
                        throw new InvalidOperationException("cannot write segment: invalid docCount segment=" + si.Name + " docCount=" + si.DocCount + " delCount=" + delCount);
                    }
                    segnOutput.WriteInt(delCount);
                    segnOutput.WriteLong(siPerCommit.FieldInfosGen);
                    IDictionary<long, ISet<string>> genUpdatesFiles = siPerCommit.UpdatesFiles;
                    segnOutput.WriteInt(genUpdatesFiles.Count);
                    foreach (KeyValuePair<long, ISet<string>> e in genUpdatesFiles)
                    {
                        segnOutput.WriteLong(e.Key);
                        segnOutput.WriteStringSet(e.Value);
                    }
                    Debug.Assert(si.Dir == directory);

                    // If this segment is pre-4.x, perform a one-time
                    // "upgrade" to write the .si file for it:
                    string version = si.Version;
                    if (version == null || StringHelper.VersionComparator.Compare(version, "4.0") < 0)
                    {
                        if (!SegmentWasUpgraded(directory, si))
                        {
                            string markerFileName = IndexFileNames.SegmentFileName(si.Name, "upgraded", Lucene3xSegmentInfoFormat.UPGRADED_SI_EXTENSION);
                            si.AddFile(markerFileName);

                            string segmentFileName = Write3xInfo(directory, si, IOContext.DEFAULT);
                            upgradedSIFiles.Add(segmentFileName);
                            directory.Sync(/*Collections.singletonList(*/new[] { segmentFileName }/*)*/);

                            // Write separate marker file indicating upgrade
                            // is completed.  this way, if there is a JVM
                            // kill/crash, OS crash, power loss, etc. while
                            // writing the upgraded file, the marker file
                            // will be missing:
                            IndexOutput @out = directory.CreateOutput(markerFileName, IOContext.DEFAULT);
                            try
                            {
                                CodecUtil.WriteHeader(@out, SEGMENT_INFO_UPGRADE_CODEC, SEGMENT_INFO_UPGRADE_VERSION);
                            }
                            finally
                            {
                                @out.Dispose();
                            }
                            upgradedSIFiles.Add(markerFileName);
                            directory.Sync(/*Collections.SingletonList(*/new[] { markerFileName }/*)*/);
                        }
                    }
                }
                segnOutput.WriteStringStringMap(_userData);
                PendingSegnOutput = segnOutput;
                success = true;
            }
            finally
            {
                if (!success)
                {
                    // We hit an exception above; try to close the file
                    // but suppress any exception:
                    IOUtils.CloseWhileHandlingException(segnOutput);

                    foreach (string fileName in upgradedSIFiles)
                    {
                        try
                        {
                            directory.DeleteFile(fileName);
                        }
                        catch (Exception)
                        {
                            // Suppress so we keep throwing the original exception
                        }
                    }

                    try
                    {
                        // Try not to leave a truncated segments_N file in
                        // the index:
                        directory.DeleteFile(segmentsFileName);
                    }
                    catch (Exception)
                    {
                        // Suppress so we keep throwing the original exception
                    }
                }
            }
        }
        protected override void Dispose(bool disposing)
        {
            _fileMutex.WaitOne();
            try
            {
                // make sure it's all written out
                _indexOutput.Flush();

                long originalLength = _indexOutput.Length;
                _indexOutput.Dispose();

                var blobStream = new StreamInput(_cache.OpenInput(_name));
                
                try
                {
                    var elapsed = _cache.FileModified(_name);

                    // normalize RAMDirectory and FSDirectory times
                    if (elapsed > ticks1970)
                    {
                        elapsed -= ticks1970;
                    }

                    var cachedLastModifiedUTC = new DateTime(elapsed, DateTimeKind.Local).ToUniversalTime();

                    var wrapper = new ReadStreamWrapper(blobStream);
                    var data = new DataWithMetadata(wrapper, new Metadata
                    {
                        ContentLength = originalLength,
                        LastModified = cachedLastModifiedUTC
                    });

                    _saveTask(data).WaitAndWrap();

                    LeoTrace.WriteLine(string.Format("PUT {1} bytes to {0} in cloud", _name, blobStream.Length));
                }
                finally
                {
                    blobStream.Dispose();
                }

                // clean up
                _indexOutput = null;
                _cache = null;
                GC.SuppressFinalize(this);
            }
            finally
            {
                _fileMutex.ReleaseMutex();
            }
        }
Example 23
 /// <summary>
 /// Construct an empty output buffer. </summary>
 public MockIndexOutputWrapper(MockDirectoryWrapper dir, IndexOutput @delegate, string name)
 {
     this.Dir = dir;
     this.Name = name;
     this.@delegate = @delegate;
 }
 public VariableIntBlockIndexOutputAnonymousHelper(IndexOutput output, int baseBlockSize)
     : base(output, baseBlockSize)
 {
     this.baseBlockSize = baseBlockSize;
     this.buffer = new int[2 + 2 * baseBlockSize];
 }
Example 26
 /// <summary>
 /// Writes a codec footer, which records both a checksum
 /// algorithm ID and a checksum. this footer can
 /// be parsed and validated with
 /// <seealso cref="#checkFooter(ChecksumIndexInput) checkFooter()"/>.
 /// <p>
 /// CodecFooter --&gt; Magic,AlgorithmID,Checksum
 /// <ul>
 ///    <li>Magic --&gt; <seealso cref="DataOutput#writeInt Uint32"/>. this
 ///        identifies the start of the footer. It is always {@value #FOOTER_MAGIC}.
 ///    <li>AlgorithmID --&gt; <seealso cref="DataOutput#writeInt Uint32"/>. this
 ///        indicates the checksum algorithm used. Currently this is always 0,
 ///        for zlib-crc32.
 ///    <li>Checksum --&gt; <seealso cref="DataOutput#writeLong Uint64"/>. The
 ///        actual checksum value for all previous bytes in the stream, including
 ///        the bytes from Magic and AlgorithmID.
 /// </ul>
 /// </summary>
 /// <param name="out"> Output stream </param>
 /// <exception cref="IOException"> If there is an I/O error writing to the underlying medium. </exception>
 public static void WriteFooter(IndexOutput @out)
 {
     @out.WriteInt(FOOTER_MAGIC);
     @out.WriteInt(0);
     @out.WriteLong(@out.Checksum);
 }
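
A hedged round-trip sketch for WriteFooter, using the CheckFooter validation path the doc comment references (dir is assumed to be an open Directory; the codec name, version, and file name are illustrative):

  using (IndexOutput @out = dir.CreateOutput("example.dat", IOContext.DEFAULT))
  {
      CodecUtil.WriteHeader(@out, "ExampleCodec", 1);
      @out.WriteVInt(42); // payload
      CodecUtil.WriteFooter(@out); // magic + algorithm ID + CRC-32 of all prior bytes
  }

  using (ChecksumIndexInput @in = dir.OpenChecksumInput("example.dat", IOContext.DEFAULT))
  {
      CodecUtil.CheckHeader(@in, "ExampleCodec", 1, 1);
      int payload = @in.ReadVInt();
      CodecUtil.CheckFooter(@in); // throws CorruptIndexException if the checksum doesn't match
  }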
		public ChecksumIndexOutput(IndexOutput main)
		{
			this.main = main;
			digest = new CRC32();
		}
Example 28
        public virtual void  TestDirectInstantiation()
        {
            System.IO.FileInfo path = new System.IO.FileInfo(SupportClass.AppSettings.Get("tempDir", System.IO.Path.GetTempPath()));

            int sz = 2;

            Directory[] dirs = new Directory[sz];

            dirs[0] = new SimpleFSDirectory(path, null);
            // dirs[1] = new NIOFSDirectory(path, null);
            System.Console.WriteLine("Skipping NIOFSDirectory() test under Lucene.Net");
            dirs[1] = new MMapDirectory(path, null);

            for (int i = 0; i < sz; i++)
            {
                Directory dir = dirs[i];
                dir.EnsureOpen();
                System.String fname       = "foo." + i;
                System.String lockname    = "foo" + i + ".lck";
                IndexOutput   out_Renamed = dir.CreateOutput(fname);
                out_Renamed.WriteByte((byte)i);
                out_Renamed.Close();

                for (int j = 0; j < sz; j++)
                {
                    Directory d2 = dirs[j];
                    d2.EnsureOpen();
                    Assert.IsTrue(d2.FileExists(fname));
                    Assert.AreEqual(1, d2.FileLength(fname));

                    // don't test read on MMapDirectory, since it can't really be
                    // closed and will cause a failure to delete the file.
                    if (d2 is MMapDirectory)
                    {
                        continue;
                    }

                    IndexInput input = d2.OpenInput(fname);
                    Assert.AreEqual((byte)i, input.ReadByte());
                    input.Close();
                }

                // delete with a different dir
                dirs[(i + 1) % sz].DeleteFile(fname);

                for (int j = 0; j < sz; j++)
                {
                    Directory d2 = dirs[j];
                    Assert.IsFalse(d2.FileExists(fname));
                }

                Lock lock_Renamed = dir.MakeLock(lockname);
                Assert.IsTrue(lock_Renamed.Obtain());

                for (int j = 0; j < sz; j++)
                {
                    Directory d2    = dirs[j];
                    Lock      lock2 = d2.MakeLock(lockname);
                    try
                    {
                        Assert.IsFalse(lock2.Obtain(1));
                    }
                    catch (LockObtainFailedException e)
                    {
                        // OK
                    }
                }

                lock_Renamed.Release();

                // now lock with different dir
                lock_Renamed = dirs[(i + 1) % sz].MakeLock(lockname);
                Assert.IsTrue(lock_Renamed.Obtain());
                lock_Renamed.Release();
            }

            for (int i = 0; i < sz; i++)
            {
                Directory dir = dirs[i];
                dir.EnsureOpen();
                dir.Close();
                Assert.IsFalse(dir.isOpen_ForNUnit);
            }
        }
Example 29
        public virtual void TestCopyBytesMem()
        {
            int num = AtLeast(10);

            for (int iter = 0; iter < num; iter++)
            {
                Directory dir = NewDirectory();
                if (VERBOSE)
                {
                    Console.WriteLine("TEST: iter=" + iter + " dir=" + dir);
                }

                // make random file
                IndexOutput @out     = dir.CreateOutput("test", NewIOContext(Random));
                var         bytes    = new byte[TestUtil.NextInt32(Random, 1, 77777)];
                int         size     = TestUtil.NextInt32(Random, 1, 1777777);
                int         upto     = 0;
                int         byteUpto = 0;
                while (upto < size)
                {
                    bytes[byteUpto++] = Value(upto);
                    upto++;
                    if (byteUpto == bytes.Length)
                    {
                        @out.WriteBytes(bytes, 0, bytes.Length);
                        byteUpto = 0;
                    }
                }

                @out.WriteBytes(bytes, 0, byteUpto);
                Assert.AreEqual(size, @out.GetFilePointer());
                @out.Dispose();
                Assert.AreEqual(size, dir.FileLength("test"));

                // copy from test -> test2
                IndexInput @in = dir.OpenInput("test", NewIOContext(Random));

                @out = dir.CreateOutput("test2", NewIOContext(Random));

                upto = 0;
                while (upto < size)
                {
                    if (Random.NextBoolean())
                    {
                        @out.WriteByte(@in.ReadByte());
                        upto++;
                    }
                    else
                    {
                        int chunk = Math.Min(TestUtil.NextInt32(Random, 1, bytes.Length), size - upto);
                        @out.CopyBytes(@in, chunk);
                        upto += chunk;
                    }
                }
                Assert.AreEqual(size, upto);
                @out.Dispose();
                @in.Dispose();

                // verify
                IndexInput in2 = dir.OpenInput("test2", NewIOContext(Random));
                upto = 0;
                while (upto < size)
                {
                    if (Random.NextBoolean())
                    {
                        var v = in2.ReadByte();
                        Assert.AreEqual(Value(upto), v);
                        upto++;
                    }
                    else
                    {
                        int limit = Math.Min(TestUtil.NextInt32(Random, 1, bytes.Length), size - upto);
                        in2.ReadBytes(bytes, 0, limit);
                        for (int byteIdx = 0; byteIdx < limit; byteIdx++)
                        {
                            Assert.AreEqual(Value(upto), bytes[byteIdx]);
                            upto++;
                        }
                    }
                }
                in2.Dispose();

                dir.DeleteFile("test");
                dir.DeleteFile("test2");

                dir.Dispose();
            }
        }
Example 30
        public override void Close()
        {
            _fileMutex.WaitOne();
            try
            {
                string fileName = _name;

                // make sure it's all written out
                _indexOutput.Flush();

                long originalLength = _indexOutput.Length();
                _indexOutput.Close();

                Stream blobStream;

                // optionally put a compressor around the blob stream
                if (_azureDirectory.ShouldCompressFile(_name))
                {
                    blobStream = CompressStream(fileName, originalLength);
                }
                else
                {
                    blobStream = new StreamInput(CacheDirectory.OpenInput(fileName));
                }

                try
                {
                    // push the blobStream up to the cloud
                    _blob.UploadFromStream(blobStream);

                    // set the metadata with the original index file properties
                    _blob.Metadata["CachedLength"] = originalLength.ToString();
                    _blob.Metadata["CachedLastModified"] = CacheDirectory.FileModified(fileName).ToString();
                    _blob.SetMetadata();

                    Debug.WriteLine(string.Format("PUT {1} bytes to {0} in cloud", _name, blobStream.Length));
                }
                finally
                {
                    blobStream.Dispose();
                }

            #if FULLDEBUG
                Debug.WriteLine(string.Format("CLOSED WRITESTREAM {0}", _name));
            #endif
                // clean up
                _indexOutput = null;
                _blobContainer = null;
                _blob = null;
                GC.SuppressFinalize(this);
            }
            finally
            {
                _fileMutex.ReleaseMutex();
            }
        }
        private int NumBufferedDocs; // docBase + numBufferedDocs == current doc ID

        /// <summary>
        /// Sole constructor. </summary>
        public CompressingStoredFieldsWriter(Directory directory, SegmentInfo si, string segmentSuffix, IOContext context, string formatName, CompressionMode compressionMode, int chunkSize)
        {
            Debug.Assert(directory != null);
            this.Directory = directory;
            this.Segment = si.Name;
            this.SegmentSuffix = segmentSuffix;
            this.CompressionMode = compressionMode;
            this.Compressor = compressionMode.NewCompressor();
            this.ChunkSize = chunkSize;
            this.DocBase = 0;
            this.BufferedDocs = new GrowableByteArrayDataOutput(chunkSize);
            this.NumStoredFields = new int[16];
            this.EndOffsets = new int[16];
            this.NumBufferedDocs = 0;

            bool success = false;
            IndexOutput indexStream = directory.CreateOutput(IndexFileNames.SegmentFileName(Segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_INDEX_EXTENSION), context);
            try
            {
                FieldsStream = directory.CreateOutput(IndexFileNames.SegmentFileName(Segment, segmentSuffix, Lucene40StoredFieldsWriter.FIELDS_EXTENSION), context);

                string codecNameIdx = formatName + CODEC_SFX_IDX;
                string codecNameDat = formatName + CODEC_SFX_DAT;
                CodecUtil.WriteHeader(indexStream, codecNameIdx, VERSION_CURRENT);
                CodecUtil.WriteHeader(FieldsStream, codecNameDat, VERSION_CURRENT);
                Debug.Assert(CodecUtil.HeaderLength(codecNameDat) == FieldsStream.FilePointer);
                Debug.Assert(CodecUtil.HeaderLength(codecNameIdx) == indexStream.FilePointer);

                IndexWriter = new CompressingStoredFieldsIndexWriter(indexStream);
                indexStream = null;

                FieldsStream.WriteVInt(chunkSize);
                FieldsStream.WriteVInt(PackedInts.VERSION_CURRENT);

                success = true;
            }
            finally
            {
                if (!success)
                {
                    IOUtils.CloseWhileHandlingException(indexStream);
                    Abort();
                }
            }
        }
Example 32
        internal void RollbackCommit(Directory dir)
        {
            if (PendingSegnOutput != null)
            {
                // Suppress so we keep throwing the original exception
                // in our caller
                IOUtils.CloseWhileHandlingException(PendingSegnOutput);
                PendingSegnOutput = null;

                // Must carefully compute fileName from "generation"
                // since lastGeneration isn't incremented:
                string segmentFileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", _generation);
                // Suppress so we keep throwing the original exception
                // in our caller
                IOUtils.DeleteFilesIgnoringExceptions(dir, segmentFileName);
            }
        }
Example 33
 public StreamOutput(IndexOutput output)
 {
     Output = output;
 }
Example 34
        internal void FinishCommit(Directory dir)
        {
            if (PendingSegnOutput == null)
            {
                throw new InvalidOperationException("prepareCommit was not called");
            }
            bool success = false;
            try
            {
                CodecUtil.WriteFooter(PendingSegnOutput);
                success = true;
            }
            finally
            {
                if (!success)
                {
                    // Closes pendingSegnOutput & deletes partial segments_N:
                    RollbackCommit(dir);
                }
                else
                {
                    success = false;
                    try
                    {
                        PendingSegnOutput.Dispose();
                        success = true;
                    }
                    finally
                    {
                        if (!success)
                        {
                            // Closes pendingSegnOutput & deletes partial segments_N:
                            RollbackCommit(dir);
                        }
                        else
                        {
                            PendingSegnOutput = null;
                        }
                    }
                }
            }

            // NOTE: if we crash here, we have left a segments_N
            // file in the directory in a possibly corrupt state (if
            // some bytes made it to stable storage and others
            // didn't).  But, the segments_N file includes checksum
            // at the end, which should catch this case.  So when a
            // reader tries to read it, it will throw a
            // CorruptIndexException, which should cause the retry
            // logic in SegmentInfos to kick in and load the last
            // good (previous) segments_N-1 file.

            var fileName = IndexFileNames.FileNameFromGeneration(IndexFileNames.SEGMENTS, "", _generation);
            success = false;
            try
            {
                dir.Sync(Collections.Singleton(fileName));
                success = true;
            }
            finally
            {
                if (!success)
                {
                    try
                    {
                        dir.DeleteFile(fileName);
                    }
                    catch (Exception)
                    {
                        // Suppress so we keep throwing the original exception
                    }
                }
            }

            _lastGeneration = _generation;
            WriteSegmentsGen(dir, _generation);
        }
Example 35
        public virtual void TestDirectInstantiation()
        {
            DirectoryInfo path = CreateTempDir("testDirectInstantiation");

            byte[] largeBuffer = new byte[Random.Next(256 * 1024)], largeReadBuffer = new byte[largeBuffer.Length];
            for (int i = 0; i < largeBuffer.Length; i++)
            {
                largeBuffer[i] = (byte)i; // automatically loops with modulo
            }

            var dirs = new FSDirectory[] { new SimpleFSDirectory(path, null), new NIOFSDirectory(path, null), new MMapDirectory(path, null) };

            for (int i = 0; i < dirs.Length; i++)
            {
                FSDirectory dir = dirs[i];
                dir.EnsureOpen();
                string      fname    = "foo." + i;
                string      lockname = "foo" + i + ".lck";
                IndexOutput @out     = dir.CreateOutput(fname, NewIOContext(Random));
                @out.WriteByte((byte)i);
                @out.WriteBytes(largeBuffer, largeBuffer.Length);
                @out.Dispose();

                for (int j = 0; j < dirs.Length; j++)
                {
                    FSDirectory d2 = dirs[j];
                    d2.EnsureOpen();
                    Assert.IsTrue(SlowFileExists(d2, fname));
                    Assert.AreEqual(1 + largeBuffer.Length, d2.FileLength(fname));

                    // LUCENENET specific - unmap hack not needed
                    //// don't do read tests if unmapping is not supported!
                    //if (d2 is MMapDirectory && !((MMapDirectory)d2).UseUnmap)
                    //{
                    //    continue;
                    //}

                    IndexInput input = d2.OpenInput(fname, NewIOContext(Random));
                    Assert.AreEqual((byte)i, input.ReadByte());
                    // read array with buffering enabled
                    Arrays.Fill(largeReadBuffer, (byte)0);
                    input.ReadBytes(largeReadBuffer, 0, largeReadBuffer.Length, true);
                    Assert.AreEqual(largeBuffer, largeReadBuffer);
                    // read again without using buffer
                    input.Seek(1L);
                    Arrays.Fill(largeReadBuffer, (byte)0);
                    input.ReadBytes(largeReadBuffer, 0, largeReadBuffer.Length, false);
                    Assert.AreEqual(largeBuffer, largeReadBuffer);
                    input.Dispose();
                }

                // delete with a different dir
                dirs[(i + 1) % dirs.Length].DeleteFile(fname);

                for (int j = 0; j < dirs.Length; j++)
                {
                    FSDirectory d2 = dirs[j];
                    Assert.IsFalse(SlowFileExists(d2, fname));
                }

                Lock @lock = dir.MakeLock(lockname);
                Assert.IsTrue(@lock.Obtain());

                for (int j = 0; j < dirs.Length; j++)
                {
                    FSDirectory d2    = dirs[j];
                    Lock        lock2 = d2.MakeLock(lockname);
                    try
                    {
                        Assert.IsFalse(lock2.Obtain(1));
                    }
#pragma warning disable 168
                    catch (LockObtainFailedException e)
#pragma warning restore 168
                    {
                        // OK
                    }
                }

                @lock.Dispose();

                // now lock with different dir
                @lock = dirs[(i + 1) % dirs.Length].MakeLock(lockname);
                Assert.IsTrue(@lock.Obtain());
                @lock.Dispose();
            }

            for (int i = 0; i < dirs.Length; i++)
            {
                FSDirectory dir = dirs[i];
                dir.EnsureOpen();
                dir.Dispose();
                Assert.IsFalse(dir.IsOpen);
            }
        }
Example 36
 /// <summary>
 /// Copy the contents of the file with specified extension into the provided
 /// output stream.
 /// </summary>
 private long CopyFileEntry(IndexOutput dataOut, FileEntry fileEntry)
 {
     IndexInput @is = fileEntry.Dir.OpenInput(fileEntry.File, IOContext.READONCE);
     bool success = false;
     try
     {
         long startPtr = dataOut.FilePointer;
         long length = fileEntry.Length;
         dataOut.CopyBytes(@is, length);
         // Verify that the output length diff is equal to original file
         long endPtr = dataOut.FilePointer;
         long diff = endPtr - startPtr;
         if (diff != length)
         {
             throw new System.IO.IOException("Difference in the output file offsets " + diff + " does not match the original file length " + length);
         }
         fileEntry.Offset = startPtr;
         success = true;
         return length;
     }
     finally
     {
         if (success)
         {
             IOUtils.Close(@is);
             // copy successful - delete file
             fileEntry.Dir.DeleteFile(fileEntry.File);
         }
         else
         {
             IOUtils.CloseWhileHandlingException(@is);
         }
     }
 }