Example #1
0
        /// <summary>
        /// Allocates a fresh <see cref="DataSlice"/> carrying the next sequential slice id
        /// and advances the internal counter for the following allocation.
        /// </summary>
        /// <returns>The newly created slice.</returns>
        public DataSlice CreateNewDataSlice()
        {
            // Post-increment hands the current counter value to the slice,
            // then bumps it for the next caller.
            return new DataSlice(_sliceCount++);
        }
Example #2
0
        // think of a better return value
        // think of a better return value
        /// <summary>
        /// Reads the raw bytes of <paramref name="dataSlice"/> from every segment listed in its
        /// header, concatenates them into one buffer and assigns it to <c>dataSlice.Data</c>.
        /// All errors are logged and swallowed; the slice is returned either way, so callers
        /// must treat a null/partial <c>Data</c> as a failed read.
        /// </summary>
        /// <param name="dataSlice">Slice whose header lists the segment ids and total size to read.</param>
        /// <returns>The same <paramref name="dataSlice"/> instance, with <c>Data</c> populated on success.</returns>
        public DataSlice ReadDataSlice(DataSlice dataSlice)
        {
            try
            {
                Impersonation impersonation = null;
                try
                {
                    if (_isSharedPath)
                    {
                        impersonation = new Impersonation(_username, _password);
                    }

                    // lazily open the backup file on first use
                    if (_fileStream == null)
                    {
                        _fileStream = GetFile();
                    }
                    byte[] _data  = new byte[dataSlice.SliceHeader.TotalSize];
                    int    offset = 0;

                    // a slice may span multiple segments (overflow); stitch the pieces together in order
                    foreach (long segID in dataSlice.SliceHeader.SegmentIds)
                    {
                        Segment segment = _segmentManager.GetSegment(segID);

                        byte[] _splitData = segment.ReadDataSlice(_fileStream, dataSlice.SliceHeader.Id);
                        try
                        {
                            System.Buffer.BlockCopy(_splitData, 0, _data, offset, _splitData.Length);

                            offset += _splitData.Length;
                        }
                        catch (Exception exp)
                        {
                            if (LoggerManager.Instance.RecoveryLogger != null &&
                                LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                            {
                                LoggerManager.Instance.RecoveryLogger.Error("BackupFile.ReadDataSlice()", exp.ToString());
                            }
                        }
                    }
                    dataSlice.Data = _data;
                }
                finally
                {
                    // FIX: the impersonation context was previously disposed only on the success
                    // path and leaked whenever a read threw; release it on every path.
                    if (impersonation != null)
                    {
                        impersonation.Dispose();
                    }
                }
            }
            catch (Exception ex)
            {
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                {
                    LoggerManager.Instance.RecoveryLogger.Error("BackupFile.ReadDataSlice()", ex.ToString());
                }
            }

            return(dataSlice);
        }
Example #3
0
        /// <summary>
        /// Rebuilds per-slice metadata for this segment by seeking to each slice's recorded
        /// starting offset in <paramref name="stream"/> and deserializing its header.
        /// Failures for an individual slice are logged and skipped.
        /// </summary>
        /// <param name="stream">Seekable backup-file stream; this method repositions it freely.</param>
        /// <returns>All slices accumulated in the internal map after the rebuild.</returns>
        internal List <DataSlice> RecreateSliceMetaInfo(Stream stream)
        {
            foreach (long key in SegmentHeader.SliceMap.Keys)
            {
                DataSlice _slice = new DataSlice(key);

                long _startingOffset = SegmentHeader.SliceMap[key];

                try
                {
                    stream.Seek(_startingOffset, SeekOrigin.Begin);

                    // read header length (4-byte length prefix written by WriteDataSlice)
                    byte[] len = new byte[4];
                    if (stream.Read(len, 0, 4) > 0)
                    {
                        int    length = BitConverter.ToInt32(len, 0);
                        byte[] data   = new byte[length];

                        // FIX: Stream.Read may legally return fewer bytes than requested; the old
                        // single call could hand a partially-filled buffer to the deserializer.
                        // Loop until the whole header is buffered or the stream ends prematurely.
                        int read = 0;
                        while (read < length)
                        {
                            int chunk = stream.Read(data, read, length - read);
                            if (chunk <= 0)
                            {
                                throw new EndOfStreamException(
                                    "Unexpected end of stream while reading header of slice " + key);
                            }
                            read += chunk;
                        }

                        _slice.PopulateHeader(CompactBinaryFormatter.FromByteBuffer(data, string.Empty) as DataSlice.Header);

                        _slice.SliceHeader.SegmentIds.Add(_segmentHeader.Id);

                        // internal map (Add throws on a duplicate id, which the catch below logs)
                        _containingDataSlice.Add(_slice.SliceHeader.Id, _slice);
                    }
                }
                catch (Exception ex)
                {
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Error("Segment.RecreateSliceMeta()", ex.ToString());
                    }
                }
            }
            return(_containingDataSlice.Values.ToList());
        }
Example #4
0
        /// <summary>
        /// Routes a slice to the correct backup file (per-database file for data slices,
        /// the config-server file otherwise) and persists it. Slices with no payload are
        /// ignored; all errors are logged and swallowed.
        /// </summary>
        /// <param name="slice">Slice to persist; skipped when <c>Data</c> is null.</param>
        private void WriteData(DataSlice slice)
        {
            string fileName;

            if (slice.Data != null)
            {
                try
                {
                    if (slice.SliceHeader.ContentType == Common.Recovery.DataSliceType.Data)
                    {
                        fileName = slice.SliceHeader.Database;
                    }
                    else
                    {
                        fileName = RecoveryFolderStructure.CONFIG_SERVER;
                    }

                    BackupFile file = Context.GetBackupFile(fileName);
                    if (file != null)
                    {
                        file.SaveDataSlice(slice);
                    }
                    else
                    {
                        // FIX: never throw NullReferenceException directly; it signals a runtime
                        // fault, not a missing-file condition. The type is only observed by the
                        // catch below, so this change is invisible to callers.
                        throw new InvalidOperationException("File " + fileName + " cannot be null");
                    }
                }
                catch (Exception exp)
                {
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Error("RecoveryIOWriter.Write()", exp.ToString());
                    }
                }
            }
        }
Example #5
0
        /// <summary>
        /// Writes a data slice (header + payload, each with a 4-byte length prefix) into this
        /// segment's region of the backup file, starting at the segment's current free offset.
        /// </summary>
        /// <param name="stream">Seekable backup-file stream to write into.</param>
        /// <param name="dataSlice">Slice whose header and data are serialized.</param>
        /// <param name="offset">Byte offset into the slice's data to start from (used for overflow writes).</param>
        /// <param name="length">Number of payload bytes to write, or -1 to write the entire payload.</param>
        public void WriteDataSlice(Stream stream, DataSlice dataSlice, long offset, long length = -1)
        {
            // segment starting offset i.e {[(16mb * segments #)+file_header_space] + (current offset+ reserved_space_for_header+1)}
            long _startingOffset = ((BackupFile.HeaderCapacity + ((_dataCapacity + _headerCapacity) * SegmentHeader.Id)) + (_headerCapacity + _currentSize + 1));

            //add slice id and starting offset of each slice
            if (!_segmentHeader.SliceMap.ContainsKey(dataSlice.SliceHeader.Id))
            {
                _segmentHeader.SliceMap.Add(dataSlice.SliceHeader.Id, _startingOffset);
            }

            // MappedData records how many payload bytes this segment actually holds for the slice.
            if (length == -1)
            {
                dataSlice.SliceHeader.MappedData = dataSlice.Size;
            }
            else
            {
                dataSlice.SliceHeader.MappedData = length;
            }
            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
            {
                LoggerManager.Instance.RecoveryLogger.Info("Segment.Write()", _segmentHeader.ToString() + " Writing to file " + dataSlice.SliceHeader.ToString());
            }

            try
            {
                //Calculate crc for data

                // seek to point
                stream.Seek(_startingOffset, SeekOrigin.Begin);

                //1. write slice header
                byte[] header = CompactBinaryFormatter.ToByteBuffer(dataSlice.SliceHeader, string.Empty);

                // 4-byte length prefix, then the serialized header; _currentSize tracks every byte
                // appended so the next slice's starting offset stays correct.
                int _headerLength = header.Length;
                stream.Write(BitConverter.GetBytes(_headerLength), 0, 4);
                _currentSize += 4;

                stream.Write(header, 0, _headerLength);
                _currentSize += _headerLength;

                //2. write data as per offset, if length is 0 then write entire data

                byte[] data = dataSlice.Data;
                // NOTE(review): the ternary is a long, so GetBytes yields 8 bytes but only the
                // first 4 are written (low bytes on little-endian). Readers appear to expect a
                // 4-byte prefix; lengths over int.MaxValue would be silently truncated — confirm.
                stream.Write(BitConverter.GetBytes(length == -1 ? dataSlice.Size : length), 0, 4);
                _currentSize += 4;

                // write complete data
                if (length == -1)
                {
                    int _dataLength = data.Length;

                    stream.Write(data, 0, _dataLength);
                    _currentSize += _dataLength;
                }
                else
                {
                    // write truncated data
                    byte[] truncated   = CopySlice(data, offset, length);
                    int    _dataLength = truncated.Length;

                    stream.Write(truncated, 0, _dataLength);
                    _currentSize += _dataLength;
                }

                //3. if segment is completely filled write segment header
                // NOTE(review): "< 5" looks like a near-full heuristic (fewer than 5 free bytes) — confirm.
                if (EmptySpace < 5)
                {
                    SaveHeader(stream);
                }
            }
            catch (Exception ex)
            {
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                {
                    LoggerManager.Instance.RecoveryLogger.Error("Segment.WriteSlice()", ex.ToString());
                }
            }
        }
Example #6
0
        /// <summary>
        /// Persists a slice into this backup file. The segment manager picks one or more
        /// segments with enough room; when the slice spans several segments, each segment's
        /// portion is chained via <c>NextSegmentId</c> and <c>OverFlow</c> is set.
        /// Errors are logged and swallowed.
        /// </summary>
        /// <param name="dataSlice">Slice to write; its header is mutated with overflow/chaining info.</param>
        public void SaveDataSlice(DataSlice dataSlice)
        {
            try
            {
                Impersonation impersonation = null;
                try
                {
                    if (_isSharedPath)
                    {
                        impersonation = new Impersonation(_username, _password);
                    }

                    // lazily open the backup file on first use
                    if (_fileStream == null)
                    {
                        _fileStream = GetFile();
                    }

                    //1. get segment from segment manager, 8 is padded for length that is added
                    SliceFacilitator[] _segmentList = _segmentManager.GetFacilitatingSegments(dataSlice.Size,
                                                                                              dataSlice.HeaderSize, 8);

                    if (_segmentList.Length > 1)
                    {
                        // slice does not fit in one segment: split it, chaining segments together
                        dataSlice.SliceHeader.OverFlow = true;
                        int  i      = 0;
                        long offset = 0;
                        foreach (SliceFacilitator _fac in _segmentList)
                        {
                            //4. write slice to file
                            if (i + 1 < _segmentList.Length)
                            {
                                dataSlice.SliceHeader.NextSegmentId = _segmentList[i + 1].Segment.SegmentHeader.Id;
                            }
                            else
                            {
                                // last piece terminates the chain
                                dataSlice.SliceHeader.NextSegmentId = -1;
                            }

                            _fac.Segment.SegmentHeader.NumberOfSlices++;
                            lock (_mutex)
                            {
                                _fac.Segment.WriteDataSlice(_fileStream, dataSlice, offset, _fac.Size);
                            }
                            offset += _fac.Size;
                            i++;
                        }
                    }
                    else
                    {
                        dataSlice.SliceHeader.OverFlow      = false;
                        dataSlice.SliceHeader.NextSegmentId = -1;
                        _segmentList[0].Segment.SegmentHeader.NumberOfSlices++;
                        lock (_mutex)
                        {
                            _segmentList[0].Segment.WriteDataSlice(_fileStream, dataSlice, 0);
                        }
                    }
                }
                finally
                {
                    // FIX: the impersonation context was previously disposed only on the success
                    // path and leaked whenever a write threw; release it on every path.
                    if (impersonation != null)
                    {
                        impersonation.Dispose();
                    }
                }
            }
            catch (Exception ex)
            {
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                {
                    LoggerManager.Instance.RecoveryLogger.Error("BackupFile.SaveDataSlice()", ex.ToString());
                }
            }
        }
Example #7
0
 /// <summary>Enqueues a slice onto the underlying collection.</summary>
 /// <param name="slice">Slice to enqueue.</param>
 public void Add(DataSlice slice) => documentCollection.Add(slice);
Example #8
0
 /// <summary>Attempts to dequeue a slice, waiting up to the given timeout.</summary>
 /// <param name="dataSlice">Receives the dequeued slice on success.</param>
 /// <param name="timeout">Maximum wait, in the units the underlying collection expects.</param>
 /// <returns>True when a slice was taken; false on timeout.</returns>
 public bool TryTake(out DataSlice dataSlice, int timeout)
     => documentCollection.TryTake(out dataSlice, timeout);
Example #9
0
        /// <summary>
        /// Producer loop of the recovery reader. Depending on the job type, reads either the
        /// config-server backup file or all data files, pushing every recovered slice onto the
        /// shared queue, and finally enqueues command slices that tell the consumer the
        /// production phase is complete. Errors are logged and swallowed.
        /// </summary>
        internal override void Run()
        {
            // 1. based on job type, select file name
            // read that file
            // and then read the other file and viola
            try
            {
                switch (Context.JobType)
                {
                case RecoveryJobType.ConfigRestore:

                    #region config restore
                    foreach (BackupFile file in Context.FileList)
                    {
                        if (file.Name.Contains(RecoveryFolderStructure.CONFIG_SERVER))
                        {
                            IDictionary <long, DataSlice> sliceMap = file.RecreateMetaInfo();
                            foreach (DataSlice sliceID in sliceMap.Values)
                            {
                                // Add data to shared queue
                                Context.SharedQueue.Add(file.ReadDataSlice(sliceID));
                            }

                            file.Close();
                            if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
                            {
                                LoggerManager.Instance.RecoveryLogger.Info("RecoveryIOReader.Run()", "Config file reading complete");
                            }
                        }
                    }
                    #endregion
                    break;

                case RecoveryJobType.DataRestore:
                    #region DataRestore
                    //1. Select data files (everything except the oplog)
                    List <BackupFile> dataFile = Context.FileList.Where(x => !x.Name.Contains(RecoveryFolderStructure.OPLOG)).ToList <BackupFile>();
                    //2. iterate and produce them completely
                    foreach (BackupFile file in dataFile)
                    {
                        IDictionary <long, DataSlice> sliceMap = file.RecreateMetaInfo();
                        Dictionary <string, string[]> inner    = Context.PersistenceConfiguration.DbCollectionMap.Where(x => x.Key.Equals(Context.ActiveDB)).Select(x => x.Value).First();
                        string[] collectionList = inner.First().Value;
                        //2. based on config select slices to read
                        foreach (string collection in collectionList)
                        {
                            // get all slices holding data of collection to be restored
                            List <DataSlice> _slicesToRestore = sliceMap.Where(x => x.Value.SliceHeader.Collection.Equals(collection)).Select(x => x.Value).ToList <DataSlice>();

                            foreach (DataSlice sliceID in _slicesToRestore)
                            {
                                // Add data to shared queue
                                Context.SharedQueue.Add(file.ReadDataSlice(sliceID));
                            }
                        }
                        file.Close();
                    }
                    //3. inform consuming job of complete iteration
                    Context.SharedQueue.Add(CreateCommandSlice(999998, "Complete_Adding_Data"));
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Info("RecoveryIOReader.Run()", "Db file reading complete");
                    }
                    //4. wait for message to produce next
                    while (!Context.SharedQueue.PauseProducing)
                    {
                        // FIX: this was a hot spin that pegged a core; sleep between polls.
                        Thread.Sleep(100);
                    }
                    // (an empty "if (!PauseProducing)" dead-code block was removed here)

                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Info("RecoveryIOReader.Run()", "oplog file reading complete");
                    }
                    #endregion
                    break;
                }

                // Add final command slice signalling end of all production
                Context.SharedQueue.Add(CreateCommandSlice(999999, "Complete_Adding"));
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
                {
                    LoggerManager.Instance.RecoveryLogger.Info("RecoveryIOReader.Run()", "Complete status set");
                }
            }
            catch (ThreadAbortException)
            {
                Thread.ResetAbort();
            }
            catch (Exception exp)
            {
                if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                {
                    LoggerManager.Instance.RecoveryLogger.Error("RecoveryIOReader.Run()", exp.ToString());
                }
            }
        }

        /// <summary>
        /// Builds a command slice used to signal pipeline state to the consumer
        /// (the two construction sites above were duplicated; factored out here).
        /// </summary>
        /// <param name="id">Reserved command-slice id (999998 / 999999).</param>
        /// <param name="message">Command payload string serialized into the slice data.</param>
        private static DataSlice CreateCommandSlice(long id, string message)
        {
            DataSlice slice = new DataSlice(id);
            slice.SliceHeader.Collection  = "Complete";
            slice.SliceHeader.Database    = "";
            slice.SliceHeader.Cluster     = "";
            slice.SliceHeader.ContentType = DataSliceType.Command;
            slice.Data = CompactBinaryFormatter.ToByteBuffer(message, string.Empty);
            return slice;
        }