/// <summary>
/// Executes the given query against this collection and returns a reader over the result set.
/// </summary>
/// <param name="queryText">Query text to execute; must not be null or empty.</param>
/// <param name="parameters">Query parameters; a null collection is treated as no parameters.</param>
/// <returns>An <see cref="ICollectionReader"/> over the server's response data chunk.</returns>
/// <exception cref="ArgumentException">
/// Thrown when <paramref name="queryText"/> is null or empty, or when a parameter type is not
/// supported on a JSON-document collection.
/// </exception>
/// <exception cref="Exception">Thrown when the server reports the operation failed.</exception>
public ICollectionReader ExecuteReader(string queryText, ICollection<IParameter> parameters)
{
    // BUG FIX: the original passed the human-readable message as ArgumentNullException's
    // paramName argument. Use ArgumentException with message + parameter name, matching
    // the database-scoped ExecuteReader overload in this file.
    if (string.IsNullOrEmpty(queryText))
        throw new ArgumentException("value can not be null or empty string.", "queryText");

    // JSON-document collections only accept a restricted set of parameter types.
    if (typeof(T) == typeof(IJSONDocument) || typeof(T) == typeof(JSONDocument))
    {
        Type type;
        if (!JsonDocumentUtil.IsSupportedParameterType(parameters, out type))
        {
            throw new ArgumentException(string.Format("Type {0} is not supported on Collection<JSONDocument>", type), "parameters");
        }
    }

    Query query = new Query();
    query.QueryText = queryText;
    // Copy into a fresh list. Cast<IParameter>() was redundant (the collection is already
    // ICollection<IParameter>); also guard against a null collection instead of failing
    // later with a NullReferenceException.
    query.Parameters = parameters != null ? parameters.ToList() : new List<IParameter>();

    ReadQueryOperation readQueryOperation = new ReadQueryOperation();
    readQueryOperation.Database = _database.DatabaseName;
    readQueryOperation.Collection = _collectionName;
    readQueryOperation.Query = query;

    ReadQueryResponse readQueryResponse = (ReadQueryResponse)_database.ExecutionMapper.ExecuteReader(readQueryOperation);
    if (!readQueryResponse.IsSuccessfull)
    {
        // string.Format was a no-op here (no placeholders in the composed string);
        // plain concatenation is sufficient. Include error params when the server sent them.
        if (readQueryResponse.ErrorParams != null && readQueryResponse.ErrorParams.Length > 0)
            throw new Exception("Operation failed Error: " + Common.ErrorHandling.ErrorMessages.GetErrorMessage(readQueryResponse.ErrorCode, readQueryResponse.ErrorParams));
        throw new Exception("Operation failed Error: " + Common.ErrorHandling.ErrorMessages.GetErrorMessage(readQueryResponse.ErrorCode));
    }

    CollectionReader reader = new CollectionReader((DataChunk)readQueryResponse.DataChunk, _database.ExecutionMapper, _database.DatabaseName, _collectionName);
    return reader;
}
/// <summary>
/// Executes the given query at database scope (no specific collection) and returns a reader
/// over the result set.
/// </summary>
/// <param name="queryText">Query text to execute; must not be null or empty.</param>
/// <param name="parameters">Query parameters; a null collection is treated as no parameters.</param>
/// <returns>An <see cref="ICollectionReader"/> over the server's response data chunk.</returns>
/// <exception cref="ArgumentException">Thrown when <paramref name="queryText"/> is null or empty.</exception>
/// <exception cref="Exception">Thrown when the server reports the operation failed.</exception>
public ICollectionReader ExecuteReader(string queryText, ICollection<IParameter> parameters)
{
    if (string.IsNullOrEmpty(queryText))
    {
        throw new ArgumentException("value can not be null or empty string.", "queryText");
    }

    Query query = new Query();
    query.QueryText = queryText;
    // BUG FIX: the original hard cast (List<IParameter>)parameters threw InvalidCastException
    // for any ICollection<IParameter> that is not actually a List<IParameter> (e.g. an array
    // or HashSet). Copy into a new list instead, and tolerate a null collection.
    query.Parameters = parameters != null ? parameters.ToList() : new List<IParameter>();

    ReadQueryOperation readQueryOperation = new ReadQueryOperation();
    readQueryOperation.Database = _databaseName;
    //Collection Name Cannot be null(Protobuf)
    readQueryOperation.Collection = "";
    readQueryOperation.Query = query;

    ReadQueryResponse readQueryResponse = (ReadQueryResponse)this.ExecutionMapper.ExecuteReader(readQueryOperation);
    if (readQueryResponse.IsSuccessfull)
    {
        //TODO ReadQueryResponse must have Collection Name or server must share collection name because it is needed for
        // GetNextChunk and CloseDataChunk operations
        CollectionReader reader = new CollectionReader((DataChunk)readQueryResponse.DataChunk, this.ExecutionMapper, this.DatabaseName, readQueryOperation.Collection);
        return reader;
    }

    // string.Format was a no-op here (no placeholders in the composed string);
    // plain concatenation is sufficient. Include error params when the server sent them.
    if (readQueryResponse.ErrorParams != null && readQueryResponse.ErrorParams.Length > 0)
    {
        throw new Exception("Operation failed Error: " + Common.ErrorHandling.ErrorMessages.GetErrorMessage(readQueryResponse.ErrorCode, readQueryResponse.ErrorParams));
    }
    throw new Exception("Operation failed Error: " + Common.ErrorHandling.ErrorMessages.GetErrorMessage(readQueryResponse.ErrorCode));
}
/// <summary>
/// Backup job entry point: streams every collection of <c>Database</c> through a query reader,
/// packs the documents into fixed-capacity <c>DataSlice</c>s, and hands the slices to
/// <c>PersistenceManager.SharedQueue</c> for the writer side to persist. Ends by enqueueing a
/// command slice ("Data_Complete_Adding") and waiting until the queue reports everything consumed.
/// Progress is reported through <c>ExecutionStatus</c> / <c>ProgressHandler</c>.
/// </summary>
internal override void Run()
{
    // Starts at 1 so the first completed collection yields 1/Count progress; decremented on
    // per-collection failure so the final ratio only counts successful collections.
    int collectionItterated = 1;
    // Tag this thread's log output with shard/database identity.
    LoggerManager.Instance.SetThreadContext(new LoggerContext() { ShardName = _context.LocalShardName != null ? _context.LocalShardName : "", DatabaseName = Database != null ? Database : "" });
    try
    {
        IDatabaseStore dbInstance = _context.DatabasesManager.GetDatabase(Database);
        foreach (string _collection in Collections)
        {
            // Skip names the store no longer knows about.
            if (((Storage.DatabaseStore)dbInstance).Collections.ContainsKey(_collection))
            {
                //Add custom object
                Query defaultQuery = new Query();
                defaultQuery.QueryText = "Select * from ";
                //M_Note: precausion used incase user goes bonkers whilst naming his collections
                // If the collection name itself contains a double quote, fall back to $...$
                // delimiters; otherwise quote it normally.
                if (_collection.Contains("\""))
                {
                    defaultQuery.QueryText += "$" + _collection + "$";
                }
                else
                {
                    defaultQuery.QueryText += "\"" + _collection + "\"";
                }
                ReadQueryOperation readQueryOperation = new ReadQueryOperation();
                readQueryOperation.Database = Database.ToString();
                readQueryOperation.Collection = _collection;
                readQueryOperation.Query = defaultQuery;
                // Documents accumulated for the slice currently being filled.
                ArrayList docList = new ArrayList();
                long currentSize = 0;
                DataSlice _activeSlice = PersistenceManager.ActiveContext.GetBackupFile(Database).CreateNewDataSlice();
                _activeSlice.SliceHeader.Collection = _collection;
                _activeSlice.SliceHeader.Database = Database;
                _activeSlice.SliceHeader.Cluster = Cluster;
                _activeSlice.SliceHeader.ContentType = DataSliceType.Data;
                try
                {
                    ReadQueryResponse readQueryResponse = (ReadQueryResponse)dbInstance.ExecuteReader(readQueryOperation);
                    if (readQueryResponse.IsSuccessfull)
                    {
                        _dbReader = new CollectionReader((DataChunk)readQueryResponse.DataChunk, _context.TopologyImpl, Database, _collection);
                        //create data slice to be written in the common queue
                        while (_dbReader != null && _dbReader.ReadNext() && _dbReader.GetDocument() != null)
                        {
                            //get document and create chunk and add to shared storage
                            IJSONDocument _doc = _dbReader.GetDocument();
                            // verify size: does the document still fit in the active slice?
                            if (currentSize + _doc.Size <= _activeSlice.Capcity)
                            {
                                docList.Add(_doc);
                                currentSize += _doc.Size + 2;// Hack to accomodate the 2 bytes serialization is going add
                            }
                            else
                            {
                                // Active slice is full: seal it, enqueue it, and start a new
                                // slice seeded with the document that did not fit.
                                DataSlice _nxtSlice = PersistenceManager.ActiveContext.GetBackupFile(Database).CreateNewDataSlice();
                                _nxtSlice.SliceHeader.Collection = _collection;
                                _nxtSlice.SliceHeader.Database = Database;
                                _nxtSlice.SliceHeader.Cluster = Cluster;
                                _nxtSlice.SliceHeader.ContentType = DataSliceType.Data;
                                _activeSlice.Data = CompactBinaryFormatter.ToByteBuffer(docList, string.Empty);
                                _activeSlice.SliceHeader.DataCount = docList.Count;
                                _activeSlice.SliceHeader.TotalSize = _activeSlice.Data.LongLength;
                                // Add to shared queue
                                PersistenceManager.SharedQueue.Add(_activeSlice);
                                _activeSlice = _nxtSlice;
                                docList.Clear();
                                docList.Add(_doc);
                                // NOTE(review): rollover resets the size WITHOUT the +2 per-doc
                                // overhead applied in the if-branch above — confirm intentional.
                                currentSize = 0;
                                currentSize += _doc.Size;
                            }
                        }
                        _dbReader.Dispose();
                        // write final data set (last, partially-filled slice)
                        if (docList.Count > 0)
                        {
                            _activeSlice.Data = CompactBinaryFormatter.ToByteBuffer(docList, string.Empty);
                            _activeSlice.SliceHeader.DataCount = docList.Count;
                            _activeSlice.SliceHeader.TotalSize = _activeSlice.Data.LongLength;
                            // Add to shared queue
                            PersistenceManager.SharedQueue.Add(_activeSlice);
                            docList.Clear();
                        }
                        // submit status
                        // NOTE(review): integer division — this is 0 until the last collection
                        // unless PercentageExecution coerces to int anyway; confirm intended.
                        ExecutionStatus.Status = RecoveryStatus.Executing;
                        ExecutionStatus.PercentageExecution = (collectionItterated / Collections.Count);//[M_NOTE] rudementary logic, change this
                        ExecutionStatus.MessageTime = DateTime.Now;
                        ExecutionStatus.Message = "Completed Backup of " + Database + "_" + _collection + " : " + _collection;
                        collectionItterated++;
                        if (ProgressHandler != null)
                        {
                            ProgressHandler.SubmitRecoveryState(ExecutionStatus);
                        }
                    }
                    else
                    {
                        throw new Exception("Operation failed Error code: " + readQueryResponse.ErrorCode);
                    }
                }
                catch (Exception ex)
                {
                    // Per-collection failure: log and continue with the next collection.
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Error("DatabaseBackupJob.Run()", Database + " : " + ex.ToString());
                    }
                    collectionItterated--;
                }
            }
        }
        // Add command slice — signals the consumer that no more data slices will follow.
        DataSlice finalSlice = new DataSlice(999999);
        finalSlice.SliceHeader.Collection = "Complete";
        finalSlice.SliceHeader.Database = Database;
        finalSlice.SliceHeader.Cluster = Cluster;
        finalSlice.SliceHeader.ContentType = DataSliceType.Command;
        finalSlice.Data = CompactBinaryFormatter.ToByteBuffer("Data_Complete_Adding", string.Empty);
        PersistenceManager.SharedQueue.Add(finalSlice);
        // NOTE(review): CPU-spinning busy-wait with no timeout — consider a wait handle or at
        // least a sleep; if the consumer stalls, this thread spins forever.
        while (!PersistenceManager.SharedQueue.Consumed)
        {
            // wait till all data has been consumed and written
            //M_TODO:
            // Add timeout interval for file writing, incase the data is not being consumed and timeout span has been reached, break the loop and DIE!!!
        }
        if (PersistenceManager.SharedQueue.Consumed)
        {
            // submit status
            ExecutionStatus.Status = RecoveryStatus.Completed;
            ExecutionStatus.PercentageExecution = 1;//[M_NOTE] rudementary logic, change this
            ExecutionStatus.MessageTime = DateTime.Now;
            ExecutionStatus.Message = "Completed Backup of " + Database;
        }
        else
        {
            // submit status (unreachable while the loop above has no timeout — see note)
            ExecutionStatus.Status = RecoveryStatus.Failure;
            ExecutionStatus.PercentageExecution = (collectionItterated / Collections.Count);//[M_NOTE] rudementary logic, change this
            ExecutionStatus.MessageTime = DateTime.Now;
            ExecutionStatus.Message = "Failed Backup of " + Database;
        }
        // Final progress callback is fired on a worker task so a slow handler can't block us.
        if (ProgressHandler != null)
        {
            System.Threading.Tasks.Task.Factory.StartNew(() => ProgressHandler.SubmitRecoveryState(ExecutionStatus));
        }
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Info("DatabaseBackupJob.Run()", Database + "Completed");
        }
    }
    catch (ThreadAbortException)
    {
        // Job was cancelled via Thread.Abort: log and reset so the abort doesn't re-throw.
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsDebugEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Debug("DatabaseBackupJob.Run()", "Thread stopped");
        }
        Thread.ResetAbort();
    }
    catch (Exception exp)
    {
        // Job-level failure (e.g. database lookup): report failure status to the handler.
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("DatabaseBackupJob.Run()",
                Database + " : " + exp.ToString());
        }
        ExecutionStatus.Status = RecoveryStatus.Failure;
        ExecutionStatus.PercentageExecution = (collectionItterated / Collections.Count);//[M_NOTE] rudementary logic, change this
        ExecutionStatus.MessageTime = DateTime.Now;
        ExecutionStatus.Message = "Failed Backup of " + Database;
        if (ProgressHandler != null)
        {
            System.Threading.Tasks.Task.Factory.StartNew(() => ProgressHandler.SubmitRecoveryState(ExecutionStatus));
        }
    }
}