/// <summary>
/// Creates the controller with its logging and persistence dependencies.
/// </summary>
/// <param name="logger">Logger scoped to this controller.</param>
/// <param name="databaseStore">Store used to read/write EuroStat table data.</param>
/// <exception cref="ArgumentNullException">Thrown when either dependency is null.</exception>
public EuroStatTableController(ILogger<EuroStatTableController> logger, IDatabaseStore databaseStore)
{
    // Fail fast on misconfigured DI, consistent with the Repository/DatabaseStoreInterceptore ctors.
    _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    _databaseStore = databaseStore ?? throw new ArgumentNullException(nameof(databaseStore));
}
/// <summary>
/// Creates the controller with its logging and persistence dependencies.
/// </summary>
/// <param name="logger">Logger scoped to this controller.</param>
/// <param name="databaseStore">Store used to read NACE region data.</param>
/// <exception cref="ArgumentNullException">Thrown when either dependency is null.</exception>
public NaceRegionDataController(ILogger<NaceRegionDataController> logger, IDatabaseStore databaseStore)
{
    // Fail fast on misconfigured DI rather than NRE-ing on first use.
    _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    this.databaseStore = databaseStore ?? throw new ArgumentNullException(nameof(databaseStore));
}
/// <summary>
/// Creates the service with its logger, store, and seed data.
/// Parameter names (including the unconventional <c>_logger</c>) are kept
/// unchanged so named-argument callers keep compiling.
/// </summary>
/// <param name="_logger">Logger scoped to this service.</param>
/// <param name="ds">Backing database store.</param>
/// <param name="baseData">Base data the service operates on.</param>
/// <exception cref="ArgumentNullException">Thrown when any dependency is null.</exception>
public BaseDataService(ILogger<BaseDataService> _logger, IDatabaseStore ds, BaseData baseData)
{
    this._logger = _logger ?? throw new ArgumentNullException(nameof(_logger));
    this.ds = ds ?? throw new ArgumentNullException(nameof(ds));
    this.baseData = baseData ?? throw new ArgumentNullException(nameof(baseData));
}
/// <summary>
/// Creates the initialiser with its interceptor store and logger.
/// </summary>
/// <param name="databaseStoreInterceptore">Store interceptor to drive initialisation.</param>
/// <param name="log">Application log.</param>
/// <exception cref="ArgumentNullException">Thrown when either dependency is null.</exception>
public ApplicationInitialiser(DatabaseStore databaseStoreInterceptore, ILog log)
{
    // nameof() keeps the exception's parameter name in sync with refactors,
    // unlike the original hard-coded string literals.
    this.databaseStoreInterceptore = databaseStoreInterceptore ?? throw new ArgumentNullException(nameof(databaseStoreInterceptore));
    this.log = log ?? throw new ArgumentNullException(nameof(log));
}
/// <summary>
/// Verifies that a cache miss causes the repository to write the fetched value
/// back into the cache store.
/// </summary>
public void Repository_ItemNotExistInCache_StoreValueGetsCalled()
{
    // Arrange: fresh mocks mean the cache holds no entry for any key.
    var cache = Mock.Of<ICacheStore>();
    var database = Mock.Of<IDatabaseStore>();
    var repository = new Repository(cache, database, GetLog());
    const string key = "key_1";

    // Act
    repository.GetValue(key);

    // Assert: the value obtained on the miss must be pushed into the cache.
    Mock.Get(cache).Verify(c => c.StoreValue(key, It.IsAny<string>()));
}
/// <summary>
/// Creates the interceptor over the given store, with a wait handle that starts
/// in the non-signaled ("closed") state.
/// </summary>
/// <param name="databaseStore">Store whose calls are intercepted.</param>
/// <param name="log">Application log.</param>
/// <exception cref="ArgumentNullException">Thrown when either dependency is null.</exception>
public DatabaseStoreInterceptore(IDatabaseStore databaseStore, ILog log)
{
    // nameof() keeps the exception's parameter name refactor-safe,
    // unlike the original hard-coded string literals.
    this.databaseStore = databaseStore ?? throw new ArgumentNullException(nameof(databaseStore));
    this.log = log ?? throw new ArgumentNullException(nameof(log));
    // Non-signaled by default: callers block until the interceptor opens it.
    waitHandle = new ManualResetEvent(false);
}
/// <summary>
/// Creates the repository over a cache tier and a database tier.
/// </summary>
/// <param name="cacheStore">Fast lookup tier consulted first.</param>
/// <param name="databaseStore">Authoritative store consulted on cache miss.</param>
/// <param name="log">Application log.</param>
/// <exception cref="ArgumentNullException">Thrown when any dependency is null.</exception>
public Repository(ICacheStore cacheStore, IDatabaseStore databaseStore, ILog log)
{
    // nameof() replaces the original hard-coded parameter-name strings,
    // keeping the exception message correct across renames.
    this.cacheStore = cacheStore ?? throw new ArgumentNullException(nameof(cacheStore));
    this.databaseStore = databaseStore ?? throw new ArgumentNullException(nameof(databaseStore));
    this.log = log ?? throw new ArgumentNullException(nameof(log));
}
/// <summary>
/// Disposes every database store, saving the system database store for last so
/// user databases can tear down while it is still available.
/// </summary>
/// <param name="destroy">Passed through to each store's Dispose.</param>
public void Dispose(bool destroy)
{
    IDatabaseStore systemStore = null;
    foreach (var entry in _databases)
    {
        if (entry.Value is SystemDatabaseStore)
        {
            // Defer: the system database must outlive the rest.
            systemStore = entry.Value;
        }
        else if (entry.Value != null)
        {
            entry.Value.Dispose(destroy);
        }
    }
    if (systemStore != null)
    {
        systemStore.Dispose(destroy);
    }
}
/// <summary>
/// Backs up every collection of <c>Database</c>: reads each collection with a
/// "Select * from ..." query, packs the documents into <c>DataSlice</c> chunks pushed
/// onto the shared persistence queue, then enqueues a final command slice and waits
/// for the queue to drain before reporting the result via <c>ExecutionStatus</c> and
/// <c>ProgressHandler</c>.
/// </summary>
internal override void Run()
{
    // Collections processed so far; used below as the numerator of the progress ratio.
    int collectionItterated = 1;
    // Tag this worker thread's log entries with shard and database names.
    LoggerManager.Instance.SetThreadContext(new LoggerContext() { ShardName = _context.LocalShardName != null ? _context.LocalShardName : "", DatabaseName = Database != null ? Database : "" });
    try
    {
        IDatabaseStore dbInstance = _context.DatabasesManager.GetDatabase(Database);
        foreach (string _collection in Collections)
        {
            // Silently skip collections that no longer exist in the store.
            if (((Storage.DatabaseStore)dbInstance).Collections.ContainsKey(_collection))
            {
                //Add custom object
                Query defaultQuery = new Query();
                defaultQuery.QueryText = "Select * from ";
                // Guard against double quotes in user-chosen collection names:
                // wrap those in $...$ instead of "..." so the query stays parseable.
                if (_collection.Contains("\"")) { defaultQuery.QueryText += "$" + _collection + "$"; }
                else { defaultQuery.QueryText += "\"" + _collection + "\""; }
                ReadQueryOperation readQueryOperation = new ReadQueryOperation();
                readQueryOperation.Database = Database.ToString();
                readQueryOperation.Collection = _collection;
                readQueryOperation.Query = defaultQuery;
                ArrayList docList = new ArrayList(); // documents buffered for the current slice
                long currentSize = 0;                // running serialized size of docList, in bytes
                // First slice for this collection; more are created as each fills up.
                DataSlice _activeSlice = PersistenceManager.ActiveContext.GetBackupFile(Database).CreateNewDataSlice();
                _activeSlice.SliceHeader.Collection = _collection;
                _activeSlice.SliceHeader.Database = Database;
                _activeSlice.SliceHeader.Cluster = Cluster;
                _activeSlice.SliceHeader.ContentType = DataSliceType.Data;
                try
                {
                    ReadQueryResponse readQueryResponse = (ReadQueryResponse)dbInstance.ExecuteReader(readQueryOperation);
                    if (readQueryResponse.IsSuccessfull)
                    {
                        _dbReader = new CollectionReader((DataChunk)readQueryResponse.DataChunk, _context.TopologyImpl, Database, _collection);
                        // Stream documents and pack them into slices for the shared queue.
                        while (_dbReader != null && _dbReader.ReadNext() && _dbReader.GetDocument() != null)
                        {
                            IJSONDocument _doc = _dbReader.GetDocument();
                            if (currentSize + _doc.Size <= _activeSlice.Capcity)
                            {
                                // Fits in the active slice; +2 bytes covers per-document
                                // serialization overhead (original author's hack).
                                docList.Add(_doc);
                                currentSize += _doc.Size + 2;
                            }
                            else
                            {
                                // Slice is full: seal it, enqueue it, and start a new slice
                                // seeded with the document that did not fit.
                                DataSlice _nxtSlice = PersistenceManager.ActiveContext.GetBackupFile(Database).CreateNewDataSlice();
                                _nxtSlice.SliceHeader.Collection = _collection;
                                _nxtSlice.SliceHeader.Database = Database;
                                _nxtSlice.SliceHeader.Cluster = Cluster;
                                _nxtSlice.SliceHeader.ContentType = DataSliceType.Data;
                                _activeSlice.Data = CompactBinaryFormatter.ToByteBuffer(docList, string.Empty);
                                _activeSlice.SliceHeader.DataCount = docList.Count;
                                _activeSlice.SliceHeader.TotalSize = _activeSlice.Data.LongLength;
                                // Add to shared queue
                                PersistenceManager.SharedQueue.Add(_activeSlice);
                                _activeSlice = _nxtSlice;
                                docList.Clear();
                                docList.Add(_doc);
                                currentSize = 0;
                                // NOTE(review): unlike the in-budget branch above, the carried-over
                                // document is counted without the +2 overhead — confirm intended.
                                currentSize += _doc.Size;
                            }
                        }
                        _dbReader.Dispose();
                        // Flush the last, partially filled slice.
                        if (docList.Count > 0)
                        {
                            _activeSlice.Data = CompactBinaryFormatter.ToByteBuffer(docList, string.Empty);
                            _activeSlice.SliceHeader.DataCount = docList.Count;
                            _activeSlice.SliceHeader.TotalSize = _activeSlice.Data.LongLength;
                            // Add to shared queue
                            PersistenceManager.SharedQueue.Add(_activeSlice);
                            docList.Clear();
                        }
                        // submit status
                        ExecutionStatus.Status = RecoveryStatus.Executing;
                        // NOTE(review): integer division — yields 0 until collectionItterated
                        // reaches Collections.Count; likely intended as a floating-point ratio.
                        ExecutionStatus.PercentageExecution = (collectionItterated / Collections.Count);
                        ExecutionStatus.MessageTime = DateTime.Now;
                        ExecutionStatus.Message = "Completed Backup of " + Database + "_" + _collection + " : " + _collection;
                        collectionItterated++;
                        if (ProgressHandler != null) { ProgressHandler.SubmitRecoveryState(ExecutionStatus); }
                    }
                    else
                    {
                        throw new Exception("Operation failed Error code: " + readQueryResponse.ErrorCode);
                    }
                }
                catch (Exception ex)
                {
                    // Per-collection failure: log, back out this collection's count, continue.
                    if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
                    {
                        LoggerManager.Instance.RecoveryLogger.Error("DatabaseBackupJob.Run()", Database + " : " + ex.ToString());
                    }
                    collectionItterated--;
                }
            }
        }
        // Sentinel command slice telling the consumer that all data slices are enqueued.
        DataSlice finalSlice = new DataSlice(999999);
        finalSlice.SliceHeader.Collection = "Complete";
        finalSlice.SliceHeader.Database = Database;
        finalSlice.SliceHeader.Cluster = Cluster;
        finalSlice.SliceHeader.ContentType = DataSliceType.Command;
        finalSlice.Data = CompactBinaryFormatter.ToByteBuffer("Data_Complete_Adding", string.Empty);
        PersistenceManager.SharedQueue.Add(finalSlice);
        // NOTE(review): untimed busy-wait — spins a core and never exits if the consumer
        // stalls. Original TODO: add a timeout and break out when it elapses.
        while (!PersistenceManager.SharedQueue.Consumed)
        {
            // wait till all data has been consumed and written
        }
        if (PersistenceManager.SharedQueue.Consumed)
        {
            // submit status
            ExecutionStatus.Status = RecoveryStatus.Completed;
            ExecutionStatus.PercentageExecution = 1;
            ExecutionStatus.MessageTime = DateTime.Now;
            ExecutionStatus.Message = "Completed Backup of " + Database;
        }
        else
        {
            // Unreachable while the wait loop above has no timeout; kept for when one is added.
            ExecutionStatus.Status = RecoveryStatus.Failure;
            ExecutionStatus.PercentageExecution = (collectionItterated / Collections.Count); // NOTE(review): integer division, see above
            ExecutionStatus.MessageTime = DateTime.Now;
            ExecutionStatus.Message = "Failed Backup of " + Database;
        }
        if (ProgressHandler != null)
        {
            // Report asynchronously so a slow handler cannot hold up this job thread.
            System.Threading.Tasks.Task.Factory.StartNew(() => ProgressHandler.SubmitRecoveryState(ExecutionStatus));
        }
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsInfoEnabled)
        {
            // NOTE(review): Database and "Completed" are concatenated with no separator.
            LoggerManager.Instance.RecoveryLogger.Info("DatabaseBackupJob.Run()", Database + "Completed");
        }
    }
    catch (ThreadAbortException)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsDebugEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Debug("DatabaseBackupJob.Run()", "Thread stopped");
        }
        // Cancel the pending abort so the thread terminates cleanly.
        Thread.ResetAbort();
    }
    catch (Exception exp)
    {
        if (LoggerManager.Instance.RecoveryLogger != null && LoggerManager.Instance.RecoveryLogger.IsErrorEnabled)
        {
            LoggerManager.Instance.RecoveryLogger.Error("DatabaseBackupJob.Run()",
                Database + " : " + exp.ToString());
        }
        ExecutionStatus.Status = RecoveryStatus.Failure;
        ExecutionStatus.PercentageExecution = (collectionItterated / Collections.Count); // NOTE(review): integer division, see above
        ExecutionStatus.MessageTime = DateTime.Now;
        ExecutionStatus.Message = "Failed Backup of " + Database;
        if (ProgressHandler != null)
        {
            System.Threading.Tasks.Task.Factory.StartNew(() => ProgressHandler.SubmitRecoveryState(ExecutionStatus));
        }
    }
}
/// <summary>
/// Pipeline stage that persists scraped items through the given store.
/// </summary>
/// <param name="store">Destination store for pipeline output.</param>
/// <exception cref="ArgumentNullException">Thrown when <paramref name="store"/> is null.</exception>
public DatabasePipeline(IDatabaseStore store)
{
    // Fail fast instead of deferring a NullReferenceException to first use,
    // consistent with the Repository constructor's guards.
    _store = store ?? throw new ArgumentNullException(nameof(store));
}
/// <summary>
/// Resolves the system database from the manager and marks this component ready.
/// </summary>
/// <param name="store">Manager used to look up the system database.</param>
internal void Initialize(IDatabasesManager store)
{
    var systemDatabase = store.GetDatabase(MiscUtil.SYSTEM_DATABASE);
    _store = systemDatabase;
    _isInitialized = true;
}
/// <summary>
/// Creates the controller with its logging and persistence dependencies.
/// </summary>
/// <param name="logger">Logger scoped to this controller.</param>
/// <param name="databaseStore">Store used to read region data.</param>
/// <exception cref="ArgumentNullException">Thrown when either dependency is null.</exception>
public RegionController(ILogger<RegionController> logger, IDatabaseStore databaseStore)
{
    // Fail fast on misconfigured DI rather than NRE-ing on first use.
    _logger = logger ?? throw new ArgumentNullException(nameof(logger));
    this.databaseStore = databaseStore ?? throw new ArgumentNullException(nameof(databaseStore));
}
/// <summary>
/// Creates the fetch service with its logger, HTTP client factory, store, and converter.
/// </summary>
/// <param name="_logger">Logger scoped to this service (name kept for named-argument callers).</param>
/// <param name="clientFactory">Factory for HttpClient instances used to call EuroStat.</param>
/// <param name="databaseStore">Store the fetched data is written to.</param>
/// <param name="JSONConverter">Converter from EuroStat JSON to domain objects.</param>
public EuroStatFetchService(ILogger<EuroStatFetchService> _logger, IHttpClientFactory clientFactory, IDatabaseStore databaseStore, EuroStatJSONToObjectsConverterService JSONConverter)
{
    this._logger = _logger;
    // BUG FIX: clientFactory was accepted but never stored, so the injected
    // IHttpClientFactory was silently dropped.
    // NOTE(review): confirm the backing field is named `clientFactory` in the class.
    this.clientFactory = clientFactory;
    this.databaseStore = databaseStore;
    this.JSONConverter = JSONConverter;
}
/// <summary>
/// Fluent extension that appends a database-persistence pipeline to the spider.
/// </summary>
/// <param name="spider">Spider being configured.</param>
/// <param name="databaseStore">Store the new pipeline writes items to.</param>
/// <returns>The spider returned by <c>AddPipeline</c>, for further chaining.</returns>
public static Spider AddDataBasePipeline(this Spider spider, IDatabaseStore databaseStore)
    => spider.AddPipeline(new DatabasePipeline(databaseStore));
/// <summary>
/// Wires up logging and persistence for the JSON-to-objects converter service.
/// </summary>
/// <param name="_logger">Logger instance. NOTE(review): the category is
/// <c>ILogger&lt;JsonConverter&gt;</c>, not this service — log lines will be
/// attributed to JsonConverter; confirm whether that is intentional.</param>
/// <param name="databaseStore">Store the converted objects are written to.</param>
public EuroStatJSONToObjectsConverterService(ILogger<JsonConverter> _logger, IDatabaseStore databaseStore)
{
    this.databaseStore = databaseStore;
    this._logger = _logger;
}