Example #1
0
 /// <summary>
 /// Creates a <see cref="MongoConnection"/> over the given context and options.
 /// </summary>
 /// <param name="dbContext">Hangfire database context</param>
 /// <param name="storageOptions">Storage configuration options</param>
 /// <returns>A new MongoConnection instance</returns>
 public virtual MongoConnection CreateMongoConnection(HangfireDbContext dbContext, MongoStorageOptions storageOptions) =>
     new MongoConnection(dbContext, storageOptions);
Example #2
0
 // Fetches the first stored hash document with the given key, or null when absent.
 private static dynamic GetTestHash(HangfireDbContext database, string key)
 {
     var byKey = Builders<HashDto>.Filter.Eq(_ => _.Key, key);
     return database.StateData.OfType<HashDto>().Find(byKey).FirstOrDefault();
 }
Example #3
0
 /// <summary>
 /// Initializes the strategy with its database context and storage options.
 /// </summary>
 public MongoMigrationStrategyBase(HangfireDbContext dbContext, MongoStorageOptions storageOptions)
 {
     _storageOptions = storageOptions;
     _dbContext = dbContext;
 }
 // Looks up a job document directly by its numeric id.
 private static dynamic GetTestJob(HangfireDbContext database, int jobId) =>
     database.Job.FindOneById(jobId);
Example #5
0
 // Returns the job document with the given string id, or null when it does not exist.
 private static JobDto GetTestJob(HangfireDbContext database, string jobId)
 {
     var byId = Builders<JobDto>.Filter.Eq(_ => _.Id, jobId);
     return database.Job.Find(byId).FirstOrDefault();
 }
 // Builds a fetcher over the given connection using default storage options
 // and the test's mocked job-queue semaphore.
 private MongoJobFetcher CreateJobQueue(HangfireDbContext connection) =>
     new MongoJobFetcher(connection, new MongoStorageOptions(), _jobQueueSemaphoreMock.Object);
 // Test fixture setup: fresh db context plus a strict migration-context mock.
 public Version16MigrationStepFacts()
 {
     _mongoMigrationBagMock = new Mock<IMongoMigrationContext>(MockBehavior.Strict);
     _dbContext = ConnectionUtils.CreateDbContext();
     _database = _dbContext.Database;
 }
Example #8
0
 // Returns the first hash entry stored under the given key, or null when none exists.
 private static dynamic GetTestHash(HangfireDbContext database, string key) =>
     database.StateDataHash.Find(_ => _.Key == key).FirstOrDefault();
Example #9
0
        // Runs the given action against a freshly created connection.
        // NOTE(review): the connection is never disposed here — confirm whether
        // HangfireDbContext holds resources that need explicit cleanup.
        private void UseConnection(Action<HangfireDbContext> action)
        {
            action(ConnectionUtils.CreateConnection());
        }
Example #10
0
 // Looks up a LiteJob directly by its numeric id.
 private static LiteJob GetTestJob(HangfireDbContext database, int jobId) =>
     database.Job.FindById(jobId);
Example #11
0
 // Returns every set entry stored under the given key.
 private static IList<LiteSet> GetTestSet(HangfireDbContext database, string key) =>
     database.StateDataSet.Find(_ => _.Key == key).ToList();
Example #12
0
 /// <summary>
 /// Creates a <see cref="MongoFetchedJob"/> for the given queue entry.
 /// </summary>
 /// <param name="dbContext">Hangfire database context</param>
 /// <param name="id">Queue-entry id</param>
 /// <param name="jobId">Id of the fetched job</param>
 /// <param name="queue">Queue name</param>
 /// <returns>A new MongoFetchedJob instance</returns>
 public virtual MongoFetchedJob CreateFetchedJob(HangfireDbContext dbContext, ObjectId id, ObjectId jobId,
                                                 string queue) =>
     new MongoFetchedJob(dbContext, id, jobId, queue);
Example #13
0
 /// <summary>
 /// Creates a distributed lock on the given resource, prefixed with "Hangfire:".
 /// </summary>
 /// <param name="resource">Name of the resource to lock</param>
 /// <param name="timeout">How long to wait for the lock</param>
 /// <param name="dbContext">Hangfire database context</param>
 /// <param name="storageOptions">Storage configuration options</param>
 /// <returns>A new MongoDistributedLock instance</returns>
 public virtual MongoDistributedLock CreateMongoDistributedLock(string resource, TimeSpan timeout, HangfireDbContext dbContext, MongoStorageOptions storageOptions) =>
     new MongoDistributedLock($"Hangfire:{resource}", timeout, dbContext, storageOptions, DistributedLockMutex);
Example #14
0
 /// <summary>
 /// Creates a <see cref="MongoWriteOnlyTransaction"/> over the given context and options.
 /// </summary>
 /// <param name="dbContext">Hangfire database context</param>
 /// <param name="storageOptions">Storage configuration options</param>
 /// <returns>A new MongoWriteOnlyTransaction instance</returns>
 public virtual MongoWriteOnlyTransaction CreateMongoWriteOnlyTransaction(HangfireDbContext dbContext, MongoStorageOptions storageOptions) =>
     new MongoWriteOnlyTransaction(dbContext, storageOptions);
        /// <summary>
        /// Creates a MongoDB distributed lock: removes dead locks, polls until the
        /// resource is free (or the timeout elapses), then upserts the lock document
        /// for this client and starts the heartbeat.
        /// </summary>
        /// <param name="resource">Lock resource</param>
        /// <param name="timeout">Lock timeout</param>
        /// <param name="database">Lock database</param>
        /// <param name="options">Database options</param>
        /// <exception cref="ArgumentNullException">When resource is null/empty, or database/options is null</exception>
        /// <exception cref="MongoDistributedLockException">When the lock cannot be acquired</exception>
        public MongoDistributedLock(string resource, TimeSpan timeout, HangfireDbContext database, MongoStorageOptions options)
        {
            if (String.IsNullOrEmpty(resource))
            {
                throw new ArgumentNullException(nameof(resource));
            }

            if (database == null)
            {
                throw new ArgumentNullException(nameof(database));
            }

            if (options == null)
            {
                throw new ArgumentNullException(nameof(options));
            }

            _resource = resource;
            _database = database;
            _options  = options;

            try
            {
                // Remove dead locks (heartbeat older than the configured lifetime).
                // BUGFIX: this delete was previously fire-and-forget, so the lock check
                // below could race against it; run it to completion first.
                AsyncHelper.RunSync(() => database.DistributedLock.DeleteManyAsync(
                    Builders<DistributedLockDto>.Filter.Eq(_ => _.Resource, resource) &
                    Builders<DistributedLockDto>.Filter.Lt(_ => _.Heartbeat, database.GetServerTimeUtc().Subtract(options.DistributedLockLifetime))));

                // Poll until no other client holds the lock or the timeout elapses.
                // UtcNow (not Now) so DST/clock-zone transitions cannot skew the deadline.
                DateTime lockTimeoutTime = DateTime.UtcNow.Add(timeout);
                bool     isLockedBySomeoneElse;
                bool     isFirstAttempt = true;
                do
                {
                    isLockedBySomeoneElse = AsyncHelper.RunSync(() =>
                        database.DistributedLock
                            .Find(Builders<DistributedLockDto>.Filter.Eq(_ => _.Resource, resource) &
                                  Builders<DistributedLockDto>.Filter.Ne(_ => _.ClientId, _options.ClientId))
                            .FirstOrDefaultAsync()) != null;

                    if (isFirstAttempt)
                    {
                        isFirstAttempt = false;
                    }
                    else
                    {
                        // Back off for a tenth of the total timeout between polls.
                        Thread.Sleep((int)timeout.TotalMilliseconds / 10);
                    }
                } while (isLockedBySomeoneElse && (lockTimeoutTime >= DateTime.UtcNow));

                if (isLockedBySomeoneElse)
                {
                    throw new MongoDistributedLockException(String.Format("Could not place a lock on the resource '{0}': {1}.", _resource, "The lock request timed out"));
                }

                // Take (or re-enter) the lock: upsert this client's lock document,
                // bump the reentrancy count and refresh the heartbeat.
                AsyncHelper.RunSync(() => database.DistributedLock.FindOneAndUpdateAsync(
                    Builders<DistributedLockDto>.Filter.Eq(_ => _.Resource, resource),
                    Builders<DistributedLockDto>.Update.Combine(
                        Builders<DistributedLockDto>.Update.Set(_ => _.ClientId, _options.ClientId),
                        Builders<DistributedLockDto>.Update.Inc(_ => _.LockCount, 1),
                        Builders<DistributedLockDto>.Update.Set(_ => _.Heartbeat, database.GetServerTimeUtc())),
                    new FindOneAndUpdateOptions<DistributedLockDto> { IsUpsert = true }));

                StartHeartBeat();
            }
            catch (Exception ex) when (!(ex is MongoDistributedLockException))
            {
                // Lock-specific exceptions propagate unchanged via the filter above;
                // everything else is wrapped so callers see a single exception type.
                throw new MongoDistributedLockException(String.Format("Could not place a lock on the resource '{0}': {1}.", _resource, "Check inner exception for details"), ex);
            }
        }
Example #16
0
        // Builds a LiteJob in the given state (with the state data that state expects),
        // inserts it and a matching queue entry into the database, and returns the job.
        // The optional visitor lets callers tweak the job before insertion.
        private LiteJob CreateJobInState(HangfireDbContext database, int jobId, string stateName, Func <LiteJob, LiteJob> visitor = null)
        {
            var job = Job.FromExpression(() => SampleMethod("wrong"));

            Dictionary <string, string> stateData;

            // State-specific payload; other states get an empty dictionary.
            if (stateName == EnqueuedState.StateName)
            {
                stateData = new Dictionary <string, string> {
                    ["EnqueuedAt"] = $"{DateTime.Now:o}"
                };
            }
            else if (stateName == ProcessingState.StateName)
            {
                stateData = new Dictionary <string, string>
                {
                    ["ServerId"]  = Guid.NewGuid().ToString(),
                    // StartedAt is backdated 500 ms so the job looks "in progress".
                    ["StartedAt"] = JobHelper.SerializeDateTime(DateTime.Now.Subtract(TimeSpan.FromMilliseconds(500)))
                };
            }
            else
            {
                stateData = new Dictionary <string, string>();
            }

            var jobState = new LiteState()
            {
                JobId     = jobId,
                Name      = stateName,
                Reason    = null,
                CreatedAt = DateTime.Now,
                Data      = stateData
            };

            var liteJob = new LiteJob
            {
                Id             = jobId,
                InvocationData = JobHelper.ToJson(InvocationData.Serialize(job)),
                Arguments      = "[\"\\\"Arguments\\\"\"]",
                StateName      = stateName,
                CreatedAt      = DateTime.Now,
                StateHistory   = new List <LiteState> {
                    jobState
                }
            };

            // Give the caller a chance to customize the job before it is persisted.
            if (visitor != null)
            {
                liteJob = visitor(liteJob);
            }
            database.Job.Insert(liteJob);

            // Every job also gets a queue entry; FetchedAt marks it as already fetched.
            var jobQueueDto = new JobQueue
            {
                FetchedAt = null,
                JobId     = jobId,
                Queue     = DefaultQueue
            };

            if (stateName == FetchedStateName)
            {
                jobQueueDto.FetchedAt = DateTime.Now;
            }

            database.JobQueue.Insert(jobQueueDto);

            return(liteJob);
        }
        // Counts jobs currently in the given state.
        private long GetNumberOfJobsByStateName(HangfireDbContext connection, string stateName) =>
            connection.Job.Count(Builders<JobDto>.Filter.Eq(_ => _.StateName, stateName));
 /// <summary>
 /// Retrieves the server time in the UTC zone.
 /// </summary>
 /// <param name="dbContext">Hangfire database context</param>
 /// <returns>Server time</returns>
 public static DateTime GetServerTimeUtc(this HangfireDbContext dbContext) =>
     GetServerTimeUtc(dbContext.Database);
 /// <summary>
 /// Creates the strategy, delegating all initialization to the base class.
 /// </summary>
 /// <param name="dbContext">Hangfire database context</param>
 /// <param name="storageOptions">Storage configuration options</param>
 public MongoMigrationStrategyMigrate(HangfireDbContext dbContext, MongoStorageOptions storageOptions)
     : base(dbContext, storageOptions)
 {
 }
 /// <summary>
 /// Creates a LogManager bound to the given Hangfire database context.
 /// </summary>
 /// <param name="hangfireDbContext">Hangfire database context</param>
 public LogManager(HangfireDbContext hangfireDbContext)
 {
     _hangfireDbContext = hangfireDbContext;
 }
        // Data-generation utility (normally disabled — note the commented-out [Fact]):
        // drops and refills a local MongoDB database with jobs in every state, waits for
        // them to run, then zips a snapshot of the database as a migration-test fixture.
        // Requires a MongoDB instance at mongodb://localhost; blocks on job events.
        //[Fact, Trait("Category", "DataGeneration")]
        public void Clean_Database_Filled()
        {
            var connectionString = "mongodb://localhost";
            var databaseName     = "Mongo-Hangfire-Filled";
            var context          = new HangfireDbContext(connectionString, databaseName);

            // Make sure we start from scratch
            context.Database.Client.DropDatabase(databaseName);

            var storageOptions = new MongoStorageOptions
            {
                MigrationOptions = new MongoMigrationOptions
                {
                    Strategy       = MongoMigrationStrategy.Drop,
                    BackupStrategy = MongoBackupStrategy.None
                },
                QueuePollInterval = TimeSpan.FromMilliseconds(500)
            };
            var serverOptions = new BackgroundJobServerOptions
            {
                ShutdownTimeout = TimeSpan.FromSeconds(15)
            };
            var mongoClientSettings = MongoClientSettings.FromConnectionString(connectionString);

            JobStorage.Current = new MongoStorage(mongoClientSettings, databaseName, storageOptions);

            // Schedule one job of each kind, then block until each has signalled.
            using (new BackgroundJobServer(serverOptions))
            {
                // Recurring Job
                RecurringJob.AddOrUpdate(() => HangfireTestJobs.ExecuteRecurringJob("Recurring job"), Cron.Minutely);

                // Scheduled job
                BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteScheduledJob("Scheduled job"), TimeSpan.FromSeconds(30));

                // Enqueued job
                BackgroundJob.Enqueue(() => HangfireTestJobs.ExecuteEnqueuedJob("Enqueued job"));

                // Continued job
                var parentId = BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteContinueWithJob("ContinueWith job", false), TimeSpan.FromSeconds(15));
                BackgroundJob.ContinueWith(parentId, () => HangfireTestJobs.ExecuteContinueWithJob("ContinueWith job continued", true));

                // Now the waiting game starts
                HangfireTestJobs.ScheduleEvent.WaitOne();
                // The (*) jobs are left pending so the snapshot also contains unrun jobs.
                BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteScheduledJob("Scheduled job (*)"), TimeSpan.FromMinutes(30));

                HangfireTestJobs.ContinueWithEvent.WaitOne();
                HangfireTestJobs.RecurringEvent.WaitOne();

                HangfireTestJobs.EnqueueEvent.WaitOne();
                BackgroundJob.Enqueue(() => HangfireTestJobs.ExecuteEnqueuedJob("Enqueued job (*)"));
            }


            // Some data are cleaned up when hangfire shuts down.
            // Grab a copy so we can write it back - needed for migration tests.
            var connection = JobStorage.Current.GetConnection();

            connection.AnnounceServer("test-server", new ServerContext
            {
                WorkerCount = serverOptions.WorkerCount,
                Queues      = serverOptions.Queues
            });

            connection.AcquireDistributedLock("test-lock", TimeSpan.FromSeconds(30));

            // Create database snapshot in zip file
            var schemaVersion = (int)MongoMigrationManager.RequiredSchemaVersion;

            using (var stream = new FileStream($@"Hangfire-Mongo-Schema-{schemaVersion:000}.zip", FileMode.Create))
            {
                // Collections that may legitimately be empty in the snapshot.
                var allowedEmptyCollections = new List <string>
                {
                    "hangfire.migrationLock"
                };

                if (MongoMigrationManager.RequiredSchemaVersion >= MongoSchema.Version09 &&
                    MongoMigrationManager.RequiredSchemaVersion <= MongoSchema.Version15)
                {
                    // Signal collection work was initiated in schema version 9,
                    // and still not put to use in schema version 15.
                    allowedEmptyCollections.Add($@"{storageOptions.Prefix}.signal");
                }
                BackupDatabaseToStream(connectionString, databaseName, stream, allowedEmptyCollections.ToArray());
            }
        }
        // An aggregated-counter entry is treated as expired once it no longer exists.
        private static bool IsEntryExpired(HangfireDbContext connection, ObjectId entryId)
        {
            var byId    = Builders<AggregatedCounterDto>.Filter.Eq(_ => _.Id, entryId);
            var matches = AsyncHelper.RunSync(() => connection.AggregatedCounter.Find(byId).CountAsync());

            return matches == 0;
        }
Example #23
0
 // Builds a job queue over the given connection using default storage options.
 private MongoJobQueue CreateJobQueue(HangfireDbContext connection) =>
     new MongoJobQueue(connection, new MongoStorageOptions());
 // Builds a queue-monitoring API over the given connection.
 private static MongoJobQueueMonitoringApi CreateMongoJobQueueMonitoringApi(HangfireDbContext connection) =>
     new MongoJobQueueMonitoringApi(connection);
Example #25
0
 // Returns every set document stored under the given key.
 private static IList<SetDto> GetTestSet(HangfireDbContext database, string key)
 {
     var byKey = Builders<SetDto>.Filter.Eq(_ => _.Key, key);
     return database.StateData.OfType<SetDto>().Find(byKey).ToList();
 }
Example #26
0
        /// <summary>
        /// Aggregates 'Counter' records into 'AggregatedCounter' in batches of
        /// NumberOfRecordsInSinglePass. Repeats passes (pausing between them) while a
        /// full batch was removed, then waits one interval before returning.
        /// </summary>
        /// <param name="cancellationToken">Cancellation token, honored between passes</param>
        public void Execute(CancellationToken cancellationToken)
        {
            Logger.DebugFormat("Aggregating records in 'Counter' table...");

            long removedCount;

            do
            {
                using (var storageConnection = (MongoConnection)_storage.GetConnection())
                {
                    HangfireDbContext database = storageConnection.Database;

                    // Take one batch of raw counter records.
                    List <CounterDto> recordsToAggregate = database
                                                           .Counter.Find(new BsonDocument())
                                                           .Limit(NumberOfRecordsInSinglePass)
                                                           .ToList();

                    // Collapse the batch per key: sum the values, keep the latest expiry.
                    var recordsToMerge = recordsToAggregate
                                         .GroupBy(_ => _.Key).Select(_ => new
                    {
                        Key      = _.Key,
                        Value    = _.Sum(x => x.Value),
                        ExpireAt = _.Max(x => x.ExpireAt)
                    });

                    foreach (var item in recordsToMerge)
                    {
                        AggregatedCounterDto aggregatedItem = database
                                                              .AggregatedCounter
                                                              .Find(Builders <AggregatedCounterDto> .Filter.Eq(_ => _.Key, item.Key))
                                                              .FirstOrDefault();
                        if (aggregatedItem != null)
                        {
                            // Merge into the existing aggregate: add the value and
                            // keep whichever expiry is later.
                            database.AggregatedCounter.UpdateOne(Builders <AggregatedCounterDto> .Filter.Eq(_ => _.Key, item.Key),
                                                                 Builders <AggregatedCounterDto> .Update.Combine(
                                                                     Builders <AggregatedCounterDto> .Update.Inc(_ => _.Value, item.Value),
                                                                     Builders <AggregatedCounterDto> .Update.Set(_ => _.ExpireAt, item.ExpireAt > aggregatedItem.ExpireAt ? item.ExpireAt : aggregatedItem.ExpireAt)));
                        }
                        else
                        {
                            // First aggregate for this key.
                            database.AggregatedCounter.InsertOne(new AggregatedCounterDto
                            {
                                Id       = ObjectId.GenerateNewId(),
                                Key      = item.Key,
                                Value    = item.Value,
                                ExpireAt = item.ExpireAt
                            });
                        }
                    }

                    // Delete exactly the raw records that were aggregated in this pass.
                    removedCount = database
                                   .Counter
                                   .DeleteMany(Builders <CounterDto> .Filter.In(_ => _.Id, recordsToAggregate.Select(_ => _.Id)))
                                   .DeletedCount;
                }

                // A full batch suggests more records remain: pause briefly, check for
                // cancellation, then run another pass.
                if (removedCount >= NumberOfRecordsInSinglePass)
                {
                    cancellationToken.WaitHandle.WaitOne(DelayBetweenPasses);
                    cancellationToken.ThrowIfCancellationRequested();
                }
            } while (removedCount >= NumberOfRecordsInSinglePass);

            // Idle until the next scheduled aggregation (or cancellation).
            cancellationToken.WaitHandle.WaitOne(_interval);
        }
Example #27
0
        /// <summary>
        /// Convenience constructor that chains to the main one with default storage options.
        /// </summary>
        /// <param name="database">Hangfire database context</param>
        /// <param name="queueProviders">Persistent job-queue providers</param>
        public MongoConnection(HangfireDbContext database, PersistentJobQueueProviderCollection queueProviders)
            : this(database, new MongoStorageOptions(), queueProviders)
        {
        }
Example #28
0
        // Test fixture setup: a fresh database context and an already-cancelled token.
        public ExpirationManagerFacts()
        {
            _token     = new CancellationToken(true);
            _dbContext = ConnectionUtils.CreateDbContext();
        }
 /// <summary>
 /// Creates the monitoring API over the given database and queue providers.
 /// </summary>
 public MongoMonitoringApi(HangfireDbContext database, PersistentJobQueueProviderCollection queueProviders)
 {
     _queueProviders = queueProviders;
     _database = database;
 }
Example #30
0
 /// <summary>
 /// Creates a <see cref="MongoJobFetcher"/> using this factory's job-queue semaphore.
 /// </summary>
 /// <param name="dbContext">Hangfire database context</param>
 /// <param name="storageOptions">Storage configuration options</param>
 /// <returns>A new MongoJobFetcher instance</returns>
 public virtual MongoJobFetcher CreateMongoJobFetcher(HangfireDbContext dbContext, MongoStorageOptions storageOptions) =>
     new MongoJobFetcher(dbContext, storageOptions, JobQueueSemaphore);