public ExpirationManagerFacts()
        {
            _storage        = ConnectionUtils.CreateStorage();
            _queueProviders = _storage.QueueProviders;

            _token = new CancellationToken(true); // token starts in the canceled state
        }
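The constructor above wires up the storage and a CancellationToken that is already canceled, so a component under test does its work once and returns instead of waiting on its check interval. A minimal sketch of how these fields are typically consumed in an expiration test (the ExpirationManager constructor overload used here is an assumption and does not appear in this listing):

        public void Execute_RunsSingleCleanupPass_Sketch()
        {
            // Hypothetical usage of the fields initialized in the constructor above.
            var manager = new ExpirationManager(_storage, TimeSpan.Zero); // overload assumed
            manager.Execute(_token); // pre-canceled token keeps Execute from blocking on its interval
        }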
        public void CountersAggregatorExecutesProperly()
        {
            var storage = ConnectionUtils.CreateStorage();

            using (var connection = (LiteDbConnection)storage.GetConnection())
            {
                // Arrange
                connection.Database.StateDataCounter.Insert(new Counter
                {
                    Key      = "key",
                    Value    = 1L,
                    ExpireAt = DateTime.UtcNow.AddHours(1)
                });

                var aggregator = new CountersAggregator(storage, TimeSpan.Zero);
                var cts        = new CancellationTokenSource();
                cts.Cancel();

                // Act
                aggregator.Execute(cts.Token);

                // Assert
                Assert.Equal(1, connection.Database.StateDataAggregatedCounter.Count());
            }
        }
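The same cancel-before-execute idiom is used by the SQLite and Mongo variants of this test further down: the CancellationTokenSource is canceled up front so CountersAggregator.Execute performs a single aggregation pass and returns without waiting for its polling interval. Reduced to its essentials (sketch only, with `storage` standing for whatever ConnectionUtils.CreateStorage() returned):

        var aggregator = new CountersAggregator(storage, TimeSpan.Zero);

        using (var cts = new CancellationTokenSource())
        {
            cts.Cancel();                  // cancel first ...
            aggregator.Execute(cts.Token); // ... so the aggregator does one pass and exits
        }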
Example No. 3
        public void GetMonitoringApi_ReturnsNonNullInstance()
        {
            LiteDbStorage  storage = ConnectionUtils.CreateStorage();
            IMonitoringApi api     = storage.GetMonitoringApi();

            Assert.NotNull(api);
        }
Example No. 4
        public MongoRunFixture()
        {
            var databaseName = "Mongo-Hangfire-CamelCase";
            var context      = ConnectionUtils.CreateDbContext(databaseName);

            DbContext = context;
            // Make sure we start from scratch
            context.Database.Client.DropDatabase(databaseName);

            var storageOptions = new MongoStorageOptions
            {
                MigrationOptions = new MongoMigrationOptions
                {
                    MigrationStrategy = new DropMongoMigrationStrategy(),
                    BackupStrategy    = new NoneMongoBackupStrategy()
                }
            };

            JobStorage.Current = ConnectionUtils.CreateStorage(storageOptions, databaseName);

            var conventionPack = new ConventionPack {
                new CamelCaseElementNameConvention()
            };

            ConventionRegistry.Register("CamelCase", conventionPack, t => true);

            _server = new BackgroundJobServer(new BackgroundJobServerOptions
            {
                SchedulePollingInterval = TimeSpan.FromMilliseconds(100)
            });
        }
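Since the fixture registers a global camelCase convention pack and starts a BackgroundJobServer, it presumably tears the server down when the test run ends. A minimal sketch, assuming the class implements IDisposable (the Dispose body is not part of this listing):

        public void Dispose()
        {
            _server.SendStop(); // signal a graceful stop ...
            _server.Dispose();  // ... then block until shutdown completes
        }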
        public void CountersAggregatorExecutesProperly()
        {
            var storage = ConnectionUtils.CreateStorage();

            using (var connection = (HangfireSQLiteConnection)storage.GetConnection())
            {
                // Arrange
                connection.DbContext.Database.Insert(new Counter
                {
                    Id       = Guid.NewGuid().ToString(),
                    Key      = "key",
                    Value    = 1L,
                    ExpireAt = DateTime.UtcNow.AddHours(1)
                });

                var aggregator = new CountersAggregator(storage, TimeSpan.Zero);
                var cts        = new CancellationTokenSource();
                cts.Cancel();

                // Act
                aggregator.Execute(cts.Token);

                // Assert
                Assert.Equal(1, connection.DbContext.AggregatedCounterRepository.Count());
            }
        }
        public void CountersAggregatorExecutesProperly()
        {
            var storage = ConnectionUtils.CreateStorage();

            using (var connection = (MongoConnection)storage.GetConnection())
            {
                // Arrange
                connection.Database.StateData.InsertOne(new CounterDto
                {
                    Key      = "key",
                    Value    = 1L,
                    ExpireAt = DateTime.UtcNow.AddHours(1)
                });

                var aggregator = new CountersAggregator(storage, TimeSpan.Zero);

                // Act
                using (var cts = new CancellationTokenSource())
                {
                    cts.Cancel();
                    aggregator.Execute(cts.Token);
                }

                // Assert
                Assert.Equal(1, connection.Database.StateData.OfType<AggregatedCounterDto>().Count(new BsonDocument()));
            }
        }
        public void MultipleServerRunsRecurrentJobs()
        {
            // ARRANGE
            const int serverCount = 20;
            const int workerCount = 20;

            var options = new BackgroundJobServerOptions[serverCount];
            var storage = ConnectionUtils.CreateStorage(new MongoStorageOptions());
            var servers = new BackgroundJobServer[serverCount];

            var jobManagers = new RecurringJobManager[serverCount];

            for (int i = 0; i < serverCount; i++)
            {
                options[i] = new BackgroundJobServerOptions {
                    Queues = new[] { $"queue_options_{i}" }, WorkerCount = workerCount
                };

                servers[i]     = new BackgroundJobServer(options[i], storage);
                jobManagers[i] = new RecurringJobManager(storage);
            }

            try
            {
                // ACT
                for (int i = 0; i < serverCount; i++)
                {
                    var i1         = i;
                    var jobManager = jobManagers[i1];

                    for (int j = 0; j < workerCount; j++)
                    {
                        var j1         = j;
                        var queueIndex = j1 % options[i1].Queues.Length;
                        var queueName  = options[i1].Queues[queueIndex];
                        var job        = Job.FromExpression(() => Console.WriteLine("Setting signal for queue {0}",
                                                                                    queueName));
                        var jobId = $"job:[{i},{j}]";

                        jobManager.AddOrUpdate(jobId, job, Cron.Minutely(), new RecurringJobOptions
                        {
                            QueueName = queueName
                        });
                        jobManager.Trigger(jobId);
                    }
                }

                // Let Hangfire run for 1 second
                Task.Delay(1000).Wait();
            }
            finally
            {
                for (int i = 0; i < serverCount; i++)
                {
                    servers[i].SendStop();
                    servers[i].Dispose();
                }
            }
        }
Example No. 8
        public void GetConnection_ReturnsNonNullInstance()
        {
            LiteDbStorage storage = ConnectionUtils.CreateStorage();

            using (IStorageConnection connection = storage.GetConnection())
            {
                Assert.NotNull(connection);
            }
        }
Example No. 9
        public void GetComponents_ReturnsAllNeededComponents()
        {
            MongoStorage storage = ConnectionUtils.CreateStorage();

            var components = storage.GetComponents();

            Type[] componentTypes = components.Select(x => x.GetType()).ToArray();
            Assert.Contains(typeof(ExpirationManager), componentTypes);
        }
        public void MultipleBackgroundJobServers_AddsRecurrentJobs()
        {
            // ARRANGE
            const int serverCount = 15;
            const int workerCount = 2;

            JobStorage.Current = ConnectionUtils.CreateStorage(new MongoStorageOptions());

            var options = Enumerable.Range(0, serverCount)
                          .Select((_, i) => new BackgroundJobServerOptions
                          {
                              Queues      = new[] { "default", $"queue_{i}" },
                              WorkerCount = workerCount
                          })
                          .ToList();

            var servers = options.Select(o => new BackgroundJobServer(o)).ToList();

            // Let Hangfire run for 1 second
            Task.Delay(1000).Wait();

            // ACT
            foreach (var queue in options.SelectMany(o => o.Queues))
            {
                for (int i = 0; i < workerCount; i++)
                {
                    RecurringJob.AddOrUpdate($@"job_{queue}.{i}-a", () => Console.WriteLine($@"{queue}.{i}-a"), Cron.Minutely(), null, queue);
                    RecurringJob.AddOrUpdate($@"job_{queue}.{i}-b", () => Console.WriteLine($@"{queue}.{i}-b"), Cron.Minutely(), null, queue);
                }
            }

            // Let Hangfire run for 1 second
            Task.Delay(1000).Wait();

            // ASSERT
            servers.ForEach(s => s.SendStop());
            servers.ForEach(s => s.Dispose());
        }
Example No. 11
        //[Fact, Trait("Category", "DataGeneration")]
        public void Clean_Database_Filled()
        {
            var databaseName = "Mongo-Hangfire-Filled";
            var context      = ConnectionUtils.CreateDbContext(databaseName);

            // Make sure we start from scratch
            context.Database.Client.DropDatabase(databaseName);

            var storageOptions = new MongoStorageOptions
            {
                MigrationOptions = new MongoMigrationOptions
                {
                    MigrationStrategy = new DropMongoMigrationStrategy(),
                    BackupStrategy    = new NoneMongoBackupStrategy()
                },
                QueuePollInterval = TimeSpan.FromMilliseconds(500)
            };
            var serverOptions = new BackgroundJobServerOptions
            {
                ShutdownTimeout = TimeSpan.FromSeconds(15)
            };

            JobStorage.Current = ConnectionUtils.CreateStorage(databaseName);

            using (new BackgroundJobServer(serverOptions))
            {
                // Recurring Job
                RecurringJob.AddOrUpdate(() => HangfireTestJobs.ExecuteRecurringJob("Recurring job"), Cron.Minutely);

                // Scheduled job
                BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteScheduledJob("Scheduled job"), TimeSpan.FromSeconds(30));

                // Enqueued job
                BackgroundJob.Enqueue(() => HangfireTestJobs.ExecuteEnqueuedJob("Enqueued job"));

                // Continued job
                var parentId = BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteContinueWithJob("ContinueWith job", false), TimeSpan.FromSeconds(15));
                BackgroundJob.ContinueWith(parentId, () => HangfireTestJobs.ExecuteContinueWithJob("ContinueWith job continued", true));

                // Now the waiting game starts
                HangfireTestJobs.ScheduleEvent.WaitOne();
                BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteScheduledJob("Scheduled job (*)"), TimeSpan.FromMinutes(30));

                HangfireTestJobs.ContinueWithEvent.WaitOne();
                HangfireTestJobs.RecurringEvent.WaitOne();

                HangfireTestJobs.EnqueueEvent.WaitOne();
                BackgroundJob.Enqueue(() => HangfireTestJobs.ExecuteEnqueuedJob("Enqueued job (*)"));
            }


            // Some data are cleaned up when Hangfire shuts down.
            // Grab a copy so we can write it back - needed for migration tests.
            var connection = JobStorage.Current.GetConnection();

            connection.AnnounceServer("test-server", new ServerContext
            {
                WorkerCount = serverOptions.WorkerCount,
                Queues      = serverOptions.Queues
            });

            connection.AcquireDistributedLock("test-lock", TimeSpan.FromSeconds(30));

            // Create database snapshot in zip file
            var schemaVersion = (int)MongoMigrationManager.RequiredSchemaVersion;

            using (var stream = new FileStream($@"Hangfire-Mongo-Schema-{schemaVersion:000}.zip", FileMode.Create))
            {
                var allowedEmptyCollections = new List<string>
                {
                    "hangfire.migrationLock"
                };

                if (MongoMigrationManager.RequiredSchemaVersion >= MongoSchema.Version09 &&
                    MongoMigrationManager.RequiredSchemaVersion <= MongoSchema.Version15)
                {
                    // Signal collection work was initiated in schema version 9,
                    // and still not put to use in schema version 15.
                    allowedEmptyCollections.Add($@"{storageOptions.Prefix}.signal");
                }
                BackupDatabaseToStream(databaseName, stream, allowedEmptyCollections.ToArray());
            }
        }