public void Ctor_SetsTheDefaultOptions()
{
    MongoStorageOptions options = new MongoStorageOptions();

    Assert.Equal("hangfire", options.Prefix);
    Assert.True(options.InvisibilityTimeout > TimeSpan.Zero);
}
public MongoJobQueueProvider(MongoStorageOptions options)
{
    if (options == null)
        throw new ArgumentNullException("options");

    _options = options;
}
public void Ctor_SetsTheDefaultOptions_ShouldGenerateUniqueClientId()
{
    var options1 = new MongoStorageOptions();
    var options2 = new MongoStorageOptions();
    var options3 = new MongoStorageOptions();

    IEnumerable<string> result = new[] { options1.ClientId, options2.ClientId, options3.ClientId }.Distinct();

    Assert.Equal(3, result.Count());
}
/// <summary>
/// Configure Hangfire to use MongoDB storage
/// </summary>
/// <param name="configuration">Configuration</param>
/// <param name="connectionString">Connection string for the Mongo database, for example 'mongodb://*****:*****@host:port'</param>
/// <param name="databaseName">Name of the database on the Mongo server</param>
/// <param name="options">Storage options</param>
/// <returns>The configured Mongo storage instance</returns>
public static MongoStorage UseMongoStorage(this IBootstrapperConfiguration configuration,
    string connectionString,
    string databaseName,
    MongoStorageOptions options)
{
    MongoStorage storage = new MongoStorage(connectionString, databaseName, options);

    configuration.UseStorage(storage);

    return storage;
}
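A minimal usage sketch for the extension above, assuming a matching UseMongoStorage overload is also available on IGlobalConfiguration (one is used further down in this section); the server address, database name, and option values are illustrative only.

// Illustrative values - substitute your own server, database, and options.
var storageOptions = new MongoStorageOptions
{
    Prefix = "hangfire",
    QueuePollInterval = TimeSpan.FromSeconds(15)
};

GlobalConfiguration.Configuration.UseMongoStorage("mongodb://localhost:27017", "hangfire-db", storageOptions);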
public MongoJobQueue(HangfireDbContext connection, MongoStorageOptions options)
{
    if (options == null)
        throw new ArgumentNullException("options");

    if (connection == null)
        throw new ArgumentNullException("connection");

    _options = options;
    _connection = connection;
}
public bool Execute(IMongoDatabase database, MongoStorageOptions storageOptions, IMongoMigrationBag migrationBag)
{
    database.DropCollection(storageOptions.Prefix + ".signal");
    return true;
}
public void Set_QueuePollInterval_ShouldThrowAnException_WhenGivenIntervalIsNegative()
{
    var options = new MongoStorageOptions();

    Assert.Throws<ArgumentException>(
        () => options.QueuePollInterval = TimeSpan.FromSeconds(-1));
}
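This test, together with the zero-interval and setter tests further down, pins the contract that QueuePollInterval must be strictly positive. A minimal sketch of the kind of validation those tests imply (not the library's actual source):

private TimeSpan _queuePollInterval;

public TimeSpan QueuePollInterval
{
    get { return _queuePollInterval; }
    set
    {
        // Reject zero and negative intervals, as the tests expect.
        if (value <= TimeSpan.Zero)
        {
            throw new ArgumentException("The QueuePollInterval property value should be positive.", nameof(value));
        }
        _queuePollInterval = value;
    }
}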
protected MongoMigrationStrategyBase(HangfireDbContext dbContext, MongoStorageOptions storageOptions, MongoMigrationRunner migrationRunner)
{
    _dbContext = dbContext;
    _storageOptions = storageOptions;
    _migrationRunner = migrationRunner;
}
public MongoMigrationStrategyMigrate(IMongoDatabase database, MongoStorageOptions storageOptions, MongoMigrationRunner migrationRunner)
    : base(database, storageOptions, migrationRunner)
{
}
/// <summary>
/// Creates MongoDB distributed lock
/// </summary>
/// <param name="resource">Lock resource</param>
/// <param name="timeout">Lock timeout</param>
/// <param name="database">Lock database</param>
/// <param name="options">Database options</param>
/// <exception cref="DistributedLockTimeoutException">Thrown if the lock is not acquired within the timeout</exception>
/// <exception cref="MongoDistributedLockException">Thrown if another Mongo-specific issue prevented the lock from being acquired</exception>
public MongoDistributedLock(string resource, TimeSpan timeout, HangfireDbContext database, MongoStorageOptions options)
{
    if (string.IsNullOrEmpty(resource))
    {
        throw new ArgumentNullException(nameof(resource));
    }
    if (timeout.TotalSeconds > int.MaxValue)
    {
        throw new ArgumentException(
            $"The timeout specified is too large. Please supply a timeout equal to or less than {int.MaxValue} seconds",
            nameof(timeout));
    }
    if (database == null)
    {
        throw new ArgumentNullException(nameof(database));
    }
    if (options == null)
    {
        throw new ArgumentNullException(nameof(options));
    }

    _resource = resource;
    _database = database;
    _options = options;

    if (!AcquiredLocks.Value.ContainsKey(_resource) || AcquiredLocks.Value[_resource] == 0)
    {
        Cleanup();
        Acquire(timeout);
        AcquiredLocks.Value[_resource] = 1;
        StartHeartBeat();
    }
    else
    {
        AcquiredLocks.Value[_resource]++;
    }
}
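A usage sketch for the constructor above. It assumes the lock type is disposable and releases the lock on dispose (the per-resource reference counting suggests as much); the resource name, timeout, and dbContext variable are illustrative.

// Hypothetical usage: serialize work on a named resource across servers.
using (new MongoDistributedLock("locks:expiration-manager", TimeSpan.FromSeconds(30), dbContext, new MongoStorageOptions()))
{
    // Only one holder of "locks:expiration-manager" runs this block at a time.
}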
public static MongoStorage CreateStorage(MongoStorageOptions storageOptions)
{
    return new MongoStorage(GetConnectionString(), GetDatabaseName(), storageOptions);
}
public MongoMigrationStrategyDrop(HangfireDbContext dbContext, MongoStorageOptions storageOptions)
    : base(dbContext, storageOptions)
{
}
public bool Execute(IMongoDatabase database, MongoStorageOptions storageOptions, IMongoMigrationContext migrationContext)
{
    var jobGraph = database.GetCollection<BsonDocument>(storageOptions.Prefix + ".jobGraph");
    var counters = jobGraph.Find(new BsonDocument("_t", "CounterDto")).ToList();

    var idsToRemove = new BsonArray();
    foreach (var countersByKey in counters.GroupBy(c => c["Key"].AsString))
    {
        var key = countersByKey.Key;
        var groupedCounters = countersByKey.ToList();

        // If there is only one counter for this key, there is nothing to do.
        if (groupedCounters.Count == 1)
        {
            continue;
        }

        // If all counters have the same value, keep only the newest one.
        var allSameValue = groupedCounters.Select(c => Convert.ToInt32(c["Value"])).Distinct().Count() == 1;
        if (allSameValue)
        {
            var newestObjectId = groupedCounters.Select(c => c["_id"].AsObjectId).Max();
            idsToRemove.AddRange(groupedCounters.Where(c => c["_id"].AsObjectId != newestObjectId).Select(c => c["_id"]));
            continue;
        }

        // If there are several different values, delete all counters with value '1' and sum the rest;
        // most likely a new CounterDto was created and counted instead of the aggregated one.
        idsToRemove.AddRange(groupedCounters.Where(c => Convert.ToInt32(c["Value"]) == 1).Select(c => c["_id"]));

        // Verify that only one counter is left. If more remain, sum their values into a new
        // document and delete the existing ones.
        groupedCounters.RemoveAll(c => idsToRemove.Contains(c["_id"].AsObjectId));
        if (groupedCounters.Count <= 1)
        {
            continue;
        }

        var sum = groupedCounters.Sum(c =>
        {
            var value = c["Value"];
            return value.IsInt32 ? value.AsInt32 : value.AsInt64;
        });
        var expireAt = groupedCounters.Any(c => c.Contains("ExpireAt") && c["ExpireAt"] != BsonNull.Value)
            ? (BsonValue)groupedCounters.Select(c => c["ExpireAt"].ToUniversalTime()).Max()
            : BsonNull.Value;

        var counterToInsert = new BsonDocument
        {
            ["Key"] = key,
            ["Value"] = sum,
            ["_id"] = ObjectId.GenerateNewId(),
            ["ExpireAt"] = expireAt,
            ["_t"] = new BsonArray(new[] { "BaseJobDto", "ExpiringJobDto", "KeyJobDto", "CounterDto" })
        };
        jobGraph.InsertOne(counterToInsert);

        idsToRemove.AddRange(groupedCounters.Select(c => c["_id"]));
    }

    if (!idsToRemove.Any())
    {
        return true;
    }

    jobGraph.DeleteMany(new BsonDocument("_id", new BsonDocument("$in", idsToRemove)));
    return true;
}
public MongoMigrationManager(MongoStorageOptions storageOptions)
{
    _storageOptions = storageOptions;
}
public MongoJobQueue(HangfireDbContext connection, MongoStorageOptions storageOptions)
{
    _storageOptions = storageOptions ?? throw new ArgumentNullException(nameof(storageOptions));
    _connection = connection ?? throw new ArgumentNullException(nameof(connection));
}
public MongoJobQueueProvider(MongoStorageOptions storageOptions)
{
    _storageOptions = storageOptions ?? throw new ArgumentNullException(nameof(storageOptions));
}
/// <summary>
/// Executes backup routine
/// </summary>
public virtual void Backup(MongoStorageOptions storageOptions, IMongoDatabase database, MongoSchema fromSchema, MongoSchema toSchema)
{
}
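As a hedged illustration of the hook above, a derived strategy can override Backup to skip the copy and only record the schema transition; the base class name used here is an assumption.

// Illustrative only: a no-op strategy that just logs the migration it would have backed up.
public class LoggingOnlyBackupStrategy : MongoBackupStrategy // base type assumed
{
    public override void Backup(MongoStorageOptions storageOptions, IMongoDatabase database,
        MongoSchema fromSchema, MongoSchema toSchema)
    {
        Console.WriteLine($"Skipping backup for schema migration {fromSchema} -> {toSchema} (prefix '{storageOptions.Prefix}')");
    }
}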
/// <summary>
/// Configures Hangfire to use Tags.
/// </summary>
/// <param name="configuration">Global configuration</param>
/// <param name="options">Options for tags</param>
/// <param name="mongoOptions">Options for Mongo storage</param>
/// <returns>The global configuration, for chaining</returns>
public static IGlobalConfiguration UseTagsWithMongo(this IGlobalConfiguration configuration, TagsOptions options = null, MongoStorageOptions mongoOptions = null)
{
    options = options ?? new TagsOptions();
    mongoOptions = mongoOptions ?? new MongoStorageOptions();

    options.Storage = new MongoTagsServiceStorage(mongoOptions);
    var config = configuration.UseTags(options);

    JobsSidebarMenu.Items.RemoveAll(x => x.Method.Module.Assembly == typeof(TagsOptions).Assembly);
    JobsSidebarMenu.Items.Add(page => new MenuItem("Tags", page.Url.To("/tags/search"))
    {
        Active = page.RequestPath.StartsWith("/tags/search"),
        Metric = new DashboardMetric("tags:count",
            razorPage => new Metric(((MongoTagsServiceStorage)options.Storage).GetTagsCount()))
    });

    return config;
}
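A minimal sketch of calling the extension above during startup; it relies only on the optional parameters shown in the signature and assumes Hangfire storage for the same Mongo database has already been configured.

// Both parameters are optional; defaults are created when they are omitted.
GlobalConfiguration.Configuration.UseTagsWithMongo(
    mongoOptions: new MongoStorageOptions { Prefix = "hangfire" });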
public bool Execute(IMongoDatabase database, MongoStorageOptions storageOptions, IMongoMigrationBag migrationBag)
{
    var jobCollection = database.GetCollection<BsonDocument>($@"{storageOptions.Prefix}.job");
    var jobParametersCollection = database.GetCollection<BsonDocument>($@"{storageOptions.Prefix}.jobParameter");
    var stateCollection = database.GetCollection<BsonDocument>($@"{storageOptions.Prefix}.state");

    var filter = Builders<BsonDocument>.Filter.Empty;
    var jobs = jobCollection.Find(filter).ToList();
    var jobIdMapping = jobs
        .Where(j => j["_id"].IsInt32)
        .Select(j => j["_id"].AsInt32)
        .Distinct()
        .ToDictionary(jid => jid, jid => new BsonObjectId(ObjectId.GenerateNewId()).ToString());

    migrationBag.SetItem("JobIdMapping", jobIdMapping);

    var migratedJobs = jobs.Select(job =>
    {
        var id = job["_id"].AsInt32;
        var jobParameters = jobParametersCollection.Find(jp => jp["JobId"] == id).ToList();
        var jobStates = stateCollection.Find(s => s["JobId"] == id)
            .SortBy(s => s["CreatedAt"])
            .ToList();

        job["_id"] = jobIdMapping[id];
        job["Parameters"] = new BsonDocument(jobParameters.ToDictionary(jp => jp["Name"].AsString, jp => jp["Value"].AsString));
        job["StateHistory"] = new BsonArray(jobStates.Select(s =>
        {
            s.Remove("_id");
            s.Remove("JobId");
            // We expect "Data" to be a string of raw JSON,
            // but in practice it has not always been one.
            if (s["Data"].IsString)
            {
                s["Data"] = new BsonDocument(
                    JobHelper.FromJson<Dictionary<string, string>>(s["Data"].AsString));
            }
            else
            {
                System.Diagnostics.Debug.WriteLine(s["Data"].BsonType);
            }
            if (!s["Data"].IsBsonDocument)
            {
                throw new MongoMigrationException(this, "Expected JobState field 'Data' to be BsonDocument");
            }
            return s;
        }));
        job.Remove("StateId");
        return job;
    }).ToList();

    if (migratedJobs.Any())
    {
        jobCollection.InsertMany(migratedJobs);
    }

    // Remove only the old job documents, i.e. those still keyed by the original integer ids,
    // so the freshly inserted migrated documents are left untouched.
    jobCollection.DeleteMany(new BsonDocument("_id", new BsonDocument("$in", new BsonArray(jobIdMapping.Keys))));

    return true;
}
public bool Execute(IMongoDatabase database, MongoStorageOptions storageOptions, IMongoMigrationBag migrationBag)
{
    var stateDataFindTask = database
        .GetCollection<BsonDocument>(storageOptions.Prefix + ".stateData")
        .Find(new BsonDocument())
        .ToListAsync();
    var jobFindTask = database
        .GetCollection<BsonDocument>(storageOptions.Prefix + ".job")
        .Find(new BsonDocument())
        .ToListAsync();
    var jobQueueFindTask = database
        .GetCollection<BsonDocument>(storageOptions.Prefix + ".jobQueue")
        .Find(new BsonDocument())
        .ToListAsync();

    // Run the queries in parallel; make sure we don't deadlock if there is a synchronization context.
    Task.Run(() => Task.WhenAll(stateDataFindTask, jobFindTask, jobQueueFindTask)).GetAwaiter().GetResult();

    var jobs = jobFindTask.Result;
    var stateData = stateDataFindTask.Result;
    var jobQueue = jobQueueFindTask.Result;

    foreach (var data in stateData)
    {
        var typeName = "";
        if (data.TryGetValue("_t", out var typeValue))
        {
            typeName = typeValue is BsonArray
                ? data["_t"].AsBsonArray.Last().AsString
                : data["_t"].AsString;
        }
        else
        {
            throw new InvalidOperationException($"Expected '_t' element in stateData entity, got: {data.ToJson()}");
        }
        data["_t"] = new BsonArray(new[] { "BaseJobDto", "ExpiringJobDto", "KeyJobDto", typeName });
    }

    foreach (var job in jobs)
    {
        job["_t"] = new BsonArray(new[] { "BaseJobDto", "ExpiringJobDto", "JobDto" });
    }

    foreach (var jobQ in jobQueue)
    {
        jobQ["_t"] = new BsonArray { "BaseJobDto", "JobQueueDto" };
    }

    var jobGraphEntities = jobs.Concat(stateData).Concat(jobQueue);
    if (jobGraphEntities.Any())
    {
        database
            .GetCollection<BsonDocument>(storageOptions.Prefix + ".jobGraph")
            .InsertMany(jobGraphEntities);
    }

    return true;
}
public MongoTagsServiceStorage(MongoStorageOptions options)
{
    _options = options;
}
protected MongoMigrationStrategyBase(IMongoDatabase database, MongoStorageOptions storageOptions, MongoMigrationRunner migrationRunner)
{
    _database = database;
    _storageOptions = storageOptions;
    _migrationRunner = migrationRunner;
}
public void Ctor_SetsTheDefaultOptions_ShouldGenerateClientId()
{
    var options = new MongoStorageOptions();

    Assert.False(String.IsNullOrWhiteSpace(options.ClientId));
}
/// <summary>
/// Find the Hangfire collection names for the given schema that already exist in the database.
/// </summary>
public static IEnumerable<string> ExistingHangfireCollectionNames(IMongoDatabase database, MongoSchema schema, MongoStorageOptions storageOptions)
{
    var existingCollectionNames = ExistingDatabaseCollectionNames(database).ToList();
    return schema.CollectionNames(storageOptions.Prefix).Where(c => existingCollectionNames.Contains(c));
}
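A short usage sketch for the helper above, e.g. to drop every existing Hangfire collection during a "drop" migration. It assumes database and storageOptions are already in scope; the schema value is illustrative.

// Drop only the Hangfire collections that actually exist for this schema and prefix.
foreach (var collectionName in ExistingHangfireCollectionNames(database, MongoSchema.Version15, storageOptions))
{
    database.DropCollection(collectionName);
}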
public MongoMigrationStrategyBase(HangfireDbContext dbContext, MongoStorageOptions storageOptions)
{
    _dbContext = dbContext;
    _storageOptions = storageOptions;
}
/// <summary>
/// Adds the OSP core services.
/// </summary>
/// <param name="services">The services.</param>
/// <returns>The OSP builder</returns>
private static IOspBuilder AddOsp(this IServiceCollection services)
{
    JwtSecurityTokenHandler.DefaultInboundClaimTypeMap.Clear();

    var builder = services.AddOspBuilder();
    builder.AddRequiredPlatformServices();

    services.AddMemoryCache();

    services.AddDistributedPubSubCache(options =>
    {
        var ospOptions = services.BuildServiceProvider().GetRequiredService<OspCoreServicesOptions>();
        options.Host = ospOptions.RedisCacheHost;
        options.Password = ospOptions.RedisCachePassword;
    });

    services.AddAuthentication(authenticationOptions =>
    {
        authenticationOptions.DefaultScheme = CookieAuthenticationDefaults.AuthenticationScheme;
        authenticationOptions.DefaultChallengeScheme = BackendCommon.OidcAuthenticationScheme;
    })
    .AddCookie(options =>
    {
        options.ExpireTimeSpan = CoreServiceConstants.CookieExpireTimeSpan;
        options.Cookie.Name = CoreServiceConstants.CookieName;
    })
    .AddOpenIdConnect(BackendCommon.OidcAuthenticationScheme, options =>
    {
        var ospOptions = services.BuildServiceProvider().GetRequiredService<OspCoreServicesOptions>();
        options.Authority = ospOptions.Authority;
        //options.RequireHttpsMetadata = false;

        options.ClientId = CommonConstants.CoreServicesClientId;

        options.Scope.Clear();
        options.Scope.Add(CommonConstants.Scopes.OpenId);
        options.Scope.Add(CommonConstants.Scopes.Profile);
        options.Scope.Add(CommonConstants.Scopes.Email);
        options.Scope.Add(CommonConstants.Scopes.Role);

        options.SaveTokens = true;
        options.SignInScheme = CookieAuthenticationDefaults.AuthenticationScheme;
        options.TokenValidationParameters = new TokenValidationParameters
        {
            NameClaimType = JwtClaimTypes.Name,
            RoleClaimType = JwtClaimTypes.Role,
        };
    })
    .AddIdentityServerAuthentication(options =>
    {
        var ospOptions = services.BuildServiceProvider().GetRequiredService<OspCoreServicesOptions>();

        // Base address of your identity server
        options.Authority = ospOptions.Authority;

        // Name of the API resource
        options.ApiName = CommonConstants.SystemApi;

        // Added because otherwise an exception is thrown when a client passes an invalid access token.
        options.SupportedTokens = SupportedTokens.Jwt;
    });

    services.AddAuthorization(options =>
    {
        options.AddPolicy(CoreServiceConstants.AuthenticatedUserPolicy,
            policyBuilder => policyBuilder.RequireAuthenticatedUser());

        options.AddPolicy(CoreServiceConstants.SystemApiReadOnlyPolicy, authorizationPolicyBuilder =>
        {
            // Require SystemApiFullAccess or SystemApiReadOnly
            authorizationPolicyBuilder.RequireScope(CommonConstants.SystemApiFullAccess, CommonConstants.SystemApiReadOnly);
        });

        options.AddPolicy(CoreServiceConstants.SystemApiReadWritePolicy, authorizationPolicyBuilder =>
        {
            // Require SystemApiFullAccess
            authorizationPolicyBuilder.RequireScope(CommonConstants.SystemApiFullAccess);
        });

        options.AddPolicy(CoreServiceConstants.TenantApiReadWritePolicy, authorizationPolicyBuilder =>
        {
            authorizationPolicyBuilder.AuthenticationSchemes.Add(CookieAuthenticationDefaults.AuthenticationScheme);
            authorizationPolicyBuilder.AuthenticationSchemes.Add(IdentityServerAuthenticationDefaults.AuthenticationScheme);
            authorizationPolicyBuilder.RequireAuthenticatedUser();
        });
    });

    services.AddMvcCore().AddAuthorization();

    services.AddOspApiVersioningAndDocumentation(options =>
    {
        options.AddXmlDocAssembly<Startup>();
        options.AddXmlDocAssembly<ClientDto>();
        options.Scopes = new Dictionary<string, string>
        {
            { CommonConstants.SystemApiFullAccess, Texts.Backend_CoreServices_Api_FullAccess },
            { CommonConstants.SystemApiReadOnly, Texts.Backend_CoreServices_Api_ReadOnlyAccess }
        };
        options.ApiTitle = "System Services API";
        options.ApiDescription = "Object Service Platform (OSP) System Services.";
        options.ClientId = CommonConstants.CoreServicesSwaggerClientId;
        options.AppName = Texts.Backend_CoreServices_UserSchema_Swagger_DisplayName;
    });

    // Hangfire is used to handle background jobs and scheduled jobs
    services.AddHangfire(config =>
    {
        var ospOptions = services.BuildServiceProvider().GetRequiredService<IOptions<OspCoreServicesOptions>>();
        var systemOptions = services.BuildServiceProvider().GetRequiredService<IOptions<OspSystemConfiguration>>();

        var storageOptions = new MongoStorageOptions
        {
            MigrationOptions = new MongoMigrationOptions
            {
                MigrationStrategy = new DropMongoMigrationStrategy(),
                BackupStrategy = new NoneMongoBackupStrategy()
            }
        };

        MongoUrlBuilder mongoUrlBuilder = new MongoUrlBuilder
        {
            DatabaseName = ospOptions.Value.JobDatabaseName,
            Username = systemOptions.Value.AdminUser,
            Password = systemOptions.Value.AdminUserPassword,
            AuthenticationSource = systemOptions.Value.AuthenticationDatabaseName
        };

        if (systemOptions.Value.DatabaseHost.Contains(","))
        {
            mongoUrlBuilder.Servers = systemOptions.Value.DatabaseHost.Split(",").Select(x => new MongoServerAddress(x));
        }
        else
        {
            mongoUrlBuilder.Server = new MongoServerAddress(systemOptions.Value.DatabaseHost);
        }

        config.UseMongoStorage(mongoUrlBuilder.ToString(), storageOptions);
    });

    // GraphQL
    services.AddSingleton<IDataLoaderContextAccessor, DataLoaderContextAccessor>();
    services.AddSingleton<DataLoaderDocumentListener>();
    services.AddScoped<IUserContextBuilder, TenantUserContextBuilder>();
    services.AddScoped<IGraphQLExecuter<OspSchema>, TenantGraphQlExecuter>();

    // Add GraphQL services and configure options
    services.AddSingleton<OspSchema>();
    services.AddSingleton<OspQuery>();

    // Separate web socket factory to handle tenant specific queries correctly
    services.AddTransient<ITenantWebSocketConnectionFactory, TenantWebSocketFactory>();
    services.TryAddSingleton<IDocumentExecuter, SubscriptionDocumentExecuter>();

    services.AddGraphQL(options => { options.EnableMetrics = true; })
        .AddErrorInfoProvider(opt => opt.ExposeExceptionStackTrace = true)
        .AddWebSockets()
        .AddSystemTextJson();

    return builder;
}
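For the Hangfire registration inside AddOsp above, MongoUrlBuilder composes a standard connection string that is then handed to UseMongoStorage. A small sketch with hypothetical credentials; the exact query-string output depends on the driver version.

// Hypothetical values; the builder's ToString() yields something like
// "mongodb://admin:secret@localhost/JobsDb?authSource=admin".
var mongoUrl = new MongoUrlBuilder
{
    DatabaseName = "JobsDb",
    Username = "admin",
    Password = "secret",
    AuthenticationSource = "admin",
    Server = new MongoServerAddress("localhost")
};
Console.WriteLine(mongoUrl.ToString());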
public bool Execute(IMongoDatabase database, MongoStorageOptions storageOptions, IMongoMigrationContext migrationContext)
{
    database.CreateCollection(storageOptions.Prefix + ".notifications");
    return true;
}
internal MongoMigrationManager(MongoStorageOptions storageOptions, IMongoDatabase database)
{
    _storageOptions = storageOptions;
    _database = database;
}
public void Set_QueuePollInterval_ShouldThrowAnException_WhenGivenIntervalIsEqualToZero()
{
    var options = new MongoStorageOptions();

    Assert.Throws<ArgumentException>(
        () => options.QueuePollInterval = TimeSpan.Zero);
}
public MongoMigrationStrategyMigrate(HangfireDbContext dbContext, MongoStorageOptions storageOptions, MongoMigrationRunner migrationRunner)
    : base(dbContext, storageOptions, migrationRunner)
{
}
public void Set_QueuePollInterval_SetsTheValue()
{
    var options = new MongoStorageOptions();

    options.QueuePollInterval = TimeSpan.FromSeconds(1);

    Assert.Equal(TimeSpan.FromSeconds(1), options.QueuePollInterval);
}
/// <summary>
/// Creates MongoDB distributed lock
/// </summary>
/// <param name="resource">Lock resource</param>
/// <param name="timeout">Lock timeout</param>
/// <param name="database">Lock database</param>
/// <param name="options">Database options</param>
public MongoDistributedLock(string resource, TimeSpan timeout, HangfireDbContext database, MongoStorageOptions options)
{
    if (String.IsNullOrEmpty(resource))
        throw new ArgumentNullException("resource");
    if (database == null)
        throw new ArgumentNullException("database");
    if (options == null)
        throw new ArgumentNullException("options");

    _resource = resource;
    _database = database;
    _options = options;

    try
    {
        // Remove dead locks
        database.DistributedLock.DeleteManyAsync(
            Builders<DistributedLockDto>.Filter.Eq(_ => _.Resource, resource) &
            Builders<DistributedLockDto>.Filter.Lt(_ => _.Heartbeat, database.GetServerTimeUtc().Subtract(options.DistributedLockLifetime)));

        // Check lock
        DateTime lockTimeoutTime = DateTime.Now.Add(timeout);
        bool isLockedBySomeoneElse;
        bool isFirstAttempt = true;
        do
        {
            isLockedBySomeoneElse = AsyncHelper.RunSync(() =>
                database.DistributedLock
                    .Find(Builders<DistributedLockDto>.Filter.Eq(_ => _.Resource, resource) &
                          Builders<DistributedLockDto>.Filter.Ne(_ => _.ClientId, _options.ClientId))
                    .FirstOrDefaultAsync()) != null;

            if (isFirstAttempt)
                isFirstAttempt = false;
            else
                Thread.Sleep((int)timeout.TotalMilliseconds / 10);
        }
        while (isLockedBySomeoneElse && (lockTimeoutTime >= DateTime.Now));

        // Set lock
        if (!isLockedBySomeoneElse)
        {
            AsyncHelper.RunSync(() => database.DistributedLock.FindOneAndUpdateAsync(
                Builders<DistributedLockDto>.Filter.Eq(_ => _.Resource, resource),
                Builders<DistributedLockDto>.Update.Combine(
                    Builders<DistributedLockDto>.Update.Set(_ => _.ClientId, _options.ClientId),
                    Builders<DistributedLockDto>.Update.Inc(_ => _.LockCount, 1),
                    Builders<DistributedLockDto>.Update.Set(_ => _.Heartbeat, database.GetServerTimeUtc())
                ),
                new FindOneAndUpdateOptions<DistributedLockDto> { IsUpsert = true }));

            StartHeartBeat();
        }
        else
        {
            throw new MongoDistributedLockException(
                String.Format("Could not place a lock on the resource '{0}': {1}.", _resource, "The lock request timed out"));
        }
    }
    catch (Exception ex)
    {
        if (ex is MongoDistributedLockException)
            throw;
        else
            throw new MongoDistributedLockException(
                String.Format("Could not place a lock on the resource '{0}': {1}.", _resource, "Check inner exception for details"), ex);
    }
}
public MongoDistributedLock(string resource, TimeSpan timeout, HangfireDbContext database, MongoStorageOptions options)
{
    if (String.IsNullOrEmpty(resource))
        throw new ArgumentNullException("resource");
    if (database == null)
        throw new ArgumentNullException("database");
    if (options == null)
        throw new ArgumentNullException("options");

    _resource = resource;
    _database = database;
    _options = options;

    try
    {
        // Remove dead locks
        database.DistributedLock.Remove(
            Query.And(
                Query<DistributedLockDto>.EQ(_ => _.Resource, resource),
                Query<DistributedLockDto>.LT(_ => _.Heartbeat, database.GetServerTimeUtc().Subtract(options.DistributedLockLifetime))));

        // Check lock
        DateTime lockTimeoutTime = DateTime.Now.Add(timeout);
        bool isLockedBySomeoneElse;
        bool isFirstAttempt = true;
        do
        {
            isLockedBySomeoneElse = database.DistributedLock
                .FindOne(Query.And(
                    Query<DistributedLockDto>.EQ(_ => _.Resource, resource),
                    Query<DistributedLockDto>.NE(_ => _.ClientId, _options.ClientId))) != null;

            if (isFirstAttempt)
                isFirstAttempt = false;
            else
                Thread.Sleep((int)timeout.TotalMilliseconds / 10);
        }
        while (isLockedBySomeoneElse && (lockTimeoutTime >= DateTime.Now));

        // Set lock
        if (!isLockedBySomeoneElse)
        {
            database.DistributedLock.FindAndModify(new FindAndModifyArgs
            {
                Query = Query<DistributedLockDto>.EQ(_ => _.Resource, resource),
                Update = Update.Combine(
                    Update<DistributedLockDto>.Set(_ => _.ClientId, _options.ClientId),
                    Update<DistributedLockDto>.Inc(_ => _.LockCount, 1),
                    Update<DistributedLockDto>.Set(_ => _.Heartbeat, database.GetServerTimeUtc())
                ),
                Upsert = true
            });

            StartHeartBeat();
        }
        else
        {
            throw new MongoDistributedLockException(
                String.Format("Could not place a lock on the resource '{0}': {1}.", _resource, "The lock request timed out"));
        }
    }
    catch (Exception ex)
    {
        if (ex is MongoDistributedLockException)
            throw;
        else
            throw new MongoDistributedLockException(
                String.Format("Could not place a lock on the resource '{0}': {1}.", _resource, "Check inner exception for details"), ex);
    }
}
//[Fact, Trait("Category", "DataGeneration")]
public void Clean_Database_Filled()
{
    var connectionString = "mongodb://localhost";
    var databaseName = "Mongo-Hangfire-Filled";

    // Make sure we start from scratch
    using (HangfireDbContext context = new HangfireDbContext(connectionString, databaseName))
    {
        context.Database.Client.DropDatabase(databaseName);
    }

    var storageOptions = new MongoStorageOptions
    {
        MigrationOptions = new MongoMigrationOptions
        {
            Strategy = MongoMigrationStrategy.None,
            BackupStrategy = MongoBackupStrategy.None
        },
        QueuePollInterval = TimeSpan.FromMilliseconds(500)
    };
    var serverOptions = new BackgroundJobServerOptions
    {
        ShutdownTimeout = TimeSpan.FromSeconds(15)
    };
    JobStorage.Current = new MongoStorage(connectionString, databaseName, storageOptions);

    using (new BackgroundJobServer(serverOptions))
    {
        // Recurring job
        RecurringJob.AddOrUpdate(() => HangfireTestJobs.ExecuteRecurringJob("Recurring job"), Cron.Minutely);

        // Scheduled job
        BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteScheduledJob("Scheduled job"), TimeSpan.FromSeconds(30));

        // Enqueued job
        BackgroundJob.Enqueue(() => HangfireTestJobs.ExecuteEnqueuedJob("Enqueued job"));

        // Continued job
        var parentId = BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteContinueWithJob("ContinueWith job", false), TimeSpan.FromSeconds(15));
        BackgroundJob.ContinueWith(parentId, () => HangfireTestJobs.ExecuteContinueWithJob("ContinueWith job continued", true));

        // Now the waiting game starts
        HangfireTestJobs.ScheduleEvent.WaitOne();
        BackgroundJob.Schedule(() => HangfireTestJobs.ExecuteScheduledJob("Scheduled job (*)"), TimeSpan.FromMinutes(30));

        HangfireTestJobs.ContinueWithEvent.WaitOne();
        HangfireTestJobs.RecurringEvent.WaitOne();

        HangfireTestJobs.EnqueueEvent.WaitOne();
        BackgroundJob.Enqueue(() => HangfireTestJobs.ExecuteEnqueuedJob("Enqueued job (*)"));
    }

    // Some data are cleaned up when Hangfire shuts down.
    // Grab a copy so we can write it back - needed for migration tests.
    var connection = JobStorage.Current.GetConnection();
    connection.AnnounceServer("test-server", new ServerContext
    {
        WorkerCount = serverOptions.WorkerCount,
        Queues = serverOptions.Queues
    });
    connection.AcquireDistributedLock("test-lock", TimeSpan.FromSeconds(30));

    // Create database snapshot in zip file
    var schemaVersion = (int)MongoMigrationManager.RequiredSchemaVersion;
    using (var stream = new FileStream($@"Hangfire-Mongo-Schema-{schemaVersion:000}.zip", FileMode.Create))
    {
        var allowedEmptyCollections = new List<string>();
        if (MongoMigrationManager.RequiredSchemaVersion >= MongoSchema.Version09 &&
            MongoMigrationManager.RequiredSchemaVersion <= MongoSchema.Version15)
        {
            // Signal collection work was initiated in schema version 9
            // and is still not put to use in schema version 15.
            allowedEmptyCollections.Add($@"{storageOptions.Prefix}.signal");
        }
        BackupDatabaseToStream(connectionString, databaseName, stream, allowedEmptyCollections.ToArray());
    }
}