// Synchronizes the local cache generation number with the one stored on the
// Redis server. All writes are fire-and-forget: callers already retry when
// the generations cannot be synced, so results are intentionally ignored.
// Fix: the method cached CacheNamespace.GetGeneration() in currentGeneration
// but then re-read it twice more; if the generation moved between reads the
// comparison and the written value could disagree. Use the cached local.
private void SyncGeneration(IDatabase db)
{
    var generationKey = CacheNamespace.GetGenerationKey();
    var serverGenerationValue = db.StringGet(generationKey);
    var serverGeneration = Convert.ToInt64(serverGenerationValue);
    var currentGeneration = CacheNamespace.GetGeneration();

    if (serverGenerationValue.IsNullOrEmpty)
    {
        // Generation was cleared by someone else (shouldn't happen).
        db.StringSetAsync(
            key: generationKey,
            value: currentGeneration,
            // Only set if someone else doesn't jump in and set it first.
            when: When.NotExists,
            flags: CommandFlags.FireAndForget
        );

        log.InfoFormat("setting server generation ({0}) because it is empty", currentGeneration);
    }
    else if (serverGeneration < currentGeneration)
    {
        // Generation was lowered by someone else (shouldn't happen).
        var transaction = db.CreateTransaction();

        // Only set if someone else doesn't jump in and set it first.
        transaction.AddCondition(Condition.StringEqual(generationKey, serverGeneration));

        transaction.StringSetAsync(
            key: generationKey,
            value: currentGeneration,
            flags: CommandFlags.FireAndForget
        );

        // We don't need to worry about the result because we will
        // already retry if we can't sync the generation.
        transaction.ExecuteAsync(CommandFlags.FireAndForget);

        log.InfoFormat("syncing server generation (server={0}, current={1})", serverGeneration, currentGeneration);
    }
    else
    {
        // Server is ahead (or equal): adopt the higher server generation locally.
        CacheNamespace.SetHigherGeneration(serverGeneration);

        log.InfoFormat("syncing server generation (server={0}, current={1})", serverGeneration, currentGeneration);
    }
}
/// <summary>
/// Loads the assembly with the given name and scans it for mapped documents.
/// </summary>
/// <param name="assemblyName">Display name of the assembly to load.</param>
/// <exception cref="ValidatorConfigurationException">
/// Wraps any failure to load the assembly.
/// </exception>
public void AddAssembly(string assemblyName)
{
    log.InfoFormat("Searching for mapped documents in assembly: {0}", assemblyName);

    Assembly loaded;
    try
    {
        loaded = Assembly.Load(assemblyName);
    }
    catch (Exception ex)
    {
        // Surface load failures as a configuration problem, keeping the cause.
        throw new ValidatorConfigurationException("Could not add assembly " + assemblyName, ex);
    }

    // Delegate the actual scan to the Assembly-based overload.
    AddAssembly(loaded);
}
/// <summary>
/// Builds the record hierarchy for the given entity by generating the
/// hierarchy SQL, querying with the entity's key values, and mapping the
/// resulting rows back onto the hierarchy.
/// </summary>
/// <param name="entity">Entity whose record hierarchy is requested.</param>
/// <returns>The populated record hierarchy.</returns>
public RecordHierarchy GetRecordHierarchy(Entity entity)
{
    _log.InfoFormat("Getting record hierarchy for entity record ({0}#{1})", entity.Name, entity.JoinedKeyWithValue);

    var hierarchyIndex = 0;
    var entityHierarchy = GetEntityHierarchy(null, entity, ref hierarchyIndex);

    var hierarchySql = GenerateHierarchySql(entityHierarchy);
    _log.DebugFormat("Sql hierarchy: \r\n {0}", hierarchySql);

    var keyValues = entity.Key.Select(x => x.Value.Raw).ToArray();
    var queriedRecords = new DynamicModel(Admin.ConnectionStringName).Query(hierarchySql, keyValues).ToList();

    return GetHierarchyRecords(queriedRecords, entityHierarchy);
}
// Yields each mapped class that declares at least one one-to-one property;
// such classes do not support top-level saves. Each class is reported at
// most once (the inner scan stops at the first one-to-one hit).
private static IEnumerable<System.Type> ClassesWithoutTopLevelSaveSupport(Configuration config)
{
    foreach (PersistentClass classMapping in config.ClassMappings)
    {
        foreach (var property in classMapping.PropertyClosureIterator)
        {
            if (!(property.Value is OneToOne))
            {
                continue;
            }

            var mappedClass = classMapping.MappedClass;
            Log.InfoFormat("Type {0} does not support top-level saves.", mappedClass.Name);
            yield return mappedClass;
            break;
        }
    }
}
/// <summary>
/// Handles a merge event: performs the initial merge, then repeatedly
/// retries any entities left transient until either none remain or a retry
/// pass makes no progress, in which case the remaining transient instances
/// are reported via a <see cref="TransientObjectException"/>.
/// </summary>
/// <param name="event">The merge event to process.</param>
/// <exception cref="TransientObjectException">
/// Thrown when remaining transient entities cannot be merged (typically
/// because they reference other transient entities outside the merge).
/// </exception>
public virtual void OnMerge(MergeEvent @event)
{
    EventCache copyCache = new EventCache();
    OnMerge(@event, copyCache);
    // transientCopyCache may contain parent and child entities in random order.
    // Child entities occurring ahead of their respective transient parents may fail
    // to get merged in one iteration.
    // Retries are necessary as more and more children may be able to merge on subsequent iterations.
    // Iteratively get transient entities and retry merge until one of the following conditions is true:
    // 1) transientCopyCache.size() == 0
    // 2) transientCopyCache.size() is not decreasing
    // TODO: find out if retrying can add entities to copyCache (don't think it can...)
    // For now, just retry once; throw TransientObjectException if there are still any transient entities
    IDictionary transientCopyCache = this.GetTransientCopyCache(@event, copyCache);
    while (transientCopyCache.Count > 0)
    {
        // Snapshot the count so we can detect whether this pass made progress.
        var initialTransientCount = transientCopyCache.Count;
        RetryMergeTransientEntities(@event, transientCopyCache, copyCache);
        // find any entities that are still transient after retry
        transientCopyCache = this.GetTransientCopyCache(@event, copyCache);
        // if a retry did nothing, the remaining transient entities
        // cannot be merged due to references to other transient entities
        // that are not part of the merge
        if (transientCopyCache.Count == initialTransientCount)
        {
            // Collect the distinct entity names for the exception message.
            ISet<string> transientEntityNames = new HashSet<string>();
            foreach (object transientEntity in transientCopyCache.Keys)
            {
                string transientEntityName = @event.Session.GuessEntityName(transientEntity);
                transientEntityNames.Add(transientEntityName);
                log.InfoFormat(
                    "transient instance could not be processed by merge: {0} [{1}]",
                    transientEntityName,
                    transientEntity.ToString());
            }
            throw new TransientObjectException("one or more objects is an unsaved transient instance - save transient instance(s) before merging: " + String.Join(",", transientEntityNames.ToArray()));
        }
    }
    // Release references held for the duration of the merge.
    copyCache.Clear();
}
/// <summary>
/// Builds the record hierarchy for the given entity record by generating the
/// hierarchy SQL, querying with the record's key values, and mapping the
/// resulting rows back onto the hierarchy.
/// </summary>
/// <param name="entityRecord">Record whose hierarchy is requested.</param>
/// <param name="deleteOptions">Optional per-property delete options.</param>
/// <returns>The populated record hierarchy.</returns>
public RecordHierarchy GetRecordHierarchy(
    EntityRecord entityRecord,
    IList<PropertyDeleteOption> deleteOptions = null)
{
    _log.InfoFormat(
        "Getting record hierarchy for entity record ({0}#{1})",
        entityRecord.Entity.Name,
        entityRecord.JoinedKeysWithNames);

    var entityHierarchy = GetEntityHierarchy(entityRecord.Entity, deleteOptions);

    var hierarchySql = GenerateHierarchySql(entityHierarchy);
    _log.Debug($"Sql hierarchy: \r\n {hierarchySql}");

    var keyValues = entityRecord.Keys.Select(x => x.Raw).ToArray();
    var queriedRecords = new DynamicModel(_admin.ConnectionStringName).Query(hierarchySql, keyValues).ToList();

    return GetHierarchyRecords(queriedRecords, entityHierarchy);
}
/// <summary>
/// Handles a merge event: performs the initial merge, then retries once for
/// entities left transient (children merged ahead of their parents). If any
/// entities are still transient after the retry, throws listing their names.
/// </summary>
/// <param name="event">The merge event to process.</param>
/// <exception cref="TransientObjectException">
/// Thrown when transient entities remain after the retry pass.
/// </exception>
public virtual void OnMerge(MergeEvent @event)
{
    EventCache copyCache = new EventCache();
    OnMerge(@event, copyCache);
    // TODO: iteratively get transient entities and retry merge until one of the following conditions:
    // 1) transientCopyCache.size() == 0
    // 2) transientCopyCache.size() is not decreasing and copyCache.size() is not increasing
    // TODO: find out if retrying can add entities to copyCache (don't think it can...)
    // For now, just retry once; throw TransientObjectException if there are still any transient entities
    IDictionary transientCopyCache = this.GetTransientCopyCache(@event, copyCache);
    if (transientCopyCache.Count > 0)
    {
        RetryMergeTransientEntities(@event, transientCopyCache, copyCache);
        // find any entities that are still transient after retry
        transientCopyCache = this.GetTransientCopyCache(@event, copyCache);
        if (transientCopyCache.Count > 0)
        {
            ISet<string> transientEntityNames = new HashedSet<string>();
            foreach (object transientEntity in transientCopyCache.Keys)
            {
                string transientEntityName = @event.Session.GuessEntityName(transientEntity);
                transientEntityNames.Add(transientEntityName);
                log.InfoFormat(
                    "transient instance could not be processed by merge: {0} [{1}]",
                    transientEntityName,
                    transientEntity.ToString());
            }
            // Fix: joining the set's elements; concatenating the set object
            // itself rendered the collection type name, not the entity names.
            throw new TransientObjectException("one or more objects is an unsaved transient instance - save transient instance(s) before merging: " + String.Join(",", transientEntityNames.ToArray()));
        }
    }
    // Release references held for the duration of the merge. (The dead
    // "copyCache = null" store on the expiring local was removed.)
    copyCache.Clear();
}
// Builds the database state to audit after an update: clones the event's
// current state and, for non-updatable properties, restores the value from
// the pre-modification state so the audit matches what the database holds.
// When no old state exists (detached entity), the current state is used
// as-is, which may yield incorrect audit data for non-updatable properties.
private static object[] postUpdateDbState(PostUpdateEvent evt)
{
    var dbState = (object[])evt.State.Clone();
    var persister = evt.Persister;
    var previousState = evt.OldState;

    if (previousState == null)
    {
        log.InfoFormat("Using current state when persisting detached {0}. This can result in incorrect audit data if non updatable property(ies) are used.", persister.EntityName);
        return dbState;
    }

    // Assuming that PostUpdateEvent#getOldState() returns database state of the record before modification.
    // Otherwise, we would have to execute SQL query to be sure of @Column(updatable = false) column value.
    for (var index = 0; index < persister.PropertyNames.Length; index++)
    {
        if (!persister.PropertyUpdateability[index])
        {
            dbState[index] = previousState[index];
        }
    }

    return dbState;
}
/// <summary>
/// Dispatches a log entry to the wrapped internal logger, mapping the
/// NHibernate level to the matching logger method. For each level an
/// exception (if any) takes priority, then a format-with-args call when
/// args are present, then the plain message.
/// </summary>
/// <param name="logLevel">Severity of the entry; None is a no-op.</param>
/// <param name="state">Message, or format string plus optional args.</param>
/// <param name="exception">Optional exception to attach to the entry.</param>
/// <exception cref="ArgumentOutOfRangeException">
/// Thrown for an unrecognized <paramref name="logLevel"/> value.
/// </exception>
public void Log(NHibernateLogLevel logLevel, NHibernateLogValues state, Exception exception)
{
    // Skip all work when the target level is disabled on the inner logger.
    if (!IsEnabled(logLevel))
    {
        return;
    }
    switch (logLevel)
    {
        // Trace is mapped onto Debug: the inner logger exposes no
        // separate trace channel here.
        case NHibernateLogLevel.Debug:
        case NHibernateLogLevel.Trace:
            if (exception != null)
            {
                _internalLogger.Debug(state, exception);
            }
            else if (state.Args?.Length > 0)
            {
                _internalLogger.DebugFormat(state.Format, state.Args);
            }
            else
            {
                _internalLogger.Debug(state);
            }
            break;
        case NHibernateLogLevel.Info:
            if (exception != null)
            {
                _internalLogger.Info(state, exception);
            }
            else if (state.Args?.Length > 0)
            {
                _internalLogger.InfoFormat(state.Format, state.Args);
            }
            else
            {
                _internalLogger.Info(state);
            }
            break;
        case NHibernateLogLevel.Warn:
            if (exception != null)
            {
                _internalLogger.Warn(state, exception);
            }
            else if (state.Args?.Length > 0)
            {
                _internalLogger.WarnFormat(state.Format, state.Args);
            }
            else
            {
                _internalLogger.Warn(state);
            }
            break;
        case NHibernateLogLevel.Error:
            if (exception != null)
            {
                _internalLogger.Error(state, exception);
            }
            else if (state.Args?.Length > 0)
            {
                _internalLogger.ErrorFormat(state.Format, state.Args);
            }
            else
            {
                _internalLogger.Error(state);
            }
            break;
        case NHibernateLogLevel.Fatal:
            // NOTE(review): unlike the other levels there is no args-formatted
            // branch here — presumably the internal logger has no FatalFormat
            // overload; confirm before adding one.
            if (exception != null)
            {
                _internalLogger.Fatal(state, exception);
            }
            else
            {
                _internalLogger.Fatal(state);
            }
            break;
        case NHibernateLogLevel.None:
            break;
        default:
            throw new ArgumentOutOfRangeException(nameof(logLevel), logLevel, null);
    }
}