/// <summary> Try to initialize a collection from the cache</summary>
private async Task<bool> InitializeCollectionFromCacheAsync(
	object collectionKey, ICollectionPersister persister, IPersistentCollection collection,
	ISessionImplementor source, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	if (!(source.EnabledFilters.Count == 0) && persister.IsAffectedByEnabledFilters(source))
	{
		log.Debug("disregarding cached version (if any) of collection due to enabled filters ");
		return false;
	}

	bool useCache = persister.HasCache && source.CacheMode.HasFlag(CacheMode.Get);
	if (!useCache)
	{
		return false;
	}

	var batchSize = persister.GetBatchSize();
	if (batchSize > 1 && persister.Cache.PreferMultipleGet())
	{
		var collectionEntries = new CollectionEntry[batchSize];
		// The first item in the array is the item that we want to load
		var collectionBatch = await (source.PersistenceContext.BatchFetchQueue
			.GetCollectionBatchAsync(persister, collectionKey, batchSize, false, collectionEntries, cancellationToken)).ConfigureAwait(false);
		// Ignore null values as the retrieved batch may contain them when there are not enough
		// uninitialized collections in the queue
		var keys = new List<CacheKey>(batchSize);
		for (var i = 0; i < collectionBatch.Length; i++)
		{
			var key = collectionBatch[i];
			if (key == null)
			{
				break;
			}
			keys.Add(source.GenerateCacheKey(key, persister.KeyType, persister.Role));
		}
		var cachedObjects = await (persister.Cache.GetManyAsync(keys.ToArray(), source.Timestamp, cancellationToken)).ConfigureAwait(false);
		for (var i = 1; i < cachedObjects.Length; i++)
		{
			var coll = source.PersistenceContext.BatchFetchQueue.GetBatchLoadableCollection(persister, collectionEntries[i]);
			await (AssembleAsync(keys[i], cachedObjects[i], persister, source, coll, collectionBatch[i], false, cancellationToken)).ConfigureAwait(false);
		}
		return await (AssembleAsync(keys[0], cachedObjects[0], persister, source, collection, collectionKey, true, cancellationToken)).ConfigureAwait(false);
	}

	var cacheKey = source.GenerateCacheKey(collectionKey, persister.KeyType, persister.Role);
	var cachedObject = await (persister.Cache.GetAsync(cacheKey, source.Timestamp, cancellationToken)).ConfigureAwait(false);
	return await (AssembleAsync(cacheKey, cachedObject, persister, source, collection, collectionKey, true, cancellationToken)).ConfigureAwait(false);
}
/// <summary> Called before executing any actions</summary>
public virtual void BeforeExecutions()
{
	// we need to obtain the lock before any actions are
	// executed, since this may be an inverse="true"
	// bidirectional association and it is one of the
	// earlier entity actions which actually updates
	// the database (this action is responsible for
	// second-level cache invalidation only)
	if (persister.HasCache)
	{
		CacheKey ck = session.GenerateCacheKey(key, persister.KeyType, persister.Role);
		softLock = persister.Cache.Lock(ck, null);
	}
}
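For context, a hedged sketch of the counterpart to the snippet above: the soft lock taken in BeforeExecutions is expected to be released once the transaction completes, using the same Release call that appears elsewhere in this listing. The hook name and the persister/session/key/softLock fields are assumptions for illustration, not verbatim source.

// Hedged sketch (illustrative, not verbatim NHibernate source)
public virtual void AfterTransactionCompletion(bool success)
{
	if (persister.HasCache)
	{
		// regenerate the key and release the soft lock taken in BeforeExecutions
		CacheKey ck = session.GenerateCacheKey(key, persister.KeyType, persister.Role);
		persister.Cache.Release(ck, softLock);
	}
}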
/// <summary> Try to initialize a collection from the cache</summary>
private bool InitializeCollectionFromCache(object id, ICollectionPersister persister, IPersistentCollection collection, ISessionImplementor source)
{
	if (!(source.EnabledFilters.Count == 0) && persister.IsAffectedByEnabledFilters(source))
	{
		log.Debug("disregarding cached version (if any) of collection due to enabled filters ");
		return false;
	}

	bool useCache = persister.HasCache && source.CacheMode.HasFlag(CacheMode.Get);
	if (!useCache)
	{
		return false;
	}

	var batchSize = persister.GetBatchSize();
	if (batchSize > 1 && persister.Cache.IsBatchingGetSupported())
	{
		var collectionEntries = new CollectionEntry[batchSize];
		// The first item in the array is the item that we want to load
		var collectionBatch = source.PersistenceContext.BatchFetchQueue
			.GetCollectionBatch(persister, id, batchSize, false, collectionEntries);
		// Ignore null values as the retrieved batch may contain them when there are not enough
		// uninitialized collections in the queue
		var keys = new List<CacheKey>(batchSize);
		for (var i = 0; i < collectionBatch.Length; i++)
		{
			var key = collectionBatch[i];
			if (key == null)
			{
				break;
			}
			keys.Add(source.GenerateCacheKey(key, persister.KeyType, persister.Role));
		}
		var cachedObjects = persister.Cache.GetMany(keys.ToArray(), source.Timestamp);
		for (var i = 1; i < cachedObjects.Length; i++)
		{
			var coll = source.PersistenceContext.BatchFetchQueue.GetBatchLoadableCollection(persister, collectionEntries[i]);
			Assemble(keys[i], cachedObjects[i], persister, source, coll, collectionBatch[i], false);
		}
		return Assemble(keys[0], cachedObjects[0], persister, source, collection, id, true);
	}

	var cacheKey = source.GenerateCacheKey(id, persister.KeyType, persister.Role);
	var cachedObject = persister.Cache.Get(cacheKey, source.Timestamp);
	return Assemble(cacheKey, cachedObject, persister, source, collection, id, true);
}
/// <summary>
/// If the class to be loaded has been configured with a cache, then lock
/// given id in that cache and then perform the load.
/// </summary>
/// <returns> The loaded entity </returns>
protected virtual object LockAndLoad(LoadEvent @event, IEntityPersister persister, EntityKey keyToLoad, LoadType options, ISessionImplementor source)
{
	ISoftLock sLock = null;
	CacheKey ck;
	if (persister.HasCache)
	{
		ck = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName);
		sLock = persister.Cache.Lock(ck, null);
	}
	else
	{
		ck = null;
	}

	object entity;
	try
	{
		entity = Load(@event, persister, keyToLoad, options);
	}
	finally
	{
		if (persister.HasCache)
		{
			persister.Cache.Release(ck, sLock);
		}
	}

	object proxy = @event.Session.PersistenceContext.ProxyFor(persister, keyToLoad, entity);
	return proxy;
}
/// <summary>
/// If the class to be loaded has been configured with a cache, then lock
/// given id in that cache and then perform the load.
/// </summary>
/// <returns> The loaded entity </returns>
protected virtual async Task<object> LockAndLoadAsync(LoadEvent @event, IEntityPersister persister, EntityKey keyToLoad, LoadType options, ISessionImplementor source, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	ISoftLock sLock = null;
	CacheKey ck;
	if (persister.HasCache)
	{
		ck = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName);
		sLock = await (persister.Cache.LockAsync(ck, null, cancellationToken)).ConfigureAwait(false);
	}
	else
	{
		ck = null;
	}

	object entity;
	try
	{
		entity = await (LoadAsync(@event, persister, keyToLoad, options, cancellationToken)).ConfigureAwait(false);
	}
	finally
	{
		if (persister.HasCache)
		{
			await (persister.Cache.ReleaseAsync(ck, sLock, cancellationToken)).ConfigureAwait(false);
		}
	}

	object proxy = @event.Session.PersistenceContext.ProxyFor(persister, keyToLoad, entity);
	return proxy;
}
/// <summary> Try to initialize a collection from the cache</summary>
private async Task<bool> InitializeCollectionFromCacheAsync(object id, ICollectionPersister persister, IPersistentCollection collection, ISessionImplementor source, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	if (!(source.EnabledFilters.Count == 0) && persister.IsAffectedByEnabledFilters(source))
	{
		log.Debug("disregarding cached version (if any) of collection due to enabled filters ");
		return false;
	}

	bool useCache = persister.HasCache && source.CacheMode.HasFlag(CacheMode.Get);
	if (!useCache)
	{
		return false;
	}
	else
	{
		ISessionFactoryImplementor factory = source.Factory;
		CacheKey ck = source.GenerateCacheKey(id, persister.KeyType, persister.Role);
		object ce = await (persister.Cache.GetAsync(ck, source.Timestamp, cancellationToken)).ConfigureAwait(false);

		if (factory.Statistics.IsStatisticsEnabled)
		{
			if (ce == null) { factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName); }
			else { factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName); }
		}

		if (ce == null) { log.Debug("Collection cache miss: {0}", ck); }
		else { log.Debug("Collection cache hit: {0}", ck); }

		if (ce == null)
		{
			return false;
		}
		else
		{
			IPersistenceContext persistenceContext = source.PersistenceContext;
			CollectionCacheEntry cacheEntry = (CollectionCacheEntry)persister.CacheEntryStructure.Destructure(ce, factory);
			await (cacheEntry.AssembleAsync(collection, persister, persistenceContext.GetCollectionOwner(id, persister), cancellationToken)).ConfigureAwait(false);
			persistenceContext.GetCollectionEntry(collection).PostInitialize(collection);
			return true;
		}
	}
}
/// <summary> Try to initialize a collection from the cache</summary>
private bool InitializeCollectionFromCache(object id, ICollectionPersister persister, IPersistentCollection collection, ISessionImplementor source)
{
	if (!(source.EnabledFilters.Count == 0) && persister.IsAffectedByEnabledFilters(source))
	{
		log.Debug("disregarding cached version (if any) of collection due to enabled filters ");
		return false;
	}

	bool useCache = persister.HasCache && ((source.CacheMode & CacheMode.Get) == CacheMode.Get);
	if (!useCache)
	{
		return false;
	}
	else
	{
		ISessionFactoryImplementor factory = source.Factory;
		CacheKey ck = source.GenerateCacheKey(id, persister.KeyType, persister.Role);
		object ce = persister.Cache.Get(ck, source.Timestamp);

		if (factory.Statistics.IsStatisticsEnabled)
		{
			if (ce == null) { factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName); }
			else { factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName); }
		}

		if (ce == null) { log.DebugFormat("Collection cache miss: {0}", ck); }
		else { log.DebugFormat("Collection cache hit: {0}", ck); }

		if (ce == null)
		{
			return false;
		}
		else
		{
			IPersistenceContext persistenceContext = source.PersistenceContext;
			CollectionCacheEntry cacheEntry = (CollectionCacheEntry)persister.CacheEntryStructure.Destructure(ce, factory);
			cacheEntry.Assemble(collection, persister, persistenceContext.GetCollectionOwner(id, persister));
			persistenceContext.GetCollectionEntry(collection).PostInitialize(collection);
			return true;
		}
	}
}
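A hedged usage sketch of how the listener above gets exercised: touching an uninitialized lazy collection raises the initialize-collection event, which probes the second-level cache before falling back to SQL. Customer, Orders, customerId and sessionFactory are placeholder names.

// Hedged usage sketch (placeholder entity and identifiers)
using (var session = sessionFactory.OpenSession())
{
	var customer = session.Get<Customer>(customerId);
	// forcing initialization of the lazy collection; it may be served from the collection cache region
	NHibernateUtil.Initialize(customer.Orders);
}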
/// <summary>
/// Performs a pessimistic lock upgrade on a given entity, if needed.
/// </summary>
/// <param name="entity">The entity for which to upgrade the lock.</param>
/// <param name="entry">The entity's EntityEntry instance.</param>
/// <param name="requestedLockMode">The lock mode being requested for locking. </param>
/// <param name="source">The session which is the source of the event being processed.</param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the work</param>
protected virtual async Task UpgradeLockAsync(object entity, EntityEntry entry, LockMode requestedLockMode, ISessionImplementor source, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	if (requestedLockMode.GreaterThan(entry.LockMode))
	{
		// The user requested a "greater" (i.e. more restrictive) form of
		// pessimistic lock
		if (entry.Status != Status.Loaded)
		{
			throw new ObjectDeletedException("attempted to lock a deleted instance", entry.Id, entry.EntityName);
		}

		IEntityPersister persister = entry.Persister;
		if (log.IsDebugEnabled())
		{
			log.Debug("locking {0} in mode: {1}", MessageHelper.InfoString(persister, entry.Id, source.Factory), requestedLockMode);
		}

		ISoftLock slock;
		CacheKey ck;
		if (persister.HasCache)
		{
			ck = source.GenerateCacheKey(entry.Id, persister.IdentifierType, persister.RootEntityName);
			slock = await (persister.Cache.LockAsync(ck, entry.Version, cancellationToken)).ConfigureAwait(false);
		}
		else
		{
			ck = null;
			slock = null;
		}

		try
		{
			if (persister.IsVersioned && requestedLockMode == LockMode.Force)
			{
				// todo : should we check the current isolation mode explicitly?
				object nextVersion = await (persister.ForceVersionIncrementAsync(entry.Id, entry.Version, source, cancellationToken)).ConfigureAwait(false);
				entry.ForceLocked(entity, nextVersion);
			}
			else
			{
				await (persister.LockAsync(entry.Id, entry.Version, entity, requestedLockMode, source, cancellationToken)).ConfigureAwait(false);
			}
			entry.LockMode = requestedLockMode;
		}
		finally
		{
			// the database now holds a lock + the object is flushed from the cache,
			// so release the soft lock
			if (persister.HasCache)
			{
				await (persister.Cache.ReleaseAsync(ck, slock, cancellationToken)).ConfigureAwait(false);
			}
		}
	}
}
/// <summary>
/// Performs a pessimistic lock upgrade on a given entity, if needed.
/// </summary>
/// <param name="entity">The entity for which to upgrade the lock.</param>
/// <param name="entry">The entity's EntityEntry instance.</param>
/// <param name="requestedLockMode">The lock mode being requested for locking. </param>
/// <param name="source">The session which is the source of the event being processed.</param>
protected virtual void UpgradeLock(object entity, EntityEntry entry, LockMode requestedLockMode, ISessionImplementor source)
{
	if (requestedLockMode.GreaterThan(entry.LockMode))
	{
		// The user requested a "greater" (i.e. more restrictive) form of
		// pessimistic lock
		if (entry.Status != Status.Loaded)
		{
			throw new ObjectDeletedException("attempted to lock a deleted instance", entry.Id, entry.EntityName);
		}

		IEntityPersister persister = entry.Persister;
		if (log.IsDebugEnabled)
		{
			log.Debug(string.Format("locking {0} in mode: {1}", MessageHelper.InfoString(persister, entry.Id, source.Factory), requestedLockMode));
		}

		ISoftLock slock;
		CacheKey ck;
		if (persister.HasCache)
		{
			ck = source.GenerateCacheKey(entry.Id, persister.IdentifierType, persister.RootEntityName);
			slock = persister.Cache.Lock(ck, entry.Version);
		}
		else
		{
			ck = null;
			slock = null;
		}

		try
		{
			if (persister.IsVersioned && requestedLockMode == LockMode.Force)
			{
				// todo : should we check the current isolation mode explicitly?
				object nextVersion = persister.ForceVersionIncrement(entry.Id, entry.Version, source);
				entry.ForceLocked(entity, nextVersion);
			}
			else
			{
				persister.Lock(entry.Id, entry.Version, entity, requestedLockMode, source);
			}
			entry.LockMode = requestedLockMode;
		}
		finally
		{
			// the database now holds a lock + the object is flushed from the cache,
			// so release the soft lock
			if (persister.HasCache)
			{
				persister.Cache.Release(ck, slock);
			}
		}
	}
}
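A hedged usage sketch of what typically triggers the lock upgrade above: escalating an already-loaded entity to a stronger pessimistic lock via ISession.Lock. Order, orderId and sessionFactory are placeholder names.

// Hedged usage sketch (placeholder entity and identifiers)
using (var session = sessionFactory.OpenSession())
using (var tx = session.BeginTransaction())
{
	var order = session.Get<Order>(orderId);
	// requesting a more restrictive lock mode drives the UpgradeLock path,
	// including the second-level cache soft lock/release around the database lock
	session.Lock(order, LockMode.Upgrade);
	tx.Commit();
}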
/// <summary> Add the collection to the second-level cache </summary>
/// <param name="lce">The entry representing the collection to add </param>
/// <param name="persister">The persister </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the work</param>
private async Task AddCollectionToCacheAsync(LoadingCollectionEntry lce, ICollectionPersister persister, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	ISessionImplementor session = LoadContext.PersistenceContext.Session;
	ISessionFactoryImplementor factory = session.Factory;

	if (log.IsDebugEnabled())
	{
		log.Debug("Caching collection: {0}", MessageHelper.CollectionInfoString(persister, lce.Collection, lce.Key, session));
	}

	if (!(session.EnabledFilters.Count == 0) && persister.IsAffectedByEnabledFilters(session))
	{
		// some filters affecting the collection are enabled on the session, so do not do the put into the cache.
		log.Debug("Refusing to add to cache due to enabled filters");
		// todo : add the notion of enabled filters to the CacheKey to differentiate filtered collections from non-filtered;
		// but CacheKey is currently used for both collections and entities; would ideally need to define two separate ones;
		// currently this works in conjunction with the check on
		// DefaultInitializeCollectionEventHandler.initializeCollectionFromCache() (which makes sure to not read from
		// cache with enabled filters).
		return; // EARLY EXIT!!!!!
	}

	IComparer versionComparator;
	object version;
	if (persister.IsVersioned)
	{
		versionComparator = persister.OwnerEntityPersister.VersionType.Comparator;
		object collectionOwner = LoadContext.PersistenceContext.GetCollectionOwner(lce.Key, persister);
		if (collectionOwner == null)
		{
			return;
		}
		version = LoadContext.PersistenceContext.GetEntry(collectionOwner).Version;
	}
	else
	{
		version = null;
		versionComparator = null;
	}

	CollectionCacheEntry entry = new CollectionCacheEntry(lce.Collection, persister);
	CacheKey cacheKey = session.GenerateCacheKey(lce.Key, persister.KeyType, persister.Role);
	bool put = await (persister.Cache.PutAsync(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version, versionComparator,
	                                           factory.Settings.IsMinimalPutsEnabled && session.CacheMode != CacheMode.Refresh, cancellationToken)).ConfigureAwait(false);

	if (put && factory.Statistics.IsStatisticsEnabled)
	{
		factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName);
	}
}
/// <summary> Attempts to load the entity from the second-level cache. </summary>
/// <param name="event">The load event </param>
/// <param name="persister">The persister for the entity being requested for load </param>
/// <param name="options">The load options. </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the work</param>
/// <returns> The entity from the second-level cache, or null. </returns>
protected virtual async Task<object> LoadFromSecondLevelCacheAsync(LoadEvent @event, IEntityPersister persister, LoadType options, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	ISessionImplementor source = @event.Session;
	bool useCache = persister.HasCache && source.CacheMode.HasFlag(CacheMode.Get) && @event.LockMode.LessThan(LockMode.Read);

	if (useCache)
	{
		ISessionFactoryImplementor factory = source.Factory;
		CacheKey ck = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName);
		object ce = await (persister.Cache.GetAsync(ck, source.Timestamp, cancellationToken)).ConfigureAwait(false);

		if (factory.Statistics.IsStatisticsEnabled)
		{
			if (ce == null)
			{
				factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName);
				log.DebugFormat("Entity cache miss: {0}", ck);
			}
			else
			{
				factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName);
				log.DebugFormat("Entity cache hit: {0}", ck);
			}
		}

		if (ce != null)
		{
			CacheEntry entry = (CacheEntry)persister.CacheEntryStructure.Destructure(ce, factory);

			// Entity was found in second-level cache...
			// NH: Different behavior (take a look to options.ExactPersister (NH-295))
			if (!options.ExactPersister || persister.EntityMetamodel.SubclassEntityNames.Contains(entry.Subclass))
			{
				return await (AssembleCacheEntryAsync(entry, @event.EntityId, persister, @event, cancellationToken)).ConfigureAwait(false);
			}
		}
	}

	return null;
}
/// <summary>
/// Perform the second step of 2-phase load. Fully initialize the entity instance.
/// After processing a JDBC result set, we "resolve" all the associations
/// between the entities which were instantiated and had their state
/// "hydrated" into an array
/// </summary>
internal static void InitializeEntity(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent,
                                      PostLoadEvent postLoadEvent, Action<IEntityPersister, CachePutData> cacheBatchingHandler)
{
	//TODO: Should this be an InitializeEntityEventListener??? (watch out for performance!)
	bool statsEnabled = session.Factory.Statistics.IsStatisticsEnabled;
	var stopWatch = new Stopwatch();
	if (statsEnabled)
	{
		stopWatch.Start();
	}

	IPersistenceContext persistenceContext = session.PersistenceContext;
	EntityEntry entityEntry = persistenceContext.GetEntry(entity);
	if (entityEntry == null)
	{
		throw new AssertionFailure("possible non-threadsafe access to the session");
	}

	IEntityPersister persister = entityEntry.Persister;
	object id = entityEntry.Id;
	object[] hydratedState = entityEntry.LoadedState;

	if (log.IsDebugEnabled())
	{
		log.Debug("resolving associations for {0}", MessageHelper.InfoString(persister, id, session.Factory));
	}

	IType[] types = persister.PropertyTypes;
	var collectionToResolveIndexes = new List<int>(hydratedState.Length);
	for (int i = 0; i < hydratedState.Length; i++)
	{
		object value = hydratedState[i];
		if (!Equals(LazyPropertyInitializer.UnfetchedProperty, value) && !(Equals(BackrefPropertyAccessor.Unknown, value)))
		{
			if (types[i].IsCollectionType)
			{
				// Resolve them last, because they may depend on other properties if they use a property-ref
				collectionToResolveIndexes.Add(i);
				continue;
			}

			hydratedState[i] = types[i].ResolveIdentifier(value, session, entity);
		}
	}

	foreach (var i in collectionToResolveIndexes)
	{
		hydratedState[i] = types[i].ResolveIdentifier(hydratedState[i], session, entity);
	}

	//Must occur after resolving identifiers!
	if (session.IsEventSource)
	{
		preLoadEvent.Entity = entity;
		preLoadEvent.State = hydratedState;
		preLoadEvent.Id = id;
		preLoadEvent.Persister = persister;
		IPreLoadEventListener[] listeners = session.Listeners.PreLoadEventListeners;
		for (int i = 0; i < listeners.Length; i++)
		{
			listeners[i].OnPreLoad(preLoadEvent);
		}
	}

	persister.SetPropertyValues(entity, hydratedState);

	ISessionFactoryImplementor factory = session.Factory;
	if (persister.HasCache && session.CacheMode.HasFlag(CacheMode.Put))
	{
		if (log.IsDebugEnabled())
		{
			log.Debug("adding entity to second-level cache: {0}", MessageHelper.InfoString(persister, id, session.Factory));
		}

		object version = Versioning.GetVersion(hydratedState, persister);
		CacheEntry entry = CacheEntry.Create(hydratedState, persister, version, session, entity);
		CacheKey cacheKey = session.GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName);

		if (cacheBatchingHandler != null && persister.IsBatchLoadable)
		{
			cacheBatchingHandler(
				persister,
				new CachePutData(
					cacheKey,
					persister.CacheEntryStructure.Structure(entry),
					version,
					persister.IsVersioned ? persister.VersionType.Comparator : null,
					UseMinimalPuts(session, entityEntry)));
		}
		else
		{
			bool put = persister.Cache.Put(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version,
			                               persister.IsVersioned ? persister.VersionType.Comparator : null,
			                               UseMinimalPuts(session, entityEntry));

			if (put && factory.Statistics.IsStatisticsEnabled)
			{
				factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName);
			}
		}
	}

	bool isReallyReadOnly = readOnly;
	if (!persister.IsMutable)
	{
		isReallyReadOnly = true;
	}
	else
	{
		object proxy = persistenceContext.GetProxy(entityEntry.EntityKey);
		if (proxy != null)
		{
			// there is already a proxy for this impl
			// only set the status to read-only if the proxy is read-only
			isReallyReadOnly = ((INHibernateProxy)proxy).HibernateLazyInitializer.ReadOnly;
		}
	}

	if (isReallyReadOnly)
	{
		//no need to take a snapshot - this is a
		//performance optimization, but not really
		//important, except for entities with huge
		//mutable property values
		persistenceContext.SetEntryStatus(entityEntry, Status.ReadOnly);
	}
	else
	{
		//take a snapshot
		TypeHelper.DeepCopy(hydratedState, persister.PropertyTypes, persister.PropertyUpdateability, hydratedState, session);
		persistenceContext.SetEntryStatus(entityEntry, Status.Loaded);
	}

	persister.AfterInitialize(entity, session);

	if (session.IsEventSource)
	{
		postLoadEvent.Entity = entity;
		postLoadEvent.Id = id;
		postLoadEvent.Persister = persister;
		IPostLoadEventListener[] listeners = session.Listeners.PostLoadEventListeners;
		for (int i = 0; i < listeners.Length; i++)
		{
			listeners[i].OnPostLoad(postLoadEvent);
		}
	}

	if (log.IsDebugEnabled())
	{
		log.Debug("done materializing entity {0}", MessageHelper.InfoString(persister, id, session.Factory));
	}

	if (statsEnabled)
	{
		stopWatch.Stop();
		factory.StatisticsImplementor.LoadEntity(persister.EntityName, stopWatch.Elapsed);
	}
}
public virtual bool? IsTransient(object entity, ISessionImplementor session)
{
	object id;
	if (CanExtractIdOutOfEntity)
	{
		id = GetIdentifier(entity, session.EntityMode);
	}
	else
	{
		id = null;
	}

	// we *always* assume an instance with a null
	// identifier or no identifier property is unsaved!
	if (id == null)
	{
		return true;
	}

	// check the version unsaved-value, if appropriate
	if (IsVersioned)
	{
		object version = GetVersion(entity, session.EntityMode);
		// let this take precedence if defined, since it works for
		// assigned identifiers
		bool? result = entityMetamodel.VersionProperty.UnsavedValue.IsUnsaved(version);
		if (result.HasValue)
		{
			return result;
		}
	}

	// check the id unsaved-value
	bool? result2 = entityMetamodel.IdentifierProperty.UnsavedValue.IsUnsaved(id);
	if (result2.HasValue)
	{
		if (IdentifierGenerator is Assigned)
		{
			// if using assigned identifier, we can only make assumptions
			// if the value is a known unsaved-value
			if (result2.Value)
				return true;
		}
		else
		{
			return result2;
		}
	}

	// check to see if it is in the second-level cache
	if (HasCache)
	{
		CacheKey ck = session.GenerateCacheKey(id, IdentifierType, RootEntityName);
		if (Cache.Get(ck, session.Timestamp) != null)
			return false;
	}

	return null;
}
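A hedged usage sketch: SaveOrUpdate relies on transient/persistent detection of the kind shown above (unsaved-value checks, then a second-level cache probe) to decide between an INSERT and an UPDATE. entity and sessionFactory are placeholder names.

// Hedged usage sketch (placeholder names)
using (var session = sessionFactory.OpenSession())
using (var tx = session.BeginTransaction())
{
	// inserts if the instance is considered transient, otherwise updates
	session.SaveOrUpdate(entity);
	tx.Commit();
}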
public virtual object InitializeLazyProperty(string fieldName, object entity, ISessionImplementor session)
{
	object id = session.GetContextEntityIdentifier(entity);

	EntityEntry entry = session.PersistenceContext.GetEntry(entity);
	if (entry == null)
		throw new HibernateException("entity is not associated with the session: " + id);

	if (log.IsDebugEnabled)
	{
		log.Debug(
			string.Format("initializing lazy properties of: {0}, field access: {1}",
			              MessageHelper.InfoString(this, id, Factory), fieldName));
	}

	if (HasCache)
	{
		CacheKey cacheKey = session.GenerateCacheKey(id, IdentifierType, EntityName);
		object ce = Cache.Get(cacheKey, session.Timestamp);
		if (ce != null)
		{
			CacheEntry cacheEntry = (CacheEntry)CacheEntryStructure.Destructure(ce, factory);
			if (!cacheEntry.AreLazyPropertiesUnfetched)
			{
				//note early exit here:
				return InitializeLazyPropertiesFromCache(fieldName, entity, session, entry, cacheEntry);
			}
		}
	}

	return InitializeLazyPropertiesFromDatastore(fieldName, entity, session, id, entry);
}
public override void Execute()
{
	ISessionImplementor session = Session;
	object id = Id;
	IEntityPersister persister = Persister;
	object instance = Instance;

	bool statsEnabled = Session.Factory.Statistics.IsStatisticsEnabled;
	Stopwatch stopwatch = null;
	if (statsEnabled)
	{
		stopwatch = Stopwatch.StartNew();
	}

	bool veto = PreUpdate();

	ISessionFactoryImplementor factory = Session.Factory;

	if (persister.IsVersionPropertyGenerated)
	{
		// we need to grab the version value from the entity, otherwise
		// we have issues with generated-version entities that may have
		// multiple actions queued during the same flush
		previousVersion = persister.GetVersion(instance);
	}

	CacheKey ck = null;
	if (persister.HasCache)
	{
		ck = session.GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName);
		slock = persister.Cache.Lock(ck, previousVersion);
	}

	if (!veto)
	{
		persister.Update(id, state, dirtyFields, hasDirtyCollection, previousState, previousVersion, instance, null, session);
	}

	EntityEntry entry = Session.PersistenceContext.GetEntry(instance);
	if (entry == null)
	{
		throw new AssertionFailure("Possible nonthreadsafe access to session");
	}

	if (entry.Status == Status.Loaded || persister.IsVersionPropertyGenerated)
	{
		// get the updated snapshot of the entity state by cloning current state;
		// it is safe to copy in place, since by this time no-one else (should have)
		// has a reference to the array
		TypeHelper.DeepCopy(state, persister.PropertyTypes, persister.PropertyCheckability, state, Session);
		if (persister.HasUpdateGeneratedProperties)
		{
			// this entity defines property generation, so process those generated
			// values...
			persister.ProcessUpdateGeneratedProperties(id, instance, state, Session);
			if (persister.IsVersionPropertyGenerated)
			{
				nextVersion = Versioning.GetVersion(state, persister);
			}
		}
		// have the entity entry perform post-update processing, passing it the
		// update state and the new version (if one).
		entry.PostUpdate(instance, state, nextVersion);
	}

	if (persister.HasCache)
	{
		if (persister.IsCacheInvalidationRequired || entry.Status != Status.Loaded)
		{
			persister.Cache.Evict(ck);
		}
		else
		{
			CacheEntry ce = CacheEntry.Create(state, persister, nextVersion, Session, instance);
			cacheEntry = persister.CacheEntryStructure.Structure(ce);
			bool put = persister.Cache.Update(ck, cacheEntry, nextVersion, previousVersion);
			if (put && factory.Statistics.IsStatisticsEnabled)
			{
				factory.StatisticsImplementor.SecondLevelCachePut(Persister.Cache.RegionName);
			}
		}
	}

	PostUpdate();

	if (statsEnabled && !veto)
	{
		stopwatch.Stop();
		factory.StatisticsImplementor.UpdateEntity(Persister.EntityName, stopwatch.Elapsed);
	}
}
/// <summary>
/// Perform the second step of 2-phase load. Fully initialize the entity instance.
/// After processing a JDBC result set, we "resolve" all the associations
/// between the entities which were instantiated and had their state
/// "hydrated" into an array
/// </summary>
public static async Task InitializeEntityAsync(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent,
                                               PostLoadEvent postLoadEvent, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	//TODO: Should this be an InitializeEntityEventListener??? (watch out for performance!)
	bool statsEnabled = session.Factory.Statistics.IsStatisticsEnabled;
	var stopWatch = new Stopwatch();
	if (statsEnabled)
	{
		stopWatch.Start();
	}

	IPersistenceContext persistenceContext = session.PersistenceContext;
	EntityEntry entityEntry = persistenceContext.GetEntry(entity);
	if (entityEntry == null)
	{
		throw new AssertionFailure("possible non-threadsafe access to the session");
	}

	IEntityPersister persister = entityEntry.Persister;
	object id = entityEntry.Id;
	object[] hydratedState = entityEntry.LoadedState;

	if (log.IsDebugEnabled)
	{
		log.Debug("resolving associations for " + MessageHelper.InfoString(persister, id, session.Factory));
	}

	IType[] types = persister.PropertyTypes;
	for (int i = 0; i < hydratedState.Length; i++)
	{
		object value = hydratedState[i];
		if (!Equals(LazyPropertyInitializer.UnfetchedProperty, value) && !(Equals(BackrefPropertyAccessor.Unknown, value)))
		{
			hydratedState[i] = await (types[i].ResolveIdentifierAsync(value, session, entity, cancellationToken)).ConfigureAwait(false);
		}
	}

	//Must occur after resolving identifiers!
	if (session.IsEventSource)
	{
		preLoadEvent.Entity = entity;
		preLoadEvent.State = hydratedState;
		preLoadEvent.Id = id;
		preLoadEvent.Persister = persister;
		IPreLoadEventListener[] listeners = session.Listeners.PreLoadEventListeners;
		for (int i = 0; i < listeners.Length; i++)
		{
			await (listeners[i].OnPreLoadAsync(preLoadEvent, cancellationToken)).ConfigureAwait(false);
		}
	}

	persister.SetPropertyValues(entity, hydratedState);

	ISessionFactoryImplementor factory = session.Factory;
	if (persister.HasCache && session.CacheMode.HasFlag(CacheMode.Put))
	{
		if (log.IsDebugEnabled)
		{
			log.Debug("adding entity to second-level cache: " + MessageHelper.InfoString(persister, id, session.Factory));
		}

		object version = Versioning.GetVersion(hydratedState, persister);
		CacheEntry entry = new CacheEntry(hydratedState, persister, entityEntry.LoadedWithLazyPropertiesUnfetched, version, session, entity);
		CacheKey cacheKey = session.GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName);
		bool put = await (persister.Cache.PutAsync(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version,
		                                           persister.IsVersioned ? persister.VersionType.Comparator : null,
		                                           UseMinimalPuts(session, entityEntry), cancellationToken)).ConfigureAwait(false);

		if (put && factory.Statistics.IsStatisticsEnabled)
		{
			factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName);
		}
	}

	bool isReallyReadOnly = readOnly;
	if (!persister.IsMutable)
	{
		isReallyReadOnly = true;
	}
	else
	{
		object proxy = persistenceContext.GetProxy(entityEntry.EntityKey);
		if (proxy != null)
		{
			// there is already a proxy for this impl
			// only set the status to read-only if the proxy is read-only
			isReallyReadOnly = ((INHibernateProxy)proxy).HibernateLazyInitializer.ReadOnly;
		}
	}

	if (isReallyReadOnly)
	{
		//no need to take a snapshot - this is a
		//performance optimization, but not really
		//important, except for entities with huge
		//mutable property values
		persistenceContext.SetEntryStatus(entityEntry, Status.ReadOnly);
	}
	else
	{
		//take a snapshot
		TypeHelper.DeepCopy(hydratedState, persister.PropertyTypes, persister.PropertyUpdateability, hydratedState, session);
		persistenceContext.SetEntryStatus(entityEntry, Status.Loaded);
	}

	persister.AfterInitialize(entity, entityEntry.LoadedWithLazyPropertiesUnfetched, session);

	if (session.IsEventSource)
	{
		postLoadEvent.Entity = entity;
		postLoadEvent.Id = id;
		postLoadEvent.Persister = persister;
		IPostLoadEventListener[] listeners = session.Listeners.PostLoadEventListeners;
		for (int i = 0; i < listeners.Length; i++)
		{
			listeners[i].OnPostLoad(postLoadEvent);
		}
	}

	if (log.IsDebugEnabled)
	{
		log.Debug("done materializing entity " + MessageHelper.InfoString(persister, id, session.Factory));
	}

	if (statsEnabled)
	{
		stopWatch.Stop();
		factory.StatisticsImplementor.LoadEntity(persister.EntityName, stopWatch.Elapsed);
	}
}
public override async Task ExecuteAsync(CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	object id = Id;
	IEntityPersister persister = Persister;
	ISessionImplementor session = Session;
	object instance = Instance;

	bool statsEnabled = Session.Factory.Statistics.IsStatisticsEnabled;
	Stopwatch stopwatch = null;
	if (statsEnabled)
	{
		stopwatch = Stopwatch.StartNew();
	}

	bool veto = await (PreDeleteAsync(cancellationToken)).ConfigureAwait(false);

	object tmpVersion = version;
	if (persister.IsVersionPropertyGenerated)
	{
		// we need to grab the version value from the entity, otherwise
		// we have issues with generated-version entities that may have
		// multiple actions queued during the same flush
		tmpVersion = persister.GetVersion(instance);
	}

	CacheKey ck;
	if (persister.HasCache)
	{
		ck = session.GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName);
		sLock = await (persister.Cache.LockAsync(ck, version, cancellationToken)).ConfigureAwait(false);
	}
	else
	{
		ck = null;
	}

	if (!isCascadeDeleteEnabled && !veto)
	{
		await (persister.DeleteAsync(id, tmpVersion, instance, session, cancellationToken)).ConfigureAwait(false);
	}

	//postDelete:
	// After actually deleting a row, record the fact that the instance no longer
	// exists on the database (needed for identity-column key generation), and
	// remove it from the session cache
	IPersistenceContext persistenceContext = session.PersistenceContext;
	EntityEntry entry = persistenceContext.RemoveEntry(instance);
	if (entry == null)
	{
		throw new AssertionFailure("Possible nonthreadsafe access to session");
	}
	entry.PostDelete();

	EntityKey key = session.GenerateEntityKey(entry.Id, entry.Persister);
	persistenceContext.RemoveEntity(key);
	persistenceContext.RemoveProxy(key);

	if (persister.HasCache)
	{
		await (persister.Cache.EvictAsync(ck, cancellationToken)).ConfigureAwait(false);
	}

	await (PostDeleteAsync(cancellationToken)).ConfigureAwait(false);

	if (statsEnabled && !veto)
	{
		stopwatch.Stop();
		Session.Factory.StatisticsImplementor.DeleteEntity(Persister.EntityName, stopwatch.Elapsed);
	}
}
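A hedged usage sketch of the delete path above: deleting an entity and committing flushes the queued delete action, which soft-locks the cache key, deletes the row, and then evicts the cached entry. Order, orderId and sessionFactory are placeholder names.

// Hedged usage sketch (placeholder entity and identifiers)
using (var session = sessionFactory.OpenSession())
using (var tx = session.BeginTransaction())
{
	var order = session.Get<Order>(orderId);
	session.Delete(order);
	tx.Commit(); // flush executes the delete action and evicts the second-level cache entry
}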
/// <summary> Attempts to load the entity from the second-level cache. </summary>
/// <param name="event">The load event </param>
/// <param name="persister">The persister for the entity being requested for load </param>
/// <param name="options">The load options. </param>
/// <param name="cancellationToken">A cancellation token that can be used to cancel the work</param>
/// <returns> The entity from the second-level cache, or null. </returns>
protected virtual async Task<object> LoadFromSecondLevelCacheAsync(LoadEvent @event, IEntityPersister persister, LoadType options, CancellationToken cancellationToken)
{
	cancellationToken.ThrowIfCancellationRequested();
	ISessionImplementor source = @event.Session;
	bool useCache = persister.HasCache && source.CacheMode.HasFlag(CacheMode.Get) && @event.LockMode.LessThan(LockMode.Read);

	if (!useCache)
	{
		return null;
	}

	ISessionFactoryImplementor factory = source.Factory;
	var batchSize = persister.GetBatchSize();
	var entityBatch = source.PersistenceContext.BatchFetchQueue.QueryCacheQueue
		?.GetEntityBatch(persister, @event.EntityId);
	if (entityBatch != null || batchSize > 1 && persister.Cache.PreferMultipleGet())
	{
		// The first item in the array is the item that we want to load
		if (entityBatch != null)
		{
			if (entityBatch.Length == 0)
			{
				return null; // The key was already checked
			}
			batchSize = entityBatch.Length;
		}

		if (entityBatch == null)
		{
			entityBatch = await (source.PersistenceContext.BatchFetchQueue.GetEntityBatchAsync(persister, @event.EntityId, batchSize, false, cancellationToken)).ConfigureAwait(false);
		}

		// Ignore null values as the retrieved batch may contain them when there are not enough
		// uninitialized entities in the queue
		var keys = new List<CacheKey>(batchSize);
		for (var i = 0; i < entityBatch.Length; i++)
		{
			var key = entityBatch[i];
			if (key == null)
			{
				break;
			}
			keys.Add(source.GenerateCacheKey(key, persister.IdentifierType, persister.RootEntityName));
		}

		var cachedObjects = await (persister.Cache.GetManyAsync(keys.ToArray(), source.Timestamp, cancellationToken)).ConfigureAwait(false);
		for (var i = 1; i < cachedObjects.Length; i++)
		{
			cancellationToken.ThrowIfCancellationRequested();
			await (AssembleAsync(
				keys[i],
				cachedObjects[i],
				new LoadEvent(entityBatch[i], @event.EntityClassName, @event.LockMode, @event.Session),
				false)).ConfigureAwait(false);
		}

		cancellationToken.ThrowIfCancellationRequested();
		return await (AssembleAsync(keys[0], cachedObjects[0], @event, true)).ConfigureAwait(false);
	}

	var cacheKey = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName);
	var cachedObject = await (persister.Cache.GetAsync(cacheKey, source.Timestamp, cancellationToken)).ConfigureAwait(false);
	cancellationToken.ThrowIfCancellationRequested();
	return await (AssembleAsync(cacheKey, cachedObject, @event, true)).ConfigureAwait(false);

	Task<object> AssembleAsync(CacheKey ck, object ce, LoadEvent evt, bool alterStatistics)
	{
		try
		{
			if (factory.Statistics.IsStatisticsEnabled && alterStatistics)
			{
				if (ce == null)
				{
					factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName);
					log.Debug("Entity cache miss: {0}", ck);
				}
				else
				{
					factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName);
					log.Debug("Entity cache hit: {0}", ck);
				}
			}

			if (ce != null)
			{
				CacheEntry entry = (CacheEntry)persister.CacheEntryStructure.Destructure(ce, factory);

				// Entity was found in second-level cache...
				// NH: Different behavior (take a look to options.ExactPersister (NH-295))
				if (!options.ExactPersister || persister.EntityMetamodel.SubclassEntityNames.Contains(entry.Subclass))
				{
					return AssembleCacheEntryAsync(entry, evt.EntityId, persister, evt, cancellationToken);
				}
			}

			return Task.FromResult<object>(null);
		}
		catch (Exception ex)
		{
			return Task.FromException<object>(ex);
		}
	}
}
/// <summary>
/// Perform the second step of 2-phase load. Fully initialize the entity instance.
/// After processing a JDBC result set, we "resolve" all the associations
/// between the entities which were instantiated and had their state
/// "hydrated" into an array
/// </summary>
public static void InitializeEntity(object entity, bool readOnly, ISessionImplementor session, PreLoadEvent preLoadEvent, PostLoadEvent postLoadEvent)
{
	//TODO: Should this be an InitializeEntityEventListener??? (watch out for performance!)
	bool statsEnabled = session.Factory.Statistics.IsStatisticsEnabled;
	var stopWatch = new Stopwatch();
	if (statsEnabled)
	{
		stopWatch.Start();
	}

	IPersistenceContext persistenceContext = session.PersistenceContext;
	EntityEntry entityEntry = persistenceContext.GetEntry(entity);
	if (entityEntry == null)
	{
		throw new AssertionFailure("possible non-threadsafe access to the session");
	}

	IEntityPersister persister = entityEntry.Persister;
	object id = entityEntry.Id;
	object[] hydratedState = entityEntry.LoadedState;

	if (log.IsDebugEnabled)
		log.Debug("resolving associations for " + MessageHelper.InfoString(persister, id, session.Factory));

	IType[] types = persister.PropertyTypes;
	for (int i = 0; i < hydratedState.Length; i++)
	{
		object value = hydratedState[i];
		if (!Equals(LazyPropertyInitializer.UnfetchedProperty, value) && !(Equals(BackrefPropertyAccessor.Unknown, value)))
		{
			hydratedState[i] = types[i].ResolveIdentifier(value, session, entity);
		}
	}

	//Must occur after resolving identifiers!
	if (session.IsEventSource)
	{
		preLoadEvent.Entity = entity;
		preLoadEvent.State = hydratedState;
		preLoadEvent.Id = id;
		preLoadEvent.Persister = persister;
		IPreLoadEventListener[] listeners = session.Listeners.PreLoadEventListeners;
		for (int i = 0; i < listeners.Length; i++)
		{
			listeners[i].OnPreLoad(preLoadEvent);
		}
	}

	persister.SetPropertyValues(entity, hydratedState, session.EntityMode);

	ISessionFactoryImplementor factory = session.Factory;
	if (persister.HasCache && ((session.CacheMode & CacheMode.Put) == CacheMode.Put))
	{
		if (log.IsDebugEnabled)
			log.Debug("adding entity to second-level cache: " + MessageHelper.InfoString(persister, id, session.Factory));

		object version = Versioning.GetVersion(hydratedState, persister);
		CacheEntry entry = new CacheEntry(hydratedState, persister, entityEntry.LoadedWithLazyPropertiesUnfetched, version, session, entity);
		CacheKey cacheKey = session.GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName);
		bool put = persister.Cache.Put(cacheKey, persister.CacheEntryStructure.Structure(entry), session.Timestamp, version,
		                               persister.IsVersioned ? persister.VersionType.Comparator : null,
		                               UseMinimalPuts(session, entityEntry));

		if (put && factory.Statistics.IsStatisticsEnabled)
		{
			factory.StatisticsImplementor.SecondLevelCachePut(persister.Cache.RegionName);
		}
	}

	bool isReallyReadOnly = readOnly;
	if (!persister.IsMutable)
	{
		isReallyReadOnly = true;
	}
	else
	{
		object proxy = persistenceContext.GetProxy(entityEntry.EntityKey);
		if (proxy != null)
		{
			// there is already a proxy for this impl
			// only set the status to read-only if the proxy is read-only
			isReallyReadOnly = ((INHibernateProxy)proxy).HibernateLazyInitializer.ReadOnly;
		}
	}

	if (isReallyReadOnly)
	{
		//no need to take a snapshot - this is a
		//performance optimization, but not really
		//important, except for entities with huge
		//mutable property values
		persistenceContext.SetEntryStatus(entityEntry, Status.ReadOnly);
	}
	else
	{
		//take a snapshot
		TypeHelper.DeepCopy(hydratedState, persister.PropertyTypes, persister.PropertyUpdateability, hydratedState, session);
		persistenceContext.SetEntryStatus(entityEntry, Status.Loaded);
	}

	persister.AfterInitialize(entity, entityEntry.LoadedWithLazyPropertiesUnfetched, session);

	if (session.IsEventSource)
	{
		postLoadEvent.Entity = entity;
		postLoadEvent.Id = id;
		postLoadEvent.Persister = persister;
		IPostLoadEventListener[] listeners = session.Listeners.PostLoadEventListeners;
		for (int i = 0; i < listeners.Length; i++)
		{
			listeners[i].OnPostLoad(postLoadEvent);
		}
	}

	if (log.IsDebugEnabled)
		log.Debug("done materializing entity " + MessageHelper.InfoString(persister, id, session.Factory));

	if (statsEnabled)
	{
		stopWatch.Stop();
		factory.StatisticsImplementor.LoadEntity(persister.EntityName, stopWatch.Elapsed);
	}
}
public CacheKey GenerateCacheKey(object id, IType type, string entityOrRoleName)
{
	return _session.GenerateCacheKey(id, type, entityOrRoleName);
}
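A hedged sketch of how a key produced by this wrapper can be used to probe the second-level cache, mirroring the Get calls in the surrounding snippets; _session, persister and id are assumed to already be in scope.

// Hedged sketch (assumes _session, persister and id are available)
CacheKey ck = GenerateCacheKey(id, persister.IdentifierType, persister.RootEntityName);
object cached = persister.Cache.Get(ck, _session.Timestamp); // null means a cache miss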
/// <summary> Attempts to load the entity from the second-level cache. </summary>
/// <param name="event">The load event </param>
/// <param name="persister">The persister for the entity being requested for load </param>
/// <param name="options">The load options. </param>
/// <returns> The entity from the second-level cache, or null. </returns>
protected virtual object LoadFromSecondLevelCache(LoadEvent @event, IEntityPersister persister, LoadType options)
{
	ISessionImplementor source = @event.Session;
	bool useCache = persister.HasCache && source.CacheMode.HasFlag(CacheMode.Get) && @event.LockMode.LessThan(LockMode.Read);

	if (!useCache)
	{
		return null;
	}

	ISessionFactoryImplementor factory = source.Factory;
	var batchSize = persister.GetBatchSize();
	if (batchSize > 1 && persister.Cache.PreferMultipleGet())
	{
		// The first item in the array is the item that we want to load
		var entityBatch = source.PersistenceContext.BatchFetchQueue.GetEntityBatch(persister, @event.EntityId, batchSize, false);
		// Ignore null values as the retrieved batch may contain them when there are not enough
		// uninitialized entities in the queue
		var keys = new List<CacheKey>(batchSize);
		for (var i = 0; i < entityBatch.Length; i++)
		{
			var key = entityBatch[i];
			if (key == null)
			{
				break;
			}
			keys.Add(source.GenerateCacheKey(key, persister.IdentifierType, persister.RootEntityName));
		}

		var cachedObjects = persister.Cache.GetMany(keys.ToArray(), source.Timestamp);
		for (var i = 1; i < cachedObjects.Length; i++)
		{
			Assemble(
				keys[i],
				cachedObjects[i],
				new LoadEvent(entityBatch[i], @event.EntityClassName, @event.LockMode, @event.Session),
				false);
		}
		return Assemble(keys[0], cachedObjects[0], @event, true);
	}

	var cacheKey = source.GenerateCacheKey(@event.EntityId, persister.IdentifierType, persister.RootEntityName);
	var cachedObject = persister.Cache.Get(cacheKey, source.Timestamp);
	return Assemble(cacheKey, cachedObject, @event, true);

	object Assemble(CacheKey ck, object ce, LoadEvent evt, bool alterStatistics)
	{
		if (factory.Statistics.IsStatisticsEnabled && alterStatistics)
		{
			if (ce == null)
			{
				factory.StatisticsImplementor.SecondLevelCacheMiss(persister.Cache.RegionName);
				log.Debug("Entity cache miss: {0}", ck);
			}
			else
			{
				factory.StatisticsImplementor.SecondLevelCacheHit(persister.Cache.RegionName);
				log.Debug("Entity cache hit: {0}", ck);
			}
		}

		if (ce != null)
		{
			CacheEntry entry = (CacheEntry)persister.CacheEntryStructure.Destructure(ce, factory);

			// Entity was found in second-level cache...
			// NH: Different behavior (take a look to options.ExactPersister (NH-295))
			if (!options.ExactPersister || persister.EntityMetamodel.SubclassEntityNames.Contains(entry.Subclass))
			{
				return AssembleCacheEntry(entry, evt.EntityId, persister, evt);
			}
		}

		return null;
	}
}
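None of the cache reads above run unless the second-level cache is switched on and the class (or collection) is mapped with a cache region. A hedged configuration-and-usage sketch; the cache provider choice, Order and orderId are placeholders, and mapping registration is omitted.

// Hedged configuration sketch (placeholder provider and entity names; mappings not shown)
var cfg = new NHibernate.Cfg.Configuration();
cfg.SetProperty(NHibernate.Cfg.Environment.UseSecondLevelCache, "true");
cfg.SetProperty(NHibernate.Cfg.Environment.CacheProvider, typeof(NHibernate.Cache.HashtableCacheProvider).AssemblyQualifiedName);
// the entity must also be mapped with a cache usage, e.g. <cache usage="read-write" /> in its mapping

var sessionFactory = cfg.BuildSessionFactory();
using (var session = sessionFactory.OpenSession())
{
	var first = session.Get<Order>(orderId);  // primes the second-level cache
}
using (var session = sessionFactory.OpenSession())
{
	var second = session.Get<Order>(orderId); // may be served by LoadFromSecondLevelCache above
}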