Example #1
        internal void WriteByteTo(IJournalledResource data,
                                  long position, int b)
        {
            if (PARANOID_CHECKS)
            {
                lock (write_lock) {
                    if (write_lock_count == 0)
                    {
                        Console.Out.WriteLine("Write without a Lock!");
                        Console.Out.WriteLine(new ApplicationException().StackTrace);
                    }
                }
            }

            long page_number = position / page_size;

            BMPage page = FetchPage(data, page_number);

            lock (page) {
                try {
                    page.Initialize();
                    page.Write((int)(position % page_size), (byte)b);
                } finally {
                    page.Dispose();
                }
            }
        }
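
WriteByteTo resolves the absolute byte position into a page number and an in-page offset before locking the page. A minimal standalone sketch of that decomposition, assuming a hypothetical page_size of 8192 bytes (the real value is whatever the buffer manager was configured with):

        // Sketch only: decompose an absolute position into page coordinates.
        long position    = 20000;
        int  page_size   = 8192;                          // assumed for illustration
        long page_number = position / page_size;          // 2
        int  page_offset = (int)(position % page_size);   // 3616 (20000 - 2 * 8192)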
Example #2
 /// <summary>
 /// Called when a page is accessed.
 /// </summary>
 /// <param name="page"></param>
 private void PageAccessed(BMPage page)
 {
     // lock (T_lock) {
     page.t = current_T;
     ++current_T;
     ++page.access_count;
     // }
 }
Example #3
            /// <summary>
            /// The calculation for finding the <i>weight</i> of a page in the cache.
            /// </summary>
            /// <param name="page"></param>
            /// <remarks>
            /// A heavier page is sorted lower and is therefore cleared from the
            /// cache faster.
            /// </remarks>
            /// <returns></returns>
            private float pageEnumValue(BMPage page)
            {
                // We cap the access counter so it cannot exceed 10000 accesses.  I'm
                // a little unsure if we should include this constant in the equation, but it
                // ensures that some old but highly accessed page will not stay in the
                // cache forever.
                long  bounded_page_count = System.Math.Min(page.access_count, 10000);
                float v = (1f / bounded_page_count) * (lbm.current_T - page.t);

                return(v);
            }
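
The weight grows with the time elapsed since the page was last touched and shrinks with its (bounded) access count, so stale, rarely used pages sort to the bottom of the cache and are purged first. A small sketch with hypothetical counter values:

            // Hypothetical values only: page A is old and rarely used, page B is
            // recent and heavily used, so A gets the larger weight and is purged first.
            long  current_T = 10000;
            float weight_a  = (1f / 2)   * (current_T - 4000);   // 3000
            float weight_b  = (1f / 500) * (current_T - 9000);   // 2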
Example #4
        internal void WriteByteArrayTo(IJournalledResource data, long position, byte[] buf, int off, int len)
        {
            if (PARANOID_CHECKS)
            {
                lock (write_lock) {
                    if (write_lock_count == 0)
                    {
                        Console.Out.WriteLine("Write without a Lock!");
                        Console.Out.WriteLine(new ApplicationException().StackTrace);
                    }
                }
            }

            long page_number  = position / page_size;
            int  start_offset = (int)(position % page_size);
            int  to_write     = System.Math.Min(len, page_size - start_offset);

            BMPage page = FetchPage(data, page_number);

            lock (page) {
                try {
                    page.Initialize();
                    page.Write(start_offset, buf, off, to_write);
                } finally {
                    page.Dispose();
                }
            }
            len -= to_write;

            while (len > 0)
            {
                off      += to_write;
                position += to_write;
                ++page_number;
                to_write = System.Math.Min(len, page_size);

                page = FetchPage(data, page_number);
                lock (page) {
                    try {
                        page.Initialize();
                        page.Write(0, buf, off, to_write);
                    } finally {
                        page.Dispose();
                    }
                }
                len -= to_write;
            }
        }
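
A write that spans a page boundary is split into chunks: Math.Min(len, page_size - start_offset) bytes go into the first page, and the loop then writes full (or final partial) pages. A sketch of the chunking with hypothetical numbers, again assuming an 8192-byte page:

        // Sketch only: 1000 bytes written at absolute position 8000 span two pages.
        int  page_size    = 8192;
        long position     = 8000;
        int  len          = 1000;
        int  start_offset = (int)(position % page_size);                     // 8000
        int  first_chunk  = System.Math.Min(len, page_size - start_offset);  // 192  -> page 0
        int  second_chunk = System.Math.Min(len - first_chunk, page_size);   // 808  -> page 1, offset 0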
Example #5
        // ------
        // Buffered access methods.  These are all thread-safe.  When a page is
        // accessed it is synchronized, so no two operations can read from or write
        // to the same page at the same time.  Operations can read/write different
        // pages concurrently, however, and this requires thread safety at a lower
        // level (in the IJournalledResource implementation).
        // ------

        internal int ReadByteFrom(IJournalledResource data, long position)
        {
            long page_number = position / page_size;
            int  v;

            BMPage page = FetchPage(data, page_number);

            lock (page) {
                try {
                    page.Initialize();
                    v = ((int)page.Read((int)(position % page_size))) & 0x0FF;
                } finally {
                    page.Dispose();
                }
            }

            return(v);
        }
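
The & 0x0FF mask keeps the returned value in the range 0..255. In C# a byte is already unsigned, so the mask is a defensive carry-over rather than a behavioural requirement; a tiny standalone sketch:

        // Sketch only: both expressions evaluate to 200 because byte is unsigned in C#.
        byte b        = 200;
        int  masked   = ((int)b) & 0x0FF;   // 200
        int  unmasked = b;                  // 200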
Example #6
        internal int ReadByteArrayFrom(IJournalledResource data, long position, byte[] buf, int off, int len)
        {
            int  orig_len     = len;
            long page_number  = position / page_size;
            int  start_offset = (int)(position % page_size);
            int  to_read      = System.Math.Min(len, page_size - start_offset);

            BMPage page = FetchPage(data, page_number);

            lock (page) {
                try {
                    page.Initialize();
                    page.Read(start_offset, buf, off, to_read);
                } finally {
                    page.Dispose();
                }
            }

            len -= to_read;
            while (len > 0)
            {
                off      += to_read;
                position += to_read;
                ++page_number;
                to_read = System.Math.Min(len, page_size);

                page = FetchPage(data, page_number);
                lock (page) {
                    try {
                        page.Initialize();
                        page.Read(0, buf, off, to_read);
                    } finally {
                        page.Dispose();
                    }
                }
                len -= to_read;
            }

            return(orig_len);
        }
Example #7
        /// <summary>
        /// Fetches and returns a page from a store.
        /// </summary>
        /// <param name="data"></param>
        /// <param name="page_number"></param>
        /// <remarks>
        /// Pages may be cached.  If the page is not available in the cache then a new
        /// <see cref="BMPage"/> object is created for the page requested.
        /// </remarks>
        /// <returns></returns>
        private BMPage FetchPage(IJournalledResource data, long page_number)
        {
            long id = data.Id;

            BMPage prev_page = null;
            bool   new_page  = false;
            BMPage page;

            lock (page_map) {
                // Generate the hash code for this page.
                int p = (CalcHashCode(id, page_number) & 0x07FFFFFFF) %
                        page_map.Length;
                // Search for this page in the hash
                page = page_map[p];
                while (page != null && !page.IsPage(id, page_number))
                {
                    prev_page = page;
                    page      = page.hash_next;
                }

                // Page isn't found so create it and add to the cache
                if (page == null)
                {
                    page = new BMPage(data, page_number, page_size);
                    // Add this page to the map
                    page.hash_next = page_map[p];
                    page_map[p]    = page;
                }
                else
                {
                    // Move this page to the head if it's not already at the head.
                    if (prev_page != null)
                    {
                        prev_page.hash_next = page.hash_next;
                        page.hash_next      = page_map[p];
                        page_map[p]         = page;
                    }
                }

                lock (page) {
                    // If the page is not in use then it must be newly set up, so add
                    // a reference.
                    if (page.NotInUse)
                    {
                        page.Reset();
                        new_page = true;
                        page.ReferenceAdd();
                    }
                    // Add a reference for this fetch
                    page.ReferenceAdd();
                }

                // If the page is new,
                if (new_page)
                {
                    PageCreated(page);
                }
                else
                {
                    PageAccessed(page);
                }
            }

            // Return the page.
            return(page);
        }
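
FetchPage uses separate chaining with a move-to-front step, so recently fetched pages end up at the head of their bucket. A standalone sketch of that lookup pattern, using a hypothetical Node type in place of BMPage (not thread-safe on its own; FetchPage does this inside lock (page_map)):

        private sealed class Node
        {
            public long Key;
            public Node Next;
        }

        private static Node Lookup(Node[] buckets, int b, long key)
        {
            Node prev = null;
            Node node = buckets[b];
            // Walk the chain looking for the key.
            while (node != null && node.Key != key)
            {
                prev = node;
                node = node.Next;
            }
            // On a hit that is not already at the head, unlink the node and
            // re-insert it at the front of the bucket.
            if (node != null && prev != null)
            {
                prev.Next  = node.Next;
                node.Next  = buckets[b];
                buckets[b] = node;
            }
            return node;
        }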
Example #8
        /// <summary>
        /// Called when a new page is created.
        /// </summary>
        /// <param name="page"></param>
        private void PageCreated(BMPage page)
        {
            // lock (T_lock) {

            if (PARANOID_CHECKS)
            {
                int i = page_list.IndexOf(page);
                if (i != -1)
                {
                    BMPage f = (BMPage)page_list[i];
                    if (f == page)
                    {
                        throw new ApplicationException("Same page added multiple times.");
                    }
                    if (f != null)
                    {
                        throw new ApplicationException("Duplicate pages.");
                    }
                }
            }

            page.t = current_T;
            ++current_T;

            ++current_page_count;
            page_list.Add(page);

            // Below is the page purge algorithm.  If the maximum number of pages
            // has been exceeded, we sort the page list, weighting each page by the
            // time since it was last accessed and its total number of accesses, and
            // clear the bottom 20% of this list.

            // Check if we should purge old pages and purge some if we do...
            if (current_page_count > max_pages)
            {
                // Purge 20% of the cache
                // Sort the pages by the current formula,
                //  ( 1 / page_access_count ) * (current_t - page_t)
                // Further, if the page has written data then we multiply its weight
                // by 0.75.  This scales down written pages so they have a better
                // chance of surviving in the cache than pages that were only read.
                Object[] pages = page_list.ToArray();
                Array.Sort(pages, PageCacheComparer);

                int purge_size = System.Math.Max((int)(pages.Length * 0.20f), 2);
                for (int i = 0; i < purge_size; ++i)
                {
                    BMPage dpage = (BMPage)pages[pages.Length - (i + 1)];
                    lock (dpage) {
                        dpage.Dispose();
                    }
                }

                // Remove all the elements from page_list and set it with the sorted
                // list (minus the elements we removed).
                page_list.Clear();
                for (int i = 0; i < pages.Length - purge_size; ++i)
                {
                    page_list.Add(pages[i]);
                }

                current_page_count -= purge_size;
            }
            // }
        }
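
The purge removes the heaviest 20% of the sorted page list but never fewer than two pages, so the cache shrinks even when it only slightly exceeds max_pages. Two hypothetical counts:

        // Hypothetical cache sizes only.
        int purge_for_256_pages = System.Math.Max((int)(256 * 0.20f), 2);   // 51
        int purge_for_5_pages   = System.Math.Max((int)(5 * 0.20f), 2);     // 2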
Example #9
        ///<summary>
        /// Sets a check point in the log.
        ///</summary>
        ///<param name="flush_journals"></param>
        /// <remarks>
        /// This logs a point that a recovery process should at least be able to
        /// rebuild back to. This will block if there are any write locks.
        /// <para>
        /// Some things to keep in mind when using this: you must ensure that no writes
        /// can occur while this operation is in progress. Typically this happens at the
        /// end of a commit, but you also need to ensure that nothing can happen in the
        /// background, such as records being deleted or items being inserted. The
        /// 'no write' restriction must be enforced at a high level. If care is not taken,
        /// the image written will not be clean, and if a crash occurs the recovered image
        /// will not be stable.
        /// </para>
        /// </remarks>
        public void SetCheckPoint(bool flush_journals)
        {
            // Wait until the writes have finished, and then set the
            // 'check_point_in_progress' bool.
            lock (write_lock) {
                while (write_lock_count > 0)
                {
                    Monitor.Wait(write_lock);
                }
                check_point_in_progress = true;
            }

            try {
                //      Console.Out.WriteLine("SET CHECKPOINT");
                lock (page_map) {
                    // Flush all the pages out to the log.
                    for (int i = 0; i < page_map.Length; ++i)
                    {
                        BMPage page = page_map[i];
                        BMPage prev = null;

                        while (page != null)
                        {
                            bool deleted_hash = false;
                            lock (page) {
                                // Flush the page (will only actually flush if there are changes)
                                page.Flush();

                                // Remove this page if it is no longer in use
                                if (page.NotInUse)
                                {
                                    deleted_hash = true;
                                    if (prev == null)
                                    {
                                        page_map[i] = page.hash_next;
                                    }
                                    else
                                    {
                                        prev.hash_next = page.hash_next;
                                    }
                                }
                            }
                            // Go to next page in hash chain
                            if (!deleted_hash)
                            {
                                prev = page;
                            }
                            page = page.hash_next;
                        }
                    }
                }

                journalled_system.SetCheckPoint(flush_journals);
            } finally {
                // Make sure we unset the 'check_point_in_progress' bool and notify
                // any blockers.
                lock (write_lock) {
                    check_point_in_progress = false;
                    Monitor.PulseAll(write_lock);
                }
            }
        }
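
SetCheckPoint drains writers through write_lock, write_lock_count and Monitor.Wait/PulseAll, but the writer side of that protocol is not shown in these excerpts. The sketch below (assumed member names, using System.Threading.Monitor) illustrates how it would pair with the code above:

        // Assumed fields, mirroring those referenced by SetCheckPoint.
        private readonly object write_lock = new object();
        private int  write_lock_count;
        private bool check_point_in_progress;

        private void EnterWrite()
        {
            lock (write_lock) {
                // Presumably a writer stalls while a check point is being written.
                while (check_point_in_progress) {
                    Monitor.Wait(write_lock);
                }
                ++write_lock_count;
            }
        }

        private void ExitWrite()
        {
            lock (write_lock) {
                --write_lock_count;
                // Wake SetCheckPoint if it is waiting for writers to drain.
                Monitor.PulseAll(write_lock);
            }
        }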
Example #10
            public override bool Equals(Object ob)
            {
                BMPage dest_page = (BMPage)ob;

                return(IsPage(dest_page.Id, dest_page.page));
            }
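
Overriding Equals normally goes hand in hand with overriding GetHashCode so that pages that compare equal also hash identically. A hedged companion sketch, assuming BMPage exposes the Id and page fields used by Equals above:

            public override int GetHashCode()
            {
                // Any stable mix of the resource id and page number will do,
                // as long as pages that are Equals hash to the same value.
                return (int)(Id * 251 + page);
            }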
Example #11
        /// <summary>
        /// Called when a new page is created.
        /// </summary>
        /// <param name="page"></param>
        private void PageCreated(BMPage page)
        {
            // lock (T_lock) {

                if (PARANOID_CHECKS) {
                    int i = page_list.IndexOf(page);
                    if (i != -1) {
                        BMPage f = (BMPage)page_list[i];
                        if (f == page) {
                            throw new ApplicationException("Same page added multiple times.");
                        }
                        if (f != null) {
                            throw new ApplicationException("Duplicate pages.");
                        }
                    }
                }

                page.t = current_T;
                ++current_T;

                ++current_page_count;
                page_list.Add(page);

                // Below is the page purge algorithm.  If the maximum number of pages
                // has been exceeded, we sort the page list, weighting each page by the
                // time since it was last accessed and its total number of accesses, and
                // clear the bottom 20% of this list.

                // Check if we should purge old pages and purge some if we do...
                if (current_page_count > max_pages) {
                    // Purge 20% of the cache
                    // Sort the pages by the current formula,
                    //  ( 1 / page_access_count ) * (current_t - page_t)
                    // Further, if the page has written data then we multiply its weight
                    // by 0.75.  This scales down written pages so they have a better
                    // chance of surviving in the cache than pages that were only read.
                    Object[] pages = page_list.ToArray();
                    Array.Sort(pages, PageCacheComparer);

                    int purge_size = System.Math.Max((int)(pages.Length * 0.20f), 2);
                    for (int i = 0; i < purge_size; ++i) {
                        BMPage dpage = (BMPage)pages[pages.Length - (i + 1)];
                        lock (dpage) {
                            dpage.Dispose();
                        }
                    }

                    // Remove all the elements from page_list and set it with the sorted
                    // list (minus the elements we removed).
                    page_list.Clear();
                    for (int i = 0; i < pages.Length - purge_size; ++i) {
                        page_list.Add(pages[i]);
                    }

                    current_page_count -= purge_size;

                }
            // }
        }
Example #12
 /// <summary>
 /// Called when a page is accessed.
 /// </summary>
 /// <param name="page"></param>
 private void PageAccessed(BMPage page)
 {
     // lock (T_lock) {
         page.t = current_T;
         ++current_T;
         ++page.access_count;
     // }
 }
Example #13
        /// <summary>
        /// Fetches and returns a page from a store.
        /// </summary>
        /// <param name="data"></param>
        /// <param name="page_number"></param>
        /// <remarks>
        /// Pages may be cached.  If the page is not available in the cache then a new 
        /// <see cref="BMPage"/> object is created for the page requested.
        /// </remarks>
        /// <returns></returns>
        private BMPage FetchPage(IJournalledResource data, long page_number)
        {
            long id = data.Id;

            BMPage prev_page = null;
            bool new_page = false;
            BMPage page;

            lock (page_map) {
                // Generate the hash code for this page.
                int p = (CalcHashCode(id, page_number) & 0x07FFFFFFF) %
                        page_map.Length;
                // Search for this page in the hash
                page = page_map[p];
                while (page != null && !page.IsPage(id, page_number)) {
                    prev_page = page;
                    page = page.hash_next;
                }

                // Page isn't found so create it and add to the cache
                if (page == null) {
                    page = new BMPage(data, page_number, page_size);
                    // Add this page to the map
                    page.hash_next = page_map[p];
                    page_map[p] = page;
                } else {
                    // Move this page to the head if it's not already at the head.
                    if (prev_page != null) {
                        prev_page.hash_next = page.hash_next;
                        page.hash_next = page_map[p];
                        page_map[p] = page;
                    }
                }

                lock (page) {
                    // If the page is not in use then it must be newly set up, so add
                    // a reference.
                    if (page.NotInUse) {
                        page.Reset();
                        new_page = true;
                        page.ReferenceAdd();
                    }
                    // Add a reference for this fetch
                    page.ReferenceAdd();
                }

                // If the page is new,
                if (new_page) {
                    PageCreated(page);
                } else {
                    PageAccessed(page);
                }
            }

            // Return the page.
            return page;
        }
Example #14
 /// <summary>
 /// The calculation for finding the <i>weight</i> of a page in the cache.
 /// </summary>
 /// <param name="page"></param>
 /// <remarks>
 /// A heavier page is sorted lower and is therefore cleared from the 
 /// cache faster.
 /// </remarks>
 /// <returns></returns>
 private float pageEnumValue(BMPage page)
 {
     // We cap the access counter so it cannot exceed 10000 accesses.  I'm
     // a little unsure if we should include this constant in the equation, but it
     // ensures that some old but highly accessed page will not stay in the
     // cache forever.
     long bounded_page_count = System.Math.Min(page.access_count, 10000);
     float v = (1f / bounded_page_count) * (lbm.current_T - page.t);
     return v;
 }