Example #1
        public static long Render(AggregateStructure Data, RecordWriter Output, FNodeSet Fields)
        {

            // If nothing was spilled to disk, the whole aggregation lives in the memory cache //
            if (Data._Headers.Count == 0)
            {
                Data._Cache.WriteToFinal(Output, Fields);
                return (long)Data._Cache.Count;
            }

            // Otherwise, render each spilled partition in turn //
            long writes = 0;
            foreach (Header h in Data._Headers)
            {

                KeyValueSet kvs = KeyValueSet.Open(h, Data._keys, Data._aggregates);
                writes += (long)kvs.Count;
                kvs.WriteToFinal(Output, Fields);

            }

            return writes;

        }
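A minimal call sketch for the writer-based renderer above; `aggregate`, `writer`, and `fields` are hypothetical stand-ins for an AggregateStructure loaded elsewhere, an open RecordWriter, and the output FNodeSet:

            // Assumed: `aggregate` has already been filled and closed (see Example #6) //
            long written = AggregateStructure.Render(aggregate, writer, fields);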
Example #2
        public static Table Render(AggregateStructure Data, string Dir, string Name, int MaxRecordCount, FNodeSet Fields)
        {

            Table t = new Table(Dir, Name, Fields.Columns, MaxRecordCount);

            // If nothing was spilled to disk, render straight from the memory cache //
            if (Data._Headers.Count == 0)
            {
                RecordSet rs = Data._Cache.ToFinal(Fields);
                t.Union(rs);
                return t;
            }

            for (int i = 0; i < Data._Headers.Count; i++)
            {

                KeyValueSet kvs = KeyValueSet.Open(Data._Headers[i], Data._keys, Data._aggregates);
                RecordSet rs = kvs.ToFinal(Fields);
                t.Union(rs);

            }

            return t;

        }
Example #3
        public static RecordSet Render(AggregateStructure Data, FNodeSet Fields)
        {

            if (Data._Headers.Count != 0)
                throw new Exception("AggregateStructure cannot render into a RecordSet unless there is only one grouping structure");

            return Data._Cache.ToFinal(Fields);

        }
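Because this overload throws when any partition has been spilled, a caller would typically guard on the header list first; a short sketch, assuming the Headers collection seen in Examples #4 and #6 is publicly readable and `aggregate`/`fields` exist:

            // Safe only while every group still lives in the memory cache //
            if (aggregate.Headers.Count == 0)
            {
                RecordSet rs = AggregateStructure.Render(aggregate, fields);
            }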
Example #4
        public static void Consolidate(AggregateStructure Data)
        {

            // If nothing was spilled to disk, the memory cache is already consolidated //
            if (Data._Headers.Count == 0)
                return;

            // Otherwise, we need to union all the headers //
            for (int i = 0; i < Data._Headers.Count - 1; i++)
            {

                // Open the first data set //
                KeyValueSet gbs1 = KeyValueSet.Open(Data._Headers[i], Data._keys, Data._aggregates);

                for (int j = i + 1; j < Data._Headers.Count; j++)
                {

                    // Open the second set //
                    KeyValueSet gbs2 = KeyValueSet.Open(Data._Headers[j], Data._keys, Data._aggregates);
                    
                    // Merge the two //
                    KeyValueSet.Union(gbs1, gbs2);
                    
                    // Save the second set in case there were deletes //
                    KeyValueSet.Save(Data._Headers[j], gbs2);

                }

                // Save the first set //
                KeyValueSet.Save(Data._Headers[i], gbs1);

            }


        }
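Consolidate makes a pairwise pass over the spilled partitions: each earlier partition absorbs the matching group keys of every later one, so after the pass any given key survives in at most one partition. A sketch of the intended call order, mirroring Example #6 (`aggregate`, `writer`, and `fields` are hypothetical names):

            // Collapse duplicate keys across partitions, then render //
            AggregateStructure.Consolidate(aggregate);
            long written = AggregateStructure.Render(aggregate, writer, fields);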
Example #5
        // Statics //
        /// <summary>
        /// Merges two aggregate structures
        /// </summary>
        /// <param name="WorkStruct">The structure to be merged into another</param>
        /// <param name="MergeIntoStruc">The structure to be appended</param>
        public static void Merge(AggregateStructure WorkStruct, AggregateStructure MergeIntoStruc)
        {

            // If both structures are entirely memory based, try to merge their caches //
            if (MergeIntoStruc._Headers.Count == 0 && WorkStruct._Headers.Count == 0)
            {

                // Combine both caches //
                KeyValueSet.Union(MergeIntoStruc._Cache, WorkStruct._Cache);
            
                // If the work cache was fully absorbed, there is nothing left to save //
                if (WorkStruct._Cache.Count == 0)
                    return;

                // Otherwise, both caches must be saved to disk and tracked as headers //
                Header h_mis = KeyValueSet.Save(MergeIntoStruc._TempDir, MergeIntoStruc._Cache);
                Header h_ws = KeyValueSet.Save(WorkStruct._TempDir, WorkStruct._Cache);
                MergeIntoStruc._Headers.Add(h_mis);
                MergeIntoStruc._Headers.Add(h_ws);

            }
            // If the merge into structure has no headers, but the work data does, save the merge into data then add the headers over //
            else if (MergeIntoStruc._Headers.Count == 0 && WorkStruct._Headers.Count != 0)
            {

                Header h_mis = KeyValueSet.Save(MergeIntoStruc._TempDir, MergeIntoStruc._Cache);
                MergeIntoStruc._Headers.Add(h_mis);
                MergeIntoStruc._Headers.AddRange(WorkStruct._Headers);

            }
            // If the merge into structure has headers, but the work structure doesn't, save the work cache and add it to the merge into collection //
            else if (MergeIntoStruc._Headers.Count != 0 && WorkStruct._Headers.Count == 0)
            {

                Header h_ws = KeyValueSet.Save(WorkStruct._TempDir, WorkStruct._Cache);
                MergeIntoStruc._Headers.Add(h_ws);

            }
            // Otherwise, they both have headers... so just add all the headers into the into structure //
            else
            {
                MergeIntoStruc._Headers.AddRange(WorkStruct._Headers);
            }

        }
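A sketch of how Merge could fold several independently built structures (for example, one per parallel scan) into a single master before consolidation; `partials` and `master` are hypothetical names:

            // Merge each work structure into the master, then collapse duplicates //
            foreach (AggregateStructure partial in partials)
                AggregateStructure.Merge(partial, master);
            AggregateStructure.Consolidate(master);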
Example #6
        /*
        private void ExecuteHashTable()
        {

            // Build a reader //
            RecordReader BaseReader = this._source.OpenReader(this._filter);

            // Build the aggregate compiler //
            AggregateStructure group_by = new AggregateStructure(this._sink, this._keys, this._aggregates);
            this._reads = 0;
            this._writes = 0;

            // Load the aggregator //
            while (!BaseReader.EndOfData)
            {
                Record rec = BaseReader.ReadNext();
                this._basememory.Assign(rec);
                group_by.Insert(rec);
                this._reads++;
            }

            // Get the record reader //
            RecordReader aggregate_reader = group_by.Render().OpenReader();
            DataSet rs = group_by.Render();

            // Write the data to the output //
            while (!aggregate_reader.EndOfData)
            {

                // Assign //
                this._returnmemory.Assign(aggregate_reader.ReadNext());

                // Increment the ticks //
                this._writes++;

                // Add the record //
                this._writer.Insert(this._returnset.Evaluate());

            }


        }
        */

        private void ExecuteHashTable()
        {

            // Build a reader //
            RecordReader BaseReader = this._source.OpenReader(this._filter);

            // Build the aggregate compiler //
            AggregateStructure group_by = new AggregateStructure(this._sink, this._keys, this._aggregates);
            this._reads = 0;
            this._writes = 0;

            // Load the aggregator //
            while (!BaseReader.EndOfData)
            {
                Record rec = BaseReader.ReadNext();
                this._basememory.Assign(rec);
                group_by.Insert();
                this._reads++;
            }

            // Close the structure //
            group_by.Close();

            // Consolidate //
            AggregateStructure.Consolidate(group_by);

            // Write the data //
            this._writes = AggregateStructure.Render(group_by, this._writer, this._returnset);

            // Drop the tables //
            DataSetManager.DropRecordSet(group_by.Headers);

        }