Ejemplo n.º 1
0
        /// <summary>
        /// Recomputes per-flow cache allocations for every file cache using weighted
        /// min-max fair sharing, then pushes any size change to the client via the
        /// flow's update-parameter string.
        /// </summary>
        private void calculateMemoryAllocations()
        {
            foreach (EssesFileCacheContext fileCache in this.fileCaches.Values)
            {
                // Parallel per-flow inputs to WeightedMinMax: equal weights, the
                // flow's cache demand, and an output slot for the computed rate.
                List <long> weights, demands, rates;
                long        epsilon = 0;
                weights = new List <long>();
                demands = new List <long>();
                rates   = new List <long>();

                foreach (Flow flow in fileCache.flows)
                {
                    EssesFlowCacheContext flowCacheContext = (EssesFlowCacheContext)flow.Context;
                    weights.Add(1);
                    rates.Add(0);
                    //XXXIS: Converting from ulong to long: NOT okay, but will have to do for now...
                    // BUG FIX: this Add was commented out, leaving demands empty while
                    // weights/rates each held one entry per flow — the three lists must
                    // stay parallel for WeightedMinMax.
                    demands.Add((long)flowCacheContext.flowDemand);
                }
                WeightedMinMax(weights, demands, rates, (long)fileCache.fileCacheSize, epsilon);

                int i = 0;
                foreach (Flow flow in fileCache.flows)
                {
                    string responseString         = "";
                    EssesFlowCacheContext context = (EssesFlowCacheContext)flow.Context;
                    if (rates[i] != (long)context.cacheSizeAllocated)
                    {
                        //Response string example; commands to cache module can be any number of the commands below in the same message (in any order)
                        responseString            += "changeCacheSize=" + rates[i].ToString() + " ";
                        context.cacheSizeAllocated = (UInt64)rates[i];  //update our own state of the new flow cache size
                    }

                    //ADD WHATEVER OTHER PARAMETERS YOU WANT TO CHANGE THE CLIENT CACHE HERE
                    flow.IoFlowUpdateParams = responseString; //XXXIS: parameters to update flow get put in responseString
                    i++;
                }
            }
        }
Ejemplo n.º 2
0
        /// <summary>
        /// Builds the Esses policy: connects the rate controller, parses the flow
        /// config, and creates controller-side cache context for every flow.
        /// Queues are installed on remote servers; RAPs are installed later in Start().
        /// </summary>
        public OktofsPolicyEsses(string configFileName, string topConfigName, int slavePort)
        {
            shutdownEvent = new ManualResetEvent(false);

            // Initialize the rate controller. Parsing the config file defines VM
            // placement and VM ordering within a traffic matrix. The random tenant
            // id minimizes the probability of collisions at the slaves.
            Random random = new Random();
            uint TenantId = (uint)random.Next(1, int.MaxValue);

            rateController = new OktofsRateController(this, TenantId, slavePort);

            // Bandwidth weights come from their own config file.
            InitBandwidthWeights(BWWeightsConfigFile);

            string[] validInputRecords = { "D-VM-FILE-VOL", "CD-VM-FILE-VOL", "CH-VM-FILE-VOL", "C-VM-SHARE-VOL", "H-VM-SHARE-VOL", "CH-VM-SHARE-VOL" };
            listFlows = rateController.InitListFlows(configFileName, validInputRecords);

            fileCaches = new Dictionary<string, EssesFileCacheContext>();

            // Build controller-side context for each flow's cache and the file cache
            // it belongs to, from the input tokens recorded when flows were created.
            foreach (Flow flow in listFlows)
            {
                UInt64 cacheCapacity = Convert.ToUInt64(flow.InputTokens[9]);
                string tenantKey     = flow.InputTokens[12];

                EssesFileCacheContext cacheCtx;
                if (!this.fileCaches.TryGetValue(tenantKey, out cacheCtx))
                {
                    cacheCtx = new EssesFileCacheContext(cacheCapacity);
                    this.fileCaches.Add(tenantKey, cacheCtx);
                }

                cacheCtx.flows.Add(flow); // register this flow with its file cache

                UInt64 allocatedSize = Convert.ToUInt64(flow.InputTokens[5]);

                // Token 7 selects the write policy; any value other than
                // "write-through" leaves the enum at its zero default.
                CacheWritePolicy writePolicy =
                    flow.InputTokens[7] == "write-through" ? CacheWritePolicy.WriteThrough : 0;

                // Token 8 selects whether writes are buffered in the cache.
                CacheWriteBuffer cacheWrites = 0;
                if (flow.InputTokens[8] == "noCacheWrites")
                {
                    cacheWrites = CacheWriteBuffer.noCache;
                }
                else if (flow.InputTokens[8] == "CacheWrites")
                {
                    cacheWrites = CacheWriteBuffer.Cache;
                }

                EssesFlowCacheContext flowContext = new EssesFlowCacheContext(flow, allocatedSize, writePolicy, cacheWrites, flow.InputTokens[11], tenantKey);
                flow.Context = flowContext;

                //flowContext.minFlowGuarantee = Convert.ToUInt64(flow.InputTokens[10]);

                flowContext.guaranteedE2EBW = Convert.ToUInt64(flow.InputTokens[13]);
            }

            // Ask the rate controller to create the queues on the remote servers.
            rateController.InstallFlows();

            // At this point the rate controller is connected to the rate slaves and
            // the minifilter drivers are configured with queues. The minifilters will
            // not enforce rate limits until we install the RAPs in Start().
        }
Ejemplo n.º 3
0
        /// <summary>
        /// Parses a space-separated "key=value" stats string reported by a client
        /// slave for one flow and folds it into the flow's cache context: absolute
        /// counters, per-interval deltas, hit rate, and (optionally) the flow's
        /// cache-demand curve.
        /// </summary>
        /// <param name="flow">Flow whose Context is the EssesFlowCacheContext to update.</param>
        /// <param name="statsString">Raw stats line; see the format example below.</param>
        private void processFlowCacheStats(Flow flow, string statsString)
        {
            //Stats strings look like: "cacheSizeAllocated={0} cacheSizeUsed={1} cacheAccessesTotal={2} flowBytesAccessed={3} cacheAccessesHits={4} cacheEvictions={5} {6}"

            EssesFlowCacheContext fCC = (EssesFlowCacheContext)flow.Context;

            string[] statsStringSeparators = new string[] { " ", "\t" };
            char[]   tokenSeparator        = new char[] { '=' };
            string[] statsTokens           = statsString.Split(statsStringSeparators, StringSplitOptions.RemoveEmptyEntries);
            Debug.Assert(statsTokens.Length > 0);

            //Extract the stats from the string sent by the client slave
            // Tokens 0-1 are absolute sizes; tokens 2-5 are monotonically increasing
            // counters (asserted below), from which interval deltas are derived.
            fCC.cacheSizeAllocated = Convert.ToUInt64(statsTokens[0].Split(tokenSeparator)[1]);
            fCC.cacheSizeUsed      = Convert.ToUInt64(statsTokens[1].Split(tokenSeparator)[1]);

            UInt64 cacheAccessesNow, flowBytesAccessedNow, cacheAccessesHitsNow, cacheEvictionsNow;

            cacheAccessesNow     = Convert.ToUInt64(statsTokens[2].Split(tokenSeparator)[1]);
            flowBytesAccessedNow = Convert.ToUInt64(statsTokens[3].Split(tokenSeparator)[1]);
            cacheAccessesHitsNow = Convert.ToUInt64(statsTokens[4].Split(tokenSeparator)[1]);
            cacheEvictionsNow    = Convert.ToUInt64(statsTokens[5].Split(tokenSeparator)[1]);

            // Counters reported by the slave must never go backwards.
            // NOTE(review): cacheEvictionsNow has no matching assert — confirm whether
            // that is intentional (evictions may reset client-side) or an omission.
            Debug.Assert(cacheAccessesNow >= fCC.cacheAccessesTotal);
            Debug.Assert(flowBytesAccessedNow >= fCC.flowBytesAccessedTotal);
            Debug.Assert(cacheAccessesHitsNow >= fCC.cacheAccessesHitsTotal);

            // Accumulate deltas over the current allocation interval. At the start of
            // an interval (counter == 0) the accumulators are reset; once the counter
            // reaches cacheAllocControlFreq accumulation stops until the next reset.
            if (cacheAllocControlCounter == 0)
            {
                fCC.cacheAccessesLastAllocInterval     = 0;
                fCC.flowBytesAccessedLasAllocInterval  = 0;
                fCC.cacheAccessesHitsLastAllocInterval = 0;
                fCC.cacheEvictionsLastAllocInterval    = 0;
            }
            else if (cacheAllocControlCounter < cacheAllocControlFreq)
            {
                fCC.cacheAccessesLastAllocInterval     += (cacheAccessesNow - fCC.cacheAccessesTotal);
                fCC.flowBytesAccessedLasAllocInterval  += (flowBytesAccessedNow - fCC.flowBytesAccessedTotal);
                fCC.cacheAccessesHitsLastAllocInterval += (cacheAccessesHitsNow - fCC.cacheAccessesHitsTotal);
                fCC.cacheEvictionsLastAllocInterval    += (cacheEvictionsNow - fCC.cacheEvictionsTotal);
            }

            // Per-sample delta, computed before the running totals are overwritten.
            fCC.flowBytesAccessedLastSampleInterval = flowBytesAccessedNow - fCC.flowBytesAccessedTotal;

            fCC.cacheAccessesTotal     = cacheAccessesNow;
            fCC.flowBytesAccessedTotal = flowBytesAccessedNow;
            fCC.cacheAccessesHitsTotal = cacheAccessesHitsNow;
            fCC.cacheEvictionsTotal    = cacheEvictionsNow;

            fCC.hitRate = (fCC.cacheAccessesTotal > 0 ? (float)fCC.cacheAccessesHitsTotal / (float)fCC.cacheAccessesTotal : 0.0F);

            if (statsTokens.Length > 6) //cache demand string is included
            {
                // Token 6 is a ';'-separated list of "x,y" pairs: candidate cache
                // size (x) versus predicted hit rate (y) — presumably; confirm units
                // against the client slave that emits this string.
                string[] cacheDemandPairs = statsTokens[6].Split(';');
                Double[] xVals            = new Double[cacheDemandPairs.Length];
                Double[] yVals            = new Double[cacheDemandPairs.Length];
                for (int i = 0; i < cacheDemandPairs.Length; i++)
                {
                    xVals[i] = Convert.ToDouble(cacheDemandPairs[i].Split(',')[0]);
                    yVals[i] = Convert.ToDouble(cacheDemandPairs[i].Split(',')[1]);
                }

                if (!fCC.useSerializedCurve) //only save curve if we don't have a curve at the moment
                {
                    fCC.cacheDemandCurvePoints = new CacheCurvePoints(xVals, yVals);
                    //fCC.cacheDemandFunc = Interpolate.LinearBetweenPoints(xVals, yVals);
                    //fCC.cacheDemandFunc = Interpolate.Linear(xVals, yVals);
                    fCC.cacheDemandFunc = Interpolate.PolynomialEquidistant(xVals, yVals);
                }

#if COLLECT_CACHE_CURVE
                //////////XXXIS: quick test to see if we got the function fitted properly (since graphing's a pain...)

                //XXXIS change fileCacheSize accordingly to print the curve on the screen, IN ADDITION to it being collected when you Ctrl+C the
                //controller at the end of the run

                UInt64 fileCacheSize  = 10737418240;
                int    numCurvePoints = 500;
                float  stepSize       = 1.0F / (float)numCurvePoints;
                Console.WriteLine("Flow ID {0}", flow.FlowId);

                for (float frac = 0.0F; frac <= (1.0F + stepSize); frac += stepSize)
                {
                    UInt32 ghostCacheBlockSize = 4096;
                    //Compute and block-align the cache sizes; XXXIS: THIS ASSUMES ghost cache block size is a power of two!!!
                    UInt64 specCacheSize = ((UInt64)(frac * (float)fileCacheSize)) & (~((UInt64)(ghostCacheBlockSize - 1)));
                    Console.WriteLine("{0}, {1}", specCacheSize, fCC.cacheDemandFunc.Interpolate((double)specCacheSize));
                    //rateController.Log(String.Format("{0}, {1}", specCacheSize, fCC.cacheDemandFunc.Interpolate((double)specCacheSize)));
                }
#endif
            }

            //Console.WriteLine("Flow {0}, SizeAllocated {1}, SizeUsed {2}, flowBytesAccessedTotal {3}, AccessesTotal {4}, AccessesHits {5}, Evictions {6}, HitRate {7}",
            //    flow.FlowId, fCC.cacheSizeAllocated, fCC.cacheSizeUsed, fCC.flowBytesAccessedTotal, fCC.cacheAccessesTotal,
            //    fCC.cacheAccessesHitsTotal, fCC.cacheEvictionsTotal, fCC.hitRate);
        }
Ejemplo n.º 4
0
        /// <summary>
        /// Recomputes per-flow cache allocations for every file cache. For each flow
        /// it derives the hit rate needed to meet its end-to-end bandwidth SLA, root-
        /// finds the minimum cache size achieving that hit rate on the flow's demand
        /// curve, then runs utility-maximizing water-filling across the file cache.
        /// Changed sizes are pushed to the client through IoFlowUpdateParams.
        /// </summary>
        private void calculateMemoryAllocations()
        {
            foreach (EssesFileCacheContext fileCache in this.fileCaches.Values)
            {
                List <IInterpolation> utilities = new List <IInterpolation>();
                List <UInt64>         minNecessaryAllocations = new List <UInt64>();
                List <UInt64>         finalAllocations        = new List <UInt64>();

                UInt64 epsilon = 1048576; //Water-filling constant ; XXIS: PICK A GOOD VALUE FOR THIS

                // FlowIds participating in this allocation round. HashSet gives O(1)
                // membership checks in the distribution loop below (was List.Contains).
                HashSet <uint> flowsInAllocation = new HashSet <uint>();
                foreach (Flow flow in fileCache.flows)
                {
                    EssesFlowCacheContext flowCacheContext = (EssesFlowCacheContext)flow.Context;

                    // Required hit rate h solves: h*localCacheBW + (1-h)*remoteStorageBW = SLA.
                    // BUG FIX: the operands are UInt64, so subtracting them directly wraps
                    // around when the SLA is below remoteStorageBW; convert to double first
                    // so the difference can legitimately go negative.
                    UInt64 flowBWSLA   = flowCacheContext.guaranteedE2EBW;
                    double numerator   = (double)flowBWSLA - (double)this.remoteStorageBW;
                    double denominator = (double)this.localCacheBW - (double)this.remoteStorageBW;

                    double necessaryHitRate = numerator / denominator;

                    // Shift the demand curve so its root is the cache size where the
                    // achieved hit rate equals the required one.
                    Double[] newYVals = new Double[flowCacheContext.cacheDemandCurvePoints.yVals.Length];

                    for (int j = 0; j < flowCacheContext.cacheDemandCurvePoints.yVals.Length; j++)
                    {
                        newYVals[j] = necessaryHitRate - flowCacheContext.cacheDemandCurvePoints.yVals[j];
                    }

                    Func <double, double> rootFunc = Fit.PolynomialFunc(flowCacheContext.cacheDemandCurvePoints.xVals, newYVals, 10);

                    double reqCacheSize;
                    if (!MathNet.Numerics.RootFinding.RobustNewtonRaphson.TryFindRoot(rootFunc, Differentiate.FirstDerivativeFunc(rootFunc), 0, (double)fileCache.fileCacheSize, 1e-3, 100, 20, out reqCacheSize))
                    {
                        // BUG FIX: on failure the out value may be NaN, and casting NaN
                        // to UInt64 is undefined. Fall back to no minimum allocation and
                        // let water-filling decide.
                        reqCacheSize = 0;
                    }

                    utilities.Add(flowCacheContext.cacheDemandFunc);
                    minNecessaryAllocations.Add((UInt64)reqCacheSize);
                    flowsInAllocation.Add(flow.FlowId);
                }


                UtilityMaximizationAllocation(fileCache.fileCacheSize, utilities, minNecessaryAllocations, epsilon, finalAllocations);

                int i = 0; //counter for flows that were allocated cache this iteration (with flowIDs in flowsInAllocation)
                foreach (Flow flow in fileCache.flows)
                {
                    string responseString         = "";
                    EssesFlowCacheContext context = (EssesFlowCacheContext)flow.Context;

                    if (flowsInAllocation.Contains(flow.FlowId))
                    {
                        if (finalAllocations[i] != context.cacheSizeAllocated)
                        {
                            //Response string example; commands to cache module can be any number of the commands below in the same message (in any order)
                            responseString            += "changeCacheSize=" + finalAllocations[i].ToString() + " ";
                            context.cacheSizeAllocated = finalAllocations[i]; //update our own state of the new flow cache size

                            //if we changed the cache size, reset the counters to compute hit rate (client is doing so too)
                            context.cacheAccessesTotal     = 0;
                            context.cacheAccessesHitsTotal = 0;
                        }
                        i++; //only increment when we see a flow we allocated cache to
                    }
                    else //flow is idle; not included in cache allocation
                    {
                        if (context.cacheSizeAllocated != 0)
                        {
                            responseString            += "changeCacheSize=0";
                            context.cacheSizeAllocated = 0; //update our own state of the new flow cache size

                            //if we changed the cache size, reset the counters to compute hit rate (client is doing so too)
                            context.cacheAccessesTotal     = 0;
                            context.cacheAccessesHitsTotal = 0;
                        }
                    }

                    //ADD WHATEVER OTHER PARAMETERS YOU WANT TO CHANGE THE CLIENT CACHE HERE
                    flow.IoFlowUpdateParams = responseString; //XXXIS: parameters to update flow get put in responseString
                }
            }
        }
Ejemplo n.º 5
0
        /// <summary>
        /// Recomputes per-flow cache allocations for every file cache. Only flows
        /// that were active during the last allocation interval (nonzero cache
        /// accesses) take part in the utility-maximizing water-filling; idle flows
        /// have their allocation revoked. Size changes are pushed to the client via
        /// IoFlowUpdateParams.
        /// </summary>
        private void calculateMemoryAllocations()
        {
            foreach (EssesFileCacheContext fileCache in this.fileCaches.Values)
            {
                List <IInterpolation> utilities        = new List <IInterpolation>();
                List <UInt64>         minGuarantees    = new List <UInt64>();
                List <UInt64>         finalAllocations = new List <UInt64>();

                UInt64 epsilon = 1048576; //Water-filling constant ; XXIS: PICK A GOOD VALUE FOR THIS

                // FlowIds participating in this allocation round. HashSet gives O(1)
                // membership checks in the distribution loop below (was List.Contains,
                // which made that loop O(n^2) in the number of flows).
                HashSet <uint> flowsInAllocation = new HashSet <uint>();
                foreach (Flow flow in fileCache.flows)
                {
                    EssesFlowCacheContext flowCacheContext = (EssesFlowCacheContext)flow.Context;

                    // Skip flows with no cache accesses last interval: they are idle.
                    if (flowCacheContext.cacheAccessesLastAllocInterval != 0)
                    {
                        utilities.Add(flowCacheContext.cacheDemandFunc);
                        minGuarantees.Add(flowCacheContext.minFlowGuarantee);
                        flowsInAllocation.Add(flow.FlowId);
                    }
                }



                UtilityMaximizationAllocation(fileCache.fileCacheSize, utilities, minGuarantees, epsilon, finalAllocations);

                int i = 0; //counter for flows that were allocated cache this iteration (with flowIDs in flowsInAllocation)
                foreach (Flow flow in fileCache.flows)
                {
                    string responseString         = "";
                    EssesFlowCacheContext context = (EssesFlowCacheContext)flow.Context;

                    if (flowsInAllocation.Contains(flow.FlowId))
                    {
                        if (finalAllocations[i] != context.cacheSizeAllocated)
                        {
                            //Response string example; commands to cache module can be any number of the commands below in the same message (in any order)
                            responseString            += "changeCacheSize=" + finalAllocations[i].ToString() + " ";
                            context.cacheSizeAllocated = finalAllocations[i]; //update our own state of the new flow cache size

                            //if we changed the cache size, reset the counters to compute hit rate (client is doing so too)
                            context.cacheAccessesTotal     = 0;
                            context.cacheAccessesHitsTotal = 0;
                        }
                        i++; //only increment when we see a flow we allocated cache to
                    }
                    else //flow is idle; not included in cache allocation
                    {
                        if (context.cacheSizeAllocated != 0)
                        {
                            responseString            += "changeCacheSize=0";
                            context.cacheSizeAllocated = 0; //update our own state of the new flow cache size

                            //if we changed the cache size, reset the counters to compute hit rate (client is doing so too)
                            context.cacheAccessesTotal     = 0;
                            context.cacheAccessesHitsTotal = 0;
                        }
                    }

                    //ADD WHATEVER OTHER PARAMETERS YOU WANT TO CHANGE THE CLIENT CACHE HERE
                    flow.IoFlowUpdateParams = responseString; //XXXIS: parameters to update flow get put in responseString
                }
            }
        }
Ejemplo n.º 6
0
        /// <summary>
        /// Per-control-interval callback: pulls fresh stats from the slaves, folds
        /// them into each flow's cache context, logs aggregate non-idle cache usage,
        /// and (when CALCULATE_ALLOCATION is defined) periodically re-runs the
        /// memory allocation algorithm.
        /// </summary>
        public override void CallbackControlInterval()
        {
            rateController.GetStatsFromServers(statsDelta, true);

            // Fold the per-flow cache statistics reported by the slaves into each
            // flow's controller-side context. Flows without a RapD have nothing to
            // report this interval.
            foreach (Flow flow in listFlows)
            {
                if (flow.RapD == null)
                {
                    continue;
                }

                Console.WriteLine("Flow {0} RapD.IoFlowStats {1}", flow.FlowId, flow.RapD.IoFlowStats);
                processFlowCacheStats(flow, flow.RapD.IoFlowStats);
            }

            // Aggregate cache space and traffic over flows that were active during
            // the last sample interval, then log the pair as "space,bytes".
            UInt64 activeCacheSpace    = 0;
            UInt64 activeBytesAccessed = 0;

            foreach (Flow flow in listFlows)
            {
                EssesFlowCacheContext fCC = (EssesFlowCacheContext)flow.Context;

                if (fCC.flowBytesAccessedLastSampleInterval != 0)
                {
                    activeCacheSpace    += fCC.cacheSizeAllocated;
                    activeBytesAccessed += fCC.flowBytesAccessedLastSampleInterval;
                }
            }

            rateController.Log(activeCacheSpace + "," + activeBytesAccessed);

#if CALCULATE_ALLOCATION
            // Recompute cache allocations once every cacheAllocControlFreq intervals.
            this.cacheAllocControlCounter++;
            if (this.cacheAllocControlCounter == this.cacheAllocControlFreq)
            {
                //XXXIS: SOPHISTICATED MEMORY ALLOCATION CONTROL ALGORITHM GOES HERE
                calculateMemoryAllocations();
                rateController.UpdateRateLimits();
                this.cacheAllocControlCounter = 0;
            }
#endif
        }
Ejemplo n.º 7
0
        /// <summary>
        /// Builds the Esses policy: connects the rate controller, parses the flow
        /// configuration, and creates controller-side cache context for every flow.
        /// Queues are installed on the remote servers here; RAPs (and thus actual
        /// rate enforcement) are installed later in Start().
        /// </summary>
        /// <param name="configFileName">Flow configuration file defining VM placement/ordering.</param>
        /// <param name="topConfigName">Topology config name (unused in this constructor).</param>
        /// <param name="slavePort">Port on which the rate slaves listen.</param>
        public OktofsPolicyEsses(string configFileName, string topConfigName, int slavePort)
        {
            shutdownEvent = new ManualResetEvent(false);
            //
            // initialize rate controller
            // parsing the config file defines VM placement and VM ordering within a traffic matrix
            //
            Random random   = new Random();
            uint   TenantId = (uint)random.Next(1, int.MaxValue); // min P of collision at slaves.

            rateController = new OktofsRateController(this, TenantId, slavePort);

            InitBandwidthWeights(BWWeightsConfigFile); //initialize the bandwidth weights from the appropriate config file

            string[] validInputRecords = new string[] { "D-VM-FILE-VOL", "CD-VM-FILE-VOL", "CH-VM-FILE-VOL", "C-VM-SHARE-VOL", "H-VM-SHARE-VOL", "CH-VM-SHARE-VOL" };
            listFlows = rateController.InitListFlows(configFileName, validInputRecords);

            fileCaches = new Dictionary <string, EssesFileCacheContext>();

            //Initialize some controller-side context about each flow's cache and the file caches they belong to based on the input tokens stored when flows were created
            foreach (Flow flow in listFlows)
            {
                // Token 9: total file cache size; token 12: tenant id keying the shared file cache.
                UInt64 fileCacheSize = Convert.ToUInt64(flow.InputTokens[9]);
                string flowTenantID  = flow.InputTokens[12];

                // Create the per-tenant file cache context on first sight of the tenant.
                if (!this.fileCaches.ContainsKey(flowTenantID))
                {
                    this.fileCaches.Add(flowTenantID, new EssesFileCacheContext(fileCacheSize));
                }


                this.fileCaches[flowTenantID].flows.Add(flow); //Add this flow to the context for this file cache

                // Token 5: this flow's initial cache allocation; tokens 7-8 select
                // write policy and write buffering (enum zero default if unmatched).
                UInt64           flowCacheSizeAllocated = Convert.ToUInt64(flow.InputTokens[5]);
                CacheWritePolicy writePolicy            = 0;
                CacheWriteBuffer cacheWrites            = 0;
                switch (flow.InputTokens[7])
                {
                case "write-through":
                    writePolicy = CacheWritePolicy.WriteThrough;
                    break;
                }
                switch (flow.InputTokens[8])
                {
                case "noCacheWrites":
                    cacheWrites = CacheWriteBuffer.noCache;
                    break;

                case "CacheWrites":
                    cacheWrites = CacheWriteBuffer.Cache;
                    break;
                }
                EssesFlowCacheContext flowContext = new EssesFlowCacheContext(flow, flowCacheSizeAllocated, writePolicy, cacheWrites, flow.InputTokens[11], flowTenantID);
                flow.Context = flowContext;

                //flowContext.minFlowGuarantee = Convert.ToUInt64(flow.InputTokens[10]);

                // Token 13: end-to-end bandwidth guaranteed to this flow by its SLA.
                flowContext.guaranteedE2EBW = Convert.ToUInt64(flow.InputTokens[13]);
            }

            //
            // ask Rate Controller to create the queues on the remote servers.
            //
            rateController.InstallFlows();

            //
            // At this point the Rate Controller is connected to the rate slaves and the
            // minifilter drivers are configured with queues. The minifilters will not enforce
            // rate limits until we install the RAPs, which is done in the Start() routine.
            //
        }