private static int[] CalculateElementsFrequencies<T>(IList<T> elements, BitmapWrapper bitmapWrapper)
{
    int frequencesSize = ProjectConstants.BlockSize * ProjectConstants.GridSize;
    var elementsFrequencies = new int[frequencesSize];

    using (var cuda = new CUDA(0, true))
    {
        var path = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), "apriori_count1.cubin");
        cuda.LoadModule(path);

        var inputData = cuda.CopyHostToDevice(bitmapWrapper.RgbValues);
        var inputSetData = cuda.CopyHostToDevice(elements.ToArray());
        var answer = cuda.Allocate(new int[frequencesSize]);

        CallTheFrequencyCount(cuda, inputData, inputSetData, answer, bitmapWrapper, elements.Count);

        cuda.CopyDeviceToHost(answer, elementsFrequencies);

        cuda.Free(inputData);
        cuda.Free(inputSetData); // was missing: free the element-set buffer as well
        cuda.Free(answer);
        cuda.UnloadModule();
    }

    return elementsFrequencies;
}
// It's interesting to change the number of blocks and the number of threads to
// understand how to keep the hardware busy.
//
// Here are some numbers I get on my G80:
//     blocks - clocks
//     1      - 3096
//     8      - 3232
//     16     - 3364
//     32     - 4615
//     64     - 9981
//
// With less than 16 blocks some of the multiprocessors of the device are idle. With
// more than 16 you are using all the multiprocessors, but there's only one block per
// multiprocessor and that doesn't allow you to hide the latency of the memory. With
// more than 32 the speed scales linearly.
static void Main(string[] args)
{
    // Init CUDA, select 1st device.
    CUDA cuda = new CUDA(0, true);

    // load module
    cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "clock_kernel.cubin"));
    CUfunction func = cuda.GetModuleFunction("timedReduction");

    int[] timer = new int[NUM_BLOCKS * 2];
    float[] input = new float[NUM_THREADS * 2];

    for (int i = 0; i < NUM_THREADS * 2; i++)
    {
        input[i] = (float)i;
    }

    CUdeviceptr dinput = cuda.CopyHostToDevice<float>(input);
    CUdeviceptr doutput = cuda.Allocate((uint)(sizeof(float) * NUM_BLOCKS));
    CUdeviceptr dtimer = cuda.Allocate<int>(timer);

    cuda.SetParameter(func, 0, (uint)dinput.Pointer);
    cuda.SetParameter(func, IntPtr.Size, (uint)doutput.Pointer);
    cuda.SetParameter(func, IntPtr.Size * 2, (uint)dtimer.Pointer);
    cuda.SetParameterSize(func, (uint)(IntPtr.Size * 3));

    //timedReduction<<<NUM_BLOCKS, NUM_THREADS, sizeof(float) * 2 * NUM_THREADS>>>(dinput, doutput, dtimer);
    cuda.SetFunctionBlockShape(func, NUM_THREADS, 1, 1);
    cuda.SetFunctionSharedSize(func, (uint)(sizeof(float) * 2 * NUM_THREADS));
    cuda.Launch(func, NUM_BLOCKS, 1);

    cuda.CopyDeviceToHost<int>(dtimer, timer);

    cuda.Free(dinput);
    cuda.Free(doutput);
    cuda.Free(dtimer);

    foreach (int i in timer)
    {
        Console.WriteLine(i);
    }

    Console.WriteLine("Test PASSED");

    int minStart = timer[0];
    int maxEnd = timer[NUM_BLOCKS];

    for (int i = 1; i < NUM_BLOCKS; i++)
    {
        minStart = timer[i] < minStart ? timer[i] : minStart;
        maxEnd = timer[NUM_BLOCKS + i] > maxEnd ? timer[NUM_BLOCKS + i] : maxEnd;
    }

    Console.WriteLine("time = {0}", maxEnd - minStart);
}
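// For reference, a device-side sketch of the "timedReduction" kernel that
// clock_kernel.cubin is built from, following the CUDA SDK "clock" sample the host
// code above mirrors (parameter order: input, output, timer; shared memory sized
// sizeof(float) * 2 * NUM_THREADS). Treat it as an illustration, not necessarily
// the exact shipped kernel.
extern "C" __global__ void timedReduction(const float *input, float *output, int *timer)
{
    // dynamically sized: 2 * blockDim.x floats
    extern __shared__ float shared[];

    const int tid = threadIdx.x;
    const int bid = blockIdx.x;

    // record the per-block start clock
    if (tid == 0) timer[bid] = (int)clock();

    // each thread loads two elements
    shared[tid] = input[tid];
    shared[tid + blockDim.x] = input[tid + blockDim.x];

    // tree reduction to find the minimum
    for (int d = blockDim.x; d > 0; d /= 2)
    {
        __syncthreads();
        if (tid < d)
        {
            float f0 = shared[tid];
            float f1 = shared[tid + d];
            if (f1 < f0) shared[tid] = f1;
        }
    }

    if (tid == 0) output[bid] = shared[0];

    __syncthreads();

    // record the per-block end clock; the host computes maxEnd - minStart
    if (tid == 0) timer[bid + gridDim.x] = (int)clock();
}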
//
// A sorting network is a sorting algorithm where the sequence of comparisons
// is not data-dependent. That makes them suitable for parallel implementations.
//
// Bitonic sort is one of the fastest sorting networks, consisting of O(n log^2 n)
// comparators. It has a simple implementation and it's very efficient when sorting
// a small number of elements:
//
// http://citeseer.ist.psu.edu/blelloch98experimental.html
//
// This implementation is based on:
//
// http://www.tools-of-computing.com/tc/CS/Sorts/bitonic_sort.htm
//
static void Main(string[] args)
{
    const int NUM = 256;

    // Init CUDA, select 1st device.
    CUDA cuda = new CUDA(0, true);

    // create values
    int[] values = new int[NUM];
    Random rand = new Random();
    for (int i = 0; i < NUM; i++)
    {
        values[i] = rand.Next();
    }

    // allocate memory and copy to device
    CUdeviceptr dvalues = cuda.CopyHostToDevice<int>(values);

    // load module
    cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "bitonic.cubin"));
    CUfunction func = cuda.GetModuleFunction("bitonicSort");

    cuda.SetParameter(func, 0, (uint)dvalues.Pointer);
    cuda.SetParameterSize(func, (uint)IntPtr.Size);

    //bitonicSort<<<1, NUM, sizeof(int) * NUM>>>(dvalues);
    cuda.SetFunctionBlockShape(func, NUM, 1, 1);
    cuda.SetFunctionSharedSize(func, sizeof(int) * NUM);
    cuda.Launch(func, 1, 1);

    cuda.CopyDeviceToHost<int>(dvalues, values);
    cuda.Free(dvalues);

    bool passed = true;
    for (int i = 1; i < NUM; i++)
    {
        if (values[i - 1] > values[i])
        {
            passed = false;
            break;
        }
    }

    Console.WriteLine("Test {0}", passed ? "PASSED" : "FAILED");
}
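// For reference, a device-side sketch of the "bitonicSort" kernel in bitonic.cubin,
// following the CUDA SDK bitonic sample this host code mirrors: a single block of
// NUM threads sorts NUM ints entirely in shared memory (the host sets the shared
// size to sizeof(int) * NUM). A sketch, not necessarily the exact shipped kernel.
__device__ inline void swap(int &a, int &b)
{
    int tmp = a;
    a = b;
    b = tmp;
}

extern "C" __global__ void bitonicSort(int *values)
{
    extern __shared__ int shared[];

    const unsigned int tid = threadIdx.x;

    // copy input to shared memory
    shared[tid] = values[tid];
    __syncthreads();

    // parallel bitonic sort
    for (unsigned int k = 2; k <= blockDim.x; k *= 2)
    {
        // bitonic merge
        for (unsigned int j = k / 2; j > 0; j /= 2)
        {
            unsigned int ixj = tid ^ j;
            if (ixj > tid)
            {
                if ((tid & k) == 0)
                {
                    if (shared[tid] > shared[ixj]) swap(shared[tid], shared[ixj]);
                }
                else
                {
                    if (shared[tid] < shared[ixj]) swap(shared[tid], shared[ixj]);
                }
            }
            __syncthreads();
        }
    }

    // write the sorted block back
    values[tid] = shared[tid];
}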
private static int[] CalculateCandidatesFrequencies<T>(IList<List<int>> candidates, BitmapWrapper bitmapWrapper, double borderValue)
{
    using (var cuda = new CUDA(0, true))
    {
        var elementsFrequencies = new int[candidates.Count];
        var inputSets = new int[candidates.Count * candidates[0].Count];

        var ind = 0;
        foreach (var candidate in candidates)
        {
            foreach (var i in candidate)
            {
                inputSets[ind] = i;
                ind++;
            }
        }

        var path = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), Names.CudaAprioriCountModule);
        cuda.LoadModule(path);

        var inputData = cuda.CopyHostToDevice(bitmapWrapper.RgbValues);
        var inputSetData = cuda.CopyHostToDevice(inputSets);
        var frequenciesOnHost = cuda.Allocate(new int[candidates.Count]);

        var sw = new Stopwatch();
        sw.Start();
        CallTheSetsFrequenciesCount(cuda, inputData, inputSetData, frequenciesOnHost, bitmapWrapper, candidates[0].Count, candidates.Count, borderValue);
        sw.Stop();
        //Console.WriteLine("CalculateCandidatesFrequencies: {0} ms", sw.ElapsedMilliseconds);

        cuda.CopyDeviceToHost(frequenciesOnHost, elementsFrequencies);

        cuda.Free(inputData);
        cuda.Free(inputSetData);
        cuda.Free(frequenciesOnHost);
        cuda.UnloadModule();

        return elementsFrequencies;
    }
}
private int[] CalculateElementsFrequencies(BitmapWrapper bitmapWrapper)
{
    var elementsFrequencies = new int[bitmapWrapper.Width];

    using (var cuda = new CUDA(0, true))
    {
        var path = Path.Combine(Path.GetDirectoryName(Assembly.GetExecutingAssembly().Location), Names.CudaAprioriCountModule);
        cuda.LoadModule(path);

        var inputData = cuda.CopyHostToDevice(bitmapWrapper.RgbValues);
        var answer = cuda.Allocate(new int[bitmapWrapper.Width]);

        CallTheFrequencyCount(cuda, inputData, answer, bitmapWrapper);

        cuda.CopyDeviceToHost(answer, elementsFrequencies);

        cuda.Free(inputData);
        cuda.Free(answer);
        cuda.UnloadModule();
    }

    return elementsFrequencies;
}
static void Main(string[] args)
{
    // Init and select 1st device.
    CUDA cuda = new CUDA(0, true);

    // load module
    //cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "transpose_kernel.cubin"));
    cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "transpose_kernel.ptx"));
    CUfunction transpose = cuda.GetModuleFunction("transpose");
    CUfunction transpose_naive = cuda.GetModuleFunction("transpose_naive");

    const int size_x = 4096;
    const int size_y = 4096;
    const int mem_size = sizeof(float) * size_x * size_y;

    float[] h_idata = new float[size_x * size_y];
    for (int i = 0; i < h_idata.Length; i++)
    {
        h_idata[i] = (float)i;
    }

    // allocate device memory and copy host memory to device
    CUdeviceptr d_idata = cuda.CopyHostToDevice<float>(h_idata);
    CUdeviceptr d_odata = cuda.Allocate<float>(h_idata);

    // setup execution parameters
    cuda.SetFunctionBlockShape(transpose_naive, BLOCK_DIM, BLOCK_DIM, 1);
    cuda.SetParameter(transpose_naive, 0, (uint)d_odata.Pointer);
    cuda.SetParameter(transpose_naive, IntPtr.Size, (uint)d_idata.Pointer);
    cuda.SetParameter(transpose_naive, IntPtr.Size * 2, (uint)size_x);
    cuda.SetParameter(transpose_naive, IntPtr.Size * 2 + 4, (uint)size_y);
    cuda.SetParameterSize(transpose_naive, (uint)(IntPtr.Size * 2 + 8));

    cuda.SetFunctionBlockShape(transpose, BLOCK_DIM, BLOCK_DIM, 1);
    cuda.SetParameter(transpose, 0, (uint)d_odata.Pointer);
    cuda.SetParameter(transpose, IntPtr.Size, (uint)d_idata.Pointer);
    cuda.SetParameter(transpose, IntPtr.Size * 2, (uint)size_x);
    cuda.SetParameter(transpose, IntPtr.Size * 2 + 4, (uint)size_y);
    cuda.SetParameterSize(transpose, (uint)(IntPtr.Size * 2 + 8));

    // warmup so we don't time CUDA startup
    cuda.Launch(transpose_naive, size_x / BLOCK_DIM, size_y / BLOCK_DIM);
    cuda.Launch(transpose, size_x / BLOCK_DIM, size_y / BLOCK_DIM);
    //System.Threading.Thread.Sleep(10);

    int numIterations = 100;
    Console.WriteLine("Transposing a {0} by {1} matrix of floats...", size_x, size_y);

    CUevent start = cuda.CreateEvent();
    CUevent end = cuda.CreateEvent();

    cuda.RecordEvent(start);
    for (int i = 0; i < numIterations; i++)
    {
        cuda.Launch(transpose_naive, size_x / BLOCK_DIM, size_y / BLOCK_DIM);
    }
    cuda.SynchronizeContext();
    cuda.RecordEvent(end);
    cuda.SynchronizeContext();

    float naiveTime = cuda.ElapsedTime(start, end);
    Console.WriteLine("Naive transpose average time: {0} ms\n", naiveTime / numIterations);

    cuda.RecordEvent(start);
    for (int i = 0; i < numIterations; i++)
    {
        cuda.Launch(transpose, size_x / BLOCK_DIM, size_y / BLOCK_DIM);
    }
    cuda.SynchronizeContext();
    cuda.RecordEvent(end);
    cuda.SynchronizeContext();

    float optimizedTime = cuda.ElapsedTime(start, end);
    Console.WriteLine("Optimized transpose average time: {0} ms\n", optimizedTime / numIterations);

    float[] h_odata = new float[size_x * size_y];
    cuda.CopyDeviceToHost<float>(d_odata, h_odata);

    float[] reference = new float[size_x * size_y];
    computeGold(reference, h_idata, size_x, size_y);

    bool res = CompareF(reference, h_odata, size_x * size_y);
    Console.WriteLine("Test {0}", res ? "PASSED" : "FAILED");

    cuda.Free(d_idata);
    cuda.Free(d_odata);

    Console.ReadKey();
}
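// For reference, a sketch of the two kernels in transpose_kernel.ptx, following the
// CUDA SDK transpose sample the host code above mirrors (BLOCK_DIM is assumed to be
// 16, matching the 16x16 block shape set on the host). transpose_naive writes with a
// large stride; the optimized transpose stages a tile in shared memory so both the
// read and the write are coalesced. A sketch, not necessarily the exact shipped code.
#define BLOCK_DIM 16

extern "C" __global__ void transpose_naive(float *odata, float *idata, int width, int height)
{
    unsigned int xIndex = blockDim.x * blockIdx.x + threadIdx.x;
    unsigned int yIndex = blockDim.y * blockIdx.y + threadIdx.y;

    if (xIndex < width && yIndex < height)
    {
        unsigned int index_in  = xIndex + width * yIndex;
        unsigned int index_out = yIndex + height * xIndex;   // strided, uncoalesced write
        odata[index_out] = idata[index_in];
    }
}

extern "C" __global__ void transpose(float *odata, float *idata, int width, int height)
{
    // +1 column avoids shared-memory bank conflicts when the tile is read transposed
    __shared__ float block[BLOCK_DIM][BLOCK_DIM + 1];

    // read the tile with coalesced accesses
    unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
    unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
    if (xIndex < width && yIndex < height)
        block[threadIdx.y][threadIdx.x] = idata[yIndex * width + xIndex];

    __syncthreads();

    // write the transposed tile, again with coalesced accesses
    xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
    yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
    if (xIndex < height && yIndex < width)
        odata[yIndex * height + xIndex] = block[threadIdx.x][threadIdx.y];
}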
static private void Worker(object cState)
{
    try
    {
        Command cCmd;
        CUDA cCUDA = new CUDA(true);
        for (int i = 0; i < 10; i++)
        {
            try
            {
                cCUDA.CreateContext(i);
                (new Logger()).WriteDebug2(i + ": success");
                break;
            }
            catch (Exception ex)
            {
                (new Logger()).WriteDebug2(i + ": failed");
                if (Logger.bDebug && Logger.Level.debug3 > Logger.eLevelMinimum)
                    (new Logger()).WriteError(ex);
            }
        }
        uint nMemoryReservedForMerge = 2 * 1024 * 1024;          //PREFERENCES, something like <memory reserved="2097152" />
        uint nMemoryStarvationThreshold = cCUDA.TotalMemory / 2; //PREFERENCES as a percentage, something like <memory starvation="50%" />
        uint nMemoryFree;
        string sModule = "CUDAFunctions_" + Preferences.nCUDAVersion + "_x" + (IntPtr.Size * 8);
        if (Logger.bDebug)
            (new Logger()).WriteDebug3(sModule);
        cCUDA.LoadModule((byte[])Properties.Resource.ResourceManager.GetObject(sModule)); // $(ProjectDir)Resources\CUDAFunctions.cubin
        //cCUDA.LoadModule(@"c:\projects\!helpers\video\PixelsMap\Resources\CUDAFunctions.cubin");
        CUfunction cCUDAFuncMerge = cCUDA.GetModuleFunction("CUDAFrameMerge");
        // had to reduce from 512 to 256 threads per block: after "motion" and float operations were added,
        // the kernel failed with "Too Many Resources Requested for Launch" (this error means that the number
        // of registers available on the multiprocessor is being exceeded; reduce the number of threads per
        // block to solve the problem)
        int nThreadsPerBlock = 256;
        cCUDA.SetFunctionBlockShape(cCUDAFuncMerge, nThreadsPerBlock, 1, 1);
        CUDADriver.cuParamSetSize(cCUDAFuncMerge, 8);
        Dictionary<ulong, CUdeviceptr> ahDevicePointers = new Dictionary<ulong, CUdeviceptr>();

        CUdeviceptr cPMs;
        CUdeviceptr cInfos;
        CUdeviceptr cAlphaMap;
        {
            //IntPtr[] aPointersByAlpha = new IntPtr[254];      // the alpha pointers; each references an array of background (B) pointers
            //IntPtr[] aPointersByBackground = new IntPtr[256]; // the arrays of background (B) pointers
            byte[] aAlphaMap = new byte[16646144];
            int nResult, nIndx = 0;
            for (byte nAlpha = 1; 255 > nAlpha; nAlpha++)
            {
                for (ushort nBackground = 0; 256 > nBackground; nBackground++)
                {
                    for (ushort nForeground = 0; 256 > nForeground; nForeground++)
                    {
                        if (255 < (nResult = (int)((float)(nAlpha * (nForeground - nBackground)) / 255 + nBackground + 0.5)))
                            nResult = 255;
                        aAlphaMap[nIndx++] = (byte)nResult;
                    }
                    //aPointersByBackground[nBackground] = (IntPtr)cCUDA.CopyHostToDevice<byte>(aResults).Pointer;
                }
                //aPointersByAlpha[nAlpha - 1] = (IntPtr)cCUDA.CopyHostToDevice<IntPtr>(aPointersByBackground).Pointer;
            }
            cAlphaMap = cCUDA.CopyHostToDevice<byte>(aAlphaMap);
        }
        //{
        //    IntPtr[] aPointersByAlpha = new IntPtr[254];      // the alpha pointers; each references an array of background (B) pointers
        //    IntPtr[] aPointersByBackground = new IntPtr[256]; // the arrays of background (B) pointers
        //    byte[] aResults = new byte[256];
        //    int nResult;
        //    for (byte nAlpha = 1; 255 > nAlpha; nAlpha++)
        //    {
        //        for (ushort nBackground = 0; 256 > nBackground; nBackground++)
        //        {
        //            for (ushort nForeground = 0; 256 > nForeground; nForeground++)
        //            {
        //                if (255 < (nResult = (int)((float)(nAlpha * (nForeground - nBackground)) / 255 + nBackground + 0.5)))
        //                    nResult = 255;
        //                aResults[nForeground] = (byte)nResult;
        //            }
        //            aPointersByBackground[nBackground] = (IntPtr)cCUDA.CopyHostToDevice<byte>(aResults).Pointer;
        //        }
        //        aPointersByAlpha[nAlpha - 1] = (IntPtr)cCUDA.CopyHostToDevice<IntPtr>(aPointersByBackground).Pointer;
        //    }
        //    cAlphaMap = cCUDA.CopyHostToDevice<IntPtr>(aPointersByAlpha);
        //}
#if DEBUG
        Dictionary<ulong, DateTime> ahDebug = new Dictionary<ulong, DateTime>();
#endif
        DateTime dtNextTime = DateTime.MinValue, dtNow;
        long nStartTick; // logging
        while (true)
        {
            if (1 > _aqCommands.CountGet() && (dtNow = DateTime.Now) > dtNextTime)
            {
                dtNextTime = dtNow.AddSeconds(60);
#if DEBUG
                dtNow = dtNow.Subtract(TimeSpan.FromHours(2));
                string sMessage = "";
                foreach (ulong nID in ahDebug.Keys)
                    if (dtNow > ahDebug[nID])
                        sMessage += "<br>[" + nID + ":" + ahDebug[nID].ToString("HH:mm:ss") + "]";
#endif
                (new Logger()).WriteDebug("CUDA free memory:" + cCUDA.FreeMemory
#if DEBUG
                    + "; possibly timeworn allocations:" + (1 > sMessage.Length ? "no" : sMessage)
#endif
                );
            }
            while (true)
            {
                try
                {
                    cCmd = _aqCommands.Dequeue(); // blocks (sleeps) when there is nothing to dequeue
                    break;
                }
                catch (Exception ex)
                {
                    (new Logger()).WriteError(ex);
                }
            }
            _CommandsCount = _aqCommands.nCount;
            switch (cCmd.eID)
            {
                case Command.ID.Allocate:
                    #region
                    try
                    {
                        cCmd.cPM._cException = null;
                        if (1 > cCmd.cPM._nID)
                        {
                            if (0 < cCmd.cPM._nBytesQty)
                            {
                                nMemoryFree = cCUDA.FreeMemory;
                                if (nMemoryReservedForMerge < nMemoryFree - cCmd.cPM._nBytesQty)
                                {
                                    bMemoryStarvation = (nMemoryFree < nMemoryStarvationThreshold);
                                    cCmd.cPM._nID = _nCurrentID++;
                                    ahDevicePointers.Add(cCmd.cPM._nID, cCUDA.Allocate(cCmd.cPM._nBytesQty));
#if DEBUG
                                    ahDebug.Add(cCmd.cPM._nID, DateTime.Now);
#endif
                                }
                                else
                                {
                                    bMemoryStarvation = true;
                                    throw new Exception("out of memory in CUDA device during Allocate. Only 2 MBytes reserved for the Merge");
                                }
                            }
                            else
                                throw new Exception("bytes quantity in PixelsMap have to be greater than zero for Allocate [_bDisposed = " + cCmd.cPM._bDisposed + "][_bProcessing = " + cCmd.cPM._bProcessing + "][_bShiftVertical = " + cCmd.cPM._bShiftVertical + "][_bTemp = " + cCmd.cPM._bTemp + "][_dt = " + cCmd.cPM._dt + "][_nBytesQty = " + cCmd.cPM._nBytesQty + "][_nID = " + cCmd.cPM._nID + "][_nShiftPosition = " + cCmd.cPM._nShiftPosition + "][_stArea.nHeight = " + cCmd.cPM._stArea.nHeight + "][_stArea.nWidth = " + cCmd.cPM._stArea.nWidth + "][bKeepAlive = " + cCmd.cPM.bKeepAlive + "][bBackgroundClear = " + cCmd.cPM.bBackgroundClear + "][eAlpha = " + cCmd.cPM.eAlpha + "][bCUDA = " + cCmd.cPM.bCUDA + "][nAlphaConstant = " + cCmd.cPM.nAlphaConstant + "][nID = " + cCmd.cPM.nID + "][nLength = " + cCmd.cPM.nLength + "][stArea.nHeight = " + cCmd.cPM.stArea.nHeight + "][stArea.nWidth = " + cCmd.cPM.stArea.nWidth + "]");
                        }
                        else
                            throw new Exception("PixelsMap ID have to be zero for Allocate");
                    }
                    catch (Exception ex)
                    {
                        if (ex is CUDAException)
                            ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex);
                        (new Logger()).WriteError(ex);
                        (new Logger()).WriteDebug("bytes qty:" + cCmd.cPM._nBytesQty);
                        cCmd.cPM._cException = ex;
                    }
                    cCmd.cMRE.Set();
                    break;
                    #endregion
                case Command.ID.CopyIn:
                    #region
                    nStartTick = DateTime.Now.Ticks; // logging
                    try
                    {
                        cCmd.cPM._cException = null;
                        if (1 > cCmd.cPM._nID)
                        {
                            if (cCUDA.FreeMemory - cCmd.cPM._nBytesQty > nMemoryReservedForMerge)
                            {
                                cCmd.cPM._nID = _nCurrentID++;
                                if (cCmd.ahParameters.ContainsKey(typeof(IntPtr)))
                                    ahDevicePointers.Add(cCmd.cPM._nID, cCUDA.CopyHostToDevice((IntPtr)cCmd.ahParameters[typeof(IntPtr)], cCmd.cPM._nBytesQty));
                                else if (cCmd.ahParameters.ContainsKey(typeof(byte[])))
                                    ahDevicePointers.Add(cCmd.cPM._nID, cCUDA.CopyHostToDevice((byte[])cCmd.ahParameters[typeof(byte[])]));
                                else
                                    throw new Exception("unknown parameter type");
#if DEBUG
                                ahDebug.Add(cCmd.cPM._nID, DateTime.Now);
#endif
                            }
                            else
                                throw new Exception("out of memory in CUDA device during CopyIn. Only 2 MBytes reserved for the Merge.");
                        }
                        else
                        {
                            if (cCmd.ahParameters.ContainsKey(typeof(IntPtr)))
                                cCUDA.CopyHostToDevice(ahDevicePointers[cCmd.cPM._nID], (IntPtr)cCmd.ahParameters[typeof(IntPtr)], cCmd.cPM._nBytesQty);
                            else if (cCmd.ahParameters.ContainsKey(typeof(byte[])))
                                cCUDA.CopyHostToDevice(ahDevicePointers[cCmd.cPM._nID], (byte[])cCmd.ahParameters[typeof(byte[])]);
                            else
                                throw new Exception("unknown parameter type");
                        }
                        if (ahDevicePointers.ContainsKey(cCmd.cPM._nID))
                            (new Logger()).WriteDebug5("copy in [id:" + cCmd.cPM._nID + "][ptr:" + ahDevicePointers[cCmd.cPM._nID].Pointer + "]");
                        else
                            (new Logger()).WriteDebug5("copy in [id:" + cCmd.cPM._nID + "][ptr: not in dictionary]");
                    }
                    catch (Exception ex)
                    {
                        if (ex is CUDAException)
                            ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex);
                        (new Logger()).WriteError(ex);
                        cCmd.cPM._cException = ex;
                    }
                    if (new TimeSpan(DateTime.Now.Ticks - nStartTick).TotalMilliseconds >= 20) // logging
                        (new Logger()).WriteNotice("PixelMap: Command.ID.CopyIn: execution time > 20ms: " + new TimeSpan(DateTime.Now.Ticks - nStartTick).TotalMilliseconds + "ms"); // logging
                    cCmd.cMRE.Set();
                    break;
                    #endregion
                case Command.ID.CopyOut:
                    #region
                    nStartTick = DateTime.Now.Ticks; // logging
                    try
                    {
                        if (0 < cCmd.cPM._nID)
                        {
                            if (!cCmd.ahParameters.ContainsKey(typeof(IntPtr)))
                            {
                                if (cCmd.ahParameters.ContainsKey(typeof(byte[])))
                                {
                                    cCmd.cPM._aBytes = (byte[])cCmd.ahParameters[typeof(byte[])];
                                    if (cCmd.cPM._nBytesQty != cCmd.cPM._aBytes.Length)
                                        (new Logger()).WriteWarning("wrong array size for copyout [got:" + cCmd.cPM._aBytes.Length + "][expected:" + cCmd.cPM._nBytesQty + "]");
                                }
                                else
                                    cCmd.cPM._aBytes = new byte[cCmd.cPM._nBytesQty];
                                cCUDA.CopyDeviceToHost<byte>(ahDevicePointers[cCmd.cPM._nID], cCmd.cPM._aBytes);
                            }
                            else
                                cCUDA.CopyDeviceToHost(ahDevicePointers[cCmd.cPM._nID], (IntPtr)cCmd.ahParameters[typeof(IntPtr)], cCmd.cPM._nBytesQty);
                            (new Logger()).WriteDebug5("copy out [id:" + cCmd.cPM._nID + "][ptr:" + ahDevicePointers[cCmd.cPM._nID].Pointer + "]");
                        }
                        else
                            throw new Exception("PixelsMap have to be allocated for CopyOut");
                    }
                    catch (Exception ex)
                    {
                        if (ex is CUDAException)
                            ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex);
                        (new Logger()).WriteError(ex);
                        cCmd.cPM._cException = ex;
                    }
                    if (new TimeSpan(DateTime.Now.Ticks - nStartTick).TotalMilliseconds >= 20) // logging
                        (new Logger()).WriteNotice("PixelMap: Command.ID.CopyOut: execution time > 20ms: " + new TimeSpan(DateTime.Now.Ticks - nStartTick).TotalMilliseconds + "ms"); // logging
                    cCmd.cMRE.Set();
                    break;
                    #endregion
                case Command.ID.Merge:
                    #region
                    try
                    {
                        List<PixelsMap> aPMs = (List<PixelsMap>)cCmd.ahParameters[typeof(List<PixelsMap>)];
                        DisCom.MergeInfo cMergeInfo = (DisCom.MergeInfo)cCmd.ahParameters[typeof(DisCom.MergeInfo)];
                        List<IntPtr> aDPs = new List<IntPtr>();
                        if (1 > cCmd.cPM._nID)
                            throw new Exception("background PixelsMap have to be allocated for Merge");
                        aDPs.Add((IntPtr)ahDevicePointers[cCmd.cPM._nID].Pointer);
                        for (int nIndx = 0; nIndx < aPMs.Count; nIndx++)
                        {
                            if (!ahDevicePointers.ContainsKey(aPMs[nIndx]._nID))
                                throw new Exception("there is a corrupted ID in layers for merge [id:" + aPMs[nIndx]._nID + "]");
                            if (1 > ahDevicePointers[aPMs[nIndx]._nID].Pointer)
                                throw new Exception("there is an empty pointer in layers for merge [id:" + aPMs[nIndx]._nID + "]");
                            aDPs.Add((IntPtr)ahDevicePointers[aPMs[nIndx]._nID].Pointer);
                        }
                        cPMs = cCUDA.CopyHostToDevice<IntPtr>(aDPs.ToArray());
                        cInfos = cCUDA.CopyHostToDevice(cMergeInfo, cMergeInfo.SizeGet());
                        cCUDA.SetParameter<IntPtr>(cCUDAFuncMerge, 0, (IntPtr)cPMs.Pointer);
                        cCUDA.SetParameter<IntPtr>(cCUDAFuncMerge, IntPtr.Size, (IntPtr)cInfos.Pointer);
                        cCUDA.SetParameter<IntPtr>(cCUDAFuncMerge, IntPtr.Size * 2, (IntPtr)cAlphaMap.Pointer);
                        cCUDA.SetParameterSize(cCUDAFuncMerge, (uint)(IntPtr.Size * 3));
                        int nIterations = (0 == cMergeInfo.nBackgroundSize % nThreadsPerBlock ? cMergeInfo.nBackgroundSize / nThreadsPerBlock : cMergeInfo.nBackgroundSize / nThreadsPerBlock + 1);
                        cCUDA.Launch(cCUDAFuncMerge, nIterations, 1);
                        cCmd.cMRE.Set();
                        cMergeInfo.Dispose();
                        cCUDA.Free(cPMs);
                        cCUDA.Free(cInfos);
                        for (int nIndx = 0; nIndx < aPMs.Count; nIndx++)
                        {
                            lock (aPMs[nIndx]._cSyncRoot)
                                aPMs[nIndx]._bProcessing = false;
                            aPMs[nIndx].Dispose();
                        }
                    }
                    catch (Exception ex)
                    {
                        cCmd.cMRE.Set();
                        if (ex is CUDAException)
                            ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex);
                        (new Logger()).WriteError(ex);
                        cCmd.cPM._cException = ex;
                    }
                    break;
                    #endregion
                case Command.ID.Dispose:
                    #region
                    nStartTick = DateTime.Now.Ticks; // logging
                    (new Logger()).Write(Logger.Level.debug2, "dispose: in");
                    try
                    {
                        if (ahDevicePointers.ContainsKey(cCmd.cPM._nID))
                        {
                            if (0 < cCmd.cPM._nID && 0 < ahDevicePointers[cCmd.cPM._nID].Pointer)
                            {
                                cCUDA.Free(ahDevicePointers[cCmd.cPM._nID]);
                                //cCUDA.SynchronizeContext();
                                bMemoryStarvation = (cCUDA.FreeMemory < nMemoryStarvationThreshold);
                                (new Logger()).WriteDebug3("dispose [id:" + cCmd.cPM._nID + "][ptr:" + ahDevicePointers[cCmd.cPM._nID].Pointer + "]");
                            }
                            ahDevicePointers.Remove(cCmd.cPM._nID);
#if DEBUG
                            ahDebug.Remove(cCmd.cPM._nID);
#endif
                            cCmd.cPM._nID = 0;
                        }
                    }
                    catch (Exception ex)
                    {
                        if (ex is CUDAException)
                            ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex);
                        (new Logger()).WriteError(ex);
                        cCmd.cPM._cException = ex;
                    }
                    (new Logger()).Write(Logger.Level.debug2, "dispose: out");
                    if (new TimeSpan(DateTime.Now.Ticks - nStartTick).TotalMilliseconds >= 20) // logging
                        (new Logger()).WriteNotice("PixelMap: Command.ID.Dispose: execution time > 20ms: " + new TimeSpan(DateTime.Now.Ticks - nStartTick).TotalMilliseconds + "ms"); // logging
                    break;
                    #endregion
            }
        }
    }
    catch (Exception ex)
    {
        (new Logger()).WriteError(ex);
    }
}
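// The real CUDAFrameMerge kernel is shipped as a compiled resource (CUDAFunctions_*)
// and is not shown in this code, so the signature, MergeInfo handling and layer loop
// below are assumptions. What the sketch illustrates is how the 254*256*256-byte table
// built in the host loop above is meant to be used on the device: instead of computing
//     result = clamp(alpha * (fg - bg) / 255 + bg + 0.5)
// per byte, the blended value is looked up at
//     alphaMap[((alpha - 1) * 256 + bg) * 256 + fg]
extern "C" __global__ void FrameMergeSketch(unsigned char **frames,          // frames[0] = background, frames[1..layerCount] = foreground layers
                                            const unsigned char *alphaMap,   // the precomputed 254 x 256 x 256 table
                                            int layerCount,
                                            int backgroundSize,              // bytes in the background frame
                                            unsigned char alpha)             // constant alpha, 1..254 (hypothetical parameter)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i >= backgroundSize)
        return;

    unsigned char bg = frames[0][i];
    for (int l = 1; l <= layerCount; l++)
    {
        unsigned char fg = frames[l][i];
        if (alpha == 255)
            bg = fg;                                               // fully opaque layer
        else if (alpha > 0)
            bg = alphaMap[((alpha - 1) * 256 + bg) * 256 + fg];    // table lookup instead of float math
    }
    frames[0][i] = bg;
}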
/// <summary>
/// Solves the optimization problem
/// </summary>
/// <param name="y"></param>
/// <param name="si"></param>
private void Solve(float[] y, SolutionInfo si)
{
    // initialize gradient
    {
        int i;
        for (i = 0; i < problemSize; i++)
        {
            G[i] = -1;
            alpha[i] = 0;
        }
    }

    InitCudaModule();
    SetCudaData();

    // optimization step
    float GMaxI = 0;
    float GMaxJ = 0;
    iter = 1;

    while (iter < MaxIter)
    {
        // Find i: maximizes -y_i * grad(f)_i, i in I_up(\alpha),
        // and j: minimizes -y_j * grad(f)_j, j in I_down(\alpha)
        // (maximal violating pair approach)
        Tuple<int, float> maxIPair;
        Tuple<int, float> minJPair;
        FindMaxIMinJPair(out maxIPair, out minJPair);

        int i = maxIPair.Item1;
        GMaxI = maxIPair.Item2;
        int j = minJPair.Item1;
        GMaxJ = minJPair.Item2;

        if (GMaxI + GMaxJ < EPS)
        {
            break;
        }

        // Compute the i-th kernel column, set the specific memory region on GPU
        ComputeKernel(i, kiPtr, j, kjPtr);

        float old_alpha_i = alpha[i];
        float old_alpha_j = alpha[j];

        // update alpha - serial code, one iteration
        UpdateAlpha(i, j);

        // update gradient G
        float delta_alpha_i = alpha[i] - old_alpha_i;
        float delta_alpha_j = alpha[j] - old_alpha_j;
        UpdateGrad(i, j, delta_alpha_i, delta_alpha_j);

        // is this needed??? (update alpha_status and G_bar)
        iter++;
    } // end while

    cuda.CopyDeviceToHost(gradPtr, G);
    cuda.SynchronizeContext();

    // calculate rho
    si.rho = calculate_rho();

    // because we start from 1, not from 0
    si.iter = iter - 1;

    // calculate objective value
    {
        float v = 0;
        int i;
        for (i = 0; i < problemSize; i++)
        {
            v += alpha[i] * (G[i] - 1);
        }
        si.obj = v / 2;
    }

    si.upper_bound_p = C;
    si.upper_bound_n = C;
}
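// The UpdateGrad call above runs on the GPU; its kernel is not shown in this code, so
// the version below is only a sketch of the step it performs. After alpha_i and alpha_j
// change, every gradient entry is updated with the two kernel (Q) columns that
// ComputeKernel placed at kiPtr and kjPtr:
//     G[k] += delta_alpha_i * Q_i[k] + delta_alpha_j * Q_j[k]
extern "C" __global__ void UpdateGradSketch(float *grad,
                                            const float *kernelColI,
                                            const float *kernelColJ,
                                            float deltaAlphaI,
                                            float deltaAlphaJ,
                                            int problemSize)
{
    int k = blockIdx.x * blockDim.x + threadIdx.x;
    if (k < problemSize)
        grad[k] += deltaAlphaI * kernelColI[k] + deltaAlphaJ * kernelColJ[k];
}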
/// <summary>
/// Barzilai-Borwein solver
/// </summary>
/// <param name="sub_prob"></param>
/// <param name="w"></param>
/// <param name="epsilon"></param>
/// <param name="Cp"></param>
/// <param name="Cn"></param>
private void solve_l2r_l2_bb_svc_cuda(Problem<SparseVec> sub_prob, float[] w, double epsilon, double Cp, double Cn)
{
    float obj = float.PositiveInfinity;
    float step = 0.01f;

    int M = 10;
    float sig1 = 0.1f;
    float sig2 = 0.9f;
    float gamma = 10e-4f;
    float lambda = 0;
    float l_min = 10e-20f;
    float l_max = 10e20f;
    float[] func_vals = new float[M];
    float maxFuncVal = 0;

    int maxIter = 600;
    iter = 0;

    ComputeGradient(sub_prob);

    while (iter <= maxIter)
    {
        maxFuncVal = func_vals.Max();
        lambda = step;

        // nonmonotone line search
        for (int i = 0; i < 10; i++)
        {
            DoBBstep(-lambda, sub_prob);

            // compute objective
            obj = ComputeObjGPU(wTempVecPtr, alphaTmpPtr);

            // compute linear part: (alpha_tmp - alpha)' * grad
            float linPart = ComputeLinPart(gamma, lambda);

            if (obj <= (maxFuncVal + linPart))
            {
                int idx = (iter + 1) % M;
                func_vals[idx] = obj;
                break;
            }
            lambda = (sig1 * lambda + sig2 * lambda) / 2;
        }

        // swap alpha pointers
        var tmpPtr = alphaOldPtr.Pointer;
        alphaOldPtr.Pointer = alphaPtr.Pointer;
        alphaPtr.Pointer = alphaTmpPtr.Pointer;
        alphaTmpPtr.Pointer = tmpPtr;

        // swap w pointers
        var tempPtr = wVecPtr.Pointer;
        wVecPtr.Pointer = wTempVecPtr.Pointer;
        wTempVecPtr.Pointer = tempPtr;

        // change gradients (gradOldPtr = grad) and compute the new gradient
        float gradNorm = ComputeGradient(sub_prob);
        if (gradNorm < epsilon)
        {
            break;
        }

        // compute BB step
        step = ComputeBBStep();
        iter++;
    }

    cuda.CopyDeviceToHost(wVecPtr, w);
}
private static unsafe float[] CuDotProdSparseVecStruct()
{
    int sparseVecSize = sizeof(SparseVecPtr);
    uint size = (uint)(N * sizeof(SparseVecPtr));

    // always the same values
    Random rnd = new Random(1);

    CUDA cuda = new CUDA(0, true);

    // load module
    cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "structKernel.cubin"));
    //CUfunction structPassFunc = cuda.GetModuleFunction("DotProd");
    CUfunction structPassFunc = cuda.GetModuleFunction("DotProd2");

    SparseVecPtr[] vectors = new SparseVecPtr[N];
    Console.WriteLine("init and copy data");
    Stopwatch t = Stopwatch.StartNew();
    mainIndex = StartingIndex;

    for (int i = 0; i < N; i++)
    {
        vectors[i] = new SparseVecPtr();

        int vecSize = avgElements + i % stdElements;
        vectors[i].size = vecSize;
        float[] vals = Helpers.InitValues(i, vecSize, maxVal);
        int[] index = Helpers.InitIndices(i, vecSize, ref maxIndex);

        CUdeviceptr valsPtr = cuda.CopyHostToDevice(vals);
        CUdeviceptr idxPtr = cuda.CopyHostToDevice(index);

        vectors[i].indices = new IntPtr(idxPtr.Pointer);
        vectors[i].values = (IntPtr)valsPtr.Pointer;
    }

    GCHandle handle = GCHandle.Alloc(vectors, GCHandleType.Pinned);
    IntPtr ptr = handle.AddrOfPinnedObject();

    float[] output = new float[N];

    //CUdeviceptr dVectors = cuda.CopyHostToDevice(vectors);
    CUdeviceptr dVectors = cuda.CopyHostToDevice(ptr, size);
    CUdeviceptr dOutput = cuda.Allocate(output);

    Console.WriteLine("copy and init takes {0}", t.Elapsed);

    #region set cuda parameters
    cuda.SetFunctionBlockShape(structPassFunc, threadsPerBlock, 1, 1);

    int offset = 0;
    cuda.SetParameter(structPassFunc, offset, dVectors.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, dOutput.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, (uint)mainIndex);
    offset += sizeof(int);
    cuda.SetParameter(structPassFunc, offset, (uint)N);
    offset += sizeof(int);
    cuda.SetParameterSize(structPassFunc, (uint)offset);
    #endregion

    Console.WriteLine("start computation");

    CUevent start = cuda.CreateEvent();
    CUevent end = cuda.CreateEvent();

    Stopwatch timer = Stopwatch.StartNew();
    cuda.RecordEvent(start);
    cuda.Launch(structPassFunc, blocksPerGrid, 1);
    cuda.RecordEvent(end);
    cuda.SynchronizeContext();
    //cuda.SynchronizeEvent(end);
    timer.Stop();

    float naiveTime = cuda.ElapsedTime(start, end);
    Console.Write("Dot products with mainIndex {0} and {1}-vectors takes {2} ms stopwatch time {3} ms", mainIndex, N, naiveTime, timer.Elapsed);

    cuda.CopyDeviceToHost(dOutput, output);

    int lenght = Math.Min(displayCount, N);
    Console.WriteLine();
    for (int i = 0; i < lenght; i++)
    {
        Console.WriteLine("{0}-{1}", i, output[i]);
    }

    cuda.Free(dVectors);
    cuda.Free(dOutput);

    return output;
}
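// The "DotProd2" kernel lives in structKernel.cubin and is not shown here, so this is
// only a sketch. It assumes a device-side mirror of the C# SparseVecPtr struct (size
// plus device pointers to the index and value arrays; the exact field order and padding
// are assumptions) and that each vector's indices are sorted ascending. One thread
// handles one vector and computes its dot product with vectors[mainIndex].
struct SparseVecPtr
{
    int    size;
    int   *indices;
    float *values;
};

extern "C" __global__ void DotProdSketch(const SparseVecPtr *vectors, float *output,
                                         int mainIndex, int N)
{
    int idx = blockIdx.x * blockDim.x + threadIdx.x;
    if (idx >= N)
        return;

    SparseVecPtr a = vectors[idx];
    SparseVecPtr b = vectors[mainIndex];

    // merge-style walk over the two sorted index lists
    float dot = 0.0f;
    int i = 0, j = 0;
    while (i < a.size && j < b.size)
    {
        if (a.indices[i] == b.indices[j])
            dot += a.values[i++] * b.values[j++];
        else if (a.indices[i] < b.indices[j])
            i++;
        else
            j++;
    }
    output[idx] = dot;
}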
private static float[] CuDotProdEllPackTexCached()
{
    // always the same values
    Random rnd = new Random(1);

    CUDA cuda = new CUDA(0, true);

    // load module
    CUmodule module = cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "structKernel.cubin"));
    CUfunction structPassFunc = cuda.GetModuleFunction("DotProdEllPackCached");

    int maxRowSize = avgElements + stdElements - 1;

    Console.WriteLine("init arrays");
    Stopwatch t = Stopwatch.StartNew();

    float[] vecVals = new float[N * maxRowSize];
    int[] vecIdx = new int[N * maxRowSize];

    maxIndex = 0;
    for (int i = 0; i < N; i++)
    {
        int vecSize = avgElements + i % stdElements;
        float[] vals = Helpers.InitValues(i, vecSize, maxVal);

        // values use column-major alignment
        for (int z = 0; z < vals.Length; z++)
        {
            int m = z * N + i;
            vecVals[m] = vals[z];
        }
        //Array.Copy(vals, 0, vecVals, i * maxRowSize, vals.Length);

        int[] index = Helpers.InitIndices(i, vecSize, ref maxIndex);
        //Array.Copy(index, 0, vecIdx, i * maxRowSize, index.Length);
        for (int z = 0; z < index.Length; z++)
        {
            int m = z * N + i;
            vecIdx[m] = index[z];
        }
    }

    float[] mainVec = new float[maxIndex + 1];
    for (int j = 0; j < maxRowSize; j++)
    {
        int idx = vecIdx[mainIndex + N * j];
        float val = vecVals[mainIndex + N * j];
        mainVec[idx] = val;
    }

    Console.WriteLine("Init takes {0}", t.Elapsed);
    t.Start();

    CUdeviceptr valsPtr = cuda.CopyHostToDevice(vecVals);
    CUdeviceptr idxPtr = cuda.CopyHostToDevice(vecIdx);

    CUarray cuArr = cuda.CreateArray(mainVec);
    cuda.CopyHostToArray(cuArr, mainVec, 0);
    //CUDAArrayDescriptor cuDesc = new CUDAArrayDescriptor();
    //cuDesc.Format = CUArrayFormat.Float;
    //cuDesc.NumChannels = 1;
    //cuDesc.Width = maxIndex + 1;

    CUtexref cuTexRef = cuda.GetModuleTexture(module, "texRef");
    cuda.SetTextureFlags(cuTexRef, 0);
    cuda.SetTextureArray(cuTexRef, cuArr);

    float[] output = new float[N];
    CUdeviceptr dOutput = cuda.Allocate(output);

    Console.WriteLine("copy to device takes {0}", t.Elapsed);

    cuda.SetFunctionBlockShape(structPassFunc, threadsPerBlock, 1, 1);

    int offset = 0;
    cuda.SetParameter(structPassFunc, offset, valsPtr.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, idxPtr.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, dOutput.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, (uint)maxRowSize);
    offset += sizeof(int);
    cuda.SetParameter(structPassFunc, offset, (uint)N);
    offset += sizeof(int);
    cuda.SetParameterSize(structPassFunc, (uint)offset);

    Console.WriteLine("start computation");

    CUevent start = cuda.CreateEvent();
    CUevent end = cuda.CreateEvent();

    Stopwatch timer = Stopwatch.StartNew();
    cuda.RecordEvent(start);
    cuda.Launch(structPassFunc, blocksPerGrid, 1);
    cuda.RecordEvent(end);
    cuda.SynchronizeContext();
    //cuda.SynchronizeEvent(end);
    timer.Stop();

    float naiveTime = cuda.ElapsedTime(start, end);
    Console.Write("EllPack Cached Dot products with mainIndex {0} and {1}-vectors takes {2} ms stopwatch time {3} ms", mainIndex, N, naiveTime, timer.Elapsed);

    cuda.CopyDeviceToHost(dOutput, output);

    int lenght = Math.Min(displayCount, N);
    Console.WriteLine();
    for (int i = 0; i < lenght; i++)
    {
        Console.WriteLine("{0}-{1}", i, output[i]);
    }

    cuda.Free(valsPtr);
    cuda.Free(idxPtr);
    cuda.Free(dOutput);
    cuda.DestroyArray(cuArr);
    cuda.DestroyTexture(cuTexRef);

    return output;
}
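// A sketch of what "DotProdEllPackCached" computes; the shipped kernel in
// structKernel.cubin is not shown, so names and details are assumptions. The ELLPACK
// arrays are stored column-major (element z of row i at z * N + i, exactly as the host
// init loop writes them), rows are zero-padded up to maxRowSize, and the dense main
// vector is read through the 1D texture "texRef" that the host binds above.
texture<float, 1, cudaReadModeElementType> texRef;

extern "C" __global__ void DotProdEllPackCachedSketch(const float *vals, const int *colIdx,
                                                      float *output, int maxRowSize, int N)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N)
        return;

    float dot = 0.0f;
    for (int z = 0; z < maxRowSize; z++)
    {
        float v = vals[z * N + row];              // column-major ELLPACK slot (0 when padded)
        int   c = colIdx[z * N + row];
        dot += v * tex1D(texRef, c + 0.5f);       // main vector cached in the texture
    }
    output[row] = dot;
}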
private static void CuAddVec()
{
    int N = 50000;
    uint size = (uint)N * sizeof(float);

    CUDA cuda = new CUDA(0, true);

    // load module
    cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "structKernel.cubin"));
    CUfunction vecAddFunc = cuda.GetModuleFunction("VecAdd");

    float[] A = new float[N];
    float[] B = new float[N];
    float[] C = new float[N];
    for (int i = 0; i < A.Length; i++)
    {
        A[i] = (float)i;
        B[i] = (float)i + 0.1f;
    }

    CUdeviceptr dA = cuda.CopyHostToDevice(A);
    CUdeviceptr dB = cuda.CopyHostToDevice(B);
    CUdeviceptr dC = cuda.Allocate(A);

    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

    //error = cuFuncSetBlockShape(vecAdd, threadsPerBlock, 1, 1);
    cuda.SetFunctionBlockShape(vecAddFunc, threadsPerBlock, 1, 1);

    int offset = 0;
    cuda.SetParameter(vecAddFunc, offset, (uint)dA.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(vecAddFunc, offset, (uint)dB.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(vecAddFunc, offset, (uint)dC.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(vecAddFunc, offset, (uint)N);
    offset += sizeof(int);
    cuda.SetParameterSize(vecAddFunc, (uint)offset);

    CUevent start = cuda.CreateEvent();
    CUevent end = cuda.CreateEvent();

    cuda.RecordEvent(start);
    cuda.Launch(vecAddFunc, blocksPerGrid, 1);
    cuda.RecordEvent(end);
    cuda.SynchronizeContext();
    //cuda.SynchronizeEvent(end);

    float naiveTime = cuda.ElapsedTime(start, end);
    Console.Write("adding takes {0}ms", naiveTime);

    cuda.CopyDeviceToHost(dC, C);
    for (int i = 0; i < 10; i++)
    {
        Console.WriteLine("{0}-{1}", i, C[i]);
    }
}
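// The "VecAdd" kernel in structKernel.cubin is not shown here, but given the four
// parameters set above (A, B, C, N) it is essentially the canonical element-wise
// addition:
extern "C" __global__ void VecAdd(const float *A, const float *B, float *C, int N)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < N)
        C[i] = A[i] + B[i];
}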
private static unsafe void CuStructPass()
{
    int N = 4;
    int sparseVecSize = sizeof(SparseVecPtr);
    uint size = (uint)(N * sizeof(SparseVecPtr));

    CUDA cuda = new CUDA(0, true);

    // load module
    cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "structKernel.cubin"));
    CUfunction structPassFunc = cuda.GetModuleFunction("StructPass");

    SparseVecPtr[] vectors = new SparseVecPtr[N];
    for (int i = 0; i < N; i++)
    {
        vectors[i] = new SparseVecPtr();
        vectors[i].size = 2;
        float[] vals = new float[2] { (float)i + 1 % 5, (float)i + 2 % 7 };
        //GCHandle valHandle = GCHandle.Alloc(vals, GCHandleType.Pinned);
        //vectors[i].values = valHandle.AddrOfPinnedObject();

        int[] index = new int[2] { i % 5, i % 7 };
        //GCHandle idxHandle = GCHandle.Alloc(index, GCHandleType.Pinned);
        //vectors[i].indices = idxHandle.AddrOfPinnedObject();
        //valHandle.Free();
        //idxHandle.Free();

        CUdeviceptr valsPtr = cuda.CopyHostToDevice(vals);
        CUdeviceptr idxPtr = cuda.CopyHostToDevice(index);
        vectors[i].indices = new IntPtr(idxPtr.Pointer);
        vectors[i].values = (IntPtr)valsPtr.Pointer;
    }

    GCHandle handle = GCHandle.Alloc(vectors, GCHandleType.Pinned);
    IntPtr ptr = handle.AddrOfPinnedObject();

    float[] output = new float[N];

    //CUdeviceptr dVectors = cuda.CopyHostToDevice(vectors);
    CUdeviceptr dVectors = cuda.CopyHostToDevice(ptr, size);
    CUdeviceptr dOutput = cuda.Allocate(output);

    int threadsPerBlock = 256;
    int blocksPerGrid = (N + threadsPerBlock - 1) / threadsPerBlock;

    //error = cuFuncSetBlockShape(vecAdd, threadsPerBlock, 1, 1);
    cuda.SetFunctionBlockShape(structPassFunc, threadsPerBlock, 1, 1);

    int offset = 0;
    cuda.SetParameter(structPassFunc, offset, (uint)dVectors.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, (uint)dOutput.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, (uint)N);
    offset += sizeof(int);
    cuda.SetParameterSize(structPassFunc, (uint)offset);

    CUevent start = cuda.CreateEvent();
    CUevent end = cuda.CreateEvent();

    cuda.RecordEvent(start);
    cuda.Launch(structPassFunc, blocksPerGrid, 1);
    cuda.RecordEvent(end);
    cuda.SynchronizeContext();
    //cuda.SynchronizeEvent(end);

    float naiveTime = cuda.ElapsedTime(start, end);
    Console.Write("passing struct takes {0}ms", naiveTime);

    cuda.CopyDeviceToHost(dOutput, output);

    int lenght = Math.Min(10, N);
    Console.WriteLine();
    for (int i = 0; i < lenght; i++)
    {
        Console.WriteLine("{0}-{1}", i, output[i]);
    }
}
/// <summary>
/// Solves the optimization problem
/// </summary>
/// <param name="y"></param>
/// <param name="si"></param>
private void Solve(float[] y, SolutionInfo si)
{
    // initialize gradient
    {
        int i;
        for (i = 0; i < problemSize; i++)
        {
            G[i] = -1;
            alpha[i] = 0;
        }
    }

    InitCudaModule();
    SetCudaData();

    // optimization step
    float GMaxI = 0;
    float GMaxJ = 0;
    iter = 1;

    while (iter < MaxIter)
    {
        // Find i: maximizes -y_i * grad(f)_i, i in I_up(\alpha)
        Tuple<int, float> maxPair = FindMaxPair();
        int i = maxPair.Item1;
        GMaxI = maxPair.Item2;

        if (iter % 250 == 0)
        {
            GMaxJ = FindStoppingGradVal();
            if (GMaxI - GMaxJ < EPS)
            {
                break;
            }
        }

        // Compute the i-th kernel column, set the specific memory region on GPU
        ComputeKernel(i, kiPtr);

        // j: minimizes the decrease of the objective value
        // (if the quadratic coefficient <= 0, replace it with tau)
        // -y_j * grad(f)_j < -y_i * grad(f)_i, j in I_low(\alpha)
        int j = FindMinPair(i, GMaxI);

        // Compute the j-th kernel column
        ComputeKernel(j, kjPtr);

        float old_alpha_i = alpha[i];
        float old_alpha_j = alpha[j];

        // update alpha - serial code, one iteration
        UpdateAlpha(i, j);

        // update gradient G
        float delta_alpha_i = alpha[i] - old_alpha_i;
        float delta_alpha_j = alpha[j] - old_alpha_j;
        UpdateGrad(i, j, delta_alpha_i, delta_alpha_j);

        // is this needed??? (update alpha_status and G_bar)
        iter++;
    } // end while

    cuda.CopyDeviceToHost(gradPtr, G);
    cuda.SynchronizeContext();

    // calculate rho
    si.rho = calculate_rho();

    // because we start from 1, not from 0
    si.iter = iter - 1;

    // calculate objective value
    {
        float v = 0;
        int i;
        for (i = 0; i < problemSize; i++)
        {
            v += alpha[i] * (G[i] - 1);
        }
        si.obj = v / 2;
    }

    si.upper_bound_p = C;
    si.upper_bound_n = C;
}
public override float[] Predict(SparseVec[] elements)
{
    float[] prediction = new float[elements.Length];

    uint reduceSize = (uint)reductionBlocks * sizeof(float);
    int loop = (elements.Length + NUM_STREAMS - 1) / NUM_STREAMS;

    for (int i = 0; i < loop; i++)
    {
        for (int s = 0; s < NUM_STREAMS; s++)
        {
            int idx = i * NUM_STREAMS + s;
            if (idx < elements.Length)
            {
                var vec = elements[idx];

                //remove
                //float[] svDots = TrainedModel.SupportElements.Select(sv => sv.DotProduct(vec)).ToArray();

                // set the nonzero values in the dense vector accessible through vecIntPtr
                CudaHelpers.InitBuffer(vec, mainVecIntPtrs[s]);

                #region sync version
                cuda.CopyHostToDevice(mainVecCuPtr[s], mainVecIntPtrs[s], vectorsDimMemSize);

                cuda.SetParameter(cuFuncEval, kernelResultParamOffset, evalOutputCuPtr[s]);
                //cuda.SetParameter(cuFuncEval, vectorSelfDotParamOffset, vec.DotProduct());
                SetCudaEvalFuncParamsForVector(vec);
                cuda.SetParameter(cuFuncEval, texSelParamOffset, s + 1);
                cuda.Launch(cuFuncEval, evalBlocks, 1);

                float[] t = new float[sizeSV];
                cuda.CopyDeviceToHost(evalOutputCuPtr[s], t);

                cuda.SetParameter(cuFuncReduce, offsetMemToReduce, evalOutputCuPtr[s]);
                cuda.SetParameter(cuFuncReduce, offsetOutMemReduce, reduceCuPtr[s]);
                cuda.Launch(cuFuncReduce, reductionBlocks, 1);

                cuda.CopyDeviceToHost(reduceCuPtr[s], reduceIntPtrs[s], reduceSize);

                float[] r = new float[reductionBlocks];
                cuda.CopyDeviceToHost(reduceCuPtr[s], r);
                #endregion

                //cuda.CopyHostToDeviceAsync(mainVecCuPtr[s], mainVecIntPtrs[s], vectorsDimMemSize, stream[s]);

                //// cuFuncEval uses different textures per stream
                //cuda.SetParameter(cuFuncEval, kernelResultParamOffset, evalOutputCuPtr[s]);
                //cuda.SetParameter(cuFuncEval, vectorSelfDotParamOffset, vec.DotProduct());
                //cuda.SetParameter(cuFuncEval, texSelParamOffset, s + 1);
                //cuda.LaunchAsync(cuFuncEval, evalBlocks, 1, stream[s]);

                //cuda.SetParameter(cuFuncReduce, offsetMemToReduce, evalOutputCuPtr[s]);
                //cuda.SetParameter(cuFuncReduce, offsetOutMemReduce, reduceCuPtr[s]);
                //cuda.LaunchAsync(cuFuncReduce, reductionBlocks, 1, stream[s]);

                //cuda.CopyDeviceToHostAsync(reduceCuPtr[s], reduceIntPtrs[s], reduceSize, stream[s]);
            }
        }

        // wait for all streams
        cuda.SynchronizeContext();

        for (int s = 0; s < NUM_STREAMS; s++)
        {
            int idx = i * NUM_STREAMS + s;
            if (idx < elements.Length)
            {
                var vec = elements[idx];

                // clear the buffer: reset the nonzero positions in the dense vector accessible through vecIntPtr
                CudaHelpers.SetBufferIdx(vec, mainVecIntPtrs[s], 0.0f);

                float evalValue = ReduceOnHost(reduceIntPtrs[s], reductionBlocks);
                prediction[idx] = evalValue;
            }
        }
    }

    return prediction;
}
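// The second kernel launched above (cuFuncReduce) collapses the sizeSV per-support-vector
// products into reductionBlocks partial sums, which ReduceOnHost then adds up on the CPU.
// The shipped kernel is not shown here, so this is only a sketch of that stage using the
// standard shared-memory tree reduction; it assumes at most 256 threads per block.
extern "C" __global__ void ReduceSketch(const float *in, float *blockSums, int n)
{
    __shared__ float sdata[256];

    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + tid;
    unsigned int stride = blockDim.x * gridDim.x;

    // grid-stride accumulation into a register
    float sum = 0.0f;
    for (; i < n; i += stride)
        sum += in[i];
    sdata[tid] = sum;
    __syncthreads();

    // tree reduction in shared memory
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
    {
        if (tid < s)
            sdata[tid] += sdata[tid + s];
        __syncthreads();
    }

    // one partial sum per block; the host finishes the reduction
    if (tid == 0)
        blockSums[blockIdx.x] = sdata[0];
}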
/// <summary>
/// Barzilai-Borwein solver
/// </summary>
/// <param name="sub_prob"></param>
/// <param name="w"></param>
/// <param name="epsilon"></param>
/// <param name="Cp"></param>
/// <param name="Cn"></param>
private void solve_l2r_l2_bb_svc_cuda(Problem<SparseVec> sub_prob, float[] w, double epsilon, double Cp, double Cn)
{
    float obj = float.PositiveInfinity;
    float step = 0.0001f;

    int M = 10;
    float sig1 = 0.1f;
    float sig2 = 0.9f;
    float gamma = 10e-4f;
    float lambda = 0;
    float l_min = 10e-20f;
    float l_max = 10e20f;
    float[] func_vals = new float[M];
    float maxFuncVal = 0;

    int maxIter = 5000;
    iter = 0;

    Stopwatch st = new Stopwatch();
    st.Start();

    ComputeGradient(sub_prob);

    while (iter <= maxIter)
    {
        maxFuncVal = func_vals.Max();

        // swap alpha pointers
        var tmpPtr = alphaOldPtr.Pointer;
        alphaOldPtr.Pointer = alphaPtr.Pointer;
        alphaPtr.Pointer = tmpPtr;

        DoBBstep(-step, sub_prob);

        // obj = ComputeObjGPU(wVecPtr, alphaPtr);

        ////swap alpha pointers
        //var tmpPtr = alphaOldPtr.Pointer;
        //alphaOldPtr.Pointer = alphaPtr.Pointer;
        //alphaPtr.Pointer = tmpPtr;

        ////swap w pointers
        //var tempPtr = wVecPtr.Pointer;
        //wVecPtr.Pointer = wTempVecPtr.Pointer;
        //wTempVecPtr.Pointer = tempPtr;

        // change gradients (gradOldPtr = grad) and compute the new gradient
        float gradNorm = ComputeGradient(sub_prob);
        if (gradNorm < epsilon)
        {
            break;
        }

        ////swap w pointers
        //var tempPtr = wVecPtr.Pointer;
        //wVecPtr.Pointer = wTempVecPtr.Pointer;
        //wTempVecPtr.Pointer = tempPtr;

        // compute BB step
        step = ComputeBBStep();
        iter++;
    }

    st.Stop();
    obj = ComputeObjGPU(wVecPtr, alphaPtr);

    Console.WriteLine("Objective value = {0} time={1} ms={2} iter={3}", obj, st.Elapsed, st.ElapsedMilliseconds, iter);

    cuda.CopyDeviceToHost(wVecPtr, w);
}
private static float[] CuRBFCSRCached()
{
    // always the same values
    Random rnd = new Random(1);

    CUDA cuda = new CUDA(0, true);

    // load module
    CUmodule module = cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "structKernel.cubin"));
    CUfunction structPassFunc = cuda.GetModuleFunction("RBFspmv_csr_vector");

    int maxRowSize = avgElements + stdElements - 1;

    Console.WriteLine("init arrays");
    Stopwatch t = Stopwatch.StartNew();

    List<float> vecValsL = new List<float>(N * maxRowSize / 2);
    List<int> vecIdxL = new List<int>(N * maxRowSize / 2);
    List<int> vecLenghtL = new List<int>(N);

    float[] vecVals;
    int[] vecIdx;
    int[] vecLenght;
    float[] selfDot = new float[N];

    maxIndex = 0;
    int vecStartIdx = 0;
    for (int i = 0; i < N; i++)
    {
        int vecSize = avgElements + i % stdElements;
        float[] vals = Helpers.InitValues(i, vecSize, maxVal);
        vecValsL.AddRange(vals);

        for (int z = 0; z < vals.Length; z++)
        {
            selfDot[i] += vals[z] * vals[z];
        }

        int[] index = Helpers.InitIndices(i, vecSize, ref maxIndex);
        vecIdxL.AddRange(index);

        vecLenghtL.Add(vecStartIdx);
        vecStartIdx += vecSize;
    }
    // for the last index
    vecLenghtL.Add(vecStartIdx);

    vecVals = vecValsL.ToArray();
    vecIdx = vecIdxL.ToArray();
    vecLenght = vecLenghtL.ToArray();

    float[] mainVec = new float[maxIndex + 1];
    for (int j = vecLenght[mainIndex]; j < vecLenght[mainIndex + 1]; j++)
    {
        int idx = vecIdx[j];
        float val = vecVals[j];
        mainVec[idx] = val;
    }

    Console.WriteLine("Init takes {0}", t.Elapsed);
    t.Start();

    CUdeviceptr valsPtr = cuda.CopyHostToDevice(vecVals);
    CUdeviceptr idxPtr = cuda.CopyHostToDevice(vecIdx);
    CUdeviceptr vecLenghtPtr = cuda.CopyHostToDevice(vecLenght);
    CUdeviceptr selfDotPtr = cuda.CopyHostToDevice(selfDot);

    // copy the main vector to a texture
    CUarray cuArr = cuda.CreateArray(mainVec);
    cuda.CopyHostToArray(cuArr, mainVec, 0);
    CUtexref cuTexRef = cuda.GetModuleTexture(module, "texRef");
    cuda.SetTextureFlags(cuTexRef, 0);
    cuda.SetTextureArray(cuTexRef, cuArr);

    float[] output = new float[N];
    CUdeviceptr dOutput = cuda.Allocate(output);

    Console.WriteLine("copy to device takes {0}", t.Elapsed);

    cuda.SetFunctionBlockShape(structPassFunc, threadsPerBlock, 1, 1);

    int offset = 0;
    cuda.SetParameter(structPassFunc, offset, valsPtr.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, idxPtr.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, vecLenghtPtr.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, selfDotPtr.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, dOutput.Pointer);
    offset += IntPtr.Size;
    cuda.SetParameter(structPassFunc, offset, (uint)N);
    offset += sizeof(int);
    cuda.SetParameter(structPassFunc, offset, (uint)mainIndex);
    offset += sizeof(int);
    cuda.SetParameter(structPassFunc, offset, Gamma);
    offset += sizeof(float);
    cuda.SetParameter(structPassFunc, offset, (uint)vecStartIdx);
    offset += sizeof(int);
    cuda.SetParameterSize(structPassFunc, (uint)offset);

    Console.WriteLine("start computation");

    CUevent start = cuda.CreateEvent();
    CUevent end = cuda.CreateEvent();

    Stopwatch timer = Stopwatch.StartNew();
    cuda.RecordEvent(start);
    cuda.Launch(structPassFunc, blocksPerGrid, 1);
    cuda.RecordEvent(end);
    cuda.SynchronizeContext();
    //cuda.SynchronizeEvent(end);
    timer.Stop();

    float naiveTime = cuda.ElapsedTime(start, end);
    Console.Write("csr vector Dot products with mainIndex {0} and {1}-vectors takes {2} ms stopwatch time {3} ms", mainIndex, N, naiveTime, timer.Elapsed);

    cuda.CopyDeviceToHost(dOutput, output);

    int lenght = Math.Min(displayCount, N);
    Console.WriteLine();
    for (int i = 0; i < lenght; i++)
    {
        Console.WriteLine("{0}-{1}", i, output[i]);
    }

    cuda.Free(valsPtr);
    cuda.Free(idxPtr);
    cuda.Free(dOutput);
    cuda.Free(selfDotPtr);
    cuda.Free(vecLenghtPtr);
    cuda.DestroyArray(cuArr);
    cuda.DestroyTexture(cuTexRef);
    cuda.DestroyEvent(start);
    cuda.DestroyEvent(end);

    return output;
}
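// A sketch of the arithmetic behind "RBFspmv_csr_vector": CSR data in vals/colIdx with
// rowPtr delimiting each row (the vecLenght array above), per-row squared norms in
// selfDot, and the dense main vector cached in the 1D texture "texRef". The shipped
// kernel's "_vector" suffix suggests a warp-per-row CSR variant; for clarity this sketch
// uses one thread per row, and all names are assumptions.
texture<float, 1, cudaReadModeElementType> texRef;

extern "C" __global__ void RBFspmvCsrSketch(const float *vals, const int *colIdx,
                                            const int *rowPtr, const float *selfDot,
                                            float *output, int N, int mainIndex,
                                            float gamma)
{
    int row = blockIdx.x * blockDim.x + threadIdx.x;
    if (row >= N)
        return;

    // dot product of row "row" with the main vector held in the texture
    float dot = 0.0f;
    for (int j = rowPtr[row]; j < rowPtr[row + 1]; j++)
        dot += vals[j] * tex1D(texRef, colIdx[j] + 0.5f);

    // RBF kernel value: K(x_row, x_main) = exp(-gamma * (||x_row||^2 + ||x_main||^2 - 2 * dot))
    output[row] = expf(-gamma * (selfDot[row] + selfDot[mainIndex] - 2.0f * dot));
}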
private void Worker() { #if CUDA int nN = _nMergingDeviceNumber; string sS = "CUDA-" + nN + "-" + _nIndex; try { Command cCmd; CUDA cCUDA; #region CUDA Init try { cCUDA = new CUDA(true); cCUDA.CreateContext(nN % 1000); // number of cuda in prefs (still alwais 0) } catch (Exception ex) { if (ex is CUDAException) { ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex); } throw new Exception("CreateContext(" + nN % 1000 + ") error. Try to change CUDA's card number in prefs", ex); } (new Logger(sS)).WriteDebug("CreateContext(" + nN % 1000 + ") is ok!"); uint nMemoryReservedForMerge = 2 * 1024 * 1024; //PREFERENCES типа <memory reserved="2097152" /> uint nMemoryStarvationThreshold = cCUDA.TotalMemory / 2; //PREFERENCES через проценты... типа <memory starvation="50%" /> uint nMemoryFree; string sModule = "CUDAFunctions_" + Preferences.nCUDAVersion + "_x" + (IntPtr.Size * 8); if (Logger.bDebug) { (new Logger(sS)).WriteDebug(sModule + " Current CUDA = [name=" + cCUDA.CurrentDevice.Name + "][compute_capability=" + cCUDA.CurrentDevice.ComputeCapability + "]"); } cCUDA.LoadModule((byte[])Properties.Resource.ResourceManager.GetObject(sModule)); // $(ProjectDir)Resources\CUDAFunctions.cubin CUfunction cCUDAFuncMerge = cCUDA.GetModuleFunction("CUDAFrameMerge"); int nThreadsPerBlock = 16; //32 //256 //пришлось уменьшить с 512 до 256 сридов на блок, потому что при добавлении "движения" и операций с float, ловил ошибку: Too Many Resources Requested for Launch (This error means that the number of registers available on the multiprocessor is being exceeded. Reduce the number of threads per block to solve the problem) cCUDA.SetFunctionBlockShape(cCUDAFuncMerge, nThreadsPerBlock, nThreadsPerBlock, 1); CUDADriver.cuParamSetSize(cCUDAFuncMerge, 8); Dictionary <long, CUdeviceptr> ahPMIDs_DevicePointers = new Dictionary <long, CUdeviceptr>(); CUdeviceptr cPMs; CUdeviceptr cInfos; CUdeviceptr cAlphaMap; CUdeviceptr cAlphaMap_info3d; CUdeviceptr cAlphaMap_info2d; if (true) { //IntPtr[] aPointersByAlpha = new IntPtr[254]; //те самые поинтеры-альфы. Ссылаются на массивы поинтеров B, т.е. BackGrounds //IntPtr[] aPointersByBackground = new IntPtr[256]; // те самые массивы поинтеров B, т.е. 
BackGrounds byte[] aAlphaMap = new byte[(byte.MaxValue - 1) * (byte.MaxValue + 1) * (byte.MaxValue + 1)]; int[] aAlphaMap_info3d = new int[254]; // начала 2d слоёв ushort[] aAlphaMap_info2d = new ushort[256]; // начала строк в одном 2d int nResult, nIndx = 0, nIndxInfo = 0, nIndx2d = 0; for (byte nAlpha = 1; 255 > nAlpha; nAlpha++) { aAlphaMap_info3d[nIndxInfo++] = nIndx; for (ushort nBackground = 0; 256 > nBackground; nBackground++) { if (nAlpha == 1) { aAlphaMap_info2d[nIndx2d++] = (ushort)nIndx; } for (ushort nForeground = 0; 256 > nForeground; nForeground++) { if (255 < (nResult = (int)((float)(nAlpha * (nForeground - nBackground)) / 255 + nBackground + 0.5))) { nResult = 255; } aAlphaMap[nIndx++] = (byte)nResult; } //aPointersByBackground[nBackground] = (IntPtr)cCUDA.CopyHostToDevice<byte>(aResults).Pointer; } //aPointersByAlpha[nAlpha - 1] = (IntPtr)cCUDA.CopyHostToDevice<IntPtr>(aPointersByBackground).Pointer; } cAlphaMap_info3d = cCUDA.CopyHostToDevice <int>(aAlphaMap_info3d); cAlphaMap = cCUDA.CopyHostToDevice <byte>(aAlphaMap); cAlphaMap_info2d = cCUDA.CopyHostToDevice <ushort>(aAlphaMap_info2d); } CUdeviceptr cAlphaMap2; CUdeviceptr cAlphaMap2_info2d; { byte[] aAlphaMap2 = new byte[(byte.MaxValue - 1) * (byte.MaxValue - 1)]; ushort[] aAlphaMap2_info2d = new ushort[254]; int nIndx = 0, nIndx2d = 0; for (byte nFGColorAlpha = 1; 255 > nFGColorAlpha; nFGColorAlpha++) // можно использовать симметрию умножения, но х с ней пока { aAlphaMap2_info2d[nIndx2d++] = (ushort)nIndx; for (byte nPixelAlpha = 1; 255 > nPixelAlpha; nPixelAlpha++) { aAlphaMap2[nIndx++] = (byte)((float)nFGColorAlpha * nPixelAlpha / 255 + 0.5); } } cAlphaMap2 = cCUDA.CopyHostToDevice <byte>(aAlphaMap2); cAlphaMap2_info2d = cCUDA.CopyHostToDevice <ushort>(aAlphaMap2_info2d); } CUdeviceptr cAlphaMap3; CUdeviceptr cAlphaMap3_info2d; { byte[] aAlphaMap3 = new byte[byte.MaxValue * (byte.MaxValue - 1)]; ushort[] aAlphaMap3_info2d = new ushort[255]; int nIndx = 0, nIndx2d = 0; for (ushort nFGColorAlpha = 1; 256 > nFGColorAlpha; nFGColorAlpha++) { aAlphaMap3_info2d[nIndx2d++] = (ushort)nIndx; for (byte nMask = 1; 255 > nMask; nMask++) { aAlphaMap3[nIndx++] = (byte)(nFGColorAlpha * ((255 - nMask) / 255f) + 0.5); } } cAlphaMap3 = cCUDA.CopyHostToDevice <byte>(aAlphaMap3); cAlphaMap3_info2d = cCUDA.CopyHostToDevice <ushort>(aAlphaMap3_info2d); } #endregion CUDA Init #if DEBUG Dictionary <long, DateTime> ahDebug = new Dictionary <long, DateTime>(); Dictionary <long, Area> ahDebugAr = new Dictionary <long, Area>(); #endif DateTime dtNextTime = DateTime.MinValue, dtNow; bool bSet; List <IntPtr> aDPs; List <PixelsMap> aPMs; while (true) { if (1 > aqQueue.CountGet() && (dtNow = DateTime.Now) > dtNextTime) { dtNextTime = dtNow.AddMinutes(20); #if DEBUG dtNow = dtNow.Subtract(TimeSpan.FromHours(2)); string sMessage = ""; foreach (long nID in ahDebug.OrderBy(o => o.Value).Select(o => o.Key)) { if (dtNow > ahDebug[nID]) { sMessage += "<br>[" + nID + " - " + ahDebug[nID].ToString("HH:mm:ss") + "]" + ahDebugAr[nID].ToString(); } } #endif (new Logger(sS)).WriteDebug("CUDA free memory:" + cCUDA.FreeMemory #if DEBUG + "; possibly timeworn allocations:" + (1 > sMessage.Length ? 
"no" : sMessage) #endif ); } cCmd = aqQueue.Dequeue(); //если нечего отдать - заснёт switch (cCmd.eID) { case Command.ID.Allocate: #region try { cCmd.cPM._cException = null; if (1 > cCmd.cPM._nID) { if (0 < cCmd.cPM._nBytesQty) { nMemoryFree = cCUDA.FreeMemory; if (nMemoryReservedForMerge < nMemoryFree - cCmd.cPM._nBytesQty) { bMemoryStarvation = (nMemoryFree < nMemoryStarvationThreshold); (new Logger(sS)).WriteDebug3("pixelmap allocateCUDA [current_id=" + _nCurrentID + "]"); cCmd.cPM._nID = System.Threading.Interlocked.Increment(ref _nCurrentID); ahPMIDs_DevicePointers.Add(cCmd.cPM._nID, cCUDA.Allocate(cCmd.cPM._nBytesQty)); #if DEBUG ahDebug.Add(cCmd.cPM._nID, DateTime.Now); ahDebugAr.Add(cCmd.cPM._nID, cCmd.cPM.stArea); #endif } else { bMemoryStarvation = true; throw new Exception("out of memory in CUDA device during Allocate. Only 2 MBytes reserved for the Merge"); } } else { throw new Exception("bytes quantity in PixelsMap have to be greater than zero for Allocate [_bDisposed = " + cCmd.cPM._bDisposed + "][_bProcessing = " + cCmd.cPM._bProcessing + "][_stPosition.X = " + cCmd.cPM._stPosition.X + "][_stPosition.Y = " + cCmd.cPM._stPosition.Y + "][_bTemp = " + cCmd.cPM._bTemp + "][_dt = " + cCmd.cPM._dtCreate + "][_nBytesQty = " + cCmd.cPM._nBytesQty + "][_nID = " + cCmd.cPM._nID + "][_nShiftTotalX = " + cCmd.cPM._nShiftTotalX + "][_stArea.nHeight = " + cCmd.cPM._stArea.nHeight + "][_stArea.nWidth = " + cCmd.cPM._stArea.nWidth + "][bKeepAlive = " + cCmd.cPM.bKeepAlive + "][eAlpha = " + cCmd.cPM.eAlpha + "][bCUDA = " + cCmd.cPM.stMergingMethod + "][nAlphaConstant = " + cCmd.cPM.nAlphaConstant + "][nID = " + cCmd.cPM.nID + "][nLength = " + cCmd.cPM.nLength + "][stArea.nHeight = " + cCmd.cPM.stArea.nHeight + "][stArea.nWidth = " + cCmd.cPM.stArea.nWidth + "]"); } } else { throw new Exception("PixelsMap ID have to be zero for Allocate"); } } catch (Exception ex) { if (ex is CUDAException) { ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex); } (new Logger(sS)).WriteError(ex); (new Logger(sS)).WriteDebug("bytes qty:" + cCmd.cPM._nBytesQty); cCmd.cPM._cException = ex; } cCmd.cMRE.Set(); break; #endregion case Command.ID.CopyIn: #region try { cCmd.cPM._cException = null; if (1 > cCmd.cPM._nID) { if (cCUDA.FreeMemory - cCmd.cPM._nBytesQty > nMemoryReservedForMerge) { (new Logger(sS)).WriteDebug3("pixelmap copyinCUDA not allocated [pm_id=" + _nCurrentID + "]"); cCmd.cPM._nID = System.Threading.Interlocked.Increment(ref _nCurrentID); if (cCmd.ahParameters.ContainsKey(typeof(IntPtr))) { ahPMIDs_DevicePointers.Add(cCmd.cPM._nID, cCUDA.CopyHostToDevice((IntPtr)cCmd.ahParameters[typeof(IntPtr)], cCmd.cPM._nBytesQty)); } else if (cCmd.ahParameters.ContainsKey(typeof(byte[]))) { ahPMIDs_DevicePointers.Add(cCmd.cPM._nID, cCUDA.CopyHostToDevice((byte[])cCmd.ahParameters[typeof(byte[])])); } else { throw new Exception("unknown parameter type"); } #if DEBUG ahDebug.Add(cCmd.cPM._nID, DateTime.Now); ahDebugAr.Add(cCmd.cPM._nID, cCmd.cPM.stArea); #endif } else { throw new Exception("out of memory in CUDA device during CopyIn. 
Only 2 MBytes reserved for the Merge."); } } else { (new Logger(sS)).WriteDebug4("pixelmap copyinCUDA allocated [pm_id=" + _nCurrentID + "]"); if (cCmd.ahParameters.ContainsKey(typeof(IntPtr))) { cCUDA.CopyHostToDevice(ahPMIDs_DevicePointers[cCmd.cPM._nID], (IntPtr)cCmd.ahParameters[typeof(IntPtr)], cCmd.cPM._nBytesQty); } else if (cCmd.ahParameters.ContainsKey(typeof(byte[]))) { cCUDA.CopyHostToDevice(ahPMIDs_DevicePointers[cCmd.cPM._nID], (byte[])cCmd.ahParameters[typeof(byte[])]); } else { throw new Exception("unknown parameter type"); } } } catch (Exception ex) { if (ex is CUDAException) { ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex); } (new Logger(sS)).WriteError(ex); cCmd.cPM._cException = ex; } cCmd.cMRE.Set(); #endregion break; case Command.ID.CopyOut: #region try { if (0 < cCmd.cPM._nID) { if (!cCmd.ahParameters.ContainsKey(typeof(IntPtr))) { if (cCmd.ahParameters.ContainsKey(typeof(byte[]))) { byte[] aB = (byte[])cCmd.ahParameters[typeof(byte[])]; cCmd.cPM._aBytes = null; if (cCmd.cPM._nBytesQty != aB.Length) { (new Logger(sS)).WriteWarning("wrong array size for copyout [got:" + aB.Length + "][expected:" + cCmd.cPM._nBytesQty + "]"); } cCUDA.CopyDeviceToHost <byte>(ahPMIDs_DevicePointers[cCmd.cPM._nID], aB); } else // не юзается (см. copyout()) { cCmd.cPM._aBytes = _cBinM.BytesGet((int)cCmd.cPM._nBytesQty, 3); cCUDA.CopyDeviceToHost <byte>(ahPMIDs_DevicePointers[cCmd.cPM._nID], cCmd.cPM._aBytes.aBytes); } } else { cCUDA.CopyDeviceToHost(ahPMIDs_DevicePointers[cCmd.cPM._nID], (IntPtr)cCmd.ahParameters[typeof(IntPtr)], cCmd.cPM._nBytesQty); } (new Logger(sS)).WriteDebug5("copy out [id:" + cCmd.cPM._nID + "][ptr:" + ahPMIDs_DevicePointers[cCmd.cPM._nID].Pointer + "]"); } else { throw new Exception("PixelsMap have to be allocated for CopyOut"); } } catch (Exception ex) { if (ex is CUDAException) { ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex); } (new Logger(sS)).WriteError(ex); cCmd.cPM._cException = ex; } cCmd.cMRE.Set(); #endregion break; case Command.ID.Merge: #region bSet = false; try { aPMs = (List <PixelsMap>)cCmd.ahParameters[typeof(List <PixelsMap>)]; DisCom.MergeInfo cMergeInfo = (DisCom.MergeInfo)cCmd.ahParameters[typeof(DisCom.MergeInfo)]; aDPs = new List <IntPtr>(); if (1 > cCmd.cPM._nID) { throw new Exception("background PixelsMap have to be allocated for Merge"); } aDPs.Add((IntPtr)ahPMIDs_DevicePointers[cCmd.cPM._nID].Pointer); for (int nIndx = 0; nIndx < aPMs.Count; nIndx++) { if (!ahPMIDs_DevicePointers.ContainsKey(aPMs[nIndx]._nID)) { throw new Exception("there is a corrupted ID in layers for merge [id:" + aPMs[nIndx]._nID + "]"); } if (1 > ahPMIDs_DevicePointers[aPMs[nIndx]._nID].Pointer) { throw new Exception("there is an empty pointer in layers for merge [id:" + aPMs[nIndx]._nID + "]"); } aDPs.Add((IntPtr)ahPMIDs_DevicePointers[aPMs[nIndx]._nID].Pointer); } cPMs = cCUDA.CopyHostToDevice <IntPtr>(aDPs.ToArray()); cInfos = cCUDA.CopyHostToDevice(cMergeInfo, cMergeInfo.SizeGet()); // operator intptr in DisCom.MergeInfo cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, 0, (IntPtr)cPMs.Pointer); cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, IntPtr.Size, (IntPtr)cInfos.Pointer); cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, IntPtr.Size * 2, (IntPtr)cAlphaMap.Pointer); // cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, IntPtr.Size * 3, (IntPtr)cAlphaMap_info3d.Pointer); // cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, IntPtr.Size * 4, (IntPtr)cAlphaMap_info2d.Pointer); // cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, 
IntPtr.Size * 5, (IntPtr)cAlphaMap2.Pointer); cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, IntPtr.Size * 6, (IntPtr)cAlphaMap2_info2d.Pointer); // cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, IntPtr.Size * 7, (IntPtr)cAlphaMap3.Pointer); cCUDA.SetParameter <IntPtr>(cCUDAFuncMerge, IntPtr.Size * 8, (IntPtr)cAlphaMap3_info2d.Pointer); // cCUDA.SetParameterSize(cCUDAFuncMerge, (uint)(IntPtr.Size * 9)); int nIterationsX = (0 == cMergeInfo.nBackgroundWidth % nThreadsPerBlock ? cMergeInfo.nBackgroundWidth / nThreadsPerBlock : cMergeInfo.nBackgroundWidth / nThreadsPerBlock + 1); int nIterationsY = (0 == cMergeInfo.nBackgroundHight % nThreadsPerBlock ? cMergeInfo.nBackgroundHight / nThreadsPerBlock : cMergeInfo.nBackgroundHight / nThreadsPerBlock + 1); //int nIterationsX = (0 == cMergeInfo.nBackgroundHight % nThreadsPerBlock ? cMergeInfo.nBackgroundHight / nThreadsPerBlock : cMergeInfo.nBackgroundHight / nThreadsPerBlock + 1); cCUDA.Launch(cCUDAFuncMerge, nIterationsX, nIterationsY); cCUDA.Free(cPMs); cCUDA.Free(cInfos); cCmd.cMRE.Set(); bSet = true; cMergeInfo.Dispose(); for (int nIndx = 0; nIndx < aPMs.Count; nIndx++) { lock (aPMs[nIndx]._cSyncRoot) aPMs[nIndx]._bProcessing = false; aPMs[nIndx].Dispose(); } } catch (Exception ex) { cCmd.cPM._cException = ex; if (!bSet) { cCmd.cMRE.Set(); } if (ex is CUDAException) { ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex); } (new Logger(sS)).WriteError(ex); } #endregion break; case Command.ID.Dispose: #region (new Logger(sS)).Write(Logger.Level.debug4, "dispose: in"); try { if (ahPMIDs_DevicePointers.ContainsKey(cCmd.cPM._nID)) { if (0 < cCmd.cPM._nID && 0 < ahPMIDs_DevicePointers[cCmd.cPM._nID].Pointer) { cCUDA.Free(ahPMIDs_DevicePointers[cCmd.cPM._nID]); //cCUDA.SynchronizeContext(); bMemoryStarvation = (cCUDA.FreeMemory < nMemoryStarvationThreshold); (new Logger(sS)).WriteDebug3("dispose [id:" + cCmd.cPM._nID + "][ptr:" + ahPMIDs_DevicePointers[cCmd.cPM._nID].Pointer + "]"); } ahPMIDs_DevicePointers.Remove(cCmd.cPM._nID); #if DEBUG ahDebug.Remove(cCmd.cPM._nID); ahDebugAr.Remove(cCmd.cPM._nID); #endif cCmd.cPM._nID = 0; } } catch (Exception ex) { if (ex is CUDAException) { ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex); } (new Logger(sS)).WriteError(ex); cCmd.cPM._cException = ex; } (new Logger(sS)).Write(Logger.Level.debug4, "dispose: out"); #endregion break; } } } catch (Exception ex) { if (ex is CUDAException) { ex = new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex); } (new Logger(sS)).WriteError("CUDA STOPPED!!!! [id = " + _nIndex + "]", ex); } #endif }
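// Two patterns repeat throughout the command handler above: every catch block re-wraps a
// CUDAException so its CUDAError code appears in the message, and the Merge case sizes its
// launch grid with an inline ceiling division. A minimal sketch of helpers for both follows;
// the names CudaWorkerHelpers, WrapCudaError and DivUp are hypothetical, and the sketch only
// assumes the CUDAException.CUDAError property already used above.
static class CudaWorkerHelpers
{
    // Mirrors the repeated catch-block pattern: keep the original exception as InnerException
    // and surface the driver error code in the message.
    public static Exception WrapCudaError(Exception ex)
    {
        if (ex is CUDAException)
        {
            return new Exception("CUDA Error:" + ((CUDAException)ex).CUDAError.ToString(), ex);
        }
        return ex;
    }

    // Ceiling division used to size the Merge launch grid:
    // DivUp(cMergeInfo.nBackgroundWidth, nThreadsPerBlock) equals nIterationsX above.
    public static int DivUp(int n, int threadsPerBlock)
    {
        return (n + threadsPerBlock - 1) / threadsPerBlock;
    }
}
// With these helpers each catch block reduces to "ex = WrapCudaError(ex);" before logging,
// and the conditional expressions computing nIterationsX/nIterationsY become single DivUp calls.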
static void Main(string[] args) { // Init and select 1st device. CUDA cuda = new CUDA(0, true); // load module //cuda.LoadModule(Path.Combine(Environment.CurrentDirectory, "simpleCUFFT.ptx")); CUfunction func = new CUfunction();// cuda.GetModuleFunction("ComplexPointwiseMulAndScale"); // The filter size is assumed to be smaller than the signal size const int SIGNAL_SIZE = 50; const int FILTER_KERNEL_SIZE = 11; // Allocate host memory for the signal Float2[] h_signal = new Float2[SIGNAL_SIZE]; // Initialize the memory for the signal Random r = new Random(); for (int i = 0; i < SIGNAL_SIZE; ++i) { h_signal[i].x = r.Next() / (float)int.MaxValue; h_signal[i].y = 0; } // Allocate host memory for the filter Float2[] h_filter_kernel = new Float2[FILTER_KERNEL_SIZE]; // Initialize the memory for the filter for (int i = 0; i < FILTER_KERNEL_SIZE; ++i) { h_filter_kernel[i].x = r.Next() / (float)int.MaxValue; h_filter_kernel[i].y = 0; } // Pad signal and filter kernel Float2[] h_padded_signal; Float2[] h_padded_filter_kernel; int new_size = PadData(h_signal, out h_padded_signal, SIGNAL_SIZE, h_filter_kernel, out h_padded_filter_kernel, FILTER_KERNEL_SIZE); // Allocate device memory for signal // Copy host memory to device CUdeviceptr d_signal = cuda.CopyHostToDevice <Float2>(h_padded_signal); // Allocate device memory for filter kernel // Copy host memory to device CUdeviceptr d_filter_kernel = cuda.CopyHostToDevice <Float2>(h_padded_filter_kernel); // CUFFT plan CUFFT fft = new CUFFT(cuda); cufftHandle handle = new cufftHandle(); CUFFTResult fftres = CUFFTDriver.cufftPlan1d(ref handle, new_size, CUFFTType.C2C, 1); //fft.Plan1D(new_size, CUFFTType.C2C, 1); return; // NOTE: early exit - the ComplexPointwiseMulAndScale module is never loaded above, so the FFT, pointwise multiply and verification steps below are not executed // Transform signal and kernel fft.ExecuteComplexToComplex(d_signal, d_signal, CUFFTDirection.Forward); fft.ExecuteComplexToComplex(d_filter_kernel, d_filter_kernel, CUFFTDirection.Forward); // Multiply the coefficients together and normalize the result // ComplexPointwiseMulAndScale<<<32, 256>>>(d_signal, d_filter_kernel, new_size, 1.0f / new_size); cuda.SetFunctionBlockShape(func, 256, 1, 1); cuda.SetParameter(func, 0, (uint)d_signal.Pointer); cuda.SetParameter(func, IntPtr.Size, (uint)d_filter_kernel.Pointer); cuda.SetParameter(func, IntPtr.Size * 2, (uint)new_size); cuda.SetParameter(func, IntPtr.Size * 2 + 4, 1.0f / new_size); cuda.SetParameterSize(func, (uint)(IntPtr.Size * 2 + 8)); cuda.Launch(func, 32, 1); // Transform signal back fft.ExecuteComplexToComplex(d_signal, d_signal, CUFFTDirection.Inverse); // Copy device memory to host Float2[] h_convolved_signal = h_padded_signal; cuda.CopyDeviceToHost <Float2>(d_signal, h_convolved_signal); // Allocate host memory for the convolution result Float2[] h_convolved_signal_ref = new Float2[SIGNAL_SIZE]; // Convolve on the host Convolve(h_signal, SIGNAL_SIZE, h_filter_kernel, FILTER_KERNEL_SIZE, h_convolved_signal_ref); // check result bool res = cutCompareL2fe(h_convolved_signal_ref, h_convolved_signal, 2 * SIGNAL_SIZE, 1e-5f); Console.WriteLine("Test {0}", (true == res) ? "PASSED" : "FAILED"); //Destroy CUFFT context fft.Destroy(); // cleanup memory cuda.Free(d_signal); cuda.Free(d_filter_kernel); }
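// The verification path above relies on PadData and Convolve helpers that are not shown here.
// Below is a minimal sketch of a direct host-side reference convolution for Float2 data (real
// part in x, imaginary part in y), illustrating what the FFT-based GPU result is compared
// against; the name ConvolveReference is hypothetical and the exact padding/alignment used by
// the original helpers may differ.
static void ConvolveReference(Float2[] signal, int signalSize,
                              Float2[] kernel, int kernelSize,
                              Float2[] result)
{
    // Centre the filter kernel on each output sample, clamping at the signal borders.
    int minRadius = kernelSize / 2;
    int maxRadius = kernelSize - minRadius;
    for (int i = 0; i < signalSize; i++)
    {
        result[i].x = 0; result[i].y = 0;
        for (int j = -maxRadius + 1; j <= minRadius; j++)
        {
            int k = i + j;
            if (k >= 0 && k < signalSize)
            {
                // Complex multiply-accumulate: result[i] += signal[k] * kernel[minRadius - j]
                Float2 a = signal[k];
                Float2 b = kernel[minRadius - j];
                result[i].x += a.x * b.x - a.y * b.y;
                result[i].y += a.x * b.y + a.y * b.x;
            }
        }
    }
}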
private void solve_l2r_l2_svc_cuda(Problem <SparseVec> sub_prob, float[] w, double epsilon, double Cp, double Cn) { //blocks per Grid for computing dot prod int bpgDotProd = (sub_prob.Elements.Length + threadsPerBlock - 1) / threadsPerBlock; //blocks per Grid for solver kernel int bpgSolver = (sub_prob.Elements.Length + threadsPerBlock - 1) / threadsPerBlock; //blocks per Grid for update_W kernel int bpgUpdateW = (sub_prob.Elements[0].Dim + threadsPerBlock - 1) / threadsPerBlock; double obj = Double.PositiveInfinity; int maxIter = 2000; float[] deltasCu = new float[sub_prob.ElementsCount]; float[] alphaCu = new float[sub_prob.ElementsCount]; float[] alpha_i = new float[sub_prob.ElementsCount]; float[] w1 = new float[sub_prob.Elements[0].Dim]; float[] w2 = new float[sub_prob.Elements[0].Dim]; for (int i = 0; i < w.Length; i++) { w1[i] = w2[i] = w[i]; } //inverse Hessian for toy_2d_3 problem //float[,] invHessian = new float[3, 3]{ // {5.05f, -0.96f, -3.5f}, // {-0.96f, 0.9f, 1}, // {-3.59f , 1f, 2.8f} //}; int iter = 0; while (iter < maxIter) { //computes dot product between W and all elements cuda.Launch(cuFuncDotProd, bpgDotProd, 1); cuda.SynchronizeContext(); float[] grad = new float[sub_prob.ElementsCount]; Marshal.Copy(gradIntPtr, grad, 0, grad.Length); #region test host code //float[] dots = new float[sub_prob.ElementsCount]; //float[] dots1 = new float[sub_prob.ElementsCount]; //for (int i = 0; i < dots.Length; i++) //{ // var element = sub_prob.Elements[i]; // for (int k = 0; k < element.Count; k++) // { // dots[i] += w[element.Indices[k] - 1] * element.Values[k]; // dots1[i] += w1[element.Indices[k] - 1] * element.Values[k]; // } // dots[i] *= sub_prob.Y[i]; // dots1[i] *= sub_prob.Y[i]; //} //float[] grad_i = new float[sub_prob.ElementsCount]; //for (int i = 0; i < grad_i.Length; i++) //{ // float dot = 0; // var vec_i = sub_prob.Elements[i]; // sbyte y_i = (sbyte)sub_prob.Y[i]; // for (int j = 0; j < sub_prob.ElementsCount; j++) // { // var vec_j = sub_prob.Elements[j]; // sbyte y_j = (sbyte)sub_prob.Y[j]; // float part_dot = 0; // for (int k = 0; k < vec_i.Dim; k++) // { // part_dot += vec_i.Values[k] * vec_j.Values[k]; // } // if (i == j) // { // part_dot += diag[y_i + 1] ; // } // part_dot = part_dot * y_i * y_j; // part_dot *= alpha_i[j]; // dot += part_dot; // } // grad_i[i] = dot - 1; //} #endregion cuda.Launch(cuFuncSolver, bpgSolver, 1); cuda.SynchronizeContext(); float[] grad2 = new float[sub_prob.ElementsCount]; Marshal.Copy(gradIntPtr, grad2, 0, grad2.Length); //float[] grad3 = new float[sub_prob.ElementsCount]; //float[] projGrad = new float[sub_prob.ElementsCount]; //float[] projGrad_i = new float[sub_prob.ElementsCount]; //for (int i = 0; i < grad3.Length; i++) //{ // sbyte y_i = (sbyte)sub_prob.Y[i]; // grad3[i] = dots1[i] - 1 + alpha[i] * diag[y_i + 1]; // if (alpha[i] == 0) // { // projGrad[i] = Math.Min(0, grad3[i]); // // projGrad_i[i] = Math.Min(0, grad_i[i]); // } // else // { // projGrad[i] = grad3[i]; // // projGrad_i[i] = grad_i[i]; // } //} cuda.CopyDeviceToHost(deltasPtr, deltasCu); cuda.CopyDeviceToHost(alphaPtr, alphaCu); cuda.Launch(cuFuncUpdateW, bpgUpdateW, 1); cuda.SynchronizeContext(); cuda.CopyDeviceToHost(mainVecPtr, w); //take grad and check stop condition //Marshal.Copy(gradIntPtr, , 0, results.Length); //compute w1 //double su = 0; //float[] wAll = new float[sub_prob.Elements[0].Dim]; //for (int p = 0; p < sub_prob.ElementsCount; p++) //{ // sbyte y_i = (sbyte)sub_prob.Y[p]; // float old_alpha = alpha[p]; // float alphaStep = 0; // //for (int 
k = 0; k < alpha_i.Length; k++) // //{ // // alphaStep += invHessian[p,k] * projGrad_i[k]; // //} // alpha[p] = Math.Max(alpha[p] -stepScaling* projGrad[p] / (QD[p] + diag[y_i + 1]), 0); // alpha_i[p] = Math.Max(alpha_i[p] - stepScaling* projGrad_i[p] / (QD[p] + diag[y_i + 1]), 0); // // alpha_i[p] = Math.Max(alpha_i[p] - 0.01f* projGrad_i[p] , 0); // //alpha_i[p] = Math.Max(alpha_i[p] - alphaStep, 0); // float d = deltasCu[p]; // float d2 =(alpha[p] - old_alpha) * y_i; // var spVec = sub_prob.Elements[p]; // for (int k = 0; k < spVec.Count; k++) // { // w1[spVec.Indices[k] - 1] += d2 * spVec.Values[k]; // w2[spVec.Indices[k] - 1] += d2 * spVec.Values[k]; // wAll[spVec.Indices[k] - 1] += alpha[p]*y_i * spVec.Values[k]; // } // su += y_i * alpha[p]; // } #if DEBUG obj = ComputeObj(w, alphaCu, sub_prob, diag); Debug.WriteLine(obj.ToString(CultureInfo.GetCultureInfo("pl-PL").NumberFormat)); #endif float minPG = float.PositiveInfinity; float maxPG = float.NegativeInfinity; for (int i = 0; i < grad2.Length; i++) { minPG = Math.Min(minPG, grad2[i]); maxPG = Math.Max(maxPG, grad2[i]); } if (maxPG < 0) { maxPG = float.NegativeInfinity; } if (minPG > 0) { minPG = float.PositiveInfinity; } if (Math.Abs(maxPG - minPG) <= epsilon) { break; } iter++; } cuda.SynchronizeContext(); //copy results from device to host cuda.CopyDeviceToHost(mainVecPtr, w); cuda.CopyDeviceToHost(alphaPtr, alpha); ComputeObj(w, alpha, sub_prob, diag); //int l = sub_prob.ElementsCount;// prob.l; //int w_size = sub_prob.FeaturesCount;// prob.n; //double v = 0; //int nSV = 0; //for (int i = 0; i < w_size; i++) // v += w[i] * w[i]; //for (int i = 0; i < l; i++) //{ // sbyte y_i =(sbyte) sub_prob.Y[i]; // v += alpha[i] * (alpha[i] * diag[y_i+1] - 2); // if (alpha[i] > 0) ++nSV; //} //Debug.WriteLine("Objective value = {0}", v / 2); //Debug.WriteLine("nSV = {0}", nSV); }
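// ComputeObj is called above but not shown. Judging from the commented-out block at the end of
// the method, it computes the dual objective of the L2-loss SVM,
// obj = ( ||w||^2 + sum_i alpha_i * (alpha_i * diag[y_i + 1] - 2) ) / 2,
// and reports the number of support vectors. A minimal sketch under that assumption follows;
// the name ComputeObjSketch is hypothetical, and diag is assumed to be a float[] indexed by
// label (y_i in {-1, +1} maps to index 0 or 2), as in the surrounding code.
static double ComputeObjSketch(float[] w, float[] alpha, Problem<SparseVec> prob, float[] diag)
{
    double v = 0;
    int nSV = 0;
    // ||w||^2 term
    for (int i = 0; i < w.Length; i++)
        v += w[i] * w[i];
    // alpha-dependent terms plus support-vector count
    for (int i = 0; i < prob.ElementsCount; i++)
    {
        sbyte y_i = (sbyte)prob.Y[i];
        v += alpha[i] * (alpha[i] * diag[y_i + 1] - 2);
        if (alpha[i] > 0) ++nSV;
    }
    System.Diagnostics.Debug.WriteLine("Objective value = {0}", v / 2);
    System.Diagnostics.Debug.WriteLine("nSV = {0}", nSV);
    return v / 2;
}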