// Drag-to-rotate controller: while the left mouse button is held the object
// rotates with the pointer; after release it rolls back toward originRotation.
void Update()
{
    if (Input.GetMouseButton(0))
    {
        if (!mouseDown)
        {
            // Drag just started: record the anchor point/rotation and reset
            // the pointer track.
            // NOTE(review): "currenntTrack" is misspelled; the field is
            // declared elsewhere, so renaming it must include that declaration.
            mouseDown = true;
            mouseStartPos = Camera.main.ScreenToViewportPoint(Input.mousePosition);
            startRotation = transform.localRotation;
            currenntTrack.clear();
        }
    }
    else
    {
        if (mouseDown)
        {
            // Drag just ended: capture the rotation reached and set up the
            // roll-back motion toward originRotation.
            mouseDown = false;
            startRotation = transform.localRotation;
            angle = Quaternion.Angle(startRotation, originRotation);
            // NOTE(review): exact float comparison — assumes Quaternion.Angle
            // returns exactly 0 at the rest pose; confirm this is intended.
            if (angle != 0)
            {
                speed = new Accelerator(angle, accel, topRollbackSpeed, accel, 0);
            }
        }
    }
    if (mouseDown)
    {
        // Dragging: rotate proportionally to the pointer offset in viewport
        // space, scaled by maxAngle (x negated so the yaw direction matches
        // the drag direction).
        currenntTrack.add(Camera.main.ScreenToViewportPoint(Input.mousePosition));
        Vector3 dif = mouseStartPos - currenntTrack.value;
        Quaternion r = new Quaternion();
        r.eulerAngles = new Vector3(dif.y * maxAngle.y, dif.x * -maxAngle.x, 0);
        transform.localRotation = startRotation * r;
    }
    else if (angle != 0)
    {
        // Rolling back: interpolate between the origin and the release
        // rotation, driven by the accelerator's step output normalized by
        // the total angle.
        transform.localRotation = Quaternion.Lerp(originRotation, startRotation, speed.step(Time.deltaTime) / angle);
    }
}
/// <summary>
/// Randomizes every pixel of the output texture (seeded by the pixel's
/// previous value and <c>delta</c>) while forcing the alpha-channel bits on.
/// </summary>
public override void Process(Camera cam, Texture output, Accelerator acc)
{
    uint[] pixels = output.Pixel;
    for (int index = 0; index < pixels.Length; ++index)
    {
        uint randomized = Color.Random(pixels[index], delta);
        pixels[index] = Color.AlphaChannel | randomized;
    }
}
// Rebuilds the spatial acceleration structure over the current sphere set.
// The BVH preprocessor symbol selects the tree implementation at compile
// time; both constructors take the primitive list and the maximum number of
// primitives per leaf.
public void rebuildTree()
{
#if BVH
    _tree = new BvhTree(_spheres, maxPrims);
#else
    _tree = new KDTree(_spheres, maxPrims);
#endif
}
/// <summary>
/// Draws the lens-flare sprite chain along the axis from the flare origin
/// toward the screen centre, each sprite centred at its per-flare distance.
/// </summary>
public override void Process(Camera cam, Texture output, Accelerator acc)
{
    // Screen-space anchor points: (px, py) is the flare origin, (cx, cy) the
    // point the flares are spread toward.
    // NOTE(review): hard-coded; presumably these should come from the
    // projected light position and the raster size — confirm with the caller.
    int px = 250;
    int py = 250;
    int cx = 500;
    int cy = 500;
    float dx = (float)(cx - px);
    float dy = (float)(cy - py);
    int posx, posy, xsize, ysize;
    // FIX: removed the dead per-iteration `zoom` computation
    // (1.5f - 0.5f * Pythagoras(dx, dy) / Pythagoras(cx, cy)) — it was
    // loop-invariant and its result was never used. It may originally have
    // been intended to scale xsize/ysize; restore it there if so.
    for (int i = 0; i < flares; i++)
    {
        xsize = flare[i].Width;
        ysize = flare[i].Height;
        // Place the sprite at its fractional distance along the axis.
        posx = px + (int)(dx * flareDist[i]);
        posy = py + (int)(dy * flareDist[i]);
        // Blit centred on (posx, posy) with alpha blending.
        cam.Raster.AddWithAlpha(flare[i], posx - xsize / 2, posy - ysize / 2, xsize, ysize);
    }
}
// Caches the rest rotation and creates the default roll-back accelerator.
void Start()
{
    // Remember the rotation to return to after a drag is released.
    originRotation = transform.localRotation;
    speed = new Accelerator();
}
/// <summary>
/// Constructs a new Cuda stream from the given native pointer.
/// </summary>
/// <param name="accelerator">The associated accelerator.</param>
/// <param name="ptr">The native stream pointer.</param>
/// <param name="responsible">
/// Whether ILGPU is responsible for disposing this stream.
/// </param>
internal CudaStream(Accelerator accelerator, IntPtr ptr, bool responsible)
    : base(accelerator)
{
    streamPtr = ptr;
    responsibleForHandle = responsible;
}
/// <summary>
/// Constructs a new scan provider with its own memory-buffer cache bound to
/// the given accelerator.
/// </summary>
/// <param name="accelerator">The associated accelerator.</param>
internal ScanProvider(Accelerator accelerator)
    : base(accelerator)
{
    bufferCache = new MemoryBufferCache(Accelerator);
}
/// <summary>
/// Launches a simple 1D kernel using shared memory.
/// </summary>
static void Main(string[] args)
{
    // Create main context
    using (var context = new Context())
    {
        // For each available accelerator...
        foreach (var acceleratorId in Accelerator.Accelerators)
        {
            // Create default accelerator for the given accelerator id
            using (var accelerator = Accelerator.Create(context, acceleratorId))
            {
                Console.WriteLine($"Performing operations on {accelerator}");

                // The maximum group size in this example is 128 since the second
                // kernel has a shared-memory array of 128 elements.
                var groupSize = Math.Min(accelerator.MaxNumThreadsPerGroup, 128);
                var data = Enumerable.Range(1, 128).ToArray();
                using (var dataSource = accelerator.Allocate<int>(data.Length))
                {
                    // Initialize data source
                    dataSource.CopyFrom(data, 0, 0, data.Length);
                    var dimension = new GroupedIndex(
                        // Compute the number of groups (round up)
                        (dataSource.Length + groupSize - 1) / groupSize,
                        // Use the given group size
                        groupSize);
                    using (var dataTarget = accelerator.Allocate<int>(data.Length))
                    {
                        var sharedMemVarKernel = accelerator.LoadSharedMemoryStreamKernel1<
                            GroupedIndex,
                            ArrayView<int>,
                            ArrayView<int>,
                            VariableView<int>>(SharedMemoryVariableKernel);
                        dataTarget.MemSetToZero();

                        // Note that *no* value is passed for the shared-memory variable
                        // since shared memory is handled automatically inside the runtime
                        // and shared memory has to be initialized inside a kernel.
                        // The delegate type for this kernel would be:
                        // Action<GroupedIndex, ArrayView<int>, ArrayView<int>>.
                        sharedMemVarKernel(dimension, dataSource.View, dataTarget.View);
                        accelerator.Synchronize();

                        Console.WriteLine("Shared-memory kernel");
                        var target = dataTarget.GetAsArray();
                        for (int i = 0, e = target.Length; i < e; ++i)
                        {
                            Console.WriteLine($"Data[{i}] = {target[i]}");
                        }

                        var sharedMemArrKernel = accelerator.LoadSharedMemoryStreamKernel1<
                            GroupedIndex,
                            ArrayView<int>,
                            ArrayView<int>,
                            ArrayView<int>>(SharedMemoryArrayKernel);
                        dataTarget.MemSetToZero();

                        // Note that *no* value is passed for the shared-memory variable
                        // since shared memory is handled automatically inside the runtime
                        // and shared memory has to be initialized inside a kernel.
                        // The delegate type for this kernel would be:
                        // Action<GroupedIndex, ArrayView<int>, ArrayView<int>>.
                        sharedMemArrKernel(dimension, dataSource.View, dataTarget.View);
                        accelerator.Synchronize();

                        Console.WriteLine("Shared-memory-array kernel");
                        target = dataTarget.GetAsArray();
                        for (int i = 0, e = target.Length; i < e; ++i)
                        {
                            Console.WriteLine($"Data[{i}] = {target[i]}");
                        }
                    }
                }
            }
        }
    }
}
/// <summary>
/// Entry point for a single processing thread.
/// </summary>
/// <param name="arg">The absolute thread index.</param>
private void ExecuteThread(object arg)
{
    // Get the current thread information
    int absoluteThreadIndex = (int)arg;
    int threadIdx = absoluteThreadIndex % MaxNumThreadsPerMultiprocessor;
    // Thread 0 of each group is responsible for the end-of-task notification.
    bool isMainThread = threadIdx == 0;

    // Setup a new thread context for this thread and initialize the lane index
    int laneIdx = threadIdx % WarpSize;
    int warpIdx = threadIdx / WarpSize;
    var threadContext = new CPURuntimeThreadContext(laneIdx, warpIdx)
    {
        LinearGroupIndex = threadIdx
    };
    threadContext.MakeCurrent();

    // Setup the current warp context as it always stays the same
    var warpContext = warpContexts[warpIdx];
    warpContext.MakeCurrent();

    // Setup the current group context as it always stays the same
    groupContext.MakeCurrent();

    CPUAcceleratorTask task = null;
    for (; ;)
    {
        // Get a new task to execute (if any); exiting the loop ends the thread.
        if (!Accelerator.WaitForTask(ref task))
        {
            break;
        }

        // Setup the current group index
        threadContext.GroupIndex = Stride3D.DenseXY.ReconstructFromElementIndex(
            threadIdx, task.GroupDim);

        // Wait for all threads of all multiprocessors to arrive here
        Thread.MemoryBarrier();
        processorBarrier.SignalAndWait();

        try
        {
            // If we are an active group thread
            int groupSize = task.GroupDim.Size;
            if (threadIdx < groupSize)
            {
                try
                {
                    var launcher = task.KernelExecutionDelegate;

                    // Split the grid into different chunks that will be processed
                    // by the available multiprocessors
                    int linearGridDim = task.GridDim.Size;
                    int gridChunkSize = IntrinsicMath.DivRoundUp(
                        linearGridDim, Accelerator.NumMultiprocessors);
                    int gridOffset = gridChunkSize * ProcessorIndex;
                    int linearUserDim = task.TotalUserDim.Size;
                    for (
                        int i = gridOffset, e = gridOffset + gridChunkSize;
                        i < e;
                        ++i)
                    {
                        BeginThreadProcessing();
                        try
                        {
                            // Setup the current grid index
                            threadContext.GridIndex = Stride3D.DenseXY
                                .ReconstructFromElementIndex(
                                    i, task.GridDim);

                            // Invoke the actual kernel launcher, skipping
                            // indices past the end of the user dimension
                            // (the last chunk may be only partially filled).
                            int globalIndex = i * groupSize + threadIdx;
                            if (globalIndex < linearUserDim)
                            {
                                launcher(task, globalIndex);
                            }
                        }
                        finally
                        {
                            EndThreadProcessing();
                        }
                    }
                }
                finally
                {
                    // This thread has already finished processing
                    FinishThreadProcessing();
                }
            }
        }
        finally
        {
            // Wait for all threads of all multiprocessors to arrive here
            processorBarrier.SignalAndWait();

            // If we reach this point and we are the main thread, notify the
            // parent accelerator instance
            if (isMainThread)
            {
                Accelerator.FinishTaskProcessing();
            }
        }
    }
}
/// <summary cref="Accelerator.CanAccessPeerInternal(Accelerator)"/>
protected override bool CanAccessPeerInternal(Accelerator otherAccelerator) =>
    // Peer access is possible exactly when the other accelerator is also
    // a CPU accelerator (pattern match is equivalent to the `as`/null check).
    otherAccelerator is CPUAccelerator;
/// <summary>
/// Constructs a new context object.
/// </summary>
/// <param name="accelerator">The associated accelerator.</param>
protected AlgorithmObject(Accelerator accelerator)
    : base(accelerator)
{ }
//private readonly Action<Index, ArrayView<int>> intSumKernel;

// Creates a GPU wrapper bound to the first available CUDA accelerator.
// Throws InvalidOperationException (from First) if no CUDA accelerator exists.
// NOTE(review): the Context created inline is never stored or disposed, so it
// cannot be released deterministically — consider keeping it in a field and
// disposing it alongside the accelerator. TODO confirm ownership model.
public IlGpuWrapper()
{
    this.gpu = Accelerator.Create(new Context(), Accelerator.Accelerators.First(a => a.AcceleratorType == AcceleratorType.Cuda));
    //this.intSumKernel = this.gpu.LoadAutoGroupedStreamKernel<Index, ArrayView<int>>(ApplyKernel);
}
/// <summary>
/// Builds <paramref name="count"/> menus (optionally with items, icons,
/// keyboard accelerators, and a submenu) and attaches them to
/// <paramref name="menuHolder"/>, creating and registering the application
/// main menu when no holder is supplied.
/// </summary>
void AddMenu(int count, bool addMenuItems = false, int countMenuItems = 1, bool withImage = false, Menu menuHolder = null, bool addSubMenu = false, bool addShortcut = false)
{
    for (int i = 0; i < count; i++)
    {
        var menu = new Microsoft.Maui.Controls.Menu { Text = $"hello {i}" };
        if (addMenuItems)
        {
            for (int j = 0; j < countMenuItems; j++)
            {
                var item = new MenuItem { Text = $"hello menu item {i}.{j}" };
                if (withImage)
                {
                    // BUG FIX: the original chained assignment
                    // (item.IconImageSource = IconImageSource = "bank.png")
                    // also wrote to an unrelated outer member.
                    item.IconImageSource = "bank.png";
                }
                if (addShortcut)
                {
                    // Map the item index to a representative modifier combination.
                    var shortcutKeyBinding = j switch
                    {
                        1 => $"shift+{j}",
                        2 => $"ctrl+{j}",
                        3 => $"alt+{j}",
                        4 => $"cmd+{j}",
                        5 => $"fn+{j}",
                        6 => $"cmd+alt+{j}",
                        _ => $"{j}",
                    };
                    // The item text intentionally mirrors its accelerator string.
                    item.Text = shortcutKeyBinding;
                    MenuItem.SetAccelerator(item, Accelerator.FromString(shortcutKeyBinding));
                }
                menu.Items.Add(item);
            }
        }
        if (addSubMenu)
        {
            var submenu = new Microsoft.Maui.Controls.Menu { Text = $"submenu {i}" };
            var item = new MenuItem { Text = $"submenu item {i}" };
            submenu.Items.Add(item);
            menu.Add(submenu);
        }
        if (menuHolder == null)
        {
            // Lazily create and register the application's main menu, then
            // re-read it so subsequent iterations reuse the registered instance.
            var mainMenu = new Menu();
            SetMenu(Application.Current, mainMenu);
            menuHolder = GetMenu(Application.Current);
        }
        menuHolder.Add(menu);
    }
}
/// <summary cref="Accelerator.DisablePeerAccessInternal(Accelerator)"/>
protected override void DisablePeerAccessInternal(Accelerator otherAccelerator)
{
    // Intentionally empty: there is no peer-access state to tear down for
    // this accelerator type.
}
/// <summary cref="Accelerator.EnablePeerAccessInternal(Accelerator)"/>
protected override void EnablePeerAccessInternal(Accelerator otherAccelerator)
{
    // Peer access across different accelerator kinds is not supported.
    throw new InvalidOperationException(
        RuntimeErrorMessages.CannotEnablePeerAccessToDifferentAcceleratorKind);
}
/// <summary cref="Accelerator.CanAccessPeerInternal(Accelerator)"/>
protected override bool CanAccessPeerInternal(Accelerator otherAccelerator)
{
    // Peer access is never available for this accelerator type.
    return false;
}
// Builds the level: loads animations and the tile map, spawns the actors
// encoded in the map, places the hero, and wires input handling.
public override void Create()
{
    // Create the animations from the given sprite images.
    this.coinAnimation = Animation.GetDefaultAnimation("assets/coin.png", 32, 32, 200);
    this.enemyAnimation = Animation.GetDefaultAnimation("assets/enemy.gif", 32, 32, 200, LColor.black);
    this.accelAnimation = Animation.GetDefaultAnimation(
        "assets/accelerator.gif", 32, 32, 200);
    this.jumpertwoAnimation = Animation.GetDefaultAnimation(
        "assets/jumper_two.gif", 32, 32, 200);

    // Release the following resources when this Screen is disposed.
    // NOTE(review): hero is still null here (it is assigned further below) —
    // confirm PutReleases tolerates a null registration.
    PutReleases(coinAnimation, enemyAnimation, accelAnimation,
        jumpertwoAnimation, hero);

    // Load a map built from a character file (otherwise a standard
    // array-based map would be used by default).
    TileMap indexMap = TileMap.LoadCharsMap("assets/map.chr", 32, 32);
    // If a preconfigured LTexturePack file exists, it can be injected here:
    // indexMap.setImagePack(file);

    // Mark impassable tiles (if unset, every index other than -1 is passable).
    indexMap.SetLimit(new int[] { 'B', 'C', 'i', 'c' });
    indexMap.PutTile('B', "assets/block.png");
    int imgId = indexMap.PutTile('C', "assets/coin_block.gif");
    // Two tile characters share the same image, so reuse the loaded image index.
    indexMap.PutTile('i', imgId);
    indexMap.PutTile('c', "assets/coin_block2.gif");

    // Attach this map to the screen.
    putTileMap(indexMap);

    // Get the 2D array behind the map.
    int[][] maps = indexMap.GetMap();
    int w = indexMap.GetRow();
    int h = indexMap.GetCol();

    // Walk the 2D map and spawn the corresponding actors.
    for (int i = 0; i < w; i++)
    {
        for (int j = 0; j < h; j++)
        {
            switch (maps[j][i])
            {
                case 'o':
                    Coin coin = new Coin(indexMap.TilesToPixelsX(i),
                        indexMap.TilesToPixelsY(j), new Animation(
                            coinAnimation), indexMap);
                    AddTileObject(coin);
                    break;
                case 'k':
                    Enemy enemy = new Enemy(indexMap.TilesToPixelsX(i),
                        indexMap.TilesToPixelsY(j), new Animation(
                            enemyAnimation), indexMap);
                    AddTileObject(enemy);
                    break;
                case 'a':
                    Accelerator accelerator = new Accelerator(
                        indexMap.TilesToPixelsX(i),
                        indexMap.TilesToPixelsY(j), new Animation(
                            accelAnimation), indexMap);
                    AddTileObject(accelerator);
                    break;
                case 'j':
                    JumperTwo jump = new JumperTwo(indexMap.TilesToPixelsX(i),
                        indexMap.TilesToPixelsY(j), new Animation(
                            jumpertwoAnimation), indexMap);
                    AddTileObject(jump);
                    break;
            }
        }
    }

    // Get the hero's animation sheet.
    Animation animation =
        Animation.GetDefaultAnimation("assets/hero.png", 20, 20, 150, LColor.black);

    // Place the 32x32 hero at pixel position (192,32), animated with frames
    // split from hero.png.
    hero = AddJumpObject(192, 32, 32, 32, animation);

    // Make the map scroll to follow the given object (applies to every
    // inserted array map). Note: any LObject can be followed, not just the
    // player character.
    Follow(hero);

    // Listen for jump events.
    hero.listener = new JumpI(indexMap, enemyAnimation);

    AddActionKey(Key.LEFT, new GoLeftKey());
    AddActionKey(Key.RIGHT, new GoRightKey());
    AddActionKey(Key.UP, new GoJumpKey());
    if (LSystem.type != LSystem.ApplicationType.JavaSE)
    {
        // Add an on-screen pad on non-desktop targets.
        LPad pad = new LPad(10, 180);
        pad.SetListener(new PadClick(this));
        Add(pad);
    }
    this.updateListener = new GameUpdateListener(this);
}
/// <summary cref="Accelerator.DisablePeerAccessInternal(Accelerator)"/>
protected override void DisablePeerAccessInternal(Accelerator otherAccelerator)
{
    // FIX: the assertion message previously said "Invalid EnablePeerAccess
    // method" (copy/paste from the enable counterpart); the cref also omitted
    // the "Internal" suffix used by the sibling overrides.
    Debug.Assert(otherAccelerator is CPUAccelerator, "Invalid DisablePeerAccess method");
}
// Yields a 1D index one past the accelerator's maximum group size
// (MaxGroupSize.X + 1) — presumably used to drive out-of-range/failure
// test cases; confirm against the consuming tests.
private static IEnumerable<Index1D> GetIndices1D(Accelerator accelerator)
{
    yield return (accelerator.MaxGroupSize.X + 1);
}
/// <summary>
/// Demonstrates inclusive and exclusive parallel scans, with explicit
/// temporary-memory management and via a ScanProvider, on every available
/// accelerator.
/// </summary>
static void Main()
{
    using (var context = new Context(ContextFlags.AggressiveInlining))
    {
        // Enable algorithms library
        context.EnableAlgorithms();

        // For each available accelerator...
        foreach (var acceleratorId in Accelerator.Accelerators)
        {
            using (var accelerator = Accelerator.Create(context, acceleratorId))
            {
                Console.WriteLine($"Performing operations on {accelerator}");
                var sourceBuffer = accelerator.Allocate<int>(32);
                // Fill the source buffer with the constant 2.
                accelerator.Initialize(accelerator.DefaultStream, sourceBuffer.View, 2);

                // The parallel scan implementation needs temporary storage.
                // By default, every accelerator hosts a memory-buffer cache
                // for operations that require a temporary cache.

                // Computes an inclusive parallel scan
                using (var targetBuffer = accelerator.Allocate<int>(32))
                {
                    // Create a new inclusive scan using the AddInt32 scan operation.
                    // Use the available scan operations in the namespace
                    // ILGPU.Algorithms.ScanReduceOperations.
                    var scan = accelerator.CreateInclusiveScan<int, AddInt32>();

                    // Compute the required amount of temporary memory
                    var tempMemSize = accelerator.ComputeScanTempStorageSize<int>(targetBuffer.Length);
                    using (var tempBuffer = accelerator.Allocate<int>(tempMemSize))
                    {
                        scan(
                            accelerator.DefaultStream,
                            sourceBuffer.View,
                            targetBuffer.View,
                            tempBuffer.View);
                    }

                    Console.WriteLine("Inclusive Scan:");
                    accelerator.Synchronize();
                    var data = targetBuffer.GetAsArray();
                    for (int i = 0, e = data.Length; i < e; ++i)
                    {
                        Console.WriteLine($"Data[{i}] = {data[i]}");
                    }
                }

                // Computes an exclusive parallel scan
                using (var targetBuffer = accelerator.Allocate<int>(32))
                {
                    // Create a new exclusive scan using the AddInt32 scan operation.
                    // Use the available scan operations in the namespace
                    // ILGPU.Algorithms.ScanReduceOperations.
                    var scan = accelerator.CreateExclusiveScan<int, AddInt32>();

                    // Compute the required amount of temporary memory
                    var tempMemSize = accelerator.ComputeScanTempStorageSize<int>(targetBuffer.Length);
                    using (var tempBuffer = accelerator.Allocate<int>(tempMemSize))
                    {
                        scan(
                            accelerator.DefaultStream,
                            sourceBuffer.View,
                            targetBuffer.View,
                            tempBuffer.View);
                    }

                    Console.WriteLine("Exclusive Scan:");
                    accelerator.Synchronize();
                    var data = targetBuffer.GetAsArray();
                    for (int i = 0, e = data.Length; i < e; ++i)
                    {
                        Console.WriteLine($"Data[{i}] = {data[i]}");
                    }
                }

                // Creates a ScanProvider that hosts its own memory-buffer cache to allow
                // for parallel invocations of different operations that require
                // an extra cache.
                using (var scanProvider = accelerator.CreateScanProvider())
                {
                    var scanUsingScanProvider = scanProvider.CreateInclusiveScan<int, AddInt32>();

                    // Please note that the created scan does not need additional
                    // temporary memory allocations as they will be automatically
                    // managed by the ScanProvider instance.
                    using (var targetBuffer = accelerator.Allocate<int>(32))
                    {
                        scanUsingScanProvider(
                            accelerator.DefaultStream,
                            sourceBuffer.View,
                            targetBuffer.View);
                        accelerator.Synchronize();
                        var data = targetBuffer.GetAsArray();
                        for (int i = 0, e = data.Length; i < e; ++i)
                        {
                            Console.WriteLine($"Data[{i}] = {data[i]}");
                        }
                    }
                }

                sourceBuffer.Dispose();
            }
        }
    }
}