public virtual int sceMpegBaseCscVme(TPointer bufferRGB, TPointer bufferRGB2, int bufferWidth, TPointer32 bufferYCrCb)
{
	SceMpegYCrCbBuffer sceMpegYCrCbBuffer = new SceMpegYCrCbBuffer();
	sceMpegYCrCbBuffer.read(bufferYCrCb);

	int width = sceMpegYCrCbBuffer.frameBufferWidth16 << 4;
	int height = sceMpegYCrCbBuffer.frameBufferHeight16 << 4;
	int videoPixelMode = TPSM_PIXEL_STORAGE_MODE_32BIT_ABGR8888;
	int bytesPerPixel = sceDisplay.getPixelFormatBytes(videoPixelMode);
	int rangeX = 0;
	int rangeY = 0;
	int rangeWidth = width;
	int rangeHeight = height;
	int destAddr = bufferRGB.Address;

	//if (log.DebugEnabled) { Console.WriteLine(string.Format("sceMpegBaseCscVme sceMpegYCrCbBuffer: {0}", sceMpegYCrCbBuffer)); }

	int width2 = width >> 1;
	int height2 = height >> 1;
	int length = width * height;
	int length2 = width2 * height2;

	// Read the YCbCr image
	int[] luma = getIntBuffer(length);
	int[] cb = getIntBuffer(length2);
	int[] cr = getIntBuffer(length2);
	read(sceMpegYCrCbBuffer.bufferY, length, luma, 0);
	read(sceMpegYCrCbBuffer.bufferCb, length2, cb, 0);
	read(sceMpegYCrCbBuffer.bufferCr, length2, cr, 0);

	// Convert YCbCr to ABGR
	int[] abgr = getIntBuffer(length);
	H264Utils.YUV2ABGR(width, height, luma, cb, cr, abgr);
	releaseIntBuffer(luma);
	releaseIntBuffer(cb);
	releaseIntBuffer(cr);

	// Do not cache the video image as a texture in the VideoEngine to allow fluid rendering
	VideoEngine.Instance.addVideoTexture(destAddr, destAddr + (rangeY + rangeHeight) * bufferWidth * bytesPerPixel);

	// Write the ABGR image
	if (videoPixelMode == TPSM_PIXEL_STORAGE_MODE_32BIT_ABGR8888 && RuntimeContext.hasMemoryInt())
	{
		// Optimize the most common case
		int pixelIndex = rangeY * width + rangeX;
		for (int i = 0; i < rangeHeight; i++)
		{
			int addr = destAddr + (i * bufferWidth) * bytesPerPixel;
			Array.Copy(abgr, pixelIndex, RuntimeContext.MemoryInt, addr >> 2, rangeWidth);
			pixelIndex += width;
		}
	}
	else
	{
		int addr = destAddr;
		for (int i = 0; i < rangeHeight; i++)
		{
			IMemoryWriter memoryWriter = MemoryWriter.getMemoryWriter(addr, rangeWidth * bytesPerPixel, bytesPerPixel);
			int pixelIndex = (i + rangeY) * width + rangeX;
			for (int j = 0; j < rangeWidth; j++, pixelIndex++)
			{
				int abgr8888 = abgr[pixelIndex];
				int pixelColor = Debug.getPixelColor(abgr8888, videoPixelMode);
				memoryWriter.writeNext(pixelColor);
			}
			memoryWriter.flush();
			addr += bufferWidth * bytesPerPixel;
		}
	}
	releaseIntBuffer(abgr);

	return 0;
}
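// A minimal sketch (added for illustration, not the actual H264Utils implementation) of the
// YCbCr -> ABGR8888 conversion delegated to H264Utils.YUV2ABGR above. It assumes BT.601
// full-range coefficients and 4:2:0 chroma subsampling (one Cb/Cr sample per 2x2 luma block);
// the real conversion code may use different coefficients or rounding.
private static void yuv420ToAbgrSketch(int width, int height, int[] luma, int[] cb, int[] cr, int[] abgr)
{
	int width2 = width >> 1;
	for (int y = 0; y < height; y++)
	{
		for (int x = 0; x < width; x++)
		{
			int lum = luma[y * width + x];
			int u = cb[(y >> 1) * width2 + (x >> 1)] - 128;
			int v = cr[(y >> 1) * width2 + (x >> 1)] - 128;
			// Fixed-point BT.601: R = Y + 1.402*V, G = Y - 0.344*U - 0.714*V, B = Y + 1.772*U
			int r = clamp255(lum + ((91881 * v) >> 16));
			int g = clamp255(lum - ((22554 * u + 46802 * v) >> 16));
			int b = clamp255(lum + ((116130 * u) >> 16));
			// ABGR8888 packing: alpha in the high byte, red in the low byte
			abgr[y * width + x] = unchecked((int)0xFF000000) | (b << 16) | (g << 8) | r;
		}
	}
}

private static int clamp255(int value)
{
	return value < 0 ? 0 : (value > 255 ? 255 : value);
}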
private int hleMpegBaseCscAvcRange(TPointer bufferRGB, int unknown, int bufferWidth, SceMp4AvcCscStruct mp4AvcCscStruct, int rangeX, int rangeY, int rangeWidth, int rangeHeight)
{
	int width = mp4AvcCscStruct.width << 4;
	int height = mp4AvcCscStruct.height << 4;
	// It seems that the pixel output format is always ABGR8888.
	int videoPixelMode = TPSM_PIXEL_STORAGE_MODE_32BIT_ABGR8888;
	int bytesPerPixel = sceDisplay.getPixelFormatBytes(videoPixelMode);
	int destAddr = bufferRGB.Address;

	int width2 = width >> 1;
	int height2 = height >> 1;
	int length = width * height;
	int length2 = width2 * height2;

	// Read the YCbCr image.
	// See the description of the format used by the PSP in sceVideocodecDecode().
	int[] luma = getIntBuffer(length);
	int[] cb = getIntBuffer(length2);
	int[] cr = getIntBuffer(length2);
	int sizeY1 = ((width + 16) >> 5) * (height >> 1) * 16;
	int sizeY2 = (width >> 5) * (height >> 1) * 16;
	int sizeCrCb1 = sizeY1 >> 1;
	int sizeCrCb2 = sizeY1 >> 1;
	int[] bufferY1 = getIntBuffer(sizeY1);
	int[] bufferY2 = getIntBuffer(sizeY2);
	int[] bufferCrCb1 = getIntBuffer(sizeCrCb1);
	int[] bufferCrCb2 = getIntBuffer(sizeCrCb2);

	read(mp4AvcCscStruct.buffer0, sizeY1, bufferY1, 0);
	read(mp4AvcCscStruct.buffer1, sizeY2, bufferY2, 0);
	read(mp4AvcCscStruct.buffer4, sizeCrCb1, bufferCrCb1, 0);
	read(mp4AvcCscStruct.buffer5, sizeCrCb2, bufferCrCb2, 0);

	for (int x = 0, j = 0; x < width; x += 32)
	{
		for (int y = 0, i = x; y < height; y += 2, i += 2 * width, j += 16)
		{
			Array.Copy(bufferY1, j, luma, i, 16);
		}
	}
	for (int x = 16, j = 0; x < width; x += 32)
	{
		for (int y = 0, i = x; y < height; y += 2, i += 2 * width, j += 16)
		{
			Array.Copy(bufferY2, j, luma, i, 16);
		}
	}
	for (int x = 0, j = 0; x < width2; x += 16)
	{
		for (int y = 0; y < height2; y += 2)
		{
			for (int xx = 0, i = y * width2 + x; xx < 8; xx++, i++)
			{
				cb[i] = bufferCrCb1[j++];
				cr[i] = bufferCrCb1[j++];
			}
		}
	}
	for (int x = 0, j = 0; x < width2; x += 16)
	{
		for (int y = 1; y < height2; y += 2)
		{
			for (int xx = 0, i = y * width2 + x; xx < 8; xx++, i++)
			{
				cb[i] = bufferCrCb2[j++];
				cr[i] = bufferCrCb2[j++];
			}
		}
	}

	read(mp4AvcCscStruct.buffer2, sizeY1, bufferY1, 0);
	read(mp4AvcCscStruct.buffer3, sizeY2, bufferY2, 0);
	read(mp4AvcCscStruct.buffer6, sizeCrCb1, bufferCrCb1, 0);
	read(mp4AvcCscStruct.buffer7, sizeCrCb2, bufferCrCb2, 0);

	for (int x = 0, j = 0; x < width; x += 32)
	{
		for (int y = 1, i = x + width; y < height; y += 2, i += 2 * width, j += 16)
		{
			Array.Copy(bufferY1, j, luma, i, 16);
		}
	}
	for (int x = 16, j = 0; x < width; x += 32)
	{
		for (int y = 1, i = x + width; y < height; y += 2, i += 2 * width, j += 16)
		{
			Array.Copy(bufferY2, j, luma, i, 16);
		}
	}
	for (int x = 8, j = 0; x < width2; x += 16)
	{
		for (int y = 0; y < height2; y += 2)
		{
			for (int xx = 0, i = y * width2 + x; xx < 8; xx++, i++)
			{
				cb[i] = bufferCrCb1[j++];
				cr[i] = bufferCrCb1[j++];
			}
		}
	}
	for (int x = 8, j = 0; x < width2; x += 16)
	{
		for (int y = 1; y < height2; y += 2)
		{
			for (int xx = 0, i = y * width2 + x; xx < 8; xx++, i++)
			{
				cb[i] = bufferCrCb2[j++];
				cr[i] = bufferCrCb2[j++];
			}
		}
	}

	releaseIntBuffer(bufferY1);
	releaseIntBuffer(bufferY2);
	releaseIntBuffer(bufferCrCb1);
	releaseIntBuffer(bufferCrCb2);

	// Convert YCbCr to ABGR
	int[] abgr = getIntBuffer(length);
	H264Utils.YUV2ABGR(width, height, luma, cb, cr, abgr);
	releaseIntBuffer(luma);
	releaseIntBuffer(cb);
	releaseIntBuffer(cr);

	// Do not cache the video image as a texture in the VideoEngine to allow fluid rendering
	VideoEngine.Instance.addVideoTexture(destAddr, destAddr + (rangeY + rangeHeight) * bufferWidth * bytesPerPixel);

	// Write the ABGR image
	if (videoPixelMode == TPSM_PIXEL_STORAGE_MODE_32BIT_ABGR8888 && RuntimeContext.hasMemoryInt())
	{
		// Optimize the most common case
		int pixelIndex = rangeY * width + rangeX;
		int addr = destAddr;
		for (int i = 0; i < rangeHeight; i++)
		{
			Array.Copy(abgr, pixelIndex, RuntimeContext.MemoryInt, addr >> 2, rangeWidth);
			pixelIndex += width;
			addr += bufferWidth * bytesPerPixel;
		}
	}
	else
	{
		int addr = destAddr;
		for (int i = 0; i < rangeHeight; i++)
		{
			IMemoryWriter memoryWriter = MemoryWriter.getMemoryWriter(addr, rangeWidth * bytesPerPixel, bytesPerPixel);
			int pixelIndex = (i + rangeY) * width + rangeX;
			for (int j = 0; j < rangeWidth; j++, pixelIndex++)
			{
				int abgr8888 = abgr[pixelIndex];
				int pixelColor = Debug.getPixelColor(abgr8888, videoPixelMode);
				memoryWriter.writeNext(pixelColor);
			}
			memoryWriter.flush();
			addr += bufferWidth * bytesPerPixel;
		}
	}
	releaseIntBuffer(abgr);

	return 0;
}
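// A worked example (added for illustration, not part of the HLE module) of the luma buffer size
// arithmetic used in hleMpegBaseCscAvcRange above. As the de-interleaving loops show, the luma
// plane is split into two interlaced fields: buffer0/buffer1 hold the even rows and buffer2/buffer3
// the odd rows, each as alternating 16-pixel-wide columns (starting at x = 0, 32, ... and
// x = 16, 48, ... respectively). For any width that is a multiple of 16, the two column buffers of
// one field together hold exactly half the luma samples of the frame.
private static void checkLumaBufferSizesSketch()
{
	int width = 29 << 4;   // 464, hypothetical frame width (mp4AvcCscStruct.width = 29)
	int height = 17 << 4;  // 272, hypothetical frame height (mp4AvcCscStruct.height = 17)
	int sizeY1 = ((width + 16) >> 5) * (height >> 1) * 16;  // columns at x = 0, 32, 64, ...
	int sizeY2 = (width >> 5) * (height >> 1) * 16;         // columns at x = 16, 48, 80, ...
	// One field (every second row) contains width * height / 2 luma samples
	Console.WriteLine(sizeY1 + sizeY2 == (width * height) >> 1);  // prints True
}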
public virtual void stepVideo()
{
	image = null;

	int frameSize = -1;
	do
	{
		if (!readPsmfPacket(videoChannel))
		{
			if (videoDataOffset <= 0)
			{
				// End of file reached
				break;
			}
			frameSize = findFrameEnd();
			if (frameSize < 0)
			{
				// Process pending last frame
				frameSize = videoDataOffset;
			}
		}
		else
		{
			frameSize = findFrameEnd();
		}
	} while (frameSize <= 0);

	if (frameSize <= 0)
	{
		endOfVideo = true;
		return;
	}

	int consumedLength = videoCodec.decode(videoData, 0, frameSize);
	if (consumedLength < 0)
	{
		endOfVideo = true;
		return;
	}
	consumeVideoData(consumedLength);

	if (videoCodec.hasImage())
	{
		int width = videoCodec.ImageWidth;
		int height = videoCodec.ImageHeight;
		int size = width * height;
		int size2 = size >> 2;
		int[] luma = new int[size];
		int[] cr = new int[size2];
		int[] cb = new int[size2];
		if (videoCodec.getImage(luma, cb, cr) == 0)
		{
			int[] abgr = new int[size];
			H264Utils.YUV2ARGB(width, height, luma, cb, cr, abgr);
			image = display.createImage(new MemoryImageSource(videoWidth, videoHeight, abgr, 0, width));
			frame++;

			long now = DateTimeHelper.CurrentUnixTimeMillis();
			long currentDuration = now - startTime;
			long videoDuration = frame * 100000L / 3003L;
			if (currentDuration < videoDuration)
			{
				Utilities.sleep((int)(videoDuration - currentDuration), 0);
			}
		}
	}
}
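// A small illustration (added for clarity, not part of the player) of the frame pacing arithmetic
// in stepVideo() above: each decoded frame is assigned a target presentation time of
// frame * 100000 / 3003 milliseconds after startTime (about 33.3 ms per frame, i.e. roughly
// 30 frames per second), and the thread sleeps whenever decoding runs ahead of that schedule.
private static int sleepMillisForFrameSketch(long frame, long startTimeMillis, long nowMillis)
{
	long videoDuration = frame * 100000L / 3003L;       // target presentation time of this frame
	long currentDuration = nowMillis - startTimeMillis; // wall-clock time elapsed so far
	return currentDuration < videoDuration ? (int)(videoDuration - currentDuration) : 0;
}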