/// <summary>Decode an input stream into a bitmap.</summary>
/// <remarks>
/// Decode an input stream into a bitmap. If the input stream is null, or
/// cannot be used to decode a bitmap, the function returns null.
/// The stream's position will be wherever it was after the encoded data
/// was read.
/// </remarks>
/// <param name="is">
/// The input stream that holds the raw data to be decoded into a
/// bitmap.
/// </param>
/// <param name="outPadding">
/// If not null, returns the padding rect for the bitmap if
/// it exists, otherwise sets padding to [-1,-1,-1,-1]. If
/// no bitmap is returned (null), padding is unchanged.
/// </param>
/// <param name="opts">
/// null-ok; Options that control downsampling and whether the
/// image should be completely decoded, or just its size returned.
/// </param>
/// <returns>
/// The decoded bitmap, or null if the image data could not be
/// decoded, or, if opts is non-null, if opts requested only the
/// size be returned (in opts.outWidth and opts.outHeight).
/// </returns>
public static android.graphics.Bitmap decodeStream(java.io.InputStream @is, android.graphics.Rect
	 outPadding, android.graphics.BitmapFactory.Options opts)
{
	// We don't throw in this case, thus allowing the caller to only check
	// the cache, and not force the image to be decoded.
	if (@is == null)
	{
		return null;
	}
	// We need mark/reset to work properly.
	if ([email protected]())
	{
		@is = new java.io.BufferedInputStream(@is, 16 * 1024);
	}
	// So we can call reset() if a given codec gives up after reading up to
	// this many bytes. FIXME: need to find out from the codecs what this
	// value should be.
	@is.mark(1024);
	android.graphics.Bitmap bm;
	if (@is is android.content.res.AssetManager.AssetInputStream)
	{
		bm = nativeDecodeAsset(((android.content.res.AssetManager.AssetInputStream)@is).mAsset,
			outPadding, opts);
	}
	else
	{
		// Pass some temp storage down to the native code. 1024 is made up,
		// but should be large enough to avoid too many small calls back
		// into is.read(...). This number is not related to the value passed
		// to mark(...) above.
		byte[] tempStorage = null;
		if (opts != null)
		{
			tempStorage = opts.inTempStorage;
		}
		if (tempStorage == null)
		{
			tempStorage = new byte[16 * 1024];
		}
		bm = nativeDecodeStream(@is, tempStorage, outPadding, opts);
	}
	if (bm == null && opts != null && opts.inBitmap != null)
	{
		throw new System.ArgumentException("Problem decoding into existing bitmap");
	}
	return finishDecode(bm, outPadding, opts);
}
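// --- Usage sketch (illustrative, not part of the ported sources) ---
// A minimal caller for decodeStream(), assuming the java.io and android.graphics
// bindings bundled with this port; the helper name and file path are hypothetical.
public static void printImageSize(string path)
{
	java.io.InputStream input = new java.io.FileInputStream(path);
	try
	{
		var opts = new android.graphics.BitmapFactory.Options();
		// Ask for dimensions only: decodeStream() then returns null by design
		// and fills in opts.outWidth / opts.outHeight.
		opts.inJustDecodeBounds = true;
		android.graphics.BitmapFactory.decodeStream(input, null, opts);
		System.Console.WriteLine(opts.outWidth + " x " + opts.outHeight);
		// A full decode needs a fresh stream, since this pass consumed the header.
	}
	finally
	{
		input.close();
	}
}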
/// <summary>Return the AudioFileFormat from the given InputStream.</summary>
/// <remarks>Return the AudioFileFormat from the given InputStream. Implementation.</remarks>
/// <param name="bitStream"></param>
/// <param name="baos"></param>
/// <param name="mediaLength"></param>
/// <returns>
/// an AudioFileFormat object based on the audio file data contained
/// in the input stream.
/// </returns>
/// <exception cref="javax.sound.sampled.UnsupportedAudioFileException">
/// if the stream does not contain valid audio file data recognized by the system.
/// </exception>
/// <exception cref="System.IO.IOException">if an I/O exception occurs.</exception>
protected virtual javax.sound.sampled.AudioFileFormat getAudioFileFormat(java.io.InputStream
	 bitStream, java.io.ByteArrayOutputStream baos, int mediaLength)
{
	javax.sound.sampled.AudioFormat format;
	try
	{
		// If we can't read the format of this stream, we must restore the stream
		// to its beginning so other providers can attempt to read it.
		if (bitStream.markSupported())
		{
			// Maximum number of bytes needed to determine the stream encoding:
			// Size of 1st Ogg Packet (Speex header) = OGG_HEADERSIZE + SPEEX_HEADERSIZE + 1
			// Size of 2nd Ogg Packet (Comment)      = OGG_HEADERSIZE + comment_size + 1
			// Size of 3rd Ogg Header (First data)   = OGG_HEADERSIZE + number_of_frames
			// where number_of_frames < 256 and comment_size < 256 (if within 1 frame)
			bitStream.mark(3 * OGG_HEADERSIZE + SPEEX_HEADERSIZE + 256 + 256 + 2);
		}
		int mode = -1;
		int sampleRate = 0;
		int channels = 0;
		int frameSize = javax.sound.sampled.AudioSystem.NOT_SPECIFIED;
		float frameRate = javax.sound.sampled.AudioSystem.NOT_SPECIFIED;
		byte[] header = new byte[128];
		int segments = 0;
		int bodybytes = 0;
		java.io.DataInputStream dis = new java.io.DataInputStream(bitStream);
		if (baos == null)
		{
			baos = new java.io.ByteArrayOutputStream(128);
		}
		int origchksum;
		int chksum;
		// Read the Ogg page header.
		dis.readFully(header, 0, OGG_HEADERSIZE);
		baos.write(header, 0, OGG_HEADERSIZE);
		// The CRC field (bytes 22..25) must be zeroed before recomputing the checksum.
		origchksum = readInt(header, 22);
		header[22] = 0;
		header[23] = 0;
		header[24] = 0;
		header[25] = 0;
		chksum = org.xiph.speex.OggCrc.checksum(0, header, 0, OGG_HEADERSIZE);
		// Make sure it's an Ogg header.
		if (!OGGID.Equals(cspeex.StringUtil.getStringForBytes(header, 0, 4)))
		{
			throw new javax.sound.sampled.UnsupportedAudioFileException("missing ogg id!");
		}
		// How many segments are there?
		segments = header[SEGOFFSET] & unchecked((int)(0xFF));
		if (segments > 1)
		{
			throw new javax.sound.sampled.UnsupportedAudioFileException("Corrupt Speex Header: more than 1 segments");
		}
		dis.readFully(header, OGG_HEADERSIZE, segments);
		baos.write(header, OGG_HEADERSIZE, segments);
		chksum = org.xiph.speex.OggCrc.checksum(chksum, header, OGG_HEADERSIZE, segments);
		// Get the number of bytes in the segment.
		bodybytes = header[OGG_HEADERSIZE] & unchecked((int)(0xFF));
		if (bodybytes != SPEEX_HEADERSIZE)
		{
			throw new javax.sound.sampled.UnsupportedAudioFileException("Corrupt Speex Header: size=" + bodybytes);
		}
		// Read the Speex header.
		dis.readFully(header, OGG_HEADERSIZE + 1, bodybytes);
		baos.write(header, OGG_HEADERSIZE + 1, bodybytes);
		chksum = org.xiph.speex.OggCrc.checksum(chksum, header, OGG_HEADERSIZE + 1, bodybytes);
		// Make sure it's a Speex header.
		if (!SPEEXID.Equals(cspeex.StringUtil.getStringForBytes(header, OGG_HEADERSIZE + 1, 8)))
		{
			throw new javax.sound.sampled.UnsupportedAudioFileException("Corrupt Speex Header: missing Speex ID");
		}
		mode = readInt(header, OGG_HEADERSIZE + 1 + 40);
		sampleRate = readInt(header, OGG_HEADERSIZE + 1 + 36);
		channels = readInt(header, OGG_HEADERSIZE + 1 + 48);
		int nframes = readInt(header, OGG_HEADERSIZE + 1 + 64);
		bool vbr = readInt(header, OGG_HEADERSIZE + 1 + 60) == 1;
		// Verify the page checksum.
		if (chksum != origchksum)
		{
			throw new System.IO.IOException("Ogg CheckSums do not match");
		}
		// Calculate frameSize.
		if (!vbr)
		{
			// The frame size is a constant, so it could be recovered by reading
			// the Comment Packet and the Ogg header of the 1st data packet: the
			// table_segment array repeats the frame size over and over.
		}
		// Calculate frameRate.
		if (mode >= 0 && mode <= 2 && nframes > 0)
		{
			frameRate = ((float)sampleRate) / ((mode == 0 ? 160f : (mode == 1 ? 320f : 640f)) * ((float)nframes));
		}
		format = new javax.sound.sampled.AudioFormat(org.xiph.speex.spi.SpeexEncoding.SPEEX,
			(float)sampleRate, javax.sound.sampled.AudioSystem.NOT_SPECIFIED, channels, frameSize,
			frameRate, false);
	}
	catch (javax.sound.sampled.UnsupportedAudioFileException)
	{
		// Reset the stream so other providers can try it, then rethrow.
		if (bitStream.markSupported())
		{
			bitStream.reset();
		}
		throw;
	}
	catch (System.IO.IOException ioe)
	{
		// Reset the stream so other providers can try it.
		if (bitStream.markSupported())
		{
			bitStream.reset();
		}
		throw new javax.sound.sampled.UnsupportedAudioFileException(ioe.Message);
	}
	return new javax.sound.sampled.AudioFileFormat(org.xiph.speex.spi.SpeexFileFormatType.SPEEX,
		format, javax.sound.sampled.AudioSystem.NOT_SPECIFIED);
}
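// --- Usage sketch (illustrative, not part of the ported sources) ---
// The protected overload above is normally reached through the public
// javax.sound.sampled entry points. The reader class name used here
// (org.xiph.speex.spi.SpeexAudioFileReader) and its public
// getAudioFileFormat(InputStream) overload are assumed from this port's layout.
public static void describeSpeexFile(string path)
{
	// Buffer the stream so mark/reset works and the reader can rewind on failure.
	java.io.InputStream input = new java.io.BufferedInputStream(new java.io.FileInputStream(path));
	try
	{
		var reader = new org.xiph.speex.spi.SpeexAudioFileReader();
		javax.sound.sampled.AudioFileFormat aff = reader.getAudioFileFormat(input);
		javax.sound.sampled.AudioFormat fmt = aff.getFormat();
		System.Console.WriteLine("sample rate: " + fmt.getSampleRate()
			+ ", channels: " + fmt.getChannels()
			+ ", frame rate: " + fmt.getFrameRate());
	}
	finally
	{
		input.close();
	}
}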