Example No. 1
        /**
         * Searches for the "End of central dir record", parses
         * it and positions the stream at the first central directory
         * record.
         */
        private void positionAtCentralDirectory()
        //throws IOException
        {
            bool found         = false;
            long off           = archive.length() - MIN_EOCD_SIZE;
            long stopSearching = java.lang.Math.max(0L, archive.length() - MAX_EOCD_SIZE);

            if (off >= 0)
            {
                archive.seek(off);
                byte[] sig  = ZipArchiveOutputStream.EOCD_SIG;
                int    curr = archive.read();
                while (off >= stopSearching && curr != -1)
                {
                    if (curr == sig[POS_0])
                    {
                        curr = archive.read();
                        if (curr == sig[POS_1])
                        {
                            curr = archive.read();
                            if (curr == sig[POS_2])
                            {
                                curr = archive.read();
                                if (curr == sig[POS_3])
                                {
                                    found = true;
                                    break;
                                }
                            }
                        }
                    }
                    archive.seek(--off);
                    curr = archive.read();
                }
            }
            if (!found)
            {
                throw new java.util.zip.ZipException("archive is not a ZIP archive");
            }
            archive.seek(off + CFD_LOCATOR_OFFSET);
            byte[] cfdOffset = new byte[WORD];
            archive.readFully(cfdOffset);
            archive.seek(ZipLong.getValue(cfdOffset));
        }
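A note on the constants the scan relies on: per the ZIP format, the "End of central dir record" (EOCD) has a 22-byte fixed part (MIN_EOCD_SIZE) and may be followed only by a comment of at most 65535 bytes, which is what bounds MAX_EOCD_SIZE and stopSearching. A minimal self-contained sketch of the same backward scan in plain C#, using System.IO streams instead of the JavApi RandomAccessFile wrapper (the class and method names here are illustrative, not part of the library):

    using System;
    using System.IO;

    static class EocdScanner
    {
        // ZIP spec: the EOCD signature is 0x06054b50 ("PK\x05\x06"), the
        // fixed part of the record is 22 bytes, and the trailing archive
        // comment is at most 65535 bytes, so the signature must start
        // within the last 22 + 65535 bytes of the file.
        const int MinEocdSize = 22;
        const int MaxEocdSize = MinEocdSize + 65535;

        // Returns the offset of the EOCD signature, or -1 if none is found.
        public static long FindEocdOffset(Stream s)
        {
            byte[] sig  = { 0x50, 0x4B, 0x05, 0x06 }; // "PK\x05\x06"
            long   stop = Math.Max(0L, s.Length - MaxEocdSize);

            for (long off = s.Length - MinEocdSize; off >= stop; off--)
            {
                s.Seek(off, SeekOrigin.Begin);
                if (s.ReadByte() == sig[0] && s.ReadByte() == sig[1] &&
                    s.ReadByte() == sig[2] && s.ReadByte() == sig[3])
                {
                    return off;
                }
            }
            return -1; // no EOCD inside the legal window: not a ZIP archive
        }
    }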
Example No. 2
        /**
         *
         * Advance the record pointer to the next record.
         *
         * @see tinySQLTable#NextRecord
         *
         */
        public override bool NextRecord()
        { //throws tinySQLException
            // if the record number is greater than zero,
            // advance the pointer. Otherwise, we're on the first
            // record, and it hasn't been visited before.
            //
            if (record_number > 0)
            {
                // try to make it to the next record. An IOException
                // indicates that we have hit the end of file.
                //
                try
                {
                    ftbl.seek(ftbl.getFilePointer() + record_length + 1);
                }
                catch (java.io.IOException)
                {
                    return(false);
                }
            }

            // increment the record pointer
            //
            record_number++;

            // check for end of file, just in case...
            //
            try
            {
                if (ftbl.getFilePointer() == ftbl.length())
                {
                    return(false);
                }
            }
            catch (Exception e)
            {
                throw new TinySQLException(e.getMessage());
            }

            return(true);
        }
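NextRecord behaves like a cursor advance: it returns true while records remain and false once the pointer passes the last record, so callers typically drive it with a plain while loop. A hedged usage sketch (tbl stands for a DBFFileTable instance obtained from the surrounding tinySQL code):

    // Hypothetical usage: visit every remaining record of the table.
    while (tbl.NextRecord())
    {
        // read fields of the current record here via the table's
        // column accessors before advancing again
    }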
Example No. 3
        /**
         * Insert a row. If c or v == null, insert a blank row
         *
         * @param c Ordered Vector of column names
         * @param v Ordered Vector (must match order of c) of values
         * @see tinySQLTable#InsertRow()
         *
         */
        public override void InsertRow(java.util.Vector <Object> c, java.util.Vector <Object> v) //throws TinySQLException
        {
            try
            {
                /*
                 *       Go to the end of the file, then write out the not deleted indicator
                 */
                ftbl.seek(ftbl.length());
                ftbl.write(RECORD_IS_NOT_DELETED);

                /*
                 *       Write out a blank record
                 */
                for (int i = 1; i < dbfHeader.recordLength; i++)
                {
                    ftbl.write(' ');
                }
                int numRec = (int)dbfHeader.numRecords + 1;
                currentRecordNumber = numRec;
                dbfHeader.setNumRecords(ftbl, numRec);
            }
            catch (Exception e)
            {
                if (TinySQLGlobals.DEBUG)
                {
                    java.lang.SystemJ.err.println(e.toString());                      // e.printStackTrace();
                }
                throw new TinySQLException(e.getMessage());
            }
            if (c != null && v != null)
            {
                UpdateCurrentRow(c, v);
            }
            else
            {
                dbfHeader.setTimestamp(ftbl);
            }
        }
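A hedged usage sketch for InsertRow; the tbl instance and the column names are illustrative, but the null/null form matches the blank-row branch described in the comment above:

    // Hypothetical usage: insert a populated row ...
    java.util.Vector<Object> c = new java.util.Vector<Object>();
    java.util.Vector<Object> v = new java.util.Vector<Object>();
    c.addElement("NAME"); v.addElement("Alice");
    c.addElement("AGE");  v.addElement("42");
    tbl.InsertRow(c, v);       // writes a blank record, then fills it in

    // ... or pass nulls to append a blank record and only touch the timestamp.
    tbl.InsertRow(null, null);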
Example No. 4
        /**
         *
         * Deletes columns from tableName, given a vector of
         * the column names (Strings) to drop.<br>
         *
         * ALTER TABLE table DROP [ COLUMN ] column { RESTRICT | CASCADE }
         *
         * @param tableName the name of the table
         * @param v a Vector of the column names (Strings) to drop.
         * @see tinySQL#AlterTableDropCol
         *
         */
        internal override void AlterTableDropCol(String tableName, java.util.Vector <Object> v)
        { //throws IOException, tinySQLException
            // rename the file ...
            String fullpath = dataDir + java.io.File.separator + tableName + DBFFileTable.dbfExtension;
            String tmppath  = dataDir + java.io.File.separator + tableName + "-tmp" + DBFFileTable.dbfExtension;

            if (Utils.renameFile(fullpath, tmppath) == false)
            {
                throw new TinySQLException("ALTER TABLE DROP COL error in renaming " + fullpath);
            }

            try
            {
                // open the old file ...
                java.io.RandomAccessFile ftbl_tmp = new java.io.RandomAccessFile(tmppath, "r");

                // read the first 32 bytes ...
                DBFHeader dbfHeader_tmp = new DBFHeader(ftbl_tmp);

                // read the column info ...
                java.util.Vector <Object> coldef_list = new java.util.Vector <Object>(dbfHeader_tmp.numFields - v.size());
                int locn = 0; // offset of the current column

                for (int i = 1; i <= dbfHeader_tmp.numFields; i++)
                {
                    TsColumn coldef = readColdef(ftbl_tmp, tableName, i, locn);

                    // remove the DROP columns from the existing cols ...
                    for (int jj = 0; jj < v.size(); jj++)
                    {
                        String colName = (String)v.elementAt(jj);
                        if (coldef.name.equals(colName))
                        {
                            Utils.log("Dropping " + colName);
                            goto nextCol; // Java's "continue nextCol": skip this column
                        }
                    }

                    locn += coldef.size; // increment locn by the length of this field.
                    // Utils.log("Recycling " + coldef.name);
                    coldef_list.addElement(coldef);

                    // the label must sit at the end of the loop body; labelling the
                    // for statement itself would make the goto restart the loop
                    nextCol: ;
                }

                // create the new table ...
                CreateTable(tableName, coldef_list);

                // copy the data from old to new

                // opening new created dBase file ...
                java.io.RandomAccessFile ftbl = new java.io.RandomAccessFile(fullpath, "rw");
                ftbl.seek(ftbl.length()); // go to end of file

                int numRec = 0;
                for (int iRec = 1; iRec <= dbfHeader_tmp.numRecords; iRec++)
                {
                    if (DBFFileTable.isDeleted(ftbl_tmp, dbfHeader_tmp, iRec) == true)
                    {
                        continue;
                    }

                    numRec++;

                    ftbl.write(DBFFileTable.RECORD_IS_NOT_DELETED);  // write flag

                    // Read the whole column into the table's cache
                    String column = DBFFileTable._GetCol(ftbl_tmp, dbfHeader_tmp, iRec);

                    for (int iCol = 0; iCol < coldef_list.size(); iCol++) // write columns
                    {
                        TsColumn coldef = (TsColumn)coldef_list.elementAt(iCol);

                        // Extract column values from cache
                        String value = DBFFileTable.getColumn(coldef, column);
                        java.lang.SystemJ.outJ.println("From cache column value" + value);

                        value = Utils.forceToSize(value, coldef.size, " "); // enforce the correct column length

                        byte[] b = value.getBytes(Utils.encode);            // transform to byte and write to file
                        ftbl.write(b);
                    }
                }

                ftbl_tmp.close();

                // remove temp file
                java.io.File f = new java.io.File(tmppath);
                if (f.exists())
                {
                    f.delete();
                }

                DBFHeader.writeNumRecords(ftbl, numRec);
                ftbl.close();
            }
            catch (Exception e)
            {
                throw new TinySQLException(e.getMessage());
            }
        }
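One conversion detail worth spelling out: the nextCol label above is the usual C# rendering of Java's labeled continue, and the label must close the loop body rather than precede the for statement, since a goto to a label on the for itself would re-run the initializer and restart the scan. A small self-contained illustration of the pattern:

    using System;

    class LabeledContinueDemo
    {
        static void Main()
        {
            int[][] rows = { new[] { 1, 2 }, new[] { 3, -1 }, new[] { 5, 6 } };
            foreach (int[] row in rows)
            {
                foreach (int x in row)
                {
                    if (x < 0) goto nextRow; // like Java's "continue nextRow"
                }
                Console.WriteLine("row is all non-negative");
                nextRow: ; // jump target at the END of the outer loop body
            }
        }
    }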
Example No. 5
        /**
         * Creates new columns in tableName, given a vector of
         * column definitions (tsColumn).<br>
         * It is necessary to copy the whole file to do this task.
         *
         * ALTER TABLE table [ * ] ADD [ COLUMN ] column type
         *
         * @param tableName the name of the table
         * @param v a Vector of the column definitions (tsColumn) to add.
         * @see tinySQL#AlterTableAddCol
         */
        internal override void AlterTableAddCol(String tableName, java.util.Vector <Object> v)
        { //throws IOException, tinySQLException
            // rename the file ...
            String fullpath = dataDir + java.io.File.separator + tableName + DBFFileTable.dbfExtension;
            String tmppath  = dataDir + java.io.File.separator + tableName + "_tmp_tmp" + DBFFileTable.dbfExtension;

            if (Utils.renameFile(fullpath, tmppath) == false)
            {
                throw new TinySQLException("ALTER TABLE ADD COL error in renaming " + fullpath);
            }

            try
            {
                // open the old file ...
                java.io.RandomAccessFile ftbl_tmp = new java.io.RandomAccessFile(tmppath, "r");

                // read the first 32 bytes ...
                DBFHeader dbfHeader_tmp = new DBFHeader(ftbl_tmp);

                // read the column info ...
                java.util.Vector <Object> coldef_list = new java.util.Vector <Object>(dbfHeader_tmp.numFields + v.size());
                int locn = 0; // offset of the current column
                for (int i = 1; i <= dbfHeader_tmp.numFields; i++)
                {
                    TsColumn coldef = readColdef(ftbl_tmp, tableName, i, locn);
                    locn += coldef.size; // increment locn by the length of this field.
                    coldef_list.addElement(coldef);
                }

                // add the new column definitions to the existing ...
                for (int jj = 0; jj < v.size(); jj++)
                {
                    coldef_list.addElement(v.elementAt(jj));
                }

                // create the new table ...
                CreateTable(tableName, coldef_list);

                // copy the data from old to new

                // opening new created dBase file ...
                java.io.RandomAccessFile ftbl = new java.io.RandomAccessFile(fullpath, "rw");
                ftbl.seek(ftbl.length()); // go to end of file

                int numRec = 0;
                for (int iRec = 1; iRec <= dbfHeader_tmp.numRecords; iRec++)
                {
                    String str = GetRecord(ftbl_tmp, dbfHeader_tmp, iRec);

                    // Utils.log("Copy of record#" + iRec + " str='" + str + "' ...");

                    if (str == null)
                    {
                        continue;                           // record was marked as deleted, ignore it
                    }
                    ftbl.write(str.getBytes(Utils.encode)); // write original record
                    numRec++;

                    for (int iCol = 0; iCol < v.size(); iCol++) // write added columns
                    {
                        TsColumn coldef = (TsColumn)v.elementAt(iCol);

                        // enforce the correct column length
                        String value = Utils.forceToSize(coldef.defaultVal, coldef.size, " ");

                        // transform to byte and write to file
                        byte[] b = value.getBytes(Utils.encode);
                        ftbl.write(b);
                    }
                }

                ftbl_tmp.close();

                DBFHeader.writeNumRecords(ftbl, numRec);
                ftbl.close();

                Utils.delFile(tmppath);
            }
            catch (Exception e)
            {
                throw new TinySQLException(e.getMessage());
            }
        }
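Both ALTER TABLE helpers share the same skeleton: rename the live .dbf aside, create a replacement with the new column list, copy the surviving records across, then patch the record count and remove the temp file. A minimal sketch of that rename-and-rewrite idiom in plain C# (the .tmp suffix and the copyRecords callback are illustrative):

    using System;
    using System.IO;

    static class RewriteIdiom
    {
        // Park the original file, build its replacement in place, and only
        // drop the parked copy once the rewrite has succeeded.
        public static void Rewrite(string path, Action<Stream, Stream> copyRecords)
        {
            string tmp = path + ".tmp";
            File.Move(path, tmp); // rename the file ...

            using (var src = File.OpenRead(tmp))
            using (var dst = File.Create(path))
            {
                copyRecords(src, dst); // caller filters/extends each record
            }

            File.Delete(tmp); // success: the backup is no longer needed
        }
    }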
Example No. 6
        public RAFStream(java.io.RandomAccessFile raf, long pos)  //throws IOException
        {
            mSharedRaf = raf;
            mOffset    = pos;
            mLength    = raf.length();
        }
Example No. 7
        /*
         * Find the central directory and read the contents.
         *
         * <p>The central directory can be followed by a variable-length comment
         * field, so we have to scan through it backwards.  The comment is at
         * most 64K, plus we have 18 bytes for the end-of-central-dir stuff
         * itself, plus apparently sometimes people throw random junk on the end
         * just for the fun of it.</p>
         *
         * <p>This is all a little wobbly.  If the wrong value ends up in the EOCD
         * area, we're hosed. This appears to be the way that everybody handles
         * it though, so we're in good company if this fails.</p>
         */
        private void readCentralDir()  //throws IOException
        {
            /*
             * Scan back, looking for the End Of Central Directory field.  If
             * the archive doesn't have a comment, we'll hit it on the first
             * try.
             *
             * No need to synchronize mRaf here -- we only do this when we
             * first open the Zip file.
             */
            long scanOffset = mRaf.length() - ENDHDR;

            if (scanOffset < 0)
            {
                throw new ZipException("too short to be Zip");
            }

            long stopOffset = scanOffset - 65536;

            if (stopOffset < 0)
            {
                stopOffset = 0;
            }

            while (true)
            {
                mRaf.seek(scanOffset);
                if (ZipEntry.readIntLE(mRaf) == 101010256L) // 0x06054b50: EOCD signature
                {
                    break;
                }

                scanOffset--;
                if (scanOffset < stopOffset)
                {
                    throw new ZipException("EOCD not found; not a Zip archive?");
                }
            }

            /*
             * Found it, read the EOCD.
             *
             * For performance we want to use buffered I/O when reading the
             * file.  We wrap a buffered stream around the random-access file
             * object.  If we just read from the RandomAccessFile we'll be
             * doing a read() system call every time.
             */
            RAFStream rafs = new RAFStream(mRaf, mRaf.getFilePointer());

            java.io.BufferedInputStream bin = new java.io.BufferedInputStream(rafs, ENDHDR);

            int diskNumber         = ler.readShortLE(bin);
            int diskWithCentralDir = ler.readShortLE(bin);
            int numEntries         = ler.readShortLE(bin);
            int totalNumEntries    = ler.readShortLE(bin);

            /*centralDirSize =*/ ler.readIntLE(bin);
            long centralDirOffset = ler.readIntLE(bin);

            /*commentLen =*/ ler.readShortLE(bin);

            if (numEntries != totalNumEntries ||
                diskNumber != 0 ||
                diskWithCentralDir != 0)
            {
                throw new ZipException("spanned archives not supported");
            }

            /*
             * Seek to the first CDE and read all entries.
             * However, when Z_SYNC_FLUSH is used the offset may not point directly
             * to the CDE so skip over until we find it.
             * At most it will be 6 bytes away (one or two bytes for empty block, 4 bytes for
             * empty block signature).
             */
            scanOffset = centralDirOffset;
            stopOffset = scanOffset + 6;

            while (true)
            {
                mRaf.seek(scanOffset);
                if (ZipEntry.readIntLE(mRaf) == CENSIG)
                {
                    break;
                }

                scanOffset++;
                if (scanOffset > stopOffset)
                {
                    throw new ZipException("Central Directory Entry not found");
                }
            }

            // If CDE is found then go and read all the entries
            rafs = new RAFStream(mRaf, scanOffset);
            bin  = new java.io.BufferedInputStream(rafs, 4096);
            for (int i = 0; i < numEntries; i++)
            {
                ZipEntry newEntry = new ZipEntry(ler, bin);
                mEntries.put(newEntry.getName(), newEntry);
            }
        }
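A footnote on the magic numbers in the two scans above: 101010256L is simply the decimal spelling of 0x06054b50, the little-endian end-of-central-directory signature (bytes 'P', 'K', 0x05, 0x06), and CENSIG plays the same role for central directory entries. A quick self-contained check:

    using System;

    class ZipSignatureCheck
    {
        static void Main()
        {
            // The EOCD signature compared against in readCentralDir.
            Console.WriteLine(0x06054b50 == 101010256L); // True

            // On a little-endian host the low-order bytes come first,
            // so the signature starts with the familiar "PK" marker.
            byte[] sig = BitConverter.GetBytes(0x06054b50);
            Console.WriteLine((char)sig[0]); // P
            Console.WriteLine((char)sig[1]); // K
        }
    }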