/*
 * Validates the chosen folder and virtual-disk size, computes the block
 * counts (f1blks/f2blks/f3blks) for the requested size, and starts the
 * background format worker. On invalid input, shows a message box and bails.
 */
private void button1_Click(object sender, EventArgs e)
{
    bool error = false;
    string errstr = "";

    if (textBox1.Text == "")
    {
        error = true;
        errstr = "Please choose a valid folder";
    }
    else if (comboBox1.Text == "GB" || comboBox1.Text == "TB") // simplified from the original double negative
    {
        int value = 9999;
        try
        {
            value = Int32.Parse(textBox2.Text);
        }
        catch (Exception ex)
        {
            Console.WriteLine(ex.Message);
            errstr = "Please choose digits only for Virtual Disk Size";
            textBox2.Text = "0";
            error = true;
        }

        if (!error && comboBox1.Text == "GB" && value <= 8192 && value >= 1)
        {
            // 262144 = number of 4K blocks per GB.
            f1blks = value * (262144);
            f2blks = (value * (262144)) / 512;
            f3blks = OPS.OffsetToFBN((f1blks / 8)) + 1;
        }
        else if (!error && comboBox1.Text == "TB" && value <= 8 && value >= 1)
        {
            f1blks = value * (262144) * 1024;
            f2blks = ((value * (262144)) / 512) * 1024;
            f3blks = OPS.OffsetToFBN((f1blks / 8)) + 1;
        }
        else if (!error)
        {
            error = true;
            errstr = "Please choose size within specified limits"; // fixed typo: was "withing"
        }
    }
    // NOTE(review): if comboBox1.Text is neither "GB" nor "TB", we fall through
    // with error == false and stale f?blks values — presumably the combo box
    // only offers GB/TB; confirm against the form designer.

    if (error)
    {
        MessageBox.Show(errstr, "Check VDSize");
        return;
    }

    disable_all_ui_items();
    worker = new BackgroundWorker();
    worker.WorkerReportsProgress = true;
    worker.ProgressChanged += worker_ProgressChanged;
    worker.DoWork += worker_DoWork;
    worker.RunWorkerCompleted += worker_RunWorkerCompleted;
    worker.RunWorkerAsync();
}
/*
 * Processes one refcount-update request against a 4K page of the inode file:
 * reads block cu.tfbn from tfile0, decrypts it, then walks the 32 on-disk
 * inodes packed in the page (128 bytes each) and applies the update to every
 * child dbn of each inode.
 */
private void do_inode_refupdate_work(UpdateReqI cu, int childcnt)
{
    byte[] buffer = new byte[4096];
    lock (tfile0)
    {
        tfile0.Seek((long)cu.tfbn * 4096, SeekOrigin.Begin);
        tfile0.Read(tmpiodatatfileR, 0, 4096);
        // tfile pages are stored encrypted; decrypt into the working buffer.
        CONFIG.Decrypt_Read_WRBuf(tmpiodatatfileR, buffer);
        //DEFS.DEBUG("ENCY", "READ inoLo : " + OPS.ChecksumPageWRLoader(buffer));
        DEFS.DEBUG("CNTR", "do_inode_refupdate_work (" + cu.tfbn + ") childcnt =" + childcnt);
    }
    /*
     * Parent of inowip is always -1.
     */
    RedFS_Inode wip = new RedFS_Inode(WIP_TYPE.REGULAR_FILE, 0, -1);
    byte[] buf = new byte[128];
    // 32 wips per 4K page, 128 bytes each.
    for (int i = 0; i < 32; i++)
    {
        for (int t = 0; t < 128; t++)
        {
            buf[t] = buffer[i * 128 + t];
        }
        wip.parse_bytes(buf);

        BLK_TYPE type = BLK_TYPE.IGNORE;
        int numidx = 0;
        // Derive child-block type and child count from the wip's tree level;
        // levels other than 0-2 keep type == IGNORE and numidx == 0 (loop below is a no-op).
        switch (wip.get_inode_level())
        {
            case 0: type = BLK_TYPE.REGULAR_FILE_L0; numidx = OPS.NUML0(wip.get_filesize()); break;
            case 1: type = BLK_TYPE.REGULAR_FILE_L1; numidx = OPS.NUML1(wip.get_filesize()); break;
            case 2: type = BLK_TYPE.REGULAR_FILE_L2; numidx = OPS.NUML2(wip.get_filesize()); break;
        }

        // Apply the refcount update to every child dbn of this wip.
        for (int x = 0; x < numidx; x++)
        {
            int dbn = wip.get_child_dbn(x);
            //if (dbn <= 0) continue;
            DEFS.DEBUGCLR("^^^^^", "wip[" + x + "] " + dbn + "," + wip.get_wiptype() + "," + childcnt + " fsize = " + wip.get_filesize());
            DEFS.DEBUGCLR("@@@", wip.get_string_rep2());
            apply_update_internal(dbn, type, childcnt, cu.optype, true);
        }
    }
    OPS.dump_inoL0_wips(buffer);
}
/*
 * Flushes this file's in-core wip: syncs its dirty buffers, checks the wip
 * back into the owning fsid's inode file, and commits the fsid to disk.
 * No-op (beyond a debug line) when the wip was never loaded.
 */
public void sync()
{
    // NOTE(review): lock(this) is a known anti-pattern (external code could
    // take the same lock); the whole file uses it, so kept for consistency.
    lock (this)
    {
        if (_mywip != null)
        {
            // Lock order here is this -> FSIDList[fsid]; other paths in this
            // file (e.g. unmount) follow the same order.
            lock (REDDY.FSIDList[m_associated_fsid])
            {
                RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("GC");
                DEFS.DEBUG("SYNC", "CFile (" + m_inode + ") -mywip.size = " + _mywip.get_filesize());
                REDDY.ptrRedFS.sync(_mywip);
                // Persist the wip back into the inode file before clearing dirty.
                OPS.Checkin_Wip(inowip, _mywip, m_inode);
                _mywip.is_dirty = false;
                REDDY.FSIDList[m_associated_fsid].sync_internal();
                REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[m_associated_fsid]);
            }
        }
        else
        {
            DEFS.DEBUG("FSID", "inserted/unsyncd : " + m_name);
        }
    }
}
/*
 * Aggregates fingerprint messages into a 1024-slot batch (msglist) and fires
 * a dedupe batch when the batch boundary changes or on an explicit flush.
 *
 * Calling convention: msg == null means "flush now". A non-null msg is added
 * to the current batch only if it belongs to the same (fsid, inode, fbn-group)
 * as the last queued entry (or the batch is empty); otherwise the batch is
 * flushed and the msg is re-submitted recursively.
 */
private void MSG_AGGREGATE(fingerprintDMSG msg)
{
    if (msg == null)
    {
        // Flush path: hand the accumulated batch to the dedupe engine.
        if (counter > 0)
        {
            REDDY.ptrIFSDMux.DoDedupeBatch(msglist);
        }
        DEFS.DEBUGYELLOW("BATCH", "DoDedupeBatch, counter = " + counter);
        for (int i = 0; i < 1024; i++)
        {
            msglist[i] = null;
        }
        counter = 0;
        top = -1;
        return;
    }
    else if ((top == -1) || (msg.fsid == msglist[top].fsid && msg.inode == msglist[top].inode &&
             OPS.SomeFBNToStartFBN(1, msg.fbn) == OPS.SomeFBNToStartFBN(1, msglist[top].fbn)))
    {
        // Same batch: slot is determined by fbn modulo the batch width.
        int idx = (int)(msg.fbn % 1024);
        DEFS.ASSERT(msglist[idx] == null, "This cannot be populated already");
        msglist[idx] = DUP(msg); // keep a private copy; caller may reuse msg
        top = idx;
        counter++;
    }
    else
    {
        //send msg here.
        // Different batch: flush, then queue this msg into the fresh batch.
        MSG_AGGREGATE(null);
        MSG_AGGREGATE(msg);
        return;
    }
}
/*
 * Returns the child dbn stored at slot 'idx' of this block's data buffer.
 * Serialized on 'data' so readers never observe a torn update.
 */
public int get_child_dbn(int idx)
{
    lock (data)
    {
        int child = OPS.get_dbn(data, idx);
        return (child);
    }
}
/*
 * Deserializes this item from 'buffer' at 'offset':
 * a 4-byte dbn followed by a 16-byte fingerprint.
 */
void Item.parse_bytes(byte[] buffer, int offset)
{
    dbn = OPS.get_int(buffer, offset + 0);
    // Copy the 16-byte fingerprint in one call instead of a manual loop.
    Array.Copy(buffer, offset + 4, fp, 0, 16);
}
/*
 * Stores 'dbn' into child slot 'idx' and marks the block dirty so the next
 * flush pass writes it out. Serialized on 'data' against concurrent readers.
 */
public void set_child_dbn(int idx, int dbn)
{
    lock (data)
    {
        is_dirty = true;
        OPS.set_dbn(data, idx, dbn);
    }
}
/*
 * Serializes this item into 'buffer' at 'offset':
 * a 4-byte dbn followed by the 16-byte fingerprint.
 */
void Item.get_bytes(byte[] buffer, int offset)
{
    OPS.set_int(buffer, offset + 0, dbn);
    // Copy the 16-byte fingerprint in one call instead of a manual loop.
    Array.Copy(fp, 0, buffer, offset + 4, 16);
}
/*
 * Serializes this item into 'buffer' at 'offset':
 * 4-byte inode, 4-byte fbn, then the 16-byte fingerprint.
 */
void Item.get_bytes(byte[] buffer, int offset)
{
    OPS.set_int(buffer, offset + 0, inode);
    OPS.set_int(buffer, offset + 4, fbn);
    // Copy the 16-byte fingerprint in one call instead of a manual loop.
    Array.Copy(fp, 0, buffer, offset + 8, 16);
}
/*
 * Deserializes this item from 'buffer' at 'offset':
 * 4-byte fsid, inode, fbn, dbn, cnt, then the 16-byte fingerprint.
 */
void Item.parse_bytes(byte[] buffer, int offset)
{
    fsid = OPS.get_int(buffer, offset + 0);
    inode = OPS.get_int(buffer, offset + 4);
    fbn = OPS.get_int(buffer, offset + 8);
    dbn = OPS.get_int(buffer, offset + 12);
    cnt = OPS.get_int(buffer, offset + 16);
    // Copy the 16-byte fingerprint in one call instead of a manual loop.
    Array.Copy(buffer, offset + 20, fp, 0, 16);
}
/*
 * Serializes this item into 'buffer' at 'offset':
 * 4-byte fsid, inode, fbn, sourcedbn, destinationdbn, then the 16-byte fingerprint.
 */
void Item.get_bytes(byte[] buffer, int offset)
{
    OPS.set_int(buffer, offset + 0, fsid);
    OPS.set_int(buffer, offset + 4, inode);
    OPS.set_int(buffer, offset + 8, fbn);
    OPS.set_int(buffer, offset + 12, sourcedbn);
    OPS.set_int(buffer, offset + 16, destinationdbn);
    // Copy the 16-byte fingerprint in one call instead of a manual loop.
    Array.Copy(fp, 0, buffer, offset + 20, 16);
}
//private int start_dbn_bulk;
/*
 * Opens (or creates) the backing map file and its ".x" sidecar, then restores
 * the persisted used-block count from the sidecar's first 4 bytes.
 */
public Map256M(string name)
{
    mfile = new FileStream(CONFIG.GetBasePath() + name, FileMode.OpenOrCreate, FileAccess.ReadWrite);
    xfile = new FileStream(CONFIG.GetBasePath() + name + ".x", FileMode.OpenOrCreate, FileAccess.ReadWrite);
    initialized = true;
    byte[] buf = new byte[4];
    // NOTE(review): Read's return value is ignored; for a freshly created
    // sidecar this reads 0 bytes and buf stays zeroed, which presumably
    // yields a used-block count of 0 — confirm OPS.get_dbn on a zero buffer.
    xfile.Read(buf, 0, 4);
    USED_BLK_COUNT = OPS.get_dbn(buf, 0);
    DEFS.DEBUGYELLOW("REF", "Found used block count = " + USED_BLK_COUNT);
}
/*
 * Unmounts this file's in-core wip: syncs and flushes it, checks it back into
 * the fsid's inode file, and moves the file to FILE_UNMOUNTED. During normal
 * operation the unmount is skipped for files that are busy in Dokan I/O or
 * not yet timed out; during shutdown the sync happens unconditionally.
 */
void CInode.unmount(bool inshutdown)
{
    long curr = DateTime.Now.ToUniversalTime().Ticks;
    int seconds = (int)((curr - creation_time) / 10000000); // ticks are 100ns; 10^7 per second
    DEFS.DEBUG("UNMOUNT", "CFile (" + m_inode + ") umnount : " + m_name + " inshutdown flag = " + inshutdown +
               " is _mywip null = " + (_mywip == null) + " secs = " + seconds);
    // Skip while the file is actively used and its idle timeout hasn't expired.
    if (inshutdown == false && timeoutcheck() == false && m_state == FILE_STATE.FILE_IN_DOKAN_IO)
    {
        return;
    }
    /*
     * We cannot unmount a dirty wip directly, it must first be cleaned, so we
     * dont do this here. The next sync iteration will clean the wip, and then
     * we are good to unmount. If we are being shutdown, then we sync() here itself.
     */
    if ((inshutdown == false) && ((_mywip == null)))// || _mywip.is_dirty == false))
    {
        DEFS.ASSERT(m_state != FILE_STATE.FILE_IN_DOKAN_IO, "Cannot be dokan io when _mywip = NULL");
        return;
    }
    /*
     * _mywip is not null and dirty, or we are shutting down.
     */
    lock (this)
    {
        DEFS.ASSERT(m_state != FILE_STATE.FILE_ORPHANED, "We are in sync path can cannot have an orphaned file");
        if (_mywip != null)
        {
            REDDY.ptrRedFS.sync(_mywip);
            REDDY.ptrRedFS.flush_cache(_mywip, inshutdown);
        }
        // Same this -> FSIDList lock order as the other sync paths in this file.
        lock (REDDY.FSIDList[m_associated_fsid])
        {
            if (_mywip != null)
            {
                RedFS_Inode inowipX = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("Umount file iwp:" + m_name);
                OPS.Checkin_Wip(inowipX, _mywip, m_inode);
                DEFS.ASSERT(m_state != FILE_STATE.FILE_DELETED, "Wrong state detected222!");
                REDDY.FSIDList[m_associated_fsid].sync_internal();
                REDDY.FSIDList[m_associated_fsid].set_dirty(true);
                _mywip = null; // wip is now owned by the inode file on disk
            }
            m_state = FILE_STATE.FILE_UNMOUNTED;
        }
    }
}
/*
 * Renders this (dbn, fingerprint) record for dumps/sorting; the field order
 * depends on the active sort order. INO_FBN_BASED is not valid for this item
 * type and yields a marker string; unknown sort orders yield null.
 */
string Item.get_string_rep()
{
    if (stype == DEDUP_SORT_ORDER.UNDEFINED_PLACEHOLDER || stype == DEDUP_SORT_ORDER.DBN_BASED)
    {
        return (dbn + "\t" + OPS.HashToString(fp));
    }
    if (stype == DEDUP_SORT_ORDER.FINGERPRINT_BASED)
    {
        return (OPS.HashToString(fp) + "\t" + dbn);
    }
    if (stype == DEDUP_SORT_ORDER.INO_FBN_BASED)
    {
        return ("Wrong type set");
    }
    return (null);
}
/*
 * Renders this dedupe record (source/destination dbn, fingerprint and
 * fsid/inode/fbn) for dumps/sorting; the leading field depends on the active
 * sort order. Unknown sort orders yield null.
 */
string Item.get_string_rep()
{
    string hash = OPS.HashToString(fp);
    if (stype == DEDUP_SORT_ORDER.DBN_BASED || stype == DEDUP_SORT_ORDER.UNDEFINED_PLACEHOLDER)
    {
        return (sourcedbn + "\t" + destinationdbn + "\t" + hash + "\t" + fsid + "," + inode + "," + fbn);
    }
    if (stype == DEDUP_SORT_ORDER.FINGERPRINT_BASED)
    {
        return (hash + "\t" + sourcedbn + "\t" + destinationdbn + "\t" + fsid + "," + inode + "," + fbn);
    }
    if (stype == DEDUP_SORT_ORDER.INO_FBN_BASED)
    {
        return (fsid + "," + inode + "," + fbn + ",\t" + hash + "\t" + sourcedbn + "\t" + destinationdbn);
    }
    return (null);
}
/*
 * Scans the given 4k buffer, and returns the offset of a free bit.
 * offset can vary between 0 and 4096*8
 *
 * Side effect: the chosen bit is SET in 'data' before returning, so the
 * caller must write the buffer back to persist the allocation.
 * Returns -1 when no free bit exists at or after 'startsearchoffset'.
 */
private int get_free_bitoffset(int startsearchoffset, byte[] data)
{
    DEFS.ASSERT(data.Length == 4096 && startsearchoffset < 4096, "get_free_bitoffset input must be a " +
                " buffer of size 4096, but passed size = " + data.Length);
    for (int offset = startsearchoffset; offset < data.Length; offset++)
    {
        // 0xFF means all 8 bits in this byte are taken; skip.
        if (data[offset] != (byte)0xFF)
        {
            int x = OPS.get_first_free_bit(data[offset]);
            data[offset] = OPS.set_free_bit(data[offset], x); // claim the bit in-place
            return (offset * 8 + x);
        }
    }
    return (-1);
}
/*
 * This file dups itself into another file, the old version is not deleted.
 * More like a new reincarnate without losing old data. The responsibility
 * of updating path/name is left to CDirectory, as there are only incore data
 * from the POV of CFile.
 *
 * Returns the new CFile, or null if this file is (or turns out to be) deleted.
 */
public CFile dup_file(int newfsid, int inode, int pino, string newpath)
{
    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return (null);
    }
    open_file(false);
    /*
     * It is okay to not lock cfnew since nobody actually know that this is
     * on the system. As nobody else touch this, we can do without locks for the new file.
     */
    lock (this)
    {
        if (_mywip == null)
        {
            DEFS.ASSERT(m_state == FILE_STATE.FILE_DELETED, "Should be marked deleted, or else some workflow issue");
            return (null);
        }
        // Make sure the source is clean on disk before duping its block tree.
        REDDY.ptrRedFS.sync(_mywip);
        REDDY.ptrRedFS.flush_cache(_mywip, true);
        //should we write the source inodefile to disk also?
        CFile cfnew = new CFile(newfsid, inode, pino, m_name, newpath);
        cfnew.open_file(true);
        REDDY.ptrRedFS.redfs_dup_file(_mywip, cfnew._mywip);
        // Carry over timestamps, size and inline-block flag to the clone.
        cfnew.m_atime = m_atime;
        cfnew.m_ctime = m_ctime;
        cfnew.m_mtime = m_mtime;
        cfnew.m_size = _mywip.get_filesize();
        cfnew.set_ibflag(_mywip.get_ibflag());
        cfnew.touch();
        //int numblocks = OPS.FSIZETONUMBLOCKS(m_clist[i].get_size());
        // Account the clone's logical usage against the destination fsid.
        int numblocks = OPS.NUML0(m_size) + OPS.NUML1(m_size) + OPS.NUML2(m_size);
        REDDY.FSIDList[newfsid].diff_upadate_logical_data(numblocks * 4096); //not the correct figure per-se
        REDDY.FSIDList[newfsid].set_dirty(true);
        DEFS.DEBUGYELLOW("INCRCNT", "Grew fsid usage by " + numblocks + " (file)");
        cfnew.sync();
        return (cfnew);
    }
}
/*
 * The below functions will be used for dedupe/user command prompt etc.
 *
 * Loads inode 'ino' from the given fsid's inode file, reports its wip type
 * through 'type', and returns its parent inode number (-1 when the checkout
 * fails, i.e. the inode does not exist on disk).
 */
private int LoadWip_FindPINO(int fsid, int ino, ref WIP_TYPE type)
{
    lock (REDDY.FSIDList[fsid])
    {
        RedFS_Inode inowip = REDDY.FSIDList[fsid].get_inode_file_wip("Loadinode");
        lock (inowip)
        {
            // Throw-away wip: only used to read type and parent ino.
            RedFS_Inode mywip = new RedFS_Inode(WIP_TYPE.UNDEFINED, ino, -1);
            bool ret = OPS.Checkout_Wip2(inowip, mywip, ino);
            REDDY.FSIDList[fsid].sync_internal();
            type = mywip.get_wiptype();
            //DEFS.DEBUG("LdIno", "Loaded ino= " + ino + "wip from disk, type = " + type);
            //DEFS.DEBUG("LdIno", mywip.get_string_rep2());
            return ((ret) ? mywip.get_parent_ino() : -1);
        }
    }
}
/*
 * Reads up to 'buflen' bytes from 'fileoffset' into buffer[bufoffset..].
 * Reads that start at or beyond EOF zero the buffer and return false;
 * reads that straddle EOF are clamped, with the tail of the requested
 * range zero-filled. Returns true when any data was read.
 */
public bool read(byte[] buffer, int bufoffset, int buflen, long fileoffset)
{
    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return (false);
    }
    lock (this)
    {
        if (_mywip == null || (fileoffset >= m_size))
        {
            OPS.BZERO(buffer);
            return (false);
        }

        long request_end_offset = fileoffset + buflen;
        if (request_end_offset > m_size)
        {
            // Clamp the read to EOF and zero the unread tail of the caller's range.
            int old_buflen = buflen;
            long true_end_offset = m_size;
            DEFS.DEBUG("ERROR", "Trying to read beyond EOF = " + m_size + " (start_offset, end_offset) = " +
                       fileoffset + "," + (fileoffset + buflen));
            buflen = (int)(true_end_offset - fileoffset);
            DEFS.ASSERT(old_buflen >= buflen, "Something wrong in calculation");
            for (int i = (bufoffset + buflen); i < (bufoffset + old_buflen); i++)
            {
                buffer[i] = 0;
            }
        }

        REDDY.ptrRedFS.redfs_read(_mywip, fileoffset, buffer, bufoffset, buflen);
        /*
         * VLC and office apps tries to read beyond EOF, and we end up growing the file, this happens
         * with filesize blowing up infinitely.
         */
        m_size = _mywip.get_filesize();
        touch();
        return (true);
    }
}
/*
 * Permanently deletes this file's on-disk data: opens the wip if needed,
 * marks the file FILE_DELETED, frees its block tree, and checks zeroed wip
 * data back into the fsid's inode file. Idempotent — a second call returns
 * immediately.
 */
public void remove_ondisk_data2()
{
    open_file(false);
    touch();
    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return;
    }
    // Mark deleted first so concurrent opens/readers bail out.
    m_state = FILE_STATE.FILE_DELETED;
    DEFS.ASSERT(_mywip != null, "Cannot be null in remove() after calling open()");
    lock (this)
    {
        lock (REDDY.FSIDList[m_associated_fsid])
        {
            DEFS.ASSERT(_mywip != null, "Unmount couldnt have worked on this");
            RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("DF:" + m_name);
            //REDDY.ptrRedFS.sync(_mywip);
            REDDY.ptrRedFS.flush_cache(_mywip, false);
            REDDY.ptrRedFS.redfs_delete_wip(m_associated_fsid, _mywip, true);
            DEFS.ASSERT(_mywip.get_filesize() == 0, "After delete, all the wip contents must be cleared off");
            // All 16 direct/indirect child slots must be invalidated by the delete.
            for (int i = 0; i < 16; i++)
            {
                DEFS.ASSERT(_mywip.get_child_dbn(i) == DBN.INVALID, "dbns are not set after delete wip " + i +
                            " " + _mywip.get_child_dbn(i));
            }
            OPS.CheckinZerodWipData(inowip, m_inode);
            REDDY.FSIDList[m_associated_fsid].sync_internal();
            _mywip = null;
        }
    }
    DEFS.DEBUG("IFSD", "<<<< DELETED FILE >>>> " + m_name);
}
/*
 * Give a fsid, it looks into the iMapWip and gets a free bit. The fsid block has the
 * largest inode number that is currently used, and the iMapWip itself. I'm not using anylocks
 * for this wip since this operation will never be concurrent. All FS modification code that
 * may use this path already would have a lock on the rootdir. Ex duping, deleting, inserting etc.
 *
 * XXX: Note that we are never freeing the inode bit once set!. So basically this is a dummy function.
 * We still work because we can afford to wait for 500M inodes to allocated before we do a wrap around!!.
 *
 * Returns the newly allocated inode number; when the map is exhausted it
 * resets the search start to 64 and recurses (wrap-around).
 */
private int find_free_ino_bit(int fsid)
{
    int max_fbns = 16384;
    int curr_max_inode = REDDY.FSIDList[fsid].get_start_inonumber();
    byte[] buffer = new byte[4096];
    RedFS_Inode iMapWip = REDDY.FSIDList[fsid].get_inodemap_wip();
    // Start scanning at the fbn that holds the current max inode's bit.
    int fbn = OPS.OffsetToFBN(curr_max_inode / 8);
    for (int cfbn = fbn; cfbn < max_fbns; cfbn++)
    {
        OPS.BZERO(buffer);
        REDDY.ptrRedFS.redfs_read(iMapWip, (cfbn * 4096), buffer, 0, 4096);
        // Only the first scanned page starts mid-way; later pages start at 0.
        int startsearchoffset = ((cfbn == fbn) ? (curr_max_inode / 8) : 0) % 4096;
        int free_bit = get_free_bitoffset(startsearchoffset, buffer);
        if (free_bit != -1)
        {
            // get_free_bitoffset() already set the bit in 'buffer'; persist it.
            int free_inode = ((cfbn * (4096 * 8)) + free_bit);
            REDDY.ptrRedFS.redfs_write(iMapWip, (cfbn * 4096), buffer, 0, 4096);
            REDDY.ptrRedFS.sync(iMapWip);
            REDDY.FSIDList[fsid].set_inodemap_wip(iMapWip);
            REDDY.ptrRedFS.flush_cache(iMapWip, true);
            REDDY.FSIDList[fsid].set_start_inonumber(free_inode + 1);
            DEFS.DEBUG("IFSDMux", "Found free ino = " + free_inode + " so setting currmaxino = " +
                       curr_max_inode + " for fsid = " + fsid);
            REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[fsid]);
            return (free_inode);
        }
    }
    // Map exhausted: wrap the search back to inode 64 and try again.
    REDDY.FSIDList[fsid].set_start_inonumber(64);
    REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[fsid]); //do we need this regularly?
    DEFS.DEBUG("FSID", "XXXXX VERY RARE EVENT XXXX INODE WRAP AROUND XXXX");
    return (find_free_ino_bit(fsid));
}
/*
 * Fills the given Red_Buffer from disk: zeroes its 4K payload, then reads
 * the block at the buffer's on-disk dbn. dbn 0 is treated as "no backing
 * block" and yields the zeroed buffer. Returns false only when the device
 * is not initialized.
 */
public bool read(Red_Buffer wb)
{
    if (!initialized)
    {
        return (false);
    }
    //Array.Clear(wb.buf_to_data(), 0, 4096);
    OPS.BZERO(wb.buf_to_data());
    total_disk_reads++;
    // dbn 0 is never read from disk; caller gets an all-zero block.
    if (wb.get_ondisk_dbn() == 0)
    {
        return (true);
    }
    lock (dfile)
    {
        //DEFS.DEBUG("RAID", "Reading dbn : " + wb.get_ondisk_dbn() + " level : " + wb.get_level());
        dfile.Seek((long)wb.get_ondisk_dbn() * 4096, SeekOrigin.Begin);
        dfile.Read(wb.buf_to_data(), 0, 4096);
    }
    return (true);
}
/*
 * Flushes all dirty 256K map chunks to the map file, evicts chunks idle
 * for more than 120s, and rewrites the ".x" sidecar with the current
 * used-block count.
 */
public void sync()
{
    if (!initialized)
    {
        return;
    }
    lock (mfile)
    {
        for (int i = 0; i < 1024; i++)
        {
            if (mbufs[i] != null)
            {
                if (mbufs[i].is_dirty)
                {
                    // Chunk i lives at byte offset i * 256K in the map file.
                    mfile.Seek((long)i * (256 * 1024), SeekOrigin.Begin);
                    mfile.Write(mbufs[i].data, 0, (256 * 1024));
                    mbufs[i].is_dirty = false;
                }
                // Evict chunks untouched for over two minutes.
                if (mbufs[i].get_buf_age() > 120000)
                {
                    mbufs[i] = null;
                }
            }
        }
        // Rewrite the sidecar from scratch with the used-block counter.
        xfile.SetLength(0);
        xfile.Seek(0, SeekOrigin.Begin);
        byte[] buf = new byte[4];
        OPS.set_dbn(buf, 0, USED_BLK_COUNT);
        xfile.Write(buf, 0, 4);
        xfile.Flush();
        DEFS.DEBUG("REF", "SAVED used block cnt = " + USED_BLK_COUNT);
    } //lock
}
/*
 * We dont expect a write to come before opening because, cdirectory would
 * call a open_file() before inserting into the DIR CACHE. We shouldnt call
 * this with cfile-lock held.
 *
 * Loads this file's wip from the inode file (or initializes a fresh one when
 * 'justcreated'), using check-then-lock-then-recheck so concurrent openers
 * race safely. Returns false only for deleted files.
 */
public bool open_file(bool justcreated)
{
    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return (false);
    }
    else if (m_state == FILE_STATE.FILE_IN_DOKAN_IO)
    {
        DEFS.ASSERT(_mywip != null, "My wip cannot be null when dokan_io flag is set in open_file");
        return (true);
    }
    touch();
    if (_mywip == null)
    {
        lock (this)
        {
            if (_mywip != null)
            {
                /*
                 * It could be the case that someone already opend it, maybe previous call
                 * that was locked in open_file(), just bail out.
                 */
                DEFS.ASSERT(m_state != FILE_STATE.FILE_IN_DOKAN_IO, "Suddendly cannot be in dokan io when it was just null");
                return (true);
            }
            lock (REDDY.FSIDList[m_associated_fsid])
            {
                _mywip = new RedFS_Inode(WIP_TYPE.REGULAR_FILE, m_inode, m_parent_inode);
                long oldsize = _mywip.get_filesize();
                RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("OF:" + m_name);
                DEFS.DEBUG("F(_mywip)", "Loaded ino= " + m_inode + "wip from disk, size = " + _mywip.get_filesize());
                // ret == true when the inode existed on disk; false for a brand-new file.
                bool ret = OPS.Checkout_Wip2(inowip, _mywip, m_inode);
                if (ret)
                {
                    DEFS.DEBUG("FILE", "Loaded ino= " + m_inode + "wip from disk, size = " + _mywip.get_filesize());
                }
                else
                {
                    DEFS.DEBUG("FILE", "Loaded ino = " + m_inode + " (new) size = " + _mywip.get_filesize());
                    _mywip.set_ino(m_parent_inode, m_inode);
                }
                DEFS.ASSERT(m_size == _mywip.get_filesize(), "File size should match, irrespecitive of weather its " +
                            " from disk, (=0) then, or inserted from an existing dir load, >= 0 in that case, msize:" + m_size +
                            " _mywip.size:" + _mywip.get_filesize() + " fname =" + m_name + " ino=" + m_inode +
                            " beforeread size = " + oldsize + " contents : " + _mywip.get_string_rep2() + " ret = " + ret);
                if (justcreated)
                {
                    DEFS.ASSERT(ret == false, "This should be a new file " + _mywip.get_filesize() + " fname =" + m_name +
                                " ino=" + m_inode + " beforeread size = " + oldsize + " contents : " + _mywip.get_string_rep2());
                    // New wip must carry the fsid and be persisted on the next sync.
                    _mywip.setfilefsid_on_dirty(m_associated_fsid);
                    _mywip.is_dirty = true; //this must make it to disk.
                }
                REDDY.FSIDList[m_associated_fsid].sync_internal();
                m_state = FILE_STATE.FILE_DEFAULT;
            }
        }
    }
    return (true);
}
/*
 * Renders this (inode, fbn, fingerprint) record as "ino,fbn,\t<hash>".
 */
string Item.get_string_rep()
{
    string hash = OPS.HashToString(fp);
    string rep = inode + "," + fbn + ",\t" + hash;
    return (rep);
}
/*
 * Returns this inode's tree level (0/1/2), derived from its file size.
 */
public int get_inode_level()
{
    var size = get_filesize();
    return (OPS.FSIZETOILEVEL(size));
}
/*
 * Copies 'sourcefile' (host filesystem) into 'destfile' (RedFS volume 2),
 * writing a per-4K-block MD5 fingerprint stream into 'newchecksumfile' at
 * 'curroffset' (advanced in place for the caller).
 *
 * firstjob == true: full copy — every block is written.
 * firstjob == false: incremental — each block's fingerprint is compared
 * against 'oldchecksumfile' and only changed blocks are written, but the
 * new checksum file is always fully rewritten.
 *
 * Returns false on any failure (missing source, create/eof/read/write errors).
 *
 * NOTE(review): the two branches duplicate most of the copy loop and differ
 * only in the fingerprint comparison — candidate for refactoring. Also note
 * the misspelled local 'localoffet' and that the incremental branch compares
 * only fbn + fingerprint (the inode comparison is deliberately commented out).
 */
private bool backup_file(bool firstjob, string oldchecksumfile, string newchecksumfile, ref int curroffset, string sourcefile, string destfile)
{
    DEFS.DEBUG("BACKUP", "Entering backup_file ( " + firstjob + "," + oldchecksumfile + "," + newchecksumfile + "," + curroffset + "," + sourcefile + "," + destfile);
    MD5 md5 = System.Security.Cryptography.MD5.Create();
    fingerprintBACKUP fptemp1 = new fingerprintBACKUP();
    fingerprintBACKUP fptemp2 = new fingerprintBACKUP();
    if (firstjob)
    {
        FileInfo srcfi = new FileInfo(sourcefile);
        if (srcfi.Exists == false)
        {
            // Source vanished: drop the stale destination copy.
            REDDY.ptrIFSDMux.DeleteFile(2, destfile, null);
            return (false);
        }
        else
        {
            if (REDDY.ptrIFSDMux.CreateFile(2, destfile, FileAccess.ReadWrite, FileShare.ReadWrite, FileMode.Create, FileOptions.None, null) != 0)
            {
                MessageBox.Show("failed to create file 1");
                return (false);
            }
            if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            {
                MessageBox.Show("failed to seteof 1");
                return (false);
            }
            Inode_Info di = REDDY.ptrIFSDMux.GetFileInfoInternalAPI(2, destfile);
            // Remember where this file's fingerprints start in the checksum stream.
            REDDY.ptrIFSDMux.SetInternalFlag(2, destfile, 0, curroffset);
            if (di == null)
            {
                MessageBox.Show("failed to get a valid di 1");
                return (false);
            }
            int ino = di.ino;
            byte[] buffer = new byte[4096];
            byte[] tmpbuf = new byte[((Item)fptemp1).get_size()];
            uint wrote = 0;
            int bcount = OPS.NUML0(srcfi.Length);
            FileStream fs = new FileStream(sourcefile, FileMode.Open);
            long outfileoffset = 0;
            byte[] lastchunkbuf = null;
            for (int i = 0; i < bcount; i++)
            {
                int size = fs.Read(buffer, 0, 4096);
                if (size < 4096)
                {
                    // Short (final) chunk: hash a zero-padded 4K block but write only 'size' bytes.
                    lastchunkbuf = new byte[size];
                    for (int kx = size; kx < 4096; kx++) { buffer[kx] = 0; }
                    for (int kx2 = 0; kx2 < size; kx2++) { lastchunkbuf[kx2] = buffer[kx2]; }
                }
                byte[] hash = md5.ComputeHash(buffer, 0, 4096);
                fptemp1.inode = ino;
                fptemp1.fbn = i;
                for (int k = 0; k < 16; k++) { fptemp1.fp[k] = hash[k]; }
                ((Item)fptemp1).get_bytes(tmpbuf, 0);
                if (REDDY.ptrIFSDMux.WriteFile(2, newchecksumfile, tmpbuf, ref wrote, curroffset, null) != 0)
                {
                    MessageBox.Show("write failed, wrote = " + wrote);
                    return (false);
                }
                if (size > 0)
                {
                    if (size == 4096)
                    {
                        if (REDDY.ptrIFSDMux.WriteFile(2, destfile, buffer, ref wrote, outfileoffset, null) != 0)
                        {
                            MessageBox.Show("write failed ee, wrote = " + wrote);
                            return (false);
                        }
                    }
                    else
                    {
                        if (REDDY.ptrIFSDMux.WriteFile(2, destfile, lastchunkbuf, ref wrote, outfileoffset, null) != 0)
                        {
                            MessageBox.Show("write failed ee2, wrote = " + wrote);
                            return (false);
                        }
                    }
                }
                newdatacopied += size;
                outfileoffset += size;
                curroffset += ((Item)fptemp1).get_size();
            }
            //if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            //{
            //    MessageBox.Show("failed to seteof 1a");
            //    return false;
            //}
            fs.Close();
            REDDY.FSIDList[2].set_dirty(true);
            return (true);
        }
    }
    else
    {
        DEFS.ASSERT(oldchecksumfile != null, "You must pass the oldchecksumfile path");
        FileInfo srcfi = new FileInfo(sourcefile);
        if (srcfi.Exists == false)
        {
            REDDY.ptrIFSDMux.DeleteFile(2, destfile, null);
            return (false);
        }
        else
        {
            if (REDDY.ptrIFSDMux.CreateFile(2, destfile, FileAccess.ReadWrite, FileShare.ReadWrite, FileMode.CreateNew, FileOptions.None, null) != 0)
            {
                MessageBox.Show("Createfile has failed");
                return (false);
            }
            if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            {
                MessageBox.Show("Set eof has failed!");
                return (false);
            }
            Inode_Info di = REDDY.ptrIFSDMux.GetFileInfoInternalAPI(2, destfile);
            // Where this file's old fingerprints start in the previous checksum stream.
            int localoffet = di.backupoffset;
            REDDY.ptrIFSDMux.SetInternalFlag(2, destfile, 0, curroffset);
            int ino = di.ino;
            byte[] buffer = new byte[4096];
            byte[] tmpbuf = new byte[((Item)fptemp1).get_size()];
            uint wrote = 0;
            int bcount = OPS.NUML0(srcfi.Length);
            FileStream fs = new FileStream(sourcefile, FileMode.Open);
            long outfileoffset = 0;
            byte[] lastchunkbuf = null;
            DEFS.DEBUG("--------", bcount + ", ArrangeStartingPosition LOOP ");
            for (int i = 0; i < bcount; i++)
            {
                int size = fs.Read(buffer, 0, 4096);
                if (size < 4096)
                {
                    // Short (final) chunk handling, same as the full-copy branch.
                    lastchunkbuf = new byte[size];
                    for (int kx = size; kx < 4096; kx++) { buffer[kx] = 0; }
                    for (int kx2 = 0; kx2 < size; kx2++) { lastchunkbuf[kx2] = buffer[kx2]; }
                }
                byte[] hash = md5.ComputeHash(buffer, 0, 4096);
                fptemp1.inode = ino;
                fptemp1.fbn = i;
                for (int k = 0; k < 16; k++) { fptemp1.fp[k] = hash[k]; }
                byte[] existinghash = new byte[24];
                uint readsize = 0;
                if (REDDY.ptrIFSDMux.ReadFile(2, oldchecksumfile, existinghash, ref readsize, localoffet, null) != 0)
                {
                    MessageBox.Show("read failed, " + readsize + ",path = " + oldchecksumfile);
                    return (false);
                }
                ((Item)fptemp2).parse_bytes(existinghash, 0);
                // Write the data block only if its fingerprint changed since the last backup.
                if (!(/* fptemp1.inode == fptemp2.inode &&*/ fptemp1.fbn == fptemp2.fbn && is_equal(fptemp1.fp, fptemp2.fp)))
                {
                    if (size > 0)
                    {
                        if (size == 4096)
                        {
                            if (REDDY.ptrIFSDMux.WriteFile(2, destfile, buffer, ref wrote, outfileoffset, null) != 0)
                            {
                                MessageBox.Show("write failed ee, wrote = " + wrote);
                                return (false);
                            }
                        }
                        else
                        {
                            if (REDDY.ptrIFSDMux.WriteFile(2, destfile, lastchunkbuf, ref wrote, outfileoffset, null) != 0)
                            {
                                MessageBox.Show("write failed ee2, wrote = " + wrote);
                                return (false);
                            }
                        }
                    }
                    newdatacopied += size;
                }
                // The new fingerprint is always recorded, changed or not.
                ((Item)fptemp1).get_bytes(tmpbuf, 0);
                if (REDDY.ptrIFSDMux.WriteFile(2, newchecksumfile, tmpbuf, ref wrote, curroffset, null) != 0)
                {
                    MessageBox.Show("write failed 22, wrote = " + wrote);
                    return (false);
                }
                curroffset += ((Item)fptemp1).get_size();
                localoffet += ((Item)fptemp1).get_size();
                outfileoffset += size;
                //DEFS.DEBUG("---", bcount + "," + fs.Position);
            }
            fs.Close();
            if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            {
                MessageBox.Show("Set eof has failed! 2");
                return (false);
            }
            DEFS.DEBUG("--------", bcount + ", ArrangeStartingPosition LOOP sdfsfda");
            REDDY.FSIDList[2].set_dirty(true);
            return (true);
        }
    }
}