public bool read(byte[] buffer, int bufoffset, int buflen, long fileoffset)
{
    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return(false);
    }

    lock (this)
    {
        if (_mywip == null || (fileoffset >= m_size))
        {
            OPS.BZERO(buffer);
            return(false);
        }

        long request_end_offset = fileoffset + buflen;
        if (request_end_offset > m_size)
        {
            int old_buflen = buflen;
            long true_end_offset = m_size;

            DEFS.DEBUG("ERROR", "Trying to read beyond EOF = " + m_size + " (start_offset, end_offset) = " +
                        fileoffset + "," + (fileoffset + buflen));

            buflen = (int)(true_end_offset - fileoffset);
            DEFS.ASSERT(old_buflen >= buflen, "Something wrong in calculation");

            // Zero out the portion of the caller's buffer that will not be filled by the read.
            for (int i = (bufoffset + buflen); i < (bufoffset + old_buflen); i++)
            {
                buffer[i] = 0;
            }
        }

        REDDY.ptrRedFS.redfs_read(_mywip, fileoffset, buffer, bufoffset, buflen);

        /*
         * VLC and some office apps try to read beyond EOF; without this clamp we would
         * end up growing the file, with the file size blowing up indefinitely.
         */
        m_size = _mywip.get_filesize();
        touch();
        return(true);
    }
}
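/*
 * A minimal standalone sketch of the EOF clamping performed in read() above: a request
 * that extends past the file size is truncated, and the tail of the caller's buffer that
 * will not be filled is zeroed. The helper name and placement are illustrative only and
 * are not part of the RedFS code base.
 */
public static int ClampReadLength(byte[] buffer, int bufoffset, int buflen, long fileoffset, long filesize)
{
    if (fileoffset + buflen <= filesize)
    {
        return buflen;                              // Request fits entirely inside the file.
    }

    int clamped = (int)(filesize - fileoffset);     // Bytes actually available to read.
    for (int i = bufoffset + clamped; i < bufoffset + buflen; i++)
    {
        buffer[i] = 0;                              // Zero the part the read will not touch.
    }
    return clamped;
}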
public void remove_ondisk_data2()
{
    open_file(false);
    touch();

    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return;
    }

    m_state = FILE_STATE.FILE_DELETED;
    DEFS.ASSERT(_mywip != null, "Cannot be null in remove() after calling open()");

    lock (this)
    {
        lock (REDDY.FSIDList[m_associated_fsid])
        {
            DEFS.ASSERT(_mywip != null, "Unmount couldnt have worked on this");
            RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("DF:" + m_name);

            //REDDY.ptrRedFS.sync(_mywip);
            REDDY.ptrRedFS.flush_cache(_mywip, false);
            REDDY.ptrRedFS.redfs_delete_wip(m_associated_fsid, _mywip, true);

            DEFS.ASSERT(_mywip.get_filesize() == 0, "After delete, all the wip contents must be cleared off");
            for (int i = 0; i < 16; i++)
            {
                DEFS.ASSERT(_mywip.get_child_dbn(i) == DBN.INVALID, "dbns are not set after delete wip " + i +
                            " " + _mywip.get_child_dbn(i));
            }

            OPS.CheckinZerodWipData(inowip, m_inode);
            REDDY.FSIDList[m_associated_fsid].sync_internal();
            _mywip = null;
        }
    }
    DEFS.DEBUG("IFSD", "<<<< DELETED FILE >>>> " + m_name);
}
//Does dedupe starting from the given fsid, for all derived children, by looking up the
//tree info. The inode is always a regular file.
public bool DoDedupe(int fsid, int inode, int fbn, int currdbn, int donordbn)
{
    int[] list = prepare_Inheritance_list(fsid, inode);

    //check fsid's
    for (int i = 0; i < list.Length; i++)
    {
        FileAttributes fa = FileAttributes.NotContentIndexed;
        string rpath = Load_Inode(list[i], inode, ref fa);
        if (rpath == null)
        {
            continue;
        }

        DEFS.ASSERT(fa == FileAttributes.Normal, "Only normal files can be deduped");
        //DEFS.DEBUGYELLOW("-DEDUP-", "[" + list[i] + "] rpath = " + rpath + "fbn = " + fbn + "(" + currdbn + "->" + donordbn + ")");
        REDDY.FSIDList[list[i]].rootdir.do_dedupe_tlock(rpath, fbn, currdbn, donordbn);
    }
    return(false);
}
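/*
 * Hedged usage sketch (the values are illustrative, not taken from the code base): once a
 * fingerprint pass reports that block 'currdbn' of (fsid, inode, fbn) holds the same data
 * as 'donordbn', a caller in this class would redirect that fbn to the donor block in
 * every derived fsid via DoDedupe().
 */
public void DedupeOneMatchExample()
{
    int fsid = 2, inode = 1021, fbn = 42;
    int currdbn = 99120, donordbn = 88001;          // hypothetical block numbers
    DoDedupe(fsid, inode, fbn, currdbn, donordbn);  // walks prepare_Inheritance_list() internally
}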
public static bool Checkout_Wip2(RedFS_Inode inowip, RedFS_Inode mywip, int m_ino)
{
    WIP_TYPE oldtype = mywip.get_wiptype();

    for (int i = 0; i < 16; i++)
    {
        DEFS.ASSERT(mywip.get_child_dbn(i) == DBN.INVALID, "Wip cannot be valid during checkout, " + i +
                    " value = " + mywip.get_child_dbn(i));
    }

    long fileoffset = m_ino * 128;
    lock (inowip)
    {
        REDDY.ptrRedFS.redfs_read(inowip, fileoffset, mywip.data, 0, 128);
        if (oldtype != WIP_TYPE.UNDEFINED)
        {
            mywip.set_wiptype(oldtype);
        }
    }
    DEFS.DEBUG("CO_WIP", mywip.get_string_rep2());
    return(mywip.verify_inode_number());
}
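/*
 * Sketch of the fixed-slot layout Checkout_Wip2() relies on: each wip is serialized into
 * a 128-byte record, so inode N lives at byte offset N * 128 of the per-FSID inode file.
 * The constant and helper below are illustrative; only the literal 128 appears in the
 * code above.
 */
public const int WIP_ONDISK_SIZE = 128;

public static long WipFileOffset(int ino)
{
    return (long)ino * WIP_ONDISK_SIZE;     // Byte offset of inode 'ino' inside the inode file.
}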
public bool DoDedupeBatch(fingerprintDMSG[] fplist)
{
    int fsid = -1;
    int inode = -1;

    for (int i = 0; i < 1024; i++)
    {
        if (fplist[i] != null)
        {
            //if ((fsid != -1 || fsid != fplist[i].fsid) ||
            //    (inode != -1 || inode != fplist[i].inode))
            //{
            //    DEFS.DEBUG("BATCH", "Error detected in fplist");
            //    return false;
            //}
            fsid = fplist[i].fsid;
            inode = fplist[i].inode;
        }
    }

    int[] list = prepare_Inheritance_list(fsid, inode);

    //check fsid's
    for (int i = 0; i < list.Length; i++)
    {
        FileAttributes fa = FileAttributes.NotContentIndexed;
        string rpath = Load_Inode(list[i], inode, ref fa);
        if (rpath == null)
        {
            continue;
        }

        DEFS.ASSERT(fa == FileAttributes.Normal, "Only normal files can be deduped 2");
        REDDY.FSIDList[list[i]].rootdir.do_dedupe_tlock_batch(rpath, fplist);
    }
    return(true);
}
void insert_item(Item i, bool is_last)
{
    DEFS.ASSERT(_internal_cache != null, "Cache memory is empty");
    DEFS.ASSERT(inputF != null, "input file cannot be null in insert-item function");

    i.get_bytes(_internal_cache, _internal_cnt * _item.get_size());
    _internal_cnt++;

    if (is_last || _internal_cnt == 1024)
    {
        //print_contents(_internal_cache, _internal_cnt);
        inputF.Write(_internal_cache, 0, _internal_cnt * _item.get_size());
        _internal_cnt = 0;
        inputF.Flush();
    }

    if (is_last)
    {
        DEFS.ASSERT(_internal_cnt == 0, "_internal_cnt should be zero because we just flushed");
        DEFS.DEBUG("SORT", "Finished inserting all test_insert_items");
        inputF.Seek(0, SeekOrigin.Begin);
    }
}
private void insert_item_output(Item i, bool is_last)
{
    DEFS.ASSERT(_internal_cache_op != null, "Cache memory is empty");
    DEFS.ASSERT(outputF != null, "output file cannot be null in insert-item-output function");

    if (i != null)
    {
        i.get_bytes(_internal_cache_op, _internal_cnt_op * _item.get_size());
        _internal_cnt_op++;
    }

    if (is_last || _internal_cnt_op == 1024)
    {
        outputF.Write(_internal_cache_op, 0, _internal_cnt_op * _item.get_size());
        _internal_cnt_op = 0;
        outputF.Flush();
    }

    if (is_last)
    {
        DEFS.ASSERT(_internal_cnt_op == 0, "_internal_cnt_op should be zero because we just flushed");
    }
}
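/*
 * Generic form of the staging pattern used by insert_item()/insert_item_output():
 * fixed-size records are packed into an in-memory page and written out either when the
 * page holds 1024 records or when the caller flags the last record. The method and
 * parameter names are illustrative, and 'page' is assumed to be at least
 * 1024 * recordSize bytes.
 */
private static void StageRecord(FileStream dest, byte[] page, byte[] record, int recordSize, ref int count, bool isLast)
{
    Buffer.BlockCopy(record, 0, page, count * recordSize, recordSize);
    count++;

    if (isLast || count == 1024)
    {
        dest.Write(page, 0, count * recordSize);    // Flush only the used portion of the page.
        dest.Flush();
        count = 0;
    }
}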
/*
 * The three functions below are exposed publicly.
 * MP-safe; a call can take long if there are too many updates to be made.
 */
public void touch_refcount(Red_Buffer wb, bool isinodefilel0)
{
    DEFS.ASSERT(wb != null, "touch refcount needs the wb");

    if (wb.get_touchrefcnt_needed() == false)// || wb.get_ondisk_dbn() == 0)
    {
        return;
    }
    else
    {
        //DEFS.DEBUG("-REF-", "CTH refcount for dbn = " + wb.get_ondisk_dbn() + " inofile = " + isinodefilel0);
        wb.set_touchrefcnt_needed(false);
    }

    if (wb.get_level() == 0 && isinodefilel0)
    {
        m_wrloader.mod_refcount(0, wb.get_ondisk_dbn(), REFCNT_OP.TOUCH_REFCOUNT, wb, true);
    }
    else
    {
        DEFS.ASSERT(wb.get_level() > 0, "touch_refcount is only for indirects, except for ino-L0!");
        m_wrloader.mod_refcount(0, wb.get_ondisk_dbn(), REFCNT_OP.TOUCH_REFCOUNT, wb, false);
    }
}
public void verify_savings()
{
    DEFS.ASSERT(outputF != null, "Output file cannot be null in verification phase");
    long num_items = outputF.Length / _item.get_size();
    // num_items is computed here but not yet used; the verification logic is incomplete in this excerpt.
}
public int MoveFile(String filename, String newname, bool replace, DokanFileInfo info)
{
    DEFS.DEBUG("DOKAN", "Move file request for " + filename + " to " + newname);
    DEFS.ASSERT(false, "no move possible");
    return(0);// _fs.move_file(filename, newname);
}
public int Unmount(DokanFileInfo info)
{
    DEFS.ASSERT(false, "This is not called in C# dokan");
    unmountcalled2 = true;
    return(0);
}
int IComparer.Compare(object obj1, object obj2)
{
    switch (((Item)obj1).get_itemtype())
    {
        case RECORD_TYPE.FINGERPRINT_RECORD_CLOG:
        {
            fingerprintCLOG c1 = (fingerprintCLOG)obj1;
            fingerprintCLOG c2 = (fingerprintCLOG)obj2;
            for (int i = 0; i < 16; i++)
            {
                if (c1.fp[i] < c2.fp[i])
                {
                    return(-1);
                }
                else if (c1.fp[i] > c2.fp[i])
                {
                    return(1);
                }
            }
            return(0);
        }
        //break; unreachable

        case RECORD_TYPE.FINGERPRINT_RECORD_FPDB:
        {
            fingerprintFPDB c1 = (fingerprintFPDB)obj1;
            fingerprintFPDB c2 = (fingerprintFPDB)obj2;
            for (int i = 0; i < 16; i++)
            {
                if (c1.fp[i] < c2.fp[i])
                {
                    return(-1);
                }
                else if (c1.fp[i] > c2.fp[i])
                {
                    return(1);
                }
            }
            return(0);
        }
        //break; unreachable

        case RECORD_TYPE.FINGERPRINT_RECORD_MSG:
        {
            fingerprintDMSG c1 = (fingerprintDMSG)obj1;
            fingerprintDMSG c2 = (fingerprintDMSG)obj2;
            for (int i = 0; i < 16; i++)
            {
                if (c1.fp[i] < c2.fp[i])
                {
                    return(-1);
                }
                else if (c1.fp[i] > c2.fp[i])
                {
                    return(1);
                }
            }
            return(0);
        }
        //break; unreachable
    }
    DEFS.ASSERT(false, "Shouldnt have come here wewrwr2");
    return(0);
}
int IComparer.Compare(object obj1, object obj2)
{
    switch (((Item)obj1).get_itemtype())
    {
        case RECORD_TYPE.FINGERPRINT_RECORD_CLOG:
        {
            fingerprintCLOG c1 = (fingerprintCLOG)obj1;
            fingerprintCLOG c2 = (fingerprintCLOG)obj2;

            // Order by (fsid, inode, fbn) ascending, then by cnt descending.
            if (c1.fsid < c2.fsid) { return(-1); }
            if (c1.fsid > c2.fsid) { return(1); }
            if (c1.inode < c2.inode) { return(-1); }
            if (c1.inode > c2.inode) { return(1); }
            if (c1.fbn < c2.fbn) { return(-1); }
            if (c1.fbn > c2.fbn) { return(1); }
            if (c1.cnt > c2.cnt) { return(-1); }
            if (c1.cnt < c2.cnt) { return(1); }
            return(0); //can actually assert!
        }
        //break; unreachable

        case RECORD_TYPE.FINGERPRINT_RECORD_MSG:
        {
            fingerprintDMSG c1 = (fingerprintDMSG)obj1;
            fingerprintDMSG c2 = (fingerprintDMSG)obj2;

            // Order by (fsid, inode, fbn) ascending.
            if (c1.fsid < c2.fsid) { return(-1); }
            if (c1.fsid > c2.fsid) { return(1); }
            if (c1.inode < c2.inode) { return(-1); }
            if (c1.inode > c2.inode) { return(1); }
            if (c1.fbn < c2.fbn) { return(-1); }
            if (c1.fbn > c2.fbn) { return(1); }
            return(0);
        }
        //break; unreachable
    }
    DEFS.ASSERT(false, "Shouldnt have come here 34234a23");
    return(0);
}
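/*
 * Hedged usage sketch for the two comparers above: the sort phase would order a segment
 * of records either by raw fingerprint or by (fsid, inode, fbn). 'FingerprintComparer'
 * and 'InodeFbnComparer' are placeholder names for the enclosing comparer classes, and
 * LoadSegmentRecords() is an assumed helper that is not part of this excerpt.
 */
public static void SortSegmentExample()
{
    Item[] records = LoadSegmentRecords();              // assumed helper
    Array.Sort(records, new FingerprintComparer());     // order by the 16-byte fingerprint
    Array.Sort(records, new InodeFbnComparer());        // or by (fsid, inode, fbn)
}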
/*
 * We don't expect a write to arrive before an open, because cdirectory calls
 * open_file() before inserting into the DIR CACHE. This must not be called
 * with the cfile lock held.
 */
public bool open_file(bool justcreated)
{
    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return(false);
    }
    else if (m_state == FILE_STATE.FILE_IN_DOKAN_IO)
    {
        DEFS.ASSERT(_mywip != null, "My wip cannot be null when dokan_io flag is set in open_file");
        return(true);
    }

    touch();

    if (_mywip == null)
    {
        lock (this)
        {
            if (_mywip != null)
            {
                /*
                 * Someone else may have already opened it, e.g. a previous call that was
                 * blocked in open_file(); just bail out.
                 */
                DEFS.ASSERT(m_state != FILE_STATE.FILE_IN_DOKAN_IO, "Suddenly cannot be in dokan io when it was just null");
                return(true);
            }

            lock (REDDY.FSIDList[m_associated_fsid])
            {
                _mywip = new RedFS_Inode(WIP_TYPE.REGULAR_FILE, m_inode, m_parent_inode);
                long oldsize = _mywip.get_filesize();

                RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("OF:" + m_name);
                DEFS.DEBUG("F(_mywip)", "Loaded ino= " + m_inode + "wip from disk, size = " + _mywip.get_filesize());

                bool ret = OPS.Checkout_Wip2(inowip, _mywip, m_inode);
                if (ret)
                {
                    DEFS.DEBUG("FILE", "Loaded ino= " + m_inode + "wip from disk, size = " + _mywip.get_filesize());
                }
                else
                {
                    DEFS.DEBUG("FILE", "Loaded ino = " + m_inode + " (new) size = " + _mywip.get_filesize());
                    _mywip.set_ino(m_parent_inode, m_inode);
                }

                DEFS.ASSERT(m_size == _mywip.get_filesize(), "File size should match, irrespective of whether it is " +
                            " from disk, (=0) then, or inserted from an existing dir load, >= 0 in that case, msize:" + m_size +
                            " _mywip.size:" + _mywip.get_filesize() + " fname =" + m_name + " ino=" + m_inode +
                            " beforeread size = " + oldsize + " contents : " + _mywip.get_string_rep2() + " ret = " + ret);

                if (justcreated)
                {
                    DEFS.ASSERT(ret == false, "This should be a new file " + _mywip.get_filesize() + " fname =" + m_name +
                                " ino=" + m_inode + " beforeread size = " + oldsize + " contents : " + _mywip.get_string_rep2());
                    _mywip.setfilefsid_on_dirty(m_associated_fsid);
                    _mywip.is_dirty = true; //this must make it to disk.
                }

                REDDY.FSIDList[m_associated_fsid].sync_internal();
                m_state = FILE_STATE.FILE_DEFAULT;
            }
        }
    }
    return(true);
}
private bool backup_file(bool firstjob, string oldchecksumfile, string newchecksumfile, ref int curroffset, string sourcefile, string destfile)
{
    DEFS.DEBUG("BACKUP", "Entering backup_file ( " + firstjob + "," + oldchecksumfile + "," + newchecksumfile + "," +
               curroffset + "," + sourcefile + "," + destfile);

    MD5 md5 = System.Security.Cryptography.MD5.Create();
    fingerprintBACKUP fptemp1 = new fingerprintBACKUP();
    fingerprintBACKUP fptemp2 = new fingerprintBACKUP();

    if (firstjob)
    {
        FileInfo srcfi = new FileInfo(sourcefile);
        if (srcfi.Exists == false)
        {
            REDDY.ptrIFSDMux.DeleteFile(2, destfile, null);
            return(false);
        }
        else
        {
            if (REDDY.ptrIFSDMux.CreateFile(2, destfile, FileAccess.ReadWrite, FileShare.ReadWrite, FileMode.Create, FileOptions.None, null) != 0)
            {
                MessageBox.Show("failed to create file 1");
                return(false);
            }
            if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            {
                MessageBox.Show("failed to seteof 1");
                return(false);
            }

            Inode_Info di = REDDY.ptrIFSDMux.GetFileInfoInternalAPI(2, destfile);
            REDDY.ptrIFSDMux.SetInternalFlag(2, destfile, 0, curroffset);

            if (di == null)
            {
                MessageBox.Show("failed to get a valid di 1");
                return(false);
            }

            int ino = di.ino;
            byte[] buffer = new byte[4096];
            byte[] tmpbuf = new byte[((Item)fptemp1).get_size()];
            uint wrote = 0;
            int bcount = OPS.NUML0(srcfi.Length);
            FileStream fs = new FileStream(sourcefile, FileMode.Open);
            long outfileoffset = 0;
            byte[] lastchunkbuf = null;

            for (int i = 0; i < bcount; i++)
            {
                int size = fs.Read(buffer, 0, 4096);
                if (size < 4096)
                {
                    // Last, partial chunk: zero-pad the hash buffer and keep the real bytes aside.
                    lastchunkbuf = new byte[size];
                    for (int kx = size; kx < 4096; kx++)
                    {
                        buffer[kx] = 0;
                    }
                    for (int kx2 = 0; kx2 < size; kx2++)
                    {
                        lastchunkbuf[kx2] = buffer[kx2];
                    }
                }

                byte[] hash = md5.ComputeHash(buffer, 0, 4096);
                fptemp1.inode = ino;
                fptemp1.fbn = i;
                for (int k = 0; k < 16; k++)
                {
                    fptemp1.fp[k] = hash[k];
                }

                ((Item)fptemp1).get_bytes(tmpbuf, 0);
                if (REDDY.ptrIFSDMux.WriteFile(2, newchecksumfile, tmpbuf, ref wrote, curroffset, null) != 0)
                {
                    MessageBox.Show("write failed, wrote = " + wrote);
                    return(false);
                }

                if (size > 0)
                {
                    if (size == 4096)
                    {
                        if (REDDY.ptrIFSDMux.WriteFile(2, destfile, buffer, ref wrote, outfileoffset, null) != 0)
                        {
                            MessageBox.Show("write failed ee, wrote = " + wrote);
                            return(false);
                        }
                    }
                    else
                    {
                        if (REDDY.ptrIFSDMux.WriteFile(2, destfile, lastchunkbuf, ref wrote, outfileoffset, null) != 0)
                        {
                            MessageBox.Show("write failed ee2, wrote = " + wrote);
                            return(false);
                        }
                    }
                }

                newdatacopied += size;
                outfileoffset += size;
                curroffset += ((Item)fptemp1).get_size();
            }

            //if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            //{
            //    MessageBox.Show("failed to seteof 1a");
            //    return false;
            //}
            fs.Close();
            REDDY.FSIDList[2].set_dirty(true);
            return(true);
        }
    }
    else
    {
        DEFS.ASSERT(oldchecksumfile != null, "You must pass the oldchecksumfile path");

        FileInfo srcfi = new FileInfo(sourcefile);
        if (srcfi.Exists == false)
        {
            REDDY.ptrIFSDMux.DeleteFile(2, destfile, null);
            return(false);
        }
        else
        {
            if (REDDY.ptrIFSDMux.CreateFile(2, destfile, FileAccess.ReadWrite, FileShare.ReadWrite, FileMode.CreateNew, FileOptions.None, null) != 0)
            {
                MessageBox.Show("Createfile has failed");
                return(false);
            }
            if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            {
                MessageBox.Show("Set eof has failed!");
                return(false);
            }

            Inode_Info di = REDDY.ptrIFSDMux.GetFileInfoInternalAPI(2, destfile);
            int localoffset = di.backupoffset;
            REDDY.ptrIFSDMux.SetInternalFlag(2, destfile, 0, curroffset);

            int ino = di.ino;
            byte[] buffer = new byte[4096];
            byte[] tmpbuf = new byte[((Item)fptemp1).get_size()];
            uint wrote = 0;
            int bcount = OPS.NUML0(srcfi.Length);
            FileStream fs = new FileStream(sourcefile, FileMode.Open);
            long outfileoffset = 0;
            byte[] lastchunkbuf = null;

            DEFS.DEBUG("--------", bcount + ", ArrangeStartingPosition LOOP ");

            for (int i = 0; i < bcount; i++)
            {
                int size = fs.Read(buffer, 0, 4096);
                if (size < 4096)
                {
                    lastchunkbuf = new byte[size];
                    for (int kx = size; kx < 4096; kx++)
                    {
                        buffer[kx] = 0;
                    }
                    for (int kx2 = 0; kx2 < size; kx2++)
                    {
                        lastchunkbuf[kx2] = buffer[kx2];
                    }
                }

                byte[] hash = md5.ComputeHash(buffer, 0, 4096);
                fptemp1.inode = ino;
                fptemp1.fbn = i;
                for (int k = 0; k < 16; k++)
                {
                    fptemp1.fp[k] = hash[k];
                }

                byte[] existinghash = new byte[24];
                uint readsize = 0;
                if (REDDY.ptrIFSDMux.ReadFile(2, oldchecksumfile, existinghash, ref readsize, localoffset, null) != 0)
                {
                    MessageBox.Show("read failed, " + readsize + ",path = " + oldchecksumfile);
                    return(false);
                }
                ((Item)fptemp2).parse_bytes(existinghash, 0);

                // Copy the chunk only if its (fbn, fingerprint) differs from the previous run's record.
                if (!(/* fptemp1.inode == fptemp2.inode &&*/ fptemp1.fbn == fptemp2.fbn && is_equal(fptemp1.fp, fptemp2.fp)))
                {
                    if (size > 0)
                    {
                        if (size == 4096)
                        {
                            if (REDDY.ptrIFSDMux.WriteFile(2, destfile, buffer, ref wrote, outfileoffset, null) != 0)
                            {
                                MessageBox.Show("write failed ee, wrote = " + wrote);
                                return(false);
                            }
                        }
                        else
                        {
                            if (REDDY.ptrIFSDMux.WriteFile(2, destfile, lastchunkbuf, ref wrote, outfileoffset, null) != 0)
                            {
                                MessageBox.Show("write failed ee2, wrote = " + wrote);
                                return(false);
                            }
                        }
                    }
                    newdatacopied += size;
                }

                ((Item)fptemp1).get_bytes(tmpbuf, 0);
                if (REDDY.ptrIFSDMux.WriteFile(2, newchecksumfile, tmpbuf, ref wrote, curroffset, null) != 0)
                {
                    MessageBox.Show("write failed 22, wrote = " + wrote);
                    return(false);
                }

                curroffset += ((Item)fptemp1).get_size();
                localoffset += ((Item)fptemp1).get_size();
                outfileoffset += size;
                //DEFS.DEBUG("---", bcount + "," + fs.Position);
            }

            fs.Close();
            if (REDDY.ptrIFSDMux.SetEndOfFile(2, destfile, srcfi.Length, null) != 0)
            {
                MessageBox.Show("Set eof has failed! 2");
                return(false);
            }
            DEFS.DEBUG("--------", bcount + ", ArrangeStartingPosition LOOP sdfsfda");
            REDDY.FSIDList[2].set_dirty(true);
            return(true);
        }
    }
}
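/*
 * Minimal sketch of the incremental decision made in the else-branch of backup_file():
 * a 4 KB chunk is copied to the destination only when its (fbn, MD5) pair differs from
 * the record saved in the previous run's checksum file. This is a simplified restatement,
 * not the exact backup_file() control flow.
 */
private static bool ChunkChanged(int fbn, byte[] newMd5, int oldFbn, byte[] oldMd5)
{
    if (fbn != oldFbn)
    {
        return true;                    // Block index moved; treat as changed.
    }
    for (int i = 0; i < 16; i++)
    {
        if (newMd5[i] != oldMd5[i])
        {
            return true;                // Any differing byte means the data changed.
        }
    }
    return false;                       // Same fbn and same fingerprint: skip the copy.
}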
/*
 * Just get the list of all files and their properties. If a new lun is created,
 * the list must be reloaded. Once reloaded, it can be displayed on the UI.
 */
public void load_lun_list(bool freshload)
{
    Inode_Info[] inodes = REDDY.ptrIFSDMux.FindFilesInternalAPI(1, "\\");
    DEFS.DEBUG("lun", "Found " + inodes.Length + " luns in load_lun_list");

    lock (m_lunlist)
    {
        if (freshload)
        {
            DEFS.ASSERT(m_lunlist.Count == 0, "some Lun_Items cannot already exist");
        }

        for (int i = 0; i < inodes.Length; i++)
        {
            DEFS.DEBUG("lun", inodes[i].name);
            bool exists = false;

            if (freshload)
            {
                exists = false;
            }
            else
            {
                //find out..
                for (int j = 0; j < m_lunlist.Count; j++)
                {
                    try
                    {
                        Lun_Item li = m_lunlist.ElementAt(j);
                        int did = Int32.Parse(inodes[i].name);
                        if (li.drive_id == did)
                        {
                            exists = true;
                            break;
                        }
                    }
                    catch (Exception extp)
                    {
                        DEFS.DEBUG("lun", "Error in filename from LUNdisk");
                        DEFS.DEBUG("lun", "Exception:" + extp.Message);
                        exists = true; //just to skip adding this.
                    }
                }
            }

            if (!exists)
            {
                //create item and insert.
                try
                {
                    Lun_Item li = new Lun_Item();
                    li.drive_id = Int32.Parse(inodes[i].name);
                    li.drive_size = inodes[i].size;
                    li.ctime = inodes[i].CreationTime.Ticks;
                    m_lunlist.AddFirst(li);
                }
                catch (Exception ep)
                {
                    DEFS.DEBUG("lun", ep.Message);
                }
            }
        }
    }//lock
}
public void mod_refcount(int fsid, int dbn, REFCNT_OP optype, Red_Buffer wb, bool isinodefilel0)
{
    DEFS.ASSERT(optype == REFCNT_OP.INCREMENT_REFCOUNT || /*optype == REFCNT_OP.DECREMENT_REFCOUNT ||*/
                optype == REFCNT_OP.TOUCH_REFCOUNT || /*optype == REFCNT_OP.DO_LOAD || */
                optype == REFCNT_OP.INCREMENT_REFCOUNT_ALLOC ||
                optype == REFCNT_OP.DECREMENT_REFCOUNT_ONDEALLOC, "Wrong param in mod_refcount");
    DEFS.ASSERT(isinodefilel0 || (wb == null || wb.get_level() > 0), "wrong type to mod_refcount " +
                isinodefilel0 + (wb == null));

    UpdateReqI r = new UpdateReqI();
    r.optype = optype;
    r.dbn = dbn;
    r.fsid = fsid;

    switch (optype)
    {
        case REFCNT_OP.INCREMENT_REFCOUNT:
        case REFCNT_OP.INCREMENT_REFCOUNT_ALLOC:
            r.value = 1;
            break;

        //case REFCNT_OP.DECREMENT_REFCOUNT:
        case REFCNT_OP.DECREMENT_REFCOUNT_ONDEALLOC:
            r.value = -1;
            break;

        case REFCNT_OP.TOUCH_REFCOUNT:
        //case REFCNT_OP.DO_LOAD:
            r.value = 0;
            break;
    }

    r.blktype = (wb != null) ? ((isinodefilel0) ? BLK_TYPE.PUBLIC_INODE_FILE_L0 : wb.get_blk_type()) :
                ((optype == REFCNT_OP.INCREMENT_REFCOUNT_ALLOC || optype == REFCNT_OP.DECREMENT_REFCOUNT_ONDEALLOC) ?
                 BLK_TYPE.IGNORE : BLK_TYPE.REGULAR_FILE_L0);

    if (wb != null && (wb.get_level() > 0 || BLK_TYPE.PUBLIC_INODE_FILE_L0 == r.blktype))
    {
        lock (tfile0)
        {
            CONFIG.Encrypt_Data_ForWrite(tmpiodatatfileW, wb.buf_to_data());
            tfile0.Seek((long)tfilefbn * 4096, SeekOrigin.Begin);
            tfile0.Write(tmpiodatatfileW, 0, 4096);
            //DEFS.DEBUG("ENCY", "Wrote : " + OPS.ChecksumPageWRLoader(wb.buf_to_data()));
            r.tfbn = tfilefbn;
            tfilefbn++;
        }
    }
    else
    {
        r.tfbn = -1;
    }

    if (optype != REFCNT_OP.INCREMENT_REFCOUNT_ALLOC && optype != REFCNT_OP.DECREMENT_REFCOUNT &&
        optype != REFCNT_OP.DECREMENT_REFCOUNT_ONDEALLOC && optype != REFCNT_OP.TOUCH_REFCOUNT)
    {
        DEFS.DEBUG("REFCNT", "Queued update for " + r.blktype + ", dbn = " + r.dbn + ", and operation = " +
                   r.optype + ", transaction offset : " + r.tfbn);
    }

    GLOBALQ.m_reqi_queue.Add(r);
}
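/*
 * Hedged usage sketch of mod_refcount(): when an indirect block is rewritten to a new
 * dbn, the writer would typically queue an increment for the new block and a decrement
 * for the old one. The method name and variables below are illustrative (wbIndirect is
 * assumed to be a level > 0 buffer); actual call sites are not shown in this excerpt.
 */
public void RefcountUpdateExample(int fsid, int newdbn, int olddbn, Red_Buffer wbIndirect)
{
    // Queue an increment for the indirect block at its new location.
    mod_refcount(fsid, newdbn, REFCNT_OP.INCREMENT_REFCOUNT, wbIndirect, false);

    // Queue a decrement for the block being deallocated; no buffer is needed for this op.
    mod_refcount(fsid, olddbn, REFCNT_OP.DECREMENT_REFCOUNT_ONDEALLOC, null, false);
}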
/*
 * Blocks on GLOBALQ.m_reqi_queue and takes each request to its logical conclusion.
 */
public void tServiceThread()
{
    //long protected_blkdiff_counter = 0;
    long[] protected_blkdiff_counter = new long[1024];

    while (true)
    {
        UpdateReqI cu = (UpdateReqI)GLOBALQ.m_reqi_queue.Take();

        if (cu.optype == REFCNT_OP.SHUT_DOWN)
        {
            internal_sync_and_flush_cache_advanced();
            DEFS.ASSERT(GLOBALQ.m_reqi_queue.Count == 0, "There cannot be any pending updates when shutting down");
            DEFS.DEBUGYELLOW("REF", "Bailing out now!!");

            //don't take a lock here.
            for (int i = 0; i < 1024; i++)
            {
                if (REDDY.FSIDList[i] == null || protected_blkdiff_counter[i] == 0)
                {
                    continue;
                }
                REDDY.FSIDList[i].diff_upadate_logical_data(protected_blkdiff_counter[i]);
                REDDY.FSIDList[i].set_dirty(true);
                protected_blkdiff_counter[i] = 0;
            }

            cu.processed = true;
            m_initialized = false;
            break;
        }

        if (cu.optype == REFCNT_OP.DO_SYNC)
        {
            internal_sync_and_flush_cache_advanced();

            //don't take a lock here.
            for (int i = 0; i < 1024; i++)
            {
                if (REDDY.FSIDList[i] == null || protected_blkdiff_counter[i] == 0)
                {
                    continue;
                }
                REDDY.FSIDList[i].diff_upadate_logical_data(protected_blkdiff_counter[i]);
                REDDY.FSIDList[i].set_dirty(true);
                protected_blkdiff_counter[i] = 0;
            }

            cu.processed = true;
            tfile0.Flush();
            mfile1.Flush();
            dfile1.Flush();
            continue;
        }

        if (cu.optype == REFCNT_OP.TAKE_DISK_SNAPSHOT || cu.optype == REFCNT_OP.UNDO_DISK_SNAPSHOT)
        {
            int rbn_update = cu.tfbn; //overloaded, since this is just a file offset.
            load_wrbufx(rbn_update);  //will do the work

            DEFS.ASSERT(cu.dbn == 0, "This should not be set");
            DEFS.ASSERT(cu.optype == GLOBALQ.disk_snapshot_optype, "this must also match");
            //DoSnapshotWork(rbn_update);

            counter++;
            total_ops++;
            printspeed();
            continue;
        }

        if (cu.dbn != 0)
        {
            if (cu.optype == REFCNT_OP.DECREMENT_REFCOUNT_ONDEALLOC)
            {
                protected_blkdiff_counter[cu.fsid] -= 4096;
            }
            else if (cu.optype == REFCNT_OP.INCREMENT_REFCOUNT_ALLOC)
            {
                protected_blkdiff_counter[cu.fsid] += 4096;
            }
            //all other ops can be ignored.
        }

        int rbn = REFDEF.dbn_to_rbn(cu.dbn);
        total_ops++;
        counter++;

        /*
         * If this block has a child update pending, it must be cleaned up first.
         * For each entry (dbn), load up to 1024 entries into memory and update the
         * refcount. Essentially, by the time we access this buffer it must not have
         * any pending update to itself or to its children.
         *
         * How the children are updated depends on the blk_type; that is why there are
         * so many cases below.
         */
        load_wrbufx(rbn);

        if (cu.optype == REFCNT_OP.GET_REFANDCHD_INFO)
        {
            cu.processed = true;
            continue;
        }

        int childcnt = GLOBALQ.WRObj[rbn].incoretbuf.get_childcount(cu.dbn);
        if (childcnt > 0)
        {
            DEFS.DEBUG("CNTr", "Encountered child update for " + cu.dbn + " = " +
                       GLOBALQ.WRObj[rbn].incoretbuf.get_refcount(cu.dbn) + "," + childcnt);

            if (cu.blktype == BLK_TYPE.REGULAR_FILE_L0)// || cu.blktype == BLK_TYPE.DIRFILE_L0)
            {
                /* Normal handling*/
                //DEFS.ASSERT(cu.blktype == GLOBALQ.WRObj[rbn].incoretbuf.get_blk_type(cu.dbn), "Block mismatch");
                DEFS.ASSERT(cu.tfbn == -1, "tfbn cannot be set for a level 0 block generally");
                DEFS.ASSERT(false, "How can there be a childcnt update for a level zero block?");
            }
            else if (cu.blktype == BLK_TYPE.REGULAR_FILE_L1 || cu.blktype == BLK_TYPE.REGULAR_FILE_L2)
                /* || cu.blktype == BLK_TYPE.DIRFILE_L1 || cu.blktype == BLK_TYPE.DIRFILE_L2 ||
                 *    cu.blktype == BLK_TYPE.PUBLIC_INODE_FILE_L2 || cu.blktype == BLK_TYPE.PUBLIC_INODE_FILE_L1)*/
            {
                //DEFS.ASSERT(false, "Not yet implimented chdcnt in wrloader : " + REFDEF.get_string_rep(cu));
                DEFS.ASSERT(cu.tfbn != -1, "Tfbn should've been set here.");
                do_regular_dirORfile_work(cu, childcnt);
                GLOBALQ.WRObj[rbn].incoretbuf.set_childcount(cu.dbn, 0);
            }
            else if (cu.blktype == BLK_TYPE.PUBLIC_INODE_FILE_L0)
            {
                DEFS.DEBUGCLR("------", "Do ino-L0 update work," + cu.optype + " , chdcnt = " + childcnt +
                              " curr_refcnt = " + GLOBALQ.WRObj[rbn].incoretbuf.get_refcount(cu.dbn));
                do_inode_refupdate_work(cu, childcnt);
                GLOBALQ.WRObj[rbn].incoretbuf.set_childcount(cu.dbn, 0);
            }
            else
            {
                DEFS.ASSERT(false, "passed type = " + cu.blktype + "dbn = " + cu.dbn + " chdcnt = " + childcnt);
            }
        }

        if (cu.optype != REFCNT_OP.TOUCH_REFCOUNT)
        {
            /*
             * Now that pending updates are propagated, apply the queued update to this
             * refcount. If the block becomes free, notify that.
             */
            load_wrbufx(rbn);
            apply_update_internal(cu.dbn, cu.blktype, cu.value, cu.optype, (cu.optype == REFCNT_OP.INCREMENT_REFCOUNT));
            checkset_if_blockfree(cu.dbn, childcnt);
        }

        /* After the load, see if we have to clean up */
        if (cachesize > 15 * 1024)
        {
            internal_sync_and_flush_cache_advanced();
        }

        printspeed();
    }

    tfile0.Flush();
    tfile0.Close();
    dfile1.Flush();
    dfile1.Close();
    mfile1.Flush();
    mfile1.Close();
}
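/*
 * Illustrative shutdown sequence implied by tServiceThread(): callers push a control
 * request onto the same queue used for refcount updates. Only fields visible in
 * mod_refcount() are set here; treat this as a sketch rather than the project's actual
 * shutdown path.
 */
public static void RequestServiceThreadShutdown()
{
    UpdateReqI stop = new UpdateReqI();
    stop.optype = REFCNT_OP.SHUT_DOWN;      // tServiceThread flushes, settles per-FSID counters, then exits.
    GLOBALQ.m_reqi_queue.Add(stop);
}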