private void do_fsid_sync_internal(int id)
{
    lock (REDDY.FSIDList[id])
    {
        RedFS_Inode inowip = REDDY.FSIDList[id].get_inode_file_wip("GC1");
        REDDY.ptrRedFS.sync(inowip);
        REDDY.ptrRedFS.flush_cache(inowip, true);
        REDDY.FSIDList[id].sync_internal();
        REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[id]);
    }

    DEFS.DEBUG("FSIDSYNC", "Calling sync");
    REDDY.FSIDList[id].rootdir.sync();
    DEFS.DEBUG("FSIDSYNC", "Finished sync, calling gc");
    REDDY.FSIDList[id].rootdir.gc();
    DEFS.DEBUG("FSIDSYNC", "Finished gc");

    if (m_shutdown)
    {
        ((CInode)REDDY.FSIDList[id].rootdir).unmount(true);
    }

    lock (REDDY.FSIDList[id])
    {
        RedFS_Inode inowip = REDDY.FSIDList[id].get_inode_file_wip("GC2");
        REDDY.ptrRedFS.sync(inowip);
        REDDY.ptrRedFS.flush_cache(inowip, true);
        REDDY.FSIDList[id].sync_internal();
        REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[id]);
    }
}
private void do_inode_refupdate_work(UpdateReqI cu, int childcnt)
{
    byte[] buffer = new byte[4096];

    lock (tfile0)
    {
        tfile0.Seek((long)cu.tfbn * 4096, SeekOrigin.Begin);
        tfile0.Read(tmpiodatatfileR, 0, 4096);
        CONFIG.Decrypt_Read_WRBuf(tmpiodatatfileR, buffer);
        //DEFS.DEBUG("ENCY", "READ inoLo : " + OPS.ChecksumPageWRLoader(buffer));
        DEFS.DEBUG("CNTR", "do_inode_refupdate_work (" + cu.tfbn + ") childcnt =" + childcnt);
    }

    /*
     * Parent of inowip is always -1.
     */
    RedFS_Inode wip = new RedFS_Inode(WIP_TYPE.REGULAR_FILE, 0, -1);
    byte[] buf = new byte[128];

    for (int i = 0; i < 32; i++)
    {
        for (int t = 0; t < 128; t++)
        {
            buf[t] = buffer[i * 128 + t];
        }
        wip.parse_bytes(buf);

        BLK_TYPE type = BLK_TYPE.IGNORE;
        int numidx = 0;

        switch (wip.get_inode_level())
        {
            case 0:
                type = BLK_TYPE.REGULAR_FILE_L0;
                numidx = OPS.NUML0(wip.get_filesize());
                break;
            case 1:
                type = BLK_TYPE.REGULAR_FILE_L1;
                numidx = OPS.NUML1(wip.get_filesize());
                break;
            case 2:
                type = BLK_TYPE.REGULAR_FILE_L2;
                numidx = OPS.NUML2(wip.get_filesize());
                break;
        }

        for (int x = 0; x < numidx; x++)
        {
            int dbn = wip.get_child_dbn(x);
            //if (dbn <= 0) continue;
            DEFS.DEBUGCLR("^^^^^", "wip[" + x + "] " + dbn + "," + wip.get_wiptype() + "," +
                    childcnt + " fsize = " + wip.get_filesize());
            DEFS.DEBUGCLR("@@@", wip.get_string_rep2());
            apply_update_internal(dbn, type, childcnt, cu.optype, true);
        }
    }
    OPS.dump_inoL0_wips(buffer);
}
public void sync()
{
    lock (this)
    {
        if (_mywip != null)
        {
            lock (REDDY.FSIDList[m_associated_fsid])
            {
                RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("GC");
                DEFS.DEBUG("SYNC", "CFile (" + m_inode + ") _mywip.size = " + _mywip.get_filesize());

                REDDY.ptrRedFS.sync(_mywip);
                OPS.Checkin_Wip(inowip, _mywip, m_inode);
                _mywip.is_dirty = false;

                REDDY.FSIDList[m_associated_fsid].sync_internal();
                REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[m_associated_fsid]);
            }
        }
        else
        {
            DEFS.DEBUG("FSID", "inserted/unsynced : " + m_name);
        }
    }
}
private void CheckSumBuf(RedFS_Inode wip, Red_Buffer wb)
{
    lock (fpcache_buf)
    {
        if (fpcache_cnt == 1024)
        {
            fpcache_cnt = 0;
            clogfile.Write(fpcache_buf, 0, fpcache_buf.Length);
            clogfile.Flush();
        }

        if (wb.get_level() == 0 && wip.get_wiptype() == WIP_TYPE.REGULAR_FILE)
        {
            fingerprintCLOG fpt = (fingerprintCLOG)fptemp;
            fpt.fsid = wip.get_filefsid();
            fpt.inode = wip.get_ino();
            fpt.fbn = (int)wb.get_start_fbn();
            fpt.dbn = wb.get_ondisk_dbn();
            fpt.cnt = (int)clogfile.Position;

            byte[] hash = md5.ComputeHash(wb.buf_to_data());
            for (int i = 0; i < 16; i++)
            {
                fpt.fp[i] = hash[i];
            }

            fptemp.get_bytes(fpcache_buf, fpcache_cnt * fptemp.get_size());
            fpcache_cnt++;
        }
    }
}
/*
 * public static bool HasChildIncore(RedFS_Inode wip, int level, long sfbn)
 * {
 *     if (level == 1)
 *     {
 *         RedBufL1 wbl1 = (RedBufL1)wip.FindOrInsertOrRemoveBuf(FIR_OPTYPE.FIND, 1, sfbn, null, null);
 *         if (wbl1.get_numchildptrs_incore() != 0) return true;
 *         else return false;
 *     }
 *     else if (level == 2)
 *     {
 *         RedBufL2 wbl2 = (RedBufL2)wip.FindOrInsertOrRemoveBuf(FIR_OPTYPE.FIND, 2, sfbn, null, null);
 *         if (wbl2.get_numchildptrs_incore() != 0) return true;
 *         else return false;
 *     }
 *     else
 *     {
 *         DEFS.ASSERT(false, "Dont pass wrong arguments");
 *         return false;
 *     }
 * }
 */
public static bool HasChildIncoreOld(RedFS_Inode wip, int level, long sfbn)
{
    DEFS.ASSERT(level > 0, "Incorrect level to HasChildIncore()");

    if (level == 1)
    {
        int count0 = wip.L0list.Count;
        int span1 = 1024;
        for (int i = 0; i < count0; i++)
        {
            RedBufL0 wbl0 = (RedBufL0)wip.L0list.ElementAt(i);
            if (wbl0.m_start_fbn >= sfbn && wbl0.m_start_fbn < (sfbn + span1))
            {
                return(true);
            }
        }
        return(false);
    }
    else
    {
        int count1 = wip.L1list.Count;
        int span2 = 1024 * 1024;
        for (int i = 0; i < count1; i++)
        {
            RedBufL1 wbl1 = (RedBufL1)wip.L1list.ElementAt(i);
            if (wbl1.m_start_fbn >= sfbn && wbl1.m_start_fbn < (sfbn + span2))
            {
                return(true);
            }
        }
        return(false);
    }
}
private void CheckSumBuf(RedFS_Inode wip, int fbn, int dbn, byte[] buffer, int offset)
{
    lock (fpcache_buf)
    {
        if (fpcache_cnt == 1024)
        {
            fpcache_cnt = 0;
            clogfile.Write(fpcache_buf, 0, fpcache_buf.Length);
            clogfile.Flush();
        }

        if (wip.get_wiptype() == WIP_TYPE.REGULAR_FILE)
        {
            fingerprintCLOG fpt = (fingerprintCLOG)fptemp;
            fpt.fsid = wip.get_filefsid();
            fpt.inode = wip.get_ino();
            fpt.fbn = fbn;
            fpt.dbn = dbn;
            fpt.cnt = (int)clogfile.Position;

            byte[] hash = md5.ComputeHash(buffer, offset, 4096);
            for (int i = 0; i < 16; i++)
            {
                fpt.fp[i] = hash[i];
            }

            fptemp.get_bytes(fpcache_buf, fpcache_cnt * fptemp.get_size());
            fpcache_cnt++;
        }
    }
}
/*
 * public static Red_Buffer get_buf2(string who, RedFS_Inode wip, int level, int some_fbn, bool isquery)
 * {
 *     DEFS.DEBUG("getbuf", "-> " + who + "," + wip.m_ino + "," + level + "," + some_fbn + "," + isquery);
 *     Red_Buffer retbuf = wip.FindOrInsertOrRemoveBuf(FIR_OPTYPE.FIND, level, some_fbn, null, null);
 *     if (!isquery)
 *     {
 *         DEFS.ASSERT(retbuf != null, "newer get_buf2 has failed");
 *     }
 *     return retbuf;
 * }
 */

/*
 * This can never return null; the caller *must* know that this buffer is incore before
 * calling, and it must be called with a lock held on the wip. However, if isquery is set
 * to true, the caller is not sure whether the buf is incore, and in that case we can
 * safely return null.
 */
public static Red_Buffer get_buf3(string who, RedFS_Inode wip, int level, int some_fbn, bool isquery)
{
    List<Red_Buffer> list = null;
    switch (level)
    {
        case 0: list = wip.L0list; break;
        case 1: list = wip.L1list; break;
        case 2: list = wip.L2list; break;
    }
    DEFS.ASSERT(list != null, "List cannot be null in get_buf()");

    int start_fbn = SomeFBNToStartFBN(level, some_fbn);

    //Some optimization, 10-12 mbps more.
    if (level == 1)
    {
        if (wip._lasthitbuf != null && wip._lasthitbuf.get_start_fbn() == start_fbn)
        {
            return(wip._lasthitbuf);
        }
    }

    for (int idx = 0; idx < list.Count; idx++)
    //for (int idx = (list.Count - 1); idx >= 0; idx--)
    {
        int idx2 = (level == 0) ? (list.Count - idx - 1) : idx;
        Red_Buffer wb = (Red_Buffer)list.ElementAt(idx2);
        if (wb.get_start_fbn() == start_fbn)
        {
            //if (wb.get_level() > 0 && list.Count > 2) //like a splay tree.
            //{
            //    list.RemoveAt(idx2);
            //    list.Insert(0, wb);
            //}
            if (level == 1)
            {
                wip._lasthitbuf = wb; //good opti - gives 10-12 mbps more.
            }
            return(wb);
        }
    }

    DEFS.ASSERT(isquery, "who = " + who + ", get_buf() failed " + wip.get_ino() + "," + level + "," + some_fbn);
    return(null);
}
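
/*
 * The helper below is an illustrative sketch only; SomeFBNToStartFBN() is referenced by
 * get_buf3() but is not part of this listing. Its assumed behaviour is derived from the
 * spans used in HasChildIncoreOld(): an L1 buffer covers 1024 L0 fbns and an L2 buffer
 * covers 1024 * 1024 L0 fbns, so the start fbn is the given fbn rounded down to the
 * covering buffer's boundary. The _sketch suffix marks the name as hypothetical.
 */
public static int SomeFBNToStartFBN_sketch(int level, int some_fbn)
{
    switch (level)
    {
        case 0:
            return some_fbn;                                    //L0 buffers map one fbn each.
        case 1:
            return (some_fbn / 1024) * 1024;                    //1024 L0 entries per L1 buffer.
        case 2:
            return (some_fbn / (1024 * 1024)) * (1024 * 1024);  //1024 L1 entries per L2 buffer.
        default:
            DEFS.ASSERT(false, "Invalid level passed to SomeFBNToStartFBN_sketch()");
            return -1;
    }
}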
public static bool CheckinZerodWipData(RedFS_Inode inowip, int m_ino)
{
    long fileoffset = (long)m_ino * 128;
    byte[] data = new byte[128];

    lock (inowip)
    {
        REDDY.ptrRedFS.redfs_write(inowip, fileoffset, data, 0, 128);
        inowip.is_dirty = true;
    }
    return(true);
}
public RedFS_Inode get_inodemap_wip()
{
    RedFS_Inode inowip = new RedFS_Inode(WIP_TYPE.PUBLIC_INODE_MAP, 0, -1);
    byte[] buf = new byte[128];

    for (int i = 0; i < 128; i++)
    {
        buf[i] = data[CFSvalueoffsets.fsid_inomap_data + i];
    }
    inowip.parse_bytes(buf);
    return(inowip);
}
public bool set_inodemap_wip(RedFS_Inode wip)
{
    byte[] buf = new byte[128];
    wip.get_bytes(buf);

    for (int i = 0; i < 128; i++)
    {
        data[CFSvalueoffsets.fsid_inomap_data + i] = buf[i];
    }
    set_dirty(true);
    return(true);
}
private void init_internal2()
{
    RedFS_Inode w2 = get_inodemap_wip();
    for (int i = 0; i < 16; i++)
    {
        w2.set_child_dbn(i, DBN.INVALID);
    }
    w2.set_filesize(0);
    set_inodemap_wip(w2);

    set_logical_data(0);
    set_dirty(true);
}
void CInode.unmount(bool inshutdown)
{
    long curr = DateTime.Now.ToUniversalTime().Ticks;
    int seconds = (int)((curr - creation_time) / 10000000);

    DEFS.DEBUG("UNMOUNT", "CFile (" + m_inode + ") unmount : " + m_name + " inshutdown flag = " + inshutdown +
            " is _mywip null = " + (_mywip == null) + " secs = " + seconds);

    if (inshutdown == false && timeoutcheck() == false && m_state == FILE_STATE.FILE_IN_DOKAN_IO)
    {
        return;
    }

    /*
     * We cannot unmount a dirty wip directly; it must first be cleaned, so we don't do that
     * here. The next sync iteration will clean the wip, and then we are free to unmount.
     * If we are shutting down, we sync() right here.
     */
    if ((inshutdown == false) && (_mywip == null))// || _mywip.is_dirty == false))
    {
        DEFS.ASSERT(m_state != FILE_STATE.FILE_IN_DOKAN_IO, "Cannot be in dokan io when _mywip is null");
        return;
    }

    /*
     * _mywip is not null and dirty, or we are shutting down.
     */
    lock (this)
    {
        DEFS.ASSERT(m_state != FILE_STATE.FILE_ORPHANED, "We are in the sync path and cannot have an orphaned file");

        if (_mywip != null)
        {
            REDDY.ptrRedFS.sync(_mywip);
            REDDY.ptrRedFS.flush_cache(_mywip, inshutdown);
        }

        lock (REDDY.FSIDList[m_associated_fsid])
        {
            if (_mywip != null)
            {
                RedFS_Inode inowipX = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("Unmount file wip:" + m_name);
                OPS.Checkin_Wip(inowipX, _mywip, m_inode);

                DEFS.ASSERT(m_state != FILE_STATE.FILE_DELETED, "Wrong state detected222!");

                REDDY.FSIDList[m_associated_fsid].sync_internal();
                REDDY.FSIDList[m_associated_fsid].set_dirty(true);
                _mywip = null;
            }
            m_state = FILE_STATE.FILE_UNMOUNTED;
        }
    }
}
/*
 * There is no need for extra locks here: sync() runs under a shared lock and
 * unmount() under an exclusive lock.
 */
public void unmount(int fsid)
{
    REDDY.FSIDList[fsid].rootdir.sync();
    ((CInode)REDDY.FSIDList[fsid].rootdir).unmount(true);

    lock (REDDY.FSIDList[fsid])
    {
        RedFS_Inode inowip = REDDY.FSIDList[fsid].get_inode_file_wip("GC");
        REDDY.ptrRedFS.sync(inowip);
        REDDY.ptrRedFS.flush_cache(inowip, true);
        REDDY.FSIDList[fsid].sync_internal();
        REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[fsid]);
    }
}
public static bool Checkin_Wip(RedFS_Inode inowip, RedFS_Inode mywip, int m_ino)
{
    DEFS.ASSERT(m_ino == mywip.get_ino(), "Inode numbers don't match; this can lead to corruption " +
            m_ino + "," + mywip.get_ino());
    long fileoffset = (long)m_ino * 128;

    lock (inowip)
    {
        REDDY.ptrRedFS.redfs_write(inowip, fileoffset, mywip.data, 0, 128);
        DEFS.DEBUG("OPS", "CheckIn wip " + mywip.get_ino() + " size = " + mywip.get_filesize());
        inowip.is_dirty = true;
    }
    DEFS.DEBUG("CI_WIP", mywip.get_string_rep2());
    return(true);
}
public RedFS_Inode get_inode_file_wip(string requester)
{
    if (_ninowip == null)
    {
        _ninowip = new RedFS_Inode(WIP_TYPE.PUBLIC_INODE_FILE, 0, -1);
        for (int i = 0; i < 128; i++)
        {
            _ninowip.data[i] = data[CFSvalueoffsets.fsid_inofile_data + i];
        }
        _ninowip.set_wiptype(WIP_TYPE.PUBLIC_INODE_FILE);
        _ninowip.setfilefsid_on_dirty(m_dbn);
    }
    DEFS.DEBUG("FSID", "Giving an inowip to " + requester);
    return(_ninowip);
}
/*
 * The functions below are used for dedupe, the user command prompt, etc.
 */
private int LoadWip_FindPINO(int fsid, int ino, ref WIP_TYPE type)
{
    lock (REDDY.FSIDList[fsid])
    {
        RedFS_Inode inowip = REDDY.FSIDList[fsid].get_inode_file_wip("Loadinode");
        lock (inowip)
        {
            RedFS_Inode mywip = new RedFS_Inode(WIP_TYPE.UNDEFINED, ino, -1);
            bool ret = OPS.Checkout_Wip2(inowip, mywip, ino);
            REDDY.FSIDList[fsid].sync_internal();
            type = mywip.get_wiptype();
            //DEFS.DEBUG("LdIno", "Loaded ino = " + ino + " wip from disk, type = " + type);
            //DEFS.DEBUG("LdIno", mywip.get_string_rep2());
            return((ret) ? mywip.get_parent_ino() : -1);
        }
    }
}
public static void dump_inoL0_wips(byte[] buffer)
{
    byte[] buf = new byte[128];
    RedFS_Inode wip = new RedFS_Inode(WIP_TYPE.REGULAR_FILE, 0, 0);

    for (int i = 0; i < 32; i++)
    {
        for (int t = 0; t < 128; t++)
        {
            buf[t] = buffer[i * 128 + t];
        }
        wip.parse_bytes(buf);

        if (wip.get_ino() != 0)
        {
            DEFS.DEBUG("->", wip.get_string_rep2());
        }
    }
}
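
/*
 * Illustrative helper, not part of the original source: dump_inoL0_wips() above and the
 * Checkin_Wip()/Checkout_Wip2() routines all address the inode file as a flat array of
 * 128-byte wips, i.e. 32 wips per 4K block at byte offset ino * 128. Only that layout is
 * taken from the surrounding code; the helper's name and out-parameters are hypothetical.
 */
public static void ino_to_inofile_location_sketch(int ino, out long fileoffset, out int fbn, out int slot)
{
    fileoffset = (long)ino * 128;   //byte offset of the wip inside the inode file.
    fbn = ino / 32;                 //4096 / 128 = 32 wips per inode-file block.
    slot = ino % 32;                //slot index within that block, as iterated by dump_inoL0_wips().
}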
public bool write(RedFS_Inode wip, Red_Buffer wb)
{
    if (!initialized)
    {
        return(false);
    }
    total_disk_writes++;

    lock (dfile)
    {
        //DEFS.DEBUG("RAID", "Writing dbn : " + wb.get_ondisk_dbn() + " level : " + wb.get_level());
        dfile.Seek((long)wb.get_ondisk_dbn() * 4096, SeekOrigin.Begin);
        dfile.Write(wb.buf_to_data(), 0, 4096);
        dfile.Flush();
        wb.set_dirty(false);
        CheckSumBuf(wip, wb);
    }
    return(true);
}
public bool write(RedFS_Inode wip, int fbn, int dbn, byte[] buffer, int offset)
{
    if (!initialized)
    {
        return(false);
    }
    total_disk_writes++;

    lock (dfile)
    {
        dfile.Seek((long)dbn * 4096, SeekOrigin.Begin);
        dfile.Write(buffer, offset, 4096);
        dfile.Flush();

        //CheckSumBuf(wip, wb);
        CheckSumBuf(wip, fbn, dbn, buffer, offset);
        //DEFS.DEBUG("FASTWRITE", "dbn, bufoffset = " + dbn + "," + offset);
    }
    return(true);
}
/*
 * Given an fsid, this looks into the iMapWip and gets a free bit. The fsid block holds the
 * largest inode number that is currently in use, and the iMapWip itself. I'm not using any
 * locks for this wip since this operation will never be concurrent: all FS-modification code
 * that may use this path already holds a lock on the rootdir, e.g. duping, deleting, inserting.
 *
 * XXX: Note that we never free the inode bit once it is set! So basically this is a dummy
 * function. We still work because we can afford to wait for 500M inodes to be allocated
 * before we do a wrap-around.
 */
private int find_free_ino_bit(int fsid)
{
    int max_fbns = 16384;
    int curr_max_inode = REDDY.FSIDList[fsid].get_start_inonumber();
    byte[] buffer = new byte[4096];

    RedFS_Inode iMapWip = REDDY.FSIDList[fsid].get_inodemap_wip();
    int fbn = OPS.OffsetToFBN(curr_max_inode / 8);

    for (int cfbn = fbn; cfbn < max_fbns; cfbn++)
    {
        OPS.BZERO(buffer);
        REDDY.ptrRedFS.redfs_read(iMapWip, (cfbn * 4096), buffer, 0, 4096);

        int startsearchoffset = ((cfbn == fbn) ? (curr_max_inode / 8) : 0) % 4096;
        int free_bit = get_free_bitoffset(startsearchoffset, buffer);
        if (free_bit != -1)
        {
            int free_inode = ((cfbn * (4096 * 8)) + free_bit);
            REDDY.ptrRedFS.redfs_write(iMapWip, (cfbn * 4096), buffer, 0, 4096);
            REDDY.ptrRedFS.sync(iMapWip);

            REDDY.FSIDList[fsid].set_inodemap_wip(iMapWip);
            REDDY.ptrRedFS.flush_cache(iMapWip, true);
            REDDY.FSIDList[fsid].set_start_inonumber(free_inode + 1);

            DEFS.DEBUG("IFSDMux", "Found free ino = " + free_inode + " so setting currmaxino = " +
                    curr_max_inode + " for fsid = " + fsid);
            REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[fsid]);
            return(free_inode);
        }
    }

    REDDY.FSIDList[fsid].set_start_inonumber(64);
    REDDY.ptrRedFS.redfs_commit_fsid(REDDY.FSIDList[fsid]); //do we need this regularly?
    DEFS.DEBUG("FSID", "XXXXX VERY RARE EVENT XXXX INODE WRAP AROUND XXXX");
    return(find_free_ino_bit(fsid));
}
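
/*
 * Hypothetical sketch of get_free_bitoffset(), which find_free_ino_bit() calls but which is
 * not included in this listing. Based on how the caller uses it, it is assumed to scan the
 * 4K imap page from the given byte offset, claim the first clear bit it finds (so the caller
 * can write the modified page back), and return that bit's offset within the page, or -1
 * when the page is full. The _sketch suffix marks this as an assumption, not the real code.
 */
private int get_free_bitoffset_sketch(int startsearchoffset, byte[] buffer)
{
    for (int offset = startsearchoffset; offset < buffer.Length; offset++)
    {
        if (buffer[offset] == 0xFF) continue;           //all eight inodes in this byte are taken.

        for (int bit = 0; bit < 8; bit++)
        {
            if ((buffer[offset] & (1 << bit)) == 0)
            {
                buffer[offset] |= (byte)(1 << bit);     //mark the inode as allocated in the page.
                return offset * 8 + bit;
            }
        }
    }
    return -1;                                          //no free inode bit in this page.
}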
public void remove_ondisk_data2()
{
    open_file(false);
    touch();

    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return;
    }

    m_state = FILE_STATE.FILE_DELETED;
    DEFS.ASSERT(_mywip != null, "Cannot be null in remove() after calling open()");

    lock (this)
    {
        lock (REDDY.FSIDList[m_associated_fsid])
        {
            DEFS.ASSERT(_mywip != null, "Unmount couldn't have worked on this");
            RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("DF:" + m_name);

            //REDDY.ptrRedFS.sync(_mywip);
            REDDY.ptrRedFS.flush_cache(_mywip, false);
            REDDY.ptrRedFS.redfs_delete_wip(m_associated_fsid, _mywip, true);

            DEFS.ASSERT(_mywip.get_filesize() == 0, "After delete, all the wip contents must be cleared off");
            for (int i = 0; i < 16; i++)
            {
                DEFS.ASSERT(_mywip.get_child_dbn(i) == DBN.INVALID, "dbns are not cleared after delete_wip " +
                        i + " " + _mywip.get_child_dbn(i));
            }

            OPS.CheckinZerodWipData(inowip, m_inode);
            REDDY.FSIDList[m_associated_fsid].sync_internal();
            _mywip = null;
        }
    }
    DEFS.DEBUG("IFSD", "<<<< DELETED FILE >>>> " + m_name);
}
public static bool Checkout_Wip2(RedFS_Inode inowip, RedFS_Inode mywip, int m_ino)
{
    WIP_TYPE oldtype = mywip.get_wiptype();

    for (int i = 0; i < 16; i++)
    {
        DEFS.ASSERT(mywip.get_child_dbn(i) == DBN.INVALID, "Wip cannot be valid during checkout, " +
                i + " value = " + mywip.get_child_dbn(i));
    }

    long fileoffset = (long)m_ino * 128;
    lock (inowip)
    {
        REDDY.ptrRedFS.redfs_read(inowip, fileoffset, mywip.data, 0, 128);
        if (oldtype != WIP_TYPE.UNDEFINED)
        {
            mywip.set_wiptype(oldtype);
        }
    }
    DEFS.DEBUG("CO_WIP", mywip.get_string_rep2());
    return(mywip.verify_inode_number());
}
public bool read(RedFS_Inode wip, int dbn, byte[] buffer, int offset)
{
    if (!initialized)
    {
        return(false);
    }
    total_disk_reads++;

    if (dbn == 0)
    {
        Array.Clear(buffer, offset, 4096);
        return(true);
    }

    lock (dfile)
    {
        dfile.Seek((long)dbn * 4096, SeekOrigin.Begin);
        dfile.Read(buffer, offset, 4096);
        dfile.Flush();
    }
    return(true);
}
/*
 * We don't expect a write to come in before the file is opened, because CDirectory
 * calls open_file() before inserting into the DIR CACHE. We shouldn't call this
 * with the cfile lock held.
 */
public bool open_file(bool justcreated)
{
    if (m_state == FILE_STATE.FILE_DELETED)
    {
        return(false);
    }
    else if (m_state == FILE_STATE.FILE_IN_DOKAN_IO)
    {
        DEFS.ASSERT(_mywip != null, "My wip cannot be null when the dokan_io flag is set in open_file");
        return(true);
    }

    touch();

    if (_mywip == null)
    {
        lock (this)
        {
            if (_mywip != null)
            {
                /*
                 * It could be that someone has already opened it, maybe a previous call
                 * that was blocked in open_file(); just bail out.
                 */
                DEFS.ASSERT(m_state != FILE_STATE.FILE_IN_DOKAN_IO, "Cannot suddenly be in dokan io when it was just null");
                return(true);
            }

            lock (REDDY.FSIDList[m_associated_fsid])
            {
                _mywip = new RedFS_Inode(WIP_TYPE.REGULAR_FILE, m_inode, m_parent_inode);
                long oldsize = _mywip.get_filesize();

                RedFS_Inode inowip = REDDY.FSIDList[m_associated_fsid].get_inode_file_wip("OF:" + m_name);
                DEFS.DEBUG("F(_mywip)", "Loaded ino = " + m_inode + " wip from disk, size = " + _mywip.get_filesize());

                bool ret = OPS.Checkout_Wip2(inowip, _mywip, m_inode);
                if (ret)
                {
                    DEFS.DEBUG("FILE", "Loaded ino = " + m_inode + " wip from disk, size = " + _mywip.get_filesize());
                }
                else
                {
                    DEFS.DEBUG("FILE", "Loaded ino = " + m_inode + " (new) size = " + _mywip.get_filesize());
                    _mywip.set_ino(m_parent_inode, m_inode);
                }

                DEFS.ASSERT(m_size == _mywip.get_filesize(), "File size should match, irrespective of whether it " +
                        "came from disk (= 0 then) or was inserted from an existing dir load (>= 0 in that case), msize:" +
                        m_size + " _mywip.size:" + _mywip.get_filesize() + " fname =" + m_name + " ino=" + m_inode +
                        " beforeread size = " + oldsize + " contents : " + _mywip.get_string_rep2() + " ret = " + ret);

                if (justcreated)
                {
                    DEFS.ASSERT(ret == false, "This should be a new file " + _mywip.get_filesize() + " fname =" + m_name +
                            " ino=" + m_inode + " beforeread size = " + oldsize + " contents : " + _mywip.get_string_rep2());
                    _mywip.setfilefsid_on_dirty(m_associated_fsid);
                    _mywip.is_dirty = true; //this must make it to disk.
                }

                REDDY.FSIDList[m_associated_fsid].sync_internal();
                m_state = FILE_STATE.FILE_DEFAULT;
            }
        }
    }
    return(true);
}
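
/*
 * Illustrative call pattern for open_file(), based only on the comment above; CDirectory's
 * actual code is not part of this listing and the names used here are hypothetical. A brand
 * new file is opened with justcreated == true so its wip is marked dirty and reaches disk,
 * while a file loaded from an existing directory entry is opened with false before any I/O.
 */
//CFile newfile = ...;                 //created by the directory layer; construction not shown here.
//newfile.open_file(true);             //new file: no wip on disk yet, so the ret == false branch runs.
//dircache_insert(newfile);            //hypothetical DIR CACHE insert done by CDirectory afterwards.
//
//CFile existing = ...;                //loaded from an existing directory entry.
//existing.open_file(false);           //checks the wip out of the inode file before reads/writes.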
public static long ComputeNextStartFbn(RedFS_Inode wip)
{
    int cnt = NUML0(wip.get_filesize());
    return((long)cnt * 4096);
}