/// <summary>
/// Recursively deletes a file or a whole directory tree.
/// </summary>
/// <param name="fileOrDirectory">Root file or directory to remove.</param>
/// <returns>
/// <c>true</c> when the entry was deleted or no longer exists afterwards;
/// <c>false</c> when it could not be removed.
/// </returns>
public static bool DeleteRecursive(FilePath fileOrDirectory)
{
    // Children must go first so the directory is empty when we delete it.
    if (fileOrDirectory.IsDirectory())
    {
        foreach (FilePath entry in fileOrDirectory.ListFiles())
        {
            DeleteRecursive(entry);
        }
    }
    // Treat "already gone" the same as a successful delete.
    return fileOrDirectory.Delete() || !fileOrDirectory.Exists();
}
/// <summary>
/// Verifies that old .touchdb database files are migrated (renamed) to the
/// new .cblite suffix when a Manager starts, except when a file with the
/// target name already exists.
/// </summary>
/// <exception cref="System.Exception"></exception>
public virtual void TestUpgradeOldDatabaseFiles()
{
    string directoryName = "test-directory-" + Runtime.CurrentTimeMillis();
    string normalFilesDir = GetRootDirectory().GetAbsolutePath();
    // BUGFIX: the original used Java-style "%s" placeholders; .NET
    // String.Format leaves those as literal text (the directory would be
    // named "%s/%s" and the files "old%s"/"new%s"). Use {0}-style
    // composite-format placeholders instead.
    string fakeFilesDir = string.Format("{0}/{1}", normalFilesDir, directoryName);
    FilePath directory = new FilePath(fakeFilesDir);
    if (!directory.Exists())
    {
        bool result = directory.Mkdir();
        if (!result)
        {
            throw new IOException("Unable to create directory " + directory);
        }
    }
    FilePath oldTouchDbFile = new FilePath(directory, string.Format("old{0}", Manager.DatabaseSuffixOld));
    oldTouchDbFile.CreateNewFile();
    FilePath newCbLiteFile = new FilePath(directory, string.Format("new{0}", Manager.DatabaseSuffix));
    newCbLiteFile.CreateNewFile();
    FilePath migratedOldFile = new FilePath(directory, string.Format("old{0}", Manager.DatabaseSuffix));
    migratedOldFile.CreateNewFile();
    base.StopCBLite();
    manager = new Manager(new FilePath(GetRootDirectory(), directoryName), Manager.DefaultOptions);
    // Cannot rename old.touchdb to old.cblite: old.cblite already exists.
    NUnit.Framework.Assert.IsTrue(migratedOldFile.Exists());
    NUnit.Framework.Assert.IsTrue(oldTouchDbFile.Exists());
    NUnit.Framework.Assert.IsTrue(newCbLiteFile.Exists());
    FilePath dir = new FilePath(GetRootDirectory(), directoryName);
    NUnit.Framework.Assert.AreEqual(3, dir.ListFiles().Length);
    base.StopCBLite();
    migratedOldFile.Delete();
    manager = new Manager(new FilePath(GetRootDirectory(), directoryName), Manager.DefaultOptions);
    // Rename old.touchdb to old.cblite: the previous old.cblite is gone now.
    NUnit.Framework.Assert.IsTrue(migratedOldFile.Exists());
    NUnit.Framework.Assert.IsTrue(oldTouchDbFile.Exists() == false);
    NUnit.Framework.Assert.IsTrue(newCbLiteFile.Exists());
    dir = new FilePath(GetRootDirectory(), directoryName);
    NUnit.Framework.Assert.AreEqual(2, dir.ListFiles().Length);
}
/// <summary>
/// Removes a loose ref file from the repository directory, asserting that
/// the delete actually happened.
/// </summary>
/// <param name="name">Ref name relative to the repository directory.</param>
private void DeleteLooseRef(string name)
{
    FilePath refFile = new FilePath(diskRepo.Directory, name);
    NUnit.Framework.Assert.IsTrue(refFile.Delete(), "deleted " + name);
}
/// <summary>
/// Deletes the file at <paramref name="path"/> when it is present.
/// </summary>
/// <param name="path">Absolute or relative path of the item to remove.</param>
/// <returns><c>true</c> when the item was removed or did not exist.</returns>
public static bool RemoveItemIfExists(string path)
{
    FilePath target = new FilePath(path);
    bool deleted = target.Delete();
    return deleted || !target.Exists();
}
/// <summary>Execute this checkout</summary>
/// <returns>
/// <code>false</code> if this method could not delete all the files
/// which should be deleted (e.g. because one of the files was
/// locked). In this case
/// <see cref="GetToBeDeleted()">GetToBeDeleted()</see>
/// lists the files
/// which should be tried to be deleted outside of this method.
/// Although <code>false</code> is returned the checkout was
/// successful and the working tree was updated for all other files.
/// <code>true</code> is returned when no such problem occurred
/// </returns>
/// <exception cref="System.IO.IOException">System.IO.IOException</exception>
public virtual bool Checkout()
{
    toBeDeleted.Clear();
    // Scan against HEAD when one exists, otherwise scan the single tree.
    if (headCommitTree != null)
    {
        PreScanTwoTrees();
    }
    else
    {
        PrescanOneTree();
    }
    if (!conflicts.IsEmpty())
    {
        if (failOnConflict)
        {
            // Release the index lock before surfacing the conflicts.
            dc.Unlock();
            throw new CheckoutConflictException(Sharpen.Collections.ToArray(conflicts, new string
                [conflicts.Count]));
        }
        else
        {
            CleanUpConflicts();
        }
    }
    // update our index
    builder.Finish();
    FilePath file = null;
    string last = string.Empty;
    // when deleting files process them in the opposite order as they have
    // been reported. This ensures the files are deleted before we delete
    // their parent folders
    for (int i = removed.Count - 1; i >= 0; i--)
    {
        string r = removed[i];
        file = new FilePath(repo.WorkTree, r);
        if (!file.Delete() && file.Exists())
        {
            // Could not delete (e.g. file locked); report to the caller.
            toBeDeleted.AddItem(r);
        }
        else
        {
            // Only prune parents once per directory prefix.
            if (!IsSamePrefix(r, last))
            {
                RemoveEmptyParents(file);
            }
            last = r;
        }
    }
    if (file != null)
    {
        RemoveEmptyParents(file);
    }
    foreach (string path in updated.Keys)
    {
        // ... create/overwrite this file ...
        file = new FilePath(repo.WorkTree, path);
        file.GetParentFile().Mkdirs();
        file.CreateNewFile();
        DirCacheEntry entry = dc.GetEntry(path);
        CheckoutEntry(repo, file, entry);
    }
    // commit the index builder - a new index is persisted
    if (!builder.Commit())
    {
        dc.Unlock();
        throw new IndexWriteException();
    }
    return toBeDeleted.Count == 0;
}
/// <summary>
/// Removes conflicting paths from the work tree. A conflict that cannot be
/// deleted aborts the cleanup; paths from the removed list are deleted on
/// a best-effort basis.
/// </summary>
/// <exception cref="NGit.Errors.CheckoutConflictException"></exception>
private void CleanUpConflicts()
{
    foreach (string conflictPath in conflicts)
    {
        FilePath conflictFile = new FilePath(root, conflictPath);
        bool gone = conflictFile.Delete();
        if (!gone)
        {
            throw new NGit.Errors.CheckoutConflictException(MessageFormat.Format(JGitText.Get
                ().cannotDeleteFile, conflictPath));
        }
        RemoveEmptyParents(conflictFile);
    }
    foreach (string removedPath in removed)
    {
        FilePath removedFile = new FilePath(root, removedPath);
        // Best effort: a failed delete here is deliberately ignored.
        removedFile.Delete();
        RemoveEmptyParents(removedFile);
    }
}
/// <summary>
/// Verifies FileUtils.Mkdirs: creates a nested hierarchy, fails on an
/// already-existing hierarchy unless the boolean flag is passed, and fails
/// when a path component is occupied by an existing file.
/// </summary>
public virtual void TestMkdirs()
{
    FilePath root = new FilePath(trash, "test");
    NUnit.Framework.Assert.IsTrue(root.Mkdir());
    FilePath d = new FilePath(root, "test/test");
    FileUtils.Mkdirs(d);
    NUnit.Framework.Assert.IsTrue(d.Exists() && d.IsDirectory());
    try
    {
        FileUtils.Mkdirs(d);
        NUnit.Framework.Assert.Fail("creation of existing directory hierarchy must fail");
    }
    catch (IOException)
    {
    }
    // expected
    // Mkdirs(d, true) must tolerate the already-existing hierarchy
    // (the second argument appears to mean skip-existing — confirm in FileUtils).
    FileUtils.Mkdirs(d, true);
    NUnit.Framework.Assert.IsTrue(d.Exists() && d.IsDirectory());
    FileUtils.Delete(root, FileUtils.RECURSIVE);
    // Replace the directory with a plain file of the same name.
    FilePath f = new FilePath(trash, "test");
    FileUtils.CreateNewFile(f);
    try
    {
        FileUtils.Mkdirs(d);
        NUnit.Framework.Assert.Fail("creation of directory having path conflicting with existing"
             + " file must fail");
    }
    catch (IOException)
    {
    }
    // expected
    NUnit.Framework.Assert.IsTrue(f.Delete());
}
/// <summary>
/// Applies the pending checkout map to the work tree: a non-null entry is
/// written out, a null entry means the path must be deleted. Every touched
/// path is recorded in modifiedFiles; failed deletes go to failingPaths.
/// </summary>
/// <exception cref="NGit.Errors.NoWorkTreeException"></exception>
/// <exception cref="System.IO.IOException"></exception>
private void Checkout()
{
    foreach (KeyValuePair<string, DirCacheEntry> pending in toBeCheckedOut.EntrySet())
    {
        string path = pending.Key;
        FilePath workFile = new FilePath(db.WorkTree, path);
        DirCacheEntry cacheEntry = pending.Value;
        if (cacheEntry == null)
        {
            // No cache entry: the work-tree file has to go away.
            if (!workFile.Delete())
            {
                failingPaths.Put(path, ResolveMerger.MergeFailureReason.COULD_NOT_DELETE);
            }
        }
        else
        {
            CreateDir(workFile.GetParentFile());
            DirCacheCheckout.CheckoutEntry(db, workFile, cacheEntry);
        }
        modifiedFiles.AddItem(path);
    }
}
/// <summary>
/// Performs the actual checkout: scans the trees, resolves or reports
/// conflicts, deletes removed paths (deepest first), writes updated files,
/// and commits the rebuilt index.
/// </summary>
/// <exception cref="NGit.Errors.CorruptObjectException"></exception>
/// <exception cref="System.IO.IOException"></exception>
/// <exception cref="NGit.Errors.MissingObjectException"></exception>
/// <exception cref="NGit.Errors.IncorrectObjectTypeException"></exception>
/// <exception cref="NGit.Errors.CheckoutConflictException"></exception>
/// <exception cref="NGit.Errors.IndexWriteException"></exception>
private bool DoCheckout()
{
    toBeDeleted.Clear();
    ObjectReader objectReader = repo.ObjectDatabase.NewReader();
    try
    {
        // Scan against HEAD when one exists, otherwise scan the single tree.
        if (headCommitTree != null)
        {
            PreScanTwoTrees();
        }
        else
        {
            PrescanOneTree();
        }
        if (!conflicts.IsEmpty())
        {
            if (failOnConflict)
            {
                throw new NGit.Errors.CheckoutConflictException(Sharpen.Collections.ToArray(conflicts
                    , new string[conflicts.Count]));
            }
            else
            {
                CleanUpConflicts();
            }
        }
        // update our index
        builder.Finish();
        FilePath file = null;
        string last = string.Empty;
        // when deleting files process them in the opposite order as they have
        // been reported. This ensures the files are deleted before we delete
        // their parent folders
        for (int i = removed.Count - 1; i >= 0; i--)
        {
            string r = removed[i];
            file = new FilePath(repo.WorkTree, r);
            if (!file.Delete() && file.Exists())
            {
                // The list of stuff to delete comes from the index
                // which will only contain a directory if it is
                // a submodule, in which case we shall not attempt
                // to delete it. A submodule is not empty, so it
                // is safe to check this after a failed delete.
                if (!file.IsDirectory())
                {
                    toBeDeleted.AddItem(r);
                }
            }
            else
            {
                // Only prune parents once per directory prefix.
                if (!IsSamePrefix(r, last))
                {
                    RemoveEmptyParents(new FilePath(repo.WorkTree, last));
                }
                last = r;
            }
        }
        if (file != null)
        {
            RemoveEmptyParents(file);
        }
        foreach (string path in updated.Keys)
        {
            // ... create/overwrite this file ...
            file = new FilePath(repo.WorkTree, path);
            if (!file.GetParentFile().Mkdirs())
            {
            }
            // ignore
            DirCacheEntry entry = dc.GetEntry(path);
            // submodules are handled with separate operations
            if (FileMode.GITLINK.Equals(entry.RawMode))
            {
                continue;
            }
            CheckoutEntry(repo, file, entry, objectReader);
        }
        // commit the index builder - a new index is persisted
        if (!builder.Commit())
        {
            throw new IndexWriteException();
        }
    }
    finally
    {
        objectReader.Release();
    }
    return toBeDeleted.Count == 0;
}
/// <summary>
/// Verifies FileUtils.Mkdir: creates a directory, fails on an existing one
/// unless the boolean flag is passed, and fails when a file occupies the
/// target path.
/// </summary>
public virtual void TestMkdir()
{
    FilePath d = new FilePath(trash, "test");
    FileUtils.Mkdir(d);
    NUnit.Framework.Assert.IsTrue(d.Exists() && d.IsDirectory());
    try
    {
        FileUtils.Mkdir(d);
        NUnit.Framework.Assert.Fail("creation of existing directory must fail");
    }
    catch (IOException)
    {
    }
    // expected
    // Mkdir(d, true) must tolerate the already-existing directory
    // (the second argument appears to mean skip-existing — confirm in FileUtils).
    FileUtils.Mkdir(d, true);
    NUnit.Framework.Assert.IsTrue(d.Exists() && d.IsDirectory());
    NUnit.Framework.Assert.IsTrue(d.Delete());
    // Replace the directory with a plain file of the same name.
    FilePath f = new FilePath(trash, "test");
    FileUtils.CreateNewFile(f);
    try
    {
        FileUtils.Mkdir(d);
        NUnit.Framework.Assert.Fail("creation of directory having same path as existing file must"
             + " fail");
    }
    catch (IOException)
    {
    }
    // expected
    NUnit.Framework.Assert.IsTrue(f.Delete());
}
/// <summary>
/// This method implements how to handle conflicts when
/// <see cref="failOnConflict">failOnConflict</see>
/// is false: both conflicting and removed paths are deleted from the work
/// tree, and any failed delete aborts with a CheckoutConflictException.
/// </summary>
/// <exception cref="NGit.Errors.CheckoutConflictException">NGit.Errors.CheckoutConflictException
/// </exception>
private void CleanUpConflicts()
{
    // TODO: couldn't we delete unsaved worktree content here?
    foreach (string conflictPath in conflicts)
    {
        FilePath conflictFile = new FilePath(repo.WorkTree, conflictPath);
        if (!conflictFile.Delete())
        {
            throw new NGit.Errors.CheckoutConflictException(MessageFormat.Format(JGitText.Get
                ().cannotDeleteFile, conflictPath));
        }
        RemoveEmptyParents(conflictFile);
    }
    foreach (string removedPath in removed)
    {
        FilePath removedFile = new FilePath(repo.WorkTree, removedPath);
        if (removedFile.Delete())
        {
            RemoveEmptyParents(removedFile);
            continue;
        }
        throw new NGit.Errors.CheckoutConflictException(MessageFormat.Format(JGitText.Get
            ().cannotDeleteFile, removedFile.GetAbsolutePath()));
    }
}
/// <summary>
/// Downloads one or more remote files (glob pattern allowed in
/// <paramref name="src"/>) to a local file or directory. When the transfer
/// mode is RESUME, a partially downloaded file is continued; OVERWRITE
/// truncates; otherwise the download appends. A freshly-created, empty
/// destination file is removed again when the transfer fails.
/// </summary>
/// <param name="src">Remote path or glob pattern.</param>
/// <param name="dst">Local destination file or directory.</param>
/// <param name="monitor">Optional progress callback; may be null.</param>
/// <param name="mode">One of OVERWRITE, RESUME, or append behavior.</param>
/// <exception cref="NSch.SftpException"></exception>
public virtual void Get(string src, string dst, SftpProgressMonitor monitor, int mode)
{
    // System.out.println("get: "+src+" "+dst);
    bool _dstExist = false;
    string _dst = null;
    try
    {
        ((Channel.MyPipedInputStream)io_in).UpdateReadSide();
        src = RemoteAbsolutePath(src);
        dst = LocalAbsolutePath(dst);
        // Expand any glob pattern into the concrete remote paths.
        ArrayList v = Glob_remote(src);
        int vsize = v.Count;
        if (vsize == 0)
        {
            throw new SftpException(SSH_FX_NO_SUCH_FILE, "No such file");
        }
        FilePath dstFile = new FilePath(dst);
        bool isDstDir = dstFile.IsDirectory();
        StringBuilder dstsb = null;
        if (isDstDir)
        {
            if (!dst.EndsWith(file_separator))
            {
                dst += file_separator;
            }
            // Reused per file: the remote basename is appended and then
            // trimmed again after each iteration.
            dstsb = new StringBuilder(dst);
        }
        else
        {
            // Multiple sources require a directory destination.
            if (vsize > 1)
            {
                throw new SftpException(SSH_FX_FAILURE, "Copying multiple files, but destination is missing or a file."
                    );
            }
        }
        for (int j = 0; j < vsize; j++)
        {
            string _src = (string)(v[j]);
            SftpATTRS attr = _stat(_src);
            if (attr.IsDir())
            {
                throw new SftpException(SSH_FX_FAILURE, "not supported to get directory " + _src);
            }
            _dst = null;
            if (isDstDir)
            {
                // Destination is dir + remote basename.
                int i = _src.LastIndexOf('/');
                if (i == -1)
                {
                    dstsb.Append(_src);
                }
                else
                {
                    dstsb.Append(Sharpen.Runtime.Substring(_src, i + 1));
                }
                _dst = dstsb.ToString();
                // Trim the basename again so dstsb holds only the directory.
                dstsb.Delete(dst.Length, _dst.Length);
            }
            else
            {
                _dst = dst;
            }
            FilePath _dstFile = new FilePath(_dst);
            if (mode == RESUME)
            {
                long size_of_src = attr.GetSize();
                long size_of_dst = _dstFile.Length();
                if (size_of_dst > size_of_src)
                {
                    // Local file is larger than the remote: cannot resume.
                    throw new SftpException(SSH_FX_FAILURE, "failed to resume for " + _dst);
                }
                if (size_of_dst == size_of_src)
                {
                    // Already fully downloaded; nothing to do.
                    return;
                }
            }
            if (monitor != null)
            {
                monitor.Init(SftpProgressMonitor.GET, _src, _dst, attr.GetSize());
                if (mode == RESUME)
                {
                    monitor.Count(_dstFile.Length());
                }
            }
            FileOutputStream fos = null;
            // Remember whether the destination pre-existed so the catch
            // block can clean up a file we created ourselves.
            _dstExist = _dstFile.Exists();
            try
            {
                if (mode == OVERWRITE)
                {
                    fos = new FileOutputStream(_dst);
                }
                else
                {
                    fos = new FileOutputStream(_dst, true);
                }
                // append
                // System.err.println("_get: "+_src+", "+_dst);
                _get(_src, fos, monitor, mode, new FilePath(_dst).Length());
            }
            finally
            {
                if (fos != null)
                {
                    fos.Close();
                }
            }
        }
    }
    catch (Exception e)
    {
        // Remove an empty destination file that we created ourselves.
        if (!_dstExist && _dst != null)
        {
            FilePath _dstFile = new FilePath(_dst);
            if (_dstFile.Exists() && _dstFile.Length() == 0)
            {
                _dstFile.Delete();
            }
        }
        // Re-throw as SftpException, preserving the cause when possible.
        if (e is SftpException)
        {
            throw (SftpException)e;
        }
        if (e is Exception)
        {
            throw new SftpException(SSH_FX_FAILURE, string.Empty, (Exception)e);
        }
        throw new SftpException(SSH_FX_FAILURE, string.Empty);
    }
}
/// <summary>
/// Deletes the <see cref="Couchbase.Lite.Database" />: closes it if open,
/// unregisters it from the manager, then removes the SQLite file and the
/// attachments directory.
/// </summary>
/// <exception cref="Couchbase.Lite.CouchbaseLiteException"/>
public void Delete()
{
    if (open)
    {
        if (!Close())
        {
            throw new CouchbaseLiteException("The database was open, and could not be closed", StatusCode.InternalServerError);
        }
    }
    // Unregister from the manager before touching files on disk.
    Manager.ForgetDatabase(this);
    if (!Exists())
    {
        return;
    }
    var file = new FilePath(Path);
    var attachmentsFile = new FilePath(AttachmentStorePath);
    var deleteStatus = file.Delete();
    if (!deleteStatus)
    {
        Log.V(Database.Tag, String.Format("Error deleting the SQLite database file at {0}", file.GetAbsolutePath()));
    }
    //recursively delete attachments path
    var deletedAttachmentsPath = true;
    try
    {
        var dirInfo = new DirectoryInfo(attachmentsFile.GetPath());
        dirInfo.Delete(true);
        //Directory.Delete (attachmentsFile.GetPath (), true);
    }
    catch (Exception ex)
    {
        Log.V(Database.Tag, "Error deleting the attachments directory.", ex);
        deletedAttachmentsPath = false;
    }
    // Attachments deletion is attempted even when the db file failed to
    // delete; both failures are surfaced, db-file failure first.
    if (!deleteStatus)
    {
        throw new CouchbaseLiteException("Was not able to delete the database file", StatusCode.InternalServerError);
    }
    if (!deletedAttachmentsPath)
    {
        throw new CouchbaseLiteException("Was not able to delete the attachments files", StatusCode.InternalServerError);
    }
}
/// <summary>
/// Deletes the <see cref="Couchbase.Lite.Database" />: closes it if open,
/// unregisters it from the manager, then removes the SQLite file, its
/// rollback journal, and the attachment directories.
/// </summary>
/// <exception cref="Couchbase.Lite.CouchbaseLiteException">
/// Thrown if an issue occurs while deleting the <see cref="Couchbase.Lite.Database" /></exception>
public void Delete()
{
    if (_isOpen && !Close())
    {
        throw new CouchbaseLiteException("The database was open, and could not be closed", StatusCode.InternalServerError);
    }
    Manager.ForgetDatabase(this);
    if (!Exists())
    {
        return;
    }
    var file = new FilePath(Path);
    // BUGFIX: the SQLite rollback journal lives next to the database file
    // (Path + "-journal"), not next to the attachment store. The original
    // built it from AttachmentStorePath and so never found or deleted the
    // real journal; the sibling implementation in this file uses Path.
    var fileJournal = new FilePath(Path + "-journal");
    var deleteStatus = file.Delete();
    if (fileJournal.Exists())
    {
        deleteStatus &= fileJournal.Delete();
    }
    //recursively delete attachments path
    var attachmentsFile = new FilePath(AttachmentStorePath);
    var deleteAttachmentStatus = FileDirUtils.DeleteRecursive(attachmentsFile);
    //recursively delete path where attachments stored( see getAttachmentStorePath())
    var lastDotPosition = Path.LastIndexOf('.');
    if (lastDotPosition > 0)
    {
        var attachmentsFileUpFolder = new FilePath(Path.Substring(0, lastDotPosition));
        FileDirUtils.DeleteRecursive(attachmentsFileUpFolder);
    }
    if (!deleteStatus)
    {
        Log.V(Tag, String.Format("Error deleting the SQLite database file at {0}", file.GetAbsolutePath()));
        throw new CouchbaseLiteException("Was not able to delete the database file", StatusCode.InternalServerError);
    }
    if (!deleteAttachmentStatus)
    {
        throw new CouchbaseLiteException("Was not able to delete the attachments files", StatusCode.InternalServerError);
    }
}
/// <summary>
/// Writes every scheduled checkout entry to the work tree and then removes
/// the paths scheduled for deletion, deepest paths first. Each touched
/// path is recorded in modifiedFiles; failed deletes go to failingPaths.
/// </summary>
/// <exception cref="NGit.Errors.NoWorkTreeException"></exception>
/// <exception cref="System.IO.IOException"></exception>
private void Checkout()
{
    ObjectReader reader = db.ObjectDatabase.NewReader();
    try
    {
        foreach (KeyValuePair<string, DirCacheEntry> pending in toBeCheckedOut.EntrySet())
        {
            FilePath target = new FilePath(db.WorkTree, pending.Key);
            CreateDir(target.GetParentFile());
            DirCacheCheckout.CheckoutEntry(db, target, pending.Value, reader);
            modifiedFiles.AddItem(pending.Key);
        }
        // Iterate in reverse so that "folder/file" is deleted before
        // "folder". Otherwise this could result in a failing path because
        // of a non-empty directory, for which delete() would fail.
        for (int idx = toBeDeleted.Count - 1; idx >= 0; idx--)
        {
            string doomed = toBeDeleted[idx];
            FilePath doomedFile = new FilePath(db.WorkTree, doomed);
            if (!doomedFile.Delete())
            {
                failingPaths.Put(doomed, ResolveMerger.MergeFailureReason.COULD_NOT_DELETE);
            }
            modifiedFiles.AddItem(doomed);
        }
    }
    finally
    {
        reader.Release();
    }
}
/// <summary>Delete file or folder</summary>
/// <param name="f">the <code>File</code> to be deleted</param>
/// <param name="options">
/// deletion options: <code>RECURSIVE</code> deletes a whole subtree,
/// <code>RETRY</code> retries a failed delete a few times (useful when the
/// underlying file system forbids deleting files currently being read by
/// another thread), <code>SKIP_MISSING</code> silently ignores a missing
/// file.
/// </param>
/// <exception cref="System.IO.IOException">
/// if deletion of <code>f</code> fails. This may occur when <code>f</code>
/// didn't exist at call time, so multiple concurrent threads racing to
/// delete the same file can observe IOExceptions.
/// </exception>
public static void Delete(FilePath f, int options)
{
    bool skipMissing = (options & SKIP_MISSING) != 0;
    if (skipMissing && !f.Exists())
    {
        return;
    }
    if (f.IsDirectory() && (options & RECURSIVE) != 0)
    {
        // Empty the directory first so the delete below can succeed.
        FilePath[] children = f.ListFiles();
        if (children != null)
        {
            foreach (FilePath child in children)
            {
                Delete(child, options);
            }
        }
    }
    if (f.Delete())
    {
        return;
    }
    if ((options & RETRY) != 0 && f.Exists())
    {
        // Retry up to nine times with a short pause between attempts.
        for (int attempt = 1; attempt < 10; attempt++)
        {
            try
            {
                Sharpen.Thread.Sleep(100);
            }
            catch (Exception)
            {
            }
            // ignore
            if (f.Delete())
            {
                return;
            }
        }
    }
    throw new IOException(MessageFormat.Format(JGitText.Get().deleteFileFailed, f.GetAbsolutePath
        ()));
}
/// <summary>
/// Deletes this database: closes it when open, unregisters it from the
/// manager, then removes the database file and its attachments directory.
/// </summary>
/// <exception cref="Couchbase.Lite.CouchbaseLiteException">
/// when the database cannot be closed or the files cannot be removed.
/// </exception>
public void Delete()
{
    if (open && !Close())
    {
        throw new CouchbaseLiteException("The database was open, and could not be closed"
            , Status.InternalServerError);
    }
    manager.ForgetDatabase(this);
    if (!Exists())
    {
        return;
    }
    FilePath databaseFile = new FilePath(path);
    FilePath attachmentsDir = new FilePath(GetAttachmentStorePath());
    bool databaseDeleted = databaseFile.Delete();
    //recursively delete attachments path
    bool attachmentsDeleted = FileDirUtils.DeleteRecursive(attachmentsDir);
    if (!databaseDeleted)
    {
        throw new CouchbaseLiteException("Was not able to delete the database file", Status
            .InternalServerError);
    }
    if (!attachmentsDeleted)
    {
        throw new CouchbaseLiteException("Was not able to delete the attachments files", 
            Status.InternalServerError);
    }
}
/// <summary>
/// Deletes the <see cref="Couchbase.Lite.Database" />.
/// </summary>
/// <exception cref="Couchbase.Lite.CouchbaseLiteException">
/// Thrown if an issue occurs while deleting the <see cref="Couchbase.Lite.Database" /></exception>
public void Delete()
{
    if (_isOpen && !Close())
    {
        throw new CouchbaseLiteException("The database was open, and could not be closed", StatusCode.InternalServerError);
    }
    // Drop the manager's reference before removing files on disk.
    Manager.ForgetDatabase(this);
    if (!Exists())
    {
        return;
    }
    var file = new FilePath(Path);
    // SQLite sidecar files: rollback journal plus WAL and shared-memory files.
    var fileJournal = new FilePath(Path + "-journal");
    var fileWal = new FilePath(Path + "-wal");
    var fileShm = new FilePath(Path + "-shm");
    var deleteStatus = file.Delete();
    // Sidecars are only counted against the status when they exist.
    if (fileJournal.Exists())
    {
        deleteStatus &= fileJournal.Delete();
    }
    if (fileWal.Exists())
    {
        deleteStatus &= fileWal.Delete();
    }
    if (fileShm.Exists())
    {
        deleteStatus &= fileShm.Delete();
    }
    //recursively delete attachments path
    var attachmentsFile = new FilePath(AttachmentStorePath);
    var deleteAttachmentStatus = FileDirUtils.DeleteRecursive(attachmentsFile);
    if (!deleteStatus)
    {
        Log.W(TAG, "Error deleting the SQLite database file at {0}", file.GetAbsolutePath());
        throw new CouchbaseLiteException("Was not able to delete the database file", StatusCode.InternalServerError);
    }
    if (!deleteAttachmentStatus)
    {
        Log.W(TAG, "Error deleting the attachment files file at {0}", attachmentsFile.GetAbsolutePath());
        throw new CouchbaseLiteException("Was not able to delete the attachments files", StatusCode.InternalServerError);
    }
}
/// <summary>
/// Moves the temporary pack and index into the object database under their
/// content-derived name, optionally taking a .keep lock first, and opens
/// the resulting pack. Any failure along the way cleans up the temporary
/// files and releases the lock.
/// </summary>
/// <param name="lockMessage">
/// message for a .keep lock file, or null when no lock is wanted.
/// </param>
/// <returns>
/// the pack lock when <paramref name="lockMessage"/> was given; null when
/// no lock was requested, the pack was empty, or it already exists.
/// </returns>
/// <exception cref="System.IO.IOException"></exception>
private PackLock RenameAndOpenPack(string lockMessage)
{
    // An empty pack has nothing to keep; discard the temporaries.
    if (!keepEmpty && GetObjectCount() == 0)
    {
        CleanupTemporaryFiles();
        return null;
    }
    // The pack name is the digest of all contained object ids.
    MessageDigest d = Constants.NewMessageDigest();
    byte[] oeBytes = new byte[Constants.OBJECT_ID_LENGTH];
    for (int i = 0; i < GetObjectCount(); i++)
    {
        PackedObjectInfo oe = GetObject(i);
        oe.CopyRawTo(oeBytes, 0);
        d.Update(oeBytes);
    }
    string name = ObjectId.FromRaw(d.Digest()).Name;
    FilePath packDir = new FilePath(db.GetDirectory(), "pack");
    FilePath finalPack = new FilePath(packDir, "pack-" + name + ".pack");
    FilePath finalIdx = new FilePath(packDir, "pack-" + name + ".idx");
    PackLock keep = new PackLock(finalPack, db.GetFS());
    // Double-check Exists() after Mkdir() to tolerate a concurrent creator.
    if (!packDir.Exists() && !packDir.Mkdir() && !packDir.Exists())
    {
        // The objects/pack directory isn't present, and we are unable
        // to create it. There is no way to move this pack in.
        //
        CleanupTemporaryFiles();
        throw new IOException(MessageFormat.Format(JGitText.Get().cannotCreateDirectory,
            packDir.GetAbsolutePath()));
    }
    if (finalPack.Exists())
    {
        // If the pack is already present we should never replace it.
        //
        CleanupTemporaryFiles();
        return null;
    }
    if (lockMessage != null)
    {
        // If we have a reason to create a keep file for this pack, do
        // so, or fail fast and don't put the pack in place.
        //
        try
        {
            if (!keep.Lock(lockMessage))
            {
                throw new IOException(MessageFormat.Format(JGitText.Get().cannotLockPackIn, finalPack
                    ));
            }
        }
        catch (IOException e)
        {
            CleanupTemporaryFiles();
            throw;
        }
    }
    if (!tmpPack.RenameTo(finalPack))
    {
        CleanupTemporaryFiles();
        keep.Unlock();
        throw new IOException(MessageFormat.Format(JGitText.Get().cannotMovePackTo, finalPack
            ));
    }
    if (!tmpIdx.RenameTo(finalIdx))
    {
        // The pack was moved but the index was not: roll the pack back so
        // the object database never holds a pack without its index.
        CleanupTemporaryFiles();
        keep.Unlock();
        if (!finalPack.Delete())
        {
            finalPack.DeleteOnExit();
        }
        throw new IOException(MessageFormat.Format(JGitText.Get().cannotMoveIndexTo, finalIdx
            ));
    }
    try
    {
        newPack = db.OpenPack(finalPack, finalIdx);
    }
    catch (IOException err)
    {
        // Opening failed: remove both files again before rethrowing.
        keep.Unlock();
        if (finalPack.Exists())
        {
            FileUtils.Delete(finalPack);
        }
        if (finalIdx.Exists())
        {
            FileUtils.Delete(finalIdx);
        }
        throw;
    }
    return lockMessage != null ? keep : null;
}