/// <summary>
/// Handles an end-of-session report received from a peer node: verifies the node
/// really owns the task/session it reports on, records the session's usage, and
/// once usage is confirmed by both sides, converts the node's reserved space into
/// really-consumed space.
/// </summary>
/// <param name="s">The session as reported by the remote node (untrusted data).</param>
/// <param name="fromNode">The node that sent the report.</param>
/// <exception cref="NodeSecurityException">
/// Thrown when the reported task/session does not exist or is not owned by the sender.
/// </exception>
void HandleSessionEvent(PeerSession s, PeerNode fromNode) {
	// check that the received Session doesn't claim to own something it doesn't
	Task curTask = TaskScheduler.Instance().GetTask(s.TaskId);
	PeerSession curSess = sessionsList.GetById(s.Id);
	if (curTask == null || curSess == null) {
		throw new NodeSecurityException("Node #" + fromNode.Id + " claims it handles a task (#" + s.TaskId + ") or a session (#" + s.Id + ") which doesn't exist");
	}
	// Ownership check: a Backup session must come from the node running the task,
	// a Store session must come from the storage-side node of the tracked session.
	if ((s.Kind == SessionType.Backup && (curTask.NodeId != fromNode.Id /*|| curTask.BackupSet.HandledBy != fromNode.Id */)) || (s.Kind == SessionType.Store && curSess.ToNode.Id != fromNode.Id) ) {
		throw new NodeSecurityException("Node #" + fromNode.Id + " claims it handles a task (#" + s.TaskId + ") it doesn't own !");
	}
	Logger.Append("HUBRN", Severity.TRIVIA, "Task #" + s.TaskId + " : session #" + s.Id + " between node #" + s.FromNode.Id + " and node #" + s.ToNode.Id + " ended.");
	// Merge the reported usage into the hub-side session record.
	curSess.SetUsage(s);
	if (curSess.IsStorageUsageConfirmed()) {
		// Both endpoints reported consistent usage figures.
		Logger.Append("HUBRN", Severity.TRIVIA, "Task #" + s.TaskId + " : session #" + s.Id + " : Storage space usage has been double-confirmed");
		PeerNode n = NodesList[curSess.ToNode.Id];
		// Lock the node while converting its space accounting so concurrent
		// session reports cannot interleave the two updates.
		lock (n){             // Release reserved space and set really consumed space
			n.ReservedSpace -= curSess.Budget * curTask.BackupSet.MaxChunkSize;
			n.StorageUsed += curSess.RealHandledData;
		}
	}
}
/// <summary>
/// Registers a freshly authenticated node as online, or rejects it when a
/// node with the same id is already connected and active.
/// </summary>
/// <param name="pn">The node that just completed authentication.</param>
private void PutNodeOnline(PeerNode pn) {
	bool alreadyRegistered = NodesList.Contains(pn.Id);
	// Duplicate active connection: refuse this one and tear it down.
	if (alreadyRegistered && NodesList.GetById(pn.Id).Status != NodeStatus.Idle) {
		Logger.Append("HUBRN", Severity.WARNING, "Node #" + pn.Id + " tried to connect but appears to already be online, rejecting.");
		pn.Status = NodeStatus.Rejected;
		pn.SendAuthStatus();
		pn.Disconnect();
		pn.Dispose();
		return;
	}
	// Replace any stale (idle) entry with the new connection object.
	if (alreadyRegistered) {
		NodesList.Remove(pn.Id);
	}
	NodesList.Add(pn);
	// Only an idle node gets its hub-side event handlers (re)wired here.
	if (pn.Status == NodeStatus.Idle) {
		pn.LogEvent += new P2PBackupHub.PeerNode.LogHandler(LogEvent);
		pn.NeedStorageEvent += ChooseStorage;
		pn.SessionEvent += HandleSessionEvent;
	}
	pn.StartListening();
	pn.OfflineEvent += ClearNode;
	pn.Status = NodeStatus.Online;
	Logger.Append("HUBRN", Severity.INFO, "Node #" + pn.Id + " is online (total : " + NodesList.Count + " online nodes)");
}
/// <summary>
/// Callback invoked when a node drops its connection: closes the link and
/// marks the node idle, stamping the disconnect time as its last ping.
/// </summary>
/// <param name="n">The node that went offline.</param>
private void ClearNode(PeerNode n) {
	Utilities.Logger.Append("HUBRN", Severity.INFO, "Node #" + n.Id + " has disconnected.");
	n.Disconnect();
	n.Status = NodeStatus.Idle;
	// Record when we last heard from it, so the offline watcher has a baseline.
	n.LastReceivedPing = DateTime.Now;
}
/// <summary>
/// Sends information about the storing client to the requesting client
/// at recovery time.
/// </summary>
/// <param name="source">The node storing the file.</param>
private void SendSource(PeerNode source) {
	// Build the payload once: the original built the identical string twice
	// (once for SendMessage, once for LogEvent), which could silently diverge.
	string msg = "SRC " + source.IP + " " + source.ListenPort + " " + source.Name + " " + source.PublicKey;
	SendMessage(msg);
	if (LogEvent != null) {
		LogEvent(this.Name, false, msg);
	}
}
/// <summary>
/// Asks the node handling the given task to pause it, and marks the task paused.
/// Does nothing when the handling node is offline.
/// </summary>
/// <param name="taskId">Identifier of the task to pause.</param>
/// <param name="u">The user requesting the pause (currently unused here).</param>
internal void PauseTask(long taskId, User u) {
	Task task = GetTask(taskId);
	PeerNode taskTargetNode = GetHandlingNode(task);
	if (taskTargetNode != null) {
		// Bug fix: the original log message said "cancel" although this method pauses.
		Logger.Append("HUBRN", Severity.INFO, "Asking to node #" + task.BackupSet.NodeId + " to pause task " + task.Id);
		taskTargetNode.ManageTask(task, TaskAction.Pause);
		SetTaskRunningStatus(taskId, TaskRunningStatus.Paused);
	}
}
/// <summary>
/// Disposes and unregisters a node that stopped replying to pings.
/// Both steps are best-effort: the node may already have been cleaned up
/// by a concurrent path, so failures are logged instead of propagated.
/// </summary>
/// <param name="n">The unresponsive node.</param>
private static void HandleOfflineNode(PeerNode n) {
	Logger.Append("WATCHER", Severity.INFO, "Node #" + n.Id + " is offline (didn't reply for more than 5mn)");
	try{
		NodesList[n.Id].Dispose();
	}
	catch (Exception e) {
		// best-effort: the node may already be disposed or removed
		Logger.Append("WATCHER", Severity.TRIVIA, "Could not dispose node #" + n.Id + " : " + e.Message);
	}
	try{
		// Bug fix: the Remove() side effect was hidden inside a Console.WriteLine
		// debug statement; make it explicit and log through the normal channel.
		bool removed = NodesList.Remove(n);
		Logger.Append("WATCHER", Severity.TRIVIA, "Node #" + n.Id + " removed from nodes list : " + removed);
	}
	catch (Exception e) {
		Logger.Append("WATCHER", Severity.TRIVIA, "Could not remove node #" + n.Id + " : " + e.Message);
	}
}
/// <summary>
/// Creates and persists a new (locked, pending-approval) node record for a
/// client connecting from <paramref name="ip"/>, and binds its certificate to it.
/// </summary>
/// <param name="ip">Remote IP address of the connecting client.</param>
/// <param name="cert">Certificate presented by the client; saved with the new node id.</param>
/// <returns>The saved node, with its database-assigned id.</returns>
private PeerNode CreateNewNode(string ip, NodeCertificate cert) {
	var node = new PeerNode();
	try {
		node.Name = Dns.GetHostEntry(ip).HostName;
	}
	catch (Exception) {
		// Robustness fix: reverse DNS is informational only — the original let a
		// lookup failure abort node registration entirely. Fall back to the IP.
		node.Name = ip;
	}
	node.IP = ip;
	node.Locked = true;                 // new nodes stay locked until approved
	node.Status = NodeStatus.New;
	node = new DAL.NodeDAO().Save(node);
	cert.NodeId = node.Id;              // bind the certificate to the freshly assigned id
	cert = new DAL.CertificateDAO().Save(cert);
	Logger.Append("HUBRN", Severity.INFO, "Created new node #" + node.Id + " with cert #" + cert.Id + " for client " + ip);
	return(node);
}
/// <summary>
/// To be used when a node connects. If it is a complete re-connection (with re-login),
/// cancels the tasks previously queued for that node.
/// </summary>
/// <param name='node'>
/// The node that just (re)connected.
/// </param>
internal void Clean(PeerNode node) {
	lock (TasksQueue){
		// iterate backwards so status changes can't disturb positional indexes
		for (int i = TasksQueue.Count - 1; i >= 0; i--) {
			Task queued = TasksQueue.GetByIndex(i);   // single positional lookup per iteration
			// NOTE(review): this compares a task's UserId against a node Id — confirm intended.
			if (queued.UserId == node.Id) {
				// Bug fix: the original mixed TasksQueue[i] (key-based indexer) with
				// GetByIndex(i) (positional) on the same collection, which can address
				// a different task than the one being cancelled.
				queued.AddLogEntry(new TaskLogEntry(queued.Id) { Code = 808 });
				queued.RunStatus = TaskRunningStatus.Cancelled;
			}
		}
	}
}
/// <summary>
/// Extends an existing storage session with an additional chunk budget,
/// provided the target node still has enough free space for it.
/// Silently does nothing when the session is unknown.
/// </summary>
/// <param name="task">The task the session belongs to (used for chunk sizing).</param>
/// <param name="sessionId">Identifier of the session to renew.</param>
/// <param name="budget">Number of additional chunks requested.</param>
/// <exception cref="Exception">Thrown when the target node lacks the space for the renewal.</exception>
private void RenewStorageSession(Task task, long sessionId, int budget) {
	PeerSession existingSession = sessionsList.GetById(sessionId);
	if (existingSession != null) {
		PeerNode n = NodesList[existingSession.ToNodeId];
		// Bug fix: check capacity BEFORE renewing. The original renewed first and
		// then threw, leaving the session with a budget it could never use.
		if ((n.StorageSize - n.StorageUsed - n.ReservedSpace) > task.BackupSet.MaxChunkSize * budget) {
			existingSession.RenewBudget(budget);
			CreateStorageSession(existingSession, task, false);
		}
		else {
			throw new Exception("Cannot Renew session #" + sessionId);
		}
	}
}
/// <summary>
/// Launches a long-running background task that asks the owning node of every
/// expired backup to expire it. Returns immediately; the expiration work happens
/// asynchronously and any failure inside it is logged, not propagated.
/// </summary>
/// <param name="task">The housekeeping task; its OriginalSize/TotalItems are
/// updated from the background thread to reflect the cleanup's scope.</param>
/// <returns>Always true (the background task was started).</returns>
private bool StartHouseKeeping(Task task) {
	var cleanThread = System.Threading.Tasks.Task.Factory.StartNew(() => {
		List <P2PBackup.Common.Task> expiredBackups = new DAL.TaskDAO().GetExpiredBackups();
		// NOTE(review): these fields are written from the background thread while the
		// task object is visible elsewhere — confirm readers tolerate that.
		task.OriginalSize = expiredBackups.Sum(o => o.FinalSize);
		task.TotalItems = expiredBackups.Count;
		Logger.Append("HUBRN", Severity.INFO, "Started cleaning " + expiredBackups.Count + " expired backups");
		//int done = 0;
		try{
			foreach (P2PBackup.Common.Task nodeTask in expiredBackups) {
				if (nodeTask == null) {
					continue;
				}
				// Mark the backup as expiring before contacting its node.
				nodeTask.RunStatus = TaskRunningStatus.Expiring;
				new DAL.TaskDAO().Update(nodeTask);
				PeerNode node = Hub.NodesList.GetById(nodeTask.BackupSet.NodeId);
				if (node != null) {
					Logger.Append("HUBRN", Severity.INFO, "Asking node #" + node.Id + " (" + node.Name + ") to expire task " + nodeTask.Id);
					//node.SendMessage("EXP "+task.Id+" "+nodeTask.Id+" "+nodeTask.IndexName+" "+nodeTask.IndexSum);
					node.ManageTask(nodeTask, TaskAction.Expire);
				}
				else {
					// Offline nodes are skipped; the backup stays in Expiring state.
					Logger.Append("HUBRN", Severity.WARNING, "Can't expire task " + nodeTask.Id + " of node #" + nodeTask.BackupSet.NodeId + ", node is offline");
				}
				//done++;
			}
		}
		catch (Exception e) {
			Console.WriteLine("StartHouseKeeping() : " + e.Message + " ---- " + e.StackTrace);
		}
	}, System.Threading.Tasks.TaskCreationOptions.LongRunning);
	/*cleanThread.ContinueWith(o=>{
	 * UpdateTask(task.Id, task.OriginalSize, task.FinalSize, "", "", new List<int>(), 100);
	 * }, System.Threading.Tasks.TaskContinuationOptions.OnlyOnRanToCompletion);*/
	return(true);
}
/// <summary>
/// Creates one backup storage session per target node for the current task.
/// When a destination fails with an IOException, recursively asks ChooseStorage
/// for one alternate destination; other errors are logged and that node skipped.
/// </summary>
/// <param name="askingNode">The client node that requested storage.</param>
/// <param name="targetNodes">Candidate storage destinations.</param>
/// <param name="currentTask">The task the sessions belong to.</param>
/// <param name="budget">Chunk budget granted to each created session.</param>
/// <param name="flags">Data processing flags to apply to each session.</param>
/// <param name="isIndexStorage">True when these sessions store the backup index.</param>
private void CreateStorageSessions(PeerNode askingNode, List <PeerNode> targetNodes, Task currentTask, int budget, DataProcessingFlags flags, bool isIndexStorage) {
	foreach (PeerNode chunkDestNode in targetNodes) {
		PeerSession targetSess = null;
		try{
			// NOTE(review): id derived from current list size — not unique if sessions
			// are ever removed or created concurrently; confirm this is acceptable.
			int sessId = sessionsList.Count + 1;
			targetSess = new PeerSession { FromNode = askingNode, ToNode = chunkDestNode, Id = sessId, //sessionId,
				                       Flags = flags, //currentTask.BackupSet.DataFlags,
				                       TaskId = currentTask.Id, Kind = SessionType.Backup, Secret = currentTask.EncryptionKey };
			targetSess.RenewBudget(budget);
			CreateStorageSession(targetSess, currentTask, isIndexStorage);
			//if (SessionChanged != null && existingSession == null) SessionChanged(true, SessionType.Backup, targetSess.Id, this, chunkDestNode, currentTask.Id, budget);
			// 3 - we add the storage node(s) to task
			currentTask.AddStorageNode(chunkDestNode);
		}
		catch (IOException ioe) {
			// Destination unreachable: look for an alternate destination.
			// change back destination's available space
			//chunkDestNode.Available = chunkDestNode.Available + currentTask.BackupSet.MaxChunkSize*budget;
			Utilities.Logger.Append("HUBRN", Severity.ERROR, "dest " + chunkDestNode.Name + " not available (" + ioe.Message + "), looking for an alternate one");
			// try another node, recursive call with a sentinel session (Id = -1)
			//ChooseStorage(askingNode.Id, s, 1, false, true);
			ChooseStorage(askingNode.Id, new PeerSession { TaskId = currentTask.Id, Id = -1 }, 1, false, true);
		}
		catch (Exception ex) {
			// Any other failure: log and move on to the next destination.
			Utilities.Logger.Append("HUBRN", Severity.ERROR, "dest " + chunkDestNode.Name + " : " + ex.Message);
		}
	}
}
/// <summary>
/// Persists a node's approval/lock flag, then applies it to the live node:
/// unlocking moves an online node to Idle and notifies it; locking disconnects
/// it immediately. No-op on the live side when the node is not connected.
/// </summary>
/// <param name="nodeId">Identifier of the node to approve or lock.</param>
/// <param name="lockStatus">False to approve (unlock), true to lock.</param>
public void ApproveNode(uint nodeId, bool lockStatus) {
	//(new DBHandle()).ApproveNode(nodeId, lockStatus);
	new DAL.NodeDAO().Approve(nodeId, lockStatus);
	// Single lookup serves both branches (the original duplicated GetById).
	PeerNode n = Hub.NodesList.GetById(nodeId);
	if (lockStatus == false) {
		if (n != null) {
			n.Status = NodeStatus.Idle;                 // TODO : call putonline()
			n.SendAuthStatus();
		}
	}
	else {  // lock immediately : an online node will be disconnected
		if (n != null && n.Status != NodeStatus.Offline) {
			n.Disconnect();
		}
	}
}
/// <summary>
/// Reacts to a task reaching a terminal state (Cancelled or Done): persists the
/// storage usage of every node the task used, then removes all of its sessions.
/// </summary>
/// <param name="t">The task whose state changed.</param>
/// <param name="n">The node that reported the change (unused here).</param>
private static void HandleTaskEvent(Task t, PeerNode n) {
	bool isTerminal = t.RunStatus == TaskRunningStatus.Cancelled || t.RunStatus == TaskRunningStatus.Done;
	if (!isTerminal) {
		return;
	}
	// save nodes used space
	foreach (P2PBackup.Common.Node storageNode in t.StorageNodes) {
		new DAL.NodeDAO().UpdateStorageSpace(storageNode);
	}
	// Collect first, remove second: we must not mutate sessionsList while iterating it.
	var obsoleteSessions = new List <PeerSession>();
	foreach (PeerSession session in sessionsList) {
		if (session.TaskId == t.Id) {
			obsoleteSessions.Add(session);
		}
	}
	foreach (PeerSession session in obsoleteSessions) {
		AddRemoveSession(session, false);
	}
}
/// <summary>
/// Cancels a task. A task that has not started yet is cancelled immediately;
/// a running task's node is asked to cancel and the task moves to Cancelling.
/// When the handling node is offline, only the status change is applied.
/// </summary>
/// <param name="taskId">Identifier of the task to cancel.</param>
/// <param name="u">The user requesting the cancellation (currently unused here).</param>
internal void CancelTask(long taskId, User u) {
	Logger.Append("HUBRN", Severity.INFO, "Received cancel request for task " + taskId);
	Task task = GetTask(taskId);
	// Not started yet: nothing to tell any node.
	if (task.RunStatus <= TaskRunningStatus.PendingStart) {
		SetTaskRunningStatus(taskId, TaskRunningStatus.Cancelled);
		return;
	}
	PeerNode taskTargetNode = GetHandlingNode(task);
	if (taskTargetNode != null) {
		// Fix: the original logged "Asking to node..." unconditionally, even when
		// the node was offline and nothing was actually sent.
		Logger.Append("HUBRN", Severity.INFO, "Asking to node #" + task.BackupSet.NodeId + " to cancel task " + task.Id);
		taskTargetNode.ManageTask(task, TaskAction.Cancel);
		taskTargetNode.Status = NodeStatus.Idle;
	}
	else {
		Logger.Append("HUBRN", Severity.WARNING, "Could not send cancel message to node #" + task.BackupSet.NodeId + ": node is offline");
	}
	SetTaskRunningStatus(taskId, TaskRunningStatus.Cancelling);
}
/// <summary>
/// Async accept callback: wraps the incoming socket in an SslStream, performs the
/// server-side SSL handshake (with client-certificate validation), authenticates
/// the node and puts it online. Runs on a worker task; faults are logged by the
/// OnlyOnFaulted continuation.
/// </summary>
/// <param name="ar">Async result carrying the accepted socket in its StateObject.</param>
private void ClientSSLAccept(IAsyncResult ar) {
	StateObject so = (StateObject)ar.AsyncState;
	Socket client = so.workSocket;
	System.Threading.Tasks.Task sslT = System.Threading.Tasks.Task.Factory.StartNew(() => {
		NetworkStream clientStream = new NetworkStream(client);
		SslStream sslStream = new SslStream(clientStream, false, ClientCertAuthenticate);
		Logger.Append("HUBRN", Severity.TRIVIA, "SSL Connection attempt from " + client.RemoteEndPoint.ToString() + " : beginning SSL authentication");
		try{
			// NOTE(review): SslProtocols.Default permits legacy protocol versions;
			// consider pinning to TLS 1.2+ (kept as-is here for client compatibility).
			sslStream.AuthenticateAsServer(Certificate, true, SslProtocols.Default, false);
		}
		catch (AuthenticationException) {
			Logger.Append("HUBRN", Severity.INFO, "Client node " + client.RemoteEndPoint.ToString() + " tried to connect without certificate");
			// Bug fix: the original fell through and kept using the stream after a
			// failed handshake. Abort this connection instead.
			sslStream.Dispose();
			return;
		}
		Logger.Append("HUBRN", Severity.TRIVIA, "Connection attempt from " + client.RemoteEndPoint.ToString() + " : SSL authentication done.");
		Utils.DisplayCertificateInformation(sslStream.RemoteCertificate);
		PeerNode pn = AuthenticateNode(sslStream, client);
		if (pn == null) {
			return;   // application-level authentication failed
		}
		PutNodeOnline(pn);
	});
	// Surface any unexpected error from the worker task in the log.
	sslT.ContinueWith(t => {
		var aggException = t.Exception.Flatten();
		foreach (var e in aggException.InnerExceptions) {
			Logger.Append("HUBRN", Severity.ERROR, "Unexpected error (" + e.Message + ") : " + e.ToString());
		}
	}, System.Threading.Tasks.TaskContinuationOptions.OnlyOnFaulted);
}
/// <summary>
/// Asks the given online node for its mounted drives.
/// Throws NullReferenceException when the node id is unknown (GetById returns null).
/// </summary>
/// <param name="nodeId">Identifier of the node to query.</param>
/// <returns>The node's drive listing.</returns>
public string GetDrives(uint nodeId) {
	// Resolve the live node proxy and forward the request unchanged.
	return Hub.NodesList.GetById(nodeId).GetDrives();
}
/// <summary>
/// Dispatches a generic (non task-bound) protocol message received from this node.
/// The payload is a space-separated field list; message.Action selects the handler.
/// Any exception raised by a handler is caught and logged so a malformed message
/// cannot kill the node's receive loop.
/// </summary>
/// <param name="message">The decoded protocol message (action + raw data).</param>
private void HandleGenericMessage(NodeMessage message) {
	char[] separator = { ' ' };
	string[] decoded = message.Data.Split(separator);
	try{
		switch (message.Action) {
		case "BROWSE":
		case "BROWSEINDX":
			// response to browse FS path or index request. Not handled here since it's synchronous
			Logger.Append("HUBRN", Severity.TRIVIA, "browse node result : got " + message.Data);
			break;
		case "BROWSESPECIALOBJECTS":
			// lists special backupable application objects
			break;
		case "BROWSEDRIVES":
			// response to mounted filesystems request
			Logger.Append("HUBRN", Severity.TRIVIA, "getdrives result : got " + message.Data);
			break;
		case "CONFIGURATION":
			// request for configuration
			if (verified) {
				SendNodeConfiguration();
			}
			break;
		case "EMERGENCY":
			// unhandled error reported by the node: rebuild the free-form message
			string errorMsg = "";
			for (int i = 1; i < decoded.Length; i++) {
				errorMsg += decoded[i] + " ";
			}
			// fixed log typo: "followinf" -> "following"
			Logger.Append("HUBRN", Severity.ERROR, "Node #" + this.Id + " has crashed due to the following unrecoverable error : " + errorMsg);
			break;
		case "STORE":
			// Storage node confirms it has started the storage session
			// and is now waiting for client node to connect : do nothing.
			break;
		case "IDLE":
			// Node informs it is going into 'idle' state.
			break;
		case "EXP": // expire
			// Bug fix: every sub-command below reads decoded[3]; the original guard
			// (Length >= 3) allowed an IndexOutOfRangeException on 3-field messages.
			if (verified && decoded.Length >= 4) {
				Task task = TaskScheduler.Instance().GetTask(long.Parse(decoded[1]));
				// verify if cleaning task exists (for security reasons)
				if (task == null) {
					Logger.Append("CLEAN", Severity.ERROR, "Suspect clean request from node " + this.Id);
					return;
				}
				if (decoded.Length == 6 && decoded[3] == "DEL") { // request to delete stored chunk (length check first)
					Logger.Append("CLEAN", Severity.DEBUG, "Task " + decoded[2] + " asks to delete chunk " + decoded[4] + " from node " + decoded[5]);
					int[] storageNodes = Array.ConvertAll(((string)decoded[5]).Split(new char[] { ',' }, StringSplitOptions.RemoveEmptyEntries), s => int.Parse(s));
					foreach (uint nodeId in storageNodes) {
						PeerNode storageNode = Hub.NodesList.GetById(nodeId);
						if (storageNode != null) {
							//ReceiveDelete(int nodeId, long taskId, string cIp, string cN, string cK, string chunkName)
							storageNode.SendMessage("DEL " + this.Id + " " + decoded[2] + " " + this.IP + " " + this.Name + " " + this.PublicKey + " " + decoded[4]);
						}
					}
				}
				else if (int.Parse(decoded[3]) == 810) { // unable to find or read backup index
					Logger.Append("CLEAN", Severity.WARNING, "Deleting damaged task " + decoded[2]);
					// NOTE(review): fields [0]/[1] used as TaskId/Code look shifted vs the
					// DEL branch's layout — confirm against the client-side sender.
					TaskScheduler.AddTaskLogEntry(message.TaskId, new TaskLogEntry { TaskId = long.Parse(decoded[0]), Code = int.Parse(decoded[1]), Message1 = decoded[2], Message2 = decoded[3] });
				}
				else if (int.Parse(decoded[3]) == 710) { // cleaning done
					new DAL.TaskDAO().UpdateStatus(this.Id, long.Parse(decoded[2]), TaskRunningStatus.Expired);
					Logger.Append("CLEAN", Severity.INFO, "Task " + decoded[2] + " cleaned.");
				}
				else {
					throw new P2PBackup.Common.ProtocolViolationException("Incorrect number of parameters for message " + message);
				}
				// Bug fix: the original "1 / task.TotalItems * 100" is integer math and
				// evaluated to 0 for any TotalItems > 1, so Percent never advanced.
				task.Percent += 100 / task.TotalItems;
			}
			break;
		case "LFI":
			// last full-backup information request
			if (decoded.Length == 2 && verified) {
				GetLastFullBackupInformation(int.Parse(decoded[1]));
			}
			break;
		case "RIX":
			if ((decoded.Length == 2) && (verified)) {
				GetIndexSource(decoded[1]);
			}
			break;
		case "TSK":
			//"TSK "+taskId+" "+code+" "+data+" "+additionalMessage
			break;
		case "UNKNOWN": // unknown task
			if (decoded.Length == 2 && verified) {
				TaskScheduler.Instance().SetTaskStatus(long.Parse(decoded[1]), TaskStatus.Error);
				TaskScheduler.Instance().SetTaskRunningStatus(long.Parse(decoded[1]), TaskRunningStatus.Cancelled);
				Logger.Append("HUBRN", Severity.ERROR, "Task " + decoded[1] + " is unknown to node #" + this.Id + ", node was probably restarted.");
			}
			break;
		case "VMS": // send hosted VMs
			// NOTE(review): handler disabled, so "VMS" currently falls through to "701"
			// and logs a misleading message — confirm whether this is intended.
			/*if(decoded.Length >1 && verified){
			 * string xml = "";
			 * for(int i=1; i<decoded.Length; i++) xml += decoded[i]+" ";
			 * Logger.Append("HUBRN", Severity.TRIVIA, "getvms result : got "+xml);
			 * vms = xml;
			 * }
			 * break;*/
		case "701":
			Logger.Append("HUBRN", Severity.WARNING, "Backupset " + decoded[1] + ", path \"" + decoded[2] + "\" does not exist");
			//dbhandle.AddBackupSetError(int.Parse(decoded[1]), type.Trim(), decoded[2]);
			//TaskScheduler.Instance().GetTask(long.Parse(decoded[1])).AddLogEntry(701, decoded[2], null);
			Logger.Append("HUBRN", Severity.ERROR, "TODO!!! move this message to Task messages, not node messages");
			break;
		case "702":
			Logger.Append("HUBRN", Severity.WARNING, "Backupset " + decoded[1] + ", path \"" + decoded[2] + "\" acces denied");
			Logger.Append("HUBRN", Severity.ERROR, "TODO!!! move this message to Task messages, not node messages");
			//dbhandle.AddBackupSetError(int.Parse(decoded[1]), type.Trim(), decoded[2]);
			break;
		case "800":
			Logger.Append("HUBRN", Severity.WARNING, "Backupset " + decoded[1] + " cannot be processed by client : too many jobs. |TODO| maintain job on queue");
			break;
		default:
			throw new P2PBackup.Common.ProtocolViolationException(message);
		}
	}
	catch (Exception ex) {
		// Defensive: malformed payloads (bad int fields, short arrays) must not crash the hub.
		Logger.Append("CLIENT", Severity.WARNING, "Node #" + this.Id + " : " + ex.ToString());
	}
}
/// <summary>
/// Asks the given online node for its special backupable application objects.
/// Should be relevant only for NT systems (VSS providers).
/// Throws NullReferenceException when the node id is unknown (GetById returns null).
/// </summary>
/// <param name="nodeId">Identifier of the node to query.</param>
/// <returns>The node's special-objects listing.</returns>
public string GetSpecialObjects(uint nodeId) {
	// Resolve the live node proxy and forward the request unchanged.
	return Hub.NodesList.GetById(nodeId).GetSpecialObjects();
}
/// <summary>
/// Asks the given online node to browse a backup index.
/// Throws NullReferenceException when the node id is unknown (GetById returns null).
/// </summary>
/// <param name="nodeId">Identifier of the node to query.</param>
/// <param name="taskId">Task whose index is browsed.</param>
/// <param name="rootFS">Root filesystem to browse from.</param>
/// <param name="parentId">Parent entry id inside the index tree.</param>
/// <param name="filter">Entry-name filter.</param>
/// <returns>The matching index subtree.</returns>
public BrowseNode BrowseIndex(uint nodeId, long taskId, string rootFS, long parentId, string filter) {
	// Resolve the live node proxy and forward the request unchanged.
	return Hub.NodesList.GetById(nodeId).BrowseIndex(taskId, rootFS, parentId, filter);
}
/// <summary>
/// Asks the given online node to browse a filesystem path.
/// Throws NullReferenceException when the node id is unknown (GetById returns null).
/// </summary>
/// <param name="nodeId">Identifier of the node to query.</param>
/// <param name="path">Filesystem path to browse on the node.</param>
/// <returns>The node's listing for that path.</returns>
public BrowseNode Browse(uint nodeId, string path) {
	// Resolve the live node proxy and forward the request unchanged.
	return Hub.NodesList.GetById(nodeId).Browse(path);
}
/// <summary>
/// Gets destinations for storing data chunks. Destinations count can be 1 to R (redundancy level)
/// or 1 to p (p = parallelism). First tries to renew the session carried in <paramref name="s"/>;
/// on failure (or when asked for an alternate destination) computes fresh destinations and opens
/// new sessions. When no storage is available at all, the task is put in error and cancelled.
/// </summary>
/// <param name="nodeId">Id of the client node requesting storage.</param>
/// <param name="s">Session hint: Id &gt; 0 means "renew this session"; Id = -1 is a sentinel
/// used by the alternate-destination path. Also carries the TaskId.</param>
/// <param name="parallelism">Maximum number of parallel destinations to open.</param>
/// <param name="isIndex">True when storing the backup index: forces budget = 1 and one destination,
/// so the client confirms the transfer and the index location can be tracked in database.</param>
/// <param name="isAlternateRequest">True when a previous destination failed and a replacement
/// is needed; the failed session is removed and its nodes excluded.</param>
//private void ChooseStorage(int nodeId, long taskId, long sessionId, int parallelism, bool isIndex, bool isAlternateRequest){
private void ChooseStorage(uint nodeId, PeerSession s, int parallelism, bool isIndex, bool isAlternateRequest) {
	Task currentTask = TaskScheduler.Instance().GetTask(s.TaskId);
	// Fast path: renew the existing session instead of allocating new destinations.
	try{
		if (s.Id > 0 && !isAlternateRequest) {
			RenewStorageSession(currentTask, s.Id, 20);
			return;
		}
	}
	catch (Exception e) {
		// Renewal failed (e.g. destination out of space): fall through and
		// treat this as a request for an alternate destination.
		Logger.Append("HUBRN", Severity.WARNING, "Could not renew storage session #" + s.Id + ", will try to obtain a new one. Error : " + e.Message);
		isAlternateRequest = true;
	}
	PeerNode askingNode = NodesList[nodeId];
	Console.WriteLine("choosedestinations : askingNode = #" + askingNode.Id);
	List <PeerNode> dests = new List <PeerNode>();
	List <P2PBackup.Common.Node> excludedDests = new List <P2PBackup.Common.Node>();
	// Never pick the requester itself as its own storage destination.
	excludedDests.Add(askingNode);
	int budget = currentTask.StorageBudget;
	if (budget == 0) {
		budget = 20;             // default value if task has never run before.
	}
	if (isIndex) {         // only return a session with a budget of 1
		Console.WriteLine("ChooseDestinations() : requested index storage");
		budget = 1;
		parallelism = 1;
	}
	if (isAlternateRequest) {         // request for new destination after failure with an already existing session
		currentTask.AddLogEntry(new TaskLogEntry { TaskId = s.TaskId, Code = 601, Message1 = "" + s.Id });
		AddRemoveSession(s.Id, false);
		// Exclude every node the task already uses, to get a genuinely new one.
		excludedDests.AddRange(currentTask.StorageNodes);
		parallelism = 1;
	}
	if (dests.Count == 0) {         // if not asked to renew already existing session (thus existing destination)
		dests = CalculateChunkDestinations(s.TaskId, parallelism, budget, excludedDests);
	}
	if (dests.Count > 0) {
		// for each storage node, budget = budget/nbnodes+1
		int perNodeBudget = budget / dests.Count + 1;
		CreateStorageSessions(askingNode, dests, currentTask, perNodeBudget, currentTask.BackupSet.DataFlags, isIndex);
	}
	else {
		// No storage available anywhere: fail the task and tell the client to stop.
		Utilities.Logger.Append("HUBRN", Severity.WARNING, "Task #" + s.TaskId + " : No storage space available for request of client #" + askingNode.Id + " (" + askingNode.Name + "), <TODO> handle that and report");
		TaskScheduler.AddTaskLogEntry(s.TaskId, new TaskLogEntry { TaskId = s.TaskId, Code = 806 });
		TaskScheduler.Instance().SetTaskStatus(s.TaskId, TaskStatus.Error);
		TaskScheduler.Instance().SetTaskRunningStatus(s.TaskId, TaskRunningStatus.Cancelled);
		askingNode.ManageTask(currentTask, TaskAction.Cancel);
	}
}
/// <summary>
/// Starts a scheduled task on its handling node. HouseKeeping tasks are delegated
/// to StartHouseKeeping. For backups, computes the storage budget from the last
/// reference task and downgrades non-full backups to Full when no reference exists.
/// </summary>
/// <param name="task">The task to start.</param>
/// <returns>True when the start order was sent; false when the node was only
/// woken up or the send failed.</returns>
/// <exception cref="UnreachableNodeException">The handling node is offline.</exception>
/// <exception cref="OverQuotaException">The handling node exceeded its quota.</exception>
private bool StartTask(Task task) {
	bool done = false;
	if (task.Operation == TaskOperation.HouseKeeping) {
		return(StartHouseKeeping(task));
	}
	PeerNode taskTargetNode = GetHandlingNode(task);
	// temp : for debugging udp wakeup
	/*if(taskTargetNode == null){
	 * NodesMonitor.Instance.WakeUp(new DAL.NodeDAO().Get(task.NodeId));
	 * Thread.Sleep(5000);
	 * taskTargetNode = GetHandlingNode(task.Id);
	 * }*/
	if (taskTargetNode == null) {
		// Bug fix: the original concatenated the null reference itself into the
		// message ("Node #" + null), producing "Node # is offline..."; report the id.
		throw new UnreachableNodeException("Node #" + task.BackupSet.NodeId + " is offline or unreachable");
	}
	else if (taskTargetNode.Quota > 0 && taskTargetNode.UsedQuota >= taskTargetNode.Quota) {
		throw new OverQuotaException(taskTargetNode.UsedQuota, taskTargetNode.Quota);
	}
	else if (taskTargetNode.Status == NodeStatus.Idle) {
		// Idle node: wake it up first; the scheduler will retry the start later.
		Logger.Append("HUBRN", Severity.INFO, "Node #" + taskTargetNode.Id + " is idle, telling him to wakeup and prepare for task #" + task.Id);
		NodeWakeUpNeeded(taskTargetNode);
		return(false);
	}
	Logger.Append("HUBRN", Severity.INFO, "Starting Task " + task.Id + " : type " + task.Type + " ( level " + task.Level /* .BackupSet.ScheduleTimes[0].Level*/ + "), backup Set " + task.BackupSet.Id + " for client #" + task.BackupSet.NodeId + " (handled by node #" + task.BackupSet.HandledBy + ")");
	try{
		// Differential backups compare against the last Full; incrementals use the default reference.
		BackupLevel referenceLevel = BackupLevel.Default;
		if (task.Level == BackupLevel.Differential) {
			referenceLevel = BackupLevel.Full;
		}
		P2PBackup.Common.Task referenceTask = new DAL.TaskDAO().GetLastReferenceTask(task.BackupSet.Id, referenceLevel);
		if (referenceTask != null) {
			// Budget = expected chunk count of the reference backup, plus slack.
			task.StorageBudget = (int)((referenceTask.OriginalSize / task.BackupSet.MaxChunkSize) + 2);
			// Debug trace moved from Console to the regular logger.
			Logger.Append("HUBRN", Severity.TRIVIA, "ref task=" + referenceTask.Id + ", oSize=" + referenceTask.OriginalSize / 1024 / 1024 + "MB, maxchunksize=" + task.BackupSet.MaxChunkSize / 1024 / 1024 + "MB, %%=" + referenceTask.OriginalSize / task.BackupSet.MaxChunkSize + ", calculated budget=" + task.StorageBudget);
			task.ParentTask = referenceTask;
		}
		if (task.Level != BackupLevel.Full) {
			if (referenceTask == null || referenceTask.Id <= 0) {             // no ref backup found, doing full
				Logger.Append("HUBRN", Severity.INFO, "No reference backup found for task " + task.Id + ", performing FULL backup.");
				task.Level = BackupLevel.Full;
			}
			else {
				task.ParentTrackingId = referenceTask.Id;
				Logger.Append("HUBRN", Severity.INFO, "Task " + task.Id + " is " + task.Level + "." + " Using reference task " + referenceTask.Id + " (" + referenceTask.StartDate + " - " + referenceTask.EndDate + ")");
			}
		}
		taskTargetNode.ManageTask(task, TaskAction.Start);
		task.RunStatus = TaskRunningStatus.Started;
		//n.Status = NodeStatus.Backuping;
		done = true;
	}
	catch (Exception e) {
		done = false;
		Logger.Append("HUBRN", Severity.ERROR, "Could not send task " + task.Id + " to node #" + taskTargetNode.Id + " : " + e.ToString() /*+"---Stacktrace:"+e.StackTrace+" inner msg:"+e.InnerException.Message*/);
		//n.Status = NodeStatus.Error;
	}
	return(done);
}