/// <summary>
/// Gets the user storing the file at recovery time
/// </summary>
/// <param name="nodeName">username of the storing user</param>
/*private void GetSource(string nodeName){
 *      PeerNode source = null;
 *      action = ActionType.Restore;
 *
 *      if (GetClientEvent != null)
 *              source = GetClientEvent(nodeName);
 *      if (source != null){
 *              SendMessage("LET" + " " + this.IP + " " + this.Name + " " + this.PubKey);
 *              if(LogEvent != null) LogEvent(source.Name, false, "LET" + " " + this.IP + " " + this.Name + " " + this.PubKey);
 *      }
 *      else{
 *              SendMessage("405");
 *              if(LogEvent != null) LogEvent(nodeName, false, "405");
 *      }
 * }*/

/// <summary>
/// Sends a session of the requested type to this peer node.
/// </summary>
internal void SendSession(PeerSession session, SessionType type, bool isIndexSession)
{
    string action = string.Empty;
    MessageContext ctx = MessageContext.Generic;
    session.Kind = type;
    bool synchronous = false;
    if (type == SessionType.Backup)
    {
        ctx = MessageContext.Task;
        action = "ASKSTORAGE";
    }
    else if (type == SessionType.Store)
    {
        action = "STORE";
        // Wait for confirmation that the receive/store session has been started on the storage node.
        // This prevents the client node from connecting to a storage node that is not yet ready.
        synchronous = true;
    }
    SendMessage(new NodeMessage
    {
        Context = ctx,
        TaskId = session.TaskId,
        Action = action,
        Data = session.ToJson<PeerSession>(),
        Data2 = isIndexSession.ToString(),
        Synchroneous = synchronous
    });
}
/// <summary>
/// Creates a session for receiving a backup
/// </summary>
/// <param name="s">the PeerSession describing the incoming transfer</param>
private void StartStoreSession(PeerSession s)
{
    this.Status = NodeStatus.Storing;
    foreach (Session listeningSess in sessions)
    {
        if (listeningSess.Kind == SessionType.Store
            && listeningSess.FromNode.Id == s.FromNode.Id
            && listeningSess.Id == s.Id)
        {
            Logger.Append(Severity.DEBUG, "Reusing existing session " + s.Id + " with node #" + s.FromNode.Id
                + ", " + s.FromNode.Name + " (" + s.FromNode.IP + ")");
            listeningSess.RenewBudget(s.Budget);
            return;
        }
    }
    Logger.Append(Severity.DEBUG, "Creating new session to receive data from node #" + s.FromNode.Id
        + ", " + s.FromNode.Name + " (" + s.FromNode.IP + ")");
    Session client1 = new Session(s, cert);
    client1.SessionRemoved += new Node.Session.RemoveSessionHandler(this.RemoveSession);
    client1.FileReceivedEvent += new Node.Session.FileReceivedHandler(this.FileReceived);
    client1.UpdateStorageEvent += this.SendSessionUpdate;
    client1.RenewBudget(s.Budget);
    sessions.Add(client1);
    try
    {
        StartListeningForClient(client1);
    }
    catch (Exception e)
    {
        Logger.Append(Severity.WARNING, "Could not accept session with client node #" + s.FromNode.Id + " : " + e.Message);
        RemoveSession(client1);
    }
}
/*private Session GetCleanSession(int taskId, int nodeId, string nodeIp, int port, string cN, string cK){
 *      Session cleanSession = null;
 *      foreach(Session sess in sessionsConnect)
 *              if(sess.Type == SessionType.Clean && sess.ClientId == nodeId){
 *                      Logger.Append(Severity.DEBUG, "Already have an open cleaning session with storage node "+cN+" ("+nodeIp+":"+port+"), reusing it.");
 *                      cleanSession = sess;
 *              }
 *      if(cleanSession == null){
 *              cleanSession = new Session(SessionType.Backup, nodeId, nodeIp, port, cN, cK, this.keyPairCrypto);
 *              // with threadpool, ALT doesn't work (no exception propagation between threads)
 *              ThreadPool.QueueUserWorkItem(cleanSession.ConnectToStorageNode);
 *              //storageSession.ConnectToStorageNode(null);
 *              cleanSession.RemoveSessionEvent += new Node.Session.RemoveSessionHandler(this.RemoveSession);
 *              //storageSession.FileSentEvent += new Node.Session.FileSentHandler(this.ChunkSent); //, bsId, chunkName, nodeId);
 *              cleanSession.FileReceivedEvent += new Node.Session.FileReceivedHandler(this.FileReceived);
 *              sessionsConnect.Add(cleanSession);
 *              cleanSession.AuthenticatedAndReadyEvent.WaitOne();
 *      }
 *      return cleanSession;
 * }*/

/// <summary>
/// Gets an open backup session with the target storage node, creating and connecting one if none exists.
/// </summary>
private Session GetStorageSession(PeerSession s)
{
    Session storageSession = null;
    foreach (Session sess in sessions)
    {
        if (sess.Kind == SessionType.Backup && sess.Id == s.Id)
        {
            Logger.Append(Severity.DEBUG, "Reusing open session #" + s.Id + " with storage node #" + s.ToNode.Id
                + " (" + s.ToNode.IP + ":" + s.ToNode.ListenPort + ")");
            storageSession = sess;
            storageSession.RenewBudget(s.Budget);
        }
    }
    if (storageSession == null)
    {
        storageSession = new Session(s, cert);
        storageSession.Connect();
        storageSession.SessionRemoved += new Node.Session.RemoveSessionHandler(this.RemoveSession);
        //storageSession.FileSentEvent += new Node.Session.FileSentHandler(this.ChunkSent); //, bsId, chunkName, nodeId);
        storageSession.FileReceivedEvent += new Node.Session.FileReceivedHandler(this.FileReceived);
        if (storageSession.AuthenticatedEvent.WaitOne(new TimeSpan(0, 0, 30)))
        {
            sessions.Add(storageSession);
        }
        else
        {
            throw new TimeoutException("Didn't receive handshake confirmation in time for session with peer #" + s.ToNode.Id);
        }
    }
    //if(SessionReady != null) SessionReady(storageSession);
    return storageSession;
}
public static async Task MainAsync(string[] args)
{
    Options options = Argument.Parse<Options>(args);
    if (options.IsValid())
    {
        FileHash hash = FileHash.Parse(options.Hash);
        NetworkAddress address = new NetworkAddress(options.Host, Int32.Parse(options.Port));

        using (PeerClient client = new PeerClient())
        {
            Notification notification = null;
            PeerSession session = await client.ConnectAsync(hash, address);

            Console.WriteLine($"Hash: {hash}");
            Console.WriteLine($"Peer: {session.Peer}");
            Console.WriteLine();

            switch (options.Command)
            {
                case "download":
                    session.Download(options.Destination);
                    break;
            }

            while (notification?.Type != NotificationType.DataCompleted)
            {
                notification = await session.NextAsync();
                Console.WriteLine(notification);
            }
        }
    }
}
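For context, MainAsync only touches a handful of members of the Options type. The sketch below shows a minimal class that would satisfy that usage; the property names mirror the calls above, while the validation logic is an assumption for illustration, not the project's actual implementation.

// Hypothetical sketch of an Options class compatible with the usage in MainAsync.
// Property names mirror the calls above; the validation rules are assumptions.
public class Options
{
    public string Hash { get; set; }        // info-hash to download, as a hex string
    public string Host { get; set; }        // peer host name or IP address
    public string Port { get; set; }        // peer TCP port, parsed with Int32.Parse above
    public string Command { get; set; }     // e.g. "download"
    public string Destination { get; set; } // local directory for downloaded data

    public bool IsValid()
    {
        // Minimal check: required arguments are present and the port is numeric.
        return !string.IsNullOrEmpty(Hash)
            && !string.IsNullOrEmpty(Host)
            && int.TryParse(Port, out _);
    }
}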
void HandleSessionEvent(PeerSession s, PeerNode fromNode)
{
    // Check that the received Session doesn't claim to own something it doesn't.
    Task curTask = TaskScheduler.Instance().GetTask(s.TaskId);
    PeerSession curSess = sessionsList.GetById(s.Id);
    if (curTask == null || curSess == null)
    {
        throw new NodeSecurityException("Node #" + fromNode.Id + " claims it handles a task (#" + s.TaskId
            + ") or a session (#" + s.Id + ") which doesn't exist");
    }
    if ((s.Kind == SessionType.Backup && (curTask.NodeId != fromNode.Id /*|| curTask.BackupSet.HandledBy != fromNode.Id */))
        || (s.Kind == SessionType.Store && curSess.ToNode.Id != fromNode.Id))
    {
        throw new NodeSecurityException("Node #" + fromNode.Id + " claims it handles a task (#" + s.TaskId + ") it doesn't own!");
    }
    Logger.Append("HUBRN", Severity.TRIVIA, "Task #" + s.TaskId + " : session #" + s.Id + " between node #"
        + s.FromNode.Id + " and node #" + s.ToNode.Id + " ended.");
    curSess.SetUsage(s);
    if (curSess.IsStorageUsageConfirmed())
    {
        Logger.Append("HUBRN", Severity.TRIVIA, "Task #" + s.TaskId + " : session #" + s.Id
            + " : Storage space usage has been double-confirmed");
        PeerNode n = NodesList[curSess.ToNode.Id];
        lock (n)
        {
            // Release the reserved space and account for the space really consumed.
            n.ReservedSpace -= curSess.Budget * curTask.BackupSet.MaxChunkSize;
            n.StorageUsed += curSess.RealHandledData;
        }
    }
}
public void AddIceCandidate(string sdpMid, int sdpMlineIndex, string candidate)
{
    PeerSession.AddIceCandidate(new IceCandidate()
    {
        Content = candidate,
        SdpMid = sdpMid,
        SdpMlineIndex = sdpMlineIndex
    });
}
/// <summary>
/// Sends used space to the Hub, in order to update the storage group's available space
/// </summary>
public void SendSessionUpdate(PeerSession s)
{
    this.StorageSize -= s.RealHandledData;
    HubWrite(new NodeMessage
    {
        Context = MessageContext.Task,
        TaskId = s.TaskId,
        Action = "SESSION",
        Data = s.ToJson<PeerSession>()
        //Data2 = sessionId+""
    });
}
public void Dispose()
{
    try
    {
        // Unable to exit process until DataChannel is removed/disposed,
        // and this throws internally (at least in 2.0 version).
        PeerSession?.RemoveDataChannel(CaptureChannel);
    }
    catch { }

    PeerSession.Transceivers.RemoveAll(x => true);
    Disposer.TryDisposeAll(Transceiver?.LocalVideoTrack, VideoSource, PeerSession);
    GC.SuppressFinalize(this);
}
/// <summary>
/// Adds or removes a transfer session. Also updates the storage node's load accordingly.
/// </summary>
/// <param name='s'>
/// The PeerSession
/// </param>
/// <param name='added'>
/// If set to <c>true</c>, the session has to be added, else it has to be removed
/// </param>
private static void AddRemoveSession(PeerSession s, bool added)
{
    Console.WriteLine("AddRemoveSession1(" + added + ") : Node #" + s.ToNode.Id + " load=" + s.ToNode.CurrentLoad);
    if (added)
    {
        sessionsList.Add(s);
        NodesList[s.ToNode.Id].CurrentLoad += 1 / (s.ToNode.StoragePriority);
    }
    else
    {
        sessionsList.Remove(s);
        NodesList[s.ToNode.Id].CurrentLoad -= 1 / (s.ToNode.StoragePriority);
    }
    Console.WriteLine("AddRemoveSession2(" + added + ") : Node #" + s.ToNode.Id + " load=" + NodesList[s.ToNode.Id].CurrentLoad);
}
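The load bookkeeping above weights each session by the reciprocal of the node's StoragePriority, so a higher priority adds less load per session. The snippet below only illustrates that weighting; it assumes StoragePriority (or CurrentLoad) is a floating-point type, which is not visible in this excerpt (if both were integers, 1 / priority would truncate to zero for priorities above 1).

// Illustration only (not project code): per-session load delta for a few
// hypothetical StoragePriority values, assumed here to be doubles.
static void PrintLoadDeltas()
{
    foreach (double priority in new[] { 0.5, 1.0, 2.0, 4.0 })
    {
        double loadDelta = 1 / priority; // same formula as in AddRemoveSession
        Console.WriteLine($"StoragePriority={priority} -> load added per session={loadDelta}");
    }
    // Prints 2, 1, 0.5 and 0.25: higher-priority nodes accumulate load more slowly,
    // presumably so the hub keeps favouring them when picking destinations.
}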
/// <summary>
/// Request a destination for data chunks.
/// Hub replies with a Session object.
/// </summary>
private static void AskStorage(PeerSession s, int parallelism, bool isIndex, bool alternateRequest)
{
    string action = "ASKSTORAGE";
    if (isIndex)
    {
        action = "INDEXSTORAGE";
    }
    HubWrite(new NodeMessage
    {
        Context = MessageContext.Task,
        Action = action,
        TaskId = s.TaskId,
        Data = s.ToJson<PeerSession>(),
        Data2 = string.Format("{0} {1}", parallelism, alternateRequest)
    });
}
public void Dispose()
{
    try
    {
        // Unable to exit process until DataChannel is removed/disposed,
        // and this throws internally (at least in 2.0 version).
        PeerSession?.RemoveDataChannel(CaptureChannel);
    }
    catch { }

    Disposer.TryDisposeAll(new IDisposable[] { PeerSession, Transceiver?.LocalVideoTrack, VideoSource });
}
public void Dispose()
{
    try
    {
        Transceiver?.LocalVideoTrack?.Dispose();
        VideoSource?.Dispose();
        try
        {
            // Unable to exit process until DataChannel is removed/disposed,
            // and this throws internally (at least in 2.0 version).
            PeerSession?.RemoveDataChannel(CaptureChannel);
        }
        catch { }
        PeerSession?.Dispose();
    }
    catch { }
}
private void CreateStorageSession(PeerSession s, Task currentTask, bool isIndexSession)
{
    // 1 - we tell the storage node to accept the transfer from the client, if the shared key is verified
    NodesList.GetById(s.ToNode.Id).SendSession(s, SessionType.Store, false);
    // 2 - we tell the client node where to put its chunks
    NodesList.GetById(s.FromNode.Id).SendSession(s, SessionType.Backup, isIndexSession);
    NodesList.GetById(s.ToNode.Id).ReservedSpace += currentTask.BackupSet.MaxChunkSize * s.Budget;
    if (sessionsList.GetById(s.Id) == null)
    {
        AddRemoveSession(s, true);
    }
    else
    {
        sessionsList[s.Id].RenewBudget(s.Budget);
    }
}
public Session(PeerSession s, X509Certificate2 nodeCert)
{
    logger = new SessionLogger(this);
    this.Id = s.Id;
    this.Kind = s.Kind;
    this.Budget = s.Budget;
    this.FromNode = s.FromNode;
    this.ToNode = s.ToNode;
    this.TaskId = s.TaskId;
    this.GuidKey = System.Guid.NewGuid().ToByteArray();
    this.Secret = s.Secret;
    //myKeyPairCrypto = csp;
    cert = nodeCert;
    AuthenticatedEvent = new ManualResetEvent(false);
    if (this.Kind == SessionType.Store)
    {
        this.Budget = 0; // bug?? do we have to initialize to 0 when receiving/storing?
        // Client-side flags are not relevant here since the client data processing is not done by this Session:
        // clear each one that is set before building the read pipeline.
        if (s.Flags.HasFlag(DataProcessingFlags.CChecksum))
        {
            s.Flags ^= DataProcessingFlags.CChecksum;
        }
        if (s.Flags.HasFlag(DataProcessingFlags.CCompress))
        {
            s.Flags ^= DataProcessingFlags.CCompress;
        }
        if (s.Flags.HasFlag(DataProcessingFlags.CDedup))
        {
            s.Flags ^= DataProcessingFlags.CDedup;
        }
        if (s.Flags.HasFlag(DataProcessingFlags.CEncrypt))
        {
            s.Flags ^= DataProcessingFlags.CEncrypt;
        }
        this.Flags = s.Flags;
        pipeline = new Node.DataProcessing.DataPipeline(Node.DataProcessing.PipelineMode.Read, this.Flags);
        logger.Log(Severity.DEBUG, "Creating storage session (" + this.Kind.ToString() + ") with client node #"
            + this.FromNode.Id + " (" + this.FromNode.IP + ":<UNAVAILABLE>)");
    }
    else if (this.Kind == SessionType.Backup)
    {
        this.CryptoKey = System.Guid.NewGuid().ToByteArray();
        logger.Log(Severity.DEBUG, "Creating client session #" + this.Id + " with storage node #"
            + this.ToNode.Id + " (" + this.ToNode.IP + ":" + this.ToNode.ListenPort + ")");
    }
}
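The guarded `^=` toggles above clear each client-side flag only when it is set. Assuming DataProcessingFlags is a [Flags] enum, the same effect can be had unconditionally with a bitwise AND-NOT; the helper below is only a sketch of that alternative, not the project's actual code.

// Sketch only: an equivalent, unconditional way to strip the client-side flags,
// assuming DataProcessingFlags is a [Flags] enum. Not the project's actual code.
private static DataProcessingFlags StripClientFlags(DataProcessingFlags flags)
{
    return flags & ~(DataProcessingFlags.CChecksum
                   | DataProcessingFlags.CCompress
                   | DataProcessingFlags.CDedup
                   | DataProcessingFlags.CEncrypt);
}
// Usage in the constructor would then be: this.Flags = StripClientFlags(s.Flags);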
private void RenewStorageSession(Task task, long sessionId, int budget)
{
    PeerSession existingSession = sessionsList.GetById(sessionId);
    if (existingSession != null)
    {
        PeerNode n = NodesList[existingSession.ToNodeId];
        existingSession.RenewBudget(budget);
        if ((n.StorageSize - n.StorageUsed - n.ReservedSpace) > task.BackupSet.MaxChunkSize * budget)
        {
            CreateStorageSession(existingSession, task, false);
        }
        else
        {
            throw new Exception("Cannot renew session #" + sessionId);
        }
    }
}
public async Task SetRemoteDescription(string type, string sdp)
{
    if (!Enum.TryParse<SdpMessageType>(type, true, out var sdpMessageType))
    {
        Logger.Write("Unable to parse remote WebRTC description type.");
        return;
    }

    await PeerSession.SetRemoteDescriptionAsync(new SdpMessage()
    {
        Content = sdp,
        Type = sdpMessageType
    });

    if (sdpMessageType == SdpMessageType.Offer)
    {
        PeerSession.CreateAnswer();
    }
}
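SetRemoteDescription only covers the inbound half of signaling: the answer produced by CreateAnswer() still has to travel back to the caller. The sketch below shows what the outbound wiring could look like, assuming PeerSession is (or wraps) a MixedReality-WebRTC-style peer connection exposing LocalSdpReadytoSend and IceCandidateReadytoSend events; the two delegates stand in for whatever signaling transport the application actually uses.

// Sketch only (not the project's code): the outbound half of signaling.
// Assumes MixedReality-WebRTC-style events on PeerSession; the delegates are hypothetical.
private void WireOutboundSignaling(Action<string, string> sendSdp, Action<string, int, string> sendIceCandidate)
{
    PeerSession.LocalSdpReadytoSend += (SdpMessage message) =>
    {
        // Fires after CreateOffer()/CreateAnswer(); relay the SDP to the remote peer.
        sendSdp(message.Type.ToString().ToLowerInvariant(), message.Content);
    };

    PeerSession.IceCandidateReadytoSend += (IceCandidate candidate) =>
    {
        // Each local candidate must reach the remote side's AddIceCandidate (shown earlier).
        sendIceCandidate(candidate.SdpMid, candidate.SdpMlineIndex, candidate.Content);
    };
}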
private void StartBackupSession(PeerSession s, bool isIndex)
{
    Logger.Append(Severity.DEBUG, "Received permission to store " + s.Budget + " chunks to node #"
        + s.ToNode.Id + " (" + s.ToNode.IP + ":" + s.ToNode.ListenPort + ")");
    try
    {
        Session backupSess = GetStorageSession(s);
        if (!isIndex)
        {
            currentJobs[s.TaskId].SessionReceived(backupSess);
        }
        else
        {
            currentJobs[s.TaskId].SendIndex(backupSess);
        }
    }
    catch (Exception e)
    {
        // Could not get a session with the storage node, ask the hub for an alternate destination.
        Logger.Append(Severity.WARNING, "Could not connect to storage node #" + s.ToNode.Id
            + " (" + e.Message + " --- " + e.StackTrace + "), asking ALTernate destination to hub");
        //AskStorage(s.TaskId, s.Id, 1, true);
        AskStorage(s, 1, false, true);
    }
}
private void ReceiveDelete(PeerSession s, string chunkName)
{
    bool alreadyExistingSession = false;
    foreach (Session listeningSess in sessions)
    {
        if (listeningSess.Kind == SessionType.CleanData && listeningSess.Id == s.Id)
        {
            alreadyExistingSession = true;
            Logger.Append(Severity.DEBUG, "Reusing existing cleaning session #" + s.Id + " with node #"
                + s.FromNode.Id + " (" + s.FromNode.IP + ")");
        }
    }
    if (!alreadyExistingSession)
    {
        Logger.Append(Severity.DEBUG, "Creating new session to delete data from node #" + s.FromNode.Id
            + " (" + s.FromNode.IP + ")");
        Session client1 = new Session(s, cert);
        client1.SessionRemoved += this.RemoveSession;
        client1.UpdateStorageEvent += this.SendSessionUpdate;
        sessions.Add(client1);
        StartListeningForClient(client1);
    }
}
private void CreateStorageSessions(PeerNode askingNode, List<PeerNode> targetNodes, Task currentTask,
    int budget, DataProcessingFlags flags, bool isIndexStorage)
{
    foreach (PeerNode chunkDestNode in targetNodes)
    {
        PeerSession targetSess = null;
        try
        {
            int sessId = sessionsList.Count + 1;
            targetSess = new PeerSession
            {
                FromNode = askingNode,
                ToNode = chunkDestNode,
                Id = sessId, //sessionId,
                Flags = flags, //currentTask.BackupSet.DataFlags,
                TaskId = currentTask.Id,
                Kind = SessionType.Backup,
                Secret = currentTask.EncryptionKey
            };
            targetSess.RenewBudget(budget);
            CreateStorageSession(targetSess, currentTask, isIndexStorage);
            //if (SessionChanged != null && existingSession == null) SessionChanged(true, SessionType.Backup, targetSess.Id, this, chunkDestNode, currentTask.Id, budget);
            // 3 - we add the storage node(s) to task
            currentTask.AddStorageNode(chunkDestNode);
        }
        catch (IOException ioe)
        {
            // change back destination's available space
            //chunkDestNode.Available = chunkDestNode.Available + currentTask.BackupSet.MaxChunkSize*budget;
            Utilities.Logger.Append("HUBRN", Severity.ERROR, "dest " + chunkDestNode.Name + " not available ("
                + ioe.Message + "), looking for an alternate one");
            // try another node, recursive call
            //ChooseStorage(askingNode.Id, s, 1, false, true);
            ChooseStorage(askingNode.Id, new PeerSession { TaskId = currentTask.Id, Id = -1 }, 1, false, true);
        }
        catch (Exception ex)
        {
            Utilities.Logger.Append("HUBRN", Severity.ERROR, "dest " + chunkDestNode.Name + " : " + ex.Message);
        }
    }
}
/// <summary>
/// Gets destinations for storing data chunks. Destination count can range from 1 to R (redundancy level) or 1 to p (p = parallelism).
/// </summary>
/// <param name="nodeId">Id of the client node asking for storage</param>
/// <param name="s">the PeerSession carrying the task and session ids</param>
/// <param name="parallelism">number of destinations to return</param>
/// <param name="isIndex">
/// If set to true, this tells the client to send a confirmation when the chunk has effectively
/// been transferred and stored: this way we can track index location(s) in the database.
/// </param>
/// <param name="isAlternateRequest">true when asking for a replacement destination after a failure</param>
//private void ChooseStorage(int nodeId, long taskId, long sessionId, int parallelism, bool isIndex, bool isAlternateRequest){
private void ChooseStorage(uint nodeId, PeerSession s, int parallelism, bool isIndex, bool isAlternateRequest)
{
    Task currentTask = TaskScheduler.Instance().GetTask(s.TaskId);
    try
    {
        if (s.Id > 0 && !isAlternateRequest)
        {
            RenewStorageSession(currentTask, s.Id, 20);
            return;
        }
    }
    catch (Exception e)
    {
        Logger.Append("HUBRN", Severity.WARNING, "Could not renew storage session #" + s.Id
            + ", will try to obtain a new one. Error : " + e.Message);
        isAlternateRequest = true;
    }
    PeerNode askingNode = NodesList[nodeId];
    Console.WriteLine("choosedestinations : askingNode = #" + askingNode.Id);
    List<PeerNode> dests = new List<PeerNode>();
    List<P2PBackup.Common.Node> excludedDests = new List<P2PBackup.Common.Node>();
    excludedDests.Add(askingNode);
    int budget = currentTask.StorageBudget;
    if (budget == 0)
    {
        budget = 20; // default value if the task has never run before
    }
    if (isIndex)
    {
        // only return a session with a budget of 1
        Console.WriteLine("ChooseDestinations() : requested index storage");
        budget = 1;
        parallelism = 1;
    }
    if (isAlternateRequest)
    {
        // request for a new destination after a failure with an already existing session
        currentTask.AddLogEntry(new TaskLogEntry { TaskId = s.TaskId, Code = 601, Message1 = "" + s.Id });
        AddRemoveSession(s.Id, false);
        excludedDests.AddRange(currentTask.StorageNodes);
        parallelism = 1;
    }
    if (dests.Count == 0)
    {
        // not asked to renew an already existing session (thus an existing destination)
        dests = CalculateChunkDestinations(s.TaskId, parallelism, budget, excludedDests);
    }
    if (dests.Count > 0)
    {
        // for each storage node, budget = budget/nbNodes + 1
        int perNodeBudget = budget / dests.Count + 1;
        CreateStorageSessions(askingNode, dests, currentTask, perNodeBudget, currentTask.BackupSet.DataFlags, isIndex);
    }
    else
    {
        Utilities.Logger.Append("HUBRN", Severity.WARNING, "Task #" + s.TaskId + " : No storage space available for request of client #"
            + askingNode.Id + " (" + askingNode.Name + "), <TODO> handle that and report");
        TaskScheduler.AddTaskLogEntry(s.TaskId, new TaskLogEntry { TaskId = s.TaskId, Code = 806 });
        TaskScheduler.Instance().SetTaskStatus(s.TaskId, TaskStatus.Error);
        TaskScheduler.Instance().SetTaskRunningStatus(s.TaskId, TaskRunningStatus.Cancelled);
        askingNode.ManageTask(currentTask, TaskAction.Cancel);
    }
}
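The integer split `budget / dests.Count + 1` deliberately rounds each per-node share up, so the combined shares always cover at least the requested budget. The snippet below is a purely illustrative worked example of that arithmetic, not project code.

// Worked illustration of the per-node budget split used in ChooseStorage.
// Integer division truncates, and the +1 rounds the share up so that
// destCount * perNodeBudget always reaches at least the requested budget.
static void PrintBudgetSplit()
{
    int budget = 20;
    foreach (int destCount in new[] { 1, 2, 3, 4 })
    {
        int perNodeBudget = budget / destCount + 1; // 21, 11, 7, 6
        Console.WriteLine($"{destCount} destination(s): {perNodeBudget} chunks each, "
            + $"total capacity reserved = {destCount * perNodeBudget}");
    }
}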
private void HandleTaskMessage(NodeMessage message)
{
    //TODO! security : verify that the task is really assigned to this node
    string[] decoded = message.Data.Split(new char[] { ' ' });
    string[] decoded2 = string.IsNullOrEmpty(message.Data2) ? null : message.Data2.Split(new char[] { ' ' });
    switch (message.Action)
    {
        case "TASK":
            long taskId = message.TaskId;
            int code = int.Parse(decoded[0]);
            string data = String.Empty;
            string msg = String.Empty;
            if (decoded.Length > 3)
            {
                data = decoded[3];
                for (int i = 4; i < decoded.Length; i++)
                {
                    msg += decoded[i] + " ";
                }
            }
            if (code < 700)
            {
                if (code == 699)
                {
                    // session ended
                    //TaskScheduler.Instance().RemoveTaskSession(taskId, int.Parse(data));
                    //if (SessionChanged != null) SessionChanged(false, SessionType.Backup, short.Parse(data), this, null, taskId, 0);
                }
            }
            if (code == 700)
            {
                // messages updating CurrentAction, not archived
                TaskScheduler.Instance().SetTaskCurrentActivity(taskId, code, msg);
                return;
            }
            else if (code < 800)
            {
                // INFO class messages
                TaskScheduler.Instance().SetTaskCurrentActivity(taskId, code, data);
            }
            else if (code < 900)
            {
                // WARNING class messages
                TaskScheduler.Instance().SetTaskStatus(taskId, TaskStatus.Warning);
            }
            else
            {
                // ERROR messages
                TaskScheduler.Instance().SetTaskCurrentActivity(taskId, code, data);
                TaskScheduler.Instance().SetTaskStatus(taskId, TaskStatus.Error);
            }
            TaskScheduler.AddTaskLogEntry(taskId, new TaskLogEntry { TaskId = taskId, Code = code, Message1 = data });
            /*break;
            default:
                Logger.Append("DECODE_TASK", Severity.ERROR, "Unknown TaskContext message : " + message.Data);
                break;
            }*/
            break;
        case "TASKSTATS": // "DBU"
            TaskScheduler.Instance().UpdateTaskStats(message.TaskId, long.Parse(decoded[0]), long.Parse(decoded[1]),
                long.Parse(decoded[2]), int.Parse(decoded[3]));
            break;
        case "ASKSTORAGE": // client node asks where to store data chunks
            PeerSession s = message.Data.FromJson<PeerSession>();
            if (s.Id > 0)
            {
                // Node consumed its previous session budget, update storage space
                SessionEvent(s, this);
            }
            //NeedStorageEvent(this.Id, message.TaskId, sessionId, int.Parse(decoded[1]), false, bool.Parse(decoded[2]));
            if (NeedStorageEvent != null)
            {
                NeedStorageEvent(this.Id, s, int.Parse(decoded2[0]), false, bool.Parse(decoded2[1]));
            }
            break;
        case "SESSION":
            if (SessionEvent != null)
            {
                SessionEvent(message.Data.FromJson<PeerSession>(), this);
            }
            break;
        case "INDEXSTORAGE": // client node asks where to store index chunks
            PeerSession ps = message.Data.FromJson<PeerSession>();
            if (NeedStorageEvent != null)
            {
                NeedStorageEvent(this.Id, ps, 1, true, false); // TODO : parse from message instead of passing false, to know whether the index request is an alternate one
            }
            break;
        case "TASKDONE":
            SendAvailableSpace();
            TaskScheduler.Instance().UpdateTerminatedTask(message.Data.FromJson<Task>());
            break;
        default:
            throw new P2PBackup.Common.ProtocolViolationException(message);
    }
}